2018-06-04 16:34:32 +00:00
|
|
|
import functools
|
|
|
|
import os
|
2018-04-03 16:55:10 +00:00
|
|
|
import pytest
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
from trezorlib.transport import get_transport
|
|
|
|
from trezorlib.client import TrezorClient, TrezorClientDebugLink
|
|
|
|
from trezorlib import log, coins
|
|
|
|
|
|
|
|
|
|
|
|
def get_device():
    """Locate a transport to the connected Trezor.

    Honours the TREZOR_PATH environment variable when set; otherwise
    get_transport() enumerates and picks the first available device.
    """
    return get_transport(os.environ.get('TREZOR_PATH'))
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
def device_version():
    """Return the major model of the connected Trezor: 2 for model T, else 1.

    Raises RuntimeError when no device transport can be found.
    """
    transport = get_device()
    if not transport:
        raise RuntimeError()
    features = TrezorClient(transport).features
    return 2 if features.model == "T" else 1
|
|
|
|
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
# Model of the connected device (1 or 2), detected once at import time;
# consulted by pytest_runtest_setup to honour skip_t1/skip_t2 markers.
TREZOR_VERSION = device_version()
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture(scope="function")
def client():
    """Yield a wiped TrezorClientDebugLink attached to the device.

    Per-test fixture: connects both the wire and debug transports, pins the
    Bitcoin tx API, wipes the device to a known state, and keeps a transport
    session open for the duration of the test.
    """
    transport = get_device()
    debug_transport = transport.find_debug()

    test_client = TrezorClientDebugLink(transport)
    test_client.set_debuglink(debug_transport)
    test_client.set_tx_api(coins.tx_api['Bitcoin'])
    test_client.wipe_device()
    test_client.transport.session_begin()

    yield test_client

    test_client.transport.session_end()
|
|
|
|
|
|
|
|
|
|
|
|
def setup_client(mnemonic=None, pin='', passphrase=False):
    """Decorator factory that loads a known state onto the device first.

    The wrapped test receives the client fixture as its first argument;
    before the test body runs, the device is initialised via
    load_device_by_mnemonic. mnemonic defaults to the standard
    "all all all ..." test seed, and pin=True is shorthand for the
    conventional test PIN '1234'.
    """
    seed = ' '.join(['all'] * 12) if mnemonic is None else mnemonic
    effective_pin = '1234' if pin is True else pin

    def decorate(test_fn):
        @functools.wraps(test_fn)
        def wrapped(client, *args, **kwargs):
            # Put the device into the requested state, then run the test.
            client.load_device_by_mnemonic(mnemonic=seed, pin=effective_pin, passphrase_protection=passphrase, label='test', language='english')
            return test_fn(client, *args, **kwargs)

        return wrapped

    return decorate
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
2018-05-09 16:12:31 +00:00
|
|
|
def pytest_configure(config):
    """Pytest hook: enable trezorlib debug logging when run with -v."""
    verbose = config.getoption('verbose')
    if verbose:
        log.enable_debug_output()
|
|
|
|
|
|
|
|
|
# device_tests: allow custom runxfail
# We can now selectively runxfail certain tests. This is useful for
# accepting PRs into trezor-core:
# 1. trezor-core is going to get a pytest.ini that sets xfail_strict.
#    That means that if an `xfail`ed test actually passes, that will
#    break the test suite. So it will be visible when we implement
#    a feature for which tests exist.
# 2. To allow PRs to pass the test suite without touching python-trezor
#    directly, we add a new pytest.ini option: run_xfail.
#    This adds a list of markers which will ignore `xfail`.
#    So:
#    2.1 First, the python-trezor PR marks the tests with the name
#        of the feature. This commit already does that: Lisk tests
#        are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
#        etc.
#        The tests will be also marked with `xfail`, because the
#        feature is not in core yet.
#    2.2 Then, the trezor-core PR implements the feature, which makes
#        the `xfail`ed tests pass. That breaks the test suite.
#    2.3 To fix the test suite, the core PR also adds a `run_xfail`
#        to `pytest.ini`: `run_xfail = lisk`.
#        (it can take a list: `run_xfail = lisk nem stellar`)
#        That will make the test suite behave as if the tests are not
#        `xfail`ed. If the feature is implemented correctly, the tests
#        will pass.
#    2.4 When the PR is accepted to core, the next step should be
#        a PR to python-trezor that removes the `xfail`s. After that,
#        we should also remove the `run_xfail` option, just to be tidy.
# 2018-05-21 15:44:02 +00:00
|
|
|
def pytest_addoption(parser):
    """Pytest hook: register the run_xfail ini option used by this conftest."""
    parser.addini(
        "run_xfail",
        "List of markers that will run even if marked as xfail",
        "args",
        [],
    )
|
|
|
|
|
|
|
|
|
2018-04-03 16:55:10 +00:00
|
|
|
def pytest_runtest_setup(item):
    """
    Called for each test item (class, individual tests).

    Performs custom processing, mainly useful for trezor CI testing:

    * 'skip_t2' tests are skipped on T2 and 'skip_t1' tests are skipped on T1.
    * no test should have both skips at the same time
    * allows to 'runxfail' tests specified by 'run_xfail' in pytest.ini
    """
    skip_on_t1 = item.get_marker("skip_t1")
    skip_on_t2 = item.get_marker("skip_t2")

    if skip_on_t1 and skip_on_t2:
        pytest.fail("Don't skip tests for both trezors!")
    if skip_on_t2 and TREZOR_VERSION == 2:
        pytest.skip("Test excluded on Trezor T")
    if skip_on_t1 and TREZOR_VERSION == 1:
        pytest.skip("Test excluded on Trezor 1")

    marked_xfail = item.get_marker("xfail")
    wants_runxfail = any(item.get_marker(m) for m in item.config.getini("run_xfail"))
    if marked_xfail and wants_runxfail:
        # Deep hack: pytest's private _evalxfail helper decides whether the
        # test should xfail, and caches its result before this hook runs.
        # Force the cached result to False ("does NOT xfail, run as normal"),
        # which is effectively a per-item "--runxfail".
        item._evalxfail.result = False
|