2018-06-21 14:28:34 +00:00
|
|
|
# This file is part of the Trezor project.
|
|
|
|
#
|
|
|
|
# Copyright (C) 2012-2018 SatoshiLabs and contributors
|
|
|
|
#
|
|
|
|
# This library is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU Lesser General Public License version 3
|
|
|
|
# as published by the Free Software Foundation.
|
|
|
|
#
|
|
|
|
# This library is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU Lesser General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the License along with this library.
|
|
|
|
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
import functools
|
|
|
|
import os
|
2018-08-21 14:21:49 +00:00
|
|
|
|
2018-04-03 16:55:10 +00:00
|
|
|
import pytest
|
|
|
|
|
2018-11-02 15:25:51 +00:00
|
|
|
from trezorlib import debuglink, log
|
2018-10-02 15:18:13 +00:00
|
|
|
from trezorlib.debuglink import TrezorClientDebugLink
|
2018-10-02 15:37:03 +00:00
|
|
|
from trezorlib.device import wipe as wipe_device
|
2018-08-21 14:21:49 +00:00
|
|
|
from trezorlib.transport import enumerate_devices, get_transport
|
2018-06-04 16:34:32 +00:00
|
|
|
|
2018-08-21 14:06:18 +00:00
|
|
|
# Major model of the connected Trezor (1 or 2); populated in pytest_configure()
# via device_version(), and read by pytest_runtest_setup() for skip markers.
TREZOR_VERSION = None
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
|
|
|
|
def get_device():
    """Locate a debug-capable Trezor and return a TrezorClientDebugLink for it.

    If the TREZOR_PATH environment variable is set, that transport path is
    used directly; otherwise the first enumerated device exposing a debug
    interface is picked.  The INTERACT environment variable (0/1) controls
    whether user interaction is expected (disables auto_interact).

    Raises RuntimeError when no debuggable device is found or the debuglink
    cannot be opened.
    """
    explicit_path = os.environ.get("TREZOR_PATH")
    if explicit_path:
        transport = get_transport(explicit_path)
    else:
        # No explicit path: scan for the first device with a debug interface.
        for candidate in enumerate_devices():
            if hasattr(candidate, "find_debug"):
                transport = candidate
                break
        else:
            raise RuntimeError("No debuggable device found")

    interactive = int(os.environ.get("INTERACT", 0))
    try:
        return TrezorClientDebugLink(transport, auto_interact=not interactive)
    except Exception as e:
        raise RuntimeError(
            "Failed to open debuglink for {}".format(transport.get_path())
        ) from e
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
|
|
|
def device_version():
    """Return the major model number of the connected Trezor.

    Opens a fresh debuglink client and inspects its reported features:
    model "T" maps to 2, anything else to 1.
    """
    features = get_device().features
    return 2 if features.model == "T" else 1
|
|
|
|
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
@pytest.fixture(scope="function")
|
|
|
|
def client():
|
2019-01-31 14:23:39 +00:00
|
|
|
client = get_device()
|
2018-10-02 15:37:03 +00:00
|
|
|
wipe_device(client)
|
2018-06-04 16:34:32 +00:00
|
|
|
|
2018-11-08 17:08:02 +00:00
|
|
|
client.open()
|
2018-06-04 16:34:32 +00:00
|
|
|
yield client
|
2018-06-22 12:17:59 +00:00
|
|
|
client.close()
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
|
2018-08-13 16:21:24 +00:00
|
|
|
def setup_client(mnemonic=None, pin="", passphrase=False):
    """Decorator factory that pre-loads device state before running a test.

    The returned decorator wraps a test function taking a `client` first
    argument; before invoking it, the device is loaded with the given
    mnemonic, PIN and passphrase-protection setting.

    Defaults: mnemonic of twelve "all" words, empty PIN.  Passing
    ``pin=True`` selects the canonical test PIN "1234".
    """
    effective_mnemonic = " ".join(["all"] * 12) if mnemonic is None else mnemonic
    effective_pin = "1234" if pin is True else pin

    def decorator(function):
        @functools.wraps(function)
        def wrapper(client, *args, **kwargs):
            # Push the requested seed/PIN/passphrase state onto the device
            # before the actual test body runs.
            debuglink.load_device_by_mnemonic(
                client,
                mnemonic=effective_mnemonic,
                pin=effective_pin,
                passphrase_protection=passphrase,
                label="test",
                language="english",
            )
            return function(client, *args, **kwargs)

        return wrapper

    return decorator
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
2018-05-09 16:12:31 +00:00
|
|
|
def pytest_configure(config):
    """Session-start hook: detect the connected Trezor model.

    Stores the result in the module-level TREZOR_VERSION so that
    per-test skip markers can consult it.  With -v/--verbose, also
    turns on trezorlib wire logging.
    """
    global TREZOR_VERSION
    TREZOR_VERSION = device_version()
    if config.getoption("verbose"):
        log.enable_debug_output()
|
|
|
|
|
|
|
|
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
def pytest_addoption(parser):
    """Register the suite's custom CLI flag and pytest.ini option."""
    # CLI flag: let a human confirm on-device prompts instead of debuglink.
    parser.addoption(
        "--interactive",
        action="store_true",
        help="Wait for user to do interaction manually",
    )
    # pytest.ini option (space-separated list): markers whose tests run
    # even when they carry an xfail marker.
    parser.addini(
        "run_xfail",
        "List of markers that will run even tests that are marked as xfail",
        "args",
        [],
    )
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
|
|
|
|
|
2018-04-03 16:55:10 +00:00
|
|
|
def pytest_runtest_setup(item):
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
"""
|
2018-04-03 16:55:10 +00:00
|
|
|
Called for each test item (class, individual tests).
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
|
|
|
|
Performs custom processing, mainly useful for trezor CI testing:
|
|
|
|
* 'skip_t2' tests are skipped on T2 and 'skip_t1' tests are skipped on T1.
|
|
|
|
* no test should have both skips at the same time
|
|
|
|
* allows to 'runxfail' tests specified by 'run_xfail' in pytest.ini
|
|
|
|
"""
|
2019-01-28 16:11:37 +00:00
|
|
|
if item.get_closest_marker("skip_t1") and item.get_closest_marker("skip_t2"):
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
pytest.fail("Don't skip tests for both trezors!")
|
|
|
|
|
2019-01-28 16:11:37 +00:00
|
|
|
if item.get_closest_marker("skip_t2") and TREZOR_VERSION == 2:
|
2018-04-03 16:55:10 +00:00
|
|
|
pytest.skip("Test excluded on Trezor T")
|
2019-01-28 16:11:37 +00:00
|
|
|
if item.get_closest_marker("skip_t1") and TREZOR_VERSION == 1:
|
2018-04-03 16:55:10 +00:00
|
|
|
pytest.skip("Test excluded on Trezor 1")
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
|
2019-01-28 16:11:37 +00:00
|
|
|
xfail = item.get_closest_marker("xfail")
|
2018-08-13 16:21:24 +00:00
|
|
|
runxfail_markers = item.config.getini("run_xfail")
|
2019-01-28 16:11:37 +00:00
|
|
|
run_xfail = any(item.get_closest_marker(marker) for marker in runxfail_markers)
|
device_tests: allow custom runxfail
We can now selectively runxfail certain tests. This is useful for
accepting PRs into trezor-core:
1. trezor-core is going to get a pytest.ini that sets xfail_strict.
That means that if an `xfail`ed test actually passes, that will
break the test suite. So it will be visible when we implement
a feature for which tests exist.
2. To allow PRs to pass the test suite without touching python-trezor
directly, we add a new pytest.ini option: run_xfail.
This adds a list of markers which will ignore `xfail`.
So:
2.1 First, the python-trezor PR marks the tests with the name
of the feature. This commit already does that: Lisk tests
are marked `@pytest.mark.lisk`, NEMs are `@pytest.mark.nem`,
etc.
The tests will be also marked with `xfail`, because the
feature is not in core yet.
2.2 Then, the trezor-core PR implements the feature, which makes
the `xfail`ed tests pass. That breaks the test suite.
2.3 To fix the test suite, the core PR also adds a `run_xfail`
to `pytest.ini`: `run_xfail = lisk`.
(it can take a list: `run_xfail = lisk nem stellar`)
That will make the test suite behave as if the tests are not
`xfail`ed. If the feature is implemented correctly, the tests
will pass.
2.4 When the PR is accepted to core, the next step should be
a PR to python-trezor that removes the `xfail`s. After that,
we should also remove the `run_xfail` option, just to be tidy.
2018-05-21 15:44:02 +00:00
|
|
|
if xfail and run_xfail:
|
|
|
|
# Deep hack: pytest's private _evalxfail helper determines whether the test should xfail or not.
|
|
|
|
# The helper caches its result even before this hook runs.
|
|
|
|
# Here we force-set the result to False, meaning "test does NOT xfail, run as normal"
|
|
|
|
# IOW, this is basically per-item "--runxfail"
|
|
|
|
item._evalxfail.result = False
|