2018-06-21 14:28:34 +00:00
|
|
|
# This file is part of the Trezor project.
|
|
|
|
#
|
2019-05-29 16:44:09 +00:00
|
|
|
# Copyright (C) 2012-2019 SatoshiLabs and contributors
|
2018-06-21 14:28:34 +00:00
|
|
|
#
|
|
|
|
# This library is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU Lesser General Public License version 3
|
|
|
|
# as published by the Free Software Foundation.
|
|
|
|
#
|
|
|
|
# This library is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU Lesser General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the License along with this library.
|
|
|
|
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
import os
|
2018-08-21 14:21:49 +00:00
|
|
|
|
2018-04-03 16:55:10 +00:00
|
|
|
import pytest
|
|
|
|
|
2018-11-02 15:25:51 +00:00
|
|
|
from trezorlib import debuglink, log
|
2018-10-02 15:18:13 +00:00
|
|
|
from trezorlib.debuglink import TrezorClientDebugLink
|
2020-10-04 21:46:54 +00:00
|
|
|
from trezorlib.device import apply_settings, wipe as wipe_device
|
2018-08-21 14:21:49 +00:00
|
|
|
from trezorlib.transport import enumerate_devices, get_transport
|
2018-06-04 16:34:32 +00:00
|
|
|
|
2019-12-30 11:20:51 +00:00
|
|
|
from . import ui_tests
|
2019-10-18 11:35:55 +00:00
|
|
|
from .device_handler import BackgroundDeviceHandler
|
2020-05-11 14:23:12 +00:00
|
|
|
from .ui_tests.reporting import testreport
|
2019-10-16 15:39:06 +00:00
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
|
|
|
|
def get_device():
    """Connect to a debug-enabled Trezor and return a TrezorClientDebugLink.

    If TREZOR_PATH is set in the environment, only that transport is tried.
    Otherwise every enumerable device is tried in turn and the first one that
    accepts a debuglink connection is used. Setting INTERACT to a nonzero
    value disables automatic debuglink interaction (manual confirmation).

    Raises:
        RuntimeError: if no debuggable device can be opened.
    """
    path = os.environ.get("TREZOR_PATH")
    interact = int(os.environ.get("INTERACT", 0))
    auto_interact = not interact

    if path:
        try:
            transport = get_transport(path)
            return TrezorClientDebugLink(transport, auto_interact=auto_interact)
        except Exception as e:
            raise RuntimeError("Failed to open debuglink for {}".format(path)) from e

    # No explicit path: best-effort over all enumerated devices.
    for device in enumerate_devices():
        try:
            return TrezorClientDebugLink(device, auto_interact=auto_interact)
        except Exception:
            # this device did not open; try the next one
            pass
    raise RuntimeError("No debuggable device found")
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
@pytest.fixture(scope="function")
def client(request):
    """Client fixture.

    Every test function that requires a client instance will get it from here.
    If we can't connect to a debuggable device, the test will fail.
    If 'skip_t2' is used and TT is connected, the test is skipped. Vice versa with T1
    and 'skip_t1'.

    The client instance is wiped and preconfigured with "all all all..." mnemonic, no
    password and no pin. It is possible to customize this with the `setup_client`
    marker.

    To specify a custom mnemonic and/or custom pin and/or enable passphrase:

    @pytest.mark.setup_client(mnemonic=MY_MNEMONIC, pin="9999", passphrase=True)

    To receive a client instance that was not initialized:

    @pytest.mark.setup_client(uninitialized=True)
    """
    try:
        client = get_device()
    except RuntimeError:
        # no device -- stop the whole session, every other test would fail too
        request.session.shouldstop = "No debuggable Trezor is available"
        pytest.fail("No debuggable Trezor is available")

    if request.node.get_closest_marker("skip_t2") and client.features.model == "T":
        pytest.skip("Test excluded on Trezor T")
    if request.node.get_closest_marker("skip_t1") and client.features.model == "1":
        pytest.skip("Test excluded on Trezor 1")

    sd_marker = request.node.get_closest_marker("sd_card")
    if sd_marker and not client.features.sd_card_present:
        raise RuntimeError(
            "This test requires SD card.\n"
            "To skip all such tests, run:\n"
            "  pytest -m 'not sd_card' <test path>"
        )

    test_ui = request.config.getoption("ui")
    # skip_ui marker opts an individual test out of screen recording
    run_ui_tests = not request.node.get_closest_marker("skip_ui") and test_ui

    client.open()
    if run_ui_tests:
        # we need to reseed before the wipe
        client.debug.reseed(0)

    if sd_marker:
        # sd_card marker may request an unformatted card via formatted=False
        should_format = sd_marker.kwargs.get("formatted", True)
        client.debug.erase_sd_card(format=should_format)

    # start every test from a clean device state
    wipe_device(client)

    # defaults; individual keys can be overridden by the setup_client marker
    setup_params = dict(
        uninitialized=False,
        mnemonic=" ".join(["all"] * 12),
        pin=None,
        passphrase=False,
        needs_backup=False,
        no_backup=False,
    )

    marker = request.node.get_closest_marker("setup_client")
    if marker:
        setup_params.update(marker.kwargs)

    # passphrase=True enables passphrase protection; a str additionally
    # selects the passphrase value to use (see below)
    use_passphrase = setup_params["passphrase"] is True or isinstance(
        setup_params["passphrase"], str
    )

    if not setup_params["uninitialized"]:
        debuglink.load_device(
            client,
            mnemonic=setup_params["mnemonic"],
            pin=setup_params["pin"],
            passphrase_protection=use_passphrase,
            label="test",
            language="en-US",
            needs_backup=setup_params["needs_backup"],
            no_backup=setup_params["no_backup"],
        )

        if client.features.model == "T":
            apply_settings(client, experimental_features=True)

        if use_passphrase and isinstance(setup_params["passphrase"], str):
            client.use_passphrase(setup_params["passphrase"])

        client.clear_session()

    if run_ui_tests:
        with ui_tests.screen_recording(client, request):
            yield client
    else:
        yield client

    client.close()
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
2020-01-09 14:25:45 +00:00
|
|
|
def pytest_sessionstart(session):
    """Called once before any test runs.

    Loads the UI fixtures file; in --ui=test mode also clears out the
    report directory left over from a previous run.
    """
    ui_tests.read_fixtures()
    ui_mode = session.config.getoption("ui")
    if ui_mode == "test":
        testreport.clear_dir()
|
2020-01-09 14:25:45 +00:00
|
|
|
|
|
|
|
|
2020-02-24 14:38:02 +00:00
|
|
|
def _should_write_ui_report(exitstatus):
    """Return True if the UI report should be generated for this exit status.

    Generating the report and checking for missing tests only makes sense
    when pytest is exiting cleanly -- i.e. the suite ran to completion and
    either passed or failed (as opposed to Ctrl+C break, internal error,
    etc.).
    """
    clean_exits = (pytest.ExitCode.OK, pytest.ExitCode.TESTS_FAILED)
    return exitstatus in clean_exits
|
|
|
|
|
|
|
|
|
2020-01-09 14:25:45 +00:00
|
|
|
def pytest_sessionfinish(session, exitstatus):
    """Called after the whole test session finishes.

    Writes UI test artifacts (fixtures suggestion, report index, recorded
    fixtures), but only when pytest exited cleanly (see
    _should_write_ui_report).
    """
    if not _should_write_ui_report(exitstatus):
        return

    missing = session.config.getoption("ui_check_missing")
    if session.config.getoption("ui") == "test":
        if missing and ui_tests.list_missing():
            # expected UI tests did not run: fail the suite and emit a
            # suggested fixtures.json for the developer to review
            session.exitstatus = pytest.ExitCode.TESTS_FAILED
            ui_tests.write_fixtures_suggestion(missing)
        testreport.index()
    if session.config.getoption("ui") == "record":
        # `missing` controls whether entries for tests that did not run
        # are dropped from the recorded fixtures
        ui_tests.write_fixtures(missing)
|
2020-01-09 14:25:45 +00:00
|
|
|
|
|
|
|
|
|
|
|
def pytest_terminal_summary(terminalreporter, exitstatus, config):
    """Add a UI-testing section to pytest's terminal summary.

    Reports how many expected UI tests did not run (and lists them when
    --ui-check-missing is on), then points at the generated UI artifacts.
    """
    println = terminalreporter.write_line
    println("")

    ui_option = config.getoption("ui")
    missing_tests = ui_tests.list_missing()
    if ui_option and _should_write_ui_report(exitstatus) and missing_tests:
        println(f"{len(missing_tests)} expected UI tests did not run.")
        if config.getoption("ui_check_missing"):
            println("-------- List of missing tests follows: --------")
            for test in missing_tests:
                println("\t" + test)

            if ui_option == "test":
                println("UI test failed.")
            elif ui_option == "record":
                println("Removing missing tests from record.")
            println("")

    if ui_option == "test" and _should_write_ui_report(exitstatus):
        println("\n-------- Suggested fixtures.json diff: --------")
        # NOTE(review): bare print() goes to stdout instead of through the
        # terminal reporter like the surrounding lines -- confirm intentional
        print("See", ui_tests.SUGGESTION_FILE)
        println("")

    if _should_write_ui_report(exitstatus):
        println("-------- UI tests summary: --------")
        println(f"{testreport.REPORTS_PATH / 'index.html'}")
        println("")
|
2020-01-09 14:25:45 +00:00
|
|
|
|
|
|
|
|
2019-12-09 16:01:04 +00:00
|
|
|
def pytest_addoption(parser):
    """Register command-line options for the UI integration tests.

    --ui: selects the UI test mode, either 'record' or 'test'.
    --ui-check-missing: additionally verify that the UI fixtures cover
        exactly the tests that ran.
    """
    parser.addoption(
        "--ui",
        action="store",
        choices=["test", "record"],
        # fixed typo: "intergration" -> "integration"
        help="Enable UI integration tests: 'record' or 'test'",
    )
    parser.addoption(
        "--ui-check-missing",
        action="store_true",
        default=False,
        # trailing space added so the concatenated halves don't run together
        help="Check UI fixtures are containing the appropriate test cases (fails on `test`, "
        "deletes old ones on `record`).",
    )
|
2019-12-09 16:01:04 +00:00
|
|
|
|
|
|
|
|
2018-05-09 16:12:31 +00:00
|
|
|
def pytest_configure(config):
    """Called at testsuite setup time.

    Registers known markers, enables verbose output if requested.
    """
    # markers consumed by the client fixture
    config.addinivalue_line("markers", "skip_t1: skip the test on Trezor One")
    config.addinivalue_line("markers", "skip_t2: skip the test on Trezor T")
    config.addinivalue_line(
        "markers",
        'setup_client(mnemonic="all all all...", pin=None, passphrase=False, uninitialized=False): configure the client instance',
    )
    config.addinivalue_line(
        "markers", "skip_ui: skip UI integration checks for this test"
    )

    # per-coin markers live in a separate file next to this one
    markers_file = os.path.join(os.path.dirname(__file__), "REGISTERED_MARKERS")
    with open(markers_file) as f:
        for line in f:
            config.addinivalue_line("markers", line.strip())

    # -v / --verbose also turns on trezorlib wire-level debug logging
    if config.getoption("verbose"):
        log.enable_debug_output()
|
|
|
|
|
|
|
|
|
2018-04-03 16:55:10 +00:00
|
|
|
def pytest_runtest_setup(item):
    """Called for each test item (class, individual tests).

    Ensures that altcoin tests are skipped when requested via environment,
    and that no test is marked as skipped on both T1 and TT at once.
    """
    skips_t1 = item.get_closest_marker("skip_t1")
    skips_t2 = item.get_closest_marker("skip_t2")
    if skips_t1 and skips_t2:
        raise RuntimeError("Don't skip tests for both trezors!")

    # opt-out for coins not enabled in the firmware under test
    skip_altcoins = int(os.environ.get("TREZOR_PYTEST_SKIP_ALTCOINS", 0))
    if skip_altcoins and item.get_closest_marker("altcoin"):
        pytest.skip("Skipping altcoin test")
|
2019-10-16 15:39:06 +00:00
|
|
|
|
|
|
|
|
2020-12-10 09:37:02 +00:00
|
|
|
def pytest_runtest_teardown(item):
    """Called after a test item finishes.

    Dumps the current UI test report HTML so the report stays up to date
    while the suite is still running (only in --ui=test mode).
    """
    ui_option = item.session.config.getoption("ui")
    if ui_option == "test":
        testreport.index()
|
|
|
|
|
|
|
|
|
2019-10-16 15:39:06 +00:00
|
|
|
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Make test results available in fixtures.
    # See https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
    # The device_handler fixture uses this as 'request.node.rep_call.passed' attribute,
    # in order to raise error only if the test passed.
    outcome = yield
    rep = outcome.get_result()
    # stored as item.rep_setup / item.rep_call / item.rep_teardown,
    # one attribute per test phase
    setattr(item, f"rep_{rep.when}", rep)
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def device_handler(client, request):
    """Fixture wrapping the client in a BackgroundDeviceHandler.

    On teardown, if the test finished and passed, verifies that the test
    collected the result of any background task it started.
    """
    device_handler = BackgroundDeviceHandler(client)
    yield device_handler

    # if test did not finish, e.g. interrupted by Ctrl+C, the pytest_runtest_makereport
    # did not create the attribute we need
    if not hasattr(request.node, "rep_call"):
        return

    # if test finished, make sure all background tasks are done
    finalized_ok = device_handler.check_finalize()
    if request.node.rep_call.passed and not finalized_ok:
        raise RuntimeError("Test did not check result of background task")
|