
# This file is part of the Trezor project.
#
# Copyright (C) 2012-2019 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
import os
2022-02-08 15:37:34 +00:00
from typing import TYPE_CHECKING, Generator
import pytest
2018-11-02 15:25:51 +00:00
from trezorlib import debuglink, log
from trezorlib.debuglink import TrezorClientDebugLink as Client
from trezorlib.device import apply_settings, wipe as wipe_device
from trezorlib.transport import enumerate_devices, get_transport
2019-12-30 11:20:51 +00:00
from . import ui_tests
from .device_handler import BackgroundDeviceHandler
from .ui_tests.reporting import testreport
if TYPE_CHECKING:
from _pytest.config import Config
from _pytest.config.argparsing import Parser
from _pytest.terminal import TerminalReporter
# So that we see details of failed asserts from this module
pytest.register_assert_rewrite("tests.common")
@pytest.fixture(scope="session")
def _raw_client(request: pytest.FixtureRequest) -> Client:
    """Return a debug-capable client for the physical/emulated Trezor.

    Honors two environment variables:
    * TREZOR_PATH - transport path of the device to use; when unset, the
      first enumerable device that opens successfully is used.
    * INTERACT - nonzero disables automatic debuglink interaction.

    On failure, sets ``session.shouldstop`` so the whole run aborts early,
    then raises RuntimeError.
    """
    path = os.environ.get("TREZOR_PATH")
    interact = int(os.environ.get("INTERACT", 0))

    if path:
        # An explicit path was requested - do not fall back to enumeration.
        try:
            transport = get_transport(path)
            return Client(transport, auto_interact=not interact)
        except Exception as e:
            request.session.shouldstop = "Failed to communicate with Trezor"
            raise RuntimeError(f"Failed to open debuglink for {path}") from e

    # No path given: probe every enumerable device and take the first one
    # that opens cleanly.
    for device in enumerate_devices():
        try:
            return Client(device, auto_interact=not interact)
        except Exception:
            continue

    request.session.shouldstop = "Failed to communicate with Trezor"
    raise RuntimeError("No debuggable device found")
@pytest.fixture(scope="function")
def client(
    request: pytest.FixtureRequest, _raw_client: Client
) -> Generator[Client, None, None]:
    """Client fixture.

    Every test function that requires a client instance will get it from here.
    If we can't connect to a debuggable device, the test will fail.
    If 'skip_t2' is used and TT is connected, the test is skipped. Vice versa with T1
    and 'skip_t1'.

    The client instance is wiped and preconfigured with "all all all..." mnemonic, no
    password and no pin. It is possible to customize this with the `setup_client`
    marker.

    To specify a custom mnemonic and/or custom pin and/or enable passphrase:

    @pytest.mark.setup_client(mnemonic=MY_MNEMONIC, pin="9999", passphrase=True)

    To receive a client instance that was not initialized:

    @pytest.mark.setup_client(uninitialized=True)
    """
    # Model-specific skips first, before touching the device at all.
    if request.node.get_closest_marker("skip_t2") and _raw_client.features.model == "T":
        pytest.skip("Test excluded on Trezor T")
    if request.node.get_closest_marker("skip_t1") and _raw_client.features.model == "1":
        pytest.skip("Test excluded on Trezor 1")

    sd_marker = request.node.get_closest_marker("sd_card")
    if sd_marker and not _raw_client.features.sd_card_present:
        raise RuntimeError(
            "This test requires SD card.\n"
            "To skip all such tests, run:\n"
            "  pytest -m 'not sd_card' <test path>"
        )

    test_ui = request.config.getoption("ui")

    _raw_client.reset_debug_features()
    _raw_client.open()
    try:
        _raw_client.init_device()
    except Exception:
        request.session.shouldstop = "Failed to communicate with Trezor"
        pytest.fail("Failed to communicate with Trezor")

    if test_ui:
        # we need to reseed before the wipe
        _raw_client.debug.reseed(0)

    if sd_marker:
        should_format = sd_marker.kwargs.get("formatted", True)
        _raw_client.debug.erase_sd_card(format=should_format)

    wipe_device(_raw_client)

    # Defaults, possibly overridden by the setup_client marker's kwargs.
    setup_params = dict(
        uninitialized=False,
        mnemonic=" ".join(["all"] * 12),
        pin=None,
        passphrase=False,
        needs_backup=False,
        no_backup=False,
    )
    marker = request.node.get_closest_marker("setup_client")
    if marker:
        setup_params.update(marker.kwargs)

    # passphrase may be True (enabled) or a concrete passphrase string.
    use_passphrase = setup_params["passphrase"] is True or isinstance(
        setup_params["passphrase"], str
    )

    if not setup_params["uninitialized"]:
        debuglink.load_device(
            _raw_client,
            mnemonic=setup_params["mnemonic"],
            pin=setup_params["pin"],
            passphrase_protection=use_passphrase,
            label="test",
            language="en-US",
            needs_backup=setup_params["needs_backup"],
            no_backup=setup_params["no_backup"],
        )

        if _raw_client.features.model == "T":
            apply_settings(_raw_client, experimental_features=True)

        if use_passphrase and isinstance(setup_params["passphrase"], str):
            _raw_client.use_passphrase(setup_params["passphrase"])

        _raw_client.clear_session()

    if test_ui:
        with ui_tests.screen_recording(_raw_client, request):
            yield _raw_client
    else:
        yield _raw_client

    _raw_client.close()
def pytest_sessionstart(session: pytest.Session) -> None:
    """Load the UI-test fixtures and, in UI-test mode, reset the report dir."""
    ui_tests.read_fixtures()
    if session.config.getoption("ui") == "test":
        testreport.clear_dir()
def _should_write_ui_report(exitstatus: pytest.ExitCode) -> bool:
2020-02-24 14:38:02 +00:00
# generate UI report and check missing only if pytest is exitting cleanly
# I.e., the test suite passed or failed (as opposed to ctrl+c break, internal error,
# etc.)
return exitstatus in (pytest.ExitCode.OK, pytest.ExitCode.TESTS_FAILED)
def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -> None:
    """After a clean run, emit UI-test artifacts.

    In "test" mode: optionally fail the session when expected UI tests did not
    run, write a suggested fixtures diff, and render the HTML report index.
    In "record" mode: write the recorded fixtures.
    """
    if not _should_write_ui_report(exitstatus):
        return

    ui_option = session.config.getoption("ui")
    check_missing = session.config.getoption("ui_check_missing")

    if ui_option == "test":
        if check_missing and ui_tests.list_missing():
            session.exitstatus = pytest.ExitCode.TESTS_FAILED
        ui_tests.write_fixtures_suggestion(check_missing)
        testreport.index()

    if ui_option == "record":
        ui_tests.write_fixtures(check_missing)
def pytest_terminal_summary(
    terminalreporter: "TerminalReporter", exitstatus: pytest.ExitCode, config: "Config"
) -> None:
    """Print a UI-testing summary at the end of the terminal report."""
    echo = terminalreporter.write_line
    echo("")

    ui_option = config.getoption("ui")
    missing_tests = ui_tests.list_missing()

    # Warn about expected UI tests that never ran (only after a clean exit).
    if ui_option and _should_write_ui_report(exitstatus) and missing_tests:
        echo(f"{len(missing_tests)} expected UI tests did not run.")
        if config.getoption("ui_check_missing"):
            echo("-------- List of missing tests follows: --------")
            for test in missing_tests:
                echo("\t" + test)

            if ui_option == "test":
                echo("UI test failed.")
            elif ui_option == "record":
                echo("Removing missing tests from record.")
            echo("")

    if ui_option == "test" and _should_write_ui_report(exitstatus):
        echo("\n-------- Suggested fixtures.json diff: --------")
        print("See", ui_tests.SUGGESTION_FILE)
        echo("")

    if _should_write_ui_report(exitstatus):
        echo("-------- UI tests summary: --------")
        echo("Run ./tests/show_results.py to open test summary")
        echo("")
def pytest_addoption(parser: "Parser") -> None:
    """Register the UI-testing command-line options.

    --ui: enable UI integration tests, either recording new screens
    ("record") or checking against recorded ones ("test").
    --ui-check-missing: also verify that every expected UI test case ran.
    """
    parser.addoption(
        "--ui",
        action="store",
        choices=["test", "record"],
        # fix: "intergration" -> "integration"
        help="Enable UI integration tests: 'record' or 'test'",
    )
    parser.addoption(
        "--ui-check-missing",
        action="store_true",
        default=False,
        # fix: the implicit string concatenation was missing a space
        # ("`test`,deletes"); add it so the --help text reads correctly.
        help="Check UI fixtures are containing the appropriate test cases (fails on `test`, "
        "deletes old ones on `record`).",
    )
2019-12-09 16:01:04 +00:00
def pytest_configure(config: "Config") -> None:
2019-09-12 10:25:13 +00:00
"""Called at testsuite setup time.
2019-09-12 10:25:13 +00:00
Registers known markers, enables verbose output if requested.
"""
# register known markers
config.addinivalue_line("markers", "skip_t1: skip the test on Trezor One")
config.addinivalue_line("markers", "skip_t2: skip the test on Trezor T")
config.addinivalue_line(
"markers",
'setup_client(mnemonic="all all all...", pin=None, passphrase=False, uninitialized=False): configure the client instance',
)
with open(os.path.join(os.path.dirname(__file__), "REGISTERED_MARKERS")) as f:
for line in f:
config.addinivalue_line("markers", line.strip())
# enable debug
2018-08-13 16:21:24 +00:00
if config.getoption("verbose"):
log.enable_debug_output()
def pytest_runtest_setup(item: pytest.Item) -> None:
    """Called for each test item (class, individual tests).

    Ensures that altcoin tests are skipped, and that no test is skipped on
    both T1 and TT.
    """
    # A test skipped on every model would never run anywhere - that is a
    # mistake in the test, not a legitimate skip.
    skips_t1 = item.get_closest_marker("skip_t1")
    skips_t2 = item.get_closest_marker("skip_t2")
    if skips_t1 and skips_t2:
        raise RuntimeError("Don't skip tests for both trezors!")

    skip_altcoins = int(os.environ.get("TREZOR_PYTEST_SKIP_ALTCOINS", 0))
    if item.get_closest_marker("altcoin") and skip_altcoins:
        pytest.skip("Skipping altcoin test")
def pytest_runtest_teardown(item: pytest.Item) -> None:
    """Called after a test item finishes.

    Dumps the current UI test report HTML.
    """
    ui_option = item.session.config.getoption("ui")
    if ui_option == "test":
        testreport.index()
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call) -> None:
    # Make test results available in fixtures.
    # See https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
    # The device_handler fixture uses this as 'request.node.rep_call.passed' attribute,
    # in order to raise error only if the test passed.
    outcome = yield
    report = outcome.get_result()
    # Store one attribute per phase: rep_setup, rep_call, rep_teardown.
    setattr(item, f"rep_{report.when}", report)
@pytest.fixture
def device_handler(client: Client, request: pytest.FixtureRequest) -> None:
    """Yield a BackgroundDeviceHandler wrapping the client fixture.

    On teardown, if the test passed, verify that it finalized all background
    tasks it started.
    """
    handler = BackgroundDeviceHandler(client)
    yield handler

    # if test did not finish, e.g. interrupted by Ctrl+C, the pytest_runtest_makereport
    # did not create the attribute we need
    if not hasattr(request.node, "rep_call"):
        return

    # if test finished, make sure all background tasks are done
    finalized_ok = handler.check_finalize()
    if request.node.rep_call.passed and not finalized_ok:  # type: ignore [rep_call must exist]
        raise RuntimeError("Test did not check result of background task")