2018-06-21 14:28:34 +00:00
|
|
|
# This file is part of the Trezor project.
|
|
|
|
#
|
2019-05-29 16:44:09 +00:00
|
|
|
# Copyright (C) 2012-2019 SatoshiLabs and contributors
|
2018-06-21 14:28:34 +00:00
|
|
|
#
|
|
|
|
# This library is free software: you can redistribute it and/or modify
|
|
|
|
# it under the terms of the GNU Lesser General Public License version 3
|
|
|
|
# as published by the Free Software Foundation.
|
|
|
|
#
|
|
|
|
# This library is distributed in the hope that it will be useful,
|
|
|
|
# but WITHOUT ANY WARRANTY; without even the implied warranty of
|
|
|
|
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
|
|
|
|
# GNU Lesser General Public License for more details.
|
|
|
|
#
|
|
|
|
# You should have received a copy of the License along with this library.
|
|
|
|
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.
|
|
|
|
|
2022-08-18 15:21:27 +00:00
|
|
|
from __future__ import annotations
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
import os
|
2023-03-08 10:38:52 +00:00
|
|
|
from pathlib import Path
|
2023-03-30 10:31:09 +00:00
|
|
|
from typing import TYPE_CHECKING, Generator, Iterator
|
2018-08-21 14:21:49 +00:00
|
|
|
|
2018-04-03 16:55:10 +00:00
|
|
|
import pytest
|
2022-09-07 11:53:25 +00:00
|
|
|
import xdist
|
2018-04-03 16:55:10 +00:00
|
|
|
|
2024-03-09 21:05:05 +00:00
|
|
|
from trezorlib import debuglink, log, models
|
2022-01-31 12:25:30 +00:00
|
|
|
from trezorlib.debuglink import TrezorClientDebugLink as Client
|
2023-08-15 15:58:32 +00:00
|
|
|
from trezorlib.device import apply_settings
|
|
|
|
from trezorlib.device import wipe as wipe_device
|
2018-08-21 14:21:49 +00:00
|
|
|
from trezorlib.transport import enumerate_devices, get_transport
|
2018-06-04 16:34:32 +00:00
|
|
|
|
2023-08-11 15:57:32 +00:00
|
|
|
# register rewrites before importing from local package
|
|
|
|
# so that we see details of failed asserts from this module
|
|
|
|
pytest.register_assert_rewrite("tests.common")
|
|
|
|
|
|
|
|
from . import translations, ui_tests
|
2019-10-18 11:35:55 +00:00
|
|
|
from .device_handler import BackgroundDeviceHandler
|
2022-08-18 15:21:27 +00:00
|
|
|
from .emulators import EmulatorWrapper
|
2019-10-16 15:39:06 +00:00
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
if TYPE_CHECKING:
|
|
|
|
from _pytest.config import Config
|
|
|
|
from _pytest.config.argparsing import Parser
|
|
|
|
from _pytest.terminal import TerminalReporter
|
|
|
|
|
2023-08-15 15:58:32 +00:00
|
|
|
from trezorlib._internal.emulator import Emulator
|
|
|
|
|
2023-03-08 10:38:52 +00:00
|
|
|
|
|
|
|
# Directory containing this conftest; anchor for locating test resources.
HERE = Path(__file__).resolve().parent
# Path to the `core` firmware tree (sibling of the tests directory).
CORE = HERE.parent / "core"
|
2022-01-25 17:22:11 +00:00
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
|
2023-03-30 10:31:09 +00:00
|
|
|
def _emulator_wrapper_main_args() -> list[str]:
|
|
|
|
"""Look at TREZOR_PROFILING env variable, so that we can generate coverage reports."""
|
|
|
|
do_profiling = os.environ.get("TREZOR_PROFILING") == "1"
|
|
|
|
if do_profiling:
|
|
|
|
core_dir = HERE.parent / "core"
|
|
|
|
profiling_wrapper = core_dir / "prof" / "prof.py"
|
|
|
|
# So that the coverage reports have the correct paths
|
|
|
|
os.environ["TREZOR_SRC"] = str(core_dir / "src")
|
|
|
|
return [str(profiling_wrapper)]
|
|
|
|
else:
|
|
|
|
return ["-m", "main"]
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def core_emulator(request: pytest.FixtureRequest) -> Iterator[Emulator]:
    """Yield the default core emulator, with screen recording when --ui=test."""
    # Combined `with` keeps the recording scoped to the emulator's lifetime;
    # screen_recording() wraps emu.client only when --ui=test is in effect.
    with EmulatorWrapper(
        "core", main_args=_emulator_wrapper_main_args()
    ) as emu, ui_tests.screen_recording(emu.client, request):
        yield emu
|
|
|
|
|
|
|
|
|
2021-07-30 11:59:16 +00:00
|
|
|
@pytest.fixture(scope="session")
def emulator(request: pytest.FixtureRequest) -> Generator["Emulator", None, None]:
    """Fixture for getting emulator connection in case tests should operate it on their own.

    Is responsible for starting it at the start of the session and stopping
    it at the end of the session - using `with EmulatorWrapper...`.

    Makes sure that each process will run the emulator on a different
    port and with different profile directory, which is cleaned afterwards.

    Used so that we can run the device tests in parallel using `pytest-xdist` plugin.
    Docs: https://pypi.org/project/pytest-xdist/

    NOTE for parallel tests:
    So that all worker processes will explore the tests in the exact same order,
    we cannot use the "built-in" random order, we need to specify our own,
    so that all the processes share the same order.
    Done by appending `--random-order-seed=$RANDOM` as a `pytest` argument,
    using system RNG.
    """
    model = str(request.session.config.getoption("model"))
    interact = os.environ.get("INTERACT") == "1"

    # Explicit validation instead of `assert`: asserts are stripped under
    # `python -O`, which would let an invalid --model value slip through.
    if model not in ("core", "legacy"):
        raise RuntimeError(f"Unknown emulator model: {model}")
    if model == "legacy":
        raise RuntimeError(
            "Legacy emulator is not supported until it can be run on arbitrary ports."
        )

    def _get_port() -> int:
        """Get a unique port for this worker process on which it can run.

        Guarantees to be unique because each worker has a different name.
        gw0=>20000, gw1=>20003, gw2=>20006, etc.
        """
        worker_id = xdist.get_xdist_worker_id(request)
        assert worker_id.startswith("gw")
        # One emulator instance occupies 3 consecutive ports:
        # 1. normal link, 2. debug link and 3. webauthn fake interface
        return 20000 + int(worker_id[2:]) * 3

    with EmulatorWrapper(
        model,
        port=_get_port(),
        headless=True,
        auto_interact=not interact,
        main_args=_emulator_wrapper_main_args(),
    ) as emu:
        yield emu
|
|
|
|
|
2019-08-06 13:49:56 +00:00
|
|
|
|
2022-08-18 15:21:27 +00:00
|
|
|
@pytest.fixture(scope="session")
def _raw_client(request: pytest.FixtureRequest) -> Client:
    """Return the session-wide debuglink client.

    The client comes either from the pytest-managed emulator (when
    `--control-emulators` is passed), or from a device found via the
    TREZOR_PATH env variable, or from enumerating available transports.
    Also sets the requested language on models that support it.
    """
    # In case tests run in parallel, each process has its own emulator/client.
    # Requesting the emulator fixture only if relevant.
    if request.session.config.getoption("control_emulators"):
        emu_fixture = request.getfixturevalue("emulator")
        client = emu_fixture.client
    else:
        # INTERACT=1 means a human confirms prompts on a physical device.
        interact = os.environ.get("INTERACT") == "1"
        path = os.environ.get("TREZOR_PATH")
        if path:
            client = _client_from_path(request, path, interact)
        else:
            client = _find_client(request, interact)

    # Setting the appropriate language
    # Not doing it for T1
    if client.model is not models.T1B1:
        lang = request.session.config.getoption("lang") or "en"
        assert isinstance(lang, str)
        translations.set_language(client, lang)

    return client
|
2022-08-18 15:21:27 +00:00
|
|
|
|
2021-10-13 13:45:53 +00:00
|
|
|
|
2022-08-18 15:21:27 +00:00
|
|
|
def _client_from_path(
    request: pytest.FixtureRequest, path: str, interact: bool
) -> Client:
    """Open a debuglink client over the transport at `path`.

    Any failure stops the whole test session - without a device there is
    nothing left to test against.
    """
    try:
        return Client(get_transport(path), auto_interact=not interact)
    except Exception as exc:
        request.session.shouldstop = "Failed to communicate with Trezor"
        raise RuntimeError(f"Failed to open debuglink for {path}") from exc
|
|
|
|
|
|
|
|
|
|
|
|
def _find_client(request: pytest.FixtureRequest, interact: bool) -> Client:
    """Return a client for the first enumerated device that opens a debuglink.

    Stops the test session when no debuggable device can be found.
    """
    for candidate in enumerate_devices():
        try:
            return Client(candidate, auto_interact=not interact)
        except Exception:
            # This device could not be opened - try the next one.
            continue

    request.session.shouldstop = "Failed to communicate with Trezor"
    raise RuntimeError("No debuggable device found")
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
2018-06-04 16:34:32 +00:00
|
|
|
@pytest.fixture(scope="function")
def client(
    request: pytest.FixtureRequest, _raw_client: Client
) -> Generator[Client, None, None]:
    """Client fixture.

    Every test function that requires a client instance will get it from here.
    If we can't connect to a debuggable device, the test will fail.
    If 'skip_t2t1' is used and TT is connected, the test is skipped. Vice versa with T1
    and 'skip_t1b1'. Same with T2B1, T3T1.

    The client instance is wiped and preconfigured with "all all all..." mnemonic, no
    password and no pin. It is possible to customize this with the `setup_client`
    marker.

    To specify a custom mnemonic and/or custom pin and/or enable passphrase:

    @pytest.mark.setup_client(mnemonic=MY_MNEMONIC, pin="9999", passphrase=True)

    To receive a client instance that was not initialized:

    @pytest.mark.setup_client(uninitialized=True)

    To enable experimental features:

    @pytest.mark.experimental
    """
    # Skip when the connected model matches one of the skip_* markers.
    if (
        request.node.get_closest_marker("skip_t2t1")
        and _raw_client.model is models.T2T1
    ):
        pytest.skip("Test excluded on Trezor T")
    if (
        request.node.get_closest_marker("skip_t1b1")
        and _raw_client.model is models.T1B1
    ):
        pytest.skip("Test excluded on Trezor 1")
    if (
        request.node.get_closest_marker("skip_t2b1")
        and _raw_client.model is models.T2B1
    ):
        pytest.skip("Test excluded on Trezor T2B1")
    if (
        request.node.get_closest_marker("skip_t3t1")
        and _raw_client.model is models.T3T1
    ):
        pytest.skip("Test excluded on Trezor T3T1")

    # sd_card tests cannot run (and must not be silently skipped) without a card.
    sd_marker = request.node.get_closest_marker("sd_card")
    if sd_marker and not _raw_client.features.sd_card_present:
        raise RuntimeError(
            "This test requires SD card.\n"
            "To skip all such tests, run:\n"
            "  pytest -m 'not sd_card' <test path>"
        )

    test_ui = request.config.getoption("ui")

    _raw_client.reset_debug_features()
    _raw_client.open()
    try:
        _raw_client.init_device()
    except Exception:
        request.session.shouldstop = "Failed to communicate with Trezor"
        pytest.fail("Failed to communicate with Trezor")

    # Resetting all the debug events to not be influenced by previous test
    _raw_client.debug.reset_debug_events()

    if test_ui:
        # we need to reseed before the wipe
        _raw_client.debug.reseed(0)

    if sd_marker:
        # `formatted=False` lets a test start from an unformatted card.
        should_format = sd_marker.kwargs.get("formatted", True)
        _raw_client.debug.erase_sd_card(format=should_format)

    wipe_device(_raw_client)

    # Load language again, as it got erased in wipe
    if _raw_client.model is not models.T1B1:
        lang = request.session.config.getoption("lang") or "en"
        assert isinstance(lang, str)
        if lang != "en":
            translations.set_language(_raw_client, lang)

    # Defaults, possibly overridden by the setup_client marker below.
    setup_params = dict(
        uninitialized=False,
        mnemonic=" ".join(["all"] * 12),
        pin=None,
        passphrase=False,
        needs_backup=False,
        no_backup=False,
    )

    marker = request.node.get_closest_marker("setup_client")
    if marker:
        setup_params.update(marker.kwargs)

    # passphrase=True enables protection; a str additionally selects the value.
    use_passphrase = setup_params["passphrase"] is True or isinstance(
        setup_params["passphrase"], str
    )

    if not setup_params["uninitialized"]:
        debuglink.load_device(
            _raw_client,
            mnemonic=setup_params["mnemonic"],  # type: ignore
            pin=setup_params["pin"],  # type: ignore
            passphrase_protection=use_passphrase,
            label="test",
            needs_backup=setup_params["needs_backup"],  # type: ignore
            no_backup=setup_params["no_backup"],  # type: ignore
        )

    if request.node.get_closest_marker("experimental"):
        apply_settings(_raw_client, experimental_features=True)

    if use_passphrase and isinstance(setup_params["passphrase"], str):
        _raw_client.use_passphrase(setup_params["passphrase"])

    _raw_client.clear_session()

    # Record screens during the test when --ui=test/record is active.
    with ui_tests.screen_recording(_raw_client, request):
        yield _raw_client

    _raw_client.close()
|
2018-04-03 16:55:10 +00:00
|
|
|
|
|
|
|
|
2022-09-07 11:53:25 +00:00
|
|
|
def _is_main_runner(session_or_request: pytest.Session | pytest.FixtureRequest) -> bool:
    """Return True if the current process is the main test runner.

    In case tests are run in parallel, the main runner is the xdist controller.
    We cannot use `is_xdist_controller` directly because it is False when xdist is
    not used.
    """
    worker_id = xdist.get_xdist_worker_id(session_or_request)
    return worker_id == "master"
|
|
|
|
|
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
def pytest_sessionstart(session: pytest.Session) -> None:
    """Set up the UI-test machinery when UI tests are enabled."""
    ui_enabled = session.config.getoption("ui")
    if not ui_enabled:
        return
    ui_tests.setup(main_runner=_is_main_runner(session))
|
2020-02-24 14:38:02 +00:00
|
|
|
|
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -> None:
    """Let the UI-test machinery post-process results and adjust the exit status.

    Only runs in the main runner process, so that parallel workers do not
    each try to finalize the UI-test report.
    """
    ui_mode = session.config.getoption("ui")
    if not ui_mode or not _is_main_runner(session):
        return

    getopt = session.config.getoption
    session.exitstatus = ui_tests.sessionfinish(
        exitstatus,
        ui_mode,  # type: ignore
        bool(getopt("ui_check_missing")),
        bool(getopt("record_text_layout")),
        bool(getopt("do_master_diff")),
    )
|
2020-01-09 14:25:45 +00:00
|
|
|
|
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
def pytest_terminal_summary(
    terminalreporter: "TerminalReporter", exitstatus: pytest.ExitCode, config: "Config"
) -> None:
    """Append a UI-test summary to the terminal report (when UI tests ran)."""
    write_line = terminalreporter.write_line
    write_line("")

    ui_option = config.getoption("ui")
    if not ui_option:
        return
    ui_tests.terminal_summary(
        write_line,
        ui_option,  # type: ignore
        bool(config.getoption("ui_check_missing")),
        exitstatus,
    )
|
2023-01-02 16:57:07 +00:00
|
|
|
|
2020-01-09 14:25:45 +00:00
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
def pytest_addoption(parser: "Parser") -> None:
    """Register all custom command-line options of this test suite."""
    add_option = parser.addoption

    # --- UI-test behavior ---
    add_option(
        "--ui",
        action="store",
        choices=["test", "record"],
        help="Enable UI integration tests: 'record' or 'test'",
    )
    add_option(
        "--ui-check-missing",
        action="store_true",
        default=False,
        help="Check UI fixtures are containing the appropriate test cases (fails on `test`,"
        "deletes old ones on `record`).",
    )

    # --- Emulator management ---
    add_option(
        "--control-emulators",
        action="store_true",
        default=False,
        help="Pytest will be responsible for starting and stopping the emulators. "
        "Useful when running tests in parallel.",
    )
    add_option(
        "--model",
        action="store",
        choices=["core", "legacy"],
        help="Which emulator to use: 'core' or 'legacy'. "
        "Only valid in connection with `--control-emulators`",
    )

    # --- Reporting ---
    add_option(
        "--record-text-layout",
        action="store_true",
        default=False,
        help="Saving debugging traces for each screen change. "
        "Will generate a report with text from all test-cases.",
    )
    add_option(
        "--do-master-diff",
        action="store_true",
        default=False,
        help="Generating a master-diff report. "
        "This shows all unique differing screens compared to master.",
    )

    # --- Localization ---
    add_option(
        "--lang",
        action="store",
        choices=translations.LANGUAGES,
        help="Run tests with a specified language: 'en' is the default",
    )
|
2019-12-09 16:01:04 +00:00
|
|
|
|
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
def pytest_configure(config: "Config") -> None:
    """Called at testsuite setup time.

    Registers known markers, enables verbose output if requested.
    """
    # register known markers
    config.addinivalue_line("markers", "skip_t1b1: skip the test on Trezor One")
    config.addinivalue_line("markers", "skip_t2t1: skip the test on Trezor T")
    config.addinivalue_line("markers", "skip_t2b1: skip the test on Trezor T2B1")
    config.addinivalue_line("markers", "skip_t3t1: skip the test on Trezor T3T1")
    config.addinivalue_line(
        "markers", "experimental: enable experimental features on Trezor"
    )
    config.addinivalue_line(
        "markers",
        'setup_client(mnemonic="all all all...", pin=None, passphrase=False, uninitialized=False): configure the client instance',
    )
    # Use the module-level pathlib HERE constant instead of os.path juggling,
    # consistent with the rest of this file.
    with open(HERE / "REGISTERED_MARKERS") as f:
        for line in f:
            config.addinivalue_line("markers", line.strip())

    # enable debug
    if config.getoption("verbose"):
        log.enable_debug_output()
|
|
|
|
|
|
|
|
|
2022-01-28 18:26:03 +00:00
|
|
|
def pytest_runtest_setup(item: pytest.Item) -> None:
    """Called for each test item (class, individual tests).

    Ensures that altcoin tests are skipped, and that no test is skipped on
    all supported models at once.
    """
    # A test skipped on every model could never run anywhere. The check must
    # cover skip_t3t1 too: a test marked with only the other three skips can
    # still legitimately run on T3T1 and must not be rejected.
    if all(
        item.get_closest_marker(marker)
        for marker in ("skip_t1b1", "skip_t2t1", "skip_t2b1", "skip_t3t1")
    ):
        raise RuntimeError("Don't skip tests for all trezor models!")

    skip_altcoins = int(os.environ.get("TREZOR_PYTEST_SKIP_ALTCOINS", 0))
    if item.get_closest_marker("altcoin") and skip_altcoins:
        pytest.skip("Skipping altcoin test")
|
2019-10-16 15:39:06 +00:00
|
|
|
|
|
|
|
|
|
|
|
@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call) -> Generator:
    """Expose each phase's test report on the item (e.g. `item.rep_call`).

    See https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
    The device_handler fixture reads 'request.node.rep_call.passed' so that it
    raises an error only when the test itself passed.
    """
    outcome = yield
    report = outcome.get_result()
    setattr(item, f"rep_{report.when}", report)
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def device_handler(client: Client, request: pytest.FixtureRequest) -> Generator:
    """Yield a background device handler wrapping the client.

    After the test, verify that a passing test consumed the results of the
    background tasks it started.
    """
    handler = BackgroundDeviceHandler(client)
    yield handler

    # Result of the test's "call" phase; None means it never ran.
    test_result = ui_tests.common.get_last_call_test_result(request)
    if test_result is None:
        return

    # If the test finished, make sure all background tasks are done.
    finalized_ok = handler.check_finalize()
    if test_result and not finalized_ok:  # type: ignore [rep_call must exist]
        raise RuntimeError("Test did not check result of background task")
|