# This file is part of the Trezor project.
#
# Copyright (C) 2012-2019 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.

from __future__ import annotations

import os
import typing as t
from enum import IntEnum
from pathlib import Path
from time import sleep

import cryptography
import pytest
import xdist
from _pytest.python import IdMaker
from _pytest.reports import TestReport

from trezorlib import debuglink, log, models
from trezorlib.client import ProtocolVersion
from trezorlib.debuglink import SessionDebugWrapper
from trezorlib.debuglink import TrezorClientDebugLink as Client
from trezorlib.device import apply_settings
from trezorlib.device import wipe as wipe_device
from trezorlib.transport import enumerate_devices, get_transport
from trezorlib.transport.thp.protocol_v1 import ProtocolV1

# register rewrites before importing from local package
# so that we see details of failed asserts from this module
pytest.register_assert_rewrite("tests.common")

from . import translations, ui_tests
from .device_handler import BackgroundDeviceHandler
from .emulators import EmulatorWrapper

if t.TYPE_CHECKING:
    from _pytest.config import Config
    from _pytest.config.argparsing import Parser
    from _pytest.mark import Mark
    from _pytest.nodes import Node
    from _pytest.terminal import TerminalReporter

    from trezorlib._internal.emulator import Emulator


HERE = Path(__file__).resolve().parent
CORE = HERE.parent / "core"

# So that we see details of failed asserts from these modules as well
pytest.register_assert_rewrite("tests.input_flows")
pytest.register_assert_rewrite("tests.input_flows_helpers")


def _emulator_wrapper_main_args() -> list[str]:
    """Look at the TREZOR_PROFILING env variable, so that we can generate coverage reports."""
    do_profiling = os.environ.get("TREZOR_PROFILING") == "1"
    if do_profiling:
        core_dir = HERE.parent / "core"
        profiling_wrapper = core_dir / "prof" / "prof.py"
        # So that the coverage reports have the correct paths
        os.environ["TREZOR_SRC"] = str(core_dir / "src")
        return [str(profiling_wrapper)]
    else:
        return ["-m", "main"]


@pytest.fixture
def core_emulator(request: pytest.FixtureRequest) -> t.Iterator[Emulator]:
    """Fixture returning the default core emulator, with the possibility of screen recording."""
    with EmulatorWrapper("core", main_args=_emulator_wrapper_main_args()) as emu:
        # Modify emu.client to add screen recording (when --ui=test is used)
        with ui_tests.screen_recording(emu.client, request):
            yield emu


@pytest.fixture(scope="session")
def emulator(request: pytest.FixtureRequest) -> t.Generator["Emulator", None, None]:
    """Fixture for getting an emulator connection in case tests should operate it on their own.

    Is responsible for starting the emulator at the start of the session and for
    stopping it at the end - using `with EmulatorWrapper...`.

    Makes sure that each process runs its emulator on a different port and with a
    different profile directory, which is cleaned up afterwards.

    Used so that we can run the device tests in parallel using the `pytest-xdist` plugin.
    Docs: https://pypi.org/project/pytest-xdist/

    NOTE for parallel tests:
    So that all worker processes explore the tests in the exact same order, we
    cannot use the "built-in" random order - we need to specify our own, shared by
    all the processes. This is done by appending `--random-order-seed=$RANDOM` as
    a `pytest` argument, using the system RNG.
    """
    model = str(request.session.config.getoption("model"))
    interact = os.environ.get("INTERACT") == "1"

    assert model in ("core", "legacy")
    if model == "legacy":
        raise RuntimeError(
            "Legacy emulator is not supported until it can be run on arbitrary ports."
        )

    def _get_port() -> int:
        """Get a unique port on which this worker process can run its emulator.

        Guaranteed to be unique, because each worker has a different name:
        gw0=>20000, gw1=>20003, gw2=>20006, etc.
        """
        worker_id = xdist.get_xdist_worker_id(request)
        assert worker_id.startswith("gw")
        # One emulator instance occupies 3 consecutive ports:
        # 1. normal link, 2. debug link and 3. webauthn fake interface
        return 20000 + int(worker_id[2:]) * 3

    with EmulatorWrapper(
        model,
        port=_get_port(),
        headless=True,
        auto_interact=not interact,
        main_args=_emulator_wrapper_main_args(),
    ) as emu:
        yield emu
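

# An illustrative parallel invocation (assumes the pytest-xdist and
# pytest-random-order plugins, as described in the fixture docstring above;
# the worker count and test path are examples, not a prescribed command):
#
#   pytest -n 4 --control-emulators --model=core \
#       --random-order-seed=$RANDOM tests/device_tests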


@pytest.fixture(scope="session")
def _raw_client(request: pytest.FixtureRequest) -> Client:
    return _get_raw_client(request)


def _get_raw_client(request: pytest.FixtureRequest) -> Client:
    # In case tests run in parallel, each process has its own emulator/client.
    # Requesting the emulator fixture only if relevant.
    if request.session.config.getoption("control_emulators"):
        emu_fixture = request.getfixturevalue("emulator")
        client = emu_fixture.client
    else:
        interact = os.environ.get("INTERACT") == "1"
        path = os.environ.get("TREZOR_PATH")
        if path:
            client = _client_from_path(request, path, interact)
        else:
            client = _find_client(request, interact)

    return client


def _client_from_path(
    request: pytest.FixtureRequest, path: str, interact: bool
) -> Client:
    try:
        transport = get_transport(path)
        return Client(transport, auto_interact=not interact)
    except Exception as e:
        request.session.shouldstop = "Failed to communicate with Trezor"
        raise RuntimeError(f"Failed to open debuglink for {path}") from e


def _find_client(request: pytest.FixtureRequest, interact: bool) -> Client:
    devices = enumerate_devices()
    for device in devices:
        try:
            return Client(device, auto_interact=not interact)
        except Exception:
            pass

    request.session.shouldstop = "Failed to communicate with Trezor"
    raise RuntimeError("No debuggable device found")


class ModelsFilter:
    MODEL_SHORTCUTS = {
        "core": models.TREZORS - {models.T1B1},
        "legacy": {models.T1B1},
        "t1": {models.T1B1},
        "t2": {models.T2T1},
        "tt": {models.T2T1},
        "safe": {models.T2B1, models.T3T1, models.T3B1},
        "safe3": {models.T2B1, models.T3B1},
        "safe5": {models.T3T1},
        "mercury": {models.T3T1},
    }

    def __init__(self, node: Node) -> None:
        markers = node.iter_markers("models")
        self.models = set(models.TREZORS)
        for marker in markers:
            self._refine_by_marker(marker)

    def __contains__(self, model: models.TrezorModel) -> bool:
        return model in self.models

    def __bool__(self) -> bool:
        return bool(self.models)

    def _refine_by_marker(self, marker: Mark) -> None:
        """Apply the marker selector to the current models selection."""
        if marker.args:
            self.models &= self._set_from_marker_list(marker.args)
        if "skip" in marker.kwargs:
            self.models -= self._set_from_marker_list(marker.kwargs["skip"])

    @classmethod
    def _set_from_marker_list(
        cls, marker_list: str | t.Sequence[str] | t.Sequence[models.TrezorModel]
    ) -> set[models.TrezorModel]:
        """Given either a possible value of pytest.mark.models positional args,
        or a value of the `skip` kwarg, return the set of models specified by that value.
        """
        if not marker_list:
            raise ValueError("No models specified")

        if isinstance(marker_list[0], models.TrezorModel):
            # raw list of TrezorModels
            return set(marker_list)  # type: ignore [incompatible with return type]

        if len(marker_list) == 1:
            # @pytest.mark.models("t2t1,t2b1") -> ("t2t1,t2b1",) -> "t2t1,t2b1"
            marker_list = marker_list[0]

        if isinstance(marker_list, str):
            # either a single model / shortcut, or a comma-separated text list
            # @pytest.mark.models("t2t1,t2b1") -> "t2t1,t2b1" -> ["t2t1", "t2b1"]
            marker_list = [s.strip() for s in marker_list.split(",")]

        selected_models = set()
        for marker in marker_list:
            assert isinstance(marker, str)
            if marker in cls.MODEL_SHORTCUTS:
                selected_models |= cls.MODEL_SHORTCUTS[marker]
            elif (model := models.by_internal_name(marker.upper())) is not None:
                selected_models.add(model)
            else:
                raise ValueError(f"Unknown model: {marker}")

        return selected_models
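

# Illustrative uses of the `models` marker handled by ModelsFilter (the values
# are examples - a family shortcut, a comma-separated list of internal names,
# and a `skip` kwarg):
#
#   @pytest.mark.models("core")
#   @pytest.mark.models("t2t1,t2b1")
#   @pytest.mark.models("safe", skip="t3t1", reason="not relevant on Safe 5")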


@pytest.fixture(scope="function")
def _client_unlocked(
    request: pytest.FixtureRequest, _raw_client: Client
) -> t.Generator[Client, None, None]:
    """Client fixture.

    Every test function that requires a client instance will get it from here.
    If we can't connect to a debuggable device, the test will fail.
    If the test is not applicable for the connected model (as declared via the
    `models` marker, see `ModelsFilter`), it is skipped.

    The client instance is wiped and preconfigured with the "all all all..."
    mnemonic, no passphrase and no PIN. It is possible to customize this with
    the `setup_client` marker.

    To specify a custom mnemonic and/or custom pin and/or enable passphrase:

    @pytest.mark.setup_client(mnemonic=MY_MNEMONIC, pin="9999", passphrase=True)

    To receive a client instance that was not initialized:

    @pytest.mark.setup_client(uninitialized=True)

    To enable experimental features:

    @pytest.mark.experimental
    """
    models_filter = ModelsFilter(request.node)
    if _raw_client.model not in models_filter:
        pytest.skip(f"Skipping test for model {_raw_client.model.internal_name}")

    protocol_marker: Mark | None = request.node.get_closest_marker("protocol")
    if protocol_marker:
        args = protocol_marker.args
        protocol_version = _raw_client.protocol_version

        if (
            protocol_version == ProtocolVersion.PROTOCOL_V1
            and "protocol_v1" not in args
        ):
            pytest.skip(
                f"Skipping test for device/emulator with protocol_v{protocol_version} - the test does not support this protocol."
            )

        if (
            protocol_version == ProtocolVersion.PROTOCOL_V2
            and "protocol_v2" not in args
        ):
            pytest.skip(
                f"Skipping test for device/emulator with protocol_v{protocol_version} - the test does not support this protocol."
            )

    sd_marker = request.node.get_closest_marker("sd_card")
    if sd_marker and not _raw_client.features.sd_card_present:
        raise RuntimeError(
            "This test requires SD card.\n"
            "To skip all such tests, run:\n"
            "  pytest -m 'not sd_card' <test path>"
        )

    test_ui = request.config.getoption("ui")

    _raw_client.reset_debug_features(new_management_session=True)
    _raw_client.open()
    if isinstance(_raw_client.protocol, ProtocolV1):
        try:
            _raw_client.sync_responses()
            # TODO _raw_client.init_device()
        except Exception:
            request.session.shouldstop = "Failed to communicate with Trezor"
            pytest.fail("Failed to communicate with Trezor")

    # Reset all the debug events, so that we are not influenced by the previous test
    _raw_client.debug.reset_debug_events()

    if test_ui:
        # we need to reseed before the wipe
        _raw_client.debug.reseed(0)

    if sd_marker:
        should_format = sd_marker.kwargs.get("formatted", True)
        _raw_client.debug.erase_sd_card(format=should_format)

    while True:
        try:
            session = _raw_client.get_management_session()
            wipe_device(session)
            sleep(1.5)  # Makes tests more stable (wait for the wipe to finish)
            break
        except cryptography.exceptions.InvalidTag:
            # Get a new client
            _raw_client = _get_raw_client(request)

    from trezorlib.transport.thp.channel_database import get_channel_db

    get_channel_db().clear_stored_channels()
    _raw_client.protocol = None
    _raw_client.__init__(
        transport=_raw_client.transport,
        auto_interact=_raw_client.debug.allow_interactions,
    )
    if not _raw_client.features.bootloader_mode:
        _raw_client.refresh_features()

    # Load the language again, as it got erased in the wipe
    if _raw_client.model is not models.T1B1:
        lang = request.session.config.getoption("lang") or "en"
        assert isinstance(lang, str)
        translations.set_language(
            SessionDebugWrapper(_raw_client.get_management_session()), lang
        )

    setup_params = dict(
        uninitialized=False,
        mnemonic=" ".join(["all"] * 12),
        pin=None,
        passphrase=False,
        needs_backup=False,
        no_backup=False,
    )

    marker = request.node.get_closest_marker("setup_client")
    if marker:
        setup_params.update(marker.kwargs)

    use_passphrase = setup_params["passphrase"] is True or isinstance(
        setup_params["passphrase"], str
    )
    if not setup_params["uninitialized"]:
        session = _raw_client.get_management_session(new_session=True)
        debuglink.load_device(
            session,
            mnemonic=setup_params["mnemonic"],  # type: ignore
            pin=setup_params["pin"],  # type: ignore
            passphrase_protection=use_passphrase,
            label="test",
            needs_backup=setup_params["needs_backup"],  # type: ignore
            no_backup=setup_params["no_backup"],  # type: ignore
        )
        _raw_client._setup_pin = setup_params["pin"]

        if request.node.get_closest_marker("experimental"):
            apply_settings(session, experimental_features=True)

        if use_passphrase and isinstance(setup_params["passphrase"], str):
            _raw_client.use_passphrase(setup_params["passphrase"])

    # TODO _raw_client.clear_session()

    yield _raw_client

    _raw_client.close()


@pytest.fixture(scope="function")
def client(
    request: pytest.FixtureRequest, _client_unlocked: Client
) -> t.Generator[Client, None, None]:
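    """Yield the prepared client, re-locked, with screen recording active when --ui is used."""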
    _client_unlocked.lock()
    with ui_tests.screen_recording(_client_unlocked, request):
        yield _client_unlocked


@pytest.fixture(scope="function")
def session(
    request: pytest.FixtureRequest, _client_unlocked: Client
) -> t.Generator[SessionDebugWrapper, None, None]:
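    """Yield a debug-wrapped session on the prepared client.

    With the `uninitialized_session` marker a management session is used;
    otherwise a regular session is derived (with Cardano support when the
    `cardano` marker is present), entering the PIN first if one was set up.
    """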
    if bool(request.node.get_closest_marker("uninitialized_session")):
        session = _client_unlocked.get_management_session()
    else:
        derive_cardano = bool(request.node.get_closest_marker("cardano"))
        passphrase = _client_unlocked.passphrase or ""
        if _client_unlocked._setup_pin is not None:
            _client_unlocked.use_pin_sequence([_client_unlocked._setup_pin])
        session = _client_unlocked.get_session(
            derive_cardano=derive_cardano, passphrase=passphrase
        )
    try:
        wrapped_session = SessionDebugWrapper(session)
        if _client_unlocked._setup_pin is not None:
            wrapped_session.lock()
        with ui_tests.screen_recording(_client_unlocked, request):
            yield wrapped_session
    finally:
        # TODO: session.end()
        pass


def _is_main_runner(session_or_request: pytest.Session | pytest.FixtureRequest) -> bool:
    """Return True if the current process is the main test runner.

    In case tests are run in parallel, the main runner is the xdist controller.
    We cannot use `is_xdist_controller` directly, because it is False when xdist
    is not used.
    """
    return xdist.get_xdist_worker_id(session_or_request) == "master"


def pytest_sessionstart(session: pytest.Session) -> None:
    if session.config.getoption("ui"):
        ui_tests.setup(main_runner=_is_main_runner(session))


def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -> None:
    test_ui = session.config.getoption("ui")
    if test_ui and _is_main_runner(session):
        session.exitstatus = ui_tests.sessionfinish(
            exitstatus,
            test_ui,  # type: ignore
            bool(session.config.getoption("ui_check_missing")),
            bool(session.config.getoption("record_text_layout")),
            bool(session.config.getoption("do_master_diff")),
        )


def pytest_terminal_summary(
    terminalreporter: "TerminalReporter", exitstatus: pytest.ExitCode, config: "Config"
) -> None:
    println = terminalreporter.write_line
    println("")

    ui_option = config.getoption("ui")
    if ui_option:
        ui_tests.terminal_summary(
            terminalreporter.write_line,
            ui_option,  # type: ignore
            bool(config.getoption("ui_check_missing")),
            exitstatus,
        )


def pytest_addoption(parser: "Parser") -> None:
    parser.addoption(
        "--ui",
        action="store",
        choices=["test", "record"],
        help="Enable UI integration tests: 'record' or 'test'",
    )
    parser.addoption(
        "--ui-check-missing",
        action="store_true",
        default=False,
        help="Check that the UI fixtures contain the appropriate test cases "
        "(fails on `test`, deletes old ones on `record`).",
    )
    parser.addoption(
        "--control-emulators",
        action="store_true",
        default=False,
        help="Pytest will be responsible for starting and stopping the emulators. "
        "Useful when running tests in parallel.",
    )
    parser.addoption(
        "--model",
        action="store",
        choices=["core", "legacy"],
        help="Which emulator to use: 'core' or 'legacy'. "
        "Only valid in connection with `--control-emulators`.",
    )
    parser.addoption(
        "--record-text-layout",
        action="store_true",
        default=False,
        help="Save debugging traces for each screen change. "
        "Will generate a report with text from all test cases.",
    )
    parser.addoption(
        "--do-master-diff",
        action="store_true",
        default=False,
        help="Generate a master-diff report, "
        "showing all unique differing screens compared to master.",
    )
    parser.addoption(
        "--lang",
        action="store",
        choices=translations.LANGUAGES,
        help="Run tests with a specified language: 'en' is the default",
    )


def pytest_configure(config: "Config") -> None:
    """Called at testsuite setup time.

    Registers known markers, enables verbose output if requested.
    """
    # register known markers
    config.addinivalue_line(
        "markers",
        'models("core", "t1b1", ..., skip=[...], reason="..."): select which models or families to run the test on',
    )
    config.addinivalue_line(
        "markers", "experimental: enable experimental features on Trezor"
    )
    config.addinivalue_line(
        "markers",
        'setup_client(mnemonic="all all all...", pin=None, passphrase=False, uninitialized=False): configure the client instance',
    )
    config.addinivalue_line(
        "markers",
        "uninitialized_session: use uninitialized session instance",
    )
    with open(os.path.join(os.path.dirname(__file__), "REGISTERED_MARKERS")) as f:
        for line in f:
            config.addinivalue_line("markers", line.strip())

    # enable debug
    if config.getoption("verbose"):
        log.enable_debug_output()
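
    # Patch pytest's parametrize-ID generation so that IntEnum values show up
    # by name, e.g. "SomeEnum.FOO" rather than a bare integer (the enum name
    # here is illustrative).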
    idval_orig = IdMaker._idval_from_value

    def idval_from_value(self: IdMaker, val: object) -> str | None:
        if isinstance(val, IntEnum):
            return f"{type(val).__name__}.{val.name}"
        return idval_orig(self, val)

    IdMaker._idval_from_value = idval_from_value


def pytest_runtest_setup(item: pytest.Item) -> None:
    """Called for each test item (class, individual tests).

    Ensures that altcoin tests are skipped, and that no test is skipped for all models.
    """
    models_filter = ModelsFilter(item)
    if not models_filter:
        raise RuntimeError("Don't skip tests for all trezor models!")

    skip_altcoins = int(os.environ.get("TREZOR_PYTEST_SKIP_ALTCOINS", 0))
    if item.get_closest_marker("altcoin") and skip_altcoins:
        pytest.skip("Skipping altcoin test")


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call) -> t.Generator:
    # Make test results available in fixtures.
    # See https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
    # The device_handler fixture uses this as the 'request.node.rep_call.passed' attribute,
    # in order to raise an error only if the test passed.
    outcome = yield
    rep = outcome.get_result()
    setattr(item, f"rep_{rep.when}", rep)


@pytest.hookimpl(tryfirst=True)
def pytest_report_teststatus(
    report: TestReport, config: Config
) -> tuple[str, str, tuple[str, dict[str, bool]]] | None:
    if report.passed:
        for prop, _ in report.user_properties:
            if prop == "ui_failed":
                return "ui_failed", "U", ("UI-FAILED", {"red": True})
            if prop == "ui_missing":
                return "ui_missing", "M", ("UI-MISSING", {"yellow": True})
    # else use default handling
    return None


@pytest.fixture
def device_handler(client: Client, request: pytest.FixtureRequest) -> t.Generator:
    device_handler = BackgroundDeviceHandler(client)
    yield device_handler

    # get call test result
    test_res = ui_tests.common.get_last_call_test_result(request)

    if test_res is None:
        return

    # if the test finished, make sure all background tasks are done
    finalized_ok = device_handler.check_finalize()
    if test_res and not finalized_ok:  # type: ignore [rep_call must exist]
        raise RuntimeError("Test did not check result of background task")
|