# This file is part of the Trezor project.
#
# Copyright (C) 2012-2019 SatoshiLabs and contributors
#
# This library is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License version 3
# as published by the Free Software Foundation.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the License along with this library.
# If not, see <https://www.gnu.org/licenses/lgpl-3.0.html>.

import os
from typing import TYPE_CHECKING, Generator

import pytest

from trezorlib import debuglink, log
from trezorlib.debuglink import TrezorClientDebugLink as Client
from trezorlib.device import apply_settings, wipe as wipe_device
from trezorlib.transport import enumerate_devices, get_transport

from . import ui_tests
from .device_handler import BackgroundDeviceHandler
from .ui_tests.reporting import testreport

if TYPE_CHECKING:
    from _pytest.config import Config
    from _pytest.config.argparsing import Parser
    from _pytest.terminal import TerminalReporter

# So that we see details of failed asserts from this module
pytest.register_assert_rewrite("tests.common")


@pytest.fixture(scope="session")
def _raw_client(request: pytest.FixtureRequest) -> Client:
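    # Transport selection is driven by two optional environment variables:
    #  * TREZOR_PATH -- explicit transport path of the device or emulator to
    #    use, e.g. "udp:127.0.0.1:21324" for the core emulator (example value
    #    only; see trezorlib.transport for the accepted formats).
    #  * INTERACT=1 -- disable automatic debuglink confirmation, so a human
    #    can press the buttons on a physical device during the test.
    # If TREZOR_PATH is not set, the first enumerated device that can be
    # opened as a debuglink client is used.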
    path = os.environ.get("TREZOR_PATH")
    interact = int(os.environ.get("INTERACT", 0))
    if path:
        try:
            transport = get_transport(path)
            return Client(transport, auto_interact=not interact)
        except Exception as e:
            request.session.shouldstop = "Failed to communicate with Trezor"
            raise RuntimeError(f"Failed to open debuglink for {path}") from e

    else:
        devices = enumerate_devices()
        for device in devices:
            try:
                return Client(device, auto_interact=not interact)
            except Exception:
                pass

        request.session.shouldstop = "Failed to communicate with Trezor"
        raise RuntimeError("No debuggable device found")


@pytest.fixture(scope="function")
def client(
    request: pytest.FixtureRequest, _raw_client: Client
) -> Generator[Client, None, None]:
    """Client fixture.

    Every test function that requires a client instance will get it from here.
    If we can't connect to a debuggable device, the test will fail.
    If 'skip_t2' is used and TT is connected, the test is skipped. Vice versa with T1
    and 'skip_t1'.

    The client instance is wiped and preconfigured with "all all all..." mnemonic, no
    passphrase and no PIN. It is possible to customize this with the `setup_client`
    marker.

    To specify a custom mnemonic and/or custom pin and/or enable passphrase:

        @pytest.mark.setup_client(mnemonic=MY_MNEMONIC, pin="9999", passphrase=True)

    To receive a client instance that was not initialized:

        @pytest.mark.setup_client(uninitialized=True)
    """
    if request.node.get_closest_marker("skip_t2") and _raw_client.features.model == "T":
        pytest.skip("Test excluded on Trezor T")
    if request.node.get_closest_marker("skip_t1") and _raw_client.features.model == "1":
        pytest.skip("Test excluded on Trezor 1")

    sd_marker = request.node.get_closest_marker("sd_card")
    if sd_marker and not _raw_client.features.sd_card_present:
        raise RuntimeError(
            "This test requires SD card.\n"
            "To skip all such tests, run:\n"
            "  pytest -m 'not sd_card' <test path>"
        )

    test_ui = request.config.getoption("ui")
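    # Bring the device into a known state before the test: clear any debuglink
    # overrides left over from a previous test, reconnect, and re-read features.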
    _raw_client.reset_debug_features()
    _raw_client.open()
    try:
        _raw_client.init_device()
    except Exception:
        request.session.shouldstop = "Failed to communicate with Trezor"
        pytest.fail("Failed to communicate with Trezor")

    if test_ui:
        # we need to reseed before the wipe
        _raw_client.debug.reseed(0)

    if sd_marker:
        should_format = sd_marker.kwargs.get("formatted", True)
        _raw_client.debug.erase_sd_card(format=should_format)

    wipe_device(_raw_client)

    setup_params = dict(
        uninitialized=False,
        mnemonic=" ".join(["all"] * 12),
        pin=None,
        passphrase=False,
        needs_backup=False,
        no_backup=False,
    )

    marker = request.node.get_closest_marker("setup_client")
    if marker:
        setup_params.update(marker.kwargs)

    use_passphrase = setup_params["passphrase"] is True or isinstance(
        setup_params["passphrase"], str
    )

    if not setup_params["uninitialized"]:
        debuglink.load_device(
            _raw_client,
            mnemonic=setup_params["mnemonic"],
            pin=setup_params["pin"],
            passphrase_protection=use_passphrase,
            label="test",
            language="en-US",
            needs_backup=setup_params["needs_backup"],
            no_backup=setup_params["no_backup"],
        )

        if _raw_client.features.model == "T":
            apply_settings(_raw_client, experimental_features=True)

        if use_passphrase and isinstance(setup_params["passphrase"], str):
            _raw_client.use_passphrase(setup_params["passphrase"])

        _raw_client.clear_session()

    if test_ui:
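        # Everything drawn on the device screen during the test is captured;
        # depending on --ui=record/test, the recording is stored as the new
        # fixture or compared against the recorded one.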
        with ui_tests.screen_recording(_raw_client, request):
            yield _raw_client
    else:
        yield _raw_client

    _raw_client.close()


def pytest_sessionstart(session: pytest.Session) -> None:
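    # Load the recorded UI fixtures so individual tests can be checked against
    # them; in `--ui=test` mode also start with a clean report directory.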
    ui_tests.read_fixtures()
    if session.config.getoption("ui") == "test":
        testreport.clear_dir()


def _should_write_ui_report(exitstatus: pytest.ExitCode) -> bool:
    # Generate the UI report and check missing tests only if pytest is exiting
    # cleanly, i.e. the test suite passed or failed (as opposed to a Ctrl+C
    # break, internal error, etc.)
    return exitstatus in (pytest.ExitCode.OK, pytest.ExitCode.TESTS_FAILED)


def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -> None:
    if not _should_write_ui_report(exitstatus):
        return
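    # Write out the UI-test artifacts: in `test` mode a fixtures.json suggestion
    # (when checking for missing tests) plus the HTML report index, in `record`
    # mode the updated fixtures themselves.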
    missing = session.config.getoption("ui_check_missing")
    if session.config.getoption("ui") == "test":
        if missing and ui_tests.list_missing():
            session.exitstatus = pytest.ExitCode.TESTS_FAILED
            ui_tests.write_fixtures_suggestion(missing)
        testreport.index()
    if session.config.getoption("ui") == "record":
        ui_tests.write_fixtures(missing)


def pytest_terminal_summary(
    terminalreporter: "TerminalReporter", exitstatus: pytest.ExitCode, config: "Config"
) -> None:
    println = terminalreporter.write_line
    println("")

    ui_option = config.getoption("ui")
    missing_tests = ui_tests.list_missing()
    if ui_option and _should_write_ui_report(exitstatus) and missing_tests:
        println(f"{len(missing_tests)} expected UI tests did not run.")
        if config.getoption("ui_check_missing"):
            println("-------- List of missing tests follows: --------")
            for test in missing_tests:
                println("\t" + test)

            if ui_option == "test":
                println("UI test failed.")
            elif ui_option == "record":
                println("Removing missing tests from record.")
            println("")

    if ui_option == "test" and _should_write_ui_report(exitstatus):
        println("\n-------- Suggested fixtures.json diff: --------")
        print("See", ui_tests.SUGGESTION_FILE)
        println("")

    if _should_write_ui_report(exitstatus):
        println("-------- UI tests summary: --------")
        println("Run ./tests/show_results.py to open test summary")
        println("")


def pytest_addoption(parser: "Parser") -> None:
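    # Both options below feed the UI-test machinery in this file. Typical
    # invocations (paths are illustrative):
    #   pytest tests/device_tests --ui=record
    #   pytest tests/device_tests --ui=test --ui-check-missing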
    parser.addoption(
        "--ui",
        action="store",
        choices=["test", "record"],
        help="Enable UI integration tests: 'record' or 'test'",
    )
    parser.addoption(
        "--ui-check-missing",
        action="store_true",
        default=False,
        help="Check that the UI fixtures contain the appropriate test cases (fails on `test`, "
        "deletes old ones on `record`).",
    )


def pytest_configure(config: "Config") -> None:
    """Called at testsuite setup time.

    Registers known markers, enables verbose output if requested.
    """
    # register known markers
    config.addinivalue_line("markers", "skip_t1: skip the test on Trezor One")
    config.addinivalue_line("markers", "skip_t2: skip the test on Trezor T")
    config.addinivalue_line(
        "markers",
        'setup_client(mnemonic="all all all...", pin=None, passphrase=False, uninitialized=False): configure the client instance',
    )
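    # REGISTERED_MARKERS is a plain-text file next to this conftest with one
    # marker name per line (coin markers such as `lisk` or `nem`), registered
    # here so pytest does not warn about unknown markers.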
    with open(os.path.join(os.path.dirname(__file__), "REGISTERED_MARKERS")) as f:
        for line in f:
            config.addinivalue_line("markers", line.strip())

    # enable debug
    if config.getoption("verbose"):
        log.enable_debug_output()


def pytest_runtest_setup(item: pytest.Item) -> None:
    """Called for each test item (class, individual tests).

    Ensures that altcoin tests are skipped when TREZOR_PYTEST_SKIP_ALTCOINS is
    set, and that no test is skipped on both T1 and TT.
    """
    if item.get_closest_marker("skip_t1") and item.get_closest_marker("skip_t2"):
        raise RuntimeError("Don't skip tests for both trezors!")
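    # Altcoin tests can be skipped wholesale by exporting
    # TREZOR_PYTEST_SKIP_ALTCOINS=1 before running the suite.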
    skip_altcoins = int(os.environ.get("TREZOR_PYTEST_SKIP_ALTCOINS", 0))
    if item.get_closest_marker("altcoin") and skip_altcoins:
        pytest.skip("Skipping altcoin test")


def pytest_runtest_teardown(item: pytest.Item) -> None:
    """Called after a test item finishes.

    Dumps the current UI test report HTML.
    """
    if item.session.config.getoption("ui") == "test":
        testreport.index()


@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item: pytest.Item, call) -> None:
    # Make test results available in fixtures.
    # See https://docs.pytest.org/en/latest/example/simple.html#making-test-result-information-available-in-fixtures
    # The device_handler fixture uses this as the 'request.node.rep_call.passed' attribute,
    # in order to raise an error only if the test passed.
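    # Depending on the phase, the attribute is named rep_setup, rep_call or
    # rep_teardown.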
    outcome = yield
    rep = outcome.get_result()
    setattr(item, f"rep_{rep.when}", rep)


@pytest.fixture
def device_handler(
    client: Client, request: pytest.FixtureRequest
) -> Generator[BackgroundDeviceHandler, None, None]:
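    # BackgroundDeviceHandler (see device_handler.py) runs trezorlib calls in a
    # background task so the test can drive the device through the debuglink at
    # the same time.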
    device_handler = BackgroundDeviceHandler(client)
    yield device_handler

    # if test did not finish, e.g. interrupted by Ctrl+C, the pytest_runtest_makereport
    # did not create the attribute we need
    if not hasattr(request.node, "rep_call"):
        return

    # if test finished, make sure all background tasks are done
    finalized_ok = device_handler.check_finalize()
    if request.node.rep_call.passed and not finalized_ok:  # type: ignore [rep_call must exist]
        raise RuntimeError("Test did not check result of background task")