1
0
mirror of https://github.com/trezor/trezor-firmware.git synced 2024-12-18 12:28:09 +00:00

feat(tests): reduce ui error spam

This commit is contained in:
matejcik 2024-03-30 13:55:00 +01:00 committed by matejcik
parent 863435cc9d
commit 25f4e07ad3
3 changed files with 34 additions and 23 deletions

View File

@@ -22,6 +22,7 @@ from typing import TYPE_CHECKING, Generator, Iterator
import pytest import pytest
import xdist import xdist
from _pytest.reports import TestReport
from trezorlib import debuglink, log, models from trezorlib import debuglink, log, models
from trezorlib.debuglink import TrezorClientDebugLink as Client from trezorlib.debuglink import TrezorClientDebugLink as Client
@@ -445,6 +446,20 @@ def pytest_runtest_makereport(item: pytest.Item, call) -> Generator:
setattr(item, f"rep_{rep.when}", rep) setattr(item, f"rep_{rep.when}", rep)
@pytest.hookimpl(tryfirst=True)
def pytest_report_teststatus(
    report: TestReport, config: Config
) -> tuple[str, str, tuple[str, dict[str, bool]]] | None:
    """Give passing tests that carry a UI-comparison marker a distinct
    terminal status instead of reporting them as a plain pass.

    Returns a (category, short-letter, (verbose-word, markup)) triple for
    marked reports, or None to fall back to pytest's default handling.
    """
    # Marker property name -> status triple shown by the terminal reporter.
    status_overrides = {
        "ui_failed": ("ui_failed", "U", ("UI-FAILED", {"red": True})),
        "ui_missing": ("ui_missing", "M", ("UI-MISSING", {"yellow": True})),
    }
    if not report.passed:
        # Non-passing outcomes keep pytest's default status handling.
        return None
    for prop_name, _prop_value in report.user_properties:
        override = status_overrides.get(prop_name)
        if override is not None:
            return override
    return None
@pytest.fixture @pytest.fixture
def device_handler(client: Client, request: pytest.FixtureRequest) -> Generator: def device_handler(client: Client, request: pytest.FixtureRequest) -> Generator:
device_handler = BackgroundDeviceHandler(client) device_handler = BackgroundDeviceHandler(client)

View File

@@ -5,6 +5,7 @@ from contextlib import contextmanager
from typing import Callable, Generator from typing import Callable, Generator
import pytest import pytest
from _pytest.nodes import Node
from _pytest.outcomes import Failed from _pytest.outcomes import Failed
from trezorlib.debuglink import TrezorClientDebugLink as Client from trezorlib.debuglink import TrezorClientDebugLink as Client
@@ -23,23 +24,13 @@ def _process_recorded(result: TestResult) -> None:
testreport.recorded(result) testreport.recorded(result)
def _process_tested(result: TestResult) -> None: def _process_tested(result: TestResult, item: Node) -> None:
if result.expected_hash is None: if result.expected_hash is None:
file_path = testreport.missing(result) testreport.missing(result)
pytest.fail( item.user_properties.append(("ui_missing", None))
f"Hash of {result.test.id} not found in fixtures.json\n"
f"Expected: {result.expected_hash}\n"
f"Actual: {result.actual_hash}\n"
f"Diff file: {file_path}"
)
elif result.actual_hash != result.expected_hash: elif result.actual_hash != result.expected_hash:
file_path = testreport.failed(result) testreport.failed(result)
pytest.fail( item.user_properties.append(("ui_failed", None))
f"Hash of {result.test.id} differs\n"
f"Expected: {result.expected_hash}\n"
f"Actual: {result.actual_hash}\n"
f"Diff file: {file_path}"
)
else: else:
testreport.passed(result) testreport.passed(result)
@@ -83,7 +74,7 @@ def screen_recording(
if test_ui == "record": if test_ui == "record":
_process_recorded(result) _process_recorded(result)
else: else:
_process_tested(result) _process_tested(result, request.node)
def setup(main_runner: bool) -> None: def setup(main_runner: bool) -> None:
@@ -156,6 +147,9 @@ def terminal_summary(
if normal_exit: if normal_exit:
println("-------- UI tests summary: --------") println("-------- UI tests summary: --------")
for result in TestResult.recent_results():
if result.passed and not result.ui_passed:
println(f"UI_FAILED: {result.test.id} ({result.actual_hash})")
println("Run ./tests/show_results.py to open test summary") println("Run ./tests/show_results.py to open test summary")
println("") println("")
@@ -176,15 +170,16 @@ def sessionfinish(
testreport.generate_reports(record_text_layout, do_master_diff) testreport.generate_reports(record_text_layout, do_master_diff)
recents = list(TestResult.recent_results())
if test_ui == "test": if test_ui == "test":
common.write_fixtures_only_new_results( common.write_fixtures_only_new_results(recents, dest=FIXTURES_RESULTS_FILE)
TestResult.recent_results(), if any(t.passed and not t.ui_passed for t in recents):
dest=FIXTURES_RESULTS_FILE, return pytest.ExitCode.TESTS_FAILED
)
if test_ui == "test" and check_missing and list_missing(): if test_ui == "test" and check_missing and list_missing():
common.write_fixtures_complete( common.write_fixtures_complete(
TestResult.recent_results(), recents,
remove_missing=True, remove_missing=True,
dest=FIXTURES_SUGGESTION_FILE, dest=FIXTURES_SUGGESTION_FILE,
) )

View File

@@ -325,7 +325,8 @@ class TestResult:
json.dumps(metadata, indent=2, sort_keys=True) + "\n" json.dumps(metadata, indent=2, sort_keys=True) + "\n"
) )
def succeeded_in_ui_comparison(self) -> bool: @property
def ui_passed(self) -> bool:
return self.actual_hash == self.expected_hash return self.actual_hash == self.expected_hash
@classmethod @classmethod
@@ -357,7 +358,7 @@ class TestResult:
def recent_ui_failures(cls) -> t.Iterator[Self]: def recent_ui_failures(cls) -> t.Iterator[Self]:
"""Returning just the results that resulted in UI failure.""" """Returning just the results that resulted in UI failure."""
for result in cls.recent_results(): for result in cls.recent_results():
if not result.succeeded_in_ui_comparison(): if not result.ui_passed:
yield result yield result
def store_recorded(self) -> None: def store_recorded(self) -> None: