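"""UI test support for the device test suite (tests/ui_tests).

Records device screens during tests, compares them against the recorded
fixtures and generates the UI test report.
"""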
from __future__ import annotations

import shutil
from contextlib import contextmanager
from typing import Callable, Generator

import pytest
from _pytest.nodes import Node
from _pytest.outcomes import Failed

from trezorlib.debuglink import TrezorClientDebugLink as Client

from . import common
from .common import SCREENS_DIR, UI_TESTS_DIR, TestCase, TestResult
from .reporting import testreport

FIXTURES_SUGGESTION_FILE = UI_TESTS_DIR / "fixtures.suggestion.json"
FIXTURES_RESULTS_FILE = UI_TESTS_DIR / "fixtures.results.json"


def _process_recorded(result: TestResult) -> None:
# calculate hash
result.store_recorded()
    testreport.recorded(result)


def _process_tested(result: TestResult, item: Node) -> None:
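    """Compare the actual screen hash with the expected one and annotate the
    pytest item with the outcome."""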
if result.expected_hash is None:
testreport.missing(result)
item.user_properties.append(("ui_missing", None))
elif result.actual_hash != result.expected_hash:
testreport.failed(result)
item.user_properties.append(("ui_failed", None))
else:
        testreport.passed(result)


@contextmanager
def screen_recording(
client: Client, request: pytest.FixtureRequest
) -> Generator[None, None, None]:
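    """Record screens during a test and evaluate the result afterwards.

    Does nothing unless the ui option is set. In "record" mode the captured
    screens become the new expected recording; otherwise they are compared
    against the recorded fixtures.
    """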
test_ui = request.config.getoption("ui")
if not test_ui:
yield
return
record_text_layout = request.config.getoption("record_text_layout")
testcase = TestCase.build(client, request)
testcase.dir.mkdir(exist_ok=True, parents=True)
# remove previous files
shutil.rmtree(testcase.actual_dir, ignore_errors=True)
testcase.actual_dir.mkdir()
try:
client.debug.start_recording(str(testcase.actual_dir))
if record_text_layout:
client.debug.set_screen_text_file(testcase.screen_text_file)
client.debug.watch_layout(True)
yield
finally:
client.ensure_open()
client.sync_responses()
# Wait for response to Initialize, which gives the emulator time to catch up
# and redraw the homescreen. Otherwise there's a race condition between that
# and stopping recording.
if record_text_layout:
client.debug.set_screen_text_file(None)
client.debug.watch_layout(False)
client.init_device()
client.debug.stop_recording()
result = testcase.build_result(request)
if test_ui == "record":
_process_recorded(result)
else:
        _process_tested(result, request.node)


def setup(main_runner: bool) -> None:
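    """Prepare a clean state for the UI test run."""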
# clear metadata and "actual" recordings before current run, keep "recorded" around
if main_runner:
for meta in SCREENS_DIR.glob("*/metadata.json"):
meta.unlink()
shutil.rmtree(meta.parent / "actual", ignore_errors=True)
# clear testreport
    testreport.setup(main_runner)


def list_missing() -> set[str]:
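    """Return the IDs of expected UI tests that did not run."""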
# Only listing the ones for the current model
_, missing = common.prepare_fixtures(
TestResult.recent_results(), remove_missing=True
)
    return {test.id for test in missing}


def update_fixtures(remove_missing: bool = False) -> int:
"""Update the fixtures.json file with the actual hashes from the latest run.
Used in --ui=record and in update_fixtures.py
"""
results = list(TestResult.recent_results())
for result in results:
result.store_recorded()
common.write_fixtures_complete(results, remove_missing=remove_missing)
    return len(results)


def _should_write_ui_report(exitstatus: pytest.ExitCode) -> bool:
    # generate UI report and check missing only if pytest is exiting cleanly,
    # i.e., the test suite passed or failed (as opposed to ctrl+c break, internal error,
# etc.)
    return exitstatus in (pytest.ExitCode.OK, pytest.ExitCode.TESTS_FAILED)


def terminal_summary(
println: Callable[[str], None],
ui_option: str,
check_missing: bool,
exitstatus: pytest.ExitCode,
) -> None:
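    """Print a summary of UI test results at the end of the pytest run."""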
println("")
normal_exit = _should_write_ui_report(exitstatus)
missing_tests = list_missing()
if ui_option and normal_exit and missing_tests:
println(f"{len(missing_tests)} expected UI tests did not run.")
if check_missing:
println("-------- List of missing tests follows: --------")
for test in missing_tests:
println("\t" + test)
if ui_option == "test":
println("UI test failed.")
elif ui_option == "record":
println("Removing missing tests from record.")
println("")
if ui_option == "record" and exitstatus != pytest.ExitCode.OK:
println(
"\n-------- WARNING! Recording to fixtures.json was disabled due to failed tests. --------"
)
println("")
if normal_exit:
println("-------- UI tests summary: --------")
for result in TestResult.recent_results():
if result.passed and not result.ui_passed:
println(f"UI_FAILED: {result.test.id} ({result.actual_hash})")
println("Run ./tests/show_results.py to open test summary")
println("")
println("-------- Accepting all recent UI changes: --------")
println("Run ./tests/update_fixtures.py to apply all changes")
        println("")


def sessionfinish(
exitstatus: pytest.ExitCode,
test_ui: str,
check_missing: bool,
record_text_layout: bool,
do_master_diff: bool,
) -> pytest.ExitCode:
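    """Write UI reports and fixture files at the end of the test session.

    May turn a passing exit status into TESTS_FAILED when UI comparisons
    failed or expected UI tests are missing.
    """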
if not _should_write_ui_report(exitstatus):
return exitstatus
testreport.generate_reports(record_text_layout, do_master_diff)
recents = list(TestResult.recent_results())
if test_ui == "test":
common.write_fixtures_only_new_results(recents, dest=FIXTURES_RESULTS_FILE)
if any(t.passed and not t.ui_passed for t in recents):
return pytest.ExitCode.TESTS_FAILED
if test_ui == "test" and check_missing and list_missing():
common.write_fixtures_complete(
recents,
remove_missing=True,
dest=FIXTURES_SUGGESTION_FILE,
)
return pytest.ExitCode.TESTS_FAILED
if test_ui == "record" and exitstatus == pytest.ExitCode.OK:
update_fixtures(check_missing)
    return exitstatus


def main() -> None:
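    """Re-evaluate the most recent results outside of a pytest run and
    regenerate the report."""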
    for result in TestResult.recent_results():
        # No pytest item is available outside a test run, so compare the hashes
        # directly instead of going through _process_tested().
        if result.expected_hash is None:
            testreport.missing(result)
            print("MISSING:", result.test.id)
        elif result.actual_hash != result.expected_hash:
            testreport.failed(result)
            print("FAILED:", result.test.id)
        else:
            testreport.passed(result)
            print("PASSED:", result.test.id)

    testreport.generate_reports()