Mirror of https://github.com/trezor/trezor-firmware.git, synced 2025-07-25 16:08:32 +00:00
feat(tests): save text representation of all screens during UI tests
commit b9dfd3a8d0 (parent a86c29cfe5)
@@ -111,7 +111,7 @@ test_emu_click: ## run click tests

 test_emu_ui: ## run ui integration tests
 	$(EMU_TEST) $(PYTEST) $(TESTPATH)/device_tests $(TESTOPTS) \
-		--ui=test --ui-check-missing --not-generate-report-after-each-test
+		--ui=test --ui-check-missing --not-generate-report-after-each-test --record-text-layout

 test_emu_ui_multicore: ## run ui integration tests using multiple cores
 	PYTEST_TIMEOUT=200 $(PYTEST) -n auto $(TESTPATH)/device_tests $(TESTOPTS) \
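With this change, `make test_emu_ui` passes the new `--record-text-layout` flag to pytest, so a UI test run also captures a text representation of every screen; the multicore target is left without the flag, since the option is documented below as not working well with multiple cores. Assuming `$(TESTPATH)` points at the `tests/` directory, an equivalent manual invocation would be something like `pytest tests/device_tests --ui=test --ui-check-missing --record-text-layout` against a running emulator.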
@@ -197,6 +197,15 @@ class DebugLink:
         self.t1_screenshot_directory: Optional[Path] = None
         self.t1_screenshot_counter = 0

+        # Optional file for saving text representation of the screen
+        self.screen_text_file: Optional[Path] = None
+        self.last_screen_content = ""
+
+    def set_screen_text_file(self, file_path: Optional[Path]) -> None:
+        if file_path is not None:
+            Path(file_path).write_bytes(b"")
+        self.screen_text_file = file_path
+
     def open(self) -> None:
         self.transport.begin_session()

@@ -302,12 +311,35 @@ class DebugLink:
             wait=wait,
             hold_ms=hold_ms,
         )

+        # Optionally saving the textual screen output
+        if self.screen_text_file is not None:
+            layout = self.read_layout()
+            self.save_debug_screen(layout.lines)
+
         ret = self._call(decision, nowait=not wait)
         if ret is not None:
             return LayoutContent(ret.lines)

         return None

+    def save_debug_screen(self, lines: List[str]) -> None:
+        if self.screen_text_file is not None:
+            if not self.screen_text_file.exists():
+                self.screen_text_file.write_bytes(b"")
+
+            content = "\n".join(lines)
+
+            # Not writing the same screen twice
+            if content == self.last_screen_content:
+                return
+
+            self.last_screen_content = content
+
+            with open(self.screen_text_file, "a") as f:
+                f.write(content)
+                f.write("\n" + 80 * "/" + "\n")
+
     # Type overloads make sure that when we supply `wait=True` into `click()`,
     # it will always return `LayoutContent` and we do not need to assert `is not None`.
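To illustrate the append-and-deduplicate behaviour added above, here is a minimal standalone sketch; the `ScreenTextRecorder` class and the file path are illustrative stand-ins for `DebugLink`, not part of the commit:

```python
from pathlib import Path
from typing import List, Optional


class ScreenTextRecorder:
    """Illustrative stand-in for DebugLink's screen-text recording."""

    def __init__(self) -> None:
        self.screen_text_file: Optional[Path] = None
        self.last_screen_content = ""

    def set_screen_text_file(self, file_path: Optional[Path]) -> None:
        # Starting a new recording truncates the target file
        if file_path is not None:
            Path(file_path).write_bytes(b"")
        self.screen_text_file = file_path

    def save_debug_screen(self, lines: List[str]) -> None:
        if self.screen_text_file is None:
            return
        content = "\n".join(lines)
        # Consecutive identical screens are written only once
        if content == self.last_screen_content:
            return
        self.last_screen_content = content
        with open(self.screen_text_file, "a") as f:
            f.write(content)
            f.write("\n" + 80 * "/" + "\n")


recorder = ScreenTextRecorder()
recorder.set_screen_text_file(Path("screens.txt"))
recorder.save_debug_screen(["PIN keyboard", "Enter PIN"])
recorder.save_debug_screen(["PIN keyboard", "Enter PIN"])  # skipped: same as last screen
recorder.save_debug_screen(["Confirm action", "Do you really want to continue?"])
# screens.txt now holds two screen dumps separated by a line of 80 slashes
```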
@@ -268,18 +268,19 @@ def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -

     missing = session.config.getoption("ui_check_missing")
     test_ui = session.config.getoption("ui")
+    record_text_layout = bool(session.config.getoption("record_text_layout"))

     if test_ui == "test":
         if missing and ui_tests.list_missing():
             session.exitstatus = pytest.ExitCode.TESTS_FAILED
             ui_tests.write_fixtures_suggestion(missing)
-        testreport.generate_reports()
+        testreport.generate_reports(record_text_layout)
     elif test_ui == "record":
         if exitstatus == pytest.ExitCode.OK:
             ui_tests.write_fixtures(missing)
         else:
             ui_tests.write_fixtures_suggestion(missing, only_passed_tests=True)
-        testreport.generate_reports()
+        testreport.generate_reports(record_text_layout)


 def pytest_terminal_summary(
@@ -360,6 +361,14 @@ def pytest_addoption(parser: "Parser") -> None:
         help="Not generating HTML reports after each test case. "
         "Useful for CI tests to speed them up.",
     )
+    parser.addoption(
+        "--record-text-layout",
+        action="store_true",
+        default=False,
+        help="Saving debugging traces for each screen change. "
+        "Will generate a report with text from all test-cases. "
+        "WARNING: does not work well with multicore (causes freezing).",
+    )


 def pytest_configure(config: "Config") -> None:
tests/ui_tests/.gitignore (vendored, 1 change)
@@ -1,5 +1,6 @@
 *.png
 *.html
 *.zip
+*.txt
 fixtures.suggestion.json
 fixtures.json.diff
@@ -127,6 +127,7 @@ def screen_recording(
     client: Client, request: pytest.FixtureRequest
 ) -> Generator[None, None, None]:
     test_ui = request.config.getoption("ui")
+    record_text_layout = request.config.getoption("record_text_layout")
     test_name = get_test_name(request.node.nodeid)

     # Differentiating test names between T1 and TT
@@ -138,11 +139,18 @@ def screen_recording(

     screens_test_path = SCREENS_DIR / test_name

+    # In which directory to save the screenshots
     if test_ui == "record":
         screen_path = screens_test_path / "recorded"
     else:
         screen_path = screens_test_path / "actual"

+    # Whether and where to save the text layout
+    if record_text_layout:
+        screen_text_file = screens_test_path / "screens.txt"
+    else:
+        screen_text_file = None
+
     if not screens_test_path.exists():
         screens_test_path.mkdir()
     # remove previous files
@@ -151,6 +159,7 @@ def screen_recording(

     try:
         client.debug.start_recording(str(screen_path))
+        client.debug.set_screen_text_file(screen_text_file)
         yield
     finally:
         # Wait for response to Initialize, which gives the emulator time to catch up
@@ -158,6 +167,7 @@ def screen_recording(
         # and stopping recording.
         client.init_device()
         client.debug.stop_recording()
+        client.debug.set_screen_text_file(None)

     if test_ui:
         PROCESSED.add(test_name)
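Taken together, the fixture wiring is: when `--record-text-layout` is given, a fresh `screens.txt` is created next to the recorded screenshots in the per-test directory under `screens/`, `DebugLink` appends a text dump for every screen change while the test runs, and the file is detached again in the `finally` block via `set_screen_text_file(None)` so subsequent tests cannot write into it.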
@@ -16,6 +16,7 @@ from . import download, html
 HERE = Path(__file__).resolve().parent
 REPORTS_PATH = HERE / "reports" / "test"
 RECORDED_SCREENS_PATH = Path(__file__).resolve().parent.parent / "screens"
+SCREEN_TEXT_FILE = REPORTS_PATH / "screen_text.txt"

 STYLE = (HERE / "testreport.css").read_text()
 SCRIPT = (HERE / "testreport.js").read_text()
@@ -201,7 +202,19 @@ def all_unique_screens(test_case_dirs: List[Path]) -> Path:
     return html.write(REPORTS_PATH, doc, ALL_UNIQUE_SCREENS)


-def generate_reports() -> None:
+def screen_text_report(test_case_dirs: List[Path]) -> None:
+    with open(SCREEN_TEXT_FILE, "w") as f2:
+        for test_case_dir in test_case_dirs:
+            screen_file = test_case_dir / "screens.txt"
+            if not screen_file.exists():
+                continue
+            f2.write(f"\n{test_case_dir.name}\n")
+            with open(screen_file, "r") as f:
+                for line in f.readlines():
+                    f2.write(f"\t{line}")
+
+
+def generate_reports(do_screen_text: bool = False) -> None:
     """Generate HTML reports for the test."""
     index()

@@ -210,6 +223,8 @@ def generate_reports() -> None:
     current_testcases = _get_testcases_dirs()
     all_screens(current_testcases)
     all_unique_screens(current_testcases)
+    if do_screen_text:
+        screen_text_report(current_testcases)


 def _img_hash(img: Path) -> str:
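As a usage sketch (not part of the commit), the aggregated text report can also be regenerated by hand after a run that used `--record-text-layout`; the import path below is an assumption about the test package layout:

```python
from ui_tests.reporting import testreport  # assumed import path

# Rebuild the HTML reports and, because do_screen_text is set, also merge every
# per-test screens.txt into reports/test/screen_text.txt, with each test-case
# name as a header and its screen lines indented by a tab.
testreport.generate_reports(do_screen_text=True)
```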