mirror of https://github.com/trezor/trezor-firmware.git synced 2025-01-10 15:30:55 +00:00

feat(tests): allow for accepting the recent UI diff without recording the tests

[no changelog]
grdddj 2023-01-02 17:57:07 +01:00 committed by Jiří Musil
parent 4f0343c8c6
commit 20f3658e7e
5 changed files with 56 additions and 2 deletions
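In short: when a UI test fails because a screen hash changed, the new hash is appended to tests/ui_tests/fixtures.json.diff during the run, and a new helper merges those hashes back into fixtures.json afterwards, so a full --ui=record pass is no longer needed just to accept expected changes. A minimal sketch of the resulting flow, assuming it is run from the directory containing the Makefile changed below (illustration only; the real entry points are the Makefile targets and tests/update_fixtures.py added in this commit):

import subprocess

# 1. Run the UI tests normally; any failing screens append their new
#    hashes to tests/ui_tests/fixtures.json.diff.
subprocess.run(["make", "test_emu_ui_multicore"])

# 2. Accept the recorded differences without re-running in --ui=record mode.
subprocess.run(["make", "test_emu_accept_fixtures"], check=True)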


@@ -114,8 +114,12 @@ test_emu_ui_multicore: ## run ui integration tests using multiple cores
 test_emu_ui_record: ## record and hash screens for ui integration tests
 	$(EMU_TEST) $(PYTEST) $(TESTPATH)/device_tests --ui=record --ui-check-missing $(TESTOPTS)
 
-test_emu_ui_record_multicore: ## record and hash screens for ui integration tests using multiple cores
-	$(PYTEST) -n auto $(TESTPATH)/device_tests $(TESTOPTS) --ui=record --ui-check-missing --control-emulators --model=core --random-order-seed=$(shell echo $$RANDOM)
+test_emu_ui_record_multicore: ## quickly record all screens
+	make test_emu_ui_multicore || echo "All errors are recorded in fixtures.json"
+	make test_emu_accept_fixtures
+
+test_emu_accept_fixtures: # accept UI fixtures from the last run of UI tests
+	../tests/update_fixtures.py
 
 pylint: ## run pylint on application sources and tests
 	pylint -E $(shell find src tests -name *.py)


@@ -249,6 +249,8 @@ def pytest_sessionstart(session: pytest.Session) -> None:
     ui_tests.read_fixtures()
     if session.config.getoption("ui") and _is_main_runner(session):
         testreport.clear_dir()
+        # Preparing a new empty file for UI diff
+        ui_tests.FIXTURES_DIFF.write_bytes(b"")
 
 
 def _should_write_ui_report(exitstatus: pytest.ExitCode) -> bool:

@@ -316,6 +318,10 @@ def pytest_terminal_summary(
         println("Run ./tests/show_results.py to open test summary")
         println("")
+        println("-------- Accepting all recent UI changes: --------")
+        println("Run ./tests/update_fixtures.py to apply all changes")
+        println("")
+
 
 
 def pytest_addoption(parser: "Parser") -> None:
     parser.addoption(
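One detail worth noting: the diff file is emptied only by the main runner (the _is_main_runner(session) guard above), while the per-test code added further below only ever appends to it, so parallel pytest-xdist workers do not clobber each other's entries.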


@@ -2,3 +2,4 @@
 *.html
 *.zip
 fixtures.suggestion.json
+fixtures.json.diff


@@ -18,6 +18,7 @@ UI_TESTS_DIR = Path(__file__).resolve().parent
 SCREENS_DIR = UI_TESTS_DIR / "screens"
 HASH_FILE = UI_TESTS_DIR / "fixtures.json"
 SUGGESTION_FILE = UI_TESTS_DIR / "fixtures.suggestion.json"
+FIXTURES_DIFF = UI_TESTS_DIR / "fixtures.json.diff"
 FILE_HASHES: Dict[str, str] = {}
 ACTUAL_HASHES: Dict[str, str] = {}
 PROCESSED: Set[str] = set()
@@ -92,6 +93,16 @@ def _process_tested(fixture_test_path: Path, test_name: str) -> None:
             fixture_test_path, test_name, actual_hash, expected_hash
         )
 
+        # Writing the diff to a file, so that we can process it later
+        # Appending a new JSON object, not having to regenerate the
+        # whole file (which could cause issues with multiple processes/threads)
+        with open(FIXTURES_DIFF, "a") as f:
+            diff = {
+                "test_name": test_name,
+                "actual_hash": actual_hash,
+            }
+            f.write(json.dumps(diff) + "\n")
+
         pytest.fail(
             f"Hash of {test_name} differs.\n"
             f"Expected: {expected_hash}\n"
@@ -186,6 +197,31 @@ def write_fixtures_suggestion(
     )
 
 
+def update_fixtures_with_diff() -> int:
+    """Update the fixtures.json file with the actual hashes from the diff file.
+
+    Use-case is that the UI test run will generate the differing hashes,
+    and with this function we can simply update the fixtures.json file
+    without having to call the UI tests again in recording mode.
+    """
+    if not FIXTURES_DIFF.exists():
+        raise ValueError(f"File {FIXTURES_DIFF} not found.")
+
+    read_fixtures()
+
+    changes_amount = 0
+    with open(FIXTURES_DIFF) as f:
+        for line in f:
+            changes_amount += 1
+            diff = json.loads(line)
+            FILE_HASHES[diff["test_name"]] = diff["actual_hash"]
+
+    write_fixtures(remove_missing=False)
+
+    # Returning the amount of updated hashes
+    return changes_amount
+
+
 def _get_fixtures_content(
     fixtures: Dict[str, str], remove_missing: bool, only_passed_tests: bool = False
 ) -> str:
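A side effect of replaying the file into a plain dict keyed by test name is that the last entry for a given test wins, so appending the same test twice (for example after re-running a flaky UI test) is harmless; note, though, that the returned changes_amount counts diff lines rather than distinct tests. A small sketch of that merge behaviour, with invented names:

import json

# Stand-in for the fixtures.json content loaded by read_fixtures().
FILE_HASHES = {"some_test": "old_hash"}

diff_lines = [
    json.dumps({"test_name": "some_test", "actual_hash": "first_try"}),
    json.dumps({"test_name": "some_test", "actual_hash": "second_try"}),
]

# Same loop shape as update_fixtures_with_diff: later lines override earlier ones.
for line in diff_lines:
    entry = json.loads(line)
    FILE_HASHES[entry["test_name"]] = entry["actual_hash"]

assert FILE_HASHES["some_test"] == "second_try"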

tests/update_fixtures.py Executable file

@@ -0,0 +1,7 @@
+#!/usr/bin/env python3
+
+from ui_tests import update_fixtures_with_diff
+
+changes_amount = update_fixtures_with_diff()
+print(f"{changes_amount} hashes updated in fixtures.json file.")