mirror of https://github.com/trezor/trezor-firmware.git

tests: make ui-check-missing more resilient for nonstandard exits

matejcik 2020-02-21 15:47:25 +01:00 committed by matejcik
parent c418e628ac
commit f9097b16e6
2 changed files with 28 additions and 11 deletions
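
The change replaces ui_tests.check_missing(), which called pytest.fail() from inside a session hook, with ui_tests.list_missing(), which simply returns the set of fixture entries that never ran. The hooks then decide what to do with that set: pytest_sessionfinish flips the exit status of an otherwise-green run, and pytest_terminal_summary prints the details. Below is a minimal standalone sketch of that hook pattern, not code from this commit; fake_missing() is a hypothetical stand-in for ui_tests.list_missing():

import pytest

def fake_missing():
    # Hypothetical stand-in for ui_tests.list_missing(): the set of
    # expected UI tests that never ran in this session.
    return {"test_example_ui"}

def pytest_sessionfinish(session, exitstatus):
    # Only escalate an otherwise-green run; nonstandard exits
    # (interrupt, collection error) keep their original status.
    if exitstatus != pytest.ExitCode.OK:
        return
    if fake_missing():
        session.exitstatus = pytest.ExitCode.TESTS_FAILED

def pytest_terminal_summary(terminalreporter, exitstatus, config):
    # Report from the summary hook, but never raise here.
    missing = fake_missing()
    if exitstatus == pytest.ExitCode.OK and missing:
        terminalreporter.write_line(f"{len(missing)} expected UI tests did not run.")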


@@ -153,18 +153,37 @@ def pytest_sessionstart(session):
 def pytest_sessionfinish(session, exitstatus):
     if exitstatus != pytest.ExitCode.OK:
         return
     if session.config.getoption("ui") == "test":
-        if session.config.getoption("ui_check_missing"):
-            ui_tests.check_missing()
+        if session.config.getoption("ui_check_missing") and ui_tests.list_missing():
+            session.exitstatus = pytest.ExitCode.TESTS_FAILED
         report.index()
     if session.config.getoption("ui") == "record":
         ui_tests.write_fixtures(session.config.getoption("ui_check_missing"))


 def pytest_terminal_summary(terminalreporter, exitstatus, config):
-    terminalreporter.writer.line(
-        "\nUI tests summary: %s" % (report.REPORTS_PATH / "index.html")
-    )
+    println = terminalreporter.writer.line
+    println()
+
+    ui_option = config.getoption("ui")
+    missing_tests = ui_tests.list_missing()
+    if ui_option and exitstatus == pytest.ExitCode.OK and missing_tests:
+        println(f"{len(missing_tests)} expected UI tests did not run.")
+        if config.getoption("ui_check_missing"):
+            println("List of missing tests follows:")
+            for test in missing_tests:
+                println("\t" + test)
+
+            if ui_option == "test":
+                println("UI test failed.")
+            elif ui_option == "record":
+                println("Removing missing tests from record.")
+            println()
+
+    println(f"UI tests summary: {report.REPORTS_PATH / 'index.html'}")


 def pytest_addoption(parser):
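
For illustration, with the ui option set to "test", ui_check_missing enabled, and two missing tests, the summary hook above would print along these lines (test names and report path are made up), and pytest_sessionfinish would set the session exit code to TESTS_FAILED:

2 expected UI tests did not run.
List of missing tests follows:
	test_example_one
	test_example_two
UI test failed.

UI tests summary: <REPORTS_PATH>/index.html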


@@ -96,20 +96,18 @@ def screen_recording(client, request):
     try:
         client.debug.start_recording(str(screen_path))
         yield
-    finally:
-        client.debug.stop_recording()
-
-    if test_ui == "record":
-        _process_recorded(screen_path, test_name)
-    elif test_ui == "test":
-        _process_tested(screens_test_path, test_name)
-    else:
-        raise ValueError("Invalid 'ui' option.")
+        if test_ui == "record":
+            _process_recorded(screen_path, test_name)
+        elif test_ui == "test":
+            _process_tested(screens_test_path, test_name)
+        else:
+            raise ValueError("Invalid 'ui' option.")
+    finally:
+        client.debug.stop_recording()


-def check_missing():
-    missing = set(HASHES.keys()) - PROCESSED
-    if missing:
-        pytest.fail("Fixtures.json contains tests that are not tested: %s" % missing)
+def list_missing():
+    return set(HASHES.keys()) - PROCESSED


 def read_fixtures():
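
In the second file, the post-processing moves inside the try block so that client.debug.stop_recording() in the finally clause runs on every exit path, including when _process_recorded/_process_tested themselves raise, while a test that errors out or is interrupted skips the processing entirely. A minimal sketch of that fixture shape, assuming hypothetical print stand-ins for the trezorlib debug calls:

import pytest

@pytest.fixture
def screen_recording_sketch():
    try:
        print("recording started")   # stands in for client.debug.start_recording()
        yield
        # Runs only if the test body finished normally; a failing or
        # interrupted test jumps straight to the cleanup below.
        print("processing recorded screens")
    finally:
        # Guaranteed cleanup, even if the processing step raises.
        print("recording stopped")   # stands in for client.debug.stop_recording()

A test opts in simply by requesting the fixture, e.g. def test_example(screen_recording_sketch): ...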