diff --git a/ci/test.yml b/ci/test.yml
index 61be0c3c80..5c20f49e2a 100644
--- a/ci/test.yml
+++ b/ci/test.yml
@@ -54,8 +54,7 @@ core unix device ui test:
     paths:
       - trezor.log
       - ci/ui_test_records/
-      - tests/ui_tests/fixtures/*/failure_diff.html
-      - tests/ui_tests/fixtures/*/success.html
+      - tests/ui_tests/reports/
       - tests/junit.xml
     when: always
     expire_in: 1 week
diff --git a/tests/conftest.py b/tests/conftest.py
index 5109be9ca1..9fc51eb6e6 100644
--- a/tests/conftest.py
+++ b/tests/conftest.py
@@ -26,6 +26,7 @@ from trezorlib.transport import enumerate_devices, get_transport
 
 from . import ui_tests
 from .device_handler import BackgroundDeviceHandler
+from .ui_tests import get_test_name, report
 
 
 def get_device():
@@ -145,6 +146,35 @@ def client(request):
     client.close()
 
 
+def pytest_sessionstart(session):
+    if session.config.getoption("ui") == "test":
+        report.clear_dir()
+
+
+def pytest_sessionfinish(session, exitstatus):
+    if session.config.getoption("ui") != "test":
+        return
+
+    reporter = session.config.pluginmanager.get_plugin("terminalreporter")
+    # intentionally set(), because there are multiple stages for one test in the TestReport items
+    test_names = {"passed": set(), "failed": set()}
+    for status, test in reporter.stats.items():
+        if status in ("deselected", "warnings"):
+            continue
+        if status in ("passed", "failed"):
+            # iterate through the stages to get the test name
+            for t in test:
+                test_names[status].add(get_test_name(t.nodeid))
+
+    report.index(test_names, exitstatus)
+
+
+def pytest_terminal_summary(terminalreporter, exitstatus, config):
+    terminalreporter.writer.line(
+        "\nUI tests summary: %s" % (report.REPORTS_PATH / "index.html")
+    )
+
+
 def pytest_addoption(parser):
     parser.addoption(
         "--ui",
diff --git a/tests/ui_tests/.gitignore b/tests/ui_tests/.gitignore
index 58760efc84..004c322998 100644
--- a/tests/ui_tests/.gitignore
+++ b/tests/ui_tests/.gitignore
@@ -1,3 +1,4 @@
 *.png
 *.html
 *.zip
+reports/
diff --git a/tests/ui_tests/__init__.py b/tests/ui_tests/__init__.py
index 82ab7916a2..f13988e8e2 100644
--- a/tests/ui_tests/__init__.py
+++ b/tests/ui_tests/__init__.py
@@ -8,6 +8,8 @@ import pytest
 
 from . import report
 
+UI_TESTS_DIR = Path(__file__).parent.resolve()
+
 
 def get_test_name(node_id):
     # Test item name is usually function name, but when parametrization is used,
@@ -67,28 +69,24 @@ def _process_tested(fixture_test_path, test_name):
     _rename_records(actual_path)
 
     if actual_hash != expected_hash:
-        file_path = report.failure(
+        file_path = report.failed(
             fixture_test_path, test_name, actual_hash, expected_hash
         )
-        if (fixture_test_path / "success.html").exists():
-            (fixture_test_path / "success.html").unlink()
         pytest.fail(
             "Hash of {} differs.\nExpected: {}\nActual: {}\nDiff file: {}".format(
                 test_name, expected_hash, actual_hash, file_path
            )
        )
    else:
-        report.success(fixture_test_path, test_name, actual_hash)
-        if (fixture_test_path / "failure_diff.html").exists():
-            (fixture_test_path / "failure_diff.html").unlink()
+        report.passed(fixture_test_path, test_name, actual_hash)
 
 
 @contextmanager
 def screen_recording(client, request):
     test_ui = request.config.getoption("ui")
     test_name = get_test_name(request.node.nodeid)
-    fixture_test_path = Path(__file__).parent.resolve() / "fixtures" / test_name
+    fixture_test_path = UI_TESTS_DIR / "fixtures" / test_name
 
     if test_ui == "record":
         screen_path = fixture_test_path / "recorded"
@@ -97,7 +95,9 @@
     else:
         raise ValueError("Invalid 'ui' option.")
 
-    _check_fixture_directory(fixture_test_path, screen_path)
+    # remove previous files
+    shutil.rmtree(screen_path, ignore_errors=True)
+    screen_path.mkdir()
 
     try:
         client.debug.start_recording(str(screen_path))
diff --git a/tests/ui_tests/report.py b/tests/ui_tests/report.py
index 734f6a8221..a58ad23347 100644
--- a/tests/ui_tests/report.py
+++ b/tests/ui_tests/report.py
@@ -1,13 +1,18 @@
 import base64
 import filecmp
+import shutil
+from datetime import datetime
 from distutils.dir_util import copy_tree
 from itertools import zip_longest
+from pathlib import Path
 
 import dominate
-from dominate.tags import div, h1, hr, i, img, p, table, td, th, tr
+from dominate.tags import a, div, h1, h2, hr, i, img, p, table, td, th, tr
 
 from . import download
 
+REPORTS_PATH = Path(__file__).parent.resolve() / "reports"
+
 
 def _image(src):
     with td():
@@ -47,20 +52,62 @@ def _write(fixture_test_path, doc, filename):
     return fixture_test_path / filename
 
 
-def failure(fixture_test_path, test_name, actual_hash, expected_hash):
+def _report_links(tests, status):
+    if status not in ("failed", "passed"):
+        raise ValueError("Different status than failed/passed is not yet supported.")
+    if not tests:
+        i("None!")
+        return
+    with table(border=1):
+        with tr():
+            th("Link to report")
+        for test in tests:
+            with tr():
+                td(a(test, href=REPORTS_PATH / status / (test + ".html")))
+
+
+def clear_dir():
+    # delete and create the reports dir to clear previous entries
+    shutil.rmtree(REPORTS_PATH, ignore_errors=True)
+    REPORTS_PATH.mkdir()
+    (REPORTS_PATH / "failed").mkdir()
+    (REPORTS_PATH / "passed").mkdir()
+
+
+def index(tests, status):
+    title = "UI Test report " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
+    doc = dominate.document(title=title)
+
+    with doc:
+        h1("UI Test report")
+        if status == 0:
+            p("All tests succeeded!", style="color: green; font-weight: bold;")
+        else:
+            p("Some tests failed!", style="color: red; font-weight: bold;")
+        hr()
+
+        h2("Failed", style="color: red;")
+        _report_links(tests["failed"], "failed")
+
+        h2("Passed", style="color: green;")
+        _report_links(tests["passed"], "passed")
+
+    return _write(REPORTS_PATH, doc, "index.html")
+
+
+def failed(fixture_test_path, test_name, actual_hash, expected_hash):
     doc = dominate.document(title=test_name)
     recorded_path = fixture_test_path / "recorded"
     actual_path = fixture_test_path / "actual"
 
     if not recorded_path.exists():
         recorded_path.mkdir()
-
     download.fetch_recorded(expected_hash, recorded_path)
 
-    recorded = sorted(recorded_path.iterdir())
-    actual = sorted(actual_path.iterdir())
+    recorded_screens = sorted(recorded_path.iterdir())
+    actual_screens = sorted(actual_path.iterdir())
 
-    if not recorded:
+    if not recorded_screens:
         return
 
     with doc:
@@ -71,24 +118,24 @@
                 th("Expected")
                 th("Actual")
 
-            for r, a in zip_longest(recorded, actual):
-                if r and a and filecmp.cmp(a, r):
+            for recorded, actual in zip_longest(recorded_screens, actual_screens):
+                if recorded and actual and filecmp.cmp(actual, recorded):
                     background = "white"
                 else:
                     background = "red"
                 with tr(bgcolor=background):
-                    _image(r)
-                    _image(a)
+                    _image(recorded)
+                    _image(actual)
 
-    return _write(fixture_test_path, doc, "failure_diff.html")
+    return _write(REPORTS_PATH / "failed", doc, test_name + ".html")
 
 
-def success(fixture_test_path, test_name, actual_hash):
+def passed(fixture_test_path, test_name, actual_hash):
     copy_tree(str(fixture_test_path / "actual"), str(fixture_test_path / "recorded"))
     doc = dominate.document(title=test_name)
     actual_path = fixture_test_path / "actual"
-    actual = sorted(actual_path.iterdir())
+    actual_screens = sorted(actual_path.iterdir())
 
     with doc:
         _header(test_name, actual_hash, actual_hash)
 
         with table(border=1):
            with tr():
                th("Recorded")
 
-            for a in actual:
+            for screen in actual_screens:
                with tr():
-                    _image(a)
+                    _image(screen)
 
-    return _write(fixture_test_path, doc, "success.html")
+    return _write(REPORTS_PATH / "passed", doc, test_name + ".html")
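
Note on the resulting layout (illustrative sketch, not part of the patch): with --ui=test, pytest_sessionstart calls report.clear_dir(), each test then writes its own page via report.passed() or report.failed(), and pytest_sessionfinish builds the summary whose path is printed in the terminal summary. Assuming a hypothetical test named test_msg_ping, the generated files look roughly like this (a given test ends up in only one of the two subdirectories):

    tests/ui_tests/reports/
        index.html              # summary written by report.index(), links to the per-test pages
        passed/
            test_msg_ping.html  # written by report.passed(): screens of the passing run
        failed/
            test_msg_ping.html  # written by report.failed(): expected vs. actual screens, rows highlighted red on mismatch

The whole reports/ directory is git-ignored and uploaded as a CI artifact in place of the previous per-fixture failure_diff.html and success.html files.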