chore(tests): tests recording also generates "results web page"

pull/2294/head
Martin Novak, 2 years ago, committed by marnova
parent 0388268b57
commit a69e43d1a6

@@ -165,7 +165,7 @@ def client(
 def pytest_sessionstart(session: pytest.Session) -> None:
     ui_tests.read_fixtures()
-    if session.config.getoption("ui") == "test":
+    if session.config.getoption("ui"):
         testreport.clear_dir()
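Note on the condition above: "ui" is a tri-state option (None when absent, otherwise "test" or "record"), so the plain truthiness check now clears the report directory in both active modes, not just "test". A minimal illustration of the assumed option semantics:

# Assumed semantics of the "ui" option: None / "test" / "record".
for ui_option in (None, "test", "record"):
    generates_report = bool(ui_option)    # new: report pages in both active modes
    is_recording = ui_option == "record"  # recording still needs the exact value
    print(f"{ui_option!r}: report={generates_report}, recording={is_recording}")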
@@ -181,13 +181,19 @@ def pytest_sessionfinish(session: pytest.Session, exitstatus: pytest.ExitCode) -> None:
         return
     missing = session.config.getoption("ui_check_missing")
-    if session.config.getoption("ui") == "test":
+    test_ui = session.config.getoption("ui")
+
+    if test_ui == "test":
         if missing and ui_tests.list_missing():
             session.exitstatus = pytest.ExitCode.TESTS_FAILED
             ui_tests.write_fixtures_suggestion(missing)
         testreport.index()
-    if session.config.getoption("ui") == "record":
-        ui_tests.write_fixtures(missing)
+    if test_ui == "record":
+        if exitstatus == pytest.ExitCode.OK:
+            ui_tests.write_fixtures(missing)
+        else:
+            ui_tests.write_fixtures_suggestion(missing, only_passed_tests=True)
+        testreport.index()
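The net effect: only a fully green run rewrites fixtures.json; any failure downgrades the output to the suggestion file, restricted to hashes from passed tests. pytest.ExitCode is an IntEnum, so the comparison against the hook's exitstatus argument is exact:

import pytest

# Sanity check of the enum values this hunk relies on.
assert pytest.ExitCode.OK == 0
assert pytest.ExitCode.TESTS_FAILED == 1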
 def pytest_terminal_summary(

@@ -216,6 +222,13 @@ def pytest_terminal_summary(
         print("See", ui_tests.SUGGESTION_FILE)
         println("")
+    if ui_option == "record" and exitstatus != pytest.ExitCode.OK:
+        println(
+            f"\n-------- WARNING! Recording to {ui_tests.HASH_FILE.name} was disabled due to failed tests. --------"
+        )
+        print("See", ui_tests.SUGGESTION_FILE, "for suggestions for ONLY PASSED tests.")
+        println("")
     if _should_write_ui_report(exitstatus):
         println("-------- UI tests summary: --------")
         println("Run ./tests/show_results.py to open test summary")
@@ -278,7 +291,7 @@ def pytest_runtest_teardown(item: pytest.Item) -> None:
     Dumps the current UI test report HTML.
     """
-    if item.session.config.getoption("ui") == "test":
+    if item.session.config.getoption("ui"):
         testreport.index()
@@ -298,12 +311,13 @@ def device_handler(client: Client, request: pytest.FixtureRequest) -> None:
     device_handler = BackgroundDeviceHandler(client)
     yield device_handler

-    # if test did not finish, e.g. interrupted by Ctrl+C, the pytest_runtest_makereport
-    # did not create the attribute we need
-    if not hasattr(request.node, "rep_call"):
+    # get call test result
+    test_res = ui_tests.get_last_call_test_result(request)
+
+    if test_res is None:
         return

     # if test finished, make sure all background tasks are done
     finalized_ok = device_handler.check_finalize()
-    if request.node.rep_call.passed and not finalized_ok:  # type: ignore [rep_call must exist]
+    if test_res and not finalized_ok:
         raise RuntimeError("Test did not check result of background task")
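For context, rep_call is attached to the test item by the standard pytest hookwrapper idiom elsewhere in conftest (not part of this diff). A minimal sketch of that pattern, as documented by pytest:

import pytest

@pytest.hookimpl(tryfirst=True, hookwrapper=True)
def pytest_runtest_makereport(item, call):
    # Stash each phase's report on the item: rep_setup, rep_call, rep_teardown.
    # get_last_call_test_result() later reads item.rep_call.passed.
    outcome = yield
    rep = outcome.get_result()
    setattr(item, "rep_" + rep.when, rep)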

@@ -5,7 +5,7 @@ import re
 import shutil
 from contextlib import contextmanager
 from pathlib import Path
-from typing import Dict, Generator, Set
+from typing import Dict, Generator, Optional, Set

 import pytest
 from _pytest.outcomes import Failed
@@ -22,6 +22,7 @@ SUGGESTION_FILE = UI_TESTS_DIR / "fixtures.suggestion.json"
 FILE_HASHES: Dict[str, str] = {}
 ACTUAL_HASHES: Dict[str, str] = {}
 PROCESSED: Set[str] = set()
+FAILED_TESTS: Set[str] = set()

 # T1/TT, to be set in screen_recording(), as we do not know it beforehand
 # TODO: it is not the cleanest, we could create a class out of this file
@@ -44,9 +45,11 @@ def get_test_name(node_id: str) -> str:

 def _process_recorded(screen_path: Path, test_name: str) -> None:
     # calculate hash
-    FILE_HASHES[test_name] = _hash_files(screen_path)
+    actual_hash = _hash_files(screen_path)
+    FILE_HASHES[test_name] = actual_hash
+    ACTUAL_HASHES[test_name] = actual_hash
     _rename_records(screen_path)
-    PROCESSED.add(test_name)
+    testreport.recorded(screen_path, test_name, actual_hash)


 def _rename_records(screen_path: Path) -> None:
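_process_recorded now feeds both hash registries and renders the report page right away via testreport.recorded(). _hash_files itself is defined elsewhere in this module; purely as an illustrative sketch (the real implementation may differ), a stable content hash over a directory of screenshots could look like:

import hashlib
from pathlib import Path

def _hash_files_sketch(path: Path) -> str:
    # Hash file names and contents in sorted order, so any added, removed,
    # or changed screenshot yields a different digest.
    digest = hashlib.sha256()
    for file in sorted(path.iterdir()):
        digest.update(file.name.encode())
        digest.update(file.read_bytes())
    return digest.hexdigest()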
@@ -74,8 +77,6 @@ def _get_bytes_from_png(png_file: str) -> bytes:

 def _process_tested(fixture_test_path: Path, test_name: str) -> None:
-    PROCESSED.add(test_name)
-
     actual_path = fixture_test_path / "actual"
     actual_hash = _hash_files(actual_path)
     ACTUAL_HASHES[test_name] = actual_hash
@@ -102,6 +103,15 @@ def _process_tested(fixture_test_path: Path, test_name: str) -> None:
         testreport.passed(fixture_test_path, test_name, actual_hash)


+def get_last_call_test_result(request: pytest.FixtureRequest) -> Optional[bool]:
+    # if the test did not finish, e.g. interrupted by Ctrl+C, the
+    # pytest_runtest_makereport hook did not create the attribute we need
+    if not hasattr(request.node, "rep_call"):
+        return None
+    return request.node.rep_call.passed
+
+
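The Optional[bool] return deliberately distinguishes three outcomes, which the rest of this diff consumes separately:

# Given a pytest FixtureRequest `request`:
result = get_last_call_test_result(request)
if result is None:
    pass  # test never finished (e.g. Ctrl+C): device_handler skips its checks
elif result:
    pass  # test passed: its hash is eligible for fixtures.json
else:
    pass  # test failed: screen_recording() adds it to FAILED_TESTS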
 @contextmanager
 def screen_recording(
     client: Client, request: pytest.FixtureRequest
@@ -141,10 +151,15 @@ def screen_recording(
             client.init_device()
         client.debug.stop_recording()
-    if test_ui == "record":
-        _process_recorded(screen_path, test_name)
-    else:
-        _process_tested(screens_test_path, test_name)
+    if test_ui:
+        PROCESSED.add(test_name)
+        if get_last_call_test_result(request) is False:
+            FAILED_TESTS.add(test_name)
+
+        if test_ui == "record":
+            _process_recorded(screen_path, test_name)
+        else:
+            _process_tested(screens_test_path, test_name)
 def list_missing() -> Set[str]:

@@ -166,17 +181,28 @@ def write_fixtures(remove_missing: bool) -> None:
     HASH_FILE.write_text(_get_fixtures_content(FILE_HASHES, remove_missing))


-def write_fixtures_suggestion(remove_missing: bool) -> None:
-    SUGGESTION_FILE.write_text(_get_fixtures_content(ACTUAL_HASHES, remove_missing))
+def write_fixtures_suggestion(
+    remove_missing: bool, only_passed_tests: bool = False
+) -> None:
+    SUGGESTION_FILE.write_text(
+        _get_fixtures_content(ACTUAL_HASHES, remove_missing, only_passed_tests)
+    )


-def _get_fixtures_content(fixtures: Dict[str, str], remove_missing: bool) -> str:
+def _get_fixtures_content(
+    fixtures: Dict[str, str], remove_missing: bool, only_passed_tests: bool = False
+) -> str:
     if remove_missing:
         # Not removing the ones for different model
         nonrelevant_cases = {
             f: h for f, h in FILE_HASHES.items() if not f.startswith(f"{MODEL}_")
         }
-        processed_fixtures = {i: fixtures[i] for i in PROCESSED}
+        filtered_processed_tests = PROCESSED
+        if only_passed_tests:
+            filtered_processed_tests = PROCESSED - FAILED_TESTS
+        processed_fixtures = {i: fixtures[i] for i in filtered_processed_tests}
         fixtures = {**nonrelevant_cases, **processed_fixtures}
     else:
         fixtures = fixtures
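The only_passed_tests filter is a plain set difference. A self-contained example with made-up test names and hashes:

# Made-up data, for illustration only.
PROCESSED = {"TT_test_send", "TT_test_receive", "TT_test_backup"}
FAILED_TESTS = {"TT_test_receive"}
ACTUAL_HASHES = {"TT_test_send": "aa11", "TT_test_receive": "bb22", "TT_test_backup": "cc33"}

passed_only = PROCESSED - FAILED_TESTS
suggestion = {name: ACTUAL_HASHES[name] for name in sorted(passed_only)}
print(suggestion)  # {'TT_test_backup': 'cc33', 'TT_test_send': 'aa11'}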

@@ -2,7 +2,7 @@ import base64
 import filecmp
 from itertools import zip_longest
 from pathlib import Path
-from typing import Dict, List
+from typing import Dict, List, Optional

 from dominate.tags import a, i, img, table, td, th, tr
@@ -30,7 +30,7 @@ def write(fixture_test_path: Path, doc, filename: str) -> Path:
     return fixture_test_path / filename

-def image(src: Path) -> None:
+def image(src: Path, image_width: Optional[int] = None) -> None:
     with td():
         if src:
             # open image file
@@ -40,17 +40,26 @@ def image(src: Path) -> None:
             # convert output to str
             image = image.decode()
             # img(src=src.relative_to(fixture_test_path))
-            img(src="data:image/png;base64, " + image)
+            img(
+                src="data:image/png;base64, " + image,
+                style=f"width: {image_width}px; image-rendering: pixelated;"
+                if image_width
+                else "",
+            )
         else:
             i("missing")


-def diff_table(left_screens: List[Path], right_screens: List[Path]) -> None:
+def diff_table(
+    left_screens: List[Path],
+    right_screens: List[Path],
+    image_width: Optional[int] = None,
+) -> None:
     for left, right in zip_longest(left_screens, right_screens):
         if left and right and filecmp.cmp(right, left):
             background = "white"
         else:
             background = "red"
         with tr(bgcolor=background):
-            image(left)
-            image(right)
+            image(left, image_width)
+            image(right, image_width)
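The report embeds every screenshot as a base64 data: URI, so the generated HTML stays self-contained with no image files alongside it. A standalone sketch of the same technique (hypothetical helper name; the inline style mirrors this diff, with image-rendering: pixelated keeping upscaled device screens crisp):

import base64
from pathlib import Path
from typing import Optional

from dominate.tags import img

def inline_png(path: Path, width_px: Optional[int] = None) -> None:
    # The PNG bytes travel inside the src attribute; no external file needed.
    data = base64.b64encode(path.read_bytes()).decode()
    style = f"width: {width_px}px; image-rendering: pixelated;" if width_px else ""
    img(src="data:image/png;base64," + data, style=style)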

@@ -16,6 +16,11 @@ REPORTS_PATH = HERE / "reports" / "test"
 STYLE = (HERE / "testreport.css").read_text()
 SCRIPT = (HERE / "testreport.js").read_text()

+SCREENSHOTS_WIDTH_PX_TO_DISPLAY = {
+    "T1": 128 * 2,  # original is 128px
+    "TT": 240,  # original is 240px
+    "TR": 128 * 2,  # original is 128px
+}
+
 ACTUAL_HASHES: Dict[str, str] = {}
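Test names carry the device model as a prefix (T1_, TT_, TR_), so two characters are enough to pick the display width:

# Hypothetical test name, shaped like the ones in this suite.
test_name = "TT_test_signtx"
width = SCREENSHOTS_WIDTH_PX_TO_DISPLAY[test_name[:2]]  # 240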
@@ -147,7 +152,11 @@ def failed(
             th("Expected")
             th("Actual")
-            html.diff_table(recorded_screens, actual_screens)
+            html.diff_table(
+                recorded_screens,
+                actual_screens,
+                SCREENSHOTS_WIDTH_PX_TO_DISPLAY[test_name[:2]],
+            )

     return html.write(REPORTS_PATH / "failed", doc, test_name + ".html")
@@ -155,9 +164,12 @@ def failed(
 def passed(fixture_test_path: Path, test_name: str, actual_hash: str) -> Path:
     copy_tree(str(fixture_test_path / "actual"), str(fixture_test_path / "recorded"))
+    return recorded(fixture_test_path / "actual", test_name, actual_hash)
+
+
+def recorded(fixture_test_path: Path, test_name: str, actual_hash: str) -> Path:
     doc = document(title=test_name)
-    actual_path = fixture_test_path / "actual"
-    actual_screens = sorted(actual_path.iterdir())
+    actual_screens = sorted(fixture_test_path.iterdir())

     with doc:
         _header(test_name, actual_hash, actual_hash)
@@ -168,6 +180,6 @@ def passed(fixture_test_path: Path, test_name: str, actual_hash: str) -> Path:
         for screen in actual_screens:
             with tr():
-                html.image(screen)
+                html.image(screen, SCREENSHOTS_WIDTH_PX_TO_DISPLAY[test_name[:2]])

     return html.write(REPORTS_PATH / "passed", doc, test_name + ".html")
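After this refactor both report flavors share one renderer: passed() syncs actual/ into recorded/ and then delegates, while record mode calls recorded() directly on the freshly captured screen directory. The two call sites, condensed from this diff:

# test mode, via testreport.passed():
passed(fixture_test_path, test_name, actual_hash)
# record mode, from _process_recorded():
recorded(screen_path, test_name, actual_hash)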
