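"""UI testing utilities: screen recording, hashing and fixture bookkeeping.

Screens captured during a test run are hashed and compared against the
expected hashes stored in fixtures.json. Helpers below also maintain a
suggestion file and a diff file that can be used to update those fixtures.
"""
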
import hashlib
import json
import re
import shutil
from contextlib import contextmanager
from pathlib import Path
from typing import Dict, Generator, Optional, Set

import pytest
from _pytest.outcomes import Failed
from PIL import Image

from trezorlib.debuglink import TrezorClientDebugLink as Client

from .reporting import testreport

UI_TESTS_DIR = Path(__file__).resolve().parent
SCREENS_DIR = UI_TESTS_DIR / "screens"
HASH_FILE = UI_TESTS_DIR / "fixtures.json"
SUGGESTION_FILE = UI_TESTS_DIR / "fixtures.suggestion.json"
FIXTURES_DIFF = UI_TESTS_DIR / "fixtures.json.diff"

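# Module-level state shared across the test session.
# FILE_HASHES: expected hashes (loaded from fixtures.json or freshly recorded)
# ACTUAL_HASHES: hashes computed during the current run
# PROCESSED / FAILED_TESTS: names of tests that were run / that failed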
FILE_HASHES: Dict[str, str] = {}
ACTUAL_HASHES: Dict[str, str] = {}
PROCESSED: Set[str] = set()
FAILED_TESTS: Set[str] = set()

# T1/TT, to be set in screen_recording(), as we do not know it beforehand
# TODO: it is not the cleanest, we could create a class out of this file
MODEL = ""


def get_test_name(node_id: str) -> str:
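    """Generate a filesystem-friendly screen-folder name from a pytest node id.

    Illustrative example (hypothetical node id):
        "tests/device_tests/test_foo.py::TestClass::test_bar" -> "test_foo.py-test_bar"
    """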
    # Test item name is usually the function name, but when parametrization is
    # used, the parameters are also part of the name. Some functions have very
    # long parameter names (tx hashes etc.) that exceed the maximum allowable
    # filename length, so we limit the name to 100 chars and append a short
    # hash of the full name to keep truncated names unique.
    new_name = node_id.replace("tests/device_tests/", "")
    # remove ::TestClass:: if present because it is usually the same as the test file name
    new_name = re.sub(r"::.*?::", "-", new_name)
    new_name = new_name.replace("/", "-")  # in case there is "/"
    if len(new_name) <= 100:
        return new_name
    return new_name[:91] + "-" + hashlib.sha256(new_name.encode()).hexdigest()[:8]


def _process_recorded(screen_path: Path, test_name: str) -> None:
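    """Hash the freshly recorded screens and store the result as the expected hash."""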
    # calculate hash
    actual_hash = _hash_files(screen_path)
    FILE_HASHES[test_name] = actual_hash
    ACTUAL_HASHES[test_name] = actual_hash
    _rename_records(screen_path)
    testreport.recorded(screen_path, test_name, actual_hash)


def _rename_records(screen_path: Path) -> None:
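    """Rename screenshots to sequential zero-padded numbers (00000000.png, ...)."""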
    # rename screenshots
    for index, record in enumerate(sorted(screen_path.iterdir())):
        record.replace(screen_path / f"{index:08}.png")


def _hash_files(path: Path) -> str:
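    """Return one SHA-256 hex digest over all files in the directory.

    Files are hashed in sorted order, so the digest is stable across runs.
    """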
    files = path.iterdir()
    hasher = hashlib.sha256()
    for file in sorted(files):
        hasher.update(_get_bytes_from_png(str(file)))

    return hasher.digest().hex()


def _get_bytes_from_png(png_file: str) -> bytes:
    """Decode a PNG file into bytes representing all the pixels.

    This is necessary because Linux and Mac use different PNG encoding
    libraries, and we need the file hashes to be the same on both platforms.
    """
    return Image.open(png_file).tobytes()


def _process_tested(fixture_test_path: Path, test_name: str) -> None:
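    """Compare the hash of the actual screens against fixtures.json and report the result."""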
    actual_path = fixture_test_path / "actual"
    actual_hash = _hash_files(actual_path)
    ACTUAL_HASHES[test_name] = actual_hash

    _rename_records(actual_path)

    expected_hash = FILE_HASHES.get(test_name)
    if expected_hash is None:
        pytest.fail(f"Hash of {test_name} not found in fixtures.json")

    if actual_hash != expected_hash:
        assert expected_hash is not None
        file_path = testreport.failed(
            fixture_test_path, test_name, actual_hash, expected_hash
        )

        # Write the diff to a file so that we can process it later.
        # Appending one new JSON object per line avoids regenerating the whole
        # file (which could cause issues with multiple processes/threads).
        with open(FIXTURES_DIFF, "a") as f:
            diff = {
                "test_name": test_name,
                "actual_hash": actual_hash,
            }
            f.write(json.dumps(diff) + "\n")

        pytest.fail(
            f"Hash of {test_name} differs.\n"
            f"Expected: {expected_hash}\n"
            f"Actual: {actual_hash}\n"
            f"Diff file: {file_path}"
        )
    else:
        testreport.passed(fixture_test_path, test_name, actual_hash)


def get_last_call_test_result(request: pytest.FixtureRequest) -> Optional[bool]:
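    """Return whether the last test call passed, or None if it did not finish."""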
    # If the test did not finish (e.g. interrupted by Ctrl+C), the
    # pytest_runtest_makereport hook did not create the attribute we need.
    if not hasattr(request.node, "rep_call"):
        return None

    return request.node.rep_call.passed


@contextmanager
def screen_recording(
    client: Client, request: pytest.FixtureRequest
) -> Generator[None, None, None]:
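    """Record the device screens shown during a test.

    With ui="record", the screenshots become the new expected set; with any
    other truthy ui value they are stored as "actual" and compared against
    fixtures.json once the test body finishes.
    """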
    test_ui = request.config.getoption("ui")
    test_name = get_test_name(request.node.nodeid)

    # Differentiating test names between T1 and TT
    # Making the model global for other functions
    global MODEL
    MODEL = f"T{client.features.model}"

    test_name = f"{MODEL}_{test_name}"

    screens_test_path = SCREENS_DIR / test_name

    if test_ui == "record":
        screen_path = screens_test_path / "recorded"
    else:
        screen_path = screens_test_path / "actual"

    if not screens_test_path.exists():
        screens_test_path.mkdir()
    # remove previous files
    shutil.rmtree(screen_path, ignore_errors=True)
    screen_path.mkdir()

    try:
        client.debug.start_recording(str(screen_path))
        yield
    finally:
        # Wait for response to Initialize, which gives the emulator time to catch up
        # and redraw the homescreen. Otherwise there's a race condition between that
        # and stopping recording.
        client.init_device()
        client.debug.stop_recording()

    if test_ui:
        PROCESSED.add(test_name)
        if get_last_call_test_result(request) is False:
            FAILED_TESTS.add(test_name)

        if test_ui == "record":
            _process_recorded(screen_path, test_name)
        else:
            _process_tested(screens_test_path, test_name)


def list_missing() -> Set[str]:
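    """Return fixture entries for the current model that were not processed in this run."""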
    # Only listing the ones for the current model
    relevant_cases = {
        case for case in FILE_HASHES.keys() if case.startswith(f"{MODEL}_")
    }
    return relevant_cases - PROCESSED


def read_fixtures() -> None:
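    """Load the expected hashes from fixtures.json into FILE_HASHES."""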
    if not HASH_FILE.exists():
        raise ValueError("File fixtures.json not found.")
    global FILE_HASHES
    FILE_HASHES = json.loads(HASH_FILE.read_text())


def write_fixtures(remove_missing: bool) -> None:
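    """Write the current FILE_HASHES back to fixtures.json."""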
    HASH_FILE.write_text(_get_fixtures_content(FILE_HASHES, remove_missing))


def write_fixtures_suggestion(
    remove_missing: bool, only_passed_tests: bool = False
) -> None:
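    """Write the hashes observed during this run to fixtures.suggestion.json."""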
    SUGGESTION_FILE.write_text(
        _get_fixtures_content(ACTUAL_HASHES, remove_missing, only_passed_tests)
    )


def update_fixtures_with_diff() -> int:
    """Update the fixtures.json file with the actual hashes from the diff file.

    The use-case is that a UI test run generates the differing hashes, and
    with this function we can simply update the fixtures.json file without
    having to run the UI tests again in recording mode.
    """
    if not FIXTURES_DIFF.exists():
        raise ValueError(f"File {FIXTURES_DIFF} not found.")

    read_fixtures()

    changes_amount = 0
    with open(FIXTURES_DIFF) as f:
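        # Each line is a standalone JSON object written by _process_tested(),
        # e.g. {"test_name": "<model>_<test>", "actual_hash": "<sha256 hex>"}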
        for line in f:
            changes_amount += 1
            diff = json.loads(line)
            FILE_HASHES[diff["test_name"]] = diff["actual_hash"]

    write_fixtures(remove_missing=False)

    # Return the number of updated hashes.
    return changes_amount


def _get_fixtures_content(
    fixtures: Dict[str, str], remove_missing: bool, only_passed_tests: bool = False
) -> str:
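    """Serialize fixtures to JSON, optionally dropping entries that were not run.

    When removing missing entries, fixtures for other models are kept, and
    only_passed_tests additionally excludes tests that failed in this run.
    """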
    if remove_missing:
        # Not removing the ones for a different model
        nonrelevant_cases = {
            f: h for f, h in FILE_HASHES.items() if not f.startswith(f"{MODEL}_")
        }

        filtered_processed_tests = PROCESSED
        if only_passed_tests:
            filtered_processed_tests = PROCESSED - FAILED_TESTS

        processed_fixtures = {i: fixtures[i] for i in filtered_processed_tests}
        fixtures = {**nonrelevant_cases, **processed_fixtures}

    return json.dumps(fixtures, indent="", sort_keys=True) + "\n"


def main() -> None:
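    """Re-run the hash comparison for all recorded "actual" screens and build reports."""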
    read_fixtures()
    for record in SCREENS_DIR.iterdir():
        if not (record / "actual").exists():
            continue

        try:
            _process_tested(record, record.name)
            print("PASSED:", record.name)
        except Failed:
            print("FAILED:", record.name)

    testreport.generate_reports()