2019-12-30 11:20:51 +00:00
|
|
|
import hashlib
|
2020-01-31 11:28:44 +00:00
|
|
|
import json
|
2019-12-30 11:20:51 +00:00
|
|
|
import re
|
|
|
|
import shutil
|
|
|
|
from contextlib import contextmanager
|
|
|
|
from pathlib import Path
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
|
2020-03-03 14:50:57 +00:00
|
|
|
from .reporting import report_test
|
2019-12-30 11:20:51 +00:00
|
|
|
|
2020-01-09 14:25:45 +00:00
|
|
|
# Directory containing this module; all fixture paths are resolved relative to it.
UI_TESTS_DIR = Path(__file__).parent.resolve()
# JSON file mapping test name -> SHA-256 hex digest of its recorded screens.
HASH_FILE = UI_TESTS_DIR / "fixtures.json"
# In-memory copy of HASH_FILE contents; populated by read_fixtures(),
# extended by _process_recorded(), written back by write_fixtures().
HASHES = {}
# Names of tests that were recorded or checked during this session
# (used by list_missing() / write_fixtures() to detect stale entries).
PROCESSED = set()
|
2020-01-09 14:25:45 +00:00
|
|
|
|
2019-12-30 11:20:51 +00:00
|
|
|
|
2020-01-09 11:29:45 +00:00
|
|
|
def get_test_name(node_id):
    """Derive a filesystem-safe directory name from a pytest node id.

    The item name is usually just the function name, but parametrization
    adds the parameters to it. Some parameters are very long (tx hashes
    etc.) and would exceed the maximum allowed filename length, so the
    result is capped at 100 characters.
    """
    name = node_id.replace("tests/device_tests/", "")
    # drop "::TestClass::" -- it usually repeats the test file name
    name = re.sub(r"::.*?::", "-", name)
    # slashes are not valid in file names
    name = name.replace("/", "-")
    return name[:100]
|
2019-12-30 11:20:51 +00:00
|
|
|
|
|
|
|
|
2020-01-31 11:28:44 +00:00
|
|
|
def _process_recorded(screen_path, test_name):
    """Record-mode post-processing: hash the new screens and register the test."""
    # store the fixture hash for this test before touching the files
    recorded_hash = _hash_files(screen_path)
    HASHES[test_name] = recorded_hash
    _rename_records(screen_path)
    PROCESSED.add(test_name)
|
2019-12-30 11:20:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
def _rename_records(screen_path):
|
|
|
|
# rename screenshots
|
|
|
|
for index, record in enumerate(sorted(screen_path.iterdir())):
|
2020-01-06 14:44:30 +00:00
|
|
|
record.replace(screen_path / f"{index:08}.png")
|
2019-12-30 11:20:51 +00:00
|
|
|
|
|
|
|
|
2020-01-06 14:44:30 +00:00
|
|
|
def _hash_files(path):
|
|
|
|
files = path.iterdir()
|
2019-12-30 11:20:51 +00:00
|
|
|
hasher = hashlib.sha256()
|
|
|
|
for file in sorted(files):
|
2020-01-06 14:44:30 +00:00
|
|
|
hasher.update(file.read_bytes())
|
2019-12-30 11:20:51 +00:00
|
|
|
|
|
|
|
return hasher.digest().hex()
|
|
|
|
|
|
|
|
|
|
|
|
def _process_tested(fixture_test_path, test_name):
    """Compare the screens captured during a test run against the stored fixture.

    Raises ValueError when the test has no fixture entry; fails the test via
    pytest.fail (pointing at the generated diff report) when hashes differ.
    """
    expected_hash = HASHES.get(test_name)
    if expected_hash is None:
        raise ValueError("Hash for '%s' not found in fixtures.json" % test_name)
    PROCESSED.add(test_name)

    actual_path = fixture_test_path / "actual"
    actual_hash = _hash_files(actual_path)
    _rename_records(actual_path)

    if actual_hash == expected_hash:
        report_test.passed(fixture_test_path, test_name, actual_hash)
        return

    # mismatch: produce an HTML diff report and fail the test
    file_path = report_test.failed(
        fixture_test_path, test_name, actual_hash, expected_hash
    )
    pytest.fail(
        "Hash of {} differs.\nExpected: {}\nActual: {}\nDiff file: {}".format(
            test_name, expected_hash, actual_hash, file_path
        )
    )
|
2019-12-30 11:20:51 +00:00
|
|
|
|
|
|
|
|
|
|
|
@contextmanager
def screen_recording(client, request):
    """Record device screens for the duration of a test.

    In "record" mode the captured screens become the new fixture for this
    test; in "test" mode they are hashed and compared against the stored
    fixture. Any other value of the ``ui`` option raises ValueError before
    recording starts.

    Args:
        client: device client; ``client.debug`` must expose
            ``start_recording(path)`` / ``stop_recording()``.
        request: pytest request object (provides the ``ui`` option and the
            test's node id).
    """
    test_ui = request.config.getoption("ui")
    test_name = get_test_name(request.node.nodeid)
    screens_test_path = UI_TESTS_DIR / "screens" / test_name

    if test_ui == "record":
        screen_path = screens_test_path / "recorded"
    elif test_ui == "test":
        screen_path = screens_test_path / "actual"
    else:
        raise ValueError("Invalid 'ui' option.")

    # exist_ok avoids the check-then-create race of the previous
    # `if not exists(): mkdir()`; parents covers a fresh checkout where
    # the "screens" directory does not exist yet
    screens_test_path.mkdir(parents=True, exist_ok=True)
    # remove files left over from a previous run
    shutil.rmtree(screen_path, ignore_errors=True)
    screen_path.mkdir()

    try:
        client.debug.start_recording(str(screen_path))
        yield
        if test_ui == "record":
            _process_recorded(screen_path, test_name)
        else:
            # test_ui == "test" is the only other value that got this far;
            # the invalid-option case already raised above
            _process_tested(screens_test_path, test_name)
    finally:
        client.debug.stop_recording()
|
2020-01-31 11:28:44 +00:00
|
|
|
|
|
|
|
|
2020-02-21 14:47:25 +00:00
|
|
|
def list_missing():
    """Return fixture test names that were never processed in this session."""
    return {name for name in HASHES if name not in PROCESSED}
|
2020-02-17 14:38:26 +00:00
|
|
|
|
|
|
|
|
2020-01-31 11:28:44 +00:00
|
|
|
def read_fixtures():
    """Load fixtures.json into the module-level HASHES mapping.

    Raises ValueError when the fixtures file is missing.
    """
    global HASHES
    if not HASH_FILE.exists():
        raise ValueError("File fixtures.json not found.")
    HASHES = json.loads(HASH_FILE.read_text())
|
|
|
|
|
|
|
|
|
2020-02-17 14:38:26 +00:00
|
|
|
def write_fixtures(remove_missing: bool):
    """Persist HASHES to fixtures.json.

    Args:
        remove_missing: when True, write only entries for tests that were
            actually processed this session, dropping stale ones.
    """
    if remove_missing:
        content = {name: HASHES[name] for name in PROCESSED}
    else:
        content = HASHES
    HASH_FILE.write_text(json.dumps(content, indent="", sort_keys=True) + "\n")
|