1
0
mirror of https://github.com/trezor/trezor-firmware.git synced 2024-11-22 07:28:10 +00:00

chore(tests): small naming changes and docs

[no changelog]
This commit is contained in:
grdddj 2023-02-03 16:00:15 +01:00 committed by Jiří Musil
parent 39a8e6bb50
commit eb195d038b
5 changed files with 24 additions and 23 deletions

View File

@@ -10,7 +10,7 @@ from tests.ui_tests.common import TestResult, _hash_files, get_fixtures # isort
FIXTURES = get_fixtures()
for result in TestResult.recent_tests():
for result in TestResult.recent_results():
if not result.passed or result.expected_hash != result.actual_hash:
print("WARNING: skipping failed test", result.test.id)
continue

View File

@@ -2,4 +2,3 @@
*.html
*.zip
fixtures.suggestion.json
fixtures.json.diff

View File

@@ -90,7 +90,9 @@ def setup(main_runner: bool) -> None:
def list_missing() -> set[str]:
# Only listing the ones for the current model
_, missing = common.prepare_fixtures(TestResult.recent_tests(), remove_missing=True)
_, missing = common.prepare_fixtures(
TestResult.recent_results(), remove_missing=True
)
return {test.id for test in missing}
@@ -99,7 +101,7 @@ def update_fixtures(remove_missing: bool = False) -> int:
Used in --ui=record and in update_fixtures.py
"""
results = list(TestResult.recent_tests())
results = list(TestResult.recent_results())
for result in results:
result.store_recorded()
@@ -162,7 +164,7 @@ def sessionfinish(
testreport.generate_reports()
if test_ui == "test" and check_missing and list_missing():
common.write_fixtures(
TestResult.recent_tests(),
TestResult.recent_results(),
remove_missing=True,
dest=FIXTURES_SUGGESTION_FILE,
)
@@ -175,7 +177,7 @@ def sessionfinish(
def main() -> None:
for result in TestResult.recent_tests():
for result in TestResult.recent_results():
try:
_process_tested(result)
print("PASSED:", result.test.id)

View File

@@ -57,7 +57,7 @@ def prepare_fixtures(
group = grouped_tests.setdefault(idx, {})
group[result.test.fixtures_name] = result.actual_hash
missing_tests = set()
missing_tests: set[TestCase] = set()
# merge with previous fixtures
fixtures = deepcopy(get_fixtures())
@@ -99,16 +99,10 @@ def screens_and_hashes(screen_path: Path) -> tuple[list[Path], list[str]]:
if not screen_path.exists():
return [], []
hashes = []
paths = []
paths: list[Path] = []
hashes: list[str] = []
for file in sorted(screen_path.iterdir()):
paths.append(file)
if len(file.stem) == 32:
try:
hashes.append(bytes.fromhex(file.stem))
continue
except ValueError:
pass
hashes.append(_get_image_hash(file))
return paths, hashes
@@ -175,6 +169,12 @@ def screens_diff(
diff = SequenceMatcher(
None, expected_hashes, actual_hashes, autojunk=False
).get_opcodes()
# Example diff result:
# [('equal', 0, 1, 0, 1), ('replace', 1, 2, 1, 3), ('equal', 2, 6, 3, 7)]
# For situation when:
# - first screen is the same for both
# - second screen has changes and there is new third screen
# - rest is the same
for _tag, i1, i2, j1, j2 in diff:
# tag is one of "replace", "delete", "equal", "insert"
# i1, i2 and j1, j2 are slice indexes for expected/actual respectively
@@ -283,7 +283,7 @@ class TestResult:
)
@classmethod
def recent_tests(cls) -> t.Iterator[Self]:
def recent_results(cls) -> t.Iterator[Self]:
for testdir in sorted(SCREENS_DIR.iterdir()):
meta = testdir / "metadata.json"
if not meta.exists():

View File

@@ -94,7 +94,7 @@ def index() -> Path:
new_tests = list((TESTREPORT_PATH / "new").iterdir())
actual_hashes = {
result.test.id: result.actual_hash for result in TestResult.recent_tests()
result.test.id: result.actual_hash for result in TestResult.recent_results()
}
title = "UI Test report " + datetime.now().strftime("%Y-%m-%d %H:%M:%S")
@@ -143,8 +143,8 @@ def all_screens() -> Path:
Shows all test-cases at one place.
"""
recent_tests = list(TestResult.recent_tests())
model = recent_tests[0].test.model if recent_tests else None
recent_results = list(TestResult.recent_results())
model = recent_results[0].test.model if recent_results else None
title = "All test cases"
doc = document(title=title, model=model)
@@ -154,7 +154,7 @@ def all_screens() -> Path:
count = 0
result_count = 0
for result in recent_tests:
for result in recent_results:
result_count += 1
h2(result.test.id, id=result.test.id)
for image in result.images:
@@ -170,11 +170,11 @@ def all_unique_screens() -> Path:
def all_unique_screens() -> Path:
"""Generate an HTML file with all the unique screens from the current test run."""
results = TestResult.recent_tests()
recent_results = TestResult.recent_results()
result_count = 0
model = None
test_cases = defaultdict(list)
for result in results:
test_cases: dict[str, list[str]] = defaultdict(list)
for result in recent_results:
result_count += 1
model = result.test.model
for image in result.images: