
tests: introduce --ui-check-missing to test/remove missing tests

Tomas Susanka 2020-02-17 14:38:26 +00:00 committed by matejcik
parent b5446bd125
commit 577daf09fe
4 changed files with 47 additions and 5 deletions

core/Makefile

@@ -93,10 +93,10 @@ test_emu_click: ## run click tests
 	$(EMU_TEST) $(PYTEST) $(TESTPATH)/click_tests $(TESTOPTS)
 
 test_emu_ui: ## run ui integration tests
-	$(EMU_TEST) $(PYTEST) $(TESTPATH)/device_tests --ui=test -m "not skip_ui" $(TESTOPTS)
+	$(EMU_TEST) $(PYTEST) $(TESTPATH)/device_tests --ui=test --ui-check-missing -m "not skip_ui" $(TESTOPTS)
 
 test_emu_ui_record: ## record and hash screens for ui integration tests
-	$(EMU_TEST) $(PYTEST) $(TESTPATH)/device_tests --ui=record -m "not skip_ui" $(TESTOPTS)
+	$(EMU_TEST) $(PYTEST) $(TESTPATH)/device_tests --ui=record --ui-check-missing -m "not skip_ui" $(TESTOPTS)
 
 pylint: ## run pylint on application sources and tests
 	pylint -E $(shell find src tests -name *.py)

docs/tests/ui-tests.md

@@ -35,6 +35,12 @@ Now you can run the test suite with `pytest` from the root directory:
 pytest tests/device_tests --ui=test
 ```
 
+If you wish to check that all test cases in `fixtures.json` were used, set the `--ui-check-missing` flag. Of course, this is meaningful only if you run the tests on the whole `device_tests` folder.
+
+```sh
+pytest tests/device_tests --ui=test --ui-check-missing
+```
+
 You can also skip tests marked as `skip_ui`.
 
 ```sh
@@ -43,6 +49,13 @@ pytest tests/device_tests --ui=test -m "not skip_ui"
 
 ## Updating Fixtures ("Recording")
 
+Short version:
+
+```sh
+pipenv run make -C core test_emu_ui_record
+```
+
+Long version:
 
 The `--ui` pytest argument has two options:
 
 - **record**: Create screenshots and calculate their hash for each test.
@@ -55,6 +68,12 @@ to proceed is to run `--ui=test` at first, see what tests fail (see the Reports
 decide if those changes are the ones you expected and then finally run the `--ui=record`
 and commit the new hashes.
 
+Also here we provide an option to check the `fixtures.json` file. Use the `--ui-check-missing` flag again to make sure there are no extra fixtures in the file:
+
+```sh
+pytest tests/device_tests --ui=record --ui-check-missing
+```
+
 ## Reports
 
 Each `--ui=test` run creates a clear report of which tests passed and which failed.
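
For orientation: `fixtures.json` is a flat JSON object mapping a full test name to the combined hash of that test's recorded screens, as can be inferred from `read_fixtures()` and `write_fixtures()` in `tests/ui_tests/__init__.py` below. A minimal sketch of the shape (the test name and hash value here are invented):

```python
import json

# Invented entry; real keys are pytest test names and values are screen hashes.
HASHES = {
    "test_example.py::test_show_address": "9f2c64f3a1b0c5d6e7f8091a2b3c4d5e",
}

# write_fixtures() serializes the mapping compactly: sorted keys, no indentation.
print(json.dumps(HASHES, indent="", sort_keys=True) + "\n")
```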

tests/conftest.py

@@ -152,9 +152,11 @@ def pytest_sessionstart(session):
 
 def pytest_sessionfinish(session, exitstatus):
     if session.config.getoption("ui") == "test":
+        if session.config.getoption("ui_check_missing"):
+            ui_tests.check_missing()
         report.index()
     if session.config.getoption("ui") == "record":
-        ui_tests.write_fixtures()
+        ui_tests.write_fixtures(session.config.getoption("ui_check_missing"))
 
 
 def pytest_terminal_summary(terminalreporter, exitstatus, config):
@@ -170,6 +172,13 @@ def pytest_addoption(parser):
         default="",
         help="Enable UI integration tests: 'record' or 'test'",
     )
+    parser.addoption(
+        "--ui-check-missing",
+        action="store_true",
+        default=False,
+        help="Check that UI fixtures contain the appropriate test cases (fails on `test`,"
+        " deletes old ones on `record`).",
+    )
 
 
 def pytest_configure(config):
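
One detail worth noting: pytest exposes a flag to `config.getoption()` under a name derived by replacing the option's dashes with underscores, which is why the hooks above query `"ui_check_missing"` for the `--ui-check-missing` flag. A standalone sketch of that mapping using plain `argparse`, which pytest's option parsing follows:

```python
import argparse

# "--ui-check-missing" becomes the attribute "ui_check_missing";
# config.getoption("ui_check_missing") relies on the same convention.
parser = argparse.ArgumentParser()
parser.add_argument("--ui-check-missing", action="store_true", default=False)

args = parser.parse_args(["--ui-check-missing"])
assert args.ui_check_missing is True
```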

tests/ui_tests/__init__.py

@@ -12,6 +12,7 @@ from . import report
 UI_TESTS_DIR = Path(__file__).parent.resolve()
 HASH_FILE = UI_TESTS_DIR / "fixtures.json"
 HASHES = {}
+PROCESSED = set()
 
 
 def get_test_name(node_id):
@@ -30,6 +31,7 @@ def _process_recorded(screen_path, test_name):
     # calculate hash
     HASHES[test_name] = _hash_files(screen_path)
     _rename_records(screen_path)
+    PROCESSED.add(test_name)
 
 
 def _rename_records(screen_path):
@@ -51,6 +53,7 @@ def _process_tested(fixture_test_path, test_name):
     expected_hash = HASHES.get(test_name)
     if expected_hash is None:
         raise ValueError("Hash for '%s' not found in fixtures.json" % test_name)
+    PROCESSED.add(test_name)
 
     actual_path = fixture_test_path / "actual"
     actual_hash = _hash_files(actual_path)
@@ -103,6 +106,12 @@ def screen_recording(client, request):
         raise ValueError("Invalid 'ui' option.")
 
 
+def check_missing():
+    missing = set(HASHES.keys()) - PROCESSED
+    if missing:
+        pytest.fail("Fixtures.json contains tests that are not tested: %s" % missing)
+
+
 def read_fixtures():
     if not HASH_FILE.exists():
         raise ValueError("File fixtures.json not found.")
@@ -110,5 +119,10 @@ def read_fixtures():
     HASHES = json.loads(HASH_FILE.read_text())
 
 
-def write_fixtures():
-    HASH_FILE.write_text(json.dumps(HASHES, indent="", sort_keys=True) + "\n")
+def write_fixtures(remove_missing: bool):
+    if remove_missing:
+        write = {i: HASHES[i] for i in PROCESSED}
+    else:
+        write = HASHES
+    HASH_FILE.write_text(json.dumps(write, indent="", sort_keys=True) + "\n")
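
In `record` mode the same bookkeeping prunes instead of failing: with `remove_missing=True`, only entries for tests that actually ran survive the rewrite. A sketch with the same invented data as above:

```python
# "test_b" is stale: it exists in fixtures.json but did not run this session.
HASHES = {"test_a": "1111", "test_b": "2222", "test_c": "3333"}
PROCESSED = {"test_a", "test_c"}

write = {name: HASHES[name] for name in PROCESSED}
assert write == {"test_a": "1111", "test_c": "3333"}  # the stale entry is dropped
```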