tests: remove xfail

pull/179/head
Tomas Susanka 5 years ago committed by Pavol Rusnak
parent d190d906de
commit 562206d220

@@ -2,7 +2,7 @@
## Testing with python-trezor
Apart from the internal tests, Trezor core has a suite of integration tests in the [`python-trezor`](https://github.com/trezor/python-trezor) library. There are several ways to use that.
Apart from the internal tests, Trezor core has a suite of integration tests in the `python` subdirectory. There are several ways to use that.
### 1. Running the suite with pipenv
@@ -26,28 +26,20 @@ pipenv run make test_emu
### 2. Developing new tests
You will need a separate checkout of `python-trezor`. It's probably a good idea to do this outside the `trezor-core` directory:
```sh
git clone https://github.com/trezor/python-trezor
```
Prepare a virtual environment with all the requirements, and switch into it. Again, it's easiest to do this with `pipenv`:
```sh
cd python-trezor
pipenv install -r requirements-dev.txt
pipenv install -e .
pipenv install
pipenv shell
```
Alternatively, if you have an existing virtualenv, you can install python-trezor in "develop" mode:
Alternatively, if you have an existing virtualenv, you can install `python` in "develop" mode:
```sh
python setup.py develop
```
If you want to test against the emulator, run it in a separate terminal from the `trezor-core` checkout directory:
If you want to test against the emulator, run it in a separate terminal from the `core` subdirectory:
```sh
PYOPT=0 ./emu.sh
@@ -61,7 +53,7 @@ export TREZOR_PATH="udp:127.0.0.1:21324"
(You can find other devices with `trezorctl list`.)
Now you can run the test suite from either the `python-trezor` or the `trezor-core` root directory:
Now you can run the test suite from either the `python` or the `core` root directory:
```sh
pytest
@@ -83,22 +75,3 @@ pytest -m stellar # only runs tests marked with @pytest.mark.stellar
```
If you want to see debugging information and protocol dumps, run with `-v`.
### 3. Submitting tests for new features
When you're happy with your tests, follow these steps:
1. Mark each of your tests with the name of your feature. E.g., `@pytest.mark.ultracoin2000`.
2. Also mark each of your tests with `@pytest.mark.xfail`. That means the test is expected to fail. (A minimal sketch of such a test follows this list.)
If you want to run these tests as usual, run `pytest --runxfail`.
3. Submit a PR to `python-trezor`, containing these tests.
4. Edit the file `trezor-core/pytest.ini`, and add your marker to the `run_xfail` item:
``` ini
run_xfail = lisk nem ultracoin2000
```
This will cause your PR to re-enable the `xfail`ed tests. That way we will see whether your feature actually implements what it claims.
5. Submit a PR to `trezor-core`.
6. Optionally, if you like to be extra nice: after both your PRs are accepted, submit a new one to `python-trezor` that removes the `xfail` markers, and one to `trezor-core` that removes the `run_xfail` entry.
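For illustration, here is a minimal sketch of what such a test might look like, following the pattern of the device tests changed later in this commit. The `ultracoin2000` marker is the hypothetical feature name from step 1; the class name and test body are made up, while `TrezorTest` and the import come from the existing suite:
```python
import pytest

from .common import TrezorTest


# Hypothetical example: feature marker ("ultracoin2000") from step 1,
# xfail marker from step 2.
@pytest.mark.ultracoin2000
@pytest.mark.xfail
class TestMsgUltracoin2000Getaddress(TrezorTest):
    def test_get_address(self):
        # exercise the new feature through trezorlib and assert on the expected result
        ...
```
Once the core feature is merged and the tests pass, the `xfail` markers and the `run_xfail` entry can both be dropped again (step 6).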

@@ -1,3 +1,2 @@
[pytest]
addopts = --pyargs trezorlib.tests.device_tests
xfail_strict = true
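For context, the `xfail_strict = true` setting removed above makes an unexpectedly passing `xfail` test fail the run instead of being reported as XPASS. A minimal standalone sketch (not tied to this repository) of that behaviour:
```python
import pytest


@pytest.mark.xfail
def test_already_working():
    # This assertion passes; with xfail_strict = true the unexpected pass (XPASS)
    # is turned into a failure, so stale xfail markers get noticed and removed.
    assert 1 + 1 == 2
```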

@@ -99,12 +99,6 @@ def pytest_configure(config):
def pytest_addoption(parser):
parser.addini(
"run_xfail",
"List of markers that will run even tests that are marked as xfail",
"args",
[],
)
parser.addoption(
"--interactive",
action="store_true",
@@ -119,7 +113,6 @@ def pytest_runtest_setup(item):
Performs custom processing, mainly useful for trezor CI testing:
* 'skip_t2' tests are skipped on T2 and 'skip_t1' tests are skipped on T1.
* no test should have both skips at the same time
* allows to 'runxfail' tests specified by 'run_xfail' in pytest.ini
"""
if item.get_closest_marker("skip_t1") and item.get_closest_marker("skip_t2"):
pytest.fail("Don't skip tests for both trezors!")
@@ -128,13 +121,3 @@ def pytest_runtest_setup(item):
pytest.skip("Test excluded on Trezor T")
if item.get_closest_marker("skip_t1") and TREZOR_VERSION == 1:
pytest.skip("Test excluded on Trezor 1")
xfail = item.get_closest_marker("xfail")
runxfail_markers = item.config.getini("run_xfail")
run_xfail = any(item.get_closest_marker(marker) for marker in runxfail_markers)
if xfail and run_xfail:
# Deep hack: pytest's private _evalxfail helper determines whether the test should xfail or not.
# The helper caches its result even before this hook runs.
# Here we force-set the result to False, meaning "test does NOT xfail, run as normal"
# IOW, this is basically per-item "--runxfail"
item._evalxfail.result = False

@@ -22,7 +22,6 @@ from trezorlib.tools import parse_path
from .common import TrezorTest
@pytest.mark.xfail
@pytest.mark.ontology
@pytest.mark.skip_t1
class TestMsgOntologyGetaddress(TrezorTest):

@@ -25,7 +25,6 @@ from trezorlib.tools import parse_path
from .common import TrezorTest
@pytest.mark.xfail
@pytest.mark.ontology
@pytest.mark.skip_t1
class TestMsgOntologySignOntIdAddAttributes(TrezorTest):

@@ -25,7 +25,6 @@ from trezorlib.tools import parse_path
from .common import TrezorTest
@pytest.mark.xfail
@pytest.mark.ontology
@pytest.mark.skip_t1
class TestMsgOntologySignOntIdRegister(TrezorTest):

@@ -22,7 +22,6 @@ from trezorlib.tools import parse_path
from .common import TrezorTest
@pytest.mark.xfail
@pytest.mark.ontology
@pytest.mark.skip_t1
class TestMsgOntologySigntx(TrezorTest):

@@ -22,7 +22,6 @@ from trezorlib.tools import parse_path
from .common import TrezorTest
@pytest.mark.xfail
@pytest.mark.ontology
@pytest.mark.skip_t1
class TestMsgOntologySignWithdraw(TrezorTest):

@@ -30,7 +30,6 @@ TXHASH_339c3e = bytes.fromhex(
)
@pytest.mark.xfail
@pytest.mark.komodo
class TestMsgSigntxKomodo(TrezorTest):
def test_one_one_fee_sapling(self):

@@ -27,6 +27,3 @@ not_skip=__init__.py
forced_separate = apps
known_standard_library = micropython,ubinascii,ustruct,uctypes,utime,utimeq,trezorio,trezorui,trezorutils,trezorconfig
known_third_party = curve25519,ecdsa,hypothesis
[tool:pytest]
xfail_strict = true
