diff --git a/.coveragerc b/.coveragerc index cbc6c5c5084..ed1fb97596b 100644 --- a/.coveragerc +++ b/.coveragerc @@ -16,3 +16,11 @@ source = src/ */lib/python*/site-packages/ */pypy*/site-packages/ *\Lib\site-packages\ + +[report] +skip_covered = True +show_missing = True +exclude_lines = + \#\s*pragma: no cover + ^\s*raise NotImplementedError\b + ^\s*return NotImplemented\b diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 4d127d3c587..e9a970ca7f5 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -26,7 +26,7 @@ repos: hooks: - id: flake8 language_version: python3 - additional_dependencies: [flake8-typing-imports] + additional_dependencies: [flake8-typing-imports==1.3.0] - repo: https://github.com/asottile/reorder_python_imports rev: v1.4.0 hooks: diff --git a/.travis.yml b/.travis.yml index 5de40f3a454..c1f7ad357a6 100644 --- a/.travis.yml +++ b/.travis.yml @@ -43,7 +43,8 @@ jobs: python: 'pypy3' - env: TOXENV=py35-xdist - python: '3.5' + dist: trusty + python: '3.5.0' # Coverage for: # - pytester's LsofFdLeakChecker diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 390fe84cac1..3f9637248fd 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,6 +18,173 @@ with advance notice in the **Deprecations** section of releases. .. towncrier release notes start +pytest 5.1.1 (2019-08-20) +========================= + +Bug Fixes +--------- + +- `#5751 `_: Fixed ``TypeError`` when importing pytest on Python 3.5.0 and 3.5.1. + + +pytest 5.1.0 (2019-08-15) +========================= + +Removals +-------- + +- `#5180 `_: As per our policy, the following features have been deprecated in the 4.X series and are now + removed: + + * ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead. + + * ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument. + + * ``message`` parameter of ``pytest.raises``. + + * ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only + syntax. This might change the exception message from previous versions, but they still raise + ``TypeError`` on unknown keyword arguments as before. + + * ``pytest.config`` global variable. + + * ``tmpdir_factory.ensuretemp`` method. + + * ``pytest_logwarning`` hook. + + * ``RemovedInPytest4Warning`` warning type. + + * ``request`` is now a reserved name for fixtures. + + + For more information consult + `Deprecations and Removals `__ in the docs. + + +- `#5565 `_: Removed unused support code for `unittest2 `__. + + The ``unittest2`` backport module is no longer + necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem + to be used: after removing it, all tests still pass unchanged. + + Although our policy is to introduce a deprecation period before removing any features or support + for third party libraries, because this code is apparently not used + at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to + remove it in this release. + + If you experience a regression because of this, please + `file an issue `__. + + +- `#5615 `_: ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument. + + This was supported for Python 2 where it was tempting to use ``"message"`` + instead of ``u"message"``. + + Python 3 code is unlikely to pass ``bytes`` to these functions. If you do, + please decode it to a ``str`` beforehand.
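For illustration, a minimal sketch (test names hypothetical) of code adapted to these removals: the context-manager form of ``pytest.raises`` with the keyword-only ``match`` argument replaces the removed string second argument, and bytes messages are decoded before being passed to ``pytest.fail``.

.. code-block:: python

    import pytest


    def test_division_by_zero():
        # the removed form pytest.raises(ZeroDivisionError, "1 / 0") evaluated
        # the string as code; use the context manager with the keyword-only
        # ``match`` argument instead
        with pytest.raises(ZeroDivisionError, match="division"):
            1 / 0


    def test_subprocess_output():
        payload = b""  # e.g. raw bytes captured from a subprocess
        if payload:
            # bytes messages are no longer accepted; decode to str first
            pytest.fail(payload.decode("utf-8"))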
+ + + +Features +-------- + +- `#5564 `_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``. + + +- `#5576 `_: New `NUMBER `__ + option for doctests to ignore irrelevant differences in floating-point numbers. + Inspired by Sébastien Boisgérault's `numtest `__ + extension for doctest. + + + +Improvements +------------ + +- `#5471 `_: JUnit XML now includes a timestamp and hostname in the testsuite tag. + + +- `#5707 `_: Time taken to run the test suite now includes a human-readable representation when it takes over + 60 seconds, for example:: + + ===== 2 failed in 102.70s (0:01:42) ===== + + + +Bug Fixes +--------- + +- `#4344 `_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only. + + +- `#5115 `_: Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest. + + +- `#5477 `_: The XML file produced by ``--junitxml`` now correctly contains a ``<testsuites>`` root element. + + +- `#5523 `_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+. + + +- `#5524 `_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only, + which could lead to pytest crashing when executed a second time with the ``--basetemp`` option. + + +- `#5537 `_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the + standard library on Python 3.8+. + + +- `#5578 `_: Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc.) + so they provide better error messages when users meant to use marks (for example ``@pytest.xfail`` + instead of ``@pytest.mark.xfail``). + + +- `#5606 `_: Fixed internal error when test functions were patched with objects that cannot be compared + for truth values against others, like ``numpy`` arrays. + + +- `#5634 `_: ``pytest.exit`` is now correctly handled in ``unittest`` cases. + This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly. + + +- `#5650 `_: Improved output when parsing an ini configuration file fails. + + +- `#5701 `_: Fix collection of ``staticmethod`` objects defined with ``functools.partial``. + + +- `#5734 `_: Skip async generator test functions, and update the warning message to refer to ``async def`` functions. + + + +Improved Documentation +---------------------- + +- `#5669 `_: Add docstring for ``Testdir.copy_example``. + + + +Trivial/Internal Changes +------------------------ + +- `#5095 `_: XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite + to avoid future regressions. + + +- `#5516 `_: Cache node splitting function which can improve collection performance in very large test suites. + + +- `#5603 `_: Simplified internal ``SafeRepr`` class and removed some dead code. + + +- `#5664 `_: When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``, + the ``test_xfail_handling`` test no longer fails. + + +- `#5684 `_: Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).
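To illustrate the ``NUMBER`` option named in the Features section above, a minimal sketch (module and function names hypothetical) of a doctest collected with ``pytest --doctest-modules``; the inline directive keeps the comparison tolerant of extra floating-point precision:

.. code-block:: python

    def sqrt2():
        """Compute the square root of 2.

        >>> sqrt2()  # doctest: +NUMBER
        1.414
        """
        return 2 ** 0.5

With ``NUMBER`` enabled, the expected value only needs to match the printed float up to the precision actually written, so ``1.414`` matches ``1.4142135623730951``.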
+ + pytest 5.0.1 (2019-07-04) ========================= diff --git a/changelog/4344.bugfix.rst b/changelog/4344.bugfix.rst deleted file mode 100644 index 644a6f03058..00000000000 --- a/changelog/4344.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only. diff --git a/changelog/5095.trivial.rst b/changelog/5095.trivial.rst deleted file mode 100644 index 2256cf9f4cd..00000000000 --- a/changelog/5095.trivial.rst +++ /dev/null @@ -1,2 +0,0 @@ -XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite -to avoid future regressions. diff --git a/changelog/5115.bugfix.rst b/changelog/5115.bugfix.rst deleted file mode 100644 index af75499a395..00000000000 --- a/changelog/5115.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest. diff --git a/changelog/5180.removal.rst b/changelog/5180.removal.rst deleted file mode 100644 index 1174a7cbaa2..00000000000 --- a/changelog/5180.removal.rst +++ /dev/null @@ -1,26 +0,0 @@ -As per our policy, the following features have been deprecated in the 4.X series and are now -removed: - -* ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead. - -* ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument. - -* ``message`` parameter of ``pytest.raises``. - -* ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only - syntax. This might change the exception message from previous versions, but they still raise - ``TypeError`` on unknown keyword arguments as before. - -* ``pytest.config`` global variable. - -* ``tmpdir_factory.ensuretemp`` method. - -* ``pytest_logwarning`` hook. - -* ``RemovedInPytest4Warning`` warning type. - -* ``request`` is now a reserved name for fixtures. - - -For more information consult -`Deprecations and Removals `__ in the docs. diff --git a/changelog/5471.improvement.rst b/changelog/5471.improvement.rst deleted file mode 100644 index 154b64ea763..00000000000 --- a/changelog/5471.improvement.rst +++ /dev/null @@ -1 +0,0 @@ -JUnit XML now includes a timestamp and hostname in the testsuite tag. diff --git a/changelog/5477.bugfix.rst b/changelog/5477.bugfix.rst deleted file mode 100644 index c9c9386e996..00000000000 --- a/changelog/5477.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -The XML file produced by ``--junitxml`` now correctly contain a ``<testsuites>`` root element. diff --git a/changelog/5516.trivial.rst b/changelog/5516.trivial.rst deleted file mode 100644 index 2f6b4e35ee6..00000000000 --- a/changelog/5516.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Cache node splitting function which can improve collection performance in very large test suites. diff --git a/changelog/5523.bugfix.rst b/changelog/5523.bugfix.rst deleted file mode 100644 index 5155b92b156..00000000000 --- a/changelog/5523.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+. diff --git a/changelog/5524.bugfix.rst b/changelog/5524.bugfix.rst deleted file mode 100644 index 96ebbd43e09..00000000000 --- a/changelog/5524.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only, -which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
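For context on the ``tmp_path``/``tmpdir`` entry just above (#5524), a minimal sketch (file name hypothetical) of the pattern that used to break the next run's cleanup: a test leaving a read-only file inside its temporary directory.

.. code-block:: python

    import os
    import stat


    def test_readonly_leftover(tmp_path):
        p = tmp_path / "data.txt"
        p.write_text("content")
        # mark the file read-only; stale temporary directories containing
        # such files previously made a later run with --basetemp crash
        os.chmod(p, stat.S_IREAD)
        assert p.read_text() == "content"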
diff --git a/changelog/5537.bugfix.rst b/changelog/5537.bugfix.rst deleted file mode 100644 index 0263e8cdf4c..00000000000 --- a/changelog/5537.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the -standard library on Python 3.8+. diff --git a/changelog/5564.feature.rst b/changelog/5564.feature.rst deleted file mode 100644 index e2f365a3317..00000000000 --- a/changelog/5564.feature.rst +++ /dev/null @@ -1 +0,0 @@ -New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``. diff --git a/changelog/5565.removal.rst b/changelog/5565.removal.rst deleted file mode 100644 index 33de2b4e76c..00000000000 --- a/changelog/5565.removal.rst +++ /dev/null @@ -1,13 +0,0 @@ -Removed unused support code for `unittest2 `__. - -The ``unittest2`` backport module is no longer -necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem -to be used: after removed, all tests still pass unchanged. - -Although our policy is to introduce a deprecation period before removing any features or support -for third party libraries, because this code is apparently not used -at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to -remove it in this release. - -If you experience a regression because of this, please -`file an issue `__. diff --git a/changelog/5576.feature.rst b/changelog/5576.feature.rst deleted file mode 100644 index 267a28292a8..00000000000 --- a/changelog/5576.feature.rst +++ /dev/null @@ -1,4 +0,0 @@ -New `NUMBER `__ -option for doctests to ignore irrelevant differences in floating-point numbers. -Inspired by Sébastien Boisgérault's `numtest `__ -extension for doctest. diff --git a/changelog/5578.bugfix.rst b/changelog/5578.bugfix.rst deleted file mode 100644 index 5f6c3918535..00000000000 --- a/changelog/5578.bugfix.rst +++ /dev/null @@ -1,3 +0,0 @@ -Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc) -so they provide better error messages when users meant to use marks (for example ``@pytest.xfail`` -instead of ``@pytest.mark.xfail``). diff --git a/changelog/5603.trivial.rst b/changelog/5603.trivial.rst deleted file mode 100644 index 310e8856268..00000000000 --- a/changelog/5603.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Simplified internal ``SafeRepr`` class and removed some dead code. diff --git a/changelog/5606.bugfix.rst b/changelog/5606.bugfix.rst deleted file mode 100644 index 82332ba9972..00000000000 --- a/changelog/5606.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -Fixed internal error when test functions were patched with objects that cannot be compared -for truth values against others, like ``numpy`` arrays. diff --git a/changelog/5615.removal.rst b/changelog/5615.removal.rst deleted file mode 100644 index 6dd9aec1de5..00000000000 --- a/changelog/5615.removal.rst +++ /dev/null @@ -1,7 +0,0 @@ -``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument. - -This was supported for Python 2 where it was tempting to use ``"message"`` -instead of ``u"message"``. - -Python 3 code is unlikely to pass ``bytes`` to these functions. If you do, -please decode it to an ``str`` beforehand. diff --git a/changelog/5634.bugfix.rst b/changelog/5634.bugfix.rst deleted file mode 100644 index a2a282f938b..00000000000 --- a/changelog/5634.bugfix.rst +++ /dev/null @@ -1,2 +0,0 @@ -``pytest.exit`` is now correctly handled in ``unittest`` cases. 
-This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly. diff --git a/changelog/5650.bugfix.rst b/changelog/5650.bugfix.rst deleted file mode 100644 index db57a40b976..00000000000 --- a/changelog/5650.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Improved output when parsing an ini configuration file fails. diff --git a/changelog/5664.trivial.rst b/changelog/5664.trivial.rst deleted file mode 100644 index 3928454ef7e..00000000000 --- a/changelog/5664.trivial.rst +++ /dev/null @@ -1,2 +0,0 @@ -When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``, -the ``test_xfail_handling`` test no longer fails. diff --git a/changelog/5669.doc.rst b/changelog/5669.doc.rst deleted file mode 100644 index 0ec9626ae50..00000000000 --- a/changelog/5669.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Add docstring for ``Testdir.copy_example``. diff --git a/changelog/5684.trivial.rst b/changelog/5684.trivial.rst deleted file mode 100644 index 393fa32051d..00000000000 --- a/changelog/5684.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.). diff --git a/changelog/5701.bugfix.rst b/changelog/5701.bugfix.rst deleted file mode 100644 index b654e74479a..00000000000 --- a/changelog/5701.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix collection of ``staticmethod`` objects defined with ``functools.partial``. diff --git a/changelog/5707.improvement.rst b/changelog/5707.improvement.rst deleted file mode 100644 index 59176e1bbe2..00000000000 --- a/changelog/5707.improvement.rst +++ /dev/null @@ -1,4 +0,0 @@ -Time taken to run the test suite now includes a human-readable representation when it takes over -60 seconds, for example:: - - ===== 2 failed in 102.70s (0:01:42) ===== diff --git a/changelog/5734.bugfix.rst b/changelog/5734.bugfix.rst deleted file mode 100644 index dc20e6b523b..00000000000 --- a/changelog/5734.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Skip async generator test functions, and update the warning message to refer to ``async def`` functions. diff --git a/changelog/5782.bugfix.rst b/changelog/5782.bugfix.rst new file mode 100644 index 00000000000..e961d8fb51e --- /dev/null +++ b/changelog/5782.bugfix.rst @@ -0,0 +1 @@ +Fix decoding error when printing an error response from ``--pastebin``. diff --git a/codecov.yml b/codecov.yml new file mode 100644 index 00000000000..a0a308588e2 --- /dev/null +++ b/codecov.yml @@ -0,0 +1,7 @@ +coverage: + status: + project: true + patch: true + changes: true + +comment: off diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index d372c92fa73..84a41d2bfad 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,8 @@ Release announcements :maxdepth: 2 + release-5.1.1 + release-5.1.0 release-5.0.1 release-5.0.0 release-4.6.5 diff --git a/doc/en/announce/release-5.1.0.rst b/doc/en/announce/release-5.1.0.rst new file mode 100644 index 00000000000..73e956d77e3 --- /dev/null +++ b/doc/en/announce/release-5.1.0.rst @@ -0,0 +1,56 @@ +pytest-5.1.0 +======================================= + +The pytest team is proud to announce the 5.1.0 release! + +pytest is a mature Python testing tool with more than 2000 tests +against itself, passing on many different interpreters and platforms.
+ +This release contains a number of bug fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + https://docs.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + https://docs.pytest.org/en/latest/ + +As usual, you can upgrade from PyPI via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Albert Tugushev +* Alexey Zankevich +* Anthony Sottile +* Bruno Oliveira +* Daniel Hahler +* David Röthlisberger +* Florian Bruhin +* Ilya Stepin +* Jon Dufresne +* Kaiqi +* Max R +* Miro Hrončok +* Oliver Bestwalter +* Ran Benita +* Ronny Pfannschmidt +* Samuel Searles-Bryant +* Semen Zhydenko +* Steffen Schroeder +* Thomas Grainger +* Tim Hoffmann +* William Woodall +* Wojtek Erbetowski +* Xixi Zhao +* Yash Todi +* boris +* dmitry.dygalo +* helloocc +* martbln +* mei-li + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/announce/release-5.1.1.rst b/doc/en/announce/release-5.1.1.rst new file mode 100644 index 00000000000..9cb731ebb98 --- /dev/null +++ b/doc/en/announce/release-5.1.1.rst @@ -0,0 +1,24 @@ +pytest-5.1.1 +======================================= + +pytest 5.1.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at https://docs.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Anthony Sottile +* Bruno Oliveira +* Daniel Hahler +* Florian Bruhin +* Hugo van Kemenade +* Ran Benita +* Ronny Pfannschmidt + + +Happy testing, +The pytest Development Team diff --git a/doc/en/assert.rst b/doc/en/assert.rst index bc7e75256a6..16de778981d 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -47,7 +47,7 @@ you will see the return value of the function call: E + where 3 = f() test_assert1.py:6: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================ 1 failed in 0.02s ============================= ``pytest`` has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -208,7 +208,7 @@ if you run this module: E Use -v to get the full diff test_assert2.py:6: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================ 1 failed in 0.02s ============================= Special comparisons are done for a number of cases: @@ -279,7 +279,7 @@ the conftest file: E vals: 1 != 2 test_foocompare.py:12: AssertionError - 1 failed in 0.12 seconds + 1 failed in 0.02s .. _assert-details: .. _`assert introspection`: diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 4309a16eaa4..fc8b3f40f9a 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -160,7 +160,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a in python < 3.6 this is a pathlib2.Path - no tests ran in 0.12 seconds + no tests ran in 0.00s You can also interactively ask for help, e.g.
by typing on the Python interactive prompt something like: diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 84b3fa00940..384be5daf54 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -60,10 +60,10 @@ If you run this for the first time you will see two failures: @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): - > pytest.fail("bad luck") - E Failed: bad luck + > pytest.fail("bad luck") + E Failed: bad luck - test_50.py:6: Failed + test_50.py:7: Failed _______________________________ test_num[25] _______________________________ i = 25 @@ -71,11 +71,11 @@ If you run this for the first time you will see two failures: @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): - > pytest.fail("bad luck") - E Failed: bad luck + > pytest.fail("bad luck") + E Failed: bad luck - test_50.py:6: Failed - 2 failed, 48 passed in 0.12 seconds + test_50.py:7: Failed + 2 failed, 48 passed in 0.08s If you then run it with ``--lf``: @@ -99,10 +99,10 @@ If you then run it with ``--lf``: @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): - > pytest.fail("bad luck") - E Failed: bad luck + > pytest.fail("bad luck") + E Failed: bad luck - test_50.py:6: Failed + test_50.py:7: Failed _______________________________ test_num[25] _______________________________ i = 25 @@ -110,11 +110,11 @@ If you then run it with ``--lf``: @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): - > pytest.fail("bad luck") - E Failed: bad luck + > pytest.fail("bad luck") + E Failed: bad luck - test_50.py:6: Failed - ================= 2 failed, 48 deselected in 0.12 seconds ================== + test_50.py:7: Failed + ===================== 2 failed, 48 deselected in 0.02s ===================== You have run only the two failing tests from the last run, while the 48 passing tests have not been run ("deselected"). @@ -143,10 +143,10 @@ of ``FF`` and dots): @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): - > pytest.fail("bad luck") - E Failed: bad luck + > pytest.fail("bad luck") + E Failed: bad luck - test_50.py:6: Failed + test_50.py:7: Failed _______________________________ test_num[25] _______________________________ i = 25 @@ -154,11 +154,11 @@ of ``FF`` and dots): @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): - > pytest.fail("bad luck") - E Failed: bad luck + > pytest.fail("bad luck") + E Failed: bad luck - test_50.py:6: Failed - =================== 2 failed, 48 passed in 0.12 seconds ==================== + test_50.py:7: Failed + ======================= 2 failed, 48 passed in 0.07s ======================= .. _`config.cache`: @@ -227,10 +227,10 @@ If you run this command for the first time, you can see the print statement: > assert mydata == 23 E assert 42 == 23 - test_caching.py:17: AssertionError + test_caching.py:20: AssertionError -------------------------- Captured stdout setup --------------------------- running expensive computation... - 1 failed in 0.12 seconds + 1 failed in 0.02s If you run it a second time, the value will be retrieved from the cache and nothing will be printed: @@ -248,8 +248,8 @@ the cache and nothing will be printed: > assert mydata == 23 E assert 42 == 23 - test_caching.py:17: AssertionError - 1 failed in 0.12 seconds + test_caching.py:20: AssertionError + 1 failed in 0.02s See the :ref:`cache-api` for more details. 
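As a compact variant of the ``mydata`` caching shown above, a sketch of a fixture using the same ``config.cache`` get/set calls; ``compute_expensive_value`` is a hypothetical stand-in for any slow setup worth persisting across runs:

.. code-block:: python

    import pytest


    def compute_expensive_value():
        # placeholder for an expensive computation worth caching across runs
        return 42


    @pytest.fixture
    def mydata(request):
        value = request.config.cache.get("example/value", None)
        if value is None:
            value = compute_expensive_value()
            request.config.cache.set("example/value", value)
        return value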
@@ -283,7 +283,7 @@ You can always peek at the content of the cache using the example/value contains: 42 - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== ``--cache-show`` takes an optional argument to specify a glob pattern for filtering: @@ -300,7 +300,7 @@ filtering: example/value contains: 42 - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== Clearing Cache content ---------------------- diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 55714c25b4b..2a9de0be3e2 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -88,10 +88,10 @@ of the failing function and hide the other one: > assert False E assert False - test_module.py:9: AssertionError + test_module.py:12: AssertionError -------------------------- Captured stdout setup --------------------------- setting up - ==================== 1 failed, 1 passed in 0.12 seconds ==================== + ======================= 1 failed, 1 passed in 0.02s ======================== Accessing captured output from a test function --------------------------------------------------- diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 2020b30b3af..7ecfe7e563b 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -36,7 +36,7 @@ then you can just invoke ``pytest`` directly: test_example.txt . [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================ 1 passed in 0.01s ============================= By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you can pass additional globs using the ``--doctest-glob`` option (multi-allowed). @@ -66,7 +66,7 @@ and functions, including from test modules: mymodule.py . [ 50%] test_example.txt . 
[100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================ 2 passed in 0.01s ============================= You can make these changes permanent in your project by putting them into a pytest.ini file like this: diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 909f23a2e13..f5acd296f1b 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -52,7 +52,7 @@ You can then restrict a test run to only run tests marked with ``webtest``: test_server.py::test_send_http PASSED [100%] - ================== 1 passed, 3 deselected in 0.12 seconds ================== + ===================== 1 passed, 3 deselected in 0.01s ====================== Or the inverse, running all tests except the webtest ones: @@ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones: test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - ================== 3 passed, 1 deselected in 0.12 seconds ================== + ===================== 3 passed, 1 deselected in 0.01s ====================== Selecting tests based on their node ID -------------------------------------- @@ -89,7 +89,7 @@ tests based on their module, class, method, or function name: test_server.py::TestClass::test_method PASSED [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================ 1 passed in 0.01s ============================= You can also select on the class: @@ -104,7 +104,7 @@ You can also select on the class: test_server.py::TestClass::test_method PASSED [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================ 1 passed in 0.01s ============================= Or select multiple nodes: @@ -120,7 +120,7 @@ Or select multiple nodes: test_server.py::TestClass::test_method PASSED [ 50%] test_server.py::test_send_http PASSED [100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================ 2 passed in 0.01s ============================= .. _node-id: @@ -159,7 +159,7 @@ select tests based on their names: test_server.py::test_send_http PASSED [100%] - ================== 1 passed, 3 deselected in 0.12 seconds ================== + ===================== 1 passed, 3 deselected in 0.01s ====================== And you can also run all tests except the ones that match the keyword: @@ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword: test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - ================== 3 passed, 1 deselected in 0.12 seconds ================== + ===================== 3 passed, 1 deselected in 0.01s ====================== Or to select "http" and "quick" tests: @@ -192,7 +192,7 @@ Or to select "http" and "quick" tests: test_server.py::test_send_http PASSED [ 50%] test_server.py::test_something_quick PASSED [100%] - ================== 2 passed, 2 deselected in 0.12 seconds ================== + ===================== 2 passed, 2 deselected in 0.01s ====================== .. 
note:: @@ -413,7 +413,7 @@ the test needs: test_someenv.py s [100%] - ======================== 1 skipped in 0.12 seconds ========================= + ============================ 1 skipped in 0.00s ============================ and here is one that specifies exactly the environment needed: @@ -428,7 +428,7 @@ and here is one that specifies exactly the environment needed: test_someenv.py . [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================ 1 passed in 0.01s ============================= The ``--markers`` option always gives you a list of available markers: @@ -499,7 +499,7 @@ The output is as follows: $ pytest -q -s Mark(name='my_marker', args=(,), kwargs={}) . - 1 passed in 0.12 seconds + 1 passed in 0.00s We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``. @@ -551,7 +551,7 @@ Let's run this without capturing output and see what we get: glob args=('class',) kwargs={'x': 2} glob args=('module',) kwargs={'x': 1} . - 1 passed in 0.12 seconds + 1 passed in 0.01s marking platform specific tests with pytest -------------------------------------------------------------- @@ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected: ========================= short test summary info ========================== SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux - =================== 2 passed, 2 skipped in 0.12 seconds ==================== + ======================= 2 passed, 2 skipped in 0.01s ======================= Note that if you specify a platform via the marker-command line option like this: @@ -638,7 +638,7 @@ Note that if you specify a platform via the marker-command line option like this test_plat.py . [100%] - ================== 1 passed, 3 deselected in 0.12 seconds ================== + ===================== 1 passed, 3 deselected in 0.01s ====================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set: test_module.py:8: in test_interface_complex assert 0 E assert 0 - ================== 2 failed, 2 deselected in 0.12 seconds ================== + ===================== 2 failed, 2 deselected in 0.02s ====================== or to select both "event" and "interface" tests: @@ -739,4 +739,4 @@ or to select both "event" and "interface" tests: test_module.py:12: in test_event_simple assert 0 E assert 0 - ================== 3 failed, 1 deselected in 0.12 seconds ================== + ===================== 3 failed, 1 deselected in 0.03s ====================== diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 75dc764e9f4..6699de74933 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -41,7 +41,7 @@ now execute the test specification: usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ==================== 1 failed, 1 passed in 0.12 seconds ==================== + ======================= 1 failed, 1 passed in 0.02s ======================== .. regendoc:wipe @@ -77,7 +77,7 @@ consulted when reporting in ``verbose`` mode: usecase execution failed spec failed: 'some': 'other' no further details known at this point. 
- ==================== 1 failed, 1 passed in 0.12 seconds ==================== + ======================= 1 failed, 1 passed in 0.02s ======================== .. regendoc:wipe @@ -97,4 +97,4 @@ interesting to just look at the collection tree: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.02s =========================== diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 1e6d53e37f3..cf99ea47268 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -54,7 +54,7 @@ This means that we only run 2 tests if we do not pass ``--all``: $ pytest -q test_compute.py .. [100%] - 2 passed in 0.12 seconds + 2 passed in 0.01s We run only two computations, so we see two dots. let's run the full monty: @@ -72,8 +72,8 @@ let's run the full monty: > assert param1 < 4 E assert 4 < 4 - test_compute.py:3: AssertionError - 1 failed, 4 passed in 0.12 seconds + test_compute.py:4: AssertionError + 1 failed, 4 passed in 0.02s As expected when running the full range of ``param1`` values we'll get an error on the last one. @@ -172,7 +172,7 @@ objects, they are still using the default pytest representation: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.01s =========================== In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs together with the actual data, instead of listing them separately. @@ -229,7 +229,7 @@ this is a fully self-contained example which you can run with: test_scenarios.py .... [100%] - ========================= 4 passed in 0.12 seconds ========================= + ============================ 4 passed in 0.01s ============================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function: @@ -248,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.01s =========================== Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. With pytest-2.3 this leads to a @@ -262,8 +262,8 @@ Deferring the setup of parametrized resources The parametrization of test functions happens at collection time. It is a good idea to setup expensive resources like DB connections or subprocess only when the actual test is run. -Here is a simple example how you can achieve that, first -the actual test requiring a ``db`` object: +Here is a simple example of how you can achieve that. This test +requires a ``db`` object fixture: .. code-block:: python @@ -323,7 +323,7 @@ Let's first see how it looks like at collection time: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== And then when we run the test: @@ -342,8 +342,8 @@ And then when we run the test: > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes - test_backends.py:6: Failed - 1 failed, 1 passed in 0.12 seconds + test_backends.py:8: Failed + 1 failed, 1 passed in 0.02s The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed.
Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. @@ -394,7 +394,7 @@ The result of this test will be successful: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== .. regendoc:wipe @@ -453,8 +453,8 @@ argument sets to use for each test function. Let's run it: > assert a == b E assert 1 == 2 - test_parametrize.py:18: AssertionError - 1 failed, 2 passed in 0.12 seconds + test_parametrize.py:21: AssertionError + 1 failed, 2 passed in 0.03s Indirect parametrization with multiple fixtures -------------------------------------------------------------- @@ -475,11 +475,10 @@ Running it results in some skips if we don't have all the python interpreters in .. code-block:: pytest . $ pytest -rs -q multipython.py - ssssssssssss...ssssssssssss [100%] + ssssssssssss......sss...... [100%] ========================= short test summary info ========================== - SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found - SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.7' not found - 3 passed, 24 skipped in 0.12 seconds + SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found + 12 passed, 15 skipped in 0.62s Indirect parametrization of optional implementations/imports -------------------------------------------------------------------- @@ -547,8 +546,8 @@ If you run this with reporting for skips enabled: test_module.py .s [100%] ========================= short test summary info ========================== - SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2' - =================== 1 passed, 1 skipped in 0.12 seconds ==================== + SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2' + ======================= 1 passed, 1 skipped in 0.01s ======================= You'll see that we don't have an ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: @@ -610,7 +609,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker: test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%] test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%] - ============ 2 passed, 15 deselected, 1 xfailed in 0.12 seconds ============ + =============== 2 passed, 15 deselected, 1 xfailed in 0.08s ================ As the result: diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index cd4e34352a1..a718de4005a 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -158,7 +158,7 @@ The test collection would look like this: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.01s =========================== You can check for multiple glob patterns by adding a space between the patterns: @@ -221,7 +221,7 @@ You can always peek at the collection tree without running tests like this: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== .. 
_customizing-test-collection: @@ -297,7 +297,7 @@ file will be left out: rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.01s =========================== It's also possible to ignore files based on Unix shell-style wildcards by adding patterns to ``collect_ignore_glob``. diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 05d06ecb6be..c024b8616df 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -119,7 +119,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: a = "1" * 100 + "a" + "2" * 100 b = "1" * 100 + "b" + "2" * 100 > assert a == b - E AssertionError: assert '111111111111...2222222222222' == '1111111111111...2222222222222' + E AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222' E Skipping 90 identical leading characters in diff, use -v to show E Skipping 91 identical trailing characters in diff, use -v to show E - 1111111111a222222222 @@ -136,7 +136,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: a = "1\n" * 100 + "a" + "2\n" * 100 b = "1\n" * 100 + "b" + "2\n" * 100 > assert a == b - E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' + E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n' E Skipping 190 identical leading characters in diff, use -v to show E Skipping 191 identical trailing characters in diff, use -v to show E 1 @@ -235,7 +235,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_not_in_text_multiline(self): text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail" > assert "foo" not in text - E AssertionError: assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail' + E AssertionError: assert 'foo' not in 'some multil...nand a\ntail' E 'foo' is contained here: E some multiline E text @@ -267,7 +267,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_not_in_text_single_long(self): text = "head " * 50 + "foo " + "tail " * 20 > assert "foo" not in text - E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' + E AssertionError: assert 'foo' not in 'head head h...l tail tail ' E 'foo' is contained here: E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ @@ -280,7 +280,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: def test_not_in_text_single_long_term(self): text = "head " * 50 + "f" * 70 + "tail " * 20 > assert "f" * 70 not in text - E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' + E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -301,7 +301,7 @@ Here is a nice run of several failures and how ``pytest`` presents things: left = Foo(1, "b") right = Foo(1, "c") > assert left == right - E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c') + E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialis...oo(a=1, b='c') E Omitting 1 identical items, use -vv to show E Differing attributes: E b: 'b' != 'c' @@ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things: E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a failure_demo.py:282: AssertionError - ======================== 44 failed in 0.12 seconds ========================= + ============================ 44 failed in 0.26s ============================ diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index b4baa2b9b83..fea73f4e96b 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -65,7 +65,7 @@ Let's run this without supplying our new option: test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- first - 1 failed in 0.12 seconds + 1 failed in 0.02s And now with supplying a command line option: @@ -89,7 +89,7 @@ And now with supplying a command line option: test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- second - 1 failed in 0.12 seconds + 1 failed in 0.02s You can see that the command line option arrived in our test. This completes the basic pattern. However, one often rather wants to process @@ -132,7 +132,7 @@ directory with the above conftest.py: rootdir: $REGENDOC_TMPDIR collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== .. _`excontrolskip`: @@ -201,7 +201,7 @@ and when running it will see a skipped "slow" test: ========================= short test summary info ========================== SKIPPED [1] test_module.py:8: need --runslow option to run - =================== 1 passed, 1 skipped in 0.12 seconds ==================== + ======================= 1 passed, 1 skipped in 0.01s ======================= Or run it including the ``slow`` marked test: @@ -216,7 +216,7 @@ Or run it including the ``slow`` marked test: test_module.py .. [100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================ 2 passed in 0.01s ============================= Writing well integrated assertion helpers -------------------------------------------------- @@ -261,7 +261,7 @@ Let's run our little function: E Failed: not configured: 42 test_checkconfig.py:11: Failed - 1 failed in 0.12 seconds + 1 failed in 0.02s If you only want to hide certain exceptions, you can set ``__tracebackhide__`` to a callable which gets the ``ExceptionInfo`` object. You can for example use @@ -358,7 +358,7 @@ which will add the string to the test header accordingly: rootdir: $REGENDOC_TMPDIR collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== .. regendoc:wipe @@ -388,7 +388,7 @@ which will add info only when run with "--v": rootdir: $REGENDOC_TMPDIR collecting ... 
collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== and nothing when run plainly: @@ -401,7 +401,7 @@ and nothing when run plainly: rootdir: $REGENDOC_TMPDIR collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.00s =========================== profiling test duration -------------------------- @@ -447,7 +447,7 @@ Now we can profile which test functions execute the slowest: 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 0.10s call test_some_are_slow.py::test_funcfast - ========================= 3 passed in 0.12 seconds ========================= + ============================ 3 passed in 0.61s ============================= incremental testing - test steps --------------------------------------------------- @@ -531,7 +531,7 @@ If we run this: ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::test_deletion reason: previous test failed (test_modification) - ============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds =============== + ================== 1 failed, 2 passed, 1 xfailed in 0.03s ================== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". @@ -644,7 +644,7 @@ We can run this: E assert 0 a/test_db2.py:2: AssertionError - ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== + ============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.05s ============== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. We could of course @@ -733,7 +733,7 @@ and run them: E assert 0 test_module.py:6: AssertionError - ========================= 2 failed in 0.12 seconds ========================= + ============================ 2 failed in 0.02s ============================= you will have a "failures" file which contains the failing test ids: @@ -848,7 +848,7 @@ and run it: E assert 0 test_module.py:19: AssertionError - ==================== 2 failed, 1 error in 0.12 seconds ===================== + ======================== 2 failed, 1 error in 0.02s ======================== You'll see that the fixture finalizers could use the precise reporting information. diff --git a/doc/en/example/special.rst b/doc/en/example/special.rst index 5161c43ab12..9ad55f3d7b6 100644 --- a/doc/en/example/special.rst +++ b/doc/en/example/special.rst @@ -81,4 +81,4 @@ If you run this without output capturing: .test other .test_unit1 method called . - 4 passed in 0.12 seconds + 4 passed in 0.01s diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index b494ec0fe1d..91b5aca85e2 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -92,11 +92,11 @@ marked ``smtp_connection`` fixture function. 
Running the test looks like this: def test_ehlo(smtp_connection): response, msg = smtp_connection.ehlo() assert response == 250 - > assert 0 # for demo purposes + > assert 0 # for demo purposes E assert 0 - test_smtpsimple.py:11: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + test_smtpsimple.py:14: AssertionError + ============================ 1 failed in 0.18s ============================= In the failure traceback we see that the test function was called with a ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -246,7 +246,7 @@ inspect what is going on and can now run the tests: > assert 0 # for demo purposes E assert 0 - test_module.py:6: AssertionError + test_module.py:7: AssertionError ________________________________ test_noop _________________________________ smtp_connection = @@ -257,8 +257,8 @@ inspect what is going on and can now run the tests: > assert 0 # for demo purposes E assert 0 - test_module.py:11: AssertionError - ========================= 2 failed in 0.12 seconds ========================= + test_module.py:13: AssertionError + ============================ 2 failed in 0.20s ============================= You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp_connection`` object was passed into the @@ -315,15 +315,15 @@ Consider the code below: .. literalinclude:: example/fixtures/test_fixtures_order.py -The fixtures requested by ``test_foo`` will be instantiated in the following order: +The fixtures requested by ``test_order`` will be instantiated in the following order: 1. ``s1``: is the highest-scoped fixture (``session``). 2. ``m1``: is the second highest-scoped fixture (``module``). 3. ``a1``: is a ``function``-scoped ``autouse`` fixture: it will be instantiated before other fixtures within the same scope. 4. ``f3``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point -5. ``f1``: is the first ``function``-scoped fixture in ``test_foo`` parameter list. -6. ``f2``: is the last ``function``-scoped fixture in ``test_foo`` parameter list. +5. ``f1``: is the first ``function``-scoped fixture in ``test_order`` parameter list. +6. ``f2``: is the last ``function``-scoped fixture in ``test_order`` parameter list. .. _`finalization`: @@ -361,7 +361,7 @@ Let's execute it: $ pytest -s -q --tb=no FFteardown smtp - 2 failed in 0.12 seconds + 2 failed in 0.20s We see that the ``smtp_connection`` instance is finalized after the two tests finished execution. 
Note that if we decorated our fixture @@ -515,7 +515,7 @@ again, nothing much has changed: $ pytest -s -q --tb=no FFfinalizing (smtp.gmail.com) - 2 failed in 0.12 seconds + 2 failed in 0.21s Let's quickly create another test module that actually sets the server URL in its module namespace: @@ -538,7 +538,7 @@ Running it: F [100%] ================================= FAILURES ================================= ______________________________ test_showhelo _______________________________ - test_anothersmtp.py:5: in test_showhelo + test_anothersmtp.py:6: in test_showhelo assert 0, smtp_connection.helo() E AssertionError: (250, b'mail.python.org') E assert 0 @@ -654,7 +654,7 @@ So let's just do another run: > assert 0 # for demo purposes E assert 0 - test_module.py:6: AssertionError + test_module.py:7: AssertionError ________________________ test_noop[smtp.gmail.com] _________________________ smtp_connection = @@ -665,7 +665,7 @@ So let's just do another run: > assert 0 # for demo purposes E assert 0 - test_module.py:11: AssertionError + test_module.py:13: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ smtp_connection = @@ -676,7 +676,7 @@ So let's just do another run: > assert b"smtp.gmail.com" in msg E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING' - test_module.py:5: AssertionError + test_module.py:6: AssertionError -------------------------- Captured stdout setup --------------------------- finalizing ________________________ test_noop[mail.python.org] ________________________ @@ -689,10 +689,10 @@ So let's just do another run: > assert 0 # for demo purposes E assert 0 - test_module.py:11: AssertionError + test_module.py:13: AssertionError ------------------------- Captured stdout teardown ------------------------- finalizing - 4 failed in 0.12 seconds + 4 failed in 0.89s We see that our two test functions each ran twice, against the different ``smtp_connection`` instances. Note also, that with the ``mail.python.org`` @@ -771,7 +771,7 @@ Running the above tests results in the following test IDs being used: - ======================= no tests ran in 0.12 seconds ======================= + ========================== no tests ran in 0.01s =========================== .. _`fixture-parametrize-marks`: @@ -812,7 +812,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: test_fixture_marks.py::test_data[1] PASSED [ 66%] test_fixture_marks.py::test_data[2] SKIPPED [100%] - =================== 2 passed, 1 skipped in 0.12 seconds ==================== + ======================= 2 passed, 1 skipped in 0.01s ======================= .. _`interdependent fixtures`: @@ -861,7 +861,7 @@ Here we declare an ``app`` fixture which receives the previously defined test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================ 2 passed in 0.44s ============================= Due to the parametrization of ``smtp_connection``, the test will run twice with two different ``App`` instances and respective smtp servers. 
There is no @@ -971,7 +971,7 @@ Let's run the tests in verbose mode and with looking at the print-output: TEARDOWN modarg mod2 - ========================= 8 passed in 0.12 seconds ========================= + ============================ 8 passed in 0.01s ============================= You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. @@ -1043,7 +1043,7 @@ to verify our fixture is activated and the tests pass: $ pytest -q .. [100%] - 2 passed in 0.12 seconds + 2 passed in 0.01s You can specify multiple fixtures like this: @@ -1151,7 +1151,7 @@ If we run it, we get two passing tests: $ pytest -q .. [100%] - 2 passed in 0.12 seconds + 2 passed in 0.01s Here is how autouse fixtures work in other scopes: diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index f1c28769f0b..38a3618189e 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -28,7 +28,7 @@ Install ``pytest`` .. code-block:: bash $ pytest --version - This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.x/site-packages/pytest.py + This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py .. _`simpletest`: @@ -68,8 +68,8 @@ That’s it. You can now execute the test function: E assert 4 == 5 E + where 4 = func(3) - test_sample.py:5: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + test_sample.py:6: AssertionError + ============================ 1 failed in 0.02s ============================= This test returns a failure report because ``func(3)`` does not return ``5``. @@ -108,7 +108,7 @@ Execute the test function with “quiet” reporting mode: $ pytest -q test_sysexit.py . [100%] - 1 passed in 0.12 seconds + 1 passed in 0.00s Group multiple tests in a class -------------------------------------------------------------- @@ -140,12 +140,12 @@ Once you develop multiple tests, you may want to group them into a class. pytest def test_two(self): x = "hello" - > assert hasattr(x, 'check') + > assert hasattr(x, "check") E AssertionError: assert False E + where False = hasattr('hello', 'check') test_class.py:8: AssertionError - 1 failed, 1 passed in 0.12 seconds + 1 failed, 1 passed in 0.02s The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure. @@ -180,7 +180,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look test_tmpdir.py:3: AssertionError --------------------------- Captured stdout call --------------------------- PYTEST_TMPDIR/test_needsfiles0 - 1 failed in 0.12 seconds + 1 failed in 0.02s More info on tmpdir handling is available at :ref:`Temporary directories and files `. diff --git a/doc/en/index.rst b/doc/en/index.rst index 6c7c848659e..65b4631cde0 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -44,7 +44,7 @@ To execute it: E + where 4 = inc(3) test_sample.py:6: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================ 1 failed in 0.02s ============================= Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See :ref:`Getting Started ` for more examples. 
diff --git a/doc/en/monkeypatch.rst b/doc/en/monkeypatch.rst index a38f07e795d..1d1bd68c03a 100644 --- a/doc/en/monkeypatch.rst +++ b/doc/en/monkeypatch.rst @@ -50,7 +50,7 @@ these patches. :py:meth:`monkeypatch.chdir` to change the context of the current working directory during a test. -5. Use py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also +5. Use :py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also call :py:meth:`pkg_resources.fixup_namespace_packages` and :py:meth:`importlib.invalidate_caches`. See the `monkeypatch blog post`_ for some introduction material diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index 2e2d846ea59..85f233be3f4 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -75,7 +75,7 @@ them in turn: E + where 54 = eval('6*9') test_expectation.py:6: AssertionError - ==================== 1 failed, 2 passed in 0.12 seconds ==================== + ======================= 1 failed, 2 passed in 0.02s ======================== .. note:: @@ -128,7 +128,7 @@ Let's run this: test_expectation.py ..x [100%] - =================== 2 passed, 1 xfailed in 0.12 seconds ==================== + ======================= 2 passed, 1 xfailed in 0.02s ======================= The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. @@ -205,7 +205,7 @@ If we now pass two stringinput values, our test will run twice: $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py .. [100%] - 2 passed in 0.12 seconds + 2 passed in 0.01s Let's also run with a stringinput that will lead to a failing test: @@ -225,7 +225,7 @@ Let's also run with a stringinput that will lead to a failing test: E + where = '!'.isalpha test_strings.py:4: AssertionError - 1 failed in 0.12 seconds + 1 failed in 0.02s As expected, our test function fails. @@ -239,7 +239,7 @@ list: s [100%] ========================= short test summary info ========================== SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2 - 1 skipped in 0.12 seconds + 1 skipped in 0.00s Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, parameter names must not be duplicated across those sets, otherwise an error will be raised. diff --git a/doc/en/py27-py34-deprecation.rst b/doc/en/py27-py34-deprecation.rst index 5c2f0610531..54119784387 100644 --- a/doc/en/py27-py34-deprecation.rst +++ b/doc/en/py27-py34-deprecation.rst @@ -7,8 +7,8 @@ Python 3.4's last release is scheduled for `March 2019 `__. pytest is one of the participating projects of the https://python3statement.org. -The **pytest 4.6** series will be the last to support Python 2.7 and 3.4, and is scheduled -to be released by **mid-2019**. **pytest 5.0** and onwards will support only Python 3.5+. +The **pytest 4.6** series is the last to support Python 2.7 and 3.4, and was released in +**June 2019**. **pytest 5.0** and onwards will support only Python 3.5+.
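The ``--stringinput`` runs above are driven by a small pair of hooks in ``conftest.py``; the following sketch is consistent with the output shown (option and fixture names taken from the test IDs above):

.. code-block:: python

    # conftest.py
    def pytest_addoption(parser):
        parser.addoption(
            "--stringinput",
            action="append",
            default=[],
            help="list of stringinputs to pass to test functions",
        )


    def pytest_generate_tests(metafunc):
        if "stringinput" in metafunc.fixturenames:
            metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput"))

With no ``--stringinput`` given, the ``default=[]`` yields an empty parameter set, which is why the last run above reports the test as skipped rather than failed.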
Thanks to the `python_requires`_ ``setuptools`` option, Python 2.7 and Python 3.4 users using a modern ``pip`` version diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 2b654560e10..d271b0b2ac0 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -371,7 +371,7 @@ Running it with the report-on-xfail option gives this output: XFAIL xfail_demo.py::test_hello6 reason: reason XFAIL xfail_demo.py::test_hello7 - ======================== 7 xfailed in 0.12 seconds ========================= + ============================ 7 xfailed in 0.05s ============================ .. _`skip/xfail with parametrize`: diff --git a/doc/en/talks.rst b/doc/en/talks.rst index f6619281738..eb1eadbe1ef 100644 --- a/doc/en/talks.rst +++ b/doc/en/talks.rst @@ -4,7 +4,6 @@ Talks and Tutorials .. sidebar:: Next Open Trainings - - `Training at Workshoptage 2019 `_ (German), 10th September 2019, Rapperswil, Switzerland. - `3 day hands-on workshop covering pytest, tox and devpi: "Professional Testing with Python" `_ (English), October 21 - 23, 2019, Leipzig, Germany. .. _`funcargs`: funcargs.html diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 1b565cee89e..c231e76a1f8 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -64,7 +64,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmp_path.py:13: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================ 1 failed in 0.02s ============================= .. _`tmp_path_factory example`: @@ -132,8 +132,8 @@ Running this would result in a passed test except for the last > assert 0 E assert 0 - test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + test_tmpdir.py:9: AssertionError + ============================ 1 failed in 0.02s ============================= .. _`tmpdir factory example`: diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 18b6a721b2e..4f0a279a2b5 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -151,22 +151,22 @@ the ``self.db`` values in the traceback: def test_method1(self): assert hasattr(self, "db") - > assert 0, self.db # fail for demo purposes + > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - test_unittest_db.py:9: AssertionError + test_unittest_db.py:10: AssertionError ___________________________ MyTest.test_method2 ____________________________ self = def test_method2(self): - > assert 0, self.db # fail for demo purposes + > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - test_unittest_db.py:12: AssertionError - ========================= 2 failed in 0.12 seconds ========================= + test_unittest_db.py:13: AssertionError + ============================ 2 failed in 0.02s ============================= This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention @@ -219,7 +219,7 @@ Running this test module ...: $ pytest -q test_unittest_cleandir.py . [100%] - 1 passed in 0.12 seconds + 1 passed in 0.01s ... gives us one passed test because the ``initdir`` fixture function was executed ahead of the ``test_method``. 
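The ``tmp_path`` test whose traceback changed above boils down to the following pattern; this is a generic sketch of the fixture's use, not the exact documented test:

.. code-block:: python

    CONTENT = "content"


    def test_create_file(tmp_path):
        # tmp_path is a pathlib.Path pointing at a per-test temporary directory
        d = tmp_path / "sub"
        d.mkdir()
        p = d / "hello.txt"
        p.write_text(CONTENT)
        assert p.read_text() == CONTENT
        assert len(list(tmp_path.iterdir())) == 1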
diff --git a/doc/en/usage.rst b/doc/en/usage.rst index d5ff8a9847b..78702ea86ab 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -247,7 +247,7 @@ Example: XPASS test_example.py::test_xpass always xfail ERROR test_example.py::test_error - assert 0 FAILED test_example.py::test_fail - assert 0 - = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = + == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s === The ``-r`` option accepts a number of characters after it, with ``a`` used above meaning "all except passes". @@ -297,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipp ========================= short test summary info ========================== FAILED test_example.py::test_fail - assert 0 SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test - = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = + == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s === Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had captured output: @@ -336,7 +336,7 @@ captured output: ok ========================= short test summary info ========================== PASSED test_example.py::test_ok - = 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = + == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s === .. _pdb-option: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index d48cd468822..b8a2df270d1 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -41,7 +41,7 @@ Running pytest now produces this output: warnings.warn(UserWarning("api v1, should use functions from v2")) -- Docs: https://docs.pytest.org/en/latest/warnings.html - =================== 1 passed, 1 warnings in 0.12 seconds =================== + ====================== 1 passed, 1 warnings in 0.00s ======================= The ``-W`` flag can be passed to control which warnings will be displayed or even turn them into errors: @@ -64,7 +64,7 @@ them into errors: E UserWarning: api v1, should use functions from v2 test_show_warnings.py:5: UserWarning - 1 failed in 0.12 seconds + 1 failed in 0.02s The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option. For example, the configuration below will ignore all user warnings, but will transform @@ -407,7 +407,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta class Test: -- Docs: https://docs.pytest.org/en/latest/warnings.html - 1 warnings in 0.12 seconds + 1 warnings in 0.00s These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings.
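The ``test_show_warnings.py`` failure above comes from promoting the warning to an error with ``-W``; the complementary, test-local way to pin down a warning is ``pytest.warns``. A sketch reusing the same warning message as the docs above:

.. code-block:: python

    import warnings

    import pytest


    def api_v1():
        warnings.warn(UserWarning("api v1, should use functions from v2"))
        return 1


    def test_warning():
        # the test fails if the expected warning is not issued
        with pytest.warns(UserWarning, match="api v1"):
            assert api_v1() == 1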
diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index 7975f58267c..5f429c219a7 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -442,7 +442,7 @@ additionally it is possible to copy examples for an example folder before runnin testdir.copy_example("test_example.py") -- Docs: https://docs.pytest.org/en/latest/warnings.html - =================== 2 passed, 1 warnings in 0.12 seconds =================== + ====================== 2 passed, 1 warnings in 0.12s ======================= For more information about the result object that ``runpytest()`` returns, and the methods that it provides please check out the :py:class:`RunResult diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py index 744e9cf66f6..534bfe2a831 100644 --- a/src/_pytest/_code/code.py +++ b/src/_pytest/_code/code.py @@ -596,7 +596,7 @@ def getrepr( ) return fmt.repr_excinfo(self) - def match(self, regexp: Union[str, Pattern]) -> bool: + def match(self, regexp: "Union[str, Pattern]") -> bool: """ Check whether the regular expression 'regexp' is found in the string representation of the exception using ``re.search``. If it matches diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index df513144982..c225eff5fb5 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -35,9 +35,6 @@ PYC_EXT = ".py" + (__debug__ and "c" or "o") PYC_TAIL = "." + PYTEST_TAG + PYC_EXT -AST_IS = ast.Is() -AST_NONE = ast.NameConstant(None) - class AssertionRewritingHook(importlib.abc.MetaPathFinder): """PEP302/PEP451 import hook which rewrites asserts.""" @@ -863,7 +860,7 @@ def warn_about_none_ast(self, node, module_path, lineno): internally already. See issue #3191 for more details. """ - val_is_none = ast.Compare(node, [AST_IS], [AST_NONE]) + val_is_none = ast.Compare(node, [ast.Is()], [ast.NameConstant(None)]) send_warning = ast.parse( """\ from _pytest.warning_types import PytestAssertRewriteWarning diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index 2d11231a4d0..3d1531e77c5 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -9,6 +9,7 @@ from contextlib import contextmanager from inspect import Parameter from inspect import signature +from typing import overload import attr import py @@ -27,9 +28,9 @@ if sys.version_info >= (3, 8): - from importlib import metadata as importlib_metadata # noqa + from importlib import metadata as importlib_metadata # noqa: F401 else: - import importlib_metadata # noqa + import importlib_metadata # noqa: F401 def _format_args(func): @@ -347,3 +348,9 @@ def funcargnames(self): warnings.warn(FUNCARGNAMES, stacklevel=2) return self.fixturenames + + +if sys.version_info < (3, 5, 2): # pragma: no cover + + def overload(f): # noqa: F811 + return f diff --git a/src/_pytest/pastebin.py b/src/_pytest/pastebin.py index ce0e73accc2..91aa5f1fdcb 100644 --- a/src/_pytest/pastebin.py +++ b/src/_pytest/pastebin.py @@ -72,7 +72,7 @@ def create_new_paste(contents): if m: return "{}/show/{}".format(url, m.group(1)) else: - return "bad response: " + response + return "bad response: " + response.decode("utf-8") def pytest_terminal_summary(terminalreporter): diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py index fbc3d914e5c..f03d45ab76c 100644 --- a/src/_pytest/python_api.py +++ b/src/_pytest/python_api.py @@ -13,7 +13,6 @@ from typing import cast from typing import Generic from typing import Optional -from typing import overload from typing import Pattern from typing import 
Tuple from typing import TypeVar @@ -22,12 +21,14 @@ from more_itertools.more import always_iterable import _pytest._code +from _pytest.compat import overload from _pytest.compat import STRING_TYPES from _pytest.outcomes import fail if False: # TYPE_CHECKING from typing import Type # noqa: F401 (used in type string) + BASE_TYPE = (type, STRING_TYPES) @@ -547,12 +548,12 @@ def _is_numpy_array(obj): def raises( expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], *, - match: Optional[Union[str, Pattern]] = ... + match: "Optional[Union[str, Pattern]]" = ... ) -> "RaisesContext[_E]": ... # pragma: no cover -@overload +@overload # noqa: F811 def raises( expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], func: Callable, @@ -563,10 +564,10 @@ def raises( ... # pragma: no cover -def raises( +def raises( # noqa: F811 expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], *args: Any, - match: Optional[Union[str, Pattern]] = None, + match: Optional[Union[str, "Pattern"]] = None, **kwargs: Any ) -> Union["RaisesContext[_E]", Optional[_pytest._code.ExceptionInfo[_E]]]: r""" @@ -724,7 +725,7 @@ def __init__( self, expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]], message: str, - match_expr: Optional[Union[str, Pattern]] = None, + match_expr: Optional[Union[str, "Pattern"]] = None, ) -> None: self.expected_exception = expected_exception self.message = message diff --git a/src/_pytest/recwarn.py b/src/_pytest/recwarn.py index 19e3938c384..58076d66b9d 100644 --- a/src/_pytest/recwarn.py +++ b/src/_pytest/recwarn.py @@ -7,11 +7,11 @@ from typing import Iterator from typing import List from typing import Optional -from typing import overload from typing import Pattern from typing import Tuple from typing import Union +from _pytest.compat import overload from _pytest.fixtures import yield_fixture from _pytest.outcomes import fail @@ -58,26 +58,26 @@ def deprecated_call(func=None, *args, **kwargs): def warns( expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]], *, - match: Optional[Union[str, Pattern]] = ... + match: "Optional[Union[str, Pattern]]" = ... ) -> "WarningsChecker": ... # pragma: no cover -@overload +@overload # noqa: F811 def warns( expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]], func: Callable, *args: Any, - match: Optional[Union[str, Pattern]] = ..., + match: Optional[Union[str, "Pattern"]] = ..., **kwargs: Any ) -> Union[Any]: ... # pragma: no cover -def warns( +def warns( # noqa: F811 expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]], *args: Any, - match: Optional[Union[str, Pattern]] = None, + match: Optional[Union[str, "Pattern"]] = None, **kwargs: Any ) -> Union["WarningsChecker", Any]: r"""Assert that code raises a particular class of warning. 
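The quoting of the ``Pattern`` annotations above, and the move of ``overload`` behind ``_pytest.compat``, serve the same goal: nothing from ``typing`` that misbehaves on Python 3.5.0/3.5.1 gets evaluated at import time. String annotations are stored verbatim and never executed at function-definition time, which the following standalone sketch (hypothetical ``find_match`` function, not part of the diff) illustrates:

.. code-block:: python

    from typing import Optional
    from typing import Pattern
    from typing import Union


    def find_match(regexp: "Optional[Union[str, Pattern]]" = None) -> bool:
        # the quoted annotation survives only as an uninterpreted string,
        # so ``Union[str, Pattern]`` is never constructed at import time
        return regexp is not None


    assert find_match.__annotations__["regexp"] == "Optional[Union[str, Pattern]]"

The repeated ``# noqa: F811`` markers are needed because, to ``flake8``, each ``@overload`` stub and the final implementation look like redefinitions of the same name.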
@@ -207,7 +207,7 @@ def __init__( expected_warning: Optional[ Union["Type[Warning]", Tuple["Type[Warning]", ...]] ] = None, - match_expr: Optional[Union[str, Pattern]] = None, + match_expr: Optional[Union[str, "Pattern"]] = None, ) -> None: super().__init__() diff --git a/testing/python/fixtures.py b/testing/python/fixtures.py index cfe2ed37d98..1f383e75253 100644 --- a/testing/python/fixtures.py +++ b/testing/python/fixtures.py @@ -13,22 +13,22 @@ def test_getfuncargnames_functions(): """Test getfuncargnames for normal functions""" def f(): - pass + raise NotImplementedError() assert not fixtures.getfuncargnames(f) def g(arg): - pass + raise NotImplementedError() assert fixtures.getfuncargnames(g) == ("arg",) def h(arg1, arg2="hello"): - pass + raise NotImplementedError() assert fixtures.getfuncargnames(h) == ("arg1",) def j(arg1, arg2, arg3="hello"): - pass + raise NotImplementedError() assert fixtures.getfuncargnames(j) == ("arg1", "arg2") @@ -38,7 +38,7 @@ def test_getfuncargnames_methods(): class A: def f(self, arg1, arg2="hello"): - pass + raise NotImplementedError() assert fixtures.getfuncargnames(A().f) == ("arg1",) @@ -49,7 +49,7 @@ def test_getfuncargnames_staticmethod(): class A: @staticmethod def static(arg1, arg2, x=1): - pass + raise NotImplementedError() assert fixtures.getfuncargnames(A.static, cls=A) == ("arg1", "arg2") @@ -59,7 +59,7 @@ def test_getfuncargnames_partial(): import functools def check(arg1, arg2, i): - pass + raise NotImplementedError() class T: test_ok = functools.partial(check, i=2) @@ -73,7 +73,7 @@ def test_getfuncargnames_staticmethod_partial(): import functools def check(arg1, arg2, i): - pass + raise NotImplementedError() class T: test_ok = staticmethod(functools.partial(check, i=2)) @@ -3325,7 +3325,7 @@ def test_fixture_disallow_twice(self): @pytest.fixture @pytest.fixture def foo(): - pass + raise NotImplementedError() class TestContextManagerFixtureFuncs: @@ -3951,7 +3951,7 @@ def test_call_fixture_function_error(): @pytest.fixture def fix(): - return 1 + raise NotImplementedError() with pytest.raises(pytest.fail.Exception): assert fix() == 1 diff --git a/testing/python/raises.py b/testing/python/raises.py index 668be57fc69..28b0715c01a 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -163,9 +163,16 @@ def test_raises_cyclic_reference(self, method): class T: def __call__(self): + # Early versions of Python 3.5 have some bug causing the + # __call__ frame to still refer to t even after everything + # is done. This makes the test pass for them. 
+ if sys.version_info < (3, 5, 2): # pragma: no cover + del self raise ValueError t = T() + refcount = len(gc.get_referrers(t)) + if method == "function": pytest.raises(ValueError, t) else: @@ -175,14 +182,7 @@ def __call__(self): # ensure both forms of pytest.raises don't leave exceptions in sys.exc_info() assert sys.exc_info() == (None, None, None) - del t - # Make sure this does get updated in locals dict - # otherwise it could keep a reference - locals() - - # ensure the t instance is not stuck in a cyclic reference - for o in gc.get_objects(): - assert type(o) is not T + assert refcount == len(gc.get_referrers(t)) def test_raises_match(self): msg = r"with base \d+" diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 8079c45a00a..bf23e32023a 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -490,7 +490,6 @@ def test_frozenzet(self): assert len(expl) > 1 def test_Sequence(self): - if not hasattr(collections_abc, "MutableSequence"): pytest.skip("cannot import MutableSequence") MutableSequence = collections_abc.MutableSequence @@ -806,9 +805,6 @@ def test_fmt_multi_newline_before_where(self): class TestTruncateExplanation: - - """ Confirm assertion output is truncated as expected """ - # The number of lines in the truncation explanation message. Used # to calculate that results have the expected length. LINES_IN_TRUNCATION_MSG = 2 @@ -969,7 +965,13 @@ def test_hello(): ) result = testdir.runpytest() result.stdout.fnmatch_lines( - ["*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*50*"] + [ + "*def test_hello():*", + "*assert x == y*", + "*E*Extra items*left*", + "*E*50*", + "*= 1 failed in*", + ] ) @@ -1302,3 +1304,23 @@ def raise_exit(obj): with pytest.raises(outcomes.Exit, match="Quitting debugger"): callequal(1, 1) + + +def test_assertion_location_with_coverage(testdir): + """This used to report the wrong location when run with coverage (#5754).""" + p = testdir.makepyfile( + """ + def test(): + assert False, 1 + assert False, 2 + """ + ) + result = testdir.runpytest(str(p)) + result.stdout.fnmatch_lines( + [ + "> assert False, 1", + "E AssertionError: 1", + "E assert False", + "*= 1 failed in*", + ] + ) diff --git a/testing/test_pastebin.py b/testing/test_pastebin.py index 9afa1e23f31..4e8bac56cb2 100644 --- a/testing/test_pastebin.py +++ b/testing/test_pastebin.py @@ -116,3 +116,15 @@ def test_create_new_paste(self, pastebin, mocked_urlopen): assert "lexer=%s" % lexer in data.decode() assert "code=full-paste-contents" in data.decode() assert "expiry=1week" in data.decode() + + def test_create_new_paste_failure(self, pastebin, monkeypatch): + import io + import urllib.request + + def response(url, data): + stream = io.BytesIO(b"something bad occurred") + return stream + + monkeypatch.setattr(urllib.request, "urlopen", response) + result = pastebin.create_new_paste(b"full-paste-contents") + assert result == "bad response: something bad occurred" diff --git a/tox.ini b/tox.ini index 6467ddacd42..15360826f95 100644 --- a/tox.ini +++ b/tox.ini @@ -118,7 +118,7 @@ commands = python scripts/release.py {posargs} description = create GitHub release after deployment basepython = python3.6 usedevelop = True -passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG +passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG TRAVIS_REPO_SLUG deps = github3.py pypandoc
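The rewritten ``test_raises_cyclic_reference`` replaces the old scan over ``gc.get_objects()`` with a cheaper invariant: count the objects referring to ``t`` before the call and assert the count is unchanged afterwards. A self-contained sketch of that technique, with a plain ``try``/``except`` standing in for ``pytest.raises``:

.. code-block:: python

    import gc


    class T:
        def __call__(self):
            raise ValueError


    t = T()
    before = len(gc.get_referrers(t))

    try:
        t()
    except ValueError:
        pass

    # a leaked frame or traceback would still refer to t and raise the count
    assert len(gc.get_referrers(t)) == before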
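``test_create_new_paste_failure`` works because ``create_new_paste`` only calls ``urllib.request.urlopen`` with a URL and a data payload and then reads the response, so swapping in an in-memory byte stream exercises the error branch without any network traffic. The same stubbing pattern in isolation (test name and URL are illustrative):

.. code-block:: python

    import io
    import urllib.request


    def test_stubbed_urlopen(monkeypatch):
        def fake_urlopen(url, data):
            # mimic the two-argument call create_new_paste makes
            return io.BytesIO(b"something bad occurred")

        monkeypatch.setattr(urllib.request, "urlopen", fake_urlopen)
        response = urllib.request.urlopen("https://example.invalid", b"payload")
        assert response.read() == b"something bad occurred"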