From 9769bc05c626dd0401e691c2c9c68f774fb990c3 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Thu, 2 Aug 2018 02:54:15 -0500 Subject: [PATCH 01/98] moving plugin inside pytest first pass --- .gitignore | 2 ++ src/_pytest/assertion/util.py | 46 +++++++++++++++++++++++++++++++++++ 2 files changed, 48 insertions(+) diff --git a/.gitignore b/.gitignore index f5cd0145cce..907876f1bba 100644 --- a/.gitignore +++ b/.gitignore @@ -44,3 +44,5 @@ coverage.xml .pydevproject .project .settings +.vscode +.envrc diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index 451e454952b..1f5a857b875 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -122,6 +122,12 @@ def isdict(x): def isset(x): return isinstance(x, (set, frozenset)) + def isdatacls(obj): + return hasattr(obj, "__dataclass_fields__") + + def isattrs(obj): + return hasattr(obj, "__attrs_attrs__") + def isiterable(obj): try: iter(obj) @@ -142,6 +148,10 @@ def isiterable(obj): explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): explanation = _compare_eq_dict(left, right, verbose) + elif type(left) == type(right) and isdatacls(left) and isdatacls(right): + explanation = _compare_eq_class(left, right, verbose, type="data") + elif type(left) == type(right) and isattrs(left) and isattrs(right): + explanation = _compare_eq_class(left, right, verbose, type="attrs") if isiterable(left) and isiterable(right): expl = _compare_eq_iterable(left, right, verbose) if explanation is not None: @@ -315,6 +325,42 @@ def _compare_eq_dict(left, right, verbose=False): return explanation +def _compare_eq_class(left, right, verbose, type=None): + # TODO account for verbose + # TODO write tests + + if type == "data": + all_fields = left.__dataclass_fields__ + fields_to_check = [field for field, info in all_fields.items() if info.compare] + elif type == "attrs": + all_fields = left.__attrs_attrs__ + fields_to_check = [field.name for field in all_fields if field.cmp] + else: + raise RuntimeError # TODO figure out what to raise + + same = [] + diff = [] + for field in fields_to_check: + if getattr(left, field) == getattr(right, field): + same.append(field) + else: + diff.append(field) + + explanation = [] + if same: + explanation += [("Common attributes:")] + explanation += pprint.pformat(same).splitlines() + if diff: + explanation += [("Differing attributes:")] + for k in diff: + class_name = left.__class__.__name__ + explanation += [ + u("%s(%s=%r) != %s(%s=%r)") + % (class_name, k, getattr(left, k), class_name, k, getattr(right, k)) + ] + return explanation + + def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] From d42f1e87c3c4b55ef5faa1a11a4f3083860c864d Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Thu, 2 Aug 2018 17:16:14 -0500 Subject: [PATCH 02/98] Add tests for attrs and dataclasses --- src/_pytest/assertion/util.py | 11 ++- testing/test_assertion.py | 139 ++++++++++++++++++++++++++++++++++ 2 files changed, 144 insertions(+), 6 deletions(-) diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index 1f5a857b875..da5d5fe97e2 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -326,9 +326,6 @@ def _compare_eq_dict(left, right, verbose=False): def _compare_eq_class(left, right, verbose, type=None): - # TODO account for verbose - # TODO write tests - if type == "data": all_fields = left.__dataclass_fields__ fields_to_check = [field for field, info in all_fields.items() if info.compare] @@ 
-336,7 +333,7 @@ def _compare_eq_class(left, right, verbose, type=None): all_fields = left.__attrs_attrs__ fields_to_check = [field.name for field in all_fields if field.cmp] else: - raise RuntimeError # TODO figure out what to raise + raise RuntimeError same = [] diff = [] @@ -347,8 +344,10 @@ def _compare_eq_class(left, right, verbose, type=None): diff.append(field) explanation = [] - if same: - explanation += [("Common attributes:")] + if same and verbose < 2: + explanation += [u("Omitting %s identical items, use -vv to show") % len(same)] + elif same: + explanation += [u("Common items:")] explanation += pprint.pformat(same).splitlines() if diff: explanation += [("Differing attributes:")] diff --git a/testing/test_assertion.py b/testing/test_assertion.py index b6c31aba2bf..87f7de2b5e8 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -6,6 +6,7 @@ import sys import textwrap +import attr import py import six @@ -548,6 +549,144 @@ def test_mojibake(self): assert msg +class TestAssert_reprcompare_dataclass(object): + @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") + def test_dataclasses(self): + from dataclasses import dataclass + + @dataclass + class SimpleDataObject: + field_a: int + field_b: str + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + lines = callequal(left, right) + assert lines[1].startswith("Omitting 1 identical item") + assert "Common items" not in lines + for line in lines[1:]: + assert "field_a" not in line + + @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") + def test_dataclasses_verbose(self): + from dataclasses import dataclass + + @dataclass + class SimpleDataObject: + field_a: int + field_b: str + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + lines = callequal(left, right, verbose=2) + assert lines[1].startswith("Common items:") + assert "Omitting" not in lines[1] + assert lines[2] == "['field_a']" + + def test_dataclasses_with_attribute_comparison_off(self): + from dataclasses import dataclass, field + + @dataclass + class SimpleDataObject: + field_a: int + field_b: str = field(compare=False) + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "b") + + lines = callequal(left, right, verbose=2) + assert lines[1].startswith("Common items:") + assert "Omitting" not in lines[1] + assert lines[2] == "['field_a']" + for line in lines[2:]: + assert "field_b" not in line + + def test_comparing_different_data_classes(self): + from dataclasses import dataclass + + @dataclass + class SimpleDataObjectOne: + field_a: int + field_b: str + + @dataclass + class SimpleDataObjectTwo: + field_a: int + field_b: str + + left = SimpleDataObjectOne(1, "b") + right = SimpleDataObjectTwo(1, "c") + + lines = callequal(left, right) + assert lines is None + + +class TestAssert_reprcompare_attrsclass(object): + def test_attrs(self): + @attr.s + class SimpleDataObject: + field_a = attr.ib() + field_b = attr.ib() + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + lines = callequal(left, right) + assert lines[1].startswith("Omitting 1 identical item") + assert "Common items" not in lines + for line in lines[1:]: + assert "field_a" not in line + + def test_attrs_verbose(self): + @attr.s + class SimpleDataObject: + field_a = attr.ib() + field_b = attr.ib() + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + lines = callequal(left, right, verbose=2) + assert lines[1].startswith("Common items:") + 
assert "Omitting" not in lines[1] + assert lines[2] == "['field_a']" + + def test_attrs_with_attribute_comparison_off(self): + @attr.s + class SimpleDataObject: + field_a = attr.ib() + field_b = attr.ib(cmp=False) + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "b") + + lines = callequal(left, right, verbose=2) + assert lines[1].startswith("Common items:") + assert "Omitting" not in lines[1] + assert lines[2] == "['field_a']" + for line in lines[2:]: + assert "field_b" not in line + + def test_comparing_different_attrs(self): + @attr.s + class SimpleDataObjectOne: + field_a = attr.ib() + field_b = attr.ib() + + @attr.s + class SimpleDataObjectTwo: + field_a = attr.ib() + field_b = attr.ib() + + left = SimpleDataObjectOne(1, "b") + right = SimpleDataObjectTwo(1, "c") + + lines = callequal(left, right) + assert lines is None + + class TestFormatExplanation(object): def test_special_chars_full(self, testdir): # Issue 453, for the bug this would raise IndexError From a0ba881c2202b4b7d49505fb5d53088ea387db30 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Thu, 2 Aug 2018 18:08:07 -0500 Subject: [PATCH 03/98] Add change to log; name to AUTHORS --- AUTHORS | 1 + changelog/3632.feature.rst | 1 + 2 files changed, 2 insertions(+) create mode 100644 changelog/3632.feature.rst diff --git a/AUTHORS b/AUTHORS index 777eda324ea..f5ba603c2ae 100644 --- a/AUTHORS +++ b/AUTHORS @@ -11,6 +11,7 @@ Alan Velasco Alexander Johnson Alexei Kozlenok Allan Feldman +Aly Sivji Anatoly Bubenkoff Anders Hovmöller Andras Tim diff --git a/changelog/3632.feature.rst b/changelog/3632.feature.rst new file mode 100644 index 00000000000..bb7918ab7f4 --- /dev/null +++ b/changelog/3632.feature.rst @@ -0,0 +1 @@ +Provide richer comparison on ``AssertionError`` for objects created using `dataclasses `_ (Python 3.7+) or `attrs package `_. From 1184db827373822863fe936b3027b225cb77ed02 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Thu, 2 Aug 2018 18:22:15 -0500 Subject: [PATCH 04/98] cleaning up --- changelog/3632.feature.rst | 2 +- testing/test_assertion.py | 2 ++ 2 files changed, 3 insertions(+), 1 deletion(-) diff --git a/changelog/3632.feature.rst b/changelog/3632.feature.rst index bb7918ab7f4..a08a6dd61a2 100644 --- a/changelog/3632.feature.rst +++ b/changelog/3632.feature.rst @@ -1 +1 @@ -Provide richer comparison on ``AssertionError`` for objects created using `dataclasses `_ (Python 3.7+) or `attrs package `_. +Richer comparison information on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+). 
diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 87f7de2b5e8..e7f3dbaf39e 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -585,6 +585,7 @@ class SimpleDataObject: assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" + @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") def test_dataclasses_with_attribute_comparison_off(self): from dataclasses import dataclass, field @@ -603,6 +604,7 @@ class SimpleDataObject: for line in lines[2:]: assert "field_b" not in line + @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") def test_comparing_different_data_classes(self): from dataclasses import dataclass From 87b019d5f91f7fb74c8a80218b4d4cf1647e61e5 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Thu, 2 Aug 2018 18:24:46 -0500 Subject: [PATCH 05/98] fix gitignore --- .gitignore | 1 - 1 file changed, 1 deletion(-) diff --git a/.gitignore b/.gitignore index 907876f1bba..e2d59502cb4 100644 --- a/.gitignore +++ b/.gitignore @@ -45,4 +45,3 @@ coverage.xml .project .settings .vscode -.envrc From 1847cc74208a58efbfd1d8154e34b2bf18987b8c Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Fri, 3 Aug 2018 09:23:50 -0500 Subject: [PATCH 06/98] adding docs and cleaning up --- changelog/3632.feature.rst | 2 +- doc/en/example/assertion/failure_demo.py | 24 +++++++++++++++++++++++ doc/en/example/assertion/test_failures.py | 2 +- src/_pytest/assertion/util.py | 4 ++-- testing/test_assertion.py | 16 +++++++-------- 5 files changed, 36 insertions(+), 12 deletions(-) diff --git a/changelog/3632.feature.rst b/changelog/3632.feature.rst index a08a6dd61a2..a715288e153 100644 --- a/changelog/3632.feature.rst +++ b/changelog/3632.feature.rst @@ -1 +1 @@ -Richer comparison information on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+). +Richer comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+). 
diff --git a/doc/en/example/assertion/failure_demo.py b/doc/en/example/assertion/failure_demo.py index 115fd3e22e2..10f8798f27b 100644 --- a/doc/en/example/assertion/failure_demo.py +++ b/doc/en/example/assertion/failure_demo.py @@ -101,6 +101,30 @@ def test_not_in_text_single_long_term(self): text = "head " * 50 + "f" * 70 + "tail " * 20 assert "f" * 70 not in text + def test_eq_dataclass(self): + from dataclasses import dataclass + + @dataclass + class Foo(object): + a: int + b: str + + left = Foo(1, "b") + right = Foo(1, "c") + assert left == right + + def test_eq_attrs(self): + import attr + + @attr.s + class Foo(object): + a = attr.ib() + b = attr.ib() + + left = Foo(1, "b") + right = Foo(1, "c") + assert left == right + def test_attribute(): class Foo(object): diff --git a/doc/en/example/assertion/test_failures.py b/doc/en/example/assertion/test_failures.py index 9ffe3166459..30ebc72dc37 100644 --- a/doc/en/example/assertion/test_failures.py +++ b/doc/en/example/assertion/test_failures.py @@ -9,5 +9,5 @@ def test_failure_demo_fails_properly(testdir): failure_demo.copy(target) failure_demo.copy(testdir.tmpdir.join(failure_demo.basename)) result = testdir.runpytest(target, syspathinsert=True) - result.stdout.fnmatch_lines(["*42 failed*"]) + result.stdout.fnmatch_lines(["*44 failed*"]) assert result.ret != 0 diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index da5d5fe97e2..ac83f600084 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -347,12 +347,12 @@ def _compare_eq_class(left, right, verbose, type=None): if same and verbose < 2: explanation += [u("Omitting %s identical items, use -vv to show") % len(same)] elif same: - explanation += [u("Common items:")] + explanation += [u("Common attributes:")] explanation += pprint.pformat(same).splitlines() if diff: + class_name = left.__class__.__name__ explanation += [("Differing attributes:")] for k in diff: - class_name = left.__class__.__name__ explanation += [ u("%s(%s=%r) != %s(%s=%r)") % (class_name, k, getattr(left, k), class_name, k, getattr(right, k)) diff --git a/testing/test_assertion.py b/testing/test_assertion.py index e7f3dbaf39e..8ddd96b94c7 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -564,7 +564,7 @@ class SimpleDataObject: lines = callequal(left, right) assert lines[1].startswith("Omitting 1 identical item") - assert "Common items" not in lines + assert "Common attributes" not in lines for line in lines[1:]: assert "field_a" not in line @@ -581,7 +581,7 @@ class SimpleDataObject: right = SimpleDataObject(1, "c") lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common items:") + assert lines[1].startswith("Common attributes:") assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" @@ -598,14 +598,14 @@ class SimpleDataObject: right = SimpleDataObject(1, "b") lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common items:") + assert lines[1].startswith("Common attributes:") assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" for line in lines[2:]: assert "field_b" not in line @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") - def test_comparing_different_data_classes(self): + def test_comparing_two_different_data_classes(self): from dataclasses import dataclass @dataclass @@ -637,7 +637,7 @@ class SimpleDataObject: lines = callequal(left, right) assert lines[1].startswith("Omitting 1 identical item") - assert "Common items" not in lines 
+ assert "Common attributes" not in lines for line in lines[1:]: assert "field_a" not in line @@ -651,7 +651,7 @@ class SimpleDataObject: right = SimpleDataObject(1, "c") lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common items:") + assert lines[1].startswith("Common attributes:") assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" @@ -665,13 +665,13 @@ class SimpleDataObject: right = SimpleDataObject(1, "b") lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common items:") + assert lines[1].startswith("Common attributes:") assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" for line in lines[2:]: assert "field_b" not in line - def test_comparing_different_attrs(self): + def test_comparing_two_different_attrs_classes(self): @attr.s class SimpleDataObjectOne: field_a = attr.ib() From a3e388a73a4ef6b712c442c4147633322790b80d Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Fri, 3 Aug 2018 10:28:46 -0500 Subject: [PATCH 07/98] Improve changelog --- changelog/3632.feature.rst | 2 +- src/_pytest/assertion/util.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/3632.feature.rst b/changelog/3632.feature.rst index a715288e153..023fa360715 100644 --- a/changelog/3632.feature.rst +++ b/changelog/3632.feature.rst @@ -1 +1 @@ -Richer comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+). +Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+, `backported to Python 3.6 `_). diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index ac83f600084..4536cd0dd2d 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -333,7 +333,7 @@ def _compare_eq_class(left, right, verbose, type=None): all_fields = left.__attrs_attrs__ fields_to_check = [field.name for field in all_fields if field.cmp] else: - raise RuntimeError + raise RuntimeError("Unexpected value for `type` paramater") same = [] diff = [] From 025d160dfc0ce87482606b27762c47d47e4a2d5a Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Fri, 3 Aug 2018 11:29:45 -0500 Subject: [PATCH 08/98] Update tests to pass in py27 --- changelog/3632.feature.rst | 2 +- testing/test_assertion.py | 46 +++++++++++++++++++------------------- 2 files changed, 24 insertions(+), 24 deletions(-) diff --git a/changelog/3632.feature.rst b/changelog/3632.feature.rst index 023fa360715..cb1d93750c6 100644 --- a/changelog/3632.feature.rst +++ b/changelog/3632.feature.rst @@ -1 +1 @@ -Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+, `backported to Python 3.6 `_). +Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+, `backported to 3.6 `_). 
diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 8ddd96b94c7..fdcfccfed29 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -552,12 +552,12 @@ def test_mojibake(self): class TestAssert_reprcompare_dataclass(object): @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") def test_dataclasses(self): - from dataclasses import dataclass + from dataclasses import dataclass, field @dataclass - class SimpleDataObject: - field_a: int - field_b: str + class SimpleDataObject(object): + field_a = field() + field_b = field() left = SimpleDataObject(1, "b") right = SimpleDataObject(1, "c") @@ -570,12 +570,12 @@ class SimpleDataObject: @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") def test_dataclasses_verbose(self): - from dataclasses import dataclass + from dataclasses import dataclass, field @dataclass - class SimpleDataObject: - field_a: int - field_b: str + class SimpleDataObject(object): + field_a = field() + field_b = field() left = SimpleDataObject(1, "b") right = SimpleDataObject(1, "c") @@ -590,9 +590,9 @@ def test_dataclasses_with_attribute_comparison_off(self): from dataclasses import dataclass, field @dataclass - class SimpleDataObject: - field_a: int - field_b: str = field(compare=False) + class SimpleDataObject(object): + field_a = field() + field_b = field(compare=False) left = SimpleDataObject(1, "b") right = SimpleDataObject(1, "b") @@ -606,17 +606,17 @@ class SimpleDataObject: @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") def test_comparing_two_different_data_classes(self): - from dataclasses import dataclass + from dataclasses import dataclass, field @dataclass - class SimpleDataObjectOne: - field_a: int - field_b: str + class SimpleDataObjectOne(object): + field_a = field() + field_b = field() @dataclass - class SimpleDataObjectTwo: - field_a: int - field_b: str + class SimpleDataObjectTwo(object): + field_a = field() + field_b = field() left = SimpleDataObjectOne(1, "b") right = SimpleDataObjectTwo(1, "c") @@ -628,7 +628,7 @@ class SimpleDataObjectTwo: class TestAssert_reprcompare_attrsclass(object): def test_attrs(self): @attr.s - class SimpleDataObject: + class SimpleDataObject(object): field_a = attr.ib() field_b = attr.ib() @@ -643,7 +643,7 @@ class SimpleDataObject: def test_attrs_verbose(self): @attr.s - class SimpleDataObject: + class SimpleDataObject(object): field_a = attr.ib() field_b = attr.ib() @@ -657,7 +657,7 @@ class SimpleDataObject: def test_attrs_with_attribute_comparison_off(self): @attr.s - class SimpleDataObject: + class SimpleDataObject(object): field_a = attr.ib() field_b = attr.ib(cmp=False) @@ -673,12 +673,12 @@ class SimpleDataObject: def test_comparing_two_different_attrs_classes(self): @attr.s - class SimpleDataObjectOne: + class SimpleDataObjectOne(object): field_a = attr.ib() field_b = attr.ib() @attr.s - class SimpleDataObjectTwo: + class SimpleDataObjectTwo(object): field_a = attr.ib() field_b = attr.ib() From e1e81e315e41fd4674b9e10c2bd4074734d3692b Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Sat, 4 Aug 2018 08:29:55 -0500 Subject: [PATCH 09/98] code review 1/n -- change hasattr to getattr --- src/_pytest/assertion/util.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index 4536cd0dd2d..b6867436e74 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -123,10 +123,10 @@ def isset(x): 
return isinstance(x, (set, frozenset)) def isdatacls(obj): - return hasattr(obj, "__dataclass_fields__") + return getattr(obj, "__dataclass_fields__", None) is not None def isattrs(obj): - return hasattr(obj, "__attrs_attrs__") + return getattr(obj, "__attrs_attrs__", None) is not None def isiterable(obj): try: From a663f60b054fa791c1cb9729b68dbddedfdff26f Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Mon, 10 Sep 2018 15:33:37 -0500 Subject: [PATCH 10/98] cr 2/n -- refactor compare eq class --- src/_pytest/assertion/util.py | 16 +++++++--------- 1 file changed, 7 insertions(+), 9 deletions(-) diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index b6867436e74..9984b512075 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -148,10 +148,9 @@ def isiterable(obj): explanation = _compare_eq_set(left, right, verbose) elif isdict(left) and isdict(right): explanation = _compare_eq_dict(left, right, verbose) - elif type(left) == type(right) and isdatacls(left) and isdatacls(right): - explanation = _compare_eq_class(left, right, verbose, type="data") - elif type(left) == type(right) and isattrs(left) and isattrs(right): - explanation = _compare_eq_class(left, right, verbose, type="attrs") + elif type(left) == type(right) and (isdatacls(left) or isattrs(left)): + type_fn = (isdatacls, isattrs) + explanation = _compare_eq_cls(left, right, verbose, type_fn) if isiterable(left) and isiterable(right): expl = _compare_eq_iterable(left, right, verbose) if explanation is not None: @@ -325,15 +324,14 @@ def _compare_eq_dict(left, right, verbose=False): return explanation -def _compare_eq_class(left, right, verbose, type=None): - if type == "data": +def _compare_eq_cls(left, right, verbose, type_fns): + isdatacls, isattrs = type_fns + if isdatacls(left): all_fields = left.__dataclass_fields__ fields_to_check = [field for field, info in all_fields.items() if info.compare] - elif type == "attrs": + elif isattrs(left): all_fields = left.__attrs_attrs__ fields_to_check = [field.name for field in all_fields if field.cmp] - else: - raise RuntimeError("Unexpected value for `type` paramater") same = [] diff = [] From 4e99c80425024dfcf733e060c5ce9c53ff49c475 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Mon, 12 Nov 2018 11:24:15 -0600 Subject: [PATCH 11/98] have tests pass in python37; move to separate file --- src/_pytest/assertion/util.py | 8 ++++---- testing/test_assertion.py | 20 ++++++++++---------- 2 files changed, 14 insertions(+), 14 deletions(-) diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index 9984b512075..4e10a3fbb01 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -343,16 +343,16 @@ def _compare_eq_cls(left, right, verbose, type_fns): explanation = [] if same and verbose < 2: - explanation += [u("Omitting %s identical items, use -vv to show") % len(same)] + explanation.append(u"Omitting %s identical items, use -vv to show" % len(same)) elif same: - explanation += [u("Common attributes:")] + explanation += [u"Common attributes:"] explanation += pprint.pformat(same).splitlines() if diff: class_name = left.__class__.__name__ - explanation += [("Differing attributes:")] + explanation += [u"Differing attributes:"] for k in diff: explanation += [ - u("%s(%s=%r) != %s(%s=%r)") + (u"%s(%s=%r) != %s(%s=%r)") % (class_name, k, getattr(left, k), class_name, k, getattr(right, k)) ] return explanation diff --git a/testing/test_assertion.py b/testing/test_assertion.py index fdcfccfed29..4a09187b758 
100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -556,8 +556,8 @@ def test_dataclasses(self): @dataclass class SimpleDataObject(object): - field_a = field() - field_b = field() + field_a: int = field() + field_b: int = field() left = SimpleDataObject(1, "b") right = SimpleDataObject(1, "c") @@ -574,8 +574,8 @@ def test_dataclasses_verbose(self): @dataclass class SimpleDataObject(object): - field_a = field() - field_b = field() + field_a: int = field() + field_b: int = field() left = SimpleDataObject(1, "b") right = SimpleDataObject(1, "c") @@ -591,8 +591,8 @@ def test_dataclasses_with_attribute_comparison_off(self): @dataclass class SimpleDataObject(object): - field_a = field() - field_b = field(compare=False) + field_a: int = field() + field_b: int = field(compare=False) left = SimpleDataObject(1, "b") right = SimpleDataObject(1, "b") @@ -610,13 +610,13 @@ def test_comparing_two_different_data_classes(self): @dataclass class SimpleDataObjectOne(object): - field_a = field() - field_b = field() + field_a: int = field() + field_b: int = field() @dataclass class SimpleDataObjectTwo(object): - field_a = field() - field_b = field() + field_a: int = field() + field_b: int = field() left = SimpleDataObjectOne(1, "b") right = SimpleDataObjectTwo(1, "c") From 2bffd6829eb38a44f9e7523d044dad7339830691 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Mon, 12 Nov 2018 15:36:16 -0600 Subject: [PATCH 12/98] Move dataclass tests for 3.7 to separate file --- .../dataclasses/test_compare_dataclasses.py | 14 +++ ...ompare_dataclasses_field_comparison_off.py | 14 +++ .../test_compare_dataclasses_verbose.py | 14 +++ .../test_compare_two_different_dataclasses.py | 19 ++++ testing/test_assertion.py | 101 ++++++------------ 5 files changed, 96 insertions(+), 66 deletions(-) create mode 100644 testing/example_scripts/dataclasses/test_compare_dataclasses.py create mode 100644 testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py create mode 100644 testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py create mode 100644 testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py diff --git a/testing/example_scripts/dataclasses/test_compare_dataclasses.py b/testing/example_scripts/dataclasses/test_compare_dataclasses.py new file mode 100644 index 00000000000..3bbebe2aa26 --- /dev/null +++ b/testing/example_scripts/dataclasses/test_compare_dataclasses.py @@ -0,0 +1,14 @@ +from dataclasses import dataclass +from dataclasses import field + + +def test_dataclasses(): + @dataclass + class SimpleDataObject(object): + field_a: int = field() + field_b: int = field() + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + assert left == right diff --git a/testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py b/testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py new file mode 100644 index 00000000000..63b9f534e6e --- /dev/null +++ b/testing/example_scripts/dataclasses/test_compare_dataclasses_field_comparison_off.py @@ -0,0 +1,14 @@ +from dataclasses import dataclass +from dataclasses import field + + +def test_dataclasses_with_attribute_comparison_off(): + @dataclass + class SimpleDataObject(object): + field_a: int = field() + field_b: int = field(compare=False) + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + assert left == right diff --git a/testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py 
b/testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py new file mode 100644 index 00000000000..17835c0c3fc --- /dev/null +++ b/testing/example_scripts/dataclasses/test_compare_dataclasses_verbose.py @@ -0,0 +1,14 @@ +from dataclasses import dataclass +from dataclasses import field + + +def test_dataclasses_verbose(): + @dataclass + class SimpleDataObject(object): + field_a: int = field() + field_b: int = field() + + left = SimpleDataObject(1, "b") + right = SimpleDataObject(1, "c") + + assert left == right diff --git a/testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py b/testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py new file mode 100644 index 00000000000..24f185d8ac4 --- /dev/null +++ b/testing/example_scripts/dataclasses/test_compare_two_different_dataclasses.py @@ -0,0 +1,19 @@ +from dataclasses import dataclass +from dataclasses import field + + +def test_comparing_two_different_data_classes(): + @dataclass + class SimpleDataObjectOne(object): + field_a: int = field() + field_b: int = field() + + @dataclass + class SimpleDataObjectTwo(object): + field_a: int = field() + field_b: int = field() + + left = SimpleDataObjectOne(1, "b") + right = SimpleDataObjectTwo(1, "c") + + assert left != right diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 4a09187b758..2a55f70bcca 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -551,78 +551,47 @@ def test_mojibake(self): class TestAssert_reprcompare_dataclass(object): @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") - def test_dataclasses(self): - from dataclasses import dataclass, field - - @dataclass - class SimpleDataObject(object): - field_a: int = field() - field_b: int = field() - - left = SimpleDataObject(1, "b") - right = SimpleDataObject(1, "c") - - lines = callequal(left, right) - assert lines[1].startswith("Omitting 1 identical item") - assert "Common attributes" not in lines - for line in lines[1:]: - assert "field_a" not in line + def test_dataclasses(self, testdir): + p = testdir.copy_example("dataclasses/test_compare_dataclasses.py") + result = testdir.runpytest(p) + result.assert_outcomes(failed=1, passed=0) + result.stdout.fnmatch_lines( + [ + "*Omitting 1 identical items, use -vv to show*", + "*Differing attributes:*", + "*SimpleDataObject(field_b='b') != SimpleDataObject(field_b='c')*", + ] + ) @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") - def test_dataclasses_verbose(self): - from dataclasses import dataclass, field - - @dataclass - class SimpleDataObject(object): - field_a: int = field() - field_b: int = field() - - left = SimpleDataObject(1, "b") - right = SimpleDataObject(1, "c") - - lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common attributes:") - assert "Omitting" not in lines[1] - assert lines[2] == "['field_a']" + def test_dataclasses_verbose(self, testdir): + p = testdir.copy_example("dataclasses/test_compare_dataclasses_verbose.py") + result = testdir.runpytest(p, "-vv") + result.assert_outcomes(failed=1, passed=0) + result.stdout.fnmatch_lines( + [ + "*Common attributes:*", + "*['field_a']*", + "*Differing attributes:*", + "*SimpleDataObject(field_b='b') != SimpleDataObject(field_b='c')*", + ] + ) @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") - def test_dataclasses_with_attribute_comparison_off(self): - from dataclasses import dataclass, field - - @dataclass - 
class SimpleDataObject(object): - field_a: int = field() - field_b: int = field(compare=False) - - left = SimpleDataObject(1, "b") - right = SimpleDataObject(1, "b") - - lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common attributes:") - assert "Omitting" not in lines[1] - assert lines[2] == "['field_a']" - for line in lines[2:]: - assert "field_b" not in line + def test_dataclasses_with_attribute_comparison_off(self, testdir): + p = testdir.copy_example( + "dataclasses/test_compare_dataclasses_field_comparison_off.py" + ) + result = testdir.runpytest(p, "-vv") + result.assert_outcomes(failed=0, passed=1) @pytest.mark.skipif(sys.version_info < (3, 7), reason="Dataclasses in Python3.7+") - def test_comparing_two_different_data_classes(self): - from dataclasses import dataclass, field - - @dataclass - class SimpleDataObjectOne(object): - field_a: int = field() - field_b: int = field() - - @dataclass - class SimpleDataObjectTwo(object): - field_a: int = field() - field_b: int = field() - - left = SimpleDataObjectOne(1, "b") - right = SimpleDataObjectTwo(1, "c") - - lines = callequal(left, right) - assert lines is None + def test_comparing_two_different_data_classes(self, testdir): + p = testdir.copy_example( + "dataclasses/test_compare_two_different_dataclasses.py" + ) + result = testdir.runpytest(p, "-vv") + result.assert_outcomes(failed=0, passed=1) class TestAssert_reprcompare_attrsclass(object): From b83e97802e91ff74c6a437bb076de65bf2423fa1 Mon Sep 17 00:00:00 2001 From: Aly Sivji Date: Tue, 13 Nov 2018 09:37:02 -0600 Subject: [PATCH 13/98] improve failure output --- src/_pytest/assertion/util.py | 8 +++----- testing/test_assertion.py | 12 ++++++------ 2 files changed, 9 insertions(+), 11 deletions(-) diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index 4e10a3fbb01..3ec9a365aad 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -345,15 +345,13 @@ def _compare_eq_cls(left, right, verbose, type_fns): if same and verbose < 2: explanation.append(u"Omitting %s identical items, use -vv to show" % len(same)) elif same: - explanation += [u"Common attributes:"] + explanation += [u"Matching attributes:"] explanation += pprint.pformat(same).splitlines() if diff: - class_name = left.__class__.__name__ explanation += [u"Differing attributes:"] - for k in diff: + for field in diff: explanation += [ - (u"%s(%s=%r) != %s(%s=%r)") - % (class_name, k, getattr(left, k), class_name, k, getattr(right, k)) + (u"%s: %r != %r") % (field, getattr(left, field), getattr(right, field)) ] return explanation diff --git a/testing/test_assertion.py b/testing/test_assertion.py index 2a55f70bcca..bb54e394f2d 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -559,7 +559,7 @@ def test_dataclasses(self, testdir): [ "*Omitting 1 identical items, use -vv to show*", "*Differing attributes:*", - "*SimpleDataObject(field_b='b') != SimpleDataObject(field_b='c')*", + "*field_b: 'b' != 'c'*", ] ) @@ -570,10 +570,10 @@ def test_dataclasses_verbose(self, testdir): result.assert_outcomes(failed=1, passed=0) result.stdout.fnmatch_lines( [ - "*Common attributes:*", + "*Matching attributes:*", "*['field_a']*", "*Differing attributes:*", - "*SimpleDataObject(field_b='b') != SimpleDataObject(field_b='c')*", + "*field_b: 'b' != 'c'*", ] ) @@ -606,7 +606,7 @@ class SimpleDataObject(object): lines = callequal(left, right) assert lines[1].startswith("Omitting 1 identical item") - assert "Common attributes" not in lines + assert 
"Matching attributes" not in lines for line in lines[1:]: assert "field_a" not in line @@ -620,7 +620,7 @@ class SimpleDataObject(object): right = SimpleDataObject(1, "c") lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common attributes:") + assert lines[1].startswith("Matching attributes:") assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" @@ -634,7 +634,7 @@ class SimpleDataObject(object): right = SimpleDataObject(1, "b") lines = callequal(left, right, verbose=2) - assert lines[1].startswith("Common attributes:") + assert lines[1].startswith("Matching attributes:") assert "Omitting" not in lines[1] assert lines[2] == "['field_a']" for line in lines[2:]: From ed91d5f086cfc8e707098e4032c098125b376f20 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Thu, 8 Nov 2018 18:52:15 +0100 Subject: [PATCH 14/98] config: set invocation_dir in the constructor already This allows to make use of it when determining the rootdir etc. --- src/_pytest/config/__init__.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 60b455bb3cd..ba11c8055a4 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -609,6 +609,7 @@ def __init__(self, pluginmanager): self._warn = self.pluginmanager._warn self.pluginmanager.register(self, "pytestconfig") self._configured = False + self.invocation_dir = py.path.local() def do_setns(dic): import pytest @@ -731,7 +732,6 @@ def _initini(self, args): self.rootdir, self.inifile, self.inicfg = r self._parser.extra_info["rootdir"] = self.rootdir self._parser.extra_info["inifile"] = self.inifile - self.invocation_dir = py.path.local() self._parser.addini("addopts", "extra command line options", "args") self._parser.addini("minversion", "minimally required pytest version") self._override_ini = ns.override_ini or () From ba457f5febe2f0b99330e696acb636d896f613b2 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Fri, 9 Nov 2018 03:44:39 +0100 Subject: [PATCH 15/98] Optimize/revisit determine_setup --- src/_pytest/config/findpaths.py | 26 ++++++++++++++++---------- 1 file changed, 16 insertions(+), 10 deletions(-) diff --git a/src/_pytest/config/findpaths.py b/src/_pytest/config/findpaths.py index 4f371ec7f6b..bd18994660b 100644 --- a/src/_pytest/config/findpaths.py +++ b/src/_pytest/config/findpaths.py @@ -121,28 +121,34 @@ def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): break except KeyError: inicfg = None - rootdir = get_common_ancestor(dirs) + if rootdir_cmd_arg is None: + rootdir = get_common_ancestor(dirs) else: ancestor = get_common_ancestor(dirs) rootdir, inifile, inicfg = getcfg([ancestor], config=config) - if rootdir is None: - for rootdir in ancestor.parts(reverse=True): - if rootdir.join("setup.py").exists(): + if rootdir is None and rootdir_cmd_arg is None: + for possible_rootdir in ancestor.parts(reverse=True): + if possible_rootdir.join("setup.py").exists(): + rootdir = possible_rootdir break else: - rootdir, inifile, inicfg = getcfg(dirs, config=config) + if dirs != [ancestor]: + rootdir, inifile, inicfg = getcfg(dirs, config=config) if rootdir is None: - rootdir = get_common_ancestor([py.path.local(), ancestor]) + if config is not None: + cwd = config.invocation_dir + else: + cwd = py.path.local() + rootdir = get_common_ancestor([cwd, ancestor]) is_fs_root = os.path.splitdrive(str(rootdir))[1] == "/" if is_fs_root: rootdir = ancestor if rootdir_cmd_arg: - rootdir_abs_path = 
py.path.local(os.path.expandvars(rootdir_cmd_arg)) - if not os.path.isdir(str(rootdir_abs_path)): + rootdir = py.path.local(os.path.expandvars(rootdir_cmd_arg)) + if not rootdir.isdir(): raise UsageError( "Directory '{}' not found. Check your '--rootdir' option.".format( - rootdir_abs_path + rootdir ) ) - rootdir = rootdir_abs_path return rootdir, inifile, inicfg or {} From 1a8d9bf2549ab2c74aec4000599b8d6afc3a631b Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Wed, 7 Nov 2018 12:08:23 +0000 Subject: [PATCH 16/98] Let approx() work on more generic sequences approx() was updated in 9f3122fe to work better with numpy arrays, however at the same time the requirements were tightened from requiring an Iterable to requiring a Sequence - the former being tested only on interface, while the latter requires subclassing or registration with the abc. Since the ApproxSequence only used __iter__ and __len__ this commit reduces the requirement to only what's used, and allows unregistered Sequence-like containers to be used. Since numpy arrays qualify for the new criteria, reorder the checks so that generic sequences are checked for after numpy arrays. --- AUTHORS | 1 + changelog/4327.bugfix.rst | 1 + src/_pytest/compat.py | 4 ++-- src/_pytest/python_api.py | 13 +++++++++---- testing/python/approx.py | 10 ++++++++++ 5 files changed, 23 insertions(+), 6 deletions(-) create mode 100644 changelog/4327.bugfix.rst diff --git a/AUTHORS b/AUTHORS index 777eda324ea..3a89b987a8f 100644 --- a/AUTHORS +++ b/AUTHORS @@ -163,6 +163,7 @@ Miro Hrončok Nathaniel Waisbrot Ned Batchelder Neven Mundar +Nicholas Devenish Niclas Olofsson Nicolas Delaby Oleg Pidsadnyi diff --git a/changelog/4327.bugfix.rst b/changelog/4327.bugfix.rst new file mode 100644 index 00000000000..0b498cd09f8 --- /dev/null +++ b/changelog/4327.bugfix.rst @@ -0,0 +1 @@ +Loosen the definition of what ``approx`` considers a sequence diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index ead9ffd8d80..29cab7dd2c6 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -45,11 +45,11 @@ if _PY3: from collections.abc import MutableMapping as MappingMixin - from collections.abc import Mapping, Sequence + from collections.abc import Iterable, Mapping, Sequence, Sized else: # those raise DeprecationWarnings in Python >=3.7 from collections import MutableMapping as MappingMixin # noqa - from collections import Mapping, Sequence # noqa + from collections import Iterable, Mapping, Sequence, Sized # noqa if sys.version_info >= (3, 4): diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py index 805cd85ad41..9eb988b244e 100644 --- a/src/_pytest/python_api.py +++ b/src/_pytest/python_api.py @@ -11,8 +11,9 @@ import _pytest._code from _pytest.compat import isclass +from _pytest.compat import Iterable from _pytest.compat import Mapping -from _pytest.compat import Sequence +from _pytest.compat import Sized from _pytest.compat import STRING_TYPES from _pytest.outcomes import fail @@ -182,7 +183,7 @@ def _check_type(self): raise _non_numeric_type_error(self.expected, at="key={!r}".format(key)) -class ApproxSequence(ApproxBase): +class ApproxSequencelike(ApproxBase): """ Perform approximate comparisons where the expected value is a sequence of numbers. 
@@ -518,10 +519,14 @@ def approx(expected, rel=None, abs=None, nan_ok=False): cls = ApproxScalar elif isinstance(expected, Mapping): cls = ApproxMapping - elif isinstance(expected, Sequence) and not isinstance(expected, STRING_TYPES): - cls = ApproxSequence elif _is_numpy_array(expected): cls = ApproxNumpy + elif ( + isinstance(expected, Iterable) + and isinstance(expected, Sized) + and not isinstance(expected, STRING_TYPES) + ): + cls = ApproxSequencelike else: raise _non_numeric_type_error(expected, at=None) diff --git a/testing/python/approx.py b/testing/python/approx.py index 96433d52bf0..0a91bb08fe6 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -496,3 +496,13 @@ def test_numpy_scalar_with_array(self): assert actual != approx(expected, rel=5e-8, abs=0) assert approx(expected, rel=5e-7, abs=0) == actual assert approx(expected, rel=5e-8, abs=0) != actual + + def test_generic_iterable_sized_object(self): + class newIterable(object): + def __iter__(self): + return iter([1, 2, 3, 4]) + + def __len__(self): + return 4 + + assert [1, 2, 3, 4] == approx(newIterable()) From 4eddf634e7c3631a37b3415aa8df1673e18114df Mon Sep 17 00:00:00 2001 From: Nicholas Devenish Date: Sun, 18 Nov 2018 19:59:58 +0000 Subject: [PATCH 17/98] Rename and split out the testing, and reword the changelog. --- changelog/4327.bugfix.rst | 2 +- testing/python/approx.py | 7 ++++--- 2 files changed, 5 insertions(+), 4 deletions(-) diff --git a/changelog/4327.bugfix.rst b/changelog/4327.bugfix.rst index 0b498cd09f8..72223af4ee5 100644 --- a/changelog/4327.bugfix.rst +++ b/changelog/4327.bugfix.rst @@ -1 +1 @@ -Loosen the definition of what ``approx`` considers a sequence +``approx`` again works with more generic containers, more precisely instances of ``Iterable`` and ``Sized`` instead of more restrictive ``Sequence``. diff --git a/testing/python/approx.py b/testing/python/approx.py index 0a91bb08fe6..26e6a4ab209 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -497,12 +497,13 @@ def test_numpy_scalar_with_array(self): assert approx(expected, rel=5e-7, abs=0) == actual assert approx(expected, rel=5e-8, abs=0) != actual - def test_generic_iterable_sized_object(self): - class newIterable(object): + def test_generic_sized_iterable_object(self): + class MySizedIterable(object): def __iter__(self): return iter([1, 2, 3, 4]) def __len__(self): return 4 - assert [1, 2, 3, 4] == approx(newIterable()) + expected = MySizedIterable() + assert [1, 2, 3, 4] == approx(expected) From 92a2884b09eb793a390460012e6e2859da87767a Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Sun, 18 Nov 2018 20:18:42 +0100 Subject: [PATCH 18/98] pdb: support kwargs with `pdb.set_trace` This handles `header` similar to Python 3.7 does it, and forwards any other keyword arguments to the Pdb constructor. This allows for `__import__("pdb").set_trace(skip=["foo.*"])`. Fixes https://github.com/pytest-dev/pytest/issues/4416. 
--- changelog/4416.feature.rst | 6 ++++++ src/_pytest/debugging.py | 28 ++++++++++++++++++---------- testing/test_pdb.py | 31 ++++++++++++++++++++++++++++++- 3 files changed, 54 insertions(+), 11 deletions(-) create mode 100644 changelog/4416.feature.rst diff --git a/changelog/4416.feature.rst b/changelog/4416.feature.rst new file mode 100644 index 00000000000..89c0a84b10e --- /dev/null +++ b/changelog/4416.feature.rst @@ -0,0 +1,6 @@ +pdb: support keyword arguments with ``pdb.set_trace`` + +It handles ``header`` similar to Python 3.7 does it, and forwards any +other keyword arguments to the ``Pdb`` constructor. + +This allows for ``__import__("pdb").set_trace(skip=["foo.*"])``. diff --git a/src/_pytest/debugging.py b/src/_pytest/debugging.py index fe54d493955..adf9d0e54da 100644 --- a/src/_pytest/debugging.py +++ b/src/_pytest/debugging.py @@ -77,18 +77,21 @@ class pytestPDB(object): _saved = [] @classmethod - def set_trace(cls, set_break=True): - """ invoke PDB set_trace debugging, dropping any IO capturing. """ + def _init_pdb(cls, *args, **kwargs): + """ Initialize PDB debugging, dropping any IO capturing. """ import _pytest.config - frame = sys._getframe().f_back if cls._pluginmanager is not None: capman = cls._pluginmanager.getplugin("capturemanager") if capman: capman.suspend_global_capture(in_=True) tw = _pytest.config.create_terminal_writer(cls._config) tw.line() - if capman and capman.is_globally_capturing(): + # Handle header similar to pdb.set_trace in py37+. + header = kwargs.pop("header", None) + if header is not None: + tw.sep(">", header) + elif capman and capman.is_globally_capturing(): tw.sep(">", "PDB set_trace (IO-capturing turned off)") else: tw.sep(">", "PDB set_trace") @@ -129,13 +132,18 @@ def setup(self, f, tb): self._pytest_capman.suspend_global_capture(in_=True) return ret - _pdb = _PdbWrapper() + _pdb = _PdbWrapper(**kwargs) cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config, pdb=_pdb) else: - _pdb = cls._pdb_cls() + _pdb = cls._pdb_cls(**kwargs) + return _pdb - if set_break: - _pdb.set_trace(frame) + @classmethod + def set_trace(cls, *args, **kwargs): + """Invoke debugging via ``Pdb.set_trace``, dropping any IO capturing.""" + frame = sys._getframe().f_back + _pdb = cls._init_pdb(*args, **kwargs) + _pdb.set_trace(frame) class PdbInvoke(object): @@ -161,9 +169,9 @@ def pytest_pyfunc_call(self, pyfuncitem): def _test_pytest_function(pyfuncitem): - pytestPDB.set_trace(set_break=False) + _pdb = pytestPDB._init_pdb() testfunction = pyfuncitem.obj - pyfuncitem.obj = pdb.runcall + pyfuncitem.obj = _pdb.runcall if pyfuncitem._isyieldedfunction(): arg_list = list(pyfuncitem._args) arg_list.insert(0, testfunction) diff --git a/testing/test_pdb.py b/testing/test_pdb.py index dd349454b11..41b4badda43 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -390,6 +390,28 @@ def test_1(): assert "hello17" in rest # out is captured self.flush(child) + def test_pdb_set_trace_kwargs(self, testdir): + p1 = testdir.makepyfile( + """ + import pytest + def test_1(): + i = 0 + print("hello17") + pytest.set_trace(header="== my_header ==") + x = 3 + """ + ) + child = testdir.spawn_pytest(str(p1)) + child.expect("== my_header ==") + assert "PDB set_trace" not in child.before.decode() + child.expect("Pdb") + child.sendeof() + rest = child.read().decode("utf-8") + assert "1 failed" in rest + assert "def test_1" in rest + assert "hello17" in rest # out is captured + self.flush(child) + def test_pdb_set_trace_interception(self, testdir): p1 = testdir.makepyfile( """ @@ 
-634,6 +656,12 @@ def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch): testdir.makepyfile( custom_pdb=""" class CustomPdb(object): + def __init__(self, *args, **kwargs): + skip = kwargs.pop("skip") + assert skip == ["foo.*"] + print("__init__") + super(CustomPdb, self).__init__(*args, **kwargs) + def set_trace(*args, **kwargs): print('custom set_trace>') """ @@ -643,12 +671,13 @@ def set_trace(*args, **kwargs): import pytest def test_foo(): - pytest.set_trace() + pytest.set_trace(skip=['foo.*']) """ ) monkeypatch.setenv("PYTHONPATH", str(testdir.tmpdir)) child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1)) + child.expect("__init__") child.expect("custom set_trace>") self.flush(child) From ba17363d7585c3990d3f0c3796d747ff5f95433f Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Mon, 19 Nov 2018 14:02:37 +0100 Subject: [PATCH 19/98] remove pytest namespace hook --- changelog/4421.removal.rst | 1 + src/_pytest/config/__init__.py | 7 ------ src/_pytest/deprecated.py | 3 --- src/_pytest/hookspec.py | 28 ----------------------- testing/test_pluginmanager.py | 42 ---------------------------------- 5 files changed, 1 insertion(+), 80 deletions(-) create mode 100644 changelog/4421.removal.rst diff --git a/changelog/4421.removal.rst b/changelog/4421.removal.rst new file mode 100644 index 00000000000..be0704faad4 --- /dev/null +++ b/changelog/4421.removal.rst @@ -0,0 +1 @@ +Remove the implementation of the pytest_namespace hook. diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index ba11c8055a4..1d0cdffc9a9 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -610,13 +610,6 @@ def __init__(self, pluginmanager): self.pluginmanager.register(self, "pytestconfig") self._configured = False self.invocation_dir = py.path.local() - - def do_setns(dic): - import pytest - - setns(pytest, dic) - - self.hook.pytest_namespace.call_historic(do_setns, {}) self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser)) def add_cleanup(self, func): diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 8d7a17bcade..a343662809f 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -113,9 +113,6 @@ "Please move it to the top level conftest file instead." ) -PYTEST_NAMESPACE = RemovedInPytest4Warning( - "pytest_namespace is deprecated and will be removed soon" -) PYTEST_ENSURETEMP = RemovedInPytest4Warning( "pytest/tmpdir_factory.ensuretemp is deprecated, \n" diff --git a/src/_pytest/hookspec.py b/src/_pytest/hookspec.py index 625f59e5a0a..0d9f039a173 100644 --- a/src/_pytest/hookspec.py +++ b/src/_pytest/hookspec.py @@ -1,8 +1,6 @@ """ hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ from pluggy import HookspecMarker -from .deprecated import PYTEST_NAMESPACE - hookspec = HookspecMarker("pytest") @@ -24,32 +22,6 @@ def pytest_addhooks(pluginmanager): """ -@hookspec(historic=True, warn_on_impl=PYTEST_NAMESPACE) -def pytest_namespace(): - """ - return dict of name->object to be made globally available in - the pytest namespace. - - This hook is called at plugin registration time. - - .. note:: - This hook is incompatible with ``hookwrapper=True``. - - .. warning:: - This hook has been **deprecated** and will be removed in pytest 4.0. - - Plugins whose users depend on the current namespace functionality should prepare to migrate to a - namespace they actually own. 
- - To support the migration it's suggested to trigger ``DeprecationWarnings`` for objects they put into the - pytest namespace. - - A stopgap measure to avoid the warning is to monkeypatch the ``pytest`` module, but just as the - ``pytest_namespace`` hook this should be seen as a temporary measure to be removed in future versions after - an appropriate transition period. - """ - - @hookspec(historic=True) def pytest_plugin_registered(plugin, manager): """ a new pytest plugin got registered. diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 8e35290b759..64d05d383b4 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -59,37 +59,6 @@ def pytest_addhooks(pluginmanager): assert res.ret != 0 res.stderr.fnmatch_lines(["*did not find*sys*"]) - def test_namespace_early_from_import(self, testdir): - p = testdir.makepyfile( - """ - from pytest import Item - from pytest import Item as Item2 - assert Item is Item2 - """ - ) - result = testdir.runpython(p) - assert result.ret == 0 - - @pytest.mark.filterwarnings("ignore:pytest_namespace is deprecated") - def test_do_ext_namespace(self, testdir): - testdir.makeconftest( - """ - def pytest_namespace(): - return {'hello': 'world'} - """ - ) - p = testdir.makepyfile( - """ - from pytest import hello - import pytest - def test_hello(): - assert hello == "world" - assert 'hello' in pytest.__all__ - """ - ) - reprec = testdir.inline_run(p) - reprec.assertoutcome(passed=1) - def test_do_option_postinitialize(self, testdir): config = testdir.parseconfigure() assert not hasattr(config.option, "test123") @@ -190,17 +159,6 @@ def pytest_testhook(): assert "deprecated" in warnings[-1] -def test_namespace_has_default_and_env_plugins(testdir): - p = testdir.makepyfile( - """ - import pytest - pytest.mark - """ - ) - result = testdir.runpython(p) - assert result.ret == 0 - - def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines(["*tryfirst*first*", "*trylast*last*"]) From 3d92d5a6595a6f5df5849e5355a8dbd6bda9843d Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 18 Nov 2018 14:32:32 -0800 Subject: [PATCH 20/98] Make sure parametrize ids are printable --- src/_pytest/compat.py | 21 ++++++++++++++++----- testing/python/metafunc.py | 22 +++++++++++++++++----- 2 files changed, 33 insertions(+), 10 deletions(-) diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index ead9ffd8d80..87d4d51c13a 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -182,6 +182,15 @@ def get_default_arg_names(function): ) +_non_printable_ascii_translate_table = { + i: u"\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127) +} + + +def _translate_non_printable(s): + return s.translate(_non_printable_ascii_translate_table) + + if _PY3: STRING_TYPES = bytes, str UNICODE_TYPES = six.text_type @@ -221,9 +230,10 @@ def ascii_escaped(val): """ if isinstance(val, bytes): - return _bytes_to_ascii(val) + ret = _bytes_to_ascii(val) else: - return val.encode("unicode_escape").decode("ascii") + ret = val.encode("unicode_escape").decode("ascii") + return _translate_non_printable(ret) else: @@ -241,11 +251,12 @@ def ascii_escaped(val): """ if isinstance(val, bytes): try: - return val.encode("ascii") + ret = val.decode("ascii") except UnicodeDecodeError: - return val.encode("string-escape") + ret = val.encode("string-escape").decode("ascii") else: - return val.encode("unicode-escape") + ret = val.encode("unicode-escape").decode("ascii") + return 
_translate_non_printable(ret) class _PytestWrapper(object): diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 1a9cbf4086a..605814e654f 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -5,6 +5,7 @@ import attr import hypothesis +import six from hypothesis import strategies import pytest @@ -262,11 +263,8 @@ def test_idval_hypothesis(self, value): from _pytest.python import _idval escaped = _idval(value, "a", 6, None, item=None, config=None) - assert isinstance(escaped, str) - if PY3: - escaped.encode("ascii") - else: - escaped.decode("ascii") + assert isinstance(escaped, six.text_type) + escaped.encode("ascii") def test_unicode_idval(self): """This tests that Unicode strings outside the ASCII character set get @@ -382,6 +380,20 @@ def test_idmaker_native_strings(self): "\\xc3\\xb4-other", ] + def test_idmaker_non_printable_characters(self): + from _pytest.python import idmaker + + result = idmaker( + ("s", "n"), + [ + pytest.param("\x00", 1), + pytest.param("\x05", 2), + pytest.param(b"\x00", 3), + pytest.param(b"\x05", 4), + ], + ) + assert result == ["\\x00-1", "\\x05-2", "\\x00-3", "\\x05-4"] + def test_idmaker_enum(self): from _pytest.python import idmaker From 8395b9e25dd968124c239c303af4088aa6a348b9 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 18 Nov 2018 14:16:13 -0800 Subject: [PATCH 21/98] Require id=... to be a string This was documented before, but never enforced. Passing non-strings could have strange side-effects and enforcing a string simplifies other implementation. --- src/_pytest/mark/structures.py | 11 +++++++---- testing/test_mark.py | 32 ++++++++++++++++++++++++-------- 2 files changed, 31 insertions(+), 12 deletions(-) diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index b8fa011d114..f1892aa3f22 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -5,6 +5,7 @@ from operator import attrgetter import attr +import six from six.moves import map from ..compat import getfslineno @@ -70,10 +71,12 @@ def param(cls, *values, **kw): else: assert isinstance(marks, (tuple, list, set)) - def param_extract_id(id=None): - return id - - id_ = param_extract_id(**kw) + id_ = kw.pop("id", None) + if id_ is not None: + if not isinstance(id_, six.string_types): + raise TypeError( + "Expected id to be a string, got {}: {!r}".format(type(id_), id_) + ) return cls(values, marks, id_) @classmethod diff --git a/testing/test_mark.py b/testing/test_mark.py index 1f50045c579..80979d7eea8 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -5,20 +5,21 @@ import os import sys +import six + +import pytest +from _pytest.mark import EMPTY_PARAMETERSET_OPTION +from _pytest.mark import MarkGenerator as Mark +from _pytest.mark import ParameterSet +from _pytest.mark import transfer_markers +from _pytest.nodes import Collector +from _pytest.nodes import Node from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG try: import mock except ImportError: import unittest.mock as mock -import pytest -from _pytest.mark import ( - MarkGenerator as Mark, - ParameterSet, - transfer_markers, - EMPTY_PARAMETERSET_OPTION, -) -from _pytest.nodes import Node, Collector ignore_markinfo = pytest.mark.filterwarnings( "ignore:MarkInfo objects:pytest.RemovedInPytest4Warning" @@ -1252,3 +1253,18 @@ def test_custom_mark_parametrized(obj_type): result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) result.assert_outcomes(passed=4) + + +def test_pytest_param_id_requires_string(): + with 
pytest.raises(TypeError) as excinfo: + pytest.param(id=True) + msg, = excinfo.value.args + if six.PY2: + assert msg == "Expected id to be a string, got : True" + else: + assert msg == "Expected id to be a string, got : True" + + +@pytest.mark.parametrize("s", (None, "hello world")) +def test_pytest_param_id_allows_none_or_string(s): + assert pytest.param(id=s) From 9ca0ab6e2be28a44dca3a52080a4bcd407ca0b7b Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 18 Nov 2018 14:39:17 -0800 Subject: [PATCH 22/98] Ensure printable manually-specified param(id=...) --- src/_pytest/mark/structures.py | 2 ++ testing/python/metafunc.py | 12 ++++++++++++ 2 files changed, 14 insertions(+) diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index f1892aa3f22..14a684745a8 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -8,6 +8,7 @@ import six from six.moves import map +from ..compat import ascii_escaped from ..compat import getfslineno from ..compat import MappingMixin from ..compat import NOTSET @@ -77,6 +78,7 @@ def param(cls, *values, **kw): raise TypeError( "Expected id to be a string, got {}: {!r}".format(type(id_), id_) ) + id_ = ascii_escaped(id_) return cls(values, marks, id_) @classmethod diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 605814e654f..ef6993d9411 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -394,6 +394,18 @@ def test_idmaker_non_printable_characters(self): ) assert result == ["\\x00-1", "\\x05-2", "\\x00-3", "\\x05-4"] + def test_idmaker_manual_ids_must_be_printable(self): + from _pytest.python import idmaker + + result = idmaker( + ("s",), + [ + pytest.param("x00", id="hello \x00"), + pytest.param("x05", id="hello \x05"), + ], + ) + assert result == ["hello \\x00", "hello \\x05"] + def test_idmaker_enum(self): from _pytest.python import idmaker From 9a1e518cc3bedbcfc3eefa1576cc9a627f23aca6 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 18 Nov 2018 14:40:55 -0800 Subject: [PATCH 23/98] Add changelog entry for printable node ids --- changelog/4397.bugfix.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog/4397.bugfix.rst diff --git a/changelog/4397.bugfix.rst b/changelog/4397.bugfix.rst new file mode 100644 index 00000000000..d1a5bd3ba81 --- /dev/null +++ b/changelog/4397.bugfix.rst @@ -0,0 +1 @@ +Ensure that node ids are printable. 
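The escaping behind these "printable node ids" patches (20 above, extended in patch 24 below) works by handing ``str.translate`` an ordinal-to-text mapping: every non-printable ASCII code point becomes a literal ``\xNN`` escape while printable ASCII is left untouched. A minimal standalone sketch of that idea, Python 3 only; the table and helper mirror ``_translate_non_printable`` from ``src/_pytest/compat.py``, and the final assert is purely illustrative::

    # Map every non-printable ASCII code point (0-31 and 127) to a literal
    # "\xNN" escape; printable ASCII (32-126) passes through str.translate unchanged.
    table = {i: u"\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)}
    # Patch 24 below special-cases the common whitespace characters for readability.
    table.update({ord("\t"): u"\\t", ord("\r"): u"\\r", ord("\n"): u"\\n"})

    def translate_non_printable(s):
        # str.translate looks up each code point in the mapping.
        return s.translate(table)

    assert translate_non_printable(u"id with \x00 and \t") == u"id with \\x00 and \\t"

Applied to parametrize ids, this is why ``pytest.param("\x00", 1)`` shows up as ``\x00-1`` in ``--collect-only`` output instead of emitting a raw NUL byte to the terminal.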
From 95c6d591f7f5eff10f98e7768823c220ac830435 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 18 Nov 2018 15:12:43 -0800 Subject: [PATCH 24/98] Properly escape \r \n \t bytes --- src/_pytest/compat.py | 3 +++ testing/python/metafunc.py | 4 +++- 2 files changed, 6 insertions(+), 1 deletion(-) diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index 87d4d51c13a..1857f51a85f 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -185,6 +185,9 @@ def get_default_arg_names(function): _non_printable_ascii_translate_table = { i: u"\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127) } +_non_printable_ascii_translate_table.update( + {ord("\t"): u"\\t", ord("\r"): u"\\r", ord("\n"): u"\\n"} +) def _translate_non_printable(s): diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index ef6993d9411..0d5b6037f31 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -390,9 +390,11 @@ def test_idmaker_non_printable_characters(self): pytest.param("\x05", 2), pytest.param(b"\x00", 3), pytest.param(b"\x05", 4), + pytest.param("\t", 5), + pytest.param(b"\t", 6), ], ) - assert result == ["\\x00-1", "\\x05-2", "\\x00-3", "\\x05-4"] + assert result == ["\\x00-1", "\\x05-2", "\\x00-3", "\\x05-4", "\\t-5", "\\t-6"] def test_idmaker_manual_ids_must_be_printable(self): from _pytest.python import idmaker From d52ea4b6cf1b7a552d52a78acfd3b3b7f642bcc6 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Mon, 19 Nov 2018 20:06:06 -0200 Subject: [PATCH 25/98] Use python 3 in 'doctesting' environment We some examples now use type annotations --- tox.ini | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tox.ini b/tox.ini index dbfd4eef5c4..b9e12983bac 100644 --- a/tox.ini +++ b/tox.ini @@ -141,7 +141,7 @@ commands = sphinx-build -W -b html . 
_build [testenv:doctesting] -basepython = python +basepython = python3 skipsdist = True deps = PyYAML From 9ed63c607ea8093d0a86bd848a5371562b7573c8 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Sat, 17 Nov 2018 10:55:31 +0100 Subject: [PATCH 26/98] capture: do not overwrite `sys.__stdin__` etc Ref: https://github.com/pytest-dev/pytest/pull/4398#discussion_r234333053 --- src/_pytest/capture.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/src/_pytest/capture.py b/src/_pytest/capture.py index ec72ae3ecd6..679eb4d16cd 100644 --- a/src/_pytest/capture.py +++ b/src/_pytest/capture.py @@ -770,9 +770,9 @@ def _reopen_stdio(f, mode): f.line_buffering, ) - sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, "rb") - sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, "wb") - sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, "wb") + sys.stdin = _reopen_stdio(sys.stdin, "rb") + sys.stdout = _reopen_stdio(sys.stdout, "wb") + sys.stderr = _reopen_stdio(sys.stderr, "wb") def _attempt_to_close_capture_file(f): From 88bf01a31e6625edec134fc1d49ec882947fb680 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 22 Nov 2018 12:20:14 +0100 Subject: [PATCH 27/98] fix #4386 - restructure construction and partial state of ExceptionInfo --- changelog/4386.feature.rst | 1 + src/_pytest/_code/code.py | 91 ++++++++++++++++++++++++++--------- src/_pytest/assertion/util.py | 2 +- src/_pytest/main.py | 4 +- src/_pytest/python.py | 4 +- src/_pytest/python_api.py | 6 +-- src/_pytest/runner.py | 4 +- src/_pytest/unittest.py | 6 ++- testing/code/test_code.py | 4 +- testing/code/test_excinfo.py | 28 ++++++----- testing/test_resultlog.py | 2 +- testing/test_runner.py | 20 +++----- 12 files changed, 113 insertions(+), 59 deletions(-) create mode 100644 changelog/4386.feature.rst diff --git a/changelog/4386.feature.rst b/changelog/4386.feature.rst new file mode 100644 index 00000000000..fe827cc2394 --- /dev/null +++ b/changelog/4386.feature.rst @@ -0,0 +1 @@ +Restructure ExceptionInfo object construction and ensure incomplete instances have a ``repr``/``str``. diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py index d06e24f006c..595fee6d3f1 100644 --- a/src/_pytest/_code/code.py +++ b/src/_pytest/_code/code.py @@ -391,40 +391,85 @@ def recursionindex(self): ) +@attr.s(repr=False) class ExceptionInfo(object): """ wraps sys.exc_info() objects and offers help for navigating the traceback. """ - _striptext = "" _assert_start_repr = ( "AssertionError(u'assert " if _PY2 else "AssertionError('assert " ) - def __init__(self, tup=None, exprinfo=None): - import _pytest._code + _excinfo = attr.ib() + _striptext = attr.ib(default="") + _traceback = attr.ib(default=None) + + @classmethod + def from_current(cls, exprinfo=None): + """returns a exceptioninfo matching the current traceback + + .. 
warning:: + + experimental api + + + :param exprinfo: an text string helping to determine if we should + strip assertionerror from the output, defaults + to the exception message/__str__() + + """ + tup = sys.exc_info() + _striptext = "" + if exprinfo is None and isinstance(tup[1], AssertionError): + exprinfo = getattr(tup[1], "msg", None) + if exprinfo is None: + exprinfo = py.io.saferepr(tup[1]) + if exprinfo and exprinfo.startswith(cls._assert_start_repr): + _striptext = "AssertionError: " + + return cls(tup, _striptext) + + @classmethod + def for_later(cls): + """return an unfilled ExceptionInfo + """ + return cls(None) + + @property + def type(self): + """the exception class""" + return self._excinfo[0] + + @property + def value(self): + """the exception value""" + return self._excinfo[1] + + @property + def tb(self): + """the exception raw traceback""" + return self._excinfo[2] + + @property + def typename(self): + """the type name of the exception""" + return self.type.__name__ + + @property + def traceback(self): + """the traceback""" + if self._traceback is None: + self._traceback = Traceback(self.tb, excinfo=ref(self)) + return self._traceback - if tup is None: - tup = sys.exc_info() - if exprinfo is None and isinstance(tup[1], AssertionError): - exprinfo = getattr(tup[1], "msg", None) - if exprinfo is None: - exprinfo = py.io.saferepr(tup[1]) - if exprinfo and exprinfo.startswith(self._assert_start_repr): - self._striptext = "AssertionError: " - self._excinfo = tup - #: the exception class - self.type = tup[0] - #: the exception instance - self.value = tup[1] - #: the exception raw traceback - self.tb = tup[2] - #: the exception type name - self.typename = self.type.__name__ - #: the exception traceback (_pytest._code.Traceback instance) - self.traceback = _pytest._code.Traceback(self.tb, excinfo=ref(self)) + @traceback.setter + def traceback(self, value): + self._traceback = value def __repr__(self): + if self._excinfo is None: + return "" return "" % (self.typename, len(self.traceback)) def exconly(self, tryshort=False): @@ -513,6 +558,8 @@ def getrepr( return fmt.repr_excinfo(self) def __str__(self): + if self._excinfo is None: + return repr(self) entry = self.traceback[-1] loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly()) return str(loc) diff --git a/src/_pytest/assertion/util.py b/src/_pytest/assertion/util.py index 451e454952b..15561f2935f 100644 --- a/src/_pytest/assertion/util.py +++ b/src/_pytest/assertion/util.py @@ -155,7 +155,7 @@ def isiterable(obj): explanation = [ u"(pytest_assertion plugin: representation of details failed. 
" u"Probably an object has a faulty __repr__.)", - six.text_type(_pytest._code.ExceptionInfo()), + six.text_type(_pytest._code.ExceptionInfo.from_current()), ] if not explanation: diff --git a/src/_pytest/main.py b/src/_pytest/main.py index df4f1c8fbf5..851b08ae39c 100644 --- a/src/_pytest/main.py +++ b/src/_pytest/main.py @@ -188,7 +188,7 @@ def wrap_session(config, doit): except Failed: session.exitstatus = EXIT_TESTSFAILED except KeyboardInterrupt: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() exitstatus = EXIT_INTERRUPTED if initstate <= 2 and isinstance(excinfo.value, exit.Exception): sys.stderr.write("{}: {}\n".format(excinfo.typename, excinfo.value.msg)) @@ -197,7 +197,7 @@ def wrap_session(config, doit): config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = exitstatus except: # noqa - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR if excinfo.errisinstance(SystemExit): diff --git a/src/_pytest/python.py b/src/_pytest/python.py index 8912ca060a4..8c8de8e752d 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -450,7 +450,7 @@ def _importtestmodule(self): mod = self.fspath.pyimport(ensuresyspath=importmode) except SyntaxError: raise self.CollectError( - _pytest._code.ExceptionInfo().getrepr(style="short") + _pytest._code.ExceptionInfo.from_current().getrepr(style="short") ) except self.fspath.ImportMismatchError: e = sys.exc_info()[1] @@ -466,7 +466,7 @@ def _importtestmodule(self): except ImportError: from _pytest._code.code import ExceptionInfo - exc_info = ExceptionInfo() + exc_info = ExceptionInfo.from_current() if self.config.getoption("verbose") < 2: exc_info.traceback = exc_info.traceback.filter(filter_traceback) exc_repr = ( diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py index 805cd85ad41..f895fb8a8e8 100644 --- a/src/_pytest/python_api.py +++ b/src/_pytest/python_api.py @@ -684,13 +684,13 @@ def raises(expected_exception, *args, **kwargs): # XXX didn't mean f_globals == f_locals something special? # this is destroyed here ... 
except expected_exception: - return _pytest._code.ExceptionInfo() + return _pytest._code.ExceptionInfo.from_current() else: func = args[0] try: func(*args[1:], **kwargs) except expected_exception: - return _pytest._code.ExceptionInfo() + return _pytest._code.ExceptionInfo.from_current() fail(message) @@ -705,7 +705,7 @@ def __init__(self, expected_exception, message, match_expr): self.excinfo = None def __enter__(self): - self.excinfo = object.__new__(_pytest._code.ExceptionInfo) + self.excinfo = _pytest._code.ExceptionInfo.for_later() return self.excinfo def __exit__(self, *tp): diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py index 86298a7aa3f..9ea1a07cd5d 100644 --- a/src/_pytest/runner.py +++ b/src/_pytest/runner.py @@ -211,12 +211,12 @@ def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False): self.result = func() except KeyboardInterrupt: if treat_keyboard_interrupt_as_exception: - self.excinfo = ExceptionInfo() + self.excinfo = ExceptionInfo.from_current() else: self.stop = time() raise except: # noqa - self.excinfo = ExceptionInfo() + self.excinfo = ExceptionInfo.from_current() self.stop = time() def __repr__(self): diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index a38a60d8e68..d9881cd8751 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -115,6 +115,10 @@ def _addexcinfo(self, rawexcinfo): rawexcinfo = getattr(rawexcinfo, "_rawexcinfo", rawexcinfo) try: excinfo = _pytest._code.ExceptionInfo(rawexcinfo) + # invoke the attributes to trigger storing the traceback + # trial causes some issue there + excinfo.value + excinfo.traceback except TypeError: try: try: @@ -136,7 +140,7 @@ def _addexcinfo(self, rawexcinfo): except KeyboardInterrupt: raise except fail.Exception: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() self.__dict__.setdefault("_excinfo", []).append(excinfo) def addError(self, testcase, rawexcinfo): diff --git a/testing/code/test_code.py b/testing/code/test_code.py index 20ca0bfce1f..df9f109ef5c 100644 --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -169,7 +169,7 @@ def test_bad_getsource(self): else: assert False except AssertionError: - exci = _pytest._code.ExceptionInfo() + exci = _pytest._code.ExceptionInfo.from_current() assert exci.getrepr() @@ -181,7 +181,7 @@ def test_getsource(self): else: assert False except AssertionError: - exci = _pytest._code.ExceptionInfo() + exci = _pytest._code.ExceptionInfo.from_current() entry = exci.traceback[0] source = entry.getsource() assert len(source) == 6 diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index c8f4c904d37..b4d64313c60 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -71,7 +71,7 @@ def test_excinfo_simple(): try: raise ValueError except ValueError: - info = _pytest._code.ExceptionInfo() + info = _pytest._code.ExceptionInfo.from_current() assert info.type == ValueError @@ -85,7 +85,7 @@ def f(): try: f() except ValueError: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() linenumbers = [ _pytest._code.getrawcode(f).co_firstlineno - 1 + 4, _pytest._code.getrawcode(f).co_firstlineno - 1 + 1, @@ -126,7 +126,7 @@ def setup_method(self, method): try: h() except ValueError: - self.excinfo = _pytest._code.ExceptionInfo() + self.excinfo = _pytest._code.ExceptionInfo.from_current() def test_traceback_entries(self): tb = self.excinfo.traceback @@ -163,7 +163,7 @@ def xyz(): try: 
exec(source.compile()) except NameError: - tb = _pytest._code.ExceptionInfo().traceback + tb = _pytest._code.ExceptionInfo.from_current().traceback print(tb[-1].getsource()) s = str(tb[-1].getsource()) assert s.startswith("def xyz():\n try:") @@ -356,6 +356,12 @@ def test_excinfo_str(): assert len(s.split(":")) >= 3 # on windows it's 4 +def test_excinfo_for_later(): + e = ExceptionInfo.for_later() + assert "for raises" in repr(e) + assert "for raises" in str(e) + + def test_excinfo_errisinstance(): excinfo = pytest.raises(ValueError, h) assert excinfo.errisinstance(ValueError) @@ -365,7 +371,7 @@ def test_excinfo_no_sourcecode(): try: exec("raise ValueError()") except ValueError: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() s = str(excinfo.traceback[-1]) assert s == " File '':1 in \n ???\n" @@ -390,7 +396,7 @@ def test_entrysource_Queue_example(): try: queue.Queue().get(timeout=0.001) except queue.Empty: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() entry = excinfo.traceback[-1] source = entry.getsource() assert source is not None @@ -402,7 +408,7 @@ def test_codepath_Queue_example(): try: queue.Queue().get(timeout=0.001) except queue.Empty: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() entry = excinfo.traceback[-1] path = entry.path assert isinstance(path, py.path.local) @@ -453,7 +459,7 @@ def excinfo_from_exec(self, source): except KeyboardInterrupt: raise except: # noqa - return _pytest._code.ExceptionInfo() + return _pytest._code.ExceptionInfo.from_current() assert 0, "did not raise" def test_repr_source(self): @@ -491,7 +497,7 @@ def test_repr_source_not_existing(self): try: exec(co) except ValueError: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[1].lines[0] == "> ???" if sys.version_info[0] >= 3: @@ -510,7 +516,7 @@ def test_repr_many_line_source_not_existing(self): try: exec(co) except ValueError: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[1].lines[0] == "> ???" 
if sys.version_info[0] >= 3: @@ -1340,7 +1346,7 @@ def test_repr_traceback_with_unicode(style, encoding): try: raise RuntimeError(msg) except RuntimeError: - e_info = ExceptionInfo() + e_info = ExceptionInfo.from_current() formatter = FormattedExcinfo(style=style) repr_traceback = formatter.repr_traceback(e_info) assert repr_traceback is not None diff --git a/testing/test_resultlog.py b/testing/test_resultlog.py index 36f584e573d..cb7b0cd3ce7 100644 --- a/testing/test_resultlog.py +++ b/testing/test_resultlog.py @@ -151,7 +151,7 @@ def test_internal_exception(self, style): try: raise ValueError except ValueError: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() reslog = ResultLog(None, py.io.TextIO()) reslog.pytest_internalerror(excinfo.getrepr(style=style)) entry = reslog.logfile.getvalue() diff --git a/testing/test_runner.py b/testing/test_runner.py index c081920a502..2d047af70b8 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -561,20 +561,16 @@ def test_outcomeexception_passes_except_Exception(): def test_pytest_exit(): - try: + with pytest.raises(pytest.exit.Exception) as excinfo: pytest.exit("hello") - except pytest.exit.Exception: - excinfo = _pytest._code.ExceptionInfo() - assert excinfo.errisinstance(KeyboardInterrupt) + assert excinfo.errisinstance(KeyboardInterrupt) def test_pytest_fail(): - try: + with pytest.raises(pytest.fail.Exception) as excinfo: pytest.fail("hello") - except pytest.fail.Exception: - excinfo = _pytest._code.ExceptionInfo() - s = excinfo.exconly(tryshort=True) - assert s.startswith("Failed") + s = excinfo.exconly(tryshort=True) + assert s.startswith("Failed") def test_pytest_exit_msg(testdir): @@ -683,7 +679,7 @@ def test_exception_printing_skip(): try: pytest.skip("hello") except pytest.skip.Exception: - excinfo = _pytest._code.ExceptionInfo() + excinfo = _pytest._code.ExceptionInfo.from_current() s = excinfo.exconly(tryshort=True) assert s.startswith("Skipped") @@ -718,7 +714,7 @@ def f(): mod2 = pytest.importorskip("hello123", minversion="1.3") assert mod2 == mod except pytest.skip.Exception: - print(_pytest._code.ExceptionInfo()) + print(_pytest._code.ExceptionInfo.from_current()) pytest.fail("spurious skip") @@ -740,7 +736,7 @@ def test_importorskip_dev_module(monkeypatch): pytest.importorskip('mockmodule1', minversion='0.14.0')""", ) except pytest.skip.Exception: - print(_pytest._code.ExceptionInfo()) + print(_pytest._code.ExceptionInfo.from_current()) pytest.fail("spurious skip") From 2eaf3db6aeda21c060b18531c74776ffb41d33fc Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 22 Nov 2018 20:21:22 -0200 Subject: [PATCH 28/98] Fix docstring indentation (docs env) --- src/_pytest/_code/code.py | 11 +++++------ 1 file changed, 5 insertions(+), 6 deletions(-) diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py index 595fee6d3f1..1b49fe75bbb 100644 --- a/src/_pytest/_code/code.py +++ b/src/_pytest/_code/code.py @@ -407,17 +407,16 @@ class ExceptionInfo(object): @classmethod def from_current(cls, exprinfo=None): - """returns a exceptioninfo matching the current traceback + """returns an ExceptionInfo matching the current traceback .. 
warning:: - experimental api + Experimental API - :param exprinfo: an text string helping to determine if we should - strip assertionerror from the output, defaults - to the exception message/__str__() - + :param exprinfo: a text string helping to determine if we should + strip ``AssertionError`` from the output, defaults + to the exception message/``__str__()`` """ tup = sys.exc_info() _striptext = "" From 5d2e2377ffcd5ba21dae67b7456a9b94a088b29b Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 22 Nov 2018 20:34:38 -0200 Subject: [PATCH 29/98] Update deprecations.rst now that we have removed a few features * yield tests * compat properties * pytest_namespace All merciless cut down by Ronny :'( --- doc/en/deprecations.rst | 146 ++++++++++++++++++++-------------------- 1 file changed, 73 insertions(+), 73 deletions(-) diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 3398c92a20e..a46f868e34a 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -14,19 +14,6 @@ Below is a complete list of all pytest features which are considered deprecated. :class:`_pytest.warning_types.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters `. -Internal classes accessed through ``Node`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.9 - -Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances now issue -this warning:: - - usage of Function.Module is deprecated, please use pytest.Module instead - -Users should just ``import pytest`` and access those objects using the ``pytest`` module. - -This has been documented as deprecated for years, but only now we are actually emitting deprecation warnings. ``cached_setup`` ~~~~~~~~~~~~~~~~ @@ -103,40 +90,6 @@ Becomes: * ``node.warn("CI", "some message")``: this code/message form is now **deprecated** and should be converted to the warning instance form above. -``pytest_namespace`` -~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.7 - -This hook is deprecated because it greatly complicates the pytest internals regarding configuration and initialization, making some -bug fixes and refactorings impossible. - -Example of usage: - -.. code-block:: python - - class MySymbol: - ... - - - def pytest_namespace(): - return {"my_symbol": MySymbol()} - - -Plugin authors relying on this hook should instead require that users now import the plugin modules directly (with an appropriate public API). - -As a stopgap measure, plugin authors may still inject their names into pytest's namespace, usually during ``pytest_configure``: - -.. code-block:: python - - import pytest - - - def pytest_configure(): - pytest.my_symbol = MySymbol() - - - Calling fixtures directly ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -279,10 +232,52 @@ By passing a string, users expect that pytest will interpret that command-line u on (for example ``bash`` or ``Powershell``), but this is very hard/impossible to do in a portable way. +``pytest_funcarg__`` prefix +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. deprecated:: 3.0 + +In very early pytest versions fixtures could be defined using the ``pytest_funcarg__`` prefix: + +.. code-block:: python + + def pytest_funcarg__data(): + return SomeData() + +Switch over to the ``@pytest.fixture`` decorator: + +.. code-block:: python + + @pytest.fixture + def data(): + return SomeData() + +[pytest] section in setup.cfg files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. 
deprecated:: 3.0 + +``[pytest]`` sections in ``setup.cfg`` files should now be named ``[tool:pytest]`` +to avoid conflicts with other distutils commands. + +Result log (``--result-log``) +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. deprecated:: 3.0 + +The ``--resultlog`` command line option has been deprecated: it is little used +and there are more modern and better alternatives, for example `pytest-tap `_. + +Removed Features +---------------- + +As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after +an appropriate period of deprecation has passed. + ``yield`` tests ~~~~~~~~~~~~~~~ -.. deprecated:: 3.0 +*Removed in version 4.0.* pytest supports ``yield``-style tests, where a test function actually ``yield`` functions and values that are then turned into proper test methods. Example: @@ -307,48 +302,53 @@ This form of test function doesn't support fixtures properly, and users should s def test_squared(x, y): assert x ** x == y +Internal classes accessed through ``Node`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -``pytest_funcarg__`` prefix -~~~~~~~~~~~~~~~~~~~~~~~~~~~ +*Removed in version 4.0.* -.. deprecated:: 3.0 +Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances now issue +this warning:: -In very early pytest versions fixtures could be defined using the ``pytest_funcarg__`` prefix: + usage of Function.Module is deprecated, please use pytest.Module instead -.. code-block:: python +Users should just ``import pytest`` and access those objects using the ``pytest`` module. - def pytest_funcarg__data(): - return SomeData() +This has been documented as deprecated for years, but only now we are actually emitting deprecation warnings. -Switch over to the ``@pytest.fixture`` decorator: +``pytest_namespace`` +~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +This hook is deprecated because it greatly complicates the pytest internals regarding configuration and initialization, making some +bug fixes and refactorings impossible. + +Example of usage: .. code-block:: python - @pytest.fixture - def data(): - return SomeData() + class MySymbol: + ... -[pytest] section in setup.cfg files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. deprecated:: 3.0 + def pytest_namespace(): + return {"my_symbol": MySymbol()} -``[pytest]`` sections in ``setup.cfg`` files should now be named ``[tool:pytest]`` -to avoid conflicts with other distutils commands. -Result log (``--result-log``) -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ +Plugin authors relying on this hook should instead require that users now import the plugin modules directly (with an appropriate public API). -.. deprecated:: 3.0 +As a stopgap measure, plugin authors may still inject their names into pytest's namespace, usually during ``pytest_configure``: -The ``--resultlog`` command line option has been deprecated: it is little used -and there are more modern and better alternatives, for example `pytest-tap `_. +.. code-block:: python + + import pytest + + + def pytest_configure(): + pytest.my_symbol = MySymbol() -Removed Features ----------------- -As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after -an appropriate period of deprecation has passed. 
Reinterpretation mode (``--assert=reinterp``) From e9b2475e291d49c23beba6d35dbd08222d1b2763 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 25 Nov 2018 09:33:18 -0800 Subject: [PATCH 30/98] Display actual test ids in `--collect-only` --- changelog/4458.bugfix.rst | 1 + src/_pytest/nodes.py | 2 +- testing/python/collect.py | 18 ++++++++++++----- testing/python/metafunc.py | 6 +++--- testing/test_collection.py | 40 +++++++++++++++++++------------------- testing/test_terminal.py | 11 +++++------ 6 files changed, 43 insertions(+), 35 deletions(-) create mode 100644 changelog/4458.bugfix.rst diff --git a/changelog/4458.bugfix.rst b/changelog/4458.bugfix.rst new file mode 100644 index 00000000000..891fb9a2fce --- /dev/null +++ b/changelog/4458.bugfix.rst @@ -0,0 +1 @@ +Display actual test ids in ``--collect-only``. diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py index 86e541152d7..77e6f02c15c 100644 --- a/src/_pytest/nodes.py +++ b/src/_pytest/nodes.py @@ -138,7 +138,7 @@ def _getcustomclass(self, name): return cls def __repr__(self): - return "<%s %r>" % (self.__class__.__name__, getattr(self, "name", None)) + return "<%s %s>" % (self.__class__.__name__, getattr(self, "name", None)) def warn(self, _code_or_warning=None, message=None, code=None): """Issue a warning for this item. diff --git a/testing/python/collect.py b/testing/python/collect.py index 2e5d62dd5a1..4ce3d120dd2 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -489,26 +489,34 @@ def __call__(self, tmpdir): ] ) - def test_function_equality(self, testdir, tmpdir): + @staticmethod + def make_function(testdir, **kwargs): from _pytest.fixtures import FixtureManager config = testdir.parseconfigure() session = testdir.Session(config) session._fixturemanager = FixtureManager(session) + return pytest.Function(config=config, parent=session, **kwargs) + + def test_function_equality(self, testdir, tmpdir): def func1(): pass def func2(): pass - f1 = pytest.Function( - name="name", parent=session, config=config, args=(1,), callobj=func1 - ) + f1 = self.make_function(testdir, name="name", args=(1,), callobj=func1) assert f1 == f1 - f2 = pytest.Function(name="name", config=config, callobj=func2, parent=session) + f2 = self.make_function(testdir, name="name", callobj=func2) assert f1 != f2 + def test_repr_produces_actual_test_id(self, testdir): + f = self.make_function( + testdir, name=r"test[\xe5]", callobj=self.test_repr_produces_actual_test_id + ) + assert repr(f) == r"" + def test_issue197_parametrize_emptyset(self, testdir): testdir.makepyfile( """ diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 0d5b6037f31..243d50d2dca 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -474,9 +474,9 @@ def test_foo(arg): result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG) result.stdout.fnmatch_lines( [ - "", - " ", - " ", + "", + " ", + " ", "*test_parametrize_ids_exception.py:6: *parameter arg at position 0*", "*test_parametrize_ids_exception.py:6: *parameter arg at position 1*", ] diff --git a/testing/test_collection.py b/testing/test_collection.py index fae23025e71..473883b0d82 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -950,10 +950,10 @@ def test_collect_init_tests(testdir): [ "collected 2 items", "", - " ", - " ", - " ", + " ", + " ", + " ", + " ", ] ) result = testdir.runpytest("./tests", "--collect-only") @@ -961,10 +961,10 @@ def test_collect_init_tests(testdir): [ "collected 2 items", "", - " ", - " ", - 
" ", + " ", + " ", + " ", + " ", ] ) # Ignores duplicates with "." and pkginit (#4310). @@ -972,11 +972,11 @@ def test_collect_init_tests(testdir): result.stdout.fnmatch_lines( [ "collected 2 items", - "", - " ", - " ", - " ", - " ", + "", + " ", + " ", + " ", + " ", ] ) # Same as before, but different order. @@ -984,21 +984,21 @@ def test_collect_init_tests(testdir): result.stdout.fnmatch_lines( [ "collected 2 items", - "", - " ", - " ", - " ", - " ", + "", + " ", + " ", + " ", + " ", ] ) result = testdir.runpytest("./tests/test_foo.py", "--collect-only") result.stdout.fnmatch_lines( - ["", " ", " "] + ["", " ", " "] ) assert "test_init" not in result.stdout.str() result = testdir.runpytest("./tests/__init__.py", "--collect-only") result.stdout.fnmatch_lines( - ["", " ", " "] + ["", " ", " "] ) assert "test_foo" not in result.stdout.str() diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 86ec1cd07a6..60a64cdd683 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -263,7 +263,7 @@ def test_func(): ) result = testdir.runpytest("--collect-only") result.stdout.fnmatch_lines( - ["", " "] + ["", " "] ) def test_collectonly_skipped_module(self, testdir): @@ -307,11 +307,10 @@ def test_method(self): assert result.ret == 0 result.stdout.fnmatch_lines( [ - "*", - "* ", - "* ", - # "* ", - "* ", + "*", + "* ", + "* ", + "* ", ] ) From 1bba0a97146575b496ac3a021f6a68e1be74ec0d Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Thu, 22 Nov 2018 10:05:10 -0800 Subject: [PATCH 31/98] Deprecate `raises(..., 'code(as_a_string)')` / `warns(..., 'code(as_a_string)') --- changelog/4435.deprecation.rst | 1 + doc/en/assert.rst | 5 ++- doc/en/deprecations.rst | 35 +++++++++++++++++++ doc/en/example/assertion/failure_demo.py | 6 ++-- doc/en/example/parametrize.rst | 3 +- src/_pytest/deprecated.py | 9 +++++ src/_pytest/python_api.py | 17 ++++----- src/_pytest/recwarn.py | 2 ++ testing/code/test_code.py | 2 +- testing/code/test_excinfo.py | 6 ++-- testing/code/test_source.py | 25 +++++-------- .../sub2/conftest.py | 2 +- testing/python/collect.py | 5 +-- testing/python/fixture.py | 3 +- testing/python/metafunc.py | 8 ++--- testing/python/raises.py | 17 ++++++--- testing/test_capture.py | 10 +++--- testing/test_config.py | 7 ++-- testing/test_monkeypatch.py | 8 ++--- testing/test_parseopt.py | 6 +--- testing/test_pluginmanager.py | 10 +++--- testing/test_pytester.py | 4 +-- testing/test_recwarn.py | 17 ++++++--- testing/test_runner.py | 17 +++------ testing/test_session.py | 8 ++--- testing/test_terminal.py | 3 +- 26 files changed, 140 insertions(+), 96 deletions(-) create mode 100644 changelog/4435.deprecation.rst diff --git a/changelog/4435.deprecation.rst b/changelog/4435.deprecation.rst new file mode 100644 index 00000000000..f12f0bc6cbf --- /dev/null +++ b/changelog/4435.deprecation.rst @@ -0,0 +1 @@ +Deprecate ``raises(..., 'code(as_a_string)')`` and ``warns(..., 'code(as_a_string)')``. 
See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec diff --git a/doc/en/assert.rst b/doc/en/assert.rst index 43fedebed7e..b13a071f698 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -100,10 +100,9 @@ If you want to write test code that works on Python 2.4 as well, you may also use two other ways to test for an expected exception:: pytest.raises(ExpectedException, func, *args, **kwargs) - pytest.raises(ExpectedException, "func(*args, **kwargs)") -both of which execute the specified function with args and kwargs and -asserts that the given ``ExpectedException`` is raised. The reporter will +which will execute the specified function with args and kwargs and +assert that the given ``ExpectedException`` is raised. The reporter will provide you with helpful output in case of failures such as *no exception* or *wrong exception*. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 3398c92a20e..414e2e3f387 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -14,6 +14,41 @@ Below is a complete list of all pytest features which are considered deprecated. :class:`_pytest.warning_types.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters `. +.. _raises-warns-exec: + +``raises`` / ``warns`` with a string as the second argument +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. deprecated:: 4.1 + +Use the context manager form of these instead. When necessary, invoke ``exec`` +directly. + +Example: + +.. code-block:: python + + pytest.raises(ZeroDivisionError, "1 / 0") + pytest.raises(SyntaxError, "a $ b") + + pytest.warns(DeprecationWarning, "my_function()") + pytest.warns(SyntaxWarning, "assert(1, 2)") + +Becomes: + +.. code-block:: python + + with pytest.raises(ZeroDivisionError): + 1 / 0 + with pytest.raises(SyntaxError): + exec("a $ b") # exec is required for invalid syntax + + with pytest.warns(DeprecationWarning): + my_function() + with pytest.warns(SyntaxWarning): + exec("assert(1, 2)") # exec is used to avoid a top-level warning + + Internal classes accessed through ``Node`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/doc/en/example/assertion/failure_demo.py b/doc/en/example/assertion/failure_demo.py index 5bd95a37bb3..31a9f25776d 100644 --- a/doc/en/example/assertion/failure_demo.py +++ b/doc/en/example/assertion/failure_demo.py @@ -165,11 +165,11 @@ def globf(x): class TestRaises(object): def test_raises(self): - s = "qwe" # NOQA - raises(TypeError, "int(s)") + s = "qwe" + raises(TypeError, int, s) def test_raises_doesnt(self): - raises(IOError, "int('3')") + raises(IOError, int, "3") def test_raise(self): raise ValueError("demo error") diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 488f6e3100e..bb8ea59960a 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -388,7 +388,8 @@ parametrizer`_ but in a lot less code:: assert a == b def test_zerodivision(self, a, b): - pytest.raises(ZeroDivisionError, "a/b") + with pytest.raises(ZeroDivisionError): + a / b Our test generator looks up a class-level definition which specifies which argument sets to use for each test function. Let's run it: diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index a343662809f..ce3b9180257 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -92,6 +92,15 @@ "Node.warn(code, message) form has been deprecated, use Node.warn(warning_instance) instead." 
) +RAISES_EXEC = PytestDeprecationWarning( + "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n" + "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" +) +WARNS_EXEC = PytestDeprecationWarning( + "warns(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly.\n\n" + "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" +) + RECORD_XML_PROPERTY = RemovedInPytest4Warning( 'Fixture renamed from "record_xml_property" to "record_property" as user ' "properties are now available to all reporters.\n" diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py index e9cc2bbde12..7e5dc74a851 100644 --- a/src/_pytest/python_api.py +++ b/src/_pytest/python_api.py @@ -1,6 +1,9 @@ +from __future__ import absolute_import + import math import pprint import sys +import warnings from decimal import Decimal from numbers import Number @@ -14,6 +17,7 @@ from _pytest.compat import Mapping from _pytest.compat import Sequence from _pytest.compat import STRING_TYPES +from _pytest.deprecated import RAISES_EXEC from _pytest.outcomes import fail BASE_TYPE = (type, STRING_TYPES) @@ -604,9 +608,9 @@ def raises(expected_exception, *args, **kwargs): >>> with raises(ValueError, match=r'must be \d+$'): ... raise ValueError("value must be 42") - **Legacy forms** + **Legacy form** - The forms below are fully supported but are discouraged for new code because the + The form below is fully supported but discouraged for new code because the context manager form is regarded as more readable and less error-prone. It is possible to specify a callable by passing a to-be-called lambda:: @@ -623,14 +627,6 @@ def raises(expected_exception, *args, **kwargs): >>> raises(ZeroDivisionError, f, x=0) - It is also possible to pass a string to be evaluated at runtime:: - - >>> raises(ZeroDivisionError, "f(0)") - - - The string will be evaluated using the same ``locals()`` and ``globals()`` - at the moment of the ``raises`` call. - .. currentmodule:: _pytest._code Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`. 
@@ -672,6 +668,7 @@ def raises(expected_exception, *args, **kwargs): raise TypeError(msg) return RaisesContext(expected_exception, message, match_expr) elif isinstance(args[0], str): + warnings.warn(RAISES_EXEC, stacklevel=2) code, = args assert isinstance(code, str) frame = sys._getframe(1) diff --git a/src/_pytest/recwarn.py b/src/_pytest/recwarn.py index 4f3ab7f2997..f39f7aee789 100644 --- a/src/_pytest/recwarn.py +++ b/src/_pytest/recwarn.py @@ -11,6 +11,7 @@ import six import _pytest._code +from _pytest.deprecated import WARNS_EXEC from _pytest.fixtures import yield_fixture from _pytest.outcomes import fail @@ -89,6 +90,7 @@ def warns(expected_warning, *args, **kwargs): match_expr = kwargs.pop("match") return WarningsChecker(expected_warning, match_expr=match_expr) elif isinstance(args[0], str): + warnings.warn(WARNS_EXEC, stacklevel=2) code, = args assert isinstance(code, str) frame = sys._getframe(1) diff --git a/testing/code/test_code.py b/testing/code/test_code.py index df9f109ef5c..3362d46042b 100644 --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -37,7 +37,7 @@ def test_code_with_class(): class A(object): pass - pytest.raises(TypeError, "_pytest._code.Code(A)") + pytest.raises(TypeError, _pytest._code.Code, A) def x(): diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index b4d64313c60..4e36fb94601 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -180,7 +180,8 @@ def test_traceback_cut(self): def test_traceback_cut_excludepath(self, testdir): p = testdir.makepyfile("def f(): raise ValueError") - excinfo = pytest.raises(ValueError, "p.pyimport().f()") + with pytest.raises(ValueError) as excinfo: + p.pyimport().f() basedir = py.path.local(pytest.__file__).dirpath() newtraceback = excinfo.traceback.cut(excludepath=basedir) for x in newtraceback: @@ -336,7 +337,8 @@ def f(): def test_excinfo_exconly(): excinfo = pytest.raises(ValueError, h) assert excinfo.exconly().startswith("ValueError") - excinfo = pytest.raises(ValueError, "raise ValueError('hello\\nworld')") + with pytest.raises(ValueError) as excinfo: + raise ValueError("hello\nworld") msg = excinfo.exconly(tryshort=True) assert msg.startswith("ValueError") assert msg.endswith("world") diff --git a/testing/code/test_source.py b/testing/code/test_source.py index 3ee46c1b8f4..0103acb7025 100644 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -6,6 +6,7 @@ from __future__ import division from __future__ import print_function +import ast import inspect import sys @@ -14,7 +15,6 @@ import _pytest._code import pytest from _pytest._code import Source -from _pytest._code.source import ast astonly = pytest.mark.nothing @@ -306,8 +306,6 @@ def test_getstatementrange_with_syntaxerror_issue7(self): pytest.raises(SyntaxError, lambda: source.getstatementrange(0)) def test_compile_to_ast(self): - import ast - source = Source("x = 4") mod = source.compile(flag=ast.PyCF_ONLY_AST) assert isinstance(mod, ast.Module) @@ -317,10 +315,9 @@ def test_compile_and_getsource(self): co = self.source.compile() six.exec_(co, globals()) f(7) - excinfo = pytest.raises(AssertionError, "f(6)") + excinfo = pytest.raises(AssertionError, f, 6) frame = excinfo.traceback[-1].frame stmt = frame.code.fullsource.getstatement(frame.lineno) - # print "block", str(block) assert str(stmt).strip().startswith("assert") @pytest.mark.parametrize("name", ["", None, "my"]) @@ -361,17 +358,13 @@ def test_getline_finally(): def c(): pass - excinfo = pytest.raises( - TypeError, - """ - 
teardown = None - try: - c(1) - finally: - if teardown: - teardown() - """, - ) + with pytest.raises(TypeError) as excinfo: + teardown = None + try: + c(1) + finally: + if teardown: + teardown() source = excinfo.traceback[-1].statement assert str(source).strip() == "c(1)" diff --git a/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py b/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py index c37045454dc..00981c5dc12 100644 --- a/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py +++ b/testing/example_scripts/fixtures/fill_fixtures/test_conftest_funcargs_only_available_in_subdir/sub2/conftest.py @@ -3,4 +3,4 @@ @pytest.fixture def arg2(request): - pytest.raises(Exception, "request.getfixturevalue('arg1')") + pytest.raises(Exception, request.getfixturevalue, "arg1") diff --git a/testing/python/collect.py b/testing/python/collect.py index 4ce3d120dd2..83fcdc3bd10 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -325,7 +325,7 @@ def test_gen(): assert len(colitems) == 1 gencol = colitems[0] assert isinstance(gencol, pytest.Generator) - pytest.raises(ValueError, "gencol.collect()") + pytest.raises(ValueError, gencol.collect) def test_generative_methods_with_explicit_names(self, testdir): modcol = testdir.getmodulecol( @@ -1103,7 +1103,8 @@ def test_modulecol_roundtrip(testdir): class TestTracebackCutting(object): def test_skip_simple(self): - excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")') + with pytest.raises(pytest.skip.Exception) as excinfo: + pytest.skip("xxx") assert excinfo.traceback[-1].frame.code.name == "skip" assert excinfo.traceback[-1].ishidden() diff --git a/testing/python/fixture.py b/testing/python/fixture.py index 86cd29724c2..b7ce16eb5c0 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -906,7 +906,8 @@ def test_func2(self, something): assert "skipif" not in item1.keywords req1.applymarker(pytest.mark.skipif) assert "skipif" in item1.keywords - pytest.raises(ValueError, "req1.applymarker(42)") + with pytest.raises(ValueError): + req1.applymarker(42) def test_accesskeywords(self, testdir): testdir.makepyfile( diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 243d50d2dca..833eb564189 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -70,11 +70,11 @@ def func(arg1): pass metafunc = self.Metafunc(func) - pytest.raises(ValueError, "metafunc.addcall(id=None)") + pytest.raises(ValueError, metafunc.addcall, id=None) metafunc.addcall(id=1) - pytest.raises(ValueError, "metafunc.addcall(id=1)") - pytest.raises(ValueError, "metafunc.addcall(id='1')") + pytest.raises(ValueError, metafunc.addcall, id=1) + pytest.raises(ValueError, metafunc.addcall, id="1") metafunc.addcall(id=2) assert len(metafunc._calls) == 2 assert metafunc._calls[0].id == "1" @@ -108,7 +108,7 @@ class obj(object): metafunc.addcall(funcargs={"x": 2}) metafunc.addcall(funcargs={"x": 3}) - pytest.raises(pytest.fail.Exception, "metafunc.addcall({'xyz': 0})") + pytest.raises(pytest.fail.Exception, metafunc.addcall, {"xyz": 0}) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {"x": 2} assert metafunc._calls[1].funcargs == {"x": 3} diff --git a/testing/python/raises.py b/testing/python/raises.py index 6ca19c677ea..e3a0c4a0595 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -4,25 
+4,32 @@ import pytest from _pytest.outcomes import Failed +from _pytest.warning_types import PytestDeprecationWarning class TestRaises(object): def test_raises(self): source = "int('qwe')" - excinfo = pytest.raises(ValueError, source) + with pytest.warns(PytestDeprecationWarning): + excinfo = pytest.raises(ValueError, source) code = excinfo.traceback[-1].frame.code s = str(code.fullsource) assert s == source def test_raises_exec(self): - pytest.raises(ValueError, "a,x = []") + with pytest.warns(PytestDeprecationWarning) as warninfo: + pytest.raises(ValueError, "a,x = []") + assert warninfo[0].filename == __file__ def test_raises_exec_correct_filename(self): - excinfo = pytest.raises(ValueError, 'int("s")') - assert __file__ in excinfo.traceback[-1].path + with pytest.warns(PytestDeprecationWarning): + excinfo = pytest.raises(ValueError, 'int("s")') + assert __file__ in excinfo.traceback[-1].path def test_raises_syntax_error(self): - pytest.raises(SyntaxError, "qwe qwe qwe") + with pytest.warns(PytestDeprecationWarning) as warninfo: + pytest.raises(SyntaxError, "qwe qwe qwe") + assert warninfo[0].filename == __file__ def test_raises_function(self): pytest.raises(ValueError, int, "hello") diff --git a/testing/test_capture.py b/testing/test_capture.py index 47aba70d4e2..17bb8296750 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -87,7 +87,7 @@ def test_init_capturing(self): try: capman = CaptureManager("fd") capman.start_global_capturing() - pytest.raises(AssertionError, "capman.start_global_capturing()") + pytest.raises(AssertionError, capman.start_global_capturing) capman.stop_global_capturing() finally: capouter.stop_capturing() @@ -798,10 +798,10 @@ def test_unicode_and_str_mixture(self): f = capture.CaptureIO() if sys.version_info >= (3, 0): f.write("\u00f6") - pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))") + pytest.raises(TypeError, f.write, b"hello") else: - f.write(text_type("\u00f6", "UTF-8")) - f.write("hello") # bytes + f.write(u"\u00f6") + f.write(b"hello") s = f.getvalue() f.close() assert isinstance(s, text_type) @@ -1149,7 +1149,7 @@ def test_stdin_nulled_by_default(self): print("XXX which indicates an error in the underlying capturing") print("XXX mechanisms") with self.getcapture(): - pytest.raises(IOError, "sys.stdin.read()") + pytest.raises(IOError, sys.stdin.read) class TestStdCaptureFD(TestStdCapture): diff --git a/testing/test_config.py b/testing/test_config.py index 605d28aa0e4..fcb886d5317 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -194,7 +194,7 @@ def pytest_addoption(parser): config = testdir.parseconfig("--hello=this") for x in ("hello", "--hello", "-X"): assert config.getoption(x) == "this" - pytest.raises(ValueError, "config.getoption('qweqwe')") + pytest.raises(ValueError, config.getoption, "qweqwe") @pytest.mark.skipif("sys.version_info[0] < 3") def test_config_getoption_unicode(self, testdir): @@ -211,7 +211,7 @@ def pytest_addoption(parser): def test_config_getvalueorskip(self, testdir): config = testdir.parseconfig() - pytest.raises(pytest.skip.Exception, "config.getvalueorskip('hello')") + pytest.raises(pytest.skip.Exception, config.getvalueorskip, "hello") verbose = config.getvalueorskip("verbose") assert verbose == config.option.verbose @@ -723,7 +723,8 @@ def test_foo(pytestconfig): def test_notify_exception(testdir, capfd): config = testdir.parseconfig() - excinfo = pytest.raises(ValueError, "raise ValueError(1)") + with pytest.raises(ValueError) as excinfo: + raise ValueError(1) 
config.notify_exception(excinfo) out, err = capfd.readouterr() assert "ValueError" in err diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index ebc233fbfb1..9e44b49757d 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -27,7 +27,7 @@ class A(object): x = 1 monkeypatch = MonkeyPatch() - pytest.raises(AttributeError, "monkeypatch.setattr(A, 'notexists', 2)") + pytest.raises(AttributeError, monkeypatch.setattr, A, "notexists", 2) monkeypatch.setattr(A, "y", 2, raising=False) assert A.y == 2 monkeypatch.undo() @@ -99,7 +99,7 @@ class A(object): monkeypatch = MonkeyPatch() monkeypatch.delattr(A, "x") - pytest.raises(AttributeError, "monkeypatch.delattr(A, 'y')") + pytest.raises(AttributeError, monkeypatch.delattr, A, "y") monkeypatch.delattr(A, "y", raising=False) monkeypatch.setattr(A, "x", 5, raising=False) assert A.x == 5 @@ -156,7 +156,7 @@ def test_delitem(): monkeypatch.delitem(d, "x") assert "x" not in d monkeypatch.delitem(d, "y", raising=False) - pytest.raises(KeyError, "monkeypatch.delitem(d, 'y')") + pytest.raises(KeyError, monkeypatch.delitem, d, "y") assert not d monkeypatch.setitem(d, "y", 1700) assert d["y"] == 1700 @@ -182,7 +182,7 @@ def test_delenv(): name = "xyz1234" assert name not in os.environ monkeypatch = MonkeyPatch() - pytest.raises(KeyError, "monkeypatch.delenv(%r, raising=True)" % name) + pytest.raises(KeyError, monkeypatch.delenv, name, raising=True) monkeypatch.delenv(name, raising=False) monkeypatch.undo() os.environ[name] = "1" diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 0dafa248b4f..3048c96bda6 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -100,12 +100,8 @@ def test_group_addoption_conflict(self): def test_group_shortopt_lowercase(self, parser): group = parser.getgroup("hello") - pytest.raises( - ValueError, - """ + with pytest.raises(ValueError): group.addoption("-x", action="store_true") - """, - ) assert len(group.options) == 0 group._addoption("-x", action="store_true") assert len(group.options) == 1 diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 64d05d383b4..6137b277137 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -196,7 +196,7 @@ def test_register_imported_modules(self): assert pm.is_registered(mod) values = pm.get_plugins() assert mod in values - pytest.raises(ValueError, "pm.register(mod)") + pytest.raises(ValueError, pm.register, mod) pytest.raises(ValueError, lambda: pm.register(mod)) # assert not pm.is_registered(mod2) assert pm.get_plugins() == values @@ -284,8 +284,8 @@ def test_hello(pytestconfig): result.stdout.fnmatch_lines(["*1 passed*"]) def test_import_plugin_importname(self, testdir, pytestpm): - pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")') - pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwx.y")') + pytest.raises(ImportError, pytestpm.import_plugin, "qweqwex.y") + pytest.raises(ImportError, pytestpm.import_plugin, "pytest_qweqwx.y") testdir.syspathinsert() pluginname = "pytest_hello" @@ -301,8 +301,8 @@ def test_import_plugin_importname(self, testdir, pytestpm): assert plugin2 is plugin1 def test_import_plugin_dotted_name(self, testdir, pytestpm): - pytest.raises(ImportError, 'pytestpm.import_plugin("qweqwex.y")') - pytest.raises(ImportError, 'pytestpm.import_plugin("pytest_qweqwex.y")') + pytest.raises(ImportError, pytestpm.import_plugin, "qweqwex.y") + pytest.raises(ImportError, pytestpm.import_plugin, "pytest_qweqwex.y") 
testdir.syspathinsert() testdir.mkpydir("pkg").join("plug.py").write("x=3") diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 0c28bc91b7e..669da6e172b 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -71,7 +71,7 @@ class rep(object): recorder.unregister() recorder.clear() recorder.hook.pytest_runtest_logreport(report=rep) - pytest.raises(ValueError, "recorder.getfailures()") + pytest.raises(ValueError, recorder.getfailures) def test_parseconfig(testdir): @@ -174,7 +174,7 @@ def test_hookrecorder_basic(holder): call = rec.popcall("pytest_xyz") assert call.arg == 123 assert call._name == "pytest_xyz" - pytest.raises(pytest.fail.Exception, "rec.popcall('abc')") + pytest.raises(pytest.fail.Exception, rec.popcall, "abc") pm.hook.pytest_xyz_noarg() call = rec.popcall("pytest_xyz_noarg") assert call._name == "pytest_xyz_noarg" diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py index 223521a5eac..9bf6a2ffbb6 100644 --- a/testing/test_recwarn.py +++ b/testing/test_recwarn.py @@ -7,6 +7,7 @@ import pytest from _pytest.recwarn import WarningsRecorder +from _pytest.warning_types import PytestDeprecationWarning def test_recwarn_stacklevel(recwarn): @@ -44,7 +45,7 @@ def test_recording(self): rec.clear() assert len(rec.list) == 0 assert values is rec.list - pytest.raises(AssertionError, "rec.pop()") + pytest.raises(AssertionError, rec.pop) @pytest.mark.issue(4243) def test_warn_stacklevel(self): @@ -214,9 +215,17 @@ def test_strings(self): source1 = "warnings.warn('w1', RuntimeWarning)" source2 = "warnings.warn('w2', RuntimeWarning)" source3 = "warnings.warn('w3', RuntimeWarning)" - pytest.warns(RuntimeWarning, source1) - pytest.raises(pytest.fail.Exception, lambda: pytest.warns(UserWarning, source2)) - pytest.warns(RuntimeWarning, source3) + with pytest.warns(PytestDeprecationWarning) as warninfo: # yo dawg + pytest.warns(RuntimeWarning, source1) + pytest.raises( + pytest.fail.Exception, lambda: pytest.warns(UserWarning, source2) + ) + pytest.warns(RuntimeWarning, source3) + assert len(warninfo) == 3 + for w in warninfo: + assert w.filename == __file__ + msg, = w.message.args + assert msg.startswith("warns(..., 'code(as_a_string)') is deprecated") def test_function(self): pytest.warns( diff --git a/testing/test_runner.py b/testing/test_runner.py index 2d047af70b8..d76f3da9b7e 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -700,17 +700,13 @@ def f(): # check that importorskip reports the actual call # in this test the test_runner.py file assert path.purebasename == "test_runner" - pytest.raises(SyntaxError, "pytest.importorskip('x y z')") - pytest.raises(SyntaxError, "pytest.importorskip('x=y')") + pytest.raises(SyntaxError, pytest.importorskip, "x y z") + pytest.raises(SyntaxError, pytest.importorskip, "x=y") mod = types.ModuleType("hello123") mod.__version__ = "1.3" monkeypatch.setitem(sys.modules, "hello123", mod) - pytest.raises( - pytest.skip.Exception, - """ + with pytest.raises(pytest.skip.Exception): pytest.importorskip("hello123", minversion="1.3.1") - """, - ) mod2 = pytest.importorskip("hello123", minversion="1.3") assert mod2 == mod except pytest.skip.Exception: @@ -730,11 +726,8 @@ def test_importorskip_dev_module(monkeypatch): monkeypatch.setitem(sys.modules, "mockmodule", mod) mod2 = pytest.importorskip("mockmodule", minversion="0.12.0") assert mod2 == mod - pytest.raises( - pytest.skip.Exception, - """ - pytest.importorskip('mockmodule1', minversion='0.14.0')""", - ) + with pytest.raises(pytest.skip.Exception): 
+ pytest.importorskip("mockmodule1", minversion="0.14.0") except pytest.skip.Exception: print(_pytest._code.ExceptionInfo.from_current()) pytest.fail("spurious skip") diff --git a/testing/test_session.py b/testing/test_session.py index 0dc98a703cd..32d71a18e08 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -243,12 +243,8 @@ def test_minus_x_overridden_by_maxfail(self, testdir): def test_plugin_specify(testdir): - pytest.raises( - ImportError, - """ - testdir.parseconfig("-p", "nqweotexistent") - """, - ) + with pytest.raises(ImportError): + testdir.parseconfig("-p", "nqweotexistent") # pytest.raises(ImportError, # "config.do_configure(config)" # ) diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 60a64cdd683..0faf9b4010a 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -105,7 +105,8 @@ def test_func(): def test_internalerror(self, testdir, linecomp): modcol = testdir.getmodulecol("def test_one(): pass") rep = TerminalReporter(modcol.config, file=linecomp.stringio) - excinfo = pytest.raises(ValueError, "raise ValueError('hello')") + with pytest.raises(ValueError) as excinfo: + raise ValueError("hello") rep.pytest_internalerror(excinfo.getrepr()) linecomp.assert_contains_lines(["INTERNALERROR> *ValueError*hello*"]) From 7eb28f9eb7222ea9a08d0b7ffdec29353769519f Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Tue, 28 Aug 2018 10:26:18 +0200 Subject: [PATCH 32/98] remove yield tests and compat properties --- changelog/3079.removal.rst | 1 + changelog/3616.removal.rst | 1 + src/_pytest/compat.py | 1 - src/_pytest/deprecated.py | 17 +- src/_pytest/fixtures.py | 6 - src/_pytest/nodes.py | 35 ----- src/_pytest/nose.py | 12 -- src/_pytest/python.py | 62 ++------ src/pytest.py | 2 - testing/deprecated_test.py | 71 --------- testing/python/collect.py | 293 ----------------------------------- testing/python/fixture.py | 18 --- testing/test_collection.py | 14 -- testing/test_nose.py | 68 -------- testing/test_pdb.py | 22 --- testing/test_runner_xunit.py | 59 ------- testing/test_session.py | 14 -- testing/test_terminal.py | 24 ++- 18 files changed, 29 insertions(+), 691 deletions(-) create mode 100644 changelog/3079.removal.rst create mode 100644 changelog/3616.removal.rst diff --git a/changelog/3079.removal.rst b/changelog/3079.removal.rst new file mode 100644 index 00000000000..c289176d20b --- /dev/null +++ b/changelog/3079.removal.rst @@ -0,0 +1 @@ +Remove support for yield tests - they are fundamentally broken since collection and test execution were separated. diff --git a/changelog/3616.removal.rst b/changelog/3616.removal.rst new file mode 100644 index 00000000000..a88c4534a45 --- /dev/null +++ b/changelog/3616.removal.rst @@ -0,0 +1 @@ +Remove the deprecated compat properties for node.Class/Function/Module - use pytest... now. diff --git a/src/_pytest/compat.py b/src/_pytest/compat.py index 1857f51a85f..6008b8b40e1 100644 --- a/src/_pytest/compat.py +++ b/src/_pytest/compat.py @@ -389,7 +389,6 @@ def safe_str(v): COLLECT_FAKEMODULE_ATTRIBUTES = ( "Collector", "Module", - "Generator", "Function", "Instance", "Session", diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index ce3b9180257..ba7426a1193 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -22,28 +22,13 @@ "pass a list of arguments instead." 
) -YIELD_TESTS = RemovedInPytest4Warning( - "yield tests are deprecated, and scheduled to be removed in pytest 4.0" -) +YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" CACHED_SETUP = RemovedInPytest4Warning( "cached_setup is deprecated and will be removed in a future release. " "Use standard fixture functions instead." ) -COMPAT_PROPERTY = UnformattedWarning( - RemovedInPytest4Warning, - "usage of {owner}.{name} is deprecated, please use pytest.{name} instead", -) - -CUSTOM_CLASS = UnformattedWarning( - RemovedInPytest4Warning, - 'use of special named "{name}" objects in collectors of type "{type_name}" to ' - "customize the created nodes is deprecated. " - "Use pytest_pycollect_makeitem(...) to create custom " - "collection nodes instead.", -) - FUNCARG_PREFIX = UnformattedWarning( RemovedInPytest4Warning, '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 124b611db29..924352afb0f 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -1303,17 +1303,11 @@ def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): if holderobj in self._holderobjseen: return - from _pytest.nodes import _CompatProperty - self._holderobjseen.add(holderobj) autousenames = [] for name in dir(holderobj): # The attribute can be an arbitrary descriptor, so the attribute # access below can raise. safe_getatt() ignores such exceptions. - maybe_property = safe_getattr(type(holderobj), name, None) - if isinstance(maybe_property, _CompatProperty): - # deprecated - continue obj = safe_getattr(holderobj, name, None) marker = getfixturemarker(obj) # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py index 77e6f02c15c..1b41898c4c8 100644 --- a/src/_pytest/nodes.py +++ b/src/_pytest/nodes.py @@ -5,7 +5,6 @@ import os import warnings -import attr import py import six @@ -56,22 +55,6 @@ def ischildnode(baseid, nodeid): return node_parts[: len(base_parts)] == base_parts -@attr.s -class _CompatProperty(object): - name = attr.ib() - - def __get__(self, obj, owner): - if obj is None: - return self - - from _pytest.deprecated import COMPAT_PROPERTY - - warnings.warn( - COMPAT_PROPERTY.format(name=self.name, owner=owner.__name__), stacklevel=2 - ) - return getattr(__import__("pytest"), self.name) - - class Node(object): """ base class for Collector and Item the test collection tree. 
Collector subclasses have children, Items are terminal nodes.""" @@ -119,24 +102,6 @@ def ihook(self): """ fspath sensitive hook proxy used to call pytest hooks""" return self.session.gethookproxy(self.fspath) - Module = _CompatProperty("Module") - Class = _CompatProperty("Class") - Instance = _CompatProperty("Instance") - Function = _CompatProperty("Function") - File = _CompatProperty("File") - Item = _CompatProperty("Item") - - def _getcustomclass(self, name): - maybe_compatprop = getattr(type(self), name) - if isinstance(maybe_compatprop, _CompatProperty): - return getattr(__import__("pytest"), name) - else: - from _pytest.deprecated import CUSTOM_CLASS - - cls = getattr(self, name) - self.warn(CUSTOM_CLASS.format(name=name, type_name=type(self).__name__)) - return cls - def __repr__(self): return "<%s %s>" % (self.__class__.__name__, getattr(self, "name", None)) diff --git a/src/_pytest/nose.py b/src/_pytest/nose.py index 4bfa9c5838b..416381bb528 100644 --- a/src/_pytest/nose.py +++ b/src/_pytest/nose.py @@ -30,13 +30,6 @@ def pytest_runtest_makereport(item, call): @hookimpl(trylast=True) def pytest_runtest_setup(item): if is_potential_nosetest(item): - if isinstance(item.parent, python.Generator): - gen = item.parent - if not hasattr(gen, "_nosegensetup"): - call_optional(gen.obj, "setup") - if isinstance(gen.parent, python.Instance): - call_optional(gen.parent.obj, "setup") - gen._nosegensetup = True if not call_optional(item.obj, "setup"): # call module level setup if there is no object level one call_optional(item.parent.obj, "setup") @@ -53,11 +46,6 @@ def teardown_nose(item): # del item.parent._nosegensetup -def pytest_make_collect_report(collector): - if isinstance(collector, python.Generator): - call_optional(collector.obj, "setup") - - def is_potential_nosetest(item): # extra check needed since we do not do nose style setup/teardown # on direct unittest style classes diff --git a/src/_pytest/python.py b/src/_pytest/python.py index 8c8de8e752d..7ddcc2762a6 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -38,6 +38,7 @@ from _pytest.compat import STRING_TYPES from _pytest.config import hookimpl from _pytest.main import FSHookProxy +from _pytest.mark import MARK_GEN from _pytest.mark.structures import get_unpacked_marks from _pytest.mark.structures import normalize_mark_list from _pytest.mark.structures import transfer_markers @@ -199,7 +200,6 @@ def pytest_pycollect_makeitem(collector, name, obj): # nothing was collected elsewhere, let's do it here if safe_isclass(obj): if collector.istestclass(obj, name): - Class = collector._getcustomclass("Class") outcome.force_result(Class(name, parent=collector)) elif collector.istestfunction(obj, name): # mock seems to store unbound methods (issue473), normalize it @@ -219,7 +219,10 @@ def pytest_pycollect_makeitem(collector, name, obj): ) elif getattr(obj, "__test__", True): if is_generator(obj): - res = Generator(name, parent=collector) + res = Function(name, parent=collector) + reason = deprecated.YIELD_TESTS.format(name=name) + res.add_marker(MARK_GEN.xfail(run=False, reason=reason)) + res.warn(PytestWarning(reason)) else: res = list(collector._genfunctions(name, obj)) outcome.force_result(res) @@ -408,7 +411,6 @@ def _genfunctions(self, name, funcobj): else: self.ihook.pytest_generate_tests(metafunc=metafunc) - Function = self._getcustomclass("Function") if not metafunc._calls: yield Function(name, parent=self, fixtureinfo=fixtureinfo) else: @@ -648,7 +650,7 @@ def collect(self): ) ) return [] - return 
[self._getcustomclass("Instance")(name="()", parent=self)] + return [Instance(name="()", parent=self)] def setup(self): setup_class = _get_xunit_func(self.obj, "setup_class") @@ -739,51 +741,6 @@ def repr_failure(self, excinfo, outerr=None): return self._repr_failure_py(excinfo, style=style) -class Generator(FunctionMixin, PyCollector): - def collect(self): - - # test generators are seen as collectors but they also - # invoke setup/teardown on popular request - # (induced by the common "test_*" naming shared with normal tests) - from _pytest import deprecated - - self.warn(deprecated.YIELD_TESTS) - - self.session._setupstate.prepare(self) - # see FunctionMixin.setup and test_setupstate_is_preserved_134 - self._preservedparent = self.parent.obj - values = [] - seen = {} - _Function = self._getcustomclass("Function") - for i, x in enumerate(self.obj()): - name, call, args = self.getcallargs(x) - if not callable(call): - raise TypeError("%r yielded non callable test %r" % (self.obj, call)) - if name is None: - name = "[%d]" % i - else: - name = "['%s']" % name - if name in seen: - raise ValueError( - "%r generated tests with non-unique name %r" % (self, name) - ) - seen[name] = True - values.append(_Function(name, self, args=args, callobj=call)) - return values - - def getcallargs(self, obj): - if not isinstance(obj, (tuple, list)): - obj = (obj,) - # explicit naming - if isinstance(obj[0], six.string_types): - name = obj[0] - obj = obj[1:] - else: - name = None - call, args = obj[0], obj[1:] - return name, call, args - - def hasinit(obj): init = getattr(obj, "__init__", None) if init: @@ -1326,8 +1283,7 @@ def _showfixtures_main(config, session): tw.line(" %s: no docstring available" % (loc,), red=True) -def write_docstring(tw, doc): - INDENT = " " +def write_docstring(tw, doc, indent=" "): doc = doc.rstrip() if "\n" in doc: firstline, rest = doc.split("\n", 1) @@ -1335,11 +1291,11 @@ def write_docstring(tw, doc): firstline, rest = doc, "" if firstline.strip(): - tw.line(INDENT + firstline.strip()) + tw.line(indent + firstline.strip()) if rest: for line in dedent(rest).split("\n"): - tw.write(INDENT + line + "\n") + tw.write(indent + line + "\n") class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr): diff --git a/src/pytest.py b/src/pytest.py index 14ed1acaab8..c0010f166df 100644 --- a/src/pytest.py +++ b/src/pytest.py @@ -28,7 +28,6 @@ from _pytest.outcomes import xfail from _pytest.python import Class from _pytest.python import Function -from _pytest.python import Generator from _pytest.python import Instance from _pytest.python import Module from _pytest.python import Package @@ -57,7 +56,6 @@ "fixture", "freeze_includes", "Function", - "Generator", "hookimpl", "hookspec", "importorskip", diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index bc120b2634a..bc2e8378b13 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -10,47 +10,6 @@ pytestmark = pytest.mark.pytester_example_path("deprecated") -def test_yield_tests_deprecation(testdir): - testdir.makepyfile( - """ - def func1(arg, arg2): - assert arg == arg2 - def test_gen(): - yield "m1", func1, 15, 3*5 - yield "m2", func1, 42, 6*7 - def test_gen2(): - for k in range(10): - yield func1, 1, 1 - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - [ - "*test_yield_tests_deprecation.py:3:*yield tests are deprecated*", - "*test_yield_tests_deprecation.py:6:*yield tests are deprecated*", - "*2 passed*", - ] - ) - assert 
result.stdout.str().count("yield tests are deprecated") == 2 - - -def test_compat_properties_deprecation(testdir): - testdir.makepyfile( - """ - def test_foo(request): - print(request.node.Module) - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - [ - "*test_compat_properties_deprecation.py:2:*usage of Function.Module is deprecated, " - "please use pytest.Module instead*", - "*1 passed, 1 warnings in*", - ] - ) - - def test_cached_setup_deprecation(testdir): testdir.makepyfile( """ @@ -72,36 +31,6 @@ def test_foo(fix): ) -def test_custom_class_deprecation(testdir): - testdir.makeconftest( - """ - import pytest - - class MyModule(pytest.Module): - - class Class(pytest.Class): - pass - - def pytest_pycollect_makemodule(path, parent): - return MyModule(path, parent) - """ - ) - testdir.makepyfile( - """ - class Test: - def test_foo(self): - pass - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - [ - '*test_custom_class_deprecation.py:1:*"Class" objects in collectors of type "MyModule*', - "*1 passed, 1 warnings in*", - ] - ) - - def test_funcarg_prefix_deprecation(testdir): testdir.makepyfile( """ diff --git a/testing/python/collect.py b/testing/python/collect.py index 83fcdc3bd10..2e534a25943 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -7,7 +7,6 @@ import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED from _pytest.nodes import Collector -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG class TestModule(object): @@ -244,217 +243,6 @@ def prop(self): @pytest.mark.filterwarnings( "ignore:usage of Generator.Function is deprecated, please use pytest.Function instead" ) -class TestGenerator(object): - def test_generative_functions(self, testdir): - modcol = testdir.getmodulecol( - """ - def func1(arg, arg2): - assert arg == arg2 - - def test_gen(): - yield func1, 17, 3*5 - yield func1, 42, 6*7 - """ - ) - colitems = modcol.collect() - assert len(colitems) == 1 - gencol = colitems[0] - assert isinstance(gencol, pytest.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], pytest.Function) - assert isinstance(gencolitems[1], pytest.Function) - assert gencolitems[0].name == "[0]" - assert gencolitems[0].obj.__name__ == "func1" - - def test_generative_methods(self, testdir): - modcol = testdir.getmodulecol( - """ - def func1(arg, arg2): - assert arg == arg2 - class TestGenMethods(object): - def test_gen(self): - yield func1, 17, 3*5 - yield func1, 42, 6*7 - """ - ) - gencol = modcol.collect()[0].collect()[0].collect()[0] - assert isinstance(gencol, pytest.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], pytest.Function) - assert isinstance(gencolitems[1], pytest.Function) - assert gencolitems[0].name == "[0]" - assert gencolitems[0].obj.__name__ == "func1" - - def test_generative_functions_with_explicit_names(self, testdir): - modcol = testdir.getmodulecol( - """ - def func1(arg, arg2): - assert arg == arg2 - - def test_gen(): - yield "seventeen", func1, 17, 3*5 - yield "fortytwo", func1, 42, 6*7 - """ - ) - colitems = modcol.collect() - assert len(colitems) == 1 - gencol = colitems[0] - assert isinstance(gencol, pytest.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], pytest.Function) - assert isinstance(gencolitems[1], pytest.Function) - assert gencolitems[0].name == "['seventeen']" - 
assert gencolitems[0].obj.__name__ == "func1" - assert gencolitems[1].name == "['fortytwo']" - assert gencolitems[1].obj.__name__ == "func1" - - def test_generative_functions_unique_explicit_names(self, testdir): - # generative - modcol = testdir.getmodulecol( - """ - def func(): pass - def test_gen(): - yield "name", func - yield "name", func - """ - ) - colitems = modcol.collect() - assert len(colitems) == 1 - gencol = colitems[0] - assert isinstance(gencol, pytest.Generator) - pytest.raises(ValueError, gencol.collect) - - def test_generative_methods_with_explicit_names(self, testdir): - modcol = testdir.getmodulecol( - """ - def func1(arg, arg2): - assert arg == arg2 - class TestGenMethods(object): - def test_gen(self): - yield "m1", func1, 17, 3*5 - yield "m2", func1, 42, 6*7 - """ - ) - gencol = modcol.collect()[0].collect()[0].collect()[0] - assert isinstance(gencol, pytest.Generator) - gencolitems = gencol.collect() - assert len(gencolitems) == 2 - assert isinstance(gencolitems[0], pytest.Function) - assert isinstance(gencolitems[1], pytest.Function) - assert gencolitems[0].name == "['m1']" - assert gencolitems[0].obj.__name__ == "func1" - assert gencolitems[1].name == "['m2']" - assert gencolitems[1].obj.__name__ == "func1" - - def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): - o = testdir.makepyfile( - """ - from __future__ import print_function - def test_generative_order_of_execution(): - import py, pytest - test_list = [] - expected_list = list(range(6)) - - def list_append(item): - test_list.append(item) - - def assert_order_of_execution(): - print('expected order', expected_list) - print('but got ', test_list) - assert test_list == expected_list - - for i in expected_list: - yield list_append, i - yield assert_order_of_execution - """ - ) - reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 7 - assert not skipped and not failed - - def test_order_of_execution_generator_different_codeline(self, testdir): - o = testdir.makepyfile( - """ - from __future__ import print_function - def test_generative_tests_different_codeline(): - import py, pytest - test_list = [] - expected_list = list(range(3)) - - def list_append_2(): - test_list.append(2) - - def list_append_1(): - test_list.append(1) - - def list_append_0(): - test_list.append(0) - - def assert_order_of_execution(): - print('expected order', expected_list) - print('but got ', test_list) - assert test_list == expected_list - - yield list_append_0 - yield list_append_1 - yield list_append_2 - yield assert_order_of_execution - """ - ) - reprec = testdir.inline_run(o, SHOW_PYTEST_WARNINGS_ARG) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 4 - assert not skipped and not failed - - def test_setupstate_is_preserved_134(self, testdir): - # yield-based tests are messy wrt to setupstate because - # during collection they already invoke setup functions - # and then again when they are run. For now, we want to make sure - # that the old 1.3.4 behaviour is preserved such that all - # yielded functions all share the same "self" instance that - # has been used during collection. 
- o = testdir.makepyfile( - """ - setuplist = [] - class TestClass(object): - def setup_method(self, func): - #print "setup_method", self, func - setuplist.append(self) - self.init = 42 - - def teardown_method(self, func): - self.init = None - - def test_func1(self): - pass - - def test_func2(self): - yield self.func2 - yield self.func2 - - def func2(self): - assert self.init - - def test_setuplist(): - # once for test_func2 during collection - # once for test_func1 during test run - # once for test_func2 during test run - #print setuplist - assert len(setuplist) == 3, len(setuplist) - assert setuplist[0] == setuplist[2], setuplist - assert setuplist[1] != setuplist[2], setuplist - """ - ) - reprec = testdir.inline_run(o, "-v", SHOW_PYTEST_WARNINGS_ARG) - passed, skipped, failed = reprec.countoutcomes() - assert passed == 4 - assert not skipped and not failed - - class TestFunction(object): @pytest.fixture def ignore_parametrized_marks_args(self): @@ -1271,39 +1059,6 @@ def test_hello(self): pass @pytest.mark.filterwarnings( "ignore:usage of Generator.Function is deprecated, please use pytest.Function instead" ) - def test_generator_reportinfo(self, testdir): - modcol = testdir.getmodulecol( - """ - # lineno 0 - def test_gen(): - def check(x): - assert x - yield check, 3 - """ - ) - gencol = testdir.collect_by_name(modcol, "test_gen") - fspath, lineno, modpath = gencol.reportinfo() - assert fspath == modcol.fspath - assert lineno == 1 - assert modpath == "test_gen" - - genitem = gencol.collect()[0] - fspath, lineno, modpath = genitem.reportinfo() - assert fspath == modcol.fspath - assert lineno == 2 - assert modpath == "test_gen[0]" - """ - def test_func(): - pass - def test_genfunc(): - def check(x): - pass - yield check, 3 - class TestClass(object): - def test_method(self): - pass - """ - def test_reportinfo_with_nasty_getattr(self, testdir): # https://github.com/pytest-dev/pytest/issues/1204 modcol = testdir.getmodulecol( @@ -1373,54 +1128,6 @@ def _test_underscore(): result.stdout.fnmatch_lines(["*1 passed*"]) -def test_collector_attributes(testdir): - testdir.makeconftest( - """ - import pytest - def pytest_pycollect_makeitem(collector): - assert collector.Function == pytest.Function - assert collector.Class == pytest.Class - assert collector.Instance == pytest.Instance - assert collector.Module == pytest.Module - """ - ) - testdir.makepyfile( - """ - def test_hello(): - pass - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*1 passed*"]) - - -def test_customize_through_attributes(testdir): - testdir.makeconftest( - """ - import pytest - class MyFunction(pytest.Function): - pass - class MyInstance(pytest.Instance): - Function = MyFunction - class MyClass(pytest.Class): - Instance = MyInstance - - def pytest_pycollect_makeitem(collector, name, obj): - if name.startswith("MyTestClass"): - return MyClass(name, parent=collector) - """ - ) - testdir.makepyfile( - """ - class MyTestClass(object): - def test_hello(self): - pass - """ - ) - result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*MyClass*", "*MyFunction*test_hello*"]) - - def test_unorderable_types(testdir): testdir.makepyfile( """ diff --git a/testing/python/fixture.py b/testing/python/fixture.py index b7ce16eb5c0..f5813b5d84f 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -1850,24 +1850,6 @@ def test_app(): reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=1) - def 
test_autouse_honored_for_yield(self, testdir): - testdir.makepyfile( - """ - import pytest - @pytest.fixture(autouse=True) - def tst(): - global x - x = 3 - def test_gen(): - def f(hello): - assert x == abs(hello) - yield f, 3 - yield f, -3 - """ - ) - reprec = testdir.inline_run(SHOW_PYTEST_WARNINGS_ARG) - reprec.assertoutcome(passed=2) - def test_funcarg_and_setup(self, testdir): testdir.makepyfile( """ diff --git a/testing/test_collection.py b/testing/test_collection.py index 473883b0d82..36e8a69ce8a 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -21,20 +21,6 @@ def test_collect_versus_item(self): assert not issubclass(Collector, Item) assert not issubclass(Item, Collector) - def test_compat_attributes(self, testdir, recwarn): - modcol = testdir.getmodulecol( - """ - def test_pass(): pass - def test_fail(): assert 0 - """ - ) - recwarn.clear() - assert modcol.Module == pytest.Module - assert modcol.Class == pytest.Class - assert modcol.Item == pytest.Item - assert modcol.File == pytest.File - assert modcol.Function == pytest.Function - def test_check_equality(self, testdir): modcol = testdir.getmodulecol( """ diff --git a/testing/test_nose.py b/testing/test_nose.py index e4db4680298..3e9966529b4 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -3,7 +3,6 @@ from __future__ import print_function import pytest -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG def setup_module(mod): @@ -162,73 +161,6 @@ def test_world(): result.stdout.fnmatch_lines(["*2 passed*"]) -def test_nose_test_generator_fixtures(testdir): - p = testdir.makepyfile( - """ - # taken from nose-0.11.1 unit_tests/test_generator_fixtures.py - from nose.tools import eq_ - called = [] - - def outer_setup(): - called.append('outer_setup') - - def outer_teardown(): - called.append('outer_teardown') - - def inner_setup(): - called.append('inner_setup') - - def inner_teardown(): - called.append('inner_teardown') - - def test_gen(): - called[:] = [] - for i in range(0, 5): - yield check, i - - def check(i): - expect = ['outer_setup'] - for x in range(0, i): - expect.append('inner_setup') - expect.append('inner_teardown') - expect.append('inner_setup') - eq_(called, expect) - - - test_gen.setup = outer_setup - test_gen.teardown = outer_teardown - check.setup = inner_setup - check.teardown = inner_teardown - - class TestClass(object): - def setup(self): - print("setup called in %s" % self) - self.called = ['setup'] - - def teardown(self): - print("teardown called in %s" % self) - eq_(self.called, ['setup']) - self.called.append('teardown') - - def test(self): - print("test called in %s" % self) - for i in range(0, 5): - yield self.check, i - - def check(self, i): - print("check called in %s" % self) - expect = ['setup'] - #for x in range(0, i): - # expect.append('setup') - # expect.append('teardown') - #expect.append('setup') - eq_(self.called, expect) - """ - ) - result = testdir.runpytest(p, "-p", "nose", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*10 passed*"]) - - def test_module_level_setup(testdir): testdir.makepyfile( """ diff --git a/testing/test_pdb.py b/testing/test_pdb.py index dd349454b11..9bb1ca54661 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -8,7 +8,6 @@ import _pytest._code import pytest -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG try: breakpoint @@ -809,27 +808,6 @@ def test_1(): assert "reading from stdin while output" not in rest TestPDB.flush(child) - def test_trace_against_yield_test(self, testdir): - p1 = 
testdir.makepyfile( - """ - def is_equal(a, b): - assert a == b - - def test_1(): - yield is_equal, 1, 1 - """ - ) - child = testdir.spawn_pytest( - "{} --trace {}".format(SHOW_PYTEST_WARNINGS_ARG, str(p1)) - ) - child.expect("is_equal") - child.expect("Pdb") - child.sendeof() - rest = child.read().decode("utf8") - assert "1 passed" in rest - assert "reading from stdin while output" not in rest - TestPDB.flush(child) - def test_trace_after_runpytest(testdir): """Test that debugging's pytest_configure is re-entrant.""" diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py index b0844dc1c01..6b5752b77ad 100644 --- a/testing/test_runner_xunit.py +++ b/testing/test_runner_xunit.py @@ -7,7 +7,6 @@ from __future__ import print_function import pytest -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG def test_module_and_function_setup(testdir): @@ -170,64 +169,6 @@ def test_cleanup(): reprec.assertoutcome(failed=1, passed=1) -def test_method_generator_setup(testdir): - reprec = testdir.inline_runsource( - """ - class TestSetupTeardownOnInstance(object): - def setup_class(cls): - cls.classsetup = True - - def setup_method(self, method): - self.methsetup = method - - def test_generate(self): - assert self.classsetup - assert self.methsetup == self.test_generate - yield self.generated, 5 - yield self.generated, 2 - - def generated(self, value): - assert self.classsetup - assert self.methsetup == self.test_generate - assert value == 5 - """, - SHOW_PYTEST_WARNINGS_ARG, - ) - reprec.assertoutcome(passed=1, failed=1) - - -def test_func_generator_setup(testdir): - reprec = testdir.inline_runsource( - """ - import sys - - def setup_module(mod): - print("setup_module") - mod.x = [] - - def setup_function(fun): - print("setup_function") - x.append(1) - - def teardown_function(fun): - print("teardown_function") - x.pop() - - def test_one(): - assert x == [1] - def check(): - print("check") - sys.stderr.write("e\\n") - assert x == [1] - yield check - assert x == [1] - """, - SHOW_PYTEST_WARNINGS_ARG, - ) - rep = reprec.matchreport("test_one", names="pytest_runtest_logreport") - assert rep.passed - - def test_method_setup_uses_fresh_instances(testdir): reprec = testdir.inline_runsource( """ diff --git a/testing/test_session.py b/testing/test_session.py index 32d71a18e08..d68fc9d4181 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -4,7 +4,6 @@ import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG class SessionTests(object): @@ -73,19 +72,6 @@ def test_raises_doesnt(): print(out) pytest.fail("incorrect raises() output") - def test_generator_yields_None(self, testdir): - reprec = testdir.inline_runsource( - """ - def test_1(): - yield None - """, - SHOW_PYTEST_WARNINGS_ARG, - ) - failures = reprec.getfailedcollections() - out = failures[0].longrepr.reprcrash.message - i = out.find("TypeError") - assert i != -1 - def test_syntax_error_module(self, testdir): reprec = testdir.inline_runsource("this is really not python") values = reprec.getfailedcollections() diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 0faf9b4010a..2a7a646ee60 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -20,7 +20,6 @@ from _pytest.terminal import getreportopt from _pytest.terminal import repr_pythonversion from _pytest.terminal import TerminalReporter -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"]) @@ -585,8 
+584,9 @@ def test_showlocals(): ] ) - def test_verbose_reporting(self, testdir, pytestconfig): - p1 = testdir.makepyfile( + @pytest.fixture + def verbose_testfile(self, testdir): + return testdir.makepyfile( """ import pytest def test_fail(): @@ -602,22 +602,32 @@ def check(x): yield check, 0 """ ) - result = testdir.runpytest(p1, "-v", SHOW_PYTEST_WARNINGS_ARG) + + def test_verbose_reporting(self, verbose_testfile, testdir, pytestconfig): + + result = testdir.runpytest( + verbose_testfile, "-v", "-Walways::pytest.PytestWarning" + ) result.stdout.fnmatch_lines( [ "*test_verbose_reporting.py::test_fail *FAIL*", "*test_verbose_reporting.py::test_pass *PASS*", "*test_verbose_reporting.py::TestClass::test_skip *SKIP*", - "*test_verbose_reporting.py::test_gen*0* *FAIL*", + "*test_verbose_reporting.py::test_gen *xfail*", ] ) assert result.ret == 1 + def test_verbose_reporting_xdist(self, verbose_testfile, testdir, pytestconfig): if not pytestconfig.pluginmanager.get_plugin("xdist"): pytest.skip("xdist plugin not installed") - result = testdir.runpytest(p1, "-v", "-n 1", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*FAIL*test_verbose_reporting.py::test_fail*"]) + result = testdir.runpytest( + verbose_testfile, "-v", "-n 1", "-Walways::pytest.PytestWarning" + ) + result.stdout.fnmatch_lines( + ["*FAIL*test_verbose_reporting_xdist.py::test_fail*"] + ) assert result.ret == 1 def test_quiet_reporting(self, testdir): From 847eacea19a02887fb6e63601908a37d7c2576a8 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Sat, 3 Nov 2018 21:50:13 +0100 Subject: [PATCH 33/98] refactor CallInfo constructor magic into named constructor --- src/_pytest/nose.py | 4 ++- src/_pytest/runner.py | 61 ++++++++++++++++++++++++++---------------- testing/test_runner.py | 14 ++-------- 3 files changed, 43 insertions(+), 36 deletions(-) diff --git a/src/_pytest/nose.py b/src/_pytest/nose.py index 416381bb528..6facc547fdf 100644 --- a/src/_pytest/nose.py +++ b/src/_pytest/nose.py @@ -23,7 +23,9 @@ def get_skip_exceptions(): def pytest_runtest_makereport(item, call): if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): # let's substitute the excinfo with a pytest.skip one - call2 = call.__class__(lambda: runner.skip(str(call.excinfo.value)), call.when) + call2 = runner.CallInfo.from_call( + lambda: runner.skip(str(call.excinfo.value)), call.when + ) call.excinfo = call2.excinfo diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py index 9ea1a07cd5d..27f244a80e0 100644 --- a/src/_pytest/runner.py +++ b/src/_pytest/runner.py @@ -8,6 +8,7 @@ import sys from time import time +import attr import six from .reports import CollectErrorRepr @@ -189,43 +190,57 @@ def check_interactive_exception(call, report): def call_runtest_hook(item, when, **kwds): hookname = "pytest_runtest_" + when ihook = getattr(item.ihook, hookname) - return CallInfo( + return CallInfo.from_call( lambda: ihook(item=item, **kwds), when=when, - treat_keyboard_interrupt_as_exception=item.config.getvalue("usepdb"), + reraise=KeyboardInterrupt if not item.config.getvalue("usepdb") else (), ) +@attr.s(repr=False) class CallInfo(object): """ Result/Exception info a function invocation. """ - #: None or ExceptionInfo object. 
-    excinfo = None
-
-    def __init__(self, func, when, treat_keyboard_interrupt_as_exception=False):
+    _result = attr.ib()
+    # type: Optional[ExceptionInfo]
+    excinfo = attr.ib()
+    start = attr.ib()
+    stop = attr.ib()
+    when = attr.ib()
+
+    @property
+    def result(self):
+        if self.excinfo is not None:
+            raise AttributeError("{!r} has no valid result".format(self))
+        return self._result
+
+    @classmethod
+    def from_call(cls, func, when, reraise=None):
         #: context of invocation: one of "setup", "call",
         #: "teardown", "memocollect"
-        self.when = when
-        self.start = time()
+        start = time()
+        excinfo = None
         try:
-            self.result = func()
-        except KeyboardInterrupt:
-            if treat_keyboard_interrupt_as_exception:
-                self.excinfo = ExceptionInfo.from_current()
-            else:
-                self.stop = time()
-                raise
+            result = func()
         except: # noqa
-            self.excinfo = ExceptionInfo.from_current()
-        self.stop = time()
+            excinfo = ExceptionInfo.from_current()
+            if reraise is not None and excinfo.errisinstance(reraise):
+                raise
+            result = None
+        stop = time()
+        return cls(start=start, stop=stop, when=when, result=result, excinfo=excinfo)

     def __repr__(self):
-        if self.excinfo:
-            status = "exception: %s" % str(self.excinfo.value)
+        if self.excinfo is not None:
+            status = "exception"
+            value = self.excinfo.value
         else:
-            result = getattr(self, "result", "<NOTSET>")
-            status = "result: %r" % (result,)
+            # TODO: investigate unification
+            value = repr(self._result)
+            status = "result"
-        return "<CallInfo when=%r %s>" % (self.when, status)
+        return "<CallInfo when={when!r} {status}: {value}>".format(
+            when=self.when, value=value, status=status
+        )

 def pytest_runtest_makereport(item, call):
@@ -269,7 +284,7 @@ def pytest_runtest_makereport(item, call):

 def pytest_make_collect_report(collector):
-    call = CallInfo(lambda: list(collector.collect()), "collect")
+    call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
     longrepr = None
     if not call.excinfo:
         outcome = "passed"
diff --git a/testing/test_runner.py b/testing/test_runner.py
index d76f3da9b7e..916c2ea4ae3 100644
--- a/testing/test_runner.py
+++ b/testing/test_runner.py
@@ -487,13 +487,13 @@ def test_report_extra_parameters(reporttype):

 def test_callinfo():
-    ci = runner.CallInfo(lambda: 0, "123")
+    ci = runner.CallInfo.from_call(lambda: 0, "123")
     assert ci.when == "123"
     assert ci.result == 0
     assert "result" in repr(ci)
     assert repr(ci) == ""
-    ci = runner.CallInfo(lambda: 0 / 0, "123")
+    ci = runner.CallInfo.from_call(lambda: 0 / 0, "123")
     assert ci.when == "123"
     assert not hasattr(ci, "result")
     assert repr(ci) == ""
@@ -501,16 +501,6 @@ def test_callinfo():
     assert "exc" in repr(ci)

-def test_callinfo_repr_while_running():
-    def repr_while_running():
-        f = sys._getframe().f_back
-        assert "func" in f.f_locals
-        assert repr(f.f_locals["self"]) == "'>"
-
-    ci = runner.CallInfo(repr_while_running, "when")
-    assert repr(ci) == ""
-
-
 # design question: do we want general hooks in python files?
# then something like the following functional tests makes sense From b05061dcd27516da907ce62964263fd91f006ade Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 22 Nov 2018 21:14:53 -0200 Subject: [PATCH 34/98] Change deprecation type of --result-log to PytestDeprecationWarning It was decided to deprecate this on a later date (see discussion in #4447) --- changelog/4447.trivial.rst | 3 +++ doc/en/deprecations.rst | 4 ++++ doc/en/usage.rst | 10 +++------- src/_pytest/deprecated.py | 6 +++--- testing/deprecated_test.py | 4 ++-- 5 files changed, 15 insertions(+), 12 deletions(-) create mode 100644 changelog/4447.trivial.rst diff --git a/changelog/4447.trivial.rst b/changelog/4447.trivial.rst new file mode 100644 index 00000000000..884ccd4c9a1 --- /dev/null +++ b/changelog/4447.trivial.rst @@ -0,0 +1,3 @@ +Changed the deprecation type of ``--result-log`` to ``PytestDeprecationWarning``. + +It was decided to remove this feature at the next major revision. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 229bca1718b..dbcb55271ab 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -303,6 +303,10 @@ Result log (``--result-log``) The ``--resultlog`` command line option has been deprecated: it is little used and there are more modern and better alternatives, for example `pytest-tap `_. +This feature will be effectively removed in pytest 4.0 as the team intends to include a better alternative in the core. + +If you have any concerns, please don't hesitate to `open an issue `__. + Removed Features ---------------- diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 6c42cd0ec85..49c2aa577b8 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -483,14 +483,10 @@ Creating resultlog format files .. deprecated:: 3.0 - This option is rarely used and is scheduled for removal in 4.0. + This option is rarely used and is scheduled for removal in 5.0. - An alternative for users which still need similar functionality is to use the - `pytest-tap `_ plugin which provides - a stream of test data. - - If you have any concerns, please don't hesitate to - `open an issue `_. + See `the deprecation docs `__ + for more information. To create plain-text machine-readable result files you can issue:: diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index ba7426a1193..23b32c3e047 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -56,9 +56,9 @@ "getfuncargvalue is deprecated, use getfixturevalue" ) -RESULT_LOG = RemovedInPytest4Warning( - "--result-log is deprecated and scheduled for removal in pytest 4.0.\n" - "See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information." +RESULT_LOG = PytestDeprecationWarning( + "--result-log is deprecated and scheduled for removal in pytest 5.0.\n" + "See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information." 
) MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index bc2e8378b13..8638da05a52 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -120,8 +120,8 @@ def test(): result = testdir.runpytest("--result-log=%s" % testdir.tmpdir.join("result.log")) result.stdout.fnmatch_lines( [ - "*--result-log is deprecated and scheduled for removal in pytest 4.0*", - "*See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information*", + "*--result-log is deprecated and scheduled for removal in pytest 5.0*", + "*See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information*", ] ) From 090f7ff44907ab1b3317bf5c19b36fabb6a84b8b Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 1 Dec 2018 15:58:55 -0200 Subject: [PATCH 35/98] Remove request.cached_setup Fix #4489 --- changelog/4489.removal.rst | 3 + doc/en/deprecations.rst | 68 +++++++------- src/_pytest/deprecated.py | 5 - src/_pytest/fixtures.py | 37 -------- testing/deprecated_test.py | 21 ----- testing/python/fixture.py | 188 +------------------------------------ 6 files changed, 38 insertions(+), 284 deletions(-) create mode 100644 changelog/4489.removal.rst diff --git a/changelog/4489.removal.rst b/changelog/4489.removal.rst new file mode 100644 index 00000000000..4236204650f --- /dev/null +++ b/changelog/4489.removal.rst @@ -0,0 +1,3 @@ +Removed ``request.cached_setup``. This was the predecessor mechanism to modern fixtures. + +See our `docs `__ on information on how to update your code. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 229bca1718b..b6ec4f245f2 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -49,41 +49,6 @@ Becomes: exec("assert(1, 2)") # exec is used to avoid a top-level warning - -``cached_setup`` -~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.9 - -``request.cached_setup`` was the precursor of the setup/teardown mechanism available to fixtures. - -Example: - -.. code-block:: python - - @pytest.fixture - def db_session(): - return request.cached_setup( - setup=Session.create, teardown=lambda session: session.close(), scope="module" - ) - -This should be updated to make use of standard fixture mechanisms: - -.. code-block:: python - - @pytest.fixture(scope="module") - def db_session(): - session = Session.create() - yield session - session.close() - - -You can consult `funcarg comparison section in the docs `_ for -more information. - -This has been documented as deprecated for years, but only now we are actually emitting deprecation warnings. - - Using ``Class`` in custom Collectors ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -309,6 +274,39 @@ Removed Features As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after an appropriate period of deprecation has passed. +``cached_setup`` +~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +``request.cached_setup`` was the precursor of the setup/teardown mechanism available to fixtures. + +Example: + +.. code-block:: python + + @pytest.fixture + def db_session(): + return request.cached_setup( + setup=Session.create, teardown=lambda session: session.close(), scope="module" + ) + +This should be updated to make use of standard fixture mechanisms: + +.. 
code-block:: python + + @pytest.fixture(scope="module") + def db_session(): + session = Session.create() + yield session + session.close() + + +You can consult `funcarg comparison section in the docs `_ for +more information. + +This has been documented as deprecated for years, but only now we are actually emitting deprecation warnings. + ``yield`` tests ~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index ba7426a1193..94ba6d0370e 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -24,11 +24,6 @@ YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" -CACHED_SETUP = RemovedInPytest4Warning( - "cached_setup is deprecated and will be removed in a future release. " - "Use standard fixture functions instead." -) - FUNCARG_PREFIX = UnformattedWarning( RemovedInPytest4Warning, '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 924352afb0f..1519d2fbc18 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -469,43 +469,6 @@ def _fillfixtures(self): if argname not in item.funcargs: item.funcargs[argname] = self.getfixturevalue(argname) - def cached_setup(self, setup, teardown=None, scope="module", extrakey=None): - """ (deprecated) Return a testing resource managed by ``setup`` & - ``teardown`` calls. ``scope`` and ``extrakey`` determine when the - ``teardown`` function will be called so that subsequent calls to - ``setup`` would recreate the resource. With pytest-2.3 you often - do not need ``cached_setup()`` as you can directly declare a scope - on a fixture function and register a finalizer through - ``request.addfinalizer()``. - - :arg teardown: function receiving a previously setup resource. - :arg setup: a no-argument function creating a resource. - :arg scope: a string value out of ``function``, ``class``, ``module`` - or ``session`` indicating the caching lifecycle of the resource. - :arg extrakey: added to internal caching key of (funcargname, scope). - """ - from _pytest.deprecated import CACHED_SETUP - - warnings.warn(CACHED_SETUP, stacklevel=2) - if not hasattr(self.config, "_setupcache"): - self.config._setupcache = {} # XXX weakref? - cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) - cache = self.config._setupcache - try: - val = cache[cachekey] - except KeyError: - self._check_scope(self.fixturename, self.scope, scope) - val = setup() - cache[cachekey] = val - if teardown is not None: - - def finalizer(): - del cache[cachekey] - teardown(val) - - self._addfinalizer(finalizer, scope=scope) - return val - def getfixturevalue(self, argname): """ Dynamically run a named fixture function. 
diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index bc2e8378b13..aa3c5e58b5a 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -10,27 +10,6 @@ pytestmark = pytest.mark.pytester_example_path("deprecated") -def test_cached_setup_deprecation(testdir): - testdir.makepyfile( - """ - import pytest - @pytest.fixture - def fix(request): - return request.cached_setup(lambda: 1) - - def test_foo(fix): - assert fix == 1 - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - [ - "*test_cached_setup_deprecation.py:4:*cached_setup is deprecated*", - "*1 passed, 1 warnings in*", - ] - ) - - def test_funcarg_prefix_deprecation(testdir): testdir.makepyfile( """ diff --git a/testing/python/fixture.py b/testing/python/fixture.py index f5813b5d84f..2cc4122b446 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -953,181 +953,6 @@ def test_fun2(keywords): reprec.assertoutcome(passed=2) -class TestRequestCachedSetup(object): - def test_request_cachedsetup_defaultmodule(self, testdir): - reprec = testdir.inline_runsource( - """ - mysetup = ["hello",].pop - - import pytest - - @pytest.fixture - def something(request): - return request.cached_setup(mysetup, scope="module") - - def test_func1(something): - assert something == "hello" - class TestClass(object): - def test_func1a(self, something): - assert something == "hello" - """, - SHOW_PYTEST_WARNINGS_ARG, - ) - reprec.assertoutcome(passed=2) - - def test_request_cachedsetup_class(self, testdir): - reprec = testdir.inline_runsource( - """ - mysetup = ["hello", "hello2", "hello3"].pop - - import pytest - @pytest.fixture - def something(request): - return request.cached_setup(mysetup, scope="class") - def test_func1(something): - assert something == "hello3" - def test_func2(something): - assert something == "hello2" - class TestClass(object): - def test_func1a(self, something): - assert something == "hello" - def test_func2b(self, something): - assert something == "hello" - """, - SHOW_PYTEST_WARNINGS_ARG, - ) - reprec.assertoutcome(passed=4) - - @pytest.mark.filterwarnings("ignore:cached_setup is deprecated") - def test_request_cachedsetup_extrakey(self, testdir): - item1 = testdir.getitem("def test_func(): pass") - req1 = fixtures.FixtureRequest(item1) - values = ["hello", "world"] - - def setup(): - return values.pop() - - ret1 = req1.cached_setup(setup, extrakey=1) - ret2 = req1.cached_setup(setup, extrakey=2) - assert ret2 == "hello" - assert ret1 == "world" - ret1b = req1.cached_setup(setup, extrakey=1) - ret2b = req1.cached_setup(setup, extrakey=2) - assert ret1 == ret1b - assert ret2 == ret2b - - @pytest.mark.filterwarnings("ignore:cached_setup is deprecated") - def test_request_cachedsetup_cache_deletion(self, testdir): - item1 = testdir.getitem("def test_func(): pass") - req1 = fixtures.FixtureRequest(item1) - values = [] - - def setup(): - values.append("setup") - - def teardown(val): - values.append("teardown") - - req1.cached_setup(setup, teardown, scope="function") - assert values == ["setup"] - # artificial call of finalizer - setupstate = req1._pyfuncitem.session._setupstate - setupstate._callfinalizers(item1) - assert values == ["setup", "teardown"] - req1.cached_setup(setup, teardown, scope="function") - assert values == ["setup", "teardown", "setup"] - setupstate._callfinalizers(item1) - assert values == ["setup", "teardown", "setup", "teardown"] - - def test_request_cached_setup_two_args(self, testdir): - testdir.makepyfile( - """ 
- import pytest - - @pytest.fixture - def arg1(request): - return request.cached_setup(lambda: 42) - @pytest.fixture - def arg2(request): - return request.cached_setup(lambda: 17) - def test_two_different_setups(arg1, arg2): - assert arg1 != arg2 - """ - ) - result = testdir.runpytest("-v", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*1 passed*"]) - - def test_request_cached_setup_getfixturevalue(self, testdir): - testdir.makepyfile( - """ - import pytest - - @pytest.fixture - def arg1(request): - arg1 = request.getfixturevalue("arg2") - return request.cached_setup(lambda: arg1 + 1) - @pytest.fixture - def arg2(request): - return request.cached_setup(lambda: 10) - def test_two_funcarg(arg1): - assert arg1 == 11 - """ - ) - result = testdir.runpytest("-v", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*1 passed*"]) - - def test_request_cached_setup_functional(self, testdir): - testdir.makepyfile( - test_0=""" - import pytest - values = [] - @pytest.fixture - def something(request): - val = request.cached_setup(fsetup, fteardown) - return val - def fsetup(mycache=[1]): - values.append(mycache.pop()) - return values - def fteardown(something): - values.remove(something[0]) - values.append(2) - def test_list_once(something): - assert something == [1] - def test_list_twice(something): - assert something == [1] - """ - ) - testdir.makepyfile( - test_1=""" - import test_0 # should have run already - def test_check_test0_has_teardown_correct(): - assert test_0.values == [2] - """ - ) - result = testdir.runpytest("-v", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines(["*3 passed*"]) - - def test_issue117_sessionscopeteardown(self, testdir): - testdir.makepyfile( - """ - import pytest - - @pytest.fixture - def app(request): - app = request.cached_setup( - scope='session', - setup=lambda: 0, - teardown=lambda x: 3/x) - return app - def test_func(app): - pass - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - assert result.ret != 0 - result.stdout.fnmatch_lines(["*3/x*", "*ZeroDivisionError*"]) - - class TestFixtureUsages(object): def test_noargfixturedec(self, testdir): testdir.makepyfile( @@ -2297,15 +2122,7 @@ def test_4(arg, created, finalized): reprec = testdir.inline_run() reprec.assertoutcome(passed=4) - @pytest.mark.parametrize( - "method", - [ - 'request.getfixturevalue("arg")', - 'request.cached_setup(lambda: None, scope="function")', - ], - ids=["getfixturevalue", "cached_setup"], - ) - def test_scope_mismatch_various(self, testdir, method): + def test_scope_mismatch_various(self, testdir): testdir.makeconftest( """ import pytest @@ -2321,11 +2138,10 @@ def arg(request): import pytest @pytest.fixture(scope="session") def arg(request): - %s + request.getfixturevalue("arg") def test_1(arg): pass """ - % method ) result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) assert result.ret != 0 From 40b85d7ee89388245e7bb45b044993251c71664c Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 1 Dec 2018 16:41:59 -0200 Subject: [PATCH 36/98] Remove Metafunc.addcall Fix #3083 --- changelog/3083.removal.rst | 3 + doc/en/deprecations.rst | 31 +++++-- src/_pytest/deprecated.py | 5 -- src/_pytest/fixtures.py | 3 +- src/_pytest/python.py | 42 ---------- testing/acceptance_test.py | 5 +- testing/deprecated_test.py | 17 ---- testing/python/metafunc.py | 164 ++----------------------------------- 8 files changed, 37 insertions(+), 233 deletions(-) create mode 100644 changelog/3083.removal.rst diff --git a/changelog/3083.removal.rst b/changelog/3083.removal.rst 
new file mode 100644 index 00000000000..ce689b94adb --- /dev/null +++ b/changelog/3083.removal.rst @@ -0,0 +1,3 @@ +Remove ``Metafunc.addcall``. This was the predecessor mechanism to ``@pytest.mark.parametrize``. + +See our `docs `__ on information on how to update your code. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index b6ec4f245f2..5f57b51d201 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -168,13 +168,6 @@ Defining ``pytest_plugins`` is now deprecated in non-top-level conftest.py files because they will activate referenced plugins *globally*, which is surprising because for all other pytest features ``conftest.py`` files are only *active* for tests at or below it. -Metafunc.addcall -~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.3 - -:meth:`_pytest.python.Metafunc.addcall` was a precursor to the current parametrized mechanism. Users should use -:meth:`_pytest.python.Metafunc.parametrize` instead. marks in ``pytest.mark.parametrize`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -274,6 +267,30 @@ Removed Features As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after an appropriate period of deprecation has passed. +Metafunc.addcall +~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +:meth:`_pytest.python.Metafunc.addcall` was a precursor to the current parametrized mechanism. Users should use +:meth:`_pytest.python.Metafunc.parametrize` instead. + +Example: + +.. code-block:: python + + def pytest_generate_tests(metafunc): + metafunc.addcall({"i": 1}, id="1") + metafunc.addcall({"i": 2}, id="2") + +Becomes: + +.. code-block:: python + + def pytest_generate_tests(metafunc): + metafunc.parametrize("i", [1, 2], ids=["1", "2"]) + + ``cached_setup`` ~~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 94ba6d0370e..00e066ff00f 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -91,11 +91,6 @@ "pycollector makeitem was removed as it is an accidentially leaked internal api" ) -METAFUNC_ADD_CALL = RemovedInPytest4Warning( - "Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n" - "Please use Metafunc.parametrize instead." -) - PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( "Defining pytest_plugins in a non-top-level conftest is deprecated, " "because it affects the entire directory tree in a non-explicit way.\n" diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 1519d2fbc18..3e5ad5f732b 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -568,8 +568,7 @@ def _compute_fixture_value(self, fixturedef): ) fail(msg, pytrace=False) else: - # indices might not be set if old-style metafunc.addcall() was used - param_index = funcitem.callspec.indices.get(argname, 0) + param_index = funcitem.callspec.indices[argname] # if a parametrize invocation set a scope it will override # the static scope defined with the fixture function paramscopenum = funcitem.callspec._arg2scopenum.get(argname) diff --git a/src/_pytest/python.py b/src/_pytest/python.py index 7ddcc2762a6..3db36fb1982 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -1022,48 +1022,6 @@ def _validate_if_using_arg_names(self, argnames, indirect): pytrace=False, ) - def addcall(self, funcargs=None, id=NOTSET, param=NOTSET): - """ Add a new call to the underlying test function during the collection phase of a test run. - - .. deprecated:: 3.3 - - Use :meth:`parametrize` instead. 
- - Note that request.addcall() is called during the test collection phase prior and - independently to actual test execution. You should only use addcall() - if you need to specify multiple arguments of a test function. - - :arg funcargs: argument keyword dictionary used when invoking - the test function. - - :arg id: used for reporting and identification purposes. If you - don't supply an `id` an automatic unique id will be generated. - - :arg param: a parameter which will be exposed to a later fixture function - invocation through the ``request.param`` attribute. - """ - warnings.warn(deprecated.METAFUNC_ADD_CALL, stacklevel=2) - - assert funcargs is None or isinstance(funcargs, dict) - if funcargs is not None: - for name in funcargs: - if name not in self.fixturenames: - fail("funcarg %r not used in this function." % name) - else: - funcargs = {} - if id is None: - raise ValueError("id=None not allowed") - if id is NOTSET: - id = len(self._calls) - id = str(id) - if id in self._ids: - raise ValueError("duplicate id %r" % id) - self._ids.add(id) - - cs = CallSpec2(self) - cs.setall(funcargs, id, param) - self._calls.append(cs) - def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): """Find the most appropriate scope for a parametrized call based on its arguments. diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index b23cd7ca89d..0b7af5338d6 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -299,7 +299,7 @@ def test_skip_on_generated_funcarg_id(self, testdir): """ import pytest def pytest_generate_tests(metafunc): - metafunc.addcall({'x': 3}, id='hello-123') + metafunc.parametrize('x', [3], ids=['hello-123']) def pytest_runtest_setup(item): print(item.keywords) if 'hello-123' in item.keywords: @@ -316,8 +316,7 @@ def test_direct_addressing_selects(self, testdir): p = testdir.makepyfile( """ def pytest_generate_tests(metafunc): - metafunc.addcall({'i': 1}, id="1") - metafunc.addcall({'i': 2}, id="2") + metafunc.parametrize('i', [1, 2], ids=["1", "2"]) def test_func(i): pass """ diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index aa3c5e58b5a..71ea600502b 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -105,23 +105,6 @@ def test(): ) -def test_metafunc_addcall_deprecated(testdir): - testdir.makepyfile( - """ - def pytest_generate_tests(metafunc): - metafunc.addcall({'i': 1}) - metafunc.addcall({'i': 2}) - def test_func(i): - pass - """ - ) - res = testdir.runpytest("-s", SHOW_PYTEST_WARNINGS_ARG) - assert res.ret == 0 - res.stdout.fnmatch_lines( - ["*Metafunc.addcall is deprecated*", "*2 passed, 2 warnings*"] - ) - - def test_terminal_reporter_writer_attr(pytestconfig): """Check that TerminalReporter._tw is also available as 'writer' (#2984) This attribute is planned to be deprecated in 3.4. 
diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 833eb564189..7f9cdb5cc37 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -54,66 +54,6 @@ def func(arg1, arg2="qwe"): assert metafunc.function is func assert metafunc.cls is None - def test_addcall_no_args(self): - def func(arg1): - pass - - metafunc = self.Metafunc(func) - metafunc.addcall() - assert len(metafunc._calls) == 1 - call = metafunc._calls[0] - assert call.id == "0" - assert not hasattr(call, "param") - - def test_addcall_id(self): - def func(arg1): - pass - - metafunc = self.Metafunc(func) - pytest.raises(ValueError, metafunc.addcall, id=None) - - metafunc.addcall(id=1) - pytest.raises(ValueError, metafunc.addcall, id=1) - pytest.raises(ValueError, metafunc.addcall, id="1") - metafunc.addcall(id=2) - assert len(metafunc._calls) == 2 - assert metafunc._calls[0].id == "1" - assert metafunc._calls[1].id == "2" - - def test_addcall_param(self): - def func(arg1): - pass - - metafunc = self.Metafunc(func) - - class obj(object): - pass - - metafunc.addcall(param=obj) - metafunc.addcall(param=obj) - metafunc.addcall(param=1) - assert len(metafunc._calls) == 3 - assert metafunc._calls[0].getparam("arg1") == obj - assert metafunc._calls[1].getparam("arg1") == obj - assert metafunc._calls[2].getparam("arg1") == 1 - - def test_addcall_funcargs(self): - def func(x): - pass - - metafunc = self.Metafunc(func) - - class obj(object): - pass - - metafunc.addcall(funcargs={"x": 2}) - metafunc.addcall(funcargs={"x": 3}) - pytest.raises(pytest.fail.Exception, metafunc.addcall, {"xyz": 0}) - assert len(metafunc._calls) == 2 - assert metafunc._calls[0].funcargs == {"x": 2} - assert metafunc._calls[1].funcargs == {"x": 3} - assert not hasattr(metafunc._calls[1], "param") - def test_parametrize_error(self): def func(x, y): pass @@ -508,19 +448,6 @@ def test_idmaker_with_ids_unique_names(self): ) assert result == ["a0", "a1", "b0", "c", "b1"] - def test_addcall_and_parametrize(self): - def func(x, y): - pass - - metafunc = self.Metafunc(func) - metafunc.addcall({"x": 1}) - metafunc.parametrize("y", [2, 3]) - assert len(metafunc._calls) == 2 - assert metafunc._calls[0].funcargs == {"x": 1, "y": 2} - assert metafunc._calls[1].funcargs == {"x": 1, "y": 3} - assert metafunc._calls[0].id == "0-2" - assert metafunc._calls[1].id == "0-3" - @pytest.mark.issue714 def test_parametrize_indirect(self): def func(x, y): @@ -710,20 +637,6 @@ def test_simple(x, y=1): ["*already takes an argument 'y' with a default value"] ) - def test_addcalls_and_parametrize_indirect(self): - def func(x, y): - pass - - metafunc = self.Metafunc(func) - metafunc.addcall(param="123") - metafunc.parametrize("x", [1], indirect=True) - metafunc.parametrize("y", [2, 3], indirect=True) - assert len(metafunc._calls) == 2 - assert metafunc._calls[0].funcargs == {} - assert metafunc._calls[1].funcargs == {} - assert metafunc._calls[0].params == dict(x=1, y=2) - assert metafunc._calls[1].params == dict(x=1, y=3) - def test_parametrize_functional(self, testdir): testdir.makepyfile( """ @@ -871,7 +784,7 @@ def test_attributes(self, testdir): # assumes that generate/provide runs in the same process import sys, pytest, six def pytest_generate_tests(metafunc): - metafunc.addcall(param=metafunc) + metafunc.parametrize('metafunc', [metafunc]) @pytest.fixture def metafunc(request): @@ -896,43 +809,15 @@ def test_method(self, metafunc, pytestconfig): result = testdir.runpytest(p, "-v", SHOW_PYTEST_WARNINGS_ARG) result.assert_outcomes(passed=2) - def 
test_addcall_with_two_funcargs_generators(self, testdir): - testdir.makeconftest( - """ - def pytest_generate_tests(metafunc): - assert "arg1" in metafunc.fixturenames - metafunc.addcall(funcargs=dict(arg1=1, arg2=2)) - """ - ) - p = testdir.makepyfile( - """ - def pytest_generate_tests(metafunc): - metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) - - class TestClass(object): - def test_myfunc(self, arg1, arg2): - assert arg1 == arg2 - """ - ) - result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - ["*test_myfunc*0*PASS*", "*test_myfunc*1*FAIL*", "*1 failed, 1 passed*"] - ) - def test_two_functions(self, testdir): p = testdir.makepyfile( """ def pytest_generate_tests(metafunc): - metafunc.addcall(param=10) - metafunc.addcall(param=20) - - import pytest - @pytest.fixture - def arg1(request): - return request.param + metafunc.parametrize('arg1', [10, 20], ids=['0', '1']) def test_func1(arg1): assert arg1 == 10 + def test_func2(arg1): assert arg1 in (10, 20) """ @@ -943,6 +828,7 @@ def test_func2(arg1): "*test_func1*0*PASS*", "*test_func1*1*FAIL*", "*test_func2*PASS*", + "*test_func2*PASS*", "*1 failed, 3 passed*", ] ) @@ -961,47 +847,12 @@ def test_hello(xyz): result = testdir.runpytest(p) result.assert_outcomes(passed=1) - def test_generate_plugin_and_module(self, testdir): - testdir.makeconftest( - """ - def pytest_generate_tests(metafunc): - assert "arg1" in metafunc.fixturenames - metafunc.addcall(id="world", param=(2,100)) - """ - ) - p = testdir.makepyfile( - """ - def pytest_generate_tests(metafunc): - metafunc.addcall(param=(1,1), id="hello") - - import pytest - @pytest.fixture - def arg1(request): - return request.param[0] - @pytest.fixture - def arg2(request): - return request.param[1] - - class TestClass(object): - def test_myfunc(self, arg1, arg2): - assert arg1 == arg2 - """ - ) - result = testdir.runpytest("-v", p, SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - [ - "*test_myfunc*hello*PASS*", - "*test_myfunc*world*FAIL*", - "*1 failed, 1 passed*", - ] - ) - def test_generate_tests_in_class(self, testdir): p = testdir.makepyfile( """ class TestClass(object): def pytest_generate_tests(self, metafunc): - metafunc.addcall(funcargs={'hello': 'world'}, id="hello") + metafunc.parametrize('hello', ['world'], ids=['hellow']) def test_myfunc(self, hello): assert hello == "world" @@ -1014,8 +865,7 @@ def test_two_functions_not_same_instance(self, testdir): p = testdir.makepyfile( """ def pytest_generate_tests(metafunc): - metafunc.addcall({'arg1': 10}) - metafunc.addcall({'arg1': 20}) + metafunc.parametrize('arg1', [10, 20], ids=["0", "1"]) class TestClass(object): def test_func(self, arg1): @@ -1032,7 +882,7 @@ def test_issue28_setup_method_in_generate_tests(self, testdir): p = testdir.makepyfile( """ def pytest_generate_tests(metafunc): - metafunc.addcall({'arg1': 1}) + metafunc.parametrize('arg1', [1]) class TestClass(object): def test_method(self, arg1): From 4ffa13728dd64ee3131f718b0d3e58dc00d29b72 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 1 Dec 2018 17:33:48 -0200 Subject: [PATCH 37/98] Add links to the deprecations docs for the "removal" changelog entries --- changelog/3079.removal.rst | 4 +++- changelog/3616.removal.rst | 4 +++- changelog/4421.removal.rst | 4 +++- 3 files changed, 9 insertions(+), 3 deletions(-) diff --git a/changelog/3079.removal.rst b/changelog/3079.removal.rst index c289176d20b..19a8612f0c7 100644 --- a/changelog/3079.removal.rst +++ b/changelog/3079.removal.rst @@ -1 +1,3 @@ -Remove support for 
yield tests - they are fundamentally broken since collection and test execution were separated. +Remove support for yield tests - they are fundamentally broken because they don't support fixtures properly since collection and test execution were separated. + +See our `docs `__ on information on how to update your code. diff --git a/changelog/3616.removal.rst b/changelog/3616.removal.rst index a88c4534a45..a8f2f1c92a9 100644 --- a/changelog/3616.removal.rst +++ b/changelog/3616.removal.rst @@ -1 +1,3 @@ -Remove the deprecated compat properties for node.Class/Function/Module - use pytest... now. +Remove the deprecated compat properties for ``node.Class/Function/Module`` - use ``pytest.Class/Function/Module`` now. + +See our `docs `__ on information on how to update your code. diff --git a/changelog/4421.removal.rst b/changelog/4421.removal.rst index be0704faad4..279587d06da 100644 --- a/changelog/4421.removal.rst +++ b/changelog/4421.removal.rst @@ -1 +1,3 @@ -Remove the implementation of the pytest_namespace hook. +Remove the implementation of the ``pytest_namespace`` hook. + +See our `docs `__ on information on how to update your code. From b88c3f8f828baa8b1b719a587773671db70c87b0 Mon Sep 17 00:00:00 2001 From: Anthony Sottile Date: Sun, 2 Dec 2018 20:09:04 -0800 Subject: [PATCH 38/98] Deprecate pytest.config --- changelog/3050.deprecation.rst | 1 + doc/en/deprecations.rst | 9 +++++++++ src/_pytest/deprecated.py | 4 ++++ src/_pytest/main.py | 20 +++++++++++++++++++- testing/test_terminal.py | 4 ++-- 5 files changed, 35 insertions(+), 3 deletions(-) create mode 100644 changelog/3050.deprecation.rst diff --git a/changelog/3050.deprecation.rst b/changelog/3050.deprecation.rst new file mode 100644 index 00000000000..2da417c85a3 --- /dev/null +++ b/changelog/3050.deprecation.rst @@ -0,0 +1 @@ +Deprecate ``pytest.config`` global. See https://docs.pytest.org/en/latest/deprecations.html#pytest-config-global diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 80b819e9780..ca72bfbfff7 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -14,6 +14,15 @@ Below is a complete list of all pytest features which are considered deprecated. :class:`_pytest.warning_types.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters `. +``pytest.config`` global +~~~~~~~~~~~~~~~~~~~~~~~~ + +.. deprecated:: 4.1 + +The ``pytest.config`` global object is deprecated. Instead use +``request.config`` (via the ``request`` fixture) or if you are a plugin author +use the ``pytest_configure(config)`` hook. + .. _raises-warns-exec: ``raises`` / ``warns`` with a string as the second argument diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 7c9ff942564..0d3dc85cc62 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -97,6 +97,10 @@ "Please move it to the top level conftest file instead." ) +PYTEST_CONFIG_GLOBAL = PytestDeprecationWarning( + "the `pytest.config` global is deprecated. Please use `request.config` " + "or `pytest_configure` (if you're a pytest plugin) instead." 
+) PYTEST_ENSURETEMP = RemovedInPytest4Warning( "pytest/tmpdir_factory.ensuretemp is deprecated, \n" diff --git a/src/_pytest/main.py b/src/_pytest/main.py index 851b08ae39c..08490f03aa6 100644 --- a/src/_pytest/main.py +++ b/src/_pytest/main.py @@ -8,6 +8,7 @@ import os import pkgutil import sys +import warnings import attr import py @@ -18,6 +19,7 @@ from _pytest.config import directory_arg from _pytest.config import hookimpl from _pytest.config import UsageError +from _pytest.deprecated import PYTEST_CONFIG_GLOBAL from _pytest.outcomes import exit from _pytest.runner import collect_one_node @@ -167,8 +169,24 @@ def pytest_addoption(parser): ) +class _ConfigDeprecated(object): + def __init__(self, config): + self.__dict__["_config"] = config + + def __getattr__(self, attr): + warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2) + return getattr(self._config, attr) + + def __setattr__(self, attr, val): + warnings.warn(PYTEST_CONFIG_GLOBAL, stacklevel=2) + return setattr(self._config, attr, val) + + def __repr__(self): + return "{}({!r})".format(type(self).__name__, self._config) + + def pytest_configure(config): - __import__("pytest").config = config # compatibility + __import__("pytest").config = _ConfigDeprecated(config) # compatibility def wrap_session(config, doit): diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 2a7a646ee60..20f4f007809 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -539,7 +539,7 @@ def test_method(self): result.stdout.fnmatch_lines(["test_passes.py ..*", "* 2 pass*"]) assert result.ret == 0 - def test_header_trailer_info(self, testdir): + def test_header_trailer_info(self, testdir, request): testdir.makepyfile( """ def test_passes(): @@ -563,7 +563,7 @@ def test_passes(): "=* 1 passed*in *.[0-9][0-9] seconds *=", ] ) - if pytest.config.pluginmanager.list_plugin_distinfo(): + if request.config.pluginmanager.list_plugin_distinfo(): result.stdout.fnmatch_lines(["plugins: *"]) def test_showlocals(self, testdir): From d237197de34bed39854251989126777fc9941c18 Mon Sep 17 00:00:00 2001 From: feuillemorte Date: Tue, 4 Dec 2018 13:49:08 +0100 Subject: [PATCH 39/98] #4278 Added a CACHEDIR.TAG file to the cache directory --- changelog/4278.trivial.rst | 1 + src/_pytest/cacheprovider.py | 11 +++++++++++ testing/test_cacheprovider.py | 12 ++++++++++++ 3 files changed, 24 insertions(+) create mode 100644 changelog/4278.trivial.rst diff --git a/changelog/4278.trivial.rst b/changelog/4278.trivial.rst new file mode 100644 index 00000000000..09c0166394b --- /dev/null +++ b/changelog/4278.trivial.rst @@ -0,0 +1 @@ +Added a CACHEDIR.TAG file to the cache directory diff --git a/src/_pytest/cacheprovider.py b/src/_pytest/cacheprovider.py index 22ce578fc1d..59265ad85f6 100755 --- a/src/_pytest/cacheprovider.py +++ b/src/_pytest/cacheprovider.py @@ -33,6 +33,13 @@ See [the docs](https://docs.pytest.org/en/latest/cache.html) for more information. """ +CACHEDIR_TAG_CONTENT = b"""\ +Signature: 8a477f597d28d172789f06886806bc55 +# This file is a cache directory tag created by pytest. 
+# For information about cache directory tags, see: +# http://www.bford.info/cachedir/spec.html +""" + @attr.s class Cache(object): @@ -140,6 +147,10 @@ def _ensure_supporting_files(self): msg = u"# Created by pytest automatically.\n*" gitignore_path.write_text(msg, encoding="UTF-8") + cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG") + if not cachedir_tag_path.is_file(): + cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT) + class LFPlugin(object): """ Plugin which implements the --lf (run last-failing) option """ diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py index 30fe23aeb5e..29c2d8a1d27 100644 --- a/testing/test_cacheprovider.py +++ b/testing/test_cacheprovider.py @@ -925,3 +925,15 @@ def test_does_not_create_boilerplate_in_existing_dirs(testdir): assert os.path.isdir("v") # cache contents assert not os.path.exists(".gitignore") assert not os.path.exists("README.md") + + +def test_cachedir_tag(testdir): + """Ensure we automatically create CACHEDIR.TAG file in the pytest_cache directory (#4278).""" + from _pytest.cacheprovider import Cache + from _pytest.cacheprovider import CACHEDIR_TAG_CONTENT + + config = testdir.parseconfig() + cache = Cache.for_config(config) + cache.set("foo", "bar") + cachedir_tag_path = cache._cachedir.joinpath("CACHEDIR.TAG") + assert cachedir_tag_path.read_bytes() == CACHEDIR_TAG_CONTENT From 1654b77ca0febbe047a56cdce59e14755064b3e6 Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Sun, 14 Oct 2018 19:13:00 +0300 Subject: [PATCH 40/98] [#3191] Set up tests to confirm warnings --- testing/test_warnings.py | 35 +++++++++++++++++++++++++++++++++++ 1 file changed, 35 insertions(+) diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 53d9c71cd3f..9dbe7f0d6bd 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -623,3 +623,38 @@ def test(): else: assert change_default in ("ini", "cmdline") result.stdout.fnmatch_lines(["* 1 passed in *"]) +class TestAssertionWarnings: + def test_tuple_warning(self, testdir): + testdir.makepyfile( + """ + def test_foo(): + assert (1,2) + """ + ) + with pytest.warns(pytest.PytestWarning): + testdir.runpytest_subprocess() + + def create_file(self, testdir, return_none): + testdir.makepyfile( + """ + def foo(return_none): + if return_none: + return None + else: + return False + + def test_foo(): + assert foo({return_none}) + """.format( + return_none=return_none + ) + ) + + def test_none_function_warns(self, testdir): + self.create_file(testdir, True) + with pytest.warns(pytest.PytestWarning): + testdir.runpytest_subprocess() + + def test_false_function_no_warn(self, testdir): + self.create_file(testdir, False) + testdir.runpytest_subprocess("-W error:PytestWarning") From 9fc9b2926f35915b6e7fd9bebe1855e47554db04 Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Mon, 15 Oct 2018 10:53:27 +0300 Subject: [PATCH 41/98] Fix tests and add aditional cases As requested by review. 
:ok_hand: Address code review for tests --- testing/test_warnings.py | 35 +++++++++++++++++++++++++++++------ 1 file changed, 29 insertions(+), 6 deletions(-) diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 9dbe7f0d6bd..ff169750298 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -624,6 +624,10 @@ def test(): assert change_default in ("ini", "cmdline") result.stdout.fnmatch_lines(["* 1 passed in *"]) class TestAssertionWarnings: + @staticmethod + def result_warns(result): + return result.stdout.fnmatch_lines(["*PytestWarning*"]) + def test_tuple_warning(self, testdir): testdir.makepyfile( """ @@ -631,10 +635,11 @@ def test_foo(): assert (1,2) """ ) - with pytest.warns(pytest.PytestWarning): - testdir.runpytest_subprocess() + result = testdir.runpytest() + assert self.result_warns(result) - def create_file(self, testdir, return_none): + @staticmethod + def create_file(testdir, return_none): testdir.makepyfile( """ def foo(return_none): @@ -652,9 +657,27 @@ def test_foo(): def test_none_function_warns(self, testdir): self.create_file(testdir, True) - with pytest.warns(pytest.PytestWarning): - testdir.runpytest_subprocess() + result = testdir.runpytest() + assert self.result_warns(result) + + def test_assert_is_none_no_warn(self, testdir): + """Tests a more simple case of `test_none_function_warns` where `assert None` is explicitly called""" + testdir.makepyfile( + """ + def foo(return_none): + if return_none: + return None + else: + return False + + def test_foo(): + assert foo(True) is None + """ + ) + result = testdir.runpytest() + assert not self.result_warns(result) def test_false_function_no_warn(self, testdir): self.create_file(testdir, False) - testdir.runpytest_subprocess("-W error:PytestWarning") + result = testdir.runpytest() + assert not self.result_warns(result) From 59a11b6a5d4e9a93eba71c39b9f523e2b8380f39 Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Thu, 25 Oct 2018 00:05:20 +0300 Subject: [PATCH 42/98] Check for 'assert None' and warn appropriately :bug:Fix warn ast bugs :bug:Fix inner-ast imports by using importFrom Alternetavly ast_call_helper could be retooled to use ast.attribute(...) --- src/_pytest/assertion/rewrite.py | 58 ++++++++++++++++++++++++++++++++ 1 file changed, 58 insertions(+) diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index d1231b77468..4d04dfc1a1d 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -51,6 +51,19 @@ def ast_Call(a, b, c): return ast.Call(a, b, c, None, None) +def ast_Call_helper(func_name, *args, **kwargs): + """ + func_name: str + args: Iterable[ast.expr] + kwargs: Dict[str,ast.expr] + """ + return ast.Call( + ast.Name(func_name, ast.Load()), + list(args), + [ast.keyword(key, val) for key, val in kwargs.items()], + ) + + class AssertionRewritingHook(object): """PEP302 Import hook which rewrites asserts.""" @@ -828,6 +841,12 @@ def visit_Assert(self, assert_): self.push_format_context() # Rewrite assert into a bunch of statements. top_condition, explanation = self.visit(assert_.test) + # Check if directly asserting None, in order to warn [Issue #3191] + self.statements.append( + self.warn_about_none_ast( + top_condition, module_path=self.module_path, lineno=assert_.lineno + ) + ) # Create failure message. 
body = self.on_failure negation = ast.UnaryOp(ast.Not(), top_condition) @@ -858,6 +877,45 @@ def visit_Assert(self, assert_): set_location(stmt, assert_.lineno, assert_.col_offset) return self.statements + def warn_about_none_ast(self, node, module_path, lineno): + """Returns an ast warning if node is None with the following statement: + if node is None: + from _pytest.warning_types import PytestWarning + import warnings + warnings.warn_explicit( + PytestWarning('assertion the value None, Please use "assert is None"'), + category=None, + # filename=str(self.module_path), + filename=__file__ + lineno=node.lineno, + ) + """ + + warning_msg = ast.Str( + 'Asserting the value None directly, Please use "assert is None" to eliminate ambiguity' + ) + AST_NONE = ast.NameConstant(None) + val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE]) + import_warnings = ast.ImportFrom( + module="warnings", names=[ast.alias("warn_explicit", None)], level=0 + ) + import_pytest_warning = ast.ImportFrom( + module="pytest", names=[ast.alias("PytestWarning", None)], level=0 + ) + pytest_warning = ast_Call_helper("PytestWarning", warning_msg) + # This won't work because this isn't the same "self" as an AssertionRewriter! + # ast_filename = improved_ast_Call('str',ast.Attribute('self','module_path',ast.Load).module_path) + warn = ast_Call_helper( + "warn_explicit", + pytest_warning, + category=AST_NONE, + filename=ast.Str(str(module_path)), + lineno=ast.Num(lineno), + ) + return ast.If( + val_is_none, [import_warnings, import_pytest_warning, ast.Expr(warn)], [] + ) + def visit_Name(self, name): # Display the repr of the name if it's a local variable or # _should_repr_global_name() thinks it's acceptable. From e0c2ab19017d38cd957b96aff873e6fe97acbeef Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Thu, 25 Oct 2018 13:47:30 +0300 Subject: [PATCH 43/98] Fix tests not to assert a function that already asserts Maybe there should be a warning about that too? 
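For context (illustrative test module, not part of the patch): the pattern this warning targets is
asserting the ``None`` return value of a helper that already asserts internally, such as mock's
``assert_called_with``. A minimal sketch::

    from unittest import mock

    def test_wrong_style():
        m = mock.Mock()
        m("hello")
        # assert_called_with() asserts internally and returns None, so this line is
        # effectively ``assert None``: it now triggers a PytestWarning and fails even
        # though the recorded call matched.
        assert m.assert_called_with("hello")

    def test_right_style():
        m = mock.Mock()
        m("hello")
        m.assert_called_with("hello")  # the mock method already performs the assertion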
--- testing/test_warnings.py | 23 +++++++++++------------ 1 file changed, 11 insertions(+), 12 deletions(-) diff --git a/testing/test_warnings.py b/testing/test_warnings.py index ff169750298..5369416adb3 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -625,8 +625,8 @@ def test(): result.stdout.fnmatch_lines(["* 1 passed in *"]) class TestAssertionWarnings: @staticmethod - def result_warns(result): - return result.stdout.fnmatch_lines(["*PytestWarning*"]) + def assert_result_warns(result): + result.stdout.fnmatch_lines(["*PytestWarning*"]) def test_tuple_warning(self, testdir): testdir.makepyfile( @@ -636,7 +636,7 @@ def test_foo(): """ ) result = testdir.runpytest() - assert self.result_warns(result) + self.assert_result_warns(result) @staticmethod def create_file(testdir, return_none): @@ -658,26 +658,25 @@ def test_foo(): def test_none_function_warns(self, testdir): self.create_file(testdir, True) result = testdir.runpytest() - assert self.result_warns(result) + self.assert_result_warns(result) + @pytest.mark.xfail(strict=True) def test_assert_is_none_no_warn(self, testdir): """Tests a more simple case of `test_none_function_warns` where `assert None` is explicitly called""" testdir.makepyfile( """ - def foo(return_none): - if return_none: - return None - else: - return False + def foo(): + return None def test_foo(): - assert foo(True) is None + assert foo() is None """ ) result = testdir.runpytest() - assert not self.result_warns(result) + self.assert_result_warns(result) + @pytest.mark.xfail(strict=True) def test_false_function_no_warn(self, testdir): self.create_file(testdir, False) result = testdir.runpytest() - assert not self.result_warns(result) + self.assert_result_warns(result) From aaf7f7fcca0721941b8633e190065759818fd5ea Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Thu, 25 Oct 2018 14:09:50 +0300 Subject: [PATCH 44/98] Update changelog --- changelog/3191.feature.rst | 1 + 1 file changed, 1 insertion(+) create mode 100644 changelog/3191.feature.rst diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst new file mode 100644 index 00000000000..3eedf8dfc5e --- /dev/null +++ b/changelog/3191.feature.rst @@ -0,0 +1 @@ +Add warning when asserting ``None`` directly From 3e6f1fa2db20eb2fd751a778dc42dd126a9e778d Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Thu, 25 Oct 2018 15:48:39 +0300 Subject: [PATCH 45/98] Simplify warning creation by using ast.parse in py2 it's a ast.Name where in py3 it's a ast.NamedConstant Fixes namespace by using import from --- src/_pytest/assertion/rewrite.py | 40 ++++++++++++++------------------ 1 file changed, 17 insertions(+), 23 deletions(-) diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index 4d04dfc1a1d..7603eb4979f 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -891,30 +891,24 @@ def warn_about_none_ast(self, node, module_path, lineno): ) """ - warning_msg = ast.Str( - 'Asserting the value None directly, Please use "assert is None" to eliminate ambiguity' - ) - AST_NONE = ast.NameConstant(None) + # using parse because it's different between py2 py3 + AST_NONE = ast.parse("None").body[0].value val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE]) - import_warnings = ast.ImportFrom( - module="warnings", names=[ast.alias("warn_explicit", None)], level=0 - ) - import_pytest_warning = ast.ImportFrom( - module="pytest", names=[ast.alias("PytestWarning", None)], level=0 - ) - pytest_warning = ast_Call_helper("PytestWarning", warning_msg) 
- # This won't work because this isn't the same "self" as an AssertionRewriter! - # ast_filename = improved_ast_Call('str',ast.Attribute('self','module_path',ast.Load).module_path) - warn = ast_Call_helper( - "warn_explicit", - pytest_warning, - category=AST_NONE, - filename=ast.Str(str(module_path)), - lineno=ast.Num(lineno), - ) - return ast.If( - val_is_none, [import_warnings, import_pytest_warning, ast.Expr(warn)], [] - ) + send_warning = ast.parse( + """ +from _pytest.warning_types import PytestWarning +from warnings import warn_explicit +warn_explicit( + PytestWarning('assertion the value None, Please use "assert is None"'), + category=None, + filename='{filename}', + lineno={lineno}, +) + """.format( + filename=str(module_path), lineno=lineno + ) + ).body + return ast.If(val_is_none, send_warning, []) def visit_Name(self, name): # Display the repr of the name if it's a local variable or From 26d27df6fce16904ed9fb27e27e16dc9620d9569 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 30 Oct 2018 16:15:03 -0300 Subject: [PATCH 46/98] Improve changelog message --- changelog/3191.feature.rst | 12 +++++++++++- 1 file changed, 11 insertions(+), 1 deletion(-) diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst index 3eedf8dfc5e..d62301a0cab 100644 --- a/changelog/3191.feature.rst +++ b/changelog/3191.feature.rst @@ -1 +1,11 @@ -Add warning when asserting ``None`` directly +A warning is now issued when assertions are made directly against ``None``. + +This is a common source of confusion among new users, which write:: + + assert mocked_object.assert_called_with(3, 4, 5, key='value') + +When they should write:: + + mocked_object.assert_called_with(3, 4, 5, key='value') + +Because the ``assert_called_with`` method of mock objects already executes an assertion. From e1e4b226c6aa7ea81bf18eb192a6f979275d7097 Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Mon, 5 Nov 2018 10:13:37 +0200 Subject: [PATCH 47/98] :ok_hand: Address code review Edited the changelog for extra clarity, and to fire off auto-formatting Oddly enough, keeping `filename='{filename!r}'` caused an error while collecting tests, but getting rid of the single ticks fixed it Hopefully closes #3191 --- changelog/3191.feature.rst | 9 +++++++-- src/_pytest/assertion/rewrite.py | 2 +- testing/test_warnings.py | 8 ++++---- 3 files changed, 12 insertions(+), 7 deletions(-) diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst index d62301a0cab..13d2049bfbf 100644 --- a/changelog/3191.feature.rst +++ b/changelog/3191.feature.rst @@ -3,9 +3,14 @@ A warning is now issued when assertions are made directly against ``None``. This is a common source of confusion among new users, which write:: assert mocked_object.assert_called_with(3, 4, 5, key='value') - -When they should write:: + +When they should write:: mocked_object.assert_called_with(3, 4, 5, key='value') Because the ``assert_called_with`` method of mock objects already executes an assertion. 
+ +This warning will not be issued when ``None`` is explicitly checked + assert none_returning_fun() is None + +will not issue the warning diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index 7603eb4979f..bfb81cf85c8 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -901,7 +901,7 @@ def warn_about_none_ast(self, node, module_path, lineno): warn_explicit( PytestWarning('assertion the value None, Please use "assert is None"'), category=None, - filename='{filename}', + filename={filename!r}, lineno={lineno}, ) """.format( diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 5369416adb3..8fa1a96efaf 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -623,6 +623,8 @@ def test(): else: assert change_default in ("ini", "cmdline") result.stdout.fnmatch_lines(["* 1 passed in *"]) + + class TestAssertionWarnings: @staticmethod def assert_result_warns(result): @@ -660,7 +662,6 @@ def test_none_function_warns(self, testdir): result = testdir.runpytest() self.assert_result_warns(result) - @pytest.mark.xfail(strict=True) def test_assert_is_none_no_warn(self, testdir): """Tests a more simple case of `test_none_function_warns` where `assert None` is explicitly called""" testdir.makepyfile( @@ -673,10 +674,9 @@ def test_foo(): """ ) result = testdir.runpytest() - self.assert_result_warns(result) + result.stdout.fnmatch_lines(["*1 passed in*"]) - @pytest.mark.xfail(strict=True) def test_false_function_no_warn(self, testdir): self.create_file(testdir, False) result = testdir.runpytest() - self.assert_result_warns(result) + result.stdout.fnmatch_lines(["*1 failed in*"]) From 41031fce2fc64d36abfbbf5b140325759af94383 Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Wed, 5 Dec 2018 17:18:57 +0200 Subject: [PATCH 48/98] Address code review --- changelog/3191.feature.rst | 5 +++-- src/_pytest/assertion/rewrite.py | 5 ++--- 2 files changed, 5 insertions(+), 5 deletions(-) diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst index 13d2049bfbf..4b1626eed3e 100644 --- a/changelog/3191.feature.rst +++ b/changelog/3191.feature.rst @@ -10,7 +10,8 @@ When they should write:: Because the ``assert_called_with`` method of mock objects already executes an assertion. -This warning will not be issued when ``None`` is explicitly checked - assert none_returning_fun() is None +This warning will not be issued when ``None`` is explicitly checked. 
An assertion like:: + + assert variable is None will not issue the warning diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index bfb81cf85c8..e819b8da2e3 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -885,8 +885,7 @@ def warn_about_none_ast(self, node, module_path, lineno): warnings.warn_explicit( PytestWarning('assertion the value None, Please use "assert is None"'), category=None, - # filename=str(self.module_path), - filename=__file__ + filename=str, lineno=node.lineno, ) """ @@ -905,7 +904,7 @@ def warn_about_none_ast(self, node, module_path, lineno): lineno={lineno}, ) """.format( - filename=str(module_path), lineno=lineno + filename=module_path.strpath, lineno=lineno ) ).body return ast.If(val_is_none, send_warning, []) From 7a7ad0c12076f5dad281bbac064b95ba48dfb24e Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Wed, 5 Dec 2018 17:47:34 +0200 Subject: [PATCH 49/98] Shorten docstring for warn_about_none_ast --- src/_pytest/assertion/rewrite.py | 14 ++++---------- 1 file changed, 4 insertions(+), 10 deletions(-) diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index e819b8da2e3..8b5ff5ad650 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -878,16 +878,10 @@ def visit_Assert(self, assert_): return self.statements def warn_about_none_ast(self, node, module_path, lineno): - """Returns an ast warning if node is None with the following statement: - if node is None: - from _pytest.warning_types import PytestWarning - import warnings - warnings.warn_explicit( - PytestWarning('assertion the value None, Please use "assert is None"'), - category=None, - filename=str, - lineno=node.lineno, - ) + """ + Returns an ast issuing a warning if the value of node is `None` + This is used to warn the user when asserting a function that asserts internally. + See issue #3191 for more details """ # using parse because it's different between py2 py3 From 8fd60483efec91cdf410afc54ce9a95d76a8a2c9 Mon Sep 17 00:00:00 2001 From: Tomer Keren Date: Wed, 5 Dec 2018 19:49:54 +0200 Subject: [PATCH 50/98] Don't insert warnings when not in a module --- src/_pytest/assertion/rewrite.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index 8b5ff5ad650..90724142a2d 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -841,12 +841,13 @@ def visit_Assert(self, assert_): self.push_format_context() # Rewrite assert into a bunch of statements. top_condition, explanation = self.visit(assert_.test) - # Check if directly asserting None, in order to warn [Issue #3191] - self.statements.append( - self.warn_about_none_ast( - top_condition, module_path=self.module_path, lineno=assert_.lineno + # If in a test module, check if directly asserting None, in order to warn [Issue #3191] + if self.module_path is not None: + self.statements.append( + self.warn_about_none_ast( + top_condition, module_path=self.module_path, lineno=assert_.lineno + ) ) - ) # Create failure message. 
body = self.on_failure negation = ast.UnaryOp(ast.Not(), top_condition) From 62b8f2f731c1b144a0bc9e872e6714922273549b Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Wed, 5 Dec 2018 19:07:10 +0100 Subject: [PATCH 51/98] Update changelog [ci skip] --- changelog/4278.trivial.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/changelog/4278.trivial.rst b/changelog/4278.trivial.rst index 09c0166394b..126cabea859 100644 --- a/changelog/4278.trivial.rst +++ b/changelog/4278.trivial.rst @@ -1 +1 @@ -Added a CACHEDIR.TAG file to the cache directory +A CACHEDIR.TAG file gets added to the cache directory. From 8a0ed7e2b33a33f29f81314dce587d05c6425c0a Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Wed, 5 Dec 2018 19:14:41 +0100 Subject: [PATCH 52/98] Revisit changelog entry [ci skip] --- changelog/3191.feature.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst index 4b1626eed3e..7eb4c3a15a3 100644 --- a/changelog/3191.feature.rst +++ b/changelog/3191.feature.rst @@ -1,4 +1,4 @@ -A warning is now issued when assertions are made directly against ``None``. +A warning is now issued when assertions are made for ``None``. This is a common source of confusion among new users, which write:: @@ -14,4 +14,4 @@ This warning will not be issued when ``None`` is explicitly checked. An assertio assert variable is None -will not issue the warning +will not issue the warning. From 5ebacc49c630178413a38deb6e344fc07f284be3 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Wed, 5 Dec 2018 19:22:44 +0100 Subject: [PATCH 53/98] Harden tests, fix doc/msg --- src/_pytest/assertion/rewrite.py | 11 ++++++----- testing/test_warnings.py | 13 ++++++++----- 2 files changed, 14 insertions(+), 10 deletions(-) diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index 90724142a2d..78b8edcd87b 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -880,12 +880,13 @@ def visit_Assert(self, assert_): def warn_about_none_ast(self, node, module_path, lineno): """ - Returns an ast issuing a warning if the value of node is `None` - This is used to warn the user when asserting a function that asserts internally. - See issue #3191 for more details + Returns an AST issuing a warning if the value of node is `None`. + This is used to warn the user when asserting a function that asserts + internally already. + See issue #3191 for more details. """ - # using parse because it's different between py2 py3 + # Using parse because it is different between py2 and py3. 
AST_NONE = ast.parse("None").body[0].value val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE]) send_warning = ast.parse( @@ -893,7 +894,7 @@ def warn_about_none_ast(self, node, module_path, lineno): from _pytest.warning_types import PytestWarning from warnings import warn_explicit warn_explicit( - PytestWarning('assertion the value None, Please use "assert is None"'), + PytestWarning('asserting the value None, please use "assert is None"'), category=None, filename={filename!r}, lineno={lineno}, diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 8fa1a96efaf..655c89f4c42 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -627,8 +627,8 @@ def test(): class TestAssertionWarnings: @staticmethod - def assert_result_warns(result): - result.stdout.fnmatch_lines(["*PytestWarning*"]) + def assert_result_warns(result, msg): + result.stdout.fnmatch_lines(["*PytestWarning: %s*" % msg]) def test_tuple_warning(self, testdir): testdir.makepyfile( @@ -638,7 +638,9 @@ def test_foo(): """ ) result = testdir.runpytest() - self.assert_result_warns(result) + self.assert_result_warns( + result, "assertion is always true, perhaps remove parentheses?" + ) @staticmethod def create_file(testdir, return_none): @@ -660,10 +662,11 @@ def test_foo(): def test_none_function_warns(self, testdir): self.create_file(testdir, True) result = testdir.runpytest() - self.assert_result_warns(result) + self.assert_result_warns( + result, 'asserting the value None, please use "assert is None"' + ) def test_assert_is_none_no_warn(self, testdir): - """Tests a more simple case of `test_none_function_warns` where `assert None` is explicitly called""" testdir.makepyfile( """ def foo(): From fc4aa27caebe6bcdcd3cefd7771df8fb2ef5e6b5 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Fri, 2 Nov 2018 19:50:29 +0100 Subject: [PATCH 54/98] Derive outcomes.exit.Exception from SystemExit instead of KeyboardInterrupt This is required for properly getting out of pdb, where KeyboardInterrupt is caught in py36 at least. Ref: https://github.com/pytest-dev/pytest/issues/1865#issuecomment-242599949 --- changelog/4292.feature.rst | 1 + src/_pytest/main.py | 2 +- src/_pytest/outcomes.py | 6 +++--- src/_pytest/runner.py | 8 +++++--- testing/test_runner.py | 2 +- 5 files changed, 11 insertions(+), 8 deletions(-) create mode 100644 changelog/4292.feature.rst diff --git a/changelog/4292.feature.rst b/changelog/4292.feature.rst new file mode 100644 index 00000000000..27d113ba069 --- /dev/null +++ b/changelog/4292.feature.rst @@ -0,0 +1 @@ +``pytest.outcomes.Exit`` is derived from ``SystemExit`` instead of ``KeyboardInterrupt``. 
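To illustrate why the base class matters (hypothetical example, not part of the patch): code under test
or debugging hooks that catch ``KeyboardInterrupt`` no longer swallow ``pytest.exit()``, while the
exception is still caught by handlers for ``SystemExit``::

    import pytest

    def run_with_interrupt_guard(fn):
        try:
            fn()
        except KeyboardInterrupt:
            return "interrupted"

    def test_exit_escapes_keyboardinterrupt_handlers():
        with pytest.raises(SystemExit):
            # pytest.exit() raises pytest.outcomes.Exit, now a SystemExit subclass,
            # so the KeyboardInterrupt handler above does not intercept it.
            run_with_interrupt_guard(lambda: pytest.exit("stopping the session"))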
diff --git a/src/_pytest/main.py b/src/_pytest/main.py index 08490f03aa6..d0d826bb653 100644 --- a/src/_pytest/main.py +++ b/src/_pytest/main.py @@ -205,7 +205,7 @@ def wrap_session(config, doit): raise except Failed: session.exitstatus = EXIT_TESTSFAILED - except KeyboardInterrupt: + except (KeyboardInterrupt, exit.Exception): excinfo = _pytest._code.ExceptionInfo.from_current() exitstatus = EXIT_INTERRUPTED if initstate <= 2 and isinstance(excinfo.value, exit.Exception): diff --git a/src/_pytest/outcomes.py b/src/_pytest/outcomes.py index cd08c0d48e4..714be308834 100644 --- a/src/_pytest/outcomes.py +++ b/src/_pytest/outcomes.py @@ -49,13 +49,13 @@ class Failed(OutcomeException): __module__ = "builtins" -class Exit(KeyboardInterrupt): +class Exit(SystemExit): """ raised for immediate program exits (no tracebacks/summaries)""" def __init__(self, msg="unknown reason", returncode=None): self.msg = msg self.returncode = returncode - KeyboardInterrupt.__init__(self, msg) + SystemExit.__init__(self, msg) # exposed helper methods @@ -63,7 +63,7 @@ def __init__(self, msg="unknown reason", returncode=None): def exit(msg, returncode=None): """ - Exit testing process as if KeyboardInterrupt was triggered. + Exit testing process as if SystemExit was triggered. :param str msg: message to display upon exit. :param int returncode: return code to be used when exiting pytest. diff --git a/src/_pytest/runner.py b/src/_pytest/runner.py index 27f244a80e0..538e13403a7 100644 --- a/src/_pytest/runner.py +++ b/src/_pytest/runner.py @@ -15,6 +15,7 @@ from .reports import CollectReport from .reports import TestReport from _pytest._code.code import ExceptionInfo +from _pytest.outcomes import Exit from _pytest.outcomes import skip from _pytest.outcomes import Skipped from _pytest.outcomes import TEST_OUTCOME @@ -190,10 +191,11 @@ def check_interactive_exception(call, report): def call_runtest_hook(item, when, **kwds): hookname = "pytest_runtest_" + when ihook = getattr(item.ihook, hookname) + reraise = (Exit,) + if not item.config.getvalue("usepdb"): + reraise += (KeyboardInterrupt,) return CallInfo.from_call( - lambda: ihook(item=item, **kwds), - when=when, - reraise=KeyboardInterrupt if not item.config.getvalue("usepdb") else (), + lambda: ihook(item=item, **kwds), when=when, reraise=reraise ) diff --git a/testing/test_runner.py b/testing/test_runner.py index 916c2ea4ae3..ae129d06d27 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -553,7 +553,7 @@ def test_outcomeexception_passes_except_Exception(): def test_pytest_exit(): with pytest.raises(pytest.exit.Exception) as excinfo: pytest.exit("hello") - assert excinfo.errisinstance(KeyboardInterrupt) + assert excinfo.errisinstance(pytest.exit.Exception) def test_pytest_fail(): From 7b1cb885c747dcbbbba3f320e5465bade8a98fa2 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Sun, 9 Dec 2018 14:23:08 +0100 Subject: [PATCH 55/98] Handle missing space with -p This still does not use an actual argument parser, which only gets instantiated below, and it does not appear to make sense instantiating it just for this pre-parsing it seems. `-p` without the required value is being handled before already though, so it could potentially be passed down from somewhere already?! Fixes https://github.com/pytest-dev/pytest/issues/3532. 
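Usage sketch (illustrative, not part of the patch): both spellings are now handled during pytest's
early plugin pre-parsing, shown here disabling the built-in ``cacheprovider`` plugin::

    import pytest

    # separated form, already supported
    pytest.main(["-p", "no:cacheprovider"])

    # joined form, now recognized as well (#3532)
    pytest.main(["-pno:cacheprovider"])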
--- changelog/3532.bugfix.rst | 1 + src/_pytest/config/__init__.py | 19 +++++++++++++++---- testing/test_pluginmanager.py | 6 ++++++ 3 files changed, 22 insertions(+), 4 deletions(-) create mode 100644 changelog/3532.bugfix.rst diff --git a/changelog/3532.bugfix.rst b/changelog/3532.bugfix.rst new file mode 100644 index 00000000000..8651458d99e --- /dev/null +++ b/changelog/3532.bugfix.rst @@ -0,0 +1 @@ +``-p`` now accepts its argument without a space between the value, for example ``-pmyplugin``. diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 13944099ce6..fafd8c930fe 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -470,9 +470,20 @@ def _importconftest(self, conftestpath): # def consider_preparse(self, args): - for opt1, opt2 in zip(args, args[1:]): - if opt1 == "-p": - self.consider_pluginarg(opt2) + i = 0 + n = len(args) + while i < n: + opt = args[i] + i += 1 + if isinstance(opt, six.string_types): + if opt == "-p": + parg = args[i] + i += 1 + elif opt.startswith("-p"): + parg = opt[2:] + else: + continue + self.consider_pluginarg(parg) def consider_pluginarg(self, arg): if arg.startswith("no:"): @@ -507,7 +518,7 @@ def import_plugin(self, modname): # "terminal" or "capture". Those plugins are registered under their # basename for historic purposes but must be imported with the # _pytest prefix. - assert isinstance(modname, (six.text_type, str)), ( + assert isinstance(modname, six.string_types), ( "module name as text required, got %r" % modname ) modname = str(modname) diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 6137b277137..80ef3db02a2 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -323,6 +323,12 @@ def test_preparse_args(self, pytestpm): ImportError, lambda: pytestpm.consider_preparse(["xyz", "-p", "hello123"]) ) + # Handles -p without space (#3532). + with pytest.raises(ImportError) as excinfo: + pytestpm.consider_preparse(["-phello123"]) + assert '"hello123"' in excinfo.value.args[0] + pytestpm.consider_preparse(["-pno:hello123"]) + def test_plugin_prevent_register(self, pytestpm): pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) l1 = pytestpm.get_plugins() From 3445eae737e5d5110674ad5aff1f87263c1be089 Mon Sep 17 00:00:00 2001 From: Daniel Hahler Date: Tue, 11 Dec 2018 04:54:54 +0100 Subject: [PATCH 56/98] argparsing: Parser: allow to forward prog to argparse Ref: https://github.com/pytest-dev/pytest-xdist/pull/388. --- src/_pytest/config/argparsing.py | 7 +++++-- testing/test_parseopt.py | 6 ++++++ 2 files changed, 11 insertions(+), 2 deletions(-) diff --git a/src/_pytest/config/argparsing.py b/src/_pytest/config/argparsing.py index 5b8306ddab7..51f70833591 100644 --- a/src/_pytest/config/argparsing.py +++ b/src/_pytest/config/argparsing.py @@ -18,6 +18,8 @@ class Parser(object): there's an error processing the command line arguments. 
""" + prog = None + def __init__(self, usage=None, processopt=None): self._anonymous = OptionGroup("custom options", parser=self) self._groups = [] @@ -82,7 +84,7 @@ def parse(self, args, namespace=None): def _getparser(self): from _pytest._argcomplete import filescompleter - optparser = MyOptionParser(self, self.extra_info) + optparser = MyOptionParser(self, self.extra_info, prog=self.prog) groups = self._groups + [self._anonymous] for group in groups: if group.options: @@ -319,12 +321,13 @@ def _addoption_instance(self, option, shortupper=False): class MyOptionParser(argparse.ArgumentParser): - def __init__(self, parser, extra_info=None): + def __init__(self, parser, extra_info=None, prog=None): if not extra_info: extra_info = {} self._parser = parser argparse.ArgumentParser.__init__( self, + prog=prog, usage=parser._usage, add_help=False, formatter_class=DropShorterLongHelpFormatter, diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 3048c96bda6..c3b4ee698ce 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -24,6 +24,12 @@ def test_no_help_by_default(self, capsys): out, err = capsys.readouterr() assert err.find("error: unrecognized arguments") != -1 + def test_custom_prog(self, parser): + """Custom prog can be set for `argparse.ArgumentParser`.""" + assert parser._getparser().prog == os.path.basename(sys.argv[0]) + parser.prog = "custom-prog" + assert parser._getparser().prog == "custom-prog" + def test_argument(self): with pytest.raises(parseopt.ArgumentError): # need a short or long option From cfbd387a5d7a27a3e7dfb754d7cd4482e931822b Mon Sep 17 00:00:00 2001 From: Andrey Paramonov Date: Tue, 11 Dec 2018 19:29:31 +0300 Subject: [PATCH 57/98] Add --junittime=call option --- src/_pytest/junitxml.py | 20 +++++++++++++++++--- testing/test_junitxml.py | 19 +++++++++++++++++++ 2 files changed, 36 insertions(+), 3 deletions(-) diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index 09847c942da..c5cc2b0a91b 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -314,6 +314,15 @@ def pytest_addoption(parser): default=None, help="prepend prefix to classnames in junit-xml output", ) + group.addoption( + "--junittime", + "--junit-time", + action="store", + metavar="str", + default="total", + # choices=["total", "call"], + help='duration time to report: "total" (default), "call"', + ) parser.addini( "junit_suite_name", "Test suite name for JUnit report", default="pytest" ) @@ -334,6 +343,7 @@ def pytest_configure(config): config.option.junitprefix, config.getini("junit_suite_name"), config.getini("junit_logging"), + config.option.junittime, ) config.pluginmanager.register(config._xml) @@ -361,12 +371,14 @@ def mangle_test_address(address): class LogXML(object): - def __init__(self, logfile, prefix, suite_name="pytest", logging="no"): + def __init__(self, logfile, prefix, suite_name="pytest", logging="no", + report_duration=None): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix self.suite_name = suite_name self.logging = logging + self.report_duration = report_duration self.stats = dict.fromkeys(["error", "passed", "failure", "skipped"], 0) self.node_reporters = {} # nodeid -> _NodeReporter self.node_reporters_ordered = [] @@ -500,8 +512,10 @@ def update_testcase_duration(self, report): """accumulates total duration for nodeid from given report and updates the Junit.testcase with the new total if already created. 
""" - reporter = self.node_reporter(report) - reporter.duration += getattr(report, "duration", 0.0) + if not self.report_duration or self.report_duration == "total" or \ + report.when == self.report_duration: + reporter = self.node_reporter(report) + reporter.duration += getattr(report, "duration", 0.0) def pytest_collectreport(self, report): if not report.passed: diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index c9dc39f82b7..82ed7901c5a 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -153,6 +153,24 @@ def test_sleep(): val = tnode["time"] assert round(float(val), 2) >= 0.03 + def test_call_time(self, testdir): + testdir.makepyfile( + """ + import time, pytest + def setup_module(): + time.sleep(0.01) + def teardown_module(): + time.sleep(0.01) + def test_sleep(): + time.sleep(0.01) + """ + ) + result, dom = runandparse(testdir, "--junit-time=call") + node = dom.find_first_by_tag("testsuite") + tnode = node.find_first_by_tag("testcase") + val = tnode["time"] + assert 0.01 <= round(float(val), 2) < 0.02 + def test_setup_error(self, testdir): testdir.makepyfile( """ @@ -727,6 +745,7 @@ def getini(self, name): junitprefix = None # XXX: shouldnt need tmpdir ? xmlpath = str(tmpdir.join("junix.xml")) + junittime = None register = gotten.append fake_config = FakeConfig() From a44776ed48755de7c7a0860cdba078b363b3d2b6 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 11 Dec 2018 15:16:11 -0200 Subject: [PATCH 58/98] Fix linting --- src/_pytest/junitxml.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index c5cc2b0a91b..eed2a9a77a0 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -371,8 +371,9 @@ def mangle_test_address(address): class LogXML(object): - def __init__(self, logfile, prefix, suite_name="pytest", logging="no", - report_duration=None): + def __init__( + self, logfile, prefix, suite_name="pytest", logging="no", report_duration=None + ): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix @@ -512,8 +513,11 @@ def update_testcase_duration(self, report): """accumulates total duration for nodeid from given report and updates the Junit.testcase with the new total if already created. """ - if not self.report_duration or self.report_duration == "total" or \ - report.when == self.report_duration: + if ( + not self.report_duration + or self.report_duration == "total" + or report.when == self.report_duration + ): reporter = self.node_reporter(report) reporter.duration += getattr(report, "duration", 0.0) From 9839ceffe0c78c5308716db758532b03e0f908d8 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 11 Dec 2018 20:36:57 -0200 Subject: [PATCH 59/98] Change -ra to show errors and failures last, instead of first Often in large test suites (like pytest's), the -ra summary is very useful to obtain a list of failures so we can execute each test at once to fix them. Problem is the default shows errors and failures first, which leads to a lot of scrolling to get to them. 
--- src/_pytest/terminal.py | 2 +- testing/test_skipping.py | 13 ++++++++++++- 2 files changed, 13 insertions(+), 2 deletions(-) diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index 6f389365382..1137d52b885 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -167,7 +167,7 @@ def getreportopt(config): if char not in reportopts and char != "a": reportopts += char elif char == "a": - reportopts = "fEsxXw" + reportopts = "sxXwEf" return reportopts diff --git a/testing/test_skipping.py b/testing/test_skipping.py index 231c3b6aaeb..6b18011b634 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -875,11 +875,22 @@ def test_3(): pass def test_4(): pytest.skip("four") + @pytest.fixture + def fail(): + assert 0 + def test_5(fail): + pass """ ) result = testdir.runpytest("-ra") result.stdout.fnmatch_lines( - ["FAIL*test_1*", "SKIP*four*", "XFAIL*test_2*", "XPASS*test_3*"] + [ + "SKIP*four*", + "XFAIL*test_2*", + "XPASS*test_3*", + "ERROR*test_5*", + "FAIL*test_1*", + ] ) From 3cd11617ea8df94989af8e5115c1d235aaea13c2 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 11 Dec 2018 20:40:06 -0200 Subject: [PATCH 60/98] Add CHANGELOG --- changelog/4532.feature.rst | 3 +++ 1 file changed, 3 insertions(+) create mode 100644 changelog/4532.feature.rst diff --git a/changelog/4532.feature.rst b/changelog/4532.feature.rst new file mode 100644 index 00000000000..ce7eb372976 --- /dev/null +++ b/changelog/4532.feature.rst @@ -0,0 +1,3 @@ +``-ra`` now will show errors and failures last, instead of as the first items in the summary. + +This makes it easier to obtain a list of errors and failures to run tests selectively. From 0bccfc44a754dfefce3e99602e767253b7f360f7 Mon Sep 17 00:00:00 2001 From: Andrey Paramonov Date: Wed, 12 Dec 2018 12:14:14 +0300 Subject: [PATCH 61/98] Fix flaky test --- testing/test_junitxml.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index 82ed7901c5a..ba7b43826f1 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -169,7 +169,7 @@ def test_sleep(): node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") val = tnode["time"] - assert 0.01 <= round(float(val), 2) < 0.02 + assert 0.01 <= round(float(val), 3) < 0.02 def test_setup_error(self, testdir): testdir.makepyfile( From 316cca204ff04c93529368ce63a7d3f5cc60aa6c Mon Sep 17 00:00:00 2001 From: Andrey Paramonov Date: Wed, 12 Dec 2018 13:19:39 +0300 Subject: [PATCH 62/98] Switch to ini config parameter --- src/_pytest/junitxml.py | 27 +++++++++++---------------- testing/test_junitxml.py | 3 +-- 2 files changed, 12 insertions(+), 18 deletions(-) diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index eed2a9a77a0..696deb6e9bd 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -314,15 +314,6 @@ def pytest_addoption(parser): default=None, help="prepend prefix to classnames in junit-xml output", ) - group.addoption( - "--junittime", - "--junit-time", - action="store", - metavar="str", - default="total", - # choices=["total", "call"], - help='duration time to report: "total" (default), "call"', - ) parser.addini( "junit_suite_name", "Test suite name for JUnit report", default="pytest" ) @@ -332,6 +323,9 @@ def pytest_addoption(parser): "one of no|system-out|system-err", default="no", ) # choices=['no', 'stdout', 'stderr']) + parser.addini( + "junit_time", "Duration time to report: one of total|call", default="total" + ) # 
choices=['total', 'call']) def pytest_configure(config): @@ -343,7 +337,7 @@ def pytest_configure(config): config.option.junitprefix, config.getini("junit_suite_name"), config.getini("junit_logging"), - config.option.junittime, + config.getini("junit_time"), ) config.pluginmanager.register(config._xml) @@ -372,7 +366,12 @@ def mangle_test_address(address): class LogXML(object): def __init__( - self, logfile, prefix, suite_name="pytest", logging="no", report_duration=None + self, + logfile, + prefix, + suite_name="pytest", + logging="no", + report_duration="total", ): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) @@ -513,11 +512,7 @@ def update_testcase_duration(self, report): """accumulates total duration for nodeid from given report and updates the Junit.testcase with the new total if already created. """ - if ( - not self.report_duration - or self.report_duration == "total" - or report.when == self.report_duration - ): + if self.report_duration == "total" or report.when == self.report_duration: reporter = self.node_reporter(report) reporter.duration += getattr(report, "duration", 0.0) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index ba7b43826f1..896b11b824a 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -165,7 +165,7 @@ def test_sleep(): time.sleep(0.01) """ ) - result, dom = runandparse(testdir, "--junit-time=call") + result, dom = runandparse(testdir, "-o", "junit_time=call") node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") val = tnode["time"] @@ -745,7 +745,6 @@ def getini(self, name): junitprefix = None # XXX: shouldnt need tmpdir ? xmlpath = str(tmpdir.join("junix.xml")) - junittime = None register = gotten.append fake_config = FakeConfig() From b1e766c30ed3f9a5cf3fa7dd47b330dd8684565e Mon Sep 17 00:00:00 2001 From: Andrey Paramonov Date: Wed, 12 Dec 2018 13:27:44 +0300 Subject: [PATCH 63/98] Update docs --- AUTHORS | 1 + changelog/4483.feature.rst | 2 ++ doc/en/usage.rst | 14 ++++++++++++++ 3 files changed, 17 insertions(+) create mode 100644 changelog/4483.feature.rst diff --git a/AUTHORS b/AUTHORS index 684063778f7..00ced49f018 100644 --- a/AUTHORS +++ b/AUTHORS @@ -17,6 +17,7 @@ Anders Hovmöller Andras Tim Andrea Cimatoribus Andreas Zeidler +Andrey Paramonov Andrzej Ostrowski Andy Freeland Anthon van der Neut diff --git a/changelog/4483.feature.rst b/changelog/4483.feature.rst new file mode 100644 index 00000000000..d9bd4c7173f --- /dev/null +++ b/changelog/4483.feature.rst @@ -0,0 +1,2 @@ +Add ini parameter ``junit_time`` to optionally report test call +durations less setup and teardown times. diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 49c2aa577b8..91b91002b0b 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -294,6 +294,20 @@ To set the name of the root test suite xml item, you can configure the ``junit_s [pytest] junit_suite_name = my_suite +.. versionadded:: 4.0 + +JUnit XML specification seems to indicate that ``"time"`` attribute +should report total test execution times, including setup and teardown +(`1`_, +`2`_). +It is the default pytest behavior. To report just call durations +instead, configure the ``junit_time`` option like this: + +.. code-block:: ini + + [pytest] + junit_time = call + .. 
_record_property example: record_property From ec4507d12a12cf9cc8b3bd33952abc1042e11344 Mon Sep 17 00:00:00 2001 From: Andrey Paramonov Date: Wed, 12 Dec 2018 14:33:02 +0300 Subject: [PATCH 64/98] Fix doc formatting --- doc/en/usage.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 91b91002b0b..7c3ef19fbdf 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -298,8 +298,8 @@ To set the name of the root test suite xml item, you can configure the ``junit_s JUnit XML specification seems to indicate that ``"time"`` attribute should report total test execution times, including setup and teardown -(`1`_, -`2`_). +(`1 `_, `2 +`_). It is the default pytest behavior. To report just call durations instead, configure the ``junit_time`` option like this: From 5d79baf3f8cc2986dacfe0e34ff0b84794beecb4 Mon Sep 17 00:00:00 2001 From: Andrey Paramonov Date: Wed, 12 Dec 2018 15:33:29 +0300 Subject: [PATCH 65/98] Fix flaky test attempt 2 --- testing/test_junitxml.py | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index 896b11b824a..aafbb8da9df 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -158,18 +158,18 @@ def test_call_time(self, testdir): """ import time, pytest def setup_module(): - time.sleep(0.01) + time.sleep(0.1) def teardown_module(): - time.sleep(0.01) + time.sleep(0.1) def test_sleep(): - time.sleep(0.01) + time.sleep(0.1) """ ) result, dom = runandparse(testdir, "-o", "junit_time=call") node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") val = tnode["time"] - assert 0.01 <= round(float(val), 3) < 0.02 + assert 0.1 <= round(float(val), 2) < 0.2 def test_setup_error(self, testdir): testdir.makepyfile( From e3d30f8ebf75da51f4ac09a6b197716420994b1f Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 11 Dec 2018 18:24:02 -0200 Subject: [PATCH 66/98] Remove deprecated PyCollector.makeitem Fix #4535 --- changelog/4535.removal.rst | 1 + doc/en/deprecations.rst | 22 ++++++++++++---------- src/_pytest/deprecated.py | 3 --- src/_pytest/python.py | 4 ---- testing/deprecated_test.py | 20 -------------------- testing/python/collect.py | 2 +- 6 files changed, 14 insertions(+), 38 deletions(-) create mode 100644 changelog/4535.removal.rst diff --git a/changelog/4535.removal.rst b/changelog/4535.removal.rst new file mode 100644 index 00000000000..f899005871a --- /dev/null +++ b/changelog/4535.removal.rst @@ -0,0 +1 @@ +Removed deprecated ``PyCollector.makeitem`` method. This method was made public by mistake a long time ago. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index ca72bfbfff7..3ee5ca0d4f9 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -58,17 +58,7 @@ Becomes: exec("assert(1, 2)") # exec is used to avoid a top-level warning -Using ``Class`` in custom Collectors -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -.. deprecated:: 3.9 - -Using objects named ``"Class"`` as a way to customize the type of nodes that are collected in ``Collector`` -subclasses has been deprecated. Users instead should use ``pytest_pycollect_makeitem`` to customize node types during -collection. - -This issue should affect only advanced plugins who create new collection types, so if you see this warning -message please contact the authors so they can change the code. 
``Config.warn`` and ``Node.warn`` @@ -280,6 +270,18 @@ Removed Features As stated in our :ref:`backwards-compatibility` policy, deprecated features are removed only in major releases after an appropriate period of deprecation has passed. +Using ``Class`` in custom Collectors +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +Using objects named ``"Class"`` as a way to customize the type of nodes that are collected in ``Collector`` +subclasses has been deprecated. Users instead should use ``pytest_pycollect_makeitem`` to customize node types during +collection. + +This issue should affect only advanced plugins who create new collection types, so if you see this warning +message please contact the authors so they can change the code. + Metafunc.addcall ~~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 0d3dc85cc62..30173e6b120 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -87,9 +87,6 @@ '"record_xml_property" is now deprecated.' ) -COLLECTOR_MAKEITEM = RemovedInPytest4Warning( - "pycollector makeitem was removed as it is an accidentially leaked internal api" -) PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( "Defining pytest_plugins in a non-top-level conftest is deprecated, " diff --git a/src/_pytest/python.py b/src/_pytest/python.py index 3db36fb1982..a872a86ede5 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -378,10 +378,6 @@ def collect(self): values.sort(key=lambda item: item.reportinfo()[:2]) return values - def makeitem(self, name, obj): - warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2) - self._makeitem(name, obj) - def _makeitem(self, name, obj): # assert self.ihook.fspath == self.fspath, self return self.ihook.pytest_pycollect_makeitem(collector=self, name=name, obj=obj) diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index d54c868f290..111ff629f25 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -268,26 +268,6 @@ def fix(): assert fix() == 1 -def test_pycollector_makeitem_is_deprecated(): - from _pytest.python import PyCollector - from _pytest.warning_types import RemovedInPytest4Warning - - class PyCollectorMock(PyCollector): - """evil hack""" - - def __init__(self): - self.called = False - - def _makeitem(self, *k): - """hack to disable the actual behaviour""" - self.called = True - - collector = PyCollectorMock() - with pytest.warns(RemovedInPytest4Warning): - collector.makeitem("foo", "bar") - assert collector.called - - def test_fixture_named_request(testdir): testdir.copy_example() result = testdir.runpytest() diff --git a/testing/python/collect.py b/testing/python/collect.py index 2e534a25943..53b3bc18b04 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -808,7 +808,7 @@ def test_makeitem_non_underscore(self, testdir, monkeypatch): modcol = testdir.getmodulecol("def _hello(): pass") values = [] monkeypatch.setattr( - pytest.Module, "makeitem", lambda self, name, obj: values.append(name) + pytest.Module, "_makeitem", lambda self, name, obj: values.append(name) ) values = modcol.collect() assert "_hello" not in values From 5b83417afcce36c42e5c6cd51649da55101c0d86 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Wed, 12 Dec 2018 19:12:44 -0200 Subject: [PATCH 67/98] Deprecate the 'message' parameter of pytest.raises Fix #3974 --- changelog/3974.deprecation.rst | 8 ++++ doc/en/deprecations.rst | 13 ++++++ src/_pytest/deprecated.py | 7 ++++ src/_pytest/python_api.py | 73 
++++++++++++++++++---------------- testing/deprecated_test.py | 6 +++ testing/python/raises.py | 5 ++- testing/test_pytester.py | 2 +- 7 files changed, 77 insertions(+), 37 deletions(-) create mode 100644 changelog/3974.deprecation.rst diff --git a/changelog/3974.deprecation.rst b/changelog/3974.deprecation.rst new file mode 100644 index 00000000000..070ecb8b275 --- /dev/null +++ b/changelog/3974.deprecation.rst @@ -0,0 +1,8 @@ +Passing the ``message`` parameter of ``pytest.raises`` now issues a ``DeprecationWarning``. + +It is a common mistake to think this parameter will match the exception message, while in fact +it only serves to provide a custom message in case the ``pytest.raises`` check fails. To avoid this +mistake and because it is believed to be little used, pytest is deprecating it without providing +an alternative for the moment. + +If you have concerns about this, please comment on `issue #3974 `__. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 3ee5ca0d4f9..a2f16d974bc 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -14,6 +14,19 @@ Below is a complete list of all pytest features which are considered deprecated. :class:`_pytest.warning_types.PytestWarning` or subclasses, which can be filtered using :ref:`standard warning filters `. +``"message"`` parameter of ``pytest.raises`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +.. deprecated:: 4.1 + +It is a common mistake to think this parameter will match the exception message, while in fact +it only serves to provide a custom message in case the ``pytest.raises`` check fails. To avoid this +mistake and because it is believed to be little used, pytest is deprecating it without providing +an alternative for the moment. + +If you have concerns about this, please comment on `issue #3974 `__. + + ``pytest.config`` global ~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 30173e6b120..426533a0c7c 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -51,6 +51,13 @@ "getfuncargvalue is deprecated, use getfixturevalue" ) +RAISES_MESSAGE_PARAMETER = PytestDeprecationWarning( + "The 'message' parameter is deprecated.\n" + "(did you mean to use `match='some regex'` to check the exception message?)\n" + "Please comment on https://github.com/pytest-dev/pytest/issues/3974 " + "if you have concerns about removal of this parameter." +) + RESULT_LOG = PytestDeprecationWarning( "--result-log is deprecated and scheduled for removal in pytest 5.0.\n" "See https://docs.pytest.org/en/latest/deprecations.html#result-log-result-log for more information." diff --git a/src/_pytest/python_api.py b/src/_pytest/python_api.py index 7e5dc74a851..33e88b4090d 100644 --- a/src/_pytest/python_api.py +++ b/src/_pytest/python_api.py @@ -13,11 +13,11 @@ from six.moves import zip import _pytest._code +from _pytest import deprecated from _pytest.compat import isclass from _pytest.compat import Mapping from _pytest.compat import Sequence from _pytest.compat import STRING_TYPES -from _pytest.deprecated import RAISES_EXEC from _pytest.outcomes import fail BASE_TYPE = (type, STRING_TYPES) @@ -551,29 +551,47 @@ def _is_numpy_array(obj): def raises(expected_exception, *args, **kwargs): r""" Assert that a code block/function call raises ``expected_exception`` - and raise a failure exception otherwise. + or raise a failure exception otherwise. 
- :arg message: if specified, provides a custom failure message if the - exception is not raised - :arg match: if specified, asserts that the exception matches a text or regex + :kwparam match: if specified, asserts that the exception matches a text or regex - This helper produces a ``ExceptionInfo()`` object (see below). + :kwparam message: **(deprecated since 4.1)** if specified, provides a custom failure message + if the exception is not raised - You may use this function as a context manager:: + .. currentmodule:: _pytest._code + + Use ``pytest.raises`` as a context manager, which will capture the exception of the given + type:: >>> with raises(ZeroDivisionError): ... 1/0 - .. versionchanged:: 2.10 + If the code block does not raise the expected exception (``ZeroDivisionError`` in the example + above), or no exception at all, the check will fail instead. + + You can also use the keyword argument ``match`` to assert that the + exception matches a text or regex:: + + >>> with raises(ValueError, match='must be 0 or None'): + ... raise ValueError("value must be 0 or None") + + >>> with raises(ValueError, match=r'must be \d+$'): + ... raise ValueError("value must be 42") - In the context manager form you may use the keyword argument - ``message`` to specify a custom failure message:: + The context manager produces an :class:`ExceptionInfo` object which can be used to inspect the + details of the captured exception:: - >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): - ... pass - Traceback (most recent call last): - ... - Failed: Expecting ZeroDivisionError + >>> with raises(ValueError) as exc_info: + ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError + >>> assert exc_info.value.args[0] == "value must be 42" + + .. deprecated:: 4.1 + + In the context manager form you may use the keyword argument + ``message`` to specify a custom failure message that will be displayed + in case the ``pytest.raises`` check fails. This has been deprecated as it + is considered error prone as users often mean to use ``match`` instead. .. note:: @@ -587,7 +605,7 @@ def raises(expected_exception, *args, **kwargs): >>> with raises(ValueError) as exc_info: ... if value > 10: ... raise ValueError("value must be <= 10") - ... assert exc_info.type == ValueError # this will not execute + ... assert exc_info.type is ValueError # this will not execute Instead, the following approach must be taken (note the difference in scope):: @@ -596,23 +614,10 @@ def raises(expected_exception, *args, **kwargs): ... if value > 10: ... raise ValueError("value must be <= 10") ... - >>> assert exc_info.type == ValueError - - - Since version ``3.1`` you can use the keyword argument ``match`` to assert that the - exception matches a text or regex:: - - >>> with raises(ValueError, match='must be 0 or None'): - ... raise ValueError("value must be 0 or None") - - >>> with raises(ValueError, match=r'must be \d+$'): - ... raise ValueError("value must be 42") + >>> assert exc_info.type is ValueError **Legacy form** - The form below is fully supported but discouraged for new code because the - context manager form is regarded as more readable and less error-prone. - It is possible to specify a callable by passing a to-be-called lambda:: >>> raises(ZeroDivisionError, lambda: 1/0) @@ -627,9 +632,8 @@ def raises(expected_exception, *args, **kwargs): >>> raises(ZeroDivisionError, f, x=0) - .. currentmodule:: _pytest._code - - Consult the API of ``excinfo`` objects: :class:`ExceptionInfo`. 
+ The form above is fully supported but discouraged for new code because the + context manager form is regarded as more readable and less error-prone. .. note:: Similar to caught exception objects in Python, explicitly clearing @@ -660,6 +664,7 @@ def raises(expected_exception, *args, **kwargs): if not args: if "message" in kwargs: message = kwargs.pop("message") + warnings.warn(deprecated.RAISES_MESSAGE_PARAMETER, stacklevel=2) if "match" in kwargs: match_expr = kwargs.pop("match") if kwargs: @@ -668,7 +673,7 @@ def raises(expected_exception, *args, **kwargs): raise TypeError(msg) return RaisesContext(expected_exception, message, match_expr) elif isinstance(args[0], str): - warnings.warn(RAISES_EXEC, stacklevel=2) + warnings.warn(deprecated.RAISES_EXEC, stacklevel=2) code, = args assert isinstance(code, str) frame = sys._getframe(1) diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index 111ff629f25..4353ec2bef3 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -136,6 +136,12 @@ def test_func(pytestconfig): ) +def test_raises_message_argument_deprecated(): + with pytest.warns(pytest.PytestDeprecationWarning): + with pytest.raises(RuntimeError, message="foobar"): + raise RuntimeError + + def test_pytest_plugins_in_non_top_level_conftest_deprecated(testdir): from _pytest.deprecated import PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST diff --git a/testing/python/raises.py b/testing/python/raises.py index 52ad6cfa685..aad60d775dc 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -121,8 +121,9 @@ def test_no_raise_message(self): def test_custom_raise_message(self): message = "TEST_MESSAGE" try: - with pytest.raises(ValueError, message=message): - pass + with pytest.warns(PytestDeprecationWarning): + with pytest.raises(ValueError, message=message): + pass except pytest.raises.Exception as e: assert e.msg == message else: diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 669da6e172b..0b66acbf210 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -280,7 +280,7 @@ def test_assert_outcomes_after_pytest_error(testdir): testdir.makepyfile("def test_foo(): assert True") result = testdir.runpytest("--unexpected-argument") - with pytest.raises(ValueError, message="Pytest terminal report not found"): + with pytest.raises(ValueError, match="Pytest terminal report not found"): result.assert_outcomes(passed=0) From 1e80a9cb34c73066cc8fa232be9b20fe284b8ae9 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Thu, 13 Dec 2018 21:14:41 -0200 Subject: [PATCH 68/98] Remove pytest_funcarg__ prefix support for defining fixtures Fix #4543 --- changelog/4543.removal.rst | 3 +++ doc/en/deprecations.rst | 41 +++++++++++++++++----------------- src/_pytest/config/__init__.py | 4 ++-- src/_pytest/deprecated.py | 6 ----- src/_pytest/fixtures.py | 31 ++++--------------------- testing/deprecated_test.py | 22 ------------------ testing/python/fixture.py | 19 ---------------- 7 files changed, 30 insertions(+), 96 deletions(-) create mode 100644 changelog/4543.removal.rst diff --git a/changelog/4543.removal.rst b/changelog/4543.removal.rst new file mode 100644 index 00000000000..f810b5badfa --- /dev/null +++ b/changelog/4543.removal.rst @@ -0,0 +1,3 @@ +Remove support to define fixtures using the ``pytest_funcarg__`` prefix. Use the ``@pytest.fixture`` decorator instead. + +See our `docs `__ on information on how to update your code. 
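As a minimal sketch of the migration this removal implies (the fixture name ``db`` and its return value are invented for illustration and are not taken from the patch), a function relying on the removed ``pytest_funcarg__`` prefix is simply no longer collected as a fixture and has to be rewritten with the decorator:

.. code-block:: python

    import pytest

    # Previously collected as a fixture purely because of its name prefix;
    # after this removal it is just an ordinary, uncollected function:
    # def pytest_funcarg__db():
    #     return {"connected": True}

    # Equivalent fixture using the decorator:
    @pytest.fixture
    def db():
        return {"connected": True}

    def test_uses_db(db):
        assert db["connected"]
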
diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index a2f16d974bc..37cffb1fbe5 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -237,26 +237,6 @@ By passing a string, users expect that pytest will interpret that command-line u on (for example ``bash`` or ``Powershell``), but this is very hard/impossible to do in a portable way. -``pytest_funcarg__`` prefix -~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.0 - -In very early pytest versions fixtures could be defined using the ``pytest_funcarg__`` prefix: - -.. code-block:: python - - def pytest_funcarg__data(): - return SomeData() - -Switch over to the ``@pytest.fixture`` decorator: - -.. code-block:: python - - @pytest.fixture - def data(): - return SomeData() - [pytest] section in setup.cfg files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -295,6 +275,27 @@ collection. This issue should affect only advanced plugins who create new collection types, so if you see this warning message please contact the authors so they can change the code. +``pytest_funcarg__`` prefix +~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +In very early pytest versions fixtures could be defined using the ``pytest_funcarg__`` prefix: + +.. code-block:: python + + def pytest_funcarg__data(): + return SomeData() + +Switch over to the ``@pytest.fixture`` decorator: + +.. code-block:: python + + @pytest.fixture + def data(): + return SomeData() + + Metafunc.addcall ~~~~~~~~~~~~~~~~ diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index fafd8c930fe..3ed82694b92 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -261,8 +261,8 @@ def parse_hookimpl_opts(self, plugin, name): # (see issue #1073) if not name.startswith("pytest_"): return - # ignore some historic special names which can not be hooks anyway - if name == "pytest_plugins" or name.startswith("pytest_funcarg__"): + # ignore names which can not be hooks + if name == "pytest_plugins": return method = getattr(plugin, name) diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 426533a0c7c..67f0d534f1a 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -24,12 +24,6 @@ YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" -FUNCARG_PREFIX = UnformattedWarning( - RemovedInPytest4Warning, - '{name}: declaring fixtures using "pytest_funcarg__" prefix is deprecated ' - "and scheduled to be removed in pytest 4.0. " - "Please remove the prefix and use the @pytest.fixture decorator instead.", -) FIXTURE_FUNCTION_CALL = UnformattedWarning( RemovedInPytest4Warning, diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 3e5ad5f732b..0136dea09ac 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -38,8 +38,6 @@ from _pytest.outcomes import fail from _pytest.outcomes import TEST_OUTCOME -FIXTURE_MSG = 'fixtures cannot have "pytest_funcarg__" prefix and be decorated with @pytest.fixture:\n{}' - @attr.s(frozen=True) class PseudoFixtureDef(object): @@ -1117,7 +1115,6 @@ class FixtureManager(object): by a lookup of their FuncFixtureInfo. 
""" - _argprefix = "pytest_funcarg__" FixtureLookupError = FixtureLookupError FixtureLookupErrorRepr = FixtureLookupErrorRepr @@ -1255,8 +1252,6 @@ def pytest_collection_modifyitems(self, items): items[:] = reorder_items(items) def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): - from _pytest import deprecated - if nodeid is not NOTSET: holderobj = node_or_obj else: @@ -1272,31 +1267,13 @@ def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False): # access below can raise. safe_getatt() ignores such exceptions. obj = safe_getattr(holderobj, name, None) marker = getfixturemarker(obj) - # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) - # or are "@pytest.fixture" marked - if marker is None: - if not name.startswith(self._argprefix): - continue - if not callable(obj): - continue - marker = defaultfuncargprefixmarker - - filename, lineno = getfslineno(obj) - warnings.warn_explicit( - deprecated.FUNCARG_PREFIX.format(name=name), - category=None, - filename=str(filename), - lineno=lineno + 1, - ) - name = name[len(self._argprefix) :] - elif not isinstance(marker, FixtureFunctionMarker): + if not isinstance(marker, FixtureFunctionMarker): # magic globals with __getattr__ might have got us a wrong # fixture attribute continue - else: - if marker.name: - name = marker.name - assert not name.startswith(self._argprefix), FIXTURE_MSG.format(name) + + if marker.name: + name = marker.name # during fixture definition we wrap the original fixture function # to issue a warning if called directly, so here we unwrap it in order to not emit the warning diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index 4353ec2bef3..b971a9d2e7f 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -10,28 +10,6 @@ pytestmark = pytest.mark.pytester_example_path("deprecated") -def test_funcarg_prefix_deprecation(testdir): - testdir.makepyfile( - """ - def pytest_funcarg__value(): - return 10 - - def test_funcarg_prefix(value): - assert value == 10 - """ - ) - result = testdir.runpytest("-ra", SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - [ - ( - "*test_funcarg_prefix_deprecation.py:1: *pytest_funcarg__value: " - 'declaring fixtures using "pytest_funcarg__" prefix is deprecated*' - ), - "*1 passed*", - ] - ) - - @pytest.mark.filterwarnings("default") def test_pytest_setup_cfg_deprecated(testdir): testdir.makefile( diff --git a/testing/python/fixture.py b/testing/python/fixture.py index 2cc4122b446..a4ef5af879b 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -627,25 +627,6 @@ def test_func(something): pass print(ss.stack) assert teardownlist == [1] - def test_mark_as_fixture_with_prefix_and_decorator_fails(self, testdir): - testdir.makeconftest( - """ - import pytest - - @pytest.fixture - def pytest_funcarg__marked_with_prefix_and_decorator(): - pass - """ - ) - result = testdir.runpytest_subprocess() - assert result.ret != 0 - result.stdout.fnmatch_lines( - [ - "*AssertionError: fixtures cannot have*@pytest.fixture*", - "*pytest_funcarg__marked_with_prefix_and_decorator*", - ] - ) - def test_request_addfinalizer_failing_setup(self, testdir): testdir.makepyfile( """ From 98987177a082944135ea2b208b38ec7d32b02d39 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 14 Dec 2018 11:17:24 -0200 Subject: [PATCH 69/98] Review changelog entries for features branch I used `towncrier --draft` to see the full changelog, and decided to "nitpick" it so it reads better as a whole. 
--- changelog/3050.deprecation.rst | 4 +++- changelog/3079.removal.rst | 2 +- changelog/3083.removal.rst | 2 +- changelog/3191.feature.rst | 16 +++++++++++----- changelog/3616.removal.rst | 2 +- changelog/4278.feature.rst | 4 ++++ changelog/4278.trivial.rst | 1 - changelog/4292.feature.rst | 2 +- changelog/4386.feature.rst | 2 +- changelog/4416.feature.rst | 2 +- changelog/4421.removal.rst | 2 +- changelog/4435.bugfix.rst | 2 +- changelog/4435.deprecation.rst | 4 +++- changelog/4483.feature.rst | 11 +++++++++-- changelog/4535.removal.rst | 2 +- changelog/4543.removal.rst | 2 +- 16 files changed, 40 insertions(+), 20 deletions(-) create mode 100644 changelog/4278.feature.rst delete mode 100644 changelog/4278.trivial.rst diff --git a/changelog/3050.deprecation.rst b/changelog/3050.deprecation.rst index 2da417c85a3..fce5979d677 100644 --- a/changelog/3050.deprecation.rst +++ b/changelog/3050.deprecation.rst @@ -1 +1,3 @@ -Deprecate ``pytest.config`` global. See https://docs.pytest.org/en/latest/deprecations.html#pytest-config-global +Deprecated the ``pytest.config`` global. + +See https://docs.pytest.org/en/latest/deprecations.html#pytest-config-global for rationale. diff --git a/changelog/3079.removal.rst b/changelog/3079.removal.rst index 19a8612f0c7..cb2265ff386 100644 --- a/changelog/3079.removal.rst +++ b/changelog/3079.removal.rst @@ -1,3 +1,3 @@ -Remove support for yield tests - they are fundamentally broken because they don't support fixtures properly since collection and test execution were separated. +Removed support for yield tests - they are fundamentally broken because they don't support fixtures properly since collection and test execution were separated. See our `docs `__ on information on how to update your code. diff --git a/changelog/3083.removal.rst b/changelog/3083.removal.rst index ce689b94adb..74d268a4e28 100644 --- a/changelog/3083.removal.rst +++ b/changelog/3083.removal.rst @@ -1,3 +1,3 @@ -Remove ``Metafunc.addcall``. This was the predecessor mechanism to ``@pytest.mark.parametrize``. +Removed ``Metafunc.addcall``. This was the predecessor mechanism to ``@pytest.mark.parametrize``. See our `docs `__ on information on how to update your code. diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst index 7eb4c3a15a3..dbf1c8304b8 100644 --- a/changelog/3191.feature.rst +++ b/changelog/3191.feature.rst @@ -1,16 +1,22 @@ A warning is now issued when assertions are made for ``None``. -This is a common source of confusion among new users, which write:: +This is a common source of confusion among new users, which write: - assert mocked_object.assert_called_with(3, 4, 5, key='value') +.. code-block:: python -When they should write:: + assert mocked_object.assert_called_with(3, 4, 5, key="value") - mocked_object.assert_called_with(3, 4, 5, key='value') +When they should write: + +.. code-block:: python + + mocked_object.assert_called_with(3, 4, 5, key="value") Because the ``assert_called_with`` method of mock objects already executes an assertion. -This warning will not be issued when ``None`` is explicitly checked. An assertion like:: +This warning will not be issued when ``None`` is explicitly checked. An assertion like: + +.. 
code-block:: python assert variable is None diff --git a/changelog/3616.removal.rst b/changelog/3616.removal.rst index a8f2f1c92a9..5d8c9134e0b 100644 --- a/changelog/3616.removal.rst +++ b/changelog/3616.removal.rst @@ -1,3 +1,3 @@ -Remove the deprecated compat properties for ``node.Class/Function/Module`` - use ``pytest.Class/Function/Module`` now. +Removed the deprecated compat properties for ``node.Class/Function/Module`` - use ``pytest.Class/Function/Module`` now. See our `docs `__ on information on how to update your code. diff --git a/changelog/4278.feature.rst b/changelog/4278.feature.rst new file mode 100644 index 00000000000..332e64572a2 --- /dev/null +++ b/changelog/4278.feature.rst @@ -0,0 +1,4 @@ +``CACHEDIR.TAG`` files are now created inside cache directories. + +Those files are part of the `Cache Directory Tagging Standard `__, and can +be used by backup or synchronization programs to identify pytest's cache directory as such. diff --git a/changelog/4278.trivial.rst b/changelog/4278.trivial.rst deleted file mode 100644 index 126cabea859..00000000000 --- a/changelog/4278.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -A CACHEDIR.TAG file gets added to the cache directory. diff --git a/changelog/4292.feature.rst b/changelog/4292.feature.rst index 27d113ba069..760a2778309 100644 --- a/changelog/4292.feature.rst +++ b/changelog/4292.feature.rst @@ -1 +1 @@ -``pytest.outcomes.Exit`` is derived from ``SystemExit`` instead of ``KeyboardInterrupt``. +``pytest.outcomes.Exit`` is derived from ``SystemExit`` instead of ``KeyboardInterrupt``. This allows us to better handle ``pdb`` exiting. diff --git a/changelog/4386.feature.rst b/changelog/4386.feature.rst index fe827cc2394..5133a39a773 100644 --- a/changelog/4386.feature.rst +++ b/changelog/4386.feature.rst @@ -1 +1 @@ -Restructure ExceptionInfo object construction and ensure incomplete instances have a ``repr``/``str``. +Restructured ``ExceptionInfo`` object construction and ensure incomplete instances have a ``repr``/``str``. diff --git a/changelog/4416.feature.rst b/changelog/4416.feature.rst index 89c0a84b10e..949e7c25a9a 100644 --- a/changelog/4416.feature.rst +++ b/changelog/4416.feature.rst @@ -1,4 +1,4 @@ -pdb: support keyword arguments with ``pdb.set_trace`` +pdb: added support for keyword arguments with ``pdb.set_trace``. It handles ``header`` similar to Python 3.7 does it, and forwards any other keyword arguments to the ``Pdb`` constructor. diff --git a/changelog/4421.removal.rst b/changelog/4421.removal.rst index 279587d06da..4bebd5c1941 100644 --- a/changelog/4421.removal.rst +++ b/changelog/4421.removal.rst @@ -1,3 +1,3 @@ -Remove the implementation of the ``pytest_namespace`` hook. +Removed the implementation of the ``pytest_namespace`` hook. See our `docs `__ on information on how to update your code. diff --git a/changelog/4435.bugfix.rst b/changelog/4435.bugfix.rst index de60b5e62b5..36ace1fab44 100644 --- a/changelog/4435.bugfix.rst +++ b/changelog/4435.bugfix.rst @@ -1 +1 @@ -Fix ``raises(..., 'code(string)')`` frame filename. +Fixed ``raises(..., 'code(string)')`` frame filename. diff --git a/changelog/4435.deprecation.rst b/changelog/4435.deprecation.rst index f12f0bc6cbf..6815c1776ee 100644 --- a/changelog/4435.deprecation.rst +++ b/changelog/4435.deprecation.rst @@ -1 +1,3 @@ -Deprecate ``raises(..., 'code(as_a_string)')`` and ``warns(..., 'code(as_a_string)')``. 
See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec +Deprecated ``raises(..., 'code(as_a_string)')`` and ``warns(..., 'code(as_a_string)')``. + +See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec for rationale and examples. diff --git a/changelog/4483.feature.rst b/changelog/4483.feature.rst index d9bd4c7173f..e70db297981 100644 --- a/changelog/4483.feature.rst +++ b/changelog/4483.feature.rst @@ -1,2 +1,9 @@ -Add ini parameter ``junit_time`` to optionally report test call -durations less setup and teardown times. +Added ini parameter ``junit_time`` to optionally report test call durations, excluding setup and teardown times. + +The JUnit XML specification and the default pytest behavior is to include setup and teardown times in the test duration +report. You can include just the call durations instead (excluding setup and teardown) by adding this to your ``pytest.ini`` file: + +.. code-block:: ini + + [pytest] + junit_time = call diff --git a/changelog/4535.removal.rst b/changelog/4535.removal.rst index f899005871a..89de6b74405 100644 --- a/changelog/4535.removal.rst +++ b/changelog/4535.removal.rst @@ -1 +1 @@ -Removed deprecated ``PyCollector.makeitem`` method. This method was made public by mistake a long time ago. +Removed the deprecated ``PyCollector.makeitem`` method. This method was made public by mistake a long time ago. diff --git a/changelog/4543.removal.rst b/changelog/4543.removal.rst index f810b5badfa..0a2b615f950 100644 --- a/changelog/4543.removal.rst +++ b/changelog/4543.removal.rst @@ -1,3 +1,3 @@ -Remove support to define fixtures using the ``pytest_funcarg__`` prefix. Use the ``@pytest.fixture`` decorator instead. +Removed support to define fixtures using the ``pytest_funcarg__`` prefix. Use the ``@pytest.fixture`` decorator instead. See our `docs `__ on information on how to update your code. From fd48cd57f95d2c4e2f6f4e416d20c36b4484d094 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 11 Dec 2018 20:02:36 -0200 Subject: [PATCH 70/98] Remove config.warn, Node.warn; pytest_logwarning issues a warning when implemented Fix #3078 --- changelog/3078.removal.rst | 3 ++ doc/en/deprecations.rst | 56 +++++++++++----------- doc/en/reference.rst | 1 - src/_pytest/assertion/rewrite.py | 6 +-- src/_pytest/cacheprovider.py | 6 +-- src/_pytest/config/__init__.py | 82 +++++++++++--------------------- src/_pytest/config/findpaths.py | 12 ++--- src/_pytest/deprecated.py | 16 +++++-- src/_pytest/hookspec.py | 4 +- src/_pytest/nodes.py | 71 ++------------------------- src/_pytest/resultlog.py | 4 +- src/_pytest/terminal.py | 12 +---- src/_pytest/warnings.py | 6 +-- testing/acceptance_test.py | 1 + testing/deprecated_test.py | 1 + testing/test_assertrewrite.py | 4 +- testing/test_config.py | 61 ------------------------ testing/test_pluginmanager.py | 24 ++-------- testing/test_pytester.py | 2 +- testing/test_warnings.py | 4 +- 20 files changed, 107 insertions(+), 269 deletions(-) create mode 100644 changelog/3078.removal.rst diff --git a/changelog/3078.removal.rst b/changelog/3078.removal.rst new file mode 100644 index 00000000000..8f90811c3a2 --- /dev/null +++ b/changelog/3078.removal.rst @@ -0,0 +1,3 @@ +Remove legacy internal warnings system: ``config.warn``, ``Node.warn``. The ``pytest_logwarning`` now issues a warning when implemented. + +See our `docs `__ on information on how to update your code. 
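As a rough sketch of what this removal means for plugin code (the hook body, the warning texts and the ``slow`` marker below are made-up examples, not part of the patch), warnings that previously went through ``config.warn``/``Node.warn(code, message)`` are now raised with the standard ``warnings`` module and ``pytest.PytestWarning`` instances:

.. code-block:: python

    import warnings

    import pytest

    def pytest_collection_modifyitems(config, items):
        # before: config.warn("C1", "no tests collected for the nightly job")
        if not items:
            warnings.warn(pytest.PytestWarning("no tests collected for the nightly job"))
        for item in items:
            if item.get_closest_marker("slow") is not None:
                # before: item.warn("C2", "slow test collected")
                # Node.warn now only accepts a PytestWarning (or subclass) instance.
                item.warn(pytest.PytestWarning("slow test collected"))
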
diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index a2f16d974bc..f36423ad62a 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -74,34 +74,6 @@ Becomes: -``Config.warn`` and ``Node.warn`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.8 - -Those methods were part of the internal pytest warnings system, but since ``3.8`` pytest is using the builtin warning -system for its own warnings, so those two functions are now deprecated. - -``Config.warn`` should be replaced by calls to the standard ``warnings.warn``, example: - -.. code-block:: python - - config.warn("C1", "some warning") - -Becomes: - -.. code-block:: python - - warnings.warn(pytest.PytestWarning("some warning")) - -``Node.warn`` now supports two signatures: - -* ``node.warn(PytestWarning("some message"))``: is now the **recommended** way to call this function. - The warning instance must be a PytestWarning or subclass. - -* ``node.warn("CI", "some message")``: this code/message form is now **deprecated** and should be converted to the warning instance form above. - - Calling fixtures directly ~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -350,7 +322,33 @@ This should be updated to make use of standard fixture mechanisms: You can consult `funcarg comparison section in the docs `_ for more information. -This has been documented as deprecated for years, but only now we are actually emitting deprecation warnings. + +``Config.warn`` and ``Node.warn`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +Those methods were part of the internal pytest warnings system, but since ``3.8`` pytest is using the builtin warning +system for its own warnings, so those two functions are now deprecated. + +``Config.warn`` should be replaced by calls to the standard ``warnings.warn``, example: + +.. code-block:: python + + config.warn("C1", "some warning") + +Becomes: + +.. code-block:: python + + warnings.warn(pytest.PytestWarning("some warning")) + +``Node.warn`` now supports two signatures: + +* ``node.warn(PytestWarning("some message"))``: is now the **recommended** way to call this function. + The warning instance must be a PytestWarning or subclass. + +* ``node.warn("CI", "some message")``: this code/message form has been **removed** and should be converted to the warning instance form above. ``yield`` tests ~~~~~~~~~~~~~~~ diff --git a/doc/en/reference.rst b/doc/en/reference.rst index da53e7fea91..754035d16b8 100644 --- a/doc/en/reference.rst +++ b/doc/en/reference.rst @@ -618,7 +618,6 @@ Session related reporting hooks: .. autofunction:: pytest_terminal_summary .. autofunction:: pytest_fixture_setup .. autofunction:: pytest_fixture_post_finalizer -.. autofunction:: pytest_logwarning .. 
autofunction:: pytest_warning_captured And here is the central hook for reporting about diff --git a/src/_pytest/assertion/rewrite.py b/src/_pytest/assertion/rewrite.py index 78b8edcd87b..1d2c27ed152 100644 --- a/src/_pytest/assertion/rewrite.py +++ b/src/_pytest/assertion/rewrite.py @@ -278,11 +278,11 @@ def mark_rewrite(self, *names): def _warn_already_imported(self, name): from _pytest.warning_types import PytestWarning - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured - _issue_config_warning( + _issue_warning_captured( PytestWarning("Module already imported so cannot be rewritten: %s" % name), - self.config, + self.config.hook, stacklevel=5, ) diff --git a/src/_pytest/cacheprovider.py b/src/_pytest/cacheprovider.py index 59265ad85f6..87b2e5426e2 100755 --- a/src/_pytest/cacheprovider.py +++ b/src/_pytest/cacheprovider.py @@ -59,12 +59,12 @@ def cache_dir_from_config(config): return resolve_from_str(config.getini("cache_dir"), config.rootdir) def warn(self, fmt, **args): - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured from _pytest.warning_types import PytestWarning - _issue_config_warning( + _issue_warning_captured( PytestWarning(fmt.format(**args) if args else fmt), - self._config, + self._config.hook, stacklevel=3, ) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index fafd8c930fe..248ed585799 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -26,11 +26,13 @@ from .exceptions import UsageError from .findpaths import determine_setup from .findpaths import exists +from _pytest import deprecated from _pytest._code import ExceptionInfo from _pytest._code import filter_traceback from _pytest.compat import lru_cache from _pytest.compat import safe_str from _pytest.outcomes import Skipped +from _pytest.warning_types import PytestWarning hookimpl = HookimplMarker("pytest") hookspec = HookspecMarker("pytest") @@ -189,9 +191,9 @@ def _prepareconfig(args=None, plugins=None): else: pluginmanager.register(plugin) if warning: - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured - _issue_config_warning(warning, config=config, stacklevel=4) + _issue_warning_captured(warning, hook=config.hook, stacklevel=4) return pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args ) @@ -245,14 +247,7 @@ def addhooks(self, module_or_class): Use :py:meth:`pluggy.PluginManager.add_hookspecs ` instead. 
""" - warning = dict( - code="I2", - fslocation=_pytest._code.getfslineno(sys._getframe(1)), - nodeid=None, - message="use pluginmanager.add_hookspecs instead of " - "deprecated addhooks() method.", - ) - self._warn(warning) + warnings.warn(deprecated.PLUGIN_MANAGER_ADDHOOKS, stacklevel=2) return self.add_hookspecs(module_or_class) def parse_hookimpl_opts(self, plugin, name): @@ -296,10 +291,12 @@ def parse_hookspec_opts(self, module_or_class, name): def register(self, plugin, name=None): if name in ["pytest_catchlog", "pytest_capturelog"]: - self._warn( - "{} plugin has been merged into the core, " - "please remove it from your requirements.".format( - name.replace("_", "-") + warnings.warn( + PytestWarning( + "{} plugin has been merged into the core, " + "please remove it from your requirements.".format( + name.replace("_", "-") + ) ) ) return @@ -336,14 +333,6 @@ def pytest_configure(self, config): ) self._configured = True - def _warn(self, message): - kwargs = ( - message - if isinstance(message, dict) - else {"code": "I1", "message": message, "fslocation": None, "nodeid": None} - ) - self.hook.pytest_logwarning.call_historic(kwargs=kwargs) - # # internal API for local conftest plugin handling # @@ -542,7 +531,13 @@ def import_plugin(self, modname): six.reraise(new_exc_type, new_exc, sys.exc_info()[2]) except Skipped as e: - self._warn("skipped plugin %r: %s" % ((modname, e.msg))) + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured( + PytestWarning("skipped plugin %r: %s" % (modname, e.msg)), + self.hook, + stacklevel=1, + ) else: mod = sys.modules[importspec] self.register(mod, modname) @@ -617,7 +612,6 @@ def __init__(self, pluginmanager): self._override_ini = () self._opt2dest = {} self._cleanup = [] - self._warn = self.pluginmanager._warn self.pluginmanager.register(self, "pytestconfig") self._configured = False self.invocation_dir = py.path.local() @@ -642,36 +636,6 @@ def _ensure_unconfigure(self): fin = self._cleanup.pop() fin() - def warn(self, code, message, fslocation=None, nodeid=None): - """ - .. deprecated:: 3.8 - - Use :py:func:`warnings.warn` or :py:func:`warnings.warn_explicit` directly instead. - - Generate a warning for this test session. 
- """ - from _pytest.warning_types import RemovedInPytest4Warning - - if isinstance(fslocation, (tuple, list)) and len(fslocation) > 2: - filename, lineno = fslocation[:2] - else: - filename = "unknown file" - lineno = 0 - msg = "config.warn has been deprecated, use warnings.warn instead" - if nodeid: - msg = "{}: {}".format(nodeid, msg) - warnings.warn_explicit( - RemovedInPytest4Warning(msg), - category=None, - filename=filename, - lineno=lineno, - ) - self.hook.pytest_logwarning.call_historic( - kwargs=dict( - code=code, message=message, fslocation=fslocation, nodeid=nodeid - ) - ) - def get_terminal_writer(self): return self.pluginmanager.get_plugin("terminalreporter")._tw @@ -826,7 +790,15 @@ def _preparse(self, args, addopts=True): if ns.help or ns.version: # we don't want to prevent --help/--version to work # so just let is pass and print a warning at the end - self._warn("could not load initial conftests (%s)\n" % e.path) + from _pytest.warnings import _issue_warning_captured + + _issue_warning_captured( + PytestWarning( + "could not load initial conftests: {}".format(e.path) + ), + self.hook, + stacklevel=2, + ) else: raise diff --git a/src/_pytest/config/findpaths.py b/src/_pytest/config/findpaths.py index 169b8ddfcfb..eecc92606d4 100644 --- a/src/_pytest/config/findpaths.py +++ b/src/_pytest/config/findpaths.py @@ -34,14 +34,14 @@ def getcfg(args, config=None): iniconfig = py.iniconfig.IniConfig(p) if "pytest" in iniconfig.sections: if inibasename == "setup.cfg" and config is not None: - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured from _pytest.warning_types import RemovedInPytest4Warning - _issue_config_warning( + _issue_warning_captured( RemovedInPytest4Warning( CFG_PYTEST_SECTION.format(filename=inibasename) ), - config=config, + hook=config.hook, stacklevel=2, ) return base, p, iniconfig["pytest"] @@ -112,13 +112,13 @@ def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): inicfg = iniconfig[section] if is_cfg_file and section == "pytest" and config is not None: from _pytest.deprecated import CFG_PYTEST_SECTION - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured # TODO: [pytest] section in *.cfg files is deprecated. Need refactoring once # the deprecation expires. - _issue_config_warning( + _issue_warning_captured( CFG_PYTEST_SECTION.format(filename=str(inifile)), - config, + config.hook, stacklevel=2, ) break diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 426533a0c7c..4136328e19a 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -75,10 +75,6 @@ "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" ) -NODE_WARN = RemovedInPytest4Warning( - "Node.warn(code, message) form has been deprecated, use Node.warn(warning_instance) instead." -) - RAISES_EXEC = PytestDeprecationWarning( "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n" "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" @@ -94,6 +90,13 @@ '"record_xml_property" is now deprecated.' ) +PLUGIN_MANAGER_ADDHOOKS = PytestDeprecationWarning( + "use pluginmanager.add_hookspecs instead of deprecated addhooks() method." 
+) + +COLLECTOR_MAKEITEM = RemovedInPytest4Warning( + "pycollector makeitem was removed as it is an accidentially leaked internal api" +) PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( "Defining pytest_plugins in a non-top-level conftest is deprecated, " @@ -110,3 +113,8 @@ "pytest/tmpdir_factory.ensuretemp is deprecated, \n" "please use the tmp_path fixture or tmp_path_factory.mktemp" ) + +PYTEST_LOGWARNING = PytestDeprecationWarning( + "pytest_logwarning is deprecated, no longer being called, and will be removed soon\n" + "please use pytest_warning_captured instead" +) diff --git a/src/_pytest/hookspec.py b/src/_pytest/hookspec.py index 0d9f039a173..2dfbfd0c99a 100644 --- a/src/_pytest/hookspec.py +++ b/src/_pytest/hookspec.py @@ -1,6 +1,8 @@ """ hook specifications for pytest plugins, invoked from main.py and builtin plugins. """ from pluggy import HookspecMarker +from _pytest.deprecated import PYTEST_LOGWARNING + hookspec = HookspecMarker("pytest") @@ -496,7 +498,7 @@ def pytest_terminal_summary(terminalreporter, exitstatus): """ -@hookspec(historic=True) +@hookspec(historic=True, warn_on_impl=PYTEST_LOGWARNING) def pytest_logwarning(message, code, nodeid, fslocation): """ .. deprecated:: 3.8 diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py index 1b41898c4c8..b324b1f6899 100644 --- a/src/_pytest/nodes.py +++ b/src/_pytest/nodes.py @@ -105,81 +105,20 @@ def ihook(self): def __repr__(self): return "<%s %s>" % (self.__class__.__name__, getattr(self, "name", None)) - def warn(self, _code_or_warning=None, message=None, code=None): + def warn(self, warning): """Issue a warning for this item. - Warnings will be displayed after the test session, unless explicitly suppressed. + Warnings will be displayed after the test session, unless explicitly suppressed - This can be called in two forms: + :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. - **Warning instance** + :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. - This was introduced in pytest 3.8 and uses the standard warning mechanism to issue warnings. + Example usage:: .. code-block:: python node.warn(PytestWarning("some message")) - - The warning instance must be a subclass of :class:`pytest.PytestWarning`. - - **code/message (deprecated)** - - This form was used in pytest prior to 3.8 and is considered deprecated. Using this form will emit another - warning about the deprecation: - - .. code-block:: python - - node.warn("CI", "some message") - - :param Union[Warning,str] _code_or_warning: - warning instance or warning code (legacy). This parameter receives an underscore for backward - compatibility with the legacy code/message form, and will be replaced for something - more usual when the legacy form is removed. - - :param Union[str,None] message: message to display when called in the legacy form. - :param str code: code for the warning, in legacy form when using keyword arguments. - :return: - """ - if message is None: - if _code_or_warning is None: - raise ValueError("code_or_warning must be given") - self._std_warn(_code_or_warning) - else: - if _code_or_warning and code: - raise ValueError( - "code_or_warning and code cannot both be passed to this function" - ) - code = _code_or_warning or code - self._legacy_warn(code, message) - - def _legacy_warn(self, code, message): - """ - .. deprecated:: 3.8 - - Use :meth:`Node.std_warn <_pytest.nodes.Node.std_warn>` instead. 
- - Generate a warning with the given code and message for this item. - """ - from _pytest.deprecated import NODE_WARN - - self._std_warn(NODE_WARN) - - assert isinstance(code, str) - fslocation = get_fslocation_from_item(self) - self.ihook.pytest_logwarning.call_historic( - kwargs=dict( - code=code, message=message, nodeid=self.nodeid, fslocation=fslocation - ) - ) - - def _std_warn(self, warning): - """Issue a warning for this item. - - Warnings will be displayed after the test session, unless explicitly suppressed - - :param Warning warning: the warning instance to issue. Must be a subclass of PytestWarning. - - :raise ValueError: if ``warning`` instance is not a subclass of PytestWarning. """ from _pytest.warning_types import PytestWarning diff --git a/src/_pytest/resultlog.py b/src/_pytest/resultlog.py index ab2d0f98b9d..bdf8130fdd5 100644 --- a/src/_pytest/resultlog.py +++ b/src/_pytest/resultlog.py @@ -34,9 +34,9 @@ def pytest_configure(config): config.pluginmanager.register(config._resultlog) from _pytest.deprecated import RESULT_LOG - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured - _issue_config_warning(RESULT_LOG, config, stacklevel=2) + _issue_warning_captured(RESULT_LOG, config.hook, stacklevel=2) def pytest_unconfigure(config): diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index 1137d52b885..82719b5d40d 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -186,20 +186,17 @@ def pytest_report_teststatus(report): @attr.s class WarningReport(object): """ - Simple structure to hold warnings information captured by ``pytest_logwarning`` and ``pytest_warning_captured``. + Simple structure to hold warnings information captured by ``pytest_warning_captured``. :ivar str message: user friendly message about the warning :ivar str|None nodeid: node id that generated the warning (see ``get_location``). :ivar tuple|py.path.local fslocation: file system location of the source of the warning (see ``get_location``). - - :ivar bool legacy: if this warning report was generated from the deprecated ``pytest_logwarning`` hook. """ message = attr.ib() nodeid = attr.ib(default=None) fslocation = attr.ib(default=None) - legacy = attr.ib(default=False) def get_location(self, config): """ @@ -329,13 +326,6 @@ def pytest_internalerror(self, excrepr): self.write_line("INTERNALERROR> " + line) return 1 - def pytest_logwarning(self, fslocation, message, nodeid): - warnings = self.stats.setdefault("warnings", []) - warning = WarningReport( - fslocation=fslocation, message=message, nodeid=nodeid, legacy=True - ) - warnings.append(warning) - def pytest_warning_captured(self, warning_message, item): # from _pytest.nodes import get_fslocation_from_item from _pytest.warnings import warning_record_to_str diff --git a/src/_pytest/warnings.py b/src/_pytest/warnings.py index e3e206933ed..76498573694 100644 --- a/src/_pytest/warnings.py +++ b/src/_pytest/warnings.py @@ -160,19 +160,19 @@ def pytest_terminal_summary(terminalreporter): yield -def _issue_config_warning(warning, config, stacklevel): +def _issue_warning_captured(warning, hook, stacklevel): """ This function should be used instead of calling ``warnings.warn`` directly when we are in the "configure" stage: at this point the actual options might not have been set, so we manually trigger the pytest_warning_captured hook so we can display this warnings in the terminal. This is a hack until we can sort out #2891. :param warning: the warning instance. 
- :param config: + :param hook: the hook caller :param stacklevel: stacklevel forwarded to warnings.warn """ with warnings.catch_warnings(record=True) as records: warnings.simplefilter("always", type(warning)) warnings.warn(warning, stacklevel=stacklevel) - config.hook.pytest_warning_captured.call_historic( + hook.pytest_warning_captured.call_historic( kwargs=dict(warning_message=records[0], when="config", item=None) ) diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 0b7af5338d6..f81680d0d61 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -146,6 +146,7 @@ def test_not_collectable_arguments(self, testdir): assert result.ret result.stderr.fnmatch_lines(["*ERROR: not found:*{}".format(p2.basename)]) + @pytest.mark.filterwarnings("default") def test_better_reporting_on_conftest_load_failure(self, testdir, request): """Show a user-friendly traceback on conftest import failures (#486, #3332)""" testdir.makepyfile("") diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index 4353ec2bef3..c7515d2cb5c 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -120,6 +120,7 @@ def test_terminal_reporter_writer_attr(pytestconfig): @pytest.mark.parametrize("plugin", ["catchlog", "capturelog"]) +@pytest.mark.filterwarnings("default") def test_pytest_catchlog_deprecated(testdir, plugin): testdir.makepyfile( """ diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index a02433cd62a..4187e365b6d 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -823,7 +823,9 @@ def test_remember_rewritten_modules(self, pytestconfig, testdir, monkeypatch): testdir.makepyfile(test_remember_rewritten_modules="") warnings = [] hook = AssertionRewritingHook(pytestconfig) - monkeypatch.setattr(hook.config, "warn", lambda code, msg: warnings.append(msg)) + monkeypatch.setattr( + hook, "_warn_already_imported", lambda code, msg: warnings.append(msg) + ) hook.find_module("test_remember_rewritten_modules") hook.load_module("test_remember_rewritten_modules") hook.mark_rewrite("test_remember_rewritten_modules") diff --git a/testing/test_config.py b/testing/test_config.py index 012b8936c3a..f757bb018d4 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -12,7 +12,6 @@ from _pytest.config.findpaths import get_common_ancestor from _pytest.config.findpaths import getcfg from _pytest.main import EXIT_NOTESTSCOLLECTED -from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG class TestParseIni(object): @@ -790,66 +789,6 @@ class pytest_something(object): assert pm.parse_hookimpl_opts(Dummy(), "pytest_something") is None -class TestLegacyWarning(object): - @pytest.mark.filterwarnings("default") - def test_warn_config(self, testdir): - testdir.makeconftest( - """ - values = [] - def pytest_runtest_setup(item): - item.config.warn("C1", "hello") - def pytest_logwarning(code, message): - if message == "hello" and code == "C1": - values.append(1) - """ - ) - testdir.makepyfile( - """ - def test_proper(pytestconfig): - import conftest - assert conftest.values == [1] - """ - ) - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - ["*hello", "*config.warn has been deprecated*", "*1 passed*"] - ) - - @pytest.mark.filterwarnings("default") - @pytest.mark.parametrize("use_kw", [True, False]) - def test_warn_on_test_item_from_request(self, testdir, use_kw): - code_kw = "code=" if use_kw else "" - message_kw = "message=" if use_kw else "" - testdir.makepyfile( - """ - 
import pytest - - @pytest.fixture - def fix(request): - request.node.warn({code_kw}"T1", {message_kw}"hello") - - def test_hello(fix): - pass - """.format( - code_kw=code_kw, message_kw=message_kw - ) - ) - result = testdir.runpytest( - "--disable-pytest-warnings", SHOW_PYTEST_WARNINGS_ARG - ) - assert "hello" not in result.stdout.str() - - result = testdir.runpytest(SHOW_PYTEST_WARNINGS_ARG) - result.stdout.fnmatch_lines( - """ - ===*warnings summary*=== - *test_warn_on_test_item_from_request.py::test_hello* - *hello* - *test_warn_on_test_item_from_request.py:7:*Node.warn(code, message) form has been deprecated* - """ - ) - - class TestRootdir(object): def test_simple_noini(self, tmpdir): assert get_common_ancestor([tmpdir]) == tmpdir diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 80ef3db02a2..80817932eee 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -32,7 +32,7 @@ def pytest_myhook(xyz): """ import newhooks def pytest_addhooks(pluginmanager): - pluginmanager.addhooks(newhooks) + pluginmanager.add_hookspecs(newhooks) def pytest_myhook(xyz): return xyz + 1 """ @@ -52,7 +52,7 @@ def test_addhooks_nohooks(self, testdir): """ import sys def pytest_addhooks(pluginmanager): - pluginmanager.addhooks(sys) + pluginmanager.add_hookspecs(sys) """ ) res = testdir.runpytest() @@ -141,23 +141,6 @@ def test_hook_proxy(self, testdir): ihook_b = session.gethookproxy(testdir.tmpdir.join("tests")) assert ihook_a is not ihook_b - def test_warn_on_deprecated_addhooks(self, pytestpm): - warnings = [] - - class get_warnings(object): - def pytest_logwarning(self, code, fslocation, message, nodeid): - warnings.append(message) - - class Plugin(object): - def pytest_testhook(): - pass - - pytestpm.register(get_warnings()) - before = list(warnings) - pytestpm.addhooks(Plugin()) - assert len(warnings) == len(before) + 1 - assert "deprecated" in warnings[-1] - def test_default_markers(testdir): result = testdir.runpytest("--markers") @@ -240,11 +223,12 @@ def test_consider_env_fails_to_import(self, monkeypatch, pytestpm): with pytest.raises(ImportError): pytestpm.consider_env() + @pytest.mark.filterwarnings("always") def test_plugin_skip(self, testdir, monkeypatch): p = testdir.makepyfile( skipping1=""" import pytest - pytest.skip("hello") + pytest.skip("hello", allow_module_level=True) """ ) p.copy(p.dirpath("skipping2.py")) diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 0b66acbf210..d14fbd18e8e 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -168,7 +168,7 @@ def pytest_xyz_noarg(): @pytest.mark.parametrize("holder", make_holder()) def test_hookrecorder_basic(holder): pm = PytestPluginManager() - pm.addhooks(holder) + pm.add_hookspecs(holder) rec = HookRecorder(pm) pm.hook.pytest_xyz(arg=123) call = rec.popcall("pytest_xyz") diff --git a/testing/test_warnings.py b/testing/test_warnings.py index 655c89f4c42..3bac9a545d0 100644 --- a/testing/test_warnings.py +++ b/testing/test_warnings.py @@ -308,9 +308,9 @@ def test_func(): def test_warning_captured_hook(testdir): testdir.makeconftest( """ - from _pytest.warnings import _issue_config_warning + from _pytest.warnings import _issue_warning_captured def pytest_configure(config): - _issue_config_warning(UserWarning("config warning"), config, stacklevel=2) + _issue_warning_captured(UserWarning("config warning"), config.hook, stacklevel=2) """ ) testdir.makepyfile( From 231863b1337b708b2c95db975f7f2d9f6d0ef086 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira 
Date: Fri, 14 Dec 2018 12:56:26 -0200 Subject: [PATCH 71/98] Rename "junit_time" to "junit_duration_report" option Just realized while reading the changelog that "junit_time" is not a very good name, so I decided to open this PR renaming it to "junit_duration_report" which I believe conveys the meaning of the option better --- changelog/4483.feature.rst | 4 ++-- doc/en/usage.rst | 4 ++-- src/_pytest/junitxml.py | 6 ++++-- testing/test_junitxml.py | 4 ++-- 4 files changed, 10 insertions(+), 8 deletions(-) diff --git a/changelog/4483.feature.rst b/changelog/4483.feature.rst index e70db297981..9b301870753 100644 --- a/changelog/4483.feature.rst +++ b/changelog/4483.feature.rst @@ -1,4 +1,4 @@ -Added ini parameter ``junit_time`` to optionally report test call durations, excluding setup and teardown times. +Added ini parameter ``junit_duration_report`` to optionally report test call durations, excluding setup and teardown times. The JUnit XML specification and the default pytest behavior is to include setup and teardown times in the test duration report. You can include just the call durations instead (excluding setup and teardown) by adding this to your ``pytest.ini`` file: @@ -6,4 +6,4 @@ report. You can include just the call durations instead (excluding setup and tea .. code-block:: ini [pytest] - junit_time = call + junit_duration_report = call diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 7c3ef19fbdf..865e007f5e7 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -301,12 +301,12 @@ should report total test execution times, including setup and teardown (`1 `_, `2 `_). It is the default pytest behavior. To report just call durations -instead, configure the ``junit_time`` option like this: +instead, configure the ``junit_duration_report`` option like this: .. code-block:: ini [pytest] - junit_time = call + junit_duration_report = call .. 
_record_property example: diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index 696deb6e9bd..672fde5d5df 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -324,7 +324,9 @@ def pytest_addoption(parser): default="no", ) # choices=['no', 'stdout', 'stderr']) parser.addini( - "junit_time", "Duration time to report: one of total|call", default="total" + "junit_duration_report", + "Duration time to report: one of total|call", + default="total", ) # choices=['total', 'call']) @@ -337,7 +339,7 @@ def pytest_configure(config): config.option.junitprefix, config.getini("junit_suite_name"), config.getini("junit_logging"), - config.getini("junit_time"), + config.getini("junit_duration_report"), ) config.pluginmanager.register(config._xml) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index aafbb8da9df..d3e9542f8a6 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -153,7 +153,7 @@ def test_sleep(): val = tnode["time"] assert round(float(val), 2) >= 0.03 - def test_call_time(self, testdir): + def test_junit_duration_report(self, testdir): testdir.makepyfile( """ import time, pytest @@ -165,7 +165,7 @@ def test_sleep(): time.sleep(0.1) """ ) - result, dom = runandparse(testdir, "-o", "junit_time=call") + result, dom = runandparse(testdir, "-o", "junit_duration_report=call") node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") val = tnode["time"] From 6e1b1abfa7ec8816c289df29c5bc8fdfae5e6bff Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 14 Dec 2018 15:10:08 -0200 Subject: [PATCH 72/98] Remove deprecated record_xml_property Fix #4547 --- changelog/4547.removal.rst | 3 +++ doc/en/deprecations.rst | 44 +++++++++++++++++++------------------- src/_pytest/deprecated.py | 6 ------ src/_pytest/junitxml.py | 10 --------- 4 files changed, 25 insertions(+), 38 deletions(-) create mode 100644 changelog/4547.removal.rst diff --git a/changelog/4547.removal.rst b/changelog/4547.removal.rst new file mode 100644 index 00000000000..a30d5d7bd87 --- /dev/null +++ b/changelog/4547.removal.rst @@ -0,0 +1,3 @@ +The deprecated ``record_xml_property`` fixture has been removed, use the more generic ``record_property`` instead. + +See our `docs `__ for more information. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 37cffb1fbe5..d814b0bee43 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -149,28 +149,6 @@ As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` i :ref:`the documentation ` on tips on how to update your code. -record_xml_property -~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.5 - -The ``record_xml_property`` fixture is now deprecated in favor of the more generic ``record_property``, which -can be used by other consumers (for example ``pytest-html``) to obtain custom information about the test run. - -This is just a matter of renaming the fixture as the API is the same: - -.. code-block:: python - - def test_foo(record_xml_property): - ... - -Change to: - -.. code-block:: python - - def test_foo(record_property): - ... - pytest_plugins in non-top-level conftest files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -353,6 +331,28 @@ more information. This has been documented as deprecated for years, but only now we are actually emitting deprecation warnings. 
+record_xml_property +~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +The ``record_xml_property`` fixture is now deprecated in favor of the more generic ``record_property``, which +can be used by other consumers (for example ``pytest-html``) to obtain custom information about the test run. + +This is just a matter of renaming the fixture as the API is the same: + +.. code-block:: python + + def test_foo(record_xml_property): + ... + +Change to: + +.. code-block:: python + + def test_foo(record_property): + ... + ``yield`` tests ~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 67f0d534f1a..1de42924d74 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -82,12 +82,6 @@ "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" ) -RECORD_XML_PROPERTY = RemovedInPytest4Warning( - 'Fixture renamed from "record_xml_property" to "record_property" as user ' - "properties are now available to all reporters.\n" - '"record_xml_property" is now deprecated.' -) - PYTEST_PLUGINS_FROM_NON_TOP_LEVEL_CONFTEST = RemovedInPytest4Warning( "Defining pytest_plugins in a non-top-level conftest is deprecated, " diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index 696deb6e9bd..73adb179c00 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -263,16 +263,6 @@ def append_property(name, value): return append_property -@pytest.fixture -def record_xml_property(record_property, request): - """(Deprecated) use record_property.""" - from _pytest import deprecated - - request.node.warn(deprecated.RECORD_XML_PROPERTY) - - return record_property - - @pytest.fixture def record_xml_attribute(request): """Add extra xml attributes to the tag for the calling test. From a7e401656eca66fb1ecee7c5e55ea24598d87f45 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 14 Dec 2018 15:19:48 -0200 Subject: [PATCH 73/98] Remove support to pass strings to pytest.main() Fix #3085 --- changelog/3085.removal.rst | 3 +++ doc/en/deprecations.rst | 45 ++++++++++++++++++---------------- src/_pytest/config/__init__.py | 7 ++---- src/_pytest/deprecated.py | 5 ---- testing/acceptance_test.py | 11 ++++----- testing/deprecated_test.py | 19 -------------- 6 files changed, 34 insertions(+), 56 deletions(-) create mode 100644 changelog/3085.removal.rst diff --git a/changelog/3085.removal.rst b/changelog/3085.removal.rst new file mode 100644 index 00000000000..67ba04c2829 --- /dev/null +++ b/changelog/3085.removal.rst @@ -0,0 +1,3 @@ +Removed support for passing strings to ``pytest.main``. Now, always pass a list of strings instead. + +See our `docs `__ on information on how to update your code. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index d814b0bee43..6ec80cbe8eb 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -193,27 +193,6 @@ To update the code, use ``pytest.param``: -Passing command-line string to ``pytest.main()`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.0 - -Passing a command-line string to ``pytest.main()`` is deprecated: - -.. code-block:: python - - pytest.main("-v -s") - -Pass a list instead: - -.. code-block:: python - - pytest.main(["-v", "-s"]) - - -By passing a string, users expect that pytest will interpret that command-line using the shell rules they are working -on (for example ``bash`` or ``Powershell``), but this is very hard/impossible to do in a portable way. 
- [pytest] section in setup.cfg files ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -353,6 +332,30 @@ Change to: def test_foo(record_property): ... + +Passing command-line string to ``pytest.main()`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +Passing a command-line string to ``pytest.main()`` is deprecated: + +.. code-block:: python + + pytest.main("-v -s") + +Pass a list instead: + +.. code-block:: python + + pytest.main(["-v", "-s"]) + + +By passing a string, users expect that pytest will interpret that command-line using the shell rules they are working +on (for example ``bash`` or ``Powershell``), but this is very hard/impossible to do in a portable way. + + + ``yield`` tests ~~~~~~~~~~~~~~~ diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 3ed82694b92..d68b1249ecd 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -173,12 +173,9 @@ def _prepareconfig(args=None, plugins=None): elif isinstance(args, py.path.local): args = [str(args)] elif not isinstance(args, (tuple, list)): - if not isinstance(args, str): - raise ValueError("not a string or argument list: %r" % (args,)) - args = shlex.split(args, posix=sys.platform != "win32") - from _pytest import deprecated + msg = "`args` parameter expected to be a list or tuple of strings, got: {!r} (type: {})" + raise TypeError(msg.format(args, type(args))) - warning = deprecated.MAIN_STR_ARGS config = get_config() pluginmanager = config.pluginmanager try: diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 1de42924d74..8b2fbf3f963 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -17,11 +17,6 @@ from _pytest.warning_types import UnformattedWarning -MAIN_STR_ARGS = RemovedInPytest4Warning( - "passing a string to pytest.main() is deprecated, " - "pass a list of arguments instead." -) - YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 0b7af5338d6..7276445ac02 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -559,12 +559,11 @@ def test_python_pytest_package(self, testdir): def test_equivalence_pytest_pytest(self): assert pytest.main == py.test.cmdline.main - def test_invoke_with_string(self, capsys): - retcode = pytest.main("-h") - assert not retcode - out, err = capsys.readouterr() - assert "--help" in out - pytest.raises(ValueError, lambda: pytest.main(0)) + def test_invoke_with_invalid_type(self, capsys): + with pytest.raises( + TypeError, match="expected to be a list or tuple of strings, got: '-h'" + ): + pytest.main("-h") def test_invoke_with_path(self, tmpdir, capsys): retcode = pytest.main(tmpdir) diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index b971a9d2e7f..5f5b7803244 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -40,25 +40,6 @@ def test_pytest_custom_cfg_deprecated(testdir): ) -def test_str_args_deprecated(tmpdir): - """Deprecate passing strings to pytest.main(). Scheduled for removal in pytest-4.0.""" - from _pytest.main import EXIT_NOTESTSCOLLECTED - - warnings = [] - - class Collect(object): - def pytest_warning_captured(self, warning_message): - warnings.append(str(warning_message.message)) - - ret = pytest.main("%s -x" % tmpdir, plugins=[Collect()]) - msg = ( - "passing a string to pytest.main() is deprecated, " - "pass a list of arguments instead." 
- ) - assert msg in warnings - assert ret == EXIT_NOTESTSCOLLECTED - - def test_getfuncargvalue_is_deprecated(request): pytest.deprecated_call(request.getfuncargvalue, "tmpdir") From 1499778d5ea0f02f690cd50462c4061dad8cb9d7 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Fri, 14 Dec 2018 17:09:27 -0200 Subject: [PATCH 74/98] Errors in parametrize id functions now propagate the error instead of issuing a warning Fix #2169 --- changelog/2169.removal.rst | 1 + src/_pytest/python.py | 11 ++++------- testing/python/metafunc.py | 10 +++------- 3 files changed, 8 insertions(+), 14 deletions(-) create mode 100644 changelog/2169.removal.rst diff --git a/changelog/2169.removal.rst b/changelog/2169.removal.rst new file mode 100644 index 00000000000..272ddbdfbac --- /dev/null +++ b/changelog/2169.removal.rst @@ -0,0 +1 @@ +``pytest.mark.parametrize``: in previous versions, errors raised by id functions were suppressed and changed into warnings. Now the exceptions are propagated, along with a pytest message informing the node, parameter value and index where the exception occurred. diff --git a/src/_pytest/python.py b/src/_pytest/python.py index a872a86ede5..5a77b09adcb 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -45,7 +45,6 @@ from _pytest.outcomes import fail from _pytest.pathlib import parts from _pytest.warning_types import PytestWarning -from _pytest.warning_types import RemovedInPytest4Warning def pyobj_property(name): @@ -1059,13 +1058,11 @@ def _idval(val, argname, idx, idfn, item, config): s = idfn(val) except Exception as e: # See issue https://github.com/pytest-dev/pytest/issues/2169 - msg = ( - "While trying to determine id of parameter {} at position " - "{} the following exception was raised:\n".format(argname, idx) - ) + msg = "{}: error raised while trying to determine id of parameter '{}' at position {}\n" + msg = msg.format(item.nodeid, argname, idx) + # we only append the exception type and message because on Python 2 reraise does nothing msg += " {}: {}\n".format(type(e).__name__, e) - msg += "This warning will be an error error in pytest-4.0." 
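+            # propagate the failure from the id function instead of only warning about it (#2169)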
- item.warn(RemovedInPytest4Warning(msg)) + six.raise_from(ValueError(msg), e) if s: return ascii_escaped(s) diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 7f9cdb5cc37..0b05a7c5e2b 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -393,7 +393,6 @@ def ids(val): ) assert result == ["a-a0", "a-a1", "a-a2"] - @pytest.mark.filterwarnings("default") def test_parametrize_ids_exception(self, testdir): """ :param testdir: the instance of Testdir class, a temporary @@ -411,14 +410,11 @@ def test_foo(arg): pass """ ) - result = testdir.runpytest("--collect-only", SHOW_PYTEST_WARNINGS_ARG) + result = testdir.runpytest() result.stdout.fnmatch_lines( [ - "", - " ", - " ", - "*test_parametrize_ids_exception.py:6: *parameter arg at position 0*", - "*test_parametrize_ids_exception.py:6: *parameter arg at position 1*", + "*test_foo: error raised while trying to determine id of parameter 'arg' at position 0", + "*Exception: bad ids", ] ) From 0115766df3fe660ee663a96b114e08a6a8b039b3 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Tue, 18 Dec 2018 21:05:48 -0200 Subject: [PATCH 75/98] Calling fixtures directly is now an error instead of a warning Fix #4545 --- changelog/4545.removal.rst | 3 ++ doc/en/deprecations.rst | 92 +++++++++++++++++++++----------------- src/_pytest/deprecated.py | 10 ++--- src/_pytest/fixtures.py | 33 ++++---------- testing/deprecated_test.py | 11 ----- testing/python/fixture.py | 11 +++++ 6 files changed, 79 insertions(+), 81 deletions(-) create mode 100644 changelog/4545.removal.rst diff --git a/changelog/4545.removal.rst b/changelog/4545.removal.rst new file mode 100644 index 00000000000..865d2fc1277 --- /dev/null +++ b/changelog/4545.removal.rst @@ -0,0 +1,3 @@ +Calling fixtures directly is now always an error instead of a warning. + +See our `docs `__ on information on how to update your code. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 8cc259cb1fc..b37be1b7f63 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -72,46 +72,6 @@ Becomes: - - -Calling fixtures directly -~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.7 - -Calling a fixture function directly, as opposed to request them in a test function, is deprecated. - -For example: - -.. code-block:: python - - @pytest.fixture - def cell(): - return ... - - - @pytest.fixture - def full_cell(): - cell = cell() - cell.make_full() - return cell - -This is a great source of confusion to new users, which will often call the fixture functions and request them from test functions interchangeably, which breaks the fixture resolution model. - -In those cases just request the function directly in the dependent fixture: - -.. code-block:: python - - @pytest.fixture - def cell(): - return ... - - - @pytest.fixture - def full_cell(cell): - cell.make_full() - return cell - ``Node.get_marker`` ~~~~~~~~~~~~~~~~~~~ @@ -353,6 +313,58 @@ By passing a string, users expect that pytest will interpret that command-line u on (for example ``bash`` or ``Powershell``), but this is very hard/impossible to do in a portable way. +Calling fixtures directly +~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +Calling a fixture function directly, as opposed to request them in a test function, is deprecated. + +For example: + +.. code-block:: python + + @pytest.fixture + def cell(): + return ... 
+ + + @pytest.fixture + def full_cell(): + cell = cell() + cell.make_full() + return cell + +This is a great source of confusion to new users, which will often call the fixture functions and request them from test functions interchangeably, which breaks the fixture resolution model. + +In those cases just request the function directly in the dependent fixture: + +.. code-block:: python + + @pytest.fixture + def cell(): + return ... + + + @pytest.fixture + def full_cell(cell): + cell.make_full() + return cell + +Alternatively if the fixture function is called multiple times inside a test (making it hard to apply the above pattern) or +if you would like to make minimal changes to the code, you can create a fixture which calls the original function together +with the ``name`` parameter: + +.. code-block:: python + + def cell(): + return ... + + + @pytest.fixture(name="cell") + def cell_fixture(): + return cell() + ``yield`` tests ~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 30020a9ab60..af1b5c792fd 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -20,11 +20,11 @@ YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" -FIXTURE_FUNCTION_CALL = UnformattedWarning( - RemovedInPytest4Warning, - 'Fixture "{name}" called directly. Fixtures are not meant to be called directly, ' - "are created automatically when test functions request them as parameters. " - "See https://docs.pytest.org/en/latest/fixture.html for more information.", +FIXTURE_FUNCTION_CALL = ( + 'Fixture "{name}" called directly. Fixtures are not meant to be called directly,\n' + "but are created automatically when test functions request them as parameters.\n" + "See https://docs.pytest.org/en/latest/fixture.html for more information about fixtures, and\n" + "https://docs.pytest.org/en/latest/deprecations.html#calling-fixtures-directly about how to update your code." ) FIXTURE_NAMED_REQUEST = PytestDeprecationWarning( diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 0136dea09ac..1da7a6c4893 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -942,34 +942,17 @@ def _ensure_immutable_ids(ids): return tuple(ids) -def wrap_function_to_warning_if_called_directly(function, fixture_marker): - """Wrap the given fixture function so we can issue warnings about it being called directly, instead of - used as an argument in a test function. +def wrap_function_to_error_out_if_called_directly(function, fixture_marker): + """Wrap the given fixture function so we can raise an error about it being called directly, + instead of used as an argument in a test function. 
""" - is_yield_function = is_generator(function) - warning = FIXTURE_FUNCTION_CALL.format( + message = FIXTURE_FUNCTION_CALL.format( name=fixture_marker.name or function.__name__ ) - if is_yield_function: - - @functools.wraps(function) - def result(*args, **kwargs): - __tracebackhide__ = True - warnings.warn(warning, stacklevel=3) - for x in function(*args, **kwargs): - yield x - - else: - - @functools.wraps(function) - def result(*args, **kwargs): - __tracebackhide__ = True - warnings.warn(warning, stacklevel=3) - return function(*args, **kwargs) - - if six.PY2: - result.__wrapped__ = function + @six.wraps(function) + def result(*args, **kwargs): + fail(message, pytrace=False) # keep reference to the original function in our own custom attribute so we don't unwrap # further than this point and lose useful wrappings like @mock.patch (#3774) @@ -995,7 +978,7 @@ def __call__(self, function): "fixture is being applied more than once to the same function" ) - function = wrap_function_to_warning_if_called_directly(function, self) + function = wrap_function_to_error_out_if_called_directly(function, self) name = self.name or function.__name__ if name == "request": diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index 56828f7c022..d392ac7d9f1 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -223,17 +223,6 @@ def test_func(): assert msg not in res.stdout.str() -def test_call_fixture_function_deprecated(): - """Check if a warning is raised if a fixture function is called directly (#3661)""" - - @pytest.fixture - def fix(): - return 1 - - with pytest.deprecated_call(): - assert fix() == 1 - - def test_fixture_named_request(testdir): testdir.copy_example() result = testdir.runpytest() diff --git a/testing/python/fixture.py b/testing/python/fixture.py index a4ef5af879b..b6692ac9b6c 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -3850,3 +3850,14 @@ def test_2(fix): ) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) + + +def test_call_fixture_function_error(): + """Check if an error is raised if a fixture function is called directly (#4545)""" + + @pytest.fixture + def fix(): + return 1 + + with pytest.raises(pytest.fail.Exception): + assert fix() == 1 From 197fd69ddcc4acb4f9e598f3e857abee9e02cba1 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Wed, 19 Dec 2018 17:11:20 -0200 Subject: [PATCH 76/98] Use mocking to make test_junit_duration_report deterministic Fix #4563 --- testing/test_junitxml.py | 35 ++++++++++++++++++++++++----------- 1 file changed, 24 insertions(+), 11 deletions(-) diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index cbadbf064c2..59c11fa008a 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -153,23 +153,36 @@ def test_sleep(): val = tnode["time"] assert round(float(val), 2) >= 0.03 - def test_junit_duration_report(self, testdir): + @pytest.mark.parametrize("duration_report", ["call", "total"]) + def test_junit_duration_report(self, testdir, monkeypatch, duration_report): + + # mock LogXML.node_reporter so it always sets a known duration to each test report object + original_node_reporter = LogXML.node_reporter + + def node_reporter_wrapper(s, report): + report.duration = 1.0 + reporter = original_node_reporter(s, report) + return reporter + + monkeypatch.setattr(LogXML, "node_reporter", node_reporter_wrapper) + testdir.makepyfile( """ - import time, pytest - def setup_module(): - time.sleep(0.1) - def teardown_module(): - time.sleep(0.1) - def 
test_sleep(): - time.sleep(0.1) + def test_foo(): + pass """ ) - result, dom = runandparse(testdir, "-o", "junit_duration_report=call") + result, dom = runandparse( + testdir, "-o", "junit_duration_report={}".format(duration_report) + ) node = dom.find_first_by_tag("testsuite") tnode = node.find_first_by_tag("testcase") - val = tnode["time"] - assert 0.1 <= round(float(val), 2) < 0.2 + val = float(tnode["time"]) + if duration_report == "total": + assert val == 3.0 + else: + assert duration_report == "call" + assert val == 1.0 def test_setup_error(self, testdir): testdir.makepyfile( From 91384193794dcf8ae30778767546a2dad7bad845 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Wed, 19 Dec 2018 17:43:17 -0200 Subject: [PATCH 77/98] Remove support for '[pytest]' section in setup.cfg file Fix #3086 --- changelog/3086.removal.rst | 4 ++++ doc/en/deprecations.rst | 19 ++++++++++--------- src/_pytest/config/findpaths.py | 25 ++++++++----------------- src/_pytest/deprecated.py | 6 +----- testing/deprecated_test.py | 18 ++++++------------ 5 files changed, 29 insertions(+), 43 deletions(-) create mode 100644 changelog/3086.removal.rst diff --git a/changelog/3086.removal.rst b/changelog/3086.removal.rst new file mode 100644 index 00000000000..3974aa5004f --- /dev/null +++ b/changelog/3086.removal.rst @@ -0,0 +1,4 @@ +``[pytest]`` section in **setup.cfg** files is not longer supported, use ``[tool:pytest]`` instead. ``setup.cfg`` files +are meant for use with ``distutils``, and a section named ``pytest`` has notoriously been a source of conflicts and bugs. + +Note that for **pytest.ini** and **tox.ini** files the section remains ``[pytest]``. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index b37be1b7f63..781d05b774d 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -125,15 +125,6 @@ To update the code, use ``pytest.param``: - -[pytest] section in setup.cfg files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.0 - -``[pytest]`` sections in ``setup.cfg`` files should now be named ``[tool:pytest]`` -to avoid conflicts with other distutils commands. - Result log (``--result-log``) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -185,6 +176,16 @@ Switch over to the ``@pytest.fixture`` decorator: return SomeData() + +[pytest] section in setup.cfg files +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +``[pytest]`` sections in ``setup.cfg`` files should now be named ``[tool:pytest]`` +to avoid conflicts with other distutils commands. 
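+
+For example, a ``setup.cfg`` file that previously contained (using ``addopts`` purely as an example option):
+
+.. code-block:: ini
+
+    [pytest]
+    addopts = --verbose
+
+should now spell the section name like this:
+
+.. code-block:: ini
+
+    [tool:pytest]
+    addopts = --verbose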
+ + Metafunc.addcall ~~~~~~~~~~~~~~~~ diff --git a/src/_pytest/config/findpaths.py b/src/_pytest/config/findpaths.py index eecc92606d4..a0f16134d1e 100644 --- a/src/_pytest/config/findpaths.py +++ b/src/_pytest/config/findpaths.py @@ -3,6 +3,7 @@ import py from .exceptions import UsageError +from _pytest.outcomes import fail def exists(path, ignore=EnvironmentError): @@ -34,15 +35,10 @@ def getcfg(args, config=None): iniconfig = py.iniconfig.IniConfig(p) if "pytest" in iniconfig.sections: if inibasename == "setup.cfg" and config is not None: - from _pytest.warnings import _issue_warning_captured - from _pytest.warning_types import RemovedInPytest4Warning - - _issue_warning_captured( - RemovedInPytest4Warning( - CFG_PYTEST_SECTION.format(filename=inibasename) - ), - hook=config.hook, - stacklevel=2, + + fail( + CFG_PYTEST_SECTION.format(filename=inibasename), + pytrace=False, ) return base, p, iniconfig["pytest"] if ( @@ -112,14 +108,9 @@ def determine_setup(inifile, args, rootdir_cmd_arg=None, config=None): inicfg = iniconfig[section] if is_cfg_file and section == "pytest" and config is not None: from _pytest.deprecated import CFG_PYTEST_SECTION - from _pytest.warnings import _issue_warning_captured - - # TODO: [pytest] section in *.cfg files is deprecated. Need refactoring once - # the deprecation expires. - _issue_warning_captured( - CFG_PYTEST_SECTION.format(filename=str(inifile)), - config.hook, - stacklevel=2, + + fail( + CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False ) break except KeyError: diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index af1b5c792fd..467c735847f 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -14,7 +14,6 @@ from _pytest.warning_types import PytestDeprecationWarning from _pytest.warning_types import RemovedInPytest4Warning -from _pytest.warning_types import UnformattedWarning YIELD_TESTS = "yield tests were removed in pytest 4.0 - {name} will be ignored" @@ -31,10 +30,7 @@ "'request' is a reserved name for fixtures and will raise an error in future versions" ) -CFG_PYTEST_SECTION = UnformattedWarning( - RemovedInPytest4Warning, - "[pytest] section in {filename} files is deprecated, use [tool:pytest] instead.", -) +CFG_PYTEST_SECTION = "[pytest] section in {filename} files is no longer supported, change to [tool:pytest] instead." 
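+# Note: this is now a plain string template rather than a warning, because
+# findpaths.py formats it and aborts collection directly, e.g.
+# fail(CFG_PYTEST_SECTION.format(filename="setup.cfg"), pytrace=False).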
GETFUNCARGVALUE = RemovedInPytest4Warning( "getfuncargvalue is deprecated, use getfixturevalue" diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index d392ac7d9f1..bb2c17c45e9 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -10,8 +10,7 @@ pytestmark = pytest.mark.pytester_example_path("deprecated") -@pytest.mark.filterwarnings("default") -def test_pytest_setup_cfg_deprecated(testdir): +def test_pytest_setup_cfg_unsupported(testdir): testdir.makefile( ".cfg", setup=""" @@ -19,14 +18,11 @@ def test_pytest_setup_cfg_deprecated(testdir): addopts = --verbose """, ) - result = testdir.runpytest() - result.stdout.fnmatch_lines( - ["*pytest*section in setup.cfg files is deprecated*use*tool:pytest*instead*"] - ) + with pytest.raises(pytest.fail.Exception): + testdir.runpytest() -@pytest.mark.filterwarnings("default") -def test_pytest_custom_cfg_deprecated(testdir): +def test_pytest_custom_cfg_unsupported(testdir): testdir.makefile( ".cfg", custom=""" @@ -34,10 +30,8 @@ def test_pytest_custom_cfg_deprecated(testdir): addopts = --verbose """, ) - result = testdir.runpytest("-c", "custom.cfg") - result.stdout.fnmatch_lines( - ["*pytest*section in custom.cfg files is deprecated*use*tool:pytest*instead*"] - ) + with pytest.raises(pytest.fail.Exception): + testdir.runpytest("-c", "custom.cfg") def test_getfuncargvalue_is_deprecated(request): From a93f41233a8f4edadb33a055396247ef1ede210a Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Wed, 19 Dec 2018 18:09:47 -0200 Subject: [PATCH 78/98] Raise an error if pytest_plugins is defined in a non-top-level conftest.py file Fix #4548 --- changelog/4548.removal.rst | 3 +++ doc/en/deprecations.rst | 20 ++++++++++---------- src/_pytest/config/__init__.py | 11 ++++++----- src/_pytest/deprecated.py | 10 +++++++--- testing/deprecated_test.py | 22 ++++++++-------------- 5 files changed, 34 insertions(+), 32 deletions(-) create mode 100644 changelog/4548.removal.rst diff --git a/changelog/4548.removal.rst b/changelog/4548.removal.rst new file mode 100644 index 00000000000..bd47b1d5172 --- /dev/null +++ b/changelog/4548.removal.rst @@ -0,0 +1,3 @@ +An error is now raised if the ``pytest_plugins`` variable is defined in a non-top-level ``conftest.py`` file (i.e., not residing in the ``rootdir``). + +See our `docs `__ for more information. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 781d05b774d..ca95bab389b 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -81,16 +81,6 @@ As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` i :ref:`the documentation ` on tips on how to update your code. -pytest_plugins in non-top-level conftest files -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.5 - -Defining ``pytest_plugins`` is now deprecated in non-top-level conftest.py -files because they will activate referenced plugins *globally*, which is surprising because for all other pytest -features ``conftest.py`` files are only *active* for tests at or below it. 
- - marks in ``pytest.mark.parametrize`` ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -242,6 +232,16 @@ You can consult `funcarg comparison section in the docs Date: Thu, 20 Dec 2018 16:13:43 -0200 Subject: [PATCH 79/98] Remove support for applying marks to values in parametrize Fix #3082 --- changelog/3082.removal.rst | 3 ++ doc/en/deprecations.rst | 88 +++++++++++++++++++++------------- src/_pytest/deprecated.py | 6 --- src/_pytest/mark/structures.py | 54 ++++++++------------- testing/python/collect.py | 42 +++++++--------- testing/python/metafunc.py | 21 ++++---- testing/test_mark.py | 37 ++------------ 7 files changed, 110 insertions(+), 141 deletions(-) create mode 100644 changelog/3082.removal.rst diff --git a/changelog/3082.removal.rst b/changelog/3082.removal.rst new file mode 100644 index 00000000000..750f097bc70 --- /dev/null +++ b/changelog/3082.removal.rst @@ -0,0 +1,3 @@ +Removed support for applying marks directly to values in ``@pytest.mark.parametrize``. Use ``pytest.param`` instead. + +See our `docs `__ on information on how to update your code. diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index ca95bab389b..dc716951b0d 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -7,6 +7,11 @@ This page lists all pytest features that are currently deprecated or have been r The objective is to give users a clear rationale why a certain feature has been removed, and what alternatives should be used instead. +.. contents:: + :depth: 3 + :local: + + Deprecated Features ------------------- @@ -81,40 +86,6 @@ As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` i :ref:`the documentation ` on tips on how to update your code. -marks in ``pytest.mark.parametrize`` -~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.2 - -Applying marks to values of a ``pytest.mark.parametrize`` call is now deprecated. For example: - -.. code-block:: python - - @pytest.mark.parametrize( - "a, b", [(3, 9), pytest.mark.xfail(reason="flaky")(6, 36), (10, 100)] - ) - def test_foo(a, b): - ... - -This code applies the ``pytest.mark.xfail(reason="flaky")`` mark to the ``(6, 36)`` value of the above parametrization -call. - -This was considered hard to read and understand, and also its implementation presented problems to the code preventing -further internal improvements in the marks architecture. - -To update the code, use ``pytest.param``: - -.. code-block:: python - - @pytest.mark.parametrize( - "a, b", - [(3, 9), pytest.param((6, 36), marks=pytest.mark.xfail(reason="flaky")), (10, 100)], - ) - def test_foo(a, b): - ... - - - Result log (``--result-log``) ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ @@ -145,6 +116,55 @@ collection. This issue should affect only advanced plugins who create new collection types, so if you see this warning message please contact the authors so they can change the code. + +marks in ``pytest.mark.parametrize`` +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +*Removed in version 4.0.* + +Applying marks to values of a ``pytest.mark.parametrize`` call is now deprecated. For example: + +.. code-block:: python + + @pytest.mark.parametrize( + "a, b", + [ + (3, 9), + pytest.mark.xfail(reason="flaky")(6, 36), + (10, 100), + (20, 200), + (40, 400), + (50, 500), + ], + ) + def test_foo(a, b): + ... + +This code applies the ``pytest.mark.xfail(reason="flaky")`` mark to the ``(6, 36)`` value of the above parametrization +call. 
+ +This was considered hard to read and understand, and also its implementation presented problems to the code preventing +further internal improvements in the marks architecture. + +To update the code, use ``pytest.param``: + +.. code-block:: python + + @pytest.mark.parametrize( + "a, b", + [ + (3, 9), + pytest.param(6, 36, marks=pytest.mark.xfail(reason="flaky")), + (10, 100), + (20, 200), + (40, 400), + (50, 500), + ], + ) + def test_foo(a, b): + ... + + ``pytest_funcarg__`` prefix ~~~~~~~~~~~~~~~~~~~~~~~~~~~ diff --git a/src/_pytest/deprecated.py b/src/_pytest/deprecated.py index 02b26ba7713..494a453b666 100644 --- a/src/_pytest/deprecated.py +++ b/src/_pytest/deprecated.py @@ -54,12 +54,6 @@ "Docs: https://docs.pytest.org/en/latest/mark.html#updating-code" ) -MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( - "Applying marks directly to parameters is deprecated," - " please use pytest.param(..., marks=...) instead.\n" - "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" -) - RAISES_EXEC = PytestDeprecationWarning( "raises(..., 'code(as_a_string)') is deprecated, use the context manager form or use `exec()` directly\n\n" "See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec" diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index 14a684745a8..18fb6fa6db2 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -13,7 +13,6 @@ from ..compat import MappingMixin from ..compat import NOTSET from ..deprecated import MARK_INFO_ATTRIBUTE -from ..deprecated import MARK_PARAMETERSET_UNPACKING from _pytest.outcomes import fail @@ -82,39 +81,23 @@ def param(cls, *values, **kw): return cls(values, marks, id_) @classmethod - def extract_from(cls, parameterset, belonging_definition, legacy_force_tuple=False): + def extract_from(cls, parameterset, force_tuple=False): """ :param parameterset: a legacy style parameterset that may or may not be a tuple, and may or may not be wrapped into a mess of mark objects - :param legacy_force_tuple: + :param force_tuple: enforce tuple wrapping so single argument tuple values don't get decomposed and break tests - - :param belonging_definition: the item that we will be extracting the parameters from. 
""" if isinstance(parameterset, cls): return parameterset - if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: + if force_tuple: return cls.param(parameterset) - - newmarks = [] - argval = parameterset - while isinstance(argval, MarkDecorator): - newmarks.append( - MarkDecorator(Mark(argval.markname, argval.args[:-1], argval.kwargs)) - ) - argval = argval.args[-1] - assert not isinstance(argval, ParameterSet) - if legacy_force_tuple: - argval = (argval,) - - if newmarks and belonging_definition is not None: - belonging_definition.warn(MARK_PARAMETERSET_UNPACKING) - - return cls(argval, marks=newmarks, id=None) + else: + return cls(parameterset, marks=[], id=None) @classmethod def _for_parametrize(cls, argnames, argvalues, func, config, function_definition): @@ -124,12 +107,7 @@ def _for_parametrize(cls, argnames, argvalues, func, config, function_definition else: force_tuple = False parameters = [ - ParameterSet.extract_from( - x, - legacy_force_tuple=force_tuple, - belonging_definition=function_definition, - ) - for x in argvalues + ParameterSet.extract_from(x, force_tuple=force_tuple) for x in argvalues ] del argvalues @@ -137,11 +115,21 @@ def _for_parametrize(cls, argnames, argvalues, func, config, function_definition # check all parameter sets have the correct number of values for param in parameters: if len(param.values) != len(argnames): - raise ValueError( - 'In "parametrize" the number of values ({}) must be ' - "equal to the number of names ({})".format( - param.values, argnames - ) + msg = ( + '{nodeid}: in "parametrize" the number of names ({names_len}):\n' + " {names}\n" + "must be equal to the number of values ({values_len}):\n" + " {values}" + ) + fail( + msg.format( + nodeid=function_definition.nodeid, + values=param.values, + names=argnames, + names_len=len(argnames), + values_len=len(param.values), + ), + pytrace=False, ) else: # empty parameter set (likely computed at runtime): create a single diff --git a/testing/python/collect.py b/testing/python/collect.py index 53b3bc18b04..3147ee9e26f 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -244,13 +244,6 @@ def prop(self): "ignore:usage of Generator.Function is deprecated, please use pytest.Function instead" ) class TestFunction(object): - @pytest.fixture - def ignore_parametrized_marks_args(self): - """Provides arguments to pytester.runpytest() to ignore the warning about marks being applied directly - to parameters. 
- """ - return ("-W", "ignore:Applying marks directly to parameters") - def test_getmodulecollector(self, testdir): item = testdir.getitem("def test_func(): pass") modcol = item.getparent(pytest.Module) @@ -472,7 +465,6 @@ def test_it(fix1): rec = testdir.inline_run() rec.assertoutcome(passed=1) - @pytest.mark.filterwarnings("ignore:Applying marks directly to parameters") def test_parametrize_with_mark(self, testdir): items = testdir.getitems( """ @@ -480,7 +472,7 @@ def test_parametrize_with_mark(self, testdir): @pytest.mark.foo @pytest.mark.parametrize('arg', [ 1, - pytest.mark.bar(pytest.mark.baz(2)) + pytest.param(2, marks=[pytest.mark.baz, pytest.mark.bar]) ]) def test_function(arg): pass @@ -558,37 +550,37 @@ def test2(self, x, y): assert colitems[2].name == "test2[a-c]" assert colitems[3].name == "test2[b-c]" - def test_parametrize_skipif(self, testdir, ignore_parametrized_marks_args): + def test_parametrize_skipif(self, testdir): testdir.makepyfile( """ import pytest m = pytest.mark.skipif('True') - @pytest.mark.parametrize('x', [0, 1, m(2)]) + @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)]) def test_skip_if(x): assert x < 2 """ ) - result = testdir.runpytest(*ignore_parametrized_marks_args) + result = testdir.runpytest() result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *") - def test_parametrize_skip(self, testdir, ignore_parametrized_marks_args): + def test_parametrize_skip(self, testdir): testdir.makepyfile( """ import pytest m = pytest.mark.skip('') - @pytest.mark.parametrize('x', [0, 1, m(2)]) + @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)]) def test_skip(x): assert x < 2 """ ) - result = testdir.runpytest(*ignore_parametrized_marks_args) + result = testdir.runpytest() result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *") - def test_parametrize_skipif_no_skip(self, testdir, ignore_parametrized_marks_args): + def test_parametrize_skipif_no_skip(self, testdir): testdir.makepyfile( """ import pytest @@ -600,40 +592,40 @@ def test_skipif_no_skip(x): assert x < 2 """ ) - result = testdir.runpytest(*ignore_parametrized_marks_args) + result = testdir.runpytest() result.stdout.fnmatch_lines("* 1 failed, 2 passed in *") - def test_parametrize_xfail(self, testdir, ignore_parametrized_marks_args): + def test_parametrize_xfail(self, testdir): testdir.makepyfile( """ import pytest m = pytest.mark.xfail('True') - @pytest.mark.parametrize('x', [0, 1, m(2)]) + @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)]) def test_xfail(x): assert x < 2 """ ) - result = testdir.runpytest(*ignore_parametrized_marks_args) + result = testdir.runpytest() result.stdout.fnmatch_lines("* 2 passed, 1 xfailed in *") - def test_parametrize_passed(self, testdir, ignore_parametrized_marks_args): + def test_parametrize_passed(self, testdir): testdir.makepyfile( """ import pytest m = pytest.mark.xfail('True') - @pytest.mark.parametrize('x', [0, 1, m(2)]) + @pytest.mark.parametrize('x', [0, 1, pytest.param(2, marks=m)]) def test_xfail(x): pass """ ) - result = testdir.runpytest(*ignore_parametrized_marks_args) + result = testdir.runpytest() result.stdout.fnmatch_lines("* 2 passed, 1 xpassed in *") - def test_parametrize_xfail_passed(self, testdir, ignore_parametrized_marks_args): + def test_parametrize_xfail_passed(self, testdir): testdir.makepyfile( """ import pytest @@ -645,7 +637,7 @@ def test_passed(x): pass """ ) - result = testdir.runpytest(*ignore_parametrized_marks_args) + result = testdir.runpytest() result.stdout.fnmatch_lines("* 3 passed in *") def 
test_function_original_name(self, testdir): diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 0b05a7c5e2b..54a6ecb91c2 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -1372,7 +1372,6 @@ def pytest_generate_tests(metafunc): assert output.count("preparing foo-3") == 1 -@pytest.mark.filterwarnings("ignore:Applying marks directly to parameters") @pytest.mark.issue308 class TestMarkersWithParametrization(object): def test_simple_mark(self, testdir): @@ -1382,7 +1381,7 @@ def test_simple_mark(self, testdir): @pytest.mark.foo @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.bar((1, 3)), + pytest.param(1, 3, marks=pytest.mark.bar), (2, 3), ]) def test_increment(n, expected): @@ -1402,7 +1401,7 @@ def test_select_based_on_mark(self, testdir): @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.foo((2, 3)), + pytest.param(2, 3, marks=pytest.mark.foo), (3, 4), ]) def test_increment(n, expected): @@ -1442,7 +1441,7 @@ def test_simple_xfail(self, testdir): @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.xfail((1, 3)), + pytest.param(1, 3, marks=pytest.mark.xfail), (2, 3), ]) def test_increment(n, expected): @@ -1459,7 +1458,7 @@ def test_simple_xfail_single_argname(self, testdir): @pytest.mark.parametrize("n", [ 2, - pytest.mark.xfail(3), + pytest.param(3, marks=pytest.mark.xfail), 4, ]) def test_isEven(n): @@ -1475,7 +1474,7 @@ def test_xfail_with_arg(self, testdir): @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.xfail("True")((1, 3)), + pytest.param(1, 3, marks=pytest.mark.xfail("True")), (2, 3), ]) def test_increment(n, expected): @@ -1491,7 +1490,7 @@ def test_xfail_with_kwarg(self, testdir): @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.xfail(reason="some bug")((1, 3)), + pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")), (2, 3), ]) def test_increment(n, expected): @@ -1507,7 +1506,7 @@ def test_xfail_with_arg_and_kwarg(self, testdir): @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.xfail("True", reason="some bug")((1, 3)), + pytest.param(1, 3, marks=pytest.mark.xfail("True", reason="some bug")), (2, 3), ]) def test_increment(n, expected): @@ -1522,9 +1521,11 @@ def test_xfail_passing_is_xpass(self, testdir, strict): s = """ import pytest + m = pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict}) + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})((2, 3)), + pytest.param(2, 3, marks=m), (3, 4), ]) def test_increment(n, expected): @@ -1548,7 +1549,7 @@ def pytest_generate_tests(metafunc): failingTestData = [(1, 3), (2, 2)] - testData = passingTestData + [pytest.mark.xfail(d) + testData = passingTestData + [pytest.param(*d, marks=pytest.mark.xfail) for d in failingTestData] metafunc.parametrize(("n", "expected"), testData) diff --git a/testing/test_mark.py b/testing/test_mark.py index 8bf715995e1..4888b1c557c 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -10,7 +10,6 @@ import pytest from _pytest.mark import EMPTY_PARAMETERSET_OPTION from _pytest.mark import MarkGenerator as Mark -from _pytest.mark import ParameterSet from _pytest.mark import transfer_markers from _pytest.nodes import Collector from _pytest.nodes import Node @@ -477,8 +476,10 @@ def test_func(foo, bar): result = testdir.runpytest(py_file) result.stdout.fnmatch_lines( [ - 'E ValueError: In "parametrize" the number 
of values ((1, 2, 3)) ' - "must be equal to the number of names (['foo', 'bar'])" + 'test_parametrized_collect_with_wrong_args.py::test_func: in "parametrize" the number of names (2):', + " ['foo', 'bar']", + "must be equal to the number of values (3):", + " (1, 2, 3)", ] ) @@ -1042,36 +1043,6 @@ def assert_test_is_not_selected(keyword): assert_test_is_not_selected("()") -@pytest.mark.parametrize( - "argval, expected", - [ - ( - pytest.mark.skip()((1, 2)), - ParameterSet(values=(1, 2), marks=[pytest.mark.skip], id=None), - ), - ( - pytest.mark.xfail(pytest.mark.skip()((1, 2))), - ParameterSet( - values=(1, 2), marks=[pytest.mark.xfail, pytest.mark.skip], id=None - ), - ), - ], -) -@pytest.mark.filterwarnings("default") -def test_parameterset_extractfrom(argval, expected): - from _pytest.deprecated import MARK_PARAMETERSET_UNPACKING - - warn_called = [] - - class DummyItem: - def warn(self, warning): - warn_called.append(warning) - - extracted = ParameterSet.extract_from(argval, belonging_definition=DummyItem()) - assert extracted == expected - assert warn_called == [MARK_PARAMETERSET_UNPACKING] - - def test_legacy_transfer(): class FakeModule(object): pytestmark = [] From 9f9f6ee48beba8bbf0911e458590aa67b45bd867 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Wed, 19 Dec 2018 16:00:59 +0100 Subject: [PATCH 80/98] remove most of markertransfer keywords are still a big issue --- src/_pytest/config/__init__.py | 5 +- src/_pytest/fixtures.py | 25 +-- src/_pytest/mark/__init__.py | 11 +- src/_pytest/mark/structures.py | 101 +----------- src/_pytest/nodes.py | 15 -- src/_pytest/python.py | 20 ++- src/_pytest/unittest.py | 4 - testing/test_mark.py | 292 ++------------------------------- 8 files changed, 52 insertions(+), 421 deletions(-) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index f20bb036e3b..d9ca76d05bf 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -270,8 +270,11 @@ def parse_hookimpl_opts(self, plugin, name): opts = {} if opts is not None: + # TODO: DeprecationWarning, people should use hookimpl + known_marks = {m.name for m in getattr(method, "pytestmark", [])} for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): - opts.setdefault(name, hasattr(method, name)) + + opts.setdefault(name, hasattr(method, name) or name in known_marks) return opts def parse_hookspec_opts(self, module_or_class, name): diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 1da7a6c4893..0a1f258e571 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -1207,19 +1207,20 @@ def pytest_generate_tests(self, metafunc): if faclist: fixturedef = faclist[-1] if fixturedef.params is not None: - parametrize_func = getattr(metafunc.function, "parametrize", None) - if parametrize_func is not None: - parametrize_func = parametrize_func.combined - func_params = getattr(parametrize_func, "args", [[None]]) - func_kwargs = getattr(parametrize_func, "kwargs", {}) - # skip directly parametrized arguments - if "argnames" in func_kwargs: - argnames = parametrize_func.kwargs["argnames"] + markers = list(metafunc.definition.iter_markers("parametrize")) + for parametrize_mark in markers: + if "argnames" in parametrize_mark.kwargs: + argnames = parametrize_mark.kwargs["argnames"] + else: + argnames = parametrize_mark.args[0] + + if not isinstance(argnames, (tuple, list)): + argnames = [ + x.strip() for x in argnames.split(",") if x.strip() + ] + if argname in argnames: + break else: - argnames = func_params[0] - if 
not isinstance(argnames, (tuple, list)): - argnames = [x.strip() for x in argnames.split(",") if x.strip()] - if argname not in func_params and argname not in argnames: metafunc.parametrize( argname, fixturedef.params, diff --git a/src/_pytest/mark/__init__.py b/src/_pytest/mark/__init__.py index b6495dd0345..bc4c467f918 100644 --- a/src/_pytest/mark/__init__.py +++ b/src/_pytest/mark/__init__.py @@ -11,19 +11,10 @@ from .structures import MARK_GEN from .structures import MarkDecorator from .structures import MarkGenerator -from .structures import MarkInfo from .structures import ParameterSet -from .structures import transfer_markers from _pytest.config import UsageError -__all__ = [ - "Mark", - "MarkInfo", - "MarkDecorator", - "MarkGenerator", - "transfer_markers", - "get_empty_parameterset_mark", -] +__all__ = ["Mark", "MarkDecorator", "MarkGenerator", "get_empty_parameterset_mark"] def param(*values, **kw): diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index 18fb6fa6db2..49695b56f65 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -1,18 +1,15 @@ import inspect import warnings from collections import namedtuple -from functools import reduce from operator import attrgetter import attr import six -from six.moves import map from ..compat import ascii_escaped from ..compat import getfslineno from ..compat import MappingMixin from ..compat import NOTSET -from ..deprecated import MARK_INFO_ATTRIBUTE from _pytest.outcomes import fail @@ -233,11 +230,7 @@ def __call__(self, *args, **kwargs): func = args[0] is_class = inspect.isclass(func) if len(args) == 1 and (istestfunc(func) or is_class): - if is_class: - store_mark(func, self.mark) - else: - store_legacy_markinfo(func, self.mark) - store_mark(func, self.mark) + store_mark(func, self.mark) return func return self.with_args(*args, **kwargs) @@ -259,7 +252,13 @@ def normalize_mark_list(mark_list): :type mark_list: List[Union[Mark, Markdecorator]] :rtype: List[Mark] """ - return [getattr(mark, "mark", mark) for mark in mark_list] # unpack MarkDecorator + extracted = [ + getattr(mark, "mark", mark) for mark in mark_list + ] # unpack MarkDecorator + for mark in extracted: + if not isinstance(mark, Mark): + raise TypeError("got {!r} instead of Mark".format(mark)) + return [x for x in extracted if isinstance(x, Mark)] def store_mark(obj, mark): @@ -272,90 +271,6 @@ def store_mark(obj, mark): obj.pytestmark = get_unpacked_marks(obj) + [mark] -def store_legacy_markinfo(func, mark): - """create the legacy MarkInfo objects and put them onto the function - """ - if not isinstance(mark, Mark): - raise TypeError("got {mark!r} instead of a Mark".format(mark=mark)) - holder = getattr(func, mark.name, None) - if holder is None: - holder = MarkInfo.for_mark(mark) - setattr(func, mark.name, holder) - elif isinstance(holder, MarkInfo): - holder.add_mark(mark) - - -def transfer_markers(funcobj, cls, mod): - """ - this function transfers class level markers and module level markers - into function level markinfo objects - - this is the main reason why marks are so broken - the resolution will involve phasing out function level MarkInfo objects - - """ - for obj in (cls, mod): - for mark in get_unpacked_marks(obj): - if not _marked(funcobj, mark): - store_legacy_markinfo(funcobj, mark) - - -def _marked(func, mark): - """ Returns True if :func: is already marked with :mark:, False otherwise. - This can happen if marker is applied to class and the test file is - invoked more than once. 
- """ - try: - func_mark = getattr(func, getattr(mark, "combined", mark).name) - except AttributeError: - return False - return any(mark == info.combined for info in func_mark) - - -@attr.s(repr=False) -class MarkInfo(object): - """ Marking object created by :class:`MarkDecorator` instances. """ - - _marks = attr.ib(converter=list) - - @_marks.validator - def validate_marks(self, attribute, value): - for item in value: - if not isinstance(item, Mark): - raise ValueError( - "MarkInfo expects Mark instances, got {!r} ({!r})".format( - item, type(item) - ) - ) - - combined = attr.ib( - repr=False, - default=attr.Factory( - lambda self: reduce(Mark.combined_with, self._marks), takes_self=True - ), - ) - - name = alias("combined.name", warning=MARK_INFO_ATTRIBUTE) - args = alias("combined.args", warning=MARK_INFO_ATTRIBUTE) - kwargs = alias("combined.kwargs", warning=MARK_INFO_ATTRIBUTE) - - @classmethod - def for_mark(cls, mark): - return cls([mark]) - - def __repr__(self): - return "".format(self.combined) - - def add_mark(self, mark): - """ add a MarkInfo with the given args and kwargs. """ - self._marks.append(mark) - self.combined = self.combined.combined_with(mark) - - def __iter__(self): - """ yield MarkInfo objects each relating to a marking-call. """ - return map(MarkInfo.for_mark, self._marks) - - class MarkGenerator(object): """ Factory for :class:`MarkDecorator` objects - exposed as a ``pytest.mark`` singleton instance. Example:: diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py index b324b1f6899..00ec80894c9 100644 --- a/src/_pytest/nodes.py +++ b/src/_pytest/nodes.py @@ -10,7 +10,6 @@ import _pytest._code from _pytest.compat import getfslineno -from _pytest.mark.structures import MarkInfo from _pytest.mark.structures import NodeKeywords from _pytest.outcomes import fail @@ -211,20 +210,6 @@ def get_closest_marker(self, name, default=None): """ return next(self.iter_markers(name=name), default) - def get_marker(self, name): - """ get a marker object from this node or None if - the node doesn't have a marker with that name. - - .. deprecated:: 3.6 - This function has been deprecated in favor of - :meth:`Node.get_closest_marker <_pytest.nodes.Node.get_closest_marker>` and - :meth:`Node.iter_markers <_pytest.nodes.Node.iter_markers>`, see :ref:`update marker code` - for more details. 
- """ - markers = list(self.iter_markers(name=name)) - if markers: - return MarkInfo(markers) - def listextrakeywords(self): """ Return a set of all extra keywords in self and any parents.""" extra_keywords = set() diff --git a/src/_pytest/python.py b/src/_pytest/python.py index 5a77b09adcb..a508dbe2441 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -41,7 +41,6 @@ from _pytest.mark import MARK_GEN from _pytest.mark.structures import get_unpacked_marks from _pytest.mark.structures import normalize_mark_list -from _pytest.mark.structures import transfer_markers from _pytest.outcomes import fail from _pytest.pathlib import parts from _pytest.warning_types import PytestWarning @@ -125,10 +124,10 @@ def pytest_generate_tests(metafunc): # those alternative spellings are common - raise a specific error to alert # the user alt_spellings = ["parameterize", "parametrise", "parameterise"] - for attr in alt_spellings: - if hasattr(metafunc.function, attr): + for mark_name in alt_spellings: + if metafunc.definition.get_closest_marker(mark_name): msg = "{0} has '{1}' mark, spelling should be 'parametrize'" - fail(msg.format(metafunc.function.__name__, attr), pytrace=False) + fail(msg.format(metafunc.function.__name__, mark_name), pytrace=False) for marker in metafunc.definition.iter_markers(name="parametrize"): metafunc.parametrize(*marker.args, **marker.kwargs) @@ -385,7 +384,6 @@ def _genfunctions(self, name, funcobj): module = self.getparent(Module).obj clscol = self.getparent(Class) cls = clscol and clscol.obj or None - transfer_markers(funcobj, cls, module) fm = self.session._fixturemanager definition = FunctionDefinition(name=name, parent=self, callobj=funcobj) @@ -1291,6 +1289,18 @@ def __init__( if keywords: self.keywords.update(keywords) + # todo: this is a hell of a hack + self.keywords.update( + dict.fromkeys( + [ + mark.name + for mark in self.iter_markers() + if mark.name not in self.keywords + ], + True, + ) + ) + if fixtureinfo is None: fixtureinfo = self.session._fixturemanager.getfixtureinfo( self, self.obj, self.cls, funcargs=not self._isyieldedfunction() diff --git a/src/_pytest/unittest.py b/src/_pytest/unittest.py index d9881cd8751..4a886c2e113 100644 --- a/src/_pytest/unittest.py +++ b/src/_pytest/unittest.py @@ -14,8 +14,6 @@ from _pytest.outcomes import xfail from _pytest.python import Class from _pytest.python import Function -from _pytest.python import Module -from _pytest.python import transfer_markers def pytest_pycollect_makeitem(collector, name, obj): @@ -54,14 +52,12 @@ def collect(self): return self.session._fixturemanager.parsefactories(self, unittest=True) loader = TestLoader() - module = self.getparent(Module).obj foundsomething = False for name in loader.getTestCaseNames(self.obj): x = getattr(self.obj, name) if not getattr(x, "__test__", True): continue funcobj = getimfunc(x) - transfer_markers(funcobj, cls, module) yield TestCaseFunction(name, parent=self, callobj=funcobj) foundsomething = True diff --git a/testing/test_mark.py b/testing/test_mark.py index 4888b1c557c..a10e2e19de4 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -10,7 +10,6 @@ import pytest from _pytest.mark import EMPTY_PARAMETERSET_OPTION from _pytest.mark import MarkGenerator as Mark -from _pytest.mark import transfer_markers from _pytest.nodes import Collector from _pytest.nodes import Node from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG @@ -26,12 +25,6 @@ class TestMark(object): - def test_markinfo_repr(self): - from _pytest.mark import MarkInfo, Mark - - 
m = MarkInfo.for_mark(Mark("hello", (1, 2), {})) - repr(m) - @pytest.mark.parametrize("attr", ["mark", "param"]) @pytest.mark.parametrize("modulename", ["py.test", "pytest"]) def test_pytest_exists_in_namespace_all(self, attr, modulename): @@ -57,105 +50,8 @@ class SomeClass(object): def test_pytest_mark_name_starts_with_underscore(self): mark = Mark() - pytest.raises(AttributeError, getattr, mark, "_some_name") - - def test_pytest_mark_bare(self): - mark = Mark() - - def f(): - pass - - mark.hello(f) - assert f.hello - - def test_mark_legacy_ignore_fail(self): - def add_attribute(func): - func.foo = 1 - return func - - @pytest.mark.foo - @add_attribute - def test_fun(): - pass - - assert test_fun.foo == 1 - assert test_fun.pytestmark - - @ignore_markinfo - def test_pytest_mark_keywords(self): - mark = Mark() - - def f(): - pass - - mark.world(x=3, y=4)(f) - assert f.world - assert f.world.kwargs["x"] == 3 - assert f.world.kwargs["y"] == 4 - - @ignore_markinfo - def test_apply_multiple_and_merge(self): - mark = Mark() - - def f(): - pass - - mark.world - mark.world(x=3)(f) - assert f.world.kwargs["x"] == 3 - mark.world(y=4)(f) - assert f.world.kwargs["x"] == 3 - assert f.world.kwargs["y"] == 4 - mark.world(y=1)(f) - assert f.world.kwargs["y"] == 1 - assert len(f.world.args) == 0 - - @ignore_markinfo - def test_pytest_mark_positional(self): - mark = Mark() - - def f(): - pass - - mark.world("hello")(f) - assert f.world.args[0] == "hello" - mark.world("world")(f) - - @ignore_markinfo - def test_pytest_mark_positional_func_and_keyword(self): - mark = Mark() - - def f(): - raise Exception - - m = mark.world(f, omega="hello") - - def g(): - pass - - assert m(g) == g - assert g.world.args[0] is f - assert g.world.kwargs["omega"] == "hello" - - @ignore_markinfo - def test_pytest_mark_reuse(self): - mark = Mark() - - def f(): - pass - - w = mark.some - w("hello", reason="123")(f) - assert f.some.args[0] == "hello" - assert f.some.kwargs["reason"] == "123" - - def g(): - pass - - w("world", reason2="456")(g) - assert g.some.args[0] == "world" - assert "reason" not in g.some.kwargs - assert g.some.kwargs["reason2"] == "456" + with pytest.raises(AttributeError): + mark._some_name def test_marked_class_run_twice(testdir, request): @@ -505,116 +401,6 @@ def test_func(a, b): class TestFunctional(object): - def test_mark_per_function(self, testdir): - p = testdir.makepyfile( - """ - import pytest - @pytest.mark.hello - def test_hello(): - assert hasattr(test_hello, 'hello') - """ - ) - result = testdir.runpytest(p) - result.stdout.fnmatch_lines(["*1 passed*"]) - - def test_mark_per_module(self, testdir): - item = testdir.getitem( - """ - import pytest - pytestmark = pytest.mark.hello - def test_func(): - pass - """ - ) - keywords = item.keywords - assert "hello" in keywords - - def test_marklist_per_class(self, testdir): - item = testdir.getitem( - """ - import pytest - class TestClass(object): - pytestmark = [pytest.mark.hello, pytest.mark.world] - def test_func(self): - assert TestClass.test_func.hello - assert TestClass.test_func.world - """ - ) - keywords = item.keywords - assert "hello" in keywords - - def test_marklist_per_module(self, testdir): - item = testdir.getitem( - """ - import pytest - pytestmark = [pytest.mark.hello, pytest.mark.world] - class TestClass(object): - def test_func(self): - assert TestClass.test_func.hello - assert TestClass.test_func.world - """ - ) - keywords = item.keywords - assert "hello" in keywords - assert "world" in keywords - - def 
test_mark_per_class_decorator(self, testdir): - item = testdir.getitem( - """ - import pytest - @pytest.mark.hello - class TestClass(object): - def test_func(self): - assert TestClass.test_func.hello - """ - ) - keywords = item.keywords - assert "hello" in keywords - - def test_mark_per_class_decorator_plus_existing_dec(self, testdir): - item = testdir.getitem( - """ - import pytest - @pytest.mark.hello - class TestClass(object): - pytestmark = pytest.mark.world - def test_func(self): - assert TestClass.test_func.hello - assert TestClass.test_func.world - """ - ) - keywords = item.keywords - assert "hello" in keywords - assert "world" in keywords - - @ignore_markinfo - def test_merging_markers(self, testdir): - p = testdir.makepyfile( - """ - import pytest - pytestmark = pytest.mark.hello("pos1", x=1, y=2) - class TestClass(object): - # classlevel overrides module level - pytestmark = pytest.mark.hello(x=3) - @pytest.mark.hello("pos0", z=4) - def test_func(self): - pass - """ - ) - items, rec = testdir.inline_genitems(p) - item, = items - keywords = item.keywords - marker = keywords["hello"] - assert marker.args == ("pos0", "pos1") - assert marker.kwargs == {"x": 1, "y": 2, "z": 4} - - # test the new __iter__ interface - values = list(marker) - assert len(values) == 3 - assert values[0].args == ("pos0",) - assert values[1].args == () - assert values[2].args == ("pos1",) - def test_merging_markers_deep(self, testdir): # issue 199 - propagate markers into nested classes p = testdir.makepyfile( @@ -677,11 +463,6 @@ class TestOtherSub(TestBase): items, rec = testdir.inline_genitems(p) base_item, sub_item, sub_item_other = items print(items, [x.nodeid for x in items]) - # legacy api smears - assert hasattr(base_item.obj, "b") - assert hasattr(sub_item_other.obj, "b") - assert hasattr(sub_item.obj, "b") - # new api seregates assert not list(base_item.iter_markers(name="b")) assert not list(sub_item_other.iter_markers(name="b")) @@ -767,26 +548,6 @@ def test_func(arg): result = testdir.runpytest() result.stdout.fnmatch_lines(["keyword: *hello*"]) - @ignore_markinfo - def test_merging_markers_two_functions(self, testdir): - p = testdir.makepyfile( - """ - import pytest - @pytest.mark.hello("pos1", z=4) - @pytest.mark.hello("pos0", z=3) - def test_func(): - pass - """ - ) - items, rec = testdir.inline_genitems(p) - item, = items - keywords = item.keywords - marker = keywords["hello"] - values = list(marker) - assert len(values) == 2 - assert values[0].args == ("pos0",) - assert values[1].args == ("pos1",) - def test_no_marker_match_on_unmarked_names(self, testdir): p = testdir.makepyfile( """ @@ -860,7 +621,7 @@ def test_some(request): assert "mark2" in request.keywords assert "mark3" in request.keywords assert 10 not in request.keywords - marker = request.node.get_marker("mark1") + marker = request.node.get_closest_marker("mark1") assert marker.name == "mark1" assert marker.args == () assert marker.kwargs == {} @@ -876,15 +637,11 @@ def assert_markers(self, items, **expected): .. note:: this could be moved to ``testdir`` if proven to be useful to other modules. 
""" - from _pytest.mark import MarkInfo items = {x.name: x for x in items} for name, expected_markers in expected.items(): - markers = items[name].keywords._markers - marker_names = { - name for (name, v) in markers.items() if isinstance(v, MarkInfo) - } - assert marker_names == set(expected_markers) + markers = {m.name for m in items[name].iter_markers()} + assert markers == set(expected_markers) @pytest.mark.issue1540 @pytest.mark.filterwarnings("ignore") @@ -1043,26 +800,6 @@ def assert_test_is_not_selected(keyword): assert_test_is_not_selected("()") -def test_legacy_transfer(): - class FakeModule(object): - pytestmark = [] - - class FakeClass(object): - pytestmark = pytest.mark.nofun - - @pytest.mark.fun - def fake_method(self): - pass - - transfer_markers(fake_method, FakeClass, FakeModule) - - # legacy marks transfer smeared - assert fake_method.nofun - assert fake_method.fun - # pristine marks dont transfer - assert fake_method.pytestmark == [pytest.mark.fun.mark] - - class TestMarkDecorator(object): @pytest.mark.parametrize( "lhs, rhs, expected", @@ -1163,19 +900,12 @@ class TestBarClass(BaseTests): deselected_tests = dlist[0].items assert len(deselected_tests) == 1 + # todo: fixed # keywords smear - expected behaviour - reprec_keywords = testdir.inline_run("-k", "FOO") - passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes() - assert passed_k == 2 - assert skipped_k == failed_k == 0 - - -def test_addmarker_getmarker(): - node = Node("Test", config=mock.Mock(), session=mock.Mock(), nodeid="Test") - node.add_marker(pytest.mark.a(1)) - node.add_marker("b") - node.get_marker("a").combined - node.get_marker("b").combined + # reprec_keywords = testdir.inline_run("-k", "FOO") + # passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes() + # assert passed_k == 2 + # assert skipped_k == failed_k == 0 def test_addmarker_order(): @@ -1199,7 +929,7 @@ def test_markers_from_parametrize(testdir): custom_mark = pytest.mark.custom_mark @pytest.fixture(autouse=True) def trigger(request): - custom_mark =request.node.get_marker('custom_mark') + custom_mark = list(request.node.iter_markers('custom_mark')) print("Custom mark %s" % custom_mark) @custom_mark("custom mark non parametrized") From 3947b859dc28c33658f82bd6b9ddbe6f4f6d97ba Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Wed, 19 Dec 2018 20:41:13 +0100 Subject: [PATCH 81/98] fix hookspec parsing --- src/_pytest/config/__init__.py | 11 ++++++++--- 1 file changed, 8 insertions(+), 3 deletions(-) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index d9ca76d05bf..3f7c0674991 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -268,10 +268,10 @@ def parse_hookimpl_opts(self, plugin, name): # collect unmarked hooks as long as they have the `pytest_' prefix if opts is None and name.startswith("pytest_"): opts = {} - if opts is not None: # TODO: DeprecationWarning, people should use hookimpl known_marks = {m.name for m in getattr(method, "pytestmark", [])} + for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): opts.setdefault(name, hasattr(method, name) or name in known_marks) @@ -283,10 +283,15 @@ def parse_hookspec_opts(self, module_or_class, name): ) if opts is None: method = getattr(module_or_class, name) + if name.startswith("pytest_"): + # todo: deprecate hookspec hacks + known_marks = {m.name for m in getattr(method, "pytestmark", [])} opts = { - "firstresult": hasattr(method, "firstresult"), - "historic": hasattr(method, "historic"), + 
"firstresult": hasattr(method, "firstresult") + or "firstresult" in known_marks, + "historic": hasattr(method, "historic") + or "historic" in known_marks, } return opts From b258764ffed842bab2f6c580e48ed8f0b6b06992 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Wed, 19 Dec 2018 21:12:12 +0100 Subject: [PATCH 82/98] fix docs --- doc/en/reference.rst | 7 ------- tox.ini | 1 + 2 files changed, 1 insertion(+), 7 deletions(-) diff --git a/doc/en/reference.rst b/doc/en/reference.rst index 754035d16b8..9305cbb9586 100644 --- a/doc/en/reference.rst +++ b/doc/en/reference.rst @@ -724,13 +724,6 @@ MarkGenerator :members: -MarkInfo -~~~~~~~~ - -.. autoclass:: _pytest.mark.MarkInfo - :members: - - Mark ~~~~ diff --git a/tox.ini b/tox.ini index 65e40116a56..7a69000c817 100644 --- a/tox.ini +++ b/tox.ini @@ -121,6 +121,7 @@ setenv= setenv = {[testenv:py27-pluggymaster]setenv} [testenv:docs] +basepython = python3 skipsdist = True usedevelop = True changedir = doc/en From 64a353f2b688049941fcbad87b62ddea6c3ef71d Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 20 Dec 2018 10:41:14 +0100 Subject: [PATCH 83/98] update deprecation docs --- doc/en/deprecations.rst | 24 ++++++++++++++++++------ 1 file changed, 18 insertions(+), 6 deletions(-) diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index dc716951b0d..37be419e984 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -77,13 +77,7 @@ Becomes: -``Node.get_marker`` -~~~~~~~~~~~~~~~~~~~ - -.. deprecated:: 3.6 -As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` is deprecated. See -:ref:`the documentation ` on tips on how to update your code. Result log (``--result-log``) @@ -497,3 +491,21 @@ Removed all ``py.test-X*`` entry points. The versioned, suffixed entry points were never documented and a leftover from a pre-virtualenv era. These entry points also created broken entry points in wheels, so removing them also removes a source of confusion for users. + + +``Node.get_marker`` +~~~~~~~~~~~~~~~~~~~ + +*removed in version 4.1* + +As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` is deprecated. See +:ref:`the documentation ` on tips on how to update your code. + + +``somefunction.markname`` +~~~~~~~~~~~~~~~~~~~~~ + +* Removed in version 4.1 + +As part of a large :ref:`marker-revamp` we already deprecated using ``MarkInfo`` +the only correct way to get markers of an element is via ``node.iter_markers([name]``. 
From 102ffc69e8f0a8a82406f7f8a1ffaa722da79de7 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 20 Dec 2018 11:57:44 +0100 Subject: [PATCH 84/98] add issue references to the todos --- src/_pytest/config/__init__.py | 2 ++ src/_pytest/python.py | 2 ++ 2 files changed, 4 insertions(+) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 3f7c0674991..051eda79d3c 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -270,6 +270,7 @@ def parse_hookimpl_opts(self, plugin, name): opts = {} if opts is not None: # TODO: DeprecationWarning, people should use hookimpl + # https://github.com/pytest-dev/pytest/issues/4562 known_marks = {m.name for m in getattr(method, "pytestmark", [])} for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"): @@ -286,6 +287,7 @@ def parse_hookspec_opts(self, module_or_class, name): if name.startswith("pytest_"): # todo: deprecate hookspec hacks + # https://github.com/pytest-dev/pytest/issues/4562 known_marks = {m.name for m in getattr(method, "pytestmark", [])} opts = { "firstresult": hasattr(method, "firstresult") diff --git a/src/_pytest/python.py b/src/_pytest/python.py index a508dbe2441..48a50178f65 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -1290,6 +1290,8 @@ def __init__( self.keywords.update(keywords) # todo: this is a hell of a hack + # https://github.com/pytest-dev/pytest/issues/4569 + self.keywords.update( dict.fromkeys( [ From 8f8d3114dd12564b9e051ca48fab55c3496b4cd1 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Thu, 20 Dec 2018 12:11:26 +0100 Subject: [PATCH 85/98] apply suggested enhancements in deprecations.rst --- doc/en/deprecations.rst | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/doc/en/deprecations.rst b/doc/en/deprecations.rst index 37be419e984..f3240cec7a1 100644 --- a/doc/en/deprecations.rst +++ b/doc/en/deprecations.rst @@ -496,16 +496,16 @@ removes a source of confusion for users. ``Node.get_marker`` ~~~~~~~~~~~~~~~~~~~ -*removed in version 4.1* +*Removed in version 4.0* As part of a large :ref:`marker-revamp`, :meth:`_pytest.nodes.Node.get_marker` is deprecated. See :ref:`the documentation ` on tips on how to update your code. ``somefunction.markname`` -~~~~~~~~~~~~~~~~~~~~~ +~~~~~~~~~~~~~~~~~~~~~~~~~ -* Removed in version 4.1 +*Removed in version 4.0* As part of a large :ref:`marker-revamp` we already deprecated using ``MarkInfo`` -the only correct way to get markers of an element is via ``node.iter_markers([name]``. +the only correct way to get markers of an element is via ``node.iter_markers(name)``. From 134641fcb5687bd76511bdc629407a4c0be05685 Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Fri, 21 Dec 2018 07:44:45 +0100 Subject: [PATCH 86/98] add first set of changelog entries for marker removal --- changelog/4546.removal.rst | 3 +++ changelog/891.removal.rst | 1 + 2 files changed, 4 insertions(+) create mode 100644 changelog/4546.removal.rst create mode 100644 changelog/891.removal.rst diff --git a/changelog/4546.removal.rst b/changelog/4546.removal.rst new file mode 100644 index 00000000000..977a30e8cd0 --- /dev/null +++ b/changelog/4546.removal.rst @@ -0,0 +1,3 @@ +Remove ``Node.get_marker(name)`` the return value was not usable for more than a existence check. + +Use ``Node.get_closest_marker(name)`` as a replacement. 
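The changelog entry above names ``Node.get_closest_marker(name)`` as the replacement for ``get_marker``. A minimal sketch of how a ``conftest.py`` hook might use it (the ``needs_network`` marker and the skip reason are hypothetical, not part of this patch series):

.. code-block:: python

    # conftest.py -- illustrative sketch of the post-removal marker lookup
    import pytest


    def pytest_runtest_setup(item):
        # get_closest_marker() returns the nearest Mark with that name, or None,
        # instead of the removed MarkInfo-returning get_marker()
        marker = item.get_closest_marker("needs_network")
        if marker is not None:
            pytest.skip(marker.kwargs.get("reason", "test requires network access"))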
diff --git a/changelog/891.removal.rst b/changelog/891.removal.rst new file mode 100644 index 00000000000..82d75c530a6 --- /dev/null +++ b/changelog/891.removal.rst @@ -0,0 +1 @@ +Remove ``testfunction.markername`` attributes - use ``Node.iter_markers(name=None)`` to iterate them. From 1eef53b6fe7a5e16359d03f30da6ed7fe4e0d8f6 Mon Sep 17 00:00:00 2001 From: Jeffrey Rackauckas Date: Sat, 29 Dec 2018 22:46:46 -0800 Subject: [PATCH 87/98] Update --collect-only to display test descriptions when ran in verbose mode. --- changelog/4371.feature.rst | 1 + src/_pytest/terminal.py | 4 ++++ testing/test_terminal.py | 12 ++++++++++++ 3 files changed, 17 insertions(+) create mode 100644 changelog/4371.feature.rst diff --git a/changelog/4371.feature.rst b/changelog/4371.feature.rst new file mode 100644 index 00000000000..f205fc26997 --- /dev/null +++ b/changelog/4371.feature.rst @@ -0,0 +1 @@ +Updated the ``--collect-only`` option to display test descriptions when ran using ``--verbose``. diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py index 82719b5d40d..bea02306bca 100644 --- a/src/_pytest/terminal.py +++ b/src/_pytest/terminal.py @@ -611,6 +611,10 @@ def _printcollecteditems(self, items): continue indent = (len(stack) - 1) * " " self._tw.line("%s%s" % (indent, col)) + if self.config.option.verbose >= 1: + if hasattr(col, "_obj") and col._obj.__doc__: + for line in col._obj.__doc__.strip().splitlines(): + self._tw.line("%s%s" % (indent + " ", line.strip())) @pytest.hookimpl(hookwrapper=True) def pytest_sessionfinish(self, exitstatus): diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 9cd79afcf98..06345f88d5d 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -276,6 +276,18 @@ def test_collectonly_skipped_module(self, testdir): result = testdir.runpytest("--collect-only", "-rs") result.stdout.fnmatch_lines(["*ERROR collecting*"]) + def test_collectonly_display_test_description(self, testdir): + testdir.makepyfile( + """ + def test_with_description(): + \""" This test has a description. + \""" + assert True + """ + ) + result = testdir.runpytest("--collect-only", "--verbose") + result.stdout.fnmatch_lines([" This test has a description."]) + def test_collectonly_failed_module(self, testdir): testdir.makepyfile("""raise ValueError(0)""") result = testdir.runpytest("--collect-only") From 8b48621687f1a4bbc4832d1c2b852bec061dbfff Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Fri, 4 Jan 2019 17:56:13 +0000 Subject: [PATCH 88/98] Allow providing a custom reason for `importorskip` --- src/_pytest/outcomes.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/src/_pytest/outcomes.py b/src/_pytest/outcomes.py index 714be308834..cdda8630e24 100644 --- a/src/_pytest/outcomes.py +++ b/src/_pytest/outcomes.py @@ -137,7 +137,7 @@ def xfail(reason=""): xfail.Exception = XFailed -def importorskip(modname, minversion=None): +def importorskip(modname, minversion=None, reason=None): """ return imported module if it has at least "minversion" as its __version__ attribute. If no minversion is specified the a skip is only triggered if the module can not be imported. 
@@ -159,7 +159,9 @@ def importorskip(modname, minversion=None): # Do not raise chained exception here(#1485) should_skip = True if should_skip: - raise Skipped("could not import %r" % (modname,), allow_module_level=True) + if reason is None: + reason = "could not import %r" % (modname,) + raise Skipped(reason, allow_module_level=True) mod = sys.modules[modname] if minversion is None: return mod From be7eb22e8871fb9cb332bacfd3810a698f52636b Mon Sep 17 00:00:00 2001 From: Pedro Algarvio Date: Fri, 4 Jan 2019 19:02:07 +0000 Subject: [PATCH 89/98] Add test case for `importorskip` custom reason --- testing/test_runner.py | 16 ++++++++++++++++ 1 file changed, 16 insertions(+) diff --git a/testing/test_runner.py b/testing/test_runner.py index ae129d06d27..91f7d270013 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -738,6 +738,22 @@ def test_foo(): result.stdout.fnmatch_lines(["*collected 0 items / 1 skipped*"]) +def test_importorskip_custom_reason(testdir): + """make sure custom reasons are used""" + testdir.makepyfile( + """ + import pytest + foobarbaz = pytest.importorskip("foobarbaz2", reason="just because") + + def test_foo(): + pass + """ + ) + result = testdir.runpytest("-ra") + result.stdout.fnmatch_lines(["*just because*"]) + result.stdout.fnmatch_lines(["*collected 0 items / 1 skipped*"]) + + def test_pytest_cmdline_main(testdir): p = testdir.makepyfile( """ From 0c4898670c98fa39b393d396062d08808941ae5f Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 12:55:39 -0200 Subject: [PATCH 90/98] Add changelog entry and update docs for importorskip --- changelog/4599.feature.rst | 2 ++ src/_pytest/outcomes.py | 11 ++++++++--- 2 files changed, 10 insertions(+), 3 deletions(-) create mode 100644 changelog/4599.feature.rst diff --git a/changelog/4599.feature.rst b/changelog/4599.feature.rst new file mode 100644 index 00000000000..12ed20b238f --- /dev/null +++ b/changelog/4599.feature.rst @@ -0,0 +1,2 @@ +``pytest.importorskip`` now supports a ``reason`` parameter, which will be shown when the +requested module cannot be imported. diff --git a/src/_pytest/outcomes.py b/src/_pytest/outcomes.py index cdda8630e24..d27939e30c7 100644 --- a/src/_pytest/outcomes.py +++ b/src/_pytest/outcomes.py @@ -138,9 +138,14 @@ def xfail(reason=""): def importorskip(modname, minversion=None, reason=None): - """ return imported module if it has at least "minversion" as its - __version__ attribute. If no minversion is specified the a skip - is only triggered if the module can not be imported. + """Imports and returns the requested module ``modname``, or skip the current test + if the module cannot be imported. + + :param str modname: the name of the module to import + :param str minversion: if given, the imported module ``__version__`` attribute must be + at least this minimal version, otherwise the test is still skipped. + :param str reason: if given, this reason is shown as the message when the module + cannot be imported. 
""" import warnings From 9d297c06e8679346a3046239554f5d67452b8dea Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 15:21:49 +0000 Subject: [PATCH 91/98] Preparing release version 4.1.0 --- CHANGELOG.rst | 226 ++++++++++++++++++++++++++++ changelog/2169.removal.rst | 1 - changelog/3050.deprecation.rst | 3 - changelog/3078.removal.rst | 3 - changelog/3079.removal.rst | 3 - changelog/3082.removal.rst | 3 - changelog/3083.removal.rst | 3 - changelog/3085.removal.rst | 3 - changelog/3086.removal.rst | 4 - changelog/3191.feature.rst | 23 --- changelog/3532.bugfix.rst | 1 - changelog/3616.removal.rst | 3 - changelog/3632.feature.rst | 1 - changelog/3974.deprecation.rst | 8 - changelog/4278.feature.rst | 4 - changelog/4292.feature.rst | 1 - changelog/4327.bugfix.rst | 1 - changelog/4371.feature.rst | 1 - changelog/4386.feature.rst | 1 - changelog/4397.bugfix.rst | 1 - changelog/4416.feature.rst | 6 - changelog/4421.removal.rst | 3 - changelog/4435.bugfix.rst | 1 - changelog/4435.deprecation.rst | 3 - changelog/4447.trivial.rst | 3 - changelog/4458.bugfix.rst | 1 - changelog/4483.feature.rst | 9 -- changelog/4489.removal.rst | 3 - changelog/4532.feature.rst | 3 - changelog/4535.removal.rst | 1 - changelog/4543.removal.rst | 3 - changelog/4545.removal.rst | 3 - changelog/4546.removal.rst | 3 - changelog/4547.removal.rst | 3 - changelog/4548.removal.rst | 3 - changelog/4557.doc.rst | 1 - changelog/4558.doc.rst | 1 - changelog/4580.doc.rst | 1 - changelog/4599.feature.rst | 2 - changelog/891.removal.rst | 1 - doc/en/announce/index.rst | 1 + doc/en/announce/release-4.1.0.rst | 44 ++++++ doc/en/builtin.rst | 2 - doc/en/cache.rst | 6 +- doc/en/example/nonpython.rst | 8 +- doc/en/example/parametrize.rst | 40 ++--- doc/en/example/pythoncollection.rst | 18 +-- doc/en/example/reportingdemo.rst | 99 +++++++----- doc/en/example/simple.rst | 2 +- doc/en/fixture.rst | 28 ++-- doc/en/usage.rst | 63 +------- 51 files changed, 393 insertions(+), 266 deletions(-) delete mode 100644 changelog/2169.removal.rst delete mode 100644 changelog/3050.deprecation.rst delete mode 100644 changelog/3078.removal.rst delete mode 100644 changelog/3079.removal.rst delete mode 100644 changelog/3082.removal.rst delete mode 100644 changelog/3083.removal.rst delete mode 100644 changelog/3085.removal.rst delete mode 100644 changelog/3086.removal.rst delete mode 100644 changelog/3191.feature.rst delete mode 100644 changelog/3532.bugfix.rst delete mode 100644 changelog/3616.removal.rst delete mode 100644 changelog/3632.feature.rst delete mode 100644 changelog/3974.deprecation.rst delete mode 100644 changelog/4278.feature.rst delete mode 100644 changelog/4292.feature.rst delete mode 100644 changelog/4327.bugfix.rst delete mode 100644 changelog/4371.feature.rst delete mode 100644 changelog/4386.feature.rst delete mode 100644 changelog/4397.bugfix.rst delete mode 100644 changelog/4416.feature.rst delete mode 100644 changelog/4421.removal.rst delete mode 100644 changelog/4435.bugfix.rst delete mode 100644 changelog/4435.deprecation.rst delete mode 100644 changelog/4447.trivial.rst delete mode 100644 changelog/4458.bugfix.rst delete mode 100644 changelog/4483.feature.rst delete mode 100644 changelog/4489.removal.rst delete mode 100644 changelog/4532.feature.rst delete mode 100644 changelog/4535.removal.rst delete mode 100644 changelog/4543.removal.rst delete mode 100644 changelog/4545.removal.rst delete mode 100644 changelog/4546.removal.rst delete mode 100644 changelog/4547.removal.rst delete mode 100644 
changelog/4548.removal.rst delete mode 100644 changelog/4557.doc.rst delete mode 100644 changelog/4558.doc.rst delete mode 100644 changelog/4580.doc.rst delete mode 100644 changelog/4599.feature.rst delete mode 100644 changelog/891.removal.rst create mode 100644 doc/en/announce/release-4.1.0.rst diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 22f3ac862bd..86d751b63fa 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -18,6 +18,232 @@ with advance notice in the **Deprecations** section of releases. .. towncrier release notes start +pytest 4.1.0 (2019-01-05) +========================= + +Removals +-------- + +- `#2169 `_: ``pytest.mark.parametrize``: in previous versions, errors raised by id functions were suppressed and changed into warnings. Now the exceptions are propagated, along with a pytest message informing the node, parameter value and index where the exception occurred. + + +- `#3078 `_: Remove legacy internal warnings system: ``config.warn``, ``Node.warn``. The ``pytest_logwarning`` now issues a warning when implemented. + + See our `docs `__ on information on how to update your code. + + +- `#3079 `_: Removed support for yield tests - they are fundamentally broken because they don't support fixtures properly since collection and test execution were separated. + + See our `docs `__ on information on how to update your code. + + +- `#3082 `_: Removed support for applying marks directly to values in ``@pytest.mark.parametrize``. Use ``pytest.param`` instead. + + See our `docs `__ on information on how to update your code. + + +- `#3083 `_: Removed ``Metafunc.addcall``. This was the predecessor mechanism to ``@pytest.mark.parametrize``. + + See our `docs `__ on information on how to update your code. + + +- `#3085 `_: Removed support for passing strings to ``pytest.main``. Now, always pass a list of strings instead. + + See our `docs `__ on information on how to update your code. + + +- `#3086 `_: ``[pytest]`` section in **setup.cfg** files is not longer supported, use ``[tool:pytest]`` instead. ``setup.cfg`` files + are meant for use with ``distutils``, and a section named ``pytest`` has notoriously been a source of conflicts and bugs. + + Note that for **pytest.ini** and **tox.ini** files the section remains ``[pytest]``. + + +- `#3616 `_: Removed the deprecated compat properties for ``node.Class/Function/Module`` - use ``pytest.Class/Function/Module`` now. + + See our `docs `__ on information on how to update your code. + + +- `#4421 `_: Removed the implementation of the ``pytest_namespace`` hook. + + See our `docs `__ on information on how to update your code. + + +- `#4489 `_: Removed ``request.cached_setup``. This was the predecessor mechanism to modern fixtures. + + See our `docs `__ on information on how to update your code. + + +- `#4535 `_: Removed the deprecated ``PyCollector.makeitem`` method. This method was made public by mistake a long time ago. + + +- `#4543 `_: Removed support to define fixtures using the ``pytest_funcarg__`` prefix. Use the ``@pytest.fixture`` decorator instead. + + See our `docs `__ on information on how to update your code. + + +- `#4545 `_: Calling fixtures directly is now always an error instead of a warning. + + See our `docs `__ on information on how to update your code. + + +- `#4546 `_: Remove ``Node.get_marker(name)`` the return value was not usable for more than a existence check. + + Use ``Node.get_closest_marker(name)`` as a replacement. 
+ + +- `#4547 `_: The deprecated ``record_xml_property`` fixture has been removed, use the more generic ``record_property`` instead. + + See our `docs `__ for more information. + + +- `#4548 `_: An error is now raised if the ``pytest_plugins`` variable is defined in a non-top-level ``conftest.py`` file (i.e., not residing in the ``rootdir``). + + See our `docs `__ for more information. + + +- `#891 `_: Remove ``testfunction.markername`` attributes - use ``Node.iter_markers(name=None)`` to iterate them. + + + +Deprecations +------------ + +- `#3050 `_: Deprecated the ``pytest.config`` global. + + See https://docs.pytest.org/en/latest/deprecations.html#pytest-config-global for rationale. + + +- `#3974 `_: Passing the ``message`` parameter of ``pytest.raises`` now issues a ``DeprecationWarning``. + + It is a common mistake to think this parameter will match the exception message, while in fact + it only serves to provide a custom message in case the ``pytest.raises`` check fails. To avoid this + mistake and because it is believed to be little used, pytest is deprecating it without providing + an alternative for the moment. + + If you have concerns about this, please comment on `issue #3974 `__. + + +- `#4435 `_: Deprecated ``raises(..., 'code(as_a_string)')`` and ``warns(..., 'code(as_a_string)')``. + + See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec for rationale and examples. + + + +Features +-------- + +- `#3191 `_: A warning is now issued when assertions are made for ``None``. + + This is a common source of confusion among new users, which write: + + .. code-block:: python + + assert mocked_object.assert_called_with(3, 4, 5, key="value") + + When they should write: + + .. code-block:: python + + mocked_object.assert_called_with(3, 4, 5, key="value") + + Because the ``assert_called_with`` method of mock objects already executes an assertion. + + This warning will not be issued when ``None`` is explicitly checked. An assertion like: + + .. code-block:: python + + assert variable is None + + will not issue the warning. + + +- `#3632 `_: Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+, `backported to 3.6 `_). + + +- `#4278 `_: ``CACHEDIR.TAG`` files are now created inside cache directories. + + Those files are part of the `Cache Directory Tagging Standard `__, and can + be used by backup or synchronization programs to identify pytest's cache directory as such. + + +- `#4292 `_: ``pytest.outcomes.Exit`` is derived from ``SystemExit`` instead of ``KeyboardInterrupt``. This allows us to better handle ``pdb`` exiting. + + +- `#4371 `_: Updated the ``--collect-only`` option to display test descriptions when ran using ``--verbose``. + + +- `#4386 `_: Restructured ``ExceptionInfo`` object construction and ensure incomplete instances have a ``repr``/``str``. + + +- `#4416 `_: pdb: added support for keyword arguments with ``pdb.set_trace``. + + It handles ``header`` similar to Python 3.7 does it, and forwards any + other keyword arguments to the ``Pdb`` constructor. + + This allows for ``__import__("pdb").set_trace(skip=["foo.*"])``. + + +- `#4483 `_: Added ini parameter ``junit_duration_report`` to optionally report test call durations, excluding setup and teardown times. + + The JUnit XML specification and the default pytest behavior is to include setup and teardown times in the test duration + report. 
You can include just the call durations instead (excluding setup and teardown) by adding this to your ``pytest.ini`` file: + + .. code-block:: ini + + [pytest] + junit_duration_report = call + + +- `#4532 `_: ``-ra`` now will show errors and failures last, instead of as the first items in the summary. + + This makes it easier to obtain a list of errors and failures to run tests selectively. + + +- `#4599 `_: ``pytest.importorskip`` now supports a ``reason`` parameter, which will be shown when the + requested module cannot be imported. + + + +Bug Fixes +--------- + +- `#3532 `_: ``-p`` now accepts its argument without a space between the value, for example ``-pmyplugin``. + + +- `#4327 `_: ``approx`` again works with more generic containers, more precisely instances of ``Iterable`` and ``Sized`` instead of more restrictive ``Sequence``. + + +- `#4397 `_: Ensure that node ids are printable. + + +- `#4435 `_: Fixed ``raises(..., 'code(string)')`` frame filename. + + +- `#4458 `_: Display actual test ids in ``--collect-only``. + + + +Improved Documentation +---------------------- + +- `#4557 `_: Markers example documentation page updated to support latest pytest version. + + +- `#4558 `_: Update cache documentation example to correctly show cache hit and miss. + + +- `#4580 `_: Improved detailed summary report documentation. + + + +Trivial/Internal Changes +------------------------ + +- `#4447 `_: Changed the deprecation type of ``--result-log`` to ``PytestDeprecationWarning``. + + It was decided to remove this feature at the next major revision. + + pytest 4.0.2 (2018-12-13) ========================= diff --git a/changelog/2169.removal.rst b/changelog/2169.removal.rst deleted file mode 100644 index 272ddbdfbac..00000000000 --- a/changelog/2169.removal.rst +++ /dev/null @@ -1 +0,0 @@ -``pytest.mark.parametrize``: in previous versions, errors raised by id functions were suppressed and changed into warnings. Now the exceptions are propagated, along with a pytest message informing the node, parameter value and index where the exception occurred. diff --git a/changelog/3050.deprecation.rst b/changelog/3050.deprecation.rst deleted file mode 100644 index fce5979d677..00000000000 --- a/changelog/3050.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deprecated the ``pytest.config`` global. - -See https://docs.pytest.org/en/latest/deprecations.html#pytest-config-global for rationale. diff --git a/changelog/3078.removal.rst b/changelog/3078.removal.rst deleted file mode 100644 index 8f90811c3a2..00000000000 --- a/changelog/3078.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Remove legacy internal warnings system: ``config.warn``, ``Node.warn``. The ``pytest_logwarning`` now issues a warning when implemented. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/3079.removal.rst b/changelog/3079.removal.rst deleted file mode 100644 index cb2265ff386..00000000000 --- a/changelog/3079.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed support for yield tests - they are fundamentally broken because they don't support fixtures properly since collection and test execution were separated. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/3082.removal.rst b/changelog/3082.removal.rst deleted file mode 100644 index 750f097bc70..00000000000 --- a/changelog/3082.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed support for applying marks directly to values in ``@pytest.mark.parametrize``. Use ``pytest.param`` instead. 
- -See our `docs `__ on information on how to update your code. diff --git a/changelog/3083.removal.rst b/changelog/3083.removal.rst deleted file mode 100644 index 74d268a4e28..00000000000 --- a/changelog/3083.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed ``Metafunc.addcall``. This was the predecessor mechanism to ``@pytest.mark.parametrize``. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/3085.removal.rst b/changelog/3085.removal.rst deleted file mode 100644 index 67ba04c2829..00000000000 --- a/changelog/3085.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed support for passing strings to ``pytest.main``. Now, always pass a list of strings instead. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/3086.removal.rst b/changelog/3086.removal.rst deleted file mode 100644 index 3974aa5004f..00000000000 --- a/changelog/3086.removal.rst +++ /dev/null @@ -1,4 +0,0 @@ -``[pytest]`` section in **setup.cfg** files is not longer supported, use ``[tool:pytest]`` instead. ``setup.cfg`` files -are meant for use with ``distutils``, and a section named ``pytest`` has notoriously been a source of conflicts and bugs. - -Note that for **pytest.ini** and **tox.ini** files the section remains ``[pytest]``. diff --git a/changelog/3191.feature.rst b/changelog/3191.feature.rst deleted file mode 100644 index dbf1c8304b8..00000000000 --- a/changelog/3191.feature.rst +++ /dev/null @@ -1,23 +0,0 @@ -A warning is now issued when assertions are made for ``None``. - -This is a common source of confusion among new users, which write: - -.. code-block:: python - - assert mocked_object.assert_called_with(3, 4, 5, key="value") - -When they should write: - -.. code-block:: python - - mocked_object.assert_called_with(3, 4, 5, key="value") - -Because the ``assert_called_with`` method of mock objects already executes an assertion. - -This warning will not be issued when ``None`` is explicitly checked. An assertion like: - -.. code-block:: python - - assert variable is None - -will not issue the warning. diff --git a/changelog/3532.bugfix.rst b/changelog/3532.bugfix.rst deleted file mode 100644 index 8651458d99e..00000000000 --- a/changelog/3532.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -``-p`` now accepts its argument without a space between the value, for example ``-pmyplugin``. diff --git a/changelog/3616.removal.rst b/changelog/3616.removal.rst deleted file mode 100644 index 5d8c9134e0b..00000000000 --- a/changelog/3616.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed the deprecated compat properties for ``node.Class/Function/Module`` - use ``pytest.Class/Function/Module`` now. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/3632.feature.rst b/changelog/3632.feature.rst deleted file mode 100644 index cb1d93750c6..00000000000 --- a/changelog/3632.feature.rst +++ /dev/null @@ -1 +0,0 @@ -Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+, `backported to 3.6 `_). diff --git a/changelog/3974.deprecation.rst b/changelog/3974.deprecation.rst deleted file mode 100644 index 070ecb8b275..00000000000 --- a/changelog/3974.deprecation.rst +++ /dev/null @@ -1,8 +0,0 @@ -Passing the ``message`` parameter of ``pytest.raises`` now issues a ``DeprecationWarning``. 
- -It is a common mistake to think this parameter will match the exception message, while in fact -it only serves to provide a custom message in case the ``pytest.raises`` check fails. To avoid this -mistake and because it is believed to be little used, pytest is deprecating it without providing -an alternative for the moment. - -If you have concerns about this, please comment on `issue #3974 `__. diff --git a/changelog/4278.feature.rst b/changelog/4278.feature.rst deleted file mode 100644 index 332e64572a2..00000000000 --- a/changelog/4278.feature.rst +++ /dev/null @@ -1,4 +0,0 @@ -``CACHEDIR.TAG`` files are now created inside cache directories. - -Those files are part of the `Cache Directory Tagging Standard `__, and can -be used by backup or synchronization programs to identify pytest's cache directory as such. diff --git a/changelog/4292.feature.rst b/changelog/4292.feature.rst deleted file mode 100644 index 760a2778309..00000000000 --- a/changelog/4292.feature.rst +++ /dev/null @@ -1 +0,0 @@ -``pytest.outcomes.Exit`` is derived from ``SystemExit`` instead of ``KeyboardInterrupt``. This allows us to better handle ``pdb`` exiting. diff --git a/changelog/4327.bugfix.rst b/changelog/4327.bugfix.rst deleted file mode 100644 index 72223af4ee5..00000000000 --- a/changelog/4327.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -``approx`` again works with more generic containers, more precisely instances of ``Iterable`` and ``Sized`` instead of more restrictive ``Sequence``. diff --git a/changelog/4371.feature.rst b/changelog/4371.feature.rst deleted file mode 100644 index f205fc26997..00000000000 --- a/changelog/4371.feature.rst +++ /dev/null @@ -1 +0,0 @@ -Updated the ``--collect-only`` option to display test descriptions when ran using ``--verbose``. diff --git a/changelog/4386.feature.rst b/changelog/4386.feature.rst deleted file mode 100644 index 5133a39a773..00000000000 --- a/changelog/4386.feature.rst +++ /dev/null @@ -1 +0,0 @@ -Restructured ``ExceptionInfo`` object construction and ensure incomplete instances have a ``repr``/``str``. diff --git a/changelog/4397.bugfix.rst b/changelog/4397.bugfix.rst deleted file mode 100644 index d1a5bd3ba81..00000000000 --- a/changelog/4397.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Ensure that node ids are printable. diff --git a/changelog/4416.feature.rst b/changelog/4416.feature.rst deleted file mode 100644 index 949e7c25a9a..00000000000 --- a/changelog/4416.feature.rst +++ /dev/null @@ -1,6 +0,0 @@ -pdb: added support for keyword arguments with ``pdb.set_trace``. - -It handles ``header`` similar to Python 3.7 does it, and forwards any -other keyword arguments to the ``Pdb`` constructor. - -This allows for ``__import__("pdb").set_trace(skip=["foo.*"])``. diff --git a/changelog/4421.removal.rst b/changelog/4421.removal.rst deleted file mode 100644 index 4bebd5c1941..00000000000 --- a/changelog/4421.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed the implementation of the ``pytest_namespace`` hook. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/4435.bugfix.rst b/changelog/4435.bugfix.rst deleted file mode 100644 index 36ace1fab44..00000000000 --- a/changelog/4435.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fixed ``raises(..., 'code(string)')`` frame filename. 
diff --git a/changelog/4435.deprecation.rst b/changelog/4435.deprecation.rst deleted file mode 100644 index 6815c1776ee..00000000000 --- a/changelog/4435.deprecation.rst +++ /dev/null @@ -1,3 +0,0 @@ -Deprecated ``raises(..., 'code(as_a_string)')`` and ``warns(..., 'code(as_a_string)')``. - -See https://docs.pytest.org/en/latest/deprecations.html#raises-warns-exec for rationale and examples. diff --git a/changelog/4447.trivial.rst b/changelog/4447.trivial.rst deleted file mode 100644 index 884ccd4c9a1..00000000000 --- a/changelog/4447.trivial.rst +++ /dev/null @@ -1,3 +0,0 @@ -Changed the deprecation type of ``--result-log`` to ``PytestDeprecationWarning``. - -It was decided to remove this feature at the next major revision. diff --git a/changelog/4458.bugfix.rst b/changelog/4458.bugfix.rst deleted file mode 100644 index 891fb9a2fce..00000000000 --- a/changelog/4458.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Display actual test ids in ``--collect-only``. diff --git a/changelog/4483.feature.rst b/changelog/4483.feature.rst deleted file mode 100644 index 9b301870753..00000000000 --- a/changelog/4483.feature.rst +++ /dev/null @@ -1,9 +0,0 @@ -Added ini parameter ``junit_duration_report`` to optionally report test call durations, excluding setup and teardown times. - -The JUnit XML specification and the default pytest behavior is to include setup and teardown times in the test duration -report. You can include just the call durations instead (excluding setup and teardown) by adding this to your ``pytest.ini`` file: - -.. code-block:: ini - - [pytest] - junit_duration_report = call diff --git a/changelog/4489.removal.rst b/changelog/4489.removal.rst deleted file mode 100644 index 4236204650f..00000000000 --- a/changelog/4489.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed ``request.cached_setup``. This was the predecessor mechanism to modern fixtures. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/4532.feature.rst b/changelog/4532.feature.rst deleted file mode 100644 index ce7eb372976..00000000000 --- a/changelog/4532.feature.rst +++ /dev/null @@ -1,3 +0,0 @@ -``-ra`` now will show errors and failures last, instead of as the first items in the summary. - -This makes it easier to obtain a list of errors and failures to run tests selectively. diff --git a/changelog/4535.removal.rst b/changelog/4535.removal.rst deleted file mode 100644 index 89de6b74405..00000000000 --- a/changelog/4535.removal.rst +++ /dev/null @@ -1 +0,0 @@ -Removed the deprecated ``PyCollector.makeitem`` method. This method was made public by mistake a long time ago. diff --git a/changelog/4543.removal.rst b/changelog/4543.removal.rst deleted file mode 100644 index 0a2b615f950..00000000000 --- a/changelog/4543.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Removed support to define fixtures using the ``pytest_funcarg__`` prefix. Use the ``@pytest.fixture`` decorator instead. - -See our `docs `__ on information on how to update your code. diff --git a/changelog/4545.removal.rst b/changelog/4545.removal.rst deleted file mode 100644 index 865d2fc1277..00000000000 --- a/changelog/4545.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Calling fixtures directly is now always an error instead of a warning. - -See our `docs `__ on information on how to update your code. 
diff --git a/changelog/4546.removal.rst b/changelog/4546.removal.rst deleted file mode 100644 index 977a30e8cd0..00000000000 --- a/changelog/4546.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -Remove ``Node.get_marker(name)`` the return value was not usable for more than a existence check. - -Use ``Node.get_closest_marker(name)`` as a replacement. diff --git a/changelog/4547.removal.rst b/changelog/4547.removal.rst deleted file mode 100644 index a30d5d7bd87..00000000000 --- a/changelog/4547.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -The deprecated ``record_xml_property`` fixture has been removed, use the more generic ``record_property`` instead. - -See our `docs `__ for more information. diff --git a/changelog/4548.removal.rst b/changelog/4548.removal.rst deleted file mode 100644 index bd47b1d5172..00000000000 --- a/changelog/4548.removal.rst +++ /dev/null @@ -1,3 +0,0 @@ -An error is now raised if the ``pytest_plugins`` variable is defined in a non-top-level ``conftest.py`` file (i.e., not residing in the ``rootdir``). - -See our `docs `__ for more information. diff --git a/changelog/4557.doc.rst b/changelog/4557.doc.rst deleted file mode 100644 index dba2e39cd95..00000000000 --- a/changelog/4557.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Markers example documentation page updated to support latest pytest version. diff --git a/changelog/4558.doc.rst b/changelog/4558.doc.rst deleted file mode 100644 index 09dc5b8637c..00000000000 --- a/changelog/4558.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Update cache documentation example to correctly show cache hit and miss. diff --git a/changelog/4580.doc.rst b/changelog/4580.doc.rst deleted file mode 100644 index 2d8d52f3314..00000000000 --- a/changelog/4580.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Improved detailed summary report documentation. diff --git a/changelog/4599.feature.rst b/changelog/4599.feature.rst deleted file mode 100644 index 12ed20b238f..00000000000 --- a/changelog/4599.feature.rst +++ /dev/null @@ -1,2 +0,0 @@ -``pytest.importorskip`` now supports a ``reason`` parameter, which will be shown when the -requested module cannot be imported. diff --git a/changelog/891.removal.rst b/changelog/891.removal.rst deleted file mode 100644 index 82d75c530a6..00000000000 --- a/changelog/891.removal.rst +++ /dev/null @@ -1 +0,0 @@ -Remove ``testfunction.markername`` attributes - use ``Node.iter_markers(name=None)`` to iterate them. diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index d6379f1b343..40734e5b37f 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,7 @@ Release announcements :maxdepth: 2 + release-4.1.0 release-4.0.2 release-4.0.1 release-4.0.0 diff --git a/doc/en/announce/release-4.1.0.rst b/doc/en/announce/release-4.1.0.rst new file mode 100644 index 00000000000..b7a076f61c9 --- /dev/null +++ b/doc/en/announce/release-4.1.0.rst @@ -0,0 +1,44 @@ +pytest-4.1.0 +======================================= + +The pytest team is proud to announce the 4.1.0 release! + +pytest is a mature Python testing tool with more than a 2000 tests +against itself, passing on many different interpreters and platforms. 
+ +This release contains a number of bugs fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + https://docs.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + https://docs.pytest.org/en/latest/ + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Adam Johnson +* Aly Sivji +* Andrey Paramonov +* Anthony Sottile +* Bruno Oliveira +* Daniel Hahler +* David Vo +* Hyunchel Kim +* Jeffrey Rackauckas +* Kanguros +* Nicholas Devenish +* Pedro Algarvio +* Randy Barlow +* Ronny Pfannschmidt +* Tomer Keren +* feuillemorte +* wim glenn + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 1e376f0d3ec..a40dfc223fd 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -68,8 +68,6 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a def test_function(record_property): record_property("example_key", 1) - record_xml_property - (Deprecated) use record_property. record_xml_attribute Add extra xml attributes to the tag for the calling test. The fixture is callable with ``(name, value)``, with value being diff --git a/doc/en/cache.rst b/doc/en/cache.rst index a0fa72db17a..ba9d87a5fa8 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -215,7 +215,9 @@ If you run this command for the first time, you can see the print statement: > assert mydata == 23 E assert 42 == 23 - test_caching.py:14: AssertionError + test_caching.py:17: AssertionError + -------------------------- Captured stdout setup --------------------------- + running expensive computation... 1 failed in 0.12 seconds If you run it a second time the value will be retrieved from @@ -234,7 +236,7 @@ the cache and nothing will be printed: > assert mydata == 23 E assert 42 == 23 - test_caching.py:14: AssertionError + test_caching.py:17: AssertionError 1 failed in 0.12 seconds See the :ref:`cache-api` for more details. 
diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 5b96a33bfd4..eba8279f38d 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -90,9 +90,9 @@ interesting to just look at the collection tree: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items - - - - + + + + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index d4540fe5e86..92756e492e5 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -147,15 +147,15 @@ objects, they are still using the default pytest representation: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 8 items - - - - - - - - - + + + + + + + + + ======================= no tests ran in 0.12 seconds ======================= @@ -219,12 +219,12 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - - - - - - + + + + + + ======================= no tests ran in 0.12 seconds ======================= @@ -285,9 +285,9 @@ Let's first see how it looks like at collection time: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - - - + + + ======================= no tests ran in 0.12 seconds ======================= @@ -350,8 +350,8 @@ The result of this test will be successful: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - - + + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 3f1dd68eefd..394924e2d85 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -134,10 +134,10 @@ The test collection would look like this: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 2 items - - - - + + + + ======================= no tests ran in 0.12 seconds ======================= @@ -189,11 +189,11 @@ You can always peek at the collection tree without running tests like this: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 3 items - - - - - + + + + + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index ffccdf77f47..2f8c25f02a3 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -15,9 +15,9 @@ get on the terminal - we are working on that): =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: - collected 42 items + collected 44 items - failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] ================================= FAILURES ================================= ___________________________ test_generative[3-6] ___________________________ @@ -289,6 +289,36 @@ get on the terminal - we are working on that): 
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:99: AssertionError + ______________ TestSpecialisedExplanations.test_eq_dataclass _______________ + + self = + + def test_eq_dataclass(self): + > from dataclasses import dataclass + E ModuleNotFoundError: No module named 'dataclasses' + + failure_demo.py:102: ModuleNotFoundError + ________________ TestSpecialisedExplanations.test_eq_attrs _________________ + + self = + + def test_eq_attrs(self): + import attr + + @attr.s + class Foo(object): + a = attr.ib() + b = attr.ib() + + left = Foo(1, "b") + right = Foo(1, "c") + > assert left == right + E AssertionError: assert Foo(a=1, b='b') == Foo(a=1, b='c') + E Omitting 1 identical items, use -vv to show + E Differing attributes: + E b: 'b' != 'c' + + failure_demo.py:123: AssertionError ______________________________ test_attribute ______________________________ def test_attribute(): @@ -300,7 +330,7 @@ get on the terminal - we are working on that): E assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b - failure_demo.py:107: AssertionError + failure_demo.py:131: AssertionError _________________________ test_attribute_instance __________________________ def test_attribute_instance(): @@ -312,7 +342,7 @@ get on the terminal - we are working on that): E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() - failure_demo.py:114: AssertionError + failure_demo.py:138: AssertionError __________________________ test_attribute_failure __________________________ def test_attribute_failure(): @@ -325,7 +355,7 @@ get on the terminal - we are working on that): i = Foo() > assert i.b == 2 - failure_demo.py:125: + failure_demo.py:149: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = .Foo object at 0xdeadbeef> @@ -334,7 +364,7 @@ get on the terminal - we are working on that): > raise Exception("Failed to get attrib") E Exception: Failed to get attrib - failure_demo.py:120: Exception + failure_demo.py:144: Exception _________________________ test_attribute_multiple __________________________ def test_attribute_multiple(): @@ -351,31 +381,26 @@ get on the terminal - we are working on that): E + and 2 = .Bar object at 0xdeadbeef>.b E + where .Bar object at 0xdeadbeef> = .Bar'>() - failure_demo.py:135: AssertionError + failure_demo.py:159: AssertionError __________________________ TestRaises.test_raises __________________________ self = def test_raises(self): - s = "qwe" # NOQA - > raises(TypeError, "int(s)") - - failure_demo.py:145: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - - > int(s) - E ValueError: invalid literal for int() with base 10: 'qwe' + s = "qwe" + > raises(TypeError, int, s) + E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen $REGENDOC_TMPDIR/assertion/failure_demo.py:145>:1: ValueError + failure_demo.py:169: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ self = def test_raises_doesnt(self): - > raises(IOError, "int('3')") + > raises(IOError, int, "3") E Failed: DID NOT RAISE - failure_demo.py:148: Failed + failure_demo.py:172: Failed __________________________ TestRaises.test_raise ___________________________ self = @@ -384,7 +409,7 @@ get on the terminal - we are working on that): > raise ValueError("demo error") E ValueError: demo error - failure_demo.py:151: ValueError + failure_demo.py:175: ValueError ________________________ TestRaises.test_tupleerror ________________________ self = 
@@ -393,7 +418,7 @@ get on the terminal - we are working on that): > a, b = [1] # NOQA E ValueError: not enough values to unpack (expected 2, got 1) - failure_demo.py:154: ValueError + failure_demo.py:178: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ self = @@ -404,7 +429,7 @@ get on the terminal - we are working on that): > a, b = items.pop() E TypeError: 'int' object is not iterable - failure_demo.py:159: TypeError + failure_demo.py:183: TypeError --------------------------- Captured stdout call --------------------------- items is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ @@ -415,7 +440,7 @@ get on the terminal - we are working on that): > if namenotexi: # NOQA E NameError: name 'namenotexi' is not defined - failure_demo.py:162: NameError + failure_demo.py:186: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ def test_dynamic_compile_shows_nicely(): @@ -430,14 +455,14 @@ get on the terminal - we are working on that): sys.modules[name] = module > module.foo() - failure_demo.py:180: + failure_demo.py:204: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E AssertionError - <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:177>:2: AssertionError + <0-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:201>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ self = @@ -451,7 +476,7 @@ get on the terminal - we are working on that): > somefunc(f(), g()) - failure_demo.py:191: + failure_demo.py:215: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:13: in somefunc otherfunc(x, y) @@ -473,7 +498,7 @@ get on the terminal - we are working on that): > a, b = items E ValueError: not enough values to unpack (expected 2, got 0) - failure_demo.py:195: ValueError + failure_demo.py:219: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ self = @@ -483,7 +508,7 @@ get on the terminal - we are working on that): > a, b = items E TypeError: 'int' object is not iterable - failure_demo.py:199: TypeError + failure_demo.py:223: TypeError ______________________ TestMoreErrors.test_startswith ______________________ self = @@ -496,7 +521,7 @@ get on the terminal - we are working on that): E + where False = ('456') E + where = '123'.startswith - failure_demo.py:204: AssertionError + failure_demo.py:228: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ self = @@ -515,7 +540,7 @@ get on the terminal - we are working on that): E + where '123' = .f at 0xdeadbeef>() E + and '456' = .g at 0xdeadbeef>() - failure_demo.py:213: AssertionError + failure_demo.py:237: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ self = @@ -526,7 +551,7 @@ get on the terminal - we are working on that): E + where False = isinstance(43, float) E + where 43 = globf(42) - failure_demo.py:216: AssertionError + failure_demo.py:240: AssertionError _______________________ TestMoreErrors.test_instance _______________________ self = @@ -537,7 +562,7 @@ get on the terminal - we are working on that): E assert 42 != 42 E + where 42 = .x - failure_demo.py:220: AssertionError + failure_demo.py:244: AssertionError _______________________ TestMoreErrors.test_compare ________________________ self = @@ -547,7 +572,7 @@ get on the terminal - we are working 
on that): E assert 11 < 5 E + where 11 = globf(10) - failure_demo.py:223: AssertionError + failure_demo.py:247: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ self = @@ -558,7 +583,7 @@ get on the terminal - we are working on that): > assert x == 0 E assert 1 == 0 - failure_demo.py:228: AssertionError + failure_demo.py:252: AssertionError ___________________ TestCustomAssertMsg.test_single_line ___________________ self = @@ -573,7 +598,7 @@ get on the terminal - we are working on that): E assert 1 == 2 E + where 1 = .A'>.a - failure_demo.py:239: AssertionError + failure_demo.py:263: AssertionError ____________________ TestCustomAssertMsg.test_multiline ____________________ self = @@ -592,7 +617,7 @@ get on the terminal - we are working on that): E assert 1 == 2 E + where 1 = .A'>.a - failure_demo.py:246: AssertionError + failure_demo.py:270: AssertionError ___________________ TestCustomAssertMsg.test_custom_repr ___________________ self = @@ -614,5 +639,5 @@ get on the terminal - we are working on that): E assert 1 == 2 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a - failure_demo.py:259: AssertionError - ======================== 42 failed in 0.12 seconds ========================= + failure_demo.py:283: AssertionError + ======================== 44 failed in 0.12 seconds ========================= diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 95c0e636557..76a1ddc807e 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -598,7 +598,7 @@ We can run this: file $REGENDOC_TMPDIR/b/test_error.py, line 1 def test_root(db): # no db here, will error out E fixture 'db' not found - > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory + > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. 
$REGENDOC_TMPDIR/b/test_error.py:1 diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index 133901ddea1..4dd68f8e4ac 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -628,7 +628,7 @@ So let's just do another run: response, msg = smtp_connection.ehlo() assert response == 250 > assert b"smtp.gmail.com" in msg - E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' + E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING' test_module.py:5: AssertionError -------------------------- Captured stdout setup --------------------------- @@ -703,19 +703,19 @@ Running the above tests results in the following test IDs being used: platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 10 items - - - - - - - - - - - - - + + + + + + + + + + + + + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/usage.rst b/doc/en/usage.rst index bb1e9218cf2..bd9706c4fda 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -158,34 +158,9 @@ Example: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 7 items - - test_examples.py ..FEsxX [100%] - - ==================================== ERRORS ==================================== - _________________________ ERROR at setup of test_error _________________________ - file /Users/chainz/tmp/pytestratest/test_examples.py, line 17 - def test_error(unknown_fixture): - E fixture 'unknown_fixture' not found - > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmp_path, tmp_path_factory, tmpdir, tmpdir_factory - > use 'pytest --fixtures [testpath]' for help on them. - - /Users/chainz/tmp/pytestratest/test_examples.py:17 - =================================== FAILURES =================================== - __________________________________ test_fail ___________________________________ - - def test_fail(): - > assert 0 - E assert 0 - - test_examples.py:14: AssertionError - =========================== short test summary info ============================ - FAIL test_examples.py::test_fail - ERROR test_examples.py::test_error - SKIP [1] test_examples.py:21: Example - XFAIL test_examples.py::test_xfail - XPASS test_examples.py::test_xpass - = 1 failed, 2 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.07 seconds = + collected 0 items + + ======================= no tests ran in 0.12 seconds ======================= The ``-r`` options accepts a number of characters after it, with ``a`` used above meaning "all except passes". 
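For instance (a minimal sketch only, reusing the hypothetical ``test_examples.py`` module from the surrounding example and abridging everything except the summary), ``pytest -rfs`` would restrict the short summary to the failed and skipped tests:

.. code-block:: pytest

    $ pytest -rfs
    ...
    =========================== short test summary info ============================
    FAIL test_examples.py::test_fail
    SKIP [1] test_examples.py:21: Example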
@@ -208,22 +183,9 @@ More than one character can be used, so for example to only see failed and skipp =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 2 items - - test_examples.py Fs [100%] - - =================================== FAILURES =================================== - __________________________________ test_fail ___________________________________ + collected 0 items - def test_fail(): - > assert 0 - E assert 0 - - test_examples.py:14: AssertionError - =========================== short test summary info ============================ - FAIL test_examples.py::test_fail - SKIP [1] test_examples.py:21: Example - ===================== 1 failed, 1 skipped in 0.09 seconds ====================== + ======================= no tests ran in 0.12 seconds ======================= Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had captured output: @@ -234,18 +196,9 @@ captured output: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: - collected 2 items - - test_examples.py .. [100%] - =========================== short test summary info ============================ - PASSED test_examples.py::test_pass - PASSED test_examples.py::test_pass_with_output - - ==================================== PASSES ==================================== - ____________________________ test_pass_with_output _____________________________ - ----------------------------- Captured stdout call ----------------------------- - Passing test - =========================== 2 passed in 0.04 seconds =========================== + collected 0 items + + ======================= no tests ran in 0.12 seconds ======================= .. _pdb-option: From e380d4306bcc56c441255c4a1d92878f4b553fce Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 15:31:42 -0200 Subject: [PATCH 92/98] Anonimize attrs links --- CHANGELOG.rst | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 86d751b63fa..3860ff73557 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -157,7 +157,7 @@ Features will not issue the warning. -- `#3632 `_: Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `_ or `dataclasses `_ (Python 3.7+, `backported to 3.6 `_). +- `#3632 `_: Richer equality comparison introspection on ``AssertionError`` for objects created using `attrs `__ or `dataclasses `_ (Python 3.7+, `backported to 3.6 `__). - `#4278 `_: ``CACHEDIR.TAG`` files are now created inside cache directories. @@ -1983,7 +1983,7 @@ Bug Fixes Trivial/Internal Changes ------------------------ -- pytest now depends on `attrs `_ for internal +- pytest now depends on `attrs `__ for internal structures to ease code maintainability. 
(`#2641 `_) From 5a8e674e927970e2f35d93b0fd2f10ea9c81d287 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 17:16:52 +0000 Subject: [PATCH 93/98] Add dataclasses as a regendoc dependency --- tox.ini | 1 + 1 file changed, 1 insertion(+) diff --git a/tox.ini b/tox.ini index 7a69000c817..3d5c2fd56ef 100644 --- a/tox.ini +++ b/tox.ini @@ -148,6 +148,7 @@ deps = sphinx PyYAML regendoc>=0.6.1 + dataclasses whitelist_externals = rm make From d0e9b4812f083289cf14a581ab05da06d57d7174 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 17:32:16 +0000 Subject: [PATCH 94/98] Regendocs --- doc/en/assert.rst | 30 +++--- doc/en/cache.rst | 58 ++++++----- doc/en/capture.rst | 14 +-- doc/en/doctest.rst | 8 +- doc/en/example/markers.rst | 144 +++++++++++++++++----------- doc/en/example/nonpython.rst | 32 ++++--- doc/en/example/parametrize.rst | 78 ++++++++------- doc/en/example/pythoncollection.rst | 18 ++-- doc/en/example/reportingdemo.rst | 128 ++++++++++++++----------- doc/en/example/simple.rst | 144 ++++++++++++++++------------ doc/en/fixture.rst | 88 +++++++++-------- doc/en/getting-started.rst | 30 +++--- doc/en/index.rst | 12 ++- doc/en/parametrize.rst | 32 ++++--- doc/en/skipping.rst | 10 +- doc/en/tmpdir.rst | 24 +++-- doc/en/unittest.rst | 16 ++-- doc/en/usage.rst | 20 ++-- doc/en/warnings.rst | 20 ++-- doc/en/writing_plugins.rst | 10 +- 20 files changed, 532 insertions(+), 384 deletions(-) diff --git a/doc/en/assert.rst b/doc/en/assert.rst index b13a071f698..7f422af1f55 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -27,15 +27,17 @@ you will see the return value of the function call: .. code-block:: pytest $ pytest test_assert1.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_assert1.py F [100%] + test_assert1.py F [100%] - ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + ====================================== FAILURES ====================================== + ___________________________________ test_function ____________________________________ def test_function(): > assert f() == 4 @@ -43,7 +45,7 @@ you will see the return value of the function call: E + where 3 = f() test_assert1.py:5: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== ``pytest`` has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -171,15 +173,17 @@ if you run this module: .. 
code-block:: pytest $ pytest test_assert2.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_assert2.py F [100%] + test_assert2.py F [100%] - ================================= FAILURES ================================= - ___________________________ test_set_comparison ____________________________ + ====================================== FAILURES ====================================== + ________________________________ test_set_comparison _________________________________ def test_set_comparison(): set1 = set("1308") @@ -193,7 +197,7 @@ if you run this module: E Use -v to get the full diff test_assert2.py:5: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== Special comparisons are done for a number of cases: @@ -243,9 +247,9 @@ the conftest file: .. code-block:: pytest $ pytest -q test_foocompare.py - F [100%] - ================================= FAILURES ================================= - _______________________________ test_compare _______________________________ + F [100%] + ====================================== FAILURES ====================================== + ____________________________________ test_compare ____________________________________ def test_compare(): f1 = Foo(1) diff --git a/doc/en/cache.rst b/doc/en/cache.rst index ba9d87a5fa8..1814d386d2b 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -48,9 +48,9 @@ If you run this for the first time you will see two failures: .. code-block:: pytest $ pytest -q - .................F.......F........................ [100%] - ================================= FAILURES ================================= - _______________________________ test_num[17] _______________________________ + .................F.......F........................ [100%] + ====================================== FAILURES ====================================== + ____________________________________ test_num[17] ____________________________________ i = 17 @@ -61,7 +61,7 @@ If you run this for the first time you will see two failures: E Failed: bad luck test_50.py:6: Failed - _______________________________ test_num[25] _______________________________ + ____________________________________ test_num[25] ____________________________________ i = 25 @@ -79,16 +79,18 @@ If you then run it with ``--lf``: .. 
code-block:: pytest $ pytest --lf - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures - test_50.py FF [100%] + test_50.py FF [100%] - ================================= FAILURES ================================= - _______________________________ test_num[17] _______________________________ + ====================================== FAILURES ====================================== + ____________________________________ test_num[17] ____________________________________ i = 17 @@ -99,7 +101,7 @@ If you then run it with ``--lf``: E Failed: bad luck test_50.py:6: Failed - _______________________________ test_num[25] _______________________________ + ____________________________________ test_num[25] ____________________________________ i = 25 @@ -110,7 +112,7 @@ If you then run it with ``--lf``: E Failed: bad luck test_50.py:6: Failed - ================= 2 failed, 48 deselected in 0.12 seconds ================== + ====================== 2 failed, 48 deselected in 0.12 seconds ======================= You have run only the two failing test from the last run, while 48 tests have not been run ("deselected"). @@ -122,16 +124,18 @@ of ``FF`` and dots): .. code-block:: pytest $ pytest --ff - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 50 items run-last-failure: rerun previous 2 failures first - test_50.py FF................................................ [100%] + test_50.py FF................................................ [100%] - ================================= FAILURES ================================= - _______________________________ test_num[17] _______________________________ + ====================================== FAILURES ====================================== + ____________________________________ test_num[17] ____________________________________ i = 17 @@ -142,7 +146,7 @@ of ``FF`` and dots): E Failed: bad luck test_50.py:6: Failed - _______________________________ test_num[25] _______________________________ + ____________________________________ test_num[25] ____________________________________ i = 25 @@ -153,7 +157,7 @@ of ``FF`` and dots): E Failed: bad luck test_50.py:6: Failed - =================== 2 failed, 48 passed in 0.12 seconds ==================== + ======================== 2 failed, 48 passed in 0.12 seconds ========================= .. _`config.cache`: @@ -205,9 +209,9 @@ If you run this command for the first time, you can see the print statement: .. 
code-block:: pytest $ pytest -q - F [100%] - ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + F [100%] + ====================================== FAILURES ====================================== + ___________________________________ test_function ____________________________________ mydata = 42 @@ -216,7 +220,7 @@ If you run this command for the first time, you can see the print statement: E assert 42 == 23 test_caching.py:17: AssertionError - -------------------------- Captured stdout setup --------------------------- + ------------------------------- Captured stdout setup -------------------------------- running expensive computation... 1 failed in 0.12 seconds @@ -226,9 +230,9 @@ the cache and nothing will be printed: .. code-block:: pytest $ pytest -q - F [100%] - ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + F [100%] + ====================================== FAILURES ====================================== + ___________________________________ test_function ____________________________________ mydata = 42 @@ -251,11 +255,13 @@ You can always peek at the content of the cache using the .. code-block:: pytest $ pytest --cache-show - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y cachedir: $REGENDOC_TMPDIR/.pytest_cache - ------------------------------- cache values ------------------------------- + ------------------------------------ cache values ------------------------------------ cache/lastfailed contains: {'test_caching.py::test_function': True} cache/nodeids contains: @@ -265,7 +271,7 @@ You can always peek at the content of the cache using the example/value contains: 42 - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ Clearing Cache content ------------------------------- diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 488b2b8746f..15ad75910c2 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -66,24 +66,26 @@ of the failing function and hide the other one: .. 
code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_module.py .F [100%] + test_module.py .F [100%] - ================================= FAILURES ================================= - ________________________________ test_func2 ________________________________ + ====================================== FAILURES ====================================== + _____________________________________ test_func2 _____________________________________ def test_func2(): > assert False E assert False test_module.py:9: AssertionError - -------------------------- Captured stdout setup --------------------------- + ------------------------------- Captured stdout setup -------------------------------- setting up - ==================== 1 failed, 1 passed in 0.12 seconds ==================== + ========================= 1 failed, 1 passed in 0.12 seconds ========================= Accessing captured output from a test function --------------------------------------------------- diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 125ed3aa704..c861ede8a02 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -63,14 +63,16 @@ then you can just invoke ``pytest`` without command line options: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + plugins: hypothesis-3.x.y collected 1 item - mymodule.py . [100%] + mymodule.py . [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================== 1 passed in 0.12 seconds ============================== It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 9d325c30e58..b27a4fcb27e 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -32,32 +32,36 @@ You can then restrict a test run to only run tests marked with ``webtest``: .. code-block:: pytest $ pytest -v -m webtest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 4 items / 3 deselected - test_server.py::test_send_http PASSED [100%] + test_server.py::test_send_http PASSED [100%] - ================== 1 passed, 3 deselected in 0.12 seconds ================== + ======================= 1 passed, 3 deselected in 0.12 seconds ======================= Or the inverse, running all tests except the webtest ones: .. 
code-block:: pytest $ pytest -v -m "not webtest" - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 4 items / 1 deselected - test_server.py::test_something_quick PASSED [ 33%] - test_server.py::test_another PASSED [ 66%] - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::test_something_quick PASSED [ 33%] + test_server.py::test_another PASSED [ 66%] + test_server.py::TestClass::test_method PASSED [100%] - ================== 3 passed, 1 deselected in 0.12 seconds ================== + ======================= 3 passed, 1 deselected in 0.12 seconds ======================= Selecting tests based on their node ID -------------------------------------- @@ -69,46 +73,52 @@ tests based on their module, class, method, or function name: .. code-block:: pytest $ pytest -v test_server.py::TestClass::test_method - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 1 item - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::TestClass::test_method PASSED [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================== 1 passed in 0.12 seconds ============================== You can also select on the class: .. code-block:: pytest $ pytest -v test_server.py::TestClass - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 1 item - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::TestClass::test_method PASSED [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================== 1 passed in 0.12 seconds ============================== Or select multiple nodes: .. code-block:: pytest $ pytest -v test_server.py::TestClass test_server.py::test_send_http - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... 
collected 2 items - test_server.py::TestClass::test_method PASSED [ 50%] - test_server.py::test_send_http PASSED [100%] + test_server.py::TestClass::test_method PASSED [ 50%] + test_server.py::test_send_http PASSED [100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================== 2 passed in 0.12 seconds ============================== .. _node-id: @@ -139,48 +149,54 @@ select tests based on their names: .. code-block:: pytest $ pytest -v -k http # running with the above defined example module - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 4 items / 3 deselected - test_server.py::test_send_http PASSED [100%] + test_server.py::test_send_http PASSED [100%] - ================== 1 passed, 3 deselected in 0.12 seconds ================== + ======================= 1 passed, 3 deselected in 0.12 seconds ======================= And you can also run all tests except the ones that match the keyword: .. code-block:: pytest $ pytest -k "not send_http" -v - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 4 items / 1 deselected - test_server.py::test_something_quick PASSED [ 33%] - test_server.py::test_another PASSED [ 66%] - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::test_something_quick PASSED [ 33%] + test_server.py::test_another PASSED [ 66%] + test_server.py::TestClass::test_method PASSED [100%] - ================== 3 passed, 1 deselected in 0.12 seconds ================== + ======================= 3 passed, 1 deselected in 0.12 seconds ======================= Or to select "http" and "quick" tests: .. code-block:: pytest $ pytest -k "http or quick" -v - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 4 items / 2 deselected - test_server.py::test_send_http PASSED [ 50%] - test_server.py::test_something_quick PASSED [100%] + test_server.py::test_send_http PASSED [ 50%] + test_server.py::test_something_quick PASSED [100%] - ================== 2 passed, 2 deselected in 0.12 seconds ================== + ======================= 2 passed, 2 deselected in 0.12 seconds ======================= .. 
note:: @@ -216,6 +232,8 @@ You can ask which markers exist for your test suite - the list includes our just $ pytest --markers @pytest.mark.webtest: mark a test as a webtest. + @pytest.mark.hypothesis: Tests which use hypothesis. + @pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. @@ -363,34 +381,40 @@ the test needs: .. code-block:: pytest $ pytest -E stage2 - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_someenv.py s [100%] + test_someenv.py s [100%] - ======================== 1 skipped in 0.12 seconds ========================= + ============================= 1 skipped in 0.12 seconds ============================== and here is one that specifies exactly the environment needed: .. code-block:: pytest $ pytest -E stage1 - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_someenv.py . [100%] + test_someenv.py . [100%] - ========================= 1 passed in 0.12 seconds ========================= + ============================== 1 passed in 0.12 seconds ============================== The ``--markers`` option always gives you a list of available markers:: $ pytest --markers @pytest.mark.env(name): mark test to run only on named environment + @pytest.mark.hypothesis: Tests which use hypothesis. + @pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. @@ -544,30 +568,34 @@ then you will see two tests skipped and two executed tests as expected: .. code-block:: pytest $ pytest -rs # this option reports skip reasons - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items - test_plat.py s.s. [100%] - ========================= short test summary info ========================== + test_plat.py s.s. 
[100%] + ============================== short test summary info =============================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - =================== 2 passed, 2 skipped in 0.12 seconds ==================== + ======================== 2 passed, 2 skipped in 0.12 seconds ========================= Note that if you specify a platform via the marker-command line option like this: .. code-block:: pytest $ pytest -m linux - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items / 3 deselected - test_plat.py . [100%] + test_plat.py . [100%] - ================== 1 passed, 3 deselected in 0.12 seconds ================== + ======================= 1 passed, 3 deselected in 0.12 seconds ======================= then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -613,47 +641,51 @@ We can now use the ``-m option`` to select one set: .. code-block:: pytest $ pytest -m interface --tb=short - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items / 2 deselected - test_module.py FF [100%] + test_module.py FF [100%] - ================================= FAILURES ================================= - __________________________ test_interface_simple ___________________________ + ====================================== FAILURES ====================================== + _______________________________ test_interface_simple ________________________________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - __________________________ test_interface_complex __________________________ + _______________________________ test_interface_complex _______________________________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ================== 2 failed, 2 deselected in 0.12 seconds ================== + ======================= 2 failed, 2 deselected in 0.12 seconds ======================= or to select both "event" and "interface" tests: .. 
code-block:: pytest $ pytest -m "interface or event" --tb=short - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items / 1 deselected - test_module.py FFF [100%] + test_module.py FFF [100%] - ================================= FAILURES ================================= - __________________________ test_interface_simple ___________________________ + ====================================== FAILURES ====================================== + _______________________________ test_interface_simple ________________________________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - __________________________ test_interface_complex __________________________ + _______________________________ test_interface_complex _______________________________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ____________________________ test_event_simple _____________________________ + _________________________________ test_event_simple __________________________________ test_module.py:9: in test_event_simple assert 0 E assert 0 - ================== 3 failed, 1 deselected in 0.12 seconds ================== + ======================= 3 failed, 1 deselected in 0.12 seconds ======================= diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index eba8279f38d..1581b8672b1 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -28,19 +28,21 @@ now execute the test specification: .. code-block:: pytest nonpython $ pytest test_simple.yml - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_simple.yml F. [100%] + test_simple.yml F. [100%] - ================================= FAILURES ================================= - ______________________________ usecase: hello ______________________________ + ====================================== FAILURES ====================================== + ___________________________________ usecase: hello ___________________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ==================== 1 failed, 1 passed in 0.12 seconds ==================== + ========================= 1 failed, 1 passed in 0.12 seconds ========================= .. regendoc:wipe @@ -62,21 +64,23 @@ consulted when reporting in ``verbose`` mode: .. 
code-block:: pytest nonpython $ pytest -v - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: + plugins: hypothesis-3.x.y collecting ... collected 2 items - test_simple.yml::hello FAILED [ 50%] - test_simple.yml::ok PASSED [100%] + test_simple.yml::hello FAILED [ 50%] + test_simple.yml::ok PASSED [100%] - ================================= FAILURES ================================= - ______________________________ usecase: hello ______________________________ + ====================================== FAILURES ====================================== + ___________________________________ usecase: hello ___________________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ==================== 1 failed, 1 passed in 0.12 seconds ==================== + ========================= 1 failed, 1 passed in 0.12 seconds ========================= .. regendoc:wipe @@ -86,13 +90,15 @@ interesting to just look at the collection tree: .. code-block:: pytest nonpython $ pytest --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: + plugins: hypothesis-3.x.y collected 2 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 92756e492e5..76cb688678f 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -47,7 +47,7 @@ This means that we only run 2 tests if we do not pass ``--all``: .. code-block:: pytest $ pytest -q test_compute.py - .. [100%] + .. [100%] 2 passed in 0.12 seconds We run only two computations, so we see two dots. @@ -56,9 +56,9 @@ let's run the full monty: .. code-block:: pytest $ pytest -q --all - ....F [100%] - ================================= FAILURES ================================= - _____________________________ test_compute[4] ______________________________ + ....F [100%] + ====================================== FAILURES ====================================== + __________________________________ test_compute[4] ___________________________________ param1 = 4 @@ -143,9 +143,11 @@ objects, they are still using the default pytest representation: .. 
code-block:: pytest $ pytest test_time.py --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 8 items @@ -157,7 +159,7 @@ objects, they are still using the default pytest representation: - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs together with the actual data, instead of listing them separately. @@ -201,23 +203,27 @@ this is a fully self-contained example which you can run with: .. code-block:: pytest $ pytest test_scenarios.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items - test_scenarios.py .... [100%] + test_scenarios.py .... [100%] - ========================= 4 passed in 0.12 seconds ========================= + ============================== 4 passed in 0.12 seconds ============================== If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function: .. code-block:: pytest $ pytest --collect-only test_scenarios.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items @@ -226,7 +232,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. With pytest-2.3 this leads to a @@ -281,24 +287,26 @@ Let's first see how it looks like at collection time: .. code-block:: pytest $ pytest test_backends.py --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ And then when we run the test: .. 
code-block:: pytest $ pytest -q test_backends.py - .F [100%] - ================================= FAILURES ================================= - _________________________ test_db_initialized[d2] __________________________ + .F [100%] + ====================================== FAILURES ====================================== + ______________________________ test_db_initialized[d2] _______________________________ db = @@ -346,14 +354,16 @@ The result of this test will be successful: .. code-block:: pytest $ pytest test_indirect_list.py --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ .. regendoc:wipe @@ -397,9 +407,9 @@ argument sets to use for each test function. Let's run it: .. code-block:: pytest $ pytest -q - F.. [100%] - ================================= FAILURES ================================= - ________________________ TestClass.test_equals[1-2] ________________________ + F.. [100%] + ====================================== FAILURES ====================================== + _____________________________ TestClass.test_equals[1-2] _____________________________ self = , a = 1, b = 2 @@ -429,8 +439,8 @@ Running it results in some skips if we don't have all the python interpreters in .. code-block:: pytest . $ pytest -rs -q multipython.py - ...sss...sssssssss...sss... [100%] - ========================= short test summary info ========================== + ...sss...sssssssss...sss... [100%] + ============================== short test summary info =============================== SKIP [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.4' not found 12 passed, 15 skipped in 0.12 seconds @@ -480,16 +490,18 @@ If you run this with reporting for skips enabled: .. code-block:: pytest $ pytest -rs test_module.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_module.py .s [100%] - ========================= short test summary info ========================== + test_module.py .s [100%] + ============================== short test summary info =============================== SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - =================== 1 passed, 1 skipped in 0.12 seconds ==================== + ======================== 1 passed, 1 skipped in 0.12 seconds ========================= You'll see that we don't have an ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: @@ -537,17 +549,19 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker: .. 
code-block:: pytest $ pytest -v -m basic - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 17 items / 14 deselected - test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%] - test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%] - test_pytest_param_example.py::test_eval[basic_6*9] xfail [100%] + test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%] + test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%] + test_pytest_param_example.py::test_eval[basic_6*9] xfail [100%] - ============ 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ============ + ================= 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ================= As the result: diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 394924e2d85..bc7e0c0d22d 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -130,16 +130,18 @@ The test collection would look like this: .. code-block:: pytest $ pytest --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + plugins: hypothesis-3.x.y collected 2 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ You can check for multiple glob patterns by adding a space between the patterns:: @@ -185,9 +187,11 @@ You can always peek at the collection tree without running tests like this: .. code-block:: pytest . $ pytest --collect-only pythoncollection.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/CWD/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + plugins: hypothesis-3.x.y collected 3 items @@ -195,7 +199,7 @@ You can always peek at the collection tree without running tests like this: - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ .. _customizing-test-collection: @@ -257,9 +261,11 @@ file will be left out: .. 
code-block:: pytest $ pytest --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 2f8c25f02a3..d99a06725e1 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -12,15 +12,17 @@ get on the terminal - we are working on that): .. code-block:: pytest assertion $ pytest failure_demo.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/assertion/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/assertion, inifile: + plugins: hypothesis-3.x.y collected 44 items - failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - ================================= FAILURES ================================= - ___________________________ test_generative[3-6] ___________________________ + ====================================== FAILURES ====================================== + ________________________________ test_generative[3-6] ________________________________ param1 = 3, param2 = 6 @@ -30,7 +32,7 @@ get on the terminal - we are working on that): E assert (3 * 2) < 6 failure_demo.py:22: AssertionError - _________________________ TestFailing.test_simple __________________________ + ______________________________ TestFailing.test_simple _______________________________ self = @@ -47,7 +49,7 @@ get on the terminal - we are working on that): E + and 43 = .g at 0xdeadbeef>() failure_demo.py:33: AssertionError - ____________________ TestFailing.test_simple_multiline _____________________ + _________________________ TestFailing.test_simple_multiline __________________________ self = @@ -55,7 +57,7 @@ get on the terminal - we are working on that): > otherfunc_multi(42, 6 * 9) failure_demo.py:36: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 42, b = 54 @@ -64,7 +66,7 @@ get on the terminal - we are working on that): E assert 42 == 54 failure_demo.py:17: AssertionError - ___________________________ TestFailing.test_not ___________________________ + ________________________________ TestFailing.test_not ________________________________ self = @@ -77,7 +79,7 @@ get on the terminal - we are working on that): E + where 42 = .f at 0xdeadbeef>() failure_demo.py:42: AssertionError - _________________ TestSpecialisedExplanations.test_eq_text _________________ + ______________________ TestSpecialisedExplanations.test_eq_text ______________________ self = @@ -88,7 +90,7 @@ get on the terminal - we are working on that): E + eggs failure_demo.py:47: AssertionError - 
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________ + __________________ TestSpecialisedExplanations.test_eq_similar_text __________________ self = @@ -101,7 +103,7 @@ get on the terminal - we are working on that): E ? ^ failure_demo.py:50: AssertionError - ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ + _________________ TestSpecialisedExplanations.test_eq_multiline_text _________________ self = @@ -114,7 +116,7 @@ get on the terminal - we are working on that): E bar failure_demo.py:53: AssertionError - ______________ TestSpecialisedExplanations.test_eq_long_text _______________ + ___________________ TestSpecialisedExplanations.test_eq_long_text ____________________ self = @@ -131,7 +133,7 @@ get on the terminal - we are working on that): E ? ^ failure_demo.py:58: AssertionError - _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ + ______________ TestSpecialisedExplanations.test_eq_long_text_multiline _______________ self = @@ -151,7 +153,7 @@ get on the terminal - we are working on that): E ...Full output truncated (7 lines hidden), use '-vv' to show failure_demo.py:63: AssertionError - _________________ TestSpecialisedExplanations.test_eq_list _________________ + ______________________ TestSpecialisedExplanations.test_eq_list ______________________ self = @@ -162,7 +164,7 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:66: AssertionError - ______________ TestSpecialisedExplanations.test_eq_list_long _______________ + ___________________ TestSpecialisedExplanations.test_eq_list_long ____________________ self = @@ -175,7 +177,7 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:71: AssertionError - _________________ TestSpecialisedExplanations.test_eq_dict _________________ + ______________________ TestSpecialisedExplanations.test_eq_dict ______________________ self = @@ -193,7 +195,7 @@ get on the terminal - we are working on that): E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:74: AssertionError - _________________ TestSpecialisedExplanations.test_eq_set __________________ + ______________________ TestSpecialisedExplanations.test_eq_set _______________________ self = @@ -211,7 +213,7 @@ get on the terminal - we are working on that): E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:77: AssertionError - _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ + __________________ TestSpecialisedExplanations.test_eq_longer_list ___________________ self = @@ -222,7 +224,7 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:80: AssertionError - _________________ TestSpecialisedExplanations.test_in_list _________________ + ______________________ TestSpecialisedExplanations.test_in_list ______________________ self = @@ -231,7 +233,7 @@ get on the terminal - we are working on that): E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:83: AssertionError - __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ + _______________ TestSpecialisedExplanations.test_not_in_text_multiline _______________ self = @@ -250,7 +252,7 @@ get on the terminal - we are working on that): E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:87: AssertionError - ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ + ________________ 
TestSpecialisedExplanations.test_not_in_text_single _________________ self = @@ -263,7 +265,7 @@ get on the terminal - we are working on that): E ? +++ failure_demo.py:91: AssertionError - _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ + ______________ TestSpecialisedExplanations.test_not_in_text_single_long ______________ self = @@ -276,7 +278,7 @@ get on the terminal - we are working on that): E ? +++ failure_demo.py:95: AssertionError - ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ + ___________ TestSpecialisedExplanations.test_not_in_text_single_long_term ____________ self = @@ -289,16 +291,28 @@ get on the terminal - we are working on that): E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:99: AssertionError - ______________ TestSpecialisedExplanations.test_eq_dataclass _______________ + ___________________ TestSpecialisedExplanations.test_eq_dataclass ____________________ self = def test_eq_dataclass(self): - > from dataclasses import dataclass - E ModuleNotFoundError: No module named 'dataclasses' + from dataclasses import dataclass - failure_demo.py:102: ModuleNotFoundError - ________________ TestSpecialisedExplanations.test_eq_attrs _________________ + @dataclass + class Foo(object): + a: int + b: str + + left = Foo(1, "b") + right = Foo(1, "c") + > assert left == right + E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c') + E Omitting 1 identical items, use -vv to show + E Differing attributes: + E b: 'b' != 'c' + + failure_demo.py:111: AssertionError + _____________________ TestSpecialisedExplanations.test_eq_attrs ______________________ self = @@ -319,7 +333,7 @@ get on the terminal - we are working on that): E b: 'b' != 'c' failure_demo.py:123: AssertionError - ______________________________ test_attribute ______________________________ + ___________________________________ test_attribute ___________________________________ def test_attribute(): class Foo(object): @@ -331,7 +345,7 @@ get on the terminal - we are working on that): E + where 1 = .Foo object at 0xdeadbeef>.b failure_demo.py:131: AssertionError - _________________________ test_attribute_instance __________________________ + ______________________________ test_attribute_instance _______________________________ def test_attribute_instance(): class Foo(object): @@ -343,7 +357,7 @@ get on the terminal - we are working on that): E + where .Foo object at 0xdeadbeef> = .Foo'>() failure_demo.py:138: AssertionError - __________________________ test_attribute_failure __________________________ + _______________________________ test_attribute_failure _______________________________ def test_attribute_failure(): class Foo(object): @@ -356,7 +370,7 @@ get on the terminal - we are working on that): > assert i.b == 2 failure_demo.py:149: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = .Foo object at 0xdeadbeef> @@ -365,7 +379,7 @@ get on the terminal - we are working on that): E Exception: Failed to get attrib failure_demo.py:144: Exception - _________________________ test_attribute_multiple __________________________ + ______________________________ test_attribute_multiple _______________________________ def test_attribute_multiple(): class Foo(object): @@ -382,7 +396,7 @@ get on the terminal - we are working on that): E + where .Bar object at 0xdeadbeef> = .Bar'>() 
failure_demo.py:159: AssertionError - __________________________ TestRaises.test_raises __________________________ + _______________________________ TestRaises.test_raises _______________________________ self = @@ -392,7 +406,7 @@ get on the terminal - we are working on that): E ValueError: invalid literal for int() with base 10: 'qwe' failure_demo.py:169: ValueError - ______________________ TestRaises.test_raises_doesnt _______________________ + ___________________________ TestRaises.test_raises_doesnt ____________________________ self = @@ -401,7 +415,7 @@ get on the terminal - we are working on that): E Failed: DID NOT RAISE failure_demo.py:172: Failed - __________________________ TestRaises.test_raise ___________________________ + _______________________________ TestRaises.test_raise ________________________________ self = @@ -410,7 +424,7 @@ get on the terminal - we are working on that): E ValueError: demo error failure_demo.py:175: ValueError - ________________________ TestRaises.test_tupleerror ________________________ + _____________________________ TestRaises.test_tupleerror _____________________________ self = @@ -419,7 +433,7 @@ get on the terminal - we are working on that): E ValueError: not enough values to unpack (expected 2, got 1) failure_demo.py:178: ValueError - ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ + ___________ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ___________ self = @@ -430,9 +444,9 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:183: TypeError - --------------------------- Captured stdout call --------------------------- + -------------------------------- Captured stdout call -------------------------------- items is [1, 2, 3] - ________________________ TestRaises.test_some_error ________________________ + _____________________________ TestRaises.test_some_error _____________________________ self = @@ -441,7 +455,7 @@ get on the terminal - we are working on that): E NameError: name 'namenotexi' is not defined failure_demo.py:186: NameError - ____________________ test_dynamic_compile_shows_nicely _____________________ + _________________________ test_dynamic_compile_shows_nicely __________________________ def test_dynamic_compile_shows_nicely(): import imp @@ -456,14 +470,14 @@ get on the terminal - we are working on that): > module.foo() failure_demo.py:204: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E AssertionError <0-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:201>:2: AssertionError - ____________________ TestMoreErrors.test_complex_error _____________________ + _________________________ TestMoreErrors.test_complex_error __________________________ self = @@ -477,10 +491,10 @@ get on the terminal - we are working on that): > somefunc(f(), g()) failure_demo.py:215: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:13: in somefunc otherfunc(x, y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 44, b = 43 @@ -489,7 +503,7 @@ get on the terminal - we are working on that): E assert 44 == 43 failure_demo.py:9: AssertionError - 
___________________ TestMoreErrors.test_z1_unpack_error ____________________ + ________________________ TestMoreErrors.test_z1_unpack_error _________________________ self = @@ -499,7 +513,7 @@ get on the terminal - we are working on that): E ValueError: not enough values to unpack (expected 2, got 0) failure_demo.py:219: ValueError - ____________________ TestMoreErrors.test_z2_type_error _____________________ + _________________________ TestMoreErrors.test_z2_type_error __________________________ self = @@ -509,7 +523,7 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:223: TypeError - ______________________ TestMoreErrors.test_startswith ______________________ + ___________________________ TestMoreErrors.test_startswith ___________________________ self = @@ -522,7 +536,7 @@ get on the terminal - we are working on that): E + where = '123'.startswith failure_demo.py:228: AssertionError - __________________ TestMoreErrors.test_startswith_nested ___________________ + _______________________ TestMoreErrors.test_startswith_nested ________________________ self = @@ -541,7 +555,7 @@ get on the terminal - we are working on that): E + and '456' = .g at 0xdeadbeef>() failure_demo.py:237: AssertionError - _____________________ TestMoreErrors.test_global_func ______________________ + __________________________ TestMoreErrors.test_global_func ___________________________ self = @@ -552,7 +566,7 @@ get on the terminal - we are working on that): E + where 43 = globf(42) failure_demo.py:240: AssertionError - _______________________ TestMoreErrors.test_instance _______________________ + ____________________________ TestMoreErrors.test_instance ____________________________ self = @@ -563,7 +577,7 @@ get on the terminal - we are working on that): E + where 42 = .x failure_demo.py:244: AssertionError - _______________________ TestMoreErrors.test_compare ________________________ + ____________________________ TestMoreErrors.test_compare _____________________________ self = @@ -573,7 +587,7 @@ get on the terminal - we are working on that): E + where 11 = globf(10) failure_demo.py:247: AssertionError - _____________________ TestMoreErrors.test_try_finally ______________________ + __________________________ TestMoreErrors.test_try_finally ___________________________ self = @@ -584,7 +598,7 @@ get on the terminal - we are working on that): E assert 1 == 0 failure_demo.py:252: AssertionError - ___________________ TestCustomAssertMsg.test_single_line ___________________ + ________________________ TestCustomAssertMsg.test_single_line ________________________ self = @@ -599,7 +613,7 @@ get on the terminal - we are working on that): E + where 1 = .A'>.a failure_demo.py:263: AssertionError - ____________________ TestCustomAssertMsg.test_multiline ____________________ + _________________________ TestCustomAssertMsg.test_multiline _________________________ self = @@ -618,7 +632,7 @@ get on the terminal - we are working on that): E + where 1 = .A'>.a failure_demo.py:270: AssertionError - ___________________ TestCustomAssertMsg.test_custom_repr ___________________ + ________________________ TestCustomAssertMsg.test_custom_repr ________________________ self = @@ -640,4 +654,4 @@ get on the terminal - we are working on that): E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a failure_demo.py:283: AssertionError - ======================== 44 failed in 0.12 seconds ========================= + ============================= 44 failed in 0.12 seconds 
============================== diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 76a1ddc807e..26d5d6c4b60 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -48,9 +48,9 @@ Let's run this without supplying our new option: .. code-block:: pytest $ pytest -q test_sample.py - F [100%] - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ + F [100%] + ====================================== FAILURES ====================================== + ____________________________________ test_answer _____________________________________ cmdopt = 'type1' @@ -63,7 +63,7 @@ Let's run this without supplying our new option: E assert 0 test_sample.py:6: AssertionError - --------------------------- Captured stdout call --------------------------- + -------------------------------- Captured stdout call -------------------------------- first 1 failed in 0.12 seconds @@ -72,9 +72,9 @@ And now with supplying a command line option: .. code-block:: pytest $ pytest -q --cmdopt=type2 - F [100%] - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ + F [100%] + ====================================== FAILURES ====================================== + ____________________________________ test_answer _____________________________________ cmdopt = 'type2' @@ -87,7 +87,7 @@ And now with supplying a command line option: E assert 0 test_sample.py:6: AssertionError - --------------------------- Captured stdout call --------------------------- + -------------------------------- Captured stdout call -------------------------------- second 1 failed in 0.12 seconds @@ -126,12 +126,14 @@ directory with the above conftest.py: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ .. _`excontrolskip`: @@ -186,30 +188,34 @@ and when running it will see a skipped "slow" test: .. 
code-block:: pytest $ pytest -rs # "-rs" means report details on the little 's' - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_module.py .s [100%] - ========================= short test summary info ========================== + test_module.py .s [100%] + ============================== short test summary info =============================== SKIP [1] test_module.py:8: need --runslow option to run - =================== 1 passed, 1 skipped in 0.12 seconds ==================== + ======================== 1 passed, 1 skipped in 0.12 seconds ========================= Or run it including the ``slow`` marked test: .. code-block:: pytest $ pytest --runslow - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_module.py .. [100%] + test_module.py .. [100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================== 2 passed in 0.12 seconds ============================== Writing well integrated assertion helpers -------------------------------------------------- @@ -245,9 +251,9 @@ Let's run our little function: .. code-block:: pytest $ pytest -q test_checkconfig.py - F [100%] - ================================= FAILURES ================================= - ______________________________ test_something ______________________________ + F [100%] + ====================================== FAILURES ====================================== + ___________________________________ test_something ___________________________________ def test_something(): > checkconfig(42) @@ -344,13 +350,15 @@ which will add the string to the test header accordingly: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ .. regendoc:wipe @@ -372,27 +380,31 @@ which will add info only when run with "--v": .. 
code-block:: pytest $ pytest -v - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') info1: did you know that ... did you? rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ and nothing when run plainly: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ profiling test duration -------------------------- @@ -426,18 +438,20 @@ Now we can profile which test functions execute the slowest: .. code-block:: pytest $ pytest --durations=3 - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 3 items - test_some_are_slow.py ... [100%] + test_some_are_slow.py ... [100%] - ========================= slowest 3 test durations ========================= + ============================== slowest 3 test durations ============================== 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 0.10s call test_some_are_slow.py::test_funcfast - ========================= 3 passed in 0.12 seconds ========================= + ============================== 3 passed in 0.12 seconds ============================== incremental testing - test steps --------------------------------------------------- @@ -500,15 +514,17 @@ If we run this: .. code-block:: pytest $ pytest -rx - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 4 items - test_step.py .Fx. [100%] + test_step.py .Fx. 
[100%] - ================================= FAILURES ================================= - ____________________ TestUserHandling.test_modification ____________________ + ====================================== FAILURES ====================================== + _________________________ TestUserHandling.test_modification _________________________ self = @@ -517,10 +533,10 @@ If we run this: E assert 0 test_step.py:11: AssertionError - ========================= short test summary info ========================== + ============================== short test summary info =============================== XFAIL test_step.py::TestUserHandling::test_deletion reason: previous test failed (test_modification) - ============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds =============== + =================== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ==================== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". @@ -583,18 +599,20 @@ We can run this: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 7 items - test_step.py .Fx. [ 57%] - a/test_db.py F [ 71%] - a/test_db2.py F [ 85%] - b/test_error.py E [100%] + test_step.py .Fx. [ 57%] + a/test_db.py F [ 71%] + a/test_db2.py F [ 85%] + b/test_error.py E [100%] - ================================== ERRORS ================================== - _______________________ ERROR at setup of test_root ________________________ + ======================================= ERRORS ======================================= + ____________________________ ERROR at setup of test_root _____________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 def test_root(db): # no db here, will error out E fixture 'db' not found @@ -602,8 +620,8 @@ We can run this: > use 'pytest --fixtures [testpath]' for help on them. $REGENDOC_TMPDIR/b/test_error.py:1 - ================================= FAILURES ================================= - ____________________ TestUserHandling.test_modification ____________________ + ====================================== FAILURES ====================================== + _________________________ TestUserHandling.test_modification _________________________ self = @@ -612,7 +630,7 @@ We can run this: E assert 0 test_step.py:11: AssertionError - _________________________________ test_a1 __________________________________ + ______________________________________ test_a1 _______________________________________ db = @@ -622,7 +640,7 @@ We can run this: E assert 0 a/test_db.py:2: AssertionError - _________________________________ test_a2 __________________________________ + ______________________________________ test_a2 _______________________________________ db = @@ -632,7 +650,7 @@ We can run this: E assert 0 a/test_db2.py:2: AssertionError - ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== + =============== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds =============== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. 
We could of course @@ -696,15 +714,17 @@ and run them: .. code-block:: pytest $ pytest test_module.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_module.py FF [100%] + test_module.py FF [100%] - ================================= FAILURES ================================= - ________________________________ test_fail1 ________________________________ + ====================================== FAILURES ====================================== + _____________________________________ test_fail1 _____________________________________ tmpdir = local('PYTEST_TMPDIR/test_fail10') @@ -713,14 +733,14 @@ and run them: E assert 0 test_module.py:2: AssertionError - ________________________________ test_fail2 ________________________________ + _____________________________________ test_fail2 _____________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:6: AssertionError - ========================= 2 failed in 0.12 seconds ========================= + ============================== 2 failed in 0.12 seconds ============================== you will have a "failures" file which contains the failing test ids:: @@ -797,17 +817,19 @@ and run it: .. code-block:: pytest $ pytest -s test_module.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 3 items test_module.py Esetting up a test failed! test_module.py::test_setup_fails Fexecuting test failed test_module.py::test_call_fails F - ================================== ERRORS ================================== - ____________________ ERROR at setup of test_setup_fails ____________________ + ======================================= ERRORS ======================================= + _________________________ ERROR at setup of test_setup_fails _________________________ @pytest.fixture def other(): @@ -815,8 +837,8 @@ and run it: E assert 0 test_module.py:7: AssertionError - ================================= FAILURES ================================= - _____________________________ test_call_fails ______________________________ + ====================================== FAILURES ====================================== + __________________________________ test_call_fails ___________________________________ something = None @@ -825,14 +847,14 @@ and run it: E assert 0 test_module.py:15: AssertionError - ________________________________ test_fail2 ________________________________ + _____________________________________ test_fail2 _____________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:19: AssertionError - ==================== 2 failed, 1 error in 0.12 seconds ===================== + ========================= 2 failed, 1 error in 0.12 seconds ========================== You'll see that the fixture finalizers could use the precise reporting information. 
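For context on the ``simple.rst`` hunks above: the ``failures`` file mentioned there is produced by a ``conftest.py`` hook that this diff only exercises; the hook itself is not part of the hunk. A minimal sketch of such a hook (assuming the hookwrapper-style ``pytest_runtest_makereport`` API used in the pytest 4.x docs) could look like this:

.. code-block:: python

    # conftest.py -- illustrative sketch only; the actual example lives in
    # doc/en/example/simple.rst and may differ in detail.
    import os

    import pytest


    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        # Let pytest build the report for this test phase, then inspect it.
        outcome = yield
        rep = outcome.get_result()

        if rep.when == "call" and rep.failed:
            # Append the failing test id, producing the "failures" file
            # whose contents are shown in the regenerated output above.
            mode = "a" if os.path.exists("failures") else "w"
            with open("failures", mode) as f:
                f.write(rep.nodeid + "\n")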
diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index 4dd68f8e4ac..6aed3ca0802 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -71,15 +71,17 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: .. code-block:: pytest $ pytest test_smtpsimple.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_smtpsimple.py F [100%] + test_smtpsimple.py F [100%] - ================================= FAILURES ================================= - ________________________________ test_ehlo _________________________________ + ====================================== FAILURES ====================================== + _____________________________________ test_ehlo ______________________________________ smtp_connection = @@ -90,7 +92,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: E assert 0 test_smtpsimple.py:11: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== In the failure traceback we see that the test function was called with a ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -211,15 +213,17 @@ inspect what is going on and can now run the tests: .. code-block:: pytest $ pytest test_module.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_module.py FF [100%] + test_module.py FF [100%] - ================================= FAILURES ================================= - ________________________________ test_ehlo _________________________________ + ====================================== FAILURES ====================================== + _____________________________________ test_ehlo ______________________________________ smtp_connection = @@ -231,7 +235,7 @@ inspect what is going on and can now run the tests: E assert 0 test_module.py:6: AssertionError - ________________________________ test_noop _________________________________ + _____________________________________ test_noop ______________________________________ smtp_connection = @@ -242,7 +246,7 @@ inspect what is going on and can now run the tests: E assert 0 test_module.py:11: AssertionError - ========================= 2 failed in 0.12 seconds ========================= + ============================== 2 failed in 0.12 seconds ============================== You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp_connection`` object was passed into the @@ -491,14 +495,14 @@ Running it: .. 
code-block:: pytest $ pytest -qq --tb=short test_anothersmtp.py - F [100%] - ================================= FAILURES ================================= - ______________________________ test_showhelo _______________________________ + F [100%] + ====================================== FAILURES ====================================== + ___________________________________ test_showhelo ____________________________________ test_anothersmtp.py:5: in test_showhelo assert 0, smtp_connection.helo() E AssertionError: (250, b'mail.python.org') E assert 0 - ------------------------- Captured stdout teardown ------------------------- + ------------------------------ Captured stdout teardown ------------------------------ finalizing (mail.python.org) voila! The ``smtp_connection`` fixture function picked up our mail server name @@ -595,9 +599,9 @@ So let's just do another run: .. code-block:: pytest $ pytest -q test_module.py - FFFF [100%] - ================================= FAILURES ================================= - ________________________ test_ehlo[smtp.gmail.com] _________________________ + FFFF [100%] + ====================================== FAILURES ====================================== + _____________________________ test_ehlo[smtp.gmail.com] ______________________________ smtp_connection = @@ -609,7 +613,7 @@ So let's just do another run: E assert 0 test_module.py:6: AssertionError - ________________________ test_noop[smtp.gmail.com] _________________________ + _____________________________ test_noop[smtp.gmail.com] ______________________________ smtp_connection = @@ -620,7 +624,7 @@ So let's just do another run: E assert 0 test_module.py:11: AssertionError - ________________________ test_ehlo[mail.python.org] ________________________ + _____________________________ test_ehlo[mail.python.org] _____________________________ smtp_connection = @@ -631,9 +635,9 @@ So let's just do another run: E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING' test_module.py:5: AssertionError - -------------------------- Captured stdout setup --------------------------- + ------------------------------- Captured stdout setup -------------------------------- finalizing - ________________________ test_noop[mail.python.org] ________________________ + _____________________________ test_noop[mail.python.org] _____________________________ smtp_connection = @@ -644,7 +648,7 @@ So let's just do another run: E assert 0 test_module.py:11: AssertionError - ------------------------- Captured stdout teardown ------------------------- + ------------------------------ Captured stdout teardown ------------------------------ finalizing 4 failed in 0.12 seconds @@ -699,9 +703,11 @@ Running the above tests results in the following test IDs being used: .. 
code-block:: pytest $ pytest --collect-only - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 10 items @@ -717,7 +723,7 @@ Running the above tests results in the following test IDs being used: - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ .. _`fixture-parametrize-marks`: @@ -743,17 +749,19 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: .. code-block:: pytest $ pytest test_fixture_marks.py -v - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 3 items - test_fixture_marks.py::test_data[0] PASSED [ 33%] - test_fixture_marks.py::test_data[1] PASSED [ 66%] - test_fixture_marks.py::test_data[2] SKIPPED [100%] + test_fixture_marks.py::test_data[0] PASSED [ 33%] + test_fixture_marks.py::test_data[1] PASSED [ 66%] + test_fixture_marks.py::test_data[2] SKIPPED [100%] - =================== 2 passed, 1 skipped in 0.12 seconds ==================== + ======================== 2 passed, 1 skipped in 0.12 seconds ========================= .. _`interdependent fixtures`: @@ -788,16 +796,18 @@ Here we declare an ``app`` fixture which receives the previously defined .. code-block:: pytest $ pytest -v test_appsetup.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 2 items - test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] - test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%] + test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] + test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%] - ========================= 2 passed in 0.12 seconds ========================= + ============================== 2 passed in 0.12 seconds ============================== Due to the parametrization of ``smtp_connection``, the test will run twice with two different ``App`` instances and respective smtp servers. There is no @@ -859,10 +869,12 @@ Let's run the tests in verbose mode and with looking at the print-output: .. 
code-block:: pytest $ pytest -v -s test_module.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collecting ... collected 8 items test_module.py::test_0[1] SETUP otherarg 1 @@ -898,7 +910,7 @@ Let's run the tests in verbose mode and with looking at the print-output: TEARDOWN modarg mod2 - ========================= 8 passed in 0.12 seconds ========================= + ============================== 8 passed in 0.12 seconds ============================== You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. @@ -963,7 +975,7 @@ to verify our fixture is activated and the tests pass: .. code-block:: pytest $ pytest -q - .. [100%] + .. [100%] 2 passed in 0.12 seconds You can specify multiple fixtures like this: @@ -1064,7 +1076,7 @@ If we run it, we get two passing tests: .. code-block:: pytest $ pytest -q - .. [100%] + .. [100%] 2 passed in 0.12 seconds Here is how autouse fixtures work in other scopes: diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index 500fc3d93cc..31910fa0221 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -25,6 +25,8 @@ Install ``pytest`` $ pytest --version This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py + setuptools registered plugins: + hypothesis-3.x.y at $PYTHON_PREFIX/lib/python3.6/site-packages/hypothesis/extra/pytestplugin.py .. _`simpletest`: @@ -45,15 +47,17 @@ That’s it. You can now execute the test function: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_sample.py F [100%] + test_sample.py F [100%] - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ + ====================================== FAILURES ====================================== + ____________________________________ test_answer _____________________________________ def test_answer(): > assert func(3) == 5 @@ -61,7 +65,7 @@ That’s it. You can now execute the test function: E + where 4 = func(3) test_sample.py:5: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== This test returns a failure report because ``func(3)`` does not return ``5``. @@ -94,7 +98,7 @@ Execute the test function with “quiet” reporting mode: .. code-block:: pytest $ pytest -q test_sysexit.py - . [100%] + . [100%] 1 passed in 0.12 seconds Group multiple tests in a class @@ -117,9 +121,9 @@ Once you develop multiple tests, you may want to group them into a class. pytest .. 
code-block:: pytest $ pytest -q test_class.py - .F [100%] - ================================= FAILURES ================================= - ____________________________ TestClass.test_two ____________________________ + .F [100%] + ====================================== FAILURES ====================================== + _________________________________ TestClass.test_two _________________________________ self = @@ -149,9 +153,9 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look .. code-block:: pytest $ pytest -q test_tmpdir.py - F [100%] - ================================= FAILURES ================================= - _____________________________ test_needsfiles ______________________________ + F [100%] + ====================================== FAILURES ====================================== + __________________________________ test_needsfiles ___________________________________ tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') @@ -161,7 +165,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look E assert 0 test_tmpdir.py:3: AssertionError - --------------------------- Captured stdout call --------------------------- + -------------------------------- Captured stdout call -------------------------------- PYTEST_TMPDIR/test_needsfiles0 1 failed in 0.12 seconds diff --git a/doc/en/index.rst b/doc/en/index.rst index 7c201fbd7c5..3c9cb024138 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -27,15 +27,17 @@ To execute it: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_sample.py F [100%] + test_sample.py F [100%] - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ + ====================================== FAILURES ====================================== + ____________________________________ test_answer _____________________________________ def test_answer(): > assert inc(3) == 5 @@ -43,7 +45,7 @@ To execute it: E + where 4 = inc(3) test_sample.py:6: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See :ref:`Getting Started ` for more examples. diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index 099b531c2db..0808b08dfe6 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -55,15 +55,17 @@ them in turn: .. 
code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 3 items - test_expectation.py ..F [100%] + test_expectation.py ..F [100%] - ================================= FAILURES ================================= - ____________________________ test_eval[6*9-42] _____________________________ + ====================================== FAILURES ====================================== + _________________________________ test_eval[6*9-42] __________________________________ test_input = '6*9', expected = 42 @@ -78,7 +80,7 @@ them in turn: E + where 54 = eval('6*9') test_expectation.py:8: AssertionError - ==================== 1 failed, 2 passed in 0.12 seconds ==================== + ========================= 1 failed, 2 passed in 0.12 seconds ========================= As designed in this example, only one pair of input/output values fails the simple test function. And as usual with test function arguments, @@ -106,14 +108,16 @@ Let's run this: .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 3 items - test_expectation.py ..x [100%] + test_expectation.py ..x [100%] - =================== 2 passed, 1 xfailed in 0.12 seconds ==================== + ======================== 2 passed, 1 xfailed in 0.12 seconds ========================= The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. @@ -173,7 +177,7 @@ command line option and the parametrization of our test function:: If we now pass two stringinput values, our test will run twice:: $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py - .. [100%] + .. [100%] 2 passed in 0.12 seconds Let's also run with a stringinput that will lead to a failing test: @@ -181,9 +185,9 @@ Let's also run with a stringinput that will lead to a failing test: .. code-block:: pytest $ pytest -q --stringinput="!" test_strings.py - F [100%] - ================================= FAILURES ================================= - ___________________________ test_valid_string[!] ___________________________ + F [100%] + ====================================== FAILURES ====================================== + ________________________________ test_valid_string[!] ________________________________ stringinput = '!' @@ -205,8 +209,8 @@ list: .. 
code-block:: pytest $ pytest -q -rs test_strings.py - s [100%] - ========================= short test summary info ========================== + s [100%] + ============================== short test summary info =============================== SKIP [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 1 skipped in 0.12 seconds diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index ae1dc714996..0f207309058 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -328,13 +328,15 @@ Running it with the report-on-xfail option gives this output: .. code-block:: pytest example $ pytest -rx xfail_demo.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/example/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/example, inifile: + plugins: hypothesis-3.x.y collected 7 items - xfail_demo.py xxxxxxx [100%] - ========================= short test summary info ========================== + xfail_demo.py xxxxxxx [100%] + ============================== short test summary info =============================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] @@ -348,7 +350,7 @@ Running it with the report-on-xfail option gives this output: reason: reason XFAIL xfail_demo.py::test_hello7 - ======================== 7 xfailed in 0.12 seconds ========================= + ============================= 7 xfailed in 0.12 seconds ============================== .. _`skip/xfail with parametrize`: diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 8c21e17e582..5f7e98a84cc 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -40,15 +40,17 @@ Running this would result in a passed test except for the last .. code-block:: pytest $ pytest test_tmp_path.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_tmp_path.py F [100%] + test_tmp_path.py F [100%] - ================================= FAILURES ================================= - _____________________________ test_create_file _____________________________ + ====================================== FAILURES ====================================== + __________________________________ test_create_file __________________________________ tmp_path = PosixPath('PYTEST_TMPDIR/test_create_file0') @@ -63,7 +65,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmp_path.py:13: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== The ``tmp_path_factory`` fixture -------------------------------- @@ -102,15 +104,17 @@ Running this would result in a passed test except for the last .. 
code-block:: pytest $ pytest test_tmpdir.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_tmpdir.py F [100%] + test_tmpdir.py F [100%] - ================================= FAILURES ================================= - _____________________________ test_create_file _____________________________ + ====================================== FAILURES ====================================== + __________________________________ test_create_file __________________________________ tmpdir = local('PYTEST_TMPDIR/test_create_file0') @@ -123,7 +127,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.12 seconds ========================= + ============================== 1 failed in 0.12 seconds ============================== .. _`tmpdir factory example`: diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 34c8a35db23..fe7f2e5503a 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -127,15 +127,17 @@ the ``self.db`` values in the traceback: .. code-block:: pytest $ pytest test_unittest_db.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 2 items - test_unittest_db.py FF [100%] + test_unittest_db.py FF [100%] - ================================= FAILURES ================================= - ___________________________ MyTest.test_method1 ____________________________ + ====================================== FAILURES ====================================== + ________________________________ MyTest.test_method1 _________________________________ self = @@ -146,7 +148,7 @@ the ``self.db`` values in the traceback: E assert 0 test_unittest_db.py:9: AssertionError - ___________________________ MyTest.test_method2 ____________________________ + ________________________________ MyTest.test_method2 _________________________________ self = @@ -156,7 +158,7 @@ the ``self.db`` values in the traceback: E assert 0 test_unittest_db.py:12: AssertionError - ========================= 2 failed in 0.12 seconds ========================= + ============================== 2 failed in 0.12 seconds ============================== This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention @@ -206,7 +208,7 @@ Running this test module ...: .. code-block:: pytest $ pytest -q test_unittest_cleandir.py - . [100%] + . [100%] 1 passed in 0.12 seconds ... gives us one passed test because the ``initdir`` fixture function diff --git a/doc/en/usage.rst b/doc/en/usage.rst index bd9706c4fda..87171507de3 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -155,12 +155,14 @@ Example: .. 
code-block:: pytest $ pytest -ra - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ The ``-r`` options accepts a number of characters after it, with ``a`` used above meaning "all except passes". @@ -180,12 +182,14 @@ More than one character can be used, so for example to only see failed and skipp .. code-block:: pytest $ pytest -rfs - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had captured output: @@ -193,12 +197,14 @@ captured output: .. code-block:: pytest $ pytest -rpP - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 0 items - ======================= no tests ran in 0.12 seconds ======================= + ============================ no tests ran in 0.12 seconds ============================ .. _pdb-option: @@ -584,7 +590,7 @@ Running it will show that ``MyPlugin`` was added and its hook was invoked:: $ python myinvoke.py - . [100%]*** test run reporting finishing + . [100%]*** test run reporting finishing .. note:: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index 3e69d34800d..8de555d3ccb 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -23,20 +23,22 @@ Running pytest now produces this output: .. code-block:: pytest $ pytest test_show_warnings.py - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: + plugins: hypothesis-3.x.y collected 1 item - test_show_warnings.py . [100%] + test_show_warnings.py . 
[100%] - ============================= warnings summary ============================= + ================================== warnings summary ================================== test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) -- Docs: https://docs.pytest.org/en/latest/warnings.html - =================== 1 passed, 1 warnings in 0.12 seconds =================== + ======================== 1 passed, 1 warnings in 0.12 seconds ======================== The ``-W`` flag can be passed to control which warnings will be displayed or even turn them into errors: @@ -44,15 +46,15 @@ them into errors: .. code-block:: pytest $ pytest -q test_show_warnings.py -W error::UserWarning - F [100%] - ================================= FAILURES ================================= - _________________________________ test_one _________________________________ + F [100%] + ====================================== FAILURES ====================================== + ______________________________________ test_one ______________________________________ def test_one(): > assert api_v1() == 1 test_show_warnings.py:8: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def api_v1(): > warnings.warn(UserWarning("api v1, should use functions from v2")) @@ -355,7 +357,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta $ pytest test_pytest_warnings.py -q - ============================= warnings summary ============================= + ================================== warnings summary ================================== test_pytest_warnings.py:1 $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor class Test: diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index 70bf315aa24..f627fec057c 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -411,20 +411,22 @@ additionally it is possible to copy examples for an example folder before runnin .. code-block:: pytest $ pytest - =========================== test session starts ============================ + ================================ test session starts ================================= platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y + hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + plugins: hypothesis-3.x.y collected 2 items - test_example.py .. [100%] + test_example.py .. 
[100%] - ============================= warnings summary ============================= + ================================== warnings summary ================================== test_example.py::test_plugin $REGENDOC_TMPDIR/test_example.py:4: PytestExperimentalApiWarning: testdir.copy_example is an experimental api that may change over time testdir.copy_example("test_example.py") -- Docs: https://docs.pytest.org/en/latest/warnings.html - =================== 2 passed, 1 warnings in 0.12 seconds =================== + ======================== 2 passed, 1 warnings in 0.12 seconds ======================== For more information about the result object that ``runpytest()`` returns, and the methods that it provides please check out the :py:class:`RunResult From 01151ff566388a53b5cac3eeb90aae6045275c75 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 16:53:12 -0200 Subject: [PATCH 95/98] Add example for -ra usage to the docs --- doc/en/usage.rst | 36 ++++++++++++++++++++++++++++++++++++ 1 file changed, 36 insertions(+) diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 87171507de3..2a2d972c0e3 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -152,6 +152,42 @@ making it easy in large test suites to get a clear picture of all failures, skip Example: +.. code-block:: python + + # content of test_example.py + import pytest + + + @pytest.fixture + def error_fixture(): + assert 0 + + + def test_ok(): + print("ok") + + + def test_fail(): + assert 0 + + + def test_error(error_fixture): + pass + + + def test_skip(): + pytest.skip("skipping this test") + + + def test_xfail(): + pytest.xfail("xfailing this test") + + + @pytest.mark.xfail(reason="always xfail") + def test_xpass(): + pass + + .. code-block:: pytest $ pytest -ra From d7465895d09e71225ffa6ae6943e2ae6b6136ec8 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 19:19:40 +0000 Subject: [PATCH 96/98] Regendoc again --- doc/en/assert.rst | 26 +++--- doc/en/cache.rst | 52 ++++++------ doc/en/capture.rst | 12 +-- doc/en/doctest.rst | 6 +- doc/en/example/markers.rst | 112 ++++++++++++------------- doc/en/example/nonpython.rst | 26 +++--- doc/en/example/parametrize.rst | 64 +++++++-------- doc/en/example/pythoncollection.rst | 12 +-- doc/en/example/reportingdemo.rst | 108 ++++++++++++------------ doc/en/example/simple.rst | 122 ++++++++++++++-------------- doc/en/fixture.rst | 76 ++++++++--------- doc/en/getting-started.rst | 26 +++--- doc/en/index.rst | 10 +-- doc/en/parametrize.rst | 28 +++---- doc/en/skipping.rst | 8 +- doc/en/tmpdir.rst | 20 ++--- doc/en/unittest.rst | 14 ++-- doc/en/usage.rst | 110 ++++++++++++++++++++++--- doc/en/warnings.rst | 18 ++-- doc/en/writing_plugins.rst | 8 +- 20 files changed, 474 insertions(+), 384 deletions(-) diff --git a/doc/en/assert.rst b/doc/en/assert.rst index 7f422af1f55..511839d889a 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -27,17 +27,17 @@ you will see the return value of the function call: .. 
code-block:: pytest $ pytest test_assert1.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_assert1.py F [100%] + test_assert1.py F [100%] - ====================================== FAILURES ====================================== - ___________________________________ test_function ____________________________________ + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ def test_function(): > assert f() == 4 @@ -45,7 +45,7 @@ you will see the return value of the function call: E + where 3 = f() test_assert1.py:5: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= ``pytest`` has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -173,17 +173,17 @@ if you run this module: .. code-block:: pytest $ pytest test_assert2.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_assert2.py F [100%] + test_assert2.py F [100%] - ====================================== FAILURES ====================================== - ________________________________ test_set_comparison _________________________________ + ================================= FAILURES ================================= + ___________________________ test_set_comparison ____________________________ def test_set_comparison(): set1 = set("1308") @@ -197,7 +197,7 @@ if you run this module: E Use -v to get the full diff test_assert2.py:5: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= Special comparisons are done for a number of cases: @@ -247,9 +247,9 @@ the conftest file: .. code-block:: pytest $ pytest -q test_foocompare.py - F [100%] - ====================================== FAILURES ====================================== - ____________________________________ test_compare ____________________________________ + F [100%] + ================================= FAILURES ================================= + _______________________________ test_compare _______________________________ def test_compare(): f1 = Foo(1) diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 1814d386d2b..caa170027eb 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -48,9 +48,9 @@ If you run this for the first time you will see two failures: .. code-block:: pytest $ pytest -q - .................F.......F........................ 
[100%] - ====================================== FAILURES ====================================== - ____________________________________ test_num[17] ____________________________________ + .................F.......F........................ [100%] + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ i = 17 @@ -61,7 +61,7 @@ If you run this for the first time you will see two failures: E Failed: bad luck test_50.py:6: Failed - ____________________________________ test_num[25] ____________________________________ + _______________________________ test_num[25] _______________________________ i = 25 @@ -79,7 +79,7 @@ If you then run it with ``--lf``: .. code-block:: pytest $ pytest --lf - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -87,10 +87,10 @@ If you then run it with ``--lf``: collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures - test_50.py FF [100%] + test_50.py FF [100%] - ====================================== FAILURES ====================================== - ____________________________________ test_num[17] ____________________________________ + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ i = 17 @@ -101,7 +101,7 @@ If you then run it with ``--lf``: E Failed: bad luck test_50.py:6: Failed - ____________________________________ test_num[25] ____________________________________ + _______________________________ test_num[25] _______________________________ i = 25 @@ -112,7 +112,7 @@ If you then run it with ``--lf``: E Failed: bad luck test_50.py:6: Failed - ====================== 2 failed, 48 deselected in 0.12 seconds ======================= + ================= 2 failed, 48 deselected in 0.12 seconds ================== You have run only the two failing test from the last run, while 48 tests have not been run ("deselected"). @@ -124,7 +124,7 @@ of ``FF`` and dots): .. code-block:: pytest $ pytest --ff - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -132,10 +132,10 @@ of ``FF`` and dots): collected 50 items run-last-failure: rerun previous 2 failures first - test_50.py FF................................................ [100%] + test_50.py FF................................................ 
[100%] - ====================================== FAILURES ====================================== - ____________________________________ test_num[17] ____________________________________ + ================================= FAILURES ================================= + _______________________________ test_num[17] _______________________________ i = 17 @@ -146,7 +146,7 @@ of ``FF`` and dots): E Failed: bad luck test_50.py:6: Failed - ____________________________________ test_num[25] ____________________________________ + _______________________________ test_num[25] _______________________________ i = 25 @@ -157,7 +157,7 @@ of ``FF`` and dots): E Failed: bad luck test_50.py:6: Failed - ======================== 2 failed, 48 passed in 0.12 seconds ========================= + =================== 2 failed, 48 passed in 0.12 seconds ==================== .. _`config.cache`: @@ -209,9 +209,9 @@ If you run this command for the first time, you can see the print statement: .. code-block:: pytest $ pytest -q - F [100%] - ====================================== FAILURES ====================================== - ___________________________________ test_function ____________________________________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ mydata = 42 @@ -220,7 +220,7 @@ If you run this command for the first time, you can see the print statement: E assert 42 == 23 test_caching.py:17: AssertionError - ------------------------------- Captured stdout setup -------------------------------- + -------------------------- Captured stdout setup --------------------------- running expensive computation... 1 failed in 0.12 seconds @@ -230,9 +230,9 @@ the cache and nothing will be printed: .. code-block:: pytest $ pytest -q - F [100%] - ====================================== FAILURES ====================================== - ___________________________________ test_function ____________________________________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_function _______________________________ mydata = 42 @@ -255,13 +255,13 @@ You can always peek at the content of the cache using the .. 
code-block:: pytest $ pytest --cache-show - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y cachedir: $REGENDOC_TMPDIR/.pytest_cache - ------------------------------------ cache values ------------------------------------ + ------------------------------- cache values ------------------------------- cache/lastfailed contains: {'test_caching.py::test_function': True} cache/nodeids contains: @@ -271,7 +271,7 @@ You can always peek at the content of the cache using the example/value contains: 42 - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= Clearing Cache content ------------------------------- diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 15ad75910c2..7d452b9f9c8 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -66,26 +66,26 @@ of the failing function and hide the other one: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_module.py .F [100%] + test_module.py .F [100%] - ====================================== FAILURES ====================================== - _____________________________________ test_func2 _____________________________________ + ================================= FAILURES ================================= + ________________________________ test_func2 ________________________________ def test_func2(): > assert False E assert False test_module.py:9: AssertionError - ------------------------------- Captured stdout setup -------------------------------- + -------------------------- Captured stdout setup --------------------------- setting up - ========================= 1 failed, 1 passed in 0.12 seconds ========================= + ==================== 1 failed, 1 passed in 0.12 seconds ==================== Accessing captured output from a test function --------------------------------------------------- diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index c861ede8a02..69a9cd18abc 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -63,16 +63,16 @@ then you can just invoke ``pytest`` without command line options: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini plugins: hypothesis-3.x.y collected 1 item - mymodule.py . [100%] + mymodule.py . 
[100%] - ============================== 1 passed in 0.12 seconds ============================== + ========================= 1 passed in 0.12 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index b27a4fcb27e..637640062a8 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -32,7 +32,7 @@ You can then restrict a test run to only run tests marked with ``webtest``: .. code-block:: pytest $ pytest -v -m webtest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -40,16 +40,16 @@ You can then restrict a test run to only run tests marked with ``webtest``: plugins: hypothesis-3.x.y collecting ... collected 4 items / 3 deselected - test_server.py::test_send_http PASSED [100%] + test_server.py::test_send_http PASSED [100%] - ======================= 1 passed, 3 deselected in 0.12 seconds ======================= + ================== 1 passed, 3 deselected in 0.12 seconds ================== Or the inverse, running all tests except the webtest ones: .. code-block:: pytest $ pytest -v -m "not webtest" - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -57,11 +57,11 @@ Or the inverse, running all tests except the webtest ones: plugins: hypothesis-3.x.y collecting ... collected 4 items / 1 deselected - test_server.py::test_something_quick PASSED [ 33%] - test_server.py::test_another PASSED [ 66%] - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::test_something_quick PASSED [ 33%] + test_server.py::test_another PASSED [ 66%] + test_server.py::TestClass::test_method PASSED [100%] - ======================= 3 passed, 1 deselected in 0.12 seconds ======================= + ================== 3 passed, 1 deselected in 0.12 seconds ================== Selecting tests based on their node ID -------------------------------------- @@ -73,7 +73,7 @@ tests based on their module, class, method, or function name: .. code-block:: pytest $ pytest -v test_server.py::TestClass::test_method - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -81,16 +81,16 @@ tests based on their module, class, method, or function name: plugins: hypothesis-3.x.y collecting ... 
collected 1 item - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::TestClass::test_method PASSED [100%] - ============================== 1 passed in 0.12 seconds ============================== + ========================= 1 passed in 0.12 seconds ========================= You can also select on the class: .. code-block:: pytest $ pytest -v test_server.py::TestClass - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -98,16 +98,16 @@ You can also select on the class: plugins: hypothesis-3.x.y collecting ... collected 1 item - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::TestClass::test_method PASSED [100%] - ============================== 1 passed in 0.12 seconds ============================== + ========================= 1 passed in 0.12 seconds ========================= Or select multiple nodes: .. code-block:: pytest $ pytest -v test_server.py::TestClass test_server.py::test_send_http - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -115,10 +115,10 @@ Or select multiple nodes: plugins: hypothesis-3.x.y collecting ... collected 2 items - test_server.py::TestClass::test_method PASSED [ 50%] - test_server.py::test_send_http PASSED [100%] + test_server.py::TestClass::test_method PASSED [ 50%] + test_server.py::test_send_http PASSED [100%] - ============================== 2 passed in 0.12 seconds ============================== + ========================= 2 passed in 0.12 seconds ========================= .. _node-id: @@ -149,7 +149,7 @@ select tests based on their names: .. code-block:: pytest $ pytest -v -k http # running with the above defined example module - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -157,16 +157,16 @@ select tests based on their names: plugins: hypothesis-3.x.y collecting ... collected 4 items / 3 deselected - test_server.py::test_send_http PASSED [100%] + test_server.py::test_send_http PASSED [100%] - ======================= 1 passed, 3 deselected in 0.12 seconds ======================= + ================== 1 passed, 3 deselected in 0.12 seconds ================== And you can also run all tests except the ones that match the keyword: .. 
code-block:: pytest $ pytest -k "not send_http" -v - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -174,18 +174,18 @@ And you can also run all tests except the ones that match the keyword: plugins: hypothesis-3.x.y collecting ... collected 4 items / 1 deselected - test_server.py::test_something_quick PASSED [ 33%] - test_server.py::test_another PASSED [ 66%] - test_server.py::TestClass::test_method PASSED [100%] + test_server.py::test_something_quick PASSED [ 33%] + test_server.py::test_another PASSED [ 66%] + test_server.py::TestClass::test_method PASSED [100%] - ======================= 3 passed, 1 deselected in 0.12 seconds ======================= + ================== 3 passed, 1 deselected in 0.12 seconds ================== Or to select "http" and "quick" tests: .. code-block:: pytest $ pytest -k "http or quick" -v - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -193,10 +193,10 @@ Or to select "http" and "quick" tests: plugins: hypothesis-3.x.y collecting ... collected 4 items / 2 deselected - test_server.py::test_send_http PASSED [ 50%] - test_server.py::test_something_quick PASSED [100%] + test_server.py::test_send_http PASSED [ 50%] + test_server.py::test_something_quick PASSED [100%] - ======================= 2 passed, 2 deselected in 0.12 seconds ======================= + ================== 2 passed, 2 deselected in 0.12 seconds ================== .. note:: @@ -381,32 +381,32 @@ the test needs: .. code-block:: pytest $ pytest -E stage2 - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_someenv.py s [100%] + test_someenv.py s [100%] - ============================= 1 skipped in 0.12 seconds ============================== + ======================== 1 skipped in 0.12 seconds ========================= and here is one that specifies exactly the environment needed: .. code-block:: pytest $ pytest -E stage1 - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_someenv.py . [100%] + test_someenv.py . 
[100%] - ============================== 1 passed in 0.12 seconds ============================== + ========================= 1 passed in 0.12 seconds ========================= The ``--markers`` option always gives you a list of available markers:: @@ -568,34 +568,34 @@ then you will see two tests skipped and two executed tests as expected: .. code-block:: pytest $ pytest -rs # this option reports skip reasons - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 4 items - test_plat.py s.s. [100%] - ============================== short test summary info =============================== + test_plat.py s.s. [100%] + ========================= short test summary info ========================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - ======================== 2 passed, 2 skipped in 0.12 seconds ========================= + =================== 2 passed, 2 skipped in 0.12 seconds ==================== Note that if you specify a platform via the marker-command line option like this: .. code-block:: pytest $ pytest -m linux - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 4 items / 3 deselected - test_plat.py . [100%] + test_plat.py . [100%] - ======================= 1 passed, 3 deselected in 0.12 seconds ======================= + ================== 1 passed, 3 deselected in 0.12 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -641,51 +641,51 @@ We can now use the ``-m option`` to select one set: .. 
code-block:: pytest $ pytest -m interface --tb=short - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 4 items / 2 deselected - test_module.py FF [100%] + test_module.py FF [100%] - ====================================== FAILURES ====================================== - _______________________________ test_interface_simple ________________________________ + ================================= FAILURES ================================= + __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - _______________________________ test_interface_complex _______________________________ + __________________________ test_interface_complex __________________________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ======================= 2 failed, 2 deselected in 0.12 seconds ======================= + ================== 2 failed, 2 deselected in 0.12 seconds ================== or to select both "event" and "interface" tests: .. code-block:: pytest $ pytest -m "interface or event" --tb=short - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 4 items / 1 deselected - test_module.py FFF [100%] + test_module.py FFF [100%] - ====================================== FAILURES ====================================== - _______________________________ test_interface_simple ________________________________ + ================================= FAILURES ================================= + __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - _______________________________ test_interface_complex _______________________________ + __________________________ test_interface_complex __________________________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - _________________________________ test_event_simple __________________________________ + ____________________________ test_event_simple _____________________________ test_module.py:9: in test_event_simple assert 0 E assert 0 - ======================= 3 failed, 1 deselected in 0.12 seconds ======================= + ================== 3 failed, 1 deselected in 0.12 seconds ================== diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 1581b8672b1..3db46639142 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -28,21 +28,21 @@ now execute the test specification: .. 
code-block:: pytest nonpython $ pytest test_simple.yml - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: plugins: hypothesis-3.x.y collected 2 items - test_simple.yml F. [100%] + test_simple.yml F. [100%] - ====================================== FAILURES ====================================== - ___________________________________ usecase: hello ___________________________________ + ================================= FAILURES ================================= + ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ========================= 1 failed, 1 passed in 0.12 seconds ========================= + ==================== 1 failed, 1 passed in 0.12 seconds ==================== .. regendoc:wipe @@ -64,7 +64,7 @@ consulted when reporting in ``verbose`` mode: .. code-block:: pytest nonpython $ pytest -v - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') @@ -72,15 +72,15 @@ consulted when reporting in ``verbose`` mode: plugins: hypothesis-3.x.y collecting ... collected 2 items - test_simple.yml::hello FAILED [ 50%] - test_simple.yml::ok PASSED [100%] + test_simple.yml::hello FAILED [ 50%] + test_simple.yml::ok PASSED [100%] - ====================================== FAILURES ====================================== - ___________________________________ usecase: hello ___________________________________ + ================================= FAILURES ================================= + ______________________________ usecase: hello ______________________________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ========================= 1 failed, 1 passed in 0.12 seconds ========================= + ==================== 1 failed, 1 passed in 0.12 seconds ==================== .. regendoc:wipe @@ -90,7 +90,7 @@ interesting to just look at the collection tree: .. 
code-block:: pytest nonpython $ pytest --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: @@ -101,4 +101,4 @@ interesting to just look at the collection tree: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 76cb688678f..133add2c608 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -47,7 +47,7 @@ This means that we only run 2 tests if we do not pass ``--all``: .. code-block:: pytest $ pytest -q test_compute.py - .. [100%] + .. [100%] 2 passed in 0.12 seconds We run only two computations, so we see two dots. @@ -56,9 +56,9 @@ let's run the full monty: .. code-block:: pytest $ pytest -q --all - ....F [100%] - ====================================== FAILURES ====================================== - __________________________________ test_compute[4] ___________________________________ + ....F [100%] + ================================= FAILURES ================================= + _____________________________ test_compute[4] ______________________________ param1 = 4 @@ -143,7 +143,7 @@ objects, they are still using the default pytest representation: .. code-block:: pytest $ pytest test_time.py --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -159,7 +159,7 @@ objects, they are still using the default pytest representation: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs together with the actual data, instead of listing them separately. @@ -203,23 +203,23 @@ this is a fully self-contained example which you can run with: .. code-block:: pytest $ pytest test_scenarios.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 4 items - test_scenarios.py .... [100%] + test_scenarios.py .... [100%] - ============================== 4 passed in 0.12 seconds ============================== + ========================= 4 passed in 0.12 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function: .. 
code-block:: pytest $ pytest --collect-only test_scenarios.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -232,7 +232,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. With pytest-2.3 this leads to a @@ -287,7 +287,7 @@ Let's first see how it looks like at collection time: .. code-block:: pytest $ pytest test_backends.py --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -297,16 +297,16 @@ Let's first see how it looks like at collection time: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= And then when we run the test: .. code-block:: pytest $ pytest -q test_backends.py - .F [100%] - ====================================== FAILURES ====================================== - ______________________________ test_db_initialized[d2] _______________________________ + .F [100%] + ================================= FAILURES ================================= + _________________________ test_db_initialized[d2] __________________________ db = @@ -354,7 +354,7 @@ The result of this test will be successful: .. code-block:: pytest $ pytest test_indirect_list.py --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -363,7 +363,7 @@ The result of this test will be successful: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -407,9 +407,9 @@ argument sets to use for each test function. Let's run it: .. code-block:: pytest $ pytest -q - F.. [100%] - ====================================== FAILURES ====================================== - _____________________________ TestClass.test_equals[1-2] _____________________________ + F.. [100%] + ================================= FAILURES ================================= + ________________________ TestClass.test_equals[1-2] ________________________ self = , a = 1, b = 2 @@ -439,8 +439,8 @@ Running it results in some skips if we don't have all the python interpreters in .. code-block:: pytest . $ pytest -rs -q multipython.py - ...sss...sssssssss...sss... 
[100%] - ============================== short test summary info =============================== + ...sss...sssssssss...sss... [100%] + ========================= short test summary info ========================== SKIP [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.4' not found 12 passed, 15 skipped in 0.12 seconds @@ -490,18 +490,18 @@ If you run this with reporting for skips enabled: .. code-block:: pytest $ pytest -rs test_module.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_module.py .s [100%] - ============================== short test summary info =============================== + test_module.py .s [100%] + ========================= short test summary info ========================== SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - ======================== 1 passed, 1 skipped in 0.12 seconds ========================= + =================== 1 passed, 1 skipped in 0.12 seconds ==================== You'll see that we don't have an ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: @@ -549,7 +549,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker: .. code-block:: pytest $ pytest -v -m basic - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -557,11 +557,11 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker: plugins: hypothesis-3.x.y collecting ... collected 17 items / 14 deselected - test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%] - test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%] - test_pytest_param_example.py::test_eval[basic_6*9] xfail [100%] + test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%] + test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%] + test_pytest_param_example.py::test_eval[basic_6*9] xfail [100%] - ================= 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ================= + ============ 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ============ As the result: diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index bc7e0c0d22d..53e7cd32164 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -130,7 +130,7 @@ The test collection would look like this: .. 
code-block:: pytest $ pytest --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini @@ -141,7 +141,7 @@ The test collection would look like this: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= You can check for multiple glob patterns by adding a space between the patterns:: @@ -187,7 +187,7 @@ You can always peek at the collection tree without running tests like this: .. code-block:: pytest . $ pytest --collect-only pythoncollection.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/CWD/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini @@ -199,7 +199,7 @@ You can always peek at the collection tree without running tests like this: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= .. _customizing-test-collection: @@ -261,11 +261,11 @@ file will be left out: .. code-block:: pytest $ pytest --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini plugins: hypothesis-3.x.y collected 0 items - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index d99a06725e1..c40e15572c9 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -12,17 +12,17 @@ get on the terminal - we are working on that): .. 
code-block:: pytest assertion $ pytest failure_demo.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/assertion/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/assertion, inifile: plugins: hypothesis-3.x.y collected 44 items - failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - ====================================== FAILURES ====================================== - ________________________________ test_generative[3-6] ________________________________ + ================================= FAILURES ================================= + ___________________________ test_generative[3-6] ___________________________ param1 = 3, param2 = 6 @@ -32,7 +32,7 @@ get on the terminal - we are working on that): E assert (3 * 2) < 6 failure_demo.py:22: AssertionError - ______________________________ TestFailing.test_simple _______________________________ + _________________________ TestFailing.test_simple __________________________ self = @@ -49,7 +49,7 @@ get on the terminal - we are working on that): E + and 43 = .g at 0xdeadbeef>() failure_demo.py:33: AssertionError - _________________________ TestFailing.test_simple_multiline __________________________ + ____________________ TestFailing.test_simple_multiline _____________________ self = @@ -57,7 +57,7 @@ get on the terminal - we are working on that): > otherfunc_multi(42, 6 * 9) failure_demo.py:36: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 42, b = 54 @@ -66,7 +66,7 @@ get on the terminal - we are working on that): E assert 42 == 54 failure_demo.py:17: AssertionError - ________________________________ TestFailing.test_not ________________________________ + ___________________________ TestFailing.test_not ___________________________ self = @@ -79,7 +79,7 @@ get on the terminal - we are working on that): E + where 42 = .f at 0xdeadbeef>() failure_demo.py:42: AssertionError - ______________________ TestSpecialisedExplanations.test_eq_text ______________________ + _________________ TestSpecialisedExplanations.test_eq_text _________________ self = @@ -90,7 +90,7 @@ get on the terminal - we are working on that): E + eggs failure_demo.py:47: AssertionError - __________________ TestSpecialisedExplanations.test_eq_similar_text __________________ + _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ self = @@ -103,7 +103,7 @@ get on the terminal - we are working on that): E ? ^ failure_demo.py:50: AssertionError - _________________ TestSpecialisedExplanations.test_eq_multiline_text _________________ + ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ self = @@ -116,7 +116,7 @@ get on the terminal - we are working on that): E bar failure_demo.py:53: AssertionError - ___________________ TestSpecialisedExplanations.test_eq_long_text ____________________ + ______________ TestSpecialisedExplanations.test_eq_long_text _______________ self = @@ -133,7 +133,7 @@ get on the terminal - we are working on that): E ? 
^ failure_demo.py:58: AssertionError - ______________ TestSpecialisedExplanations.test_eq_long_text_multiline _______________ + _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ self = @@ -153,7 +153,7 @@ get on the terminal - we are working on that): E ...Full output truncated (7 lines hidden), use '-vv' to show failure_demo.py:63: AssertionError - ______________________ TestSpecialisedExplanations.test_eq_list ______________________ + _________________ TestSpecialisedExplanations.test_eq_list _________________ self = @@ -164,7 +164,7 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:66: AssertionError - ___________________ TestSpecialisedExplanations.test_eq_list_long ____________________ + ______________ TestSpecialisedExplanations.test_eq_list_long _______________ self = @@ -177,7 +177,7 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:71: AssertionError - ______________________ TestSpecialisedExplanations.test_eq_dict ______________________ + _________________ TestSpecialisedExplanations.test_eq_dict _________________ self = @@ -195,7 +195,7 @@ get on the terminal - we are working on that): E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:74: AssertionError - ______________________ TestSpecialisedExplanations.test_eq_set _______________________ + _________________ TestSpecialisedExplanations.test_eq_set __________________ self = @@ -213,7 +213,7 @@ get on the terminal - we are working on that): E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:77: AssertionError - __________________ TestSpecialisedExplanations.test_eq_longer_list ___________________ + _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ self = @@ -224,7 +224,7 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:80: AssertionError - ______________________ TestSpecialisedExplanations.test_in_list ______________________ + _________________ TestSpecialisedExplanations.test_in_list _________________ self = @@ -233,7 +233,7 @@ get on the terminal - we are working on that): E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:83: AssertionError - _______________ TestSpecialisedExplanations.test_not_in_text_multiline _______________ + __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ self = @@ -252,7 +252,7 @@ get on the terminal - we are working on that): E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:87: AssertionError - ________________ TestSpecialisedExplanations.test_not_in_text_single _________________ + ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ self = @@ -265,7 +265,7 @@ get on the terminal - we are working on that): E ? +++ failure_demo.py:91: AssertionError - ______________ TestSpecialisedExplanations.test_not_in_text_single_long ______________ + _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ self = @@ -278,7 +278,7 @@ get on the terminal - we are working on that): E ? +++ failure_demo.py:95: AssertionError - ___________ TestSpecialisedExplanations.test_not_in_text_single_long_term ____________ + ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ self = @@ -291,7 +291,7 @@ get on the terminal - we are working on that): E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:99: AssertionError - ___________________ TestSpecialisedExplanations.test_eq_dataclass ____________________ + ______________ TestSpecialisedExplanations.test_eq_dataclass _______________ self = @@ -312,7 +312,7 @@ get on the terminal - we are working on that): E b: 'b' != 'c' failure_demo.py:111: AssertionError - _____________________ TestSpecialisedExplanations.test_eq_attrs ______________________ + ________________ TestSpecialisedExplanations.test_eq_attrs _________________ self = @@ -333,7 +333,7 @@ get on the terminal - we are working on that): E b: 'b' != 'c' failure_demo.py:123: AssertionError - ___________________________________ test_attribute ___________________________________ + ______________________________ test_attribute ______________________________ def test_attribute(): class Foo(object): @@ -345,7 +345,7 @@ get on the terminal - we are working on that): E + where 1 = .Foo object at 0xdeadbeef>.b failure_demo.py:131: AssertionError - ______________________________ test_attribute_instance _______________________________ + _________________________ test_attribute_instance __________________________ def test_attribute_instance(): class Foo(object): @@ -357,7 +357,7 @@ get on the terminal - we are working on that): E + where .Foo object at 0xdeadbeef> = .Foo'>() failure_demo.py:138: AssertionError - _______________________________ test_attribute_failure _______________________________ + __________________________ test_attribute_failure __________________________ def test_attribute_failure(): class Foo(object): @@ -370,7 +370,7 @@ get on the terminal - we are working on that): > assert i.b == 2 failure_demo.py:149: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ self = .Foo object at 0xdeadbeef> @@ -379,7 +379,7 @@ get on the terminal - we are working on that): E Exception: Failed to get attrib failure_demo.py:144: Exception - ______________________________ test_attribute_multiple _______________________________ + _________________________ test_attribute_multiple __________________________ def test_attribute_multiple(): class Foo(object): @@ -396,7 +396,7 @@ get on the terminal - we are working on that): E + where .Bar object at 0xdeadbeef> = .Bar'>() failure_demo.py:159: AssertionError - _______________________________ TestRaises.test_raises _______________________________ + __________________________ TestRaises.test_raises __________________________ self = @@ -406,7 +406,7 @@ get on the terminal - we are working on that): E ValueError: invalid literal for int() with base 10: 'qwe' failure_demo.py:169: ValueError - ___________________________ TestRaises.test_raises_doesnt ____________________________ + ______________________ TestRaises.test_raises_doesnt _______________________ self = @@ -415,7 +415,7 @@ get on the terminal - we are working on that): E Failed: DID NOT RAISE failure_demo.py:172: Failed - _______________________________ TestRaises.test_raise ________________________________ + __________________________ TestRaises.test_raise ___________________________ self = @@ -424,7 +424,7 @@ get on the terminal - we are working on that): E ValueError: demo error failure_demo.py:175: ValueError - _____________________________ TestRaises.test_tupleerror _____________________________ + ________________________ TestRaises.test_tupleerror ________________________ self = @@ -433,7 
+433,7 @@ get on the terminal - we are working on that): E ValueError: not enough values to unpack (expected 2, got 1) failure_demo.py:178: ValueError - ___________ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ___________ + ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ self = @@ -444,9 +444,9 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:183: TypeError - -------------------------------- Captured stdout call -------------------------------- + --------------------------- Captured stdout call --------------------------- items is [1, 2, 3] - _____________________________ TestRaises.test_some_error _____________________________ + ________________________ TestRaises.test_some_error ________________________ self = @@ -455,7 +455,7 @@ get on the terminal - we are working on that): E NameError: name 'namenotexi' is not defined failure_demo.py:186: NameError - _________________________ test_dynamic_compile_shows_nicely __________________________ + ____________________ test_dynamic_compile_shows_nicely _____________________ def test_dynamic_compile_shows_nicely(): import imp @@ -470,14 +470,14 @@ get on the terminal - we are working on that): > module.foo() failure_demo.py:204: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E AssertionError <0-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:201>:2: AssertionError - _________________________ TestMoreErrors.test_complex_error __________________________ + ____________________ TestMoreErrors.test_complex_error _____________________ self = @@ -491,10 +491,10 @@ get on the terminal - we are working on that): > somefunc(f(), g()) failure_demo.py:215: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:13: in somefunc otherfunc(x, y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 44, b = 43 @@ -503,7 +503,7 @@ get on the terminal - we are working on that): E assert 44 == 43 failure_demo.py:9: AssertionError - ________________________ TestMoreErrors.test_z1_unpack_error _________________________ + ___________________ TestMoreErrors.test_z1_unpack_error ____________________ self = @@ -513,7 +513,7 @@ get on the terminal - we are working on that): E ValueError: not enough values to unpack (expected 2, got 0) failure_demo.py:219: ValueError - _________________________ TestMoreErrors.test_z2_type_error __________________________ + ____________________ TestMoreErrors.test_z2_type_error _____________________ self = @@ -523,7 +523,7 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:223: TypeError - ___________________________ TestMoreErrors.test_startswith ___________________________ + ______________________ TestMoreErrors.test_startswith ______________________ self = @@ -536,7 +536,7 @@ get on the terminal - we are working on that): E + where = '123'.startswith failure_demo.py:228: AssertionError - _______________________ TestMoreErrors.test_startswith_nested ________________________ + __________________ TestMoreErrors.test_startswith_nested ___________________ self = @@ -555,7 +555,7 @@ get on 
the terminal - we are working on that): E + and '456' = .g at 0xdeadbeef>() failure_demo.py:237: AssertionError - __________________________ TestMoreErrors.test_global_func ___________________________ + _____________________ TestMoreErrors.test_global_func ______________________ self = @@ -566,7 +566,7 @@ get on the terminal - we are working on that): E + where 43 = globf(42) failure_demo.py:240: AssertionError - ____________________________ TestMoreErrors.test_instance ____________________________ + _______________________ TestMoreErrors.test_instance _______________________ self = @@ -577,7 +577,7 @@ get on the terminal - we are working on that): E + where 42 = .x failure_demo.py:244: AssertionError - ____________________________ TestMoreErrors.test_compare _____________________________ + _______________________ TestMoreErrors.test_compare ________________________ self = @@ -587,7 +587,7 @@ get on the terminal - we are working on that): E + where 11 = globf(10) failure_demo.py:247: AssertionError - __________________________ TestMoreErrors.test_try_finally ___________________________ + _____________________ TestMoreErrors.test_try_finally ______________________ self = @@ -598,7 +598,7 @@ get on the terminal - we are working on that): E assert 1 == 0 failure_demo.py:252: AssertionError - ________________________ TestCustomAssertMsg.test_single_line ________________________ + ___________________ TestCustomAssertMsg.test_single_line ___________________ self = @@ -613,7 +613,7 @@ get on the terminal - we are working on that): E + where 1 = .A'>.a failure_demo.py:263: AssertionError - _________________________ TestCustomAssertMsg.test_multiline _________________________ + ____________________ TestCustomAssertMsg.test_multiline ____________________ self = @@ -632,7 +632,7 @@ get on the terminal - we are working on that): E + where 1 = .A'>.a failure_demo.py:270: AssertionError - ________________________ TestCustomAssertMsg.test_custom_repr ________________________ + ___________________ TestCustomAssertMsg.test_custom_repr ___________________ self = @@ -654,4 +654,4 @@ get on the terminal - we are working on that): E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a failure_demo.py:283: AssertionError - ============================= 44 failed in 0.12 seconds ============================== + ======================== 44 failed in 0.12 seconds ========================= diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 26d5d6c4b60..6d06856d1e2 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -48,9 +48,9 @@ Let's run this without supplying our new option: .. code-block:: pytest $ pytest -q test_sample.py - F [100%] - ====================================== FAILURES ====================================== - ____________________________________ test_answer _____________________________________ + F [100%] + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ cmdopt = 'type1' @@ -63,7 +63,7 @@ Let's run this without supplying our new option: E assert 0 test_sample.py:6: AssertionError - -------------------------------- Captured stdout call -------------------------------- + --------------------------- Captured stdout call --------------------------- first 1 failed in 0.12 seconds @@ -72,9 +72,9 @@ And now with supplying a command line option: .. 
code-block:: pytest $ pytest -q --cmdopt=type2 - F [100%] - ====================================== FAILURES ====================================== - ____________________________________ test_answer _____________________________________ + F [100%] + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ cmdopt = 'type2' @@ -87,7 +87,7 @@ And now with supplying a command line option: E assert 0 test_sample.py:6: AssertionError - -------------------------------- Captured stdout call -------------------------------- + --------------------------- Captured stdout call --------------------------- second 1 failed in 0.12 seconds @@ -126,14 +126,14 @@ directory with the above conftest.py: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 0 items - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= .. _`excontrolskip`: @@ -188,34 +188,34 @@ and when running it will see a skipped "slow" test: .. code-block:: pytest $ pytest -rs # "-rs" means report details on the little 's' - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_module.py .s [100%] - ============================== short test summary info =============================== + test_module.py .s [100%] + ========================= short test summary info ========================== SKIP [1] test_module.py:8: need --runslow option to run - ======================== 1 passed, 1 skipped in 0.12 seconds ========================= + =================== 1 passed, 1 skipped in 0.12 seconds ==================== Or run it including the ``slow`` marked test: .. code-block:: pytest $ pytest --runslow - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_module.py .. [100%] + test_module.py .. [100%] - ============================== 2 passed in 0.12 seconds ============================== + ========================= 2 passed in 0.12 seconds ========================= Writing well integrated assertion helpers -------------------------------------------------- @@ -251,9 +251,9 @@ Let's run our little function: .. 
code-block:: pytest $ pytest -q test_checkconfig.py - F [100%] - ====================================== FAILURES ====================================== - ___________________________________ test_something ___________________________________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_something ______________________________ def test_something(): > checkconfig(42) @@ -350,7 +350,7 @@ which will add the string to the test header accordingly: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') project deps: mylib-1.1 @@ -358,7 +358,7 @@ which will add the string to the test header accordingly: plugins: hypothesis-3.x.y collected 0 items - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -380,7 +380,7 @@ which will add info only when run with "--v": .. code-block:: pytest $ pytest -v - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -390,21 +390,21 @@ which will add info only when run with "--v": plugins: hypothesis-3.x.y collecting ... collected 0 items - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= and nothing when run plainly: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 0 items - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= profiling test duration -------------------------- @@ -438,20 +438,20 @@ Now we can profile which test functions execute the slowest: .. code-block:: pytest $ pytest --durations=3 - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 3 items - test_some_are_slow.py ... [100%] + test_some_are_slow.py ... 
[100%] - ============================== slowest 3 test durations ============================== + ========================= slowest 3 test durations ========================= 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 0.10s call test_some_are_slow.py::test_funcfast - ============================== 3 passed in 0.12 seconds ============================== + ========================= 3 passed in 0.12 seconds ========================= incremental testing - test steps --------------------------------------------------- @@ -514,17 +514,17 @@ If we run this: .. code-block:: pytest $ pytest -rx - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 4 items - test_step.py .Fx. [100%] + test_step.py .Fx. [100%] - ====================================== FAILURES ====================================== - _________________________ TestUserHandling.test_modification _________________________ + ================================= FAILURES ================================= + ____________________ TestUserHandling.test_modification ____________________ self = @@ -533,10 +533,10 @@ If we run this: E assert 0 test_step.py:11: AssertionError - ============================== short test summary info =============================== + ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::test_deletion reason: previous test failed (test_modification) - =================== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ==================== + ============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds =============== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". @@ -599,20 +599,20 @@ We can run this: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 7 items - test_step.py .Fx. [ 57%] - a/test_db.py F [ 71%] - a/test_db2.py F [ 85%] - b/test_error.py E [100%] + test_step.py .Fx. [ 57%] + a/test_db.py F [ 71%] + a/test_db2.py F [ 85%] + b/test_error.py E [100%] - ======================================= ERRORS ======================================= - ____________________________ ERROR at setup of test_root _____________________________ + ================================== ERRORS ================================== + _______________________ ERROR at setup of test_root ________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 def test_root(db): # no db here, will error out E fixture 'db' not found @@ -620,8 +620,8 @@ We can run this: > use 'pytest --fixtures [testpath]' for help on them. 
$REGENDOC_TMPDIR/b/test_error.py:1 - ====================================== FAILURES ====================================== - _________________________ TestUserHandling.test_modification _________________________ + ================================= FAILURES ================================= + ____________________ TestUserHandling.test_modification ____________________ self = @@ -630,7 +630,7 @@ We can run this: E assert 0 test_step.py:11: AssertionError - ______________________________________ test_a1 _______________________________________ + _________________________________ test_a1 __________________________________ db = @@ -640,7 +640,7 @@ We can run this: E assert 0 a/test_db.py:2: AssertionError - ______________________________________ test_a2 _______________________________________ + _________________________________ test_a2 __________________________________ db = @@ -650,7 +650,7 @@ We can run this: E assert 0 a/test_db2.py:2: AssertionError - =============== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds =============== + ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. We could of course @@ -714,17 +714,17 @@ and run them: .. code-block:: pytest $ pytest test_module.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_module.py FF [100%] + test_module.py FF [100%] - ====================================== FAILURES ====================================== - _____________________________________ test_fail1 _____________________________________ + ================================= FAILURES ================================= + ________________________________ test_fail1 ________________________________ tmpdir = local('PYTEST_TMPDIR/test_fail10') @@ -733,14 +733,14 @@ and run them: E assert 0 test_module.py:2: AssertionError - _____________________________________ test_fail2 _____________________________________ + ________________________________ test_fail2 ________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:6: AssertionError - ============================== 2 failed in 0.12 seconds ============================== + ========================= 2 failed in 0.12 seconds ========================= you will have a "failures" file which contains the failing test ids:: @@ -817,7 +817,7 @@ and run it: .. 
code-block:: pytest $ pytest -s test_module.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -828,8 +828,8 @@ and run it: Fexecuting test failed test_module.py::test_call_fails F - ======================================= ERRORS ======================================= - _________________________ ERROR at setup of test_setup_fails _________________________ + ================================== ERRORS ================================== + ____________________ ERROR at setup of test_setup_fails ____________________ @pytest.fixture def other(): @@ -837,8 +837,8 @@ and run it: E assert 0 test_module.py:7: AssertionError - ====================================== FAILURES ====================================== - __________________________________ test_call_fails ___________________________________ + ================================= FAILURES ================================= + _____________________________ test_call_fails ______________________________ something = None @@ -847,14 +847,14 @@ and run it: E assert 0 test_module.py:15: AssertionError - _____________________________________ test_fail2 _____________________________________ + ________________________________ test_fail2 ________________________________ def test_fail2(): > assert 0 E assert 0 test_module.py:19: AssertionError - ========================= 2 failed, 1 error in 0.12 seconds ========================== + ==================== 2 failed, 1 error in 0.12 seconds ===================== You'll see that the fixture finalizers could use the precise reporting information. diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index 6aed3ca0802..9f13875d5c6 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -71,17 +71,17 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: .. code-block:: pytest $ pytest test_smtpsimple.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_smtpsimple.py F [100%] + test_smtpsimple.py F [100%] - ====================================== FAILURES ====================================== - _____________________________________ test_ehlo ______________________________________ + ================================= FAILURES ================================= + ________________________________ test_ehlo _________________________________ smtp_connection = @@ -92,7 +92,7 @@ marked ``smtp_connection`` fixture function. 
Running the test looks like this: E assert 0 test_smtpsimple.py:11: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= In the failure traceback we see that the test function was called with a ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -213,17 +213,17 @@ inspect what is going on and can now run the tests: .. code-block:: pytest $ pytest test_module.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_module.py FF [100%] + test_module.py FF [100%] - ====================================== FAILURES ====================================== - _____________________________________ test_ehlo ______________________________________ + ================================= FAILURES ================================= + ________________________________ test_ehlo _________________________________ smtp_connection = @@ -235,7 +235,7 @@ inspect what is going on and can now run the tests: E assert 0 test_module.py:6: AssertionError - _____________________________________ test_noop ______________________________________ + ________________________________ test_noop _________________________________ smtp_connection = @@ -246,7 +246,7 @@ inspect what is going on and can now run the tests: E assert 0 test_module.py:11: AssertionError - ============================== 2 failed in 0.12 seconds ============================== + ========================= 2 failed in 0.12 seconds ========================= You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp_connection`` object was passed into the @@ -495,14 +495,14 @@ Running it: .. code-block:: pytest $ pytest -qq --tb=short test_anothersmtp.py - F [100%] - ====================================== FAILURES ====================================== - ___________________________________ test_showhelo ____________________________________ + F [100%] + ================================= FAILURES ================================= + ______________________________ test_showhelo _______________________________ test_anothersmtp.py:5: in test_showhelo assert 0, smtp_connection.helo() E AssertionError: (250, b'mail.python.org') E assert 0 - ------------------------------ Captured stdout teardown ------------------------------ + ------------------------- Captured stdout teardown ------------------------- finalizing (mail.python.org) voila! The ``smtp_connection`` fixture function picked up our mail server name @@ -599,9 +599,9 @@ So let's just do another run: .. 
code-block:: pytest $ pytest -q test_module.py - FFFF [100%] - ====================================== FAILURES ====================================== - _____________________________ test_ehlo[smtp.gmail.com] ______________________________ + FFFF [100%] + ================================= FAILURES ================================= + ________________________ test_ehlo[smtp.gmail.com] _________________________ smtp_connection = @@ -613,7 +613,7 @@ So let's just do another run: E assert 0 test_module.py:6: AssertionError - _____________________________ test_noop[smtp.gmail.com] ______________________________ + ________________________ test_noop[smtp.gmail.com] _________________________ smtp_connection = @@ -624,7 +624,7 @@ So let's just do another run: E assert 0 test_module.py:11: AssertionError - _____________________________ test_ehlo[mail.python.org] _____________________________ + ________________________ test_ehlo[mail.python.org] ________________________ smtp_connection = @@ -635,9 +635,9 @@ So let's just do another run: E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING' test_module.py:5: AssertionError - ------------------------------- Captured stdout setup -------------------------------- + -------------------------- Captured stdout setup --------------------------- finalizing - _____________________________ test_noop[mail.python.org] _____________________________ + ________________________ test_noop[mail.python.org] ________________________ smtp_connection = @@ -648,7 +648,7 @@ So let's just do another run: E assert 0 test_module.py:11: AssertionError - ------------------------------ Captured stdout teardown ------------------------------ + ------------------------- Captured stdout teardown ------------------------- finalizing 4 failed in 0.12 seconds @@ -703,7 +703,7 @@ Running the above tests results in the following test IDs being used: .. code-block:: pytest $ pytest --collect-only - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: @@ -723,7 +723,7 @@ Running the above tests results in the following test IDs being used: - ============================ no tests ran in 0.12 seconds ============================ + ======================= no tests ran in 0.12 seconds ======================= .. _`fixture-parametrize-marks`: @@ -749,7 +749,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: .. code-block:: pytest $ pytest test_fixture_marks.py -v - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -757,11 +757,11 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: plugins: hypothesis-3.x.y collecting ... 
collected 3 items - test_fixture_marks.py::test_data[0] PASSED [ 33%] - test_fixture_marks.py::test_data[1] PASSED [ 66%] - test_fixture_marks.py::test_data[2] SKIPPED [100%] + test_fixture_marks.py::test_data[0] PASSED [ 33%] + test_fixture_marks.py::test_data[1] PASSED [ 66%] + test_fixture_marks.py::test_data[2] SKIPPED [100%] - ======================== 2 passed, 1 skipped in 0.12 seconds ========================= + =================== 2 passed, 1 skipped in 0.12 seconds ==================== .. _`interdependent fixtures`: @@ -796,7 +796,7 @@ Here we declare an ``app`` fixture which receives the previously defined .. code-block:: pytest $ pytest -v test_appsetup.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -804,10 +804,10 @@ Here we declare an ``app`` fixture which receives the previously defined plugins: hypothesis-3.x.y collecting ... collected 2 items - test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] - test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%] + test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] + test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%] - ============================== 2 passed in 0.12 seconds ============================== + ========================= 2 passed in 0.12 seconds ========================= Due to the parametrization of ``smtp_connection``, the test will run twice with two different ``App`` instances and respective smtp servers. There is no @@ -869,7 +869,7 @@ Let's run the tests in verbose mode and with looking at the print-output: .. code-block:: pytest $ pytest -v -s test_module.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') @@ -910,7 +910,7 @@ Let's run the tests in verbose mode and with looking at the print-output: TEARDOWN modarg mod2 - ============================== 8 passed in 0.12 seconds ============================== + ========================= 8 passed in 0.12 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. @@ -975,7 +975,7 @@ to verify our fixture is activated and the tests pass: .. code-block:: pytest $ pytest -q - .. [100%] + .. [100%] 2 passed in 0.12 seconds You can specify multiple fixtures like this: @@ -1076,7 +1076,7 @@ If we run it, we get two passing tests: .. code-block:: pytest $ pytest -q - .. [100%] + .. [100%] 2 passed in 0.12 seconds Here is how autouse fixtures work in other scopes: diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index 31910fa0221..cf94b93f458 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -47,17 +47,17 @@ That’s it. You can now execute the test function: .. 
code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_sample.py F [100%] + test_sample.py F [100%] - ====================================== FAILURES ====================================== - ____________________________________ test_answer _____________________________________ + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ def test_answer(): > assert func(3) == 5 @@ -65,7 +65,7 @@ That’s it. You can now execute the test function: E + where 4 = func(3) test_sample.py:5: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= This test returns a failure report because ``func(3)`` does not return ``5``. @@ -98,7 +98,7 @@ Execute the test function with “quiet” reporting mode: .. code-block:: pytest $ pytest -q test_sysexit.py - . [100%] + . [100%] 1 passed in 0.12 seconds Group multiple tests in a class @@ -121,9 +121,9 @@ Once you develop multiple tests, you may want to group them into a class. pytest .. code-block:: pytest $ pytest -q test_class.py - .F [100%] - ====================================== FAILURES ====================================== - _________________________________ TestClass.test_two _________________________________ + .F [100%] + ================================= FAILURES ================================= + ____________________________ TestClass.test_two ____________________________ self = @@ -153,9 +153,9 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look .. code-block:: pytest $ pytest -q test_tmpdir.py - F [100%] - ====================================== FAILURES ====================================== - __________________________________ test_needsfiles ___________________________________ + F [100%] + ================================= FAILURES ================================= + _____________________________ test_needsfiles ______________________________ tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') @@ -165,7 +165,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look E assert 0 test_tmpdir.py:3: AssertionError - -------------------------------- Captured stdout call -------------------------------- + --------------------------- Captured stdout call --------------------------- PYTEST_TMPDIR/test_needsfiles0 1 failed in 0.12 seconds diff --git a/doc/en/index.rst b/doc/en/index.rst index 3c9cb024138..414503e1767 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -27,17 +27,17 @@ To execute it: .. 
code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_sample.py F [100%] + test_sample.py F [100%] - ====================================== FAILURES ====================================== - ____________________________________ test_answer _____________________________________ + ================================= FAILURES ================================= + _______________________________ test_answer ________________________________ def test_answer(): > assert inc(3) == 5 @@ -45,7 +45,7 @@ To execute it: E + where 4 = inc(3) test_sample.py:6: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See :ref:`Getting Started ` for more examples. diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index 0808b08dfe6..500c5619f2a 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -55,17 +55,17 @@ them in turn: .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 3 items - test_expectation.py ..F [100%] + test_expectation.py ..F [100%] - ====================================== FAILURES ====================================== - _________________________________ test_eval[6*9-42] __________________________________ + ================================= FAILURES ================================= + ____________________________ test_eval[6*9-42] _____________________________ test_input = '6*9', expected = 42 @@ -80,7 +80,7 @@ them in turn: E + where 54 = eval('6*9') test_expectation.py:8: AssertionError - ========================= 1 failed, 2 passed in 0.12 seconds ========================= + ==================== 1 failed, 2 passed in 0.12 seconds ==================== As designed in this example, only one pair of input/output values fails the simple test function. And as usual with test function arguments, @@ -108,16 +108,16 @@ Let's run this: .. 
code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 3 items - test_expectation.py ..x [100%] + test_expectation.py ..x [100%] - ======================== 2 passed, 1 xfailed in 0.12 seconds ========================= + =================== 2 passed, 1 xfailed in 0.12 seconds ==================== The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. @@ -177,7 +177,7 @@ command line option and the parametrization of our test function:: If we now pass two stringinput values, our test will run twice:: $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py - .. [100%] + .. [100%] 2 passed in 0.12 seconds Let's also run with a stringinput that will lead to a failing test: @@ -185,9 +185,9 @@ Let's also run with a stringinput that will lead to a failing test: .. code-block:: pytest $ pytest -q --stringinput="!" test_strings.py - F [100%] - ====================================== FAILURES ====================================== - ________________________________ test_valid_string[!] ________________________________ + F [100%] + ================================= FAILURES ================================= + ___________________________ test_valid_string[!] ___________________________ stringinput = '!' @@ -209,8 +209,8 @@ list: .. code-block:: pytest $ pytest -q -rs test_strings.py - s [100%] - ============================== short test summary info =============================== + s [100%] + ========================= short test summary info ========================== SKIP [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 1 skipped in 0.12 seconds diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 0f207309058..978f4dc5c94 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -328,15 +328,15 @@ Running it with the report-on-xfail option gives this output: .. code-block:: pytest example $ pytest -rx xfail_demo.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/example/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/example, inifile: plugins: hypothesis-3.x.y collected 7 items - xfail_demo.py xxxxxxx [100%] - ============================== short test summary info =============================== + xfail_demo.py xxxxxxx [100%] + ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] @@ -350,7 +350,7 @@ Running it with the report-on-xfail option gives this output: reason: reason XFAIL xfail_demo.py::test_hello7 - ============================= 7 xfailed in 0.12 seconds ============================== + ======================== 7 xfailed in 0.12 seconds ========================= .. 
_`skip/xfail with parametrize`: diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 5f7e98a84cc..7db8482805d 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -40,17 +40,17 @@ Running this would result in a passed test except for the last .. code-block:: pytest $ pytest test_tmp_path.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_tmp_path.py F [100%] + test_tmp_path.py F [100%] - ====================================== FAILURES ====================================== - __________________________________ test_create_file __________________________________ + ================================= FAILURES ================================= + _____________________________ test_create_file _____________________________ tmp_path = PosixPath('PYTEST_TMPDIR/test_create_file0') @@ -65,7 +65,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmp_path.py:13: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= The ``tmp_path_factory`` fixture -------------------------------- @@ -104,17 +104,17 @@ Running this would result in a passed test except for the last .. code-block:: pytest $ pytest test_tmpdir.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_tmpdir.py F [100%] + test_tmpdir.py F [100%] - ====================================== FAILURES ====================================== - __________________________________ test_create_file __________________________________ + ================================= FAILURES ================================= + _____________________________ test_create_file _____________________________ tmpdir = local('PYTEST_TMPDIR/test_create_file0') @@ -127,7 +127,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ============================== 1 failed in 0.12 seconds ============================== + ========================= 1 failed in 0.12 seconds ========================= .. _`tmpdir factory example`: diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index fe7f2e5503a..ff4e73a96c0 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -127,17 +127,17 @@ the ``self.db`` values in the traceback: .. 
code-block:: pytest $ pytest test_unittest_db.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 2 items - test_unittest_db.py FF [100%] + test_unittest_db.py FF [100%] - ====================================== FAILURES ====================================== - ________________________________ MyTest.test_method1 _________________________________ + ================================= FAILURES ================================= + ___________________________ MyTest.test_method1 ____________________________ self = @@ -148,7 +148,7 @@ the ``self.db`` values in the traceback: E assert 0 test_unittest_db.py:9: AssertionError - ________________________________ MyTest.test_method2 _________________________________ + ___________________________ MyTest.test_method2 ____________________________ self = @@ -158,7 +158,7 @@ the ``self.db`` values in the traceback: E assert 0 test_unittest_db.py:12: AssertionError - ============================== 2 failed in 0.12 seconds ============================== + ========================= 2 failed in 0.12 seconds ========================= This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention @@ -208,7 +208,7 @@ Running this test module ...: .. code-block:: pytest $ pytest -q test_unittest_cleandir.py - . [100%] + . [100%] 1 passed in 0.12 seconds ... gives us one passed test because the ``initdir`` fixture function diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 2a2d972c0e3..9fa3d645609 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -191,14 +191,40 @@ Example: .. 
code-block:: pytest $ pytest -ra - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y - collected 0 items + collected 6 items - ============================ no tests ran in 0.12 seconds ============================ + test_example.py .FEsxX [100%] + + ================================== ERRORS ================================== + _______________________ ERROR at setup of test_error _______________________ + + @pytest.fixture + def error_fixture(): + > assert 0 + E assert 0 + + test_example.py:6: AssertionError + ================================= FAILURES ================================= + ________________________________ test_fail _________________________________ + + def test_fail(): + > assert 0 + E assert 0 + + test_example.py:14: AssertionError + ========================= short test summary info ========================== + SKIP [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test + XFAIL test_example.py::test_xfail + reason: xfailing this test + XPASS test_example.py::test_xpass always xfail + ERROR test_example.py::test_error + FAIL test_example.py::test_fail + 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds The ``-r`` options accepts a number of characters after it, with ``a`` used above meaning "all except passes". @@ -218,14 +244,36 @@ More than one character can be used, so for example to only see failed and skipp .. code-block:: pytest $ pytest -rfs - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y - collected 0 items + collected 6 items + + test_example.py .FEsxX [100%] + + ================================== ERRORS ================================== + _______________________ ERROR at setup of test_error _______________________ + + @pytest.fixture + def error_fixture(): + > assert 0 + E assert 0 + + test_example.py:6: AssertionError + ================================= FAILURES ================================= + ________________________________ test_fail _________________________________ + + def test_fail(): + > assert 0 + E assert 0 - ============================ no tests ran in 0.12 seconds ============================ + test_example.py:14: AssertionError + ========================= short test summary info ========================== + FAIL test_example.py::test_fail + SKIP [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test + 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had captured output: @@ -233,14 +281,39 @@ captured output: .. 
code-block:: pytest $ pytest -rpP - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y - collected 0 items + collected 6 items - ============================ no tests ran in 0.12 seconds ============================ + test_example.py .FEsxX [100%] + + ================================== ERRORS ================================== + _______________________ ERROR at setup of test_error _______________________ + + @pytest.fixture + def error_fixture(): + > assert 0 + E assert 0 + + test_example.py:6: AssertionError + ================================= FAILURES ================================= + ________________________________ test_fail _________________________________ + + def test_fail(): + > assert 0 + E assert 0 + + test_example.py:14: AssertionError + ========================= short test summary info ========================== + PASSED test_example.py::test_ok + ================================== PASSES ================================== + _________________________________ test_ok __________________________________ + --------------------------- Captured stdout call --------------------------- + ok + 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds .. _pdb-option: @@ -626,8 +699,25 @@ Running it will show that ``MyPlugin`` was added and its hook was invoked:: $ python myinvoke.py - . [100%]*** test run reporting finishing + .FEsxX. [100%]*** test run reporting finishing + + ================================== ERRORS ================================== + _______________________ ERROR at setup of test_error _______________________ + + @pytest.fixture + def error_fixture(): + > assert 0 + E assert 0 + + test_example.py:6: AssertionError + ================================= FAILURES ================================= + ________________________________ test_fail _________________________________ + + def test_fail(): + > assert 0 + E assert 0 + test_example.py:14: AssertionError .. note:: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index 8de555d3ccb..601459f1c2c 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -23,22 +23,22 @@ Running pytest now produces this output: .. code-block:: pytest $ pytest test_show_warnings.py - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: plugins: hypothesis-3.x.y collected 1 item - test_show_warnings.py . [100%] + test_show_warnings.py . 
[100%] - ================================== warnings summary ================================== + ============================= warnings summary ============================= test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) -- Docs: https://docs.pytest.org/en/latest/warnings.html - ======================== 1 passed, 1 warnings in 0.12 seconds ======================== + =================== 1 passed, 1 warnings in 0.12 seconds =================== The ``-W`` flag can be passed to control which warnings will be displayed or even turn them into errors: @@ -46,15 +46,15 @@ them into errors: .. code-block:: pytest $ pytest -q test_show_warnings.py -W error::UserWarning - F [100%] - ====================================== FAILURES ====================================== - ______________________________________ test_one ______________________________________ + F [100%] + ================================= FAILURES ================================= + _________________________________ test_one _________________________________ def test_one(): > assert api_v1() == 1 test_show_warnings.py:8: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def api_v1(): > warnings.warn(UserWarning("api v1, should use functions from v2")) @@ -357,7 +357,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta $ pytest test_pytest_warnings.py -q - ================================== warnings summary ================================== + ============================= warnings summary ============================= test_pytest_warnings.py:1 $REGENDOC_TMPDIR/test_pytest_warnings.py:1: PytestWarning: cannot collect test class 'Test' because it has a __init__ constructor class Test: diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index f627fec057c..d23a15e00bd 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -411,22 +411,22 @@ additionally it is possible to copy examples for an example folder before runnin .. code-block:: pytest $ pytest - ================================ test session starts ================================= + =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini plugins: hypothesis-3.x.y collected 2 items - test_example.py .. [100%] + test_example.py .. 
[100%] - ================================== warnings summary ================================== + ============================= warnings summary ============================= test_example.py::test_plugin $REGENDOC_TMPDIR/test_example.py:4: PytestExperimentalApiWarning: testdir.copy_example is an experimental api that may change over time testdir.copy_example("test_example.py") -- Docs: https://docs.pytest.org/en/latest/warnings.html - ======================== 2 passed, 1 warnings in 0.12 seconds ======================== + =================== 2 passed, 1 warnings in 0.12 seconds =================== For more information about the result object that ``runpytest()`` returns, and the methods that it provides please check out the :py:class:`RunResult From 99ef8c6d1660365a836e1cf01bf0298009f11490 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 20:31:01 +0000 Subject: [PATCH 97/98] Fix typo in Makefile: PYTEST_ADDOPT -> PYTEST_ADDOPTS --- doc/en/Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/doc/en/Makefile b/doc/en/Makefile index fa8e8266a29..f93d845576b 100644 --- a/doc/en/Makefile +++ b/doc/en/Makefile @@ -39,7 +39,7 @@ clean: -rm -rf $(BUILDDIR)/* regen: - PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPT=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS} + PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPTS=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS} html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html From e24031fb3627ae6c187ea476c99a5f6d90e876f6 Mon Sep 17 00:00:00 2001 From: Bruno Oliveira Date: Sat, 5 Jan 2019 20:42:44 +0000 Subject: [PATCH 98/98] Regendocs again, without hypothesis --- doc/en/assert.rst | 4 ---- doc/en/cache.rst | 6 ------ doc/en/capture.rst | 2 -- doc/en/doctest.rst | 2 -- doc/en/example/markers.rst | 32 ----------------------------- doc/en/example/nonpython.rst | 6 ------ doc/en/example/parametrize.rst | 14 ------------- doc/en/example/pythoncollection.rst | 6 ------ doc/en/example/reportingdemo.rst | 2 -- doc/en/example/simple.rst | 22 -------------------- doc/en/fixture.rst | 12 ----------- doc/en/getting-started.rst | 4 ---- doc/en/index.rst | 2 -- doc/en/parametrize.rst | 4 ---- doc/en/skipping.rst | 2 -- doc/en/tmpdir.rst | 4 ---- doc/en/unittest.rst | 2 -- doc/en/usage.rst | 6 ------ doc/en/warnings.rst | 2 -- doc/en/writing_plugins.rst | 2 -- 20 files changed, 136 deletions(-) diff --git a/doc/en/assert.rst b/doc/en/assert.rst index 511839d889a..b13a071f698 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -29,9 +29,7 @@ you will see the return value of the function call: $ pytest test_assert1.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_assert1.py F [100%] @@ -175,9 +173,7 @@ if you run this module: $ pytest test_assert2.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_assert2.py F [100%] diff --git a/doc/en/cache.rst b/doc/en/cache.rst index caa170027eb..ba9d87a5fa8 
100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -81,9 +81,7 @@ If you then run it with ``--lf``: $ pytest --lf =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures @@ -126,9 +124,7 @@ of ``FF`` and dots): $ pytest --ff =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 50 items run-last-failure: rerun previous 2 failures first @@ -257,9 +253,7 @@ You can always peek at the content of the cache using the $ pytest --cache-show =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y cachedir: $REGENDOC_TMPDIR/.pytest_cache ------------------------------- cache values ------------------------------- cache/lastfailed contains: diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 7d452b9f9c8..488b2b8746f 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -68,9 +68,7 @@ of the failing function and hide the other one: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_module.py .F [100%] diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 69a9cd18abc..125ed3aa704 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -65,9 +65,7 @@ then you can just invoke ``pytest`` without command line options: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - plugins: hypothesis-3.x.y collected 1 item mymodule.py . [100%] diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 637640062a8..9d325c30e58 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -35,9 +35,7 @@ You can then restrict a test run to only run tests marked with ``webtest``: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... 
collected 4 items / 3 deselected test_server.py::test_send_http PASSED [100%] @@ -52,9 +50,7 @@ Or the inverse, running all tests except the webtest ones: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 4 items / 1 deselected test_server.py::test_something_quick PASSED [ 33%] @@ -76,9 +72,7 @@ tests based on their module, class, method, or function name: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 1 item test_server.py::TestClass::test_method PASSED [100%] @@ -93,9 +87,7 @@ You can also select on the class: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 1 item test_server.py::TestClass::test_method PASSED [100%] @@ -110,9 +102,7 @@ Or select multiple nodes: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 2 items test_server.py::TestClass::test_method PASSED [ 50%] @@ -152,9 +142,7 @@ select tests based on their names: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 4 items / 3 deselected test_server.py::test_send_http PASSED [100%] @@ -169,9 +157,7 @@ And you can also run all tests except the ones that match the keyword: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... 
collected 4 items / 1 deselected test_server.py::test_something_quick PASSED [ 33%] @@ -188,9 +174,7 @@ Or to select "http" and "quick" tests: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 4 items / 2 deselected test_server.py::test_send_http PASSED [ 50%] @@ -232,8 +216,6 @@ You can ask which markers exist for your test suite - the list includes our just $ pytest --markers @pytest.mark.webtest: mark a test as a webtest. - @pytest.mark.hypothesis: Tests which use hypothesis. - @pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. @@ -383,9 +365,7 @@ the test needs: $ pytest -E stage2 =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_someenv.py s [100%] @@ -399,9 +379,7 @@ and here is one that specifies exactly the environment needed: $ pytest -E stage1 =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_someenv.py . [100%] @@ -413,8 +391,6 @@ The ``--markers`` option always gives you a list of available markers:: $ pytest --markers @pytest.mark.env(name): mark test to run only on named environment - @pytest.mark.hypothesis: Tests which use hypothesis. - @pytest.mark.filterwarnings(warning): add a warning filter to the given test. see https://docs.pytest.org/en/latest/warnings.html#pytest-mark-filterwarnings @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. @@ -570,9 +546,7 @@ then you will see two tests skipped and two executed tests as expected: $ pytest -rs # this option reports skip reasons =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items test_plat.py s.s. 
[100%] @@ -588,9 +562,7 @@ Note that if you specify a platform via the marker-command line option like this $ pytest -m linux =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items / 3 deselected test_plat.py . [100%] @@ -643,9 +615,7 @@ We can now use the ``-m option`` to select one set: $ pytest -m interface --tb=short =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items / 2 deselected test_module.py FF [100%] @@ -668,9 +638,7 @@ or to select both "event" and "interface" tests: $ pytest -m "interface or event" --tb=short =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items / 1 deselected test_module.py FFF [100%] diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 3db46639142..eba8279f38d 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -30,9 +30,7 @@ now execute the test specification: nonpython $ pytest test_simple.yml =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: - plugins: hypothesis-3.x.y collected 2 items test_simple.yml F. [100%] @@ -67,9 +65,7 @@ consulted when reporting in ``verbose`` mode: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: - plugins: hypothesis-3.x.y collecting ... 
collected 2 items test_simple.yml::hello FAILED [ 50%] @@ -92,9 +88,7 @@ interesting to just look at the collection tree: nonpython $ pytest --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/nonpython/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/nonpython, inifile: - plugins: hypothesis-3.x.y collected 2 items diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 133add2c608..92756e492e5 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -145,9 +145,7 @@ objects, they are still using the default pytest representation: $ pytest test_time.py --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 8 items @@ -205,9 +203,7 @@ this is a fully self-contained example which you can run with: $ pytest test_scenarios.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items test_scenarios.py .... [100%] @@ -221,9 +217,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ pytest --collect-only test_scenarios.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items @@ -289,9 +283,7 @@ Let's first see how it looks like at collection time: $ pytest test_backends.py --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items @@ -356,9 +348,7 @@ The result of this test will be successful: $ pytest test_indirect_list.py --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item @@ -492,9 +482,7 @@ If you run this with reporting for skips enabled: $ pytest -rs test_module.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_module.py .s [100%] @@ -552,9 +540,7 @@ Then run ``pytest`` 
with verbose mode and with only the ``basic`` marker: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 17 items / 14 deselected test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%] diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 53e7cd32164..394924e2d85 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -132,9 +132,7 @@ The test collection would look like this: $ pytest --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - plugins: hypothesis-3.x.y collected 2 items @@ -189,9 +187,7 @@ You can always peek at the collection tree without running tests like this: . $ pytest --collect-only pythoncollection.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/CWD/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - plugins: hypothesis-3.x.y collected 3 items @@ -263,9 +259,7 @@ file will be left out: $ pytest --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - plugins: hypothesis-3.x.y collected 0 items ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index c40e15572c9..15d71caa045 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -14,9 +14,7 @@ get on the terminal - we are working on that): assertion $ pytest failure_demo.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/assertion/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/assertion, inifile: - plugins: hypothesis-3.x.y collected 44 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 6d06856d1e2..76a1ddc807e 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -128,9 +128,7 @@ directory with the above conftest.py: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 0 items ======================= no tests ran in 0.12 seconds 
======================= @@ -190,9 +188,7 @@ and when running it will see a skipped "slow" test: $ pytest -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_module.py .s [100%] @@ -208,9 +204,7 @@ Or run it including the ``slow`` marked test: $ pytest --runslow =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_module.py .. [100%] @@ -352,10 +346,8 @@ which will add the string to the test header accordingly: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 0 items ======================= no tests ran in 0.12 seconds ======================= @@ -383,11 +375,9 @@ which will add info only when run with "--v": =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') info1: did you know that ... did you? rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 0 items ======================= no tests ran in 0.12 seconds ======================= @@ -399,9 +389,7 @@ and nothing when run plainly: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 0 items ======================= no tests ran in 0.12 seconds ======================= @@ -440,9 +428,7 @@ Now we can profile which test functions execute the slowest: $ pytest --durations=3 =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 3 items test_some_are_slow.py ... [100%] @@ -516,9 +502,7 @@ If we run this: $ pytest -rx =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 4 items test_step.py .Fx. 
[100%] @@ -601,9 +585,7 @@ We can run this: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 7 items test_step.py .Fx. [ 57%] @@ -716,9 +698,7 @@ and run them: $ pytest test_module.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_module.py FF [100%] @@ -819,9 +799,7 @@ and run it: $ pytest -s test_module.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 3 items test_module.py Esetting up a test failed! test_module.py::test_setup_fails diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index 9f13875d5c6..4dd68f8e4ac 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -73,9 +73,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this: $ pytest test_smtpsimple.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_smtpsimple.py F [100%] @@ -215,9 +213,7 @@ inspect what is going on and can now run the tests: $ pytest test_module.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_module.py FF [100%] @@ -705,9 +701,7 @@ Running the above tests results in the following test IDs being used: $ pytest --collect-only =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 10 items @@ -752,9 +746,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... 
collected 3 items test_fixture_marks.py::test_data[0] PASSED [ 33%] @@ -799,9 +791,7 @@ Here we declare an ``app`` fixture which receives the previously defined =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 2 items test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%] @@ -872,9 +862,7 @@ Let's run the tests in verbose mode and with looking at the print-output: =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.6 cachedir: .pytest_cache - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collecting ... collected 8 items test_module.py::test_0[1] SETUP otherarg 1 diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index cf94b93f458..500fc3d93cc 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -25,8 +25,6 @@ Install ``pytest`` $ pytest --version This is pytest version 4.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py - setuptools registered plugins: - hypothesis-3.x.y at $PYTHON_PREFIX/lib/python3.6/site-packages/hypothesis/extra/pytestplugin.py .. _`simpletest`: @@ -49,9 +47,7 @@ That’s it. You can now execute the test function: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_sample.py F [100%] diff --git a/doc/en/index.rst b/doc/en/index.rst index 414503e1767..7c201fbd7c5 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -29,9 +29,7 @@ To execute it: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_sample.py F [100%] diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index 500c5619f2a..099b531c2db 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -57,9 +57,7 @@ them in turn: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 3 items test_expectation.py ..F [100%] @@ -110,9 +108,7 @@ Let's run this: $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: 
$REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 3 items test_expectation.py ..x [100%] diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index 978f4dc5c94..ae1dc714996 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -330,9 +330,7 @@ Running it with the report-on-xfail option gives this output: example $ pytest -rx xfail_demo.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/example/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR/example, inifile: - plugins: hypothesis-3.x.y collected 7 items xfail_demo.py xxxxxxx [100%] diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 7db8482805d..8c21e17e582 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -42,9 +42,7 @@ Running this would result in a passed test except for the last $ pytest test_tmp_path.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_tmp_path.py F [100%] @@ -106,9 +104,7 @@ Running this would result in a passed test except for the last $ pytest test_tmpdir.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_tmpdir.py F [100%] diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index ff4e73a96c0..34c8a35db23 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -129,9 +129,7 @@ the ``self.db`` values in the traceback: $ pytest test_unittest_db.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 2 items test_unittest_db.py FF [100%] diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 9fa3d645609..3ff6a0dd5d9 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -193,9 +193,7 @@ Example: $ pytest -ra =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 6 items test_example.py .FEsxX [100%] @@ -246,9 +244,7 @@ More than one character can be used, so for example to only see failed and skipp $ pytest -rfs =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 6 items test_example.py .FEsxX [100%] @@ -283,9 +279,7 @@ captured output: $ pytest -rpP 
=========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 6 items test_example.py .FEsxX [100%] diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index 601459f1c2c..3e69d34800d 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -25,9 +25,7 @@ Running pytest now produces this output: $ pytest test_show_warnings.py =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: - plugins: hypothesis-3.x.y collected 1 item test_show_warnings.py . [100%] diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index d23a15e00bd..70bf315aa24 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -413,9 +413,7 @@ additionally it is possible to copy examples for an example folder before runnin $ pytest =========================== test session starts ============================ platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y - hypothesis profile 'default' -> database=DirectoryBasedExampleDatabase('$REGENDOC_TMPDIR/.hypothesis/examples') rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - plugins: hypothesis-3.x.y collected 2 items test_example.py .. [100%]