Skip to content

Commit 049ca8b

Browse files
authored
Return exceptions & skips correctly to pytest (#21603)
fixes #21579
1 parent f77a011 commit 049ca8b

File tree

7 files changed

+180
-9
lines changed

7 files changed

+180
-9
lines changed
Lines changed: 14 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,14 @@
1+
# Copyright (c) Microsoft Corporation. All rights reserved.
2+
# Licensed under the MIT License.
3+
4+
import pytest
5+
6+
7+
@pytest.fixture
def raise_fixture():
    # Deliberately fail during fixture setup: the test body never runs, so
    # this exercises the adapter's reporting of errors raised outside the test.
    raise Exception("Dummy exception")
10+
11+
12+
class TestSomething:
    # Consumes the erroring fixture above; the assertion is unreachable, and
    # the run is expected to be reported as a failure caused by the fixture.
    def test_a(self, raise_fixture):
        assert True

pythonFiles/tests/pytestadapter/.data/parametrize_tests.py

Lines changed: 3 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,6 @@
1+
# Copyright (c) Microsoft Corporation. All rights reserved.
2+
# Licensed under the MIT License.
3+
14
import pytest
25

36

Lines changed: 30 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,30 @@
1+
# Copyright (c) Microsoft Corporation. All rights reserved.
2+
# Licensed under the MIT License.
3+
4+
import pytest
5+
6+
# Testing pytest with skipped tests. The first test passes; the next three are skipped.
7+
8+
9+
def test_something():  # test_marker--test_something
    # Sanity-check test that always passes.
    expected = 2
    assert 1 + 1 == expected
12+
13+
14+
def test_another_thing():  # test_marker--test_another_thing
    # Skip this test with a reason, imperatively at runtime via pytest.skip.
    pytest.skip("Skipping this test for now")
17+
18+
19+
# Declarative skip via the @pytest.mark.skip decorator.
@pytest.mark.skip(
    reason="Skipping this test as it requires additional setup" # test_marker--test_complex_thing
)
def test_decorator_thing():
    # Skip this test as well, with a reason. This one uses a decorator.
    assert True
25+
26+
27+
# Conditional skip: 1 < 5 is always true, so this test is always skipped.
@pytest.mark.skipif(1 < 5, reason="is always true") # test_marker--test_complex_thing_2
def test_decorator_thing_2():
    # Skip this test as well, with a reason. This one uses a decorator with a condition.
    assert True

pythonFiles/tests/pytestadapter/expected_discovery_test_output.py

Lines changed: 8 additions & 2 deletions
Original file line numberDiff line numberDiff line change
@@ -489,15 +489,21 @@
489489
{
490490
"name": "[1]",
491491
"path": parameterize_tests_path,
492-
"lineno": "15",
492+
"lineno": find_test_line_number(
493+
"test_under_ten[1]",
494+
parameterize_tests_path,
495+
),
493496
"type_": "test",
494497
"id_": "parametrize_tests.py::test_under_ten[1]",
495498
"runID": "parametrize_tests.py::test_under_ten[1]",
496499
},
497500
{
498501
"name": "[2]",
499502
"path": parameterize_tests_path,
500-
"lineno": "15",
503+
"lineno": find_test_line_number(
504+
"test_under_ten[2]",
505+
parameterize_tests_path,
506+
),
501507
"type_": "test",
502508
"id_": "parametrize_tests.py::test_under_ten[2]",
503509
"runID": "parametrize_tests.py::test_under_ten[2]",

pythonFiles/tests/pytestadapter/expected_execution_test_output.py

Lines changed: 51 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -150,6 +150,57 @@
150150
},
151151
}
152152

153+
# This is the expected output for the error_raise_exception.py file.
# └── error_raise_exception.py
#     └── TestSomething
#         └── test_a: failure
# The test id appears both as the key and the "test" field, so bind it once.
_error_raise_test_id = "error_raise_exception.py::TestSomething::test_a"
error_raised_exception_execution_expected_output = {
    _error_raise_test_id: {
        "test": _error_raise_test_id,
        "outcome": "failure",
        "message": "ERROR MESSAGE",
        "traceback": "TRACEBACK",
        "subtest": None,
    }
}
166+
167+
# This is the expected output for the skip_tests.py file.
# └── test_something: success
# └── test_another_thing: skipped
# └── test_decorator_thing: skipped
# └── test_decorator_thing_2: skipped
def _expected_skip_row(test_id, outcome):
    # One expected-result record; the four rows differ only in id and outcome.
    return {
        "test": test_id,
        "outcome": outcome,
        "message": None,
        "traceback": None,
        "subtest": None,
    }


skip_tests_execution_expected_output = {
    test_id: _expected_skip_row(test_id, outcome)
    for test_id, outcome in (
        ("skip_tests.py::test_something", "success"),
        ("skip_tests.py::test_another_thing", "skipped"),
        ("skip_tests.py::test_decorator_thing", "skipped"),
        ("skip_tests.py::test_decorator_thing_2", "skipped"),
    )
}
202+
203+
153204
# This is the expected output for the dual_level_nested_folder.py tests
154205
# └── dual_level_nested_folder
155206
# └── test_top_folder.py

pythonFiles/tests/pytestadapter/test_execution.py

Lines changed: 15 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -56,6 +56,19 @@ def test_bad_id_error_execution():
5656
@pytest.mark.parametrize(
5757
"test_ids, expected_const",
5858
[
59+
(
60+
[
61+
"skip_tests.py::test_something",
62+
"skip_tests.py::test_another_thing",
63+
"skip_tests.py::test_decorator_thing",
64+
"skip_tests.py::test_decorator_thing_2",
65+
],
66+
expected_execution_test_output.skip_tests_execution_expected_output,
67+
),
68+
(
69+
["error_raise_exception.py::TestSomething::test_a"],
70+
expected_execution_test_output.error_raised_exception_execution_expected_output,
71+
),
5972
(
6073
[
6174
"unittest_folder/test_add.py::TestAddFunction::test_add_positive_numbers",
@@ -161,4 +174,6 @@ def test_pytest_execution(test_ids, expected_const):
161174
for key in actual_result_dict:
162175
if actual_result_dict[key]["outcome"] == "failure":
163176
actual_result_dict[key]["message"] = "ERROR MESSAGE"
177+
if actual_result_dict[key]["traceback"] != None:
178+
actual_result_dict[key]["traceback"] = "TRACEBACK"
164179
assert actual_result_dict == expected_const

pythonFiles/vscode_pytest/__init__.py

Lines changed: 59 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -69,14 +69,37 @@ def pytest_exception_interact(node, call, report):
6969
"""
7070
# call.excinfo is the captured exception of the call, if it raised as type ExceptionInfo.
7171
# call.excinfo.exconly() returns the exception as a string.
72-
if call.excinfo and call.excinfo.typename != "AssertionError":
73-
ERRORS.append(
74-
call.excinfo.exconly() + "\n Check Python Test Logs for more details."
75-
)
72+
# See if it is during discovery or execution.
73+
# if discovery, then add the error to error logs.
74+
if type(report) == pytest.CollectReport:
75+
if call.excinfo and call.excinfo.typename != "AssertionError":
76+
ERRORS.append(
77+
call.excinfo.exconly() + "\n Check Python Test Logs for more details."
78+
)
79+
else:
80+
ERRORS.append(
81+
report.longreprtext + "\n Check Python Test Logs for more details."
82+
)
7683
else:
77-
ERRORS.append(
78-
report.longreprtext + "\n Check Python Test Logs for more details."
79-
)
84+
# if execution, send this data that the given node failed.
85+
report_value = "failure"
86+
node_id = str(node.nodeid)
87+
if node_id not in collected_tests_so_far:
88+
collected_tests_so_far.append(node_id)
89+
item_result = create_test_outcome(
90+
node_id,
91+
report_value,
92+
"Test failed with exception",
93+
report.longreprtext,
94+
)
95+
collected_test = testRunResultDict()
96+
collected_test[node_id] = item_result
97+
cwd = pathlib.Path.cwd()
98+
execution_post(
99+
os.fsdecode(cwd),
100+
"success",
101+
collected_test if collected_test else None,
102+
)
80103

81104

82105
def pytest_keyboard_interrupt(excinfo):
@@ -183,6 +206,35 @@ def pytest_report_teststatus(report, config):
183206
}
184207

185208

209+
def pytest_runtest_protocol(item, nextitem):
    """Pytest hook called for each collected test item before it runs.

    Tests carrying a skip/skipif marker never reach the
    pytest_report_teststatus hook, so their "skipped" outcome is detected
    here from the item's own markers and posted to the adapter directly.
    """
    if item.own_markers:
        for marker in item.own_markers:
            # If the test is marked with skip then it will not hit the pytest_report_teststatus hook,
            # therefore we need to handle it as skipped here.
            skip_condition = False
            if marker.name == "skipif":
                # NOTE(review): any(marker.args) treats every truthy arg as
                # "will skip"; a string condition (which pytest evaluates
                # lazily) is always truthy here — confirm this is intended.
                skip_condition = any(marker.args)
            if marker.name == "skip" or skip_condition:
                node_id = str(item.nodeid)
                report_value = "skipped"
                cwd = pathlib.Path.cwd()
                # Report each node at most once per session.
                if node_id not in collected_tests_so_far:
                    collected_tests_so_far.append(node_id)
                    item_result = create_test_outcome(
                        node_id,
                        report_value,
                        None,  # no error message for a skip
                        None,  # no traceback for a skip
                    )
                    collected_test = testRunResultDict()
                    collected_test[node_id] = item_result
                    execution_post(
                        os.fsdecode(cwd),
                        "success",
                        collected_test if collected_test else None,
                    )
236+
237+
186238
def pytest_sessionfinish(session, exitstatus):
187239
"""A pytest hook that is called after pytest has fulled finished.
188240

0 commit comments

Comments
 (0)