
Commit ea76858

Handle error tests as a different test icon (#21630)
Fixes #21625. Adds a few things:
- returns an "error" outcome for any caught exception in pytest that is not an assertion error
- adds error node type handling to the result resolver
- updates the tests for both files
1 parent e07aa5e · commit ea76858
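In short: a test that fails a plain assertion keeps the "failure" outcome, while a test that raises any other exception is now reported with the "error" outcome and gets the error icon in the Test Explorer. A minimal sketch of the two cases (hypothetical file name, not part of this commit):

```python
# test_outcomes_example.py -- illustrative only, not part of this commit.


def test_assertion_failure():
    # An AssertionError is still reported with outcome "failure".
    assert 1 == 2


def test_raised_exception():
    # Any other uncaught exception is now reported with outcome "error".
    raise ValueError("something unexpected went wrong")
```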

File tree

5 files changed (+71, -5 lines)

pythonFiles/tests/pytestadapter/expected_execution_test_output.py

Lines changed: 1 addition & 1 deletion
```diff
@@ -157,7 +157,7 @@
 error_raised_exception_execution_expected_output = {
     "error_raise_exception.py::TestSomething::test_a": {
         "test": "error_raise_exception.py::TestSomething::test_a",
-        "outcome": "failure",
+        "outcome": "error",
         "message": "ERROR MESSAGE",
         "traceback": "TRACEBACK",
         "subtest": None,
```

pythonFiles/tests/pytestadapter/test_execution.py

Lines changed: 4 additions & 1 deletion
```diff
@@ -174,7 +174,10 @@ def test_pytest_execution(test_ids, expected_const):
         assert a["cwd"] == os.fspath(TEST_DATA_PATH)
         actual_result_dict.update(a["result"])
     for key in actual_result_dict:
-        if actual_result_dict[key]["outcome"] == "failure":
+        if (
+            actual_result_dict[key]["outcome"] == "failure"
+            or actual_result_dict[key]["outcome"] == "error"
+        ):
             actual_result_dict[key]["message"] = "ERROR MESSAGE"
         if actual_result_dict[key]["traceback"] != None:
             actual_result_dict[key]["traceback"] = "TRACEBACK"
```

pythonFiles/vscode_pytest/__init__.py

Lines changed: 4 additions & 2 deletions
```diff
@@ -82,7 +82,9 @@ def pytest_exception_interact(node, call, report):
         )
     else:
         # if execution, send this data that the given node failed.
-        report_value = "failure"
+        report_value = "error"
+        if call.excinfo.typename == "AssertionError":
+            report_value = "failure"
         node_id = str(node.nodeid)
         if node_id not in collected_tests_so_far:
             collected_tests_so_far.append(node_id)
@@ -119,7 +121,7 @@ class TestOutcome(Dict):
     """

     test: str
-    outcome: Literal["success", "failure", "skipped"]
+    outcome: Literal["success", "failure", "skipped", "error"]
     message: Union[str, None]
     traceback: Union[str, None]
     subtest: Optional[str]
```
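To illustrate the `call.excinfo.typename` check in the hook above, a small sketch (assuming pytest is installed): `pytest.raises` yields the same `ExceptionInfo` type the hook receives, and its `typename` property is the exception class name that gets compared against `"AssertionError"`.

```python
# Illustrative sketch only; shows what call.excinfo.typename evaluates to
# for the two cases the hook distinguishes above.
import pytest

with pytest.raises(ValueError) as excinfo:
    raise ValueError("boom")
assert excinfo.typename == "ValueError"  # non-assertion -> report_value stays "error"

with pytest.raises(AssertionError) as excinfo:
    assert False
assert excinfo.typename == "AssertionError"  # assertion -> report_value becomes "failure"
```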

src/client/testing/testController/common/resultResolver.ts

Lines changed: 23 additions & 1 deletion
```diff
@@ -102,7 +102,29 @@ export class PythonResultResolver implements ITestResultResolver {
                     testCases.push(...tempArr);
                 });

-                if (
+                if (rawTestExecData.result[keyTemp].outcome === 'error') {
+                    const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
+                    const traceback = splitLines(rawTraceback, {
+                        trim: false,
+                        removeEmptyEntries: true,
+                    }).join('\r\n');
+                    const text = `${rawTestExecData.result[keyTemp].test} failed with error: ${
+                        rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
+                    }\r\n${traceback}\r\n`;
+                    const message = new TestMessage(text);
+
+                    const grabVSid = this.runIdToVSid.get(keyTemp);
+                    // search through freshly built array of testItem to find the failed test and update UI.
+                    testCases.forEach((indiItem) => {
+                        if (indiItem.id === grabVSid) {
+                            if (indiItem.uri && indiItem.range) {
+                                message.location = new Location(indiItem.uri, indiItem.range);
+                                runInstance.errored(indiItem, message);
+                                runInstance.appendOutput(fixLogLines(text));
+                            }
+                        }
+                    });
+                } else if (
                     rawTestExecData.result[keyTemp].outcome === 'failure' ||
                     rawTestExecData.result[keyTemp].outcome === 'passed-unexpected'
                 ) {
```

src/test/testing/testController/resultResolver.unit.test.ts

Lines changed: 39 additions & 0 deletions
```diff
@@ -390,6 +390,45 @@ suite('Result Resolver tests', () => {
         // verify that the passed function was called for the single test item
         runInstance.verify((r) => r.skipped(typemoq.It.isAny()), typemoq.Times.once());
     });
+    test('resolveExecution handles error correctly as test outcome', async () => {
+        // test specific constants used expected values
+        testProvider = 'pytest';
+        workspaceUri = Uri.file('/foo/bar');
+        resultResolver = new ResultResolver.PythonResultResolver(
+            testControllerMock.object,
+            testProvider,
+            workspaceUri,
+        );
+        // add a mock test item to the map of known VSCode ids to run ids
+        resultResolver.runIdToVSid.set('mockTestItem1', 'mockTestItem1');
+        resultResolver.runIdToVSid.set('mockTestItem2', 'mockTestItem2');
+
+        // add this mock test to the map of known test items
+        resultResolver.runIdToTestItem.set('mockTestItem1', mockTestItem1);
+        resultResolver.runIdToTestItem.set('mockTestItem2', mockTestItem2);
+
+        // create a successful payload with a single test called mockTestItem1
+        const successPayload: ExecutionTestPayload = {
+            cwd: workspaceUri.fsPath,
+            status: 'success',
+            result: {
+                mockTestItem1: {
+                    test: 'test',
+                    outcome: 'error', // failure, passed-unexpected, skipped, success, expected-failure, subtest-failure, subtest-succcess
+                    message: 'message',
+                    traceback: 'traceback',
+                    subtest: 'subtest',
+                },
+            },
+            error: '',
+        };
+
+        // call resolveExecution
+        resultResolver.resolveExecution(successPayload, runInstance.object);
+
+        // verify that the passed function was called for the single test item
+        runInstance.verify((r) => r.errored(typemoq.It.isAny(), typemoq.It.isAny()), typemoq.Times.once());
+    });
     test('resolveExecution handles success correctly', async () => {
         // test specific constants used expected values
         testProvider = 'pytest';
```
