Commit 1dd8a4b

switch testing output to test result panel (#22039)

Closes #21861 and related issues.

Co-authored-by: Courtney Webster <[email protected]>

1 parent 055a352 · commit 1dd8a4b

14 files changed: +298 −83 lines
pythonFiles/tests/pytestadapter/.data/test_logging.py

Lines changed: 35 additions & 0 deletions

@@ -0,0 +1,35 @@
+# Copyright (c) Microsoft Corporation. All rights reserved.
+# Licensed under the MIT License.
+import logging
+import sys
+
+
+def test_logging2(caplog):
+    logger = logging.getLogger(__name__)
+    caplog.set_level(logging.DEBUG)  # Set minimum log level to capture
+
+    logger.debug("This is a debug message.")
+    logger.info("This is an info message.")
+    logger.warning("This is a warning message.")
+    logger.error("This is an error message.")
+    logger.critical("This is a critical message.")
+
+    # Printing to stdout and stderr
+    print("This is a stdout message.")
+    print("This is a stderr message.", file=sys.stderr)
+    assert False
+
+
+def test_logging(caplog):
+    logger = logging.getLogger(__name__)
+    caplog.set_level(logging.DEBUG)  # Set minimum log level to capture
+
+    logger.debug("This is a debug message.")
+    logger.info("This is an info message.")
+    logger.warning("This is a warning message.")
+    logger.error("This is an error message.")
+    logger.critical("This is a critical message.")
+
+    # Printing to stdout and stderr
+    print("This is a stdout message.")
+    print("This is a stderr message.", file=sys.stderr)

pythonFiles/tests/pytestadapter/expected_execution_test_output.py

Lines changed: 28 additions & 0 deletions

@@ -596,3 +596,31 @@
         "subtest": None,
     }
 }
+
+
+# This is the expected output for the test logging file.
+# └── test_logging.py
+#    └── test_logging2: failure
+#    └── test_logging: success
+test_logging_path = TEST_DATA_PATH / "test_logging.py"
+
+logging_test_expected_execution_output = {
+    get_absolute_test_id("test_logging.py::test_logging2", test_logging_path): {
+        "test": get_absolute_test_id(
+            "test_logging.py::test_logging2", test_logging_path
+        ),
+        "outcome": "failure",
+        "message": "ERROR MESSAGE",
+        "traceback": None,
+        "subtest": None,
+    },
+    get_absolute_test_id("test_logging.py::test_logging", test_logging_path): {
+        "test": get_absolute_test_id(
+            "test_logging.py::test_logging", test_logging_path
+        ),
+        "outcome": "success",
+        "message": None,
+        "traceback": None,
+        "subtest": None,
+    },
+}
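
A rough illustration of how an expected-output dict like the one above is consumed: the adapter tests compare the actual result payload against it per test ID. The loop below is a hedged sketch; actual_results is a hypothetical stand-in for the collected run data, and the real comparison lives in test_execution.py.

# Hedged sketch; actual_results is hypothetical, not from the repo.
for test_id, expected in logging_test_expected_execution_output.items():
    actual = actual_results[test_id]
    assert actual["outcome"] == expected["outcome"]
    assert actual["subtest"] == expected["subtest"]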

pythonFiles/tests/pytestadapter/helpers.py

Lines changed: 1 addition & 0 deletions

@@ -129,6 +129,7 @@ def runner_with_cwd(
         "pytest",
         "-p",
         "vscode_pytest",
+        "-s",
     ] + args
     listener: socket.socket = create_server()
     _, port = listener.getsockname()
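
The added "-s" is pytest's shorthand for --capture=no: it stops pytest from swallowing stdout/stderr, which is what lets test output travel on to the Test Results panel. A hedged sketch of roughly the invocation runner_with_cwd assembles ("tests/" and the surrounding call are simplified placeholders):

# Sketch only: approximate shape of the pytest invocation. "-s"
# (= --capture=no) lets print() and log output pass through pytest.
import subprocess
import sys

subprocess.run(
    [sys.executable, "-m", "pytest", "-p", "vscode_pytest", "-s", "tests/"],
    check=False,
)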

pythonFiles/tests/pytestadapter/test_execution.py

Lines changed: 17 additions & 10 deletions

@@ -215,23 +215,30 @@ def test_bad_id_error_execution():
             ],
             expected_execution_test_output.doctest_pytest_expected_execution_output,
         ),
+        (
+            ["test_logging.py::test_logging2", "test_logging.py::test_logging"],
+            expected_execution_test_output.logging_test_expected_execution_output,
+        ),
     ],
 )
 def test_pytest_execution(test_ids, expected_const):
     """
     Test that pytest execution works as expected: running pytest always succeeds,
     but the actual test results are a mix of successes and failures:
-    1. uf_execution_expected_output: unittest tests run on multiple files.
-    2. uf_single_file_expected_output: test run on a single file.
-    3. uf_single_method_execution_expected_output: test run on a single method in a file.
-    4. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
-    5. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
-    6. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
+    1. skip_tests_execution_expected_output: test run on a file with skipped tests.
+    2. error_raised_exception_execution_expected_output: test run on a file that raises an exception.
+    3. uf_execution_expected_output: unittest tests run on multiple files.
+    4. uf_single_file_expected_output: test run on a single file.
+    5. uf_single_method_execution_expected_output: test run on a single method in a file.
+    6. uf_non_adjacent_tests_execution_expected_output: test run on unittests in two files with single selection in test explorer.
+    7. unit_pytest_same_file_execution_expected_output: test run on a file with both unittest and pytest tests.
+    8. dual_level_nested_folder_execution_expected_output: test run on a file with one test file
     at the top level and one test file in a nested folder.
-    7. double_nested_folder_expected_execution_output: test run on a double nested folder.
-    8. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
-    9. single_parametrize_tests_expected_execution_output: test run on single parametrize test.
-    10. doctest_pytest_expected_execution_output: test run on doctest file.
+    9. double_nested_folder_expected_execution_output: test run on a double nested folder.
+    10. parametrize_tests_expected_execution_output: test run on a parametrize test with 3 inputs.
+    11. single_parametrize_tests_expected_execution_output: test run on a single parametrize test.
+    12. doctest_pytest_expected_execution_output: test run on a doctest file.
+    13. logging_test_expected_execution_output: test run on a file with logging.

     Keyword arguments:
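
For context, the parametrize pattern this test uses, reduced to toy data: each (test_ids, expected_const) pair becomes its own test case. The sketch below is illustrative only; the real pairs reference the constants in expected_execution_test_output.

# Toy-data sketch of the parametrize pattern; values are placeholders.
import pytest


@pytest.mark.parametrize(
    ("test_ids", "expected_const"),
    [
        (["test_logging.py::test_logging2"], {"outcome": "failure"}),
        (["test_logging.py::test_logging"], {"outcome": "success"}),
    ],
)
def test_pytest_execution_sketch(test_ids, expected_const):
    # Each pair arrives as one test case with its own ids and expectation.
    assert isinstance(test_ids, list) and isinstance(expected_const, dict)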

pythonFiles/unittestadapter/execution.py

Lines changed: 0 additions & 2 deletions

@@ -293,8 +293,6 @@ def post_response(
             )
             # Clear the buffer as complete JSON object is received
             buffer = b""
-
-            # Process the JSON data
             break
         except json.JSONDecodeError:
             # JSON decoding error, the complete JSON object is not yet received

pythonFiles/vscode_pytest/run_pytest_script.py

Lines changed: 0 additions & 2 deletions

@@ -51,8 +51,6 @@
             )
             # Clear the buffer as complete JSON object is received
             buffer = b""
-
-            # Process the JSON data
             print("Received JSON data in run script")
             break
         except json.JSONDecodeError:
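
Both deletions above touch the same receive loop: bytes are appended to a buffer and json.loads is retried until a complete object parses, at which point the buffer is cleared. A minimal self-contained sketch of that pattern follows; the function name and read size are illustrative, not the extension's actual implementation.

# Illustrative sketch of the buffer-until-parse pattern.
import json
import socket


def read_json(sock: socket.socket) -> dict:
    buffer = b""
    while True:
        chunk = sock.recv(4096)
        if not chunk:
            raise ConnectionError("socket closed before a full JSON object arrived")
        buffer += chunk
        try:
            data = json.loads(buffer.decode("utf-8"))
        except json.JSONDecodeError:
            continue  # complete JSON object not yet received; keep reading
        return data  # buffer held exactly one complete object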

src/client/testing/testController/common/resultResolver.ts

Lines changed: 18 additions & 30 deletions

@@ -20,7 +20,7 @@ import { clearAllChildren, createErrorTestItem, getTestCaseNodes } from './testI
 import { sendTelemetryEvent } from '../../../telemetry';
 import { EventName } from '../../../telemetry/constants';
 import { splitLines } from '../../../common/stringUtils';
-import { buildErrorNodeOptions, fixLogLines, populateTestTree, splitTestNameWithRegex } from './utils';
+import { buildErrorNodeOptions, populateTestTree, splitTestNameWithRegex } from './utils';
 import { Deferred } from '../../../common/utils/async';

 export class PythonResultResolver implements ITestResultResolver {
@@ -151,15 +151,16 @@ export class PythonResultResolver implements ITestResultResolver {
                 const tempArr: TestItem[] = getTestCaseNodes(i);
                 testCases.push(...tempArr);
             });
+            const testItem = rawTestExecData.result[keyTemp];

-            if (rawTestExecData.result[keyTemp].outcome === 'error') {
-                const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
+            if (testItem.outcome === 'error') {
+                const rawTraceback = testItem.traceback ?? '';
                 const traceback = splitLines(rawTraceback, {
                     trim: false,
                     removeEmptyEntries: true,
                 }).join('\r\n');
-                const text = `${rawTestExecData.result[keyTemp].test} failed with error: ${
-                    rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
+                const text = `${testItem.test} failed with error: ${
+                    testItem.message ?? testItem.outcome
                 }\r\n${traceback}\r\n`;
                 const message = new TestMessage(text);

@@ -170,23 +171,17 @@ export class PythonResultResolver implements ITestResultResolver {
                         if (indiItem.uri && indiItem.range) {
                             message.location = new Location(indiItem.uri, indiItem.range);
                             runInstance.errored(indiItem, message);
-                            runInstance.appendOutput(fixLogLines(text));
                         }
                     }
                 });
-            } else if (
-                rawTestExecData.result[keyTemp].outcome === 'failure' ||
-                rawTestExecData.result[keyTemp].outcome === 'passed-unexpected'
-            ) {
-                const rawTraceback = rawTestExecData.result[keyTemp].traceback ?? '';
+            } else if (testItem.outcome === 'failure' || testItem.outcome === 'passed-unexpected') {
+                const rawTraceback = testItem.traceback ?? '';
                 const traceback = splitLines(rawTraceback, {
                     trim: false,
                     removeEmptyEntries: true,
                 }).join('\r\n');

-                const text = `${rawTestExecData.result[keyTemp].test} failed: ${
-                    rawTestExecData.result[keyTemp].message ?? rawTestExecData.result[keyTemp].outcome
-                }\r\n${traceback}\r\n`;
+                const text = `${testItem.test} failed: ${testItem.message ?? testItem.outcome}\r\n${traceback}\r\n`;
                 const message = new TestMessage(text);

                 // note that keyTemp is a runId for unittest library...
@@ -197,14 +192,10 @@ export class PythonResultResolver implements ITestResultResolver {
                         if (indiItem.uri && indiItem.range) {
                             message.location = new Location(indiItem.uri, indiItem.range);
                             runInstance.failed(indiItem, message);
-                            runInstance.appendOutput(fixLogLines(text));
                         }
                     }
                 });
-            } else if (
-                rawTestExecData.result[keyTemp].outcome === 'success' ||
-                rawTestExecData.result[keyTemp].outcome === 'expected-failure'
-            ) {
+            } else if (testItem.outcome === 'success' || testItem.outcome === 'expected-failure') {
                 const grabTestItem = this.runIdToTestItem.get(keyTemp);
                 const grabVSid = this.runIdToVSid.get(keyTemp);
                 if (grabTestItem !== undefined) {
@@ -216,7 +207,7 @@ export class PythonResultResolver implements ITestResultResolver {
                         }
                     });
                 }
-            } else if (rawTestExecData.result[keyTemp].outcome === 'skipped') {
+            } else if (testItem.outcome === 'skipped') {
                 const grabTestItem = this.runIdToTestItem.get(keyTemp);
                 const grabVSid = this.runIdToVSid.get(keyTemp);
                 if (grabTestItem !== undefined) {
@@ -228,11 +219,11 @@ export class PythonResultResolver implements ITestResultResolver {
                         }
                     });
                 }
-            } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-failure') {
+            } else if (testItem.outcome === 'subtest-failure') {
                 // split on [] or () based on how the subtest is setup.
                 const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
                 const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
-                const data = rawTestExecData.result[keyTemp];
+                const data = testItem;
                 // find the subtest's parent test item
                 if (parentTestItem) {
                     const subtestStats = this.subTestStats.get(parentTestCaseId);
@@ -243,20 +234,19 @@ export class PythonResultResolver implements ITestResultResolver {
                             failed: 1,
                             passed: 0,
                         });
-                        runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
                         // clear since subtest items don't persist between runs
                         clearAllChildren(parentTestItem);
                     }
                     const subTestItem = this.testController?.createTestItem(subtestId, subtestId);
-                    runInstance.appendOutput(fixLogLines(`${subtestId} Failed\r\n`));
                     // create a new test item for the subtest
                     if (subTestItem) {
                         const traceback = data.traceback ?? '';
-                        const text = `${data.subtest} Failed: ${data.message ?? data.outcome}\r\n${traceback}\r\n`;
-                        runInstance.appendOutput(fixLogLines(text));
+                        const text = `${data.subtest} failed: ${
+                            testItem.message ?? testItem.outcome
+                        }\r\n${traceback}\r\n`;
                         parentTestItem.children.add(subTestItem);
                         runInstance.started(subTestItem);
-                        const message = new TestMessage(rawTestExecData?.result[keyTemp].message ?? '');
+                        const message = new TestMessage(text);
                         if (parentTestItem.uri && parentTestItem.range) {
                             message.location = new Location(parentTestItem.uri, parentTestItem.range);
                         }
@@ -267,7 +257,7 @@ export class PythonResultResolver implements ITestResultResolver {
                 } else {
                     throw new Error('Parent test item not found');
                 }
-            } else if (rawTestExecData.result[keyTemp].outcome === 'subtest-success') {
+            } else if (testItem.outcome === 'subtest-success') {
                 // split on [] or () based on how the subtest is setup.
                 const [parentTestCaseId, subtestId] = splitTestNameWithRegex(keyTemp);
                 const parentTestItem = this.runIdToTestItem.get(parentTestCaseId);
@@ -279,7 +269,6 @@ export class PythonResultResolver implements ITestResultResolver {
                         subtestStats.passed += 1;
                     } else {
                         this.subTestStats.set(parentTestCaseId, { failed: 0, passed: 1 });
-                        runInstance.appendOutput(fixLogLines(`${parentTestCaseId} [subtests]:\r\n`));
                         // clear since subtest items don't persist between runs
                         clearAllChildren(parentTestItem);
                     }
@@ -289,7 +278,6 @@ export class PythonResultResolver implements ITestResultResolver {
                         parentTestItem.children.add(subTestItem);
                         runInstance.started(subTestItem);
                         runInstance.passed(subTestItem);
-                        runInstance.appendOutput(fixLogLines(`${subtestId} Passed\r\n`));
                     } else {
                         throw new Error('Unable to create new child node for subtest');
                     }
