diff --git a/Editor/Services/ITestRunnerService.cs b/Editor/Services/ITestRunnerService.cs
index b5666099..bc26da68 100644
--- a/Editor/Services/ITestRunnerService.cs
+++ b/Editor/Services/ITestRunnerService.cs
@@ -22,8 +22,9 @@ public interface ITestRunnerService
///
/// The test mode to run (EditMode or PlayMode).
/// If true, only failed test results are included in the output.
+ /// <param name="returnWithLogs">If true, all logs are included in the output.</param>
/// A filter string to select specific tests to run.
/// Task that resolves with test results when tests are complete
- Task ExecuteTestsAsync(TestMode testMode, bool returnOnlyFailures, string testFilter);
+ Task ExecuteTestsAsync(TestMode testMode, bool returnOnlyFailures, bool returnWithLogs, string testFilter);
}
-}
+}
\ No newline at end of file
diff --git a/Editor/Services/TestRunnerService.cs b/Editor/Services/TestRunnerService.cs
index a91b9866..9f4b8df5 100644
--- a/Editor/Services/TestRunnerService.cs
+++ b/Editor/Services/TestRunnerService.cs
@@ -21,6 +21,7 @@ public class TestRunnerService : ITestRunnerService, ICallbacks
private readonly TestRunnerApi _testRunnerApi;
private TaskCompletionSource _tcs;
private bool _returnOnlyFailures;
+ private bool _returnWithLogs;
private List _results;
///
@@ -77,13 +78,15 @@ public async Task> GetAllTestsAsync(string testModeFilter = "
///
/// The test mode to run (EditMode or PlayMode).
/// If true, only failed test results are included in the output.
+ /// <param name="returnWithLogs">If true, all logs are included in the output.</param>
/// A filter string to select specific tests to run.
/// Task that resolves with test results when tests are complete
- public async Task ExecuteTestsAsync(TestMode testMode, bool returnOnlyFailures, string testFilter = "")
+ public async Task ExecuteTestsAsync(TestMode testMode, bool returnOnlyFailures, bool returnWithLogs, string testFilter = "")
{
_tcs = new TaskCompletionSource();
_results = new List();
_returnOnlyFailures = returnOnlyFailures;
+ _returnWithLogs = returnWithLogs;
var filter = new Filter { testMode = testMode };
if (!string.IsNullOrEmpty(testFilter))
@@ -191,30 +194,30 @@ private async Task WaitForCompletionAsync(int timeoutSeconds)
private JObject BuildResultJson(List results, ITestResultAdaptor result)
{
- int pass = results.Count(r => r.ResultState == "Passed");
- int fail = results.Count(r => r.ResultState == "Failed");
- int skip = results.Count(r => r.ResultState == "Skipped");
-
var arr = new JArray(results
+ .Where(r => !r.HasChildren)
.Where(r => !_returnOnlyFailures || r.ResultState == "Failed")
.Select(r => new JObject {
["name"] = r.Name,
["fullName"] = r.FullName,
["state"] = r.ResultState,
["message"] = r.Message,
- ["duration"] = r.Duration
+ ["duration"] = r.Duration,
+ ["logs"] = _returnWithLogs ? r.Output : null,
+ ["stackTrace"] = r.StackTrace
}));
+ int testCount = result.PassCount + result.SkipCount + result.FailCount;
return new JObject {
["success"] = true,
["type"] = "text",
- ["message"] = $"{result.Test.Name} test run completed: {pass}/{results.Count} passed - {fail}/{results.Count} failed - {skip}/{results.Count} skipped",
+ ["message"] = $"{result.Test.Name} test run completed: {result.PassCount}/{testCount} passed - {result.FailCount}/{testCount} failed - {result.SkipCount}/{testCount} skipped",
["resultState"] = result.ResultState,
["durationSeconds"] = result.Duration,
- ["testCount"] = results.Count,
- ["passCount"] = pass,
- ["failCount"] = fail,
- ["skipCount"] = skip,
+ ["testCount"] = testCount,
+ ["passCount"] = result.PassCount,
+ ["failCount"] = result.FailCount,
+ ["skipCount"] = result.SkipCount,
["results"] = arr
};
}
diff --git a/Editor/Tools/RunTestsTool.cs b/Editor/Tools/RunTestsTool.cs
index 857eb31a..d8997194 100644
--- a/Editor/Tools/RunTestsTool.cs
+++ b/Editor/Tools/RunTestsTool.cs
@@ -37,6 +37,7 @@ public override async void ExecuteAsync(JObject parameters, TaskCompletionSource
string testModeStr = parameters?["testMode"]?.ToObject() ?? "EditMode";
string testFilter = parameters?["testFilter"]?.ToObject(); // Optional
bool returnOnlyFailures = parameters?["returnOnlyFailures"]?.ToObject() ?? false; // Optional
+ bool returnWithLogs = parameters?["returnWithLogs"]?.ToObject() ?? false; // Optional
TestMode testMode = TestMode.EditMode;
@@ -48,7 +49,7 @@ public override async void ExecuteAsync(JObject parameters, TaskCompletionSource
McpLogger.LogInfo($"Executing RunTestsTool: Mode={testMode}, Filter={testFilter ?? "(none)"}");
// Call the service to run tests
- JObject result = await _testRunnerService.ExecuteTestsAsync(testMode, returnOnlyFailures, testFilter);
+ JObject result = await _testRunnerService.ExecuteTestsAsync(testMode, returnOnlyFailures, returnWithLogs, testFilter);
tcs.SetResult(result);
}
}
diff --git a/Server~/build/tools/runTestsTool.js b/Server~/build/tools/runTestsTool.js
index 78177ee0..e9a6c428 100644
--- a/Server~/build/tools/runTestsTool.js
+++ b/Server~/build/tools/runTestsTool.js
@@ -5,8 +5,9 @@ const toolName = 'run_tests';
const toolDescription = 'Runs Unity\'s Test Runner tests';
const paramsSchema = z.object({
testMode: z.string().optional().default('EditMode').describe('The test mode to run (EditMode or PlayMode) - defaults to EditMode (optional)'),
- testFilter: z.string().optional().default('').describe('The specific test filter to run (e.g. specific test name or namespace) (optional)'),
- returnOnlyFailures: z.boolean().optional().default(true).describe('Whether to show only failed tests in the results (optional)')
+ testFilter: z.string().optional().default('').describe('The specific test filter to run (e.g. specific test name or class name, must include namespace) (optional)'),
+ returnOnlyFailures: z.boolean().optional().default(true).describe('Whether to show only failed tests in the results (optional)'),
+ returnWithLogs: z.boolean().optional().default(false).describe('Whether to return the test logs in the results (optional)')
});
/**
* Creates and registers the Run Tests tool with the MCP server
@@ -41,14 +42,15 @@ export function registerRunTestsTool(server, mcpUnity, logger) {
* @throws McpUnityError if the request to Unity fails
*/
async function toolHandler(mcpUnity, params = {}) {
- const { testMode = 'EditMode', testFilter = '', returnOnlyFailures = true } = params;
+ const { testMode = 'EditMode', testFilter = '', returnOnlyFailures = true, returnWithLogs = false } = params;
// Create and wait for the test run
const response = await mcpUnity.sendRequest({
method: toolName,
params: {
testMode,
testFilter,
- returnOnlyFailures
+ returnOnlyFailures,
+ returnWithLogs
}
});
// Process the test results
diff --git a/Server~/src/tools/runTestsTool.ts b/Server~/src/tools/runTestsTool.ts
index ea8c3ee2..f5ad715f 100644
--- a/Server~/src/tools/runTestsTool.ts
+++ b/Server~/src/tools/runTestsTool.ts
@@ -10,8 +10,9 @@ const toolName = 'run_tests';
const toolDescription = 'Runs Unity\'s Test Runner tests';
const paramsSchema = z.object({
testMode: z.string().optional().default('EditMode').describe('The test mode to run (EditMode or PlayMode) - defaults to EditMode (optional)'),
- testFilter: z.string().optional().default('').describe('The specific test filter to run (e.g. specific test name or namespace) (optional)'),
- returnOnlyFailures: z.boolean().optional().default(true).describe('Whether to show only failed tests in the results (optional)')
+ testFilter: z.string().optional().default('').describe('The specific test filter to run (e.g. specific test name or class name, must include namespace) (optional)'),
+ returnOnlyFailures: z.boolean().optional().default(true).describe('Whether to show only failed tests in the results (optional)'),
+ returnWithLogs: z.boolean().optional().default(false).describe('Whether to return the test logs in the results (optional)')
});
/**
@@ -56,7 +57,8 @@ async function toolHandler(mcpUnity: McpUnity, params: any = {}): Promise