24 changes: 22 additions & 2 deletions MCPForUnity/Editor/Services/TestRunnerService.cs
@@ -356,13 +356,33 @@ internal TestRunResult(TestRunSummary summary, IReadOnlyList<TestRunTestResult>
public int Failed => Summary.Failed;
public int Skipped => Summary.Skipped;

public object ToSerializable(string mode)
public object ToSerializable(string mode, bool includeDetails = false, bool includeFailedTests = false)
{
// Determine which results to include
IEnumerable<object> resultsToSerialize;
if (includeDetails)
{
// Include all test results
resultsToSerialize = Results.Select(r => r.ToSerializable());
}
else if (includeFailedTests)
{
// Include only failed and skipped tests
resultsToSerialize = Results
.Where(r => !string.Equals(r.State, "Passed", StringComparison.OrdinalIgnoreCase))
.Select(r => r.ToSerializable());
}
else
{
// No individual test results
resultsToSerialize = null;
}

return new
{
mode,
summary = Summary.ToSerializable(),
results = Results.Select(r => r.ToSerializable()).ToList(),
results = resultsToSerialize?.ToList(),
};
}

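For reference, a minimal sketch of the three verbosity levels the extended ToSerializable produces, built with the same internal constructors that RunTestsTests below exercises (this assumes the same internals access the EditMode test assembly has; the summary values are illustrative only):

using MCPForUnity.Editor.Services;

internal static class SerializationVerbositySketch
{
    internal static void Demo()
    {
        var summary = new TestRunSummary(
            total: 3, passed: 2, failed: 1, skipped: 0,
            durationSeconds: 0.2, resultState: "Failed");
        var result = new TestRunResult(summary, new TestRunTestResult[0]);

        var summaryOnly  = result.ToSerializable("EditMode");                            // results == null
        var failuresOnly = result.ToSerializable("EditMode", includeFailedTests: true);   // non-"Passed" entries only
        var everything   = result.ToSerializable("EditMode", includeDetails: true);       // every test result
    }
}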
40 changes: 37 additions & 3 deletions MCPForUnity/Editor/Tools/RunTests.cs
@@ -43,6 +43,27 @@ public static async Task<object> HandleCommand(JObject @params)
// Preserve default timeout if parsing fails
}

bool includeDetails = false;
bool includeFailedTests = false;
try
{
var includeDetailsToken = @params?["includeDetails"];
if (includeDetailsToken != null && bool.TryParse(includeDetailsToken.ToString(), out var parsedIncludeDetails))
{
includeDetails = parsedIncludeDetails;
}

var includeFailedTestsToken = @params?["includeFailedTests"];
if (includeFailedTestsToken != null && bool.TryParse(includeFailedTestsToken.ToString(), out var parsedIncludeFailedTests))
{
includeFailedTests = parsedIncludeFailedTests;
}
}
catch
{
// Preserve defaults if parsing fails
}

var filterOptions = ParseFilterOptions(@params);

var testService = MCPServiceLocator.Tests;
@@ -66,10 +87,9 @@ public static async Task<object> HandleCommand(JObject @params)

var result = await runTask.ConfigureAwait(true);

string message =
$"{parsedMode.Value} tests completed: {result.Passed}/{result.Total} passed, {result.Failed} failed, {result.Skipped} skipped";
string message = FormatTestResultMessage(parsedMode.Value.ToString(), result);

var data = result.ToSerializable(parsedMode.Value.ToString());
var data = result.ToSerializable(parsedMode.Value.ToString(), includeDetails, includeFailedTests);
return new SuccessResponse(message, data);
}

@@ -100,6 +120,20 @@ private static TestFilterOptions ParseFilterOptions(JObject @params)
};
}

internal static string FormatTestResultMessage(string mode, TestRunResult result)
{
string message =
$"{mode} tests completed: {result.Passed}/{result.Total} passed, {result.Failed} failed, {result.Skipped} skipped";

// Add warning when no tests matched the filter criteria
if (result.Total == 0)
{
message += " (No tests matched the specified filters)";
}

return message;
}

private static string[] ParseStringArray(JObject @params, string key)
{
var token = @params[key];
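For context, a minimal sketch (hypothetical helper, not part of this PR) of the params side that HandleCommand consumes; the handler reads the flags via bool.TryParse(token.ToString(), ...), so JSON booleans and the strings "true"/"false" are accepted, while anything else silently keeps the default of false:

using Newtonsoft.Json.Linq;

internal static class RunTestsParamsSketch
{
    // Only the two new verbosity flags are shown; the mode and filter keys are
    // unchanged in this PR and omitted here.
    internal static JObject BuildVerbosityParams()
    {
        return new JObject
        {
            ["includeFailedTests"] = true,  // serialize failed/skipped test entries only
            ["includeDetails"] = false,     // true would serialize every entry and takes precedence
        };
    }
}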
10 changes: 9 additions & 1 deletion Server/src/services/tools/run_tests.py
@@ -34,7 +34,7 @@ class RunTestsTestResult(BaseModel):
class RunTestsResult(BaseModel):
mode: str
summary: RunTestsSummary
results: list[RunTestsTestResult]
results: list[RunTestsTestResult] | None = None


class RunTestsResponse(MCPResponse):
@@ -52,6 +52,8 @@ async def run_tests(
group_names: Annotated[list[str] | str, "Same as test_names, except it allows for Regex"] | None = None,
category_names: Annotated[list[str] | str, "NUnit category names to filter by (tests marked with [Category] attribute)"] | None = None,
assembly_names: Annotated[list[str] | str, "Assembly names to filter tests by"] | None = None,
include_failed_tests: Annotated[bool, "Include details for failed/skipped tests only (default: false)"] = False,
include_details: Annotated[bool, "Include details for all tests (default: false)"] = False,
) -> RunTestsResponse:
unity_instance = get_unity_instance_from_context(ctx)

Review comment (Contributor) on lines +55 to 59:

suggestion (testing): Add server-side tests to verify wiring of include_failed_tests / include_details into the Unity command params.

The Python run_tests entrypoint now exposes these flags and forwards them as includeFailedTests / includeDetails params. Please add tests (e.g., using a mocked send_with_unity_instance) that:

  • Call run_tests with default args and assert neither key appears in params.
  • Call run_tests(include_failed_tests=True) and assert only includeFailedTests is present and True.
  • Call run_tests(include_details=True) and assert only includeDetails is present and True.
  • Optionally cover both flags True together to document combined behavior.

This will lock in the client/server contract for test-result verbosity as the feature evolves.

Suggested implementation:

from unittest.mock import AsyncMock

import pytest

from Server.src.services.tools.run_tests import run_tests


@pytest.mark.asyncio
async def test_run_tests_does_not_include_verbosity_flags_by_default(monkeypatch):
    captured_params = {}

    # The real helper is called as send_with_unity_instance(async_send_command_with_retry, unity_instance, "run_tests", params),
    # so the fake must accept the leading send function argument as well.
    async def fake_send_with_unity_instance(send_fn, unity_instance, command, params):
        nonlocal captured_params
        captured_params = params
        # Simulate a minimal successful response structure
        return {
            "mode": "edit",
            "summary": {
                "total": 0,
                "passed": 0,
                "failed": 0,
                "inconclusive": 0,
                "skipped": 0,
                "duration": 0.0,
            },
            "results": [],
        }

    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.send_with_unity_instance",
        fake_send_with_unity_instance,
    )
    # Also stub out Unity instance resolution to avoid hitting real Unity
    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.get_unity_instance_from_context",
        lambda ctx: object(),
    )

    # Act
    await run_tests(ctx=AsyncMock())  # run_tests awaits ctx.info(...), so ctx must not be None

    # Assert
    assert "includeFailedTests" not in captured_params
    assert "includeDetails" not in captured_params


@pytest.mark.asyncio
async def test_run_tests_includes_failed_tests_flag(monkeypatch):
    captured_params = {}

    async def fake_send_with_unity_instance(send_fn, unity_instance, command, params):
        nonlocal captured_params
        captured_params = params
        return {
            "mode": "edit",
            "summary": {
                "total": 0,
                "passed": 0,
                "failed": 0,
                "inconclusive": 0,
                "skipped": 0,
                "duration": 0.0,
            },
            "results": [],
        }

    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.send_with_unity_instance",
        fake_send_with_unity_instance,
    )
    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.get_unity_instance_from_context",
        lambda ctx: object(),
    )

    # Act
    await run_tests(ctx=AsyncMock(), include_failed_tests=True)

    # Assert
    assert captured_params.get("includeFailedTests") is True
    # Only includeFailedTests should be present
    assert "includeDetails" not in captured_params


@pytest.mark.asyncio
async def test_run_tests_includes_details_flag(monkeypatch):
    captured_params = {}

    async def fake_send_with_unity_instance(send_fn, unity_instance, command, params):
        nonlocal captured_params
        captured_params = params
        return {
            "mode": "edit",
            "summary": {
                "total": 0,
                "passed": 0,
                "failed": 0,
                "inconclusive": 0,
                "skipped": 0,
                "duration": 0.0,
            },
            "results": [],
        }

    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.send_with_unity_instance",
        fake_send_with_unity_instance,
    )
    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.get_unity_instance_from_context",
        lambda ctx: object(),
    )

    # Act
    await run_tests(ctx=AsyncMock(), include_details=True)

    # Assert
    assert captured_params.get("includeDetails") is True
    # Only includeDetails should be present
    assert "includeFailedTests" not in captured_params


@pytest.mark.asyncio
async def test_run_tests_includes_both_flags_when_true(monkeypatch):
    captured_params = {}

    async def fake_send_with_unity_instance(send_fn, unity_instance, command, params):
        nonlocal captured_params
        captured_params = params
        return {
            "mode": "edit",
            "summary": {
                "total": 0,
                "passed": 0,
                "failed": 0,
                "inconclusive": 0,
                "skipped": 0,
                "duration": 0.0,
            },
            "results": [],
        }

    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.send_with_unity_instance",
        fake_send_with_unity_instance,
    )
    monkeypatch.setattr(
        "Server.src.services.tools.run_tests.get_unity_instance_from_context",
        lambda ctx: object(),
    )

    # Act
    await run_tests(ctx=AsyncMock(), include_failed_tests=True, include_details=True)

    # Assert
    assert captured_params.get("includeFailedTests") is True
    assert captured_params.get("includeDetails") is True

I assumed a pytest-based test suite and a test file at Server/tests/services/tools/test_run_tests.py. If your tests live elsewhere (e.g., tests/server/services/tools/test_run_tests.py or similar), adjust the file path and the import/monkeypatch targets accordingly so that:

  • from Server.src.services.tools.run_tests import run_tests matches your actual import path.
  • The monkeypatch.setattr targets for send_with_unity_instance and get_unity_instance_from_context use the correct fully-qualified module name.

If your async test runner differs (e.g., pytest.mark.anyio or a custom helper), swap @pytest.mark.asyncio for the appropriate decorator.

@@ -88,6 +90,12 @@ def _coerce_string_list(value) -> list[str] | None:
if assembly_names_list:
params["assemblyNames"] = assembly_names_list

# Add verbosity parameters
if include_failed_tests:
params["includeFailedTests"] = True
if include_details:
params["includeDetails"] = True

response = await send_with_unity_instance(async_send_command_with_retry, unity_instance, "run_tests", params)
await ctx.info(f'Response {response}')
return RunTestsResponse(**response) if isinstance(response, dict) else response
@@ -5,7 +5,7 @@
using NUnit.Framework;
using UnityEngine;

namespace MCPForUnityTests.EditMode.Helpers
namespace MCPForUnityTests.Editor.Helpers
{
/// <summary>
/// Tests for Matrix4x4Converter to ensure it safely serializes matrices
@@ -7,11 +7,13 @@
using MCPForUnity.Editor.Tools;
using Newtonsoft.Json.Linq;

namespace Tests.EditMode.Tools
namespace MCPForUnityTests.Editor.Tools
{
/// <summary>
/// Tests for domain reload resilience - ensuring MCP requests succeed even during Unity domain reloads.
/// </summary>
[Category("domain_reload")]
[Explicit("Intentionally triggers script compilation/domain reload; run explicitly to avoid slowing/flaking cold-start EditMode runs.")]
public class DomainReloadResilienceTests
{
private const string TempDir = "Assets/Temp/DomainReloadTests";
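With [Category("domain_reload")] and [Explicit] applied, these tests no longer run in a default cold-start EditMode pass but can still be targeted on purpose. A minimal sketch (not part of this PR) of selecting them by category through the public Test Runner API, which the MCP category filter presumably maps onto; how [Explicit] interacts with a category-only filter depends on the installed Test Framework version:

using UnityEditor.TestTools.TestRunner.Api;
using UnityEngine;

public static class DomainReloadTestLauncher
{
    public static void RunDomainReloadTests()
    {
        // Launch an EditMode run restricted to the "domain_reload" category.
        var api = ScriptableObject.CreateInstance<TestRunnerApi>();
        api.Execute(new ExecutionSettings(new Filter
        {
            testMode = TestMode.EditMode,
            categoryNames = new[] { "domain_reload" },
        }));
    }
}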
@@ -8,7 +8,7 @@
using System.IO;
using System.Text.RegularExpressions;

namespace Tests.EditMode
namespace MCPForUnityTests.Editor.Tools
{
/// <summary>
/// Tests specifically for MCP tool parameter handling issues.
@@ -1,9 +1,10 @@
using System;
using System.Collections;
using Newtonsoft.Json.Linq;
using NUnit.Framework;
using UnityEditor;
using UnityEngine;
using System.Threading;
using UnityEngine.TestTools;
using MCPForUnity.Editor.Helpers;
using MCPForUnity.Editor.Tools;
using MCPForUnityTests.Editor.Tools.Fixtures;
@@ -14,16 +15,17 @@ public class ManageScriptableObjectTests
{
private const string TempRoot = "Assets/Temp/ManageScriptableObjectTests";
private const string NestedFolder = TempRoot + "/Nested/Deeper";
private const double UnityReadyTimeoutSeconds = 180.0;

private string _createdAssetPath;
private string _createdGuid;
private string _matAPath;
private string _matBPath;

[SetUp]
public void SetUp()
[UnitySetUp]
public IEnumerator SetUp()
{
WaitForUnityReady();
yield return WaitForUnityReady(UnityReadyTimeoutSeconds);
EnsureFolder("Assets/Temp");
// Start from a clean slate every time (prevents intermittent setup failures).
if (AssetDatabase.IsValidFolder(TempRoot))
@@ -47,7 +49,7 @@ public void SetUp()
AssetDatabase.CreateAsset(new Material(shader), _matBPath);
AssetDatabase.SaveAssets();
AssetDatabase.Refresh();
WaitForUnityReady();
yield return WaitForUnityReady(UnityReadyTimeoutSeconds);
}

[TearDown]
@@ -308,7 +310,7 @@ private static JObject ToJObject(object result)
return result as JObject ?? JObject.FromObject(result);
}

private static void WaitForUnityReady(double timeoutSeconds = 30.0)
private static IEnumerator WaitForUnityReady(double timeoutSeconds = 30.0)
{
// Some EditMode tests trigger script compilation/domain reload. Tools like ManageScriptableObject
// intentionally return "compiling_or_reloading" during these windows. Wait until Unity is stable
@@ -320,7 +322,7 @@
{
Assert.Fail($"Timed out waiting for Unity to finish compiling/updating (>{timeoutSeconds:0.0}s).");
}
Thread.Sleep(50);
yield return null; // yield to the editor loop so importing/compiling can actually progress
}
}
}
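Condensed, the pattern adopted above: [UnitySetUp] lets SetUp return an IEnumerator, and yielding null instead of Thread.Sleep hands control back to the editor loop so compilation and imports can progress. A minimal standalone sketch; the isCompiling/isUpdating condition is an assumption, since the real WaitForUnityReady body is collapsed in this diff:

using System.Collections;
using NUnit.Framework;
using UnityEditor;
using UnityEngine.TestTools;

public class CoroutineWaitSketch
{
    [UnitySetUp]
    public IEnumerator SetUp()
    {
        double deadline = EditorApplication.timeSinceStartup + 180.0;
        while (EditorApplication.isCompiling || EditorApplication.isUpdating)
        {
            if (EditorApplication.timeSinceStartup > deadline)
                Assert.Fail("Timed out waiting for Unity to finish compiling/updating (>180s).");
            yield return null; // let the editor loop run so the work can actually finish
        }
    }
}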
@@ -0,0 +1,66 @@
using System;
using Newtonsoft.Json.Linq;
using NUnit.Framework;
using MCPForUnity.Editor.Services;

namespace MCPForUnityTests.Editor.Tools
{
/// <summary>
/// Tests for RunTests tool functionality.
/// Note: We cannot easily test the full HandleCommand because it would create
/// recursive test runner calls. Instead, we test the message formatting logic.
/// </summary>
public class RunTestsTests
{
[Test]
public void FormatResultMessage_WithNoTests_IncludesWarning()
{
// Arrange
var summary = new TestRunSummary(
total: 0,
passed: 0,
failed: 0,
skipped: 0,
durationSeconds: 0.0,
resultState: "Passed"
);
var result = new TestRunResult(summary, new TestRunTestResult[0]);

// Act
string message = MCPForUnity.Editor.Tools.RunTests.FormatTestResultMessage("EditMode", result);

// Assert - THIS IS THE NEW FEATURE
Assert.IsTrue(
message.Contains("No tests matched"),
$"Expected warning when total=0, but got: '{message}'"
);
}

[Test]
public void FormatResultMessage_WithTests_NoWarning()
{
// Arrange
var summary = new TestRunSummary(
total: 5,
passed: 4,
failed: 1,
skipped: 0,
durationSeconds: 1.5,
resultState: "Failed"
);
var result = new TestRunResult(summary, new TestRunTestResult[0]);

// Act
string message = MCPForUnity.Editor.Tools.RunTests.FormatTestResultMessage("EditMode", result);

// Assert
Assert.IsFalse(
message.Contains("No tests matched"),
$"Should not have warning when tests exist, but got: '{message}'"
);
Assert.IsTrue(message.Contains("4/5 passed"), "Should contain pass ratio");
}

// Use MCPForUnity.Editor.Tools.RunTests.FormatTestResultMessage directly.
}
}

Some generated files are not rendered by default.