From 0d091106853e10c38f24bfc30a6e69a73876bd9a Mon Sep 17 00:00:00 2001 From: James Devine Date: Fri, 17 Apr 2026 23:03:20 +0100 Subject: [PATCH 01/20] feat: generate mcp-config.json from MCPG runtime output instead of compile time Replaces the compile-time generate_mcp_client_config() approach with runtime conversion of MCPG's actual gateway output, matching gh-aw's convert_gateway_config_copilot.cjs pattern. Changes: - Remove generate_mcp_client_config() and {{ mcp_client_config }} template marker - MCPG stdout is now captured to gateway-output.json - After health check, poll until gateway output contains valid JSON - Use jq to transform URLs (127.0.0.1 -> host.docker.internal) and add tools: [*] - Apply same changes to both standalone and 1ES templates - Update AGENTS.md documentation This ensures the Copilot CLI config reflects MCPG's actual runtime state rather than a compile-time prediction, fixing agents not seeing configured MCPs. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- AGENTS.md | 47 ++++------------ src/compile/common.rs | 114 -------------------------------------- src/compile/onees.rs | 5 +- src/compile/standalone.rs | 3 - src/data/1es-base.yml | 75 +++++++++++++++++-------- src/data/base.yml | 75 +++++++++++++++++-------- 6 files changed, 115 insertions(+), 204 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 17f2842..0fd3023 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -795,41 +795,18 @@ If no passthrough env vars are needed, this marker is replaced with an empty str ## {{ mcp_client_config }} -Should be replaced with the Copilot CLI `mcp-config.json` content, generated at compile time from the MCPG server configuration. This follows gh-aw's pattern where `convert_gateway_config_copilot.cjs` produces per-server routed URLs. - -MCPG runs in routed mode by default, exposing each backend at `/mcp/{serverID}`. 
The generated JSON lists one entry per MCPG-managed server with: -- `type: "http"` — Copilot CLI HTTP transport -- `url` — routed endpoint (`http://host.docker.internal:{port}/mcp/{name}`) -- `headers` — Bearer auth with the gateway API key (ADO variable `$(MCP_GATEWAY_API_KEY)`) -- `tools: ["*"]` — allow all tools (Copilot CLI requirement) - -**Variable expansion note:** The `$(MCP_GATEWAY_API_KEY)` token in the generated JSON uses ADO macro syntax. Although the JSON is written to disk via a quoted bash heredoc (`<< 'EOF'`), which prevents bash `$(...)` command substitution, ADO macro expansion processes the entire script body *before* bash executes it. The real API key value is therefore substituted by ADO at pipeline runtime, and the file on disk contains the actual secret — Copilot CLI does not need to perform any variable expansion. - -Server names are validated for URL path safety (no `/`, `#`, `?`, `%`, or spaces). Server entries are sorted alphabetically for deterministic output. - -Example output: -```json -{ - "mcpServers": { - "azure-devops": { - "type": "http", - "url": "http://host.docker.internal:80/mcp/azure-devops", - "headers": { - "Authorization": "Bearer $(MCP_GATEWAY_API_KEY)" - }, - "tools": ["*"] - }, - "safeoutputs": { - "type": "http", - "url": "http://host.docker.internal:80/mcp/safeoutputs", - "headers": { - "Authorization": "Bearer $(MCP_GATEWAY_API_KEY)" - }, - "tools": ["*"] - } - } -} -``` +**Removed.** The Copilot CLI `mcp-config.json` is no longer generated at compile time. Instead, it is derived at **pipeline runtime** from MCPG's actual gateway output, matching gh-aw's `convert_gateway_config_copilot.cjs` pattern. + +The "Start MCP Gateway (MCPG)" pipeline step: +1. Redirects MCPG's stdout to `gateway-output.json` +2. Waits for the health check and for valid JSON output +3. 
Transforms the output with a `jq` filter that: + - Rewrites URLs from `127.0.0.1` → `host.docker.internal` (AWF container loopback vs host) + - Ensures `tools: ["*"]` on each server entry (Copilot CLI requirement) + - Preserves all other fields (headers, type, etc.) +4. Writes the result to `/tmp/awf-tools/mcp-config.json` and `$HOME/.copilot/mcp-config.json` + +This ensures the Copilot CLI config reflects MCPG's actual runtime state rather than a compile-time prediction. ## {{ allowed_domains }} diff --git a/src/compile/common.rs b/src/compile/common.rs index 4a09830..efc0d86 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -1813,65 +1813,6 @@ pub fn generate_mcpg_docker_env(front_matter: &FrontMatter) -> String { } } -/// Generate the Copilot CLI `mcp-config.json` with per-server routed URLs. -/// -/// MCPG runs in routed mode by default, exposing each backend at `/mcp/{serverID}`. -/// This function generates the client-side config that the Copilot CLI uses to -/// reach each MCPG-managed server. Follows the same pattern as gh-aw's -/// `convert_gateway_config_copilot.cjs`. -/// -/// The generated JSON lists one entry per MCPG server with: -/// - `type: "http"` — Copilot CLI HTTP transport -/// - `url` — routed endpoint `http://{MCPG_DOMAIN}:{port}/mcp/{name}` -/// - `headers` — Bearer auth with the gateway API key (ADO variable reference) -/// - `tools: ["*"]` — allow all tools (Copilot CLI requirement) -/// -/// # Pre-conditions -/// -/// All server names in `mcpg_config.mcp_servers` must be URL-safe path segments -/// (validated by [`generate_mcpg_config`]). Names are embedded directly in URL -/// paths without encoding. -pub fn generate_mcp_client_config(mcpg_config: &McpgConfig) -> Result<String> { - // serde_json::Map is BTreeMap-backed, so keys are sorted on insert. - // Server names are validated in generate_mcpg_config (allowlist: [a-zA-Z0-9_.-]). 
- let mut servers = serde_json::Map::new(); - - for (name, _) in &mcpg_config.mcp_servers { - let mut entry = serde_json::Map::new(); - entry.insert("type".to_string(), serde_json::Value::String("http".to_string())); - entry.insert( - "url".to_string(), - serde_json::Value::String(format!( - "http://{}:{}/mcp/{}", - MCPG_DOMAIN, mcpg_config.gateway.port, name - )), - ); - - // $(MCP_GATEWAY_API_KEY) uses ADO macro syntax. The generated JSON is embedded - // in a bash heredoc; ADO expands $(VARNAME) in script bodies before bash runs, - // so the real API key value is substituted at pipeline runtime. - let mut headers = serde_json::Map::new(); - headers.insert( - "Authorization".to_string(), - serde_json::Value::String("Bearer $(MCP_GATEWAY_API_KEY)".to_string()), - ); - entry.insert("headers".to_string(), serde_json::Value::Object(headers)); - - entry.insert( - "tools".to_string(), - serde_json::Value::Array(vec![serde_json::Value::String("*".to_string())]), - ); - - servers.insert(name.to_string(), serde_json::Value::Object(entry)); - } - - let mut root = serde_json::Map::new(); - root.insert("mcpServers".to_string(), serde_json::Value::Object(servers)); - - serde_json::to_string_pretty(&serde_json::Value::Object(root)) - .context("Failed to serialize MCP client config") -} - // ==================== Domain allowlist ==================== /// Generate the allowed domains list for AWF network isolation. 
@@ -4251,61 +4192,6 @@ mod tests { assert!(!is_valid_env_var_name("X -v /etc:/etc:rw")); } - // ─── generate_mcp_client_config ──────────────────────────────────────── - - #[test] - fn test_generate_mcp_client_config_default() { - let fm = minimal_front_matter(); - let config = generate_mcpg_config(&fm, &CompileContext::for_test(&fm), &collect_extensions(&fm)).unwrap(); - let json = generate_mcp_client_config(&config).unwrap(); - let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); - let servers = parsed["mcpServers"].as_object().unwrap(); - - // Default config has only safeoutputs - assert!(servers.contains_key("safeoutputs"), "Should have safeoutputs entry"); - assert_eq!(servers.len(), 1, "Should have exactly one server by default"); - - let so = &servers["safeoutputs"]; - assert_eq!(so["type"], "http"); - assert!(so["url"].as_str().unwrap().contains("/mcp/safeoutputs"), "Should have routed URL"); - assert_eq!(so["tools"], serde_json::json!(["*"])); - assert!(so["headers"]["Authorization"].as_str().unwrap().contains("MCP_GATEWAY_API_KEY")); - } - - #[test] - fn test_generate_mcp_client_config_multiple_servers() { - let mut fm = minimal_front_matter(); - fm.mcp_servers.insert( - "my-tool".to_string(), - McpConfig::WithOptions(McpOptions { - container: Some("python:3.12-slim".to_string()), - ..Default::default() - }), - ); - let config = generate_mcpg_config(&fm, &CompileContext::for_test(&fm), &collect_extensions(&fm)).unwrap(); - let json = generate_mcp_client_config(&config).unwrap(); - let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); - let servers = parsed["mcpServers"].as_object().unwrap(); - - assert_eq!(servers.len(), 2, "Should have safeoutputs + my-tool"); - assert!(servers.contains_key("safeoutputs")); - assert!(servers.contains_key("my-tool")); - - // Verify routed URLs - assert!(servers["safeoutputs"]["url"].as_str().unwrap().ends_with("/mcp/safeoutputs")); - 
assert!(servers["my-tool"]["url"].as_str().unwrap().ends_with("/mcp/my-tool")); - } - - #[test] - fn test_generate_mcp_client_config_uses_correct_port() { - let fm = minimal_front_matter(); - let config = generate_mcpg_config(&fm, &CompileContext::for_test(&fm), &collect_extensions(&fm)).unwrap(); - let json = generate_mcp_client_config(&config).unwrap(); - let parsed: serde_json::Value = serde_json::from_str(&json).unwrap(); - let url = parsed["mcpServers"]["safeoutputs"]["url"].as_str().unwrap(); - assert!(url.contains(":80/"), "URL should use MCPG port 80"); - } - #[test] fn test_generate_mcpg_config_rejects_invalid_server_name() { let yaml = "---\nname: test-agent\ndescription: test\nmcp-servers:\n bad/name:\n container: python:3\n entrypoint: python\n---\n"; diff --git a/src/compile/onees.rs b/src/compile/onees.rs index 65085f6..891a41b 100644 --- a/src/compile/onees.rs +++ b/src/compile/onees.rs @@ -17,7 +17,6 @@ use super::common::{ generate_cancel_previous_builds, generate_enabled_tools_args, generate_mcpg_config, generate_mcpg_docker_env, - generate_mcp_client_config, format_steps_yaml_indented, }; use super::types::FrontMatter; @@ -57,9 +56,8 @@ impl Compiler for OneESCompiler { let mcpg_config_json = serde_json::to_string_pretty(&mcpg_config) .context("Failed to serialize MCPG config")?; let mcpg_docker_env = generate_mcpg_docker_env(front_matter); - let mcp_client_config = generate_mcp_client_config(&mcpg_config)?; - + // Generate 1ES-specific setup/teardown jobs (no per-job pool, uses templateContext). // These override the shared {{ setup_job }} / {{ teardown_job }} markers via // extra_replacements, which are applied before the shared replacements. 
let setup_job = generate_setup_job(&front_matter.setup); @@ -78,7 +76,6 @@ impl Compiler for OneESCompiler { ("{{ cancel_previous_builds }}".into(), cancel_previous_builds), ("{{ mcpg_config }}".into(), mcpg_config_json), ("{{ mcpg_docker_env }}".into(), mcpg_docker_env), - ("{{ mcp_client_config }}".into(), mcp_client_config), ("{{ setup_job }}".into(), setup_job), ("{{ teardown_job }}".into(), teardown_job), ], diff --git a/src/compile/standalone.rs b/src/compile/standalone.rs index e8f6b4a..69dd472 100644 --- a/src/compile/standalone.rs +++ b/src/compile/standalone.rs @@ -19,7 +19,6 @@ use super::common::{ generate_cancel_previous_builds, generate_enabled_tools_args, generate_mcpg_config, generate_mcpg_docker_env, - generate_mcp_client_config, }; use super::types::FrontMatter; @@ -58,7 +57,6 @@ impl Compiler for StandaloneCompiler { let mcpg_config_json = serde_json::to_string_pretty(&config_obj).context("Failed to serialize MCPG config")?; let mcpg_docker_env = generate_mcpg_docker_env(front_matter); - let mcp_client_config = generate_mcp_client_config(&config_obj)?; let config = CompileConfig { template: include_str!("../data/base.yml").to_string(), @@ -73,7 +71,6 @@ impl Compiler for StandaloneCompiler { ("{{ cancel_previous_builds }}".into(), cancel_previous_builds), ("{{ mcpg_config }}".into(), mcpg_config_json), ("{{ mcpg_docker_env }}".into(), mcpg_docker_env), - ("{{ mcp_client_config }}".into(), mcp_client_config), ], skip_integrity, }; diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 7282492..4a458b6 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -151,28 +151,7 @@ extends: # Copy MCPG config to /tmp cp "$(Agent.TempDirectory)/staging/mcpg-config.json" /tmp/awf-tools/staging/mcpg-config.json - - # Generate MCP config for copilot CLI pointing to MCPG gateway on host. - # The agent inside AWF reaches MCPG via host.docker.internal using routed - # endpoints (/mcp/{serverID}). 
Each server has its own entry with auth headers. - # - # NOTE: The quoted heredoc (<< 'EOF') prevents bash from expanding $(...) - # as command substitution. However, $(MCP_GATEWAY_API_KEY) in the JSON is - # still expanded by ADO macro expansion, which processes the entire script - # body *before* bash executes it. The real API key value ends up in the file. - cat > /tmp/awf-tools/mcp-config.json << 'MCP_CLIENT_CONFIG_EOF' - {{ mcp_client_config }} - MCP_CLIENT_CONFIG_EOF - - # Also write to $HOME/.copilot for host-side use - cp /tmp/awf-tools/mcp-config.json "$HOME/.copilot/mcp-config.json" - - echo "Generated MCP config at: /tmp/awf-tools/mcp-config.json" - cat /tmp/awf-tools/mcp-config.json - - # Validate JSON - python3 -m json.tool /tmp/awf-tools/mcp-config.json > /dev/null && echo "JSON is valid" - displayName: "Generate MCP configs" + displayName: "Prepare tooling" - bash: | # Write agent instructions to /tmp so it's accessible inside AWF container @@ -271,9 +250,12 @@ extends: echo "Starting MCPG with config template:" cat /tmp/awf-tools/staging/mcpg-config.json | python3 -m json.tool - # Remove any leftover container from a previous interrupted run + # Remove any leftover container or stale output from a previous interrupted run # (--rm only cleans up on clean exit; OOM/SIGKILL may leave it behind) docker rm -f mcpg 2>/dev/null || true + GATEWAY_OUTPUT="/tmp/gh-aw/mcp-config/gateway-output.json" + mkdir -p "$(dirname "$GATEWAY_OUTPUT")" + rm -f "$GATEWAY_OUTPUT" # Start MCPG Docker container on host network. 
# The Docker socket mount is required because MCPG spawns stdio-based MCP @@ -285,6 +267,9 @@ extends: # which is empty with --network host (by design), causing a spurious error: # [ERROR] Port 80 is not exposed from the container # Upstream fix: https://github.com/github/gh-aw-mcpg/issues/TBD + # + # stdout → gateway-output.json (machine-readable config, read after health check) + # stderr → mcp-logs/stderr.log (debug logs) echo "$MCPG_CONFIG" | docker run -i --rm \ --name mcpg \ --network host \ @@ -295,7 +280,8 @@ extends: -e MCP_GATEWAY_API_KEY="$(MCP_GATEWAY_API_KEY)" \ {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ - --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs & + --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ + > "$GATEWAY_OUTPUT" 2>/tmp/gh-aw/mcp-logs/stderr.log & MCPG_PID=$! echo "MCPG started (PID: $MCPG_PID)" @@ -311,8 +297,49 @@ extends: done if [ "$READY" != "true" ]; then echo "##vso[task.complete result=Failed]MCPG did not become ready within 30s" + echo "MCPG stderr:" + cat /tmp/gh-aw/mcp-logs/stderr.log 2>/dev/null || true + exit 1 + fi + + # Wait for gateway output file to contain valid JSON with mcpServers. + # Health check passing doesn't guarantee stdout is flushed, so poll. + echo "Waiting for gateway output file..." + GATEWAY_READY=false + for i in $(seq 1 15); do + if [ -s "$GATEWAY_OUTPUT" ] && jq -e '.mcpServers' "$GATEWAY_OUTPUT" > /dev/null 2>&1; then + echo "Gateway output is ready" + GATEWAY_READY=true + break + fi + sleep 1 + done + if [ "$GATEWAY_READY" != "true" ]; then + echo "##vso[task.complete result=Failed]Gateway output file not ready within 15s" + echo "Gateway output content:" + cat "$GATEWAY_OUTPUT" 2>/dev/null || echo "(empty or missing)" exit 1 fi + + echo "Gateway output:" + cat "$GATEWAY_OUTPUT" + + # Convert gateway output to Copilot CLI mcp-config.json. 
+ # Mirrors gh-aw's convert_gateway_config_copilot.cjs: + # - Rewrite URLs from 127.0.0.1 to host.docker.internal (AWF container needs + # host.docker.internal to reach MCPG on the host; 127.0.0.1 is container loopback) + # - Ensure tools: ["*"] on each server entry (Copilot CLI requirement) + # - Preserve all other fields (headers, type, etc.) + jq --arg prefix "http://$(MCP_GATEWAY_DOMAIN):$(MCP_GATEWAY_PORT)" \ + '.mcpServers |= (to_entries | sort_by(.key) | map(.value.url |= sub("^http://[^/]+/"; "\($prefix)/") | .value.tools = ["*"]) | from_entries)' \ + "$GATEWAY_OUTPUT" > /tmp/awf-tools/mcp-config.json + + # Also write to $HOME/.copilot for host-side use + cp /tmp/awf-tools/mcp-config.json "$HOME/.copilot/mcp-config.json" + chmod 600 /tmp/awf-tools/mcp-config.json "$HOME/.copilot/mcp-config.json" + + echo "Generated MCP config at: /tmp/awf-tools/mcp-config.json" + cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" # Network isolation via AWF (Agentic Workflow Firewall) diff --git a/src/data/base.yml b/src/data/base.yml index e73cc4b..6646fc2 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -122,28 +122,7 @@ jobs: # Copy MCPG config to /tmp cp "$(Agent.TempDirectory)/staging/mcpg-config.json" /tmp/awf-tools/staging/mcpg-config.json - - # Generate MCP config for copilot CLI pointing to MCPG gateway on host. - # The agent inside AWF reaches MCPG via host.docker.internal using routed - # endpoints (/mcp/{serverID}). Each server has its own entry with auth headers. - # - # NOTE: The quoted heredoc (<< 'EOF') prevents bash from expanding $(...) - # as command substitution. However, $(MCP_GATEWAY_API_KEY) in the JSON is - # still expanded by ADO macro expansion, which processes the entire script - # body *before* bash executes it. The real API key value ends up in the file. 
- cat > /tmp/awf-tools/mcp-config.json << 'MCP_CLIENT_CONFIG_EOF' - {{ mcp_client_config }} - MCP_CLIENT_CONFIG_EOF - - # Also write to $HOME/.copilot for host-side use - cp /tmp/awf-tools/mcp-config.json "$HOME/.copilot/mcp-config.json" - - echo "Generated MCP config at: /tmp/awf-tools/mcp-config.json" - cat /tmp/awf-tools/mcp-config.json - - # Validate JSON - python3 -m json.tool /tmp/awf-tools/mcp-config.json > /dev/null && echo "JSON is valid" - displayName: "Generate MCP configs" + displayName: "Prepare tooling" - bash: | # Write agent instructions to /tmp so it's accessible inside AWF container @@ -242,9 +221,12 @@ jobs: echo "Starting MCPG with config template:" cat /tmp/awf-tools/staging/mcpg-config.json | python3 -m json.tool - # Remove any leftover container from a previous interrupted run + # Remove any leftover container or stale output from a previous interrupted run # (--rm only cleans up on clean exit; OOM/SIGKILL may leave it behind) docker rm -f mcpg 2>/dev/null || true + GATEWAY_OUTPUT="/tmp/gh-aw/mcp-config/gateway-output.json" + mkdir -p "$(dirname "$GATEWAY_OUTPUT")" + rm -f "$GATEWAY_OUTPUT" # Start MCPG Docker container on host network. 
# The Docker socket mount is required because MCPG spawns stdio-based MCP @@ -256,6 +238,9 @@ jobs: # which is empty with --network host (by design), causing a spurious error: # [ERROR] Port 80 is not exposed from the container # Upstream fix: https://github.com/github/gh-aw-mcpg/issues/TBD + # + # stdout → gateway-output.json (machine-readable config, read after health check) + # stderr → mcp-logs/stderr.log (debug logs) echo "$MCPG_CONFIG" | docker run -i --rm \ --name mcpg \ --network host \ @@ -266,7 +251,8 @@ jobs: -e MCP_GATEWAY_API_KEY="$(MCP_GATEWAY_API_KEY)" \ {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ - --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs & + --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ + > "$GATEWAY_OUTPUT" 2>/tmp/gh-aw/mcp-logs/stderr.log & MCPG_PID=$! echo "MCPG started (PID: $MCPG_PID)" @@ -282,8 +268,49 @@ jobs: done if [ "$READY" != "true" ]; then echo "##vso[task.complete result=Failed]MCPG did not become ready within 30s" + echo "MCPG stderr:" + cat /tmp/gh-aw/mcp-logs/stderr.log 2>/dev/null || true + exit 1 + fi + + # Wait for gateway output file to contain valid JSON with mcpServers. + # Health check passing doesn't guarantee stdout is flushed, so poll. + echo "Waiting for gateway output file..." + GATEWAY_READY=false + for i in $(seq 1 15); do + if [ -s "$GATEWAY_OUTPUT" ] && jq -e '.mcpServers' "$GATEWAY_OUTPUT" > /dev/null 2>&1; then + echo "Gateway output is ready" + GATEWAY_READY=true + break + fi + sleep 1 + done + if [ "$GATEWAY_READY" != "true" ]; then + echo "##vso[task.complete result=Failed]Gateway output file not ready within 15s" + echo "Gateway output content:" + cat "$GATEWAY_OUTPUT" 2>/dev/null || echo "(empty or missing)" exit 1 fi + + echo "Gateway output:" + cat "$GATEWAY_OUTPUT" + + # Convert gateway output to Copilot CLI mcp-config.json. 
+ # Mirrors gh-aw's convert_gateway_config_copilot.cjs: + # - Rewrite URLs from 127.0.0.1 to host.docker.internal (AWF container needs + # host.docker.internal to reach MCPG on the host; 127.0.0.1 is container loopback) + # - Ensure tools: ["*"] on each server entry (Copilot CLI requirement) + # - Preserve all other fields (headers, type, etc.) + jq --arg prefix "http://$(MCP_GATEWAY_DOMAIN):$(MCP_GATEWAY_PORT)" \ + '.mcpServers |= (to_entries | sort_by(.key) | map(.value.url |= sub("^http://[^/]+/"; "\($prefix)/") | .value.tools = ["*"]) | from_entries)' \ + "$GATEWAY_OUTPUT" > /tmp/awf-tools/mcp-config.json + + # Also write to $HOME/.copilot for host-side use + cp /tmp/awf-tools/mcp-config.json "$HOME/.copilot/mcp-config.json" + chmod 600 /tmp/awf-tools/mcp-config.json "$HOME/.copilot/mcp-config.json" + + echo "Generated MCP config at: /tmp/awf-tools/mcp-config.json" + cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" # Network isolation via AWF (Agentic Workflow Firewall) From b4ec6b737073ebba8dd42959898ce2e34bf99e4c Mon Sep 17 00:00:00 2001 From: James Devine Date: Fri, 17 Apr 2026 23:11:02 +0100 Subject: [PATCH 02/20] fix: create /tmp/gh-aw/mcp-logs before redirecting MCPG stderr The stderr redirect to /tmp/gh-aw/mcp-logs/stderr.log failed because the directory didn't exist yet. Add it to the mkdir -p call alongside the gateway output directory. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 2 +- src/data/base.yml | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 4a458b6..8d0e799 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -254,7 +254,7 @@ extends: # (--rm only cleans up on clean exit; OOM/SIGKILL may leave it behind) docker rm -f mcpg 2>/dev/null || true GATEWAY_OUTPUT="/tmp/gh-aw/mcp-config/gateway-output.json" - mkdir -p "$(dirname "$GATEWAY_OUTPUT")" + mkdir -p "$(dirname "$GATEWAY_OUTPUT")" /tmp/gh-aw/mcp-logs rm -f "$GATEWAY_OUTPUT" # Start MCPG Docker container on host network. diff --git a/src/data/base.yml b/src/data/base.yml index 6646fc2..958a243 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -225,7 +225,7 @@ jobs: # (--rm only cleans up on clean exit; OOM/SIGKILL may leave it behind) docker rm -f mcpg 2>/dev/null || true GATEWAY_OUTPUT="/tmp/gh-aw/mcp-config/gateway-output.json" - mkdir -p "$(dirname "$GATEWAY_OUTPUT")" + mkdir -p "$(dirname "$GATEWAY_OUTPUT")" /tmp/gh-aw/mcp-logs rm -f "$GATEWAY_OUTPUT" # Start MCPG Docker container on host network. From 9dc4557879e1e8a1114bcf002292b8644f9fc433 Mon Sep 17 00:00:00 2001 From: James Devine Date: Fri, 17 Apr 2026 23:16:37 +0100 Subject: [PATCH 03/20] fix: stream MCPG stderr to pipeline console via tee Use process substitution to tee stderr to both the log file and the pipeline console, giving real-time visibility into MCPG startup logs. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 4 ++-- src/data/base.yml | 4 ++-- 2 files changed, 4 insertions(+), 4 deletions(-) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 8d0e799..d480012 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -269,7 +269,7 @@ extends: # Upstream fix: https://github.com/github/gh-aw-mcpg/issues/TBD # # stdout → gateway-output.json (machine-readable config, read after health check) - # stderr → mcp-logs/stderr.log (debug logs) + # stderr → tee to both pipeline console and mcp-logs/stderr.log echo "$MCPG_CONFIG" | docker run -i --rm \ --name mcpg \ --network host \ @@ -281,7 +281,7 @@ extends: {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ - > "$GATEWAY_OUTPUT" 2>/tmp/gh-aw/mcp-logs/stderr.log & + > "$GATEWAY_OUTPUT" 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2) & MCPG_PID=$! echo "MCPG started (PID: $MCPG_PID)" diff --git a/src/data/base.yml b/src/data/base.yml index 958a243..85cd9b1 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -240,7 +240,7 @@ jobs: # Upstream fix: https://github.com/github/gh-aw-mcpg/issues/TBD # # stdout → gateway-output.json (machine-readable config, read after health check) - # stderr → mcp-logs/stderr.log (debug logs) + # stderr → tee to both pipeline console and mcp-logs/stderr.log echo "$MCPG_CONFIG" | docker run -i --rm \ --name mcpg \ --network host \ @@ -252,7 +252,7 @@ jobs: {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ - > "$GATEWAY_OUTPUT" 2>/tmp/gh-aw/mcp-logs/stderr.log & + > "$GATEWAY_OUTPUT" 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2) & MCPG_PID=$! 
echo "MCPG started (PID: $MCPG_PID)" From afdee6b2ac87c505e45e4548be9983096d9dbdeb Mon Sep 17 00:00:00 2001 From: James Devine Date: Fri, 17 Apr 2026 23:25:12 +0100 Subject: [PATCH 04/20] fix: copy MCPG logs to published artifact directory Copy /tmp/gh-aw/mcp-logs/ into staging/logs/mcpg/ so MCPG gateway logs, stderr output, and gateway-output.json are available in the published pipeline artifact for post-run debugging. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 4 ++++ src/data/base.yml | 4 ++++ 2 files changed, 8 insertions(+) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index d480012..861057c 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -427,6 +427,10 @@ extends: if [ -d ~/.ado-aw/logs ]; then cp -r ~/.ado-aw/logs/* "$(Agent.TempDirectory)/staging/logs/" 2>/dev/null || true fi + if [ -d /tmp/gh-aw/mcp-logs ]; then + mkdir -p "$(Agent.TempDirectory)/staging/logs/mcpg" + cp -r /tmp/gh-aw/mcp-logs/* "$(Agent.TempDirectory)/staging/logs/mcpg/" 2>/dev/null || true + fi echo "Logs copied to $(Agent.TempDirectory)/staging/logs" ls -la "$(Agent.TempDirectory)/staging/logs" 2>/dev/null || echo "No logs found" displayName: "Copy logs to output directory" diff --git a/src/data/base.yml b/src/data/base.yml index 85cd9b1..cdacbc1 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -398,6 +398,10 @@ jobs: if [ -d ~/.ado-aw/logs ]; then cp -r ~/.ado-aw/logs/* "$(Agent.TempDirectory)/staging/logs/" 2>/dev/null || true fi + if [ -d /tmp/gh-aw/mcp-logs ]; then + mkdir -p "$(Agent.TempDirectory)/staging/logs/mcpg" + cp -r /tmp/gh-aw/mcp-logs/* "$(Agent.TempDirectory)/staging/logs/mcpg/" 2>/dev/null || true + fi echo "Logs copied to $(Agent.TempDirectory)/staging/logs" ls -la "$(Agent.TempDirectory)/staging/logs" 2>/dev/null || echo "No logs found" displayName: "Copy logs to output directory" From 589660b44746f537b876ca92cb6ab75fab1d3245 Mon Sep 17 00:00:00 2001 From: James Devine Date: 
Fri, 17 Apr 2026 23:37:41 +0100 Subject: [PATCH 05/20] feat: pre-install Azure DevOps MCP to avoid MCPG startup timeout MCPG has a 30s startup timeout for stdio MCP containers. The previous approach used npx -y @azure-devops/mcp which downloads the package at container launch time, regularly exceeding this timeout. The AzureDevOpsExtension now emits a prepare_steps() that builds a local Docker image (ado-mcp-cached:latest) with @azure-devops/mcp pre-installed via npm install -g. The MCPG config references this cached image, so the container starts instantly without any network download. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/common.rs | 11 ++++++--- src/compile/extensions.rs | 51 ++++++++++++++++++++++++++++++++++++--- 2 files changed, 54 insertions(+), 8 deletions(-) diff --git a/src/compile/common.rs b/src/compile/common.rs index efc0d86..939d68d 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -792,10 +792,13 @@ pub const MCPG_PORT: u16 = 80; /// inside containers running with `--network host` or via Docker DNS. pub const MCPG_DOMAIN: &str = "host.docker.internal"; -/// Docker image for the Azure DevOps MCP container. -/// This is the container used when `tools: azure-devops:` is configured. +/// Docker base image for the Azure DevOps MCP container. pub const ADO_MCP_IMAGE: &str = "node:20-slim"; +/// Local pre-warmed image name with @azure-devops/mcp pre-installed. +/// Built at pipeline runtime via a prepare step to avoid MCPG startup timeout. +pub const ADO_MCP_CACHED_IMAGE: &str = "ado-mcp-cached:latest"; + /// Default entrypoint for the Azure DevOps MCP container. 
pub const ADO_MCP_ENTRYPOINT: &str = "npx"; @@ -4227,11 +4230,11 @@ mod tests { let config = generate_mcpg_config(&fm, &CompileContext::for_test_with_org(&fm, "inferred-org"), &collect_extensions(&fm)).unwrap(); let ado = config.mcp_servers.get("azure-devops").unwrap(); assert_eq!(ado.server_type, "stdio"); - assert_eq!(ado.container.as_deref(), Some(ADO_MCP_IMAGE)); + assert_eq!(ado.container.as_deref(), Some(ADO_MCP_CACHED_IMAGE)); assert_eq!(ado.entrypoint.as_deref(), Some(ADO_MCP_ENTRYPOINT)); let args = ado.entrypoint_args.as_ref().unwrap(); - assert!(args.contains(&"-y".to_string())); assert!(args.contains(&ADO_MCP_PACKAGE.to_string())); + assert!(!args.contains(&"-y".to_string()), "cached image should not use npx -y"); assert!(args.contains(&"inferred-org".to_string())); // Should have AZURE_DEVOPS_EXT_PAT in env let env = ado.env.as_ref().unwrap(); diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs index 7a828b5..3c378cf 100644 --- a/src/compile/extensions.rs +++ b/src/compile/extensions.rs @@ -422,7 +422,7 @@ the toolchain. Lean files use the `.lean` extension.\n" // ─── Azure DevOps MCP ──────────────────────────────────────────────── use crate::allowed_hosts::mcp_required_hosts; -use super::common::{ADO_MCP_IMAGE, ADO_MCP_ENTRYPOINT, ADO_MCP_PACKAGE, ADO_MCP_SERVER_NAME}; +use super::common::{ADO_MCP_IMAGE, ADO_MCP_CACHED_IMAGE, ADO_MCP_ENTRYPOINT, ADO_MCP_PACKAGE, ADO_MCP_SERVER_NAME}; use super::types::AzureDevOpsToolConfig; /// Azure DevOps first-party tool extension. @@ -459,9 +459,31 @@ impl CompilerExtension for AzureDevOpsExtension { vec![ADO_MCP_SERVER_NAME.to_string()] } + fn prepare_steps(&self) -> Vec<String> { + // Pre-build a local Docker image with @azure-devops/mcp pre-installed. + // Without this, MCPG's 30s startup timeout is exceeded because `npx -y` + // needs to download and install the package at container launch time. + vec![format!( + r#"- bash: | + echo "Pre-building Azure DevOps MCP container image..." 
+ docker pull {base_image} + CONTAINER_ID=$(docker create {base_image} sh -c "npm install -g {package}") + docker start -a "$CONTAINER_ID" + docker commit "$CONTAINER_ID" {cached_image} + docker rm "$CONTAINER_ID" + echo "Azure DevOps MCP container image ready: {cached_image}" + docker images {cached_image} + displayName: "Pre-install Azure DevOps MCP""#, + base_image = ADO_MCP_IMAGE, + package = ADO_MCP_PACKAGE, + cached_image = ADO_MCP_CACHED_IMAGE, + )] + } + fn mcpg_servers(&self, ctx: &CompileContext) -> Result> { - // Build entrypoint args: npx -y @azure-devops/mcp [-d toolset1 toolset2 ...] - let mut entrypoint_args = vec!["-y".to_string(), ADO_MCP_PACKAGE.to_string()]; + // Build entrypoint args: @azure-devops/mcp [-d toolset1 toolset2 ...] + // Uses the pre-installed package from the cached image (no npx -y download needed). + let mut entrypoint_args = vec![ADO_MCP_PACKAGE.to_string()]; // Org: use explicit override, then inferred from git remote, then fail let org = self @@ -519,7 +541,7 @@ impl CompilerExtension for AzureDevOpsExtension { ADO_MCP_SERVER_NAME.to_string(), McpgServerConfig { server_type: "stdio".to_string(), - container: Some(ADO_MCP_IMAGE.to_string()), + container: Some(ADO_MCP_CACHED_IMAGE.to_string()), entrypoint: Some(ADO_MCP_ENTRYPOINT.to_string()), entrypoint_args: Some(entrypoint_args), mounts: None, @@ -1065,6 +1087,27 @@ mod tests { assert!(warnings[0].contains("both tools.azure-devops and mcp-servers")); } + #[test] + fn test_ado_prepare_steps_builds_cached_image() { + let ext = AzureDevOpsExtension::new(AzureDevOpsToolConfig::Enabled(true)); + let steps = ext.prepare_steps(); + assert_eq!(steps.len(), 1); + assert!(steps[0].contains(ADO_MCP_IMAGE), "should pull base image"); + assert!(steps[0].contains(ADO_MCP_PACKAGE), "should install package"); + assert!(steps[0].contains(ADO_MCP_CACHED_IMAGE), "should commit as cached image"); + } + + #[test] + fn test_ado_mcpg_servers_uses_cached_image() { + let fm = minimal_front_matter(); 
+ let ctx = CompileContext::for_test_with_org(&fm, "myorg"); + let ext = AzureDevOpsExtension::new(AzureDevOpsToolConfig::Enabled(true)); + let servers = ext.mcpg_servers(&ctx).unwrap(); + assert_eq!(servers[0].1.container.as_deref(), Some(ADO_MCP_CACHED_IMAGE)); + let args = servers[0].1.entrypoint_args.as_ref().unwrap(); + assert!(!args.contains(&"-y".to_string()), "cached image should not use -y flag"); + } + // ── CacheMemoryExtension ─────────────────────────────────────── #[test] From f1b51f29f47d6f5c43f808f2546263788daedc77 Mon Sep 17 00:00:00 2001 From: James Devine Date: Fri, 17 Apr 2026 23:56:51 +0100 Subject: [PATCH 06/20] fix: add node ecosystem domains for Azure DevOps MCP container The ADO MCP container runs via npx which needs npm registry access to resolve @azure-devops/mcp. AWF's host-level iptables rules block outbound traffic from MCPG-spawned containers unless the domains are allowlisted. Add 'node' ecosystem identifier to AzureDevOpsExtension::required_hosts() so npm/node domains are included in AWF's allowed domains list. Also reverts the cached image approach (pre-install via docker commit) as allowing network access is the correct fix. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/common.rs | 8 ++--- src/compile/extensions.rs | 61 ++++++++------------------------------- 2 files changed, 14 insertions(+), 55 deletions(-) diff --git a/src/compile/common.rs b/src/compile/common.rs index 939d68d..8a2a4c7 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -795,10 +795,6 @@ pub const MCPG_DOMAIN: &str = "host.docker.internal"; /// Docker base image for the Azure DevOps MCP container. pub const ADO_MCP_IMAGE: &str = "node:20-slim"; -/// Local pre-warmed image name with @azure-devops/mcp pre-installed. -/// Built at pipeline runtime via a prepare step to avoid MCPG startup timeout. 
-pub const ADO_MCP_CACHED_IMAGE: &str = "ado-mcp-cached:latest"; - /// Default entrypoint for the Azure DevOps MCP container. pub const ADO_MCP_ENTRYPOINT: &str = "npx"; @@ -4230,11 +4226,11 @@ mod tests { let config = generate_mcpg_config(&fm, &CompileContext::for_test_with_org(&fm, "inferred-org"), &collect_extensions(&fm)).unwrap(); let ado = config.mcp_servers.get("azure-devops").unwrap(); assert_eq!(ado.server_type, "stdio"); - assert_eq!(ado.container.as_deref(), Some(ADO_MCP_CACHED_IMAGE)); + assert_eq!(ado.container.as_deref(), Some(ADO_MCP_IMAGE)); assert_eq!(ado.entrypoint.as_deref(), Some(ADO_MCP_ENTRYPOINT)); let args = ado.entrypoint_args.as_ref().unwrap(); + assert!(args.contains(&"-y".to_string())); assert!(args.contains(&ADO_MCP_PACKAGE.to_string())); - assert!(!args.contains(&"-y".to_string()), "cached image should not use npx -y"); assert!(args.contains(&"inferred-org".to_string())); // Should have AZURE_DEVOPS_EXT_PAT in env let env = ado.env.as_ref().unwrap(); diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs index 3c378cf..9bc2251 100644 --- a/src/compile/extensions.rs +++ b/src/compile/extensions.rs @@ -422,7 +422,7 @@ the toolchain. Lean files use the `.lean` extension.\n" // ─── Azure DevOps MCP ──────────────────────────────────────────────── use crate::allowed_hosts::mcp_required_hosts; -use super::common::{ADO_MCP_IMAGE, ADO_MCP_CACHED_IMAGE, ADO_MCP_ENTRYPOINT, ADO_MCP_PACKAGE, ADO_MCP_SERVER_NAME}; +use super::common::{ADO_MCP_IMAGE, ADO_MCP_ENTRYPOINT, ADO_MCP_PACKAGE, ADO_MCP_SERVER_NAME}; use super::types::AzureDevOpsToolConfig; /// Azure DevOps first-party tool extension. @@ -449,41 +449,23 @@ impl CompilerExtension for AzureDevOpsExtension { } fn required_hosts(&self) -> Vec { - mcp_required_hosts("ado") + let mut hosts: Vec = mcp_required_hosts("ado") .iter() .map(|h| (*h).to_string()) - .collect() + .collect(); + // The ADO MCP runs in a container via `npx -y @azure-devops/mcp`. 
+ // npx needs npm registry access to resolve and install the package. + hosts.push("node".to_string()); + hosts } fn allowed_copilot_tools(&self) -> Vec { vec![ADO_MCP_SERVER_NAME.to_string()] } - fn prepare_steps(&self) -> Vec { - // Pre-build a local Docker image with @azure-devops/mcp pre-installed. - // Without this, MCPG's 30s startup timeout is exceeded because `npx -y` - // needs to download and install the package at container launch time. - vec![format!( - r#"- bash: | - echo "Pre-building Azure DevOps MCP container image..." - docker pull {base_image} - CONTAINER_ID=$(docker create {base_image} sh -c "npm install -g {package}") - docker start -a "$CONTAINER_ID" - docker commit "$CONTAINER_ID" {cached_image} - docker rm "$CONTAINER_ID" - echo "Azure DevOps MCP container image ready: {cached_image}" - docker images {cached_image} - displayName: "Pre-install Azure DevOps MCP""#, - base_image = ADO_MCP_IMAGE, - package = ADO_MCP_PACKAGE, - cached_image = ADO_MCP_CACHED_IMAGE, - )] - } - fn mcpg_servers(&self, ctx: &CompileContext) -> Result> { - // Build entrypoint args: @azure-devops/mcp [-d toolset1 toolset2 ...] - // Uses the pre-installed package from the cached image (no npx -y download needed). - let mut entrypoint_args = vec![ADO_MCP_PACKAGE.to_string()]; + // Build entrypoint args: npx -y @azure-devops/mcp [-d toolset1 toolset2 ...] 
+ let mut entrypoint_args = vec!["-y".to_string(), ADO_MCP_PACKAGE.to_string()]; // Org: use explicit override, then inferred from git remote, then fail let org = self @@ -541,7 +523,7 @@ impl CompilerExtension for AzureDevOpsExtension { ADO_MCP_SERVER_NAME.to_string(), McpgServerConfig { server_type: "stdio".to_string(), - container: Some(ADO_MCP_CACHED_IMAGE.to_string()), + container: Some(ADO_MCP_IMAGE.to_string()), entrypoint: Some(ADO_MCP_ENTRYPOINT.to_string()), entrypoint_args: Some(entrypoint_args), mounts: None, @@ -1043,6 +1025,8 @@ mod tests { let ext = AzureDevOpsExtension::new(AzureDevOpsToolConfig::Enabled(true)); let hosts = ext.required_hosts(); assert!(hosts.contains(&"dev.azure.com".to_string())); + // Node ecosystem is required for npx to resolve @azure-devops/mcp + assert!(hosts.contains(&"node".to_string())); } #[test] @@ -1087,27 +1071,6 @@ mod tests { assert!(warnings[0].contains("both tools.azure-devops and mcp-servers")); } - #[test] - fn test_ado_prepare_steps_builds_cached_image() { - let ext = AzureDevOpsExtension::new(AzureDevOpsToolConfig::Enabled(true)); - let steps = ext.prepare_steps(); - assert_eq!(steps.len(), 1); - assert!(steps[0].contains(ADO_MCP_IMAGE), "should pull base image"); - assert!(steps[0].contains(ADO_MCP_PACKAGE), "should install package"); - assert!(steps[0].contains(ADO_MCP_CACHED_IMAGE), "should commit as cached image"); - } - - #[test] - fn test_ado_mcpg_servers_uses_cached_image() { - let fm = minimal_front_matter(); - let ctx = CompileContext::for_test_with_org(&fm, "myorg"); - let ext = AzureDevOpsExtension::new(AzureDevOpsToolConfig::Enabled(true)); - let servers = ext.mcpg_servers(&ctx).unwrap(); - assert_eq!(servers[0].1.container.as_deref(), Some(ADO_MCP_CACHED_IMAGE)); - let args = servers[0].1.entrypoint_args.as_ref().unwrap(); - assert!(!args.contains(&"-y".to_string()), "cached image should not use -y flag"); - } - // ── CacheMemoryExtension ─────────────────────────────────────── #[test] From 
e2409539ea8f46cc1ef8ba5514bc1c5b155a8ecf Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 00:07:19 +0100 Subject: [PATCH 07/20] feat: add MCPG debug logging and backend probe step Two changes to surface MCPG failures early: 1. Pass DEBUG=* to the MCPG container to enable full debug logging on stderr (streamed live via tee). This surfaces backend launch details, connection errors, and timeout diagnostics. 2. Add a 'Verify MCP backends' step after MCPG starts that probes each configured backend with a tools/list call. This forces MCPG's lazy initialization and catches failures (e.g., container timeout, network blocked) before the agent runs. Failures are surfaced as ADO pipeline warnings. Relates to #254 Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 37 +++++++++++++++++++++++++++++++++++++ src/data/base.yml | 37 +++++++++++++++++++++++++++++++++++++ 2 files changed, 74 insertions(+) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 861057c..818174c 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -278,6 +278,7 @@ extends: -e MCP_GATEWAY_PORT="$(MCP_GATEWAY_PORT)" \ -e MCP_GATEWAY_DOMAIN="$(MCP_GATEWAY_DOMAIN)" \ -e MCP_GATEWAY_API_KEY="$(MCP_GATEWAY_API_KEY)" \ + -e DEBUG="*" \ {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ @@ -342,6 +343,42 @@ extends: cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" + # Probe all MCPG backends to force eager launch and surface failures. + # MCPG lazily starts stdio backends on first tool call — without this + # step, a broken backend (e.g., npx timeout) only surfaces as a silent + # missing-tool error during the agent run. 
+ - bash: | + echo "=== Probing MCP backends ===" + PROBE_FAILED=false + for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do + echo "" + echo "--- Probing: $server ---" + RESPONSE=$(curl -sf --max-time 120 -X POST \ + -H "Authorization: Bearer $(MCP_GATEWAY_API_KEY)" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + CURL_EXIT=$? + if [ $CURL_EXIT -ne 0 ]; then + echo "##vso[task.logissue type=warning]MCP backend '$server' failed to respond (exit code $CURL_EXIT)" + echo "Response: $RESPONSE" + PROBE_FAILED=true + else + TOOL_COUNT=$(echo "$RESPONSE" | jq -r '.result.tools | length' 2>/dev/null || echo "?") + echo "✓ $server: $TOOL_COUNT tools available" + fi + done + + echo "" + echo "=== MCPG health after probes ===" + curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . || true + + if [ "$PROBE_FAILED" = "true" ]; then + echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" + fi + displayName: "Verify MCP backends" + # Network isolation via AWF (Agentic Workflow Firewall) - bash: | set -o pipefail diff --git a/src/data/base.yml b/src/data/base.yml index cdacbc1..d11b36e 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -249,6 +249,7 @@ jobs: -e MCP_GATEWAY_PORT="$(MCP_GATEWAY_PORT)" \ -e MCP_GATEWAY_DOMAIN="$(MCP_GATEWAY_DOMAIN)" \ -e MCP_GATEWAY_API_KEY="$(MCP_GATEWAY_API_KEY)" \ + -e DEBUG="*" \ {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ @@ -313,6 +314,42 @@ jobs: cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" + # Probe all MCPG backends to force eager launch and surface failures. 
+ # MCPG lazily starts stdio backends on first tool call — without this + # step, a broken backend (e.g., npx timeout) only surfaces as a silent + # missing-tool error during the agent run. + - bash: | + echo "=== Probing MCP backends ===" + PROBE_FAILED=false + for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do + echo "" + echo "--- Probing: $server ---" + RESPONSE=$(curl -sf --max-time 120 -X POST \ + -H "Authorization: Bearer $(MCP_GATEWAY_API_KEY)" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + CURL_EXIT=$? + if [ $CURL_EXIT -ne 0 ]; then + echo "##vso[task.logissue type=warning]MCP backend '$server' failed to respond (exit code $CURL_EXIT)" + echo "Response: $RESPONSE" + PROBE_FAILED=true + else + TOOL_COUNT=$(echo "$RESPONSE" | jq -r '.result.tools | length' 2>/dev/null || echo "?") + echo "✓ $server: $TOOL_COUNT tools available" + fi + done + + echo "" + echo "=== MCPG health after probes ===" + curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . || true + + if [ "$PROBE_FAILED" = "true" ]; then + echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" + fi + displayName: "Verify MCP backends" + # Network isolation via AWF (Agentic Workflow Firewall) - bash: | set -o pipefail From 8c7ee231d9dc0377cd6575e8bcbaa90853cb1148 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 00:17:56 +0100 Subject: [PATCH 08/20] fix: capture HTTP status and body in MCP backend probe The probe was using curl -sf which swallows error responses (exit 22 on 4xx). Now captures HTTP status code and response body separately so we can see what MCPG actually returns on failure. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 16 +++++++++------- src/data/base.yml | 16 +++++++++------- 2 files changed, 18 insertions(+), 14 deletions(-) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 818174c..dcdc29a 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -353,20 +353,22 @@ extends: for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do echo "" echo "--- Probing: $server ---" - RESPONSE=$(curl -sf --max-time 120 -X POST \ + HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ -H "Authorization: Bearer $(MCP_GATEWAY_API_KEY)" \ -H "Content-Type: application/json" \ -H "Accept: application/json, text/event-stream" \ -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) CURL_EXIT=$? - if [ $CURL_EXIT -ne 0 ]; then - echo "##vso[task.logissue type=warning]MCP backend '$server' failed to respond (exit code $CURL_EXIT)" - echo "Response: $RESPONSE" - PROBE_FAILED=true - else - TOOL_COUNT=$(echo "$RESPONSE" | jq -r '.result.tools | length' 2>/dev/null || echo "?") + BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") + echo "HTTP $HTTP_CODE (curl exit: $CURL_EXIT)" + echo "Response: $BODY" + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ $CURL_EXIT -eq 0 ]; then + TOOL_COUNT=$(echo "$BODY" | jq -r '.result.tools | length' 2>/dev/null || echo "?") echo "✓ $server: $TOOL_COUNT tools available" + else + echo "##vso[task.logissue type=warning]MCP backend '$server' returned HTTP $HTTP_CODE" + PROBE_FAILED=true fi done diff --git a/src/data/base.yml b/src/data/base.yml index d11b36e..5a1e1a9 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -324,20 +324,22 @@ jobs: for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do echo "" echo "--- Probing: $server ---" - RESPONSE=$(curl -sf --max-time 120 -X POST 
\
+          HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \
             -H "Authorization: Bearer $(MCP_GATEWAY_API_KEY)" \
             -H "Content-Type: application/json" \
             -H "Accept: application/json, text/event-stream" \
             -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \
             "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1)
           CURL_EXIT=$?
-          if [ $CURL_EXIT -ne 0 ]; then
-            echo "##vso[task.logissue type=warning]MCP backend '$server' failed to respond (exit code $CURL_EXIT)"
-            echo "Response: $RESPONSE"
-            PROBE_FAILED=true
-          else
-            TOOL_COUNT=$(echo "$RESPONSE" | jq -r '.result.tools | length' 2>/dev/null || echo "?")
+          BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)")
+          echo "HTTP $HTTP_CODE (curl exit: $CURL_EXIT)"
+          echo "Response: $BODY"
+          if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ $CURL_EXIT -eq 0 ]; then
+            TOOL_COUNT=$(echo "$BODY" | jq -r '.result.tools | length' 2>/dev/null || echo "?")
             echo "✓ $server: $TOOL_COUNT tools available"
+          else
+            echo "##vso[task.logissue type=warning]MCP backend '$server' returned HTTP $HTTP_CODE"
+            PROBE_FAILED=true
           fi
         done

From 81542f7c612e7a0ec9775c0faf54c378c58ff6b1 Mon Sep 17 00:00:00 2001
From: James Devine 
Date: Sat, 18 Apr 2026 00:22:34 +0100
Subject: [PATCH 09/20] fix: pass MCPG API key via env mapping in probe step

Secret variables set via ##vso[task.setvariable;issecret=true] must be
explicitly mapped via env: to be available in bash steps. The probe was
using $(MCP_GATEWAY_API_KEY) macro syntax in the script body, but bash
interprets $(...) as command substitution before ADO can expand it.
Pass the key via env: mapping as MCPG_API_KEY and reference it as
$MCPG_API_KEY in the script.
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 4 +++- src/data/base.yml | 4 +++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index dcdc29a..f79d6e4 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -354,7 +354,7 @@ extends: echo "" echo "--- Probing: $server ---" HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: Bearer $(MCP_GATEWAY_API_KEY)" \ + -H "Authorization: Bearer $MCPG_API_KEY" \ -H "Content-Type: application/json" \ -H "Accept: application/json, text/event-stream" \ -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ @@ -380,6 +380,8 @@ extends: echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" fi displayName: "Verify MCP backends" + env: + MCPG_API_KEY: $(MCP_GATEWAY_API_KEY) # Network isolation via AWF (Agentic Workflow Firewall) - bash: | diff --git a/src/data/base.yml b/src/data/base.yml index 5a1e1a9..b6d87a6 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -325,7 +325,7 @@ jobs: echo "" echo "--- Probing: $server ---" HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: Bearer $(MCP_GATEWAY_API_KEY)" \ + -H "Authorization: Bearer $MCPG_API_KEY" \ -H "Content-Type: application/json" \ -H "Accept: application/json, text/event-stream" \ -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ @@ -351,6 +351,8 @@ jobs: echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" fi displayName: "Verify MCP backends" + env: + MCPG_API_KEY: $(MCP_GATEWAY_API_KEY) # Network isolation via AWF (Agentic Workflow Firewall) - bash: | From 0bc1c79e49a3507c7f8f076fe6d76d7a9ee56d58 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 00:26:40 +0100 Subject: [PATCH 10/20] fix: use raw API key for 
MCPG auth (not Bearer scheme)

MCPG spec 7.1 requires the API key directly in the Authorization
header, not as a Bearer token. The probe was sending
'Authorization: Bearer <key>' but MCPG expects 'Authorization: <key>'.

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 src/data/1es-base.yml | 2 +-
 src/data/base.yml | 2 +-
 2 files changed, 2 insertions(+), 2 deletions(-)

diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml
index f79d6e4..be77b52 100644
--- a/src/data/1es-base.yml
+++ b/src/data/1es-base.yml
@@ -354,7 +354,7 @@ extends:
             echo ""
             echo "--- Probing: $server ---"
             HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \
-              -H "Authorization: Bearer $MCPG_API_KEY" \
+              -H "Authorization: $MCPG_API_KEY" \
               -H "Content-Type: application/json" \
               -H "Accept: application/json, text/event-stream" \
               -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \
diff --git a/src/data/base.yml b/src/data/base.yml
index b6d87a6..d3fb6e7 100644
--- a/src/data/base.yml
+++ b/src/data/base.yml
@@ -325,7 +325,7 @@ jobs:
             echo ""
             echo "--- Probing: $server ---"
             HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \
-              -H "Authorization: Bearer $MCPG_API_KEY" \
+              -H "Authorization: $MCPG_API_KEY" \
               -H "Content-Type: application/json" \
               -H "Accept: application/json, text/event-stream" \
               -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \

From da394de2e7625c22da56ac709c12f994d02c25c9 Mon Sep 17 00:00:00 2001
From: James Devine 
Date: Sat, 18 Apr 2026 00:31:03 +0100
Subject: [PATCH 11/20] fix: perform MCP initialize handshake before tools/list probe

MCP requires an initialize handshake before any other method. The probe
now sends initialize first, captures the Mcp-Session-Id from the
response header, then sends tools/list with that session ID. Also
parses SSE data lines to extract the tool count.
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/data/1es-base.yml | 35 ++++++++++++++++++++++++++++------- src/data/base.yml | 35 ++++++++++++++++++++++++++++------- 2 files changed, 56 insertions(+), 14 deletions(-) diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index be77b52..9262229 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -353,21 +353,42 @@ extends: for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do echo "" echo "--- Probing: $server ---" + # MCP requires initialize handshake before tools/list. + # Send initialize first, then tools/list in a second request + # using the session ID from the initialize response. + INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ + -H "Authorization: $MCPG_API_KEY" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') + echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" + + if [ -z "$SESSION_ID" ]; then + echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" + cat /tmp/probe-init.json 2>/dev/null || true + PROBE_FAILED=true + continue + fi + + # Now send tools/list with the session HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ -H "Authorization: $MCPG_API_KEY" \ -H "Content-Type: application/json" \ -H "Accept: application/json, text/event-stream" \ - -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ + -H "Mcp-Session-Id: $SESSION_ID" \ + -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ "http://localhost:{{ 
mcpg_port }}/mcp/$server" 2>&1) - CURL_EXIT=$? BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") - echo "HTTP $HTTP_CODE (curl exit: $CURL_EXIT)" - echo "Response: $BODY" - if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ $CURL_EXIT -eq 0 ]; then - TOOL_COUNT=$(echo "$BODY" | jq -r '.result.tools | length' 2>/dev/null || echo "?") + # Extract tool count from SSE data line + TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") + echo "tools/list: HTTP $HTTP_CODE" + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" ]; then echo "✓ $server: $TOOL_COUNT tools available" else - echo "##vso[task.logissue type=warning]MCP backend '$server' returned HTTP $HTTP_CODE" + echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" + echo "Response: $BODY" PROBE_FAILED=true fi done diff --git a/src/data/base.yml b/src/data/base.yml index d3fb6e7..180e8aa 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -324,21 +324,42 @@ jobs: for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do echo "" echo "--- Probing: $server ---" + # MCP requires initialize handshake before tools/list. + # Send initialize first, then tools/list in a second request + # using the session ID from the initialize response. 
+ INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ + -H "Authorization: $MCPG_API_KEY" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') + echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" + + if [ -z "$SESSION_ID" ]; then + echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" + cat /tmp/probe-init.json 2>/dev/null || true + PROBE_FAILED=true + continue + fi + + # Now send tools/list with the session HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ -H "Authorization: $MCPG_API_KEY" \ -H "Content-Type: application/json" \ -H "Accept: application/json, text/event-stream" \ - -d '{"jsonrpc":"2.0","id":1,"method":"tools/list"}' \ + -H "Mcp-Session-Id: $SESSION_ID" \ + -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - CURL_EXIT=$? BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") - echo "HTTP $HTTP_CODE (curl exit: $CURL_EXIT)" - echo "Response: $BODY" - if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ $CURL_EXIT -eq 0 ]; then - TOOL_COUNT=$(echo "$BODY" | jq -r '.result.tools | length' 2>/dev/null || echo "?") + # Extract tool count from SSE data line + TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") + echo "tools/list: HTTP $HTTP_CODE" + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" 
]; then echo "✓ $server: $TOOL_COUNT tools available" else - echo "##vso[task.logissue type=warning]MCP backend '$server' returned HTTP $HTTP_CODE" + echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" + echo "Response: $BODY" PROBE_FAILED=true fi done From 75a87e98cc0b3ea5aa649163eba2d2da407e38d4 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 08:21:08 +0100 Subject: [PATCH 12/20] fix: add --network host to ADO MCP container for AWF compatibility MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit AWF's DOCKER-USER iptables rules block outbound traffic from containers on Docker's default bridge network. The ADO MCP container (spawned by MCPG as a sibling container) was unable to reach dev.azure.com, causing MCP error -32001 (request timed out) on every tool call. Adding --network host via the args field bypasses the FORWARD chain rules, matching gh-aw's approach for its built-in agentic-workflows MCP server (see mcp_config_builtin.go). SafeOutputs was unaffected because it runs as an HTTP backend on localhost — no FORWARD chain involved. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/extensions.rs | 11 ++++++++++- 1 file changed, 10 insertions(+), 1 deletion(-) diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs index 9bc2251..378b3e2 100644 --- a/src/compile/extensions.rs +++ b/src/compile/extensions.rs @@ -519,6 +519,12 @@ impl CompilerExtension for AzureDevOpsExtension { String::new(), // Passthrough from pipeline )])); + // --network host: AWF's DOCKER-USER iptables rules block outbound from + // containers on Docker's default bridge. Host networking bypasses FORWARD + // chain rules so the ADO MCP can reach dev.azure.com. + // This matches gh-aw's approach for its built-in agentic-workflows MCP. 
+ let args = Some(vec!["--network".to_string(), "host".to_string()]); + Ok(vec![( ADO_MCP_SERVER_NAME.to_string(), McpgServerConfig { @@ -527,7 +533,7 @@ impl CompilerExtension for AzureDevOpsExtension { entrypoint: Some(ADO_MCP_ENTRYPOINT.to_string()), entrypoint_args: Some(entrypoint_args), mounts: None, - args: None, + args, url: None, headers: None, env, @@ -1044,6 +1050,9 @@ mod tests { .as_ref() .unwrap() .contains(&"myorg".to_string())); + // Must use --network host so AWF iptables don't block outbound + let args = servers[0].1.args.as_ref().expect("args should be set"); + assert_eq!(args, &vec!["--network".to_string(), "host".to_string()]); } #[test] From dd434043233e424007775363bba45376d5e47a1d Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 08:36:39 +0100 Subject: [PATCH 13/20] feat: guard MCPG debug diagnostics behind --debug-pipeline flag MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Move debug/diagnostic pipeline additions behind a compile-time flag (--debug-pipeline, debug builds only) matching the --skip-integrity pattern: - MCPG DEBUG=* env var and stderr tee → {{ mcpg_debug_flags }} marker - MCP backend probe step → {{ verify_mcp_backends }} marker When --debug-pipeline is not passed (the default), both markers resolve to empty strings and the generated pipeline contains no debug diagnostics. MCPG log artifact collection is kept always-on as it is low-cost and useful in production. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- AGENTS.md | 13 ++++ src/compile/common.rs | 139 +++++++++++++++++++++++++++++++++++++- src/compile/mod.rs | 9 ++- src/compile/onees.rs | 2 + src/compile/standalone.rs | 2 + src/data/1es-base.yml | 68 +------------------ src/data/base.yml | 68 +------------------ src/main.rs | 17 ++++- 8 files changed, 181 insertions(+), 137 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index 0fd3023..d3d112a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -739,6 +739,18 @@ Generates the "Verify pipeline integrity" pipeline step that downloads the relea When the compiler is built with `--skip-integrity` (debug builds only), this placeholder is replaced with an empty string and the integrity step is omitted from the generated pipeline. +## {{ mcpg_debug_flags }} + +Generates MCPG debug environment flags for the Docker run command. When `--debug-pipeline` is passed (debug builds only), this inserts `-e DEBUG="*"` to enable verbose MCPG logging and `2> >(tee ...)` to stream MCPG stderr to both the pipeline console and a log file. + +When `--debug-pipeline` is not passed, this placeholder is replaced with an empty string. + +## {{ verify_mcp_backends }} + +Generates a pipeline step that probes each configured MCPG backend with an MCP initialize + tools/list handshake. This forces MCPG's lazy initialization and catches failures (e.g., container timeout, network blocked) before the agent runs, surfacing them as ADO pipeline warnings. + +When `--debug-pipeline` is not passed (the default), this placeholder is replaced with an empty string. + ## {{ pr_trigger }} Generates PR trigger configuration. When a schedule or pipeline trigger is configured, this generates `pr: none` to disable PR triggers. Otherwise, it generates an empty string, allowing the default PR trigger behavior. 
@@ -963,6 +975,7 @@ Global flags (apply to all subcommands): `--verbose, -v` (enable info-level logg - `compile []` - Compile a markdown file to Azure DevOps pipeline YAML. If no path is given, auto-discovers and recompiles all detected agentic pipelines in the current directory. - `--output, -o ` - Optional output path for generated YAML (only valid when a path is provided) - `--skip-integrity` - *(debug builds only)* Omit the "Verify pipeline integrity" step from the generated pipeline. Useful during local development when the compiled output won't match a released compiler version. This flag is not available in release builds. + - `--debug-pipeline` - *(debug builds only)* Include MCPG debug diagnostics in the generated pipeline: `DEBUG=*` environment variable for verbose MCPG logging, stderr streaming to log files, and a "Verify MCP backends" step that probes each backend with MCP initialize + tools/list before the agent runs. This flag is not available in release builds. - `check ` - Verify that a compiled pipeline matches its source markdown - `` - Path to the pipeline YAML file to verify - The source markdown path is auto-detected from the `@ado-aw` header in the pipeline file diff --git a/src/compile/common.rs b/src/compile/common.rs index 8a2a4c7..6609b62 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -850,6 +850,95 @@ pub fn generate_integrity_check(skip: bool) -> String { .to_string() } +/// Generate debug pipeline replacement values for template markers. +/// +/// When `debug` is `true`, returns content for MCPG debug diagnostics: +/// - `{{ mcpg_debug_flags }}`: `-e DEBUG="*"` env, stderr tee redirect, and +/// stderr dump on health-check failure +/// - `{{ verify_mcp_backends }}`: full pipeline step that probes each MCPG +/// backend with MCP initialize + tools/list +/// +/// When `debug` is `false`, both markers resolve to empty strings. 
+pub fn generate_debug_pipeline_replacements(debug: bool) -> Vec<(String, String)> { + if !debug { + return vec![ + ("{{ mcpg_debug_flags }}".into(), String::new()), + ("{{ verify_mcp_backends }}".into(), String::new()), + ]; + } + + let mcpg_debug_flags = r##"-e DEBUG="*" \ + 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2)"## + .to_string(); + + let verify_mcp_backends = r###"# Probe all MCPG backends to force eager launch and surface failures. + # MCPG lazily starts stdio backends on first tool call — without this + # step, a broken backend (e.g., npx timeout) only surfaces as a silent + # missing-tool error during the agent run. + - bash: | + echo "=== Probing MCP backends ===" + PROBE_FAILED=false + for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do + echo "" + echo "--- Probing: $server ---" + # MCP requires initialize handshake before tools/list. + # Send initialize first, then tools/list in a second request + # using the session ID from the initialize response. 
+ INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ + -H "Authorization: $MCPG_API_KEY" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') + echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" + + if [ -z "$SESSION_ID" ]; then + echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" + cat /tmp/probe-init.json 2>/dev/null || true + PROBE_FAILED=true + continue + fi + + # Now send tools/list with the session + HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ + -H "Authorization: $MCPG_API_KEY" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -H "Mcp-Session-Id: $SESSION_ID" \ + -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") + # Extract tool count from SSE data line + TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") + echo "tools/list: HTTP $HTTP_CODE" + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" ]; then + echo "✓ $server: $TOOL_COUNT tools available" + else + echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" + echo "Response: $BODY" + PROBE_FAILED=true + fi + done + + echo "" + echo "=== MCPG health after probes ===" + curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . 
|| true + + if [ "$PROBE_FAILED" = "true" ]; then + echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" + fi + displayName: "Verify MCP backends" + env: + MCPG_API_KEY: $(MCP_GATEWAY_API_KEY)"### + .to_string(); + + vec![ + ("{{ mcpg_debug_flags }}".into(), mcpg_debug_flags), + ("{{ verify_mcp_backends }}".into(), verify_mcp_backends), + ] +} + /// Generate the pipeline YAML path for integrity checking at ADO runtime. /// /// Returns a path using `{{ workspace }}` as the base, derived from the @@ -1967,6 +2056,10 @@ pub struct CompileConfig { /// generated pipeline. This is a developer-only option gated behind /// `cfg(debug_assertions)` at the CLI level. pub skip_integrity: bool, + /// When true, MCPG debug diagnostics (debug logging, stderr streaming, + /// backend probe step) are included in the generated pipeline. + /// Gated behind `cfg(debug_assertions)` at the CLI level. + pub debug_pipeline: bool, } /// Shared compilation flow used by both standalone and 1ES compilers. @@ -2105,7 +2198,14 @@ pub async fn compile_shared( template = replace_with_indent(&template, placeholder, replacement); } - // 13. Shared replacements + // 13. Debug pipeline replacements (before shared replacements since the + // probe step content itself contains {{ mcpg_port }}). + let debug_replacements = generate_debug_pipeline_replacements(config.debug_pipeline); + for (placeholder, replacement) in &debug_replacements { + template = replace_with_indent(&template, placeholder, replacement); + } + + // 14. Shared replacements let compiler_version = env!("CARGO_PKG_VERSION"); let integrity_check = generate_integrity_check(config.skip_integrity); let replacements: Vec<(&str, &str)> = vec![ @@ -2150,7 +2250,7 @@ pub async fn compile_shared( replace_with_indent(&yaml, placeholder, replacement) }); - // 14. Prepend header + // 15. 
Prepend header let header = generate_header_comment(input_path); Ok(format!("{}{}", header, pipeline_yaml)) } @@ -2875,6 +2975,41 @@ mod tests { ); } + // ─── generate_debug_pipeline_replacements ──────────────────────────────── + + #[test] + fn test_debug_pipeline_replacements_disabled() { + let replacements = generate_debug_pipeline_replacements(false); + assert_eq!(replacements.len(), 2); + for (marker, value) in &replacements { + assert!( + value.is_empty(), + "Marker '{}' should be empty when debug is false", + marker + ); + } + } + + #[test] + fn test_debug_pipeline_replacements_enabled() { + let replacements = generate_debug_pipeline_replacements(true); + assert_eq!(replacements.len(), 2); + + let flags = replacements.iter().find(|(m, _)| m == "{{ mcpg_debug_flags }}"); + assert!(flags.is_some(), "Should have mcpg_debug_flags marker"); + let flags_value = &flags.unwrap().1; + assert!(flags_value.contains("DEBUG"), "Should contain DEBUG env var"); + assert!(flags_value.contains("tee"), "Should contain stderr tee"); + + let probe = replacements.iter().find(|(m, _)| m == "{{ verify_mcp_backends }}"); + assert!(probe.is_some(), "Should have verify_mcp_backends marker"); + let probe_value = &probe.unwrap().1; + assert!(probe_value.contains("Verify MCP backends"), "Should contain displayName"); + assert!(probe_value.contains("tools/list"), "Should contain tools/list probe"); + assert!(probe_value.contains("initialize"), "Should contain initialize handshake"); + assert!(probe_value.contains("MCPG_API_KEY"), "Should contain API key env mapping"); + } + // ─── validate_submit_pr_review_events ──────────────────────────────────── #[test] diff --git a/src/compile/mod.rs b/src/compile/mod.rs index ae07370..0edf727 100644 --- a/src/compile/mod.rs +++ b/src/compile/mod.rs @@ -35,6 +35,7 @@ pub trait Compiler: Send + Sync { front_matter: &FrontMatter, markdown_body: &str, skip_integrity: bool, + debug_pipeline: bool, ) -> Result; /// Get the target name for logging. 
@@ -48,6 +49,7 @@ pub async fn compile_pipeline( input_path: &str, output_path: Option<&str>, skip_integrity: bool, + debug_pipeline: bool, ) -> Result<()> { let input_path = Path::new(input_path); info!("Compiling pipeline from: {}", input_path.display()); @@ -94,7 +96,7 @@ pub async fn compile_pipeline( // Compile let pipeline_yaml = compiler - .compile(input_path, &yaml_output_path, &front_matter, &markdown_body, skip_integrity) + .compile(input_path, &yaml_output_path, &front_matter, &markdown_body, skip_integrity, debug_pipeline) .await?; // Clean up spacing artifacts from empty placeholder replacements @@ -124,7 +126,7 @@ pub async fn compile_pipeline( /// Scans for compiled YAML files containing the `# @ado-aw source=...` header, /// resolves each source markdown path, and recompiles. Pipelines whose source /// files are missing are reported but don't abort the batch. -pub async fn compile_all_pipelines(skip_integrity: bool) -> Result<()> { +pub async fn compile_all_pipelines(skip_integrity: bool, debug_pipeline: bool) -> Result<()> { let root = Path::new("."); info!("Auto-discovering agentic pipelines for recompilation"); @@ -168,7 +170,7 @@ pub async fn compile_all_pipelines(skip_integrity: bool) -> Result<()> { let source_str = source_path.to_string_lossy(); let output_str = yaml_output_path.to_string_lossy(); - match compile_pipeline(&source_str, Some(&output_str), skip_integrity).await { + match compile_pipeline(&source_str, Some(&output_str), skip_integrity, debug_pipeline).await { Ok(()) => success_count += 1, Err(e) => { eprintln!( @@ -277,6 +279,7 @@ pub async fn check_pipeline(pipeline_path: &str) -> Result<()> { &front_matter, &markdown_body, false, + false, ) .await?; let pipeline_yaml = clean_generated_yaml(&pipeline_yaml); diff --git a/src/compile/onees.rs b/src/compile/onees.rs index 891a41b..8a3f600 100644 --- a/src/compile/onees.rs +++ b/src/compile/onees.rs @@ -37,6 +37,7 @@ impl Compiler for OneESCompiler { front_matter: &FrontMatter, 
markdown_body: &str, skip_integrity: bool, + debug_pipeline: bool, ) -> Result { info!("Compiling for 1ES target"); @@ -80,6 +81,7 @@ impl Compiler for OneESCompiler { ("{{ teardown_job }}".into(), teardown_job), ], skip_integrity, + debug_pipeline, }; compile_shared(input_path, output_path, front_matter, markdown_body, &extensions, &ctx, config).await diff --git a/src/compile/standalone.rs b/src/compile/standalone.rs index 69dd472..4af8e5e 100644 --- a/src/compile/standalone.rs +++ b/src/compile/standalone.rs @@ -38,6 +38,7 @@ impl Compiler for StandaloneCompiler { front_matter: &FrontMatter, markdown_body: &str, skip_integrity: bool, + debug_pipeline: bool, ) -> Result { info!("Compiling for standalone target"); @@ -73,6 +74,7 @@ impl Compiler for StandaloneCompiler { ("{{ mcpg_docker_env }}".into(), mcpg_docker_env), ], skip_integrity, + debug_pipeline, }; compile_shared(input_path, output_path, front_matter, markdown_body, &extensions, &ctx, config).await diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 9262229..f07e0d0 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -269,7 +269,6 @@ extends: # Upstream fix: https://github.com/github/gh-aw-mcpg/issues/TBD # # stdout → gateway-output.json (machine-readable config, read after health check) - # stderr → tee to both pipeline console and mcp-logs/stderr.log echo "$MCPG_CONFIG" | docker run -i --rm \ --name mcpg \ --network host \ @@ -278,11 +277,11 @@ extends: -e MCP_GATEWAY_PORT="$(MCP_GATEWAY_PORT)" \ -e MCP_GATEWAY_DOMAIN="$(MCP_GATEWAY_DOMAIN)" \ -e MCP_GATEWAY_API_KEY="$(MCP_GATEWAY_API_KEY)" \ - -e DEBUG="*" \ + {{ mcpg_debug_flags }} {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ - > "$GATEWAY_OUTPUT" 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2) & + > "$GATEWAY_OUTPUT" & MCPG_PID=$! 
echo "MCPG started (PID: $MCPG_PID)" @@ -298,8 +297,6 @@ extends: done if [ "$READY" != "true" ]; then echo "##vso[task.complete result=Failed]MCPG did not become ready within 30s" - echo "MCPG stderr:" - cat /tmp/gh-aw/mcp-logs/stderr.log 2>/dev/null || true exit 1 fi @@ -343,66 +340,7 @@ extends: cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" - # Probe all MCPG backends to force eager launch and surface failures. - # MCPG lazily starts stdio backends on first tool call — without this - # step, a broken backend (e.g., npx timeout) only surfaces as a silent - # missing-tool error during the agent run. - - bash: | - echo "=== Probing MCP backends ===" - PROBE_FAILED=false - for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do - echo "" - echo "--- Probing: $server ---" - # MCP requires initialize handshake before tools/list. - # Send initialize first, then tools/list in a second request - # using the session ID from the initialize response. 
- INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: $MCPG_API_KEY" \ - -H "Content-Type: application/json" \ - -H "Accept: application/json, text/event-stream" \ - -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ - "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') - echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" - - if [ -z "$SESSION_ID" ]; then - echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" - cat /tmp/probe-init.json 2>/dev/null || true - PROBE_FAILED=true - continue - fi - - # Now send tools/list with the session - HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: $MCPG_API_KEY" \ - -H "Content-Type: application/json" \ - -H "Accept: application/json, text/event-stream" \ - -H "Mcp-Session-Id: $SESSION_ID" \ - -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ - "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") - # Extract tool count from SSE data line - TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") - echo "tools/list: HTTP $HTTP_CODE" - if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" ]; then - echo "✓ $server: $TOOL_COUNT tools available" - else - echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" - echo "Response: $BODY" - PROBE_FAILED=true - fi - done - - echo "" - echo "=== MCPG health after probes ===" - curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . 
|| true - - if [ "$PROBE_FAILED" = "true" ]; then - echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" - fi - displayName: "Verify MCP backends" - env: - MCPG_API_KEY: $(MCP_GATEWAY_API_KEY) + {{ verify_mcp_backends }} # Network isolation via AWF (Agentic Workflow Firewall) - bash: | diff --git a/src/data/base.yml b/src/data/base.yml index 180e8aa..c51c431 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -240,7 +240,6 @@ jobs: # Upstream fix: https://github.com/github/gh-aw-mcpg/issues/TBD # # stdout → gateway-output.json (machine-readable config, read after health check) - # stderr → tee to both pipeline console and mcp-logs/stderr.log echo "$MCPG_CONFIG" | docker run -i --rm \ --name mcpg \ --network host \ @@ -249,11 +248,11 @@ jobs: -e MCP_GATEWAY_PORT="$(MCP_GATEWAY_PORT)" \ -e MCP_GATEWAY_DOMAIN="$(MCP_GATEWAY_DOMAIN)" \ -e MCP_GATEWAY_API_KEY="$(MCP_GATEWAY_API_KEY)" \ - -e DEBUG="*" \ + {{ mcpg_debug_flags }} {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ - > "$GATEWAY_OUTPUT" 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2) & + > "$GATEWAY_OUTPUT" & MCPG_PID=$! echo "MCPG started (PID: $MCPG_PID)" @@ -269,8 +268,6 @@ jobs: done if [ "$READY" != "true" ]; then echo "##vso[task.complete result=Failed]MCPG did not become ready within 30s" - echo "MCPG stderr:" - cat /tmp/gh-aw/mcp-logs/stderr.log 2>/dev/null || true exit 1 fi @@ -314,66 +311,7 @@ jobs: cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" - # Probe all MCPG backends to force eager launch and surface failures. - # MCPG lazily starts stdio backends on first tool call — without this - # step, a broken backend (e.g., npx timeout) only surfaces as a silent - # missing-tool error during the agent run. 
- - bash: | - echo "=== Probing MCP backends ===" - PROBE_FAILED=false - for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do - echo "" - echo "--- Probing: $server ---" - # MCP requires initialize handshake before tools/list. - # Send initialize first, then tools/list in a second request - # using the session ID from the initialize response. - INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: $MCPG_API_KEY" \ - -H "Content-Type: application/json" \ - -H "Accept: application/json, text/event-stream" \ - -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ - "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') - echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" - - if [ -z "$SESSION_ID" ]; then - echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" - cat /tmp/probe-init.json 2>/dev/null || true - PROBE_FAILED=true - continue - fi - - # Now send tools/list with the session - HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: $MCPG_API_KEY" \ - -H "Content-Type: application/json" \ - -H "Accept: application/json, text/event-stream" \ - -H "Mcp-Session-Id: $SESSION_ID" \ - -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ - "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") - # Extract tool count from SSE data line - TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") - echo "tools/list: HTTP $HTTP_CODE" - if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" 
]; then - echo "✓ $server: $TOOL_COUNT tools available" - else - echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" - echo "Response: $BODY" - PROBE_FAILED=true - fi - done - - echo "" - echo "=== MCPG health after probes ===" - curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . || true - - if [ "$PROBE_FAILED" = "true" ]; then - echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" - fi - displayName: "Verify MCP backends" - env: - MCPG_API_KEY: $(MCP_GATEWAY_API_KEY) + {{ verify_mcp_backends }} # Network isolation via AWF (Agentic Workflow Firewall) - bash: | diff --git a/src/main.rs b/src/main.rs index 6d94606..83fab8f 100644 --- a/src/main.rs +++ b/src/main.rs @@ -36,6 +36,12 @@ enum Commands { #[cfg(debug_assertions)] #[arg(long)] skip_integrity: bool, + /// Include MCPG debug diagnostics in the generated pipeline (debug + /// logging, stderr streaming, backend probe step). + /// Only available in debug builds. 
+ #[cfg(debug_assertions)] + #[arg(long)] + debug_pipeline: bool, }, /// Check that a compiled pipeline matches its source markdown Check { @@ -160,16 +166,23 @@ async fn main() -> Result<()> { output, #[cfg(debug_assertions)] skip_integrity, + #[cfg(debug_assertions)] + debug_pipeline, } => { #[cfg(not(debug_assertions))] let skip_integrity = false; + #[cfg(not(debug_assertions))] + let debug_pipeline = false; if skip_integrity { eprintln!("Warning: pipeline integrity check step omitted (--skip-integrity)"); } + if debug_pipeline { + eprintln!("Warning: debug diagnostics enabled in generated pipeline (--debug-pipeline)"); + } match path { - Some(p) => compile::compile_pipeline(&p, output.as_deref(), skip_integrity).await?, + Some(p) => compile::compile_pipeline(&p, output.as_deref(), skip_integrity, debug_pipeline).await?, None => { if output.is_some() { anyhow::bail!( @@ -177,7 +190,7 @@ async fn main() -> Result<()> { Specify a path to compile a single file with a custom output." ); } - compile::compile_all_pipelines(skip_integrity).await? + compile::compile_all_pipelines(skip_integrity, debug_pipeline).await? } } } From 971a3d71308d1502726fece061b3d4da195c1a00 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 08:41:55 +0100 Subject: [PATCH 14/20] fix: add DEBUG=* env to ADO MCP container for diagnostics The ADO MCP container still times out on API calls despite --network host. Adding DEBUG=* enables verbose logging from the @azure-devops/mcp npm package to surface the root cause (auth issues, DNS, proxy, etc.). 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/extensions.rs | 14 ++++++++++---- 1 file changed, 10 insertions(+), 4 deletions(-) diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs index 378b3e2..6572d56 100644 --- a/src/compile/extensions.rs +++ b/src/compile/extensions.rs @@ -514,10 +514,16 @@ impl CompilerExtension for AzureDevOpsExtension { }; // ADO MCP needs the PAT token passed via environment - let env = Some(HashMap::from([( - "AZURE_DEVOPS_EXT_PAT".to_string(), - String::new(), // Passthrough from pipeline - )])); + let env = Some(HashMap::from([ + ( + "AZURE_DEVOPS_EXT_PAT".to_string(), + String::new(), // Passthrough from pipeline + ), + ( + "DEBUG".to_string(), + "*".to_string(), // Verbose logging for ADO MCP + ), + ])); // --network host: AWF's DOCKER-USER iptables rules block outbound from // containers on Docker's default bridge. Host networking bypasses FORWARD From bed5cbc43f0d0e5665c93ac6a6de12fc24fce656 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 08:50:51 +0100 Subject: [PATCH 15/20] test: add integration tests for --skip-integrity and --debug-pipeline Add 12 new compiler integration tests covering: - --skip-integrity: omits integrity step, produces valid YAML (both targets) - --debug-pipeline: includes DEBUG env, stderr tee, probe step, produces valid YAML (both targets) - Combined flags work together - Default (no flags) excludes debug content and includes integrity step - Probe step indentation is correct for both standalone (8sp) and 1ES (18sp) Also fix replacement content indentation: replace_with_indent adds the marker's indent to continuation lines, so replacement content must have zero base indent. Also refactor compile_fixture into compile_fixture_with_flags with atomic counter for unique temp dirs to prevent parallel test collisions. 
Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/common.rs | 120 +++++++++++----------- tests/compiler_tests.rs | 215 ++++++++++++++++++++++++++++++++++++++-- 2 files changed, 266 insertions(+), 69 deletions(-) diff --git a/src/compile/common.rs b/src/compile/common.rs index 6609b62..5908610 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -868,69 +868,69 @@ pub fn generate_debug_pipeline_replacements(debug: bool) -> Vec<(String, String) } let mcpg_debug_flags = r##"-e DEBUG="*" \ - 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2)"## +2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2)"## .to_string(); let verify_mcp_backends = r###"# Probe all MCPG backends to force eager launch and surface failures. - # MCPG lazily starts stdio backends on first tool call — without this - # step, a broken backend (e.g., npx timeout) only surfaces as a silent - # missing-tool error during the agent run. - - bash: | - echo "=== Probing MCP backends ===" - PROBE_FAILED=false - for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do - echo "" - echo "--- Probing: $server ---" - # MCP requires initialize handshake before tools/list. - # Send initialize first, then tools/list in a second request - # using the session ID from the initialize response. 
- INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: $MCPG_API_KEY" \ - -H "Content-Type: application/json" \ - -H "Accept: application/json, text/event-stream" \ - -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ - "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') - echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" - - if [ -z "$SESSION_ID" ]; then - echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" - cat /tmp/probe-init.json 2>/dev/null || true - PROBE_FAILED=true - continue - fi - - # Now send tools/list with the session - HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ - -H "Authorization: $MCPG_API_KEY" \ - -H "Content-Type: application/json" \ - -H "Accept: application/json, text/event-stream" \ - -H "Mcp-Session-Id: $SESSION_ID" \ - -d '{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ - "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) - BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") - # Extract tool count from SSE data line - TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") - echo "tools/list: HTTP $HTTP_CODE" - if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" ]; then - echo "✓ $server: $TOOL_COUNT tools available" - else - echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" - echo "Response: $BODY" - PROBE_FAILED=true - fi - done - - echo "" - echo "=== MCPG health after probes ===" - curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . 
|| true - - if [ "$PROBE_FAILED" = "true" ]; then - echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" - fi - displayName: "Verify MCP backends" - env: - MCPG_API_KEY: $(MCP_GATEWAY_API_KEY)"### +# MCPG lazily starts stdio backends on first tool call — without this +# step, a broken backend (e.g., npx timeout) only surfaces as a silent +# missing-tool error during the agent run. +- bash: | + echo "=== Probing MCP backends ===" + PROBE_FAILED=false + for server in $(jq -r '.mcpServers | keys[]' /tmp/awf-tools/mcp-config.json); do + echo "" + echo "--- Probing: $server ---" + # MCP requires initialize handshake before tools/list. + # Send initialize first, then tools/list in a second request + # using the session ID from the initialize response. + INIT_RESPONSE=$(curl -s -D /tmp/probe-headers.txt -o /tmp/probe-init.json -w "%{http_code}" --max-time 120 -X POST \ + -H "Authorization: $MCPG_API_KEY" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -d '{"jsonrpc":"2.0","id":1,"method":"initialize","params":{"protocolVersion":"2025-03-26","capabilities":{},"clientInfo":{"name":"ado-aw-probe","version":"1.0"}}}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + SESSION_ID=$(grep -i "mcp-session-id" /tmp/probe-headers.txt 2>/dev/null | tr -d '\r' | awk '{print $2}') + echo "Initialize: HTTP $INIT_RESPONSE, session=$SESSION_ID" + + if [ -z "$SESSION_ID" ]; then + echo "##vso[task.logissue type=warning]MCP backend '$server' did not return a session ID" + cat /tmp/probe-init.json 2>/dev/null || true + PROBE_FAILED=true + continue + fi + + # Now send tools/list with the session + HTTP_CODE=$(curl -s -o /tmp/probe-response.json -w "%{http_code}" --max-time 120 -X POST \ + -H "Authorization: $MCPG_API_KEY" \ + -H "Content-Type: application/json" \ + -H "Accept: application/json, text/event-stream" \ + -H "Mcp-Session-Id: $SESSION_ID" \ + -d 
'{"jsonrpc":"2.0","id":2,"method":"tools/list"}' \ + "http://localhost:{{ mcpg_port }}/mcp/$server" 2>&1) + BODY=$(cat /tmp/probe-response.json 2>/dev/null || echo "(empty)") + # Extract tool count from SSE data line + TOOL_COUNT=$(echo "$BODY" | grep '^data:' | sed 's/^data: //' | jq -r '.result.tools | length' 2>/dev/null || echo "?") + echo "tools/list: HTTP $HTTP_CODE" + if [ "$HTTP_CODE" -ge 200 ] && [ "$HTTP_CODE" -lt 300 ] && [ "$TOOL_COUNT" != "?" ]; then + echo "✓ $server: $TOOL_COUNT tools available" + else + echo "##vso[task.logissue type=warning]MCP backend '$server' tools/list returned HTTP $HTTP_CODE" + echo "Response: $BODY" + PROBE_FAILED=true + fi + done + + echo "" + echo "=== MCPG health after probes ===" + curl -sf "http://localhost:{{ mcpg_port }}/health" | jq . || true + + if [ "$PROBE_FAILED" = "true" ]; then + echo "##vso[task.logissue type=warning]One or more MCP backends failed to initialize — check logs above" + fi + displayName: "Verify MCP backends" + env: + MCPG_API_KEY: $(MCP_GATEWAY_API_KEY)"### .to_string(); vec![ diff --git a/tests/compiler_tests.rs b/tests/compiler_tests.rs index bcbf27b..5c1629a 100644 --- a/tests/compiler_tests.rs +++ b/tests/compiler_tests.rs @@ -2926,10 +2926,20 @@ network: /// Helper: compile a fixture and return the compiled YAML string. fn compile_fixture(fixture_name: &str) -> String { + compile_fixture_with_flags(fixture_name, &[]) +} + +/// Compile a fixture with additional CLI flags (e.g., --skip-integrity, --debug-pipeline). 
+fn compile_fixture_with_flags(fixture_name: &str, extra_flags: &[&str]) -> String { + use std::sync::atomic::{AtomicU64, Ordering}; + static COUNTER: AtomicU64 = AtomicU64::new(0); + let unique_id = COUNTER.fetch_add(1, Ordering::Relaxed); + let temp_dir = std::env::temp_dir().join(format!( - "agentic-pipeline-yaml-validation-{}-{}", + "agentic-pipeline-yaml-validation-{}-{}-{}", fixture_name.replace('.', "-"), - std::process::id() + std::process::id(), + unique_id, )); fs::create_dir_all(&temp_dir).expect("Failed to create temp directory"); @@ -2941,20 +2951,26 @@ fn compile_fixture(fixture_name: &str) -> String { let output_path = temp_dir.join(fixture_name.replace(".md", ".yml")); let binary_path = PathBuf::from(env!("CARGO_BIN_EXE_ado-aw")); + let mut args = vec![ + "compile".to_string(), + fixture_path.to_str().unwrap().to_string(), + "-o".to_string(), + output_path.to_str().unwrap().to_string(), + ]; + for flag in extra_flags { + args.push(flag.to_string()); + } + let output = std::process::Command::new(&binary_path) - .args([ - "compile", - fixture_path.to_str().unwrap(), - "-o", - output_path.to_str().unwrap(), - ]) + .args(&args) .output() .expect("Failed to run compiler"); assert!( output.status.success(), - "Compilation of {} should succeed: {}", + "Compilation of {} with flags {:?} should succeed: {}", fixture_name, + extra_flags, String::from_utf8_lossy(&output.stderr) ); @@ -3097,3 +3113,184 @@ fn test_standalone_pipeline_trigger_compiled_output_is_valid_yaml() { let compiled = compile_fixture("pipeline-trigger-agent.md"); assert_valid_yaml(&compiled, "pipeline-trigger-agent.md"); } + +// ─── --skip-integrity flag tests ───────────────────────────────────────── + +/// Test that --skip-integrity omits the integrity check step from the pipeline +#[test] +fn test_skip_integrity_omits_integrity_step() { + let compiled = compile_fixture_with_flags("minimal-agent.md", &["--skip-integrity"]); + assert_valid_yaml(&compiled, "minimal-agent.md 
(skip-integrity)"); + + assert!( + !compiled.contains("Verify pipeline integrity"), + "Pipeline compiled with --skip-integrity should NOT contain the integrity check step" + ); +} + +/// Test that without --skip-integrity, the integrity check step IS present +#[test] +fn test_default_includes_integrity_step() { + let compiled = compile_fixture("minimal-agent.md"); + + assert!( + compiled.contains("Verify pipeline integrity"), + "Pipeline compiled without --skip-integrity should contain the integrity check step" + ); + assert!( + compiled.contains("agentic-pipeline-compiler/ado-aw"), + "Pipeline compiled without --skip-integrity should reference the ado-aw binary" + ); +} + +/// Test that --skip-integrity produces valid YAML for both standalone and 1ES +#[test] +fn test_skip_integrity_valid_yaml_standalone() { + let compiled = compile_fixture_with_flags("complete-agent.md", &["--skip-integrity"]); + assert_valid_yaml(&compiled, "complete-agent.md (skip-integrity)"); +} + +#[test] +fn test_skip_integrity_valid_yaml_1es() { + let compiled = compile_fixture_with_flags("1es-test-agent.md", &["--skip-integrity"]); + assert_valid_yaml(&compiled, "1es-test-agent.md (skip-integrity)"); +} + +// ─── --debug-pipeline flag tests ───────────────────────────────────────── + +/// Test that --debug-pipeline includes MCPG debug diagnostics +#[test] +fn test_debug_pipeline_includes_debug_env() { + let compiled = compile_fixture_with_flags("minimal-agent.md", &["--debug-pipeline"]); + assert_valid_yaml(&compiled, "minimal-agent.md (debug-pipeline)"); + + assert!( + compiled.contains(r#"DEBUG="*""#), + "Pipeline compiled with --debug-pipeline should contain DEBUG=* env var" + ); + assert!( + compiled.contains("tee /tmp/gh-aw/mcp-logs/stderr.log"), + "Pipeline compiled with --debug-pipeline should contain stderr tee" + ); +} + +/// Test that --debug-pipeline includes the MCP backend probe step +#[test] +fn test_debug_pipeline_includes_probe_step() { + let compiled = 
compile_fixture_with_flags("minimal-agent.md", &["--debug-pipeline"]); + + assert!( + compiled.contains("Verify MCP backends"), + "Pipeline compiled with --debug-pipeline should contain probe step displayName" + ); + assert!( + compiled.contains("tools/list"), + "Pipeline compiled with --debug-pipeline should contain tools/list probe" + ); + assert!( + compiled.contains("initialize"), + "Pipeline compiled with --debug-pipeline should contain initialize handshake" + ); + assert!( + compiled.contains("MCPG_API_KEY: $(MCP_GATEWAY_API_KEY)"), + "Pipeline compiled with --debug-pipeline should map MCPG_API_KEY env" + ); +} + +/// Test that without --debug-pipeline, debug diagnostics are NOT present +#[test] +fn test_default_excludes_debug_diagnostics() { + let compiled = compile_fixture("minimal-agent.md"); + + assert!( + !compiled.contains(r#"DEBUG="*""#), + "Pipeline compiled without --debug-pipeline should NOT contain DEBUG=* env var" + ); + assert!( + !compiled.contains("Verify MCP backends"), + "Pipeline compiled without --debug-pipeline should NOT contain probe step" + ); + assert!( + !compiled.contains("tee /tmp/gh-aw/mcp-logs/stderr.log"), + "Pipeline compiled without --debug-pipeline should NOT contain stderr tee" + ); +} + +/// Test that --debug-pipeline produces valid YAML for both targets +#[test] +fn test_debug_pipeline_valid_yaml_standalone() { + let compiled = compile_fixture_with_flags("complete-agent.md", &["--debug-pipeline"]); + assert_valid_yaml(&compiled, "complete-agent.md (debug-pipeline)"); +} + +#[test] +fn test_debug_pipeline_valid_yaml_1es() { + let compiled = compile_fixture_with_flags("1es-test-agent.md", &["--debug-pipeline"]); + assert_valid_yaml(&compiled, "1es-test-agent.md (debug-pipeline)"); +} + +/// Test that both flags can be combined +#[test] +fn test_skip_integrity_and_debug_pipeline_combined() { + let compiled = compile_fixture_with_flags( + "minimal-agent.md", + &["--skip-integrity", "--debug-pipeline"], + ); + 
assert_valid_yaml(&compiled, "minimal-agent.md (skip-integrity + debug-pipeline)"); + + // Debug content present + assert!( + compiled.contains(r#"DEBUG="*""#), + "Combined flags: should contain DEBUG=*" + ); + assert!( + compiled.contains("Verify MCP backends"), + "Combined flags: should contain probe step" + ); + + // Integrity content absent + assert!( + !compiled.contains("Verify pipeline integrity"), + "Combined flags: should NOT contain integrity check" + ); +} + +/// Test that debug probe step indentation is correct in standalone output +#[test] +fn test_debug_pipeline_probe_step_indentation_standalone() { + let compiled = compile_fixture_with_flags("minimal-agent.md", &["--debug-pipeline"]); + + // The probe step should be a proper YAML step at the same indent level as + // other steps in the Agent job. Find the "- bash:" line and check indent. + for line in compiled.lines() { + if line.contains("displayName: \"Verify MCP backends\"") { + let indent = line.len() - line.trim_start().len(); + // Standalone jobs use 8 spaces for step properties + assert_eq!( + indent, 8, + "Verify MCP backends displayName should be at 8 spaces indent in standalone, got {}", + indent + ); + break; + } + } +} + +/// Test that debug probe step indentation is correct in 1ES output +#[test] +fn test_debug_pipeline_probe_step_indentation_1es() { + let compiled = compile_fixture_with_flags("1es-test-agent.md", &["--debug-pipeline"]); + + for line in compiled.lines() { + if line.contains("displayName: \"Verify MCP backends\"") { + let indent = line.len() - line.trim_start().len(); + // 1ES uses 18 spaces for step properties inside templateContext + assert_eq!( + indent, 18, + "Verify MCP backends displayName should be at 18 spaces indent in 1ES, got {}", + indent + ); + break; + } + } +} From 6d1e845f8750101cc5d564c38b203e956bda5ba1 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 08:54:26 +0100 Subject: [PATCH 16/20] fix: resolve debug step replacement ordering and 
add marker test Debug replacements must run BEFORE extra_replacements so that {{ mcpg_port }} inside the probe step content gets resolved by the subsequent extra_replacements pass. Previously, debug replacements ran after extra_replacements, leaving {{ mcpg_port }} unresolved. Add test_debug_pipeline_no_unresolved_markers to catch this class of ordering bugs. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/common.rs | 15 +++++++++------ tests/compiler_tests.rs | 27 ++++++++++++++++++++++++++- 2 files changed, 35 insertions(+), 7 deletions(-) diff --git a/src/compile/common.rs b/src/compile/common.rs index 5908610..d605b44 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -2192,16 +2192,19 @@ pub async fn compile_shared( // 12. Apply extra replacements first (target-specific overrides) // These run before shared replacements so targets can override shared - // markers like {{ setup_job }} and {{ teardown_job }}. + // 12. Debug pipeline replacements (MUST run before extra_replacements + // because the probe step content contains {{ mcpg_port }} which is + // resolved by extra_replacements). + let debug_replacements = generate_debug_pipeline_replacements(config.debug_pipeline); let mut template = template; - for (placeholder, replacement) in &config.extra_replacements { + for (placeholder, replacement) in &debug_replacements { template = replace_with_indent(&template, placeholder, replacement); } - // 13. Debug pipeline replacements (before shared replacements since the - // probe step content itself contains {{ mcpg_port }}). - let debug_replacements = generate_debug_pipeline_replacements(config.debug_pipeline); - for (placeholder, replacement) in &debug_replacements { + // 13. Apply extra replacements (target-specific overrides like {{ mcpg_port }}) + // These run before shared replacements so targets can override shared + // markers like {{ setup_job }} and {{ teardown_job }}. 
+    for (placeholder, replacement) in &config.extra_replacements {
         template = replace_with_indent(&template, placeholder, replacement);
     }
 
diff --git a/tests/compiler_tests.rs b/tests/compiler_tests.rs
index 5c1629a..f7bd3f1 100644
--- a/tests/compiler_tests.rs
+++ b/tests/compiler_tests.rs
@@ -3255,8 +3255,36 @@ fn test_skip_integrity_and_debug_pipeline_combined() {
     );
 }
 
-/// Test that debug probe step indentation is correct in standalone output
+/// Test that debug probe step has no unresolved template markers
 #[test]
+fn test_debug_pipeline_no_unresolved_markers() {
+    let compiled = compile_fixture_with_flags("minimal-agent.md", &["--debug-pipeline"]);
+
+    // Extract lines around the probe step
+    let probe_section: Vec<&str> = compiled
+        .lines()
+        .skip_while(|l| !l.contains("Verify MCP backends"))
+        .take(5)
+        .collect();
+    assert!(!probe_section.is_empty(), "Should find probe step");
+
+    // The probe step should NOT contain unresolved {{ mcpg_port }} markers
+    assert!(
+        !compiled.contains("{{ mcpg_port }}"),
+        "Compiled output should not contain unresolved {{ mcpg_port }} marker"
+    );
+    assert!(
+        !compiled.contains("{{ mcpg_debug_flags }}"),
+        "Compiled output should not contain unresolved {{ mcpg_debug_flags }} marker"
+    );
+    assert!(
+        !compiled.contains("{{ verify_mcp_backends }}"),
+        "Compiled output should not contain unresolved {{ verify_mcp_backends }} marker"
+    );
+}
+
+/// Test that debug probe step indentation is correct in standalone output
+#[test]
 fn test_debug_pipeline_probe_step_indentation_standalone() {
     let compiled = compile_fixture_with_flags("minimal-agent.md", &["--debug-pipeline"]);
 

From 4f43d046ff40d1d8c5b2f44af40f... Mon Sep 17 00:00:00 2001
From: James Devine
Date: Sat, 18 Apr 2026 09:03:04 +0100
Subject: [PATCH 17/20] fix: fix docker run line continuation and simplify debug markers

Fix bash line continuation in the MCPG docker run command:
- mcpg_debug_flags emits backslash when disabled (maintains line continuation) and -e DEBUG env flag when enabled
- Stderr tee is always-on (baked into template), not
debug-gated - Fix replacement ordering: debug before extra_replacements so mcpg_port in probe step content gets resolved - Drop mcpg_stderr_redirect marker (back to 2 markers total) Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- AGENTS.md | 4 ++-- src/compile/common.rs | 22 ++++++++++------------ src/data/1es-base.yml | 2 +- src/data/base.yml | 2 +- tests/compiler_tests.rs | 8 -------- 5 files changed, 14 insertions(+), 24 deletions(-) diff --git a/AGENTS.md b/AGENTS.md index d3d112a..ef56e7c 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -741,9 +741,9 @@ When the compiler is built with `--skip-integrity` (debug builds only), this pla ## {{ mcpg_debug_flags }} -Generates MCPG debug environment flags for the Docker run command. When `--debug-pipeline` is passed (debug builds only), this inserts `-e DEBUG="*"` to enable verbose MCPG logging and `2> >(tee ...)` to stream MCPG stderr to both the pipeline console and a log file. +Generates MCPG debug environment flags for the Docker run command. When `--debug-pipeline` is passed (debug builds only), this inserts `-e DEBUG="*"` to enable verbose MCPG logging. -When `--debug-pipeline` is not passed, this placeholder is replaced with an empty string. +When `--debug-pipeline` is not passed, this placeholder is replaced with a bare `\` to maintain bash line continuation. ## {{ verify_mcp_backends }} diff --git a/src/compile/common.rs b/src/compile/common.rs index d605b44..084bce7 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -862,14 +862,14 @@ pub fn generate_integrity_check(skip: bool) -> String { pub fn generate_debug_pipeline_replacements(debug: bool) -> Vec<(String, String)> { if !debug { return vec![ - ("{{ mcpg_debug_flags }}".into(), String::new()), + // Emit `\` to maintain bash line continuation (same pattern as + // generate_mcpg_docker_env when no env flags are needed). 
+ ("{{ mcpg_debug_flags }}".into(), "\\".into()), ("{{ verify_mcp_backends }}".into(), String::new()), ]; } - let mcpg_debug_flags = r##"-e DEBUG="*" \ -2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2)"## - .to_string(); + let mcpg_debug_flags = r##"-e DEBUG="*" \"##.to_string(); let verify_mcp_backends = r###"# Probe all MCPG backends to force eager launch and surface failures. # MCPG lazily starts stdio backends on first tool call — without this @@ -2984,13 +2984,12 @@ mod tests { fn test_debug_pipeline_replacements_disabled() { let replacements = generate_debug_pipeline_replacements(false); assert_eq!(replacements.len(), 2); - for (marker, value) in &replacements { - assert!( - value.is_empty(), - "Marker '{}' should be empty when debug is false", - marker - ); - } + // mcpg_debug_flags returns `\` for bash line continuation + let flags = replacements.iter().find(|(m, _)| m == "{{ mcpg_debug_flags }}").unwrap(); + assert_eq!(flags.1, "\\", "mcpg_debug_flags should be a bare backslash when disabled"); + // verify_mcp_backends should be empty + let probe = replacements.iter().find(|(m, _)| m == "{{ verify_mcp_backends }}").unwrap(); + assert!(probe.1.is_empty(), "verify_mcp_backends should be empty when disabled"); } #[test] @@ -3002,7 +3001,6 @@ mod tests { assert!(flags.is_some(), "Should have mcpg_debug_flags marker"); let flags_value = &flags.unwrap().1; assert!(flags_value.contains("DEBUG"), "Should contain DEBUG env var"); - assert!(flags_value.contains("tee"), "Should contain stderr tee"); let probe = replacements.iter().find(|(m, _)| m == "{{ verify_mcp_backends }}"); assert!(probe.is_some(), "Should have verify_mcp_backends marker"); diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index f07e0d0..26d0753 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -281,7 +281,7 @@ extends: {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ - > 
"$GATEWAY_OUTPUT" & + > "$GATEWAY_OUTPUT" 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2) & MCPG_PID=$! echo "MCPG started (PID: $MCPG_PID)" diff --git a/src/data/base.yml b/src/data/base.yml index c51c431..9eabaf5 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -252,7 +252,7 @@ jobs: {{ mcpg_docker_env }} {{ mcpg_image }}:v{{ mcpg_version }} \ --routed --listen 0.0.0.0:{{ mcpg_port }} --config-stdin --log-dir /tmp/gh-aw/mcp-logs \ - > "$GATEWAY_OUTPUT" & + > "$GATEWAY_OUTPUT" 2> >(tee /tmp/gh-aw/mcp-logs/stderr.log >&2) & MCPG_PID=$! echo "MCPG started (PID: $MCPG_PID)" diff --git a/tests/compiler_tests.rs b/tests/compiler_tests.rs index f7bd3f1..c9af60d 100644 --- a/tests/compiler_tests.rs +++ b/tests/compiler_tests.rs @@ -3168,10 +3168,6 @@ fn test_debug_pipeline_includes_debug_env() { compiled.contains(r#"DEBUG="*""#), "Pipeline compiled with --debug-pipeline should contain DEBUG=* env var" ); - assert!( - compiled.contains("tee /tmp/gh-aw/mcp-logs/stderr.log"), - "Pipeline compiled with --debug-pipeline should contain stderr tee" - ); } /// Test that --debug-pipeline includes the MCP backend probe step @@ -3210,10 +3206,6 @@ fn test_default_excludes_debug_diagnostics() { !compiled.contains("Verify MCP backends"), "Pipeline compiled without --debug-pipeline should NOT contain probe step" ); - assert!( - !compiled.contains("tee /tmp/gh-aw/mcp-logs/stderr.log"), - "Pipeline compiled without --debug-pipeline should NOT contain stderr tee" - ); } /// Test that --debug-pipeline produces valid YAML for both targets From 166574e5fab0f406ae1d3ea8e6795f71172b00dc Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 09:41:59 +0100 Subject: [PATCH 18/20] feat: generic pipeline variable injection for MCP extensions Replace the ADO-specific special case in generate_mcpg_docker_env with a generic mechanism driven by the CompilerExtension trait. 
New trait method: required_pipeline_vars() -> Vec - Extensions declare which container env vars map to pipeline variables - AzureDevOpsExtension maps AZURE_DEVOPS_EXT_PAT -> SC_READ_TOKEN The compiler generates the full chain generically: 1. env: block on MCPG step (ADO secret -> bash var) 2. -e flag on MCPG docker run (bash var -> MCPG process env) 3. MCPG config keeps empty string (MCPG passthrough to child) ADO secrets require env: mapping because Azure DevOps does not expand secret variables via macro syntax in script bodies. Also updates azure-devops-mcp-agent fixture to use first-class tools.azure-devops instead of manual mcp-servers configuration. Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/common.rs | 224 +++++++++++------------ src/compile/extensions.rs | 35 +++- src/compile/onees.rs | 6 +- src/compile/standalone.rs | 6 +- src/data/1es-base.yml | 2 + src/data/base.yml | 2 + tests/compiler_tests.rs | 7 +- tests/fixtures/azure-devops-mcp-agent.md | 15 +- 8 files changed, 159 insertions(+), 138 deletions(-) diff --git a/src/compile/common.rs b/src/compile/common.rs index 084bce7..cc6254e 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -1805,68 +1805,48 @@ pub fn generate_mcpg_config( /// Generate additional `-e` flags for the MCPG Docker run command. /// -/// MCP containers spawned by MCPG may need environment variables that flow from -/// the pipeline through the MCPG container (passthrough). This function: -/// 1. Auto-maps `AZURE_DEVOPS_EXT_PAT` from `SC_READ_TOKEN` when `permissions.read` is configured -/// 2. Collects passthrough env vars (value is `""`) from container-based MCP configs -/// -/// Only container-based MCPs are considered — HTTP MCPs don't have child containers -/// that need env passthrough. +/// Two sources of env flags: +/// 1. 
**Extension pipeline var mappings** — extensions declare `required_pipeline_vars()` +/// which map container env vars to pipeline variables (typically secrets). +/// These become `-e CONTAINER_VAR="$PIPELINE_VAR"` flags referencing bash vars +/// (the companion `generate_mcpg_step_env` provides the ADO `env:` mapping). +/// 2. **User-configured MCP passthrough** — front matter `mcp-servers:` entries with +/// `env: { VAR: "" }` become bare `-e VAR` flags (MCPG passthrough from host env). /// /// Returns flags formatted for inline insertion in the `docker run` command. -/// The marker sits after the last hardcoded `-e` flag, so the output must -/// include leading `\\\n` for line continuation when non-empty. -pub fn generate_mcpg_docker_env(front_matter: &FrontMatter) -> String { +pub fn generate_mcpg_docker_env( + front_matter: &FrontMatter, + extensions: &[super::extensions::Extension], +) -> String { let mut env_flags: Vec = Vec::new(); let mut seen: HashSet = HashSet::new(); - // Check if any container MCP requests AZURE_DEVOPS_EXT_PAT passthrough - let any_mcp_needs_ado_token = front_matter.mcp_servers.values().any(|config| { - matches!(config, McpConfig::WithOptions(opts) - if opts.enabled.unwrap_or(true) - && opts.container.is_some() - && opts.env.contains_key("AZURE_DEVOPS_EXT_PAT")) - }); - - // Also check if tools.azure-devops is enabled (auto-configured ADO MCP always needs token) - let ado_tool_needs_token = front_matter - .tools - .as_ref() - .and_then(|t| t.azure_devops.as_ref()) - .is_some_and(|ado| ado.is_enabled()); - - // Auto-map AZURE_DEVOPS_EXT_PAT from SC_READ_TOKEN when permissions.read is configured - // AND at least one container MCP requests it via env passthrough (or the ADO tool is enabled) - if any_mcp_needs_ado_token || ado_tool_needs_token { - if front_matter.permissions.as_ref().and_then(|p| p.read.as_ref()).is_some() { - env_flags.push( - "-e AZURE_DEVOPS_EXT_PAT=\"$(SC_READ_TOKEN)\"".to_string(), - ); - 
seen.insert("AZURE_DEVOPS_EXT_PAT".to_string()); - } else { - eprintln!( - "Warning: one or more container MCPs request AZURE_DEVOPS_EXT_PAT passthrough \ - but permissions.read is not configured. The token will be empty at runtime. \ - Add `permissions: {{ read: }}` to enable auto-mapping." - ); + // 1. Extension pipeline var mappings (e.g., AZURE_DEVOPS_EXT_PAT -> SC_READ_TOKEN) + for ext in extensions { + for mapping in ext.required_pipeline_vars() { + if seen.contains(&mapping.container_var) { + continue; + } + env_flags.push(format!( + "-e {}=\"${}\"", + mapping.container_var, mapping.pipeline_var + )); + seen.insert(mapping.container_var.clone()); } } - // Collect passthrough env vars from container-based MCP configs only. - // HTTP MCPs don't have child containers — env passthrough doesn't apply. + // 2. User-configured MCP passthrough env vars (empty value = passthrough from host) for (mcp_name, config) in &front_matter.mcp_servers { let opts = match config { McpConfig::WithOptions(opts) if opts.enabled.unwrap_or(true) => opts, _ => continue, }; - // Only container-based MCPs need env passthrough on the MCPG Docker run if opts.container.is_none() { continue; } for (var_name, var_value) in &opts.env { - // Validate env var name to prevent Docker flag injection (e.g. 
"X --privileged") if !is_valid_env_var_name(var_name) { log::warn!( "MCP '{}': skipping invalid env var name '{}' — must match [A-Za-z_][A-Za-z0-9_]*", @@ -1877,7 +1857,6 @@ pub fn generate_mcpg_docker_env(front_matter: &FrontMatter) -> String { if seen.contains(var_name) { continue; } - // Passthrough: empty string means forward from host/pipeline environment if var_value.is_empty() { env_flags.push(format!("-e {}", var_name)); seen.insert(var_name.clone()); @@ -1887,20 +1866,44 @@ pub fn generate_mcpg_docker_env(front_matter: &FrontMatter) -> String { env_flags.sort(); if env_flags.is_empty() { - // No extra flags — emit a lone `\` so the bash line continuation from the - // preceding `-e MCP_GATEWAY_API_KEY=...` flag connects to the image name on - // the next line. This is valid bash: a backslash at end-of-line continues - // the command. replace_with_indent preserves this on its own indented line. "\\".to_string() } else { - // Emit each flag on its own line with `\` continuation. - // replace_with_indent handles indentation from the template (base.yml), - // so we only emit the content without hardcoded spaces. let flags = env_flags.join(" \\\n"); format!("{} \\", flags) } } +/// Generate the ADO step-level `env:` block for the MCPG start step. +/// +/// ADO secret variables (set via `##vso[task.setvariable;issecret=true]`) must +/// be explicitly mapped via the step's `env:` block to be available as bash +/// environment variables. This function collects all pipeline variable mappings +/// from extensions and generates the corresponding `env:` entries. +/// +/// Returns YAML `env:` entries (e.g., `SC_READ_TOKEN: $(SC_READ_TOKEN)`), +/// or an empty string if no mappings are needed. 
+pub fn generate_mcpg_step_env( + extensions: &[super::extensions::Extension], +) -> String { + let mut entries: Vec = Vec::new(); + let mut seen: HashSet = HashSet::new(); + + for ext in extensions { + for mapping in ext.required_pipeline_vars() { + if seen.contains(&mapping.pipeline_var) { + continue; + } + entries.push(format!( + "{}: $({})", + mapping.pipeline_var, mapping.pipeline_var + )); + seen.insert(mapping.pipeline_var.clone()); + } + } + + entries.join("\n") +} + // ==================== Domain allowlist ==================== /// Generate the allowed domains list for AWF network isolation. @@ -4174,62 +4177,38 @@ mod tests { #[test] fn test_generate_mcpg_docker_env_with_permissions_read() { - let mut fm = minimal_front_matter(); - fm.permissions = Some(crate::compile::types::PermissionsConfig { - read: Some("my-read-sc".to_string()), - write: None, - }); - // A container MCP must request AZURE_DEVOPS_EXT_PAT for the auto-map to trigger - fm.mcp_servers.insert( - "ado-tool".to_string(), - McpConfig::WithOptions(McpOptions { - container: Some("node:20-slim".to_string()), - env: { - let mut e = HashMap::new(); - e.insert("AZURE_DEVOPS_EXT_PAT".to_string(), "".to_string()); - e - }, - ..Default::default() - }), - ); - let env = generate_mcpg_docker_env(&fm); + // When ADO tool is enabled with permissions.read, the extension's + // required_pipeline_vars should produce the -e flag + let (fm, _) = parse_markdown( + "---\nname: test\ndescription: test\ntools:\n azure-devops: true\npermissions:\n read: my-read-sc\n---\n", + ).unwrap(); + let extensions = collect_extensions(&fm); + let env = generate_mcpg_docker_env(&fm, &extensions); assert!( - env.contains("-e AZURE_DEVOPS_EXT_PAT=\"$(SC_READ_TOKEN)\""), - "Should auto-map ADO token when permissions.read is set and MCP requests it" + env.contains("-e AZURE_DEVOPS_EXT_PAT=\"$SC_READ_TOKEN\""), + "Should map ADO token via extension pipeline var" ); } #[test] - fn 
test_generate_mcpg_docker_env_permissions_read_no_mcp_request() { - let mut fm = minimal_front_matter(); - fm.permissions = Some(crate::compile::types::PermissionsConfig { - read: Some("my-read-sc".to_string()), - write: None, - }); - // No MCP requests AZURE_DEVOPS_EXT_PAT — auto-map should NOT trigger - fm.mcp_servers.insert( - "unrelated-tool".to_string(), - McpConfig::WithOptions(McpOptions { - container: Some("node:20-slim".to_string()), - ..Default::default() - }), - ); - let env = generate_mcpg_docker_env(&fm); + fn test_generate_mcpg_docker_env_no_extensions() { + // No tools enabled — no extension pipeline vars — only user MCP passthrough + let fm = minimal_front_matter(); + let extensions = collect_extensions(&fm); + let env = generate_mcpg_docker_env(&fm, &extensions); assert!( !env.contains("AZURE_DEVOPS_EXT_PAT"), - "Should NOT auto-map ADO token when no MCP requests it" + "Should not have ADO token when no extension needs it" ); } #[test] - fn test_generate_mcpg_docker_env_dedup_auto_map_and_passthrough() { - // When permissions.read is set AND MCP has AZURE_DEVOPS_EXT_PAT: "", - // the auto-mapped form (with SC_READ_TOKEN) should win — no duplicate - let mut fm = minimal_front_matter(); - fm.permissions = Some(crate::compile::types::PermissionsConfig { - read: Some("my-read-sc".to_string()), - write: None, - }); + fn test_generate_mcpg_docker_env_dedup_extension_and_user_passthrough() { + // Extension provides AZURE_DEVOPS_EXT_PAT mapping, user MCP also has it as passthrough. + // Extension mapping should win (deduplicated). 
+ let (mut fm, _) = parse_markdown( + "---\nname: test\ndescription: test\ntools:\n azure-devops: true\npermissions:\n read: my-read-sc\n---\n", + ).unwrap(); fm.mcp_servers.insert( "ado-tool".to_string(), McpConfig::WithOptions(McpOptions { @@ -4242,27 +4221,12 @@ mod tests { ..Default::default() }), ); - let env = generate_mcpg_docker_env(&fm); - // Should have the SC_READ_TOKEN form (auto-mapped), not bare passthrough - assert!( - env.contains("-e AZURE_DEVOPS_EXT_PAT=\"$(SC_READ_TOKEN)\""), - "Auto-mapped form should be present" - ); - // Should appear exactly once + let extensions = collect_extensions(&fm); + let env = generate_mcpg_docker_env(&fm, &extensions); let count = env.matches("AZURE_DEVOPS_EXT_PAT").count(); assert_eq!(count, 1, "AZURE_DEVOPS_EXT_PAT should appear exactly once, got {}", count); } - #[test] - fn test_generate_mcpg_docker_env_without_permissions() { - let fm = minimal_front_matter(); - let env = generate_mcpg_docker_env(&fm); - assert!( - !env.contains("AZURE_DEVOPS_EXT_PAT"), - "Should not map ADO token when permissions.read is not set" - ); - } - #[test] fn test_generate_mcpg_docker_env_passthrough_vars() { let mut fm = minimal_front_matter(); @@ -4279,7 +4243,8 @@ mod tests { ..Default::default() }), ); - let env = generate_mcpg_docker_env(&fm); + let extensions = collect_extensions(&fm); + let env = generate_mcpg_docker_env(&fm, &extensions); assert!(env.contains("-e PASS_THROUGH"), "Should include passthrough var"); assert!(!env.contains("-e STATIC"), "Should NOT include static var"); } @@ -4293,16 +4258,15 @@ mod tests { container: Some("img:latest".to_string()), env: { let mut e = HashMap::new(); - // Injection attempt: env var name with Docker flag e.insert("MY_VAR --privileged".to_string(), "".to_string()); - // Valid env var for comparison e.insert("GOOD_VAR".to_string(), "".to_string()); e }, ..Default::default() }), ); - let env = generate_mcpg_docker_env(&fm); + let extensions = collect_extensions(&fm); + let env = 
generate_mcpg_docker_env(&fm, &extensions); assert!( !env.contains("--privileged"), "Should reject invalid env var name with Docker flag injection" @@ -4313,6 +4277,29 @@ mod tests { ); } + // ─── generate_mcpg_step_env ────────────────────────────────────────────── + + #[test] + fn test_generate_mcpg_step_env_with_ado_extension() { + let (fm, _) = parse_markdown( + "---\nname: test\ndescription: test\ntools:\n azure-devops: true\n---\n", + ).unwrap(); + let extensions = collect_extensions(&fm); + let env = generate_mcpg_step_env(&extensions); + assert!( + env.contains("SC_READ_TOKEN: $(SC_READ_TOKEN)"), + "Should map SC_READ_TOKEN for ADO extension" + ); + } + + #[test] + fn test_generate_mcpg_step_env_no_extensions() { + let fm = minimal_front_matter(); + let extensions = collect_extensions(&fm); + let env = generate_mcpg_step_env(&extensions); + assert!(env.is_empty(), "Should be empty when no extensions need pipeline vars"); + } + #[test] fn test_is_valid_env_var_name() { assert!(is_valid_env_var_name("MY_VAR")); @@ -4508,7 +4495,8 @@ mod tests { "---\nname: test\ndescription: test\ntools:\n azure-devops: true\npermissions:\n read: my-read-sc\n---\n", ) .unwrap(); - let env = generate_mcpg_docker_env(&fm); + let extensions = collect_extensions(&fm); + let env = generate_mcpg_docker_env(&fm, &extensions); assert!( env.contains("AZURE_DEVOPS_EXT_PAT"), "Should include ADO token passthrough when permissions.read is set" diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs index 6572d56..704ef12 100644 --- a/src/compile/extensions.rs +++ b/src/compile/extensions.rs @@ -261,6 +261,30 @@ pub trait CompilerExtension { fn validate(&self, _ctx: &CompileContext) -> Result> { Ok(vec![]) } + + /// Pipeline variable mappings needed by this extension's MCP containers. + /// + /// Each mapping declares that a container env var (e.g., `AZURE_DEVOPS_EXT_PAT`) + /// should be populated from a pipeline variable (e.g., `SC_READ_TOKEN`). 
+ /// The compiler uses these to generate: + /// 1. `env:` block on the MCPG step (maps ADO secret → bash var) + /// 2. `-e` flags on the MCPG docker run (passes bash var → MCPG process) + /// 3. MCPG config keeps `""` (MCPG passthrough from its env → child container) + fn required_pipeline_vars(&self) -> Vec { + vec![] + } +} + +/// Maps a container environment variable to a pipeline variable. +/// +/// Used by extensions to declare that an MCP container needs a specific +/// pipeline variable (typically a secret) injected into its environment. +#[derive(Debug, Clone)] +pub struct PipelineEnvMapping { + /// The env var name inside the MCP container (e.g., `AZURE_DEVOPS_EXT_PAT`). + pub container_var: String, + /// The ADO pipeline variable name (e.g., `SC_READ_TOKEN`). + pub pipeline_var: String, } // ────────────────────────────────────────────────────────────────────── @@ -320,6 +344,9 @@ macro_rules! extension_enum { fn validate(&self, ctx: &CompileContext) -> Result> { match self { $( $Enum::$Variant(e) => e.validate(ctx), )+ } } + fn required_pipeline_vars(&self) -> Vec { + match self { $( $Enum::$Variant(e) => e.required_pipeline_vars(), )+ } + } } }; } @@ -567,10 +594,14 @@ impl CompilerExtension for AzureDevOpsExtension { Ok(warnings) } + fn required_pipeline_vars(&self) -> Vec { + vec![PipelineEnvMapping { + container_var: "AZURE_DEVOPS_EXT_PAT".to_string(), + pipeline_var: "SC_READ_TOKEN".to_string(), + }] + } } -// ─── Cache Memory ──────────────────────────────────────────────────── - use super::types::CacheMemoryToolConfig; /// Cache memory tool extension. 
diff --git a/src/compile/onees.rs b/src/compile/onees.rs index 8a3f600..add78a1 100644 --- a/src/compile/onees.rs +++ b/src/compile/onees.rs @@ -16,7 +16,7 @@ use super::common::{ generate_allowed_domains, generate_cancel_previous_builds, generate_enabled_tools_args, - generate_mcpg_config, generate_mcpg_docker_env, + generate_mcpg_config, generate_mcpg_docker_env, generate_mcpg_step_env, format_steps_yaml_indented, }; use super::types::FrontMatter; @@ -56,7 +56,8 @@ impl Compiler for OneESCompiler { let mcpg_config = generate_mcpg_config(front_matter, &ctx, &extensions)?; let mcpg_config_json = serde_json::to_string_pretty(&mcpg_config) .context("Failed to serialize MCPG config")?; - let mcpg_docker_env = generate_mcpg_docker_env(front_matter); + let mcpg_docker_env = generate_mcpg_docker_env(front_matter, &extensions); + let mcpg_step_env = generate_mcpg_step_env(&extensions); // Generate 1ES-specific setup/teardown jobs(no per-job pool, uses templateContext). // These override the shared {{ setup_job }} / {{ teardown_job }} markers via @@ -77,6 +78,7 @@ impl Compiler for OneESCompiler { ("{{ cancel_previous_builds }}".into(), cancel_previous_builds), ("{{ mcpg_config }}".into(), mcpg_config_json), ("{{ mcpg_docker_env }}".into(), mcpg_docker_env), + ("{{ mcpg_step_env }}".into(), mcpg_step_env), ("{{ setup_job }}".into(), setup_job), ("{{ teardown_job }}".into(), teardown_job), ], diff --git a/src/compile/standalone.rs b/src/compile/standalone.rs index 4af8e5e..aa994aa 100644 --- a/src/compile/standalone.rs +++ b/src/compile/standalone.rs @@ -18,7 +18,7 @@ use super::common::{ generate_allowed_domains, generate_cancel_previous_builds, generate_enabled_tools_args, - generate_mcpg_config, generate_mcpg_docker_env, + generate_mcpg_config, generate_mcpg_docker_env, generate_mcpg_step_env, }; use super::types::FrontMatter; @@ -57,7 +57,8 @@ impl Compiler for StandaloneCompiler { let config_obj = generate_mcpg_config(front_matter, &ctx, &extensions)?; let 
mcpg_config_json = serde_json::to_string_pretty(&config_obj).context("Failed to serialize MCPG config")?; - let mcpg_docker_env = generate_mcpg_docker_env(front_matter); + let mcpg_docker_env = generate_mcpg_docker_env(front_matter, &extensions); + let mcpg_step_env = generate_mcpg_step_env(&extensions); let config = CompileConfig { template: include_str!("../data/base.yml").to_string(), @@ -72,6 +73,7 @@ impl Compiler for StandaloneCompiler { ("{{ cancel_previous_builds }}".into(), cancel_previous_builds), ("{{ mcpg_config }}".into(), mcpg_config_json), ("{{ mcpg_docker_env }}".into(), mcpg_docker_env), + ("{{ mcpg_step_env }}".into(), mcpg_step_env), ], skip_integrity, debug_pipeline, diff --git a/src/data/1es-base.yml b/src/data/1es-base.yml index 26d0753..62d727b 100644 --- a/src/data/1es-base.yml +++ b/src/data/1es-base.yml @@ -339,6 +339,8 @@ extends: echo "Generated MCP config at: /tmp/awf-tools/mcp-config.json" cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" + env: + {{ mcpg_step_env }} {{ verify_mcp_backends }} diff --git a/src/data/base.yml b/src/data/base.yml index 9eabaf5..1c19515 100644 --- a/src/data/base.yml +++ b/src/data/base.yml @@ -310,6 +310,8 @@ jobs: echo "Generated MCP config at: /tmp/awf-tools/mcp-config.json" cat /tmp/awf-tools/mcp-config.json displayName: "Start MCP Gateway (MCPG)" + env: + {{ mcpg_step_env }} {{ verify_mcp_backends }} diff --git a/tests/compiler_tests.rs b/tests/compiler_tests.rs index c9af60d..c4df6d2 100644 --- a/tests/compiler_tests.rs +++ b/tests/compiler_tests.rs @@ -2189,7 +2189,7 @@ fn test_fixture_azure_devops_mcp_compiled_output() { // Should contain the MCPG docker env passthrough (auto-mapped ADO token) assert!( - compiled.contains("-e AZURE_DEVOPS_EXT_PAT=\"$(SC_READ_TOKEN)\""), + compiled.contains("-e AZURE_DEVOPS_EXT_PAT=\"$SC_READ_TOKEN\""), "Should auto-map SC_READ_TOKEN to AZURE_DEVOPS_EXT_PAT on MCPG Docker run" ); @@ -2291,8 +2291,9 @@ fn test_mcpg_docker_env_passthrough() { 
let compiled = fs::read_to_string(&output_path).unwrap(); - // Should auto-map AZURE_DEVOPS_EXT_PAT from SC_READ_TOKEN - assert!(compiled.contains("-e AZURE_DEVOPS_EXT_PAT=\"$(SC_READ_TOKEN)\""), "Should auto-map ADO token"); + // AZURE_DEVOPS_EXT_PAT with "" is bare passthrough for user-configured MCPs + // (only tools.azure-devops extension provides SC_READ_TOKEN mapping) + assert!(compiled.contains("-e AZURE_DEVOPS_EXT_PAT"), "Should forward AZURE_DEVOPS_EXT_PAT as passthrough"); // Should forward passthrough env var MY_TOKEN assert!(compiled.contains("-e MY_TOKEN"), "Should forward passthrough env var"); diff --git a/tests/fixtures/azure-devops-mcp-agent.md b/tests/fixtures/azure-devops-mcp-agent.md index 2dc9a4d..7c2b2f2 100644 --- a/tests/fixtures/azure-devops-mcp-agent.md +++ b/tests/fixtures/azure-devops-mcp-agent.md @@ -1,13 +1,10 @@ --- name: "Azure DevOps MCP Agent" -description: "Agent with Azure DevOps MCP via containerized stdio transport" -mcp-servers: +description: "Agent with Azure DevOps MCP via first-class tool integration" +tools: azure-devops: - container: "node:20-slim" - entrypoint: "npx" - entrypoint-args: ["-y", "@azure-devops/mcp", "myorg", "-d", "core", "work-items"] - env: - AZURE_DEVOPS_EXT_PAT: "" + org: myorg + toolsets: [core, work-items] allowed: - core_list_projects - wit_get_work_item @@ -19,10 +16,6 @@ permissions: safe-outputs: create-work-item: work-item-type: Task -network: - allowed: - - "dev.azure.com" - - "*.dev.azure.com" --- ## Azure DevOps MCP Integration Test From 6d08468e18f2e8853d038a35e4bf2ed289315f72 Mon Sep 17 00:00:00 2001 From: James Devine Date: Sat, 18 Apr 2026 10:02:17 +0100 Subject: [PATCH 19/20] fix: use correct auth for @azure-devops/mcp (envvar + ADO_MCP_AUTH_TOKEN) MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit The @azure-devops/mcp npm package does NOT use AZURE_DEVOPS_EXT_PAT (that's for the az devops CLI extension). 
It accepts auth type via CLI arg (-a) and reads the token from a specific env var per type. For pipeline use with bearer tokens from az account get-access-token: - Add '-a envvar' to entrypoint args (selects envvar auth mode) - Map ADO_MCP_AUTH_TOKEN (not AZURE_DEVOPS_EXT_PAT) to SC_READ_TOKEN - The ADO MCP reads the bearer token from ADO_MCP_AUTH_TOKEN Auth types in @azure-devops/mcp (src/auth.ts): - 'pat': reads PERSONAL_ACCESS_TOKEN (base64 PAT) - 'envvar': reads ADO_MCP_AUTH_TOKEN (bearer token) ← we use this - 'azcli'/'env': uses DefaultAzureCredential - 'interactive': OAuth browser flow (default, breaks stdio) Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- src/compile/common.rs | 18 +++++++++--------- src/compile/extensions.rs | 15 ++++++++++----- tests/compiler_tests.rs | 10 +++++----- 3 files changed, 24 insertions(+), 19 deletions(-) diff --git a/src/compile/common.rs b/src/compile/common.rs index cc6254e..b615fa9 100644 --- a/src/compile/common.rs +++ b/src/compile/common.rs @@ -4185,7 +4185,7 @@ mod tests { let extensions = collect_extensions(&fm); let env = generate_mcpg_docker_env(&fm, &extensions); assert!( - env.contains("-e AZURE_DEVOPS_EXT_PAT=\"$SC_READ_TOKEN\""), + env.contains("-e ADO_MCP_AUTH_TOKEN=\"$SC_READ_TOKEN\""), "Should map ADO token via extension pipeline var" ); } @@ -4197,14 +4197,14 @@ mod tests { let extensions = collect_extensions(&fm); let env = generate_mcpg_docker_env(&fm, &extensions); assert!( - !env.contains("AZURE_DEVOPS_EXT_PAT"), + !env.contains("ADO_MCP_AUTH_TOKEN"), "Should not have ADO token when no extension needs it" ); } #[test] fn test_generate_mcpg_docker_env_dedup_extension_and_user_passthrough() { - // Extension provides AZURE_DEVOPS_EXT_PAT mapping, user MCP also has it as passthrough. + // Extension provides ADO_MCP_AUTH_TOKEN mapping, user MCP also has it as passthrough. // Extension mapping should win (deduplicated). 
let (mut fm, _) = parse_markdown( "---\nname: test\ndescription: test\ntools:\n azure-devops: true\npermissions:\n read: my-read-sc\n---\n", @@ -4215,7 +4215,7 @@ mod tests { container: Some("node:20-slim".to_string()), env: { let mut e = HashMap::new(); - e.insert("AZURE_DEVOPS_EXT_PAT".to_string(), "".to_string()); + e.insert("ADO_MCP_AUTH_TOKEN".to_string(), "".to_string()); e }, ..Default::default() @@ -4223,8 +4223,8 @@ mod tests { ); let extensions = collect_extensions(&fm); let env = generate_mcpg_docker_env(&fm, &extensions); - let count = env.matches("AZURE_DEVOPS_EXT_PAT").count(); - assert_eq!(count, 1, "AZURE_DEVOPS_EXT_PAT should appear exactly once, got {}", count); + let count = env.matches("ADO_MCP_AUTH_TOKEN").count(); + assert_eq!(count, 1, "ADO_MCP_AUTH_TOKEN should appear exactly once, got {}", count); } #[test] @@ -4355,9 +4355,9 @@ mod tests { assert!(args.contains(&"-y".to_string())); assert!(args.contains(&ADO_MCP_PACKAGE.to_string())); assert!(args.contains(&"inferred-org".to_string())); - // Should have AZURE_DEVOPS_EXT_PAT in env + // Should have ADO_MCP_AUTH_TOKEN in env (for bearer token via envvar auth) let env = ado.env.as_ref().unwrap(); - assert!(env.contains_key("AZURE_DEVOPS_EXT_PAT")); + assert!(env.contains_key("ADO_MCP_AUTH_TOKEN")); } #[test] @@ -4498,7 +4498,7 @@ mod tests { let extensions = collect_extensions(&fm); let env = generate_mcpg_docker_env(&fm, &extensions); assert!( - env.contains("AZURE_DEVOPS_EXT_PAT"), + env.contains("ADO_MCP_AUTH_TOKEN"), "Should include ADO token passthrough when permissions.read is set" ); } diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs index 704ef12..98fc7a2 100644 --- a/src/compile/extensions.rs +++ b/src/compile/extensions.rs @@ -540,15 +540,20 @@ impl CompilerExtension for AzureDevOpsExtension { Some(self.config.allowed().to_vec()) }; - // ADO MCP needs the PAT token passed via environment + // ADO MCP authentication: the @azure-devops/mcp npm package accepts + // 
auth type via CLI arg (-a) and token via env var. For pipeline use, + // we use "envvar" auth which reads ADO_MCP_AUTH_TOKEN. + // SC_READ_TOKEN is a bearer JWT from `az account get-access-token`. + entrypoint_args.extend(["-a".to_string(), "envvar".to_string()]); + let env = Some(HashMap::from([ ( - "AZURE_DEVOPS_EXT_PAT".to_string(), - String::new(), // Passthrough from pipeline + "ADO_MCP_AUTH_TOKEN".to_string(), + String::new(), // Passthrough — bearer token via pipeline var mapping ), ( "DEBUG".to_string(), - "*".to_string(), // Verbose logging for ADO MCP + "*".to_string(), ), ])); @@ -596,7 +601,7 @@ impl CompilerExtension for AzureDevOpsExtension { } fn required_pipeline_vars(&self) -> Vec { vec![PipelineEnvMapping { - container_var: "AZURE_DEVOPS_EXT_PAT".to_string(), + container_var: "ADO_MCP_AUTH_TOKEN".to_string(), pipeline_var: "SC_READ_TOKEN".to_string(), }] } diff --git a/tests/compiler_tests.rs b/tests/compiler_tests.rs index c4df6d2..2524117 100644 --- a/tests/compiler_tests.rs +++ b/tests/compiler_tests.rs @@ -2175,10 +2175,10 @@ fn test_fixture_azure_devops_mcp_compiled_output() { "MCPG config should NOT use command field" ); - // Should contain env passthrough for AZURE_DEVOPS_EXT_PAT + // Should contain env for ADO_MCP_AUTH_TOKEN (envvar auth for @azure-devops/mcp) assert!( - compiled.contains("AZURE_DEVOPS_EXT_PAT"), - "Should reference AZURE_DEVOPS_EXT_PAT" + compiled.contains("ADO_MCP_AUTH_TOKEN"), + "Should reference ADO_MCP_AUTH_TOKEN" ); // Should contain SC_READ_TOKEN (from permissions.read) @@ -2189,8 +2189,8 @@ fn test_fixture_azure_devops_mcp_compiled_output() { // Should contain the MCPG docker env passthrough (auto-mapped ADO token) assert!( - compiled.contains("-e AZURE_DEVOPS_EXT_PAT=\"$SC_READ_TOKEN\""), - "Should auto-map SC_READ_TOKEN to AZURE_DEVOPS_EXT_PAT on MCPG Docker run" + compiled.contains("-e ADO_MCP_AUTH_TOKEN=\"$SC_READ_TOKEN\""), + "Should auto-map SC_READ_TOKEN to ADO_MCP_AUTH_TOKEN on MCPG Docker run" ); let _ = 
fs::remove_dir_all(&temp_dir);

From a1bc0a0a55bf3bd5229fc46d84d8522abada30d9 Mon Sep 17 00:00:00 2001
From: James Devine
Date: Mon, 20 Apr 2026 17:16:38 +0100
Subject: [PATCH 20/20] feat(run): fix MCPG local development issues

- Use dynamic high port for MCPG instead of hardcoded port 80 (port 80
  requires admin privileges on most systems)
- Platform-aware Docker networking: --network host on Linux, -p port
  mapping on Windows/macOS where Docker Desktop uses a VM
- Rewrite SafeOutputs URL to host.docker.internal on Windows/macOS so
  MCPG container can reach host services
- Derive MCP client config from MCPG runtime gateway output instead of
  compile-time prediction (matches pipeline behavior)
- Add AdoAuthMode enum to AzureDevOpsExtension: Bearer for pipelines
  (ADO_MCP_AUTH_TOKEN), Pat for local dev (PERSONAL_ACCESS_TOKEN)
- Remove stale generate_mcp_client_config re-export (removed on branch)
- Update --pat help text to describe MCPG token plumbing

Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com>
---
 src/compile/extensions.rs |  77 ++++++++++++---
 src/compile/mod.rs        |   1 -
 src/main.rs               |   2 +-
 src/run.rs                | 191 +++++++++++++++++++++++++++++++++-----
 4 files changed, 234 insertions(+), 37 deletions(-)

diff --git a/src/compile/extensions.rs b/src/compile/extensions.rs
index 98fc7a2..4c8d852 100644
--- a/src/compile/extensions.rs
+++ b/src/compile/extensions.rs
@@ -458,11 +458,34 @@ use super::types::AzureDevOpsToolConfig;
 /// ADO MCP), and compile-time validation (org inference, duplicate MCP).
 pub struct AzureDevOpsExtension {
     config: AzureDevOpsToolConfig,
+    auth_mode: AdoAuthMode,
+}
+
+/// Authentication mode for the ADO MCP server.
+///
+/// Pipelines use bearer tokens (JWT from ARM service connections).
+/// Local development uses PATs (Personal Access Tokens).
+#[derive(Debug, Clone, Copy, Default)] +pub enum AdoAuthMode { + /// `-a envvar` + `ADO_MCP_AUTH_TOKEN` — bearer JWT from ARM (pipeline default) + #[default] + Bearer, + /// `-a pat` + `AZURE_DEVOPS_EXT_PAT` — Personal Access Token (local dev) + Pat, } impl AzureDevOpsExtension { pub fn new(config: AzureDevOpsToolConfig) -> Self { - Self { config } + Self { + config, + auth_mode: AdoAuthMode::default(), + } + } + + /// Set the authentication mode (e.g., `AdoAuthMode::Pat` for local runs). + pub fn with_auth_mode(mut self, mode: AdoAuthMode) -> Self { + self.auth_mode = mode; + self } } @@ -541,15 +564,19 @@ impl CompilerExtension for AzureDevOpsExtension { }; // ADO MCP authentication: the @azure-devops/mcp npm package accepts - // auth type via CLI arg (-a) and token via env var. For pipeline use, - // we use "envvar" auth which reads ADO_MCP_AUTH_TOKEN. - // SC_READ_TOKEN is a bearer JWT from `az account get-access-token`. - entrypoint_args.extend(["-a".to_string(), "envvar".to_string()]); + // auth type via CLI arg (-a) and token via env var. 
+ // Bearer: `-a envvar` reads ADO_MCP_AUTH_TOKEN (pipeline JWT from ARM) + // Pat: `-a pat` reads PERSONAL_ACCESS_TOKEN (base64-encoded PAT) + let (auth_flag, token_var) = match self.auth_mode { + AdoAuthMode::Bearer => ("envvar", "ADO_MCP_AUTH_TOKEN"), + AdoAuthMode::Pat => ("pat", "PERSONAL_ACCESS_TOKEN"), + }; + entrypoint_args.extend(["-a".to_string(), auth_flag.to_string()]); let env = Some(HashMap::from([ ( - "ADO_MCP_AUTH_TOKEN".to_string(), - String::new(), // Passthrough — bearer token via pipeline var mapping + token_var.to_string(), + String::new(), // Passthrough from MCPG process env ), ( "DEBUG".to_string(), @@ -600,10 +627,15 @@ impl CompilerExtension for AzureDevOpsExtension { Ok(warnings) } fn required_pipeline_vars(&self) -> Vec { - vec![PipelineEnvMapping { - container_var: "ADO_MCP_AUTH_TOKEN".to_string(), - pipeline_var: "SC_READ_TOKEN".to_string(), - }] + match self.auth_mode { + AdoAuthMode::Bearer => vec![PipelineEnvMapping { + container_var: "ADO_MCP_AUTH_TOKEN".to_string(), + pipeline_var: "SC_READ_TOKEN".to_string(), + }], + // PAT mode: no pipeline var mapping needed — the PAT is passed + // directly via AZURE_DEVOPS_EXT_PAT in the MCPG env file. + AdoAuthMode::Pat => vec![], + } } } @@ -795,6 +827,23 @@ These tools generate safe outputs that will be reviewed and executed in a separa /// (runtimes in `RuntimesConfig` field order, tools in `ToolsConfig` /// field order). pub fn collect_extensions(front_matter: &FrontMatter) -> Vec { + collect_extensions_with_options(front_matter, AdoAuthMode::default()) +} + +/// Collect extensions with an explicit ADO auth mode. +/// +/// Used by `ado-aw run` to switch from bearer (pipeline default) to PAT auth. 
+pub fn collect_extensions_with_ado_auth( + front_matter: &FrontMatter, + ado_auth: AdoAuthMode, +) -> Vec { + collect_extensions_with_options(front_matter, ado_auth) +} + +fn collect_extensions_with_options( + front_matter: &FrontMatter, + ado_auth: AdoAuthMode, +) -> Vec { let mut extensions = Vec::new(); // ── Always-on internal extensions ── @@ -816,9 +865,9 @@ pub fn collect_extensions(front_matter: &FrontMatter) -> Vec { if let Some(tools) = front_matter.tools.as_ref() { if let Some(ado) = tools.azure_devops.as_ref() { if ado.is_enabled() { - extensions.push(Extension::AzureDevOps(AzureDevOpsExtension::new( - ado.clone(), - ))); + extensions.push(Extension::AzureDevOps( + AzureDevOpsExtension::new(ado.clone()).with_auth_mode(ado_auth), + )); } } if let Some(memory) = tools.cache_memory.as_ref() { diff --git a/src/compile/mod.rs b/src/compile/mod.rs index 3569f6d..4df1649 100644 --- a/src/compile/mod.rs +++ b/src/compile/mod.rs @@ -22,7 +22,6 @@ pub use common::sanitize_filename; pub use common::HEADER_MARKER; pub use common::generate_copilot_params; pub use common::generate_mcpg_config; -pub use common::generate_mcp_client_config; pub use common::MCPG_IMAGE; pub use common::MCPG_VERSION; pub use common::MCPG_PORT; diff --git a/src/main.rs b/src/main.rs index 7dff051..dc1b361 100644 --- a/src/main.rs +++ b/src/main.rs @@ -133,7 +133,7 @@ enum Commands { Run { /// Path to the agent markdown file path: String, - /// Azure DevOps PAT for API access (prefer AZURE_DEVOPS_EXT_PAT env var) + /// Azure DevOps PAT for API access (also passed as ADO_MCP_AUTH_TOKEN to MCPG) #[arg(long, env = "AZURE_DEVOPS_EXT_PAT")] pat: Option, /// Azure DevOps organization URL diff --git a/src/run.rs b/src/run.rs index c61f4d1..0dedbb9 100644 --- a/src/run.rs +++ b/src/run.rs @@ -169,9 +169,14 @@ fn is_docker_available() -> bool { /// - The child is reaped after `docker stop` /// - The env file must outlive the child because `spawn()` returns after /// fork() — the Docker CLI hasn't 
yet exec'd or read `--env-file` +/// +/// `gateway_output_path` receives MCPG's stdout — the runtime gateway config +/// JSON that is later transformed into the copilot MCP client config. fn start_mcpg( mcpg_config_json: &str, mcpg_api_key: &str, + port: u16, + gateway_output_path: &Path, pat: Option<&str>, needs_ado_token: bool, ) -> Result<(Child, tempfile::NamedTempFile)> { @@ -190,10 +195,14 @@ fn start_mcpg( .context("Failed to create temp env file for MCPG secrets")?; let mut env_contents = format!( "MCP_GATEWAY_PORT={}\nMCP_GATEWAY_DOMAIN=127.0.0.1\nMCP_GATEWAY_API_KEY={}\n", - compile::MCPG_PORT, mcpg_api_key, + port, mcpg_api_key, ); if needs_ado_token { if let Some(pat) = pat { + // Set both vars so MCPG can passthrough whichever the auth mode needs: + // PERSONAL_ACCESS_TOKEN — read by ADO MCP in `-a pat` mode (local dev) + // AZURE_DEVOPS_EXT_PAT — used by copilot and execute stages + env_contents.push_str(&format!("PERSONAL_ACCESS_TOKEN={}\n", pat)); env_contents.push_str(&format!("AZURE_DEVOPS_EXT_PAT={}\n", pat)); } } @@ -206,28 +215,51 @@ fn start_mcpg( "--rm".to_string(), "--name".to_string(), "ado-aw-mcpg".to_string(), - "--network".to_string(), - "host".to_string(), + ]; + + // Network strategy differs by platform: + // Linux: --network host shares the host stack; 127.0.0.1 works both ways. + // Windows/macOS: Docker Desktop runs containers in a VM. --network host doesn't + // expose container ports on the host. Use -p for port mapping and + // host.docker.internal for container→host communication. 
+ if cfg!(target_os = "linux") { + args.extend(["--network".to_string(), "host".to_string()]); + } else { + args.extend(["-p".to_string(), format!("{}:{}", port, port)]); + } + + args.extend([ "--entrypoint".to_string(), "/app/awmg".to_string(), "-v".to_string(), "/var/run/docker.sock:/var/run/docker.sock".to_string(), "--env-file".to_string(), env_file.path().to_string_lossy().into_owned(), - ]; + ]); args.push(format!("{}:v{}", compile::MCPG_IMAGE, compile::MCPG_VERSION)); args.push("--routed".to_string()); args.push("--listen".to_string()); - args.push(format!("127.0.0.1:{}", compile::MCPG_PORT)); + // Linux (--network host): bind to loopback only. + // Windows/macOS (-p mapping): bind to 0.0.0.0 so Docker can forward traffic. + if cfg!(target_os = "linux") { + args.push(format!("127.0.0.1:{}", port)); + } else { + args.push(format!("0.0.0.0:{}", port)); + } args.push("--config-stdin".to_string()); args.push("--log-dir".to_string()); args.push("/tmp/gh-aw/mcp-logs".to_string()); + // Redirect stdout to the gateway output file — MCPG writes its runtime + // config (with actual URLs) to stdout once it finishes initialising servers. + let gateway_file = std::fs::File::create(gateway_output_path) + .with_context(|| format!("Failed to create gateway output file: {}", gateway_output_path.display()))?; + let mut child = Command::new("docker") .args(&args) .stdin(Stdio::piped()) - .stdout(Stdio::null()) + .stdout(gateway_file) .stderr(Stdio::null()) .spawn() .context("Failed to start MCPG Docker container")?; @@ -244,6 +276,34 @@ fn start_mcpg( Ok((child, env_file)) } +/// Transform MCPG's gateway output JSON into a copilot-compatible MCP client config. 
+/// +/// Mirrors the pipeline's `jq` transformation: +/// - Keep URLs as-is (local run: copilot runs on host, same as MCPG) +/// - Ensure `tools: ["*"]` on each server entry (Copilot CLI requirement) +/// - Preserve headers and other fields +fn transform_gateway_output(gateway_json: &str) -> Result { + let mut config: serde_json::Value = serde_json::from_str(gateway_json) + .context("Failed to parse MCPG gateway output as JSON")?; + + let servers = config + .get_mut("mcpServers") + .and_then(|v| v.as_object_mut()) + .context("Gateway output missing mcpServers")?; + + for (_name, entry) in servers.iter_mut() { + if let Some(obj) = entry.as_object_mut() { + obj.insert( + "tools".into(), + serde_json::Value::Array(vec![serde_json::Value::String("*".into())]), + ); + } + } + + serde_json::to_string_pretty(&config) + .context("Failed to serialize MCP client config") +} + /// Build a `std::process::Command` for a program that may be a script wrapper. /// /// On Windows, npm-installed tools like `copilot` are `.cmd`/`.ps1` wrappers. @@ -335,7 +395,11 @@ pub async fn run(args: &RunArgs) -> Result<()> { } // ── 2. Collect extensions ──────────────────────────────────────── - let extensions = compile::extensions::collect_extensions(&front_matter); + // Local run uses PAT auth for ADO MCP (users have PATs, not bearer JWTs). + let extensions = compile::extensions::collect_extensions_with_ado_auth( + &front_matter, + compile::extensions::AdoAuthMode::Pat, + ); // ── 3. Create output directory ─────────────────────────────────── let output_dir = match &args.output_dir { @@ -401,12 +465,18 @@ pub async fn run(args: &RunArgs) -> Result<()> { } if use_mcpg { + // Pick a free high port for MCPG — port 80 (used in pipelines) requires + // elevated privileges on most systems and isn't suitable for local dev. 
+ let mcpg_port = find_free_port() + .context("Failed to find a free port for MCPG")?; + println!("\n=== Generating MCPG config ==="); let compile_ctx = compile::extensions::CompileContext::new(&front_matter, &working_dir).await; - let mcpg_config = + let mut mcpg_config = compile::generate_mcpg_config(&front_matter, &compile_ctx, &extensions)?; + mcpg_config.gateway.port = mcpg_port; // Serialize and substitute runtime placeholders let mcpg_json = serde_json::to_string_pretty(&mcpg_config) @@ -416,21 +486,23 @@ pub async fn run(args: &RunArgs) -> Result<()> { .replace("${SAFE_OUTPUTS_API_KEY}", &so_api_key) .replace("${MCP_GATEWAY_API_KEY}", &mcpg_api_key); + // Rewrite SafeOutputs URL for the container→host network path. + // The compile-time config uses "localhost" which works in pipelines + // (Linux, --network host shares the host stack). Locally: + // - Linux: "localhost" may resolve to ::1 (IPv6) but SafeOutputs + // binds IPv4 only, so use 127.0.0.1 explicitly. + // - Windows/macOS: Docker Desktop runs containers in a VM, so + // "localhost" is the VM loopback. Use host.docker.internal. 
+ let mcpg_json = if cfg!(target_os = "linux") { + mcpg_json.replace("http://localhost:", "http://127.0.0.1:") + } else { + mcpg_json.replace("http://localhost:", "http://host.docker.internal:") + }; + tokio::fs::write(output_dir.join("mcpg-config.json"), &mcpg_json).await .with_context(|| format!("Failed to write MCPG config: {}", output_dir.join("mcpg-config.json").display()))?; debug!("MCPG config written"); - // Generate MCP client config for copilot - let mcp_client_json = compile::generate_mcp_client_config(&mcpg_config)?; - // Substitute ADO macro placeholder with actual key - let mcp_client_json = mcp_client_json.replace("$(MCP_GATEWAY_API_KEY)", &mcpg_api_key); - // Local: copilot runs on host, not inside AWF container - let mcp_client_json = mcp_client_json.replace("host.docker.internal", "127.0.0.1"); - - tokio::fs::write(&mcp_config_path, &mcp_client_json).await - .with_context(|| format!("Failed to write MCP config: {}", mcp_config_path.display()))?; - debug!("MCP client config written"); - // Start MCPG println!("\n=== Starting MCP Gateway (MCPG) ==="); if needs_ado_token && args.pat.is_none() { @@ -438,9 +510,12 @@ pub async fn run(args: &RunArgs) -> Result<()> { ADO MCP tool calls will likely fail at runtime."); println!("Warning: ADO MCP enabled but no PAT provided — tool calls may fail"); } + let gateway_output_path = output_dir.join("gateway-output.json"); let (mcpg_child, mcpg_env_file) = start_mcpg( &mcpg_json, &mcpg_api_key, + mcpg_port, + &gateway_output_path, args.pat.as_deref(), needs_ado_token, )?; @@ -449,7 +524,7 @@ pub async fn run(args: &RunArgs) -> Result<()> { // Health check MCPG — also detect early crash let client = reqwest::Client::new(); - let health_url = format!("http://127.0.0.1:{}/health", compile::MCPG_PORT); + let health_url = format!("http://127.0.0.1:{}/health", mcpg_port); let mut ready = false; for _ in 0..30 { if let Some(ref mut child) = guard.mcpg_child { @@ -469,7 +544,43 @@ pub async fn run(args: &RunArgs) -> 
Result<()> { if !ready { bail!("MCPG did not become ready within 30s"); } - println!("MCPG ready on port {}", compile::MCPG_PORT); + println!("MCPG ready on port {}", mcpg_port); + + // Wait for gateway output — health check passing doesn't guarantee + // stdout is flushed, so poll until the file contains valid JSON. + println!("Waiting for gateway output..."); + let mut gateway_ready = false; + for _ in 0..15 { + if let Ok(content) = tokio::fs::read_to_string(&gateway_output_path).await { + if serde_json::from_str::(&content) + .ok() + .and_then(|v| v.get("mcpServers").cloned()) + .is_some() + { + gateway_ready = true; + break; + } + } + tokio::time::sleep(std::time::Duration::from_secs(1)).await; + } + if !gateway_ready { + let content = tokio::fs::read_to_string(&gateway_output_path).await.unwrap_or_default(); + bail!( + "MCPG gateway output not ready within 15s. Content: {}", + if content.is_empty() { "(empty)" } else { &content } + ); + } + + // Transform MCPG's runtime output into copilot client config + let gateway_json = tokio::fs::read_to_string(&gateway_output_path).await + .context("Failed to read MCPG gateway output")?; + debug!("Gateway output: {}", gateway_json); + let mcp_client_json = transform_gateway_output(&gateway_json)?; + + tokio::fs::write(&mcp_config_path, &mcp_client_json).await + .with_context(|| format!("Failed to write MCP config: {}", mcp_config_path.display()))?; + debug!("MCP client config written"); + println!("MCP client config generated from gateway output"); } else { // Skip MCPG — generate direct config pointing to SafeOutputs println!("\n=== Generating direct MCP config (no MCPG) ==="); @@ -714,4 +825,42 @@ mod tests { assert!(json.contains("Bearer test-key")); assert!(json.contains("\"type\": \"http\"")); } + + #[test] + fn test_transform_gateway_output() { + let gateway_json = serde_json::json!({ + "mcpServers": { + "safeoutputs": { + "type": "http", + "url": "http://127.0.0.1:54321/mcp/safeoutputs", + "headers": { + 
"Authorization": "Bearer secret-key" + } + }, + "azure-devops": { + "type": "http", + "url": "http://127.0.0.1:54321/mcp/azure-devops" + } + } + }); + + let result = transform_gateway_output(&gateway_json.to_string()).unwrap(); + let parsed: serde_json::Value = serde_json::from_str(&result).unwrap(); + + // Each server should have tools: ["*"] + let servers = parsed["mcpServers"].as_object().unwrap(); + for (name, entry) in servers { + assert_eq!( + entry["tools"], + serde_json::json!(["*"]), + "Server '{}' should have tools: [\"*\"]", name, + ); + } + + // URLs should be preserved as-is (local run, no rewriting needed) + assert!(result.contains("127.0.0.1:54321")); + + // Headers should be preserved + assert!(result.contains("Bearer secret-key")); + } }