From c5fffc33a48ec121033cf53c81d529b9a1ddaba5 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" <41898282+github-actions[bot]@users.noreply.github.com> Date: Tue, 28 Apr 2026 12:59:05 +0000 Subject: [PATCH] docs: document /reflect endpoint in api-proxy-sidecar Add documentation for the GET /reflect management endpoint introduced in commit 9b2e0b8 (feat(api-proxy): add /reflect endpoint for dynamic provider and model discovery). Co-authored-by: Copilot <223556219+Copilot@users.noreply.github.com> --- docs/api-proxy-sidecar.md | 64 +++++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) diff --git a/docs/api-proxy-sidecar.md b/docs/api-proxy-sidecar.md index af3e94f3..c559d527 100644 --- a/docs/api-proxy-sidecar.md +++ b/docs/api-proxy-sidecar.md @@ -323,6 +323,70 @@ Docker healthcheck on the `/health` endpoint (port 10000): - **Retries**: 5 - **Start period**: 2s +### Reflection endpoint + +The management port (10000) also exposes a `GET /reflect` endpoint for dynamic provider and model discovery. This allows agent harnesses to query which providers are configured and which models are available at runtime. 
+ +```bash +curl http://172.30.0.30:10000/reflect +``` + +**Example response:** + +```json +{ + "endpoints": [ + { + "provider": "openai", + "port": 10000, + "base_url": "http://api-proxy:10000", + "configured": true, + "models": ["gpt-4o", "gpt-4o-mini"], + "models_url": "http://api-proxy:10000/v1/models" + }, + { + "provider": "anthropic", + "port": 10001, + "base_url": "http://api-proxy:10001", + "configured": false, + "models": null, + "models_url": "http://api-proxy:10001/v1/models" + }, + { + "provider": "copilot", + "port": 10002, + "base_url": "http://api-proxy:10002", + "configured": true, + "models": ["gpt-4o", "claude-3.5-sonnet"], + "models_url": "http://api-proxy:10002/models" + }, + { + "provider": "gemini", + "port": 10003, + "base_url": "http://api-proxy:10003", + "configured": false, + "models": null, + "models_url": "http://api-proxy:10003/v1beta/models" + }, + { + "provider": "opencode", + "port": 10004, + "base_url": "http://api-proxy:10004", + "configured": true, + "models": null, + "models_url": null + } + ], + "models_fetch_complete": true +} +``` + +Fields: +- `configured` — `true` if an API key for this provider was found at startup +- `models` — list of model IDs fetched from the provider at startup; `null` if the provider is not configured, the model fetch failed, or the provider exposes no model-list endpoint (OpenCode) +- `models_fetch_complete` — `true` once the startup model-fetch pass has finished +- `models_url` — URL to query for the live model list; `null` for OpenCode (which routes to other providers) + ## Troubleshooting ### Gemini proxy returns 503