From fa2fa48c9a520a85e49f6f4d93e7f05539663fdb Mon Sep 17 00:00:00 2001 From: Mola-maker <2249464964@qq.com> Date: Mon, 20 Apr 2026 18:08:05 +0800 Subject: [PATCH] feat: inject P3 historical experience into code_agent prompt generate_script() now pulls up to 2 relevant experience entries via get_relevant_experience('P3', max_entries=2, max_chars=1800) and appends them to the user prompt. When the experience log is empty the section is omitted, so first-run behavior is unchanged. Mirrors the pattern already used in modeling_agent.derive_model. --- agents/code_agent.py | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/agents/code_agent.py b/agents/code_agent.py index d903c7b..c6eb85d 100644 --- a/agents/code_agent.py +++ b/agents/code_agent.py @@ -13,6 +13,7 @@ from agents.utils import container_name, docker_cp, docker_exec from agents.prompts import CODER_PROMPT from agents.flows import Flows +from agents.experience_recorder import get_relevant_experience BASE_DIR = Path(__file__).resolve().parent.parent VOL_HOST = Path(os.getenv("VOL_HOST", BASE_DIR / "vol")) @@ -62,9 +63,12 @@ def _safe_problem_text(self, ctx: dict) -> str: def generate_script(self, step_key: str, coder_prompt: str, ctx: dict) -> str: """Generate a Python script for any step using the unified CODER_PROMPT.""" + past_exp = get_relevant_experience("P3", max_entries=2, max_chars=1800) + exp_section = f"\n\n{past_exp}\n" if past_exp else "" user_prompt = ( f"{coder_prompt}\n\n" f"Problem snippet:\n{self._safe_problem_text(ctx)}" + f"{exp_section}" ) try: code = _extract_code(call_model(SYSTEM_CODEGEN, user_prompt, task="codegen"))