From bf0ba28a551207d72d8b290dd3258603985e2c59 Mon Sep 17 00:00:00 2001
From: Dhruvj07
Date: Wed, 16 Apr 2025 18:10:18 +0530
Subject: [PATCH] =?UTF-8?q?feat:=20added=20multi-agent=20coordination=20ex?=
 =?UTF-8?q?ample=20for=20OpenAI=20and=20Gemini=20via=20Javelin=20(research?=
 =?UTF-8?q?=20=E2=86=92=20summary=20=E2=86=92=20report)?=
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit

---
 .../adk_gemini_agent_javelin/__init__.py      |   1 +
 .../agents/adk_gemini_agent_javelin/agent.py  |  97 +++++++++++++++++
 .../adk_openai_agent_javelin/__init__.py      |   1 +
 .../agents/adk_openai_agent_javelin/agent.py  | 100 ++++++++++++++++++
 4 files changed, 199 insertions(+)
 create mode 100644 examples/agents/adk_gemini_agent_javelin/__init__.py
 create mode 100644 examples/agents/adk_gemini_agent_javelin/agent.py
 create mode 100644 examples/agents/adk_openai_agent_javelin/__init__.py
 create mode 100644 examples/agents/adk_openai_agent_javelin/agent.py

diff --git a/examples/agents/adk_gemini_agent_javelin/__init__.py b/examples/agents/adk_gemini_agent_javelin/__init__.py
new file mode 100644
index 0000000..13b8869
--- /dev/null
+++ b/examples/agents/adk_gemini_agent_javelin/__init__.py
@@ -0,0 +1 @@
+from .agent import root_agent
diff --git a/examples/agents/adk_gemini_agent_javelin/agent.py b/examples/agents/adk_gemini_agent_javelin/agent.py
new file mode 100644
index 0000000..0c2eba2
--- /dev/null
+++ b/examples/agents/adk_gemini_agent_javelin/agent.py
@@ -0,0 +1,97 @@
+import os
+import asyncio
+from dotenv import load_dotenv
+
+from google.adk.agents import LlmAgent, SequentialAgent
+from google.adk.models.lite_llm import LiteLlm
+from google.adk.runners import Runner
+from google.adk.sessions.in_memory_session_service import InMemorySessionService
+from google.genai.types import Content, Part
+
+load_dotenv()
+
+GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
+JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")
+
+if not GEMINI_API_KEY:
+    raise ValueError("Missing GEMINI_API_KEY")
+if not JAVELIN_API_KEY:
+    raise ValueError("Missing JAVELIN_API_KEY")
+
+# Agent 1: Researcher
+research_agent = LlmAgent(
+    model=LiteLlm(
+        model="openai/gemini-1.5-flash",
+        api_base="https://api-dev.javelin.live/v1/",
+        extra_headers={
+            "x-javelin-route": "google_univ",
+            "x-api-key": JAVELIN_API_KEY,
+            "Authorization": f"Bearer {GEMINI_API_KEY}",
+        },
+    ),
+    name="GeminiResearchAgent",
+    instruction="Research the query and save findings in state['research'].",
+    output_key="research",
+)
+
+# Agent 2: Summarizer
+summary_agent = LlmAgent(
+    model=LiteLlm(
+        model="openai/gemini-1.5-flash",
+        api_base="https://api-dev.javelin.live/v1/",
+        extra_headers={
+            "x-javelin-route": "google_univ",
+            "x-api-key": JAVELIN_API_KEY,
+            "Authorization": f"Bearer {GEMINI_API_KEY}",
+        },
+    ),
+    name="GeminiSummaryAgent",
+    instruction="Summarize state['research'] into state['summary'].",
+    output_key="summary",
+)
+
+# Agent 3: Reporter
+report_agent = LlmAgent(
+    model=LiteLlm(
+        model="openai/gemini-1.5-flash",
+        api_base="https://api-dev.javelin.live/v1/",
+        extra_headers={
+            "x-javelin-route": "google_univ",
+            "x-api-key": JAVELIN_API_KEY,
+            "Authorization": f"Bearer {GEMINI_API_KEY}",
+        },
+    ),
+    name="GeminiReportAgent",
+    instruction="Generate a report from state['summary'] and include a source URL.",
+    output_key="report",
+)
+
+# Coordinator agent
+root_agent = SequentialAgent(
+    name="GeminiMultiAgentCoordinator",
+    sub_agents=[research_agent, summary_agent, report_agent]
+)
+
+async def main():
+    session_service = InMemorySessionService()
+    await session_service.create_session(app_name="gemini_multi_agent_app", user_id="user", session_id="sess")
+
+    runner = Runner(
+        agent=root_agent,
+        app_name="gemini_multi_agent_app",
+        session_service=session_service,
+    )
+
+    query = "role of AI in sustainable energy"
+    msg = Content(role="user", parts=[Part.from_text(text=query)])
+
+    final_answer = ""
+    async for event in runner.run_async(user_id="user", session_id="sess", new_message=msg):
+        if event.is_final_response() and event.content:
+            final_answer = event.content.parts[0].text
+            break
+
+    print("\n--- Final Report ---\n", final_answer)
+
+if __name__ == "__main__":
+    asyncio.run(main())
diff --git a/examples/agents/adk_openai_agent_javelin/__init__.py b/examples/agents/adk_openai_agent_javelin/__init__.py
new file mode 100644
index 0000000..13b8869
--- /dev/null
+++ b/examples/agents/adk_openai_agent_javelin/__init__.py
@@ -0,0 +1 @@
+from .agent import root_agent
diff --git a/examples/agents/adk_openai_agent_javelin/agent.py b/examples/agents/adk_openai_agent_javelin/agent.py
new file mode 100644
index 0000000..0802dc6
--- /dev/null
+++ b/examples/agents/adk_openai_agent_javelin/agent.py
@@ -0,0 +1,100 @@
+import os
+import asyncio
+from dotenv import load_dotenv
+
+from google.adk.agents import LlmAgent, SequentialAgent
+from google.adk.models.lite_llm import LiteLlm
+from google.adk.runners import Runner
+from google.adk.sessions.in_memory_session_service import InMemorySessionService
+from google.genai.types import Content, Part
+
+load_dotenv()
+
+JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
+
+if not JAVELIN_API_KEY:
+    raise ValueError("Missing JAVELIN_API_KEY in environment")
+if not OPENAI_API_KEY:
+    raise ValueError("Missing OPENAI_API_KEY in environment")
+
+# Agent 1: Researcher
+research_agent = LlmAgent(
+    model=LiteLlm(
+        model="openai/gpt-4o",
+        api_base="https://api-dev.javelin.live/v1",
+        extra_headers={
+            "x-javelin-route": "openai_univ",
+            "x-api-key": JAVELIN_API_KEY,
+            "Authorization": f"Bearer {OPENAI_API_KEY}",
+        },
+    ),
+    name="ResearchAgent",
+    instruction="Research the query and save findings in state['research'].",
+    output_key="research",
+)
+
+# Agent 2: Summarizer
+summary_agent = LlmAgent(
+    model=LiteLlm(
+        model="openai/gpt-4o",
+        api_base="https://api-dev.javelin.live/v1",
+        extra_headers={
+            "x-javelin-route": "openai_univ",
+            "x-api-key": JAVELIN_API_KEY,
+            "Authorization": f"Bearer {OPENAI_API_KEY}",
+        },
+    ),
+    name="SummaryAgent",
+    instruction="Summarize state['research'] into state['summary'].",
+    output_key="summary",
+)
+
+# Agent 3: Reporter
+report_agent = LlmAgent(
+    model=LiteLlm(
+        model="openai/gpt-4o",
+        api_base="https://api-dev.javelin.live/v1",
+        extra_headers={
+            "x-javelin-route": "openai_univ",
+            "x-api-key": JAVELIN_API_KEY,
+            "Authorization": f"Bearer {OPENAI_API_KEY}",
+        },
+    ),
+    name="ReportAgent",
+    instruction="Generate a report from state['summary'] and include a source URL.",
+    output_key="report",
+)
+
+# Coordinator agent running all three sequentially
+coordinator = SequentialAgent(
+    name="OpenAI_MultiAgentCoordinator",
+    sub_agents=[research_agent, summary_agent, report_agent]
+)
+root_agent = coordinator
+
+
+async def main():
+    session_service = InMemorySessionService()
+    await session_service.create_session(app_name="openai_multi_agent_app", user_id="user", session_id="sess")
+
+    runner = Runner(
+        agent=coordinator,
+        app_name="openai_multi_agent_app",
+        session_service=session_service,
+    )
+
+    # Provide user query
+    query = "impact of AI on global education"
+    msg = Content(role="user", parts=[Part.from_text(text=query)])
+
+    final_answer = ""
+    async for event in runner.run_async(user_id="user", session_id="sess", new_message=msg):
+        if event.is_final_response() and event.content:
+            final_answer = event.content.parts[0].text
+            break
+
+    print("\n--- Final Report ---\n", final_answer)
+
+if __name__ == "__main__":
+    asyncio.run(main())
\ No newline at end of file