-
Notifications
You must be signed in to change notification settings - Fork 1
feat: added multi-agent coordination example for OpenAI and Gemini #186
New issue
Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.
By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.
Already on GitHub? Sign in to your account
Changes from all commits
File filter
Filter by extension
Conversations
Jump to
Diff view
Diff view
There are no files selected for viewing
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| from .agent import root_agent |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,97 @@ | ||
import os
import asyncio
from dotenv import load_dotenv

from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.models.lite_llm import LiteLlm
from google.adk.runners import Runner
from google.adk.sessions.in_memory_session_service import InMemorySessionService
from google.genai.types import Content, Part

load_dotenv()

GEMINI_API_KEY = os.getenv("GEMINI_API_KEY")
JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")

# Fail fast with a clear message instead of an opaque HTTP 401 later.
if not GEMINI_API_KEY:
    raise ValueError("Missing GEMINI_API_KEY")
if not JAVELIN_API_KEY:
    raise ValueError("Missing JAVELIN_API_KEY")

# Javelin gateway endpoint shared by all three agents.
JAVELIN_API_BASE = "https://api-dev.javelin.live/v1/"


def _gemini_model() -> LiteLlm:
    """Build the Gemini model routed through the Javelin gateway.

    All three agents use an identical model configuration; building it in
    one place keeps the route and header settings consistent instead of
    repeating the same LiteLlm(...) literal three times.
    """
    return LiteLlm(
        model="openai/gemini-1.5-flash",
        api_base=JAVELIN_API_BASE,
        extra_headers={
            "x-javelin-route": "google_univ",
            "x-api-key": JAVELIN_API_KEY,
            "Authorization": f"Bearer {GEMINI_API_KEY}",
        },
    )


# Agent 1: Researcher — writes its findings into state['research'].
research_agent = LlmAgent(
    model=_gemini_model(),
    name="GeminiResearchAgent",
    instruction="Research the query and save findings in state['research'].",
    output_key="research",
)

# Agent 2: Summarizer — condenses state['research'] into state['summary'].
summary_agent = LlmAgent(
    model=_gemini_model(),
    name="GeminiSummaryAgent",
    instruction="Summarize state['research'] into state['summary'].",
    output_key="summary",
)

# Agent 3: Reporter — turns state['summary'] into the final report.
report_agent = LlmAgent(
    model=_gemini_model(),
    name="GeminiReportAgent",
    instruction="Generate a report from state['summary'] and include a source URL.",
    output_key="report",
)

# Coordinator agent: runs the three sub-agents in order, passing state along.
root_agent = SequentialAgent(
    name="GeminiMultiAgentCoordinator",
    sub_agents=[research_agent, summary_agent, report_agent],
)


async def main() -> None:
    """Run the coordinator once against a sample query and print the report."""
    try:
        session_service = InMemorySessionService()
        # create_session is an async coroutine with keyword-only parameters
        # in current google-adk releases; the original positional,
        # un-awaited call raised TypeError and never created the session.
        await session_service.create_session(
            app_name="gemini_multi_agent_app",
            user_id="user",
            session_id="sess",
        )

        runner = Runner(
            agent=root_agent,
            app_name="gemini_multi_agent_app",
            session_service=session_service,
        )

        query = "role of AI in sustainable energy"
        # Part.from_text takes a keyword-only `text` argument in google-genai.
        msg = Content(role="user", parts=[Part.from_text(text=query)])

        final_answer = ""
        # Runner.run_async takes keyword-only user_id / session_id.
        async for event in runner.run_async(
            user_id="user", session_id="sess", new_message=msg
        ):
            if event.is_final_response() and event.content:
                final_answer = event.content.parts[0].text
                break

        print("\n--- Final Report ---\n", final_answer)
    except Exception as e:
        # Surface agent/gateway failures with a readable message instead of
        # a bare traceback (as suggested in review).
        print(f"An error occurred: {e}")
|
Comment on lines
+75
to
+94
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Consider adding a try-except block to handle potential exceptions during agent execution. This will prevent the program from crashing and provide more informative error messages. try:
session_service = InMemorySessionService()
session_service.create_session("gemini_multi_agent_app", "user", "sess")
runner = Runner(
agent=root_agent,
app_name="gemini_multi_agent_app",
session_service=session_service,
)
query = "role of AI in sustainable energy"
msg = Content(role="user", parts=[Part.from_text(query)])
final_answer = ""
async for event in runner.run_async("user", "sess", new_message=msg):
if event.is_final_response() and event.content:
final_answer = event.content.parts[0].text
break
print("\n--- Final Report ---\n", final_answer)
except Exception as e:
print(f"An error occurred: {e}") |
||
|
|
||
# Script entry point: run the async pipeline once.
if __name__ == "__main__":
    asyncio.run(main())
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1 @@ | ||
| from .agent import root_agent |
| Original file line number | Diff line number | Diff line change |
|---|---|---|
| @@ -0,0 +1,100 @@ | ||
import os
import asyncio
from dotenv import load_dotenv

from google.adk.agents import LlmAgent, SequentialAgent
from google.adk.models.lite_llm import LiteLlm
from google.adk.runners import Runner
from google.adk.sessions.in_memory_session_service import InMemorySessionService
from google.genai.types import Content, Part

load_dotenv()

JAVELIN_API_KEY = os.getenv("JAVELIN_API_KEY")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Fail fast with a clear message instead of an opaque HTTP 401 later.
if not JAVELIN_API_KEY:
    raise ValueError("Missing JAVELIN_API_KEY in environment")
if not OPENAI_API_KEY:
    raise ValueError("Missing OPENAI_API_KEY in environment")

# Javelin gateway endpoint shared by all three agents.
JAVELIN_API_BASE = "https://api-dev.javelin.live/v1"


def _openai_model() -> LiteLlm:
    """Build the GPT-4o model routed through the Javelin gateway.

    All three agents use an identical model configuration; building it in
    one place keeps the route and header settings consistent instead of
    repeating the same LiteLlm(...) literal three times.
    """
    return LiteLlm(
        model="openai/gpt-4o",
        api_base=JAVELIN_API_BASE,
        extra_headers={
            "x-javelin-route": "openai_univ",
            "x-api-key": JAVELIN_API_KEY,
            "Authorization": f"Bearer {OPENAI_API_KEY}",
        },
    )


# Agent 1: Researcher — writes its findings into state['research'].
research_agent = LlmAgent(
    model=_openai_model(),
    name="ResearchAgent",
    instruction="Research the query and save findings in state['research'].",
    output_key="research",
)

# Agent 2: Summarizer — condenses state['research'] into state['summary'].
summary_agent = LlmAgent(
    model=_openai_model(),
    name="SummaryAgent",
    instruction="Summarize state['research'] into state['summary'].",
    output_key="summary",
)

# Agent 3: Reporter — turns state['summary'] into the final report.
report_agent = LlmAgent(
    model=_openai_model(),
    name="ReportAgent",
    instruction="Generate a report from state['summary'] and include a source URL.",
    output_key="report",
)

# Coordinator agent running all three sequentially.
coordinator = SequentialAgent(
    name="OpenAI_MultiAgentCoordinator",
    sub_agents=[research_agent, summary_agent, report_agent],
)
# ADK discovers the package's entry agent under this name.
root_agent = coordinator


async def main() -> None:
    """Run the coordinator once against a sample query and print the report."""
    try:
        session_service = InMemorySessionService()
        # create_session is an async coroutine with keyword-only parameters
        # in current google-adk releases; the original positional,
        # un-awaited call raised TypeError and never created the session.
        await session_service.create_session(
            app_name="openai_multi_agent_app",
            user_id="user",
            session_id="sess",
        )

        runner = Runner(
            agent=coordinator,
            app_name="openai_multi_agent_app",
            session_service=session_service,
        )

        # Provide user query.
        query = "impact of AI on global education"
        # Part.from_text takes a keyword-only `text` argument in google-genai.
        msg = Content(role="user", parts=[Part.from_text(text=query)])

        final_answer = ""
        # Runner.run_async takes keyword-only user_id / session_id.
        async for event in runner.run_async(
            user_id="user", session_id="sess", new_message=msg
        ):
            if event.is_final_response() and event.content:
                final_answer = event.content.parts[0].text
                break

        print("\n--- Final Report ---\n", final_answer)
    except Exception as e:
        # Surface agent/gateway failures with a readable message instead of
        # a bare traceback (as suggested in review).
        print(f"An error occurred: {e}")
|
Comment on lines
+77
to
+97
Contributor
There was a problem hiding this comment. Choose a reason for hiding this comment. The reason will be displayed to describe this comment to others. Learn more. Consider adding a try-except block to handle potential exceptions during agent execution. This will prevent the program from crashing and provide more informative error messages. try:
session_service = InMemorySessionService()
session_service.create_session("openai_multi_agent_app", "user", "sess")
runner = Runner(
agent=coordinator,
app_name="openai_multi_agent_app",
session_service=session_service,
)
# Provide user query
query = "impact of AI on global education"
msg = Content(role="user", parts=[Part.from_text(query)])
final_answer = ""
async for event in runner.run_async("user", "sess", new_message=msg):
if event.is_final_response() and event.content:
final_answer = event.content.parts[0].text
break
print("\n--- Final Report ---\n", final_answer)
except Exception as e:
print(f"An error occurred: {e}") |
||
|
|
||
# Script entry point: run the async pipeline once.
if __name__ == "__main__":
    asyncio.run(main())
There was a problem hiding this comment.
Choose a reason for hiding this comment
The reason will be displayed to describe this comment to others. Learn more.
Consider using
`os.environ.get` with a default value instead of raising a `ValueError`. This allows the program to run with a default behavior if the environment variables are not set, and provides a more graceful degradation. Also, consider logging a warning message if the environment variables are not set.