Update app.py
Browse files
app.py
CHANGED
|
@@ -1,211 +1,235 @@
|
|
| 1 |
-
import
|
| 2 |
-
import
|
| 3 |
-
import
|
| 4 |
-
from
|
| 5 |
-
|
|
|
|
| 6 |
import google.genai.types as types
|
| 7 |
import requests
|
| 8 |
-
from google.adk.agents import BaseAgent, LlmAgent
|
| 9 |
-
from google.adk.agents.invocation_context import InvocationContext
|
| 10 |
from google.adk.events import Event, EventActions
|
| 11 |
-
from google.adk.
|
| 12 |
-
from
|
| 13 |
-
from google.
|
| 14 |
-
|
| 15 |
-
|
| 16 |
-
|
| 17 |
-
|
| 18 |
-
google_search,
|
| 19 |
-
)
|
| 20 |
|
| 21 |
-
# Configure logging to suppress verbose output
|
| 22 |
logging.basicConfig(level=logging.ERROR)
|
| 23 |
-
|
| 24 |
-
|
| 25 |
-
|
| 26 |
-
|
| 27 |
-
|
| 28 |
-
|
| 29 |
-
|
| 30 |
-
|
| 31 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 32 |
url = 'https://agents-course-unit4-scoring.hf.space/questions'
|
| 33 |
headers = {'accept': 'application/json'}
|
| 34 |
-
|
| 35 |
-
|
| 36 |
-
|
| 37 |
-
|
| 38 |
-
|
| 39 |
-
|
| 40 |
-
|
| 41 |
-
|
| 42 |
-
|
| 43 |
-
|
| 44 |
-
|
| 45 |
-
|
| 46 |
-
|
| 47 |
-
|
| 48 |
-
|
| 49 |
-
|
| 50 |
-
except requests.exceptions.RequestException as e:
|
| 51 |
-
print(f"Error fetching questions: {e}")
|
| 52 |
-
return []
|
| 53 |
-
|
| 54 |
-
def submit_questions(answers: list[Dict[str, Any]]) -> Dict[str, Any]:
|
| 55 |
-
"""
|
| 56 |
-
Submits the collected answers to the scoring API.
|
| 57 |
-
|
| 58 |
-
Args:
|
| 59 |
-
answers: A list of dictionaries, where each dictionary contains
|
| 60 |
-
a 'task_id' and a 'submitted_answer'.
|
| 61 |
-
"""
|
| 62 |
-
# !!! IMPORTANT !!!
|
| 63 |
-
# REPLACE the username and agent_code with your own details.
|
| 64 |
-
username = "YOUR_HUGGING_FACE_USERNAME"
|
| 65 |
-
agent_code_url = "https://huggingface.co/spaces/YOUR_USERNAME/YOUR_SPACE_NAME/tree/main"
|
| 66 |
-
|
| 67 |
-
print(f"Attempting to submit {len(answers)} answers for user '{username}'...")
|
| 68 |
url = 'https://agents-course-unit4-scoring.hf.space/submit'
|
| 69 |
payload = {
|
| 70 |
-
|
| 71 |
-
|
| 72 |
-
|
| 73 |
-
}
|
| 74 |
headers = {'accept': 'application/json', "Content-Type": "application/json"}
|
| 75 |
-
|
| 76 |
-
|
| 77 |
-
|
| 78 |
-
|
| 79 |
-
print("Submission successful!")
|
| 80 |
-
print("Response:", response.json())
|
| 81 |
return response.json()
|
| 82 |
-
|
| 83 |
-
|
| 84 |
-
|
| 85 |
-
raise
|
| 86 |
|
| 87 |
-
|
| 88 |
-
|
|
|
|
| 89 |
submit_api = FunctionTool(func=submit_questions)
|
| 90 |
|
| 91 |
-
#
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 92 |
|
| 93 |
-
APP_NAME = "gaia_challenge_agent"
|
| 94 |
-
USER_ID = "test_user"
|
| 95 |
-
SESSION_ID = "main_session"
|
| 96 |
|
| 97 |
-
# A specialized agent for tasks requiring code generation
|
| 98 |
code_agent = LlmAgent(
|
| 99 |
-
name='
|
| 100 |
-
model="gemini-2.
|
| 101 |
-
description=
|
| 102 |
-
|
| 103 |
-
"You are an expert in data analysis and code generation. Given a question and a file URL, "
|
| 104 |
-
"write Python code to find the answer. You cannot execute the code. "
|
| 105 |
-
"Use pandas for data files. Fetch remote files using requests. "
|
| 106 |
-
"Your final output must be only the answer to the question, with no extra text or explanation."
|
| 107 |
),
|
| 108 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 109 |
)
|
| 110 |
|
| 111 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 112 |
search_agent = LlmAgent(
|
| 113 |
-
name='
|
| 114 |
-
model="gemini-2.
|
| 115 |
-
description=
|
| 116 |
-
|
| 117 |
-
"You are an expert web researcher. You will be given a question. "
|
| 118 |
-
"Use your search tool to find the most accurate information. "
|
| 119 |
-
"Synthesize the findings and provide a concise, direct answer to the question. "
|
| 120 |
-
"Your final output must be only the answer, with no extra text."
|
| 121 |
),
|
| 122 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 123 |
)
|
| 124 |
|
| 125 |
-
# A specialized agent for image analysis
|
| 126 |
image_agent = LlmAgent(
|
| 127 |
-
name='
|
| 128 |
-
model="gemini-2.
|
| 129 |
-
description=
|
| 130 |
-
|
| 131 |
-
"You are an expert image analyst. You will be given a question and a URL to an image. "
|
| 132 |
-
"Analyze the image content to answer the question. "
|
| 133 |
-
"Your final output must be only the answer, with no extra text."
|
| 134 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 135 |
)
|
| 136 |
|
| 137 |
-
|
| 138 |
youtube_agent = LlmAgent(
|
| 139 |
-
name='
|
| 140 |
-
model="gemini-2.
|
| 141 |
-
description=
|
| 142 |
-
|
| 143 |
-
"You are an expert video analyst. You will be given a question and a URL to a YouTube video. "
|
| 144 |
-
"Analyze the video content to answer the question. "
|
| 145 |
-
"Your final output must be only the answer, with no extra text."
|
| 146 |
),
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 147 |
)
|
| 148 |
|
| 149 |
-
# The main orchestrator agent
|
| 150 |
root_agent = LlmAgent(
|
| 151 |
-
name='
|
| 152 |
-
model="gemini-2.
|
| 153 |
-
description=
|
| 154 |
-
|
| 155 |
-
"You are the project manager. Your goal is to answer a series of questions and submit them. "
|
| 156 |
-
"1. **FETCH**: Start by using the `answer_questions` tool to get the list of all tasks. "
|
| 157 |
-
"2. **DELEGATE**: For each task string, which contains a 'task_id:question', extract the task_id and the question. "
|
| 158 |
-
" - Determine the best specialized agent for the job (Code, Search, Image, YouTube) based on the question and any file URLs. "
|
| 159 |
-
" - Invoke that agent with the question and necessary context (like the file URL). "
|
| 160 |
-
"3. **COLLECT**: Get the precise answer back from the specialist agent. Create a dictionary: `{'task_id': 'the_id', 'submitted_answer': 'the_answer'}`. The answer must be exact, without any extra formatting or text. "
|
| 161 |
-
"4. **SUBMIT**: After processing all questions, gather all the answer dictionaries into a single list. Call the `submit_questions` tool with this list to complete the assignment."
|
| 162 |
),
|
| 163 |
-
|
| 164 |
-
|
| 165 |
-
|
| 166 |
-
|
| 167 |
-
|
| 168 |
-
|
| 169 |
-
|
| 170 |
-
],
|
| 171 |
)
|
| 172 |
|
| 173 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 174 |
|
| 175 |
session_service = InMemorySessionService()
|
|
|
|
|
|
|
|
|
|
|
|
|
| 176 |
runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 177 |
|
| 178 |
-
|
| 179 |
-
|
| 180 |
-
|
| 181 |
-
app_name=APP_NAME, user_id=USER_ID, session_id=SESSION_ID
|
| 182 |
-
)
|
| 183 |
-
# Corrected the print statement below to use the SESSION_ID constant
|
| 184 |
-
print(f"===== Agent Process Started for session: {SESSION_ID} =====")
|
| 185 |
-
|
| 186 |
-
initial_prompt = "Get all the questions, answer each one using your specialized agents, and submit the final list of answers for scoring."
|
| 187 |
-
|
| 188 |
-
print(f"\nSending initial prompt to the Orchestrator Agent:\n'{initial_prompt}'")
|
| 189 |
-
|
| 190 |
-
# Corrected the run_async call to use the SESSION_ID constant
|
| 191 |
-
async for event in runner.run_async(
|
| 192 |
-
session_id=SESSION_ID,
|
| 193 |
-
content=types.Content(role="user", parts=[types.Part(text=initial_prompt)]),
|
| 194 |
-
):
|
| 195 |
-
# Optional: Print events for debugging
|
| 196 |
-
if event.action == EventActions.AGENT_RESPONSE and event.author == root_agent.name:
|
| 197 |
-
if event.content and event.content.parts:
|
| 198 |
-
print(f"\n[Orchestrator Response]: {event.content.parts[0].text}")
|
| 199 |
-
elif event.action == EventActions.TOOL_OUTPUT:
|
| 200 |
-
if event.content and event.content.parts and event.content.parts[0].tool_output:
|
| 201 |
-
tool_output = event.content.parts[0].tool_output
|
| 202 |
-
print(f"\n<-- [Tool Output] from `{tool_output.tool_name}`")
|
| 203 |
-
|
| 204 |
-
print("\n===== Agent Process Finished =====")
|
| 205 |
-
|
| 206 |
-
async def main():
|
| 207 |
-
"""Main entry point for the application."""
|
| 208 |
-
await run_agent_process()
|
| 209 |
-
|
| 210 |
-
if __name__ == "__main__":
|
| 211 |
-
asyncio.run(main())
|
|
|
|
| 1 |
+
from zoneinfo import ZoneInfo
|
| 2 |
+
from google.adk.agents import Agent,BaseAgent,LlmAgent
|
| 3 |
+
from google.adk.tools import google_search
|
| 4 |
+
from google.adk.runners import Runner
|
| 5 |
+
from google.adk.sessions import InMemorySessionService
|
| 6 |
+
from google.genai import types
|
| 7 |
import google.genai.types as types
|
| 8 |
import requests
|
|
|
|
|
|
|
| 9 |
from google.adk.events import Event, EventActions
|
| 10 |
+
from google.adk.agents.invocation_context import InvocationContext
|
| 11 |
+
from typing import AsyncGenerator
|
| 12 |
+
from google.genai import types as genai_types
|
| 13 |
+
from google.adk.tools import ToolContext, FunctionTool
|
| 14 |
+
import logging
|
| 15 |
+
#from google.adk.tools import built_in_code_execution
|
| 16 |
+
from google.adk.tools import agent_tool
|
|
|
|
|
|
|
| 17 |
|
|
|
|
| 18 |
logging.basicConfig(level=logging.ERROR)
|
| 19 |
+
#from google.adk.tools import agent_tool
|
| 20 |
+
url = 'https://agents-course-unit4-scoring.hf.space/questions'
|
| 21 |
+
headers = {'accept': 'application/json'}
|
| 22 |
+
response = requests.get(url, headers=headers)
|
| 23 |
+
|
| 24 |
+
# class responses_api(BaseAgent):
|
| 25 |
+
# async def _run_async_impl(self, ctx: InvocationContext)-> AsyncGenerator[Event, None]:
|
| 26 |
+
# # This method is called when the agent is run
|
| 27 |
+
# # You can implement your logic here
|
| 28 |
+
# # For example, you can call an external API or perform some calculations
|
| 29 |
+
# # and return the result
|
| 30 |
+
# url = 'https://agents-course-unit4-scoring.hf.space/questions'
|
| 31 |
+
# headers = {'accept': 'application/json'}
|
| 32 |
+
# response = requests.get(url, headers=headers)
|
| 33 |
+
# for i in response.json():
|
| 34 |
+
# if i['file_name'] != '':
|
| 35 |
+
# url_file = f"https://agents-course-unit4-scoring.hf.space/files/{i['task_id']}"
|
| 36 |
+
# question = i['question']
|
| 37 |
+
# prompt = f"{question} and the file is {url_file}, give the final answer only"
|
| 38 |
+
# else:
|
| 39 |
+
# question = i['question']
|
| 40 |
+
# prompt = f"{question} give the final answer only"
|
| 41 |
+
# existing_responses = ctx.session.state.get("user:responses", [])
|
| 42 |
+
# existing_responses.append(prompt)
|
| 43 |
+
# ctx.session_state["user:responses"] = existing_responses
|
| 44 |
+
|
| 45 |
+
# # Optionally, yield a single event to indicate completion or provide some output
|
| 46 |
+
# yield Event(author=self.name, content=types.Content(parts=[types.Part(text=f"Fetched {len(questions_data)} questions."))])
|
| 47 |
+
|
| 48 |
+
|
| 49 |
+
|
| 50 |
+
def answer_questions():
    """Fetch the GAIA assignment questions and format them as prompts.

    Returns:
        list[str]: One prompt per task, each prefixed with "task_id:" so the
        orchestrator can later recover the id, and pointing at the task's
        download URL when the API reports an attached file.

    Raises:
        requests.HTTPError: If the questions endpoint returns a non-2xx status.
    """
    url = 'https://agents-course-unit4-scoring.hf.space/questions'
    headers = {'accept': 'application/json'}
    # Fail fast on network/HTTP errors instead of iterating over an error body;
    # a timeout prevents the agent run from hanging indefinitely.
    response = requests.get(url, headers=headers, timeout=30)
    response.raise_for_status()
    prompts = []
    for item in response.json():
        task_id = item['task_id']
        question = item['question']
        if item['file_name'] != '':
            # Tasks with an attachment expose it at /files/<task_id>.
            url_file = f"https://agents-course-unit4-scoring.hf.space/files/{task_id}"
            prompt = f"{task_id}:{question} and the file is {url_file}, give the final answer only"
        else:
            prompt = f"{task_id}:{question} give the final answer only"
        prompts.append(prompt)
    return prompts
|
| 66 |
+
#responses_api = responses_api(name= 'responses_api_1')
|
| 67 |
+
from typing import Dict, Any
|
| 68 |
+
def submit_questions(answers: list[Dict[str, Any]]) -> Dict[str, Any]:
    """Submit the collected answers to the GAIA scoring API.

    Args:
        answers: List of answer dicts, each shaped like
            {'task_id': ..., 'submitted_answer': ...} (per the root agent's
            instruction). Annotation fixed from list[str], which contradicted
            the actual payload.

    Returns:
        The scoring API's JSON response on success (HTTP 200).

    Raises:
        requests.HTTPError: If the submission is rejected (non-2xx status).
    """
    import json  # local import keeps the module's top-of-file imports untouched

    url = 'https://agents-course-unit4-scoring.hf.space/submit'
    # NOTE(review): username/agent_code are hard-coded; consider moving them
    # to environment variables.
    payload = {
        "username": "ashishja",
        "agent_code": "https://huggingface.co/spaces/ashishja/Agents_Course_Final_Assignment_Ashish/tree/main",
        "answers": answers,
    }
    headers = {'accept': 'application/json', "Content-Type": "application/json"}
    # Log the payload *before* posting so a failed submission is still inspectable.
    print(json.dumps(payload, indent=2))
    response = requests.post(url, headers=headers, json=payload, timeout=60)
    if response.status_code == 200:
        return response.json()
    response.raise_for_status()
|
| 82 |
+
|
|
|
|
| 83 |
|
| 84 |
+
|
| 85 |
+
|
| 86 |
+
# Wrap the plain functions as ADK tools so the root agent can invoke them.
responses_api = FunctionTool(func=answer_questions)
submit_api = FunctionTool(func=submit_questions)
|
| 88 |
|
| 89 |
+
# class QuestionAnswerer(LlmAgent):
|
| 90 |
+
# async def _run_async_impl(self, ctx: InvocationContext) -> AsyncGenerator[Event, None]:
|
| 91 |
+
# questions_to_answer = ctx.session_service.get('fetched_questions', [])
|
| 92 |
+
# for q in questions_to_answer:
|
| 93 |
+
# answer = await self._llm(messages=[types.ChatMessage(role="user", parts=[types.Part(text=q)])])
|
| 94 |
+
# yield Event(author=self.name, content=answer.content)
|
| 95 |
+
|
| 96 |
+
# qa = QuestionAnswerer(name = 'qa_1', model="gemini-2.0-flash", description="Question Answerer")
|
| 97 |
+
|
| 98 |
+
|
| 99 |
+
|
| 100 |
+
|
| 101 |
+
|
| 102 |
+
|
| 103 |
+
|
| 104 |
+
|
| 105 |
+
# Session identifiers for the ADK runner.
# NOTE(review): the values look copied from a weather-sentiment example; rename
# to something GAIA-specific once confirmed nothing keys off these strings.
APP_NAME="weather_sentiment_agent"
USER_ID="user1234"
SESSION_ID="1234"
|
| 108 |
|
|
|
|
|
|
|
|
|
|
| 109 |
|
|
|
|
| 110 |
# Specialist agent: writes code to answer questions that reference a file
# (.py, .xlsx/.csv, .txt, .json).
code_agent = LlmAgent(
    name='codegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can write and execute code and answer any questions provided access the given files and answer"
    ),
    instruction = (
        "if the question contains a file with .py ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
        "If the question contains a spreadsheet file like .xlsx and .csv among others, get the file and depending on the question and the file provided, execute the code and provide the final answer. "
        "use code like import pandas as pd , file = pd.read_csv('file.csv') and then use the file to answer the question. "
        "if the question contains a file with .txt ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
        "if the question contains a file with .json ,Get the code file and depending on the question and the file provided, execute the code and provide the final answer. "
        "If you are writing code or if you get a code file, use the code execution tool to run the code and provide the final answer. "
    )
    # NOTE(review): no code-execution tool is attached (built_in_code_execution
    # is commented out below), so the instruction's "execute the code" cannot
    # actually happen — confirm whether the tool should be enabled.
    ,
    # tools=[built_in_code_execution],
    # Add the responses_api agent as a tool
    #sub_agents=[responses_api]
)
|
| 130 |
+
|
| 131 |
+
|
| 132 |
# Specialist agent: answers questions via Google web search.
search_agent = LlmAgent(
    name='searchgaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can search the web and answer any questions provided access the given files and answer"
    ),
    instruction = (
        "Get the url associated perform a search and consolidate the information provided and answer the provided question "
    )
    ,
    tools=[google_search],
    # Add the responses_api agent as a tool
    #sub_agents=[responses_api]
)
|
| 147 |
|
|
|
|
| 148 |
# Specialist agent: answers questions about an image file.
image_agent = LlmAgent(
    name='imagegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can when given a image file and answer any questions related to it"
    ),
    # Bug fix: the instruction said "watch the video" — a copy-paste from the
    # YouTube agent. This agent analyzes an image, not a video.
    instruction=(
        "Get the image file from the link associated in the prompt use Gemini to analyze the image and answer the provided question "
    ),
    # tools=[google_search],
    # Add the responses_api agent as a tool
    #sub_agents=[responses_api]
)
|
| 162 |
|
| 163 |
+
|
| 164 |
# Specialist agent: answers questions about a YouTube video.
youtube_agent = LlmAgent(
    name='youtubegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can when given a youtube link watch it and answer any questions related to it"
    ),
    instruction = (
        "Get the youtube link associated use Gemini to watch the video and answer the provided question ")
    ,
    # tools=[google_search],
    # Add the responses_api agent as a tool
    #sub_agents=[responses_api]
)
|
| 178 |
|
|
|
|
| 179 |
# Orchestrator agent: fetches the questions via `responses_api`, routes each
# one to a specialist (code/search/image/youtube) exposed as an AgentTool,
# collects {'task_id', 'submitted_answer'} dicts, then calls `submit_api`.
root_agent = LlmAgent(
    name='basegaiaAgent',
    model="gemini-2.5-pro-preview-05-06",
    description=(
        "You are a smart agent that can answer any questions provided access the given files and answer"
    ),
    instruction = (
        "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
        "invoke your tool 'responses_api' to retrieve the questions. "
        "Once you receive the list of questions, loop over each question and provide a concise answer for each based on the question and any provided file. "
        "For every answer, return a dictionary with the keys task_id and submitted_answer, for example: "
        "{'task_id': 'the-task-id', 'submitted_answer': 'your answer'}. "
        "Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
    )
    ,
    # Two HTTP helper tools plus the four specialist agents wrapped as tools.
    tools=[responses_api,submit_api,agent_tool.AgentTool(agent = code_agent),\
           agent_tool.AgentTool(agent = search_agent), agent_tool.AgentTool(youtube_agent), agent_tool.AgentTool(image_agent)],
    # Add the responses_api agent as a tool
    #sub_agents=[responses_api]
)
|
| 200 |
+
|
| 201 |
+
# root_agent = LlmAgent(
|
| 202 |
+
# name='gaiaAgent',
|
| 203 |
+
# model="gemini-2.5-pro-preview-05-06",
|
| 204 |
+
# description=(
|
| 205 |
+
# "You are a smart agent that can answer any questions provided access the given files and answer"
|
| 206 |
+
# ),
|
| 207 |
+
# instruction = (
|
| 208 |
+
# "You are a helpful agent. When the user asks to get the questions or makes a similar request, "
|
| 209 |
+
# "invoke base agent. "
|
| 210 |
+
# "Once you the answers check if are in correct format. "
|
| 211 |
+
# #"Collect all such dictionaries in a list (do not include any backslashes), and pass this list to the 'submit_api' tool to submit the answers."
|
| 212 |
+
# )
|
| 213 |
+
|
| 214 |
+
# ,
|
| 215 |
+
# #tools=[submit_api],
|
| 216 |
+
# # Add the responses_api agent as a tool
|
| 217 |
+
# sub_agents=[base_agent]
|
| 218 |
+
# )
|
| 219 |
|
| 220 |
# In-memory session store and runner wiring for the orchestrator.
session_service = InMemorySessionService()
# NOTE(review): in recent google-adk releases create_session is a coroutine
# (must be awaited); confirm this synchronous call matches the installed
# ADK version.
session = session_service.create_session(app_name=APP_NAME, \
                                         user_id=USER_ID,\
                                         session_id=SESSION_ID)

runner = Runner(agent=root_agent, app_name=APP_NAME, session_service=session_service)
|
| 226 |
+
# # def send_query_to_agent(root_agent, query, session):
|
| 227 |
+
# # session = session
|
| 228 |
+
# # content = types.Content(role='user', parts=[types.Part(text=query)])
|
| 229 |
+
|
| 230 |
+
# # async def main():
|
| 231 |
+
# # await process_questions_and_answer()
|
| 232 |
|
| 233 |
+
# # if __name__ == "__main__":
|
| 234 |
+
# # import asyncio
|
| 235 |
+
# # asyncio.run(main())
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|