Update app.py
app.py
CHANGED
@@ -14,6 +14,17 @@ from youtube_tool import youtube_transcript_tool, youtube_transcript_snippet_tool
 from multiple_tools import round_to_two_decimals_tool, text_inverter_tool, google_web_search_tool, wikipedia_search_tool
 from agent import smart_agent
 from llama_index.llms.openai import OpenAI
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -22,71 +33,50 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 # --- Basic Agent Definition ---
 
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
 class BasicAgent:
     def __init__(self):
-        print("BasicAgent initialized. . . .")
-
-        # self.api_key = os.getenv("HF_TOKEN")
-        # self.llm = HuggingFaceInferenceAPI(
-        #     model_name="deepseek-ai/DeepSeek-R1-0528",
-        #     token=self.api_key,
-        #     provider="auto",
-        #     max_iterations=10
-        # )
-        # self.agent = AgentWorkflow.from_tools_or_functions(
-        #     [wikipedia_search_tool, youtube_transcript_tool, youtube_transcript_snippet_tool,
-        #      round_to_two_decimals_tool, text_inverter_tool, google_web_search_tool],
-        #     llm=self.llm
-        # )
-        OpenAI_key = os.getenv("OPEN_AI_TOKEN")
-        agent_kwargs = {
-            "prefix": (
-                "You are a helpful AI assistant completing GAIA benchmark tasks.\n"
-                "You MUST use the tools provided to answer the user's question. Do not answer from your own knowledge.\n"
-                "Carefully analyze the question to determine the most appropriate tool to use.\n"
-                "Here are guidelines for using the tools:\n"
-                "- Use 'wikipedia_search' to find factual information about topics, events, people, etc. (e.g., 'Use wikipedia_search to find the population of France').\n"
-                "- Use 'youtube_transcript' to extract transcripts from YouTube videos when the question requires understanding the video content. (e.g., 'Use youtube_transcript to summarize the key points of this video').\n"
-                "- Use 'audio_transcriber' to transcribe uploaded audio files. (e.g., 'Use audio_transcriber to get the text from this audio recording').\n"
-                "- Use 'chess_image_solver' to analyze and solve chess puzzles from images. (e.g., 'Use chess_image_solver to determine the best move in this chess position').\n"
-                "- Use 'file_parser' to parse and analyze data from Excel or CSV files. (e.g., 'Use file_parser to calculate the average sales from this data').\n"
-                "- Use 'vegetable_classifier_2022' to classify a list of food items and extract only the vegetables. (e.g., 'Use vegetable_classifier_2022 to get a list of the vegetables in this grocery list').\n"
-                "- Use 'excel_food_sales_sum' to extract total food sales from excel files. (e.g., 'Use excel_food_sales_sum to calculate the total food sales').\n"
-                "Do NOT guess or make up answers. If a tool cannot provide the answer, truthfully respond that you were unable to find the information.\n"
-            ),
-            "suffix": (
-                "Use the tools to research or calculate the answer.\n"
-                "If a tool fails, explain the reason for the failure instead of hallucinating an answer.\n"
-                "Provide concise and direct answers as requested in the questions. Do not add extra information unless explicitly asked for.\n"
-                "For example, if asked for a number, return only the number. If asked for a list, return only the list.\n"
-            ),
-            "max_iterations": 100,
-            "verbose": false
-        }
-        self.llm = OpenAI(model="gpt-4o-mini", temperature=0.2, api_key=OpenAI_key)
-        self.agent = AgentWorkflow.from_tools_or_functions(
-            [wikipedia_search_tool, youtube_transcript_tool, youtube_transcript_snippet_tool, round_to_two_decimals_tool, text_inverter_tool, google_web_search_tool],
-            llm=self.llm,
-        )
 
-    async def run(self, question: str) -> str:
-        print(f"Agent received question (first 50 chars): {question[:50]}...")
-        # answer = await self.agent.run(question)
-        answer = await self.agent.run(
-            f"{question}\n\nIf you have enough information, respond with a concise final answer.",
-            max_iterations=50
-        )
-        if hasattr(answer, "output"):
-            print(f"Agent returning answer: {answer}")
-            return str(answer.output)
-        else:
-            print(f"Agent returning answer: {answer}")
-            return str(answer)
-
     def __call__(self, question: str) -> str:
-        return asyncio.run(self.run(question))
-
-
 
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
@@ -14,6 +14,17 @@ from youtube_tool import youtube_transcript_tool, youtube_transcript_snippet_tool
 from multiple_tools import round_to_two_decimals_tool, text_inverter_tool, google_web_search_tool, wikipedia_search_tool
 from agent import smart_agent
 from llama_index.llms.openai import OpenAI
+#-----------------------------------------------------------------
+from langgraph_impl.my_agent import create_langchain_agent
+#from langchain_community.llms import OpenAI
+from langchain.tools import Tool
+from langgraph_impl.my_agent import create_langchain_agent
+
+from langgraph_impl.wikipedia_tool import wiki_search
+from langgraph_impl.youtube_tool import get_youtube_transcript
+from langgraph_impl.audio_transcriber import transcribe_audio
+from langgraph_impl.image_chess_solver import solve_chess_image
+from langgraph_impl.file_parser import parse_file_and_summarize
 
 # (Keep Constants as is)
 # --- Constants ---
@@ -22,71 +33,50 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
 # --- Basic Agent Definition ---
 
 # ----- THIS IS WERE YOU CAN BUILD WHAT YOU WANT ------
+# class BasicAgent:
+#     def __init__(self):
+#         print("BasicAgent initialized. . . .")
+#         # self.agent = smart_agent()
+#         # self.api_key = os.getenv("HF_TOKEN")
+#         OpenAI_key = os.getenv("OPEN_AI_TOKEN")
+#         self.llm = OpenAI(model="gpt-4o-mini", temperature=0.2, api_key=OpenAI_key)
+#         self.agent = AgentWorkflow.from_tools_or_functions(
+#             [wikipedia_search_tool, youtube_transcript_tool, youtube_transcript_snippet_tool, round_to_two_decimals_tool, text_inverter_tool, google_web_search_tool],
+#             llm=self.llm,
+#         )
+
+#     async def run(self, question: str) -> str:
+#         print(f"Agent received question (first 50 chars): {question[:50]}...")
+#         # answer = await self.agent.run(question)
+#         answer = await self.agent.run(
+#             f"{question}\n\nIf you have enough information, respond with a concise final answer.",
+#             max_iterations=50
+#         )
+#         if hasattr(answer, "output"):
+#             print(f"Agent returning answer: {answer}")
+#             return str(answer.output)
+#         else:
+#             print(f"Agent returning answer: {answer}")
+#             return str(answer)
+
+#     def __call__(self, question: str) -> str:
+#         return asyncio.run(self.run(question))
+
+
 class BasicAgent:
     def __init__(self):
+        print("Initializing LangChain Agent...")
+        self.agent = create_langchain_agent()
 
     def __call__(self, question: str) -> str:
+        try:
+            result = self.agent.invoke({"input": question})
+            if isinstance(result, dict) and "output" in result:
+                return result["output"]
+            return result
+        except Exception as e:
+            return f"Agent error: {e}"
+
 
 
 def run_and_submit_all( profile: gr.OAuthProfile | None):
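The diff swaps the llama_index AgentWorkflow agent for a LangChain-based one returned by create_langchain_agent() in langgraph_impl/my_agent.py. That module is not part of this diff, so the sketch below is only an illustration of how such a factory could wrap the imported tool functions (wiki_search, get_youtube_transcript, transcribe_audio, solve_chess_image, parse_file_and_summarize) into an executor whose .invoke({"input": ...}) returns a dict with an "output" key, which is what the new BasicAgent.__call__ expects. The ChatOpenAI/initialize_agent choices, tool names, and descriptions are assumptions; the real module may build the agent with LangGraph primitives instead.

# Illustrative sketch only -- not the Space's actual langgraph_impl/my_agent.py.
import os

from langchain.agents import AgentType, initialize_agent
from langchain.tools import Tool
from langchain_openai import ChatOpenAI

from langgraph_impl.wikipedia_tool import wiki_search
from langgraph_impl.youtube_tool import get_youtube_transcript
from langgraph_impl.audio_transcriber import transcribe_audio
from langgraph_impl.image_chess_solver import solve_chess_image
from langgraph_impl.file_parser import parse_file_and_summarize


def create_langchain_agent():
    """Wrap the project tool functions as LangChain Tools and return an AgentExecutor."""
    # Model name and env var mirror the values visible in the diff; assumed here.
    llm = ChatOpenAI(model="gpt-4o-mini", temperature=0.2, api_key=os.getenv("OPEN_AI_TOKEN"))
    tools = [
        Tool(name="wikipedia_search", func=wiki_search,
             description="Look up factual information on Wikipedia."),
        Tool(name="youtube_transcript", func=get_youtube_transcript,
             description="Fetch the transcript of a YouTube video from its URL."),
        Tool(name="audio_transcriber", func=transcribe_audio,
             description="Transcribe an uploaded audio file to text."),
        Tool(name="chess_image_solver", func=solve_chess_image,
             description="Analyze a chess position from an image and suggest the best move."),
        Tool(name="file_parser", func=parse_file_and_summarize,
             description="Parse and summarize data from an Excel or CSV file."),
    ]
    # initialize_agent returns an AgentExecutor; executor.invoke({"input": question})
    # yields a dict with an "output" key, matching BasicAgent.__call__ in app.py.
    return initialize_agent(
        tools=tools,
        llm=llm,
        agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION,
        verbose=False,
        handle_parsing_errors=True,
    )

With an agent of this shape, BasicAgent()("What is the capital of France?") runs one tool-using loop and returns the final answer string.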