import os

from dotenv import load_dotenv

load_dotenv()

from llama_index.core.agent import ReActAgent
from llama_index.core.tools import FunctionTool
from llama_index.llms.openrouter import OpenRouter

import tools

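# LlamaIndexAgent wires the helpers defined in tools.py into a LlamaIndex
# ReAct agent backed by an OpenRouter-hosted model.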
class LlamaIndexAgent:
    def __init__(
        self,
        model_name: str = "google/gemini-2.5-flash-preview",
        temperature: float = 0.7,
        verbose: bool = True,
    ):
""" |
|
Initialize the LlamaIndex agent with OpenRouter LLM. |
|
|
|
Args: |
|
openrouter_api_key: API key for OpenRouter |
|
model_name: Model name to use from OpenRouter |
|
temperature: Temperature setting for the LLM |
|
verbose: Whether to output verbose logs |
|
""" |
|
        self.llm = OpenRouter(
            api_key=os.getenv("OPENROUTER_API_KEY"),
            model=model_name,
            temperature=temperature,
        )

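        # Wrap each helper from the tools module as a FunctionTool so the
        # ReAct agent can select it by name and description during its
        # reason/act loop.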
        reverse_tool = FunctionTool.from_defaults(
            fn=tools.reverse_text,
            name="reverse_text",
            description="Reverses the given text",
        )
        final_answer_tool = FunctionTool.from_defaults(
            fn=tools.final_answer,
            name="final_answer",
            description="Use this to provide your final answer to the user's question",
        )
        web_search_tool = FunctionTool.from_defaults(
            fn=tools.web_search,
            name="web_search",
            description="Use this to search the web for the given query",
        )
        wikipedia_search_tool = FunctionTool.from_defaults(
            fn=tools.wikipedia_search,
            name="wikipedia_search",
            description="Use this to search Wikipedia for the given query",
        )
        multiply_tool = FunctionTool.from_defaults(
            fn=tools.multiply,
            name="multiply",
            description="Use this to multiply two numbers",
        )
        length_tool = FunctionTool.from_defaults(
            fn=tools.length,
            name="length",
            description="Use this to get the length of an iterable",
        )
        execute_python_file_tool = FunctionTool.from_defaults(
            fn=tools.execute_python_file,
            name="execute_python_file",
            description="Use this to execute a Python file",
        )
        transcript_youtube_tool = FunctionTool.from_defaults(
            fn=tools.trascript_youtube,
            name="transcript_youtube",
            description="Use this to get the transcript of a YouTube video",
        )
        classify_fruit_vegitable_tool = FunctionTool.from_defaults(
            fn=tools.classify_fruit_vegitable,
            name="classify_fruit_vegitable",
            description="Use this to classify items as fruits or vegetables",
        )
        fetch_historical_event_data_tool = FunctionTool.from_defaults(
            fn=tools.fetch_historical_event_data,
            name="fetch_historical_event_data",
            description="Use this to fetch data about a historical event that occurred in a given year, such as the Olympic Games, football matches, the NBA, etc.",
        )
        read_excel_tool = FunctionTool.from_defaults(
            fn=tools.read_excel,
            name="read_excel",
            description="Use this to read an Excel file",
        )
        pandas_column_sum_tool = FunctionTool.from_defaults(
            fn=tools.pandas_column_sum,
            name="pandas_column_sum",
            description="Use this to compute the sum of a pandas DataFrame column",
        )
        compute_sum_tool = FunctionTool.from_defaults(
            fn=tools.compute_sum,
            name="compute_sum",
            description="Use this to compute the sum of the provided values",
        )

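        # Main ReAct agent: it reasons step by step and may call any of the
        # tools above, for at most 20 reasoning/tool-call iterations.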
        self.agent = ReActAgent.from_tools(
            [
                reverse_tool,
                final_answer_tool,
                web_search_tool,
                wikipedia_search_tool,
                multiply_tool,
                length_tool,
                execute_python_file_tool,
                transcript_youtube_tool,
                classify_fruit_vegitable_tool,
                fetch_historical_event_data_tool,
                read_excel_tool,
                pandas_column_sum_tool,
                compute_sum_tool,
            ],
            llm=self.llm,
            verbose=verbose,
            max_iterations=20,
            system_prompt="""
            You are a helpful AI assistant that can use tools to answer the user's questions.
            You have a set of tools that you are free to use.
            You can search the web, parse Wikipedia, execute Python scripts, read xlsx files, get a YouTube video transcript from a YouTube link, reverse text, and fetch historical events.
            When you have the complete answer to the user's question, always use the final_answer tool to present it.
            """,
        )

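        # Fallback "formatter" agent: it only has the final_answer tool and is
        # used to cast whatever the main agent produced into the requested
        # answer format.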
        self.small_agent = ReActAgent.from_tools(
            [final_answer_tool],
            llm=self.llm,
            verbose=verbose,
            max_iterations=5,
            system_prompt="You are asked to prepare an answer to the user's question in the desired format. Always use the final_answer tool; it will help you.",
        )

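    # A query is first run through the main agent; its raw response is then
    # handed to the small agent, which formats the final answer via the
    # final_answer tool.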
    def __call__(self, query_text: str, **kwds) -> str:
        """
        Process a user query through the agent.

        Args:
            query_text: User's query text

        Returns:
            The agent's response
        """
        try:
            response = self.agent.chat(query_text).response
        except Exception:
            # If the main agent fails (e.g. it hits max_iterations), fall back
            # to an empty intermediate response.
            response = ""
        final_response = self.small_agent.chat(f"Response: {response}")

        return final_response.response

if __name__ == "__main__":
    agent = LlamaIndexAgent()

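    # Populate this list with queries to exercise the agent end to end.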
    example_queries = []

    for query in example_queries:
        print(f"\nQuery: {query}")
        response = agent(query)
        print(f"Response: {response}")