Update agent.py
Browse files
agent.py
CHANGED
|
@@ -5,6 +5,7 @@ from langgraph.prebuilt import ToolNode, tools_condition
|
|
| 5 |
from langchain_core.messages import HumanMessage, AIMessage
|
| 6 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 7 |
from langchain_core.runnables import Runnable
|
|
|
|
| 8 |
from tools import TOOLS
|
| 9 |
import pandas as pd
|
| 10 |
|
|
@@ -18,7 +19,8 @@ def build_graph():
|
|
| 18 |
|
| 19 |
# Initialize the LLM (e.g., Gemini Flash, zero temperature)
|
| 20 |
# llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0)
|
| 21 |
-
llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
|
|
|
|
| 22 |
llm_with_tools = llm.bind_tools(TOOLS)
|
| 23 |
|
| 24 |
# Step 1: Retriever node
|
|
|
|
| 5 |
from langchain_core.messages import HumanMessage, AIMessage
|
| 6 |
from langchain_google_genai import ChatGoogleGenerativeAI
|
| 7 |
from langchain_core.runnables import Runnable
|
| 8 |
+
from llama_index.core.agent.workflow import AgentWorkflow, ReActAgent
from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
|
| 9 |
from tools import TOOLS
|
| 10 |
import pandas as pd
|
| 11 |
|
|
|
|
| 19 |
|
| 20 |
# Initialize the LLM (e.g., Gemini Flash, zero temperature)
|
| 21 |
# llm = ChatGoogleGenerativeAI(model="gemini-1.5-flash", temperature=0)
|
| 22 |
+
# llm = ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
|
| 23 |
+
llm = HuggingFaceInferenceAPI(model_name="Qwen/Qwen2.5-Coder-32B-Instruct")
|
| 24 |
llm_with_tools = llm.bind_tools(TOOLS)
|
| 25 |
|
| 26 |
# Step 1: Retriever node
|