Update rag_langgraph.py
rag_langgraph.py CHANGED (+7 -4)
@@ -6,6 +6,7 @@ from typing import Annotated, Any, Dict, List, Optional, Sequence, Tuple, TypedD

 from langchain.agents import AgentExecutor, create_openai_tools_agent
 from langchain_community.tools.tavily_search import TavilySearchResults
+from langchain_community.utilities import ArxivAPIWrapper
 from langchain_core.messages import BaseMessage, HumanMessage
 from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
 from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
@@ -42,6 +43,7 @@ def today_tool(text: str) -> str:
     return (str(date.today()) + "\n\nIf you have completed all tasks, respond with FINAL ANSWER.")

 def create_graph(model, topic):
+    arxiv_tool = ArxivAPIWrapper()
     tavily_tool = TavilySearchResults(max_results=10)

     members = ["Researcher"]
@@ -94,10 +96,11 @@ def create_graph(model, topic):
         | JsonOutputFunctionsParser()
     )

-    researcher_agent = create_agent(llm, [tavily_tool, today_tool], system_prompt=
-
-
-
+    researcher_agent = create_agent(llm, [arxiv_tool, tavily_tool, today_tool], system_prompt=
+        "1. Research content on topic: " + topic + ", prioritizing research papers. "
+        "2. Based on your research, write a 2000-word article on the topic. "
+        "3. At the beginning of the article, add current date and author: Multi-AI-Agent System. "
+        "4. At the end of the article, add a references section with research papers.")
     researcher_node = functools.partial(agent_node, agent=researcher_agent, name="Researcher")

     workflow = StateGraph(AgentState)
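As an aside, ArxivAPIWrapper is a plain utility class rather than a ready-made agent tool, so a quick standalone check of the new dependency can be useful before relying on it inside the graph. The sketch below is illustrative only and not part of this commit: the top_k_results value and the sample query are arbitrary, and ArxivQueryRun is the stock LangChain tool wrapper around the same utility (it assumes the arxiv package is installed).

# Standalone smoke test for the new arXiv dependency (illustrative, not part of this commit)
from langchain_community.utilities import ArxivAPIWrapper
from langchain_community.tools import ArxivQueryRun

arxiv_api = ArxivAPIWrapper(top_k_results=3)        # fetch at most 3 papers per query
arxiv_tool = ArxivQueryRun(api_wrapper=arxiv_api)   # the same wrapper exposed as a LangChain tool

# Query arXiv directly and print the returned paper summaries.
print(arxiv_api.run("retrieval augmented generation"))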