# NOTE(review): extraction artifact — original header read "File size: 3,814 Bytes" / "13fbd2e"
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# NOTE(review): removed extraction artifact — stray rendered line numbers (1-116), not program code
import asyncio

from langchain_community.embeddings.ollama import OllamaEmbeddings
from llama_index.core import (
    PromptTemplate,
    Settings,
    SimpleDirectoryReader,
    SummaryIndex,
    VectorStoreIndex,
)
from llama_index.core.agent import ReActAgent
from llama_index.core.node_parser import SentenceSplitter
from llama_index.core.objects import ObjectIndex
from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.llms.ollama import Ollama

from prompts.agent_prompts import DEFAULT_AGENT_PROMPT

# Global model configuration: every index and agent built below inherits the
# LLM and embedding model from llama_index's Settings singleton.
llm = Ollama(model="llama3")
embed_model = OllamaEmbeddings(model="llama3")
Settings.llm, Settings.embed_model = llm, embed_model

# --- Alice in Wonderland: load, chunk, and index ---------------------------
documents = SimpleDirectoryReader(input_files=["./alice.pdf"]).load_data()

# Split raw documents into sentence-level nodes before indexing.
node_parser = SentenceSplitter()
alice_nodes = node_parser.get_nodes_from_documents(documents)

# Build a vector index (semantic lookup) and a summary index (whole-document
# questions) over the same nodes, then expose each as a query engine.
vector_index = VectorStoreIndex(nodes=alice_nodes, show_progress=True)
summary_index = SummaryIndex(nodes=alice_nodes, show_progress=True)
vector_query_engine = vector_index.as_query_engine()
summary_query_engine = summary_index.as_query_engine()

# --- ReAct paper: same load/chunk/index pipeline as the Alice document -----
react_documents = SimpleDirectoryReader(input_files=["./ReAct.pdf"]).load_data()
react_nodes = node_parser.get_nodes_from_documents(react_documents)

# Vector index for targeted lookups, summary index for paper-wide questions.
react_vector_index = VectorStoreIndex(nodes=react_nodes, show_progress=True)
react_summary_index = SummaryIndex(nodes=react_nodes, show_progress=True)
react_vector_query_engine = react_vector_index.as_query_engine()
react_summary_query_engine = react_summary_index.as_query_engine()
# Tools over the Alice indexes; the agent decides per question whether a
# vector lookup or a summary pass is the better fit.
vector_tool = QueryEngineTool.from_defaults(
    query_engine=vector_query_engine,
    name="vector_tool",
    description="Useful tool to get info about alice via vector index search",
)
summary_tool = QueryEngineTool.from_defaults(
    query_engine=summary_query_engine,
    name="summary_tool",
    description="Useful tool to get info about alice via summary index search",
)

# ReAct agent dedicated to the Alice in Wonderland document.
alice_agent = ReActAgent.from_tools(
    tools=[vector_tool, summary_tool],
    llm=llm,
    verbose=True,
)

# Tools over the ReAct-paper indexes, built from (name, description, engine)
# specs, plus an agent that routes between them.
_react_tool_specs = (
    ("react_vector_tool",
     "Useful tool to get info about paper ReAct via vector index search",
     react_vector_query_engine),
    ("react_summary_tool",
     "Useful tool to get info about paper ReAct via summary index search",
     react_summary_query_engine),
)
react_vector_tool, react_summary_tool = (
    QueryEngineTool(
        query_engine=engine,
        metadata=ToolMetadata(name=tool_name, description=desc),
    )
    for tool_name, desc, engine in _react_tool_specs
)

# ReAct agent dedicated to the ReAct paper.
react_agent = ReActAgent.from_tools(
    tools=[react_vector_tool, react_summary_tool],
    llm=llm,
    verbose=True,
)

# Wrap each per-document agent as a tool itself, so a top-level agent can
# delegate whole questions to the right document specialist.
alice_doc_tool, react_doc_tool = [
    QueryEngineTool(
        query_engine=doc_agent,
        metadata=ToolMetadata(name=tool_name, description=desc),
    )
    for doc_agent, tool_name, desc in [
        (alice_agent, "alice_doc_tool",
         "Useful tool to answer question related to Alice In Wonderland"),
        (react_agent, "react_doc_tool",
         "Useful tool to answer question related to ReAct paper"),
    ]
]

# Tool set handed to the top-level routing agent.
agent_tools = [alice_doc_tool, react_doc_tool]

# Index the document-level agent tools so the top-level agent retrieves only
# the single most relevant tool per query instead of seeing all of them.
# (ObjectIndex is imported at the top of the file with the other imports.)
obj_index = ObjectIndex.from_objects(
    agent_tools,
    index_cls=VectorStoreIndex,
)
top_agent = ReActAgent.from_tools(
    tool_retriever=obj_index.as_retriever(similarity_top_k=1),
    # Pass the LLM explicitly for consistency with alice_agent/react_agent;
    # behavior is unchanged since Settings.llm is this same object.
    llm=llm,
    verbose=True,
)
async def _main() -> None:
    """Ask the top-level agent one question and stream its answer to stdout.

    `astream_chat` returns a StreamingAgentChatResponse whose text is only
    complete once the token stream has been consumed; printing the response
    object immediately (as the original code did) can show an empty or
    partial answer. We therefore drain the async token generator explicitly.
    """
    response = await top_agent.astream_chat("Why Reason and Action work?")
    async for token in response.async_response_gen():
        print(token, end="", flush=True)
    print()  # trailing newline after the streamed answer


if __name__ == "__main__":
    asyncio.run(_main())