File size: 1,052 Bytes
7f94c68
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
import streamlit as st
from langchain_openai import ChatOpenAI
from langchain_core.messages import AIMessage
from langchain_community.tools.tavily_search import TavilySearchResults
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent
from dotenv import load_dotenv

# Load API credentials (e.g. OPENAI_API_KEY, TAVILY_API_KEY — TODO confirm exact names) from a local .env file.
load_dotenv()
@st.cache_resource
def get_model():
    """Build the ChatOpenAI client once and cache it for the Streamlit server's lifetime."""
    return ChatOpenAI(
        model="gpt-4o",
        temperature=0,
        base_url="https://models.inference.ai.azure.com",
    )

# In-memory checkpointer: keeps per-thread conversation state between turns
# (lost on process restart).
memory = MemorySaver()
# Tavily web-search tool, capped at the top 2 results per query.
search = TavilySearchResults(max_results=2)
tools = [search]
model = get_model()
# ReAct-style agent that can invoke the search tool; state is persisted via `memory`.
app = create_react_agent(model, tools, checkpointer=memory)

# Hard-coded thread id: every browser session shares one conversation thread.
# NOTE(review): consider a per-session id if isolated conversations are intended.
config = {"configurable": {"thread_id": "111"}}

if query := st.chat_input("Ask anything"):
    # Show the user's message in its own chat bubble (previously the query
    # was never rendered at all).
    with st.chat_message("user"):
        st.write(query)

    msg = [HumanMessage(query)]

    def gen():
        """Yield assistant text chunks as the agent streams them.

        Filters the message stream down to model output: tool messages are
        skipped, and AIMessage deltas with empty content (e.g. pure
        tool-call chunks) are dropped to avoid blank writes.
        """
        for chunk, _metadata in app.stream(
            {"messages": msg}, config=config, stream_mode="messages"
        ):
            if isinstance(chunk, AIMessage) and chunk.content:
                yield chunk.content

    # Render the streamed reply inside an assistant chat bubble;
    # st.write_stream accepts a callable and invokes it for us.
    with st.chat_message("assistant"):
        st.write_stream(gen)