# WebScout — app.py
# (Hugging Face Spaces metadata: author manik-hossain, commit 7f94c68 "without asr")
"""WebScout: Streamlit chat UI over a LangGraph ReAct agent with Tavily web search."""
import streamlit as st
from dotenv import load_dotenv
from langchain_community.tools.tavily_search import TavilySearchResults
# Merged the two separate `langchain_core.messages` imports into one line.
from langchain_core.messages import AIMessage, HumanMessage
from langchain_openai import ChatOpenAI
from langgraph.checkpoint.memory import MemorySaver
from langgraph.prebuilt import create_react_agent

# Load API credentials (model endpoint, Tavily key) from a local .env file.
load_dotenv()
@st.cache_resource
def get_model():
    """Build the chat model used by the agent.

    Wrapped in ``st.cache_resource`` so Streamlit reruns reuse one client
    instance per server process instead of reconstructing it on every rerun.
    """
    return ChatOpenAI(
        model="gpt-4o",
        temperature=0,
        base_url="https://models.inference.ai.azure.com",
    )
# In-process checkpointer: conversation state lives in memory only and is
# lost when the server restarts.
memory = MemorySaver()
# Tavily web-search tool, capped at 2 results per query.
search = TavilySearchResults(max_results=2)
tools = [search]
model = get_model()
# ReAct-style agent graph: the model decides when to invoke the search tool.
app = create_react_agent(model, tools, checkpointer=memory)
# NOTE(review): thread_id is hard-coded, so every browser session appears to
# share one conversation thread — confirm whether per-session IDs are intended.
config = {"configurable": {"thread_id": "111"}}
if query := st.chat_input("Ask anything"):

    def _token_stream():
        """Yield assistant message chunks as they stream from the agent graph."""
        events = app.stream(
            {"messages": [HumanMessage(query)]},
            config=config,
            stream_mode="messages",
        )
        for message, _metadata in events:
            # Only surface model output; skip tool messages and other chunk types.
            if isinstance(message, AIMessage):
                yield message.content

    # st.write_stream accepts a callable returning a generator.
    st.write_stream(_token_stream)