Update interim.py
interim.py CHANGED: +10 -18
@@ -1,9 +1,9 @@
+#fix workflow
 import os
 import streamlit as st
 import pandas as pd
 import matplotlib.pyplot as plt
 import networkx as nx
-import matplotlib.pyplot as plt
 from langchain_community.tools.tavily_search import TavilySearchResults
 from langchain_openai import ChatOpenAI
 from langgraph.graph import MessagesState
@@ -24,7 +24,6 @@ if not tavily_api_key:
     raise ValueError("Missing required environment variable: TAVILY_API_KEY")
 
 # ------------------- Tool Definitions -------------------
-# Tavily Search Tool
 tavily_tool = TavilySearchResults(max_results=5)
 
 def multiply(a: int, b: int) -> int:
@@ -41,10 +40,9 @@ def divide(a: int, b: int) -> float:
         raise ValueError("Division by zero is not allowed.")
     return a / b
 
-# Combine tools
 tools = [add, multiply, divide, tavily_tool]
 
-# ------------------- LLM
+# ------------------- LLM Setup -------------------
 llm = ChatOpenAI(model="gpt-4o-mini")
 llm_with_tools = llm.bind_tools(tools, parallel_tool_calls=False)
 sys_msg = SystemMessage(content="You are a helpful assistant tasked with performing arithmetic and search on a set of inputs.")
@@ -54,7 +52,6 @@ def assistant(state: MessagesState):
     """Assistant node to invoke LLM with tools."""
     return {"messages": [llm_with_tools.invoke([sys_msg] + state["messages"])]}
 
-# Define the graph
 app_graph = StateGraph(MessagesState)
 app_graph.add_node("assistant", assistant)
 app_graph.add_node("tools", ToolNode(tools))
@@ -66,28 +63,25 @@ react_graph = app_graph.compile()
 # ------------------- Streamlit Interface -------------------
 st.title("ReAct Agent")
 
-# Display the workflow graph
+# Display the workflow graph using NetworkX
 st.header("LangGraph Workflow Visualization")
 
-# Convert LangGraph workflow to NetworkX graph
 G = nx.DiGraph()
 G.add_edge("START", "assistant")
-G.add_edge("assistant", "tools", label="
-G.add_edge("tools", "assistant")
+G.add_edge("assistant", "tools", label="tools_condition")
+G.add_edge("tools", "assistant", label="loop back")
 
-# Draw the graph
 plt.figure(figsize=(10, 6))
 pos = nx.spring_layout(G, seed=42)
 nx.draw(G, pos, with_labels=True, node_size=3000, node_color="lightblue", font_size=10, font_weight="bold")
 nx.draw_networkx_edge_labels(G, pos, edge_labels={
     ("assistant", "tools"): "tools_condition",
-    ("tools", "assistant"): "loop back"
+    ("tools", "assistant"): "loop back"
 }, font_color="red")
 st.pyplot(plt)
 
-#
-user_question = st.text_area("Enter your question:",
-                             placeholder="Example: 'Add 3 and 4. Multiply the result by 2. Divide it by 5.'")
+# User input
+user_question = st.text_area("Enter your question:", placeholder="Example: 'Add 3 and 4. Multiply the result by 2. Divide it by 5.'")
 
 if st.button("Submit"):
     if not user_question.strip():
@@ -98,15 +92,13 @@ if st.button("Submit"):
         messages = [HumanMessage(content=user_question)]
         response = react_graph.invoke({"messages": messages})
 
-        # Display results
         st.subheader("Responses")
         for m in response['messages']:
             st.write(m.content)
-
         st.success("Processing complete!")
 
-# Example
+# Example Questions
 st.sidebar.subheader("Example Questions")
 st.sidebar.write("- Add 3 and 4. Multiply the result by 2. Divide it by 5.")
 st.sidebar.write("- Tell me how many centuries Virat Kohli scored.")
-st.sidebar.write("- Search for the tallest building in the world.")
+st.sidebar.write("- Search for the tallest building in the world.")