"""Gradio chat interface for the RAG agent.

Wires a Blocks UI to a LangGraph-based agent that answers from a local
ChromaDB knowledge base, web search (Tavily), Wikipedia, and ArXiv.
"""

import traceback

import gradio as gr
from langchain_core.messages import HumanMessage, AIMessage

from core.rag_agent import RAGAgent
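# Third-party requirements are gradio and langchain-core (the usual PyPI
# names; exact versions untested). core.rag_agent is this project's own module.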

# Lazily-initialized module-level singleton: the agent (and its vector store)
# is constructed on first use rather than at import time.
rag_agent = None

def initialize_agent():
    """Initialize the RAG agent lazily."""
    global rag_agent
    if rag_agent is None:
        rag_agent = RAGAgent()
    return rag_agent

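# Note: this lazy singleton is not guarded against concurrent first calls.
# If handlers may run concurrently, a lock is one way to make the
# initialization race-free (a sketch, not part of the original code):
#
#     import threading
#     _agent_lock = threading.Lock()
#
#     def initialize_agent():
#         global rag_agent
#         with _agent_lock:
#             if rag_agent is None:
#                 rag_agent = RAGAgent()
#         return rag_agent
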
def chat_with_agent(message, history):
    """Handle one chat turn with the RAG agent."""
    if not message.strip():
        return history

    try:
        agent = initialize_agent()

        # Convert Gradio's messages-format history (a list of role/content
        # dicts) into LangChain message objects.
        messages = []
        if history:
            for msg_dict in history:
                if msg_dict["role"] == "user":
                    messages.append(HumanMessage(content=msg_dict["content"]))
                elif msg_dict["role"] == "assistant":
                    messages.append(AIMessage(content=msg_dict["content"]))

        messages.append(HumanMessage(content=message))

        initial_state = {
            "messages": messages,
        }

        # Run the LangGraph agent; get_config() supplies the thread id used
        # for conversation memory.
        result = agent.agent_graph.invoke(
            initial_state,
            config=agent.get_config(),
        )

        # The graph returns the full message list; the answer is the last AIMessage.
        result_messages = result.get("messages", [])
        ai_messages = [m for m in result_messages if isinstance(m, AIMessage)]

        if ai_messages:
            response = ai_messages[-1].content

            # Annotate the answer with the retrieval source the agent chose.
            rag_method = result.get("rag_method", "UNKNOWN")
            response_with_metadata = f"{response}\n\n*[Source: {rag_method}]*"

            return history + [
                {"role": "user", "content": message},
                {"role": "assistant", "content": response_with_metadata},
            ]
        else:
            return history + [
                {"role": "user", "content": message},
                {"role": "assistant", "content": "⚠️ No response generated. Please try again."},
            ]

    except Exception as e:
        error_msg = f"❌ Error: {e}"
        print(f"Chat error: {e}")
        traceback.print_exc()

        return history + [
            {"role": "user", "content": message},
            {"role": "assistant", "content": error_msg},
        ]

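# Example (hypothetical) direct call, bypassing the UI -- handy as a quick
# smoke test from a REPL; the empty list is a fresh messages-format history:
#
#     history = chat_with_agent("What is DeepAnalyze?", [])
#     print(history[-1]["content"])
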
def reset_conversation():
    """Reset the conversation thread."""
    global rag_agent
    if rag_agent:
        # Start a new thread so prior turns no longer influence answers.
        rag_agent.reset_thread()
    return []

def create_gradio_ui():
    """Create the complete Gradio interface."""
    with gr.Blocks(title="RAG Agent with Agentic Memory") as demo:
        gr.Markdown("""
        # 🤖 RAG Agent with Agentic Memory

        Chat with an intelligent agent that uses:
        - 📚 **Local Knowledge Base** (ChromaDB) - Research papers on DeepAnalyze, AgentMem, SAM3, etc.
        - 🌐 **Web Search** (Tavily) - Real-time information and current events
        - 📖 **Wikipedia** - General knowledge
        - 📄 **ArXiv** - Academic papers
        """)

        with gr.Row():
            with gr.Column(scale=4):
                gr.Markdown("### 💬 Chat Interface")

                chatbot = gr.Chatbot(
                    label="Conversation",
                    height=500,
                    show_label=False,
                    type="messages",  # history is a list of role/content dicts (see chat_with_agent)
                )

                with gr.Row():
                    msg = gr.Textbox(
                        label="Your Message",
                        placeholder="Ask me anything about your documents or general knowledge...",
                        scale=5,
                        show_label=False,
                    )
                    submit_btn = gr.Button("Send 📤", variant="primary", scale=1)

                with gr.Row():
                    clear_btn = gr.Button("🔄 Reset Conversation", variant="secondary")

            with gr.Column(scale=1):
                gr.Markdown("### 📊 Agent Status")
                # Placeholder; not currently updated by any event handler.
                status_box = gr.Markdown("*Ready*")

                gr.Markdown("### 💡 Example Queries")
                gr.Markdown("""
                **Local Documents (RAG):**
                - What is DeepAnalyze?
                - Explain SAM 3 architecture
                - What is AgentMem?

                **Web Search:**
                - Latest AI news in 2025
                - Current events in technology

                **General:**
                - What is 15 × 7?
                - Explain machine learning
                """)

        def submit_message(message, history):
            """Handle message submission; the second output clears the textbox."""
            if not message.strip():
                return history, ""

            new_history = chat_with_agent(message, history)
            return new_history, ""

        msg.submit(
            fn=submit_message,
            inputs=[msg, chatbot],
            outputs=[chatbot, msg],
        )

        submit_btn.click(
            fn=submit_message,
            inputs=[msg, chatbot],
            outputs=[chatbot, msg],
        )

        clear_btn.click(
            fn=reset_conversation,
            outputs=[chatbot],
        )

        gr.Markdown("""
        ---
        ### 🔧 How it works:
        1. **Type your question** in the text box
        2. The agent will:
           - 🧠 Analyze your query to determine the best source
           - 🔍 Search relevant sources (Local docs, Web, Wikipedia)
           - 📝 Generate a comprehensive answer
           - 💾 Remember conversation context for follow-up questions
        3. Use **Reset Conversation** to start a new thread

        ---
        *Powered by LangGraph + LangChain + ChromaDB + Anthropic Claude*
        """)

    return demo

if __name__ == "__main__":
    demo = create_gradio_ui()
    print("🚀 Starting Gradio interface...")
    print("🌐 Running on: http://127.0.0.1:7860")
    demo.launch(
        share=False,
        server_name="127.0.0.1",
        server_port=7860,
        show_error=True,
    )