from typing import TypedDict, Dict
from langgraph.graph import StateGraph, END
from langchain_core.prompts import ChatPromptTemplate
from langchain_core.runnables.graph import MermaidDrawMethod
from IPython.display import display, Image
class State(TypedDict):
    query: str
    category: str
    sentiment: str
    response: str
import os

from langchain_groq import ChatGroq

llm = ChatGroq(
    temperature=0,
    groq_api_key=os.environ["GROQ_API_KEY"],  # set GROQ_API_KEY in your environment; never hardcode keys
    model_name="llama-3.3-70b-versatile",
)
# Quick smoke test that the model is reachable
result = llm.invoke("What is langchain?")
print(result.content)
def categorize(state: State) -> State:
    """Categorize the customer query as Technical, Billing, or General."""
    prompt = ChatPromptTemplate.from_template(
        "Categorize the following customer query into one of these categories: "
        "Technical, Billing, General. Query: {query}"
    )
    chain = prompt | llm
    category = chain.invoke({"query": state["query"]}).content
    return {"category": category}
def analyze_sentiment(state: State) -> State:
    """Classify the sentiment of the customer query as Positive, Negative, or Neutral."""
    prompt = ChatPromptTemplate.from_template(
        "Analyze the sentiment of the following customer query. "
        "Respond with either 'Positive', 'Negative', or 'Neutral'. Query: {query}"
    )
    chain = prompt | llm
    sentiment = chain.invoke({"query": state["query"]}).content
    return {"sentiment": sentiment}
def handle_technical(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Provide a technical response to the following customer query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}
def handle_billing(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Provide a billing response to the following customer query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}
def handle_general(state: State) -> State:
    prompt = ChatPromptTemplate.from_template(
        "Provide a general response to the following customer query: {query}"
    )
    chain = prompt | llm
    response = chain.invoke({"query": state["query"]}).content
    return {"response": response}
def escalate(state: State) -> State:
    return {"response": "This query has been escalated to a human agent due to its negative sentiment."}
def route_query(state: State) -> str:
    """Route negative-sentiment queries to escalation, otherwise to the matching category handler."""
    if state["sentiment"] == "Negative":
        return "escalate"
    elif state["category"] == "Technical":
        return "handle_technical"
    elif state["category"] == "Billing":
        return "handle_billing"
    else:
        return "handle_general"
workflow = StateGraph(State)
workflow.add_node("categorize", categorize)
workflow.add_node("analyze_sentiment", analyze_sentiment)
workflow.add_node("handle_technical", handle_technical)
workflow.add_node("handle_billing", handle_billing)
workflow.add_node("handle_general", handle_general)
workflow.add_node("escalate", escalate)
workflow.add_edge("categorize", "analyze_sentiment")
workflow.add_conditional_edges(
    "analyze_sentiment",
    route_query,
    {
        "handle_technical": "handle_technical",
        "handle_billing": "handle_billing",
        "handle_general": "handle_general",
        "escalate": "escalate",
    },
)
workflow.add_edge("handle_technical", END)
workflow.add_edge("handle_billing", END)
workflow.add_edge("handle_general", END)
workflow.add_edge("escalate", END)
workflow.set_entry_point("categorize")
app = workflow.compile()
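# Optional: render the compiled graph. This is a sketch that assumes the MermaidDrawMethod,
# display, and Image imports above were intended for this step; MermaidDrawMethod.API sends
# the graph to the mermaid.ink service, so it needs network access.
display(
    Image(
        app.get_graph().draw_mermaid_png(
            draw_method=MermaidDrawMethod.API,
        )
    )
)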
def run_customer_support(query: str) -> Dict[str, str]:
    """Run the full workflow for a query and return its category, sentiment, and response."""
    results = app.invoke({"query": query})
    return {
        "category": results["category"],
        "sentiment": results["sentiment"],
        "response": results["response"],
    }
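# Quick sanity check with an illustrative sample query; the exact category and sentiment
# strings depend on the model's output.
sample_result = run_customer_support("My internet has been down for two days and I am very frustrated.")
print(sample_result["category"], "|", sample_result["sentiment"])
print(sample_result["response"])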
import gradio as gr
# Create the Gradio interface
def gradio_interface(query: str) -> str:
    result = run_customer_support(query)
    return (
        f"**Category:** {result['category']}\n\n"
        f"**Sentiment:** {result['sentiment']}\n\n"
        f"**Response:** {result['response']}"
    )
# Build the gradio app
iface = gr.Interface(
    fn=gradio_interface,
    theme="Yntec/Ha1eyCH_Theme_Orange_Green",
    inputs=gr.Textbox(lines=2, placeholder="Enter your query here..."),
    outputs="markdown",
    title="I am your customer support assistant. How can I help you?",
    description="Provide a query and receive a categorized response.",
)
# Launch the app
iface.launch()