import os
import gradio as gr
from docx import Document
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate
from langchain_core.output_parsers import StrOutputParser
from langchain_core.runnables import RunnablePassthrough
from langchain_community.document_loaders import TextLoader
# Configuration
SECRET_KEY = os.getenv("OPENAI_API_KEY", "")  # Read the API key from the environment; never hard-code secrets in source
RUNBOOK_DIR = "./runbooks"
# Initialize LLMs
llm = ChatOpenAI(model="gpt-4o", temperature=0.4, api_key=SECRET_KEY, streaming=True)
selector_llm = ChatOpenAI(model="gpt-4o", temperature=0, api_key=SECRET_KEY)
llm_recc = ChatOpenAI(api_key=SECRET_KEY, model="gpt-4o")
output_parser = StrOutputParser()
previous_selected_runbook = ""
# Load runbooks
def load_runbooks():
    runbooks = {}
    for file in os.listdir(RUNBOOK_DIR):
        path = os.path.join(RUNBOOK_DIR, file)
        try:
            if file.endswith(".txt"):
                # Load text files using TextLoader
                loader = TextLoader(path)
                docs = loader.load()
                content = "\n".join([doc.page_content for doc in docs])
            elif file.endswith(".docx"):
                # Load .docx files using python-docx
                doc = Document(path)
                content = "\n".join([para.text for para in doc.paragraphs])
            else:
                # Skip unsupported file types
                continue
            # Add the file's content to the runbooks dictionary
            runbooks[file] = content
        except Exception as e:
            print(f"Error loading file {file}: {e}")
    return runbooks

RUNBOOKS = load_runbooks()
RUNBOOK_NAMES = list(RUNBOOKS.keys())
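# Sketch of the resulting structure (file names are illustrative, not real files):
# RUNBOOKS = {"increase_mail_size.docx": "Step 1: ...", "outlook_calendar.txt": "Step 1: ..."}
# RUNBOOK_NAMES = ["increase_mail_size.docx", "outlook_calendar.txt"]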
# Prompt templates with roles
system_prompt = SystemMessagePromptTemplate.from_template(
    "You are an IT support assistant. Respond using only the immediate next step based strictly on the runbook content. Never provide multiple actions. Escalate only when the user explicitly asks."
)
user_prompt = HumanMessagePromptTemplate.from_template(
    "Runbook Names:\n{runbook_names}\nRunbook Content:\n{runbook_contents}\nConversation History:\n{conversation_history}\nUser: {user_message}"
)
assistant_prompt = AIMessagePromptTemplate.from_template("Assistant:")
selector_prompt = ChatPromptTemplate.from_template("""
Choose the best runbook from:
{runbook_names}
User: {user_message}
Selected:
""")
recc_template = ChatPromptTemplate.from_template("""
You are a support agent assistant analyzing user cases. The test case shows the user's conversation with the AI assistant so far.
Now the user wants to talk to a human. Based on the test case and runbook below,
suggest up to 3 recommendations the human agent can use to continue the conversation from the step where the user is stuck. For each recommendation:
1. Reference specific steps from the runbook; the steps must be exactly present in the runbook
2. Add a confidence score (70-100% if directly supported by the runbook, 50-69% if inferred)
3. Prioritize the most critical actions first
4. Strictly do not output anything that is not present in the runbook.
Test Case: {test_case}
Case Description: {description}
Runbook Content: {runbook}
Generate up to 3 recommendations strictly in this format:
1. [Action] (Confidence: X%) - [Reasoning]
2. [Action] (Confidence: X%) - [Reasoning]
""")
# File readers
def read_test_case(file_path):
    try:
        with open(file_path, "r") as f:
            return f.read()
    except FileNotFoundError:
        raise FileNotFoundError(f"Test case file not found at {file_path}")
def read_runbook(file_path):
    try:
        return Document(file_path)
    except Exception as e:
        # python-docx raises PackageNotFoundError (not FileNotFoundError) for a missing file
        raise FileNotFoundError(f"Runbook file not found at {file_path}") from e
def get_recommendations(test_case, runbook_path):
    doc = read_runbook(runbook_path)
    runbook_text = "\n".join(para.text for para in doc.paragraphs)
    description = os.path.basename(runbook_path)
    return llm_recc.invoke(recc_template.format(
        test_case=test_case, description=description, runbook=runbook_text
    )).content
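# Illustrative usage (the runbook path is hypothetical):
# recs = get_recommendations("User: My mailbox is full...", "./runbooks/increase_mail_size.docx")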
def respond(message, history):
    global previous_selected_runbook
    escalation_buffer = ""
    buffer = ""
    # Select a runbook, reusing the previous selection within an ongoing conversation
    if previous_selected_runbook:
        selected_runbook = previous_selected_runbook
    else:
        selected = selector_llm.invoke(selector_prompt.format(
            runbook_names="\n".join(RUNBOOK_NAMES),
            user_message=message
        )).content.strip()
        selected_runbook = next((rb for rb in RUNBOOKS if rb in selected), "")
        previous_selected_runbook = selected_runbook
    runbook_content = "\n".join([f"--- {k} ---\n{v}" for k, v in RUNBOOKS.items()])
    # history is a list of (user, assistant) pairs; label each side explicitly
    conversation_history = "\n".join([f"User: {user}\nAssistant: {assistant}" for user, assistant in history])
    # Escalation path: the user explicitly asked for a human agent
    if "human" in message.lower():
        if selected_runbook not in RUNBOOKS:
            yield ("Escalating to human agent...", "No matching runbook was selected for this conversation.", selected_runbook)
            return
        conversation_text = conversation_history + f"\nUser: {message}"
        buffer = "Escalating to human agent..."
        for token in llm_recc.stream(recc_template.format(
                test_case=conversation_text,
                description=os.path.basename(selected_runbook),
                runbook=RUNBOOKS[selected_runbook])):
            escalation_buffer += token.content
            yield (buffer, escalation_buffer, selected_runbook)
        return
    full_prompt = ChatPromptTemplate.from_messages([
        system_prompt,
        user_prompt,
        assistant_prompt
    ])
    # Normal path: stream the next runbook step; format_messages preserves the
    # system/human/ai roles instead of flattening them into a single string
    for token in llm.stream(full_prompt.format_messages(
        runbook_names="\n".join(RUNBOOK_NAMES),
        runbook_contents=runbook_content,
        conversation_history=conversation_history,
        user_message=message
    )):
        buffer += token.content
        yield (buffer, escalation_buffer, selected_runbook)
# UI Setup
def clear_conversation():
    global previous_selected_runbook
    previous_selected_runbook = ""  # Forget the runbook selection along with the chat history
    return [], "", "", "No runbook selected"
with gr.Blocks() as demo:
    gr.Markdown("# IT Support Assistant")
    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Available Runbooks")
            gr.Markdown("\n".join([f"- **{name}**" for name in RUNBOOK_NAMES]))
            selected_runbook_display = gr.Markdown("No runbook selected")
    with gr.Row():
        with gr.Column(scale=3):
            chat = gr.ChatInterface(
                respond,
                additional_outputs=[
                    gr.Textbox(label="Escalation Recommendations", lines=5, value=""),
                    selected_runbook_display
                ],
                examples=["Increase Mail Size", "Outlook calendar not responding"],
                cache_examples=False
            )
    with gr.Row():
        clear_button = gr.Button("Clear Conversation")
        clear_button.click(
            clear_conversation,
            outputs=[
                chat.chatbot,
                chat.additional_outputs[0],
                chat.textbox,
                selected_runbook_display
            ]
        )

demo.queue().launch()
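# To run locally (assuming this file is saved as app.py and OPENAI_API_KEY is set):
#   pip install gradio langchain-openai langchain-community python-docx
#   python app.py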