import os
import gradio as gr
from docx import Document
from langchain_openai import ChatOpenAI
from langchain_core.prompts import ChatPromptTemplate, HumanMessagePromptTemplate, SystemMessagePromptTemplate, AIMessagePromptTemplate
from langchain_community.document_loaders import TextLoader

# Configuration
# Read the OpenAI API key from the environment instead of hard-coding it in source.
SECRET_KEY = os.environ["OPENAI_API_KEY"]
RUNBOOK_DIR = "./runbooks"  # scanned once at startup for .txt/.docx runbooks

# Initialize LLMs
llm = ChatOpenAI(model="gpt-4o", temperature=0.4, api_key=SECRET_KEY, streaming=True)  # main chat model (streams)
selector_llm = ChatOpenAI(model="gpt-4o", temperature=0, api_key=SECRET_KEY)  # deterministic runbook selector
llm_recc = ChatOpenAI(model="gpt-4o", api_key=SECRET_KEY)  # escalation recommendations
previous_selected_runbook = ""  # sticky runbook selection for the current conversation

# Load runbooks
def load_runbooks():
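    """Scan RUNBOOK_DIR and return a {filename: text} mapping for .txt and .docx files."""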
    runbooks = {}
    for file in os.listdir(RUNBOOK_DIR):
        path = os.path.join(RUNBOOK_DIR, file)
        try:
            if file.endswith(".txt"):
                # Load text files using TextLoader
                loader = TextLoader(path)
                docs = loader.load()
                content = "\n".join([doc.page_content for doc in docs])
            
            elif file.endswith(".docx"):
                # Load .docx files using python-docx
                doc = Document(path)
                content = "\n".join([para.text for para in doc.paragraphs])
            
            else:
                # Skip unsupported file types
                continue
            
            # Add the file's content to the runbooks dictionary
            runbooks[file] = content
        
        except Exception as e:
            print(f"Error loading file {file}: {e}")
    
    return runbooks

RUNBOOKS = load_runbooks()
RUNBOOK_NAMES = list(RUNBOOKS.keys())

# Prompt templates with roles
system_prompt = SystemMessagePromptTemplate.from_template(
    "You are an IT support assistant. Respond with only the immediate next step, based strictly on the runbook content. Never provide multiple actions. Escalate only when the user explicitly asks."
)

user_prompt = HumanMessagePromptTemplate.from_template(
    "Runbook Names:\n{runbook_names}\nRunbook Content:\n{runbook_contents}\nConversation History:\n{conversation_history}\nUser: {user_message}"
)

assistant_prompt = AIMessagePromptTemplate.from_template("Assistant:")

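# Selector prompt: asks the model to name the single best-matching runbook at the start of a conversation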
selector_prompt = ChatPromptTemplate.from_template("""
Choose the best runbook from:
{runbook_names}

User: {user_message}
Selected:
""")

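# Escalation prompt: produces agent-facing suggestions grounded strictly in the selected runbook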
recc_template = ChatPromptTemplate.from_template("""
You are a support agent assistant analyzing user cases. The test case shows the conversation the user has had with the AI assistant so far.
The user now wants to talk to a human. Based on the test case and the runbook below,
suggest up to 3 recommendations the human agent can use to continue the conversation from the step where the user is stuck. For each recommendation:
1. Reference specific steps from the runbook; the steps must appear verbatim in the runbook
2. Add a confidence score (70-100% if directly supported by the runbook, 50-69% if inferred)
3. Prioritize the most critical actions first
4. Do not output anything that is not present in the runbook.

Test Case: {test_case}
Case Description: {description}
Runbook Content: {runbook}

Generate up to 3 recommendations strictly in this format:
1. [Action] (Confidence: X%) - [Reasoning]
2. [Action] (Confidence: X%) - [Reasoning]
""")

# File readers
def read_test_case(file_path):
    try:
        with open(file_path, "r") as f:
            return f.read()
    except FileNotFoundError:
        raise FileNotFoundError(f"Test case file not found at {file_path}")

def read_runbook(file_path):
    try:
        return Document(file_path)
    except FileNotFoundError:
        raise FileNotFoundError(f"Runbook file not found at {file_path}")

def get_recommendations(test_case, runbook_path):
    # Extract plain text from the .docx runbook, then invoke the recommendation model.
    runbook_text = "\n".join(para.text for para in read_runbook(runbook_path).paragraphs)
    return llm_recc.invoke(recc_template.format(
        test_case=test_case, description=os.path.basename(runbook_path), runbook=runbook_text
    )).content
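# Example usage (hypothetical paths):
#   print(get_recommendations(read_test_case("./cases/case1.txt"), "./runbooks/outlook.docx"))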

def respond(message, history):
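    """Streaming chat handler: yields (reply, escalation_recommendations, selected_runbook) for the UI."""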
    global previous_selected_runbook
    escalation_buffer = ""
    buffer = ""

    # Reuse the runbook chosen earlier in this conversation; otherwise ask the selector model once
    if previous_selected_runbook:
        selected_runbook = previous_selected_runbook
    else:
        selected = selector_llm.invoke(selector_prompt.format(
            runbook_names="\n".join(RUNBOOK_NAMES),
            user_message=message
        )).content.strip()
        selected_runbook = next((rb for rb in RUNBOOKS if rb in selected), "")
        previous_selected_runbook = selected_runbook

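    # The main prompt receives every runbook's content; the selected runbook is only used for escalation.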
    runbook_content = "\n".join([f"--- {k} ---\n{v}" for k, v in RUNBOOKS.items()])
    conversation_history = "\n".join([f"{turn[0]}: {turn[1]}" for turn in history])  # assumes tuple-style history [(user, bot), ...]

    # Escalation path: on an explicit request for a human, stream agent-facing recommendations.
    if "human" in message.lower():
        conversation_text = conversation_history + f"\nUser: {message}"
        buffer = "Escalating to human agent..."
        for token in llm_recc.stream(recc_template.format(
            test_case=conversation_text,
            description=os.path.basename(selected_runbook),
            runbook=RUNBOOKS.get(selected_runbook, ""))):  # .get() avoids a KeyError if selection failed
            escalation_buffer += token.content
            yield (buffer, escalation_buffer, selected_runbook)
        return

    full_prompt = ChatPromptTemplate.from_messages([
        system_prompt,
        user_prompt,
        assistant_prompt
    ])

    for token in llm.stream(full_prompt.format(
        runbook_names="\n".join(RUNBOOK_NAMES),
        runbook_contents=runbook_content,
        conversation_history=conversation_history,
        user_message=message
    )):
        buffer += token.content
        yield (buffer, escalation_buffer, selected_runbook)

# UI Setup
def clear_conversation():
    global previous_selected_runbook
    previous_selected_runbook = ""  # forget the sticky runbook selection too
    return [], "", "", "No runbook selected"

with gr.Blocks() as demo:
    gr.Markdown("# IT Support Assistant")

    with gr.Row():
        with gr.Column(scale=1):
            gr.Markdown("### Available Runbooks")
            gr.Markdown("\n".join([f"- **{name}**" for name in RUNBOOK_NAMES]))
            selected_runbook_display = gr.Markdown("No runbook selected")

    with gr.Row():
        with gr.Column(scale=3):
            chat = gr.ChatInterface(
                respond,
                additional_outputs=[
                    gr.Textbox(label="Escalation Recommendations", lines=5, value=""),
                    selected_runbook_display
                ],
                examples=["Increase Mail Size", "Outlook calendar not responding"],
                cache_examples=False
            )

    with gr.Row():
        clear_button = gr.Button("Clear Conversation")

    clear_button.click(
        clear_conversation,
        outputs=[
            chat.chatbot,
            chat.additional_outputs[0],
            chat.textbox,
            selected_runbook_display
        ]
    )

demo.queue().launch()