Upload 2 files
- pipeline.ipynb +236 -0
- pipeline.py +211 -0
pipeline.ipynb
ADDED
@@ -0,0 +1,236 @@
(Single-cell Jupyter notebook. The cell's source is the same pipeline code as pipeline.py; see that file below for the full listing. Notebook metadata: kernelspec "crawl_data" (python3), Python 3.10.13, nbformat 4.2.)
pipeline.py
ADDED
@@ -0,0 +1,211 @@
from langchain.chat_models import ChatOpenAI
from langchain.memory import ConversationBufferMemory
from langchain.prompts import PromptTemplate
from langchain.chains import LLMChain
from langchain.utilities import SQLDatabase
from sqlalchemy import create_engine
import json  # Used to parse the slot-filling output

# --- Initialize Core Components ---

# 1. Dialogue Context (Memory)
memory = ConversationBufferMemory()

# 2. LLM (for routing, service selection, state tracking, and response generation)
# Requires an OpenAI API key in the OPENAI_API_KEY environment variable.
llm = ChatOpenAI(temperature=0, model_name="gpt-3.5-turbo")  # Or another suitable model

# 3. Database (using an in-memory SQLite engine for demonstration)
engine = create_engine("sqlite:///:memory:")
db = SQLDatabase(engine)

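# The in-memory database starts empty, so the check_order_status lookup below
# would never find a row. A minimal sketch to seed demo data (the `orders`
# schema and the sample row are assumptions for illustration only):
from sqlalchemy import text

with engine.begin() as conn:
    conn.execute(text("CREATE TABLE orders (order_id TEXT PRIMARY KEY, status TEXT)"))
    conn.execute(text("INSERT INTO orders VALUES ('A123', 'shipped')"))
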
# --- Define Prompts ---

# Router Prompt
router_template = """
You are a helpful assistant that classifies user input into two categories:

1. open-domain: General conversation, chit-chat, or questions not related to a specific task.
2. task-oriented: The user wants to perform a specific action or get information related to a predefined service.

Based on the dialogue history, classify the latest user input:

{chat_history}

User: {user_input}

Classification:
"""
router_prompt = PromptTemplate(
    input_variables=["chat_history", "user_input"], template=router_template
)

# Service Selection Prompt
service_selection_template = """
You are a helpful assistant that classifies user input into one of the following predefined services:

Services:
- book_flight: For booking flight tickets.
- check_order_status: For checking the status of an order.
- find_restaurants: For finding restaurants based on criteria.

Based on the dialogue history, which service best matches the user's intent?

{chat_history}

User: {user_input}

Selected Service:
"""
service_selection_prompt = PromptTemplate(
    input_variables=["chat_history", "user_input"],
    template=service_selection_template,
)

# Dialogue State Tracking Prompt
state_tracking_template = """
You are a helpful assistant that extracts information from user input to fill in the slots for a specific service.

Service: {service}
Slots: {slots}

Based on the dialogue history, extract the values for each slot from the conversation.
Return the output in JSON format. If a slot is not filled, use null as the value.

{chat_history}

User: {user_input}

Extracted Information (JSON):
"""
state_tracking_prompt = PromptTemplate(
    input_variables=["service", "slots", "chat_history", "user_input"],
    template=state_tracking_template,
)

# Response Generation Prompt
response_generation_template = """
You are a helpful assistant that generates natural language responses to the user.

Dialogue History:
{chat_history}

User: {user_input}

{slot_info}

{db_results}

Response:
"""
response_generation_prompt = PromptTemplate(
    input_variables=["chat_history", "user_input", "slot_info", "db_results"],
    template=response_generation_template,
)

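# For instance, for book_flight with the input "Book me a flight to Paris for
# two people", the state tracker is expected to return JSON along the lines of
# (illustrative only; actual model output may vary):
# {"destination": "Paris", "departure_date": null, "num_passengers": 2}
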
# --- Define Chains ---

router_chain = LLMChain(llm=llm, prompt=router_prompt, output_key="classification")
service_selection_chain = LLMChain(
    llm=llm, prompt=service_selection_prompt, output_key="service"
)
state_tracking_chain = LLMChain(
    llm=llm, prompt=state_tracking_prompt, output_key="slot_json"
)
response_generation_chain = LLMChain(
    llm=llm, prompt=response_generation_prompt, output_key="response"
)

# --- Define Service Slots ---
# (In a real application, this would likely be loaded from a configuration file or database.)
service_slots = {
    "book_flight": ["destination", "departure_date", "num_passengers"],
    "check_order_status": ["order_id"],
    "find_restaurants": ["cuisine", "location", "price_range"],
}

# --- Main Dialogue Loop ---

def process_user_input(user_input):
    # 1. Add user input to memory
    memory.chat_memory.add_user_message(user_input)

    # Load the formatted history string once per turn.
    chat_history = memory.load_memory_variables({})["history"]

    # 2. Route the input
    router_output = router_chain(
        {"chat_history": chat_history, "user_input": user_input}
    )
    classification = router_output["classification"].strip().lower()

    print(f"Router Classification: {classification}")

    if "open-domain" in classification:
        # 3. Handle open-domain conversation by passing the raw message list
        # to the chat model (ChatOpenAI expects messages, not a plain string).
        llm_response = llm(memory.chat_memory.messages)
        response = llm_response.content
    else:
        # 4. Select the service
        service_output = service_selection_chain(
            {"chat_history": chat_history, "user_input": user_input}
        )
        service = service_output["service"].strip()

        print(f"Selected Service: {service}")

        if service not in service_slots:
            response = (
                "I'm sorry, I can't handle that request yet. I currently support "
                "booking flights, checking order status, and finding restaurants."
            )
        else:
            # 5. Track the dialogue state (slot filling)
            slots = service_slots[service]
            state_output = state_tracking_chain(
                {
                    "service": service,
                    "slots": ", ".join(slots),
                    "chat_history": chat_history,
                    "user_input": user_input,
                }
            )
            slot_json_str = state_output["slot_json"].strip()

            print(f"Slot Filling Output (JSON): {slot_json_str}")

            try:
                slot_values = json.loads(slot_json_str)
            except json.JSONDecodeError:
                # Fall back gracefully if the model did not return valid JSON.
                slot_values = {}
                slot_json_str = "{}"

            # (Optional) 6. Database interaction (based on service and filled slots)
            db_results = ""
            if service == "check_order_status" and slot_values.get("order_id"):
                try:
                    order_id = slot_values["order_id"]
                    # NOTE: string interpolation is acceptable for this demo, but
                    # real code should use parameterized queries to avoid SQL injection.
                    db_results = db.run(f"SELECT * FROM orders WHERE order_id = '{order_id}'")
                    db_results = f"Database Results: {db_results}"
                except Exception as e:
                    print(f"Error during database query: {e}")
                    db_results = ""

            # 7. Generate the response
            response_output = response_generation_chain(
                {
                    "chat_history": chat_history,
                    "user_input": user_input,
                    "slot_info": f"Slots: {slot_json_str}",
                    "db_results": db_results,
                }
            )
            response = response_output["response"]

    # 8. Add the system response to memory
    memory.chat_memory.add_ai_message(response)

    return response

# --- Example Usage ---

if __name__ == "__main__":
    # Simple REPL; type "exit" to quit.
    while True:
        user_input = input("You: ")
        if user_input.lower() == "exit":
            break
        response = process_user_input(user_input)
        print(f"AI: {response}")
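# To exercise the pipeline non-interactively, the REPL above can be replaced
# with a scripted set of turns (a sketch; the utterances are invented and real
# model output will vary):
#
# for turn in ["Hi there!", "I'd like to book a flight to Paris for two people"]:
#     print("You:", turn)
#     print("AI:", process_user_input(turn))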