{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from langchain.chat_models import ChatOpenAI\n",
    "from langchain.memory import ConversationBufferMemory\n",
    "from langchain.prompts import PromptTemplate\n",
    "from langchain.chains import LLMChain\n",
    "from langchain.utilities import SQLDatabase\n",
    "from sqlalchemy import create_engine  # SQLAlchemy engine factory for the demo DB\n",
    "# NOTE(review): these `langchain.*` import paths are the legacy (<0.1) layout;\n",
    "# newer releases moved them to langchain_community / langchain_openai — confirm\n",
    "# against the installed version.\n",
    "\n",
    "# --- Initialize Core Components ---\n",
    "\n",
    "# 1. Dialogue Context (Memory): buffers the full turn-by-turn history as plain text.\n",
    "memory = ConversationBufferMemory()\n",
    "\n",
    "# 2. LLM (for routing, service selection, state tracking, and response generation)\n",
    "# temperature=0 keeps the classification/extraction steps as deterministic as possible.\n",
    "llm = ChatOpenAI(temperature=0, model_name=\"gpt-3.5-turbo\")  # Or another suitable model\n",
    "\n",
    "# 3. Database (using SQLite in-memory for demonstration)\n",
    "# NOTE(review): the in-memory DB starts with no tables, so any query against it\n",
    "# (e.g. the orders lookup further down) will fail until a schema is created.\n",
    "engine = create_engine(\"sqlite:///:memory:\")  # Create an in-memory SQLite engine\n",
    "db = SQLDatabase(engine)  # Pass the engine to SQLDatabase\n",
    "\n",
    "# --- Define Prompts ---\n",
    "\n",
    "# Each raw template below is compiled with PromptTemplate.from_template(), which\n",
    "# infers the input variables ({chat_history}, {user_input}, ...) directly from\n",
    "# the placeholders instead of listing them by hand.\n",
    "\n",
    "# Router: open-domain chit-chat vs. task-oriented request.\n",
    "ROUTER_TEMPLATE = \"\"\"\n",
    "You are a helpful assistant that classifies user input into two categories:\n",
    "\n",
    "1. open-domain: General conversation, chit-chat, or questions not related to a specific task.\n",
    "2. task-oriented: The user wants to perform a specific action or get information related to a predefined service.\n",
    "\n",
    "Based on the dialogue history, classify the latest user input:\n",
    "\n",
    "{chat_history}\n",
    "\n",
    "User: {user_input}\n",
    "\n",
    "Classification:\n",
    "\"\"\"\n",
    "router_prompt = PromptTemplate.from_template(ROUTER_TEMPLATE)\n",
    "\n",
    "# Service selection: map a task-oriented turn onto one predefined service.\n",
    "SERVICE_SELECTION_TEMPLATE = \"\"\"\n",
    "You are a helpful assistant that classifies user input into one of the following predefined services:\n",
    "\n",
    "Services:\n",
    "- book_flight: For booking flight tickets.\n",
    "- check_order_status: For checking the status of an order.\n",
    "- find_restaurants: For finding restaurants based on criteria.\n",
    "\n",
    "Based on the dialogue history, which service best matches the user's intent?\n",
    "\n",
    "{chat_history}\n",
    "\n",
    "User: {user_input}\n",
    "\n",
    "Selected Service:\n",
    "\"\"\"\n",
    "service_selection_prompt = PromptTemplate.from_template(SERVICE_SELECTION_TEMPLATE)\n",
    "\n",
    "# Dialogue state tracking: extract slot values as JSON for the chosen service.\n",
    "STATE_TRACKING_TEMPLATE = \"\"\"\n",
    "You are a helpful assistant that extracts information from user input to fill in the slots for a specific service.\n",
    "\n",
    "Service: {service}\n",
    "Slots: {slots}\n",
    "\n",
    "Based on the dialogue history, extract the values for each slot from the conversation. \n",
    "Return the output in JSON format. If a slot is not filled, use null as the value.\n",
    "\n",
    "{chat_history}\n",
    "\n",
    "User: {user_input}\n",
    "\n",
    "Extracted Information (JSON):\n",
    "\"\"\"\n",
    "state_tracking_prompt = PromptTemplate.from_template(STATE_TRACKING_TEMPLATE)\n",
    "\n",
    "# Response generation: turn history + slots + DB results into a user-facing reply.\n",
    "RESPONSE_GENERATION_TEMPLATE = \"\"\"\n",
    "You are a helpful assistant that generates natural language responses to the user.\n",
    "\n",
    "Dialogue History:\n",
    "{chat_history}\n",
    "\n",
    "User: {user_input}\n",
    "\n",
    "{slot_info}\n",
    "\n",
    "{db_results}\n",
    "\n",
    "Response:\n",
    "\"\"\"\n",
    "response_generation_prompt = PromptTemplate.from_template(RESPONSE_GENERATION_TEMPLATE)\n",
    "\n",
    "# --- Define Chains ---\n",
    "\n",
    "def _build_chain(prompt, output_key):\n",
    "    \"\"\"Wrap the shared `llm` and one prompt into an LLMChain with a named output key.\"\"\"\n",
    "    return LLMChain(llm=llm, prompt=prompt, output_key=output_key)\n",
    "\n",
    "router_chain = _build_chain(router_prompt, \"classification\")\n",
    "service_selection_chain = _build_chain(service_selection_prompt, \"service\")\n",
    "state_tracking_chain = _build_chain(state_tracking_prompt, \"slot_json\")\n",
    "response_generation_chain = _build_chain(response_generation_prompt, \"response\")\n",
    "\n",
    "# --- Define Service Slots ---\n",
    "# Maps each supported service to the slot names the state tracker must fill.\n",
    "# (In a real application, this would likely be loaded from a configuration file or database)\n",
    "service_slots = {\n",
    "    \"book_flight\": [\"destination\", \"departure_date\", \"num_passengers\"],\n",
    "    \"check_order_status\": [\"order_id\"],\n",
    "    \"find_restaurants\": [\"cuisine\", \"location\", \"price_range\"],\n",
    "}\n",
    "\n",
    "# --- Main Dialogue Loop ---\n",
    "\n",
    "def process_user_input(user_input):\n",
    "    \"\"\"Run one user turn through the dialogue pipeline and return the reply.\n",
    "\n",
    "    Steps: record the turn in memory, route it (open-domain vs. task-oriented),\n",
    "    select a service, extract slot values, optionally query the database, and\n",
    "    generate the final response, which is also appended to memory.\n",
    "    \"\"\"\n",
    "    import json  # stdlib; kept local so the function is self-contained on re-runs\n",
    "\n",
    "    # 1. Add user input to memory\n",
    "    memory.chat_memory.add_user_message(user_input)\n",
    "\n",
    "    # Fix: load_memory_variables() returns {\"history\": \"...\"}; the prompts expect\n",
    "    # the history string itself, not the whole dict (which rendered as its repr).\n",
    "    chat_history = memory.load_memory_variables({})[\"history\"]\n",
    "\n",
    "    # 2. Route the input\n",
    "    router_output = router_chain(\n",
    "        {\"chat_history\": chat_history, \"user_input\": user_input}\n",
    "    )\n",
    "    classification = router_output[\"classification\"].strip().lower()\n",
    "\n",
    "    print(f\"Router Classification: {classification}\")\n",
    "\n",
    "    # Substring match tolerates formatting noise such as \"Open-domain.\" from the LLM.\n",
    "    if \"open-domain\" in classification:\n",
    "        # 3. Handle open-domain conversation.\n",
    "        # Fix: ChatOpenAI.__call__ expects a list of messages; predict() takes and\n",
    "        # returns plain text, which is what the buffered history is.\n",
    "        response = llm.predict(chat_history)\n",
    "    else:\n",
    "        # 4. Select the service\n",
    "        service_output = service_selection_chain(\n",
    "            {\"chat_history\": chat_history, \"user_input\": user_input}\n",
    "        )\n",
    "        service = service_output[\"service\"].strip()\n",
    "\n",
    "        print(f\"Selected Service: {service}\")\n",
    "\n",
    "        if service not in service_slots:\n",
    "            response = \"I'm sorry, I cannot understand that service request yet. We currently support booking flights, checking order status and finding restaurants only.\"\n",
    "        else:\n",
    "            # 5. Track the dialogue state (slot filling)\n",
    "            slots = service_slots[service]\n",
    "            state_output = state_tracking_chain(\n",
    "                {\n",
    "                    \"service\": service,\n",
    "                    \"slots\": \", \".join(slots),\n",
    "                    \"chat_history\": chat_history,\n",
    "                    \"user_input\": user_input,\n",
    "                }\n",
    "            )\n",
    "            slot_json_str = state_output[\"slot_json\"].strip()\n",
    "\n",
    "            print(f\"Slot Filling Output (JSON): {slot_json_str}\")\n",
    "\n",
    "            try:\n",
    "                slot_values = json.loads(slot_json_str)\n",
    "            except json.JSONDecodeError:\n",
    "                # Fix: the original set this apology but then fell through to\n",
    "                # step 7, which silently overwrote it with a generated reply.\n",
    "                response = \"I'm sorry, there seems to be a problem understanding your request details.\"\n",
    "            else:\n",
    "                # (Optional) 6. Database interaction (based on service and filled slots)\n",
    "                db_results = \"\"\n",
    "                # isinstance guard: the LLM may emit a JSON list/scalar instead of an object.\n",
    "                if (\n",
    "                    service == \"check_order_status\"\n",
    "                    and isinstance(slot_values, dict)\n",
    "                    and \"order_id\" in slot_values\n",
    "                ):\n",
    "                    try:\n",
    "                        # Escape single quotes — order_id is untrusted LLM output and\n",
    "                        # the query is string-built (SQLDatabase.run takes raw SQL).\n",
    "                        order_id = str(slot_values[\"order_id\"]).replace(\"'\", \"''\")\n",
    "                        db_results = db.run(f\"SELECT * FROM orders WHERE order_id = '{order_id}'\")\n",
    "                        db_results = f\"Database Results: {db_results}\"\n",
    "                    except Exception as e:\n",
    "                        # Best-effort: the demo in-memory DB has no `orders` table, so\n",
    "                        # this lookup is expected to fail until a schema is created.\n",
    "                        print(f\"Error during database query: {e}\")\n",
    "                        db_results = \"\"\n",
    "\n",
    "                # 7. Generate the response\n",
    "                response_output = response_generation_chain(\n",
    "                    {\n",
    "                        \"chat_history\": chat_history,\n",
    "                        \"user_input\": user_input,\n",
    "                        \"slot_info\": f\"Slots: {slot_json_str}\",\n",
    "                        \"db_results\": db_results,\n",
    "                    }\n",
    "                )\n",
    "                response = response_output[\"response\"]\n",
    "\n",
    "    # 8. Add the system response to memory so the next turn sees it\n",
    "    memory.chat_memory.add_ai_message(response)\n",
    "\n",
    "    return response\n",
    "\n",
    "# --- Example Usage ---\n",
    "\n",
    "# Simple REPL: keep chatting until the user types \"exit\" (case-insensitive).\n",
    "while (user_input := input(\"You: \")).lower() != \"exit\":\n",
    "    print(f\"AI: {process_user_input(user_input)}\")"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "crawl_data",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.13"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}