{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "fbc1cebe",
   "metadata": {},
   "source": [
    "# Mistral Chat Completions with Elasticsearch Inference API\n",
    "\n",
    "This notebook demonstrates how to set up a Mistral chat completion inference endpoint in Elasticsearch and stream chat responses using the inference API.\n",
    "\n",
    "## Prerequisites\n",
    "- Elasticsearch cluster\n",
    "- Elasticsearch API key\n",
    "- Mistral API key"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a7f9f82",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Install the required third-party packages into the active kernel's environment\n",
    "%pip install requests tqdm elasticsearch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 30,
   "id": "f284f828",
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "import json\n",
    "from typing import Generator\n",
    "from tqdm import tqdm\n",
    "from elasticsearch import Elasticsearch\n",
    "import getpass"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "196bb0a3",
   "metadata": {},
   "source": [
    "## Configuration\n",
    "\n",
    "Set up your Elasticsearch and Mistral API credentials. For security, consider using environment variables."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 31,
   "id": "dd01b30e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Credentials - Enter your API keys securely\n",
    "# getpass prompts interactively, so no secrets are hardcoded or echoed in the notebook\n",
    "ELASTICSEARCH_URL = getpass.getpass(\"Enter your Elasticsearch URL: \").strip()\n",
    "ELASTICSEARCH_API_KEY = getpass.getpass(\"Enter your Elasticsearch API key: \")\n",
    "MISTRAL_API_KEY = getpass.getpass(\"Enter your Mistral API key: \")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5fdf1b27",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Configuration - no need to change these values\n",
    "MISTRAL_MODEL = \"mistral-large-latest\"  # Mistral chat model to use\n",
    "\n",
    "# Name for the chat-completion inference endpoint. The previous value\n",
    "# (\"mistral-embeddings-chat-completion\") misleadingly suggested an embeddings\n",
    "# endpoint; the name now reflects the actual task type. It is defined once\n",
    "# here and referenced everywhere else through this variable.\n",
    "INFERENCE_ENDPOINT_NAME = \"mistral-chat-completion\"\n",
    "\n",
    "# Headers for raw REST calls (the streaming chat endpoint is called via requests)\n",
    "ELASTICSEARCH_HEADERS = {\n",
    "    \"Authorization\": f\"ApiKey {ELASTICSEARCH_API_KEY}\",\n",
    "    \"Content-Type\": \"application/json\",\n",
    "}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "dc1f402c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize the Elasticsearch Python client (used for endpoint/index management\n",
    "# and search; the streaming chat calls below use requests directly)\n",
    "es_client = Elasticsearch(hosts=[ELASTICSEARCH_URL], api_key=ELASTICSEARCH_API_KEY)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "20d4ceda",
   "metadata": {},
   "source": [
    "## Create the Inference Endpoint\n",
    "\n",
    "Create the Mistral chat completion endpoint if it doesn't exist."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bdbdfaef",
   "metadata": {},
   "outputs": [],
   "source": [
    "print(\n",
    "    f\"Creating Mistral inference endpoint: {INFERENCE_ENDPOINT_NAME} at {ELASTICSEARCH_URL}\"\n",
    ")\n",
    "\n",
    "try:\n",
    "    # Create the chat_completion endpoint backed by the Mistral service\n",
    "    response = es_client.inference.put(\n",
    "        task_type=\"chat_completion\",\n",
    "        inference_id=INFERENCE_ENDPOINT_NAME,\n",
    "        body={\n",
    "            \"service\": \"mistral\",\n",
    "            \"service_settings\": {\"api_key\": MISTRAL_API_KEY, \"model\": MISTRAL_MODEL},\n",
    "        },\n",
    "    )\n",
    "\n",
    "    print(\"✅ Inference endpoint created successfully!\")\n",
    "    print(f\"Response: {json.dumps(response.body, indent=2)}\")\n",
    "\n",
    "except Exception as e:\n",
    "    # An already-existing endpoint is fine; anything else is a real error.\n",
    "    # Match both the human-readable message and the ES exception type name,\n",
    "    # consistent with the index-creation cell later in this notebook, and\n",
    "    # only print the ❌ error when the endpoint truly failed to create.\n",
    "    error_text = str(e).lower()\n",
    "    if (\n",
    "        \"already exists\" in error_text\n",
    "        or \"resource_already_exists_exception\" in error_text\n",
    "    ):\n",
    "        print(\"✅ Inference endpoint already exists, continuing...\")\n",
    "    else:\n",
    "        print(f\"❌ Error creating inference endpoint: {str(e)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "330798fb",
   "metadata": {},
   "source": [
    "## Chat Streaming Functions\n",
    "\n",
    "Let's create functions to handle streaming chat responses from the inference endpoint."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f582d78f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def stream_chat_completion(\n",
    "    host: str, endpoint_name: str, messages: list\n",
    ") -> Generator[str, None, None]:\n",
    "    \"\"\"Stream chat-completion content from the Elasticsearch inference API.\n",
    "\n",
    "    Args:\n",
    "        host: Base URL of the Elasticsearch cluster.\n",
    "        endpoint_name: Name of the chat_completion inference endpoint.\n",
    "        messages: List of chat message dicts with \"role\" and \"content\" keys.\n",
    "\n",
    "    Yields:\n",
    "        Non-empty content fragments as they arrive. On a request failure a\n",
    "        single \"Error: ...\" string is yielded instead of raising.\n",
    "    \"\"\"\n",
    "    url = f\"{host}/_inference/chat_completion/{endpoint_name}/_stream\"\n",
    "\n",
    "    payload = {\"messages\": messages}\n",
    "\n",
    "    try:\n",
    "        # stream=True keeps the connection open so lines can be read as they arrive\n",
    "        response = requests.post(\n",
    "            url, json=payload, headers=ELASTICSEARCH_HEADERS, stream=True\n",
    "        )\n",
    "        response.raise_for_status()\n",
    "\n",
    "        for line in response.iter_lines(decode_unicode=True):\n",
    "            if line:\n",
    "                line = line.strip()\n",
    "\n",
    "                # Handle Server-Sent Events format\n",
    "                # Skip event lines like \"event: message\"\n",
    "                if line.startswith(\"event:\"):\n",
    "                    continue\n",
    "\n",
    "                # Process data lines\n",
    "                if line.startswith(\"data: \"):\n",
    "                    data_content = line[6:]  # Remove \"data: \" prefix\n",
    "\n",
    "                    # Skip empty data or special markers\n",
    "                    if not data_content.strip() or data_content.strip() == \"[DONE]\":\n",
    "                        continue\n",
    "\n",
    "                    try:\n",
    "                        chunk_data = json.loads(data_content)\n",
    "\n",
    "                        # Extract the content from the Mistral response structure\n",
    "                        # (OpenAI-style: choices[0].delta.content)\n",
    "                        if \"choices\" in chunk_data and len(chunk_data[\"choices\"]) > 0:\n",
    "                            choice = chunk_data[\"choices\"][0]\n",
    "                            if \"delta\" in choice and \"content\" in choice[\"delta\"]:\n",
    "                                content = choice[\"delta\"][\"content\"]\n",
    "                                if content:  # Only yield non-empty content\n",
    "                                    yield content\n",
    "\n",
    "                    except json.JSONDecodeError as json_err:\n",
    "                        # If JSON parsing fails, log the error but continue\n",
    "                        print(f\"\\nJSON decode error: {json_err}\")\n",
    "                        print(f\"Problematic data: {data_content}\")\n",
    "                        continue\n",
    "\n",
    "    except requests.exceptions.RequestException as e:\n",
    "        yield f\"Error: {str(e)}\"\n",
    "\n",
    "\n",
    "print(\"✅ Streaming function defined!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "e0742f3a",
   "metadata": {},
   "source": [
    "## Testing the Inference Endpoint \n",
    "\n",
    "Now let's test our inference endpoint with a simple question. This will demonstrate that streaming responses from Elasticsearch are working correctly."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9f5fbf08",
   "metadata": {},
   "outputs": [],
   "source": [
    "user_question = \"What SNES games had a character on a skateboard throwing axes?\"\n",
    "\n",
    "messages = [\n",
    "    {\n",
    "        \"role\": \"system\",\n",
    "        \"content\": \"You are a helpful gaming expert that provides concise answers about video games.\",\n",
    "    },\n",
    "    {\"role\": \"user\", \"content\": user_question},\n",
    "]\n",
    "\n",
    "print(f\"User: {user_question}\")\n",
    "print(\"Assistant: \\n\")\n",
    "\n",
    "for chunk in stream_chat_completion(\n",
    "    ELASTICSEARCH_URL, INFERENCE_ENDPOINT_NAME, messages\n",
    "):\n",
    "    print(chunk, end=\"\", flush=True)"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "d92b8371",
   "metadata": {},
   "source": [
    "# Context Engineering with Elasticsearch\n",
    "\n",
    "In this section, we'll demonstrate how to:\n",
    "1. Index documents into Elasticsearch \n",
    "2. Search for relevant context\n",
    "3. Use retrieved documents to enhance our chat completions with contextual information\n",
    "\n",
    "This approach combines retrieval-augmented generation (RAG) with Mistral's chat capabilities through Elasticsearch."
   ]
  },
  {
   "cell_type": "markdown",
   "id": "c3dd003a",
   "metadata": {},
   "source": [
    "## Step 1: Index some documents\n",
    "\n",
    "First, let's create an Elasticsearch index to store our documents with both text content and vector embeddings for semantic search."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b87d34d3-e64f-49b7-9403-4179e9cbc0e8",
   "metadata": {},
   "outputs": [],
   "source": [
    "INDEX_NAME = \"snes-games\"\n",
    "\n",
    "# Mapping: title, category, and description are copied (copy_to) into a single\n",
    "# semantic_text field so one semantic query can match against all three.\n",
    "snes_mapping = {\n",
    "    \"mappings\": {\n",
    "        \"properties\": {\n",
    "            \"id\": {\"type\": \"keyword\"},\n",
    "            \"title\": {\"type\": \"text\", \"copy_to\": \"description_semantic\"},\n",
    "            \"publishers\": {\"type\": \"keyword\"},\n",
    "            \"year_US\": {\"type\": \"keyword\"},\n",
    "            \"year_JP\": {\"type\": \"keyword\"},\n",
    "            \"category\": {\"type\": \"keyword\", \"copy_to\": \"description_semantic\"},\n",
    "            \"description\": {\"type\": \"text\", \"copy_to\": \"description_semantic\"},\n",
    "            \"description_semantic\": {\"type\": \"semantic_text\"},\n",
    "        }\n",
    "    }\n",
    "}\n",
    "\n",
    "try:\n",
    "    # Create the index using the Elasticsearch client\n",
    "    response = es_client.indices.create(index=INDEX_NAME, body=snes_mapping)\n",
    "\n",
    "    print(f\"✅ Index '{INDEX_NAME}' created successfully!\")\n",
    "    print(f\"Response: {json.dumps(response.body, indent=2)}\")\n",
    "\n",
    "except Exception as e:\n",
    "    print(f\"❌ Error creating index '{INDEX_NAME}': {str(e)}\")\n",
    "    # If the index already exists, that's okay\n",
    "    if (\n",
    "        \"already exists\" in str(e).lower()\n",
    "        or \"resource_already_exists_exception\" in str(e).lower()\n",
    "    ):\n",
    "        print(f\"✅ Index '{INDEX_NAME}' already exists, continuing...\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "09e304d0",
   "metadata": {},
   "outputs": [],
   "source": [
    "import csv  # stdlib; parses the pipe-delimited, quoted data file robustly\n",
    "\n",
    "\n",
    "def bulk_index_games(games_batch):\n",
    "    \"\"\"Index a batch of game documents via the Elasticsearch _bulk API.\n",
    "\n",
    "    Args:\n",
    "        games_batch: List of game document dicts (each must have an \"id\").\n",
    "\n",
    "    Returns:\n",
    "        The number of documents successfully indexed (HTTP status 200/201).\n",
    "    \"\"\"\n",
    "    if not games_batch:\n",
    "        return 0\n",
    "    bulk_body = \"\"\n",
    "    for game_doc in games_batch:\n",
    "        index_meta = {\"index\": {\"_index\": INDEX_NAME, \"_id\": game_doc[\"id\"]}}\n",
    "        bulk_body += json.dumps(index_meta) + \"\\n\" + json.dumps(game_doc) + \"\\n\"\n",
    "    bulk_url = f\"{ELASTICSEARCH_URL}/_bulk\"\n",
    "    # The _bulk endpoint requires newline-delimited JSON, not application/json\n",
    "    bulk_headers = {**ELASTICSEARCH_HEADERS, \"Content-Type\": \"application/x-ndjson\"}\n",
    "    try:\n",
    "        response = requests.post(bulk_url, data=bulk_body, headers=bulk_headers)\n",
    "        response.raise_for_status()\n",
    "        result = response.json()\n",
    "        return sum(\n",
    "            1\n",
    "            for item in result.get(\"items\", [])\n",
    "            if item.get(\"index\", {}).get(\"status\") in [200, 201]\n",
    "        )\n",
    "    except requests.exceptions.RequestException as e:\n",
    "        # Report the failure instead of silently dropping the whole batch\n",
    "        print(f\"\\n❌ Bulk indexing error: {e}\")\n",
    "        return 0\n",
    "\n",
    "\n",
    "csv_file_path = \"snes_games.csv\"\n",
    "BATCH_SIZE = 50\n",
    "# Column names of the pipe-delimited data file (its header row is skipped)\n",
    "actual_headers = [\n",
    "    \"ID\",\n",
    "    \"Title\",\n",
    "    \"Publishers\",\n",
    "    \"Year_North_America\",\n",
    "    \"Year_JP\",\n",
    "    \"Category\",\n",
    "    \"Description\",\n",
    "]\n",
    "\n",
    "try:\n",
    "    with open(csv_file_path, \"r\", encoding=\"utf-8\", newline=\"\") as file:\n",
    "        # csv.reader handles quoted fields that contain the delimiter,\n",
    "        # replacing the previous hand-rolled character-by-character parser\n",
    "        reader = csv.reader(file, delimiter=\"|\", quotechar='\"')\n",
    "        next(reader, None)  # skip the header row\n",
    "        rows = [row for row in reader if any(field.strip() for field in row)]\n",
    "\n",
    "        total_indexed, current_batch = 0, []\n",
    "        for parts in tqdm(rows, desc=\"Indexing SNES games\"):\n",
    "            row = {\n",
    "                header: (parts[i].strip() if i < len(parts) else \"\")\n",
    "                for i, header in enumerate(actual_headers)\n",
    "            }\n",
    "            game_doc = {\n",
    "                \"id\": row.get(\"ID\", \"\"),\n",
    "                \"title\": row.get(\"Title\", \"\"),\n",
    "                \"publishers\": row.get(\"Publishers\", \"\"),\n",
    "                \"year_US\": row.get(\"Year_North_America\", \"\"),\n",
    "                \"year_JP\": row.get(\"Year_JP\", \"\"),\n",
    "                \"category\": row.get(\"Category\", \"\"),\n",
    "                \"description\": row.get(\"Description\", \"\"),\n",
    "            }\n",
    "            current_batch.append(game_doc)\n",
    "            # Flush a full batch to keep individual _bulk requests small\n",
    "            if len(current_batch) >= BATCH_SIZE:\n",
    "                total_indexed += bulk_index_games(current_batch)\n",
    "                current_batch = []\n",
    "        if current_batch:\n",
    "            total_indexed += bulk_index_games(current_batch)\n",
    "        print(f\"✅ Indexed {total_indexed} games into '{INDEX_NAME}'\")\n",
    "except FileNotFoundError:\n",
    "    # Previously a bare `except: pass` hid this (and every other) failure\n",
    "    print(f\"❌ Data file not found: {csv_file_path}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "efcc95b9",
   "metadata": {},
   "source": [
    "## Step 2: Search for Relevant Context\n",
    "\n",
    "Now let's create a function to search our indexed documents for relevant context based on a user's query."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f565cb68",
   "metadata": {},
   "outputs": [],
   "source": [
    "def search_documents(query: str, max_results: int = 3) -> list:\n",
    "    \"\"\"Run a semantic search against the SNES games index.\n",
    "\n",
    "    Args:\n",
    "        query: Natural-language search query.\n",
    "        max_results: Maximum number of hits to return.\n",
    "\n",
    "    Returns:\n",
    "        A list of raw Elasticsearch hit dicts, or an empty list on error.\n",
    "    \"\"\"\n",
    "    # The semantic query targets the semantic_text field populated via copy_to\n",
    "    search_body = {\n",
    "        \"size\": max_results,\n",
    "        \"query\": {\"semantic\": {\"field\": \"description_semantic\", \"query\": query}},\n",
    "    }\n",
    "\n",
    "    try:\n",
    "        response = es_client.search(index=INDEX_NAME, body=search_body)\n",
    "\n",
    "        return response.body[\"hits\"][\"hits\"]\n",
    "\n",
    "    except Exception as e:\n",
    "        print(f\"❌ Error searching documents: {str(e)}\")\n",
    "        return []"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e037b346-d274-45d6-a5a1-8c3e3d5df28f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Sanity-check semantic retrieval with the same question used earlier\n",
    "test_query = \"What SNES games had a character on a skateboard throwing axes?\"\n",
    "print(f\"🔍 Searching for: '{test_query}'\")\n",
    "\n",
    "search_results = search_documents(test_query, 5)\n",
    "\n",
    "# Show title, description, and relevance score for each hit\n",
    "for i, doc in enumerate(search_results, 1):\n",
    "    print(\n",
    "        f\"\\n{i}. {doc['_source']['title']} - {doc['_source']['description']} (Score: {doc['_score']:.2f})\"\n",
    "    )"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5567ee08",
   "metadata": {},
   "source": [
    "## Step 3: RAG-Enhanced Chat Function\n",
    "\n",
    "Now let's create a function that combines document retrieval with our Mistral chat completion for contextual responses."
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4dacdad7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def rag_chat(user_question: str, max_context_docs: int = 10) -> str:\n",
    "    \"\"\"Answer a question with retrieval-augmented generation (RAG).\n",
    "\n",
    "    Retrieves up to max_context_docs documents from the games index, injects\n",
    "    them into the user prompt, streams the Mistral completion while printing\n",
    "    each chunk, and returns the full response text.\n",
    "\n",
    "    Args:\n",
    "        user_question: The question to answer.\n",
    "        max_context_docs: Maximum number of context documents to retrieve.\n",
    "\n",
    "    Returns:\n",
    "        The complete assistant response as a single string.\n",
    "    \"\"\"\n",
    "    context_docs = search_documents(user_question, max_context_docs)\n",
    "\n",
    "    # Build a context section from the retrieved documents (empty when none found)\n",
    "    context_text = \"\"\n",
    "    if context_docs:\n",
    "        context_text = \"\\n\\nRelevant context information:\\n\"\n",
    "        for i, doc in enumerate(context_docs, 1):\n",
    "            context_text += f\"\\n{i}. {doc['_source']}\\n\"\n",
    "\n",
    "    system_prompt = \"\"\"\n",
    "        You are a helpful assistant that answers about Super Nintendo games. \n",
    "        Use the provided context information to answer the user's question accurately. \n",
    "        If the context doesn't contain relevant information, you can use your general knowledge.\n",
    "        \"\"\"\n",
    "\n",
    "    # Prepend the retrieved context to the user's question\n",
    "    user_prompt = user_question\n",
    "    if context_text:\n",
    "        user_prompt = f\"{context_text}\\n\\nQuestion: {user_question}\"\n",
    "\n",
    "    messages = [\n",
    "        {\"role\": \"system\", \"content\": system_prompt},\n",
    "        {\"role\": \"user\", \"content\": user_prompt},\n",
    "    ]\n",
    "\n",
    "    # Stream the answer, echoing chunks as they arrive and accumulating them\n",
    "    full_response = \"\"\n",
    "    for chunk in stream_chat_completion(\n",
    "        ELASTICSEARCH_URL, INFERENCE_ENDPOINT_NAME, messages\n",
    "    ):\n",
    "        print(chunk, end=\"\", flush=True)\n",
    "        full_response += chunk\n",
    "\n",
    "    return full_response"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "32a6054b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# End-to-end RAG demo: the answer should now draw on the indexed game data\n",
    "test_question = \"What SNES games had a character on a skateboard throwing axes?\"\n",
    "rag_chat(test_question)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
