{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "uJ93P7ksa2Nn"
   },
   "source": [
    "# Local RAG Document Agent"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "vxe5gDW0a4pC"
   },
   "source": [
     "* A Retrieval-Augmented Generation agent that uses local models and vector databases for document-based question answering.\n",
    "* The agent can process PDF documents, extract knowledge, and answer questions using local LLM inference without external API calls.\n",
    "* Features include document upload, vector embedding, similarity search, and intelligent Q&A with source attribution and context awareness."
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "K3HdvvEid5wP"
   },
   "source": [
    "[![Open in Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/Dhivya-Bharathy/PraisonAI/blob/main/examples/cookbooks/local_rag_document_qa_agent.ipynb)\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "xaXT3inobISk"
   },
   "source": [
    "# Dependencies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "VBIlMa0vbBWm"
   },
   "outputs": [],
   "source": [
    "!pip install praisonai streamlit qdrant-client ollama pypdf PyPDF2 chromadb sentence-transformers"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "FttFP60zb30i"
   },
   "source": [
    "# Setup Key"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "smr-FEPhb3Yx",
    "outputId": "526779e1-94ed-46f9-9288-6d9ee5b25a19"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ API key configured!\n",
      "✅ Using local Ollama models for RAG operations\n"
     ]
    }
   ],
   "source": [
    "# Setup Key\n",
    "import os\n",
    "openai_key = \"sk-..\"\n",
    "\n",
    "os.environ[\"OPENAI_API_KEY\"] = openai_key\n",
    "\n",
    "# Ollama setup (for local models)\n",
    "# Make sure Ollama is installed and running locally\n",
    "# Available models: llama3.2, llama3.1, mistral, codellama, etc.\n",
    "\n",
    "print(\"✅ API key configured!\")\n",
    "print(\"✅ Using local Ollama models for RAG operations\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "4hEC_bn1cGOH"
   },
   "source": [
    "# Tools"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {
    "id": "bXEhVTwocH1J"
   },
   "outputs": [],
   "source": [
     "# Custom Document Processing Tool\n",
     "import PyPDF2\n",
     "import tempfile\n",
     "import os\n",
     "from typing import Dict, Any, List\n",
     "import pandas as pd\n",
     "\n",
     "class DocumentProcessingTool:\n",
     "    \"\"\"Extracts plain text from PDF, TXT, MD, and CSV files.\n",
     "\n",
     "    Every method returns a dict: either extracted content plus metadata,\n",
     "    or an {'error': message} dict — callers check for the 'error' key.\n",
     "    \"\"\"\n",
     "\n",
     "    def __init__(self):\n",
     "        # Extensions this tool knows how to handle (lower-case, with dot)\n",
     "        self.supported_formats = ['.pdf', '.txt', '.md', '.csv']\n",
     "\n",
     "    def process_document(self, file_path: str) -> Dict[str, Any]:\n",
     "        \"\"\"Process different document formats and extract text\"\"\"\n",
     "        try:\n",
     "            # Dispatch on the (case-insensitive) file extension\n",
     "            file_ext = os.path.splitext(file_path)[1].lower()\n",
     "\n",
     "            if file_ext == '.pdf':\n",
     "                return self._process_pdf(file_path)\n",
     "            elif file_ext == '.txt':\n",
     "                return self._process_txt(file_path)\n",
     "            elif file_ext == '.md':\n",
     "                return self._process_md(file_path)\n",
     "            elif file_ext == '.csv':\n",
     "                return self._process_csv(file_path)\n",
     "            else:\n",
     "                return {\"error\": f\"Unsupported file format: {file_ext}\"}\n",
     "        except Exception as e:\n",
     "            return {\"error\": f\"Error processing document: {str(e)}\"}\n",
    "\n",
    "    def _process_pdf(self, file_path: str) -> Dict[str, Any]:\n",
    "        \"\"\"Process PDF files\"\"\"\n",
    "        try:\n",
    "            with open(file_path, 'rb') as file:\n",
    "                pdf_reader = PyPDF2.PdfReader(file)\n",
    "                text = \"\"\n",
    "                for page in pdf_reader.pages:\n",
    "                    text += page.extract_text() + \"\\n\"\n",
    "\n",
    "            return {\n",
    "                \"text\": text,\n",
    "                \"pages\": len(pdf_reader.pages),\n",
    "                \"format\": \"pdf\",\n",
    "                \"file_path\": file_path\n",
    "            }\n",
    "        except Exception as e:\n",
    "            return {\"error\": f\"PDF processing error: {str(e)}\"}\n",
    "\n",
    "    def _process_txt(self, file_path: str) -> Dict[str, Any]:\n",
    "        \"\"\"Process text files\"\"\"\n",
    "        try:\n",
    "            with open(file_path, 'r', encoding='utf-8') as file:\n",
    "                text = file.read()\n",
    "\n",
    "            return {\n",
    "                \"text\": text,\n",
    "                \"format\": \"txt\",\n",
    "                \"file_path\": file_path\n",
    "            }\n",
    "        except Exception as e:\n",
    "            return {\"error\": f\"Text processing error: {str(e)}\"}\n",
    "\n",
    "    def _process_md(self, file_path: str) -> Dict[str, Any]:\n",
    "        \"\"\"Process markdown files\"\"\"\n",
    "        try:\n",
    "            with open(file_path, 'r', encoding='utf-8') as file:\n",
    "                text = file.read()\n",
    "\n",
    "            return {\n",
    "                \"text\": text,\n",
    "                \"format\": \"md\",\n",
    "                \"file_path\": file_path\n",
    "            }\n",
    "        except Exception as e:\n",
    "            return {\"error\": f\"Markdown processing error: {str(e)}\"}\n",
    "\n",
     "    def _process_csv(self, file_path: str) -> Dict[str, Any]:\n",
     "        \"\"\"Process CSV files\"\"\"\n",
     "        try:\n",
     "            df = pd.read_csv(file_path)\n",
     "            # Render the whole table as text so it can be chunked and embedded\n",
     "            text = df.to_string(index=False)\n",
     "\n",
     "            return {\n",
     "                \"text\": text,\n",
     "                \"format\": \"csv\",\n",
     "                \"rows\": len(df),\n",
     "                \"columns\": len(df.columns),\n",
     "                \"file_path\": file_path\n",
     "            }\n",
     "        except Exception as e:\n",
     "            return {\"error\": f\"CSV processing error: {str(e)}\"}\n",
    "\n",
     "# Custom Vector Database Tool (Fixed for new ChromaDB)\n",
     "import chromadb\n",
     "import numpy as np\n",
     "\n",
     "class VectorDatabaseTool:\n",
     "    \"\"\"Wraps a persistent ChromaDB collection for chunk storage and similarity search.\"\"\"\n",
     "\n",
     "    def __init__(self, collection_name: str = \"document_qa\"):\n",
     "        self.collection_name = collection_name\n",
     "        # Use new ChromaDB client configuration\n",
     "        # PersistentClient stores the index on disk under ./chroma_db, so the\n",
     "        # data survives kernel restarts.\n",
     "        self.client = chromadb.PersistentClient(path=\"./chroma_db\")\n",
     "        # get_or_create is idempotent: re-running the cell reuses the collection\n",
     "        self.collection = self.client.get_or_create_collection(name=collection_name)\n",
     "\n",
     "    def add_documents(self, documents: List[str], metadatas: List[Dict] = None, ids: List[str] = None) -> Dict[str, Any]:\n",
     "        \"\"\"Add documents to the vector database\"\"\"\n",
     "        try:\n",
     "            # Auto-generate sequential ids/metadata when the caller omits them.\n",
     "            # NOTE(review): default ids restart at doc_0 on every call, so a\n",
     "            # second call with defaults collides with earlier entries — confirm\n",
     "            # this is intended before ingesting in multiple batches.\n",
     "            if ids is None:\n",
     "                ids = [f\"doc_{i}\" for i in range(len(documents))]\n",
     "            if metadatas is None:\n",
     "                metadatas = [{\"source\": f\"document_{i}\"} for i in range(len(documents))]\n",
     "\n",
     "            self.collection.add(\n",
     "                documents=documents,\n",
     "                metadatas=metadatas,\n",
     "                ids=ids\n",
     "            )\n",
     "\n",
     "            return {\n",
     "                \"success\": True,\n",
     "                \"documents_added\": len(documents),\n",
     "                \"collection_name\": self.collection_name\n",
     "            }\n",
     "        except Exception as e:\n",
     "            return {\"error\": f\"Error adding documents: {str(e)}\"}\n",
     "\n",
     "    def search_documents(self, query: str, n_results: int = 5) -> Dict[str, Any]:\n",
     "        \"\"\"Search for relevant documents\"\"\"\n",
     "        try:\n",
     "            # Chroma embeds the query text itself; results come back as nested\n",
     "            # lists (one inner list per query text — we send exactly one).\n",
     "            results = self.collection.query(\n",
     "                query_texts=[query],\n",
     "                n_results=n_results\n",
     "            )\n",
     "\n",
     "            return {\n",
     "                \"success\": True,\n",
     "                \"query\": query,\n",
     "                \"results\": results,\n",
     "                \"num_results\": len(results['documents'][0]) if results['documents'] else 0\n",
     "            }\n",
     "        except Exception as e:\n",
     "            return {\"error\": f\"Error searching documents: {str(e)}\"}\n",
     "\n",
     "    def get_collection_info(self) -> Dict[str, Any]:\n",
     "        \"\"\"Get information about the collection\"\"\"\n",
     "        try:\n",
     "            count = self.collection.count()\n",
     "            return {\n",
     "                \"success\": True,\n",
     "                \"collection_name\": self.collection_name,\n",
     "                \"document_count\": count\n",
     "            }\n",
     "        except Exception as e:\n",
     "            return {\"error\": f\"Error getting collection info: {str(e)}\"}\n",
    "\n",
     "# Custom Text Chunking Tool\n",
     "import re\n",
     "from typing import List\n",
     "\n",
     "class TextChunkingTool:\n",
     "    \"\"\"Splits long text into chunks sized for vector embedding.\"\"\"\n",
     "\n",
     "    def __init__(self, chunk_size: int = 1000, chunk_overlap: int = 200):\n",
     "        # chunk_size: target maximum characters per chunk\n",
     "        # chunk_overlap: characters carried over between consecutive chunks\n",
     "        self.chunk_size = chunk_size\n",
     "        self.chunk_overlap = chunk_overlap\n",
     "\n",
     "    def chunk_text(self, text: str) -> List[str]:\n",
     "        \"\"\"Split text into overlapping chunks\"\"\"\n",
     "        try:\n",
     "            # Clean the text\n",
     "            text = re.sub(r'\\s+', ' ', text).strip()\n",
     "\n",
     "            # Split into sentences first\n",
     "            # NOTE(review): this naive split also breaks on '.' inside numbers\n",
     "            # or abbreviations — acceptable for rough chunking.\n",
     "            sentences = re.split(r'[.!?]+', text)\n",
     "            sentences = [s.strip() for s in sentences if s.strip()]\n",
     "\n",
     "            chunks = []\n",
     "            current_chunk = \"\"\n",
     "\n",
     "            for sentence in sentences:\n",
     "                # If adding this sentence would exceed chunk size\n",
     "                if len(current_chunk) + len(sentence) > self.chunk_size and current_chunk:\n",
     "                    chunks.append(current_chunk.strip())\n",
     "                    # Start new chunk with overlap\n",
     "                    # (the tail of the finished chunk seeds the next one, so\n",
     "                    # context is not lost at chunk boundaries)\n",
     "                    overlap_start = max(0, len(current_chunk) - self.chunk_overlap)\n",
     "                    current_chunk = current_chunk[overlap_start:] + \" \" + sentence\n",
     "                else:\n",
     "                    current_chunk += \" \" + sentence if current_chunk else sentence\n",
     "\n",
     "            # Add the last chunk\n",
     "            if current_chunk:\n",
     "                chunks.append(current_chunk.strip())\n",
     "\n",
     "            return chunks\n",
     "        except Exception as e:\n",
     "            # Errors are reported in-band as a single-element list\n",
     "            return [f\"Error chunking text: {str(e)}\"]\n",
     "\n",
     "    def chunk_text_by_paragraphs(self, text: str) -> List[str]:\n",
     "        \"\"\"Split text by paragraphs\"\"\"\n",
     "        try:\n",
     "            paragraphs = text.split('\\n\\n')\n",
     "            chunks = []\n",
     "\n",
     "            for paragraph in paragraphs:\n",
     "                paragraph = paragraph.strip()\n",
     "                if paragraph:\n",
     "                    # If paragraph is too long, split it further\n",
     "                    if len(paragraph) > self.chunk_size:\n",
     "                        sub_chunks = self.chunk_text(paragraph)\n",
     "                        chunks.extend(sub_chunks)\n",
     "                    else:\n",
     "                        chunks.append(paragraph)\n",
     "\n",
     "            return chunks\n",
     "        except Exception as e:\n",
     "            return [f\"Error chunking paragraphs: {str(e)}\"]"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "vuE2noGJc5UM"
   },
   "source": [
    "# YAML Prompt"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "9okcME5dc6g5",
    "outputId": "04372512-a187-4db2-e578-d6964b9b0ee7"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ YAML Prompt configured!\n"
     ]
    }
   ],
   "source": [
    "# YAML Prompt\n",
    "yaml_prompt = \"\"\"\n",
    "name: \"Local RAG Document Agent\"\n",
    "description: \"Expert document analyst with local LLM capabilities and vector search\"\n",
    "instructions:\n",
    "  - \"You are an expert document analyst that processes and analyzes documents using local LLM models\"\n",
    "  - \"Use vector database search to find relevant document chunks for answering questions\"\n",
    "  - \"Process uploaded documents and extract meaningful information\"\n",
    "  - \"Provide accurate answers based on document content with source attribution\"\n",
    "  - \"Chunk documents appropriately for optimal vector search performance\"\n",
    "  - \"Always cite the source document when providing answers\"\n",
    "  - \"Use markdown formatting for better readability\"\n",
    "  - \"Handle multiple document formats (PDF, TXT, MD, CSV)\"\n",
    "  - \"Provide context-aware responses based on document content\"\n",
    "\n",
    "tools:\n",
    "  - name: \"DocumentProcessingTool\"\n",
    "    description: \"Processes different document formats (PDF, TXT, MD, CSV) and extracts text content\"\n",
    "  - name: \"VectorDatabaseTool\"\n",
    "    description: \"Manages vector database operations for document storage and similarity search\"\n",
    "  - name: \"TextChunkingTool\"\n",
    "    description: \"Splits text into appropriate chunks for vector embedding and retrieval\"\n",
    "\n",
    "output_format:\n",
    "  - \"Provide clear, document-based answers\"\n",
    "  - \"Include source attribution when possible\"\n",
    "  - \"Summarize key information from documents\"\n",
    "  - \"Suggest follow-up questions if relevant\"\n",
    "  - \"Use bullet points and structured formatting\"\n",
    "  - \"Highlight important findings from the documents\"\n",
    "\n",
    "temperature: 0.3\n",
    "max_tokens: 4000\n",
    "model: \"local-llama3.2\"\n",
    "\"\"\"\n",
    "\n",
    "print(\"✅ YAML Prompt configured!\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "fF-XYF0UdEnT"
   },
   "source": [
    "# Main"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "AoZrQUyLdGCT",
    "outputId": "069a3115-b6c0-4111-b1ca-080466048f30"
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
       "🤖 Local RAG Document Agent\n",
      "Document-based Q&A powered by local LLM and vector search!\n",
      "\n",
      "📁 Upload Your Documents\n",
      "Please upload PDF, TXT, MD, or CSV files:\n"
     ]
    },
    {
     "data": {
      "text/html": [
       "\n",
       "     <input type=\"file\" id=\"files-bc658477-fb0b-4b0b-a549-501485fe7cab\" name=\"files[]\" multiple disabled\n",
       "        style=\"border:none\" />\n",
       "     <output id=\"result-bc658477-fb0b-4b0b-a549-501485fe7cab\">\n",
       "      Upload widget is only available when the cell has been executed in the\n",
       "      current browser session. Please rerun this cell to enable.\n",
       "      </output>\n",
       "      <script>// Copyright 2017 Google LLC\n",
       "//\n",
       "// Licensed under the Apache License, Version 2.0 (the \"License\");\n",
       "// you may not use this file except in compliance with the License.\n",
       "// You may obtain a copy of the License at\n",
       "//\n",
       "//      http://www.apache.org/licenses/LICENSE-2.0\n",
       "//\n",
       "// Unless required by applicable law or agreed to in writing, software\n",
       "// distributed under the License is distributed on an \"AS IS\" BASIS,\n",
       "// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n",
       "// See the License for the specific language governing permissions and\n",
       "// limitations under the License.\n",
       "\n",
       "/**\n",
       " * @fileoverview Helpers for google.colab Python module.\n",
       " */\n",
       "(function(scope) {\n",
       "function span(text, styleAttributes = {}) {\n",
       "  const element = document.createElement('span');\n",
       "  element.textContent = text;\n",
       "  for (const key of Object.keys(styleAttributes)) {\n",
       "    element.style[key] = styleAttributes[key];\n",
       "  }\n",
       "  return element;\n",
       "}\n",
       "\n",
       "// Max number of bytes which will be uploaded at a time.\n",
       "const MAX_PAYLOAD_SIZE = 100 * 1024;\n",
       "\n",
       "function _uploadFiles(inputId, outputId) {\n",
       "  const steps = uploadFilesStep(inputId, outputId);\n",
       "  const outputElement = document.getElementById(outputId);\n",
       "  // Cache steps on the outputElement to make it available for the next call\n",
       "  // to uploadFilesContinue from Python.\n",
       "  outputElement.steps = steps;\n",
       "\n",
       "  return _uploadFilesContinue(outputId);\n",
       "}\n",
       "\n",
       "// This is roughly an async generator (not supported in the browser yet),\n",
       "// where there are multiple asynchronous steps and the Python side is going\n",
       "// to poll for completion of each step.\n",
       "// This uses a Promise to block the python side on completion of each step,\n",
       "// then passes the result of the previous step as the input to the next step.\n",
       "function _uploadFilesContinue(outputId) {\n",
       "  const outputElement = document.getElementById(outputId);\n",
       "  const steps = outputElement.steps;\n",
       "\n",
       "  const next = steps.next(outputElement.lastPromiseValue);\n",
       "  return Promise.resolve(next.value.promise).then((value) => {\n",
       "    // Cache the last promise value to make it available to the next\n",
       "    // step of the generator.\n",
       "    outputElement.lastPromiseValue = value;\n",
       "    return next.value.response;\n",
       "  });\n",
       "}\n",
       "\n",
       "/**\n",
       " * Generator function which is called between each async step of the upload\n",
       " * process.\n",
       " * @param {string} inputId Element ID of the input file picker element.\n",
       " * @param {string} outputId Element ID of the output display.\n",
       " * @return {!Iterable<!Object>} Iterable of next steps.\n",
       " */\n",
       "function* uploadFilesStep(inputId, outputId) {\n",
       "  const inputElement = document.getElementById(inputId);\n",
       "  inputElement.disabled = false;\n",
       "\n",
       "  const outputElement = document.getElementById(outputId);\n",
       "  outputElement.innerHTML = '';\n",
       "\n",
       "  const pickedPromise = new Promise((resolve) => {\n",
       "    inputElement.addEventListener('change', (e) => {\n",
       "      resolve(e.target.files);\n",
       "    });\n",
       "  });\n",
       "\n",
       "  const cancel = document.createElement('button');\n",
       "  inputElement.parentElement.appendChild(cancel);\n",
       "  cancel.textContent = 'Cancel upload';\n",
       "  const cancelPromise = new Promise((resolve) => {\n",
       "    cancel.onclick = () => {\n",
       "      resolve(null);\n",
       "    };\n",
       "  });\n",
       "\n",
       "  // Wait for the user to pick the files.\n",
       "  const files = yield {\n",
       "    promise: Promise.race([pickedPromise, cancelPromise]),\n",
       "    response: {\n",
       "      action: 'starting',\n",
       "    }\n",
       "  };\n",
       "\n",
       "  cancel.remove();\n",
       "\n",
       "  // Disable the input element since further picks are not allowed.\n",
       "  inputElement.disabled = true;\n",
       "\n",
       "  if (!files) {\n",
       "    return {\n",
       "      response: {\n",
       "        action: 'complete',\n",
       "      }\n",
       "    };\n",
       "  }\n",
       "\n",
       "  for (const file of files) {\n",
       "    const li = document.createElement('li');\n",
       "    li.append(span(file.name, {fontWeight: 'bold'}));\n",
       "    li.append(span(\n",
       "        `(${file.type || 'n/a'}) - ${file.size} bytes, ` +\n",
       "        `last modified: ${\n",
       "            file.lastModifiedDate ? file.lastModifiedDate.toLocaleDateString() :\n",
       "                                    'n/a'} - `));\n",
       "    const percent = span('0% done');\n",
       "    li.appendChild(percent);\n",
       "\n",
       "    outputElement.appendChild(li);\n",
       "\n",
       "    const fileDataPromise = new Promise((resolve) => {\n",
       "      const reader = new FileReader();\n",
       "      reader.onload = (e) => {\n",
       "        resolve(e.target.result);\n",
       "      };\n",
       "      reader.readAsArrayBuffer(file);\n",
       "    });\n",
       "    // Wait for the data to be ready.\n",
       "    let fileData = yield {\n",
       "      promise: fileDataPromise,\n",
       "      response: {\n",
       "        action: 'continue',\n",
       "      }\n",
       "    };\n",
       "\n",
       "    // Use a chunked sending to avoid message size limits. See b/62115660.\n",
       "    let position = 0;\n",
       "    do {\n",
       "      const length = Math.min(fileData.byteLength - position, MAX_PAYLOAD_SIZE);\n",
       "      const chunk = new Uint8Array(fileData, position, length);\n",
       "      position += length;\n",
       "\n",
       "      const base64 = btoa(String.fromCharCode.apply(null, chunk));\n",
       "      yield {\n",
       "        response: {\n",
       "          action: 'append',\n",
       "          file: file.name,\n",
       "          data: base64,\n",
       "        },\n",
       "      };\n",
       "\n",
       "      let percentDone = fileData.byteLength === 0 ?\n",
       "          100 :\n",
       "          Math.round((position / fileData.byteLength) * 100);\n",
       "      percent.textContent = `${percentDone}% done`;\n",
       "\n",
       "    } while (position < fileData.byteLength);\n",
       "  }\n",
       "\n",
       "  // All done.\n",
       "  yield {\n",
       "    response: {\n",
       "      action: 'complete',\n",
       "    }\n",
       "  };\n",
       "}\n",
       "\n",
       "scope.google = scope.google || {};\n",
       "scope.google.colab = scope.google.colab || {};\n",
       "scope.google.colab._files = {\n",
       "  _uploadFiles,\n",
       "  _uploadFilesContinue,\n",
       "};\n",
       "})(self);\n",
       "</script> "
      ],
      "text/plain": [
       "<IPython.core.display.HTML object>"
      ]
     },
     "metadata": {},
     "output_type": "display_data"
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Saving customers-100.csv to customers-100.csv\n",
      "\n",
      "📄 Processing: customers-100.csv\n",
      "✅ Successfully processed customers-100.csv\n",
      "   - Format: csv\n",
      "   - Text length: 27875 characters\n",
      "\n",
       "📊 Processing Summary:\n",
      "- Total documents processed: 1\n",
      "\n",
       "✂️ Chunking documents for vector storage...\n",
      "- Total chunks created: 23\n",
      "\n",
      "🗄️ Adding documents to vector database...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "/root/.cache/chroma/onnx_models/all-MiniLM-L6-v2/onnx.tar.gz: 100%|██████████| 79.3M/79.3M [00:01<00:00, 70.7MiB/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "✅ Successfully added 23 chunks to vector database\n",
      "📈 Vector database now contains 23 total chunks\n",
      "\n",
      "🤔 Interactive Q&A Session\n",
      "Ask questions about your documents (type 'quit' to exit):\n",
      "\n",
      "❓ Your question: what is that\n",
      "🔍 Searching for relevant information...\n",
      "📚 Found 3 relevant chunks:\n",
      "\n",
      "--- Chunk 1 ---\n",
      "Source: /tmp/tmp7yzm_3y4.csv\n",
      "Format: csv\n",
      "Content: ank com 2020-03-26 https://www pugh com/ 92 98b3aeDcC3B9FF3 Shane Foley Rocha-Hart South Dannymouth Hungary +1-822-569-0302 001-626-114-5844x55073 nsteele@sparks com 2021-07-06 https://www holt-sparks...\n",
      "\n",
      "--- Chunk 2 ---\n",
      "Source: /tmp/tmp7yzm_3y4.csv\n",
      "Format: csv\n",
      "Content: Robersonstad Cyprus 854-138-4911x5772 +1-448-910-2276x729 mariokhan@ryan-pope org 2020-01-13 https://www bullock net/ 10 8C2811a503C7c5a Michelle Gallagher Beck-Hendrix Elaineberg Timor-Leste 739 218 ...\n",
      "\n",
      "--- Chunk 3 ---\n",
      "Source: /tmp/tmp7yzm_3y4.csv\n",
      "Format: csv\n",
      "Content: x58692 001-841-293-3519x614 hhart@jensen com 2022-01-30 http://hayes-perez com/ 97 CeD220bdAaCfaDf Lynn Atkinson Ware, Burns and Oneal New Bradview Sri Lanka +1-846-706-2218 605 413 3198 vkemp@ferrell...\n",
      "\n",
      "💡 AI Answer (using local LLM):\n",
      "Based on the document content, here's what I found...\n",
      "(This would be generated by the local LLM model)\n",
      "\n",
      "❓ Your question: how much total text length\n",
      "🔍 Searching for relevant information...\n",
      "📚 Found 3 relevant chunks:\n",
      "\n",
      "--- Chunk 1 ---\n",
      "Source: /tmp/tmp7yzm_3y4.csv\n",
      "Format: csv\n",
      "Content: 2-13 https://gillespie-holder com/ 44 D3fC11A9C235Dc6 Luis Greer Cross PLC North Drew Bulgaria 001-336-025-6849x701 684 698 2911x6092 bstuart@williamson-mcclure com 2022-05-15 https://fletcher-nielsen...\n",
      "\n",
      "--- Chunk 2 ---\n",
      "Source: /tmp/tmp7yzm_3y4.csv\n",
      "Format: csv\n",
      "Content: 3x7288 (203)287-1003x5932 kmercer@wagner com 2020-07-21 http://murray org/ 88 E4Bbcd8AD81fC5f Alison Vargas Vaughn, Watts and Leach East Cristinabury Benin 365-273-8144 053-308-7653x6287 vcantu@norton...\n",
      "\n",
      "--- Chunk 3 ---\n",
      "Source: /tmp/tmp7yzm_3y4.csv\n",
      "Format: csv\n",
      "Content: rt com 2021-03-10 http://www benson-roth com/ 18 F8Aa9d6DfcBeeF8 Greg Mata Valentine LLC Lake Leslie Mozambique (701)087-2415 (195)156-1861x26241 jaredjuarez@carroll org 2022-03-26 http://pitts-cherry...\n",
      "\n",
      "💡 AI Answer (using local LLM):\n",
      "Based on the document content, here's what I found...\n",
      "(This would be generated by the local LLM model)\n",
      "\n",
      "❓ Your question: quit\n",
      "\n",
      "👋 Q&A session ended.\n",
      "\n",
      "🧪 Sample Document for Testing\n",
      "Creating sample document...\n",
      "✅ Sample document created!\n",
      "📄 Sample document content:\n",
      "\n",
      "# Sample Document\n",
      "\n",
      "This is a sample document for testing the Local RAG Document Agent.\n",
      "\n",
      "## Introduction\n",
      "The Local RAG Document Agent is designed to process and analyze documents using local LLM model...\n",
      "\n",
      "==================================================\n",
       "🚀 Powered by Local RAG Document Agent | Built with PraisonAI\n"
     ]
    }
   ],
   "source": [
    "# Main Application (Google Colab Version)\n",
    "import streamlit as st\n",
    "import pandas as pd\n",
    "import tempfile\n",
    "import os\n",
    "from typing import Dict, Any, List\n",
    "from google.colab import files\n",
    "import io\n",
    "\n",
    "# Initialize tools\n",
    "doc_tool = DocumentProcessingTool()\n",
    "vector_tool = VectorDatabaseTool()\n",
    "chunk_tool = TextChunkingTool()\n",
    "\n",
     "print(\"🤖 Local RAG Document Agent\")\n",
    "print(\"Document-based Q&A powered by local LLM and vector search!\")\n",
    "\n",
    "# Document upload section for Google Colab\n",
    "print(\"\\n📁 Upload Your Documents\")\n",
    "print(\"Please upload PDF, TXT, MD, or CSV files:\")\n",
    "\n",
    "uploaded = files.upload()\n",
    "\n",
    "if uploaded:\n",
    "    # Process each uploaded file\n",
    "    processed_docs = []\n",
    "\n",
    "    for file_name, file_content in uploaded.items():\n",
    "        print(f\"\\n📄 Processing: {file_name}\")\n",
    "\n",
    "        # Save file temporarily\n",
    "        with tempfile.NamedTemporaryFile(delete=False, suffix=os.path.splitext(file_name)[1]) as temp_file:\n",
    "            temp_file.write(file_content)\n",
    "            temp_path = temp_file.name\n",
    "\n",
    "        # Process document\n",
    "        doc_result = doc_tool.process_document(temp_path)\n",
    "\n",
    "        if \"error\" not in doc_result:\n",
    "            processed_docs.append(doc_result)\n",
    "            print(f\"✅ Successfully processed {file_name}\")\n",
    "            print(f\"   - Format: {doc_result.get('format', 'unknown')}\")\n",
    "            print(f\"   - Text length: {len(doc_result['text'])} characters\")\n",
    "\n",
    "            if 'pages' in doc_result:\n",
    "                print(f\"   - Pages: {doc_result['pages']}\")\n",
    "        else:\n",
    "            print(f\"❌ Error processing {file_name}: {doc_result['error']}\")\n",
    "\n",
    "        # Clean up temp file\n",
    "        os.unlink(temp_path)\n",
    "\n",
    "    if processed_docs:\n",
     "        print(f\"\\n📊 Processing Summary:\")\n",
    "        print(f\"- Total documents processed: {len(processed_docs)}\")\n",
    "\n",
    "        # Chunk documents\n",
     "        print(\"\\n✂️ Chunking documents for vector storage...\")\n",
    "        all_chunks = []\n",
    "        chunk_metadata = []\n",
    "\n",
    "        for i, doc in enumerate(processed_docs):\n",
    "            chunks = chunk_tool.chunk_text(doc['text'])\n",
    "            all_chunks.extend(chunks)\n",
    "\n",
    "            # Create metadata for each chunk\n",
    "            for j, chunk in enumerate(chunks):\n",
    "                chunk_metadata.append({\n",
    "                    \"source\": doc.get('file_path', f'document_{i}'),\n",
    "                    \"format\": doc.get('format', 'unknown'),\n",
    "                    \"chunk_index\": j,\n",
    "                    \"total_chunks\": len(chunks)\n",
    "                })\n",
    "\n",
    "        print(f\"- Total chunks created: {len(all_chunks)}\")\n",
    "\n",
    "        # Add to vector database\n",
    "        print(\"\\n🗄️ Adding documents to vector database...\")\n",
    "        vector_result = vector_tool.add_documents(all_chunks, chunk_metadata)\n",
    "\n",
    "        if \"error\" not in vector_result:\n",
    "            print(f\"✅ Successfully added {vector_result['documents_added']} chunks to vector database\")\n",
    "\n",
    "            # Get collection info\n",
    "            collection_info = vector_tool.get_collection_info()\n",
    "            if \"error\" not in collection_info:\n",
    "                print(f\"📈 Vector database now contains {collection_info['document_count']} total chunks\")\n",
    "\n",
    "            # Interactive Q&A\n",
    "            print(\"\\n🤔 Interactive Q&A Session\")\n",
    "            print(\"Ask questions about your documents (type 'quit' to exit):\")\n",
    "\n",
    "            while True:\n",
    "                question = input(\"\\n❓ Your question: \")\n",
    "\n",
    "                if question.lower() in ['quit', 'exit', 'q']:\n",
    "                    break\n",
    "\n",
    "                if question.strip():\n",
    "                    print(\"🔍 Searching for relevant information...\")\n",
    "\n",
    "                    # Search vector database\n",
    "                    search_result = vector_tool.search_documents(question, n_results=3)\n",
    "\n",
    "                    if \"error\" not in search_result and search_result['num_results'] > 0:\n",
    "                        print(f\"📚 Found {search_result['num_results']} relevant chunks:\")\n",
    "\n",
    "                        for i, (doc, metadata) in enumerate(zip(\n",
    "                            search_result['results']['documents'][0],\n",
    "                            search_result['results']['metadatas'][0]\n",
    "                        )):\n",
    "                            print(f\"\\n--- Chunk {i+1} ---\")\n",
    "                            print(f\"Source: {metadata.get('source', 'Unknown')}\")\n",
    "                            print(f\"Format: {metadata.get('format', 'Unknown')}\")\n",
    "                            print(f\"Content: {doc[:200]}...\")\n",
    "\n",
    "                        # Here you would integrate with local LLM for answer generation\n",
    "                        print(f\"\\n💡 AI Answer (using local LLM):\")\n",
    "                        print(\"Based on the document content, here's what I found...\")\n",
    "                        print(\"(This would be generated by the local LLM model)\")\n",
    "\n",
    "                    else:\n",
    "                        print(\"❌ No relevant information found in the documents.\")\n",
    "                        print(\"Try rephrasing your question or check if the documents contain the information you're looking for.\")\n",
    "                else:\n",
    "                    print(\"Please enter a question.\")\n",
    "\n",
    "            print(\"\\n👋 Q&A session ended.\")\n",
    "\n",
    "        else:\n",
    "            print(f\"❌ Error adding to vector database: {vector_result['error']}\")\n",
    "\n",
    "    else:\n",
    "        print(\"❌ No documents were successfully processed.\")\n",
    "\n",
    "else:\n",
    "    print(\"❌ No files uploaded. Please upload documents to get started.\")\n",
    "\n",
    "# Sample document section\n",
    "print(\"\\n🧪 Sample Document for Testing\")\n",
    "print(\"Creating sample document...\")\n",
    "\n",
    "# Small markdown document so the pipeline can be tried without uploads.\n",
    "sample_text = \"\"\"\n",
    "# Sample Document\n",
    "\n",
    "This is a sample document for testing the Local RAG Document Agent.\n",
    "\n",
    "## Introduction\n",
    "The Local RAG Document Agent is designed to process and analyze documents using local LLM models and vector search capabilities.\n",
    "\n",
    "## Features\n",
    "- Document processing for multiple formats (PDF, TXT, MD, CSV)\n",
    "- Vector database storage and retrieval\n",
    "- Local LLM inference without external API calls\n",
    "- Intelligent Q&A with source attribution\n",
    "\n",
    "## Technical Details\n",
    "The agent uses ChromaDB for vector storage and Ollama for local LLM inference.\n",
    "It can handle various document formats and provides context-aware responses.\n",
    "\n",
    "## Usage\n",
    "1. Upload your documents\n",
    "2. Ask questions about the content\n",
    "3. Get AI-powered answers with source references\n",
    "\"\"\"\n",
    "\n",
    "# Persist the sample to disk; delete=False keeps the file around after the\n",
    "# context manager closes the handle, so sample_path remains usable later.\n",
    "with tempfile.NamedTemporaryFile(mode='w', delete=False, suffix='.md') as temp_file:\n",
    "    temp_file.write(sample_text)\n",
    "    sample_path = temp_file.name\n",
    "\n",
    "print(\"✅ Sample document created!\")\n",
    "print(\"📄 Sample document content:\")\n",
    "print(sample_text[:200] + \"...\")\n",
    "\n",
    "# Footer\n",
    "print(\"\\n\" + \"=\"*50)\n",
    "print(\"🚀 Powered by Local RAG Document Agent | Built with PraisonAI\")"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "provenance": []
  },
  "kernelspec": {
   "display_name": "Python 3",
   "name": "python3"
  },
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 0
}
