{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Implmenting Chunking From Scratch"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Loaded text with 75014 characters\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from typing import List, Callable, Optional\n",
    "from abc import ABC, abstractmethod\n",
    "import re\n",
    "import tiktoken\n",
    "\n",
    "# Load the text file\n",
    "with open('../data/paul_graham_essay.txt', 'r') as file:\n",
    "    text = file.read()\n",
    "\n",
    "print(f\"Loaded text with {len(text)} characters\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "class BaseChunker(ABC):\n",
    "    @abstractmethod\n",
    "    def split_text(self, text: str) -> List[str]:\n",
    "        pass\n",
    "    \n",
    "class TextSplitter(BaseChunker, ABC):\n",
    "    def __init__(\n",
    "        self,\n",
    "        chunk_size: int = 4000,\n",
    "        chunk_overlap: int = 200,\n",
    "        length_function: Callable[[str], int] = len,\n",
    "    ) -> None:\n",
    "        self._chunk_size = chunk_size\n",
    "        self._chunk_overlap = chunk_overlap\n",
    "        self._length_function = length_function\n",
    "\n",
    "    def _merge_splits(self, splits: List[str], separator: str) -> List[str]:\n",
    "        docs = []\n",
    "        current_doc = []\n",
    "        total = 0\n",
    "        for d in splits:\n",
    "            _len = self._length_function(d)\n",
    "            if total + _len > self._chunk_size:\n",
    "                if total > self._chunk_size:\n",
    "                    print(f\"Created a chunk of size {total}, which is longer than the specified {self._chunk_size}\")\n",
    "                if current_doc:\n",
    "                    doc = self._join_docs(current_doc, separator)\n",
    "                    if doc is not None:\n",
    "                        docs.append(doc)\n",
    "                    # Keep on popping if:\n",
    "                    # - we have a larger chunk than in the chunk overlap\n",
    "                    # - or if we still have any chunks and the length is long\n",
    "                    while total > self._chunk_overlap or (\n",
    "                        total + _len > self._chunk_size and total > 0\n",
    "                    ):\n",
    "                        total -= self._length_function(current_doc[0])\n",
    "                        current_doc = current_doc[1:]\n",
    "            current_doc.append(d)\n",
    "            total += _len\n",
    "        doc = self._join_docs(current_doc, separator)\n",
    "        if doc is not None:\n",
    "            docs.append(doc)\n",
    "        return docs\n",
    "\n",
    "    def _join_docs(self, docs: List[str], separator: str) -> Optional[str]:\n",
    "        text = separator.join(docs)\n",
    "        text = text.strip()\n",
    "        if text == \"\":\n",
    "            return None\n",
    "        else:\n",
    "            return text"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Fixed Token Chunker"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "FixedTokenChunker: 166 chunks\n",
      "\n",
      "First 5 chunks:\n",
      "\n",
      "Chunk 1:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      "\n",
      "\n",
      "What I Worked On\n",
      "\n",
      "February 2021\n",
      "\n",
      "Before college the two main things I worked on, outside of school...\n",
      "\n",
      "Chunk 2:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " was then called \"data processing.\" This was in 9th grade, so I was 13 or 14. The school district's ...\n",
      "\n",
      "Chunk 3:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " we used was an early version of Fortran. You had to type programs on punch cards, then stack them i...\n",
      "\n",
      "Chunk 4:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " punched cards, and I didn't have any data stored on punched cards. The only other option was to do ...\n",
      "\n",
      "Chunk 5:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      ". On a machine without time-sharing, this was a social as well as a technical error, as the data cen...\n"
     ]
    }
   ],
   "source": [
    "class FixedTokenChunker(TextSplitter):\n",
    "    def __init__(self, chunk_size: int = 100, chunk_overlap: int = 0):\n",
    "        super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n",
    "        self._tokenizer = tiktoken.get_encoding(\"cl100k_base\")\n",
    "\n",
    "    def split_text(self, text: str) -> List[str]:\n",
    "        tokens = self._tokenizer.encode(text)\n",
    "        chunks = []\n",
    "        for i in range(0, len(tokens), self._chunk_size - self._chunk_overlap):\n",
    "            chunk = self._tokenizer.decode(tokens[i:i + self._chunk_size])\n",
    "            chunks.append(chunk)\n",
    "        return chunks\n",
    "\n",
    "# Use FixedTokenChunker\n",
    "fixed_chunker = FixedTokenChunker(chunk_size=100, chunk_overlap=0)\n",
    "fixed_chunks = fixed_chunker.split_text(text)\n",
    "\n",
    "print(f\"FixedTokenChunker: {len(fixed_chunks)} chunks\")\n",
    "\n",
    "print(\"\\nFirst 5 chunks:\")\n",
    "for i, chunk in enumerate(fixed_chunks[:5], 1):\n",
    "    print(f\"\\nChunk {i}:\\n\",\"-\"*100)\n",
    "    print(chunk[:100] + \"...\" if len(chunk) > 100 else chunk)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### Recursive Token Chunking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "RecursiveTokenChunker: 1274 chunks\n",
      "\n",
      "First 5 chunks:\n",
      "\n",
      "Chunk 1:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      "\n",
      "\n",
      "What I Worked On\n",
      "\n",
      "February 2021\n",
      "\n",
      "Before college the two main things I worked on, outside of school...\n",
      "\n",
      "Chunk 2:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " was then called \"data processing.\" This was in 9th grade, so I was 13 or 14. The school district's ...\n",
      "\n",
      "Chunk 3:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " we used was an early version of Fortran. You had to type programs on punch cards, then stack them i...\n",
      "\n",
      "Chunk 4:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " punched cards, and I didn't have any data stored on punched cards. The only other option was to do ...\n",
      "\n",
      "Chunk 5:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      ". On a machine without time-sharing, this was a social as well as a technical error, as the data cen...\n"
     ]
    }
   ],
   "source": [
    "class RecursiveTokenChunker(TextSplitter):\n",
    "    def __init__(\n",
    "        self,\n",
    "        chunk_size: int = 100,\n",
    "        chunk_overlap: int = 0,\n",
    "        separators: Optional[List[str]] = None,\n",
    "    ):\n",
    "        super().__init__(chunk_size=chunk_size, chunk_overlap=chunk_overlap)\n",
    "        self._separators = separators or [\"\\n\\n\", \"\\n\", \".\", \"?\", \"!\", \" \", \"\"]\n",
    "\n",
    "    def split_text(self, text: str) -> List[str]:\n",
    "        return self._split_text(text, self._separators)\n",
    "\n",
    "    def _split_text(self, text: str, separators: List[str]) -> List[str]:\n",
    "        final_chunks = []\n",
    "        separator = separators[-1]\n",
    "        new_separators = []\n",
    "        for i, _s in enumerate(separators):\n",
    "            if _s == \"\":\n",
    "                separator = _s\n",
    "                break\n",
    "            if re.search(re.escape(_s), text):\n",
    "                separator = _s\n",
    "                new_separators = separators[i + 1:]\n",
    "                break\n",
    "\n",
    "        splits = re.split(f\"({re.escape(separator)})\", text)\n",
    "        splits = [s for s in splits if s != \"\"]\n",
    "\n",
    "        _good_splits = []\n",
    "        for s in splits:\n",
    "            if self._length_function(s) < self._chunk_size:\n",
    "                _good_splits.append(s)\n",
    "            else:\n",
    "                if _good_splits:\n",
    "                    merged_text = self._merge_splits(_good_splits, \"\")\n",
    "                    final_chunks.extend(merged_text)\n",
    "                    _good_splits = []\n",
    "                if not new_separators:\n",
    "                    final_chunks.append(s)\n",
    "                else:\n",
    "                    other_info = self._split_text(s, new_separators)\n",
    "                    final_chunks.extend(other_info)\n",
    "        if _good_splits:\n",
    "            merged_text = self._merge_splits(_good_splits, \"\")\n",
    "            final_chunks.extend(merged_text)\n",
    "        return final_chunks\n",
    "\n",
    "# Use RecursiveTokenChunker\n",
    "recursive_chunker = RecursiveTokenChunker(chunk_size=100, chunk_overlap=0)\n",
    "recursive_chunks = recursive_chunker.split_text(text)\n",
    "\n",
    "print(f\"RecursiveTokenChunker: {len(recursive_chunks)} chunks\")\n",
    "print(\"\\nFirst 5 chunks:\")\n",
    "for i, chunk in enumerate(fixed_chunks[:5], 1):\n",
    "    print(f\"\\nChunk {i}:\\n\",\"-\"*100)\n",
    "    print(chunk[:100] + \"...\" if len(chunk) > 100 else chunk)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "!pip install openai \n",
    "!pip install anthropic\n",
    "!pip install backoff"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import openai\n",
    "from getpass import getpass\n",
    "\n",
    "if not (openai_api_key := os.getenv(\"OPENAI_API_KEY\")):\n",
    "    openai_api_key = getpass(\"🔑 Enter your OpenAI API key: \")\n",
    "openai.api_key = openai_api_key\n",
    "os.environ[\"OPENAI_API_KEY\"] = openai_api_key"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Cluster Semantic Chunking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "ClusterSemanticChunker: 680 chunks\n",
      "\n",
      "First 5 chunks:\n",
      "\n",
      "Chunk 1:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      "\n",
      "\n",
      "What I Worked On\n",
      "\n",
      "February 2021\n",
      "\n",
      "Before college the two main things I worked on, outside of school...\n",
      "\n",
      "Chunk 2:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " was then called \"data processing.\" This was in 9th grade, so I was 13 or 14. The school district's ...\n",
      "\n",
      "Chunk 3:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " we used was an early version of Fortran. You had to type programs on punch cards, then stack them i...\n",
      "\n",
      "Chunk 4:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " punched cards, and I didn't have any data stored on punched cards. The only other option was to do ...\n",
      "\n",
      "Chunk 5:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      ". On a machine without time-sharing, this was a social as well as a technical error, as the data cen...\n"
     ]
    }
   ],
   "source": [
    "import numpy as np\n",
    "from openai import OpenAI\n",
    "\n",
    "def get_openai_embedding_function(api_key):\n",
    "    client = OpenAI(api_key=api_key)\n",
    "    def embedding_function(texts):\n",
    "        response = client.embeddings.create(input=texts, model=\"text-embedding-3-small\")\n",
    "        return [item.embedding for item in response.data]\n",
    "    return embedding_function\n",
    "\n",
    "class ClusterSemanticChunker(BaseChunker):\n",
    "    def __init__(self, embedding_function, max_chunk_size=400, min_chunk_size=50):\n",
    "        self.splitter = RecursiveTokenChunker(chunk_size=min_chunk_size, chunk_overlap=0)\n",
    "        self.max_cluster = max_chunk_size // min_chunk_size\n",
    "        self.embedding_function = embedding_function\n",
    "        \n",
    "    def _get_similarity_matrix(self, sentences):\n",
    "        BATCH_SIZE = 500\n",
    "        N = len(sentences)\n",
    "        embedding_matrix = None\n",
    "        for i in range(0, N, BATCH_SIZE):\n",
    "            batch_sentences = sentences[i:i+BATCH_SIZE]\n",
    "            embeddings = self.embedding_function(batch_sentences)\n",
    "            batch_embedding_matrix = np.array(embeddings)\n",
    "            if embedding_matrix is None:\n",
    "                embedding_matrix = batch_embedding_matrix\n",
    "            else:\n",
    "                embedding_matrix = np.concatenate((embedding_matrix, batch_embedding_matrix), axis=0)\n",
    "        similarity_matrix = np.dot(embedding_matrix, embedding_matrix.T)\n",
    "        return similarity_matrix\n",
    "\n",
    "    def _calculate_reward(self, matrix, start, end):\n",
    "        sub_matrix = matrix[start:end+1, start:end+1]\n",
    "        return np.sum(sub_matrix)\n",
    "\n",
    "    def _optimal_segmentation(self, matrix, max_cluster_size):\n",
    "        mean_value = np.mean(matrix[np.triu_indices(matrix.shape[0], k=1)])\n",
    "        matrix = matrix - mean_value\n",
    "        np.fill_diagonal(matrix, 0)\n",
    "        n = matrix.shape[0]\n",
    "        dp = np.zeros(n)\n",
    "        segmentation = np.zeros(n, dtype=int)\n",
    "        for i in range(n):\n",
    "            for size in range(1, max_cluster_size + 1):\n",
    "                if i - size + 1 >= 0:\n",
    "                    reward = self._calculate_reward(matrix, i - size + 1, i)\n",
    "                    adjusted_reward = reward\n",
    "                    if i - size >= 0:\n",
    "                        adjusted_reward += dp[i - size]\n",
    "                    if adjusted_reward > dp[i]:\n",
    "                        dp[i] = adjusted_reward\n",
    "                        segmentation[i] = i - size + 1\n",
    "        clusters = []\n",
    "        i = n - 1\n",
    "        while i >= 0:\n",
    "            start = segmentation[i]\n",
    "            clusters.append((start, i))\n",
    "            i = start - 1\n",
    "        clusters.reverse()\n",
    "        return clusters\n",
    "        \n",
    "    def split_text(self, text: str) -> List[str]:\n",
    "        sentences = self.splitter.split_text(text)\n",
    "        similarity_matrix = self._get_similarity_matrix(sentences)\n",
    "        clusters = self._optimal_segmentation(similarity_matrix, max_cluster_size=self.max_cluster)\n",
    "        docs = [' '.join(sentences[start:end+1]) for start, end in clusters]\n",
    "        return docs\n",
    "\n",
    "# Use ClusterSemanticChunker\n",
    "# api_key = \"your_openai_api_key_here\"  # Replace with your actual OpenAI API key\n",
    "embedding_function = get_openai_embedding_function(api_key = openai.api_key)\n",
    "cluster_chunker = ClusterSemanticChunker(embedding_function=embedding_function, max_chunk_size=400, min_chunk_size=50)\n",
    "cluster_chunks = cluster_chunker.split_text(text)\n",
    "\n",
    "print(f\"ClusterSemanticChunker: {len(cluster_chunks)} chunks\")\n",
    "print(\"\\nFirst 5 chunks:\")\n",
    "for i, chunk in enumerate(fixed_chunks[:5], 1):\n",
    "    print(f\"\\nChunk {i}:\\n\",\"-\"*100)\n",
    "    print(chunk[:100] + \"...\" if len(chunk) > 100 else chunk)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LLM Semantic Chunking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Processing chunks: 100%|█████████▉| 2427/2428 [01:22<00:00, 29.49it/s]"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "LLMSemanticChunker: 355 chunks\n",
      "\n",
      "First 5 chunks:\n",
      "\n",
      "Chunk 1:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      "\n",
      "\n",
      "What I Worked On\n",
      "\n",
      "February 2021\n",
      "\n",
      "Before college the two main things I worked on, outside of school...\n",
      "\n",
      "Chunk 2:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " was then called \"data processing.\" This was in 9th grade, so I was 13 or 14. The school district's ...\n",
      "\n",
      "Chunk 3:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " we used was an early version of Fortran. You had to type programs on punch cards, then stack them i...\n",
      "\n",
      "Chunk 4:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      " punched cards, and I didn't have any data stored on punched cards. The only other option was to do ...\n",
      "\n",
      "Chunk 5:\n",
      " ----------------------------------------------------------------------------------------------------\n",
      ". On a machine without time-sharing, this was a social as well as a technical error, as the data cen...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "\n"
     ]
    }
   ],
   "source": [
    "import anthropic\n",
    "import backoff\n",
    "from tqdm import tqdm\n",
    "\n",
    "class AnthropicClient:\n",
    "    def __init__(self, model_name, api_key):\n",
    "        self.client = anthropic.Anthropic(api_key=api_key)\n",
    "        self.model_name = model_name\n",
    "\n",
    "    @backoff.on_exception(backoff.expo, Exception, max_tries=3)\n",
    "    def create_message(self, system_prompt, messages, max_tokens=1000, temperature=1.0):\n",
    "        try:\n",
    "            message = self.client.messages.create(\n",
    "                model=self.model_name,\n",
    "                max_tokens=max_tokens,\n",
    "                temperature=temperature,\n",
    "                system=system_prompt,\n",
    "                messages=messages\n",
    "            )\n",
    "            return message.content[0].text\n",
    "        except Exception as e:\n",
    "            print(f\"Error occurred: {e}, retrying...\")\n",
    "            raise e\n",
    "\n",
    "class OpenAIClient:\n",
    "    def __init__(self, model_name, api_key):\n",
    "        self.client = OpenAI(api_key=api_key)\n",
    "        self.model_name = model_name\n",
    "\n",
    "    @backoff.on_exception(backoff.expo, Exception, max_tries=3)\n",
    "    def create_message(self, system_prompt, messages, max_tokens=1000, temperature=1.0):\n",
    "        try:\n",
    "            gpt_messages = [\n",
    "                {\"role\": \"system\", \"content\": system_prompt}\n",
    "            ] + messages\n",
    "\n",
    "            completion = self.client.chat.completions.create(\n",
    "                model=self.model_name,\n",
    "                max_tokens=max_tokens,\n",
    "                messages=gpt_messages,\n",
    "                temperature=temperature\n",
    "            )\n",
    "\n",
    "            return completion.choices[0].message.content\n",
    "        except Exception as e:\n",
    "            print(f\"Error occurred: {e}, retrying...\")\n",
    "            raise e\n",
    "\n",
    "class LLMSemanticChunker(BaseChunker):\n",
    "    def __init__(self, organisation=\"openai\", api_key=None, model_name=None):\n",
    "        if organisation == \"openai\":\n",
    "            if model_name is None:\n",
    "                model_name = \"gpt-4\"\n",
    "            self.client = OpenAIClient(model_name, api_key=api_key)\n",
    "        elif organisation == \"anthropic\":\n",
    "            if model_name is None:\n",
    "                model_name = \"claude-3-opus-20240229\"\n",
    "            self.client = AnthropicClient(model_name, api_key=api_key)\n",
    "        else:\n",
    "            raise ValueError(\"Invalid organisation. Please choose either 'openai' or 'anthropic'.\")\n",
    "\n",
    "        self.splitter = RecursiveTokenChunker(chunk_size=50, chunk_overlap=0)\n",
    "\n",
    "    def get_prompt(self, chunked_input, current_chunk=0, invalid_response=None):\n",
    "        messages = [\n",
    "            {\n",
    "                \"role\": \"system\", \n",
    "                \"content\": (\n",
    "                    \"You are an assistant specialized in splitting text into thematically consistent sections. \"\n",
    "                    \"The text has been divided into chunks, each marked with <|start_chunk_X|> and <|end_chunk_X|> tags, where X is the chunk number. \"\n",
    "                    \"Your task is to identify the points where splits should occur, such that consecutive chunks of similar themes stay together. \"\n",
    "                    \"Respond with a list of chunk IDs where you believe a split should be made. For example, if chunks 1 and 2 belong together but chunk 3 starts a new topic, you would suggest a split after chunk 2. THE CHUNKS MUST BE IN ASCENDING ORDER.\"\n",
    "                    \"Your response should be in the form: 'split_after: 3, 5'.\"\n",
    "                )\n",
    "            },\n",
    "            {\n",
    "                \"role\": \"user\", \n",
    "                \"content\": (\n",
    "                    \"CHUNKED_TEXT: \" + chunked_input + \"\\n\\n\"\n",
    "                    \"Respond only with the IDs of the chunks where you believe a split should occur. YOU MUST RESPOND WITH AT LEAST ONE SPLIT. THESE SPLITS MUST BE IN ASCENDING ORDER AND EQUAL OR LARGER THAN: \" + str(current_chunk)+\".\" + (f\"\\n\\The previous response of {invalid_response} was invalid. DO NOT REPEAT THIS ARRAY OF NUMBERS. Please try again.\" if invalid_response else \"\")\n",
    "                )\n",
    "            },\n",
    "        ]\n",
    "        return messages\n",
    "\n",
    "    def split_text(self, text):\n",
    "        import re\n",
    "\n",
    "        chunks = self.splitter.split_text(text)\n",
    "\n",
    "        split_indices = []\n",
    "        current_chunk = 0\n",
    "\n",
    "        with tqdm(total=len(chunks), desc=\"Processing chunks\") as pbar:\n",
    "            while True:\n",
    "                if current_chunk >= len(chunks) - 4:\n",
    "                    break\n",
    "\n",
    "                token_count = 0\n",
    "                chunked_input = ''\n",
    "\n",
    "                for i in range(current_chunk, len(chunks)):\n",
    "                    token_count += len(chunks[i].split())\n",
    "                    chunked_input += f\"<|start_chunk_{i+1}|>{chunks[i]}<|end_chunk_{i+1}|>\"\n",
    "                    if token_count > 800:\n",
    "                        break\n",
    "\n",
    "                messages = self.get_prompt(chunked_input, current_chunk)\n",
    "                while True:\n",
    "                    result_string = self.client.create_message(messages[0]['content'], messages[1:], max_tokens=200, temperature=0.2)\n",
    "                    split_after_line = [line for line in result_string.split('\\n') if 'split_after:' in line][0]\n",
    "                    numbers = re.findall(r'\\d+', split_after_line)\n",
    "                    numbers = list(map(int, numbers))\n",
    "\n",
    "                    if not (numbers != sorted(numbers) or any(number < current_chunk for number in numbers)):\n",
    "                        break\n",
    "                    else:\n",
    "                        messages = self.get_prompt(chunked_input, current_chunk, numbers)\n",
    "                        print(\"Response: \", result_string)\n",
    "                        print(\"Invalid response. Please try again.\")\n",
    "\n",
    "                split_indices.extend(numbers)\n",
    "                current_chunk = numbers[-1]\n",
    "                pbar.update(current_chunk - pbar.n)\n",
    "\n",
    "        chunks_to_split_after = [i - 1 for i in split_indices]\n",
    "\n",
    "        docs = []\n",
    "        current_chunk = ''\n",
    "        for i, chunk in enumerate(chunks):\n",
    "            current_chunk += chunk + ' '\n",
    "            if i in chunks_to_split_after:\n",
    "                docs.append(current_chunk.strip())\n",
    "                current_chunk = ''\n",
    "        if current_chunk:\n",
    "            docs.append(current_chunk.strip())\n",
    "\n",
    "        return docs\n",
    "\n",
    "# Use LLMSemanticChunker\n",
    "# api_key = \"your_openai_api_key_here\"  # Replace with your actual OpenAI API key\n",
    "api_key = openai.api_key\n",
    "llm_chunker = LLMSemanticChunker(organisation=\"openai\", api_key=api_key)\n",
    "llm_chunks = llm_chunker.split_text(text)\n",
    "\n",
    "print(f\"LLMSemanticChunker: {len(llm_chunks)} chunks\")\n",
    "print(\"\\nFirst 5 chunks:\")\n",
    "for i, chunk in enumerate(fixed_chunks[:5], 1):\n",
    "    print(f\"\\nChunk {i}:\\n\",\"-\"*100)\n",
    "    print(chunk[:100] + \"...\" if len(chunk) > 100 else chunk)\n",
    "    \n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 18,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Output saved to llm_semantic_chunker_output.json\n"
     ]
    }
   ],
   "source": []
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## Kamradt Modified Chunker"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 19,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "KamradtModifiedChunker: 49 chunks\n",
      "First chunk: What I Worked On\n",
      "\n",
      "February 2021 Before college the two main things I worked on, outside of school, w...\n"
     ]
    }
   ],
   "source": [
    "class KamradtModifiedChunker(BaseChunker):\n",
    "    def __init__(self, avg_chunk_size=400, min_chunk_size=50, embedding_function=None):\n",
    "        self.splitter = RecursiveTokenChunker(\n",
    "            chunk_size=min_chunk_size,\n",
    "            chunk_overlap=0,\n",
    "        )\n",
    "        \n",
    "        self.avg_chunk_size = avg_chunk_size\n",
    "        if embedding_function is None:\n",
    "            embedding_function = get_openai_embedding_function(api_key)  # Use the same API key as before\n",
    "        self.embedding_function = embedding_function\n",
    "\n",
    "    def combine_sentences(self, sentences, buffer_size=1):\n",
    "        for i in range(len(sentences)):\n",
    "            combined_sentence = ''\n",
    "            for j in range(i - buffer_size, i + 1 + buffer_size):\n",
    "                if 0 <= j < len(sentences):\n",
    "                    combined_sentence += sentences[j]['sentence'] + ' '\n",
    "            sentences[i]['combined_sentence'] = combined_sentence.strip()\n",
    "        return sentences\n",
    "\n",
    "    def calculate_cosine_distances(self, sentences):\n",
    "        BATCH_SIZE = 500\n",
    "        distances = []\n",
    "        embedding_matrix = None\n",
    "        for i in range(0, len(sentences), BATCH_SIZE):\n",
    "            batch_sentences = sentences[i:i+BATCH_SIZE]\n",
    "            batch_sentences = [sentence['combined_sentence'] for sentence in batch_sentences]\n",
    "            embeddings = self.embedding_function(batch_sentences)\n",
    "            batch_embedding_matrix = np.array(embeddings)\n",
    "            if embedding_matrix is None:\n",
    "                embedding_matrix = batch_embedding_matrix\n",
    "            else:\n",
    "                embedding_matrix = np.concatenate((embedding_matrix, batch_embedding_matrix), axis=0)\n",
    "\n",
    "        norms = np.linalg.norm(embedding_matrix, axis=1, keepdims=True)\n",
    "        embedding_matrix = embedding_matrix / norms\n",
    "        similarity_matrix = np.dot(embedding_matrix, embedding_matrix.T)\n",
    "        \n",
    "        for i in range(len(sentences) - 1):\n",
    "            similarity = similarity_matrix[i, i + 1]\n",
    "            distance = 1 - similarity\n",
    "            distances.append(distance)\n",
    "            sentences[i]['distance_to_next'] = distance\n",
    "\n",
    "        return distances, sentences\n",
    "\n",
    "    def split_text(self, text):\n",
    "        \"\"\"Split ``text`` into semantically coherent chunks.\n",
    "\n",
    "        Sentences are split, combined with neighbouring context, and embedded;\n",
    "        a cosine-distance threshold is then binary-searched so that the number\n",
    "        of breakpoints yields chunks averaging ~self.avg_chunk_size words.\n",
    "        Returns a list of chunk strings.\n",
    "        \"\"\"\n",
    "        sentences_strips = self.splitter.split_text(text)\n",
    "        sentences = [{'sentence': x, 'index': i} for i, x in enumerate(sentences_strips)]\n",
    "        # Buffer size 3 — presumably merges each sentence with its neighbours\n",
    "        # before embedding; see combine_sentences for the exact semantics.\n",
    "        sentences = self.combine_sentences(sentences, 3)\n",
    "        distances, sentences = self.calculate_cosine_distances(sentences)\n",
    "\n",
    "        # Target breakpoint count: whitespace-token total divided by the desired\n",
    "        # average chunk size (integer division — may be 0 for short texts).\n",
    "        total_tokens = sum(len(sentence['sentence'].split()) for sentence in sentences)\n",
    "        number_of_cuts = total_tokens // self.avg_chunk_size\n",
    "\n",
    "        # Binary-search a distance threshold in [0, 1] such that at most\n",
    "        # number_of_cuts consecutive-sentence distances exceed it.\n",
    "        lower_limit, upper_limit = 0.0, 1.0\n",
    "        distances_np = np.array(distances)\n",
    "\n",
    "        while upper_limit - lower_limit > 1e-6:\n",
    "            threshold = (upper_limit + lower_limit) / 2.0\n",
    "            num_points_above_threshold = np.sum(distances_np > threshold)\n",
    "            \n",
    "            if num_points_above_threshold > number_of_cuts:\n",
    "                lower_limit = threshold\n",
    "            else:\n",
    "                upper_limit = threshold\n",
    "\n",
    "        # Every position whose distance exceeds the final threshold becomes a\n",
    "        # chunk boundary (the boundary sentence is kept in the earlier chunk).\n",
    "        indices_above_thresh = [i for i, x in enumerate(distances) if x > threshold]\n",
    "        \n",
    "        chunks = []\n",
    "        start_index = 0\n",
    "        for index in indices_above_thresh:\n",
    "            group = sentences[start_index:index + 1]\n",
    "            combined_text = ' '.join([d['sentence'] for d in group])\n",
    "            chunks.append(combined_text)\n",
    "            start_index = index + 1\n",
    "\n",
    "        # Flush any trailing sentences after the last breakpoint.\n",
    "        if start_index < len(sentences):\n",
    "            combined_text = ' '.join([d['sentence'] for d in sentences[start_index:]])\n",
    "            chunks.append(combined_text)\n",
    "\n",
    "        return chunks\n",
    "\n",
    "# Run the KamradtModifiedChunker over the loaded essay text\n",
    "kamradt_chunker = KamradtModifiedChunker(avg_chunk_size=300, min_chunk_size=50)\n",
    "kamradt_chunks = kamradt_chunker.split_text(text)\n",
    "\n",
    "print(f\"KamradtModifiedChunker: {len(kamradt_chunks)} chunks\")\n",
    "# Preview the first chunk, truncated to 100 characters with an ellipsis\n",
    "first_chunk = kamradt_chunks[0]\n",
    "preview = first_chunk[:100] + \"...\" if len(first_chunk) > 100 else first_chunk\n",
    "print(\"First chunk:\", preview)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Output saved to llm_semantic_chunker_output.json\n"
     ]
    }
   ],
   "source": [
    "import json\n",
    "\n",
    "# Persist a small sample of the LLMSemanticChunker results for inspection.\n",
    "# Bug fix: this previously iterated kamradt_chunks (output of a different\n",
    "# chunker) and included every chunk despite the key promising only five.\n",
    "output = {\n",
    "    \"chunker\": \"LLMSemanticChunker\",\n",
    "    \"total_chunks\": len(llm_chunks),\n",
    "    \"first_5_chunks\": [\n",
    "        {\n",
    "            \"chunk_number\": i,\n",
    "            \"content\": chunk\n",
    "        }\n",
    "        for i, chunk in enumerate(llm_chunks[:5], 1)\n",
    "    ]\n",
    "}\n",
    "\n",
    "# Save the output as a JSON file\n",
    "with open('llm_semantic_chunker_output.json', 'w') as f:\n",
    "    json.dump(output, f, indent=2)\n",
    "\n",
    "print(\"Output saved to llm_semantic_chunker_output.json\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## LlamaIndex-Based Chunking"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standard library imports\n",
    "import logging\n",
    "import sys\n",
    "import os\n",
    "\n",
    "# Third-party imports\n",
    "from dotenv import load_dotenv\n",
    "from IPython.display import Markdown, display\n",
    "\n",
    "# Qdrant client import\n",
    "import qdrant_client\n",
    "\n",
    "# NOTE(review): logging, sys, Markdown/display, qdrant_client, VectorStoreIndex,\n",
    "# QdrantVectorStore and OpenAI are imported but not used in the cells below —\n",
    "# confirm they are needed before removing.\n",
    "\n",
    "# LlamaIndex core imports\n",
    "from llama_index.core import VectorStoreIndex, SimpleDirectoryReader\n",
    "from llama_index.core import Settings\n",
    "\n",
    "# LlamaIndex vector store import\n",
    "from llama_index.vector_stores.qdrant import QdrantVectorStore\n",
    "\n",
    "# Embedding model imports\n",
    "from llama_index.embeddings.fastembed import FastEmbedEmbedding\n",
    "from llama_index.embeddings.openai import OpenAIEmbedding\n",
    "\n",
    "# LLM import\n",
    "from llama_index.llms.openai import OpenAI\n",
    "\n",
    "# Load environment variables\n",
    "load_dotenv()\n",
    "\n",
    "# Get OpenAI API key from environment variables\n",
    "OPENAI_API_KEY = os.getenv(\"OPENAI_API_KEY\")\n",
    "\n",
    "# Set the embedding model\n",
    "# Option 1: Use FastEmbed with BAAI/bge-base-en-v1.5 model (default)\n",
    "Settings.embed_model = FastEmbedEmbedding(model_name=\"BAAI/bge-base-en-v1.5\")\n",
    "\n",
    "# Option 2: Use OpenAI's embedding model (commented out)\n",
    "# If you want to use OpenAI's embedding model, uncomment the following line:\n",
    "# Settings.embed_model = OpenAIEmbedding(embed_batch_size=10, api_key=OPENAI_API_KEY)\n",
    "\n",
    "# Qdrant configuration (commented out)\n",
    "# If you're using Qdrant, uncomment and set these variables:\n",
    "# QDRANT_CLOUD_ENDPOINT = os.getenv(\"QDRANT_CLOUD_ENDPOINT\")\n",
    "# QDRANT_API_KEY = os.getenv(\"QDRANT_API_KEY\")\n",
    "\n",
    "# Note: Remember to add QDRANT_CLOUD_ENDPOINT and QDRANT_API_KEY to your .env file if using Qdrant Hosted version"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Load the documents from ../data/ (recursively) using SimpleDirectoryReader\n",
    "from llama_index.core import Document\n",
    "reader = SimpleDirectoryReader(\"../data/\" , recursive=True)\n",
    "documents = reader.load_data(show_progress=True)\n",
    "\n",
    "# Combine all loaded documents into a single Document for later chunking/splitting.\n",
    "# NOTE(review): this rebinds `documents` from a list to a single Document; the\n",
    "# ingestion cell below relies on this by wrapping it back into a list.\n",
    "documents = Document(text=\"\\n\\n\".join([doc.text for doc in documents]))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "## Ingest the data: set up an ingestion pipeline\n",
    "\n",
    "## Alternative node parsers are left commented out below for experimentation\n",
    "\n",
    "from llama_index.core.node_parser import TokenTextSplitter\n",
    "from llama_index.core.node_parser import SentenceSplitter\n",
    "from llama_index.core.node_parser import MarkdownNodeParser\n",
    "from llama_index.core.node_parser import SemanticSplitterNodeParser\n",
    "from llama_index.core.ingestion import IngestionPipeline\n",
    "\n",
    "pipeline = IngestionPipeline(\n",
    "    transformations=[\n",
    "        # MarkdownNodeParser(include_metadata=True),\n",
    "        # TokenTextSplitter(chunk_size=500, chunk_overlap=20),\n",
    "        SentenceSplitter(chunk_size=1024, chunk_overlap=20),\n",
    "        # SemanticSplitterNodeParser(buffer_size=1, breakpoint_percentile_threshold=95 , embed_model=Settings.embed_model),\n",
    "        Settings.embed_model,\n",
    "    ]\n",
    ")\n",
    "\n",
    "# NOTE(review): no vector_store is passed to this pipeline, so the embedded\n",
    "# nodes are returned in memory rather than written to a vector DB — confirm\n",
    "# intent, since the print below claims they were added to a vector DB.\n",
    "nodes = pipeline.run(documents=[documents] , show_progress=True)\n",
    "print(\"Number of chunks added to vector DB :\",len(nodes))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Summarize instead of dumping every node — the full repr floods the output\n",
    "print(f\"Total nodes: {len(nodes)}\")\n",
    "print(nodes[0] if nodes else \"<no nodes produced>\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "**Reference:**\n",
    "\n",
    "Smith, Brandon, and Anton Troynikov. \"Evaluating Chunking Strategies for Retrieval.\" Chroma, July 2024. [https://research.trychroma.com/evaluating-chunking](https://research.trychroma.com/evaluating-chunking).\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "build-venv",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.9"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
