{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "-zE1h0uQV7uT"
},
"source": [
"# Install Packages and Setup Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "QPJzr-I9XQ7l",
"outputId": "440f5d93-1cac-4a70-e244-5e8af314464e"
},
"outputs": [],
"source": [
"!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-embeddings-cohere llama-index-readers-web cohere==4.47 tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"id": "riuXwpSPcvWC"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set the \"OPENAI_API_KEY\" and \"CO_API_KEY\" (Cohere) in the Python environment.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"CO_API_KEY\"] = \"\"\n",
"cohere_key = os.environ[\"CO_API_KEY\"]"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "jIEeZzqLbz0J"
},
"outputs": [],
"source": [
"# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
"\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Bkgi2OrYzF7q"
},
"source": [
"# Load a Model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "9oGT6crooSSj"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"from llama_index.llms.openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0BwVuJXlzHVL"
},
"source": [
"# Create a VectoreStore"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "SQP87lHczHKc"
},
"outputs": [],
"source": [
"import chromadb\n",
"\n",
"# create client and a new collection\n",
"# chromadb.EphemeralClient saves data in-memory.\n",
"chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
]
},
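{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "As an optional sanity check (not part of the original flow), we can confirm that the freshly created collection starts empty. `count()` is a standard Chroma collection method."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Optional sanity check: a newly created collection should contain no records yet.\n",
  "print(chroma_collection.count())"
 ]
},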
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "zAaGcYMJzHAN"
},
"outputs": [],
"source": [
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"\n",
"# Define a storage context object using the created vector database.\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "I9JbAzFcjkpn"
},
"source": [
"# Load the Dataset (CSV)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ceveDuYdWCYk"
},
"source": [
"## Download"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "eZwf6pv7WFmD"
},
"source": [
"The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wl_pbPvMlv1h",
"outputId": "f844a7a8-484b-4693-8715-42506778b1de"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 169k 100 169k 0 0 768k 0 --:--:-- --:--:-- --:--:-- 770k\n"
]
}
],
"source": [
"!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "VWBLtDbUWJfA"
},
"source": [
"## Read File"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "0Q9sxuW0g3Gd",
"outputId": "473050f8-0640-4e7c-91e7-3ea3485cfb51"
},
"outputs": [
{
"data": {
"text/plain": [
"14"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import csv\n",
"\n",
"rows = []\n",
"\n",
"# Load the file as a JSON\n",
"with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
" csv_reader = csv.reader(file)\n",
"\n",
" for idx, row in enumerate( csv_reader ):\n",
" if idx == 0: continue; # Skip header row\n",
" rows.append( row )\n",
"\n",
"# The number of characters in the dataset.\n",
"len( rows )"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S17g2RYOjmf2"
},
"source": [
"# Convert to Document obj"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"id": "YizvmXPejkJE"
},
"outputs": [],
"source": [
"from llama_index.core import Document\n",
"\n",
"# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
"documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
]
},
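{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "A quick optional look at the first Document confirms that the text and metadata were mapped as intended:"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Optional check: the metadata dictionary and the start of the article text.\n",
  "print(documents[0].metadata)\n",
  "print(documents[0].text[:150])"
 ]
},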
{
"cell_type": "markdown",
"metadata": {
"id": "qjuLbmFuWsyl"
},
"source": [
"# Transforming"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"id": "9z3t70DGWsjO"
},
"outputs": [],
"source": [
"from llama_index.core.text_splitter import TokenTextSplitter\n",
"\n",
"# Define the splitter object that split the text into segments with 512 tokens,\n",
"# with a 128 overlap between the segments.\n",
"text_splitter = TokenTextSplitter(\n",
" separator=\" \", chunk_size=512, chunk_overlap=128\n",
")"
]
},
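{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "Before running the full pipeline, it can help to preview how the splitter behaves. The following optional sketch (assuming the `documents` list from the previous section) splits the first article with `split_text`, the splitter's method for chunking a raw string."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Optional preview (assumes `documents` is defined above): split the first\n",
  "# article's text and inspect the resulting chunks.\n",
  "sample_chunks = text_splitter.split_text(documents[0].text)\n",
  "print(\"Number of chunks:\", len(sample_chunks))\n",
  "print(\"First chunk preview:\", sample_chunks[0][:200])"
 ]
},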
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 413,
"referenced_widgets": [
"4bb1e341a77d41c9aca0e6680911fb43",
"1d1faa15f5564b68b948eaffa58626b3",
"df22a67ae80b4673b708eea74646be61",
"3657dc19b6ac477b9f05bb6519271473",
"9045e402f0344428acc085d63df7ff03",
"f57a9ac0d924408fbaaac795c172862e",
"4cb8ba074b254e91b8877cc87ae0d279",
"cbd3e1411b2c4eeb943243c9d45245c4",
"04af736f84044e37aa6599aa708a77bc",
"8d35ab8c65ba47e1be446b98f0942ac4",
"75e40756175f463e874630f229ef4066",
"a0dd5f2c99b2407f9f5705587976ae76",
"8728ca516bd0474586b19e0c9b457499",
"aac433a9a64c48dfb18d7a01f64d3b27",
"4802a63f700e48fca16b5d89fbab333d",
"3f55aef52aee4e77864d53e3197c3cc3",
"f41df4b6ab4c4132b0d20232002f0294",
"3a621edd23354ea5924189885c97dee4",
"73d34cae940e4748a7b3127351925e65",
"2dc4a6c935ac4ef38ed9030608bd4b2f",
"4fcebf4a9ef54729889cc6ad4cbe5d10",
"195aa202b03a42a3a674e9da2f13d878"
]
},
"id": "P9LDJ7o-Wsc-",
"outputId": "72b67575-2d55-4145-90be-a367f128fa44"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parsing nodes: 100%|██████████| 14/14 [00:00<00:00, 28.69it/s]\n",
"100%|██████████| 108/108 [01:02<00:00, 1.72it/s]\n",
"100%|██████████| 108/108 [01:09<00:00, 1.55it/s]\n",
"100%|██████████| 108/108 [01:24<00:00, 1.29it/s]\n",
"Generating embeddings: 100%|██████████| 108/108 [00:01<00:00, 56.53it/s]\n"
]
}
],
"source": [
"from llama_index.core.extractors import (\n",
" SummaryExtractor,\n",
" QuestionsAnsweredExtractor,\n",
" KeywordExtractor,\n",
")\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core.ingestion import IngestionPipeline\n",
"\n",
"# Create the pipeline to apply the transformation on each chunk,\n",
"# and store the transformed text in the chroma vector store.\n",
"pipeline = IngestionPipeline(\n",
" transformations=[\n",
" text_splitter,\n",
" QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
" SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
" KeywordExtractor(keywords=10, llm=llm),\n",
" OpenAIEmbedding(),\n",
" ],\n",
" vector_store=vector_store\n",
")\n",
"\n",
"# Run the transformation pipeline.\n",
"nodes = pipeline.run(documents=documents, show_progress=True);"
]
},
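{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "To see what the extractors produced, we can inspect the metadata of a single node. The sketch below only prints the metadata keys; the exact key names (such as `questions_this_excerpt_can_answer`, `section_summary`, and `excerpt_keywords`) depend on the extractor defaults in this llama-index version."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Inspect the metadata the extractors attached to the first node.\n",
  "# The key names depend on the extractor defaults in this llama-index version.\n",
  "print(list(nodes[0].metadata.keys()))"
 ]
},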
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "mPGa85hM2P3P",
"outputId": "4586ad85-71bd-4407-a584-326941a5f474"
},
"outputs": [
{
"data": {
"text/plain": [
"108"
]
},
"execution_count": 12,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len( nodes )"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "OeeG3jxT0taW",
"outputId": "8a2e3c63-c346-4034-8147-f2f1f996c326"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"updating: mini-llama-articles/ (stored 0%)\n",
"updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
" adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/ (stored 0%)\n",
" adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/data_level0.bin (deflated 100%)\n",
" adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/length.bin (deflated 48%)\n",
" adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/link_lists.bin (stored 0%)\n",
" adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/header.bin (deflated 61%)\n"
]
}
],
"source": [
"# Compress the vector store directory to a zip file to be able to download and use later.\n",
"!zip -r vectorstore.zip mini-llama-articles"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OWaT6rL7ksp8"
},
"source": [
"# Load Indexes"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "6fFGWiz3hoTd"
},
"source": [
"If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "XxPMJ4tq06qx",
"outputId": "8445e40a-b3c6-44ff-dfde-37cd4c73ffa2"
},
"outputs": [],
"source": [
"# !unzip vectorstore.zip"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"id": "mXi56KTXk2sp"
},
"outputs": [],
"source": [
"# Load the vector store from the local storage.\n",
"db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"id": "jKXURvLtkuTS"
},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex\n",
"\n",
"# Create the index based on the vector store.\n",
"index = VectorStoreIndex.from_vector_store(vector_store)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "8JPD8yAinVSq"
},
"source": [
"# Query Dataset"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"id": "BsFfFpVgn01h"
},
"outputs": [],
"source": [
"from llama_index.postprocessor.cohere_rerank import CohereRerank\n",
"\n",
"# Define the Cohere Reranking object to return only the first two highest ranking chunks.\n",
"cohere_rerank = CohereRerank(top_n=2, api_key=cohere_key)"
]
},
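{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "To see the reranker in isolation (an optional sketch, not part of the original notebook), we can retrieve a larger candidate set and let Cohere reorder it. `postprocess_nodes` accepts the retrieved nodes along with the query string (via `query_str`)."
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Optional sketch: retrieve 10 candidates, then rerank them down to the top 2.\n",
  "sample_query = \"How many parameters does the LLaMA 2 model have?\"\n",
  "retriever = index.as_retriever(similarity_top_k=10)\n",
  "candidates = retriever.retrieve(sample_query)\n",
  "reranked = cohere_rerank.postprocess_nodes(candidates, query_str=sample_query)\n",
  "for node in reranked:\n",
  "    print(node.score, \"-\", node.metadata[\"title\"])"
 ]
},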
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"id": "b0gue7cyctt1"
},
"outputs": [],
"source": [
"# Define the ServiceCotext object to tie the LLM for generating final answer,\n",
"# and the embedding model to help with retrieving related nodes.\n",
"# The `node_postprocessors` function will be applied to the retrieved nodes.\n",
"query_engine = index.as_query_engine(\n",
" similarity_top_k=10,\n",
" node_postprocessors=[cohere_rerank]\n",
")\n",
"\n",
"res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 53
},
"id": "VKK3jMprctre",
"outputId": "3acce09e-faa2-4acd-ac8f-f62380d91567"
},
"outputs": [
{
"data": {
"text/plain": [
"'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
]
},
"execution_count": 24,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"res.response"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "nvSmOtqBoCY2",
"outputId": "052a70df-d98d-4a87-bb7c-9e56d34db7f7"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Node ID\t 6fea54fa-138b-4931-9e37-42fe16fca62a\n",
"Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
"Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
"Score\t 0.90582335\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 99774ac6-5d8e-492b-8c94-4e9717afd2fc\n",
"Title\t Exploring Large Language Models -Part 3\n",
"Text\t LM model training via UnSupervised learning). Note that this model was loaded in 4-bit, making it runnable on a single T4 GPU and trained with QLoRa. With QLoRA, only a fraction of the adapter weights are trained and summed with the existing frozen pre-trained weights of the model during inference. Here is an illustrative Colab notebook. You can see that training the model with just the text as is, does not result in proper output to questions. The answers are not affected by the training data. Take 2: Instruct Fine-tuning with QLoRa Instruction Tuning concept is a higher-level training concept introduced by this paper FineTuned Language Models Are Zero shot Learners (FLAN) We leverage the intuition that NLP tasks can be described via natural language instructions, such as \"Is the sentiment of this movie review positive or negative?\" or \"Translate 'how are you' into Chinese.\" We take a pre-trained language model of 137B parameters and perform instruction tuning ... Since we use QLoRa we are effectively closely following this paper - QLORA: Efficient Finetuning of Quantized LLMs concerning the training data set, the format that the authors used to train their Gauanco model This is the format for the Llama2 model and will be different for others. One of the hardest problems of training is finding or creating a good quality data set to train. In our case, converting the available training data set to the instruction data set. Since our use case is Closed Book QA, we need to convert this to a QA format. Using older NLP methods like NER (Named Entity Recognition) and then using that to create a QA dataset was not effective. This is where the Self-instruct concept could be used However previous to Llama2, the best-performing model was the GPT 3/4 model via ChatGPT or its API and using these models to do the same was expensive. The 7 billion model of Llama2 has sufficient NLU (Natural Language Understanding) to create output based on a particular format. Running this in 4-bit mode via Quantisation makes it feasible compute-wise to run this on a large data set and convert it to a QA dataset. This was the prompt used. The\n",
"Score\t 0.88363826\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"# Show the retrieved nodes\n",
"for src in res.source_nodes:\n",
" print(\"Node ID\\t\", src.node_id)\n",
" print(\"Title\\t\", src.metadata['title'])\n",
" print(\"Text\\t\", src.text)\n",
" print(\"Score\\t\", src.score)\n",
" print(\"-_\"*20)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "iMkpzH7vvb09"
},
"source": [
"# Evaluate"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "H8a3eKgKvckU",
"outputId": "cb004dc9-6b49-4d10-a790-1d5257318cd7"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 108/108 [04:30<00:00, 2.51s/it]\n"
]
}
],
"source": [
"from llama_index.core.evaluation import generate_question_context_pairs\n",
"from llama_index.llms.openai import OpenAI\n",
"\n",
"# Create questions for each segment. These questions will be used to\n",
"# assess whether the retriever can accurately identify and return the\n",
"# corresponding segment when queried.\n",
"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
"rag_eval_dataset = generate_question_context_pairs(\n",
" nodes,\n",
" llm=llm,\n",
" num_questions_per_chunk=1\n",
")\n",
"\n",
"# We can save the evaluation dataset as a json file for later use.\n",
"rag_eval_dataset.save_json(\"./rag_eval_dataset_rerank.json\")"
]
},
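{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "The generated dataset maps query IDs to question strings and to the node IDs the retriever is expected to return. A quick optional peek at one entry (assuming generation succeeded):"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Peek at one generated question and the node(s) it should retrieve.\n",
  "sample_query_id = list(rag_eval_dataset.queries.keys())[0]\n",
  "print(rag_eval_dataset.queries[sample_query_id])\n",
  "print(rag_eval_dataset.relevant_docs[sample_query_id])"
 ]
},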
{
"cell_type": "markdown",
"metadata": {
"id": "QvZBMpsXiWEw"
},
"source": [
"If you have uploaded the generated question JSON file, please uncomment the code in the next cell block. This will avoid the need to generate the questions manually, saving you time and effort."
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"id": "3sA1K84U254o"
},
"outputs": [],
"source": [
"# from llama_index.finetuning.embeddings.common import (\n",
"# EmbeddingQAFinetuneDataset,\n",
"# )\n",
"# rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
"# \"./rag_eval_dataset_rerank.json\"\n",
"# )"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"id": "H7ubvcbk27vr"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# A simple function to show the evaluation result.\n",
"def display_results_retriever(name, eval_results):\n",
" \"\"\"Display results from evaluate.\"\"\"\n",
"\n",
" metric_dicts = []\n",
" for eval_result in eval_results:\n",
" metric_dict = eval_result.metric_vals_dict\n",
" metric_dicts.append(metric_dict)\n",
"\n",
" full_df = pd.DataFrame(metric_dicts)\n",
"\n",
" hit_rate = full_df[\"hit_rate\"].mean()\n",
" mrr = full_df[\"mrr\"].mean()\n",
"\n",
" metric_df = pd.DataFrame(\n",
" {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
" )\n",
"\n",
" return metric_df"
]
},
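{
 "cell_type": "markdown",
 "metadata": {},
 "source": [
  "As a reminder of what these metrics mean: hit rate is the fraction of queries whose expected chunk appears anywhere in the retrieved list, while MRR (mean reciprocal rank) averages 1/rank of the first correct chunk. A tiny worked example with hypothetical ranks, just for illustration:"
 ]
},
{
 "cell_type": "code",
 "execution_count": null,
 "metadata": {},
 "outputs": [],
 "source": [
  "# Hypothetical illustration of hit rate and MRR over three queries.\n",
  "# First correct chunk at rank 1, rank 3, and not retrieved at all (None).\n",
  "ranks = [1, 3, None]\n",
  "hit_rate = sum(r is not None for r in ranks) / len(ranks)        # 2/3\n",
  "mrr = sum(1.0 / r for r in ranks if r is not None) / len(ranks)  # (1 + 1/3) / 3\n",
  "print(hit_rate, mrr)"
 ]
},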
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "uNLxDxoc2-Ac",
"outputId": "f42dc98d-789f-4779-c693-0603cd43e4c9"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_2 0.665975 0.54668\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_4 0.782158 0.582815\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_6 0.8361 0.59305\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_8 0.854772 0.595606\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_10 0.871369 0.597404\n"
]
}
],
"source": [
"from llama_index.core.evaluation import RetrieverEvaluator\n",
"\n",
"# We can evaluate the retievers with different top_k values.\n",
"for i in [2, 4, 6, 8, 10]:\n",
" retriever = index.as_retriever(similarity_top_k=i, node_postprocessors=[cohere_rerank])\n",
" retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
" [\"mrr\", \"hit_rate\"], retriever=retriever\n",
" )\n",
" eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
" print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ikMYkBATFY3l"
},
"source": [
"It's important to keep in mind that all the results above are based on only two samples even when the retriever fetch 10 items from the vector store. So, it means that instead of passing 10 chunks of data which translates into more API usage and higher cost, we will get the same quality by passing 2 chunk of data.\n",
"\n",
"The bot's hit rate without Cohere Reranking using two chunks is 0.65, while we get the 0.87 hit rate using two chunks after the Cohere's post processing."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "-DMSFJI8F6jl"
},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"authorship_tag": "ABX9TyNPhIDuwnBNGZxkxkMnLtTw",
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"04af736f84044e37aa6599aa708a77bc": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"195aa202b03a42a3a674e9da2f13d878": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"1d1faa15f5564b68b948eaffa58626b3": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_f57a9ac0d924408fbaaac795c172862e",
"placeholder": "",
"style": "IPY_MODEL_4cb8ba074b254e91b8877cc87ae0d279",
"value": "Parsing nodes: 100%"
}
},
"2dc4a6c935ac4ef38ed9030608bd4b2f": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"3657dc19b6ac477b9f05bb6519271473": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_8d35ab8c65ba47e1be446b98f0942ac4",
"placeholder": "",
"style": "IPY_MODEL_75e40756175f463e874630f229ef4066",
"value": " 14/14 [00:01<00:00, 10.94it/s]"
}
},
"3a621edd23354ea5924189885c97dee4": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"3f55aef52aee4e77864d53e3197c3cc3": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4802a63f700e48fca16b5d89fbab333d": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_4fcebf4a9ef54729889cc6ad4cbe5d10",
"placeholder": "",
"style": "IPY_MODEL_195aa202b03a42a3a674e9da2f13d878",
"value": " 108/108 [00:07<00:00, 10.36it/s]"
}
},
"4bb1e341a77d41c9aca0e6680911fb43": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_1d1faa15f5564b68b948eaffa58626b3",
"IPY_MODEL_df22a67ae80b4673b708eea74646be61",
"IPY_MODEL_3657dc19b6ac477b9f05bb6519271473"
],
"layout": "IPY_MODEL_9045e402f0344428acc085d63df7ff03"
}
},
"4cb8ba074b254e91b8877cc87ae0d279": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"4fcebf4a9ef54729889cc6ad4cbe5d10": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"73d34cae940e4748a7b3127351925e65": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"75e40756175f463e874630f229ef4066": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"8728ca516bd0474586b19e0c9b457499": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_f41df4b6ab4c4132b0d20232002f0294",
"placeholder": "",
"style": "IPY_MODEL_3a621edd23354ea5924189885c97dee4",
"value": "Generating embeddings: 100%"
}
},
"8d35ab8c65ba47e1be446b98f0942ac4": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9045e402f0344428acc085d63df7ff03": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"a0dd5f2c99b2407f9f5705587976ae76": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_8728ca516bd0474586b19e0c9b457499",
"IPY_MODEL_aac433a9a64c48dfb18d7a01f64d3b27",
"IPY_MODEL_4802a63f700e48fca16b5d89fbab333d"
],
"layout": "IPY_MODEL_3f55aef52aee4e77864d53e3197c3cc3"
}
},
"aac433a9a64c48dfb18d7a01f64d3b27": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_73d34cae940e4748a7b3127351925e65",
"max": 108,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_2dc4a6c935ac4ef38ed9030608bd4b2f",
"value": 108
}
},
"cbd3e1411b2c4eeb943243c9d45245c4": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"df22a67ae80b4673b708eea74646be61": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_cbd3e1411b2c4eeb943243c9d45245c4",
"max": 14,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_04af736f84044e37aa6599aa708a77bc",
"value": 14
}
},
"f41df4b6ab4c4132b0d20232002f0294": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"f57a9ac0d924408fbaaac795c172862e": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}