{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "ifmV02zKlsCs",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "1b6d48e5-654c-4265-9c3e-fbeb8c4a5ade"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m493.7/493.7 kB\u001b[0m \u001b[31m9.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m7.9/7.9 MB\u001b[0m \u001b[31m20.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m15.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m18.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m311.2/311.2 kB\u001b[0m \u001b[31m35.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m106.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m80.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m295.0/295.0 kB\u001b[0m \u001b[31m34.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h"
          ]
        }
      ],
      "source": [
        "# use %pip (not !pip) so the install targets this kernel's environment\n",
        "%pip install -qU \\\n",
        "  datasets \\\n",
        "  transformers"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Q4vTJ-pFmWl5"
      },
      "source": [
        "## Dataset Download"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GFaaDw5VmZEk"
      },
      "source": [
        "We're going to test with a more real world use-case, with messy, imperfect data. We will use the [`jamescalam/ai-arxiv-chunked`](https://huggingface.co/datasets/jamescalam/ai-arxiv-chunked) dataset."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 252,
          "referenced_widgets": [
            "f33d812749fc4c14b5666c1fee8726a0",
            "f43895751cc24ad891adef3f181d8640",
            "c2238787e75f4072852441d3855a1b92",
            "a4d3e9c7984840569b02d9a8ed1c3d0c",
            "2794edaa22ce43dfb3962756992daaa4",
            "eb2baffdc9594a7a9571bfa1a117137e",
            "7415fe6be2314c238b87293a13d8a5da",
            "5be03eef53a748529829380da2e48a5f",
            "4fbba367c80f4d2e97fd5a93e3ee6e6d",
            "bd76caa1dd61407cbda880798048b1b9",
            "dcbe2424c51742ad943d924501a5d8e7",
            "e97c8904dcb04a00aa1a7de3b9464d82",
            "846fafb4219244d297fc3e6b46c60c23",
            "5b2662fa300f4eaabae01f62b66da7c9",
            "05d7940211e3487fb5d47ab0fbb3f327",
            "96e30533281c416da4ea100a200a4d3a",
            "2f686b1d80aa4110bf81968982c91192",
            "b591f44ad0d846dbb7847c44b83cd08c",
            "688b18f19f694f28a5eccbddd2541ec6",
            "24b1d8a2bdec43408acd5d2ebb678f52",
            "9f2cc438794a4daa9c6981f091c54cce",
            "64cb329966554089924456ec02d1b4d1",
            "1c70a26401304927af8ab443e81db9e9",
            "dfd809ea460d4ee8a7df648e507ba16d",
            "2e72742ee2bd43c781c212916b6bc17f",
            "b85b376764c4460bb5fb660cfa18a914",
            "4fa3fdfefee744e3914244a22ca45d2c",
            "a77235425bc5401aad8632379f2e9f59",
            "1f543c8a9c484173a32d757cc2b34866",
            "44e863f191394632805387e578f98ba6",
            "40df67f47f8f4407899defdbeb262514",
            "c8e8652f31924bf2afbb556aec4524f6",
            "b54c491fba124f6f87e14a831f2bbcd0",
            "2b0bb0db43674cb997423f2b9a6261be",
            "ef90a4bf570748e089beb99d66c87058",
            "7cb5ea250950468a908dc468d35010b0",
            "b62da525d2a54db8acd5267ba5cdc194",
            "61f90df26e3345019c4a8dc4dfb6d062",
            "4720a60d8b3d462b87de4e1e066c995c",
            "087a364c560c4d9eaa22742f4bf05374",
            "26be9157557e4483a3fb9cc397040a46",
            "4fa40d9826bf40d8bb4a875bfa6c4e8a",
            "f3cd2da5ad7a48ab996b4474a9eb0ffc",
            "e9eef051042b40a286c9a79511091d5e"
          ]
        },
        "id": "4-FqcdKHmVpa",
        "outputId": "8e573ff0-b96b-41b4-efc1-2b48f175a8fd"
      },
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Downloading data files:   0%|          | 0/1 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "f33d812749fc4c14b5666c1fee8726a0"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Downloading data:   0%|          | 0.00/153M [00:00<?, ?B/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "e97c8904dcb04a00aa1a7de3b9464d82"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Extracting data files:   0%|          | 0/1 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "1c70a26401304927af8ab443e81db9e9"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Generating train split: 0 examples [00:00, ? examples/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "2b0bb0db43674cb997423f2b9a6261be"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Dataset({\n",
              "    features: ['doi', 'chunk-id', 'chunk', 'id', 'title', 'summary', 'source', 'authors', 'categories', 'comment', 'journal_ref', 'primary_category', 'published', 'updated', 'references'],\n",
              "    num_rows: 41584\n",
              "})"
            ]
          },
          "metadata": {},
          "execution_count": 2
        }
      ],
      "source": [
        "from datasets import load_dataset\n",
        "\n",
        "# pull the prechunked arXiv corpus (train split; one row per text chunk)\n",
        "dataset_id = \"jamescalam/ai-arxiv-chunked\"\n",
        "data = load_dataset(dataset_id, split=\"train\")\n",
        "data"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "First we define our embedding function."
      ],
      "metadata": {
        "id": "gp5a_bInyfdX"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "from torch.nn.functional import normalize\n",
        "from transformers import AutoModel, AutoTokenizer\n",
        "\n",
        "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
        "print(f\"Using {device}\")\n",
        "\n",
        "model_id = \"BAAI/bge-base-en-v1.5\"\n",
        "\n",
        "# initialize tokenizer and model\n",
        "tokenizer = AutoTokenizer.from_pretrained(model_id)\n",
        "model = AutoModel.from_pretrained(model_id).to(device)\n",
        "model.eval()\n",
        "\n",
        "def embed(docs: list[str]) -> list[list[float]]:\n",
        "    \"\"\"Encode a batch of texts into L2-normalized CLS embeddings.\"\"\"\n",
        "    inputs = tokenizer(\n",
        "        docs, padding=True, max_length=512, truncation=True, return_tensors=\"pt\"\n",
        "    ).to(device)\n",
        "    with torch.no_grad():\n",
        "        outputs = model(**inputs)\n",
        "        # CLS token (position 0) of the last hidden state\n",
        "        embeddings = outputs[0][:, 0]\n",
        "    # unit-length vectors, so a plain dot product equals cosine similarity\n",
        "    embeddings = normalize(embeddings, p=2, dim=1)\n",
        "    return embeddings.cpu().numpy()"
      ],
      "metadata": {
        "id": "oG6zd1dLw54w",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "f36a8e51-1448-4214-8564-c73a35bc3ac7"
      },
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Using cuda\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "Use this to build a NumPy array of BGE embedding vectors."
      ],
      "metadata": {
        "id": "1nvrNQSGXvEC"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "from tqdm.auto import tqdm\n",
        "import numpy as np\n",
        "\n",
        "chunks = data[\"chunk\"]\n",
        "batch_size = 256\n",
        "\n",
        "# embed in batches, collect, then stack once at the end —\n",
        "# concatenating inside the loop recopies the full array every\n",
        "# iteration (quadratic in the number of batches)\n",
        "batches = []\n",
        "for i in tqdm(range(0, len(chunks), batch_size)):\n",
        "    i_end = min(len(chunks), i+batch_size)\n",
        "    chunk_batch = chunks[i:i_end]\n",
        "    # embed current batch\n",
        "    batches.append(embed(chunk_batch))\n",
        "arr = np.concatenate(batches)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 49,
          "referenced_widgets": [
            "1c71bde1b31c4ed0b928aef66dfb8e2a",
            "c74a18bd60c04c9fa3919a7cf32da6e8",
            "85731e8e040346b29472920587c92149",
            "6be5eeb7a3b14be3badcbb4dc376e304",
            "f01c0f02b4084e5393fec6d54a5abbec",
            "e9240b67f5dc4381a76bf08ac552dc8c",
            "c4817b17dc8c486b9f7248b2fdd02928",
            "bb23c12bee9240ef813cdea5d34b6995",
            "229d971baf594dd4af0d0236ed0e2859",
            "a87ca52d6b1545a493aade5c1ee8c088",
            "9419714da3284e8fa60f3dd89d7635a9"
          ]
        },
        "id": "EdyWVR17zX7I",
        "outputId": "1c3b3002-47e0-45c3-d8cd-48dc7ed70099"
      },
      "execution_count": 15,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "  0%|          | 0/163 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "1c71bde1b31c4ed0b928aef66dfb8e2a"
            }
          },
          "metadata": {}
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "Now we need to create the query mechanism. This is simply a cosine similarity calculation between a query vector and our `arr` vectors."
      ],
      "metadata": {
        "id": "Bl9g3ePt029u"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "from numpy.linalg import norm\n",
        "\n",
        "# convert chunks list to array for easy indexing\n",
        "chunk_arr = np.array(chunks)\n",
        "\n",
        "def query(text: str, top_k: int=5) -> list[str]:\n",
        "    \"\"\"Return the top_k chunks most similar to `text`, best match first.\"\"\"\n",
        "    # create query embedding (BGE models expect this prefix for queries)\n",
        "    xq = embed([f\"Represent this sentence for searching relevant passages: {text}\"])[0]\n",
        "    xq = np.array(xq)\n",
        "    # calculate cosine similarities\n",
        "    sim = np.dot(arr, xq.T) / (norm(arr, axis=1)*norm(xq.T))\n",
        "    # get indices of the top_k records; argpartition returns them unordered...\n",
        "    idx = np.argpartition(sim, -top_k)[-top_k:]\n",
        "    # ...so rank them by similarity, highest first\n",
        "    idx = idx[np.argsort(-sim[idx])]\n",
        "    contexts = chunk_arr[idx]\n",
        "    return contexts.tolist()"
      ],
      "metadata": {
        "id": "MR7WyDiatlsX"
      },
      "execution_count": 16,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "query(\"why should I use llama 2?\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "u2NjYxsn7J5f",
        "outputId": "4893b818-a90f-4259-e5a1-3af6114e8e7b"
      },
      "execution_count": 17,
      "outputs": [
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "['models will be released as we improve model safety with community feedback.\\nLicense A custom commercial license is available at: ai.meta.com/resources/\\nmodels-and-libraries/llama-downloads/\\nWhere to send commentsInstructions on how to provide feedback or comments on the model can be\\nfound in the model README, or by opening an issue in the GitHub repository\\n(https://github.com/facebookresearch/llama/ ).\\nIntended Use\\nIntended Use Cases L/l.sc/a.sc/m.sc/a.sc /two.taboldstyle is intended for commercial and research use in English. Tuned models\\nare intended for assistant-like chat, whereas pretrained models can be adapted\\nfor a variety of natural language generation tasks.\\nOut-of-Scope Uses Use in any manner that violates applicable laws or regulations (including trade\\ncompliancelaws). UseinlanguagesotherthanEnglish. Useinanyotherway\\nthat is prohibited by the Acceptable Use Policy and Licensing Agreement for\\nL/l.sc/a.sc/m.sc/a.sc /two.taboldstyle.\\nHardware and Software (Section 2.2)\\nTraining Factors We usedcustomtraininglibraries, Meta\u2019sResearchSuperCluster, andproductionclustersforpretraining. Fine-tuning,annotation,andevaluationwerealso',\n",
              " 'but BoolQ. Similarly, this model surpasses PaLM540B everywhere but on BoolQ and WinoGrande.\\nLLaMA-13B model also outperforms GPT-3 on\\nmost benchmarks despite being 10 \\x02smaller.\\n3.2 Closed-book Question Answering\\nWe compare LLaMA to existing large language\\nmodels on two closed-book question answering\\nbenchmarks: Natural Questions (Kwiatkowski\\net al., 2019) and TriviaQA (Joshi et al., 2017). For\\nboth benchmarks, we report exact match performance in a closed book setting, i.e., where the models do not have access to documents that contain\\nevidence to answer the question. In Table 4, we\\nreport performance on NaturalQuestions, and in Table 5, we report on TriviaQA. On both benchmarks,\\nLLaMA-65B achieve state-of-the-arts performance\\nin the zero-shot and few-shot settings. More importantly, the LLaMA-13B is also competitive on\\nthese benchmarks with GPT-3 and Chinchilla, despite being 5-10 \\x02smaller. This model runs on a\\nsingle V100 GPU during inference.\\n0-shot 1-shot 5-shot 64-shot\\nGopher 280B 43.5 - 57.0 57.2',\n",
              " 'use only. Using the weights of Baize with LLaMA\u2019s\\noriginal weights is subject to Meta\u2019s LLaMA License Agreement. It is the responsibility of the\\nusers to download and use LLaMA in compliance\\nwith the license agreement. In addition to the\\nmodel, we are also releasing the fine-tuning corpus\\nunder CC-BY-NC 4.0 (allowing research use only).\\nWe hereby disclaim any liability for any activities\\nrelated to the distribution and use of the released\\nartifacts. The licenses are subject to change.\\nSafety and Access Control Unlike ChatGPT (OpenAI, 2023a), Baize does not rely on\\nhuman feedback to suppress unwanted behaviors.\\nInstead, Baize learns to avoid such behaviors by\\nimitating ChatGPT, and we have added an explicit\\nprompt to guide its behavior. However, it is im7\\nHuman: How to save data into a json file in Python?\\nBaize-v2-7B: In Python, you can use the json module to save data into a JSON file. Here is an example code snippet that\\ndemonstrates how to do this:\\nimport json\\n# Define some data to save\\ndata = {\"name\": \"John Doe\", \"age\": 30, \"city\": \"New York\"}\\n# Open a file to write to\\nwith open(\"data.json\", \"w\") asf:',\n",
              " 'chat with itself. For future work, we would like\\nto explore introducing reinforcement learning to\\nfurther improve the performance of our models.\\nLimitations\\nFoundation Model Similar to other language\\nmodels, Baize may suffer from hallucination, toxicity and stereotypes. Particularly, Baize inherits the\\nout-of-date knowledge from LLaMA. Due to the\\nfact that at least 82% of LLaMA\u2019s pretraining data\\nis from before 2020, Baize may provide outdated\\nanswers to certain questions, such as \"who is the\\ncurrent president of the United States?\" Additionally, LLaMA only supports 20 languages and has a\\nvery limited corpus for non-English languages.\\nEvaluation In this paper, we automatically evaluating the models with GPT-4 (OpenAI, 2023b).However, we found that it has a strong preference\\nfor longer responses and a positional bias. We believe human evaluation can be more rigorous and reliable despite being expensive and time-consuming\\nwhile automatic evaluation remains an open research question.\\nLicense and Legality Following Stanford Alpaca (Taori et al., 2023), we have decided that the\\nreleased weights of Baize are licensed for research\\nuse only. Using the weights of Baize with LLaMA\u2019s\\noriginal weights is subject to Meta\u2019s LLaMA License Agreement. It is the responsibility of the',\n",
              " '\\x03Equal contribution. Correspondence: {htouvron,\\nthibautlav,gizacard,egrave,glample}@meta.com\\n1https://github.com/facebookresearch/llamaperformance, a smaller one trained longer will\\nultimately be cheaper at inference. For instance,\\nalthough Hoffmann et al. (2022) recommends\\ntraining a 10B model on 200B tokens, we \ufb01nd\\nthat the performance of a 7B model continues to\\nimprove even after 1T tokens.\\nThe focus of this work is to train a series of\\nlanguage models that achieve the best possible performance at various inference budgets, by training\\non more tokens than what is typically used. The\\nresulting models, called LLaMA , ranges from 7B\\nto 65B parameters with competitive performance\\ncompared to the best existing LLMs. For instance,\\nLLaMA-13B outperforms GPT-3 on most benchmarks, despite being 10 \\x02smaller. We believe that\\nthis model will help democratize the access and\\nstudy of LLMs, since it can be run on a single GPU.\\nAt the higher-end of the scale, our 65B-parameter\\nmodel is also competitive with the best large language models such as Chinchilla or PaLM-540B.\\nUnlike Chinchilla, PaLM, or GPT-3, we only']"
            ]
          },
          "metadata": {},
          "execution_count": 17
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [],
      "metadata": {
        "id": "ZSIBTMUy7qc0"
      },
      "execution_count": null,
      "outputs": []
    }
  ],
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "T4"
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "nbformat": 4,
  "nbformat_minor": 0
}