{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "id": "ifmV02zKlsCs",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "f79fa138-5f29-44e0-a4bc-0e3b4d5b8b52"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m493.7/493.7 kB\u001b[0m \u001b[31m13.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m16.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m21.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m311.2/311.2 kB\u001b[0m \u001b[31m26.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m7.9/7.9 MB\u001b[0m \u001b[31m37.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m261.4/261.4 kB\u001b[0m \u001b[31m34.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m86.0/86.0 kB\u001b[0m \u001b[31m13.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m114.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m86.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m74.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m295.0/295.0 kB\u001b[0m \u001b[31m37.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h  Building wheel for FlagEmbedding (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
            "  Building wheel for sentence_transformers (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
          ]
        }
      ],
      "source": [
        "!pip install -qU \\\n",
        "  datasets==2.14.6 \\\n",
        "  FlagEmbedding==1.1.5"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Q4vTJ-pFmWl5"
      },
      "source": [
        "## Dataset Download"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GFaaDw5VmZEk"
      },
      "source": [
        "We're going to test with a more real world use-case, with messy, imperfect data. We will use the [`jamescalam/ai-arxiv-chunked`](https://huggingface.co/datasets/jamescalam/ai-arxiv-chunked) dataset."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 234,
          "referenced_widgets": [
            "45bd9c04ee1a484692d4ac049cdde42b",
            "155854b6d5e647c6af00c57d5e67fc68",
            "3945b8d5187f41d78bfa8827527d7562",
            "0fd0a7060198491488cf026a8da0de0c",
            "3dcc589fbaba4cfab06e3dc5c12fd7c8",
            "538db163ea454b67ae5758bb209ace92",
            "51e9c906e15445c29751952f6d29ea9c",
            "2708119ba4304c6da7ed16a9a29cc540",
            "068cdd3bf32547e294a2fa49f93a5aed",
            "c308d29b621f4a868e2e869bb57cec6c",
            "a552cc28d9aa493b8e05acd8398b1d0a",
            "39822fac20c540bea407345109a16299",
            "104c8fa04117448ab73d167a63fbba2e",
            "417cdae7b56b47538d22126b2d161e9a",
            "c24f4d101be6497a94cc108390b20443",
            "27bef2780545419fb74d67e1b9df50f1",
            "ba00e997cfbc48eb8389e8a6130aa527",
            "efa99d6df8ef47e0b698f4305c74cc6b",
            "e10f18b2af0c4371bfd6f0f84aedc542",
            "52313d67addc48e9a6a08db6a313bdbb",
            "2711d75134d34b2cb94dd35ea9aff498",
            "76688298403f480b9889d3bc9723126d",
            "aad87863cacf48c4a84ebc42ef5ed535",
            "bc4ca4778fb041fca48021c407e3c8d5",
            "dfb50c9526e94219b28325cdcbf9a577",
            "d931de4ed1614b399b322ca37595bb68",
            "e9453512df5c47369ea4776c93bd4f5c",
            "0da55595d5684a19a85e02aa339c19cb",
            "b07c56ba82d244c78620a8f72fe5fc20",
            "0ed7b4f0ece5452f9abc2759efa3abdc",
            "f433ad5ebca648ac92eafa8810d8b4e0",
            "b8ed8952f48847758693e81498743477",
            "f11b08258650412ba919485e47e4487d",
            "a38ed30294534783a76ff59826515d42",
            "22dfefc89e92463eba4a64da541607ee",
            "805caeb95bc54213a1bdc94a533e9355",
            "6f49339ee02245a5b5152ad703d98242",
            "a75071dd845e480e814360a2c94f0855",
            "2115369c00ba41e399683a88fde7b782",
            "d15a27c2d6bf44e09d609cb378eadad4",
            "39128f3778d347aabcc567433a21255e",
            "9b6ce4cf65bf4c6eb3e3ddd210b7eb5f",
            "d21d35d7b52e4c4785b86e8054a7e214",
            "abe51594702e4bfe8ec8f2b994fe01f7"
          ]
        },
        "id": "4-FqcdKHmVpa",
        "outputId": "122d3b4e-1360-46bc-e70f-0aac2a378118"
      },
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Downloading data files:   0%|          | 0/1 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "45bd9c04ee1a484692d4ac049cdde42b"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Downloading data:   0%|          | 0.00/153M [00:00<?, ?B/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "39822fac20c540bea407345109a16299"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Extracting data files:   0%|          | 0/1 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "aad87863cacf48c4a84ebc42ef5ed535"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Generating train split: 0 examples [00:00, ? examples/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "a38ed30294534783a76ff59826515d42"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Dataset({\n",
              "    features: ['doi', 'chunk-id', 'chunk', 'id', 'title', 'summary', 'source', 'authors', 'categories', 'comment', 'journal_ref', 'primary_category', 'published', 'updated', 'references'],\n",
              "    num_rows: 41584\n",
              "})"
            ]
          },
          "metadata": {},
          "execution_count": 2
        }
      ],
      "source": [
        "from datasets import load_dataset\n",
        "\n",
        "data = load_dataset(\"jamescalam/ai-arxiv-chunked\", split=\"train\")\n",
        "data"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "First we define our embedding function."
      ],
      "metadata": {
        "id": "gp5a_bInyfdX"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "import os\n",
        "import torch\n",
        "from FlagEmbedding import FlagModel\n",
        "\n",
        "# see if GPU is available for model to use\n",
        "device = \"0\" if torch.cuda.is_available() else \"\"\n",
        "os.environ[\"CUDA_VISIBLE_DEVICES\"] = device\n",
        "print(device)\n",
        "\n",
        "model = FlagModel(\n",
        "    'BAAI/bge-large-en-v1.5',\n",
        "    query_instruction_for_retrieval=\"Represent this sentence for searching relevant passages: \",\n",
        "    use_fp16=True  # did not note noticable difference between fp16 or fp32\n",
        ")\n",
        "\n",
        "def embed(docs: list[str]):\n",
        "    doc_embeds = model.encode(docs)\n",
        "    return doc_embeds"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "RS9gp316sSMX",
        "outputId": "8546fe55-97dd-4af0-a655-8f17cdb5c7e6"
      },
      "execution_count": 24,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "0\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "Use this to build a Numpy array of cohere embedding vectors."
      ],
      "metadata": {
        "id": "1nvrNQSGXvEC"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "from tqdm.auto import tqdm\n",
        "import numpy as np\n",
        "\n",
        "chunks = data[\"chunk\"]\n",
        "batch_size = 256\n",
        "\n",
        "for i in tqdm(range(0, len(chunks), batch_size)):\n",
        "    i_end = min(len(chunks), i+batch_size)\n",
        "    chunk_batch = chunks[i:i_end]\n",
        "    # embed current batch\n",
        "    embed_batch = embed(chunk_batch)\n",
        "    # add to existing np array if exists (otherwise create)\n",
        "    if i == 0:\n",
        "        arr = np.array(embed_batch)\n",
        "    else:\n",
        "        arr = np.concatenate([arr, np.array(embed_batch)])"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 1000,
          "referenced_widgets": [
            "fe1a8c1131ef49f18389cea64b2e7549",
            "2a0a0d67e4f54b149e7db5a7d7d82f73",
            "129c219994a5452ea85cc47ac42482d8",
            "fbceca134314496cbe2d7a90748987d8",
            "af7dc55ad38f42019a7c340e6f13f7c0",
            "eddbc52d593949d4bc1da82e8fd8a8be",
            "461b084d970e4579866e570e07c0fc81",
            "2fedd9c0ee9249aeb9f56d30abede75e",
            "7261d74333104772be0338c548d278a3",
            "4c04c1ea204949f084f3174ab5cf4d94",
            "e38d162b88cc478999f6790233b10be0"
          ]
        },
        "id": "EdyWVR17zX7I",
        "outputId": "688e1549-50c6-48d7-d70d-77461cf339f5"
      },
      "execution_count": 25,
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "  0%|          | 0/163 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "fe1a8c1131ef49f18389cea64b2e7549"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.77it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.85it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.62it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.84it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.92it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.83it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.80it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.53it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.78it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.59it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.45it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.75it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.76it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.57it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.59it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.02it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.48it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:01<00:00,  1.07s/it]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.73it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.58it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.74it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.71it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.92it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.75it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.75it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.73it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.71it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.71it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.78it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.64it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.91it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.51it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.69it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.55it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.85it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.77it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.62it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.36it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.55it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.70it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.72it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.56it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.75it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.63it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.76it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.76it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.71it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.19it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.79it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.73it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.80it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.75it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.84it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.72it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.72it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.85it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.69it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.76it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.55it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.76it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.73it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.91it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.72it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.72it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.76it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.73it/s]\n",
            "\n",
            "Inference Embeddings:   0%|          | 0/1 [00:00<?, ?it/s]\u001b[A\n",
            "Inference Embeddings: 100%|\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588\u2588| 1/1 [00:00<00:00,  1.82it/s]\n",
            "(repeated per-chunk tqdm progress output truncated for readability)"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "Now we need to create the query mechanism. This is simply a cosine-similarity calculation between a query vector and our `arr` vectors."
      ],
      "metadata": {
        "id": "Bl9g3ePt029u"
      }
    },
    {
      "cell_type": "code",
      "source": [
        "from numpy.linalg import norm\n",
        "\n",
        "# convert chunks list to array for easy indexing\n",
        "chunk_arr = np.array(chunks)\n",
        "\n",
        "def query(text: str, top_k: int = 3) -> list[str]:\n",
        "    \"\"\"Embed `text`, score it against the chunk embeddings in `arr`,\n",
        "    print the `top_k` most similar chunks (best first) and return them.\n",
        "\n",
        "    :param text: natural-language query to embed.\n",
        "    :param top_k: number of chunks to retrieve (clipped to corpus size).\n",
        "    :return: the retrieved chunks, ordered most-similar first.\n",
        "    \"\"\"\n",
        "    # create query embedding\n",
        "    xq = np.array(model.encode_queries([text])[0])\n",
        "    # dot product == cosine similarity (embeddings are normalized)\n",
        "    sim = np.dot(arr, xq)\n",
        "    # clip top_k so a corpus smaller than top_k doesn't make argpartition raise\n",
        "    top_k = min(top_k, sim.shape[0])\n",
        "    # argpartition finds the top_k indices but leaves them in arbitrary order;\n",
        "    # sort that small subset by similarity, highest first\n",
        "    idx = np.argpartition(sim, -top_k)[-top_k:]\n",
        "    idx = idx[np.argsort(sim[idx])[::-1]]\n",
        "    contexts = chunk_arr[idx].tolist()\n",
        "    for c in contexts:\n",
        "        print(c)\n",
        "        print(\"----------\")\n",
        "    # return the contexts so the function honors its annotated return type\n",
        "    return contexts"
      ],
      "metadata": {
        "id": "MR7WyDiatlsX"
      },
      "execution_count": 26,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# sanity-check retrieval with a broad \"why Llama 2\" question\n",
        "query(\"why should I use llama 2?\")"
      ],
      "metadata": {
        "id": "u2NjYxsn7J5f",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "0b727464-e227-40f5-a030-b094b5a57917"
      },
      "execution_count": 27,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "but BoolQ. Similarly, this model surpasses PaLM540B everywhere but on BoolQ and WinoGrande.\n",
            "LLaMA-13B model also outperforms GPT-3 on\n",
            "most benchmarks despite being 10 \u0002smaller.\n",
            "3.2 Closed-book Question Answering\n",
            "We compare LLaMA to existing large language\n",
            "models on two closed-book question answering\n",
            "benchmarks: Natural Questions (Kwiatkowski\n",
            "et al., 2019) and TriviaQA (Joshi et al., 2017). For\n",
            "both benchmarks, we report exact match performance in a closed book setting, i.e., where the models do not have access to documents that contain\n",
            "evidence to answer the question. In Table 4, we\n",
            "report performance on NaturalQuestions, and in Table 5, we report on TriviaQA. On both benchmarks,\n",
            "LLaMA-65B achieve state-of-the-arts performance\n",
            "in the zero-shot and few-shot settings. More importantly, the LLaMA-13B is also competitive on\n",
            "these benchmarks with GPT-3 and Chinchilla, despite being 5-10 \u0002smaller. This model runs on a\n",
            "single V100 GPU during inference.\n",
            "0-shot 1-shot 5-shot 64-shot\n",
            "Gopher 280B 43.5 - 57.0 57.2\n",
            "----------\n",
            "PaLM and LaMDA (Thoppilan et al., 2022). PaLM\n",
            "and LLaMA were trained on datasets that contain\n",
            "a similar number of code tokens.\n",
            "As show in Table 8, for a similar number\n",
            "of parameters, LLaMA outperforms other general models such as LaMDA and PaLM, which\n",
            "are not trained or \ufb01netuned speci\ufb01cally for code.\n",
            "LLaMA with 13B parameters and more outperforms LaMDA 137B on both HumanEval and\n",
            "MBPP. LLaMA 65B also outperforms PaLM 62B,\n",
            "even when it is trained longer. The pass@1 results\n",
            "reported in this table were obtained by sampling\n",
            "with temperature 0.1. The pass@100 and pass@80\n",
            "metrics were obtained with temperature 0.8. We\n",
            "use the same method as Chen et al. (2021) to obtain\n",
            "unbiased estimates of the pass@k.\n",
            "It is possible to improve the performance on code\n",
            "by \ufb01netuning on code-speci\ufb01c tokens. For instance,\n",
            "PaLM-Coder (Chowdhery et al., 2022) increases\n",
            "the pass@1 score of PaLM on HumanEval from\n",
            "26.2% for PaLM to 36%. Other models trained\n",
            "----------\n",
            "\u2022Zero-shot. We provide a textual description\n",
            "of the task and a test example. The model\n",
            "either provides an answer using open-ended\n",
            "generation, or ranks the proposed answers.\n",
            "\u2022Few-shot. We provide a few examples of the\n",
            "task (between 1 and 64) and a test example.\n",
            "The model takes this text as input and generates the answer or ranks different options.\n",
            "We compare LLaMA with other foundation models, namely the non-publicly available language\n",
            "models GPT-3 (Brown et al., 2020), Gopher (Rae\n",
            "et al., 2021), Chinchilla (Hoffmann et al., 2022)\n",
            "and PaLM (Chowdhery et al., 2022), as well as\n",
            "the open-sourced OPT models (Zhang et al., 2022),\n",
            "GPT-J (Wang and Komatsuzaki, 2021), and GPTNeo (Black et al., 2022). In Section 4, we also\n",
            "brie\ufb02y compare LLaMA with instruction-tuned\n",
            "models such as OPT-IML (Iyer et al., 2022) and\n",
            "Flan-PaLM (Chung et al., 2022).We evaluate LLaMA on free-form generation\n",
            "tasks and multiple choice tasks. In the multiple\n",
            "choice tasks, the objective is to select the most\n",
            "----------\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# retrieval for a more specific topic (red teaming)\n",
        "query(\"can you tell me about red teaming for llama 2?\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "-MIDBSrB6TDU",
        "outputId": "b4c9d3af-99c2-4c0f-ad91-e776f0a97003"
      },
      "execution_count": 28,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Ricardo Lopez-Barquilla, Marc Shedro\ufb00, Kelly Michelena, Allie Feinstein, Amit Sangani, Geeta\n",
            "Chauhan,ChesterHu,CharltonGholson,AnjaKomlenovic,EissaJamil,BrandonSpence,Azadeh\n",
            "Yazdan, Elisa Garcia Anzano, and Natascha Parks.\n",
            "\u2022ChrisMarra,ChayaNayak,JacquelinePan,GeorgeOrlin,EdwardDowling,EstebanArcaute,Philomena Lobo, Eleonora Presani, and Logan Kerr, who provided helpful product and technical organization support.\n",
            "46\n",
            "\u2022Armand Joulin, Edouard Grave, Guillaume Lample, and Timothee Lacroix, members of the original\n",
            "Llama team who helped get this work started.\n",
            "\u2022Drew Hamlin, Chantal Mora, and Aran Mun, who gave us some design input on the \ufb01gures in the\n",
            "paper.\n",
            "\u2022Vijai Mohan for the discussions about RLHF that inspired our Figure 20, and his contribution to the\n",
            "internal demo.\n",
            "\u2022Earlyreviewersofthispaper,whohelpedusimproveitsquality,includingMikeLewis,JoellePineau,\n",
            "Laurens van der Maaten, Jason Weston, and Omer Levy.\n",
            "----------\n",
            "improved various NLP tasks. The introduction of the Transformer architecture [46] laid the groundwork for the development of these powerful language models (Devlin et al. 11, Radford et al. 34, Lewis\n",
            "et al. 21, Raffel et al. 35, Brown et al. 6, Chowdhery et al. 8, Zhang et al. 52, Scao et al. 37, Touvron\n",
            "et al. 45,inter alia ). Among them, GPT-3 [ 6] has been particularly in\ufb02uential, showcasing an\n",
            "exceptional capacity to adapt to diverse tasks through the in-context learning capabilities of LLMs.\n",
            "Recently, LLaMA [ 45] has emerged as a pivotal open-source base language model, driving a series\n",
            "of open-source breakthroughs [43, 7, 15, 23] that strive to keep pace with the closed-source frontier\n",
            "in the \ufb01eld.\n",
            "J Experimental Details\n",
            "J.1 (Topic-Guided Red-Teaming) Self-Instruct\n",
            "For both Self-Instruct and Topic-Guided Red-Teaming Self-Instruct, we set the maximal number of\n",
            "new tokens in the generation to 384. The new tokens are generated by nuclear sampling [ 16] with a\n",
            "top-p threshold p= 0:98and temperature t= 1:0.\n",
            "J.2 Principle-Driven Self-Alignment\n",
            "----------\n",
            "AI research community could take to build consensus around how to red team andhow to release \ufb01ndings\n",
            "from red teaming .\n",
            "For how to red team , we have detailed our initial approach. However, we conducted this effort in isolation, and we would have bene\ufb01ted from participating in a community-based effort to address certain open\n",
            "questions:\n",
            "\u2022 Who should red team and why?\n",
            "\u2022 What protections should we put in place to ensure the safety of the red team?\n",
            "\u2022 What instructions and information about the models should we provide to the red team?\n",
            "\u2022 How should we annotate and analyze the data we collect?\n",
            "\u2022 What constitutes a successful red team attempt?\n",
            "We can make progress towards answering these questions by convening a multidisciplinary community to\n",
            "share different approaches to internal red teaming and drive toward consensus.\n",
            "The research community lacks shared norms and best practices for how to release \ufb01ndings from red teaming. As a result, we made our decision to release the data largely on our own and likely missed critical\n",
            "perspectives from experts, other disciplines, and members of the public.14The decision for how to appropriately release \ufb01ndings will ultimately require a subjective judgment call. For our purposes, we reviewed a\n",
            "sample of our red team dataset and evaluated the pros and cons of a public release (See \u00a7A.5). Among them\n",
            "----------\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# retrieval for a vague query, to see how relevance degrades\n",
        "query(\"what is the best llm?\")"
      ],
      "metadata": {
        "id": "ZSIBTMUy7qc0",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "73fd23a3-9482-4776-82b8-d698ad388b34"
      },
      "execution_count": 29,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Rank the {{num}} passages above based on their relevance to the search query. The passages\n",
            "should be listed in descending order using identi\ufb01ers, and the most relevant passages should be\n",
            "listed \ufb01rst, and the output format should be [] > [], e.g., [1] > [2]. Only response the ranking results,\n",
            "do not say any word or explain.\n",
            "B Related Work\n",
            "B.1 Information Retrieval with LLMs\n",
            "Recently, large language models (LLMs) have found increasing applications in information retrieval.\n",
            "Several approaches have been proposed to utilize LLMs for passage retrieval. For example, SGPT (Muennighoff, 2022) generates text embeddings using GPT, DSI (Tay et al., 2022) proposes a differentiable\n",
            "search index, and HyDE (Gao et al., 2022) generates pseudo-documents using GPT-3. In addition, LLMs\n",
            "have also been used for passage re-ranking tasks. UPR (Sachan et al., 2022a) and SGPT-CE (Muennighoff,\n",
            "2022) introduce instructional query generation methods, while HELM (Liang et al., 2022) utilizes instruction relevance generation. LLMs are also employed for training data generation. InPars (Bonifacio et al.,\n",
            "----------\n",
            "the LLMs\u2019 ability for searching. For example, New\n",
            "Bing utilizes GPT-4 to generate responses based on\n",
            "the retrieved documents (Microsoft, 2023). As a\n",
            "Figure 1: Average results of ChatGPT and GPT-4\n",
            "(zero-shot) on passage re-ranking benchmarks (TREC,\n",
            "BEIR, and Mr.TyDi), compared with BM25 and\n",
            "previous best supervised systems (SOTA sup., e.g.,\n",
            "monoT5 (Nogueira et al., 2020)).\n",
            "result, it is still unclear whether LLMs, e.g., ChatGPT, are good at search.\n",
            "To this end, this paper aims to investigate the potential of LLMs in relevance ranking for IR. Specifically, we focus on the following two questions:\n",
            "\u2022(RQ1) How does ChatGPT perform on passage re-ranking tasks?\n",
            "\u2022(RQ2) How to imitate the ranking capabilities\n",
            "of ChatGPT to a smaller, specialized model?\n",
            "To answer the first question, we explore two\n",
            "strategies (Sachan et al., 2022a; Liang et al., 2022)\n",
            "to instruct ChatGPT performing on passage reranking tasks, which we named instructional query\n",
            "generation and instructional relevance generation.\n",
            "However, we observe that these methods have limited performance in re-ranking and heavily rely\n",
            "----------\n",
            "thinking, problem-solving, and analytical skills, making them ideal for evaluating the performance\n",
            "of large language models in relation to human cognition. More specifically, we collect exams\n",
            "corresponding to 8 subjects from Chinese Gaokao: history, math, English, Chinese, geography,\n",
            "biology, chemistry and physics. We select mathematical questions from GRE, select English and\n",
            "math subjects from SAT to construct the benchmark.\n",
            "Law School Admission Test: Law school admission tests, such as the LSAT , are intended to measure\n",
            "the reasoning and analytical skills of prospective law students. These tests include sections on logical\n",
            "reasoning, reading comprehension, and analytical reasoning, which challenge the test-takers\u2019 ability\n",
            "to analyze complex information and draw accurate conclusions. Incorporating these tasks in our\n",
            "benchmark enables us to assess language models\u2019 capabilities in legal reasoning and analysis.\n",
            "Lawyer Qualification Test: Lawyer qualification tests, such as the bar exam, assess the legal\n",
            "knowledge, analytical skills, and ethical understanding of individuals pursuing a career in law. These\n",
            "exams cover a broad range of legal topics, including constitutional law, contract law, criminal law, and\n",
            "property law, and require candidates to demonstrate their ability to apply legal principles and reason\n",
            "effectively. By incorporating lawyer qualification tests in our benchmark, we can evaluate language\n",
            "models\u2019 performance in the context of professional legal expertise and ethical judgment. Specifically,\n",
            "----------\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "query(\"what is the difference between gpt-3.5 and llama 2?\")  # retrieved passages look more on-topic for this query than for the previous ones (all hit GPT-4/LLaMA comparison papers)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "bsHoA13w6a7t",
        "outputId": "b3d5ec72-c5ef-4ffa-94b3-05cb9cc20298"
      },
      "execution_count": 30,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "ranked from top 1 to top 5. We compare the five ranked groups against the baseline, and show the\n",
            "relative scores in Figure 4 (a,b). The ChatGPT and GPT-4 evaluation is consistent with the orders\n",
            "6\n",
            "60% 70% 80% 90% 100%LLaMA (13B)Alpaca (13B)Vicuna (13B)LLaMA_GPT4 (7B)LLaMA_GPT4 (7B, R1)BardChatGPTGPT4\n",
            "67% 466 : 69776% 539 : 71293% 639 : 68887% 607 : 70089% 620 : 69392% 624 : 68195% 652 : 684100% 758 : 758(a) All chatbots against GPT-4, whose Chinese responses are translated from English\n",
            "60% 70% 80% 90% 100%LLaMA (13B)Alpaca (13B)Vicuna (13B)LLaMA_GPT4 (7B)LLaMA_GPT4 (7B, R1)BardChatGPTGPT4\n",
            "----------\n",
            "to GPT-3 corresponds to the Stanford Alpaca model. From Figure 3(a), we observe that ( i) For the\n",
            "\u201cHelpfulness\u201d criterion, GPT-4 is the clear winner with 54.12% of the votes. GPT-3 only wins 19.74%\n",
            "of the time. ( ii) For the \u201cHonesty\u201d and \u201cHarmlessness\u201d criteria, the largest portion of votes goes\n",
            "to the tie category, which is substantially higher than the winning categories but GPT-3 (Alpaca) is\n",
            "slightly superior.\n",
            "Second, we compare GPT-4-instruction-tuned LLaMA models against the teacher model GPT-4 in\n",
            "Figure 3(b). The observations are quite consistent over the three criteria: GPT-4-instruction-tuned\n",
            "LLaMA performs similarly to the original GPT-4. We conclude that learning from GPT-4 generated\n",
            "5\n",
            "60% 70% 80% 90% 100%12345BRanking Group 94% 624 : 66792% 614 : 67091% 623 : 68289% 597 : 66989% 605 : 67891% 609 : 666\n",
            "----------\n",
            "-0.043\n",
            "-0.009+0.0132-0.004 +0.0562\n",
            "+0.0387-0.012\n",
            "-0.076Alpaca: 0.39 LLaMA-GPT4: 0.34 GPT4: 0.37Figure 6: ROUGE-L on unnatural instructions evaluated with 9K samples. The instructions are\n",
            "grouped into four subsets based on the ground-truth response length. The mean values are reported in\n",
            "the legend. The difference with GPT-4 is reported on the bar per group. LLaMA-GPT4 is a closer\n",
            "proxy to GPT-4 than Alpaca.\n",
            "closely follow the behavior of GPT-4. When the sequence length is short, both LLaMA-GPT4 and\n",
            "GPT-4 can generate responses that contains the simple ground truth answers, but add extra words to\n",
            "make the response more chat-like, which probably leads to lower ROUGE-L scores.\n",
            "5 R ELATED WORK\n",
            "Instruction Tuning. Instruction tuning of LLMs is an increasingly popular research direction in\n",
            "NLP (Zhong et al., 2021; Ouyang et al., 2022; Wei et al., 2021). Existing works aim to improve\n",
            "the quality and scale of three factors in the development pipeline, including instruction-following\n",
            "----------\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "---"
      ],
      "metadata": {
        "id": "lPyzJV787rV1"
      }
    }
  ],
  "metadata": {
    "colab": {
      "provenance": [],
      "gpuType": "A100",
      "machine_shape": "hm"
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    },
    "accelerator": "GPU"
  },
  "nbformat": 4,
  "nbformat_minor": 0
}