{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 1,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "ifmV02zKlsCs",
        "outputId": "79ec88ac-fd1b-4cf3-9420-89b771632bc1"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m493.7/493.7 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m7.9/7.9 MB\u001b[0m \u001b[31m81.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m115.3/115.3 kB\u001b[0m \u001b[31m15.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m134.8/134.8 kB\u001b[0m \u001b[31m18.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m311.2/311.2 kB\u001b[0m \u001b[31m32.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m3.8/3.8 MB\u001b[0m \u001b[31m106.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m64.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[2K     \u001b[90m\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u2501\u001b[0m \u001b[32m295.0/295.0 kB\u001b[0m \u001b[31m31.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
            "\u001b[?25h"
          ]
        }
      ],
      "source": [
        "# use %pip (not !pip) so packages install into the kernel's environment\n",
        "%pip install -qU \\\n",
        "  datasets==2.14.6 \\\n",
        "  transformers==4.35.0"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Q4vTJ-pFmWl5"
      },
      "source": [
        "## Dataset Download"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "GFaaDw5VmZEk"
      },
      "source": [
        "We're going to test with a more real-world use case, with messy, imperfect data. We will use the [`jamescalam/ai-arxiv-chunked`](https://huggingface.co/datasets/jamescalam/ai-arxiv-chunked) dataset."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 2,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 234,
          "referenced_widgets": [
            "1ee0ce4fc44e4babaf72c13a0ded94d5",
            "87cc9b2aa66b451587503f1256e845b6",
            "ba3c2ea5f95640a2a25399d635ef4a80",
            "d9de1e95959d4359b43527a61a9780b9",
            "ac1558957e6b402eb417d1fed65306f5",
            "803e4dad0e094cdb99f03beeaaa05be5",
            "4e892ad6e4bf4b2ebd74c725aa8a9501",
            "f65918ba7a744bb5b0bcfb118efe027d",
            "2e653c8c47d542eeb9b1cdf2d170c0dd",
            "a7c716ba901d4673b672caa9d84cf65a",
            "befc8dc9492b416294dd18c51e6045db",
            "752b03730d9a445082ca76f52851ff81",
            "5edbdfb20a6846cfa91fb77516b5c410",
            "62becf0f801f465e9a99cccf728cfeac",
            "2e726a9b953f4561854ac3cf9757826d",
            "fa5b95ab124c47599a3906f2b04c95a9",
            "d402b1dc44a04ec3927896ad6f59108c",
            "b36865a366a04d83a5ad8b0864bb1da4",
            "ec480bef81cd43b7901515e281c65639",
            "b448da52310f4d10874dee2daa2b0632",
            "20f65da51b014109bd41a8d1423dd54a",
            "3b23f9ab26e940b8815daa7ab8069fe3",
            "b4de7634e7614ec6aed8efb0ac43b02d",
            "0182a3b0636144ad84f7370bb5796db7",
            "37eb170e5d2546f1974caadf88d0f396",
            "83916d887cde486b89b4b00335c89831",
            "0c46cabc29f4467da8947e3b0163aec6",
            "f21763bb56ac4a9ab632068d5aafbe2e",
            "1f1e9612c90c4e3fb405cc3b98a773fa",
            "077ee1a7621841b392c5a664fa4126a4",
            "71968afe517545aea1337991a3c1c8c5",
            "94a5782ea78249e39958ecbdad39a268",
            "ca80855d48b34274a0296a4e5bb6f8b6",
            "3bdf6cd3f5834a9e92a5f64cb36e9342",
            "0f7f578b308746c8994e1ea3fa547361",
            "6afd7f194bd54bf39fba575374cdda9f",
            "69f35b94f4994274843b580a8414dcc1",
            "b472ad13a08f42dcb4118d8a0383029b",
            "bb6837e660934c7bb94ff461aacb50da",
            "d59fea04742643d4a0a3523a9d1752f4",
            "c49b3071de944a8795b4219346866788",
            "27e1ade2be624295a10d3438369cbf98",
            "7c2c0659392a4ffbbfcc62b5d57cbd5a",
            "1dc4651e499846f7a6a350559d1f9782"
          ]
        },
        "id": "4-FqcdKHmVpa",
        "outputId": "4f438e60-06f8-4e83-a5ea-a629150e849f"
      },
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Downloading data files:   0%|          | 0/1 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "1ee0ce4fc44e4babaf72c13a0ded94d5"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Downloading data:   0%|          | 0.00/153M [00:00<?, ?B/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "752b03730d9a445082ca76f52851ff81"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Extracting data files:   0%|          | 0/1 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "b4de7634e7614ec6aed8efb0ac43b02d"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "Generating train split: 0 examples [00:00, ? examples/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "3bdf6cd3f5834a9e92a5f64cb36e9342"
            }
          },
          "metadata": {}
        },
        {
          "output_type": "execute_result",
          "data": {
            "text/plain": [
              "Dataset({\n",
              "    features: ['doi', 'chunk-id', 'chunk', 'id', 'title', 'summary', 'source', 'authors', 'categories', 'comment', 'journal_ref', 'primary_category', 'published', 'updated', 'references'],\n",
              "    num_rows: 41584\n",
              "})"
            ]
          },
          "metadata": {},
          "execution_count": 2
        }
      ],
      "source": [
        "from datasets import load_dataset\n",
        "\n",
        "# pull the prechunked arXiv dataset from the Hugging Face Hub (train split only)\n",
        "data = load_dataset(\"jamescalam/ai-arxiv-chunked\", split=\"train\")\n",
        "data"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "gp5a_bInyfdX"
      },
      "source": [
        "First we define our embedding function."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 5,
      "metadata": {
        "id": "oG6zd1dLw54w",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "f7997427-c3b2-4a22-8f4d-75e34507cef8"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "Using cuda\n"
          ]
        }
      ],
      "source": [
        "import torch\n",
        "from transformers import AutoModel\n",
        "\n",
        "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
        "print(f\"Using {device}\")\n",
        "\n",
        "model_id = \"jinaai/jina-embeddings-v2-base-en\"\n",
        "\n",
        "# initialize the embedding model -- no separate tokenizer is needed because\n",
        "# this model's custom `encode` method handles tokenization internally\n",
        "model = AutoModel.from_pretrained(\n",
        "    model_id,\n",
        "    trust_remote_code=True\n",
        ").to(device)\n",
        "model.eval()\n",
        "\n",
        "def embed(docs: list[str]):\n",
        "    \"\"\"Embed a batch of documents into dense vectors.\n",
        "\n",
        "    Returns whatever `model.encode` produces: one pooled document-level\n",
        "    embedding per input string (an array -- downstream cells index and\n",
        "    concatenate it as a numpy array).\n",
        "    \"\"\"\n",
        "    with torch.no_grad():\n",
        "        # encode performs tokenization and pooling internally\n",
        "        doc_embeds = model.encode(docs)\n",
        "    return doc_embeds"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "1nvrNQSGXvEC"
      },
      "source": [
        "Use this to build a NumPy array of embedding vectors."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 6,
      "metadata": {
        "id": "EdyWVR17zX7I",
        "colab": {
          "base_uri": "https://localhost:8080/",
          "height": 49,
          "referenced_widgets": [
            "b93cebf496074edd96a8127b7de07751",
            "29f17960d10e495c8308025faa9537ec",
            "a676e162e28140dbb90faa102a985a1b",
            "2ef014dff1c145fcbf1f41d2415c5c9c",
            "c11448deed51443c8a6244489a547045",
            "f9ddb0794db04ec79229036d9fb5e882",
            "c3c5dbe036f24f6796751a9c4fab0a31",
            "31f6270d583c4bc693c8d07f65ad946f",
            "f4e8dc97132041b88ef981146490e905",
            "e60d18a842bb4d67b0f7862bf7ddebe1",
            "d831b201f1f84bdd83ba1453eed03ecd"
          ]
        },
        "outputId": "39adcbcb-c8bc-438a-c313-1ecf0c7110fe"
      },
      "outputs": [
        {
          "output_type": "display_data",
          "data": {
            "text/plain": [
              "  0%|          | 0/163 [00:00<?, ?it/s]"
            ],
            "application/vnd.jupyter.widget-view+json": {
              "version_major": 2,
              "version_minor": 0,
              "model_id": "b93cebf496074edd96a8127b7de07751"
            }
          },
          "metadata": {}
        }
      ],
      "source": [
        "from tqdm.auto import tqdm\n",
        "import numpy as np\n",
        "\n",
        "chunks = data[\"chunk\"]\n",
        "batch_size = 256\n",
        "\n",
        "# embed in batches, collecting each result in a list; a single\n",
        "# concatenate at the end avoids the quadratic cost of re-growing\n",
        "# the array on every iteration\n",
        "batch_embeds = []\n",
        "for i in tqdm(range(0, len(chunks), batch_size)):\n",
        "    i_end = min(len(chunks), i+batch_size)\n",
        "    # embed current batch of chunk texts\n",
        "    batch_embeds.append(embed(chunks[i:i_end]))\n",
        "arr = np.concatenate(batch_embeds)"
      ]
    },
    {
      "cell_type": "markdown",
      "metadata": {
        "id": "Bl9g3ePt029u"
      },
      "source": [
        "Now we need to create the query mechanism. This is simply a cosine similarity calculation between a query vector and our `arr` vectors."
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 7,
      "metadata": {
        "id": "MR7WyDiatlsX"
      },
      "outputs": [],
      "source": [
        "from numpy.linalg import norm\n",
        "\n",
        "# convert chunks list to array for easy fancy indexing\n",
        "chunk_arr = np.array(chunks)\n",
        "\n",
        "# precompute document norms once -- `arr` does not change between queries\n",
        "arr_norms = norm(arr, axis=1)\n",
        "\n",
        "def query(text: str, top_k: int=3) -> None:\n",
        "    \"\"\"Print the `top_k` chunks most similar to `text`, best-first.\n",
        "\n",
        "    Similarity is cosine similarity between the query embedding and\n",
        "    every row of the precomputed `arr` matrix.\n",
        "    \"\"\"\n",
        "    # create query embedding (1-D vector)\n",
        "    xq = embed([text])[0]\n",
        "    # calculate cosine similarities against all documents\n",
        "    sim = np.dot(arr, xq) / (arr_norms * norm(xq))\n",
        "    # argpartition returns the top_k indices in arbitrary order,\n",
        "    # so sort them by similarity (descending) before printing\n",
        "    idx = np.argpartition(sim, -top_k)[-top_k:]\n",
        "    idx = idx[np.argsort(sim[idx])[::-1]]\n",
        "    for d in chunk_arr[idx].tolist():\n",
        "        print(d)\n",
        "        print(\"----------\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 8,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "u2NjYxsn7J5f",
        "outputId": "f5fe1460-2362-4693-b66f-e03777e13fb4"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "\u0003Equal contribution. Correspondence: {htouvron,\n",
            "thibautlav,gizacard,egrave,glample}@meta.com\n",
            "1https://github.com/facebookresearch/llamaperformance, a smaller one trained longer will\n",
            "ultimately be cheaper at inference. For instance,\n",
            "although Hoffmann et al. (2022) recommends\n",
            "training a 10B model on 200B tokens, we \ufb01nd\n",
            "that the performance of a 7B model continues to\n",
            "improve even after 1T tokens.\n",
            "The focus of this work is to train a series of\n",
            "language models that achieve the best possible performance at various inference budgets, by training\n",
            "on more tokens than what is typically used. The\n",
            "resulting models, called LLaMA , ranges from 7B\n",
            "to 65B parameters with competitive performance\n",
            "compared to the best existing LLMs. For instance,\n",
            "LLaMA-13B outperforms GPT-3 on most benchmarks, despite being 10 \u0002smaller. We believe that\n",
            "this model will help democratize the access and\n",
            "study of LLMs, since it can be run on a single GPU.\n",
            "At the higher-end of the scale, our 65B-parameter\n",
            "model is also competitive with the best large language models such as Chinchilla or PaLM-540B.\n",
            "Unlike Chinchilla, PaLM, or GPT-3, we only\n",
            "----------\n",
            "asChatGPT,BARD,andClaude. TheseclosedproductLLMsareheavily\ufb01ne-tunedtoalignwithhuman\n",
            "preferences, which greatly enhances their usability and safety. This step can require signi\ufb01cant costs in\n",
            "computeandhumanannotation,andisoftennottransparentoreasilyreproducible,limitingprogresswithin\n",
            "the community to advance AI alignment research.\n",
            "In this work, we develop and release Llama 2, a family of pretrained and \ufb01ne-tuned LLMs, L/l.sc/a.sc/m.sc/a.sc /two.taboldstyle and\n",
            "L/l.sc/a.sc/m.sc/a.sc /two.taboldstyle-C/h.sc/a.sc/t.sc , at scales up to 70B parameters. On the series of helpfulness and safety benchmarks we tested,\n",
            "L/l.sc/a.sc/m.sc/a.sc /two.taboldstyle-C/h.sc/a.sc/t.sc models generally perform better than existing open-source models. They also appear to\n",
            "be on par with some of the closed-source models, at least on the human evaluations we performed (see\n",
            "----------\n",
            "based on the LLaMA architecture and trained on a vast range of topics up until\n",
            "September 2021. My primary function is to assist users in answering questions,\n",
            "generating text, and engaging in conversation. Please feel free to ask me\n",
            "anything, and I\u2019ll do my best to help you.\n",
            "### User\n",
            "[User Query]\n",
            "### Dromedary\n",
            "[Dromedary Response]\n",
            "36\n",
            "D Prompts for Verbose Cloning\n",
            "In the Verbose Cloning stage, the teacher model (i.e., the principle engraved model) is prompted with\n",
            "the following text to facilitate the generation of extensive, comprehensive, and detailed responses.\n",
            "# Dromedary\n",
            "## System Overview\n",
            "Consider an AI assistant whose codename is Dromedary, developed by the Self-Align\n",
            "team. Dromedary is trained on data up until Sept-2021, and it endeavors to be a\n",
            "helpful, ethical and reliable assistant.\n",
            "\"Dromedary (extensive)\" is an advanced language model that specializes in generating\n",
            "extensive, comprehensive, and detailed responses to user queries. By precisely\n",
            "discerning user intent and employing sophisticated natural language processing\n",
            "skills, it is able to delve deep into complex topics and provide longer, wellorganized paragraphs. This thorough approach ensures that users receive a\n",
            "complete understanding of the subject matter, resulting in a highly informative\n",
            "and gratifying experience.\n",
            "----------\n"
          ]
        }
      ],
      "source": [
        "# retrieval sanity check: top-3 chunks for a Llama-motivation question\n",
        "query(\"why should I use llama 2?\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": 9,
      "metadata": {
        "id": "ZSIBTMUy7qc0",
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "outputId": "5046ecfc-b62c-403a-e667-38ab765d71b1"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "by red teams allow organizations to improve security and sys tem integrity before and during deployment.\n",
            "Knowledge that a lab has a red team can potentially improve th e trustworthiness of an organization with\n",
            "respect to their safety and security claims, at least to the e xtent that effective red teaming practices exist\n",
            "and are demonstrably employed.\n",
            "As indicated by the number of cases in which AI systems cause o r threaten to cause harm, developers of an\n",
            "AI system often fail to anticipate the potential risks assoc iated with technical systems they develop. These\n",
            "risks include both inadvertent failures and deliberate mis use. Those not involved in the development\n",
            "of a particular system may be able to more easily adopt and pra ctice an attacker\u2019s skillset. A growing\n",
            "number of industry labs have dedicated red teams, although b est practices for such efforts are generally\n",
            "in their early stages.24There is a need for experimentation both within and across or ganizations in order\n",
            "to move red teaming in AI forward, especially since few AI dev elopers have expertise in relevant areas\n",
            "such as threat modeling and adversarial machine learning [44].\n",
            "AI systems and infrastructure vary substantially in terms o f their properties and risks, making in-house\n",
            "red-teaming expertise valuable for organizations with suf \ufb01cient resources. However, it would also be\n",
            "----------\n",
            "including limitations and risks that might be exploited by m alicious actors. Further, existing\n",
            "red teaming approaches are insuf\ufb01cient for addressing thes e concerns in the AI context.\n",
            "In order for AI developers to make veri\ufb01able claims about the ir AI systems being safe or secure, they need\n",
            "processes for surfacing and addressing potential safety an d security risks. Practices such as red teaming\n",
            "exercises help organizations to discover their own limitat ions and vulnerabilities as well as those of the\n",
            "AI systems they develop, and to approach them holistically , in a way that takes into account the larger\n",
            "environment in which they are operating.23\n",
            "A red team exercise is a structured effort to \ufb01nd \ufb02aws and vuln erabilities in a plan, organization, or\n",
            "technical system, often performed by dedicated \"red teams\" that seek to adopt an attacker\u2019s mindset\n",
            "and methods. In domains such as computer security , red teams are routinely tasked with emulating\n",
            "attackers in order to \ufb01nd \ufb02aws and vulnerabilities in organi zations and their systems. Discoveries made\n",
            "by red teams allow organizations to improve security and sys tem integrity before and during deployment.\n",
            "Knowledge that a lab has a red team can potentially improve th e trustworthiness of an organization with\n",
            "----------\n",
            "process on the LLaMA-65b base language model [ 45]. This section delves into the details employed\n",
            "for the creation of the Dromedary model. The additional experimental details of Dromedary such as\n",
            "training and decoding hyper-parameters can be found in Appendix J.\n",
            "We \ufb01rst followed the Alpaca \u2019s recipe [ 43], employing Self-Instruct to produce 267,597 open-domain\n",
            "prompts along with their corresponding inputs. Additionally, we utilized Topic-Guided Red-Teaming\n",
            "Self-Instruct to generate 99,121 prompts speci\ufb01cally tailored to 20 red-teaming instruction types.\n",
            "After applying the Principle-Driven Self-Alignment process and \ufb01ltering out low-quality responses,\n",
            "we obtained 191,628 query-response pairs derived from Self-Instruct and 67,250 query-response pairs\n",
            "from Topic-Guided Red-Teaming Self-Instruct, resulting in a total of 258,878 query-response pairs.\n",
            "Figure 4 presents a detailed analysis of the principles applied and the instruction types encompassed\n",
            "in the Topic-Guided Red-Teaming (TGRT) approach. We observed that the instructions generated by\n",
            "the original Self-Instruct and TGRT Self-Instruct appear to evoke distinct principles. For instance,\n",
            "----------\n"
          ]
        }
      ],
      "source": [
        "# query on a more specific topic (red teaming)\n",
        "query(\"can you tell me about red teaming for llama 2?\")"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# deliberately vague query to see how retrieval copes\n",
        "query(\"what is the best llm?\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "2VUqwF-com3z",
        "outputId": "91078bf1-45ba-4760-a034-7da60832f543"
      },
      "execution_count": 10,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "et al. (2022), in which a LLM is trained and refined\n",
            "on its own output iteratively. Specifically, with CoT\n",
            "prompting, the model first generates initial rationales. And then, the model is finetuned on rationales that lead to correct answers. This process can\n",
            "be repeated, with each iteration resulting in an improved model that can generate better training data,\n",
            "which in turn leads to further improvements. As a\n",
            "follow-up to this work, Huang et al. (2022a) show\n",
            "that LLMs are able to self-improve their reasoning\n",
            "abilities without the need for supervised data by\n",
            "leveraging the self-consistency of reasoning (Wang\n",
            "et al., 2022c).\n",
            "4 Measuring Reasoning in Large\n",
            "Language Models\n",
            "We summarize methods and benchmarks for evaluating reasoning abilities of LLMs in this section.\n",
            "4.1 End Task Performance\n",
            "One way to measure reasoning abilities of LLMs is\n",
            "to report their performance, e.g., accuracy, on end\n",
            "tasks that require reasoning. We list some common\n",
            "benchmarks as follows.\n",
            "Arithmetic Reasoning. Arithmetic reasoning is\n",
            "the ability to understand and apply mathematical concepts and principles in order to solve\n",
            "problems involving arithmetic operations. This\n",
            "involves using logical thinking and mathematical principles to determine the correct course\n",
            "of action when solving mathematical problems.\n",
            "----------\n",
            "\u0003Equal contribution. Correspondence: {htouvron,\n",
            "thibautlav,gizacard,egrave,glample}@meta.com\n",
            "1https://github.com/facebookresearch/llamaperformance, a smaller one trained longer will\n",
            "ultimately be cheaper at inference. For instance,\n",
            "although Hoffmann et al. (2022) recommends\n",
            "training a 10B model on 200B tokens, we \ufb01nd\n",
            "that the performance of a 7B model continues to\n",
            "improve even after 1T tokens.\n",
            "The focus of this work is to train a series of\n",
            "language models that achieve the best possible performance at various inference budgets, by training\n",
            "on more tokens than what is typically used. The\n",
            "resulting models, called LLaMA , ranges from 7B\n",
            "to 65B parameters with competitive performance\n",
            "compared to the best existing LLMs. For instance,\n",
            "LLaMA-13B outperforms GPT-3 on most benchmarks, despite being 10 \u0002smaller. We believe that\n",
            "this model will help democratize the access and\n",
            "study of LLMs, since it can be run on a single GPU.\n",
            "At the higher-end of the scale, our 65B-parameter\n",
            "model is also competitive with the best large language models such as Chinchilla or PaLM-540B.\n",
            "Unlike Chinchilla, PaLM, or GPT-3, we only\n",
            "----------\n",
            "inclusive LLM research. GLM-130B\u2019s technical and engineering undertakings generate insight into\n",
            "LLMs\u2019 architectures, pre-training objectives, training stability and ef\ufb01ciency, and affordable inference. Altogether, it contributes to the high quality of GLM-130B in terms of both language performance on 112 tasks and ethical results on bias and toxicity benchmarks. Our experiences of both\n",
            "success and failure are condensed into the following lessons learned for training 100B-scale LLMs:\n",
            "10\n",
            "Technical Report 2022-10-06 (v1)\n",
            "Lesson 1 (Bidirectional Architecture).\n",
            "The bidirectional-attention GLM is a strong architecture alternative, in addition to GPTs.\n",
            "Lesson 2 (Platform-aware Con\ufb01guration).\n",
            "Con\ufb01gure LLMs based on the cluster and parallel strategy used to squeeze hardware potential.\n",
            "Lesson 3 (Improved Post-LN).\n",
            "Counter-stereotypically, DeepNorm, a type of Post-LN, is the option to stabilize GLM-130B.\n",
            "Lesson 4 (Training Stability Categorization).\n",
            "Unexpected training instability that LLMs suffer from arouses systematically and numerically.\n",
            "Lesson 5 (Systematical Instability: FP16).\n",
            "Though FP16 induces more instability, it enables training and inference on diverse platforms.\n",
            "----------\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# comparative query spanning two models\n",
        "query(\"what is the difference between gpt-4 and llama 2?\")"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "sF8xvtyaoom4",
        "outputId": "5f9579b4-2154-473b-fa1a-dcf0a9ab8305"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "-0.043\n",
            "-0.009+0.0132-0.004 +0.0562\n",
            "+0.0387-0.012\n",
            "-0.076Alpaca: 0.39 LLaMA-GPT4: 0.34 GPT4: 0.37Figure 6: ROUGE-L on unnatural instructions evaluated with 9K samples. The instructions are\n",
            "grouped into four subsets based on the ground-truth response length. The mean values are reported in\n",
            "the legend. The difference with GPT-4 is reported on the bar per group. LLaMA-GPT4 is a closer\n",
            "proxy to GPT-4 than Alpaca.\n",
            "closely follow the behavior of GPT-4. When the sequence length is short, both LLaMA-GPT4 and\n",
            "GPT-4 can generate responses that contains the simple ground truth answers, but add extra words to\n",
            "make the response more chat-like, which probably leads to lower ROUGE-L scores.\n",
            "5 R ELATED WORK\n",
            "Instruction Tuning. Instruction tuning of LLMs is an increasingly popular research direction in\n",
            "NLP (Zhong et al., 2021; Ouyang et al., 2022; Wei et al., 2021). Existing works aim to improve\n",
            "the quality and scale of three factors in the development pipeline, including instruction-following\n",
            "----------\n",
            "(ii)For GPT-4 results alone, the translated responses show superior performance over the generated\n",
            "response in Chinese, probably because GPT-4 is trained in richer English corpus than Chinese, which\n",
            "leads to stronger English instruction-following ability. In Figure 5 (c), we show results for all models\n",
            "who are asked to answer in Chinese.\n",
            "We compare LLaMA-GPT4 with GPT-4 and Alpaca unnatural instructions in Figure 6. In terms of the\n",
            "average ROUGE-L scores, Alpaca outperforms the other two models. We note that LLaMA-GPT4 and\n",
            "GPT4 is gradually performing better when the ground truth response length is increasing, eventually\n",
            "showing higher performance when the length is longer than 4. This means that they can better follow\n",
            "instructions when the scenarios are more creative. Across different subsets, LLaMA-GPT4 can\n",
            "7\n",
            "0-2 3-5 6-10 10>\n",
            "Groundtruth Response Length0.30.40.5RougeL\n",
            "-0.043\n",
            "-0.009+0.0132-0.004 +0.0562\n",
            "+0.0387-0.012\n",
            "----------\n",
            "31.39%LLaMA-GPT4 \n",
            " 25.99%\n",
            "Tie \n",
            " 42.61%\n",
            "HonestyAlpaca \n",
            "  25.43%LLaMA-GPT4 \n",
            " 16.48%\n",
            "Tie \n",
            " 58.10%\n",
            "Harmlessness(a) LLaMA-GPT4 vs Alpaca ( i.e.,LLaMA-GPT3 )\n",
            " GPT4 \n",
            "  44.11%\n",
            "LLaMA-GPT4 \n",
            " 42.78% Tie \n",
            " 13.11%\n",
            "Helpfulness GPT4 \n",
            "  37.48%\n",
            "LLaMA-GPT4 \n",
            " 37.88% Tie \n",
            " 24.64%\n",
            "Honesty GPT4 \n",
            "  35.36% LLaMA-GPT4 \n",
            " 31.66% Tie \n",
            " 32.98%\n",
            "Harmlessness\n",
            "(b) LLaMA-GPT4 vs GPT-4\n",
            "Figure 3: Human evaluation.\n",
            "4.2 H UMAN EVALUATION WITH ALIGNMENT CRITERIA\n",
            "To evaluate the alignment quality of our instruction-tuned LLMs, we follow alignment criteria from\n",
            "Anthropic Askell et al. (2021): an assistant is aligned if it is helpful, honest, and harmless (HHH).\n",
            "----------\n"
          ]
        }
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "---"
      ],
      "metadata": {
        "id": "d2UP-QJzo610"
      }
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "machine_shape": "hm",
      "provenance": [],
      "gpuType": "V100"
    },
    "kernelspec": {
      "display_name": "Python 3",
      "name": "python3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}