{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "-zE1h0uQV7uT"
},
"source": [
"# Install Packages and Setup Variables\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "QPJzr-I9XQ7l",
"outputId": "19864102-680b-446b-fb38-7fad066cee09"
},
"outputs": [],
"source": [
"!pip install -q llama-index==0.10.57 openai==1.37.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-embeddings-cohere llama-index-readers-web cohere==5.6.2 tiktoken==0.7.0 chromadb==0.5.5 html2text sentence_transformers pydantic llama-index-vector-stores-chroma==0.1.10 kaleido==0.2.1 llama-index-llms-gemini==0.1.11"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "riuXwpSPcvWC"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set the following API Keys in the Python environment. Will be used later.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"GOOGLE_API_KEY\"] = \"\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "jIEeZzqLbz0J"
},
"outputs": [],
"source": [
"# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
"\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Bkgi2OrYzF7q"
},
"source": [
"# Load a Model\n"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "9oGT6crooSSj"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"from llama_index.llms.gemini import Gemini\n",
"\n",
"llm = Gemini(model=\"models/gemini-1.5-flash\", temperature=1, max_tokens=512)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0BwVuJXlzHVL"
},
"source": [
"# Create a VectoreStore\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "SQP87lHczHKc"
},
"outputs": [],
"source": [
"import chromadb\n",
"\n",
"# create client and a new collection\n",
"# chromadb.EphemeralClient saves data in-memory.\n",
"chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
]
},
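{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note: re-running `create_collection` with the same name will fail because the collection already exists on disk. If you need to re-run the notebook, a small sketch using `get_or_create_collection` (the same method used later in this notebook to reload the collection) is shown below.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# A sketch for re-runs: fetch the existing collection instead of creating a new one.\n",
"# chroma_collection = chroma_client.get_or_create_collection(\"mini-llama-articles\")"
]
},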
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "zAaGcYMJzHAN"
},
"outputs": [],
"source": [
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"\n",
"# Define a storage context object using the created vector database.\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "I9JbAzFcjkpn"
},
"source": [
"# Load the Dataset (CSV)\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ceveDuYdWCYk"
},
"source": [
"## Download\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "eZwf6pv7WFmD"
},
"source": [
"The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string.\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wl_pbPvMlv1h",
"outputId": "5418de57-b95b-4b90-b7d0-a801ea3c73f7"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 169k 100 169k 0 0 784k 0 --:--:-- --:--:-- --:--:-- 785k\n"
]
}
],
"source": [
"!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "VWBLtDbUWJfA"
},
"source": [
"## Read File\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "0Q9sxuW0g3Gd",
"outputId": "801f2ba8-b498-4923-c1cc-c17d3208850c"
},
"outputs": [
{
"data": {
"text/plain": [
"14"
]
},
"execution_count": 7,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import csv\n",
"\n",
"rows = []\n",
"\n",
"# Load the file as a JSON\n",
"with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
" csv_reader = csv.reader(file)\n",
"\n",
" for idx, row in enumerate(csv_reader):\n",
" if idx == 0:\n",
" continue\n",
" # Skip header row\n",
" rows.append(row)\n",
"\n",
"# The number of characters in the dataset.\n",
"len(rows)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S17g2RYOjmf2"
},
"source": [
"# Convert to Document obj\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"id": "YizvmXPejkJE"
},
"outputs": [],
"source": [
"from llama_index.core import Document\n",
"\n",
"# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
"documents = [\n",
" Document(\n",
" text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}\n",
" )\n",
" for row in rows\n",
"]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "qjuLbmFuWsyl"
},
"source": [
"# Transforming\n"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"id": "9z3t70DGWsjO"
},
"outputs": [],
"source": [
"from llama_index.core.text_splitter import TokenTextSplitter\n",
"\n",
"# Define the splitter object that split the text into segments with 512 tokens,\n",
"# with a 128 overlap between the segments.\n",
"text_splitter = TokenTextSplitter(separator=\" \", chunk_size=512, chunk_overlap=128)"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 331,
"referenced_widgets": [
"3fbabd8a8660461ba5e7bc08ef39139a",
"df2365556ae242a2ab1a119f9a31a561",
"5f4b9d32df8f446e858e4c289dc282f9",
"5b588f83a15d42d9aca888e06bbd95ff",
"ad073bca655540809e39f26538d2ec0d",
"13b9c5395bca4c3ba21265240cb936cf",
"47a4586384274577a726c57605e7f8d9",
"96a3bdece738481db57e811ccb74a974",
"5c7973afd79349ed997a69120d0629b2",
"af9b6ae927dd4764b9692507791bc67e",
"134210510d49476e959dd7d032bbdbdc",
"5f9bb065c2b74d2e8ded32e1306a7807",
"73a06bc546a64f7f99a9e4a135319dcd",
"ce48deaf4d8c49cdae92bfdbb3a78df0",
"4a172e8c6aa44e41a42fc1d9cf714fd0",
"0245f2604e4d49c8bd0210302746c47b",
"e956dfab55084a9cbe33c8e331b511e7",
"cb394578badd43a89850873ad2526542",
"193aef33d9184055bb9223f56d456de6",
"abfc9aa911ce4a5ea81c7c451f08295f",
"e7937a1bc68441a080374911a6563376",
"e532ed7bfef34f67b5fcacd9534eb789"
]
},
"id": "P9LDJ7o-Wsc-",
"outputId": "01070c1f-dffa-4ab7-ad71-b07b76b12e03"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parsing nodes: 0%| | 0/14 [00:00, ?it/s]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parsing nodes: 100%|██████████| 14/14 [00:00<00:00, 28.48it/s]\n",
"100%|██████████| 108/108 [00:59<00:00, 1.82it/s]\n",
"100%|██████████| 108/108 [01:46<00:00, 1.02it/s]\n",
"100%|██████████| 108/108 [00:28<00:00, 3.75it/s]\n",
"Generating embeddings: 100%|██████████| 108/108 [00:01<00:00, 59.76it/s]\n"
]
}
],
"source": [
"from llama_index.core.extractors import (\n",
" SummaryExtractor,\n",
" QuestionsAnsweredExtractor,\n",
" KeywordExtractor,\n",
")\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core.ingestion import IngestionPipeline\n",
"\n",
"# Create the pipeline to apply the transformation on each chunk,\n",
"# and store the transformed text in the chroma vector store.\n",
"pipeline = IngestionPipeline(\n",
" transformations=[\n",
" text_splitter,\n",
" QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
" SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
" KeywordExtractor(keywords=10, llm=llm),\n",
" OpenAIEmbedding(),\n",
" ],\n",
" vector_store=vector_store,\n",
")\n",
"\n",
"nodes = pipeline.run(documents=documents, show_progress=True)"
]
},
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "mPGa85hM2P3P",
"outputId": "c106c463-2459-4b11-bbae-5bd5e2246011"
},
"outputs": [
{
"data": {
"text/plain": [
"108"
]
},
"execution_count": 11,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len(nodes)"
]
},
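{
"cell_type": "markdown",
"metadata": {},
"source": [
"The extractors in the pipeline attach their results (generated questions, summaries, and keywords) to each node's metadata. The following cell is an optional sketch for inspecting what was produced; the exact metadata keys depend on the LlamaIndex version, so it simply prints whatever is attached to the first node.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Inspect the metadata the extractors attached to the first node.\n",
"print(nodes[0].metadata)"
]
},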
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"id": "23x20bL3_jRb"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"updating: mini-llama-articles/ (stored 0%)\n",
"updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
" adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/ (stored 0%)\n",
" adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/data_level0.bin (deflated 100%)\n",
" adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/length.bin (deflated 63%)\n",
" adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/link_lists.bin (stored 0%)\n",
" adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/header.bin (deflated 61%)\n"
]
}
],
"source": [
"# Compress the vector store directory to a zip file to be able to download and use later.\n",
"!zip -r vectorstore.zip mini-llama-articles"
]
},
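{
"cell_type": "markdown",
"metadata": {},
"source": [
"If you are running this notebook in Google Colab, the sketch below shows one way to download the zip file to your machine. It relies on the `google.colab` package, which is only available inside Colab, so the lines are left commented out.\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Only works inside Google Colab; skip this cell when running locally.\n",
"# from google.colab import files\n",
"# files.download(\"vectorstore.zip\")"
]
},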
{
"cell_type": "markdown",
"metadata": {
"id": "OWaT6rL7ksp8"
},
"source": [
"# Load Indexes\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "BLkmv3Yxp9mu"
},
"source": [
"If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage.\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "SodY2Xpf_kxg",
"outputId": "a6f7ae4a-447c-4222-e400-0fe55e7e26d9"
},
"outputs": [],
"source": [
"# !unzip vectorstore.zip"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"id": "mXi56KTXk2sp"
},
"outputs": [],
"source": [
"import chromadb\n",
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"\n",
"# Load the vector store from the local storage.\n",
"db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"id": "jKXURvLtkuTS"
},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex\n",
"\n",
"# Create the index based on the vector store.\n",
"vector_index = VectorStoreIndex.from_vector_store(vector_store)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "q0m5rl195bcz"
},
"source": [
"# Disply result\n"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"id": "4JpaHEmF5dSS"
},
"outputs": [],
"source": [
"# A simple function to show the response and the sources.\n",
"def display_res(response):\n",
" print(\"Response:\\n\\t\", response.response.replace(\"\\n\", \"\"))\n",
"\n",
" print(\"Sources:\")\n",
" if response.source_nodes:\n",
" for src in response.source_nodes:\n",
" print(\"\\tNode ID\\t\", src.node_id)\n",
" print(\"\\tText\\t\", src.text)\n",
" print(\"\\tScore\\t\", src.score)\n",
" print(\"\\t\" + \"-_\" * 20)\n",
" else:\n",
" print(\"\\tNo sources used!\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "hbStjvUJ1cft"
},
"source": [
"# Chat Engine\n"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"id": "kwWlDpoR1cRI"
},
"outputs": [],
"source": [
"# define the chat_engine by using the index\n",
"chat_engine = vector_index.as_chat_engine(llm=llm) # chat_mode=\"best\""
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "ER3Lb-oN46lJ",
"outputId": "8b34da39-622f-43f2-cb45-01a1ff37efd7"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Response:\n",
"\t The LLaMA2 model has four different model sizes with varying parameters: 7 billion, 13 billion, 34 billion, and 70 billion parameters.\n",
"Sources:\n",
"\tNode ID\t c3239b40-e206-4a80-b020-eea87cf471cc\n",
"\tText\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
"\tScore\t 0.7031083612095066\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t bc123b3d-b031-4c09-9400-d60ba9a161d6\n",
"\tText\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
"\tScore\t 0.7004323686791223\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"# First Question:\n",
"response = chat_engine.chat(\n",
" \"Use the tool to answer, How many parameters LLaMA2 model has?\"\n",
")\n",
"display_res(response)"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "3RRmiJEQ5R1Q",
"outputId": "15efcc9b-583f-4efe-8e36-fa8b5160da16"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Response:\n",
"\t Why did the scarecrow win an award? Because he was outstanding in his field!\n",
"Sources:\n",
"\tNode ID\t 8685e48d-1fdb-4f55-8f62-6f2ea4cfaf5d\n",
"\tText\t with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering strong competition to closed-source models. V. Ghost Attention: Enhancing Conversational Continuity One unique feature in Llama 2 is Ghost Attention, which ensures continuity in conversations. This means that even after multiple interactions, the model remembers its initial instructions, ensuring more coherent and consistent responses throughout the conversation. This feature significantly enhances the user experience and makes Llama 2 a more reliable language model for interactive applications. In the example below, on the left, it forgets to use an emoji after a few conversations. On the right, with Ghost Attention, even after having many conversations, it will remember the context and continue to use emojis in its response. VI. Temporal Capability: A Leap in Information Organization Meta reported a groundbreaking temporal capability, where the model organizes information based on time relevance. Each question posed to the model is associated with a date, and it responds accordingly by considering the event date before which the question becomes irrelevant. For example, if you ask the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete\n",
"\tScore\t 0.5624851990178006\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t e03eb322-1360-4c76-b461-236ec8312de1\n",
"\tText\t Introduce GPT4All GPT4All is a large language model (LLM) chatbot developed by Nomic AI, the world's first information cartography company. It was fine-tuned from LLaMA 7B model, the leaked large language model from Meta (aka Facebook). GPT4All is trained on a massive dataset of text and code, and it can generate text, translate languages, write different kinds of creative content, and answer your questions in an informative way. GPT4All is available to the public on GitHub. LLaMA is available for commercial use under the GPL-3.0 license - while the LLaMA code is available for commercial use, the WEIGHTS are not. This effectively puts it in the same license class as GPT4All. Nomic is working on a GPT-J-based version of GPT4All with an open commercial license. GPT4All is not going to have a subscription fee ever. GPT4All is Free4All. Although GPT4All is still in its early stages, it has already left a notable mark on the AI landscape. Its popularity and capabilities are expected to expand further in the future. How to Run GPT4All Locally GPT4All Readme provides some details about its usage. Here will briefly demonstrate to run GPT4All locally on M1 CPU Mac. Download gpt4all-lora-quantized.bin from the-eye.Clone this repository, navigate to chat, and place the downloaded file there. Simply run the following command for M1 Mac: Now, it's ready to run locally. Please see a few snapshots below: Similar to ChatGPT, GPT4All has the ability to comprehend Chinese, a feature that Bard lacks. If you want to interact with GPT4All programmatically, you can install the nomic client as follows. Install the nomic client using pip install nomic.Use the following Python script to interact with GPT4All: Chat4All Demystified GPT4All aims to provide a cost-effective and fine-tuned model for high-quality LLM results. The GPT4All model was fine-tuned using an instance of LLaMA\n",
"\tScore\t 0.5615408071202241\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"# Second Question:\n",
"response = chat_engine.chat(\"Tell me a joke?\")\n",
"display_res(response)"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "8eOzp5Xc5Vbj",
"outputId": "13bc6714-dd89-45b3-a86b-759806245241"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Response:\n",
"\t The first question you asked was \"How many parameters LLaMA2 model has?\"\n",
"Sources:\n",
"\tNo sources used!\n"
]
}
],
"source": [
"# Third Question: (check if it can recall previous interactions)\n",
"response = chat_engine.chat(\"What was the first question I asked?\")\n",
"display_res(response)"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"id": "7jfiLpru5VZT"
},
"outputs": [],
"source": [
"# Reset the session to clear the memory\n",
"chat_engine.reset()"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Jt0q8RW25VXN",
"outputId": "0e2d0d4e-c0ff-48bf-8df3-478fcdc66abd"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Response:\n",
"\t The first question you asked was \"How can a Q&A bot be built over private documents using OpenAI and LangChain?\"\n",
"Sources:\n",
"\tNode ID\t baa8a99c-f38b-4818-b854-5741598c0776\n",
"\tText\t Private data to be used The example provided can be used with any dataset. I am using a data set that has Analyst recommendations from various stocks. For the purpose of demonstration, I have gathered publicly available analyst recommendations to showcase its capabilities. You can replace this with your own information to try this. Below is a partial extract of the information commonly found in these documents. If you wish to try it yourself, you can download analyst recommendations for your preferred stocks from online sources or access them through subscription platforms like Barron's. Although the example provided focuses on analyst recommendations, the underlying structure can be utilized to query various other types of documents in any industry as well. I have assembled such data for a few stocks for demonstration purposes. This includes Google, Microsoft, Meta, and Tesla. To facilitate easy access and updating of analysts' recommendations, all the recommendations can be organized into a designated folder. Each stock corresponds to a separate file within this folder. For example, if there are recommendations for 20 stocks, there will be 20 individual files. This organization enables convenient updating of information for each stock as new recommendations arrive, streamlining the process of managing and maintaining the most up-to-date data for each stock. Questions this Q&A bot application can answer The data we have for this application is stock market analyst recommendations for many stocks. Let's say you are looking for insight about Microsoft stock. You can ask any of the following questions as an example: What is the median target price for Microsoft (MSFT)?What is the highest price estimate for Microsoft (MSFT)?What is the lowest price estimate for Microsoft (MSFT)?How much percentage increase is expected in the stock price of Microsoft (MSFT)?How many analysts provided price forecasts for Microsoft (MSFT)?What is the current consensus among investment analysts regarding Microsoft (MSFT)?Has the consensus rating for Microsoft (MSFT) changed recently?When was the consensus rating last updated for Microsoft (MSFT)?Is the current recommendation for Microsoft (MSFT) to buy, sell, or hold the stock?Are there any recent analyst reports available for Microsoft (MSFT)? These questions cover various aspects of the stock analysis, including price forecasts, analyst recommendations, and recent changes in ratings. The\n",
"\tScore\t 0.5990934490336279\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t d03de2fa-70aa-4b32-8760-3af0dd0ebb24\n",
"\tText\t you want to specify exact documents, you can do it the following way. To load the files you want to ingest, you can specify the path to each file individually. The loaded files can then be saved into a list. This list serves as the input that is sent to the vector database to store the data. The alternative approach is a more versatile method in which we can load all pertinent documents from a designated folder and store the file locations in a list for subsequent processing. This approach offers flexibility and allows for the efficient handling of multiple documents by capturing their locations in a centralized list, enabling seamless data retrieval and analysis. Load the documents into the vector store. When dealing with a vast number of documents, it becomes inefficient to send all documents (analyst recommendations) to your large language model (LLM) when seeking answers to specific questions. For instance, if your question pertains to MSFT, it would be more cost-effective to only send document extracts that reference MSFT to your LLM for answering the question. This approach helps optimize resource utilization. To achieve this, all documents are split into chunks and stored in a vector database in a numeric format (embeddings). When a new question is posed, the system queries the vector database for relevant text chunks related to this question, which is then shared with the LLM to generate an appropriate response. Within the LangChain framework, the VectorstoreIndexCreator class serves as a utility for creating a vector store index. This index stores vector representations of the documents (in chromadb), enabling various text operations, such as finding similar documents based on a specific question. When a user asks a question, a similarity search is performed in the vector store to get document chunks relevant to the question. The question, along with the chunks are sent to OpenAI to get the response back. Now we are ready to query these documents. Setting up the web application The application is presented in the browser using Streamlit, providing a user-friendly interface. Within the application, a text box is available for users to enter their questions. Upon submitting the question by pressing enter, the application processes the input and generates a corresponding response. This response is then displayed below the text box, allowing users to conveniently view the relevant\n",
"\tScore\t 0.5904441993661576\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"# Fourth Question: (don't recall the previous interactions.)\n",
"response = chat_engine.chat(\"What was the first question I asked?\")\n",
"display_res(response)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0Egsib7yPJGR"
},
"source": [
"# Streaming\n"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "zanJeMbaPJcq",
"outputId": "de7f0905-c1b1-49ac-fb66-d1578da35cad"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Here is a paragraph about the LLaMA2 model's capabilities:\n",
"\n",
"\"The Llama 2 model showcases impressive capabilities in the realm of open-source language models. It introduces innovative features like Ghost Attention, which enhances conversational continuity by ensuring consistent responses throughout interactions. Additionally, Llama 2 boasts a groundbreaking temporal capability that organizes information based on time relevance, leading to more contextually accurate responses. Despite facing challenges in coding and math problems compared to larger models like Chat GPT 4, Llama 2 demonstrates efficiency and potential in the market, competing well with both open-source and closed-source models. Its ability to balance helpfulness and safety in optimizing responses further solidifies its position as a reliable and advanced language model for commercial use.\""
]
}
],
"source": [
"# Stream the words as soon as they are available instead of waiting for the model to finish generation.\n",
"streaming_response = chat_engine.stream_chat(\n",
" \"Write a paragraph about the LLaMA2 model's capabilities.\"\n",
")\n",
"for token in streaming_response.response_gen:\n",
" print(token, end=\"\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "DuRgOJ2AHMJh"
},
"source": [
"## Condense Question\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Yb2Lt41jq145"
},
"source": [
"Enhance the input prompt by looking at the previous chat history along with the present question. The refined prompt can then be used to fetch the nodes.\n"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"id": "v0gmM5LGIaRl"
},
"outputs": [],
"source": [
"# Define GPT-4 model that will be used by the chat_engine to improve the query.\n",
"gpt4 = OpenAI(temperature=0.9, model=\"gpt-4o\")"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {
"id": "EDWsaBTBIhK7"
},
"outputs": [],
"source": [
"chat_engine = vector_index.as_chat_engine(\n",
" chat_mode=\"condense_question\", llm=gpt4, verbose=True\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "h4c--hJ75VU2",
"outputId": "e80fd9bf-e6d5-4532-8771-8cbf781e782e"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Querying with: Using the tool at your disposal, can you please determine which company released the LLaMA2 model and explain what specific functionality or purpose this model is known for?\n",
"Response:\n",
"\t The LLaMA2 model was released by Meta. The model is known for its temporal awareness feature which enhances the accuracy of its responses by delivering more contextually accurate responses based on time relevance. For example, for the question, \"How long ago did Barack Obama become president?\", it only considers information relevant after 2008. Meta's open-sourcing of LLaMA2 provides developers and researchers with commercial access to the advanced language model, which represents a significant shift in the AI industry.\n",
"Sources:\n",
"\tNode ID\t 7adec56f-6714-4376-8ebf-180b694c4d59\n",
"\tText\t LLaMA: Meta's new AI tool According to the official release, LLaMA is a foundational language model developed to assist 'researchers and academics' in their work (as opposed to the average web user) to understand and study these NLP models. Leveraging AI in such a way could give researchers an edge in terms of time spent. You may not know this, but this would be Meta's third LLM after Blender Bot 3 and Galactica. However, the two LLMs were shut down soon, and Meta stopped their further development, as it produced erroneous results. Before moving further, it is important to emphasize that LLaMA is NOT a chatbot like ChatGPT. As I mentioned before, it is a 'research tool' for researchers. We can expect the initial versions of LLaMA to be a bit more technical and indirect to use as opposed to the case with ChatGPT, which was very direct, interactive, and a lot easy to use. \"Smaller, more performant models such as LLaMA enable ... research community who don't have access to large amounts of infrastructure to study these models.. further democratizing access in this important, fast-changing field,\" said Meta in its official blog. Meta's effort of \"democratizing\" access to the public could shed light on one of the critical issues of Generative AI - toxicity and bias. ChatGPT and other LLMs (obviously, I am referring to Bing) have a track record of responding in a way that is toxic and, well... evil. The Verge and major critics have covered it in much detail. Oh and the community did get the access, but not in the way Meta anticipated. On March 3rd, a downloadable torrent of the LLaMA system was posted on 4chan. 4chan is an anonymous online forum known for its controversial content and diverse range of discussions, which has nearly 222 million unique monthly visitors. LLaMA is currently not in use on any of Meta's products. But Meta has plans to make it available to researchers before they can use them in their own products. It's worth mentioning that Meta did not release\n",
"\tScore\t 0.696738130166742\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t d19bbfc9-9ff8-4c21-ba5a-ef78e5db2d87\n",
"\tText\t the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete with Llama 2 or join hands with the open-source community to make the open-source models better? Meanwhile, Microsoft's move to host Llama 2 on Azure despite having significant investment in ChatGPT raises interesting questions. Will users prefer the capabilities and transparency of an open-source model like Llama 2 over closed, proprietary options? The stakes are high, as Meta's bold democratization play stands to reshape preferences and partnerships in the AI space. One thing is certain - the era of open language model competition has begun. VIII. Conclusion With the launch of Llama 2, Meta has achieved a landmark breakthrough in open-source language models, unleashing new potential through its commercial accessibility. Llama 2's formidable capabilities in natural language processing, along with robust safety protocols and temporal reasoning, set new benchmarks for the field. While select limitations around math and coding exist presently, Llama 2's strengths far outweigh its weaknesses. As Meta continues honing Llama technology, this latest innovation promises to be truly transformative. By open-sourcing such an advanced model, Meta is propelling democratization and proliferation of AI across industries. From healthcare to education and beyond, Llama 2 stands to shape the landscape by putting groundbreaking language modeling into the hands of all developers and researchers. The possibilities unlocked by this open-source approach signal a shift towards a more collaborative, creative AI future.\n",
"\tScore\t 0.692383316770113\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"response = chat_engine.chat(\n",
" \"Use the tool to answer, which company released LLaMA2 model? What is the model useful for?\"\n",
")\n",
"display_res(response)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ysL9ONePOsGB"
},
"source": [
"## REACT\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "KiEFmxAtrmF-"
},
"source": [
"ReAct is an agent-based chat mode that uses a loop to decide on querying a data engine during interactions, offering flexibility but relying on the Large Language Model's quality for effective responses, requiring careful management to avoid inaccurate answers.\n"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"id": "-M1jWoKXOs2t"
},
"outputs": [],
"source": [
"chat_engine = vector_index.as_chat_engine(chat_mode=\"react\", verbose=True, llm=llm)"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "UZkEW1SSOs0H",
"outputId": "4869c5fc-e0e1-44c6-e7f0-87db92bb2eb6"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Added user message to memory: Which company released LLaMA2 model? What is the model useful for?\n",
"=== Calling Function ===\n",
"Calling function: query_engine_tool with args: {\"input\": \"Which company released LLaMA2 model?\"}\n",
"Got output: Meta released the LLaMA2 model.\n",
"========================\n",
"\n",
"=== Calling Function ===\n",
"Calling function: query_engine_tool with args: {\"input\": \"What is the LLaMA2 model useful for?\"}\n",
"Got output: The Llama 2 model is useful for businesses to integrate into products to create AI-powered applications.\n",
"========================\n",
"\n"
]
}
],
"source": [
"response = chat_engine.chat(\n",
" \"Which company released LLaMA2 model? What is the model useful for?\"\n",
")"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "eW5P1lD4Osxf",
"outputId": "b128bc94-081b-49aa-c549-7d7d7be90b63"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Response:\n",
"\t The LLaMA2 model was released by Meta. It is useful for businesses to integrate into products to create AI-powered applications.\n",
"Sources:\n",
"\tNode ID\t 7adec56f-6714-4376-8ebf-180b694c4d59\n",
"\tText\t LLaMA: Meta's new AI tool According to the official release, LLaMA is a foundational language model developed to assist 'researchers and academics' in their work (as opposed to the average web user) to understand and study these NLP models. Leveraging AI in such a way could give researchers an edge in terms of time spent. You may not know this, but this would be Meta's third LLM after Blender Bot 3 and Galactica. However, the two LLMs were shut down soon, and Meta stopped their further development, as it produced erroneous results. Before moving further, it is important to emphasize that LLaMA is NOT a chatbot like ChatGPT. As I mentioned before, it is a 'research tool' for researchers. We can expect the initial versions of LLaMA to be a bit more technical and indirect to use as opposed to the case with ChatGPT, which was very direct, interactive, and a lot easy to use. \"Smaller, more performant models such as LLaMA enable ... research community who don't have access to large amounts of infrastructure to study these models.. further democratizing access in this important, fast-changing field,\" said Meta in its official blog. Meta's effort of \"democratizing\" access to the public could shed light on one of the critical issues of Generative AI - toxicity and bias. ChatGPT and other LLMs (obviously, I am referring to Bing) have a track record of responding in a way that is toxic and, well... evil. The Verge and major critics have covered it in much detail. Oh and the community did get the access, but not in the way Meta anticipated. On March 3rd, a downloadable torrent of the LLaMA system was posted on 4chan. 4chan is an anonymous online forum known for its controversial content and diverse range of discussions, which has nearly 222 million unique monthly visitors. LLaMA is currently not in use on any of Meta's products. But Meta has plans to make it available to researchers before they can use them in their own products. It's worth mentioning that Meta did not release\n",
"\tScore\t 0.6701682333186606\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t d19bbfc9-9ff8-4c21-ba5a-ef78e5db2d87\n",
"\tText\t the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete with Llama 2 or join hands with the open-source community to make the open-source models better? Meanwhile, Microsoft's move to host Llama 2 on Azure despite having significant investment in ChatGPT raises interesting questions. Will users prefer the capabilities and transparency of an open-source model like Llama 2 over closed, proprietary options? The stakes are high, as Meta's bold democratization play stands to reshape preferences and partnerships in the AI space. One thing is certain - the era of open language model competition has begun. VIII. Conclusion With the launch of Llama 2, Meta has achieved a landmark breakthrough in open-source language models, unleashing new potential through its commercial accessibility. Llama 2's formidable capabilities in natural language processing, along with robust safety protocols and temporal reasoning, set new benchmarks for the field. While select limitations around math and coding exist presently, Llama 2's strengths far outweigh its weaknesses. As Meta continues honing Llama technology, this latest innovation promises to be truly transformative. By open-sourcing such an advanced model, Meta is propelling democratization and proliferation of AI across industries. From healthcare to education and beyond, Llama 2 stands to shape the landscape by putting groundbreaking language modeling into the hands of all developers and researchers. The possibilities unlocked by this open-source approach signal a shift towards a more collaborative, creative AI future.\n",
"\tScore\t 0.6696485090138802\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t bc123b3d-b031-4c09-9400-d60ba9a161d6\n",
"\tText\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
"\tScore\t 0.7141285410107295\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"\tNode ID\t c3239b40-e206-4a80-b020-eea87cf471cc\n",
"\tText\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
"\tScore\t 0.7116485926146265\n",
"\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"display_res(response)"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "zf6r2AmFOsca"
},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"authorship_tag": "ABX9TyNlfE1aMk+m6avCgDavT2ZF",
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"0245f2604e4d49c8bd0210302746c47b": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"134210510d49476e959dd7d032bbdbdc": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"13b9c5395bca4c3ba21265240cb936cf": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"193aef33d9184055bb9223f56d456de6": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3fbabd8a8660461ba5e7bc08ef39139a": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_df2365556ae242a2ab1a119f9a31a561",
"IPY_MODEL_5f4b9d32df8f446e858e4c289dc282f9",
"IPY_MODEL_5b588f83a15d42d9aca888e06bbd95ff"
],
"layout": "IPY_MODEL_ad073bca655540809e39f26538d2ec0d"
}
},
"47a4586384274577a726c57605e7f8d9": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"4a172e8c6aa44e41a42fc1d9cf714fd0": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_e7937a1bc68441a080374911a6563376",
"placeholder": "",
"style": "IPY_MODEL_e532ed7bfef34f67b5fcacd9534eb789",
"value": " 108/108 [00:03<00:00, 33.70it/s]"
}
},
"5b588f83a15d42d9aca888e06bbd95ff": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_af9b6ae927dd4764b9692507791bc67e",
"placeholder": "",
"style": "IPY_MODEL_134210510d49476e959dd7d032bbdbdc",
"value": " 14/14 [00:00<00:00, 21.41it/s]"
}
},
"5c7973afd79349ed997a69120d0629b2": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"5f4b9d32df8f446e858e4c289dc282f9": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_96a3bdece738481db57e811ccb74a974",
"max": 14,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_5c7973afd79349ed997a69120d0629b2",
"value": 14
}
},
"5f9bb065c2b74d2e8ded32e1306a7807": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_73a06bc546a64f7f99a9e4a135319dcd",
"IPY_MODEL_ce48deaf4d8c49cdae92bfdbb3a78df0",
"IPY_MODEL_4a172e8c6aa44e41a42fc1d9cf714fd0"
],
"layout": "IPY_MODEL_0245f2604e4d49c8bd0210302746c47b"
}
},
"73a06bc546a64f7f99a9e4a135319dcd": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_e956dfab55084a9cbe33c8e331b511e7",
"placeholder": "",
"style": "IPY_MODEL_cb394578badd43a89850873ad2526542",
"value": "Generating embeddings: 100%"
}
},
"96a3bdece738481db57e811ccb74a974": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"abfc9aa911ce4a5ea81c7c451f08295f": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"ad073bca655540809e39f26538d2ec0d": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"af9b6ae927dd4764b9692507791bc67e": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"cb394578badd43a89850873ad2526542": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"ce48deaf4d8c49cdae92bfdbb3a78df0": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_193aef33d9184055bb9223f56d456de6",
"max": 108,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_abfc9aa911ce4a5ea81c7c451f08295f",
"value": 108
}
},
"df2365556ae242a2ab1a119f9a31a561": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_13b9c5395bca4c3ba21265240cb936cf",
"placeholder": "",
"style": "IPY_MODEL_47a4586384274577a726c57605e7f8d9",
"value": "Parsing nodes: 100%"
}
},
"e532ed7bfef34f67b5fcacd9534eb789": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"e7937a1bc68441a080374911a6563376": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e956dfab55084a9cbe33c8e331b511e7": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}