{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "-zE1h0uQV7uT"
},
"source": [
"# Install Packages and Setup Variables"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "QPJzr-I9XQ7l",
"outputId": "3115889a-14ee-457c-c0d5-271c1053a1e9"
},
"outputs": [],
"source": [
"!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-readers-web tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "riuXwpSPcvWC"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\""
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "jIEeZzqLbz0J"
},
"outputs": [],
"source": [
"# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
"\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "Bkgi2OrYzF7q"
},
"source": [
"# Load a Model"
]
},
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "9oGT6crooSSj"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
" from .autonotebook import tqdm as notebook_tqdm\n"
]
}
],
"source": [
"from llama_index.llms.openai import OpenAI\n",
"\n",
"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0BwVuJXlzHVL"
},
"source": [
"# Create a VectoreStore"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "SQP87lHczHKc"
},
"outputs": [],
"source": [
"import chromadb\n",
"\n",
"# create client and a new collection\n",
"# chromadb.EphemeralClient saves data in-memory.\n",
"chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"id": "zAaGcYMJzHAN"
},
"outputs": [],
"source": [
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"\n",
"# Define a storage context object using the created vector database.\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "I9JbAzFcjkpn"
},
"source": [
"# Load the Dataset (CSV)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "ceveDuYdWCYk"
},
"source": [
"## Download"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "eZwf6pv7WFmD"
},
"source": [
"The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "wl_pbPvMlv1h",
"outputId": "24342259-24f0-44fa-bd0d-21da798d0555"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 169k 100 169k 0 0 864k 0 --:--:-- --:--:-- --:--:-- 865k\n"
]
}
],
"source": [
"!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "VWBLtDbUWJfA"
},
"source": [
"## Read File"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "0Q9sxuW0g3Gd",
"outputId": "889c1127-cf04-4ce7-d99c-d60826ffe92f"
},
"outputs": [
{
"data": {
"text/plain": [
"14"
]
},
"execution_count": 8,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"import csv\n",
"\n",
"rows = []\n",
"\n",
"# Load the file as a JSON\n",
"with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
" csv_reader = csv.reader(file)\n",
"\n",
" for idx, row in enumerate( csv_reader ):\n",
" if idx == 0: continue; # Skip header row\n",
" rows.append( row )\n",
"\n",
"# The number of characters in the dataset.\n",
"len( rows )"
]
},
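{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional sanity check, the next cell peeks at the first row to confirm the column order assumed in the cell after it, i.e. `[title, content, url, source_name]`. This ordering is inferred from how the rows are used below."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: preview the first 60 characters of each field in the first row.\n",
"print([field[:60] for field in rows[0]])"
]
},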
{
"cell_type": "markdown",
"metadata": {
"id": "S17g2RYOjmf2"
},
"source": [
"# Convert to Document obj"
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"id": "YizvmXPejkJE"
},
"outputs": [],
"source": [
"from llama_index.core import Document\n",
"\n",
"# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
"documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "qjuLbmFuWsyl"
},
"source": [
"# Transforming"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"id": "9z3t70DGWsjO"
},
"outputs": [],
"source": [
"from llama_index.core.text_splitter import TokenTextSplitter\n",
"\n",
"# Define the splitter object that split the text into segments with 512 tokens,\n",
"# with a 128 overlap between the segments.\n",
"text_splitter = TokenTextSplitter(\n",
" separator=\" \", chunk_size=512, chunk_overlap=128\n",
")"
]
},
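{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before running the full pipeline, we can optionally apply the splitter to a single article to see what it produces. This is a small illustrative sketch: the exact chunk count depends on the article length and the tokenizer `TokenTextSplitter` uses."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional illustration: split the first article on its own.\n",
"# With chunk_size=512 and chunk_overlap=128, consecutive chunks share 128 tokens.\n",
"sample_chunks = text_splitter.split_text(documents[0].text)\n",
"print(len(sample_chunks), \"chunks\")\n",
"print(sample_chunks[0][:200])"
]
},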
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 331,
"referenced_widgets": [
"3fbabd8a8660461ba5e7bc08ef39139a",
"df2365556ae242a2ab1a119f9a31a561",
"5f4b9d32df8f446e858e4c289dc282f9",
"5b588f83a15d42d9aca888e06bbd95ff",
"ad073bca655540809e39f26538d2ec0d",
"13b9c5395bca4c3ba21265240cb936cf",
"47a4586384274577a726c57605e7f8d9",
"96a3bdece738481db57e811ccb74a974",
"5c7973afd79349ed997a69120d0629b2",
"af9b6ae927dd4764b9692507791bc67e",
"134210510d49476e959dd7d032bbdbdc",
"5f9bb065c2b74d2e8ded32e1306a7807",
"73a06bc546a64f7f99a9e4a135319dcd",
"ce48deaf4d8c49cdae92bfdbb3a78df0",
"4a172e8c6aa44e41a42fc1d9cf714fd0",
"0245f2604e4d49c8bd0210302746c47b",
"e956dfab55084a9cbe33c8e331b511e7",
"cb394578badd43a89850873ad2526542",
"193aef33d9184055bb9223f56d456de6",
"abfc9aa911ce4a5ea81c7c451f08295f",
"e7937a1bc68441a080374911a6563376",
"e532ed7bfef34f67b5fcacd9534eb789"
]
},
"id": "P9LDJ7o-Wsc-",
"outputId": "01070c1f-dffa-4ab7-ad71-b07b76b12e03"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parsing nodes: 0%| | 0/14 [00:00, ?it/s]"
]
},
{
"name": "stderr",
"output_type": "stream",
"text": [
"Parsing nodes: 100%|██████████| 14/14 [00:00<00:00, 27.40it/s]\n",
"100%|██████████| 108/108 [00:59<00:00, 1.81it/s]\n",
"100%|██████████| 108/108 [01:08<00:00, 1.58it/s]\n",
"100%|██████████| 108/108 [00:27<00:00, 3.88it/s]\n",
"Generating embeddings: 100%|██████████| 108/108 [00:01<00:00, 77.68it/s]\n"
]
}
],
"source": [
"from llama_index.core.extractors import (\n",
" SummaryExtractor,\n",
" QuestionsAnsweredExtractor,\n",
" KeywordExtractor,\n",
")\n",
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core.ingestion import IngestionPipeline\n",
"\n",
"# Create the pipeline to apply the transformation on each chunk,\n",
"# and store the transformed text in the chroma vector store.\n",
"pipeline = IngestionPipeline(\n",
" transformations=[\n",
" text_splitter,\n",
" QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
" SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
" KeywordExtractor(keywords=10, llm=llm),\n",
" OpenAIEmbedding(),\n",
" ],\n",
" vector_store=vector_store\n",
")\n",
"\n",
"# Run the transformation pipeline.\n",
"nodes = pipeline.run(documents=documents, show_progress=True);"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "mPGa85hM2P3P",
"outputId": "c106c463-2459-4b11-bbae-5bd5e2246011"
},
"outputs": [
{
"data": {
"text/plain": [
"108"
]
},
"execution_count": 13,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len( nodes )"
]
},
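{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each node now carries the metadata produced by the extractors. The key names below (`questions_this_excerpt_can_answer`, `section_summary`, `excerpt_keywords`) are the defaults used by the extractors in the pinned LlamaIndex version; treat them as an assumption and check `metadata.keys()` if they differ."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: inspect the metadata the extractors attached to one node.\n",
"sample_node = nodes[0]\n",
"print(sample_node.metadata.keys())\n",
"print(sample_node.metadata.get(\"excerpt_keywords\"))"
]
},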
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"id": "23x20bL3_jRb"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"updating: mini-llama-articles/ (stored 0%)\n",
"updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
" adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/ (stored 0%)\n",
" adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/data_level0.bin (deflated 97%)\n",
" adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/length.bin (deflated 23%)\n",
" adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/link_lists.bin (stored 0%)\n",
" adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/header.bin (deflated 61%)\n"
]
}
],
"source": [
"# Compress the vector store directory to a zip file to be able to download and use later.\n",
"!zip -r vectorstore.zip mini-llama-articles"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "OWaT6rL7ksp8"
},
"source": [
"# Load Indexes"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "d7mY7AdLjs4F"
},
"source": [
"If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "SodY2Xpf_kxg",
"outputId": "701258b4-ea35-46d1-df33-536a45752a28"
},
"outputs": [],
"source": [
"# !unzip vectorstore.zip"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"id": "mXi56KTXk2sp"
},
"outputs": [],
"source": [
"import chromadb\n",
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"\n",
"# Load the vector store from the local storage.\n",
"db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
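{
"cell_type": "markdown",
"metadata": {},
"source": [
"As an optional check, the collection should report the number of stored nodes (108 in the run above), whether it was just created or restored from the zip file."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: verify how many embedded nodes the collection holds.\n",
"chroma_collection.count()"
]
},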
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"id": "jKXURvLtkuTS"
},
"outputs": [],
"source": [
"from llama_index.core import VectorStoreIndex\n",
"\n",
"# Create the index based on the vector store.\n",
"vector_index = VectorStoreIndex.from_vector_store(vector_store)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "XjIQGo11j5N-"
},
"source": [
"# Retrieving All the Nodes"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "RZBPFntrj8tp"
},
"source": [
"To develop a custom retriever with keyword index, we require access to all nodes. We use the index as a retriever and requesting it to fetch a large number of documents, we can ensure that the retriever returns every document stored in the vector store. (This method serves as a temporary solution because LlamaIndex currently lacks the capability to fetch all documents from a chromadb. However, this limitation may be addressed in future updates.)"
]
},
{
"cell_type": "code",
"execution_count": 18,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "Za6m06wpcJpN",
"outputId": "98806ea5-5c2d-4a87-97ea-ee37a890c7bf"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"Number of requested results 100000000 is greater than number of elements in index 108, updating n_results = 108\n"
]
}
],
"source": [
"# Set similarity_top_k to a large number to retrieve all the nodes\n",
"retriever = vector_index.as_retriever(similarity_top_k=100000000)\n",
"\n",
"# Retrieve all nodes\n",
"all_nodes = retriever.retrieve('Hello!')"
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"id": "2Tz_n2MLj62B"
},
"outputs": [],
"source": [
"all_nodes = [item.node for item in all_nodes]"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "mquOgF8UnXZi",
"outputId": "cd41e132-237e-4e4f-bb35-464dba9307ba"
},
"outputs": [
{
"data": {
"text/plain": [
"108"
]
},
"execution_count": 20,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"len( all_nodes )"
]
},
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"id": "hcmwBAsCZIwR"
},
"outputs": [],
"source": [
"from llama_index.core import SimpleKeywordTableIndex\n",
"\n",
"# Define the KeyworddTableIndex using all the nodes.\n",
"keyword_index = SimpleKeywordTableIndex(nodes=all_nodes)"
]
},
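{
"cell_type": "markdown",
"metadata": {},
"source": [
"Optionally, we can glance at a few entries of the keyword table. Accessing `index_struct.table`, a mapping from each keyword to the IDs of the nodes containing it, is an assumption about the internal structure of `SimpleKeywordTableIndex` in this version and may change."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional peek at the keyword -> node-ID mapping (internal structure).\n",
"list(keyword_index.index_struct.table.items())[:3]"
]
},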
{
"cell_type": "markdown",
"metadata": {
"id": "K3wtAa7Lo2Vh"
},
"source": [
"# Custom Retriever"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"id": "txPFNOkUo2Kj"
},
"outputs": [],
"source": [
"from llama_index.core import QueryBundle\n",
"from llama_index.core.schema import NodeWithScore\n",
"from llama_index.core.retrievers import (\n",
" BaseRetriever,\n",
" VectorIndexRetriever,\n",
" KeywordTableSimpleRetriever,\n",
")\n",
"from typing import List\n",
"\n",
"# The custom retriever that can use both vector index and keyword index to retrieve documents.\n",
"# It has two modes: \"AND\" meaning it uses nodes that are retrieved in both indexes.\n",
"# \"OR\" meaning that it merges the retrieved nodes.\n",
"class CustomRetriever(BaseRetriever):\n",
" \"\"\"Custom retriever that performs both semantic search and hybrid search.\"\"\"\n",
"\n",
" def __init__(\n",
" self,\n",
" vector_retriever: VectorIndexRetriever,\n",
" keyword_retriever: KeywordTableSimpleRetriever,\n",
" mode: str = \"AND\",\n",
" ) -> None:\n",
" \"\"\"Init params.\"\"\"\n",
"\n",
" self._vector_retriever = vector_retriever\n",
" self._keyword_retriever = keyword_retriever\n",
" if mode not in (\"AND\", \"OR\"):\n",
" raise ValueError(\"Invalid mode.\")\n",
" self._mode = mode\n",
" super().__init__()\n",
"\n",
" def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:\n",
" \"\"\"Retrieve nodes given query.\"\"\"\n",
"\n",
" vector_nodes = self._vector_retriever.retrieve(query_bundle)\n",
" keyword_nodes = self._keyword_retriever.retrieve(query_bundle)\n",
"\n",
" vector_ids = {n.node.node_id for n in vector_nodes}\n",
" keyword_ids = {n.node.node_id for n in keyword_nodes}\n",
"\n",
" combined_dict = {n.node.node_id: n for n in vector_nodes}\n",
" combined_dict.update({n.node.node_id: n for n in keyword_nodes})\n",
"\n",
" if self._mode == \"AND\":\n",
" retrieve_ids = vector_ids.intersection(keyword_ids)\n",
" else:\n",
" retrieve_ids = vector_ids.union(keyword_ids)\n",
"\n",
" retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids]\n",
"\n",
" return retrieve_nodes"
]
},
{
"cell_type": "code",
"execution_count": 23,
"metadata": {
"id": "YWLckX40pii-"
},
"outputs": [],
"source": [
"from llama_index.core import get_response_synthesizer\n",
"from llama_index.core.query_engine import RetrieverQueryEngine\n",
"\n",
"# define custom retriever\n",
"vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2)\n",
"keyword_retriever = KeywordTableSimpleRetriever(index=keyword_index, max_keywords_per_query=2)\n",
"custom_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"OR\")\n",
"\n",
"# define response synthesizer\n",
"response_synthesizer = get_response_synthesizer()"
]
},
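{
"cell_type": "markdown",
"metadata": {},
"source": [
"The optional cell below contrasts the two modes on a sample query. It is a hedged sketch: the exact node counts depend on the extracted keywords and the `similarity_top_k` chosen above, but \"AND\" should never return more nodes than \"OR\"."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional: compare \"AND\" (intersection) and \"OR\" (union) on the same query.\n",
"and_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"AND\")\n",
"sample_query = \"What is Llama 2?\"\n",
"print(\"OR :\", len(custom_retriever.retrieve(sample_query)))\n",
"print(\"AND:\", len(and_retriever.retrieve(sample_query)))"
]
},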
{
"cell_type": "markdown",
"metadata": {
"id": "8JPD8yAinVSq"
},
"source": [
"# Query Dataset"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"id": "b0gue7cyctt1"
},
"outputs": [],
"source": [
"# Define a query engine that is responsible for retrieving related pieces of text,\n",
"# and using a LLM to formulate the final answer.\n",
"custom_query_engine = RetrieverQueryEngine(\n",
" retriever=custom_retriever,\n",
" response_synthesizer=response_synthesizer,\n",
")\n",
"\n",
"res = custom_query_engine.query(\"How many parameters LLaMA2 model has?\")"
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 35
},
"id": "VKK3jMprctre",
"outputId": "370a6a1a-133d-428f-80c7-28777f4349b3"
},
"outputs": [
{
"data": {
"text/plain": [
"'The LLaMA2 model has 52 billion parameters.'"
]
},
"execution_count": 25,
"metadata": {},
"output_type": "execute_result"
}
],
"source": [
"res.response"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "465dH4yQc7Ct",
"outputId": "8f43f543-40b1-4f63-a433-d59b33545774"
},
"outputs": [
{
"name": "stdout",
"output_type": "stream",
"text": [
"Node ID\t 322a5cb0-5b0c-413f-bc5e-e72747b385d1\n",
"Title\t Building Intuition on the Concepts behind LLMs like ChatGPT - Part 1- Neural Networks, Transformers, Pretraining, and Fine Tuning\n",
"Text\t backpropagation, the degree of the error of the model (the loss value) is propagated backward through the neural network. It computes the derivative to the output of each individual weight and bias i.e. how sensitive the output is to changes in each specific parameter. For my people who didn't take on differential calculus in school (such as myself), think of the model parameters (weights/biases) as adjustable knobs. These knobs are arbitrary - in the sense that you can't tell in what specific way it governs the prediction ability of the model. The knobs, which can be rotated clockwise or counterclockwise have different effects on the behavior of the output. Knob A might increase the loss 3x when turned clockwise, knob B reduces the loss by 1/8 when turned counterclockwise (and so on). All these knobs are checked (all billions of them) and to get information on how sensitive the output is to adjustments of each knob - this numerical value is their derivative with respect to the output. Calculating these derivatives is called backpropagation. The output of backpropagation is a vector (a list of numbers) whose elements or dimensions consist of the parameters' individual derivatives. This vector is the gradient of the error with respect to the existing parameter values (or the current learnings) of the neural network. A vector has two properties: length or magnitude and direction. The gradient vector contains information on the direction in which the error or loss is increasing. The magnitude of the vector signifies the steepness or rate of increase. Think of the gradient vector as the map of a foggy hill you're descending from - gradient descent optimization is using the information about direction and steepness from the gradient vector to reach the bottom of the hill (the minimum loss value) as efficiently as possible by navigating to the path with the greatest downward incline (the opposite direction of the gradient vector). This involves iteratively adjusting the values of the weights and biases of the network (by subtracting small values to it i.e. the learning rate) en masse to reach this optimal state. After these steps, the hope\n",
"Score\t None\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t f097d19f-45bd-402b-9547-5482f57110ea\n",
"Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
"Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
"Score\t 0.7156515131319103\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 22cea8a0-aea7-4405-b7e1-a2cb02ff10e8\n",
"Title\t The Generative AI Revolution: Exploring the Current Landscape\n",
"Text\t Cloud announced its partnership with Cohere. The company intends to use Cloud's TPU for the development and deployment of its products, and Sagemaker by Amazon also gives access to Cohere's language AI. Cohere powers Hyperwrite, which helps in quickly generating articles. AWS has also announced a partnership with Cohere AI. To date, Cohere has raised $170 million, and with the ongoing rush of funding in AI platforms, the Canadian startup is expected to be valued at $6 billion. Cohere is set to introduce a new dialogue model to aid enterprise users in generating text while engaging with the model to fine-tune the output. Cohere's Xlarge model resembles ChatGPT but provides developers and businesses with access to this technology. Cohere's base model has 52 billion parameters compared to OpenAI's GPT-3 DaVinci model, which has 175B parameters. Cohere stresses on accuracy, speed, safety, cost, and ease of use for its users and has paid much attention to the product and its design, developing a cohesive model. 8. Anthropic AI's Claude Anthropic is an American AI startup and public benefit corporation founded in 2021 by Daniela Amodei and Dario Amodei, former members of OpenAI. The company specializes in developing AI systems and language models, with a particular focus on transformer architecture. Anthropic's research on the interpretability of machine learning systems covers fields ranging from natural language and interpretability to human feedback, scaling laws, reinforcement learning, and code generation, among others. The company stresses the application of responsible AI and presents itself as an AI safety and research company working towards building reliable, steerable, and interpretable AI systems. By 2022, Google had invested nearly $400 million in Anthropic, resulting in a formal partnership between the two companies and giving Google a 10% stake in Anthropic. Outside backing amounted to $580 million, with total investments in Anthropic exceeding $1 billion to date. Anthropic has developed a conversational large language model AI chatbot named Claude, which uses a messaging interface and a technique called constitutional AI to better align AI systems with human intentions. AnthropicLM v4-s3 is a\n",
"Score\t None\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 603fb039-960c-4c3e-a98a-a65c57ab6761\n",
"Title\t Building Intuition on the Concepts behind LLMs like ChatGPT - Part 1- Neural Networks, Transformers, Pretraining, and Fine Tuning\n",
"Text\t published by OpenAI, to train better models, increasing the number of parameters is 3x more important than increasing the size of the training data. (Note: DeepMind has since published a paper with a differing view.) This translates to a significant increase in computational requirements, as handling a larger number of parameters demands more complex calculations. Parallelization, which is the process of dividing a single task into multiple sub-tasks that can be processed simultaneously across multiple compute resources, becomes essential in dealing with this problem. Parallelization is difficult to achieve with RNNs given their sequential nature. This is not an issue for transformers as it computes relationships between all elements in a sequence simultaneously, rather than sequentially. It also means that they work well with GPUs or video cards. Graphics rendering requires a large number of simple calculations happening concurrently. The numerous, small, and efficient processing cores that a GPU has, which are designed for simultaneous operations, make it a good fit for tasks such as matrix and vector operations that are central to deep learning. AI going 'mainstream' and the mad scramble to build larger and better models is a boon to GPU manufacturers. NVIDIA- specifically - whose stock price has grown 200% YTD as of this writing, has made them the highest-performing stock this year and pushed their market cap to USD 1 trillion. They join megacaps like Apple, Google, Microsoft, and Amazon in this exclusive club. The Transformer is a decidedly complex topic and the explanation above wholesale left out important concepts in order to be more digestible to a broader audience. If you want to know more, I found these gentle yet significantly more fleshed-out introductions to the topic: Jay Allamar's illustrated transformer, Lili Jiang's potion analogy, or if you want something more advanced - Karpathy's nanoGPT that babbles in Shakepear-ish. Fine-tuning 'chat' models like ChatGPT The output of pretrainings are base models or foundation models. Examples of recently released text-generation foundation models are GPT-4, Bard, LLaMa 1 & 2, and Claude 1\n",
"Score\t None\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 56881e5c-1c47-48bd-be19-df7ada6ab593\n",
"Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
"Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
"Score\t 0.7009231750702649\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 4aada7f3-39f9-4911-ae2a-fb57876ee4a4\n",
"Title\t Exploring Large Language Models -Part 3\n",
"Text\t concept with toy datasets. The real trouble is making the model 'understand' the data first and not just parrot it out. Without understanding, it will parrot out the answer based on the similarity of the question in the training set, or both the question and answer. To prevent this, the authors have an intermediate step called 'Recite' where the model is made to recite/output the relevant passages and, after that, output the answer. Just to be clear, there is no doubt now (2023), especially with GPT3/4, LLAMA2 and similar models about the feasibility of this use case, that a model can understand the question, has some ability for causal reasoning, and can generalize to learn a world model from its training data, and to use both to create a well-formed answer to the question. Let's see the difficulties one by one however, of training a large model. First is the importance of the model size. This GIF from the Google AI blog illustrates this beautifully. It is relatively easy and cost-efficient to train or fine-tune a small model with our custom data, as the GPU and infrastructure requirements are very less. On the contrary, it needs huge fleets of GPUs and training infrastructure to load very large language models and fine-tune them (without quantisation) in a distributed way (e.g. see libraries like DeepSpeed) LLMs come in various sizes, based on the number of trainable parameters or weights. The smaller ones, which have less than 1 billion parameters (GPT2 124 M, Bloom 560M, Flan-T5 783 M ) etc can be trained on a laptop GPU with 8 to 15 GB GPU RAM ) For quite some time, this is what I tried. I tried to overfit a small test data set on decoder models like GPP2-small, GPT-Medium, and Bloom and encoder-decoder models like Flan-T5, thinking somehow that the understanding we see in ChatGPT ( see- unsupervised learning Part 1) may come in some form if we train on these smaller models. ( less than one billion parameters). As per the paper, I tried both Causal training, where the model is presented with only previous tokens, and Masked\n",
"Score\t None\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"# Show the retrieved nodes\n",
"for src in res.source_nodes:\n",
" print(\"Node ID\\t\", src.node_id)\n",
" print(\"Title\\t\", src.metadata['title'])\n",
" print(\"Text\\t\", src.text)\n",
" print(\"Score\\t\", src.score)\n",
" print(\"-_\"*20)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "iMkpzH7vvb09"
},
"source": [
"# Evaluate"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"id": "H8a3eKgKvckU"
},
"outputs": [
{
"name": "stderr",
"output_type": "stream",
"text": [
"100%|██████████| 108/108 [06:17<00:00, 3.49s/it]\n"
]
}
],
"source": [
"from llama_index.core.evaluation import generate_question_context_pairs\n",
"from llama_index.llms.openai import OpenAI\n",
"\n",
"# Create questions for each segment. These questions will be used to\n",
"# assess whether the retriever can accurately identify and return the\n",
"# corresponding segment when queried.\n",
"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
"rag_eval_dataset = generate_question_context_pairs(\n",
" nodes,\n",
" llm=llm,\n",
" num_questions_per_chunk=1\n",
")\n",
"\n",
"# We can save the evaluation dataset as a json file for later use.\n",
"rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")"
]
},
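{
"cell_type": "markdown",
"metadata": {},
"source": [
"To spot-check the generated dataset, the optional cell below prints a couple of the questions. That `rag_eval_dataset.queries` maps question IDs to question text is an assumption based on the `EmbeddingQAFinetuneDataset` structure in the pinned version."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional peek at two generated evaluation questions.\n",
"list(rag_eval_dataset.queries.values())[:2]"
]
},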
{
"cell_type": "markdown",
"metadata": {
"id": "0O7cLF_TlnZV"
},
"source": [
"If you have uploaded the generated question JSON file, please uncomment the code in the next cell block. This will avoid the need to generate the questions manually, saving you time and effort."
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {
"id": "3sA1K84U254o"
},
"outputs": [],
"source": [
"# from llama_index.finetuning.embeddings.common import (\n",
"# EmbeddingQAFinetuneDataset,\n",
"# )\n",
"# rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
"# \"./rag_eval_dataset.json\"\n",
"# )"
]
},
{
"cell_type": "code",
"execution_count": 28,
"metadata": {
"id": "H7ubvcbk27vr"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"# A simple function to show the evaluation result.\n",
"def display_results_retriever(name, eval_results):\n",
" \"\"\"Display results from evaluate.\"\"\"\n",
"\n",
" metric_dicts = []\n",
" for eval_result in eval_results:\n",
" metric_dict = eval_result.metric_vals_dict\n",
" metric_dicts.append(metric_dict)\n",
"\n",
" full_df = pd.DataFrame(metric_dicts)\n",
"\n",
" hit_rate = full_df[\"hit_rate\"].mean()\n",
" mrr = full_df[\"mrr\"].mean()\n",
"\n",
" metric_df = pd.DataFrame(\n",
" {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
" )\n",
"\n",
" return metric_df"
]
},
{
"cell_type": "code",
"execution_count": 29,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 435
},
"id": "uNLxDxoc2-Ac",
"outputId": "93f03e7e-2590-46f0-fce0-3e8b29852a88"
},
"outputs": [
{
"ename": "ValidationError",
"evalue": "1 validation error for RetrieverEvaluator\nretriever\n instance of BaseRetriever expected (type=type_error.arbitrary_type; expected_arbitrary_type=BaseRetriever)",
"output_type": "error",
"traceback": [
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
"Cell \u001b[0;32mIn[29], line 11\u001b[0m\n\u001b[1;32m 6\u001b[0m custom_retriever \u001b[38;5;241m=\u001b[39m CustomRetriever(vector_retriever, keyword_retriever, \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mOR\u001b[39m\u001b[38;5;124m\"\u001b[39m)\n\u001b[1;32m 7\u001b[0m custom_query_engine \u001b[38;5;241m=\u001b[39m RetrieverQueryEngine(\n\u001b[1;32m 8\u001b[0m retriever\u001b[38;5;241m=\u001b[39mcustom_retriever,\n\u001b[1;32m 9\u001b[0m response_synthesizer\u001b[38;5;241m=\u001b[39mresponse_synthesizer,\n\u001b[1;32m 10\u001b[0m )\n\u001b[0;32m---> 11\u001b[0m retriever_evaluator \u001b[38;5;241m=\u001b[39m \u001b[43mRetrieverEvaluator\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mfrom_metric_names\u001b[49m\u001b[43m(\u001b[49m\n\u001b[1;32m 12\u001b[0m \u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mmrr\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[38;5;124;43mhit_rate\u001b[39;49m\u001b[38;5;124;43m\"\u001b[39;49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mretriever\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mcustom_query_engine\u001b[49m\n\u001b[1;32m 13\u001b[0m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n\u001b[1;32m 14\u001b[0m eval_results \u001b[38;5;241m=\u001b[39m \u001b[38;5;28;01mawait\u001b[39;00m retriever_evaluator\u001b[38;5;241m.\u001b[39maevaluate_dataset(rag_eval_dataset)\n\u001b[1;32m 15\u001b[0m \u001b[38;5;28mprint\u001b[39m(display_results_retriever(\u001b[38;5;124mf\u001b[39m\u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mRetriever top_\u001b[39m\u001b[38;5;132;01m{\u001b[39;00mi\u001b[38;5;132;01m}\u001b[39;00m\u001b[38;5;124m\"\u001b[39m, eval_results))\n",
"File \u001b[0;32m~/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/llama_index/core/evaluation/retrieval/base.py:99\u001b[0m, in \u001b[0;36mBaseRetrievalEvaluator.from_metric_names\u001b[0;34m(cls, metric_names, **kwargs)\u001b[0m\n\u001b[1;32m 91\u001b[0m \u001b[38;5;250m\u001b[39m\u001b[38;5;124;03m\"\"\"Create evaluator from metric names.\u001b[39;00m\n\u001b[1;32m 92\u001b[0m \n\u001b[1;32m 93\u001b[0m \u001b[38;5;124;03mArgs:\u001b[39;00m\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 96\u001b[0m \n\u001b[1;32m 97\u001b[0m \u001b[38;5;124;03m\"\"\"\u001b[39;00m\n\u001b[1;32m 98\u001b[0m metric_types \u001b[38;5;241m=\u001b[39m resolve_metrics(metric_names)\n\u001b[0;32m---> 99\u001b[0m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43mmetrics\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43m[\u001b[49m\u001b[43mmetric\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;28;43;01mfor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mmetric\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01min\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43mmetric_types\u001b[49m\u001b[43m]\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/llama_index/core/evaluation/retrieval/evaluator.py:45\u001b[0m, in \u001b[0;36mRetrieverEvaluator.__init__\u001b[0;34m(self, metrics, retriever, node_postprocessors, **kwargs)\u001b[0m\n\u001b[1;32m 37\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21m__init__\u001b[39m(\n\u001b[1;32m 38\u001b[0m \u001b[38;5;28mself\u001b[39m,\n\u001b[1;32m 39\u001b[0m metrics: Sequence[BaseRetrievalMetric],\n\u001b[0;32m (...)\u001b[0m\n\u001b[1;32m 42\u001b[0m \u001b[38;5;241m*\u001b[39m\u001b[38;5;241m*\u001b[39mkwargs: Any,\n\u001b[1;32m 43\u001b[0m ) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[1;32m 44\u001b[0m \u001b[38;5;250m \u001b[39m\u001b[38;5;124;03m\"\"\"Init params.\"\"\"\u001b[39;00m\n\u001b[0;32m---> 45\u001b[0m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[38;5;21;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[1;32m 46\u001b[0m \u001b[43m \u001b[49m\u001b[43mmetrics\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mmetrics\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 47\u001b[0m \u001b[43m \u001b[49m\u001b[43mretriever\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mretriever\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 48\u001b[0m \u001b[43m \u001b[49m\u001b[43mnode_postprocessors\u001b[49m\u001b[38;5;241;43m=\u001b[39;49m\u001b[43mnode_postprocessors\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 49\u001b[0m \u001b[43m \u001b[49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[38;5;241;43m*\u001b[39;49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[1;32m 50\u001b[0m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n",
"File \u001b[0;32m~/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/pydantic/main.py:341\u001b[0m, in \u001b[0;36mpydantic.main.BaseModel.__init__\u001b[0;34m()\u001b[0m\n",
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for RetrieverEvaluator\nretriever\n instance of BaseRetriever expected (type=type_error.arbitrary_type; expected_arbitrary_type=BaseRetriever)"
]
}
],
"source": [
"from llama_index.core.evaluation import RetrieverEvaluator\n",
"\n",
"# We can evaluate the retievers with different top_k values.\n",
"for i in [2, 4, 6, 8, 10]:\n",
" vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=i)\n",
" custom_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"OR\")\n",
" custom_query_engine = RetrieverQueryEngine(\n",
" retriever=custom_retriever,\n",
" response_synthesizer=response_synthesizer,\n",
" )\n",
" retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
" [\"mrr\", \"hit_rate\"], retriever=custom_query_engine\n",
" )\n",
" eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
" print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
]
}
],
"metadata": {
"colab": {
"authorship_tag": "ABX9TyO362/noWgs82KNvLAlRlkT",
"include_colab_link": true,
"provenance": []
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.11.8"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"0245f2604e4d49c8bd0210302746c47b": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"134210510d49476e959dd7d032bbdbdc": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"13b9c5395bca4c3ba21265240cb936cf": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"193aef33d9184055bb9223f56d456de6": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"3fbabd8a8660461ba5e7bc08ef39139a": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_df2365556ae242a2ab1a119f9a31a561",
"IPY_MODEL_5f4b9d32df8f446e858e4c289dc282f9",
"IPY_MODEL_5b588f83a15d42d9aca888e06bbd95ff"
],
"layout": "IPY_MODEL_ad073bca655540809e39f26538d2ec0d"
}
},
"47a4586384274577a726c57605e7f8d9": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"4a172e8c6aa44e41a42fc1d9cf714fd0": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_e7937a1bc68441a080374911a6563376",
"placeholder": "",
"style": "IPY_MODEL_e532ed7bfef34f67b5fcacd9534eb789",
"value": " 108/108 [00:03<00:00, 33.70it/s]"
}
},
"5b588f83a15d42d9aca888e06bbd95ff": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_af9b6ae927dd4764b9692507791bc67e",
"placeholder": "",
"style": "IPY_MODEL_134210510d49476e959dd7d032bbdbdc",
"value": " 14/14 [00:00<00:00, 21.41it/s]"
}
},
"5c7973afd79349ed997a69120d0629b2": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"5f4b9d32df8f446e858e4c289dc282f9": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_96a3bdece738481db57e811ccb74a974",
"max": 14,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_5c7973afd79349ed997a69120d0629b2",
"value": 14
}
},
"5f9bb065c2b74d2e8ded32e1306a7807": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HBoxModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_73a06bc546a64f7f99a9e4a135319dcd",
"IPY_MODEL_ce48deaf4d8c49cdae92bfdbb3a78df0",
"IPY_MODEL_4a172e8c6aa44e41a42fc1d9cf714fd0"
],
"layout": "IPY_MODEL_0245f2604e4d49c8bd0210302746c47b"
}
},
"73a06bc546a64f7f99a9e4a135319dcd": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_e956dfab55084a9cbe33c8e331b511e7",
"placeholder": "",
"style": "IPY_MODEL_cb394578badd43a89850873ad2526542",
"value": "Generating embeddings: 100%"
}
},
"96a3bdece738481db57e811ccb74a974": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"abfc9aa911ce4a5ea81c7c451f08295f": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "ProgressStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"ad073bca655540809e39f26538d2ec0d": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"af9b6ae927dd4764b9692507791bc67e": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"cb394578badd43a89850873ad2526542": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"ce48deaf4d8c49cdae92bfdbb3a78df0": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "FloatProgressModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_193aef33d9184055bb9223f56d456de6",
"max": 108,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_abfc9aa911ce4a5ea81c7c451f08295f",
"value": 108
}
},
"df2365556ae242a2ab1a119f9a31a561": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "HTMLModel",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_13b9c5395bca4c3ba21265240cb936cf",
"placeholder": "",
"style": "IPY_MODEL_47a4586384274577a726c57605e7f8d9",
"value": "Parsing nodes: 100%"
}
},
"e532ed7bfef34f67b5fcacd9534eb789": {
"model_module": "@jupyter-widgets/controls",
"model_module_version": "1.5.0",
"model_name": "DescriptionStyleModel",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"e7937a1bc68441a080374911a6563376": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"e956dfab55084a9cbe33c8e331b511e7": {
"model_module": "@jupyter-widgets/base",
"model_module_version": "1.2.0",
"model_name": "LayoutModel",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}