{
"cells": [
{
"cell_type": "markdown",
"metadata": {
"id": "5BGJ3fxhOk2V"
},
"source": [
"# Install Packages and Setup Variables\n"
]
},
{
"cell_type": "code",
"execution_count": 1,
"metadata": {
"id": "QPJzr-I9XQ7l",
"collapsed": true,
"outputId": "a68229ea-1d76-475b-9eb2-05dca0ef431e",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"\u001b[?25l \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m0.0/67.3 kB\u001b[0m \u001b[31m?\u001b[0m eta \u001b[36m-:--:--\u001b[0m\r\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m2.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
" Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.4/50.4 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m337.0/337.0 kB\u001b[0m \u001b[31m21.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.1/1.1 MB\u001b[0m \u001b[31m35.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m584.3/584.3 kB\u001b[0m \u001b[31m25.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m40.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.5/15.5 MB\u001b[0m \u001b[31m47.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m273.8/273.8 kB\u001b[0m \u001b[31m11.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m94.0/94.0 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m150.7/150.7 kB\u001b[0m \u001b[31m9.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m679.1/679.1 kB\u001b[0m \u001b[31m22.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.4/76.4 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.7/1.7 MB\u001b[0m \u001b[31m37.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.2/1.2 MB\u001b[0m \u001b[31m26.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.6/67.6 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m13.2/13.2 MB\u001b[0m \u001b[31m30.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m64.0/64.0 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m52.5/52.5 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m149.7/149.7 kB\u001b[0m \u001b[31m10.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m110.5/110.5 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m141.9/141.9 kB\u001b[0m \u001b[31m9.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m4.5/4.5 MB\u001b[0m \u001b[31m55.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m54.2/54.2 kB\u001b[0m \u001b[31m3.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m62.8/62.8 kB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m341.4/341.4 kB\u001b[0m \u001b[31m18.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m187.4/187.4 kB\u001b[0m \u001b[31m12.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m295.8/295.8 kB\u001b[0m \u001b[31m17.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.4/71.4 kB\u001b[0m \u001b[31m4.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m34.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m425.7/425.7 kB\u001b[0m \u001b[31m20.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m157.3/157.3 kB\u001b[0m \u001b[31m9.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.3/49.3 kB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
"\u001b[?25h Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"
]
}
],
"source": [
"!pip install -q llama-index==0.10.57 openai==1.37.0 tiktoken==0.7.0 chromadb==0.5.5 llama-index-vector-stores-chroma==0.1.10 llama-index-llms-gemini==0.1.11"
]
},
{
"cell_type": "code",
"execution_count": 2,
"metadata": {
"id": "riuXwpSPcvWC"
},
"outputs": [],
"source": [
"import os\n",
"\n",
"# Set the following API Keys in the Python environment. Will be used later.\n",
"os.environ[\"OPENAI_API_KEY\"] = \"\"\n",
"os.environ[\"GOOGLE_API_KEY\"] = \"\""
]
},
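{
"cell_type": "markdown",
"metadata": {},
"source": [
"Hard-coding keys in a notebook makes them easy to leak. As an optional alternative (a minimal sketch using Python's standard `getpass`), prompt for any key that was not set above:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from getpass import getpass\n",
"import os\n",
"\n",
"# Optional sketch: prompt for any key that was not hard-coded above,\n",
"# so secrets do not end up committed with the notebook.\n",
"if not os.environ.get(\"OPENAI_API_KEY\"):\n",
"    os.environ[\"OPENAI_API_KEY\"] = getpass(\"OpenAI API key: \")\n",
"if not os.environ.get(\"GOOGLE_API_KEY\"):\n",
"    os.environ[\"GOOGLE_API_KEY\"] = getpass(\"Google API key: \")"
]
},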
{
"cell_type": "code",
"execution_count": 3,
"metadata": {
"id": "km-KQOrgr3VB"
},
"outputs": [],
"source": [
"# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
"\n",
"import nest_asyncio\n",
"\n",
"nest_asyncio.apply()"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "0BwVuJXlzHVL"
},
"source": [
"# Create a VectoreStore\n"
]
},
{
"cell_type": "code",
"execution_count": 4,
"metadata": {
"id": "SQP87lHczHKc"
},
"outputs": [],
"source": [
"import chromadb\n",
"\n",
"# create client and a new collection\n",
"# chromadb.EphemeralClient saves data in-memory.\n",
"chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
"chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
]
},
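{
"cell_type": "markdown",
"metadata": {},
"source": [
"Note that `create_collection` raises an error if a collection with the same name already exists at the persistent path (for example, when re-running the notebook). A minimal sketch of the reuse-friendly alternative, `get_or_create_collection`:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Sketch: reuse the existing collection on re-runs instead of raising an error.\n",
"chroma_collection = chroma_client.get_or_create_collection(\"mini-llama-articles\")"
]
},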
{
"cell_type": "code",
"execution_count": 5,
"metadata": {
"id": "zAaGcYMJzHAN"
},
"outputs": [],
"source": [
"from llama_index.vector_stores.chroma import ChromaVectorStore\n",
"\n",
"# Define a storage context object using the created vector database.\n",
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
]
},
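{
"cell_type": "markdown",
"metadata": {},
"source": [
"The ingestion pipeline below writes into this vector store directly. If you instead build an index straight from documents, the store is usually wrapped in a `StorageContext` first; a minimal sketch:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"from llama_index.core import StorageContext\n",
"\n",
"# Sketch: only needed when indexing documents directly\n",
"# (e.g. VectorStoreIndex.from_documents) rather than via the ingestion pipeline.\n",
"storage_context = StorageContext.from_defaults(vector_store=vector_store)"
]
},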
{
"cell_type": "markdown",
"metadata": {
"id": "I9JbAzFcjkpn"
},
"source": [
"# Load the Dataset (CSV)\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "_Tif8-JoRH68"
},
"source": [
"## Download\n"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "4fQaa1LN1mXL"
},
"source": [
"The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model.\n"
]
},
{
"cell_type": "code",
"execution_count": 6,
"metadata": {
"id": "fQtpDvUzKNzI",
"outputId": "811bbd6b-8f04-45a9-d3c9-b19128daf306",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" % Total % Received % Xferd Average Speed Time Time Time Current\n",
" Dload Upload Total Spent Left Speed\n",
"100 169k 100 169k 0 0 273k 0 --:--:-- --:--:-- --:--:-- 274k\n"
]
}
],
"source": [
"!curl -o ./mini-dataset.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "zk-4alIxROo8"
},
"source": [
"## Load the Articles\n"
]
},
{
"cell_type": "code",
"execution_count": 7,
"metadata": {
"id": "_WER5lt0N7c5",
"outputId": "7cf8a364-fe04-4957-aacd-42f6fb0386d5",
"colab": {
"base_uri": "https://localhost:8080/"
}
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"14"
]
},
"metadata": {},
"execution_count": 7
}
],
"source": [
"import csv\n",
"\n",
"rows = []\n",
"\n",
"# Load the file as a JSON\n",
"with open(\"./mini-dataset.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
" csv_reader = csv.reader(file)\n",
"\n",
" for idx, row in enumerate(csv_reader):\n",
" if idx == 0:\n",
" continue\n",
" # Skip header row\n",
" rows.append(row)\n",
"\n",
"# The number of characters in the dataset.\n",
"len(rows)"
]
},
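{
"cell_type": "markdown",
"metadata": {},
"source": [
"Each row holds the article title, full text, URL, and source name, in that order (the same positions used in the next cell). An optional sanity check:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional sanity check: each row is [title, text, url, source_name].\n",
"print(rows[0][0])       # article title\n",
"print(len(rows[0][1]))  # number of characters in the article body"
]
},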
{
"cell_type": "markdown",
"metadata": {
"id": "wxEStggPdxYs"
},
"source": [
"# Convert to Document obj\n"
]
},
{
"cell_type": "code",
"execution_count": 8,
"metadata": {
"id": "lFvW_886dxKX"
},
"outputs": [],
"source": [
"from llama_index.core import Document\n",
"from llama_index.core.schema import TextNode\n",
"\n",
"# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
"documents = [\n",
" Document(\n",
" text=row[1],\n",
" metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]},\n",
" )\n",
" for row in rows\n",
"]\n",
"# By default, the node/chunks ids are set to random uuids. To ensure same id's per run, we manually set them.\n",
"for idx, doc in enumerate(documents):\n",
" doc.id_ = f\"doc_{idx}\""
]
},
{
"cell_type": "code",
"execution_count": 9,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"collapsed": true,
"id": "Njoc3XEVkKkf",
"outputId": "83f885cd-371f-4497-cb8c-65105e876585"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"Document(id_='doc_0', embedding=None, metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='LLM Variants and Meta\\'s Open Source Before shedding light on four major trends, I\\'d share the latest Meta\\'s Llama 2 and Code Llama. Meta\\'s Llama 2 represents a sophisticated evolution in LLMs. This suite spans models pretrained and fine-tuned across a parameter spectrum of 7 billion to 70 billion. A specialized derivative, Llama 2-Chat, has been engineered explicitly for dialogue-centric applications. Benchmarking revealed Llama 2\\'s superior performance over most extant open-source chat models. Human-centric evaluations, focusing on safety and utility metrics, positioned Llama 2-Chat as a potential contender against proprietary, closed-source counterparts. The development trajectory of Llama 2 emphasized rigorous fine-tuning methodologies. Meta\\'s transparent delineation of these processes aims to catalyze community-driven advancements in LLMs, underscoring a commitment to collaborative and responsible AI development. Code Llama is built on top of Llama 2 and is available in three models: Code Llama, the foundational code model;Codel Llama - Python specialized for Python;and Code Llama - Instruct, which is fine-tuned for understanding natural language instructions. Based on its benchmark testing, Code Llama outperformed state-of-the-art publicly available LLMs (except GPT-4) on code tasks. Llama 2, Llama 2-Chat, and Code Llama are key steps in LLM development but still have a way to go compared to GPT-4. Meta\\'s open access and commitment to improving these models promise transparent and faster LLM progress in the future. Please refer to the LLM and Llama variants below: From LLMs to Multimodal LLMs, like OpenAI\\'s ChatGPT (GPT-3.5), primarily focus on understanding and generating human language. They\\'ve been instrumental in tasks like text generation, translation, and even creative writing. However, their scope is limited to text. Enter multimodal models like GPT-4. These are a new breed of AI models that can understand and generate not just text, but also images, sounds, and potentially other types of data. The term \"multimodal\" refers to their ability to process multiple modes or types of data simultaneously. This is a game-changer. Imagine an AI that can not only read a description of a dress but also visualize it or even design it! Multimodal AI models are moving us towards more holistic AI systems. These systems can potentially understand our world in a more comprehensive manner, bridging the gap between different forms of data and providing richer, more integrated solutions. As we stand on the cusp of this new era, it\\'s exciting to envision the myriad of applications and innovations that Multimodal models will bring to the table. The future of AI looks more integrated and versatile than ever before. From Connections to Vector DB The AI landscape is witnessing a fascinating transition: from Language Model (LLM) connections or integrations, e.g., LangChain and LlamaIndex, to the rise of Vector Databases (Vector DB) such as Weaviate, Milvus, Pinecone, Chroma, and Vespa.ai. But what\\'s driving this shift, and why does it matter? LLM connections, like the LlamaIndex, primarily focus on linking and understanding vast amounts of external data. 
They\\'ve been pivotal in creating semantic connections, enabling more intuitive search experiences, and enhancing data accessibility. However, as the volume and variety of data grow, the need for more advanced storage and retrieval mechanisms becomes evident. This is where Vector DBs come into play. Unlike traditional databases that store data in rows and columns, Vector DBs store data in high-dimensional space, allowing for more efficient and accurate similarity searches. Tools like Weaviate and Milvus are designed to handle massive datasets, making them ideal for tasks like image recognition, recommendation systems, and more. The rise of Vector DBs represents a broader trend in AI: the quest for more efficient, scalable, and versatile data handling solutions. As we navigate this evolution, it\\'s clear that the combination of LLMs and Vector DBs will redefine how we store, access, and understand data in the AI-driven future. From Agents to OS The AI realm is abuzz with innovations, and one of the most intriguing shifts we\\'re witnessing is the transition from LLM agents to using LLMs as Operating Systems (OS). Let\\'s delve into this evolution and its implications. LLM agents, like AutoGPT, AgentGPT, BabyAGI, and HuggingGPT, have been groundbreaking in automating tasks based on user requests. These agents leverage the power of Language Models (LLMs) to understand and execute commands, making them invaluable in tasks ranging from content generation to data analysis. Their adaptability and intelligence have made them a staple in many AI toolkits. However, the vision for AI doesn\\'t stop there. The concept of LLM as an OS is emerging as the next big thing. Imagine an operating system where the core is a language model, orchestrating everything around it. Such a system would not just execute tasks but would understand context, anticipate needs, and offer solutions in real time. It\\'s like turning the LLM into the brain of the digital ecosystem, making devices and applications more intuitive and responsive than ever. The move towards LLM as OS signifies a paradigm shift in how we perceive and utilize AI. It\\'s not just about automation anymore; it\\'s about creating a seamless, intelligent interface between humans and technology. As we stand on the brink of this transformation, the potential for LLM-driven OS to revolutionize our digital interactions is immense. From Fine-tuning to Plugins The world of LLMs is undergoing a transformative shift, moving from intricate fine-tuning processes to the more dynamic realm of plugins. Let\\'s unpack this evolution. Historically, fine-tuning has been the cornerstone of LLM optimization. There are two primary ways to fine-tune LLMs: feeding data into the LLM in real-time and directly fine-tuning on the LLM. From a technical standpoint, this involves three methods: Transfer Learning: Adapting a pre-trained model to new tasks.Sequential Fine-tuning: Refining models in stages for specific tasks.Task-specific Fine-tuning: Tailoring models for a particular function. Moreover, LLM techniques like In-context learning, Few-shot learning, and Zero-shot learning have further enhanced the model\\'s adaptability, allowing them to understand and generate content with minimal data. However, the future of LLMs is leaning towards plugins. With the introduction of tools like GPT-4 Plugins, the focus is on extending LLMs seamlessly. Instead of running LLMs as a service, they\\'re envisioned as platforms. 
This means integrating LLMs with various tools, enhancing their capabilities, and offering a more modular and scalable approach to AI applications. The journey from fine-tuning to plugins represents a move from static optimization to dynamic adaptability, ensuring that LLMs remain at the forefront of AI innovation. In a Nutshell The AI domain is witnessing rapid shifts, with LLMs playing a central role. Initially, the move was from LLMs to Multimodal models, expanding from text to include images and sounds. Simultaneously, the trend shifted from LLM connections, which linked external data, to Vector Databases for efficient high-dimensional storage. Another evolution saw LLM agents, which automated tasks, transitioning towards LLMs as Operating Systems. This change aims for more intuitive, context-aware devices and applications. Furthermore, the traditional fine-tuning processes of LLMs are now being replaced by dynamic plugins, turning LLMs into platforms integrated with various tools. Leading this LLM revolution are OpenAI\\'s GPT-4 and Meta\\'s LLaMA2. Their pioneering efforts are setting the stage for an AI future that\\'s more integrated, responsive, and attuned to human interactions. More Readings Harnessing the Power of LLMs in Practice: A Survey on ChatGPT and Beyond: https://arxiv.org/abs/2304.13712Sparks of Artificial General Intelligence: Early experiments with GPT-4: https://arxiv.org/abs/2303.12712GPT4All-J: https://huggingface.co/nomic-ai/gpt4all-jIntroducing Code Llama, a state-of-the-art large language model for coding: https://ai.meta.com/blog/code-llama-large-language-model-coding/Llama 2: Open Foundation and Fine-Tuned Chat Models: https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/', mimetype='text/plain', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n')"
]
},
"metadata": {},
"execution_count": 9
}
],
"source": [
"documents[0]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "S17g2RYOjmf2"
},
"source": [
"# Transforming\n"
]
},
{
"cell_type": "code",
"execution_count": 10,
"metadata": {
"id": "STACTMUR1z9N"
},
"outputs": [],
"source": [
"from llama_index.core.node_parser import TokenTextSplitter\n",
"from llama_index.core.schema import BaseNode\n",
"import hashlib\n",
"\n",
"\n",
"def deterministic_id_func(i: int, doc: BaseNode) -> str:\n",
" \"\"\"Deterministic ID function for the text splitter.\n",
" This will be used to generate a unique repeatable identifier for each node.\"\"\"\n",
" unique_identifier = doc.id_ + str(i)\n",
" hasher = hashlib.sha256()\n",
" hasher.update(unique_identifier.encode(\"utf-8\"))\n",
" return hasher.hexdigest()\n",
"\n",
"\n",
"text_splitter = TokenTextSplitter(\n",
" separator=\" \", chunk_size=512, chunk_overlap=128, id_func=deterministic_id_func\n",
")"
]
},
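{
"cell_type": "markdown",
"metadata": {},
"source": [
"Before wiring the splitter into the ingestion pipeline, you can optionally run it on its own to preview the chunking; a minimal sketch using the node parser's `get_nodes_from_documents` method:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional preview: split the first document and inspect the resulting chunks.\n",
"sample_nodes = text_splitter.get_nodes_from_documents(documents[:1])\n",
"print(len(sample_nodes), \"chunks; first id:\", sample_nodes[0].id_)"
]
},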
{
"cell_type": "code",
"execution_count": 11,
"metadata": {
"id": "CtdsIUQ81_hT",
"outputId": "b97984d9-9639-42d8-fb51-de036e38e274",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 81,
"referenced_widgets": [
"6a5b3fec3572436f97ed97b570f15984",
"9d28cdd8504e429d85b9849c4f679085",
"b4bbbd97b95e4e79b1923aabd512e4c7",
"3b66c8f4087b4df5a9eb84b7dc82e440",
"9639bc37437145c1af00c627da831e2e",
"960a9f07924c4722b42332c9a3b233a8",
"9dd071e120884e88b44101bd4b252342",
"911b081e2a144929a67a0ef8e425706e",
"4bddc051ddc744dcb6efdd74e841bf00",
"4ea27a5184b2446aba68157bd1cb0d2d",
"9e87cc59ae8c4fa1b3b710b83a371590",
"847dfcd1770b4352bc839db928f0834a",
"d013604d2eb4432b850f451d86fc5e90",
"8893726a2ae0488fa04b1c12ef38fd01",
"74be1acb609041ecbecb662c1613575c",
"8e728820e82542e1a4aa440e043e23c2",
"b1661862e73d4b898daa82a61128a7fa",
"d282aabfe99642a699d1aab1122a2806",
"39cbcd4e42ae4af782caf71e3529c459",
"4f121d46026d4435ba35448c8da3be50",
"bebe97dd44e94312ad185632f15caddc",
"45d27d06f7da4f80a31a0347d77f075d"
]
}
},
"outputs": [
{
"output_type": "display_data",
"data": {
"text/plain": [
"Parsing nodes: 0%| | 0/14 [00:00, ?it/s]"
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "6a5b3fec3572436f97ed97b570f15984"
}
},
"metadata": {}
},
{
"output_type": "display_data",
"data": {
"text/plain": [
"Generating embeddings: 0%| | 0/108 [00:00, ?it/s]"
],
"application/vnd.jupyter.widget-view+json": {
"version_major": 2,
"version_minor": 0,
"model_id": "847dfcd1770b4352bc839db928f0834a"
}
},
"metadata": {}
}
],
"source": [
"from llama_index.embeddings.openai import OpenAIEmbedding\n",
"from llama_index.core.ingestion import IngestionPipeline\n",
"\n",
"pipeline = IngestionPipeline(\n",
" transformations=[\n",
" text_splitter,\n",
" OpenAIEmbedding(model = 'text-embedding-3-small'),\n",
" ],\n",
" vector_store=vector_store,\n",
")\n",
"\n",
"nodes = pipeline.run(documents=documents, show_progress=True)"
]
},
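{
"cell_type": "markdown",
"metadata": {},
"source": [
"After the pipeline runs, the embedded chunks are stored in the Chroma collection. An optional check that the number of stored records matches the number of nodes:\n"
]
},
{
"cell_type": "code",
"execution_count": null,
"metadata": {},
"outputs": [],
"source": [
"# Optional check: there should now be one record per node in the Chroma collection.\n",
"print(\"nodes created:\", len(nodes))\n",
"print(\"records in collection:\", chroma_collection.count())"
]
},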
{
"cell_type": "code",
"execution_count": 12,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"collapsed": true,
"id": "n5WRy0g71Hwu",
"outputId": "6b232bec-31c0-4869-8847-bb3b5b5c73d3"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"TextNode(id_='4ab5bd897f01474fc9b0049f95e31edae3ccd9e74d0f0acd3932b50a74d608b6', embedding=[0.004633472301065922, 0.016692597419023514, 0.06155563145875931, -0.016222193837165833, 0.020455822348594666, -0.0224449560046196, 0.00625972356647253, 0.014663142152130604, -0.00014427100541070104, 0.005826280917972326, 0.02755219303071499, -0.045642558485269547, -0.03534744679927826, 0.004250429570674896, -0.035132404416799545, -0.02787475474178791, -0.034218478947877884, -0.04634144529700279, -0.015294826589524746, 0.03763226419687271, 0.013137691654264927, 0.0072442106902599335, -0.034541040658950806, 0.025952821597456932, -0.005110595840960741, -0.026893628761172295, -0.0479004941880703, 0.01755276322364807, -0.01737804152071476, -0.02486417442560196, 0.05268516764044762, -0.025348016992211342, -0.02216271497309208, -0.01169288158416748, -0.024837292730808258, 0.018386049196124077, -0.005261796526610851, -0.010080070234835148, 0.020294541493058205, -0.004458751063793898, -0.032283104956150055, -0.06263083964586258, -0.00211849482730031, 0.00921990443021059, -0.041099805384874344, 0.004146269056946039, 0.003086181590333581, 0.029729487374424934, -0.02038862183690071, 0.03397655859589577, -0.05128739774227142, 0.019703177735209465, 0.012539607472717762, 0.032874468713998795, -0.062415797263383865, -0.004609952215105295, -0.01901773363351822, -0.006585645955055952, 0.002063054358586669, -0.007889335043728352, -0.02249871753156185, 0.001015567104332149, 0.02455505169928074, 0.00831269845366478, -0.034971125423908234, -0.03913755342364311, -0.08752188831567764, -0.003470904193818569, -0.012237205170094967, -0.014622822403907776, -0.0031685021240264177, 0.008709181100130081, -0.04437918961048126, 0.002911460353061557, -0.0314229391515255, 0.0024864173028618097, 0.003712825942784548, 0.062415797263383865, 0.02065742388367653, -0.027202749624848366, 0.006148843094706535, 0.023600805550813675, -0.02514641545712948, -0.027982275933027267, -0.00039900277624838054, 0.005339077208191156, -0.021786391735076904, -0.017162999138236046, -0.060480423271656036, -0.035428088158369064, -0.03905691206455231, -0.06472749263048172, -0.045911360532045364, 0.021826712414622307, 0.06725423038005829, 0.0172570813447237, -0.00020926646539010108, 0.0010978876380249858, 0.01873549073934555, 0.06956592947244644, -0.01752588152885437, 0.01706891879439354, -0.001084447605535388, 0.006982128601521254, 0.06085674464702606, -0.017633402720093727, 0.04163740947842598, -0.028224196285009384, -0.026450105011463165, -0.013372893445193768, -0.09784388542175293, 0.0018933732062578201, -0.050696033984422684, -0.011834003031253815, 0.019985418766736984, -0.04136860743165016, 0.028197316452860832, 0.029299404472112656, 0.0005523038562387228, -0.013171291910111904, -0.029595086351037025, 0.023856166750192642, 0.057577360421419144, -0.043088942766189575, 0.02744467183947563, -0.038895633071660995, -0.0021235349122434855, -0.015079785138368607, 0.021074067801237106, -0.009945669211447239, -0.01886989176273346, 0.03698713704943657, 0.05209380388259888, -0.0404546819627285, -0.037390340119600296, 0.0021386549342423677, -0.042766377329826355, -0.025576498359441757, -0.04260509833693504, -0.016706036403775215, -0.004220189526677132, -0.040965404361486435, -0.03943323716521263, 0.010577353648841381, -0.06865199655294418, -0.020160140469670296, -0.025348016992211342, -0.008789821527898312, -0.08789821714162827, 0.014945384114980698, -0.016585076227784157, -0.0027552193496376276, -0.025684019550681114, 0.02560337819159031, -0.045185595750808716, 
-0.02124878764152527, -0.0069350884296, 0.035885050892829895, 0.029917648062109947, -0.03846554830670357, -0.007392051629722118, 0.024501292034983635, 0.012237205170094967, 0.006531885825097561, 0.015348587185144424, 0.029917648062109947, -0.008460539393126965, -0.007351731415838003, -0.07074865698814392, -0.05346469208598137, 0.04085788503289223, 0.016235632821917534, -0.054754942655563354, -0.015671148896217346, -0.026033461093902588, -0.05677095800638199, -0.020859025418758392, -0.033627115190029144, -0.046448964625597, -0.02935316413640976, -0.030320851132273674, 0.028492998331785202, -0.057469841092824936, -0.009313984774053097, -0.05569574981927872, -0.032578788697719574, 0.006400844547897577, 0.032283104956150055, -0.008064056746661663, 0.024219049140810966, -0.006693166680634022, 0.04701344668865204, 0.0854789987206459, 0.05381413549184799, 0.036933377385139465, -0.02335888333618641, 0.04784673452377319, -0.00659236591309309, 0.025200175121426582, -0.010133830830454826, 0.020859025418758392, 0.039325714111328125, 0.02232399582862854, -0.059243932366371155, -0.007506292313337326, -0.0005434838240034878, 0.03042837232351303, -0.033331431448459625, -0.054754942655563354, -0.01739148236811161, 0.04747041314840317, -0.013285532593727112, 0.044137269258499146, 0.018520450219511986, -0.0539216548204422, -0.033008869737386703, -0.053437814116477966, -0.0001328259240835905, -0.01600715145468712, 0.06005033850669861, 0.007775094360113144, 0.02304976060986519, 0.01416585873812437, 0.03271318972110748, -0.03099285624921322, 0.0051912362687289715, 0.00749285239726305, -0.011430799961090088, 0.018614530563354492, 0.008111096918582916, 0.03720217943191528, -0.02470289170742035, -0.023600805550813675, -0.030159570276737213, -0.0032508226577192545, -0.0854252353310585, 0.011047757230699062, -0.029621966183185577, 0.023278241977095604, -0.039486996829509735, 0.009569346904754639, -0.008245497941970825, 0.047094088047742844, -0.004472191445529461, -0.01378953643143177, 0.03682585805654526, -0.015375467017292976, -0.05811496451497078, 0.05271204933524132, 0.014569061808288097, 0.014878183603286743, -0.029460685327649117, -0.0006808247417211533, 0.020791824907064438, -0.004774593282490969, 0.0007723853923380375, -0.051314279437065125, 0.01544266752898693, -0.015980271622538567, -0.041126687079668045, -0.007183730136603117, 0.023896487429738045, -0.023627685382962227, 0.012250646017491817, -0.019689736887812614, -0.0014691702090203762, -0.036772098392248154, 0.034057196229696274, 0.000917286379262805, -0.04701344668865204, 0.028277957811951637, -0.008977983146905899, -0.024165289476513863, -0.015227626077830791, -0.026020022109150887, -0.029003722593188286, 0.007936375215649605, 0.007875895127654076, 0.04424478858709335, 0.027081789448857307, -0.02636946365237236, -0.01630283333361149, 0.07870519161224365, 0.02278095856308937, -0.005386117845773697, 0.016450675204396248, -0.0002814019680954516, -0.012035603635013103, -0.01467658206820488, -0.015093225054442883, 0.06128682941198349, -0.007277810946106911, 0.016101231798529625, -0.015603949315845966, 0.007882614620029926, 0.06128682941198349, 0.06505005806684494, 0.031987424939870834, -0.018816132098436356, 0.016961397603154182, 0.0032004222739487886, -0.01092679612338543, -0.02260623872280121, -0.04690592736005783, 0.013332572765648365, 0.040212761610746384, -0.01003975048661232, -0.03610009327530861, 0.01784844510257244, -0.013379613868892193, 0.009925508871674538, 0.0029652207158505917, 0.0004510831495281309, -0.018829571083188057, 
-0.01416585873812437, -0.024071207270026207, -3.4361488360445946e-05, -0.013735775835812092, 0.02040206268429756, -0.017149560153484344, -0.009266944602131844, -0.0006157243042252958, 0.009710467420518398, 0.007849014364182949, -0.043384622782468796, 0.012553047388792038, -0.021383188664913177, -0.009045182727277279, 0.0020479343365877867, -0.014743782579898834, 0.016222193837165833, -0.0016455715522170067, -0.019232774153351784, 0.02636946365237236, -0.018923651427030563, 0.022095514461398125, -0.027377471327781677, 0.047927375882864, 0.027632832527160645, 0.02159823104739189, -0.06037290021777153, -0.009078782983124256, -0.026638265699148178, 0.03983643651008606, -0.015859311446547508, 0.014972264878451824, -0.0010357272112742066, 0.03040149249136448, 0.03088533505797386, -0.00047838332829996943, 0.017606522887945175, -0.05435173958539963, 0.041126687079668045, -0.01589963026344776, 0.010738635435700417, -0.00034650243469513953, -0.003427224000915885, -0.0009424865711480379, 0.01362825557589531, 0.011713041923940182, -0.01632971316576004, 0.008561340160667896, -0.0029249005019664764, 0.0194612555205822, 0.009629826992750168, -0.008258937858045101, -0.005722119938582182, -0.028734920546412468, 0.013776096515357494, -0.03986331820487976, 0.006377324461936951, -0.019716618582606316, 0.0020428942516446114, 0.0026308982633054256, -0.05263140797615051, 0.0002335216267965734, -0.00022533157607540488, 0.012458967044949532, -0.04376094415783882, 0.06026538088917732, -0.0017774524167180061, 0.005238276440650225, 0.016746357083320618, -0.00263929832726717, -0.025200175121426582, 0.05660967528820038, 0.0157383494079113, -0.027525311335921288, -0.041583649814128876, 0.009287104941904545, 0.013594655320048332, -0.00324242259375751, -0.019541896879673004, 0.013829856179654598, 0.01100743655115366, -0.00019960639474447817, -0.0020076141227036715, -0.004596512299031019, 0.03510552644729614, 0.05109923705458641, 0.004895554389804602, -0.004562912043184042, -0.009959109127521515, -0.026718907058238983, -0.03682585805654526, 0.0042705899104475975, 0.020603664219379425, 0.04859938099980354, 0.015267946757376194, 0.017472121864557266, 0.04838433861732483, -0.03260566666722298, 0.014018017798662186, -0.033008869737386703, 0.01587275043129921, -0.019810698926448822, 0.0039312276057899, 0.009119103662669659, 0.04911010339856148, 0.06628654152154922, 0.009623107500374317, -0.00730469124391675, -0.009441666305065155, -0.03247126564383507, -0.011874322779476643, 0.034702323377132416, 0.016114672645926476, -0.03247126564383507, -0.009159424342215061, -0.0554269477725029, -0.07757622003555298, 0.031826142221689224, 0.04763169214129448, -0.05951273441314697, -0.043814707547426224, -0.01948813535273075, -0.02533457614481449, 0.042470697313547134, -0.02040206268429756, -0.003035781206563115, 0.005759080406278372, 0.013225052505731583, -0.041099805384874344, -0.02350672334432602, -0.03462168201804161, -0.005490278359502554, -0.01990477927029133, -0.007062769494950771, -0.0104899937286973, -0.03655705600976944, -0.015939950942993164, -0.016208752989768982, 0.07703861594200134, 0.016719477251172066, -0.03263254836201668, -0.05005091056227684, 0.05333029106259346, -0.053545333445072174, 0.05897513031959534, -0.01751244254410267, 0.04924450442194939, -0.009932229295372963, -0.030777815729379654, -0.009602947160601616, -0.002775379456579685, -0.013688735663890839, 0.024340009316802025, -0.002259615808725357, 0.028277957811951637, -0.018708610907197, 0.012049044482409954, 0.00048678339226171374, -0.029030602425336838, 
-0.02709522843360901, -0.012815129943192005, -0.006928368471562862, 0.019730057567358017, -0.036772098392248154, 0.0036221053451299667, 0.00031710221082903445, 0.009999429807066917, -0.008527739904820919, 0.019541896879673004, 0.023452963680028915, -0.02608722262084484, 0.08676924556493759, -0.014542181976139545, 0.01857420988380909, 0.021961113438010216, 0.006249643862247467, -0.03792794421315193, -0.016437234356999397, 0.01571146957576275, -0.015563628636300564, 0.01826508715748787, -0.015482988208532333, -0.011981843970716, -0.06198571249842644, -0.036019451916217804, -0.019555335864424706, -0.005426438059657812, -0.04284701868891716, -0.05437862128019333, 0.015966830775141716, 0.0013902096543461084, -0.00047418332542292774, 0.026584506034851074, -0.007996856234967709, 0.02860051952302456, 0.027001148089766502, -0.02335888333618641, -0.005994281731545925, 0.006239563692361116, 0.011948243714869022, 0.01693451777100563, -0.0025149774737656116, -0.00800357572734356, 0.031261660158634186, -0.004583071917295456, 0.01033543236553669, 0.022350875660777092, 0.00021462149743456393, 0.002256255829706788, -0.006088362541049719, -0.003003861056640744, -0.02157135121524334, -0.024998575448989868, -0.006451244931668043, -0.030777815729379654, -0.009253504686057568, 0.028277957811951637, -0.016706036403775215, -0.020764945074915886, -0.033035751432180405, -0.06849072128534317, -0.020724624395370483, 0.0029568206518888474, -0.03569689020514488, -0.03360023349523544, -0.042309414595365524, 0.014891624450683594, 0.025670578703284264, 0.04222877323627472, 0.033008869737386703, -0.0034473841078579426, 0.007754934020340443, -0.02040206268429756, -0.0013759295688942075, -0.0038505869451910257, 0.042470697313547134, 0.0883820578455925, -0.04405662789940834, 0.017955966293811798, -0.01826508715748787, -0.0027115389239042997, -0.013036890886723995, 0.009535746648907661, 0.00012495087867137045, 0.0013834896963089705, 0.04389534518122673, 0.034836724400520325, -0.0041193887591362, 0.013130972161889076, -0.003230662550777197, -0.011921362951397896, 0.003958107437938452, -0.00037800264544785023, 0.04432542994618416, 0.010207751765847206, -0.003282743040472269, -0.013057051226496696, 0.012768088839948177, 0.0314229391515255, -0.01618187315762043, -0.014582501724362373, -0.01662539690732956, 0.025401776656508446, 0.016719477251172066, 0.013238492421805859, -0.01886989176273346, -0.034541040658950806, -0.00847397930920124, -0.028519880026578903, -0.011350159533321857, -0.027229629456996918, 0.007586933206766844, -0.01600715145468712, 0.03042837232351303, -0.031261660158634186, 0.03881499171257019, 0.03690649941563606, -0.01325865276157856, -0.019420934841036797, -0.030482131987810135, -0.000225751573452726, 0.007640693336725235, 0.020630544051527977, -0.03838490694761276, -0.04282014071941376, -0.010597513988614082, 0.018614530563354492, 0.006158922798931599, -0.02994452975690365, -0.01079239509999752, -0.05410981923341751, 0.013372893445193768, -0.002116814721375704, 0.0028375398833304644, -0.015859311446547508, -0.0075533329509198666, -0.0057389200665056705, 0.007593653164803982, -0.010752075351774693, -0.07655477523803711, -0.027605952695012093, -0.020348303020000458, -0.029272524639964104, -0.025576498359441757, 0.04704032838344574, -0.016289394348859787, -0.0019622535910457373, 0.00652180565521121, -0.01371561549603939, -0.04510495439171791, 0.021463830024003983, 0.0006325244321487844, -0.02470289170742035, -0.016370033845305443, -0.027605952695012093, 0.026705466210842133, -0.00366914551705122, 
-0.006138762924820185, 0.040669724345207214, 0.013446814380586147, 0.005500358529388905, -0.0024208968970924616, 0.02381584607064724, 0.0036456254310905933, 0.013937377370893955, -0.023614244535565376, 0.02159823104739189, 0.0262081827968359, -0.015617389231920242, -0.052389487624168396, -0.010906635783612728, -0.03389591723680496, -0.0025300977285951376, -0.030052049085497856, 0.001363329472951591, -0.011249358765780926, -0.009119103662669659, -0.04924450442194939, 0.0034322640858590603, 0.009260225109755993, 0.03249814733862877, -0.008883901871740818, 0.02292880043387413, -0.03400343656539917, -0.011410639621317387, 0.019367175176739693, -0.052523888647556305, -0.015106665901839733, -0.029245644807815552, -0.00020286141079850495, -0.027511872351169586, 0.01784844510257244, -0.0012625288218259811, 0.027928514406085014, -0.021100947633385658, 0.0075600529089570045, -0.038895633071660995, -0.0012381686829030514, 0.01904461346566677, -0.011921362951397896, 0.035885050892829895, 0.010463112965226173, -0.0036288253031671047, -0.0479542538523674, 0.0012826889287680387, 0.00928038451820612, -0.011108237318694592, 0.030052049085497856, -0.07531828433275223, 0.004267229698598385, 0.002005934016779065, 0.014058338478207588, 0.023963687941432, 0.008554619736969471, 0.011659281328320503, -0.021033747121691704, -0.019071493297815323, -0.02579154074192047, -0.009172864258289337, -0.01206248439848423, 0.00906534306704998, 0.011639120988547802, -0.03728282079100609, 0.057577360421419144, -0.018318848684430122, -0.035858169198036194, -0.02189391292631626, 0.002651058603078127, -0.013608095236122608, 0.0012398486724123359, -0.008803261443972588, -0.023412643000483513, -0.011034317314624786, 0.03714841976761818, 0.011195598170161247, -0.016746357083320618, -0.037551622837781906, -0.030509013682603836, 0.018036605790257454, 0.015845870599150658, -0.014797543175518513, -0.004539391491562128, 0.004240349400788546, -0.023426083847880363, 0.013010011054575443, -0.027350591495633125, -0.049217622727155685, -0.0036221053451299667, -0.019877899438142776, 0.01467658206820488, -0.00982470903545618, -0.009307265281677246, -0.02261967770755291, 0.012304405681788921, -0.042766377329826355, 0.034971125423908234, 0.027229629456996918, 0.01784844510257244, 0.025831859558820724, -0.03177238255739212, -0.032444387674331665, -0.004808193538337946, -0.024917934089899063, -0.026853308081626892, -0.026745786890387535, -0.0036321852821856737, 0.020899346098303795, 0.008258937858045101, -0.02653074450790882, -0.021786391735076904, 0.015335147269070148, -0.010906635783612728, 0.022888479754328728, 0.01482442393898964, 0.02787475474178791, -0.0030525813344866037, -0.042631976306438446, 0.01175336167216301, 0.0076944539323449135, 0.00831941794604063, -0.01630283333361149, -0.005749000236392021, -0.0045024314895272255, -0.005046755075454712, -0.019891338422894478, 0.006333644036203623, 0.02353360503911972, 0.02935316413640976, 0.022982560098171234, 0.01632971316576004, -0.002948420587927103, -0.02365456521511078, 0.0001748262147884816, 0.010879755951464176, 0.013655135408043861, -0.008601659908890724, -0.015456108376383781, -0.03343895450234413, 0.014125538989901543, 0.0022999360226094723, 0.02455505169928074, -0.025106094777584076, 0.035885050892829895, 0.004821633454412222, 0.019205894321203232, 0.017404921352863312, -0.021544471383094788, -0.013319132849574089, -0.0026493784971535206, -0.019259653985500336, 0.0036221053451299667, -0.01618187315762043, -0.04513183608651161, -0.011135118082165718, -0.002494817366823554, 
0.0007631453336216509, -0.02170575223863125, -0.0007686053868383169, -0.009199744090437889, 0.012364886701107025, -0.003991707693785429, 0.05677095800638199, 0.02170575223863125, -0.10956364870071411, -0.020106380805373192, 0.020106380805373192, 0.019998859614133835, 0.01845324970781803, 0.023748645558953285, 0.04924450442194939, 0.049056343734264374, 0.0005359237547963858, -0.03413783758878708, 0.019515017047524452, -0.04894882068037987, 0.022297115996479988, 2.0068264348083176e-05, -0.009025023318827152, -0.062845878303051, -0.026154423132538795, -0.016867317259311676, 0.031073497608304024, -0.018547330051660538, -0.020469263195991516, -0.00034356239484623075, 0.028089797124266624, 0.007371891289949417, -0.01993165910243988, -0.020872466266155243, 0.005292037036269903, 0.02860051952302456, -0.005066915415227413, 0.010590793564915657, -0.02429969049990177, 0.004885474219918251, 0.006363884545862675, 0.04714784771203995, -0.03069717437028885, -0.015052905306220055, -0.010624393820762634, -0.01990477927029133, 0.01665227673947811, -0.0009223264642059803, 0.014703462831676006, -0.04316958039999008, 0.022875038906931877, 0.007754934020340443, -0.008783101104199886, -0.024366891011595726, 0.029111243784427643, 0.009999429807066917, -0.02787475474178791, -0.010563913732767105, -0.024501292034983635, -0.0006195043097250164, 0.012102804146707058, -0.0015615709125995636, -0.0005023234989494085, 0.006357164587825537, -0.018681731075048447, 0.002454497152939439, 0.041852451860904694, 0.011592080816626549, -0.021087506785988808, 0.022391196340322495, -0.03489048406481743, 0.0057389200665056705, -0.030186450108885765, 0.001806012587621808, -0.002266335766762495, 0.022875038906931877, -0.029890768229961395, -0.030670294538140297, -0.0269877091050148, 0.04378782585263252, 0.028815561905503273, 0.029729487374424934, -0.0011180478613823652, -0.011746642179787159, 0.019058052450418472, 0.07289906591176987, -0.014797543175518513, -0.01025479193776846, 0.005271876696497202, -0.03653017431497574, -0.006589006166905165, -0.007351731415838003, 0.0017724123317748308, -0.0027216190937906504, 0.037417221814394, -0.009522306732833385, -0.0021420149132609367, 0.007271090988069773, 0.029729487374424934, -0.017458682879805565, 0.01934029534459114, 0.014313699677586555, -0.02232399582862854, -0.0031752220820635557, -0.019515017047524452, 0.012929370626807213, 0.020885905250906944, -0.018090365454554558, 0.04034716263413429, 0.01663883589208126, -0.06655534356832504, -0.04330398142337799, 0.013386333361268044, 0.00760037312284112, 0.015106665901839733, 0.030159570276737213, 0.025092655792832375, -0.02128910832107067, 0.03567000851035118, -0.005554118659347296, 0.01137703936547041, 0.02157135121524334, -0.0034473841078579426, 0.022686878219246864, 0.05935145542025566, -0.0033633834682404995, 0.018775811418890953, 0.005839720834046602, 0.02128910832107067, -0.007929655723273754, -0.04123420640826225, -0.00018480129074305296, -0.017431801185011864, 0.001388529664836824, 0.009737348183989525, 0.05709351971745491, 0.026275383308529854, -0.027605952695012093, -0.03403031826019287, -0.012936090119183064, -0.023856166750192642, 0.013688735663890839, -0.024649132043123245, -0.012989850714802742, 0.01663883589208126, 0.01677323691546917, -0.030616533011198044, 0.020173581317067146, 0.026933947578072548, 0.004317630082368851, 0.042927660048007965, -0.006632686126977205, 0.009105663746595383, -0.003259222721680999, -0.020428942516446114, -0.0127344885841012, -0.03502488508820534, -0.01575179025530815, 0.04394910857081413, 
-0.031234778463840485, 0.0026090582832694054, 0.025213615968823433, 0.007076209411025047, -0.0018379328539595008, 0.0024964974727481604, -0.02233743667602539, -0.00340874376706779, 0.005255076568573713, 0.02575122006237507, 0.02310352213680744, 0.013144412077963352, -0.01829196698963642, -0.006303403992205858, -0.02981012873351574, -0.0006480645388364792, 0.006300043780356646, -0.007849014364182949, 0.048169296234846115, -0.02114126831293106, -0.003020661184564233, 0.057577360421419144, 0.0004804833442904055, 0.0314229391515255, 0.022875038906931877, 0.020751504227519035, 0.006740206852555275, 0.017593082040548325, 0.029729487374424934, 0.0061958832666277885, 0.02623506262898445, -0.04131484776735306, 0.05169060081243515, 0.01798284612596035, -0.005271876696497202, 0.025885621085762978, -0.0056112390011549, -0.0033953036181628704, 0.023009439930319786, -0.0012230485444888473, 0.016222193837165833, 0.01780812442302704, 0.034971125423908234, -0.010281671769917011, -0.04687904566526413, 0.04314270243048668, 0.02053646370768547, -0.053437814116477966, -0.029460685327649117, -0.025563059374690056, 0.06644782423973083, -0.012714329175651073, 0.0050568352453410625, -0.024837292730808258, -0.0057389200665056705, -0.03327767178416252, 0.0068006874062120914, -0.028681160882115364, 0.014743782579898834, 0.017015159130096436, 0.006518445443361998, -0.033170152455568314, -0.00860838033258915, -0.036449532955884933, -0.026638265699148178, -0.008977983146905899, 0.04558879882097244, 0.00847397930920124, -0.02533457614481449, -0.02202831394970417, 0.012707608751952648, -0.023762086406350136, 0.012620247900485992, -0.03249814733862877, 0.012674008496105671, -0.0022444957867264748, -0.0035011444706469774, -0.0058867610059678555, 0.008225337602198124, 0.0058867610059678555, 0.005187876056879759, 0.03545496612787247, 0.0076944539323449135, -0.019058052450418472, -0.036960259079933167, -0.010086790658533573, 0.0003393623628653586, 0.0013398093869909644, 0.00520803639665246, 0.03354647383093834, 0.017929084599018097, 0.011961683630943298, -0.014864743687212467, -0.005624679382890463, 0.00831269845366478, 0.030750934034585953, -0.008964542299509048, -0.033492714166641235, -0.02800915576517582, 0.023762086406350136, 0.002585537964478135, 0.010382472537457943, -0.00760037312284112, 0.028197316452860832, -0.029030602425336838, 0.022539038211107254, 0.032417505979537964, 0.0314766988158226, 0.0003292823093943298, -0.03521304577589035, 0.018318848684430122, 0.005980841815471649, -0.0068712481297552586, -0.01175336167216301, -0.009448385797441006, -0.0026409784331917763, 0.013083931058645248, 0.04416414722800255, 0.008917502127587795, 0.005826280917972326, 0.05034659057855606, 0.007029169239103794, 0.044701751321554184, 0.0020328143145889044, -0.004109308589249849, 0.015698028728365898, -0.011968404054641724, 0.012761369347572327, 0.028492998331785202, -0.0187220498919487, -0.00808421615511179, -0.012176725082099438, -0.0019051332492381334, 0.010637834668159485, -0.007331571076065302, -0.025952821597456932, 0.02427280880510807, 0.03569689020514488, -0.01175336167216301, -0.004777953494340181, 0.041099805384874344, 0.014501861296594143, 0.0022965760435909033, -0.004791393410414457, 0.004882114008069038, 0.01885645091533661, -8.599559805588797e-05, 0.013729056343436241, -0.03537432849407196, 0.021490709856152534, 0.024219049140810966, 0.01618187315762043, -0.036637697368860245, 0.0075533329509198666, 0.021006867289543152, 0.003232342656701803, -0.042793259024620056, 0.02365456521511078, -0.00183457275852561, 
-0.018654849380254745, 0.021678870543837547, 0.01116199791431427, -0.005604519043117762, 0.02755219303071499, -0.020482702180743217, -0.03959451615810394, -0.028573639690876007, 0.018654849380254745, -0.02338576316833496, 0.010879755951464176, 0.002410816727206111, -0.0016010511899366975, 0.014018017798662186, -0.005910281091928482, 0.009858309291303158, 0.02889620140194893, -0.005460038315504789, 0.015375467017292976, 0.008762940764427185, 0.052335724234580994, -0.010873035527765751, -0.00512739596888423, 0.0061790831387043, -0.0374709814786911, -0.013514013960957527, -0.01692107878625393, 0.0021672151051461697, -0.01603403128683567, -0.0299714095890522, 0.02069774456322193, 0.03260566666722298, -0.003119781846180558, -0.0054365177638828754, -0.002488097408786416, -0.010543753392994404, -0.028976842761039734, -0.015214186161756516, 0.025092655792832375, -0.0039177872240543365, 0.06064170226454735, -0.0003021921147592366, -0.015388907864689827, -0.007721333764493465, -0.034057196229696274, 0.055373188108205795, -0.018816132098436356, 0.012371606193482876, -0.023600805550813675, 0.01190120354294777, -0.008292538113892078, -0.009616387076675892, 0.007190450094640255, 0.008460539393126965, 0.016517875716090202, -0.0172167606651783, -0.02954132668673992, -0.003170182229951024, 0.028116676956415176, 0.005083715543150902, 0.005833000876009464, 0.014784103259444237, 0.010019590146839619, -0.013446814380586147, 0.011269519105553627, -0.0075533329509198666, -0.003430583979934454, 0.02306320145726204, 0.01378953643143177, 0.022243354469537735, 0.004361310508102179, -0.0049661146476864815, -0.03462168201804161, 0.019394055008888245, -0.008877182379364967, 0.010523593053221703, 0.0022680158726871014, 0.008064056746661663, -1.1346642168064136e-05, -0.029998289421200752, 0.037229061126708984, -0.0020428942516446114, 0.0031836221460253, -0.016114672645926476, 0.00269809877499938, -0.009206464514136314, -0.010234631597995758, -0.02201487310230732, -0.02157135121524334, -0.009085503406822681, -0.000913926400244236, -0.02830483764410019, 0.013144412077963352, 0.005211396608501673, 0.029272524639964104, -0.013977698050439358, -0.002995460992679, 0.023762086406350136, -0.01102087739855051, 0.0017144519370049238, -0.05193252116441727, 0.03419159725308418, -0.042470697313547134, 0.0146093824878335, -0.035911932587623596, -0.036772098392248154, 0.005167716182768345, -0.017431801185011864, -0.0093744657933712, 0.018910212442278862, 0.012546327896416187, 0.01057063415646553, -0.00681748753413558, 0.006501645315438509, -0.02425936982035637, 0.013359453529119492, -0.012458967044949532, 0.01857420988380909, -0.003108021803200245, 0.06085674464702606, 0.005974121857434511, 0.012270805425941944, -0.008258937858045101, -0.005718759726732969, 0.003978267777711153, 0.011498000472784042, 0.001109647797420621, -0.025804979726672173, -0.009468546137213707, 0.0232244823127985, 0.010301832109689713, 0.007909495383501053, 0.03510552644729614, 0.04827681556344032, 0.003988347947597504, -0.005234916694462299, -0.023614244535565376, 0.0172167606651783, 0.02425936982035637, 0.026920508593320847, 0.004055548459291458, -0.003890907159075141, -0.04362654313445091, 0.020630544051527977, -0.02771347388625145, -0.021396629512310028, -0.03196054324507713, 0.014448100700974464, 0.011498000472784042, -0.002745139179751277, -0.00800357572734356, 0.0034037036821246147, 0.01677323691546917, -0.0020613744854927063, 0.010133830830454826, 0.02202831394970417, 0.0049728346057236195, 0.005080355331301689, -0.017042038962244987, 
-0.018775811418890953, 0.030750934034585953, 0.0209531057626009, 0.03792794421315193, 0.023305123671889305, -0.031369179487228394, 0.010597513988614082, -0.012633687816560268, -0.026302263140678406, 0.009703747928142548, -0.010920076631009579, -0.04523935541510582, 0.022404637187719345, 0.024514731019735336, -0.017875324934720993, 0.01889677159488201, 0.021221907809376717, 0.008037175983190536, -0.03905691206455231, -0.0006169843254610896, 0.04634144529700279, 0.029138123616576195, 0.03456792235374451, 0.01690763793885708, -0.0011558480327948928, 0.018036605790257454, -0.016410354524850845, 0.010382472537457943, -0.011524880304932594, -0.0028812200762331486, -0.007969975471496582, -0.008232057094573975, 0.0031382618471980095, 0.033788394182920456, 0.00980454869568348, -0.019380616024136543, -0.017619963735342026, -0.038895633071660995, -0.042900778353214264, -0.002375536598265171, -0.004314270336180925, 0.015926511958241463, -0.0187220498919487, -0.020267661660909653, 0.013453533872961998, 0.026866747066378593, -0.0031886622309684753, -0.009414785541594028, 0.031530458480119705, -0.015294826589524746, -0.020912786945700645, 0.002807299606502056, -0.019851017743349075, -0.0013246892485767603, 0.03360023349523544, -0.019420934841036797, -0.04526623710989952, -0.006851087789982557, 0.017593082040548325, -0.009125824086368084, -0.017861884087324142, 0.00996582955121994, -0.010288392193615437, 0.025845300406217575, -0.02323792316019535, -0.007660853676497936, -0.02201487310230732, 0.02846611849963665, -0.009031742811203003, -0.01002630963921547, -0.004774593282490969, 0.004378110636025667, -0.025374896824359894, -0.042013734579086304, -0.004260509740561247, -0.0028459399472922087, 0.018238207325339317, 0.02501201443374157, 0.023775525391101837, 0.03849243000149727, 0.011363599449396133, -0.0036221053451299667, -0.01602059230208397, -0.013729056343436241, -0.009408066049218178, -0.02514641545712948, 0.033035751432180405, 0.0149857047945261, -0.0003116421867161989, -0.011545040644705296, 0.027659712359309196, 0.007654133252799511, -0.016759797930717468, -0.0010273271473124623, 0.015953391790390015, 0.02083214558660984, -0.024944813922047615, -0.007271090988069773, 0.007479412015527487, -0.01257320772856474, -0.024460971355438232, -0.002597298240289092, 0.00014164598542265594, 0.008655420504510403, -0.01145096030086279, -0.0003003021120093763, -0.017445242032408714, 0.020899346098303795, -0.017472121864557266, 0.028788680210709572, 0.02126222848892212, 0.012929370626807213, -0.017324281856417656, 0.011545040644705296, 0.035428088158369064, -0.002256255829706788, 0.01755276322364807, 0.02381584607064724, -0.02424592897295952, -3.5253997339168563e-05, 0.0039312276057899, -0.008460539393126965, 0.031718622893095016, 0.012989850714802742, 0.019998859614133835, -0.026302263140678406, 0.04526623710989952, 0.006256363820284605, -0.014313699677586555, -0.028385479003190994, -0.005923721473664045, 0.04357278347015381, 0.011934803798794746, 0.009938949719071388, -0.04744353145360947, 0.0009366065496578813, -0.031127257272601128, -0.002100014593452215, 0.008090936578810215, -0.005449958145618439, 0.018668290227651596, -0.02233743667602539, -0.015469548292458057, 0.0010634474456310272, -0.020415503531694412, -0.0007035048911347985, 0.0583837665617466, -0.03196054324507713, 0.0027972194366157055, -0.025737779214978218, 0.013063771650195122, -0.0019017732702195644, -0.0024897772818803787, 0.012989850714802742, -0.007022449281066656, -0.023318562656641006, -0.005073635373264551, -0.006881327833980322, 
0.006545325741171837, 0.003027381142601371, -0.02591250091791153, 0.011571920476853848, -0.005130755715072155, -0.0307240542024374, -0.017942525446414948, 0.0001239008706761524, -0.024770092219114304, -0.012707608751952648, -0.01932685449719429, 0.010033030062913895, 0.0064411647617816925, -0.014125538989901543, -0.002234415616840124, 0.02022734098136425, -0.01860108971595764, -0.010731915012001991, 0.030240211635828018, -0.008185016922652721, -0.004704033024609089, -0.037524741142988205, 0.030912216752767563, -0.002116814721375704, -0.003143301932141185, 0.018910212442278862, 0.033519595861434937, 0.021100947633385658, 0.028116676956415176, -0.01708235964179039, 0.022283675149083138, -0.013755936175584793, 0.013910497538745403, 0.025253936648368835, 0.004304190166294575, -0.017714044079184532, 0.0025905780494213104, 0.00016275113739538938, -0.004421791061758995, 0.016383474692702293, 0.012687448412179947, 0.003092901548370719, -0.01227752584964037, -0.010819275863468647, 0.021208468824625015, 0.005382757633924484, 0.005930441431701183, 0.022391196340322495, -0.011881043203175068, -0.010167431086301804, -0.001102927722968161, 0.012680728919804096, 0.011336719617247581, 0.014649702236056328, 0.0008513459470123053, 0.014273379929363728, -0.0013935697497799993, -0.0035481848753988743, 0.009038463234901428, -0.025589939206838608, 0.019273094832897186, 0.020764945074915886, -0.023856166750192642, -0.06574893742799759, -0.017297400161623955, -0.014488421380519867, -0.005103875882923603, -0.017136119306087494, 0.026624826714396477, -0.016087792813777924, -0.009206464514136314, -0.02321104146540165, -0.0025401776656508446, 0.006696526892483234, 0.03537432849407196, 0.011417360045015812, -0.00853445939719677, -0.02157135121524334, 0.024044327437877655, 0.012821849435567856, -0.028519880026578903, -0.012257365509867668, -0.012324566021561623, -0.03284759074449539, -0.02365456521511078, 0.012082644738256931, 0.03491736575961113, 0.011840722523629665, 0.030213331803679466, -0.030939096584916115, -0.013372893445193768, 0.02876180037856102, 0.015818990767002106, -0.018708610907197, 0.005308837164193392, 0.02366800606250763, 0.018171006813645363, 0.00014637102140113711, 0.003375143511220813, -0.015348587185144424, -0.022068634629249573, 0.003699385793879628, -0.011780242435634136, 0.02951444685459137, -0.0006732647307217121, -0.00524499686434865, -0.013433373533189297, -0.007392051629722118, 0.013991137966513634, -0.01145767979323864, -0.011249358765780926, 0.02862739935517311], metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={: RelatedNodeInfo(node_id='doc_0', node_type=, metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, hash='3b095b0e25cdf965d950cdbd7feb8024030e7645998c1a33dc4427affca624ab'), : RelatedNodeInfo(node_id='e470fa0d001e50b3ec3088022462a94ea7c87dd80106411b7d120f90b379e977', node_type=, metadata={}, hash='71418de3d50e604c2581574f1abf2248e5cc3ab7c74a3182c37cb1152d0cfd21')}, text='LLM Variants and Meta\\'s Open Source Before shedding light on four major trends, I\\'d share the latest Meta\\'s Llama 2 and Code Llama. Meta\\'s Llama 2 represents a sophisticated evolution in LLMs. This suite spans models pretrained and fine-tuned across a parameter spectrum of 7 billion to 70 billion. 
A specialized derivative, Llama 2-Chat, has been engineered explicitly for dialogue-centric applications. Benchmarking revealed Llama 2\\'s superior performance over most extant open-source chat models. Human-centric evaluations, focusing on safety and utility metrics, positioned Llama 2-Chat as a potential contender against proprietary, closed-source counterparts. The development trajectory of Llama 2 emphasized rigorous fine-tuning methodologies. Meta\\'s transparent delineation of these processes aims to catalyze community-driven advancements in LLMs, underscoring a commitment to collaborative and responsible AI development. Code Llama is built on top of Llama 2 and is available in three models: Code Llama, the foundational code model;Codel Llama - Python specialized for Python;and Code Llama - Instruct, which is fine-tuned for understanding natural language instructions. Based on its benchmark testing, Code Llama outperformed state-of-the-art publicly available LLMs (except GPT-4) on code tasks. Llama 2, Llama 2-Chat, and Code Llama are key steps in LLM development but still have a way to go compared to GPT-4. Meta\\'s open access and commitment to improving these models promise transparent and faster LLM progress in the future. Please refer to the LLM and Llama variants below: From LLMs to Multimodal LLMs, like OpenAI\\'s ChatGPT (GPT-3.5), primarily focus on understanding and generating human language. They\\'ve been instrumental in tasks like text generation, translation, and even creative writing. However, their scope is limited to text. Enter multimodal models like GPT-4. These are a new breed of AI models that can understand and generate not just text, but also images, sounds, and potentially other types of data. The term \"multimodal\" refers to their ability to process multiple modes or', mimetype='text/plain', start_char_idx=0, end_char_idx=2117, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n')"
]
},
"metadata": {},
"execution_count": 12
}
],
"source": [
"nodes[0]"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "EV0ll57p46Dc"
},
"source": [
"# Load Indexes\n"
]
},
{
"cell_type": "code",
"execution_count": 13,
"metadata": {
"id": "HbT3-kRO4Qpt"
},
"outputs": [],
"source": [
"# Create your index\n",
"from llama_index.core import VectorStoreIndex\n",
"\n",
"index = VectorStoreIndex.from_vector_store(vector_store)"
]
},
{
"cell_type": "code",
"execution_count": 14,
"metadata": {
"id": "sb61DWU84bHP"
},
"outputs": [],
"source": [
"from llama_index.llms.gemini import Gemini\n",
"\n",
"# Define a query engine that is responsible for retrieving related pieces of text,\n",
"# and using a LLM to formulate the final answer.\n",
"\n",
"llm = Gemini(model=\"models/gemini-1.5-flash\", temperature=0.3, max_tokens=512)\n",
"query_engine = index.as_query_engine(llm=llm, similarity_top_k=5)"
]
},
{
"cell_type": "code",
"execution_count": 15,
"metadata": {
"id": "G32W2LMMCmnv"
},
"outputs": [],
"source": [
"res = query_engine.query(\"How many parameters LLaMA 2 model has?\")"
]
},
{
"cell_type": "code",
"execution_count": 16,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 35
},
"id": "obc20cU5Cxf2",
"outputId": "c9ca8f2d-91e5-4333-b799-1ef1584eb85e"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'The Llama2 model has 7 billion parameters. \\n'"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
}
},
"metadata": {},
"execution_count": 16
}
],
"source": [
"res.response"
]
},
{
"cell_type": "code",
"execution_count": 17,
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/"
},
"id": "oIAO-saJCzYe",
"outputId": "13661c3b-8192-47c6-c4d5-cffd2993de79"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"Node ID\t de49ab9024a434ca1cd1efba258fbaa9a3e2d9a1bca3ab4a0349220cc1e2754f\n",
"Title\t Building a Q&A Bot over Private Documents with OpenAI and LangChain\n",
"Text\t Private data to be used The example provided can be used with any dataset. I am using a data set that has Analyst recommendations from various stocks. For the purpose of demonstration, I have gathered publicly available analyst recommendations to showcase its capabilities. You can replace this with your own information to try this. Below is a partial extract of the information commonly found in these documents. If you wish to try it yourself, you can download analyst recommendations for your preferred stocks from online sources or access them through subscription platforms like Barron's. Although the example provided focuses on analyst recommendations, the underlying structure can be utilized to query various other types of documents in any industry as well. I have assembled such data for a few stocks for demonstration purposes. This includes Google, Microsoft, Meta, and Tesla. To facilitate easy access and updating of analysts' recommendations, all the recommendations can be organized into a designated folder. Each stock corresponds to a separate file within this folder. For example, if there are recommendations for 20 stocks, there will be 20 individual files. This organization enables convenient updating of information for each stock as new recommendations arrive, streamlining the process of managing and maintaining the most up-to-date data for each stock. Questions this Q&A bot application can answer The data we have for this application is stock market analyst recommendations for many stocks. Let's say you are looking for insight about Microsoft stock. You can ask any of the following questions as an example: What is the median target price for Microsoft (MSFT)?What is the highest price estimate for Microsoft (MSFT)?What is the lowest price estimate for Microsoft (MSFT)?How much percentage increase is expected in the stock price of Microsoft (MSFT)?How many analysts provided price forecasts for Microsoft (MSFT)?What is the current consensus among investment analysts regarding Microsoft (MSFT)?Has the consensus rating for Microsoft (MSFT) changed recently?When was the consensus rating last updated for Microsoft (MSFT)?Is the current recommendation for Microsoft (MSFT) to buy, sell, or hold the stock?Are there any recent analyst reports available for Microsoft (MSFT)? These questions cover various aspects of the stock analysis, including price forecasts, analyst recommendations, and recent changes in ratings. The\n",
"Score\t 0.14514275574970692\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t ef0097732e6eed361247a1081f21a3688bdcfff0d8ec6db66c2bfd6381359bf0\n",
"Title\t Exploring Large Language Models -Part 3\n",
"Text\t is, does not result in proper output to questions. The answers are not affected by the training data. Take 2: Instruct Fine-tuning with QLoRa Instruction Tuning concept is a higher-level training concept introduced by this paper FineTuned Language Models Are Zero shot Learners (FLAN) We leverage the intuition that NLP tasks can be described via natural language instructions, such as \"Is the sentiment of this movie review positive or negative?\" or \"Translate 'how are you' into Chinese.\" We take a pre-trained language model of 137B parameters and perform instruction tuning ... Since we use QLoRa we are effectively closely following this paper - QLORA: Efficient Finetuning of Quantized LLMs concerning the training data set, the format that the authors used to train their Gauanco model This is the format for the Llama2 model and will be different for others. One of the hardest problems of training is finding or creating a good quality data set to train. In our case, converting the available training data set to the instruction data set. Since our use case is Closed Book QA, we need to convert this to a QA format. Using older NLP methods like NER (Named Entity Recognition) and then using that to create a QA dataset was not effective. This is where the Self-instruct concept could be used However previous to Llama2, the best-performing model was the GPT 3/4 model via ChatGPT or its API and using these models to do the same was expensive. The 7 billion model of Llama2 has sufficient NLU (Natural Language Understanding) to create output based on a particular format. Running this in 4-bit mode via Quantisation makes it feasible compute-wise to run this on a large data set and convert it to a QA dataset. This was the prompt used. The context was a sliding window from the text dataset. Some minimal parsing and finetuning were done on the output of the model, and we could generate a QA dataset of the format below. This was fed to the QLoRA-based fine-tuning (Colab Notebook). We can see that the output from a fine-tuned 4-bit quantized llama2 7 B model is pretty good. Colab Notebook Trying to\n",
"Score\t 0.14320868766475625\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 7c0ff552ae4caad1b5fa1914f8c5ea0c907705192580cc127e76b245221805c1\n",
"Title\t Foundation Models: Scaling Large Language Models\n",
"Text\t AI, providing a versatile and adaptable approach to solving complex problems across multiple domains. From language and vision to robotics and reasoning, these models are unlocking new possibilities and driving innovation across various industries. As we continue to explore the full potential of foundation models and their role in the evolution towards AGI, it is crucial to foster responsible and ethical AI development, ensuring these models are used to benefit humanity and address the most pressing challenges of our time. With foundation models as a solid basis, we can accelerate AI research and development, unlocking new frontiers and shaping the future of intelligent systems. LLMs Papers GPT-4 Technical Report: https://arxiv.org/abs/2303.08774GPT-3: Language Models are Few-Shot Learners: https://arxiv.org/abs/2005.14165Toolformer: Language Models Can Teach Themselves to Use Tools: https://arxiv.org/abs/2302.04761LLaMA: Open and Efficient Foundation Language Models: https://arxiv.org/abs/2302.13971Google USM: Scaling Automatic Speech Recognition Beyond 100 Languages: https://arxiv.org/abs/2303.01037Using DeepSpeed and Megatron to Train Megatron-Turing NLG 530B, A Large-Scale Generative Language Model: https://arxiv.org/abs/2201.11990 Foundation Models Resources Reflections on Foundation Models: https://hai.stanford.edu/news/reflections-foundation-modelsOn the Opportunities and Risks of Foundation Models: https://arxiv.org/abs/2108.07258\n",
"Score\t 0.1430069728266482\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t b5eeda2ed7d31c3d4f55c6dd4d95f8c3bc0c4a14e3ef371f92770f124632dbef\n",
"Title\t Exploring Large Language Models -Part 3\n",
"Text\t a particular format. Running this in 4-bit mode via Quantisation makes it feasible compute-wise to run this on a large data set and convert it to a QA dataset. This was the prompt used. The context was a sliding window from the text dataset. Some minimal parsing and finetuning were done on the output of the model, and we could generate a QA dataset of the format below. This was fed to the QLoRA-based fine-tuning (Colab Notebook). We can see that the output from a fine-tuned 4-bit quantized llama2 7 B model is pretty good. Colab Notebook Trying to reduce hallucination via fine-tuning In the generated dataset, I added a specific tag `Source:8989REF`. The idea was that via attention, this token will be somehow associated with the text that we were training on. And then to use this hash somehow to tweak the prompt to control hallucination. Something like \"[INST] <>\\nYou are a helpful Question Answering Assistant. Please only answer from this reference Source:8989REF\" However, that turned out to be a very naive attempt. Also, note that the generated QA missed transforming training data related to Professor Thiersch's method to a proper QA dataset. These and other improvements need to be experimented with, as well as to train with some completely new data that the model has not seen to test more effectively. Update: Training with new data was done by writing an imaginary story with ChatGPT help and then creating an instruction tuning data set (colab notebook). The model was then trained and tested (colab notebook) with this generated instruct dataset. The results confirm that the model learns via Instruct tuning, not only the fed questions but other details and relations of the domain. Problems with hallucinations remain (Bordor, Lila characters who are not in the story). The LLama2 13B 4-bit fine-tuned model has better output than the 7B model. A lot more needs to be explored in Fine-tuning. One observation is that slight changes in prompts give different answers. Since the output is not deterministic (that is, with even the same prompt, it varies over time), it is all the more difficult to fine-tune prompts to\n",
"Score\t 0.14165182982721075\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
"Node ID\t 15268fd9c2a45644a0c49ca1b4897b4fabfe3005fccee48af0acc7eea7dd0e9c\n",
"Title\t Building a Q&A Bot over Private Documents with OpenAI and LangChain\n",
"Text\t much percentage increase is expected in the stock price of Microsoft (MSFT)?How many analysts provided price forecasts for Microsoft (MSFT)?What is the current consensus among investment analysts regarding Microsoft (MSFT)?Has the consensus rating for Microsoft (MSFT) changed recently?When was the consensus rating last updated for Microsoft (MSFT)?Is the current recommendation for Microsoft (MSFT) to buy, sell, or hold the stock?Are there any recent analyst reports available for Microsoft (MSFT)? These questions cover various aspects of the stock analysis, including price forecasts, analyst recommendations, and recent changes in ratings. The chat system can provide specific answers based on the information available in the financial documents. Please note that you can not only ask questions about an individual stock but can also ask comparative questions across stocks. For example, which stock has the most price increase? Here the system will compare the price increase across all the stocks and provide an answer. Quick summary of how the web application works This web-based application allows users to input their questions in a text box and receive answers based on insights gathered from multiple documents. For instance, users can inquire, \"What is the highest price estimate for Microsoft?\" and the application will query the relevant documents to provide an accurate response. Moreover, users can also compare stocks by asking questions such as, \"Which stock, Meta or Microsoft, has a higher percentage increase in the stock price?\" The application will analyze the data across the documents, enabling users to make informed investment decisions based on the comparative insights provided. Application Overview The application is built with LangChain and ChatGPT. Though it uses ChatGPT, we can also wire this to other LLMs as well. LangChain is an innovative framework designed to empower you in building sophisticated applications driven by large language models (LLMs). By offering a standardized interface, LangChain facilitates the seamless integration of various components, including LLMs, data sources, and actions. This streamlined approach accelerates the development of robust applications, enhanced by features such as chaining, data awareness, and agentic capabilities. To complement LangChain, the web application is built utilizing Streamlit, a Python library for creating interactive web applications and data dashboards. Streamlit's\n",
"Score\t 0.14137764389568408\n",
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
]
}
],
"source": [
"for src in res.source_nodes:\n",
" print(\"Node ID\\t\", src.node_id)\n",
" print(\"Title\\t\", src.metadata[\"title\"])\n",
" print(\"Text\\t\", src.text)\n",
" print(\"Score\\t\", src.score)\n",
" print(\"-_\" * 20)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "d4xxZHbdN0lK"
},
"source": [
"# Evaluate the retrieval process and quality of answers\n",
"\n",
"We can evaluate our RAG system with a dataset of questions and associated chunks. Given a question, we can see if the RAG system retrieves the correct chunks of text that can answer the question.\n",
"\n",
"You can generate a synthetic dataset with an LLM such as `gemini-1.5-flash` or create an authentic and manually curated dataset.\n",
"\n",
"Note that a **well curated dataset will always be a better option**, especially for a specific domain or use case.\n"
]
},
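  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make the shape of such a dataset concrete, here is a minimal sketch of the three mappings an `EmbeddingQAFinetuneDataset` holds: the questions, the chunk corpus, and which chunk(s) answer each question. The IDs and questions below are made up purely for illustration, not generated from our data.\n",
    "\n",
    "```python\n",
    "from llama_index.core.evaluation import EmbeddingQAFinetuneDataset\n",
    "\n",
    "# Hypothetical IDs and questions, only to illustrate the structure.\n",
    "toy_dataset = EmbeddingQAFinetuneDataset(\n",
    "    queries={  # question_id -> question text\n",
    "        \"q1\": \"How many parameter sizes does Llama 2 come in?\",\n",
    "        \"q2\": \"What is Code Llama - Python specialized for?\",\n",
    "    },\n",
    "    corpus={  # node_id -> chunk text\n",
    "        \"node_a\": \"Llama 2 spans models pretrained and fine-tuned from 7B to 70B parameters...\",\n",
    "        \"node_b\": \"Code Llama is built on top of Llama 2 and is available in three models...\",\n",
    "    },\n",
    "    relevant_docs={  # question_id -> node_ids containing the answer\n",
    "        \"q1\": [\"node_a\"],\n",
    "        \"q2\": [\"node_b\"],\n",
    "    },\n",
    ")\n",
    "```\n"
   ]
  },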
{
"cell_type": "markdown",
"metadata": {
"id": "SuYIj1tD1Hwv"
},
"source": [
"In our example, we will generate a synthetic dataset using `gemini-1.5-flash` to make it simple.\n",
"\n",
"This is the default prompt that the `generate_question_context_pairs` function will uses:\n",
"\n",
"```python\n",
"DEFAULT_QA_GENERATE_PROMPT_TMPL = \"\"\"\\\n",
"Context information is below.\n",
"\n",
"---------------------\n",
"{context_str}\n",
"---------------------\n",
"\n",
"Given the context information and no prior knowledge,\n",
"generate only questions based on the below query.\n",
"\n",
"You are a Teacher/Professor. Your task is to setup \\\n",
"{num_questions_per_chunk} questions for an upcoming \\\n",
"quiz/examination. The questions should be diverse in nature \\\n",
"across the document. Restrict the questions to the \\\n",
"context information provided.\"\n",
"\"\"\"\n",
"```\n"
]
},
{
"cell_type": "code",
"source": [
"# Free Tier-Gemini API key\n",
"from llama_index.core.llms.utils import LLM\n",
"from llama_index.core.schema import MetadataMode, TextNode\n",
"from tqdm import tqdm\n",
"import json\n",
"import re\n",
"import uuid\n",
"import warnings\n",
"import time\n",
"from typing import Dict, List, Tuple\n",
"from llama_index.core.evaluation import EmbeddingQAFinetuneDataset\n",
"\n",
"DEFAULT_QA_GENERATE_PROMPT_TMPL = \"\"\"\\\n",
"Context information is below.\n",
"\n",
"---------------------\n",
"{context_str}\n",
"---------------------\n",
"\n",
"Given the context information and not prior knowledge.\n",
"generate only questions based on the below query.\n",
"\n",
"You are a Teacher/ Professor. Your task is to setup \\\n",
"{num_questions_per_chunk} questions for an upcoming \\\n",
"quiz/examination. The questions should be diverse in nature \\\n",
"across the document. Restrict the questions to the \\\n",
"context information provided.\"\n",
"\"\"\"\n",
"\n",
"def generate_question_context_pairs(\n",
" nodes: List[TextNode],\n",
" llm: LLM,\n",
" qa_generate_prompt_tmpl: str = DEFAULT_QA_GENERATE_PROMPT_TMPL,\n",
" num_questions_per_chunk: int = 2,\n",
" request_delay: float = 2.0\n",
") -> EmbeddingQAFinetuneDataset:\n",
" \"\"\"Generate examples given a set of nodes with delays between requests.\"\"\"\n",
" node_dict = {\n",
" node.node_id: node.get_content(metadata_mode=MetadataMode.NONE)\n",
" for node in nodes\n",
" }\n",
"\n",
" queries = {}\n",
" relevant_docs = {}\n",
"\n",
" for node_id, text in tqdm(node_dict.items()):\n",
" query = qa_generate_prompt_tmpl.format(\n",
" context_str=text, num_questions_per_chunk=num_questions_per_chunk\n",
" )\n",
" response = llm.complete(query)\n",
"\n",
" result = str(response).strip().split(\"\\n\")\n",
" questions = [\n",
" re.sub(r\"^\\d+[\\).\\s]\", \"\", question).strip() for question in result\n",
" ]\n",
" questions = [question for question in questions if len(question) > 0][\n",
" :num_questions_per_chunk\n",
" ]\n",
"\n",
" num_questions_generated = len(questions)\n",
" if num_questions_generated < num_questions_per_chunk:\n",
" warnings.warn(\n",
" f\"Fewer questions generated ({num_questions_generated}) \"\n",
" f\"than requested ({num_questions_per_chunk}).\"\n",
" )\n",
"\n",
" for question in questions:\n",
" question_id = str(uuid.uuid4())\n",
" queries[question_id] = question\n",
" relevant_docs[question_id] = [node_id]\n",
"\n",
" time.sleep(request_delay)\n",
"\n",
" return EmbeddingQAFinetuneDataset(\n",
" queries=queries, corpus=node_dict, relevant_docs=relevant_docs\n",
" )\n",
"\n",
"#from llama_index.core.evaluation import generate_question_context_pairs\n",
"from llama_index.llms.gemini import Gemini\n",
"\n",
"llm = Gemini(model=\"models/gemini-1.5-flash\", temperature=1, max_tokens=512)\n",
"\n",
"rag_eval_dataset = generate_question_context_pairs(\n",
" nodes[:25],\n",
" llm=llm,\n",
" num_questions_per_chunk=1,\n",
" request_delay=4\n",
")\n",
"\n",
"# Save the dataset as a json file for later use\n",
"rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")\n"
],
"metadata": {
"colab": {
"base_uri": "https://localhost:8080/",
"height": 34
},
"id": "_kCbMX67TqG-",
"outputId": "84034294-ba9b-4d5a-baeb-b2c250180045"
},
"execution_count": 18,
"outputs": [
{
"output_type": "stream",
"name": "stderr",
"text": [
"100%|██████████| 25/25 [02:41<00:00, 6.46s/it]\n"
]
}
]
},
{
"cell_type": "code",
"execution_count": 19,
"metadata": {
"id": "jhHLA3he1Hww",
"collapsed": true
},
"outputs": [],
"source": [
"# #Paid-Gemini API Key\n",
"\n",
"# from llama_index.core.evaluation import generate_question_context_pairs\n",
"# from llama_index.llms.gemini import Gemini\n",
"\n",
"# llm = Gemini(model=\"models/gemini-1.5-flash\", temperature=1, max_tokens=512)\n",
"# rag_eval_dataset = generate_question_context_pairs(nodes, llm=llm, num_questions_per_chunk=1)\n",
"\n",
"# # We can save the dataset as a json file for later use.\n",
"# rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")"
]
},
{
"cell_type": "code",
"execution_count": 20,
"metadata": {
"id": "mNDd5i921Hww"
},
"outputs": [],
"source": [
"# We can also load the dataset from a previously saved json file.\n",
"from llama_index.core.evaluation import EmbeddingQAFinetuneDataset\n",
"\n",
"rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\"./rag_eval_dataset.json\")"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "qOx3vDWA1Hww"
},
"source": [
"### Evaluation for Hit Rate and Mean Reciprocal Rank (MRR)\n",
"\n",
"We will make use of `RetrieverEvaluator` available in Llama-index. We will measure the Hit Rate and Mean Reciprocal Rank (MRR).\n",
"\n",
"**Hit Rate:**\n",
"\n",
"Think of the Hit Rate like playing a game of guessing. You're given a question and you need to guess the correct answer from a list of options. The Hit Rate measures how often you guess the correct answer by only looking at your top few guesses. If you often find the right answer in your first few guesses, you have a high Hit Rate. So, in the context of a retrieval system, it's about how frequently the system finds the correct document within its top 'k' picks (where 'k' is a number you decide, like top 5 or top 10).\n",
"\n",
"**Mean Reciprocal Rank (MRR):**\n",
"\n",
"MRR is a bit like measuring how quickly you can find a treasure in a list of boxes. Imagine you have a row of boxes and only one of them has a treasure. The MRR calculates how close to the start of the row the treasure box is, on average. If the treasure is always in the first box you open, you're doing great and have an MRR of 1. If it's in the second box, the score is 1/2, since you took two tries to find it. If it's in the third box, your score is 1/3, and so on. MRR averages these scores across all your searches. So, for a retrieval system, MRR looks at where the correct document ranks in the system's guesses. If it's usually near the top, the MRR will be high, indicating good performance.\n",
"In summary, Hit Rate tells you how often the system gets it right in its top guesses, and MRR tells you how close to the top the right answer usually is. Both metrics are useful for evaluating the effectiveness of a retrieval system, like how well a search engine or a recommendation system works.\n"
]
},
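  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "To make these definitions concrete, here is a small self-contained sketch that computes Hit Rate and MRR by hand for a few toy retrieval results (the node IDs and rankings are invented, not taken from our index):\n",
    "\n",
    "```python\n",
    "# Toy example: for each query we know the expected node ID and the ranked IDs the retriever returned.\n",
    "retrieved = [\n",
    "    [\"n3\", \"n1\", \"n7\"],  # expected \"n1\" -> found at rank 2\n",
    "    [\"n2\", \"n9\", \"n4\"],  # expected \"n2\" -> found at rank 1\n",
    "    [\"n8\", \"n5\", \"n6\"],  # expected \"n0\" -> not found in the top k\n",
    "]\n",
    "expected = [\"n1\", \"n2\", \"n0\"]\n",
    "\n",
    "hits, reciprocal_ranks = 0, []\n",
    "for ranking, target in zip(retrieved, expected):\n",
    "    if target in ranking:\n",
    "        hits += 1\n",
    "        reciprocal_ranks.append(1 / (ranking.index(target) + 1))\n",
    "    else:\n",
    "        reciprocal_ranks.append(0.0)\n",
    "\n",
    "hit_rate = hits / len(expected)                      # 2/3: the correct chunk was in the top k for 2 of 3 queries\n",
    "mrr = sum(reciprocal_ranks) / len(reciprocal_ranks)  # (1/2 + 1 + 0) / 3 = 0.5\n",
    "print(hit_rate, mrr)\n",
    "```\n"
   ]
  },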
{
"cell_type": "code",
"execution_count": 21,
"metadata": {
"id": "eARSzx8I1Hww"
},
"outputs": [],
"source": [
"import pandas as pd\n",
"\n",
"\n",
"def display_results_retriever(name, eval_results):\n",
" \"\"\"Display results from evaluate.\"\"\"\n",
"\n",
" metric_dicts = []\n",
" for eval_result in eval_results:\n",
" metric_dict = eval_result.metric_vals_dict\n",
" metric_dicts.append(metric_dict)\n",
"\n",
" full_df = pd.DataFrame(metric_dicts)\n",
"\n",
" hit_rate = full_df[\"hit_rate\"].mean()\n",
" mrr = full_df[\"mrr\"].mean()\n",
"\n",
" metric_df = pd.DataFrame(\n",
" {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
" )\n",
"\n",
" return metric_df"
]
},
{
"cell_type": "code",
"execution_count": 22,
"metadata": {
"id": "hD5YflG51Hww",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "53a0b810-b589-4735-faab-5f1d0f4ebcf9"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_4 0.12 0.043333\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_6 0.16 0.05\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_8 0.2 0.055714\n",
" Retriever Name Hit Rate MRR\n",
"0 Retriever top_10 0.24 0.060159\n"
]
}
],
"source": [
"from llama_index.core.evaluation import RetrieverEvaluator\n",
"\n",
"# We can evaluate the retievers with different top_k values.\n",
"for i in [2, 4, 6, 8, 10]:\n",
" retriever = index.as_retriever(similarity_top_k=i)\n",
" retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
" [\"mrr\", \"hit_rate\"], retriever=retriever\n",
" )\n",
" eval_results = await retriever_evaluator.aevaluate_dataset(\n",
" rag_eval_dataset, workers=32\n",
" )\n",
" print(display_results_retriever(f\"Retriever top_{i}\", eval_results))\n",
"\n",
"time.sleep(60)"
]
},
{
"cell_type": "markdown",
"metadata": {
"id": "9y6uofcJ1Hwx"
},
"source": [
"### Evaluation using Relevance and Faithfulness metrics.\n",
"\n",
"Here, we evaluate the answer generated by the LLM. Is the answer using the correct context? Is the answer faithful to the context? Is the answer relevant to the question?\n",
"\n",
"An LLM will answer these questions, more specifically `gpt-4o`.\n",
"\n",
"**`FaithfulnessEvaluator`**\n",
"Evaluates if the answer is faithful to the retrieved contexts (in other words, whether there's an hallucination).\n",
"\n",
"**`RelevancyEvaluator`**\n",
"Evaluates whether the retrieved context and answer are relevant to the user question.\n",
"\n",
"Now, let's see how the top_k value affects these two metrics.\n"
]
},
{
"cell_type": "code",
"execution_count": 30,
"metadata": {
"id": "ckjE4fcD1Hwx",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "ef15f35f-5010-441f-e023-caa5e68489ea"
},
"outputs": [
{
"output_type": "stream",
"name": "stdout",
"text": [
"top_2 faithfulness_score: 0.25\n",
"top_2 relevancy_score: 0.6\n",
"===============\n",
"top_4 faithfulness_score: 0.1\n",
"top_4 relevancy_score: 0.95\n",
"===============\n",
"top_6 faithfulness_score: 0.2\n",
"top_6 relevancy_score: 0.9\n",
"===============\n",
"top_8 faithfulness_score: 0.1\n",
"top_8 relevancy_score: 0.6\n",
"===============\n",
"top_10 faithfulness_score: 0.05\n",
"top_10 relevancy_score: 0.55\n",
"===============\n"
]
}
],
"source": [
"from llama_index.core.evaluation import RelevancyEvaluator, FaithfulnessEvaluator, BatchEvalRunner\n",
"from llama_index.llms.openai import OpenAI\n",
"\n",
"# Create your index\n",
"from llama_index.core import VectorStoreIndex\n",
"index = VectorStoreIndex.from_vector_store(vector_store)\n",
"\n",
"# Define an LLM as a judge\n",
"llm_gpt4o = OpenAI(temperature=0, model=\"gpt-4o\")\n",
"llm_gpt4o_mini = OpenAI(temperature=0, model=\"gpt-4o-mini\")\n",
"\n",
"# Initiate the faithfulnes and relevancy evaluator objects\n",
"faithfulness_evaluator = FaithfulnessEvaluator(llm=llm_gpt4o)\n",
"relevancy_evaluator = RelevancyEvaluator(llm=llm_gpt4o)\n",
"\n",
"# Extract the questions from the dataset\n",
"queries = list(rag_eval_dataset.queries.values())\n",
"# Limit to first 10 question to save time (!!remove this line in production!!)\n",
"batch_eval_queries = queries[:20]\n",
"\n",
"# The batch evaluator runs the evaluation in batches\n",
"runner = BatchEvalRunner(\n",
" {\"faithfulness\": faithfulness_evaluator, \"relevancy\": relevancy_evaluator},\n",
" workers=32,\n",
")\n",
"\n",
"\n",
"# Define a for-loop to try different `similarity_top_k` values\n",
"for i in [2, 4, 6, 8, 10]:\n",
" # Set query engine with different number of returned chunks\n",
" query_engine = index.as_query_engine(similarity_top_k=i, llm = llm_gpt4o_mini)\n",
"\n",
" # Run the evaluation\n",
" eval_results = await runner.aevaluate_queries(query_engine, queries=batch_eval_queries)\n",
"\n",
" # Printing the results\n",
" faithfulness_score = sum(\n",
" result.passing for result in eval_results[\"faithfulness\"]\n",
" ) / len(eval_results[\"faithfulness\"])\n",
" print(f\"top_{i} faithfulness_score: {faithfulness_score}\")\n",
"\n",
" relevancy_score = sum(result.passing for result in eval_results[\"relevancy\"]) / len(\n",
" eval_results[\"relevancy\"]\n",
" )\n",
" print(f\"top_{i} relevancy_score: {relevancy_score}\")\n",
" print(\"=\"*15)\n"
]
},
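  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "The batch runner above only reports pass rates. To read the judge's reasoning for an individual answer, each evaluator can also be called on a single query/response pair. Below is a minimal sketch, assuming the `query_engine`, `faithfulness_evaluator`, and `relevancy_evaluator` objects from the previous cell; the question itself is only an illustrative example.\n",
    "\n",
    "```python\n",
    "# Illustrative single-query check (the question is made up; objects come from the cell above).\n",
    "sample_query = \"What models are included in the Code Llama family?\"\n",
    "sample_response = query_engine.query(sample_query)\n",
    "\n",
    "faithfulness_result = faithfulness_evaluator.evaluate_response(response=sample_response)\n",
    "relevancy_result = relevancy_evaluator.evaluate_response(query=sample_query, response=sample_response)\n",
    "\n",
    "print(\"Faithful:\", faithfulness_result.passing)\n",
    "print(\"Relevant:\", relevancy_result.passing)\n",
    "print(\"Judge feedback:\", relevancy_result.feedback)\n",
    "```\n"
   ]
  },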
{
"cell_type": "markdown",
"metadata": {
"id": "YmlmP2Px4THB"
},
"source": [
"### Correctness\n"
]
},
{
"cell_type": "code",
"execution_count": 24,
"metadata": {
"id": "aUulxzuh1Hwx"
},
"outputs": [],
"source": [
"from llama_index.core.evaluation import CorrectnessEvaluator\n",
"\n",
"query = (\n",
" \"Can you explain the theory of relativity proposed by Albert Einstein in\" \" detail?\"\n",
")\n",
"\n",
"reference = \"\"\"\n",
"Certainly! Albert Einstein's theory of relativity consists of two main components: special relativity and general relativity. Special relativity, published in 1905, introduced the concept that the laws of physics are the same for all non-accelerating observers and that the speed of light in a vacuum is a constant, regardless of the motion of the source or observer. It also gave rise to the famous equation E=mc², which relates energy (E) and mass (m).\n",
"\n",
"General relativity, published in 1915, extended these ideas to include the effects of gravity. According to general relativity, gravity is not a force between masses, as described by Newton's theory of gravity, but rather the result of the warping of space and time by mass and energy. Massive objects, such as planets and stars, cause a curvature in spacetime, and smaller objects follow curved paths in response to this curvature. This concept is often illustrated using the analogy of a heavy ball placed on a rubber sheet, causing it to create a depression that other objects (representing smaller masses) naturally move towards.\n",
"\n",
"In essence, general relativity provided a new understanding of gravity, explaining phenomena like the bending of light by gravity (gravitational lensing) and the precession of the orbit of Mercury. It has been confirmed through numerous experiments and observations and has become a fundamental theory in modern physics.\n",
"\"\"\"\n",
"\n",
"response = \"\"\"\n",
"Certainly! Albert Einstein's theory of relativity consists of two main components: special relativity and general relativity. Special relativity, published in 1905, introduced the concept that the laws of physics are the same for all non-accelerating observers and that the speed of light in a vacuum is a constant, regardless of the motion of the source or observer. It also gave rise to the famous equation E=mc², which relates energy (E) and mass (m).\n",
"\n",
"However, general relativity, published in 1915, extended these ideas to include the effects of magnetism. According to general relativity, gravity is not a force between masses but rather the result of the warping of space and time by magnetic fields generated by massive objects. Massive objects, such as planets and stars, create magnetic fields that cause a curvature in spacetime, and smaller objects follow curved paths in response to this magnetic curvature. This concept is often illustrated using the analogy of a heavy ball placed on a rubber sheet with magnets underneath, causing it to create a depression that other objects (representing smaller masses) naturally move towards due to magnetic attraction.\n",
"\"\"\""
]
},
{
"cell_type": "code",
"execution_count": 25,
"metadata": {
"id": "CYIjkAP74bly"
},
"outputs": [],
"source": [
"evaluator = CorrectnessEvaluator(llm=llm_gpt4o)\n",
"\n",
"result = evaluator.evaluate(query=query,response=response,reference=reference,)"
]
},
{
"cell_type": "code",
"execution_count": 26,
"metadata": {
"id": "-3b-bgvA4dAz",
"colab": {
"base_uri": "https://localhost:8080/"
},
"outputId": "84fdccf9-6fd0-402e-b10a-3158a9eb7613"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"2.0"
]
},
"metadata": {},
"execution_count": 26
}
],
"source": [
"result.score"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"id": "KNEhRQAo4dT0",
"colab": {
"base_uri": "https://localhost:8080/",
"height": 70
},
"outputId": "c39199f3-ecba-434e-b1f6-2907168dc2c8"
},
"outputs": [
{
"output_type": "execute_result",
"data": {
"text/plain": [
"'The generated answer is mostly relevant but contains significant inaccuracies. It incorrectly states that general relativity involves the effects of magnetism and magnetic fields, which is not true. General relativity deals with the warping of space and time by mass and energy, not magnetic fields. This fundamental error reduces the correctness of the answer.'"
],
"application/vnd.google.colaboratory.intrinsic+json": {
"type": "string"
}
},
"metadata": {},
"execution_count": 27
}
],
"source": [
"result.feedback"
]
},
{
"cell_type": "code",
"execution_count": 27,
"metadata": {
"id": "ZOlwVWZb49H4"
},
"outputs": [],
"source": []
}
],
"metadata": {
"colab": {
"provenance": [],
"include_colab_link": true
},
"kernelspec": {
"display_name": "Python 3",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
},
"widgets": {
"application/vnd.jupyter.widget-state+json": {
"6a5b3fec3572436f97ed97b570f15984": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_9d28cdd8504e429d85b9849c4f679085",
"IPY_MODEL_b4bbbd97b95e4e79b1923aabd512e4c7",
"IPY_MODEL_3b66c8f4087b4df5a9eb84b7dc82e440"
],
"layout": "IPY_MODEL_9639bc37437145c1af00c627da831e2e"
}
},
"9d28cdd8504e429d85b9849c4f679085": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_960a9f07924c4722b42332c9a3b233a8",
"placeholder": "",
"style": "IPY_MODEL_9dd071e120884e88b44101bd4b252342",
"value": "Parsing nodes: 100%"
}
},
"b4bbbd97b95e4e79b1923aabd512e4c7": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_911b081e2a144929a67a0ef8e425706e",
"max": 14,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_4bddc051ddc744dcb6efdd74e841bf00",
"value": 14
}
},
"3b66c8f4087b4df5a9eb84b7dc82e440": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_4ea27a5184b2446aba68157bd1cb0d2d",
"placeholder": "",
"style": "IPY_MODEL_9e87cc59ae8c4fa1b3b710b83a371590",
"value": " 14/14 [00:01<00:00, 9.81it/s]"
}
},
"9639bc37437145c1af00c627da831e2e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"960a9f07924c4722b42332c9a3b233a8": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9dd071e120884e88b44101bd4b252342": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"911b081e2a144929a67a0ef8e425706e": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4bddc051ddc744dcb6efdd74e841bf00": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"4ea27a5184b2446aba68157bd1cb0d2d": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"9e87cc59ae8c4fa1b3b710b83a371590": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"847dfcd1770b4352bc839db928f0834a": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HBoxModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HBoxModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HBoxView",
"box_style": "",
"children": [
"IPY_MODEL_d013604d2eb4432b850f451d86fc5e90",
"IPY_MODEL_8893726a2ae0488fa04b1c12ef38fd01",
"IPY_MODEL_74be1acb609041ecbecb662c1613575c"
],
"layout": "IPY_MODEL_8e728820e82542e1a4aa440e043e23c2"
}
},
"d013604d2eb4432b850f451d86fc5e90": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_b1661862e73d4b898daa82a61128a7fa",
"placeholder": "",
"style": "IPY_MODEL_d282aabfe99642a699d1aab1122a2806",
"value": "Generating embeddings: 100%"
}
},
"8893726a2ae0488fa04b1c12ef38fd01": {
"model_module": "@jupyter-widgets/controls",
"model_name": "FloatProgressModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "FloatProgressModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "ProgressView",
"bar_style": "success",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_39cbcd4e42ae4af782caf71e3529c459",
"max": 108,
"min": 0,
"orientation": "horizontal",
"style": "IPY_MODEL_4f121d46026d4435ba35448c8da3be50",
"value": 108
}
},
"74be1acb609041ecbecb662c1613575c": {
"model_module": "@jupyter-widgets/controls",
"model_name": "HTMLModel",
"model_module_version": "1.5.0",
"state": {
"_dom_classes": [],
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "HTMLModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/controls",
"_view_module_version": "1.5.0",
"_view_name": "HTMLView",
"description": "",
"description_tooltip": null,
"layout": "IPY_MODEL_bebe97dd44e94312ad185632f15caddc",
"placeholder": "",
"style": "IPY_MODEL_45d27d06f7da4f80a31a0347d77f075d",
"value": " 108/108 [00:02<00:00, 39.87it/s]"
}
},
"8e728820e82542e1a4aa440e043e23c2": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"b1661862e73d4b898daa82a61128a7fa": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"d282aabfe99642a699d1aab1122a2806": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
},
"39cbcd4e42ae4af782caf71e3529c459": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"4f121d46026d4435ba35448c8da3be50": {
"model_module": "@jupyter-widgets/controls",
"model_name": "ProgressStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "ProgressStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"bar_color": null,
"description_width": ""
}
},
"bebe97dd44e94312ad185632f15caddc": {
"model_module": "@jupyter-widgets/base",
"model_name": "LayoutModel",
"model_module_version": "1.2.0",
"state": {
"_model_module": "@jupyter-widgets/base",
"_model_module_version": "1.2.0",
"_model_name": "LayoutModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "LayoutView",
"align_content": null,
"align_items": null,
"align_self": null,
"border": null,
"bottom": null,
"display": null,
"flex": null,
"flex_flow": null,
"grid_area": null,
"grid_auto_columns": null,
"grid_auto_flow": null,
"grid_auto_rows": null,
"grid_column": null,
"grid_gap": null,
"grid_row": null,
"grid_template_areas": null,
"grid_template_columns": null,
"grid_template_rows": null,
"height": null,
"justify_content": null,
"justify_items": null,
"left": null,
"margin": null,
"max_height": null,
"max_width": null,
"min_height": null,
"min_width": null,
"object_fit": null,
"object_position": null,
"order": null,
"overflow": null,
"overflow_x": null,
"overflow_y": null,
"padding": null,
"right": null,
"top": null,
"visibility": null,
"width": null
}
},
"45d27d06f7da4f80a31a0347d77f075d": {
"model_module": "@jupyter-widgets/controls",
"model_name": "DescriptionStyleModel",
"model_module_version": "1.5.0",
"state": {
"_model_module": "@jupyter-widgets/controls",
"_model_module_version": "1.5.0",
"_model_name": "DescriptionStyleModel",
"_view_count": null,
"_view_module": "@jupyter-widgets/base",
"_view_module_version": "1.2.0",
"_view_name": "StyleView",
"description_width": ""
}
}
}
}
},
"nbformat": 4,
"nbformat_minor": 0
}