Omar Solano committed
Commit be9c1c3 • 1 Parent(s): 33ba6d1

remove files
Files changed (35)
  1. .gitattributes +0 -4
  2. .github/workflows/deploy_hf.yaml +0 -21
  3. .gitignore +0 -164
  4. README.md +0 -55
  5. notebooks/01-Basic_Tutor.ipynb +0 -291
  6. notebooks/02-Basic_RAG.ipynb +0 -1083
  7. notebooks/03-RAG_with_LlamaIndex.ipynb +0 -360
  8. notebooks/04-RAG_with_VectorStore.ipynb +0 -449
  9. notebooks/05-Improve_Prompts_+_Add_Source.ipynb +0 -1420
  10. notebooks/06-Evaluate_RAG.ipynb +0 -1491
  11. notebooks/07-RAG_Improve_Chunking.ipynb +0 -0
  12. notebooks/08-Finetune_Embedding.ipynb +0 -0
  13. notebooks/09-Better_Embedding_Model.ipynb +0 -1575
  14. notebooks/10-Adding_Reranking.ipynb +0 -1462
  15. notebooks/11-Adding_Hybrid_Search.ipynb +0 -1645
  16. notebooks/12-Improve_Query.ipynb +0 -1786
  17. notebooks/13-Adding_Router.ipynb +0 -0
  18. notebooks/14-Adding_Chat.ipynb +0 -1618
  19. notebooks/15-Use_OpenSource_Models.ipynb +0 -0
  20. notebooks/17-Using_LLMs_to_rank_chunks_as_the_Judge.ipynb +0 -830
  21. notebooks/Crawl_a_Website.ipynb +0 -574
  22. notebooks/Web_Search_API.ipynb +0 -491
  23. requirements.txt +0 -18
  24. scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/data_level0.bin +0 -3
  25. scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/header.bin +0 -3
  26. scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/index_metadata.pickle +0 -3
  27. scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/length.bin +0 -3
  28. scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/link_lists.bin +0 -3
  29. scripts/ai-tutor-db/chroma.sqlite3 +0 -3
  30. scripts/basic_tutor.py +0 -60
  31. scripts/call_openai.py +0 -79
  32. scripts/create_db.ipynb +0 -380
  33. scripts/gradio-ui.py +0 -295
  34. scripts/tutor_prompts.py +0 -100
  35. scripts/utils.py +0 -16
.gitattributes DELETED
@@ -1,4 +0,0 @@
- scripts/ai-tutor-db/** filter=lfs diff=lfs merge=lfs -text
- *.csv filter=lfs diff=lfs merge=lfs -text
- *.json filter=lfs diff=lfs merge=lfs -text
- *.jsonl filter=lfs diff=lfs merge=lfs -text
.github/workflows/deploy_hf.yaml DELETED
@@ -1,21 +0,0 @@
- name: Sync to Hugging Face hub
- on:
-   push:
-     branches: [main]
-
-   # to run this workflow manually from the Actions tab
-   workflow_dispatch:
-
- jobs:
-   sync-to-hub:
-     runs-on: ubuntu-latest
-     steps:
-       - uses: actions/checkout@v3
-         with:
-           fetch-depth: 0
-           lfs: true
-       - name: Push to hub
-         env:
-           HF_TOKEN: ${{ secrets.HF_TOKEN }}
-           HF_USERNAME: ${{ secrets.HF_USERNAME }}
-         run: git push --force https://$HF_USERNAME:$HF_TOKEN@huggingface.co/spaces/towardsai-buster/ai-tutor-chatbot main:main
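For reference, the deleted workflow's push step can be reproduced from a local checkout. A minimal sketch in Python, assuming `HF_USERNAME` and `HF_TOKEN` are already exported in the shell (the secret names and Space URL come from the workflow itself; the script is illustrative, not part of the repository):

```python
import os
import subprocess

# Read the same values the workflow injects from repository secrets.
username = os.environ["HF_USERNAME"]
token = os.environ["HF_TOKEN"]

# Reproduce the workflow's push step: force-push local main to the Space.
remote = f"https://{username}:{token}@huggingface.co/spaces/towardsai-buster/ai-tutor-chatbot"
subprocess.run(["git", "push", "--force", remote, "main:main"], check=True)
```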
.gitignore DELETED
@@ -1,164 +0,0 @@
- # Byte-compiled / optimized / DLL files
- __pycache__/
- *.py[cod]
- *$py.class
-
- # C extensions
- *.so
-
- # Distribution / packaging
- .Python
- build/
- develop-eggs/
- dist/
- downloads/
- eggs/
- .eggs/
- lib/
- lib64/
- parts/
- sdist/
- var/
- wheels/
- share/python-wheels/
- *.egg-info/
- .installed.cfg
- *.egg
- MANIFEST
-
- # PyInstaller
- # Usually these files are written by a python script from a template
- # before PyInstaller builds the exe, so as to inject date/other infos into it.
- *.manifest
- *.spec
-
- # Installer logs
- pip-log.txt
- pip-delete-this-directory.txt
-
- # Unit test / coverage reports
- htmlcov/
- .tox/
- .nox/
- .coverage
- .coverage.*
- .cache
- nosetests.xml
- coverage.xml
- *.cover
- *.py,cover
- .hypothesis/
- .pytest_cache/
- cover/
-
- # Translations
- *.mo
- *.pot
-
- # Django stuff:
- *.log
- local_settings.py
- db.sqlite3
- db.sqlite3-journal
-
- # Flask stuff:
- instance/
- .webassets-cache
-
- # Scrapy stuff:
- .scrapy
-
- # Sphinx documentation
- docs/_build/
-
- # PyBuilder
- .pybuilder/
- target/
-
- # Jupyter Notebook
- .ipynb_checkpoints
-
- # IPython
- profile_default/
- ipython_config.py
-
- # pyenv
- # For a library or package, you might want to ignore these files since the code is
- # intended to run in multiple environments; otherwise, check them in:
- # .python-version
-
- # pipenv
- # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
- # However, in case of collaboration, if having platform-specific dependencies or dependencies
- # having no cross-platform support, pipenv may install dependencies that don't work, or not
- # install all needed dependencies.
- #Pipfile.lock
-
- # poetry
- # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
- # This is especially recommended for binary packages to ensure reproducibility, and is more
- # commonly ignored for libraries.
- # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
- #poetry.lock
-
- # pdm
- # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
- #pdm.lock
- # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
- # in version control.
- # https://pdm.fming.dev/#use-with-ide
- .pdm.toml
-
- # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
- __pypackages__/
-
- # Celery stuff
- celerybeat-schedule
- celerybeat.pid
-
- # SageMath parsed files
- *.sage.py
-
- # Environments
- .env
- .venv
- env/
- venv/
- ENV/
- env.bak/
- venv.bak/
- ai-tutor/
-
- # Spyder project settings
- .spyderproject
- .spyproject
-
- # Rope project settings
- .ropeproject
-
- # mkdocs documentation
- /site
-
- # mypy
- .mypy_cache/
- .dmypy.json
- dmypy.json
-
- # Pyre type checker
- .pyre/
-
- # pytype static type analyzer
- .pytype/
-
- # Cython debug symbols
- cython_debug/
-
- # PyCharm
- # JetBrains specific template is maintained in a separate JetBrains.gitignore that can
- # be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
- # and can be added to the global gitignore or merged into this file. For a more nuclear
- # option (not recommended) you can uncomment the following to ignore the entire idea folder.
- #.idea/
-
- notebooks/mini-llama-articles/
- .vscode/
README.md DELETED
@@ -1,55 +0,0 @@
- ---
- title: AI Tutor Chatbot
- emoji: 🧑🏻‍🏫
- colorFrom: gray
- colorTo: pink
- sdk: gradio
- sdk_version: 4.19.2
- app_file: scripts/gradio-ui.py
- pinned: false
- ---
- ---
- This project creates a helpful and accurate AI Tutor chatbot, leveraging GPT-3.5-Turbo and a RAG system. We design it to address student questions about AI with precision and clarity.
-
- ### Installation
-
- 1. **Create a new Python environment:**
-
-    ```bash
-    python -m venv .venv
-    ```
-
-    This command creates a virtual environment named `.venv`.
-
- 2. **Activate the environment:**
-
-    For macOS and Linux:
-
-    ```bash
-    source .venv/bin/activate
-    ```
-
- 3. **Install the dependencies:**
-
-    ```bash
-    pip install -r requirements.txt
-    ```
-
- ### Usage
-
- 1. **Set environment variables:**
-
-    Before running the application, you need to set up your OpenAI API key and MongoDB URI as environment variables:
-
-    ```bash
-    export OPENAI_API_KEY=your_openai_api_key_here
-    export MONGODB_URI=your_mongodb_uri_here
-    ```
-
- 2. **Run the application:**
-
-    ```bash
-    python scripts/gradio-ui.py
-    ```
-
-    This command starts the Gradio interface for the AI Tutor chatbot.
notebooks/01-Basic_Tutor.ipynb DELETED
@@ -1,291 +0,0 @@
1
- {
2
- "nbformat": 4,
3
- "nbformat_minor": 0,
4
- "metadata": {
5
- "colab": {
6
- "provenance": [],
7
- "authorship_tag": "ABX9TyOUuEM41HPKH6uCJFqocvSD",
8
- "include_colab_link": true
9
- },
10
- "kernelspec": {
11
- "name": "python3",
12
- "display_name": "Python 3"
13
- },
14
- "language_info": {
15
- "name": "python"
16
- }
17
- },
18
- "cells": [
19
- {
20
- "cell_type": "markdown",
21
- "metadata": {
22
- "id": "view-in-github",
23
- "colab_type": "text"
24
- },
25
- "source": [
26
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/01-Basic_Tutor.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
27
- ]
28
- },
29
- {
30
- "cell_type": "markdown",
31
- "source": [
32
- "# Install Packages and Setup Variables"
33
- ],
34
- "metadata": {
35
- "id": "DMXyyXD0xix9"
36
- }
37
- },
38
- {
39
- "cell_type": "code",
40
- "execution_count": null,
41
- "metadata": {
42
- "id": "o4Q0N2omkAoZ",
43
- "colab": {
44
- "base_uri": "https://localhost:8080/"
45
- },
46
- "outputId": "703fe996-2acf-4e90-92c1-252041ba7d7a"
47
- },
48
- "outputs": [
49
- {
50
- "output_type": "stream",
51
- "name": "stdout",
52
- "text": [
53
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m225.4/225.4 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
54
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.7/51.7 kB\u001b[0m \u001b[31m1.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
55
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
56
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
57
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m17.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
58
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.8/77.8 kB\u001b[0m \u001b[31m6.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
59
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
60
- "\u001b[?25h"
61
- ]
62
- }
63
- ],
64
- "source": [
65
- "!pip install -q openai==1.12.0 cohere==4.47 tiktoken==0.6.0"
66
- ]
67
- },
68
- {
69
- "cell_type": "code",
70
- "source": [
71
- "import os\n",
72
- "\n",
73
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
74
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
75
- ],
76
- "metadata": {
77
- "id": "xxK7EAAvr2aT"
78
- },
79
- "execution_count": null,
80
- "outputs": []
81
- },
82
- {
83
- "cell_type": "markdown",
84
- "source": [
85
- "# Load the API client"
86
- ],
87
- "metadata": {
88
- "id": "68RbStS-xpbL"
89
- }
90
- },
91
- {
92
- "cell_type": "code",
93
- "source": [
94
- "from openai import OpenAI\n",
95
- "\n",
96
- "# Defining the \"client\" object that enables\n",
97
- "# us to connect to OpenAI API endpoints.\n",
98
- "client = OpenAI()"
99
- ],
100
- "metadata": {
101
- "id": "La8hdWqJkFkh"
102
- },
103
- "execution_count": null,
104
- "outputs": []
105
- },
106
- {
107
- "cell_type": "markdown",
108
- "source": [
109
- "# Query the API"
110
- ],
111
- "metadata": {
112
- "id": "CC-sa_uv6J2C"
113
- }
114
- },
115
- {
116
- "cell_type": "code",
117
- "source": [
118
- "# Define two questions: 1) Related to AI, 2) Unrelated topic.\n",
119
- "# These questions will be used to evaluate model's performance.\n",
120
- "QUESTION_AI = \"List a number of famous artificial intelligence frameworks?\"\n",
121
- "QUESTION_NOT_AI = \"What is the name of the highest mountain in the world and its height?\""
122
- ],
123
- "metadata": {
124
- "id": "7JRrn0uIsBfg"
125
- },
126
- "execution_count": null,
127
- "outputs": []
128
- },
129
- {
130
- "cell_type": "code",
131
- "source": [
132
- "# Defining a function to answer a question using \"gpt-3.5-turbo-16k\" model.\n",
133
- "def ask_ai_tutor(question):\n",
134
- " try:\n",
135
- " # Formulating the system prompt and condition the model to answer only AI-related questions.\n",
136
- " system_prompt = (\n",
137
- " \"You are an AI tutor specialized in answering artificial intelligence-related questions. \"\n",
138
- " \"Only answer AI-related question, else say that you cannot answer this question.\"\n",
139
- " )\n",
140
- "\n",
141
- " # Create a user prompt with the user's question\n",
142
- " prompt = f\"Please provide an informative and accurate answer to the following question.\\nQuestion: {question}\\nAnswer:\"\n",
143
- "\n",
144
- " # Call the OpenAI API\n",
145
- " response = client.chat.completions.create(\n",
146
- " model='gpt-3.5-turbo-16k',\n",
147
- " temperature=0.0,\n",
148
- " messages=[\n",
149
- " {\"role\": \"system\", \"content\": system_prompt},\n",
150
- " {\"role\": \"user\", \"content\": prompt}\n",
151
- " ]\n",
152
- " )\n",
153
- "\n",
154
- " # Return the AI's response\n",
155
- " return response.choices[0].message.content.strip()\n",
156
- "\n",
157
- " except Exception as e:\n",
158
- " return f\"An error occurred: {e}\""
159
- ],
160
- "metadata": {
161
- "id": "CcP26IauuBuV"
162
- },
163
- "execution_count": null,
164
- "outputs": []
165
- },
166
- {
167
- "cell_type": "code",
168
- "source": [
169
- "# Ask the AI-related question.\n",
170
- "RES_AI = ask_ai_tutor( QUESTION_AI )\n",
171
- "print( RES_AI )"
172
- ],
173
- "metadata": {
174
- "colab": {
175
- "base_uri": "https://localhost:8080/"
176
- },
177
- "id": "W_dbwURpufR7",
178
- "outputId": "3cd84fb9-fe6f-4561-e9ee-ed606a983629"
179
- },
180
- "execution_count": null,
181
- "outputs": [
182
- {
183
- "output_type": "stream",
184
- "name": "stdout",
185
- "text": [
186
- "Sure! There are several famous artificial intelligence frameworks that are widely used in the field. Some of the popular ones include:\n",
187
- "\n",
188
- "1. TensorFlow: Developed by Google, TensorFlow is an open-source framework that is widely used for machine learning and deep learning tasks. It provides a comprehensive ecosystem of tools, libraries, and resources for building and deploying AI models.\n",
189
- "\n",
190
- "2. PyTorch: Developed by Facebook's AI Research lab, PyTorch is another popular open-source framework for deep learning. It is known for its dynamic computational graph, which allows for more flexibility and ease of use compared to other frameworks.\n",
191
- "\n",
192
- "3. Keras: Keras is a high-level neural networks API written in Python. It is built on top of TensorFlow and provides a user-friendly interface for building and training deep learning models. Keras is known for its simplicity and ease of use, making it a popular choice for beginners.\n",
193
- "\n",
194
- "4. Caffe: Caffe is a deep learning framework developed by Berkeley AI Research (BAIR). It is known for its speed and efficiency, particularly for convolutional neural networks (CNNs). Caffe has been widely used in computer vision tasks and has a large community of users and contributors.\n",
195
- "\n",
196
- "5. Theano: Theano is a Python library that allows for efficient mathematical computations, particularly for deep learning tasks. It provides a high-level interface for defining and optimizing mathematical expressions, making it a popular choice for researchers and developers.\n",
197
- "\n",
198
- "These are just a few examples of famous AI frameworks, and there are many others available depending on specific needs and preferences.\n"
199
- ]
200
- }
201
- ]
202
- },
203
- {
204
- "cell_type": "code",
205
- "source": [
206
- "# Ask the unrelated question.\n",
207
- "RES_NOT_AI = ask_ai_tutor( QUESTION_NOT_AI )\n",
208
- "print( RES_NOT_AI )"
209
- ],
210
- "metadata": {
211
- "colab": {
212
- "base_uri": "https://localhost:8080/"
213
- },
214
- "id": "37YuVJQquhpN",
215
- "outputId": "4550c44d-2150-4cca-f23e-c89ea43e2040"
216
- },
217
- "execution_count": null,
218
- "outputs": [
219
- {
220
- "output_type": "stream",
221
- "name": "stdout",
222
- "text": [
223
- "I'm sorry, but I cannot answer that question as it is not related to artificial intelligence.\n"
224
- ]
225
- }
226
- ]
227
- },
228
- {
229
- "cell_type": "markdown",
230
- "source": [
231
- "# History"
232
- ],
233
- "metadata": {
234
- "id": "NRBgk6WToIK0"
235
- }
236
- },
237
- {
238
- "cell_type": "code",
239
- "source": [
240
- "response = client.chat.completions.create(\n",
241
- " model='gpt-3.5-turbo-16k',\n",
242
- " temperature=0.0,\n",
243
- " messages=[\n",
244
- " {\"role\": \"system\", \"content\": \"You are an AI tutor specialized in answering artificial intelligence-related questions. Only answer AI-related question, else say that you cannot answer this question.\"},\n",
245
- " {\"role\": \"user\", \"content\": \"Please provide an informative and accurate answer to the following question.\\nQuestion: List a number of famous artificial intelligence frameworks?\\nAnswer:\"},\n",
246
- " {\"role\": \"assistant\", \"content\": RES_AI},\n",
247
- " {\"role\": \"user\", \"content\": \"Please provide an informative and accurate answer to the following question.\\nQuestion: What is the name of the highest mountain in the world and its height?\\nAnswer:\"},\n",
248
- " {\"role\": \"assistant\", \"content\": RES_NOT_AI},\n",
249
- " {\"role\": \"user\", \"content\": \"Please provide an informative and accurate answer to the following question.\\nQuestion: Can you write a summary of the first suggested AI framework in the first question?\\nAnswer:\"}\n",
250
- " ]\n",
251
- " )\n",
252
- "\n",
253
- "print( response.choices[0].message.content.strip() )"
254
- ],
255
- "metadata": {
256
- "colab": {
257
- "base_uri": "https://localhost:8080/"
258
- },
259
- "id": "0_6GN2XsoEyM",
260
- "outputId": "3e66a833-a552-4bcc-9808-7b9f6b539310"
261
- },
262
- "execution_count": null,
263
- "outputs": [
264
- {
265
- "output_type": "stream",
266
- "name": "stdout",
267
- "text": [
268
- "Certainly! The first suggested AI framework in the previous question was TensorFlow. TensorFlow is an open-source framework developed by Google that has gained significant popularity in the field of artificial intelligence. It is primarily used for building and training machine learning and deep learning models.\n",
269
- "\n",
270
- "TensorFlow provides a comprehensive ecosystem of tools, libraries, and resources that make it easier for developers to create and deploy AI models. It offers a flexible architecture that allows for efficient computation on both CPUs and GPUs, enabling faster training and inference.\n",
271
- "\n",
272
- "One of the key features of TensorFlow is its ability to construct and execute computational graphs. These graphs represent the flow of data through a series of mathematical operations, making it easier to visualize and understand the model's structure. TensorFlow also supports automatic differentiation, which simplifies the process of calculating gradients for training neural networks.\n",
273
- "\n",
274
- "Moreover, TensorFlow has a vast community of users and contributors, which means there is extensive documentation, tutorials, and pre-trained models available. This makes it easier for developers to get started and leverage the collective knowledge of the community.\n",
275
- "\n",
276
- "Overall, TensorFlow is a powerful and versatile AI framework that has been widely adopted in various domains, including computer vision, natural language processing, and reinforcement learning. Its flexibility, scalability, and extensive community support make it a popular choice for both researchers and practitioners in the field of artificial intelligence.\n"
277
- ]
278
- }
279
- ]
280
- },
281
- {
282
- "cell_type": "code",
283
- "source": [],
284
- "metadata": {
285
- "id": "ET_l06LiojaN"
286
- },
287
- "execution_count": null,
288
- "outputs": []
289
- }
290
- ]
291
- }
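The deleted notebook above boils down to one pattern: a system prompt that conditions a `gpt-3.5-turbo-16k` chat completion to answer only AI-related questions. A condensed, self-contained sketch of that pattern, using only calls that appear in the notebook itself:

```python
import os
from openai import OpenAI

# The notebook sets the key via the environment before building the client.
os.environ["OPENAI_API_KEY"] = "<YOUR_OPENAI_KEY>"
client = OpenAI()

def ask_ai_tutor(question: str) -> str:
    """Answer AI-related questions only; refuse everything else."""
    system_prompt = (
        "You are an AI tutor specialized in answering artificial "
        "intelligence-related questions. Only answer AI-related question, "
        "else say that you cannot answer this question."
    )
    prompt = (
        "Please provide an informative and accurate answer to the "
        f"following question.\nQuestion: {question}\nAnswer:"
    )
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-16k",
        temperature=0.0,
        messages=[
            {"role": "system", "content": system_prompt},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content.strip()

print(ask_ai_tutor("List a number of famous artificial intelligence frameworks?"))
```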
notebooks/02-Basic_RAG.ipynb DELETED
@@ -1,1083 +0,0 @@
1
- {
2
- "nbformat": 4,
3
- "nbformat_minor": 0,
4
- "metadata": {
5
- "colab": {
6
- "provenance": [],
7
- "authorship_tag": "ABX9TyMiGemqWYAYHaqF1t8bElQ/",
8
- "include_colab_link": true
9
- },
10
- "kernelspec": {
11
- "name": "python3",
12
- "display_name": "Python 3"
13
- },
14
- "language_info": {
15
- "name": "python"
16
- },
17
- "widgets": {
18
- "application/vnd.jupyter.widget-state+json": {
19
- "46a91770024e4802acd3e64e9bc46f32": {
20
- "model_module": "@jupyter-widgets/controls",
21
- "model_name": "HBoxModel",
22
- "model_module_version": "1.5.0",
23
- "state": {
24
- "_dom_classes": [],
25
- "_model_module": "@jupyter-widgets/controls",
26
- "_model_module_version": "1.5.0",
27
- "_model_name": "HBoxModel",
28
- "_view_count": null,
29
- "_view_module": "@jupyter-widgets/controls",
30
- "_view_module_version": "1.5.0",
31
- "_view_name": "HBoxView",
32
- "box_style": "",
33
- "children": [
34
- "IPY_MODEL_613898a418d64df3b18d35083f0bb36d",
35
- "IPY_MODEL_9f9427eb6a644166906bb321f13eaf48",
36
- "IPY_MODEL_a4a232c5b5e1493897e9acdd25b8efd4"
37
- ],
38
- "layout": "IPY_MODEL_b2e91819e1c94f28b7bbad66918cb797"
39
- }
40
- },
41
- "613898a418d64df3b18d35083f0bb36d": {
42
- "model_module": "@jupyter-widgets/controls",
43
- "model_name": "HTMLModel",
44
- "model_module_version": "1.5.0",
45
- "state": {
46
- "_dom_classes": [],
47
- "_model_module": "@jupyter-widgets/controls",
48
- "_model_module_version": "1.5.0",
49
- "_model_name": "HTMLModel",
50
- "_view_count": null,
51
- "_view_module": "@jupyter-widgets/controls",
52
- "_view_module_version": "1.5.0",
53
- "_view_name": "HTMLView",
54
- "description": "",
55
- "description_tooltip": null,
56
- "layout": "IPY_MODEL_010cbcb0f1364576b15f792f4d11f605",
57
- "placeholder": "​",
58
- "style": "IPY_MODEL_f51d5da0f39e4c1885357d3d4c9964d9",
59
- "value": ""
60
- }
61
- },
62
- "9f9427eb6a644166906bb321f13eaf48": {
63
- "model_module": "@jupyter-widgets/controls",
64
- "model_name": "FloatProgressModel",
65
- "model_module_version": "1.5.0",
66
- "state": {
67
- "_dom_classes": [],
68
- "_model_module": "@jupyter-widgets/controls",
69
- "_model_module_version": "1.5.0",
70
- "_model_name": "FloatProgressModel",
71
- "_view_count": null,
72
- "_view_module": "@jupyter-widgets/controls",
73
- "_view_module_version": "1.5.0",
74
- "_view_name": "ProgressView",
75
- "bar_style": "success",
76
- "description": "",
77
- "description_tooltip": null,
78
- "layout": "IPY_MODEL_c4ceff5437e0470089c161e21488d2a7",
79
- "max": 1,
80
- "min": 0,
81
- "orientation": "horizontal",
82
- "style": "IPY_MODEL_6aafd52b0e3e4e0183b1666ad1e8a448",
83
- "value": 1
84
- }
85
- },
86
- "a4a232c5b5e1493897e9acdd25b8efd4": {
87
- "model_module": "@jupyter-widgets/controls",
88
- "model_name": "HTMLModel",
89
- "model_module_version": "1.5.0",
90
- "state": {
91
- "_dom_classes": [],
92
- "_model_module": "@jupyter-widgets/controls",
93
- "_model_module_version": "1.5.0",
94
- "_model_name": "HTMLModel",
95
- "_view_count": null,
96
- "_view_module": "@jupyter-widgets/controls",
97
- "_view_module_version": "1.5.0",
98
- "_view_name": "HTMLView",
99
- "description": "",
100
- "description_tooltip": null,
101
- "layout": "IPY_MODEL_80137fc11d4b4e518d8c8957ca5461b1",
102
- "placeholder": "​",
103
- "style": "IPY_MODEL_c4236d507b354bff830620a8bde32191",
104
- "value": " 174/? [00:31<00:00,  6.30it/s]"
105
- }
106
- },
107
- "b2e91819e1c94f28b7bbad66918cb797": {
108
- "model_module": "@jupyter-widgets/base",
109
- "model_name": "LayoutModel",
110
- "model_module_version": "1.2.0",
111
- "state": {
112
- "_model_module": "@jupyter-widgets/base",
113
- "_model_module_version": "1.2.0",
114
- "_model_name": "LayoutModel",
115
- "_view_count": null,
116
- "_view_module": "@jupyter-widgets/base",
117
- "_view_module_version": "1.2.0",
118
- "_view_name": "LayoutView",
119
- "align_content": null,
120
- "align_items": null,
121
- "align_self": null,
122
- "border": null,
123
- "bottom": null,
124
- "display": null,
125
- "flex": null,
126
- "flex_flow": null,
127
- "grid_area": null,
128
- "grid_auto_columns": null,
129
- "grid_auto_flow": null,
130
- "grid_auto_rows": null,
131
- "grid_column": null,
132
- "grid_gap": null,
133
- "grid_row": null,
134
- "grid_template_areas": null,
135
- "grid_template_columns": null,
136
- "grid_template_rows": null,
137
- "height": null,
138
- "justify_content": null,
139
- "justify_items": null,
140
- "left": null,
141
- "margin": null,
142
- "max_height": null,
143
- "max_width": null,
144
- "min_height": null,
145
- "min_width": null,
146
- "object_fit": null,
147
- "object_position": null,
148
- "order": null,
149
- "overflow": null,
150
- "overflow_x": null,
151
- "overflow_y": null,
152
- "padding": null,
153
- "right": null,
154
- "top": null,
155
- "visibility": null,
156
- "width": null
157
- }
158
- },
159
- "010cbcb0f1364576b15f792f4d11f605": {
160
- "model_module": "@jupyter-widgets/base",
161
- "model_name": "LayoutModel",
162
- "model_module_version": "1.2.0",
163
- "state": {
164
- "_model_module": "@jupyter-widgets/base",
165
- "_model_module_version": "1.2.0",
166
- "_model_name": "LayoutModel",
167
- "_view_count": null,
168
- "_view_module": "@jupyter-widgets/base",
169
- "_view_module_version": "1.2.0",
170
- "_view_name": "LayoutView",
171
- "align_content": null,
172
- "align_items": null,
173
- "align_self": null,
174
- "border": null,
175
- "bottom": null,
176
- "display": null,
177
- "flex": null,
178
- "flex_flow": null,
179
- "grid_area": null,
180
- "grid_auto_columns": null,
181
- "grid_auto_flow": null,
182
- "grid_auto_rows": null,
183
- "grid_column": null,
184
- "grid_gap": null,
185
- "grid_row": null,
186
- "grid_template_areas": null,
187
- "grid_template_columns": null,
188
- "grid_template_rows": null,
189
- "height": null,
190
- "justify_content": null,
191
- "justify_items": null,
192
- "left": null,
193
- "margin": null,
194
- "max_height": null,
195
- "max_width": null,
196
- "min_height": null,
197
- "min_width": null,
198
- "object_fit": null,
199
- "object_position": null,
200
- "order": null,
201
- "overflow": null,
202
- "overflow_x": null,
203
- "overflow_y": null,
204
- "padding": null,
205
- "right": null,
206
- "top": null,
207
- "visibility": null,
208
- "width": null
209
- }
210
- },
211
- "f51d5da0f39e4c1885357d3d4c9964d9": {
212
- "model_module": "@jupyter-widgets/controls",
213
- "model_name": "DescriptionStyleModel",
214
- "model_module_version": "1.5.0",
215
- "state": {
216
- "_model_module": "@jupyter-widgets/controls",
217
- "_model_module_version": "1.5.0",
218
- "_model_name": "DescriptionStyleModel",
219
- "_view_count": null,
220
- "_view_module": "@jupyter-widgets/base",
221
- "_view_module_version": "1.2.0",
222
- "_view_name": "StyleView",
223
- "description_width": ""
224
- }
225
- },
226
- "c4ceff5437e0470089c161e21488d2a7": {
227
- "model_module": "@jupyter-widgets/base",
228
- "model_name": "LayoutModel",
229
- "model_module_version": "1.2.0",
230
- "state": {
231
- "_model_module": "@jupyter-widgets/base",
232
- "_model_module_version": "1.2.0",
233
- "_model_name": "LayoutModel",
234
- "_view_count": null,
235
- "_view_module": "@jupyter-widgets/base",
236
- "_view_module_version": "1.2.0",
237
- "_view_name": "LayoutView",
238
- "align_content": null,
239
- "align_items": null,
240
- "align_self": null,
241
- "border": null,
242
- "bottom": null,
243
- "display": null,
244
- "flex": null,
245
- "flex_flow": null,
246
- "grid_area": null,
247
- "grid_auto_columns": null,
248
- "grid_auto_flow": null,
249
- "grid_auto_rows": null,
250
- "grid_column": null,
251
- "grid_gap": null,
252
- "grid_row": null,
253
- "grid_template_areas": null,
254
- "grid_template_columns": null,
255
- "grid_template_rows": null,
256
- "height": null,
257
- "justify_content": null,
258
- "justify_items": null,
259
- "left": null,
260
- "margin": null,
261
- "max_height": null,
262
- "max_width": null,
263
- "min_height": null,
264
- "min_width": null,
265
- "object_fit": null,
266
- "object_position": null,
267
- "order": null,
268
- "overflow": null,
269
- "overflow_x": null,
270
- "overflow_y": null,
271
- "padding": null,
272
- "right": null,
273
- "top": null,
274
- "visibility": null,
275
- "width": "20px"
276
- }
277
- },
278
- "6aafd52b0e3e4e0183b1666ad1e8a448": {
279
- "model_module": "@jupyter-widgets/controls",
280
- "model_name": "ProgressStyleModel",
281
- "model_module_version": "1.5.0",
282
- "state": {
283
- "_model_module": "@jupyter-widgets/controls",
284
- "_model_module_version": "1.5.0",
285
- "_model_name": "ProgressStyleModel",
286
- "_view_count": null,
287
- "_view_module": "@jupyter-widgets/base",
288
- "_view_module_version": "1.2.0",
289
- "_view_name": "StyleView",
290
- "bar_color": null,
291
- "description_width": ""
292
- }
293
- },
294
- "80137fc11d4b4e518d8c8957ca5461b1": {
295
- "model_module": "@jupyter-widgets/base",
296
- "model_name": "LayoutModel",
297
- "model_module_version": "1.2.0",
298
- "state": {
299
- "_model_module": "@jupyter-widgets/base",
300
- "_model_module_version": "1.2.0",
301
- "_model_name": "LayoutModel",
302
- "_view_count": null,
303
- "_view_module": "@jupyter-widgets/base",
304
- "_view_module_version": "1.2.0",
305
- "_view_name": "LayoutView",
306
- "align_content": null,
307
- "align_items": null,
308
- "align_self": null,
309
- "border": null,
310
- "bottom": null,
311
- "display": null,
312
- "flex": null,
313
- "flex_flow": null,
314
- "grid_area": null,
315
- "grid_auto_columns": null,
316
- "grid_auto_flow": null,
317
- "grid_auto_rows": null,
318
- "grid_column": null,
319
- "grid_gap": null,
320
- "grid_row": null,
321
- "grid_template_areas": null,
322
- "grid_template_columns": null,
323
- "grid_template_rows": null,
324
- "height": null,
325
- "justify_content": null,
326
- "justify_items": null,
327
- "left": null,
328
- "margin": null,
329
- "max_height": null,
330
- "max_width": null,
331
- "min_height": null,
332
- "min_width": null,
333
- "object_fit": null,
334
- "object_position": null,
335
- "order": null,
336
- "overflow": null,
337
- "overflow_x": null,
338
- "overflow_y": null,
339
- "padding": null,
340
- "right": null,
341
- "top": null,
342
- "visibility": null,
343
- "width": null
344
- }
345
- },
346
- "c4236d507b354bff830620a8bde32191": {
347
- "model_module": "@jupyter-widgets/controls",
348
- "model_name": "DescriptionStyleModel",
349
- "model_module_version": "1.5.0",
350
- "state": {
351
- "_model_module": "@jupyter-widgets/controls",
352
- "_model_module_version": "1.5.0",
353
- "_model_name": "DescriptionStyleModel",
354
- "_view_count": null,
355
- "_view_module": "@jupyter-widgets/base",
356
- "_view_module_version": "1.2.0",
357
- "_view_name": "StyleView",
358
- "description_width": ""
359
- }
360
- }
361
- }
362
- }
363
- },
364
- "cells": [
365
- {
366
- "cell_type": "markdown",
367
- "metadata": {
368
- "id": "view-in-github",
369
- "colab_type": "text"
370
- },
371
- "source": [
372
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/02-Basic_RAG.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
373
- ]
374
- },
375
- {
376
- "cell_type": "markdown",
377
- "source": [
378
- "# Install Packages and Setup Variables"
379
- ],
380
- "metadata": {
381
- "id": "4Tw3tvMs6R-Y"
382
- }
383
- },
384
- {
385
- "cell_type": "code",
386
- "execution_count": null,
387
- "metadata": {
388
- "colab": {
389
- "base_uri": "https://localhost:8080/"
390
- },
391
- "id": "HaB4G9zr0BYm",
392
- "outputId": "2a76e676-6fae-44df-ae8c-e4869bfbbc2d"
393
- },
394
- "outputs": [
395
- {
396
- "output_type": "stream",
397
- "name": "stdout",
398
- "text": [
399
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m225.4/225.4 kB\u001b[0m \u001b[31m2.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
400
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.7/51.7 kB\u001b[0m \u001b[31m3.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
401
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m17.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
402
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
403
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m17.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
404
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.8/77.8 kB\u001b[0m \u001b[31m3.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
405
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m5.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
406
- "\u001b[?25h"
407
- ]
408
- }
409
- ],
410
- "source": [
411
- "!pip install -q openai==1.12.0 cohere==4.47 tiktoken==0.6.0"
412
- ]
413
- },
414
- {
415
- "cell_type": "code",
416
- "source": [
417
- "import os\n",
418
- "\n",
419
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
420
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
421
- ],
422
- "metadata": {
423
- "id": "MYvUA6CF2Le6"
424
- },
425
- "execution_count": null,
426
- "outputs": []
427
- },
428
- {
429
- "cell_type": "code",
430
- "source": [
431
- "# False: Generate the embedding for the dataset. (Associated cost with using OpenAI endpoint)\n",
432
- "# True: Load the dataset that already has the embedding vectors.\n",
433
- "load_embedding = False"
434
- ],
435
- "metadata": {
436
- "id": "0ViVXXIqXBai"
437
- },
438
- "execution_count": null,
439
- "outputs": []
440
- },
441
- {
442
- "cell_type": "markdown",
443
- "source": [
444
- "# Load Dataset"
445
- ],
446
- "metadata": {
447
- "id": "D8Nzx-cN_bDz"
448
- }
449
- },
450
- {
451
- "cell_type": "markdown",
452
- "source": [
453
- "## Download Dataset (JSON)"
454
- ],
455
- "metadata": {
456
- "id": "5JpI7GiZ--Gw"
457
- }
458
- },
459
- {
460
- "cell_type": "markdown",
461
- "source": [
462
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model."
463
- ],
464
- "metadata": {
465
- "id": "NT68BDYt-GkG"
466
- }
467
- },
468
- {
469
- "cell_type": "code",
470
- "source": [
471
- "!wget https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
472
- "!wget https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles-with_embeddings.csv"
473
- ],
474
- "metadata": {
475
- "colab": {
476
- "base_uri": "https://localhost:8080/"
477
- },
478
- "id": "p6NEJT9S2OoH",
479
- "outputId": "fd3aa19c-a644-4635-9838-2c20526c4da2"
480
- },
481
- "execution_count": null,
482
- "outputs": [
483
- {
484
- "output_type": "stream",
485
- "name": "stdout",
486
- "text": [
487
- "--2024-03-20 16:18:39-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
488
- "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
489
- "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
490
- "HTTP request sent, awaiting response... 200 OK\n",
491
- "Length: 173646 (170K) [text/plain]\n",
492
- "Saving to: ‘mini-llama-articles.csv’\n",
493
- "\n",
494
- "\rmini-llama-articles 0%[ ] 0 --.-KB/s \rmini-llama-articles 100%[===================>] 169.58K --.-KB/s in 0.02s \n",
495
- "\n",
496
- "2024-03-20 16:18:40 (6.91 MB/s) - ‘mini-llama-articles.csv’ saved [173646/173646]\n",
497
- "\n",
498
- "--2024-03-20 16:18:40-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles-with_embeddings.csv\n",
499
- "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
500
- "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
501
- "HTTP request sent, awaiting response... 200 OK\n",
502
- "Length: 11868176 (11M) [text/plain]\n",
503
- "Saving to: ‘mini-llama-articles-with_embeddings.csv’\n",
504
- "\n",
505
- "mini-llama-articles 100%[===================>] 11.32M --.-KB/s in 0.1s \n",
506
- "\n",
507
- "2024-03-20 16:18:40 (103 MB/s) - ‘mini-llama-articles-with_embeddings.csv’ saved [11868176/11868176]\n",
508
- "\n"
509
- ]
510
- }
511
- ]
512
- },
513
- {
514
- "cell_type": "markdown",
515
- "source": [
516
- "## Read File"
517
- ],
518
- "metadata": {
519
- "id": "oYDd03Qn_clh"
520
- }
521
- },
522
- {
523
- "cell_type": "code",
524
- "source": [
525
- "# Split the input text into chunks of specified size.\n",
526
- "def split_into_chunks(text, chunk_size=1024):\n",
527
- " chunks = []\n",
528
- " for i in range(0, len(text), chunk_size):\n",
529
- " chunks.append( text[i:i+chunk_size] )\n",
530
- "\n",
531
- " return chunks"
532
- ],
533
- "metadata": {
534
- "id": "_bfhs5NMYr4N"
535
- },
536
- "execution_count": null,
537
- "outputs": []
538
- },
539
- {
540
- "cell_type": "code",
541
- "source": [
542
- "import csv\n",
543
- "\n",
544
- "chunks = []\n",
545
- "\n",
546
- "# Load the file as a CSV\n",
547
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
548
- " csv_reader = csv.reader(file)\n",
549
- "\n",
550
- " for idx, row in enumerate( csv_reader ):\n",
551
- " if idx == 0: continue; # Skip header row\n",
552
- " chunks.extend( split_into_chunks(row[1]) )"
553
- ],
554
- "metadata": {
555
- "id": "UcQ7Ge_XCuXa"
556
- },
557
- "execution_count": null,
558
- "outputs": []
559
- },
560
- {
561
- "cell_type": "code",
562
- "source": [
563
- "import pandas as pd\n",
564
- "\n",
565
- "# Convert the JSON list to a Pandas Dataframe\n",
566
- "df = pd.DataFrame(chunks, columns=['chunk'])\n",
567
- "\n",
568
- "df.keys()"
569
- ],
570
- "metadata": {
571
- "colab": {
572
- "base_uri": "https://localhost:8080/"
573
- },
574
- "id": "JKdFSOb0NXjx",
575
- "outputId": "ce43c97f-2083-49b5-837d-62cc427fe848"
576
- },
577
- "execution_count": null,
578
- "outputs": [
579
- {
580
- "output_type": "execute_result",
581
- "data": {
582
- "text/plain": [
583
- "Index(['chunk'], dtype='object')"
584
- ]
585
- },
586
- "metadata": {},
587
- "execution_count": 8
588
- }
589
- ]
590
- },
591
- {
592
- "cell_type": "markdown",
593
- "source": [
594
- "# Generate Embedding"
595
- ],
596
- "metadata": {
597
- "id": "21pFDgNdW9rO"
598
- }
599
- },
600
- {
601
- "cell_type": "code",
602
- "source": [
603
- "from openai import OpenAI\n",
604
- "\n",
605
- "client = OpenAI()\n",
606
- "\n",
607
- "# Defining a function that converts a text to embedding vector using OpenAI's Ada model.\n",
608
- "def get_embedding(text):\n",
609
- " try:\n",
610
- " # Remove newlines\n",
611
- " text = text.replace(\"\\n\", \" \")\n",
612
- " res = client.embeddings.create(input = [text], model=\"text-embedding-ada-002\")\n",
613
- "\n",
614
- " return res.data[0].embedding\n",
615
- "\n",
616
- " except:\n",
617
- " return None"
618
- ],
619
- "metadata": {
620
- "id": "AfS9w9eQAKyu"
621
- },
622
- "execution_count": null,
623
- "outputs": []
624
- },
625
- {
626
- "cell_type": "code",
627
- "source": [
628
- "from tqdm.notebook import tqdm\n",
629
- "import numpy as np\n",
630
- "\n",
631
- "# Generate embedding\n",
632
- "if not load_embedding:\n",
633
- " print(\"Generating embeddings...\")\n",
634
- " embeddings = []\n",
635
- " for index, row in tqdm( df.iterrows() ):\n",
636
- " # df.at[index, 'embedding'] = get_embedding( row['chunk'] )\n",
637
- " embeddings.append( get_embedding( row['chunk'] ) )\n",
638
- "\n",
639
- " embeddings_values = pd.Series(embeddings)\n",
640
- " df.insert(loc=1, column='embedding', value=embeddings_values)\n",
641
- "\n",
642
- "# Or, load the embedding from the file.\n",
643
- "else:\n",
644
- " print(\"Loaded the embedding file.\")\n",
645
- " # Load the file as a CSV\n",
646
- " df = pd.read_csv('mini-llama-articles-with_embeddings.csv')\n",
647
- " # Convert embedding column to an array\n",
648
- " df['embedding'] = df['embedding'].apply(lambda x: np.array(eval(x)), 0)"
649
- ],
650
- "metadata": {
651
- "colab": {
652
- "base_uri": "https://localhost:8080/",
653
- "height": 67,
654
- "referenced_widgets": [
655
- "46a91770024e4802acd3e64e9bc46f32",
656
- "613898a418d64df3b18d35083f0bb36d",
657
- "9f9427eb6a644166906bb321f13eaf48",
658
- "a4a232c5b5e1493897e9acdd25b8efd4",
659
- "b2e91819e1c94f28b7bbad66918cb797",
660
- "010cbcb0f1364576b15f792f4d11f605",
661
- "f51d5da0f39e4c1885357d3d4c9964d9",
662
- "c4ceff5437e0470089c161e21488d2a7",
663
- "6aafd52b0e3e4e0183b1666ad1e8a448",
664
- "80137fc11d4b4e518d8c8957ca5461b1",
665
- "c4236d507b354bff830620a8bde32191"
666
- ]
667
- },
668
- "id": "qC6aeFr3Rmi2",
669
- "outputId": "7f54333f-fcb9-44ce-d4a0-94a9a8d822d5"
670
- },
671
- "execution_count": null,
672
- "outputs": [
673
- {
674
- "output_type": "stream",
675
- "name": "stdout",
676
- "text": [
677
- "Generating embeddings...\n"
678
- ]
679
- },
680
- {
681
- "output_type": "display_data",
682
- "data": {
683
- "text/plain": [
684
- "0it [00:00, ?it/s]"
685
- ],
686
- "application/vnd.jupyter.widget-view+json": {
687
- "version_major": 2,
688
- "version_minor": 0,
689
- "model_id": "46a91770024e4802acd3e64e9bc46f32"
690
- }
691
- },
692
- "metadata": {}
693
- }
694
- ]
695
- },
696
- {
697
- "cell_type": "code",
698
- "source": [
699
- "# df.to_csv('mini-llama-articles-with_embeddings.csv')"
700
- ],
701
- "metadata": {
702
- "id": "jyX9M_n9o2ve"
703
- },
704
- "execution_count": null,
705
- "outputs": []
706
- },
707
- {
708
- "cell_type": "markdown",
709
- "source": [
710
- "# User Question"
711
- ],
712
- "metadata": {
713
- "id": "E_qrXwImXrXJ"
714
- }
715
- },
716
- {
717
- "cell_type": "code",
718
- "source": [
719
- "# Define the user question, and convert it to embedding.\n",
720
- "QUESTION = \"How many parameters LLaMA2 model has?\"\n",
721
- "QUESTION_emb = get_embedding( QUESTION )\n",
722
- "\n",
723
- "len( QUESTION_emb )"
724
- ],
725
- "metadata": {
726
- "colab": {
727
- "base_uri": "https://localhost:8080/"
728
- },
729
- "id": "xGTa7cqCX97q",
730
- "outputId": "6ae836e3-1a65-4447-b732-88758378e9dd"
731
- },
732
- "execution_count": null,
733
- "outputs": [
734
- {
735
- "output_type": "execute_result",
736
- "data": {
737
- "text/plain": [
738
- "1536"
739
- ]
740
- },
741
- "metadata": {},
742
- "execution_count": 15
743
- }
744
- ]
745
- },
746
- {
747
- "cell_type": "markdown",
748
- "source": [
749
- "# Test Cosine Similarity"
750
- ],
751
- "metadata": {
752
- "id": "BXNzNWrJYWhU"
753
- }
754
- },
755
- {
756
- "cell_type": "markdown",
757
- "source": [
758
- "Calculating the similarity of embedding representations can help us to find pieces of text that are close to each other. In the following sample you see how the Cosine Similarity metric can identify which sentence could be a possible answer for the given user question. Obviously, the unrelated answer will score lower."
759
- ],
760
- "metadata": {
761
- "id": "Vxaq-FgLIhIj"
762
- }
763
- },
764
- {
765
- "cell_type": "code",
766
- "source": [
767
- "BAD_SOURCE_emb = get_embedding( \"The sky is blue.\" )\n",
768
- "GOOD_SOURCE_emb = get_embedding( \"LLaMA2 model has a total of 2B parameters.\" )"
769
- ],
770
- "metadata": {
771
- "id": "LqDWcPd4b-ZI"
772
- },
773
- "execution_count": null,
774
- "outputs": []
775
- },
776
- {
777
- "cell_type": "code",
778
- "source": [
779
- "from sklearn.metrics.pairwise import cosine_similarity\n",
780
- "\n",
781
- "# A sample that how a good piece of text can achieve high similarity score compared\n",
782
- "# to a completely unrelated text.\n",
783
- "print(\"> Bad Response Score:\", cosine_similarity([QUESTION_emb], [BAD_SOURCE_emb]) )\n",
784
- "print(\"> Good Response Score:\", cosine_similarity([QUESTION_emb], [GOOD_SOURCE_emb]) )"
785
- ],
786
- "metadata": {
787
- "colab": {
788
- "base_uri": "https://localhost:8080/"
789
- },
790
- "id": "OI00eN86YZKB",
791
- "outputId": "0d06c9ea-7de2-48a0-e6d8-3fc6e428914b"
792
- },
793
- "execution_count": null,
794
- "outputs": [
795
- {
796
- "output_type": "stream",
797
- "name": "stdout",
798
- "text": [
799
- "> Bad Response Score: [[0.69953438]]\n",
800
- "> Good Response Score: [[0.93126147]]\n"
801
- ]
802
- }
803
- ]
804
- },
805
- {
806
- "cell_type": "markdown",
807
- "source": [
808
- "# Calculate Cosine Similarities"
809
- ],
810
- "metadata": {
811
- "id": "kdJlEtaaJC4I"
812
- }
813
- },
814
- {
815
- "cell_type": "code",
816
- "source": [
817
- "# The similarity between the questions and each part of the essay.\n",
818
- "cosine_similarities = cosine_similarity( [QUESTION_emb], df['embedding'].tolist() )\n",
819
- "\n",
820
- "print( cosine_similarities )"
821
- ],
822
- "metadata": {
823
- "colab": {
824
- "base_uri": "https://localhost:8080/"
825
- },
826
- "id": "PNPN7OAXemmH",
827
- "outputId": "54beed07-04de-4696-b513-f49a935d6820"
828
- },
829
- "execution_count": null,
830
- "outputs": [
831
- {
832
- "output_type": "stream",
833
- "name": "stdout",
834
- "text": [
835
- "[[0.82047387 0.79858187 0.74135248 0.73226232 0.72406104 0.75608299\n",
836
- " 0.76808965 0.77621683 0.80498431 0.71399955 0.69822549 0.67532971\n",
837
- " 0.72473021 0.73449361 0.69998132 0.73749561 0.68490681 0.75076836\n",
838
- " 0.72540663 0.70675593 0.76047822 0.73849418 0.78103858 0.75189435\n",
839
- " 0.73619013 0.76962672 0.71289635 0.76996122 0.7827543 0.77959332\n",
840
- " 0.82716952 0.77719335 0.80172766 0.76301732 0.78111546 0.75179235\n",
841
- " 0.74741505 0.7576328 0.78998865 0.77283347 0.79180172 0.78170323\n",
842
- " 0.80264132 0.79923073 0.76146584 0.75199024 0.8341403 0.74460259\n",
843
- " 0.76259332 0.73693499 0.78469623 0.81698455 0.8254561 0.77921093\n",
844
- " 0.75351863 0.79319721 0.73098248 0.71716001 0.73210099 0.74684248\n",
845
- " 0.75760574 0.71070101 0.71507394 0.70847896 0.72395535 0.77801292\n",
846
- " 0.75446732 0.75100258 0.7361131 0.78430831 0.74170516 0.71862961\n",
847
- " 0.76792911 0.76471996 0.78551313 0.80846857 0.79231644 0.79505895\n",
848
- " 0.76910825 0.78341548 0.74952152 0.7849115 0.80407507 0.82641741\n",
849
- " 0.77074756 0.7356681 0.77452715 0.76224969 0.79906149 0.84520641\n",
850
- " 0.82301383 0.8362749 0.81676624 0.8035085 0.80532594 0.81186134\n",
851
- " 0.69082726 0.72587048 0.70070204 0.7155819 0.71758016 0.74945217\n",
852
- " 0.72555195 0.7356198 0.73695714 0.75553407 0.77502366 0.71438692\n",
853
- " 0.75846916 0.79831901 0.78600515 0.7601161 0.78696534 0.80404804\n",
854
- " 0.85209549 0.77037783 0.76985195 0.75062239 0.69339426 0.7108229\n",
855
- " 0.72051435 0.75137579 0.71168549 0.72276919 0.77669437 0.7726572\n",
856
- " 0.74774188 0.73290677 0.70262553 0.72831247 0.7525444 0.7495277\n",
857
- " 0.75188765 0.71491865 0.74460111 0.73599028 0.76314747 0.71318814\n",
858
- " 0.70723754 0.73098562 0.72745902 0.76077793 0.72614335 0.72636887\n",
859
- " 0.77770561 0.69882456 0.72396024 0.70349095 0.70541201 0.76424393\n",
860
- " 0.72785191 0.74371405 0.67802651 0.7353597 0.69916559 0.70605271\n",
861
- " 0.71477477 0.71021711 0.77423355 0.70897606 0.74946665 0.70971011\n",
862
- " 0.72360056 0.72906996 0.76590153 0.74469991 0.73669136 0.71547661\n",
863
- " 0.6958848 0.71459824 0.74863434 0.71430407 0.75165385 0.74221148]]\n"
864
- ]
865
- }
866
- ]
867
- },
868
- {
869
- "cell_type": "code",
870
- "source": [
871
- "import numpy as np\n",
872
- "\n",
873
- "number_of_chunks_to_retrieve = 3\n",
874
- "\n",
875
- "# Sort the scores\n",
876
- "highest_index = np.argmax( cosine_similarities )\n",
877
- "\n",
878
- "# Pick the N highest scored chunks\n",
879
- "indices = np.argsort(cosine_similarities[0])[::-1][:number_of_chunks_to_retrieve]\n",
880
- "print( indices )"
881
- ],
882
- "metadata": {
883
- "colab": {
884
- "base_uri": "https://localhost:8080/"
885
- },
886
- "id": "1-XI1_7mhlw4",
887
- "outputId": "9598da10-ab61-45e9-e0bb-3e1d7046b657"
888
- },
889
- "execution_count": null,
890
- "outputs": [
891
- {
892
- "output_type": "stream",
893
- "name": "stdout",
894
- "text": [
895
- "[114 89 91]\n"
896
- ]
897
- }
898
- ]
899
- },
900
- {
901
- "cell_type": "code",
902
- "source": [
903
- "# Look at the highest scored retrieved pieces of text\n",
904
- "for idx, item in enumerate( df.chunk[indices] ):\n",
905
- " print(f\"> Chunk {idx+1}\")\n",
906
- " print(item)\n",
907
- " print(\"----\")"
908
- ],
909
- "metadata": {
910
- "colab": {
911
- "base_uri": "https://localhost:8080/"
912
- },
913
- "id": "JPmhCb9kfB0w",
914
- "outputId": "5089b207-a65a-4856-c065-56b3b9bbba72"
915
- },
916
- "execution_count": null,
917
- "outputs": [
918
- {
919
- "output_type": "stream",
920
- "name": "stdout",
921
- "text": [
922
- "> Chunk 1\n",
923
- "by Meta that ventures into both the AI and academic spaces. The model aims to help researchers, scientists, and engineers advance their work in exploring AI applications. It will be released under a non-commercial license to prevent misuse, and access will be granted to academic researchers, individuals, and organizations affiliated with the government, civil society, academia, and industry research facilities on a selective case-by-case basis. The sharing of codes and weights allows other researchers to test new approaches in LLMs. The LLaMA models have a range of 7 billion to 65 billion parameters. LLaMA-65B can be compared to DeepMind's Chinchilla and Google's PaLM. Publicly available unlabeled data was used to train these models, and training smaller foundational models require less computing power and resources. LLaMA 65B and 33B have been trained on 1.4 trillion tokens in 20 different languages, and according to the Facebook Artificial Intelligence Research (FAIR) team, the model's performance varies ac\n",
924
- "----\n",
925
- "> Chunk 2\n",
926
- "I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annota\n",
927
- "----\n",
928
- "> Chunk 3\n",
929
- "vely address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. \n",
930
- "----\n"
931
- ]
932
- }
933
- ]
934
- },
935
- {
936
- "cell_type": "markdown",
937
- "source": [
938
- "# Augment the Prompt"
939
- ],
940
- "metadata": {
941
- "id": "7uvQACqAkHg4"
942
- }
943
- },
944
- {
945
- "cell_type": "code",
946
- "source": [
947
- "# Use the OpenAI API to answer the questions based on the retrieved pieces of text.\n",
948
- "try:\n",
949
- " # Formulating the system prompt and condition the model to answer only AI-related questions.\n",
950
- " system_prompt = (\n",
951
- " \"You are an assistant and expert in answering questions from a chunks of content. \"\n",
952
- " \"Only answer AI-related question, else say that you cannot answer this question.\"\n",
953
- " )\n",
954
- "\n",
955
- " # Create a user prompt with the user's question\n",
956
- " prompt = (\n",
957
- " \"Read the following informations that might contain the context you require to answer the question. You can use the informations starting from the <START_OF_CONTEXT> tag and end with the <END_OF_CONTEXT> tag. Here is the content:\\n\\n<START_OF_CONTEXT>\\n{}\\n<END_OF_CONTEXT>\\n\\n\"\n",
958
- " \"Please provide an informative and accurate answer to the following question based on the avaiable context. Be concise and take your time. \\nQuestion: {}\\nAnswer:\"\n",
959
- " )\n",
960
- " # Add the retrieved pieces of text to the prompt.\n",
961
- " prompt = prompt.format( \"\".join( df.chunk[indices] ), QUESTION )\n",
962
- "\n",
963
- " # Call the OpenAI API\n",
964
- " response = client.chat.completions.create(\n",
965
- " model='gpt-3.5-turbo-16k',\n",
966
- " temperature=0.0,\n",
967
- " messages=[\n",
968
- " {\"role\": \"system\", \"content\": system_prompt},\n",
969
- " {\"role\": \"user\", \"content\": prompt}\n",
970
- " ]\n",
971
- " )\n",
972
- "\n",
973
- " # Return the AI's response\n",
974
- " res = response.choices[0].message.content.strip()\n",
975
- "\n",
976
- "except Exception as e:\n",
977
- " print( f\"An error occurred: {e}\" )"
978
- ],
979
- "metadata": {
980
- "id": "MXRdzta5kJ3V"
981
- },
982
- "execution_count": null,
983
- "outputs": []
984
- },
985
- {
986
- "cell_type": "code",
987
- "source": [
988
- "print( res )"
989
- ],
990
- "metadata": {
991
- "colab": {
992
- "base_uri": "https://localhost:8080/"
993
- },
994
- "id": "9tBvJ8oMucha",
995
- "outputId": "418c0220-c2ee-43cf-a9bc-0ea755f7a04e"
996
- },
997
- "execution_count": null,
998
- "outputs": [
999
- {
1000
- "output_type": "stream",
1001
- "name": "stdout",
1002
- "text": [
1003
- "The LLaMA2 model has four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.\n"
1004
- ]
1005
- }
1006
- ]
1007
- },
1008
- {
1009
- "cell_type": "markdown",
1010
- "source": [
1011
- "# Without Augmentation"
1012
- ],
1013
- "metadata": {
1014
- "id": "pW-BNCAC2JzE"
1015
- }
1016
- },
1017
- {
1018
- "cell_type": "markdown",
1019
- "source": [
1020
- "Test the OpenAI API to answer the same question without the addition of retrieved documents. Basically, the LLM will use its knowledge to answer the question."
1021
- ],
1022
- "metadata": {
1023
- "id": "tr5zXEGIMwJu"
1024
- }
1025
- },
1026
- {
1027
- "cell_type": "code",
1028
- "source": [
1029
- "# Formulating the system prompt\n",
1030
- "system_prompt = (\n",
1031
- " \"You are an assistant and expert in answering questions.\"\n",
1032
- ")\n",
1033
- "\n",
1034
- "# Combining the system prompt with the user's question\n",
1035
- "prompt = (\n",
1036
- " \"Be concise and take your time to answer the following question. \\nQuestion: {}\\nAnswer:\"\n",
1037
- ")\n",
1038
- "prompt = prompt.format( QUESTION )\n",
1039
- "\n",
1040
- "# Call the OpenAI API\n",
1041
- "response = client.chat.completions.create(\n",
1042
- " model='gpt-3.5-turbo-16k',\n",
1043
- " temperature=.9,\n",
1044
- " messages=[\n",
1045
- " {\"role\": \"system\", \"content\": system_prompt},\n",
1046
- " {\"role\": \"user\", \"content\": prompt}\n",
1047
- " ]\n",
1048
- ")\n",
1049
- "\n",
1050
- "# Return the AI's response\n",
1051
- "res = response.choices[0].message.content.strip()"
1052
- ],
1053
- "metadata": {
1054
- "id": "RuyXjzZyuecE"
1055
- },
1056
- "execution_count": null,
1057
- "outputs": []
1058
- },
1059
- {
1060
- "cell_type": "code",
1061
- "source": [
1062
- "print( res )"
1063
- ],
1064
- "metadata": {
1065
- "colab": {
1066
- "base_uri": "https://localhost:8080/"
1067
- },
1068
- "id": "YAy34tPTzGbh",
1069
- "outputId": "54041329-dd5f-4cdd-db38-f1440ae77181"
1070
- },
1071
- "execution_count": null,
1072
- "outputs": [
1073
- {
1074
- "output_type": "stream",
1075
- "name": "stdout",
1076
- "text": [
1077
- "The LLaMA2 model has a total of [insert number] parameters.\n"
1078
- ]
1079
- }
1080
- ]
1081
- }
1082
- ]
1083
- }
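
For readers skimming this deletion: the pattern the notebook above implemented (retrieve relevant chunks, wrap them in delimiter tags, and ask the model) condenses into a short standalone sketch. This is a hedged reconstruction rather than the notebook's exact code; `answer_with_context` and `retrieved_chunks` are hypothetical names standing in for its dataframe-based retrieval step, and only the OpenAI chat call mirrors the deleted cells.

```python
from openai import OpenAI  # openai>=1.x client, as pinned throughout these notebooks

client = OpenAI()  # reads OPENAI_API_KEY from the environment

def answer_with_context(question: str, retrieved_chunks: list[str]) -> str:
    """Minimal RAG sketch: wrap retrieved text in delimiter tags and ask the model."""
    context = "".join(retrieved_chunks)
    prompt = (
        "Use only the information between the <START_OF_CONTEXT> and "
        f"<END_OF_CONTEXT> tags to answer.\n\n<START_OF_CONTEXT>\n{context}\n"
        f"<END_OF_CONTEXT>\n\nQuestion: {question}\nAnswer:"
    )
    response = client.chat.completions.create(
        model="gpt-3.5-turbo-16k",
        temperature=0.0,  # deterministic output for grounded question answering
        messages=[
            {"role": "system", "content": "Answer only from the provided context."},
            {"role": "user", "content": prompt},
        ],
    )
    return response.choices[0].message.content.strip()
```

As the final output above shows, without the retrieved context the same model falls back on its parametric knowledge and produces a placeholder instead of the actual parameter counts.
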
notebooks/03-RAG_with_LlamaIndex.ipynb DELETED
@@ -1,360 +0,0 @@
1
- {
2
- "nbformat": 4,
3
- "nbformat_minor": 0,
4
- "metadata": {
5
- "colab": {
6
- "provenance": [],
7
- "authorship_tag": "ABX9TyO9EXKHngvJa9fUydE3Tlen",
8
- "include_colab_link": true
9
- },
10
- "kernelspec": {
11
- "name": "python3",
12
- "display_name": "Python 3"
13
- },
14
- "language_info": {
15
- "name": "python"
16
- }
17
- },
18
- "cells": [
19
- {
20
- "cell_type": "markdown",
21
- "metadata": {
22
- "id": "view-in-github",
23
- "colab_type": "text"
24
- },
25
- "source": [
26
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/03-RAG_with_LlamaIndex.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
27
- ]
28
- },
29
- {
30
- "cell_type": "markdown",
31
- "source": [
32
- "# Install Packages and Setup Variables"
33
- ],
34
- "metadata": {
35
- "id": "v9bpz99INAc1"
36
- }
37
- },
38
- {
39
- "cell_type": "code",
40
- "execution_count": null,
41
- "metadata": {
42
- "colab": {
43
- "base_uri": "https://localhost:8080/"
44
- },
45
- "id": "BeuFJKlj9jKz",
46
- "outputId": "a14a78f4-e43e-4aef-bc69-4ced559df34e"
47
- },
48
- "outputs": [
49
- {
50
- "output_type": "stream",
51
- "name": "stdout",
52
- "text": [
53
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m226.7/226.7 kB\u001b[0m \u001b[31m2.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
54
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m52.3/52.3 kB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
55
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m9.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
56
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m26.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
57
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m21.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
58
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
59
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m35.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
60
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m3.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
61
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
62
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m136.0/136.0 kB\u001b[0m \u001b[31m10.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
63
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.9/3.9 MB\u001b[0m \u001b[31m27.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
64
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m290.4/290.4 kB\u001b[0m \u001b[31m22.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
65
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m30.8/30.8 MB\u001b[0m \u001b[31m32.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
66
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.4/49.4 kB\u001b[0m \u001b[31m2.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
67
- "\u001b[?25h"
68
- ]
69
- }
70
- ],
71
- "source": [
72
- "!pip install -q llama-index==0.10.30 openai==1.12.0 cohere==4.47 tiktoken==0.6.0"
73
- ]
74
- },
75
- {
76
- "cell_type": "code",
77
- "source": [
78
- "import os\n",
79
- "\n",
80
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
81
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
82
- ],
83
- "metadata": {
84
- "id": "XuzgSNqcABpV"
85
- },
86
- "execution_count": null,
87
- "outputs": []
88
- },
89
- {
90
- "cell_type": "markdown",
91
- "source": [
92
- "# Load Dataset"
93
- ],
94
- "metadata": {
95
- "id": "f5eV5EnvNCMM"
96
- }
97
- },
98
- {
99
- "cell_type": "markdown",
100
- "source": [
101
- "## Download"
102
- ],
103
- "metadata": {
104
- "id": "q-7mRQ-mNJlm"
105
- }
106
- },
107
- {
108
- "cell_type": "markdown",
109
- "source": [
110
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model."
111
- ],
112
- "metadata": {
113
- "id": "3PsdOdMUNmEi"
114
- }
115
- },
116
- {
117
- "cell_type": "code",
118
- "source": [
119
- "!wget https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
120
- ],
121
- "metadata": {
122
- "colab": {
123
- "base_uri": "https://localhost:8080/"
124
- },
125
- "id": "3ImRCP7pACaI",
126
- "outputId": "c782f06a-5fcb-4134-e197-e2a9c3193ce9"
127
- },
128
- "execution_count": null,
129
- "outputs": [
130
- {
131
- "output_type": "stream",
132
- "name": "stdout",
133
- "text": [
134
- "--2024-04-09 18:54:34-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
135
- "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
136
- "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
137
- "HTTP request sent, awaiting response... 200 OK\n",
138
- "Length: 173646 (170K) [text/plain]\n",
139
- "Saving to: β€˜mini-llama-articles.csv’\n",
140
- "\n",
141
- "mini-llama-articles 100%[===================>] 169.58K --.-KB/s in 0.09s \n",
142
- "\n",
143
- "2024-04-09 18:54:35 (1.89 MB/s) - β€˜mini-llama-articles.csv’ saved [173646/173646]\n",
144
- "\n"
145
- ]
146
- }
147
- ]
148
- },
149
- {
150
- "cell_type": "markdown",
151
- "source": [
152
- "## Read File"
153
- ],
154
- "metadata": {
155
- "id": "bZZLK_wyEc-L"
156
- }
157
- },
158
- {
159
- "cell_type": "code",
160
- "source": [
161
- "import csv\n",
162
- "\n",
163
- "rows = []\n",
164
- "\n",
165
- "# Load the CSV file\n",
166
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
167
- " csv_reader = csv.reader(file)\n",
168
- "\n",
169
- " for idx, row in enumerate( csv_reader ):\n",
170
- " if idx == 0: continue; # Skip header row\n",
171
- " rows.append( row )\n",
172
- "\n",
173
- "# The number of characters in the dataset.\n",
174
- "print( \"number of articles:\", len( rows ) )"
175
- ],
176
- "metadata": {
177
- "colab": {
178
- "base_uri": "https://localhost:8080/"
179
- },
180
- "id": "miUqycqAEfr7",
181
- "outputId": "911985c6-6884-48ff-fa24-869d44a1a012"
182
- },
183
- "execution_count": null,
184
- "outputs": [
185
- {
186
- "output_type": "stream",
187
- "name": "stdout",
188
- "text": [
189
- "number of articles: 14\n"
190
- ]
191
- }
192
- ]
193
- },
194
- {
195
- "cell_type": "markdown",
196
- "source": [
197
- "# Generate Embedding"
198
- ],
199
- "metadata": {
200
- "id": "f86yksB9K571"
201
- }
202
- },
203
- {
204
- "cell_type": "code",
205
- "source": [
206
- "from llama_index.core import Document\n",
207
- "\n",
208
- "# Convert the texts to Document objects so the LlamaIndex framework can process them.\n",
209
- "documents = [Document(text=row[1]) for row in rows]"
210
- ],
211
- "metadata": {
212
- "id": "iXrr5-tnEfm9"
213
- },
214
- "execution_count": null,
215
- "outputs": []
216
- },
217
- {
218
- "cell_type": "code",
219
- "source": [
220
- "from llama_index.core import VectorStoreIndex\n",
221
- "from llama_index.core.node_parser import SentenceSplitter\n",
222
- "\n",
223
- "# Build index / generate embeddings using OpenAI.\n",
224
- "index = VectorStoreIndex.from_documents(\n",
225
- " documents,\n",
226
- " transformations=[SentenceSplitter(chunk_size=768, chunk_overlap=64)],\n",
227
- ")"
228
- ],
229
- "metadata": {
230
- "id": "Bsa7Q-DoNWBk"
231
- },
232
- "execution_count": null,
233
- "outputs": []
234
- },
235
- {
236
- "cell_type": "code",
237
- "source": [
238
- "# Save the generated embeddings.\n",
239
- "# index.storage_context.persist(persist_dir=\"indexes\")"
240
- ],
241
- "metadata": {
242
- "id": "xxB0A9ZYM-OD"
243
- },
244
- "execution_count": null,
245
- "outputs": []
246
- },
247
- {
248
- "cell_type": "markdown",
249
- "source": [
250
- "# Query Dataset"
251
- ],
252
- "metadata": {
253
- "id": "3DoUxd8KK--Q"
254
- }
255
- },
256
- {
257
- "cell_type": "code",
258
- "source": [
259
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
260
- "# and using a LLM to formulate the final answer.\n",
261
- "query_engine = index.as_query_engine()"
262
- ],
263
- "metadata": {
264
- "id": "bUaNH97dEfh9"
265
- },
266
- "execution_count": null,
267
- "outputs": []
268
- },
269
- {
270
- "cell_type": "code",
271
- "source": [
272
- "response = query_engine.query(\n",
273
- " \"How many parameters LLaMA2 model has?\"\n",
274
- ")\n",
275
- "print(response)"
276
- ],
277
- "metadata": {
278
- "colab": {
279
- "base_uri": "https://localhost:8080/"
280
- },
281
- "id": "KHK4V_GRR6ZG",
282
- "outputId": "8d656836-622a-4261-e24a-9cadf857b376"
283
- },
284
- "execution_count": null,
285
- "outputs": [
286
- {
287
- "output_type": "stream",
288
- "name": "stdout",
289
- "text": [
290
- "The Llama 2 model has 7 billion, 13 billion, 34 billion, and 70 billion parameters.\n"
291
- ]
292
- }
293
- ]
294
- },
295
- {
296
- "cell_type": "code",
297
- "source": [
298
- "response = query_engine.query(\n",
299
- " \"When will Llama3 will be released?\"\n",
300
- ")\n",
301
- "print(response)"
302
- ],
303
- "metadata": {
304
- "id": "S-BmyTBbNd9y",
305
- "outputId": "c6a4ec79-7555-4b4d-f212-0b5864c7bded",
306
- "colab": {
307
- "base_uri": "https://localhost:8080/"
308
- }
309
- },
310
- "execution_count": null,
311
- "outputs": [
312
- {
313
- "output_type": "stream",
314
- "name": "stdout",
315
- "text": [
316
- "The release date for Llama3 is not provided in the given context information.\n"
317
- ]
318
- }
319
- ]
320
- },
321
- {
322
- "cell_type": "code",
323
- "source": [
324
- "# Test with smaller chunk size\n",
325
- "# transformations=[SentenceSplitter(chunk_size=512, chunk_overlap=20)]\n",
326
- "\n",
327
- "response = query_engine.query(\n",
328
- " \"How many parameters LLaMA2 model has?\"\n",
329
- ")\n",
330
- "print(response)"
331
- ],
332
- "metadata": {
333
- "colab": {
334
- "base_uri": "https://localhost:8080/"
335
- },
336
- "id": "tEgFx_aeFS5e",
337
- "outputId": "0353f9e4-0f63-4739-eb5b-717bf19572ef"
338
- },
339
- "execution_count": null,
340
- "outputs": [
341
- {
342
- "output_type": "stream",
343
- "name": "stdout",
344
- "text": [
345
- "The LLaMA2 model has a range of 7 billion to 65 billion parameters.\n"
346
- ]
347
- }
348
- ]
349
- },
350
- {
351
- "cell_type": "code",
352
- "source": [],
353
- "metadata": {
354
- "id": "oZt_sG86RwZ3"
355
- },
356
- "execution_count": null,
357
- "outputs": []
358
- }
359
- ]
360
- }
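
The notebook above built its index in memory and left persistence commented out. As a hedged sketch of that missing step, assuming the pinned llama-index 0.10.x API and the `index` object built in the notebook, the embeddings can be written to disk and reloaded later without being recomputed:

```python
from llama_index.core import StorageContext, load_index_from_storage

# Persist the embeddings computed above (the notebook left this commented out).
index.storage_context.persist(persist_dir="indexes")

# Later, rebuild the index from disk instead of re-embedding the documents.
storage_context = StorageContext.from_defaults(persist_dir="indexes")
index = load_index_from_storage(storage_context)
query_engine = index.as_query_engine()
```

Persisting avoids paying the embedding cost on every run; only the query-time LLM calls remain.
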
notebooks/04-RAG_with_VectorStore.ipynb DELETED
@@ -1,449 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "id": "view-in-github"
7
- },
8
- "source": [
9
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/04-RAG_with_VectorStore.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
10
- ]
11
- },
12
- {
13
- "cell_type": "markdown",
14
- "metadata": {
15
- "id": "5BGJ3fxhOk2V"
16
- },
17
- "source": [
18
- "# Install Packages and Setup Variables"
19
- ]
20
- },
21
- {
22
- "cell_type": "code",
23
- "execution_count": 1,
24
- "metadata": {
25
- "id": "QPJzr-I9XQ7l"
26
- },
27
- "outputs": [],
28
- "source": [
29
- "!pip install -q llama-index==0.10.5 llama-index-vector-stores-chroma==0.1.7 langchain==0.1.17 langchain-chroma==0.1.0 langchain_openai==0.1.5 openai==1.12.0 cohere==4.47 tiktoken==0.6.0 chromadb==0.4.22"
30
- ]
31
- },
32
- {
33
- "cell_type": "code",
34
- "execution_count": 2,
35
- "metadata": {
36
- "id": "riuXwpSPcvWC"
37
- },
38
- "outputs": [],
39
- "source": [
40
- "import os\n",
41
- "\n",
42
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
43
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
44
- ]
45
- },
46
- {
47
- "cell_type": "markdown",
48
- "metadata": {
49
- "id": "I9JbAzFcjkpn"
50
- },
51
- "source": [
52
- "# Load the Dataset (CSV)"
53
- ]
54
- },
55
- {
56
- "cell_type": "markdown",
57
- "metadata": {
58
- "id": "_Tif8-JoRH68"
59
- },
60
- "source": [
61
- "## Download"
62
- ]
63
- },
64
- {
65
- "cell_type": "markdown",
66
- "metadata": {
67
- "id": "4fQaa1LN1mXL"
68
- },
69
- "source": [
70
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
71
- ]
72
- },
73
- {
74
- "cell_type": "code",
75
- "execution_count": 3,
76
- "metadata": {
77
- "colab": {
78
- "base_uri": "https://localhost:8080/"
79
- },
80
- "id": "-QTUkdfJjY4N",
81
- "outputId": "a88b2f8a-0c84-45a0-9b32-5088fe596612"
82
- },
83
- "outputs": [
84
- {
85
- "name": "stdout",
86
- "output_type": "stream",
87
- "text": [
88
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
89
- " Dload Upload Total Spent Left Speed\n",
90
- "100 169k 100 169k 0 0 277k 0 --:--:-- --:--:-- --:--:-- 281k\n"
91
- ]
92
- }
93
- ],
94
- "source": [
95
- "!curl -o ./mini-dataset.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
96
- ]
97
- },
98
- {
99
- "cell_type": "markdown",
100
- "metadata": {
101
- "id": "zk-4alIxROo8"
102
- },
103
- "source": [
104
- "## Read File"
105
- ]
106
- },
107
- {
108
- "cell_type": "code",
109
- "execution_count": 4,
110
- "metadata": {
111
- "colab": {
112
- "base_uri": "https://localhost:8080/"
113
- },
114
- "id": "7CYwRT6R0o0I",
115
- "outputId": "351f170f-9a00-4b09-ae08-b45c3c48fce5"
116
- },
117
- "outputs": [
118
- {
119
- "data": {
120
- "text/plain": [
121
- "841"
122
- ]
123
- },
124
- "execution_count": 4,
125
- "metadata": {},
126
- "output_type": "execute_result"
127
- }
128
- ],
129
- "source": [
130
- "import csv\n",
131
- "\n",
132
- "text = \"\"\n",
133
- "\n",
134
- "# Load the file as a JSON\n",
135
- "with open(\"./mini-dataset.csv\", mode=\"r\", encoding=\"ISO-8859-1\") as file:\n",
136
- " csv_reader = csv.reader(file)\n",
137
- "\n",
138
- " for row in csv_reader:\n",
139
- " text += row[0]\n",
140
- "\n",
141
- "# The number of characters in the dataset.\n",
142
- "len( text )"
143
- ]
144
- },
145
- {
146
- "cell_type": "markdown",
147
- "metadata": {
148
- "id": "S17g2RYOjmf2"
149
- },
150
- "source": [
151
- "# Chunking"
152
- ]
153
- },
154
- {
155
- "cell_type": "code",
156
- "execution_count": 5,
157
- "metadata": {
158
- "colab": {
159
- "base_uri": "https://localhost:8080/"
160
- },
161
- "id": "STACTMUR1z9N",
162
- "outputId": "15a61eac-8774-4cdb-db8d-e2eb5b07e517"
163
- },
164
- "outputs": [
165
- {
166
- "data": {
167
- "text/plain": [
168
- "2"
169
- ]
170
- },
171
- "execution_count": 5,
172
- "metadata": {},
173
- "output_type": "execute_result"
174
- }
175
- ],
176
- "source": [
177
- "chunk_size = 512\n",
178
- "chunks = []\n",
179
- "\n",
180
- "# Split the long text into smaller manageable chunks of 512 characters.\n",
181
- "for i in range(0, len(text), chunk_size):\n",
182
- " chunks.append(text[i:i + chunk_size])\n",
183
- "\n",
184
- "len( chunks )"
185
- ]
186
- },
187
- {
188
- "cell_type": "markdown",
189
- "metadata": {
190
- "id": "9fOomeMGqu10"
191
- },
192
- "source": [
193
- "#Interface of Chroma with LlamaIndex"
194
- ]
195
- },
196
- {
197
- "cell_type": "code",
198
- "execution_count": 6,
199
- "metadata": {
200
- "id": "CtdsIUQ81_hT"
201
- },
202
- "outputs": [],
203
- "source": [
204
- "from llama_index.core import Document\n",
205
- "\n",
206
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
207
- "documents = [Document(text=t) for t in chunks]"
208
- ]
209
- },
210
- {
211
- "cell_type": "markdown",
212
- "metadata": {
213
- "id": "OWaT6rL7ksp8"
214
- },
215
- "source": [
216
- "Save on Chroma\n",
217
- "\n"
218
- ]
219
- },
220
- {
221
- "cell_type": "code",
222
- "execution_count": 7,
223
- "metadata": {
224
- "id": "mXi56KTXk2sp"
225
- },
226
- "outputs": [],
227
- "source": [
228
- "import chromadb\n",
229
- "\n",
230
- "# create client and a new collection\n",
231
- "# chromadb.EphemeralClient saves data in-memory.\n",
232
- "chroma_client = chromadb.PersistentClient(path=\"./mini-chunked-dataset\")\n",
233
- "chroma_collection = chroma_client.create_collection(\"mini-chunked-dataset\")"
234
- ]
235
- },
236
- {
237
- "cell_type": "code",
238
- "execution_count": 8,
239
- "metadata": {
240
- "id": "jKXURvLtkuTS"
241
- },
242
- "outputs": [],
243
- "source": [
244
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
245
- "from llama_index.core import StorageContext\n",
246
- "# Define a storage context object using the created vector database.\n",
247
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
248
- "storage_context = StorageContext.from_defaults(vector_store=vector_store)"
249
- ]
250
- },
251
- {
252
- "cell_type": "code",
253
- "execution_count": 9,
254
- "metadata": {
255
- "id": "WsD52wtrlESi"
256
- },
257
- "outputs": [],
258
- "source": [
259
- "from llama_index.core import VectorStoreIndex\n",
260
- "\n",
261
- "# Add the documents to the database and create Index / embeddings\n",
262
- "index = VectorStoreIndex.from_documents(\n",
263
- " documents, storage_context=storage_context\n",
264
- ")"
265
- ]
266
- },
267
- {
268
- "cell_type": "markdown",
269
- "metadata": {
270
- "id": "8JPD8yAinVSq"
271
- },
272
- "source": [
273
- "Query Dataset"
274
- ]
275
- },
276
- {
277
- "cell_type": "code",
278
- "execution_count": 10,
279
- "metadata": {
280
- "id": "mzS13x1ZlZ5X"
281
- },
282
- "outputs": [],
283
- "source": [
284
- "from llama_index.llms.openai import OpenAI\n",
285
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
286
- "# and using a LLM to formulate the final answer.\n",
287
- "\n",
288
- "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)\n",
289
- "query_engine = index.as_query_engine(llm=llm)"
290
- ]
291
- },
292
- {
293
- "cell_type": "code",
294
- "execution_count": 11,
295
- "metadata": {
296
- "colab": {
297
- "base_uri": "https://localhost:8080/"
298
- },
299
- "id": "AYsQ4uLN_Oxg",
300
- "outputId": "5066a06c-77ff-48a2-ee61-3abe2e9755e2"
301
- },
302
- "outputs": [
303
- {
304
- "name": "stdout",
305
- "output_type": "stream",
306
- "text": [
307
- "The LLaMA2 model has 7 billion parameters.\n"
308
- ]
309
- }
310
- ],
311
- "source": [
312
- "response = query_engine.query(\n",
313
- " \"How many parameters LLaMA2 model has?\"\n",
314
- ")\n",
315
- "print(response)"
316
- ]
317
- },
318
- {
319
- "cell_type": "markdown",
320
- "metadata": {
321
- "id": "kWK571VNg-qR"
322
- },
323
- "source": [
324
- "#Interface of Chroma with LangChain"
325
- ]
326
- },
327
- {
328
- "cell_type": "code",
329
- "execution_count": 12,
330
- "metadata": {
331
- "id": "SMPAniL2e4NP"
332
- },
333
- "outputs": [],
334
- "source": [
335
- "from langchain.schema.document import Document\n",
336
- "# Convert the chunks to Document objects so the LangChain framework can process them.\n",
337
- "documents = [Document(page_content=t) for t in chunks]"
338
- ]
339
- },
340
- {
341
- "cell_type": "markdown",
342
- "metadata": {
343
- "id": "QBt8qGxArUPD"
344
- },
345
- "source": [
346
- "Save on Chroma"
347
- ]
348
- },
349
- {
350
- "cell_type": "code",
351
- "execution_count": 13,
352
- "metadata": {
353
- "id": "2xas7HkuhJ8A"
354
- },
355
- "outputs": [],
356
- "source": [
357
- "from langchain_chroma import Chroma\n",
358
- "from langchain_openai import OpenAIEmbeddings\n",
359
- "# Add the documents to chroma DB and create Index / embeddings\n",
360
- "\n",
361
- "embeddings = OpenAIEmbeddings(model=\"text-embedding-ada-002\")\n",
362
- "chroma_db = Chroma.from_documents(\n",
363
- " documents=documents,\n",
364
- " embedding=embeddings,\n",
365
- " persist_directory=\"./mini-chunked-dataset\",\n",
366
- " collection_name=\"mini-chunked-dataset\"\n",
367
- ")"
368
- ]
369
- },
370
- {
371
- "cell_type": "markdown",
372
- "metadata": {
373
- "id": "P8AXJJyBrZWF"
374
- },
375
- "source": [
376
- "Query Dataset"
377
- ]
378
- },
379
- {
380
- "cell_type": "code",
381
- "execution_count": 14,
382
- "metadata": {
383
- "id": "-H64YLxshM2b"
384
- },
385
- "outputs": [],
386
- "source": [
387
- "from langchain_openai import ChatOpenAI\n",
388
- "# Initializing the LLM model\n",
389
- "llm = ChatOpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
390
- ]
391
- },
392
- {
393
- "cell_type": "code",
394
- "execution_count": 16,
395
- "metadata": {
396
- "colab": {
397
- "base_uri": "https://localhost:8080/"
398
- },
399
- "id": "AxBqPNtthPaa",
400
- "outputId": "93c9ad64-1cd1-4f52-c51e-6f3ec5d6542d"
401
- },
402
- "outputs": [
403
- {
404
- "name": "stdout",
405
- "output_type": "stream",
406
- "text": [
407
- "The LLaMA-2 model has 7 billion parameters.\n"
408
- ]
409
- }
410
- ],
411
- "source": [
412
- "from langchain.chains import RetrievalQA\n",
413
- "query = \"How many parameters LLaMA2 model has?\"\n",
414
- "retriever = chroma_db.as_retriever(search_kwargs={\"k\": 2})\n",
415
- "# Define a RetrievalQA chain that is responsible for retrieving related pieces of text,\n",
416
- "# and using a LLM to formulate the final answer.\n",
417
- "chain = RetrievalQA.from_chain_type(llm=llm,\n",
418
- " chain_type=\"stuff\",\n",
419
- " retriever=retriever)\n",
420
- "\n",
421
- "response = chain(query)\n",
422
- "print(response[\"result\"])"
423
- ]
424
- }
425
- ],
426
- "metadata": {
427
- "colab": {
428
- "provenance": []
429
- },
430
- "kernelspec": {
431
- "display_name": "Python 3",
432
- "name": "python3"
433
- },
434
- "language_info": {
435
- "codemirror_mode": {
436
- "name": "ipython",
437
- "version": 3
438
- },
439
- "file_extension": ".py",
440
- "mimetype": "text/x-python",
441
- "name": "python",
442
- "nbconvert_exporter": "python",
443
- "pygments_lexer": "ipython3",
444
- "version": "3.11.8"
445
- }
446
- },
447
- "nbformat": 4,
448
- "nbformat_minor": 0
449
- }
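
Because the notebook above persisted its Chroma collection to ./mini-chunked-dataset, the natural follow-up (notebook 05 below performs the equivalent step for its own collection) is reopening the store without re-embedding anything. A minimal sketch, assuming the same pinned chromadb and llama-index versions and an OPENAI_API_KEY in the environment for query-time embeddings:

```python
import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.vector_stores.chroma import ChromaVectorStore

# Reopen the collection written by the notebook above; no re-embedding happens here.
db = chromadb.PersistentClient(path="./mini-chunked-dataset")
chroma_collection = db.get_or_create_collection("mini-chunked-dataset")
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# Wrap the existing vectors in an index and query as before.
index = VectorStoreIndex.from_vector_store(vector_store)
print(index.as_query_engine().query("How many parameters LLaMA2 model has?"))
```
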
notebooks/05-Improve_Prompts_+_Add_Source.ipynb DELETED
@@ -1,1420 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "colab_type": "text",
7
- "id": "view-in-github"
8
- },
9
- "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/05-Improve_Prompts_%2B_Add_Source.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
- ]
12
- },
13
- {
14
- "cell_type": "markdown",
15
- "metadata": {
16
- "id": "5BGJ3fxhOk2V"
17
- },
18
- "source": [
19
- "# Install Packages and Setup Variables"
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": 4,
25
- "metadata": {
26
- "colab": {
27
- "base_uri": "https://localhost:8080/"
28
- },
29
- "id": "QPJzr-I9XQ7l",
30
- "outputId": "b6cb3d46-9ad9-4658-be9c-a24bcab98c7c"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -q llama-index==0.10.9 openai==1.12.0 cohere==4.47 tiktoken==0.6.0 chromadb==0.4.22"
35
- ]
36
- },
37
- {
38
- "cell_type": "code",
39
- "execution_count": 1,
40
- "metadata": {
41
- "id": "riuXwpSPcvWC"
42
- },
43
- "outputs": [],
44
- "source": [
45
- "import os\n",
46
- "\n",
47
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
48
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\"\n"
49
- ]
50
- },
51
- {
52
- "cell_type": "code",
53
- "execution_count": 2,
54
- "metadata": {
55
- "id": "km-KQOrgr3VB"
56
- },
57
- "outputs": [],
58
- "source": [
59
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
60
- "\n",
61
- "import nest_asyncio\n",
62
- "\n",
63
- "nest_asyncio.apply()"
64
- ]
65
- },
66
- {
67
- "cell_type": "markdown",
68
- "metadata": {
69
- "id": "Bkgi2OrYzF7q"
70
- },
71
- "source": [
72
- "# Load a Model"
73
- ]
74
- },
75
- {
76
- "cell_type": "code",
77
- "execution_count": 3,
78
- "metadata": {
79
- "id": "9oGT6crooSSj"
80
- },
81
- "outputs": [],
82
- "source": [
83
- "from llama_index.llms.openai import OpenAI\n",
84
- "\n",
85
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
86
- ]
87
- },
88
- {
89
- "cell_type": "markdown",
90
- "metadata": {
91
- "id": "0BwVuJXlzHVL"
92
- },
93
- "source": [
94
- "# Create a VectoreStore"
95
- ]
96
- },
97
- {
98
- "cell_type": "code",
99
- "execution_count": 4,
100
- "metadata": {
101
- "id": "SQP87lHczHKc"
102
- },
103
- "outputs": [],
104
- "source": [
105
- "import chromadb\n",
106
- "\n",
107
- "# create client and a new collection\n",
108
- "# chromadb.EphemeralClient saves data in-memory.\n",
109
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
110
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
111
- ]
112
- },
113
- {
114
- "cell_type": "code",
115
- "execution_count": 5,
116
- "metadata": {
117
- "id": "zAaGcYMJzHAN"
118
- },
119
- "outputs": [],
120
- "source": [
121
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
122
- "\n",
123
- "# Define a storage context object using the created vector database.\n",
124
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
125
- ]
126
- },
127
- {
128
- "cell_type": "markdown",
129
- "metadata": {
130
- "id": "I9JbAzFcjkpn"
131
- },
132
- "source": [
133
- "# Load the Dataset (CSV)"
134
- ]
135
- },
136
- {
137
- "cell_type": "markdown",
138
- "metadata": {
139
- "id": "_Tif8-JoRH68"
140
- },
141
- "source": [
142
- "## Download"
143
- ]
144
- },
145
- {
146
- "cell_type": "markdown",
147
- "metadata": {
148
- "id": "4fQaa1LN1mXL"
149
- },
150
- "source": [
151
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model."
152
- ]
153
- },
154
- {
155
- "cell_type": "code",
156
- "execution_count": 6,
157
- "metadata": {
158
- "colab": {
159
- "base_uri": "https://localhost:8080/"
160
- },
161
- "id": "fQtpDvUzKNzI",
162
- "outputId": "829f8e63-7767-43a1-b3c9-95ae099012e7"
163
- },
164
- "outputs": [
165
- {
166
- "name": "stdout",
167
- "output_type": "stream",
168
- "text": [
169
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
170
- " Dload Upload Total Spent Left Speed\n",
171
- "100 169k 100 169k 0 0 1044k 0 --:--:-- --:--:-- --:--:-- 1040k\n"
172
- ]
173
- }
174
- ],
175
- "source": [
176
- "!curl -o ./mini-dataset.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
177
- ]
178
- },
179
- {
180
- "cell_type": "markdown",
181
- "metadata": {
182
- "id": "zk-4alIxROo8"
183
- },
184
- "source": [
185
- "## Load the Articles"
186
- ]
187
- },
188
- {
189
- "cell_type": "code",
190
- "execution_count": 7,
191
- "metadata": {
192
- "colab": {
193
- "base_uri": "https://localhost:8080/"
194
- },
195
- "id": "_WER5lt0N7c5",
196
- "outputId": "2e4eae71-fa3a-4faf-a4e2-d3efaeaa591a"
197
- },
198
- "outputs": [
199
- {
200
- "data": {
201
- "text/plain": [
202
- "14"
203
- ]
204
- },
205
- "execution_count": 7,
206
- "metadata": {},
207
- "output_type": "execute_result"
208
- }
209
- ],
210
- "source": [
211
- "import csv\n",
212
- "\n",
213
- "rows = []\n",
214
- "\n",
215
- "# Load the file as a JSON\n",
216
- "with open(\"./mini-dataset.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
217
- " csv_reader = csv.reader(file)\n",
218
- "\n",
219
- " for idx, row in enumerate( csv_reader ):\n",
220
- " if idx == 0: continue; # Skip header row\n",
221
- " rows.append( row )\n",
222
- "\n",
223
- "# The number of characters in the dataset.\n",
224
- "len( rows )"
225
- ]
226
- },
227
- {
228
- "cell_type": "markdown",
229
- "metadata": {
230
- "id": "wxEStggPdxYs"
231
- },
232
- "source": [
233
- "# Convert to Document obj"
234
- ]
235
- },
236
- {
237
- "cell_type": "code",
238
- "execution_count": 8,
239
- "metadata": {
240
- "id": "lFvW_886dxKX"
241
- },
242
- "outputs": [],
243
- "source": [
244
- "from llama_index.core import Document\n",
245
- "\n",
246
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
247
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
248
- ]
249
- },
250
- {
251
- "cell_type": "code",
252
- "execution_count": 9,
253
- "metadata": {
254
- "colab": {
255
- "base_uri": "https://localhost:8080/"
256
- },
257
- "id": "Njoc3XEVkKkf",
258
- "outputId": "bab3878d-252d-4f9a-8a65-d2933e8dc891"
259
- },
260
- "outputs": [
261
- {
262
- "data": {
263
- "text/plain": [
264
- "14"
265
- ]
266
- },
267
- "execution_count": 9,
268
- "metadata": {},
269
- "output_type": "execute_result"
270
- }
271
- ],
272
- "source": [
273
- "len( documents )"
274
- ]
275
- },
276
- {
277
- "cell_type": "markdown",
278
- "metadata": {
279
- "id": "S17g2RYOjmf2"
280
- },
281
- "source": [
282
- "# Transforming"
283
- ]
284
- },
285
- {
286
- "cell_type": "code",
287
- "execution_count": 10,
288
- "metadata": {
289
- "id": "STACTMUR1z9N"
290
- },
291
- "outputs": [],
292
- "source": [
293
- "from llama_index.core.node_parser import TokenTextSplitter\n",
294
- "\n",
295
- "# Define the splitter object that split the text into segments with 512 tokens,\n",
296
- "# with a 128 overlap between the segments.\n",
297
- "text_splitter = TokenTextSplitter(\n",
298
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
299
- ")"
300
- ]
301
- },
302
- {
303
- "cell_type": "code",
304
- "execution_count": 11,
305
- "metadata": {
306
- "colab": {
307
- "base_uri": "https://localhost:8080/",
308
- "height": 331,
309
- "referenced_widgets": [
310
- "9b38fd520d1a4700bbc596b260a9a96f",
311
- "5320a84d7a00443e86af8f031d71685d",
312
- "4f3f1f990d244eb290482be55525daec",
313
- "9a4eb44d43dc42d9acdb606b6d55ad9f",
314
- "51de9732c1e04961b16351d3f410ac1d",
315
- "b40ee74dabec45ce842bcfb983d3fa75",
316
- "0c0ba53346954abc85f0921b682e7279",
317
- "9372c35dcfc04e16a97c0eb63003520e",
318
- "c6f3cd2404ef4a3096a61c1fcdbddd8f",
319
- "181bd6b10e9e4ec693ece948fd432302",
320
- "0c55e54063ea44ab8ea83466d9603a6d",
321
- "739a7d470a024bc2806e2ea998bf1dac",
322
- "299757dc40394c3287beea74c40dec27",
323
- "6c111aa1d43a4af9b04355a65c8fccb2",
324
- "4926bed77e464729b902c20bd7874a03",
325
- "5c1eaae6cf2840ab96f1a1d6a1f91881",
326
- "d4b409c70f3f4398ad88ede8f438e32a",
327
- "85fa4db33aa8427ba18d43f9a529529b",
328
- "a9e8371d627a48e69c7a725646f689d5",
329
- "e8a00080ca684fcc97189f5f3ea325e3",
330
- "d7213ef5bbb7409cbe40437bde51b5c9",
331
- "652d2e07d8be4f1f87c2f258cf288f1a"
332
- ]
333
- },
334
- "id": "CtdsIUQ81_hT",
335
- "outputId": "6a48a887-be9e-4bf3-d54d-3e0575a24e52"
336
- },
337
- "outputs": [
338
- {
339
- "name": "stderr",
340
- "output_type": "stream",
341
- "text": [
342
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
343
- " from .autonotebook import tqdm as notebook_tqdm\n",
344
- "Parsing nodes: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 14/14 [00:00<00:00, 38.44it/s]\n",
345
- "Generating embeddings: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:01<00:00, 79.43it/s]\n"
346
- ]
347
- }
348
- ],
349
- "source": [
350
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
351
- "from llama_index.core.ingestion import IngestionPipeline\n",
352
- "\n",
353
- "# Create the pipeline to apply the transformation (splitting and embedding) on each chunk,\n",
354
- "# and store the transformed text in the chroma vector store.\n",
355
- "pipeline = IngestionPipeline(\n",
356
- " transformations=[\n",
357
- " text_splitter,\n",
358
- " OpenAIEmbedding(),\n",
359
- " ],\n",
360
- " vector_store=vector_store\n",
361
- ")\n",
362
- "\n",
363
- "# Run the transformation pipeline.\n",
364
- "b = pipeline.run(documents=documents, show_progress=True);"
365
- ]
366
- },
367
- {
368
- "cell_type": "markdown",
369
- "metadata": {
370
- "id": "EV0ll57p46Dc"
371
- },
372
- "source": [
373
- "# Load Indexes"
374
- ]
375
- },
376
- {
377
- "cell_type": "code",
378
- "execution_count": 12,
379
- "metadata": {
380
- "id": "PS215gCGkGD-"
381
- },
382
- "outputs": [],
383
- "source": [
384
- "# Load the vector store from the local storage.\n",
385
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
386
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
387
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
388
- ]
389
- },
390
- {
391
- "cell_type": "code",
392
- "execution_count": 13,
393
- "metadata": {
394
- "id": "HbT3-kRO4Qpt"
395
- },
396
- "outputs": [],
397
- "source": [
398
- "from llama_index.core import VectorStoreIndex\n",
399
- "\n",
400
- "# Create the index based on the vector store.\n",
401
- "index = VectorStoreIndex.from_vector_store(vector_store)"
402
- ]
403
- },
404
- {
405
- "cell_type": "code",
406
- "execution_count": 14,
407
- "metadata": {
408
- "id": "sb61DWU84bHP"
409
- },
410
- "outputs": [],
411
- "source": [
412
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
413
- "# and using a LLM to formulate the final answer.\n",
414
- "query_engine = index.as_query_engine()"
415
- ]
416
- },
417
- {
418
- "cell_type": "code",
419
- "execution_count": 15,
420
- "metadata": {
421
- "id": "G32W2LMMCmnv"
422
- },
423
- "outputs": [],
424
- "source": [
425
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
426
- ]
427
- },
428
- {
429
- "cell_type": "code",
430
- "execution_count": 16,
431
- "metadata": {
432
- "colab": {
433
- "base_uri": "https://localhost:8080/",
434
- "height": 35
435
- },
436
- "id": "obc20cU5Cxf2",
437
- "outputId": "6f89e848-da19-40db-90bb-777a5483af04"
438
- },
439
- "outputs": [
440
- {
441
- "data": {
442
- "text/plain": [
443
- "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
444
- ]
445
- },
446
- "execution_count": 16,
447
- "metadata": {},
448
- "output_type": "execute_result"
449
- }
450
- ],
451
- "source": [
452
- "res.response"
453
- ]
454
- },
455
- {
456
- "cell_type": "code",
457
- "execution_count": 17,
458
- "metadata": {
459
- "colab": {
460
- "base_uri": "https://localhost:8080/"
461
- },
462
- "id": "oIAO-saJCzYe",
463
- "outputId": "985a5eca-9e1c-45e7-e650-63f90f7df964"
464
- },
465
- "outputs": [
466
- {
467
- "name": "stdout",
468
- "output_type": "stream",
469
- "text": [
470
- "Node ID\t de7de537-c87d-44e3-ac43-5180a95acb90\n",
471
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
472
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
473
- "Score\t 0.7122361910421624\n",
474
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
475
- "Node ID\t 1dfbee1d-1073-4f89-a286-1f0321729e58\n",
476
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
477
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
478
- "Score\t 0.7047493574957753\n",
479
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
480
- ]
481
- }
482
- ],
483
- "source": [
484
- "# Show the retrieved nodes\n",
485
- "for src in res.source_nodes:\n",
486
- " print(\"Node ID\\t\", src.node_id)\n",
487
- " print(\"Title\\t\", src.metadata['title'])\n",
488
- " print(\"Text\\t\", src.text)\n",
489
- " print(\"Score\\t\", src.score)\n",
490
- " print(\"-_\"*20)"
491
- ]
492
- },
493
- {
494
- "cell_type": "markdown",
495
- "metadata": {
496
- "id": "pVJif4uhPNXM"
497
- },
498
- "source": [
499
- "# Response Modes\n",
500
- "\n"
501
- ]
502
- },
503
- {
504
- "cell_type": "markdown",
505
- "metadata": {
506
- "id": "ykZOaQYvPWMj"
507
- },
508
- "source": [
509
- "The behavior of the query engine during response generation can be adjusted. Several modes are available for consideration, including the following:\n",
510
- "\n",
511
- "- compact (default): Concatenate all the retrieved chunks and use them in the prompt to generate an answer.\n",
512
- "- refine: Generate an answer based on the first retrieved chunk, then improve the answer based on the other retrieved chunks one at a time. (will send one request for each chunk to refine the response)\n",
513
- "- tree summarize: concatenate the retrieved chunks until they fit the context window and summarize them. The summaized chunks will then recusively fed back to the LLM for summarization until one chunk remains which would be the final answer.\n",
514
- "\n",
515
- "\n",
516
- "Refer to [documentation](https://docs.llamaindex.ai/en/stable/module_guides/querying/response_synthesizers/root.html#configuring-the-response-mode) for a comprehensive list.\n",
517
- "\n",
518
- "Due to the limited size of the sample dataset, the examples provided will yield identical responses. It's crucial to evaluate these methods in the context of your specific use case and cost considerations."
519
- ]
520
- },
521
- {
522
- "cell_type": "code",
523
- "execution_count": 18,
524
- "metadata": {
525
- "id": "d4xxZHbdN0lK"
526
- },
527
- "outputs": [],
528
- "source": [
529
- "query_engine = index.as_query_engine(response_mode=\"refine\")\n",
530
- "# query_engine = index.as_query_engine(response_mode=\"tree_summarize\")"
531
- ]
532
- },
533
- {
534
- "cell_type": "code",
535
- "execution_count": 19,
536
- "metadata": {
537
- "id": "uNKJfIn-SDLm"
538
- },
539
- "outputs": [],
540
- "source": [
541
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
542
- ]
543
- },
544
- {
545
- "cell_type": "code",
546
- "execution_count": 20,
547
- "metadata": {
548
- "colab": {
549
- "base_uri": "https://localhost:8080/",
550
- "height": 35
551
- },
552
- "id": "Z1XmLBEoSFzB",
553
- "outputId": "53ee59b9-a2ad-4700-e8c9-7f450d650242"
554
- },
555
- "outputs": [
556
- {
557
- "data": {
558
- "text/plain": [
559
- "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
560
- ]
561
- },
562
- "execution_count": 20,
563
- "metadata": {},
564
- "output_type": "execute_result"
565
- }
566
- ],
567
- "source": [
568
- "res.response"
569
- ]
570
- },
571
- {
572
- "cell_type": "code",
573
- "execution_count": 21,
574
- "metadata": {
575
- "colab": {
576
- "base_uri": "https://localhost:8080/"
577
- },
578
- "id": "pZUgM-mSST4X",
579
- "outputId": "6803179b-95f5-46d1-ad98-d799ea1b6289"
580
- },
581
- "outputs": [
582
- {
583
- "name": "stdout",
584
- "output_type": "stream",
585
- "text": [
586
- "Node ID\t de7de537-c87d-44e3-ac43-5180a95acb90\n",
587
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
588
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
589
- "Score\t 0.7122361910421624\n",
590
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
591
- "Node ID\t 1dfbee1d-1073-4f89-a286-1f0321729e58\n",
592
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
593
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
594
- "Score\t 0.7047493574957753\n",
595
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
596
- ]
597
- }
598
- ],
599
- "source": [
600
- "# Show the retrieved nodes\n",
601
- "for src in res.source_nodes:\n",
602
- " print(\"Node ID\\t\", src.node_id)\n",
603
- " print(\"Title\\t\", src.metadata['title'])\n",
604
- " print(\"Text\\t\", src.text)\n",
605
- " print(\"Score\\t\", src.score)\n",
606
- " print(\"-_\"*20)"
607
- ]
608
- },
609
- {
610
- "cell_type": "markdown",
611
- "metadata": {
612
- "id": "697hg9YWTAoq"
613
- },
614
- "source": [
615
- "The `no_text` mode will retrieve the documents, but will not send the request to the API to synthesize the final response. It is a great approach to debug the retrieved documents."
616
- ]
617
- },
618
- {
619
- "cell_type": "code",
620
- "execution_count": 22,
621
- "metadata": {
622
- "colab": {
623
- "base_uri": "https://localhost:8080/"
624
- },
625
- "id": "H2x55KW0S1Jg",
626
- "outputId": "39e8924c-c445-4658-d39f-7a300e8d516f"
627
- },
628
- "outputs": [],
629
- "source": [
630
- "query_engine = index.as_query_engine(response_mode=\"no_text\")\n",
631
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
632
- ]
633
- },
634
- {
635
- "cell_type": "code",
636
- "execution_count": 23,
637
- "metadata": {
638
- "colab": {
639
- "base_uri": "https://localhost:8080/",
640
- "height": 35
641
- },
642
- "id": "gvvtYQcBS-Ug",
643
- "outputId": "85dd7301-6d12-4758-86b0-652396d6fe39"
644
- },
645
- "outputs": [
646
- {
647
- "data": {
648
- "text/plain": [
649
- "''"
650
- ]
651
- },
652
- "execution_count": 23,
653
- "metadata": {},
654
- "output_type": "execute_result"
655
- }
656
- ],
657
- "source": [
658
- "res.response"
659
- ]
660
- },
661
- {
662
- "cell_type": "code",
663
- "execution_count": 24,
664
- "metadata": {
665
- "colab": {
666
- "base_uri": "https://localhost:8080/"
667
- },
668
- "id": "o9ijBEkXS5LC",
669
- "outputId": "616c8315-15c5-47cd-a9ed-2830b2f88d5d"
670
- },
671
- "outputs": [
672
- {
673
- "name": "stdout",
674
- "output_type": "stream",
675
- "text": [
676
- "Node ID\t de7de537-c87d-44e3-ac43-5180a95acb90\n",
677
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
678
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
679
- "Score\t 0.7122361910421624\n",
680
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
681
- "Node ID\t 1dfbee1d-1073-4f89-a286-1f0321729e58\n",
682
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
683
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
684
- "Score\t 0.7047493574957753\n",
685
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
686
- ]
687
- }
688
- ],
689
- "source": [
690
- "# Show the retrieved nodes\n",
691
- "for src in res.source_nodes:\n",
692
- " print(\"Node ID\\t\", src.node_id)\n",
693
- " print(\"Title\\t\", src.metadata['title'])\n",
694
- " print(\"Text\\t\", src.text)\n",
695
- " print(\"Score\\t\", src.score)\n",
696
- " print(\"-_\"*20)"
697
- ]
698
- },
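With response_mode="no_text", retrieval runs but no answer is synthesized, which is why res.response above is the empty string. As a minimal sketch of the synthesizing path, assuming the same index object, switching to LlamaIndex's built-in "compact" response mode produces a grounded answer from the same retrieved chunks:

    query_engine = index.as_query_engine(response_mode="compact")
    res = query_engine.query("How many parameters does the LLaMA2 model have?")
    print(res.response)               # a synthesized answer instead of ''
    for src in res.source_nodes:      # the same retrieved chunks now back the answer
        print(src.metadata["title"], src.score)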
699
- {
700
- "cell_type": "code",
701
- "execution_count": null,
702
- "metadata": {},
703
- "outputs": [],
704
- "source": []
705
- }
706
- ],
707
- "metadata": {
708
- "colab": {
709
- "authorship_tag": "ABX9TyPHUCVR9OPVGnLj3XoIzKS4",
710
- "include_colab_link": true,
711
- "provenance": []
712
- },
713
- "kernelspec": {
714
- "display_name": "Python 3",
715
- "name": "python3"
716
- },
717
- "language_info": {
718
- "codemirror_mode": {
719
- "name": "ipython",
720
- "version": 3
721
- },
722
- "file_extension": ".py",
723
- "mimetype": "text/x-python",
724
- "name": "python",
725
- "nbconvert_exporter": "python",
726
- "pygments_lexer": "ipython3",
727
- "version": "3.11.8"
728
- },
729
- "widgets": {
730
- "application/vnd.jupyter.widget-state+json": {
731
- "0c0ba53346954abc85f0921b682e7279": {
732
- "model_module": "@jupyter-widgets/controls",
733
- "model_module_version": "1.5.0",
734
- "model_name": "DescriptionStyleModel",
735
- "state": {
736
- "_model_module": "@jupyter-widgets/controls",
737
- "_model_module_version": "1.5.0",
738
- "_model_name": "DescriptionStyleModel",
739
- "_view_count": null,
740
- "_view_module": "@jupyter-widgets/base",
741
- "_view_module_version": "1.2.0",
742
- "_view_name": "StyleView",
743
- "description_width": ""
744
- }
745
- },
746
- "0c55e54063ea44ab8ea83466d9603a6d": {
747
- "model_module": "@jupyter-widgets/controls",
748
- "model_module_version": "1.5.0",
749
- "model_name": "DescriptionStyleModel",
750
- "state": {
751
- "_model_module": "@jupyter-widgets/controls",
752
- "_model_module_version": "1.5.0",
753
- "_model_name": "DescriptionStyleModel",
754
- "_view_count": null,
755
- "_view_module": "@jupyter-widgets/base",
756
- "_view_module_version": "1.2.0",
757
- "_view_name": "StyleView",
758
- "description_width": ""
759
- }
760
- },
761
- "181bd6b10e9e4ec693ece948fd432302": {
762
- "model_module": "@jupyter-widgets/base",
763
- "model_module_version": "1.2.0",
764
- "model_name": "LayoutModel",
765
- "state": {
766
- "_model_module": "@jupyter-widgets/base",
767
- "_model_module_version": "1.2.0",
768
- "_model_name": "LayoutModel",
769
- "_view_count": null,
770
- "_view_module": "@jupyter-widgets/base",
771
- "_view_module_version": "1.2.0",
772
- "_view_name": "LayoutView",
773
- "align_content": null,
774
- "align_items": null,
775
- "align_self": null,
776
- "border": null,
777
- "bottom": null,
778
- "display": null,
779
- "flex": null,
780
- "flex_flow": null,
781
- "grid_area": null,
782
- "grid_auto_columns": null,
783
- "grid_auto_flow": null,
784
- "grid_auto_rows": null,
785
- "grid_column": null,
786
- "grid_gap": null,
787
- "grid_row": null,
788
- "grid_template_areas": null,
789
- "grid_template_columns": null,
790
- "grid_template_rows": null,
791
- "height": null,
792
- "justify_content": null,
793
- "justify_items": null,
794
- "left": null,
795
- "margin": null,
796
- "max_height": null,
797
- "max_width": null,
798
- "min_height": null,
799
- "min_width": null,
800
- "object_fit": null,
801
- "object_position": null,
802
- "order": null,
803
- "overflow": null,
804
- "overflow_x": null,
805
- "overflow_y": null,
806
- "padding": null,
807
- "right": null,
808
- "top": null,
809
- "visibility": null,
810
- "width": null
811
- }
812
- },
813
- "299757dc40394c3287beea74c40dec27": {
814
- "model_module": "@jupyter-widgets/controls",
815
- "model_module_version": "1.5.0",
816
- "model_name": "HTMLModel",
817
- "state": {
818
- "_dom_classes": [],
819
- "_model_module": "@jupyter-widgets/controls",
820
- "_model_module_version": "1.5.0",
821
- "_model_name": "HTMLModel",
822
- "_view_count": null,
823
- "_view_module": "@jupyter-widgets/controls",
824
- "_view_module_version": "1.5.0",
825
- "_view_name": "HTMLView",
826
- "description": "",
827
- "description_tooltip": null,
828
- "layout": "IPY_MODEL_d4b409c70f3f4398ad88ede8f438e32a",
829
- "placeholder": "​",
830
- "style": "IPY_MODEL_85fa4db33aa8427ba18d43f9a529529b",
831
- "value": "Generating embeddings: 100%"
832
- }
833
- },
834
- "4926bed77e464729b902c20bd7874a03": {
835
- "model_module": "@jupyter-widgets/controls",
836
- "model_module_version": "1.5.0",
837
- "model_name": "HTMLModel",
838
- "state": {
839
- "_dom_classes": [],
840
- "_model_module": "@jupyter-widgets/controls",
841
- "_model_module_version": "1.5.0",
842
- "_model_name": "HTMLModel",
843
- "_view_count": null,
844
- "_view_module": "@jupyter-widgets/controls",
845
- "_view_module_version": "1.5.0",
846
- "_view_name": "HTMLView",
847
- "description": "",
848
- "description_tooltip": null,
849
- "layout": "IPY_MODEL_d7213ef5bbb7409cbe40437bde51b5c9",
850
- "placeholder": "​",
851
- "style": "IPY_MODEL_652d2e07d8be4f1f87c2f258cf288f1a",
852
- "value": " 108/108 [00:05&lt;00:00, 28.51it/s]"
853
- }
854
- },
855
- "4f3f1f990d244eb290482be55525daec": {
856
- "model_module": "@jupyter-widgets/controls",
857
- "model_module_version": "1.5.0",
858
- "model_name": "FloatProgressModel",
859
- "state": {
860
- "_dom_classes": [],
861
- "_model_module": "@jupyter-widgets/controls",
862
- "_model_module_version": "1.5.0",
863
- "_model_name": "FloatProgressModel",
864
- "_view_count": null,
865
- "_view_module": "@jupyter-widgets/controls",
866
- "_view_module_version": "1.5.0",
867
- "_view_name": "ProgressView",
868
- "bar_style": "success",
869
- "description": "",
870
- "description_tooltip": null,
871
- "layout": "IPY_MODEL_9372c35dcfc04e16a97c0eb63003520e",
872
- "max": 14,
873
- "min": 0,
874
- "orientation": "horizontal",
875
- "style": "IPY_MODEL_c6f3cd2404ef4a3096a61c1fcdbddd8f",
876
- "value": 14
877
- }
878
- },
879
- "51de9732c1e04961b16351d3f410ac1d": {
880
- "model_module": "@jupyter-widgets/base",
881
- "model_module_version": "1.2.0",
882
- "model_name": "LayoutModel",
883
- "state": {
884
- "_model_module": "@jupyter-widgets/base",
885
- "_model_module_version": "1.2.0",
886
- "_model_name": "LayoutModel",
887
- "_view_count": null,
888
- "_view_module": "@jupyter-widgets/base",
889
- "_view_module_version": "1.2.0",
890
- "_view_name": "LayoutView",
891
- "align_content": null,
892
- "align_items": null,
893
- "align_self": null,
894
- "border": null,
895
- "bottom": null,
896
- "display": null,
897
- "flex": null,
898
- "flex_flow": null,
899
- "grid_area": null,
900
- "grid_auto_columns": null,
901
- "grid_auto_flow": null,
902
- "grid_auto_rows": null,
903
- "grid_column": null,
904
- "grid_gap": null,
905
- "grid_row": null,
906
- "grid_template_areas": null,
907
- "grid_template_columns": null,
908
- "grid_template_rows": null,
909
- "height": null,
910
- "justify_content": null,
911
- "justify_items": null,
912
- "left": null,
913
- "margin": null,
914
- "max_height": null,
915
- "max_width": null,
916
- "min_height": null,
917
- "min_width": null,
918
- "object_fit": null,
919
- "object_position": null,
920
- "order": null,
921
- "overflow": null,
922
- "overflow_x": null,
923
- "overflow_y": null,
924
- "padding": null,
925
- "right": null,
926
- "top": null,
927
- "visibility": null,
928
- "width": null
929
- }
930
- },
931
- "5320a84d7a00443e86af8f031d71685d": {
932
- "model_module": "@jupyter-widgets/controls",
933
- "model_module_version": "1.5.0",
934
- "model_name": "HTMLModel",
935
- "state": {
936
- "_dom_classes": [],
937
- "_model_module": "@jupyter-widgets/controls",
938
- "_model_module_version": "1.5.0",
939
- "_model_name": "HTMLModel",
940
- "_view_count": null,
941
- "_view_module": "@jupyter-widgets/controls",
942
- "_view_module_version": "1.5.0",
943
- "_view_name": "HTMLView",
944
- "description": "",
945
- "description_tooltip": null,
946
- "layout": "IPY_MODEL_b40ee74dabec45ce842bcfb983d3fa75",
947
- "placeholder": "​",
948
- "style": "IPY_MODEL_0c0ba53346954abc85f0921b682e7279",
949
- "value": "Parsing nodes: 100%"
950
- }
951
- },
952
- "5c1eaae6cf2840ab96f1a1d6a1f91881": {
953
- "model_module": "@jupyter-widgets/base",
954
- "model_module_version": "1.2.0",
955
- "model_name": "LayoutModel",
956
- "state": {
957
- "_model_module": "@jupyter-widgets/base",
958
- "_model_module_version": "1.2.0",
959
- "_model_name": "LayoutModel",
960
- "_view_count": null,
961
- "_view_module": "@jupyter-widgets/base",
962
- "_view_module_version": "1.2.0",
963
- "_view_name": "LayoutView",
964
- "align_content": null,
965
- "align_items": null,
966
- "align_self": null,
967
- "border": null,
968
- "bottom": null,
969
- "display": null,
970
- "flex": null,
971
- "flex_flow": null,
972
- "grid_area": null,
973
- "grid_auto_columns": null,
974
- "grid_auto_flow": null,
975
- "grid_auto_rows": null,
976
- "grid_column": null,
977
- "grid_gap": null,
978
- "grid_row": null,
979
- "grid_template_areas": null,
980
- "grid_template_columns": null,
981
- "grid_template_rows": null,
982
- "height": null,
983
- "justify_content": null,
984
- "justify_items": null,
985
- "left": null,
986
- "margin": null,
987
- "max_height": null,
988
- "max_width": null,
989
- "min_height": null,
990
- "min_width": null,
991
- "object_fit": null,
992
- "object_position": null,
993
- "order": null,
994
- "overflow": null,
995
- "overflow_x": null,
996
- "overflow_y": null,
997
- "padding": null,
998
- "right": null,
999
- "top": null,
1000
- "visibility": null,
1001
- "width": null
1002
- }
1003
- },
1004
- "652d2e07d8be4f1f87c2f258cf288f1a": {
1005
- "model_module": "@jupyter-widgets/controls",
1006
- "model_module_version": "1.5.0",
1007
- "model_name": "DescriptionStyleModel",
1008
- "state": {
1009
- "_model_module": "@jupyter-widgets/controls",
1010
- "_model_module_version": "1.5.0",
1011
- "_model_name": "DescriptionStyleModel",
1012
- "_view_count": null,
1013
- "_view_module": "@jupyter-widgets/base",
1014
- "_view_module_version": "1.2.0",
1015
- "_view_name": "StyleView",
1016
- "description_width": ""
1017
- }
1018
- },
1019
- "6c111aa1d43a4af9b04355a65c8fccb2": {
1020
- "model_module": "@jupyter-widgets/controls",
1021
- "model_module_version": "1.5.0",
1022
- "model_name": "FloatProgressModel",
1023
- "state": {
1024
- "_dom_classes": [],
1025
- "_model_module": "@jupyter-widgets/controls",
1026
- "_model_module_version": "1.5.0",
1027
- "_model_name": "FloatProgressModel",
1028
- "_view_count": null,
1029
- "_view_module": "@jupyter-widgets/controls",
1030
- "_view_module_version": "1.5.0",
1031
- "_view_name": "ProgressView",
1032
- "bar_style": "success",
1033
- "description": "",
1034
- "description_tooltip": null,
1035
- "layout": "IPY_MODEL_a9e8371d627a48e69c7a725646f689d5",
1036
- "max": 108,
1037
- "min": 0,
1038
- "orientation": "horizontal",
1039
- "style": "IPY_MODEL_e8a00080ca684fcc97189f5f3ea325e3",
1040
- "value": 108
1041
- }
1042
- },
1043
- "739a7d470a024bc2806e2ea998bf1dac": {
1044
- "model_module": "@jupyter-widgets/controls",
1045
- "model_module_version": "1.5.0",
1046
- "model_name": "HBoxModel",
1047
- "state": {
1048
- "_dom_classes": [],
1049
- "_model_module": "@jupyter-widgets/controls",
1050
- "_model_module_version": "1.5.0",
1051
- "_model_name": "HBoxModel",
1052
- "_view_count": null,
1053
- "_view_module": "@jupyter-widgets/controls",
1054
- "_view_module_version": "1.5.0",
1055
- "_view_name": "HBoxView",
1056
- "box_style": "",
1057
- "children": [
1058
- "IPY_MODEL_299757dc40394c3287beea74c40dec27",
1059
- "IPY_MODEL_6c111aa1d43a4af9b04355a65c8fccb2",
1060
- "IPY_MODEL_4926bed77e464729b902c20bd7874a03"
1061
- ],
1062
- "layout": "IPY_MODEL_5c1eaae6cf2840ab96f1a1d6a1f91881"
1063
- }
1064
- },
1065
- "85fa4db33aa8427ba18d43f9a529529b": {
1066
- "model_module": "@jupyter-widgets/controls",
1067
- "model_module_version": "1.5.0",
1068
- "model_name": "DescriptionStyleModel",
1069
- "state": {
1070
- "_model_module": "@jupyter-widgets/controls",
1071
- "_model_module_version": "1.5.0",
1072
- "_model_name": "DescriptionStyleModel",
1073
- "_view_count": null,
1074
- "_view_module": "@jupyter-widgets/base",
1075
- "_view_module_version": "1.2.0",
1076
- "_view_name": "StyleView",
1077
- "description_width": ""
1078
- }
1079
- },
1080
- "9372c35dcfc04e16a97c0eb63003520e": {
1081
- "model_module": "@jupyter-widgets/base",
1082
- "model_module_version": "1.2.0",
1083
- "model_name": "LayoutModel",
1084
- "state": {
1085
- "_model_module": "@jupyter-widgets/base",
1086
- "_model_module_version": "1.2.0",
1087
- "_model_name": "LayoutModel",
1088
- "_view_count": null,
1089
- "_view_module": "@jupyter-widgets/base",
1090
- "_view_module_version": "1.2.0",
1091
- "_view_name": "LayoutView",
1092
- "align_content": null,
1093
- "align_items": null,
1094
- "align_self": null,
1095
- "border": null,
1096
- "bottom": null,
1097
- "display": null,
1098
- "flex": null,
1099
- "flex_flow": null,
1100
- "grid_area": null,
1101
- "grid_auto_columns": null,
1102
- "grid_auto_flow": null,
1103
- "grid_auto_rows": null,
1104
- "grid_column": null,
1105
- "grid_gap": null,
1106
- "grid_row": null,
1107
- "grid_template_areas": null,
1108
- "grid_template_columns": null,
1109
- "grid_template_rows": null,
1110
- "height": null,
1111
- "justify_content": null,
1112
- "justify_items": null,
1113
- "left": null,
1114
- "margin": null,
1115
- "max_height": null,
1116
- "max_width": null,
1117
- "min_height": null,
1118
- "min_width": null,
1119
- "object_fit": null,
1120
- "object_position": null,
1121
- "order": null,
1122
- "overflow": null,
1123
- "overflow_x": null,
1124
- "overflow_y": null,
1125
- "padding": null,
1126
- "right": null,
1127
- "top": null,
1128
- "visibility": null,
1129
- "width": null
1130
- }
1131
- },
1132
- "9a4eb44d43dc42d9acdb606b6d55ad9f": {
1133
- "model_module": "@jupyter-widgets/controls",
1134
- "model_module_version": "1.5.0",
1135
- "model_name": "HTMLModel",
1136
- "state": {
1137
- "_dom_classes": [],
1138
- "_model_module": "@jupyter-widgets/controls",
1139
- "_model_module_version": "1.5.0",
1140
- "_model_name": "HTMLModel",
1141
- "_view_count": null,
1142
- "_view_module": "@jupyter-widgets/controls",
1143
- "_view_module_version": "1.5.0",
1144
- "_view_name": "HTMLView",
1145
- "description": "",
1146
- "description_tooltip": null,
1147
- "layout": "IPY_MODEL_181bd6b10e9e4ec693ece948fd432302",
1148
- "placeholder": "​",
1149
- "style": "IPY_MODEL_0c55e54063ea44ab8ea83466d9603a6d",
1150
- "value": " 14/14 [00:01&lt;00:00, 15.95it/s]"
1151
- }
1152
- },
1153
- "9b38fd520d1a4700bbc596b260a9a96f": {
1154
- "model_module": "@jupyter-widgets/controls",
1155
- "model_module_version": "1.5.0",
1156
- "model_name": "HBoxModel",
1157
- "state": {
1158
- "_dom_classes": [],
1159
- "_model_module": "@jupyter-widgets/controls",
1160
- "_model_module_version": "1.5.0",
1161
- "_model_name": "HBoxModel",
1162
- "_view_count": null,
1163
- "_view_module": "@jupyter-widgets/controls",
1164
- "_view_module_version": "1.5.0",
1165
- "_view_name": "HBoxView",
1166
- "box_style": "",
1167
- "children": [
1168
- "IPY_MODEL_5320a84d7a00443e86af8f031d71685d",
1169
- "IPY_MODEL_4f3f1f990d244eb290482be55525daec",
1170
- "IPY_MODEL_9a4eb44d43dc42d9acdb606b6d55ad9f"
1171
- ],
1172
- "layout": "IPY_MODEL_51de9732c1e04961b16351d3f410ac1d"
1173
- }
1174
- },
1175
- "a9e8371d627a48e69c7a725646f689d5": {
1176
- "model_module": "@jupyter-widgets/base",
1177
- "model_module_version": "1.2.0",
1178
- "model_name": "LayoutModel",
1179
- "state": {
1180
- "_model_module": "@jupyter-widgets/base",
1181
- "_model_module_version": "1.2.0",
1182
- "_model_name": "LayoutModel",
1183
- "_view_count": null,
1184
- "_view_module": "@jupyter-widgets/base",
1185
- "_view_module_version": "1.2.0",
1186
- "_view_name": "LayoutView",
1187
- "align_content": null,
1188
- "align_items": null,
1189
- "align_self": null,
1190
- "border": null,
1191
- "bottom": null,
1192
- "display": null,
1193
- "flex": null,
1194
- "flex_flow": null,
1195
- "grid_area": null,
1196
- "grid_auto_columns": null,
1197
- "grid_auto_flow": null,
1198
- "grid_auto_rows": null,
1199
- "grid_column": null,
1200
- "grid_gap": null,
1201
- "grid_row": null,
1202
- "grid_template_areas": null,
1203
- "grid_template_columns": null,
1204
- "grid_template_rows": null,
1205
- "height": null,
1206
- "justify_content": null,
1207
- "justify_items": null,
1208
- "left": null,
1209
- "margin": null,
1210
- "max_height": null,
1211
- "max_width": null,
1212
- "min_height": null,
1213
- "min_width": null,
1214
- "object_fit": null,
1215
- "object_position": null,
1216
- "order": null,
1217
- "overflow": null,
1218
- "overflow_x": null,
1219
- "overflow_y": null,
1220
- "padding": null,
1221
- "right": null,
1222
- "top": null,
1223
- "visibility": null,
1224
- "width": null
1225
- }
1226
- },
1227
- "b40ee74dabec45ce842bcfb983d3fa75": {
1228
- "model_module": "@jupyter-widgets/base",
1229
- "model_module_version": "1.2.0",
1230
- "model_name": "LayoutModel",
1231
- "state": {
1232
- "_model_module": "@jupyter-widgets/base",
1233
- "_model_module_version": "1.2.0",
1234
- "_model_name": "LayoutModel",
1235
- "_view_count": null,
1236
- "_view_module": "@jupyter-widgets/base",
1237
- "_view_module_version": "1.2.0",
1238
- "_view_name": "LayoutView",
1239
- "align_content": null,
1240
- "align_items": null,
1241
- "align_self": null,
1242
- "border": null,
1243
- "bottom": null,
1244
- "display": null,
1245
- "flex": null,
1246
- "flex_flow": null,
1247
- "grid_area": null,
1248
- "grid_auto_columns": null,
1249
- "grid_auto_flow": null,
1250
- "grid_auto_rows": null,
1251
- "grid_column": null,
1252
- "grid_gap": null,
1253
- "grid_row": null,
1254
- "grid_template_areas": null,
1255
- "grid_template_columns": null,
1256
- "grid_template_rows": null,
1257
- "height": null,
1258
- "justify_content": null,
1259
- "justify_items": null,
1260
- "left": null,
1261
- "margin": null,
1262
- "max_height": null,
1263
- "max_width": null,
1264
- "min_height": null,
1265
- "min_width": null,
1266
- "object_fit": null,
1267
- "object_position": null,
1268
- "order": null,
1269
- "overflow": null,
1270
- "overflow_x": null,
1271
- "overflow_y": null,
1272
- "padding": null,
1273
- "right": null,
1274
- "top": null,
1275
- "visibility": null,
1276
- "width": null
1277
- }
1278
- },
1279
- "c6f3cd2404ef4a3096a61c1fcdbddd8f": {
1280
- "model_module": "@jupyter-widgets/controls",
1281
- "model_module_version": "1.5.0",
1282
- "model_name": "ProgressStyleModel",
1283
- "state": {
1284
- "_model_module": "@jupyter-widgets/controls",
1285
- "_model_module_version": "1.5.0",
1286
- "_model_name": "ProgressStyleModel",
1287
- "_view_count": null,
1288
- "_view_module": "@jupyter-widgets/base",
1289
- "_view_module_version": "1.2.0",
1290
- "_view_name": "StyleView",
1291
- "bar_color": null,
1292
- "description_width": ""
1293
- }
1294
- },
1295
- "d4b409c70f3f4398ad88ede8f438e32a": {
1296
- "model_module": "@jupyter-widgets/base",
1297
- "model_module_version": "1.2.0",
1298
- "model_name": "LayoutModel",
1299
- "state": {
1300
- "_model_module": "@jupyter-widgets/base",
1301
- "_model_module_version": "1.2.0",
1302
- "_model_name": "LayoutModel",
1303
- "_view_count": null,
1304
- "_view_module": "@jupyter-widgets/base",
1305
- "_view_module_version": "1.2.0",
1306
- "_view_name": "LayoutView",
1307
- "align_content": null,
1308
- "align_items": null,
1309
- "align_self": null,
1310
- "border": null,
1311
- "bottom": null,
1312
- "display": null,
1313
- "flex": null,
1314
- "flex_flow": null,
1315
- "grid_area": null,
1316
- "grid_auto_columns": null,
1317
- "grid_auto_flow": null,
1318
- "grid_auto_rows": null,
1319
- "grid_column": null,
1320
- "grid_gap": null,
1321
- "grid_row": null,
1322
- "grid_template_areas": null,
1323
- "grid_template_columns": null,
1324
- "grid_template_rows": null,
1325
- "height": null,
1326
- "justify_content": null,
1327
- "justify_items": null,
1328
- "left": null,
1329
- "margin": null,
1330
- "max_height": null,
1331
- "max_width": null,
1332
- "min_height": null,
1333
- "min_width": null,
1334
- "object_fit": null,
1335
- "object_position": null,
1336
- "order": null,
1337
- "overflow": null,
1338
- "overflow_x": null,
1339
- "overflow_y": null,
1340
- "padding": null,
1341
- "right": null,
1342
- "top": null,
1343
- "visibility": null,
1344
- "width": null
1345
- }
1346
- },
1347
- "d7213ef5bbb7409cbe40437bde51b5c9": {
1348
- "model_module": "@jupyter-widgets/base",
1349
- "model_module_version": "1.2.0",
1350
- "model_name": "LayoutModel",
1351
- "state": {
1352
- "_model_module": "@jupyter-widgets/base",
1353
- "_model_module_version": "1.2.0",
1354
- "_model_name": "LayoutModel",
1355
- "_view_count": null,
1356
- "_view_module": "@jupyter-widgets/base",
1357
- "_view_module_version": "1.2.0",
1358
- "_view_name": "LayoutView",
1359
- "align_content": null,
1360
- "align_items": null,
1361
- "align_self": null,
1362
- "border": null,
1363
- "bottom": null,
1364
- "display": null,
1365
- "flex": null,
1366
- "flex_flow": null,
1367
- "grid_area": null,
1368
- "grid_auto_columns": null,
1369
- "grid_auto_flow": null,
1370
- "grid_auto_rows": null,
1371
- "grid_column": null,
1372
- "grid_gap": null,
1373
- "grid_row": null,
1374
- "grid_template_areas": null,
1375
- "grid_template_columns": null,
1376
- "grid_template_rows": null,
1377
- "height": null,
1378
- "justify_content": null,
1379
- "justify_items": null,
1380
- "left": null,
1381
- "margin": null,
1382
- "max_height": null,
1383
- "max_width": null,
1384
- "min_height": null,
1385
- "min_width": null,
1386
- "object_fit": null,
1387
- "object_position": null,
1388
- "order": null,
1389
- "overflow": null,
1390
- "overflow_x": null,
1391
- "overflow_y": null,
1392
- "padding": null,
1393
- "right": null,
1394
- "top": null,
1395
- "visibility": null,
1396
- "width": null
1397
- }
1398
- },
1399
- "e8a00080ca684fcc97189f5f3ea325e3": {
1400
- "model_module": "@jupyter-widgets/controls",
1401
- "model_module_version": "1.5.0",
1402
- "model_name": "ProgressStyleModel",
1403
- "state": {
1404
- "_model_module": "@jupyter-widgets/controls",
1405
- "_model_module_version": "1.5.0",
1406
- "_model_name": "ProgressStyleModel",
1407
- "_view_count": null,
1408
- "_view_module": "@jupyter-widgets/base",
1409
- "_view_module_version": "1.2.0",
1410
- "_view_name": "StyleView",
1411
- "bar_color": null,
1412
- "description_width": ""
1413
- }
1414
- }
1415
- }
1416
- }
1417
- },
1418
- "nbformat": 4,
1419
- "nbformat_minor": 0
1420
- }
notebooks/06-Evaluate_RAG.ipynb DELETED
@@ -1,1491 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "colab_type": "text",
7
- "id": "view-in-github"
8
- },
9
- "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/05-Improve_Prompts_%2B_Add_Source.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
- ]
12
- },
13
- {
14
- "cell_type": "markdown",
15
- "metadata": {
16
- "id": "5BGJ3fxhOk2V"
17
- },
18
- "source": [
19
- "# Install Packages and Setup Variables"
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": 1,
25
- "metadata": {
26
- "colab": {
27
- "base_uri": "https://localhost:8080/"
28
- },
29
- "id": "QPJzr-I9XQ7l",
30
- "outputId": "809b17a0-5b45-4e3c-9d3f-72ad8b0e0d9d"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -q llama-index==0.10.9 openai==1.12.0 cohere==4.47 tiktoken==0.6.0 chromadb==0.4.22"
35
- ]
36
- },
37
- {
38
- "cell_type": "code",
39
- "execution_count": 1,
40
- "metadata": {
41
- "id": "riuXwpSPcvWC"
42
- },
43
- "outputs": [],
44
- "source": [
45
- "import os\n",
46
- "\n",
47
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
48
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
49
- ]
50
- },
51
- {
52
- "cell_type": "code",
53
- "execution_count": 2,
54
- "metadata": {
55
- "id": "km-KQOrgr3VB"
56
- },
57
- "outputs": [],
58
- "source": [
59
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
60
- "\n",
61
- "import nest_asyncio\n",
62
- "\n",
63
- "nest_asyncio.apply()"
64
- ]
65
- },
66
- {
67
- "cell_type": "markdown",
68
- "metadata": {
69
- "id": "Bkgi2OrYzF7q"
70
- },
71
- "source": [
72
- "# Load a Model"
73
- ]
74
- },
75
- {
76
- "cell_type": "code",
77
- "execution_count": 3,
78
- "metadata": {
79
- "id": "9oGT6crooSSj"
80
- },
81
- "outputs": [],
82
- "source": [
83
- "from llama_index.llms.openai import OpenAI\n",
84
- "\n",
85
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
86
- ]
87
- },
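A quick way to confirm the model is wired up correctly (a sketch, assuming the OPENAI_API_KEY set in the earlier cell is valid) is a one-off completion; llm.complete() sends a single prompt and returns a CompletionResponse whose .text holds the output:

    # One-off sanity check of the loaded LLM (requires a valid API key).
    response = llm.complete("Reply with the single word: ready")
    print(response.text)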
88
- {
89
- "cell_type": "markdown",
90
- "metadata": {
91
- "id": "0BwVuJXlzHVL"
92
- },
93
- "source": [
94
- "# Create a VectoreStore"
95
- ]
96
- },
97
- {
98
- "cell_type": "code",
99
- "execution_count": 4,
100
- "metadata": {
101
- "id": "SQP87lHczHKc"
102
- },
103
- "outputs": [],
104
- "source": [
105
- "import chromadb\n",
106
- "\n",
107
- "# create client and a new collection\n",
108
- "# chromadb.EphemeralClient saves data in-memory.\n",
109
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
110
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
111
- ]
112
- },
113
- {
114
- "cell_type": "code",
115
- "execution_count": 5,
116
- "metadata": {
117
- "id": "zAaGcYMJzHAN"
118
- },
119
- "outputs": [],
120
- "source": [
121
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
122
- "\n",
123
- "# Define a storage context object using the created vector database.\n",
124
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
125
- ]
126
- },
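For readers wiring this store into an index directly (the later cells go through an IngestionPipeline instead), a sketch of the usual attachment points, assuming the imports below:

    from llama_index.core import StorageContext, VectorStoreIndex

    # Either pass the store through a storage context when building a new index...
    storage_context = StorageContext.from_defaults(vector_store=vector_store)
    # ...or rebuild an index over an already-populated collection without re-ingesting.
    index = VectorStoreIndex.from_vector_store(vector_store)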
127
- {
128
- "cell_type": "markdown",
129
- "metadata": {
130
- "id": "I9JbAzFcjkpn"
131
- },
132
- "source": [
133
- "# Load the Dataset (CSV)"
134
- ]
135
- },
136
- {
137
- "cell_type": "markdown",
138
- "metadata": {
139
- "id": "_Tif8-JoRH68"
140
- },
141
- "source": [
142
- "## Download"
143
- ]
144
- },
145
- {
146
- "cell_type": "markdown",
147
- "metadata": {
148
- "id": "4fQaa1LN1mXL"
149
- },
150
- "source": [
151
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model."
152
- ]
153
- },
154
- {
155
- "cell_type": "code",
156
- "execution_count": 6,
157
- "metadata": {
158
- "colab": {
159
- "base_uri": "https://localhost:8080/"
160
- },
161
- "id": "fQtpDvUzKNzI",
162
- "outputId": "f170fb33-8edc-4993-8025-b2bc5c0d0e99"
163
- },
164
- "outputs": [
165
- {
166
- "name": "stdout",
167
- "output_type": "stream",
168
- "text": [
169
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
170
- " Dload Upload Total Spent Left Speed\n",
171
- "100 169k 100 169k 0 0 743k 0 --:--:-- --:--:-- --:--:-- 743k\n"
172
- ]
173
- }
174
- ],
175
- "source": [
176
- "!curl -o ./mini-dataset.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
177
- ]
178
- },
179
- {
180
- "cell_type": "markdown",
181
- "metadata": {
182
- "id": "zk-4alIxROo8"
183
- },
184
- "source": [
185
- "## Load the Articles"
186
- ]
187
- },
188
- {
189
- "cell_type": "code",
190
- "execution_count": 7,
191
- "metadata": {
192
- "colab": {
193
- "base_uri": "https://localhost:8080/"
194
- },
195
- "id": "_WER5lt0N7c5",
196
- "outputId": "521f21f1-c84d-4e1b-9983-8ea17e80ea6c"
197
- },
198
- "outputs": [
199
- {
200
- "data": {
201
- "text/plain": [
202
- "14"
203
- ]
204
- },
205
- "execution_count": 7,
206
- "metadata": {},
207
- "output_type": "execute_result"
208
- }
209
- ],
210
- "source": [
211
- "import csv\n",
212
- "\n",
213
- "rows = []\n",
214
- "\n",
215
- "# Load the file as a JSON\n",
216
- "with open(\"./mini-dataset.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
217
- " csv_reader = csv.reader(file)\n",
218
- "\n",
219
- " for idx, row in enumerate(csv_reader):\n",
220
- " if idx == 0:\n",
221
- " continue\n",
222
- " # Skip header row\n",
223
- " rows.append(row)\n",
224
- "\n",
225
- "# The number of characters in the dataset.\n",
226
- "len(rows)"
227
- ]
228
- },
229
- {
230
- "cell_type": "markdown",
231
- "metadata": {
232
- "id": "wxEStggPdxYs"
233
- },
234
- "source": [
235
- "# Convert to Document obj"
236
- ]
237
- },
238
- {
239
- "cell_type": "code",
240
- "execution_count": 8,
241
- "metadata": {
242
- "id": "lFvW_886dxKX"
243
- },
244
- "outputs": [],
245
- "source": [
246
- "from llama_index.core import Document\n",
247
- "from llama_index.core.schema import TextNode\n",
248
- "\n",
249
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
250
- "documents = [\n",
251
- " Document(\n",
252
- " text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}, \n",
253
- " )\n",
254
- " for row in rows\n",
255
- "]\n",
256
- "# By default, the node/chunks ids are set to random uuids. To ensure same id's per run, we manually set them.\n",
257
- "for idx, doc in enumerate(documents):\n",
258
- " doc.id_ = f\"doc_{idx}\""
259
- ]
260
- },
261
- {
262
- "cell_type": "code",
263
- "execution_count": 9,
264
- "metadata": {
265
- "colab": {
266
- "base_uri": "https://localhost:8080/"
267
- },
268
- "id": "Njoc3XEVkKkf",
269
- "outputId": "8dec6077-4301-44ed-ad9b-95d943e00af6"
270
- },
271
- "outputs": [
272
- {
273
- "data": {
274
- "text/plain": [
275
- "Document(id_='doc_0', embedding=None, metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={}, text='LLM Variants and Meta\\'s Open Source Before shedding light on four major trends, I\\'d share the latest Meta\\'s Llama 2 and Code Llama. Meta\\'s Llama 2 represents a sophisticated evolution in LLMs. This suite spans models pretrained and fine-tuned across a parameter spectrum of 7 billion to 70 billion. A specialized derivative, Llama 2-Chat, has been engineered explicitly for dialogue-centric applications. Benchmarking revealed Llama 2\\'s superior performance over most extant open-source chat models. Human-centric evaluations, focusing on safety and utility metrics, positioned Llama 2-Chat as a potential contender against proprietary, closed-source counterparts. The development trajectory of Llama 2 emphasized rigorous fine-tuning methodologies. Meta\\'s transparent delineation of these processes aims to catalyze community-driven advancements in LLMs, underscoring a commitment to collaborative and responsible AI development. Code Llama is built on top of Llama 2 and is available in three models: Code Llama, the foundational code model;Codel Llama - Python specialized for Python;and Code Llama - Instruct, which is fine-tuned for understanding natural language instructions. Based on its benchmark testing, Code Llama outperformed state-of-the-art publicly available LLMs (except GPT-4) on code tasks. Llama 2, Llama 2-Chat, and Code Llama are key steps in LLM development but still have a way to go compared to GPT-4. Meta\\'s open access and commitment to improving these models promise transparent and faster LLM progress in the future. Please refer to the LLM and Llama variants below: From LLMs to Multimodal LLMs, like OpenAI\\'s ChatGPT (GPT-3.5), primarily focus on understanding and generating human language. They\\'ve been instrumental in tasks like text generation, translation, and even creative writing. However, their scope is limited to text. Enter multimodal models like GPT-4. These are a new breed of AI models that can understand and generate not just text, but also images, sounds, and potentially other types of data. The term \"multimodal\" refers to their ability to process multiple modes or types of data simultaneously. This is a game-changer. Imagine an AI that can not only read a description of a dress but also visualize it or even design it! Multimodal AI models are moving us towards more holistic AI systems. These systems can potentially understand our world in a more comprehensive manner, bridging the gap between different forms of data and providing richer, more integrated solutions. As we stand on the cusp of this new era, it\\'s exciting to envision the myriad of applications and innovations that Multimodal models will bring to the table. The future of AI looks more integrated and versatile than ever before. From Connections to Vector DB The AI landscape is witnessing a fascinating transition: from Language Model (LLM) connections or integrations, e.g., LangChain and LlamaIndex, to the rise of Vector Databases (Vector DB) such as Weaviate, Milvus, Pinecone, Chroma, and Vespa.ai. But what\\'s driving this shift, and why does it matter? LLM connections, like the LlamaIndex, primarily focus on linking and understanding vast amounts of external data. 
They\\'ve been pivotal in creating semantic connections, enabling more intuitive search experiences, and enhancing data accessibility. However, as the volume and variety of data grow, the need for more advanced storage and retrieval mechanisms becomes evident. This is where Vector DBs come into play. Unlike traditional databases that store data in rows and columns, Vector DBs store data in high-dimensional space, allowing for more efficient and accurate similarity searches. Tools like Weaviate and Milvus are designed to handle massive datasets, making them ideal for tasks like image recognition, recommendation systems, and more. The rise of Vector DBs represents a broader trend in AI: the quest for more efficient, scalable, and versatile data handling solutions. As we navigate this evolution, it\\'s clear that the combination of LLMs and Vector DBs will redefine how we store, access, and understand data in the AI-driven future. From Agents to OS The AI realm is abuzz with innovations, and one of the most intriguing shifts we\\'re witnessing is the transition from LLM agents to using LLMs as Operating Systems (OS). Let\\'s delve into this evolution and its implications. LLM agents, like AutoGPT, AgentGPT, BabyAGI, and HuggingGPT, have been groundbreaking in automating tasks based on user requests. These agents leverage the power of Language Models (LLMs) to understand and execute commands, making them invaluable in tasks ranging from content generation to data analysis. Their adaptability and intelligence have made them a staple in many AI toolkits. However, the vision for AI doesn\\'t stop there. The concept of LLM as an OS is emerging as the next big thing. Imagine an operating system where the core is a language model, orchestrating everything around it. Such a system would not just execute tasks but would understand context, anticipate needs, and offer solutions in real time. It\\'s like turning the LLM into the brain of the digital ecosystem, making devices and applications more intuitive and responsive than ever. The move towards LLM as OS signifies a paradigm shift in how we perceive and utilize AI. It\\'s not just about automation anymore; it\\'s about creating a seamless, intelligent interface between humans and technology. As we stand on the brink of this transformation, the potential for LLM-driven OS to revolutionize our digital interactions is immense. From Fine-tuning to Plugins The world of LLMs is undergoing a transformative shift, moving from intricate fine-tuning processes to the more dynamic realm of plugins. Let\\'s unpack this evolution. Historically, fine-tuning has been the cornerstone of LLM optimization. There are two primary ways to fine-tune LLMs: feeding data into the LLM in real-time and directly fine-tuning on the LLM. From a technical standpoint, this involves three methods: Transfer Learning: Adapting a pre-trained model to new tasks.Sequential Fine-tuning: Refining models in stages for specific tasks.Task-specific Fine-tuning: Tailoring models for a particular function. Moreover, LLM techniques like In-context learning, Few-shot learning, and Zero-shot learning have further enhanced the model\\'s adaptability, allowing them to understand and generate content with minimal data. However, the future of LLMs is leaning towards plugins. With the introduction of tools like GPT-4 Plugins, the focus is on extending LLMs seamlessly. Instead of running LLMs as a service, they\\'re envisioned as platforms. 
This means integrating LLMs with various tools, enhancing their capabilities, and offering a more modular and scalable approach to AI applications. The journey from fine-tuning to plugins represents a move from static optimization to dynamic adaptability, ensuring that LLMs remain at the forefront of AI innovation. In a Nutshell The AI domain is witnessing rapid shifts, with LLMs playing a central role. Initially, the move was from LLMs to Multimodal models, expanding from text to include images and sounds. Simultaneously, the trend shifted from LLM connections, which linked external data, to Vector Databases for efficient high-dimensional storage. Another evolution saw LLM agents, which automated tasks, transitioning towards LLMs as Operating Systems. This change aims for more intuitive, context-aware devices and applications. Furthermore, the traditional fine-tuning processes of LLMs are now being replaced by dynamic plugins, turning LLMs into platforms integrated with various tools. Leading this LLM revolution are OpenAI\\'s GPT-4 and Meta\\'s LLaMA2. Their pioneering efforts are setting the stage for an AI future that\\'s more integrated, responsive, and attuned to human interactions. More Readings Harnessing the Power of LLMs in Practice: A Survey on ChatGPT and Beyond: https://arxiv.org/abs/2304.13712Sparks of Artificial General Intelligence: Early experiments with GPT-4: https://arxiv.org/abs/2303.12712GPT4All-J: https://huggingface.co/nomic-ai/gpt4all-jIntroducing Code Llama, a state-of-the-art large language model for coding: https://ai.meta.com/blog/code-llama-large-language-model-coding/Llama 2: Open Foundation and Fine-Tuned Chat Models: https://ai.meta.com/research/publications/llama-2-open-foundation-and-fine-tuned-chat-models/', start_char_idx=None, end_char_idx=None, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n')"
276
- ]
277
- },
278
- "execution_count": 9,
279
- "metadata": {},
280
- "output_type": "execute_result"
281
- }
282
- ],
283
- "source": [
284
- "documents[0]"
285
- ]
286
- },
287
- {
288
- "cell_type": "markdown",
289
- "metadata": {
290
- "id": "S17g2RYOjmf2"
291
- },
292
- "source": [
293
- "# Transforming"
294
- ]
295
- },
296
- {
297
- "cell_type": "code",
298
- "execution_count": 10,
299
- "metadata": {
300
- "id": "STACTMUR1z9N"
301
- },
302
- "outputs": [],
303
- "source": [
304
- "from llama_index.core.node_parser import TokenTextSplitter\n",
305
- "from llama_index.core.schema import BaseNode\n",
306
- "import hashlib\n",
307
- "\n",
308
- "def deterministic_id_func(i: int, doc: BaseNode) -> str:\n",
309
- " \"\"\"Deterministic ID function for the text splitter.\n",
310
- " This will be used to generate a unique repeatable identifier for each node.\"\"\"\n",
311
- " unique_identifier = doc.id_ + str(i)\n",
312
- " hasher = hashlib.sha256()\n",
313
- " hasher.update(unique_identifier.encode('utf-8')) \n",
314
- " return hasher.hexdigest()\n",
315
- "\n",
316
- "text_splitter = TokenTextSplitter(separator=\" \", chunk_size=512, chunk_overlap=128, id_func=deterministic_id_func)"
317
- ]
318
- },
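The point of id_func is repeatability: the SHA-256 of doc.id_ plus the chunk index is a pure function of its inputs, so re-running the notebook reproduces the same node IDs. A small check illustrating this, using the documents list defined above:

    # Calling the ID function twice with the same document and chunk index
    # returns the same hash, unlike the default random UUIDs.
    a = deterministic_id_func(0, documents[0])
    b = deterministic_id_func(0, documents[0])
    assert a == b  # deterministic across calls (and across runs)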
319
- {
320
- "cell_type": "code",
321
- "execution_count": 11,
322
- "metadata": {
323
- "colab": {
324
- "base_uri": "https://localhost:8080/",
325
- "height": 331,
326
- "referenced_widgets": [
327
- "76fea2dabfea42aa8bc7ae719f2a22ee",
328
- "6c575687c8f1468a803b88eea3d26b7b",
329
- "c266531dafcf4624af5fe9bcbc9d8df9",
330
- "e20a27a2f7764cb4b9537e34a3659c9a",
331
- "bba307f545cd4533be6f0489f95b9895",
332
- "eb057e56f0f94e4993b8ae960c78b0ad",
333
- "2073b65c0db045aa8e86d91a4fea2e2b",
334
- "8141417665024172a4baa78c497acb69",
335
- "01d27fdbe86a4ca2830b9bf3ccbf1ae9",
336
- "e4fe85a095e64d52b6a53c2a4bba8aeb",
337
- "70e17db8fc2f490f85b7af8aa664f0c7",
338
- "c0a70bcdf3fb4bbfb2675b8012b2ef24",
339
- "665b9b5e85a34be8a20d40c51e57cfe0",
340
- "b604cef3deca4847afcc459e5c8a9e0f",
341
- "076728d713254b49935c7938d18014f2",
342
- "be591abb84a24c4b9903087501ebb0e5",
343
- "85f23ab21c3b404aaa146cfcaefc85d8",
344
- "10340f8e7c8e482c8d35047a3e43ee7f",
345
- "1095efa793804a3fb625855e715a5317",
346
- "b43a5a6a65034a16927700e442dde52a",
347
- "121dbf44a222434cbc57ebe6beb83e2a",
348
- "2af0821ebb7e47988d134d4ec2776e87"
349
- ]
350
- },
351
- "id": "CtdsIUQ81_hT",
352
- "outputId": "325e8cd3-ce27-4ab0-e542-cbfdb7a0debb"
353
- },
354
- "outputs": [
355
- {
356
- "name": "stderr",
357
- "output_type": "stream",
358
- "text": [
359
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
360
- " from .autonotebook import tqdm as notebook_tqdm\n",
361
- "Parsing nodes: 0%| | 0/14 [00:00<?, ?it/s]"
362
- ]
363
- },
364
- {
365
- "name": "stderr",
366
- "output_type": "stream",
367
- "text": [
368
- "Parsing nodes: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 14/14 [00:00<00:00, 38.11it/s]\n",
369
- "Generating embeddings: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:01<00:00, 75.25it/s]\n"
370
- ]
371
- }
372
- ],
373
- "source": [
374
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
375
- "from llama_index.core.ingestion import IngestionPipeline\n",
376
- "\n",
377
- "pipeline = IngestionPipeline(\n",
378
- " transformations=[\n",
379
- " text_splitter,\n",
380
- " OpenAIEmbedding(),\n",
381
- " ],\n",
382
- " vector_store=vector_store\n",
383
- ")\n",
384
- "\n",
385
- "nodes = pipeline.run(documents=documents, show_progress=True)"
386
- ]
387
- },
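Per the progress bars above, the 14 documents were split into 108 chunks and embedded. A quick verification sketch that the pipeline actually persisted them to the Chroma collection:

    print(len(nodes))                 # 108 chunks produced by the splitter
    print(chroma_collection.count())  # 108 embeddings stored in Chroma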
388
- {
389
- "cell_type": "code",
390
- "execution_count": 12,
391
- "metadata": {},
392
- "outputs": [
393
- {
394
- "data": {
395
- "text/plain": [
396
- "TextNode(id_='4ab5bd897f01474fc9b0049f95e31edae3ccd9e74d0f0acd3932b50a74d608b6', embedding=[-0.022741511464118958, 0.010871483013033867, -0.017776913940906525, -0.013163917697966099, 0.004405552521348, 0.013564742170274258, -0.02842337265610695, 0.025638697668910027, -0.03861978277564049, -0.02869058959186077, 0.02842337265610695, 0.028282733634114265, -0.028310861438512802, -0.014127302914857864, 0.008079776540398598, 0.01933801919221878, 0.014879727736115456, 0.0029657490085810423, 0.004658704623579979, -0.004802860785275698, -0.0027108388021588326, 8.63068999024108e-05, -0.006613602861762047, -0.01984432525932789, 0.004848569165915251, 0.026398155838251114, 0.025976235046982765, -0.028887486085295677, -0.017312802374362946, 0.001968962140381336, 0.01291076559573412, 0.014056982472538948, -0.029225021600723267, -0.00135805644094944, -0.013853054493665695, -0.017256546765565872, 0.01682056114077568, -0.0057416339404881, 0.035750724375247955, -0.010927739553153515, 0.014296070672571659, 0.007974295876920223, 0.006483510602265596, -0.030462656170129776, -0.027888940647244453, -8.394458563998342e-05, 0.022572742775082588, -0.02655285969376564, -0.025498058646917343, 0.0010969931026920676, -0.004036372061818838, 0.04545489326119423, -0.03299417719244957, 0.019858388230204582, 0.0024524126201868057, -0.004117240197956562, 0.006311226636171341, -0.0013053163420408964, 0.02604655548930168, 0.013824926689267159, -0.0024770244490355253, -0.004141852259635925, -0.017819106578826904, 0.021278854459524155, -0.010730843059718609, -0.00561505788937211, -0.030575167387723923, 0.033022306859493256, 0.008930649608373642, -0.008635304868221283, -0.0006724356790073216, 0.01545635238289833, 0.008473568595945835, -0.022910280153155327, 0.028831230476498604, 0.007833655923604965, -0.018578562885522842, -0.02040688507258892, -0.024935496971011162, 0.006392094772309065, 0.017003392800688744, 0.003584565594792366, -0.001132153207436204, 0.03456934913992882, 0.017383122816681862, -0.005024369340389967, 0.02116634137928486, -0.019155187532305717, -0.011982540600001812, -0.027087291702628136, -0.0009071289678104222, -0.0011550071649253368, 0.05105237290263176, 0.022249270230531693, -0.031644031405448914, 0.0063604507595300674, -0.01480940729379654, 0.014000726863741875, -0.020899126306176186, -0.021827351301908493, -0.025287097319960594, -0.019112994894385338, -0.018086323514580727, -0.019731812179088593, -0.015400095842778683, 0.010189378634095192, 0.01698932982981205, 0.021672645583748817, 0.0048942770808935165, 0.03127836808562279, -0.01703152246773243, 0.045567408204078674, 0.005386517383158207, -0.04013869911432266, -0.017354993149638176, 0.0065186708234250546, 0.027720171958208084, -0.010751939378678799, -0.009275217540562153, 0.022010182961821556, 0.02680601179599762, 0.02210863120853901, 0.00830480083823204, -0.00379376788623631, 0.021025702357292175, 9.32290349737741e-05, -0.016398640349507332, -0.003577533643692732, -0.020055284723639488, 0.0017799768829718232, 0.023543160408735275, 0.024190105497837067, 0.03380989283323288, 0.004201624542474747, -0.03794471174478531, 0.02441512979567051, -0.02019592560827732, -0.013227205723524094, -0.02594810724258423, -0.01770659349858761, -0.0036144517362117767, 0.02594810724258423, 0.003022005083039403, 0.013613966293632984, -0.020055284723639488, 0.017987875267863274, 0.021278854459524155, 0.014401551336050034, 0.026398155838251114, 0.0005067440215498209, 0.005400581751018763, -0.03347235545516014, -0.021967990323901176, 0.011806740425527096, 0.002165858168154955, 
... (remaining embedding floats truncated for readability) ...], metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='doc_0', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, hash='3b095b0e25cdf965d950cdbd7feb8024030e7645998c1a33dc4427affca624ab'), <NodeRelationship.NEXT: '3'>: RelatedNodeInfo(node_id='e470fa0d001e50b3ec3088022462a94ea7c87dd80106411b7d120f90b379e977', node_type=<ObjectType.TEXT: '1'>, metadata={}, hash='71418de3d50e604c2581574f1abf2248e5cc3ab7c74a3182c37cb1152d0cfd21')}, text='LLM Variants and Meta\'s Open Source Before shedding light on four major trends, I\'d share the latest Meta\'s Llama 2 and Code Llama. 
Meta\\'s Llama 2 represents a sophisticated evolution in LLMs. This suite spans models pretrained and fine-tuned across a parameter spectrum of 7 billion to 70 billion. A specialized derivative, Llama 2-Chat, has been engineered explicitly for dialogue-centric applications. Benchmarking revealed Llama 2\\'s superior performance over most extant open-source chat models. Human-centric evaluations, focusing on safety and utility metrics, positioned Llama 2-Chat as a potential contender against proprietary, closed-source counterparts. The development trajectory of Llama 2 emphasized rigorous fine-tuning methodologies. Meta\\'s transparent delineation of these processes aims to catalyze community-driven advancements in LLMs, underscoring a commitment to collaborative and responsible AI development. Code Llama is built on top of Llama 2 and is available in three models: Code Llama, the foundational code model;Codel Llama - Python specialized for Python;and Code Llama - Instruct, which is fine-tuned for understanding natural language instructions. Based on its benchmark testing, Code Llama outperformed state-of-the-art publicly available LLMs (except GPT-4) on code tasks. Llama 2, Llama 2-Chat, and Code Llama are key steps in LLM development but still have a way to go compared to GPT-4. Meta\\'s open access and commitment to improving these models promise transparent and faster LLM progress in the future. Please refer to the LLM and Llama variants below: From LLMs to Multimodal LLMs, like OpenAI\\'s ChatGPT (GPT-3.5), primarily focus on understanding and generating human language. They\\'ve been instrumental in tasks like text generation, translation, and even creative writing. However, their scope is limited to text. Enter multimodal models like GPT-4. These are a new breed of AI models that can understand and generate not just text, but also images, sounds, and potentially other types of data. The term \"multimodal\" refers to their ability to process multiple modes or', start_char_idx=0, end_char_idx=2117, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n')"
397
- ]
398
- },
399
- "execution_count": 12,
400
- "metadata": {},
401
- "output_type": "execute_result"
402
- }
403
- ],
404
- "source": [
405
- "nodes[0]"
406
- ]
407
- },
408
- {
409
- "cell_type": "markdown",
410
- "metadata": {
411
- "id": "EV0ll57p46Dc"
412
- },
413
- "source": [
414
- "# Load Indexes"
415
- ]
416
- },
417
- {
418
- "cell_type": "code",
419
- "execution_count": 13,
420
- "metadata": {
421
- "id": "PS215gCGkGD-"
422
- },
423
- "outputs": [],
424
- "source": [
425
- "# Create your index\n",
426
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
427
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
428
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
429
- ]
430
- },
431
- {
432
- "cell_type": "code",
433
- "execution_count": 14,
434
- "metadata": {
435
- "id": "HbT3-kRO4Qpt"
436
- },
437
- "outputs": [],
438
- "source": [
439
- "# Create your index\n",
440
- "from llama_index.core import VectorStoreIndex\n",
441
- "\n",
442
- "index = VectorStoreIndex.from_vector_store(vector_store)"
443
- ]
444
- },
445
- {
446
- "cell_type": "code",
447
- "execution_count": 15,
448
- "metadata": {
449
- "id": "sb61DWU84bHP"
450
- },
451
- "outputs": [],
452
- "source": [
453
- "query_engine = index.as_query_engine()"
454
- ]
455
- },
456
- {
457
- "cell_type": "code",
458
- "execution_count": 16,
459
- "metadata": {
460
- "id": "G32W2LMMCmnv"
461
- },
462
- "outputs": [],
463
- "source": [
464
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
465
- ]
466
- },
467
- {
468
- "cell_type": "code",
469
- "execution_count": 17,
470
- "metadata": {
471
- "colab": {
472
- "base_uri": "https://localhost:8080/",
473
- "height": 53
474
- },
475
- "id": "obc20cU5Cxf2",
476
- "outputId": "837babce-9edf-4a3f-f996-c0c407ae027c"
477
- },
478
- "outputs": [
479
- {
480
- "data": {
481
- "text/plain": [
482
- "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
483
- ]
484
- },
485
- "execution_count": 17,
486
- "metadata": {},
487
- "output_type": "execute_result"
488
- }
489
- ],
490
- "source": [
491
- "res.response"
492
- ]
493
- },
494
- {
495
- "cell_type": "code",
496
- "execution_count": 18,
497
- "metadata": {
498
- "colab": {
499
- "base_uri": "https://localhost:8080/"
500
- },
501
- "id": "oIAO-saJCzYe",
502
- "outputId": "bce85c7c-502c-4a7b-f3e2-f721f3d6b5a4"
503
- },
504
- "outputs": [
505
- {
506
- "name": "stdout",
507
- "output_type": "stream",
508
- "text": [
509
- "Node ID\t f707756065d1f788b41fb97fcef81979e1fd241dbfa4034a24bec8e57b648482\n",
510
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
511
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
512
- "Score\t 0.7122361910421624\n",
513
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
514
- "Node ID\t 636f98cf8754c3a4759da02aa11a3f2aa7cdeb848a4980ec99300ece4a2e92fd\n",
515
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
516
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
517
- "Score\t 0.7047493574957753\n",
518
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
519
- ]
520
- }
521
- ],
522
- "source": [
523
- "for src in res.source_nodes:\n",
524
- " print(\"Node ID\\t\", src.node_id)\n",
525
- " print(\"Title\\t\", src.metadata['title'])\n",
526
- " print(\"Text\\t\", src.text)\n",
527
- " print(\"Score\\t\", src.score)\n",
528
- " print(\"-_\"*20)"
529
- ]
530
- },
531
- {
532
- "cell_type": "markdown",
533
- "metadata": {
534
- "id": "d4xxZHbdN0lK"
535
- },
536
- "source": [
537
- "# Evaluate the retrieval process and quality of answers\n",
538
- "\n",
539
- "We can evaluate our RAG system with a dataset of questions and associated chunks. Given a question, we can see if the RAG system retrieves the correct chunks of text that can answer the question.\n",
540
- "\n",
541
- "You can generate a synthetic dataset with an LLM such as `gpt-3.5-turbo` or create an authentic and manually curated dataset. \n",
542
- "\n",
543
- "Note that a **well curated dataset will always be a better option**, especially for a specific domain or use case.\n"
544
- ]
545
- },
546
- {
547
- "cell_type": "markdown",
548
- "metadata": {},
549
- "source": [
550
- "In our example, we will generate a synthetic dataset using `gpt-3.5-turbo` to make it simple.\n",
551
- "\n",
552
- "This is the default prompt that the `generate_question_context_pairs` function will uses:\n",
553
- "\n",
554
- "```python\n",
555
- "DEFAULT_QA_GENERATE_PROMPT_TMPL = \"\"\"\\\n",
556
- "Context information is below.\n",
557
- "\n",
558
- "---------------------\n",
559
- "{context_str}\n",
560
- "---------------------\n",
561
- "\n",
562
- "Given the context information and no prior knowledge,\n",
563
- "generate only questions based on the below query.\n",
564
- "\n",
565
- "You are a Teacher/Professor. Your task is to setup \\\n",
566
- "{num_questions_per_chunk} questions for an upcoming \\\n",
567
- "quiz/examination. The questions should be diverse in nature \\\n",
568
- "across the document. Restrict the questions to the \\\n",
569
- "context information provided.\"\n",
570
- "\"\"\"\n",
571
- "```\n",
572
- "\n"
573
- ]
574
- },
575
- {
576
- "cell_type": "code",
577
- "execution_count": 19,
578
- "metadata": {},
579
- "outputs": [
580
- {
581
- "name": "stderr",
582
- "output_type": "stream",
583
- "text": [
584
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [05:59<00:00, 3.33s/it]\n"
585
- ]
586
- }
587
- ],
588
- "source": [
589
- "from llama_index.core.evaluation import generate_question_context_pairs\n",
590
- "from llama_index.llms.openai import OpenAI\n",
591
- "\n",
592
- "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
593
- "rag_eval_dataset = generate_question_context_pairs(\n",
594
- " nodes,\n",
595
- " llm=llm,\n",
596
- " num_questions_per_chunk=1\n",
597
- ")\n",
598
- "# We can save the dataset as a json file for later use.\n",
599
- "rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")"
600
- ]
601
- },
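If the default question style doesn't fit your domain, the template can be overridden. A minimal sketch, assuming the `qa_generate_prompt_tmpl` parameter of `generate_question_context_pairs` (verify the signature in your installed LlamaIndex version) and keeping the `{context_str}` / `{num_questions_per_chunk}` placeholders that the default template expects:

```python
# Hypothetical custom template; the placeholder names must match the defaults.
CUSTOM_QA_PROMPT = """\
Context information is below.

---------------------
{context_str}
---------------------

Given only the context above and no prior knowledge, write \
{num_questions_per_chunk} specific, self-contained quiz questions \
that can be answered from this context alone.
"""

# rag_eval_dataset = generate_question_context_pairs(
#     nodes,
#     llm=llm,
#     qa_generate_prompt_tmpl=CUSTOM_QA_PROMPT,
#     num_questions_per_chunk=1,
# )
```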
602
- {
603
- "cell_type": "code",
604
- "execution_count": 20,
605
- "metadata": {},
606
- "outputs": [],
607
- "source": [
608
- "# We can also load the dataset from a previously saved json file.\n",
609
- "from llama_index.core.evaluation import EmbeddingQAFinetuneDataset\n",
610
- "rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
611
- " \"./rag_eval_dataset.json\"\n",
612
- ")"
613
- ]
614
- },
615
- {
616
- "cell_type": "markdown",
617
- "metadata": {},
618
- "source": [
619
- "### Evaluation for Hit Rate and Mean Reciprocal Rank (MRR)\n",
620
- "\n",
621
- "We will make use of `RetrieverEvaluator` available in Llama-index. We will measure the Hit Rate and Mean Reciprocal Rank (MRR).\n",
622
- "\n",
623
- "**Hit Rate:**\n",
624
- "\n",
625
- "Think of the Hit Rate like playing a game of guessing. You're given a question and you need to guess the correct answer from a list of options. The Hit Rate measures how often you guess the correct answer by only looking at your top few guesses. If you often find the right answer in your first few guesses, you have a high Hit Rate. So, in the context of a retrieval system, it's about how frequently the system finds the correct document within its top 'k' picks (where 'k' is a number you decide, like top 5 or top 10).\n",
626
- "\n",
627
- "**Mean Reciprocal Rank (MRR):**\n",
628
- "\n",
629
- "MRR is a bit like measuring how quickly you can find a treasure in a list of boxes. Imagine you have a row of boxes and only one of them has a treasure. The MRR calculates how close to the start of the row the treasure box is, on average. If the treasure is always in the first box you open, you're doing great and have an MRR of 1. If it's in the second box, the score is 1/2, since you took two tries to find it. If it's in the third box, your score is 1/3, and so on. MRR averages these scores across all your searches. So, for a retrieval system, MRR looks at where the correct document ranks in the system's guesses. If it's usually near the top, the MRR will be high, indicating good performance.\n",
630
- "In summary, Hit Rate tells you how often the system gets it right in its top guesses, and MRR tells you how close to the top the right answer usually is. Both metrics are useful for evaluating the effectiveness of a retrieval system, like how well a search engine or a recommendation system works."
631
- ]
632
- },
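To make the two metrics concrete, here is a small, self-contained sketch that computes Hit Rate and MRR by hand; the ranks are invented for illustration and are not taken from the evaluation below:

```python
# Rank (1-based) at which the relevant chunk appeared for each query,
# or None when it was missed entirely within the top-k results.
ranks = [1, 3, None, 2, 1]
k = 5

# Hit Rate: fraction of queries whose relevant chunk appears in the top k.
hit_rate = sum(r is not None and r <= k for r in ranks) / len(ranks)

# MRR: average of 1/rank across queries, counting a miss as 0.
mrr = sum(1.0 / r for r in ranks if r is not None) / len(ranks)

print(f"hit_rate@{k} = {hit_rate:.2f}")  # 4 hits out of 5 -> 0.80
print(f"MRR = {mrr:.2f}")  # (1 + 1/3 + 0 + 1/2 + 1) / 5 ~= 0.57
```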
633
- {
634
- "cell_type": "code",
635
- "execution_count": 21,
636
- "metadata": {},
637
- "outputs": [],
638
- "source": [
639
- "import pandas as pd\n",
640
- "\n",
641
- "def display_results_retriever(name, eval_results):\n",
642
- " \"\"\"Display results from evaluate.\"\"\"\n",
643
- "\n",
644
- " metric_dicts = []\n",
645
- " for eval_result in eval_results:\n",
646
- " metric_dict = eval_result.metric_vals_dict\n",
647
- " metric_dicts.append(metric_dict)\n",
648
- "\n",
649
- " full_df = pd.DataFrame(metric_dicts)\n",
650
- "\n",
651
- " hit_rate = full_df[\"hit_rate\"].mean()\n",
652
- " mrr = full_df[\"mrr\"].mean()\n",
653
- "\n",
654
- " metric_df = pd.DataFrame(\n",
655
- " {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
656
- " )\n",
657
- "\n",
658
- " return metric_df"
659
- ]
660
- },
661
- {
662
- "cell_type": "code",
663
- "execution_count": 22,
664
- "metadata": {},
665
- "outputs": [
666
- {
667
- "name": "stdout",
668
- "output_type": "stream",
669
- "text": [
670
- " Retriever Name Hit Rate MRR\n",
671
- "0 Retriever top_2 0.703557 0.570158\n",
672
- " Retriever Name Hit Rate MRR\n",
673
- "0 Retriever top_4 0.822134 0.606884\n",
674
- " Retriever Name Hit Rate MRR\n",
675
- "0 Retriever top_6 0.857708 0.613472\n",
676
- " Retriever Name Hit Rate MRR\n",
677
- "0 Retriever top_8 0.883399 0.616937\n",
678
- " Retriever Name Hit Rate MRR\n",
679
- "0 Retriever top_10 0.901186 0.61904\n"
680
- ]
681
- }
682
- ],
683
- "source": [
684
- "from llama_index.core.evaluation import RetrieverEvaluator\n",
685
- "\n",
686
- "# We can evaluate the retievers with different top_k values.\n",
687
- "for i in [2, 4, 6, 8, 10]:\n",
688
- " retriever = index.as_retriever(similarity_top_k=i)\n",
689
- " retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
690
- " [\"mrr\", \"hit_rate\"], retriever=retriever\n",
691
- " )\n",
692
- " eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset, workers=32)\n",
693
- " print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
694
- ]
695
- },
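A practical note on the `await` above: it relies on the notebook's already-running event loop. Outside Jupyter you would drive the same coroutine with `asyncio.run`; in environments where a loop is already running, `nest_asyncio` is the usual workaround. A sketch under those assumptions:

```python
import asyncio

# In Jupyter/Colab, uncomment the next two lines instead of using asyncio.run:
# import nest_asyncio
# nest_asyncio.apply()

async def evaluate_retrievers(top_k_values=(2, 4, 6, 8, 10)):
    """Run the same retriever evaluation loop as above, collecting results per top_k."""
    all_results = {}
    for i in top_k_values:
        retriever = index.as_retriever(similarity_top_k=i)
        evaluator = RetrieverEvaluator.from_metric_names(
            ["mrr", "hit_rate"], retriever=retriever
        )
        all_results[i] = await evaluator.aevaluate_dataset(rag_eval_dataset, workers=32)
    return all_results

# In a plain Python script:
# results = asyncio.run(evaluate_retrievers())
```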
696
- {
697
- "cell_type": "markdown",
698
- "metadata": {},
699
- "source": [
700
- "### Evaluation using Relevance and Faithfulness metrics.\n",
701
- "\n",
702
- "Here, we evaluate the answer generated by the LLM. Is the answer using the correct context? Is the answer faithful to the context? Is the answer relevant to the question?\n",
703
- "\n",
704
- "An LLM will answer these questions, more specifically `gpt-4-0125-preview`.\n",
705
- "\n",
706
- "**`FaithfulnessEvaluator`**\n",
707
- "Evaluates if the answer is faithful to the retrieved contexts (in other words, whether there's an hallucination).\n",
708
- "\n",
709
- "**`RelevancyEvaluator`**\n",
710
- "Evaluates whether the retrieved context and answer are relevant to the user question.\n",
711
- "\n",
712
- "\n",
713
- "Now, let's see how the top_k value affects these two metrics."
714
- ]
715
- },
716
- {
717
- "cell_type": "code",
718
- "execution_count": 23,
719
- "metadata": {},
720
- "outputs": [
721
- {
722
- "name": "stdout",
723
- "output_type": "stream",
724
- "text": [
725
- "top_2 faithfulness_score: 1.0\n",
726
- "top_2 relevancy_score: 1.0\n",
727
- "top_4 faithfulness_score: 1.0\n",
728
- "top_4 relevancy_score: 0.95\n",
729
- "top_6 faithfulness_score: 1.0\n",
730
- "top_6 relevancy_score: 0.95\n",
731
- "top_8 faithfulness_score: 0.65\n",
732
- "top_8 relevancy_score: 0.7\n",
733
- "top_10 faithfulness_score: 0.45\n",
734
- "top_10 relevancy_score: 0.5\n"
735
- ]
736
- }
737
- ],
738
- "source": [
739
- "from llama_index.core.evaluation import RelevancyEvaluator, FaithfulnessEvaluator, BatchEvalRunner\n",
740
- "from llama_index.llms.openai import OpenAI\n",
741
- "\n",
742
- "llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
743
- "\n",
744
- "faithfulness_evaluator = FaithfulnessEvaluator(llm=llm_gpt4)\n",
745
- "relevancy_evaluator = RelevancyEvaluator(llm=llm_gpt4)\n",
746
- "\n",
747
- "# Run evaluation\n",
748
- "queries = list(rag_eval_dataset.queries.values())\n",
749
- "batch_eval_queries = queries[:20]\n",
750
- "\n",
751
- "runner = BatchEvalRunner(\n",
752
- "{\"faithfulness\": faithfulness_evaluator, \"relevancy\": relevancy_evaluator},\n",
753
- "workers=32,\n",
754
- ")\n",
755
- "\n",
756
- "for i in [2, 4, 6, 8, 10]:\n",
757
- " # Set Faithfulness and Relevancy evaluators\n",
758
- " query_engine = index.as_query_engine(similarity_top_k=i)\n",
759
- "\n",
760
- " eval_results = await runner.aevaluate_queries(\n",
761
- " query_engine, queries=batch_eval_queries\n",
762
- " )\n",
763
- " faithfulness_score = sum(result.passing for result in eval_results['faithfulness']) / len(eval_results['faithfulness'])\n",
764
- " print(f\"top_{i} faithfulness_score: {faithfulness_score}\")\n",
765
- "\n",
766
- " relevancy_score = sum(result.passing for result in eval_results['relevancy']) / len(eval_results['relevancy'])\n",
767
- " print(f\"top_{i} relevancy_score: {relevancy_score}\")\n"
768
- ]
769
- },
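Note how faithfulness and relevancy hold up through top_6 and then drop sharply at top_8 and top_10: packing more, less-relevant chunks into the context gives the model more room to drift from the retrieved evidence. To see which queries failed, you can inspect individual results; a minimal sketch, assuming the `EvaluationResult` objects expose `query`, `passing`, and `feedback` fields (verify against your LlamaIndex version):

```python
# Inspect the first few faithfulness judgments from the last run.
for result in eval_results["faithfulness"][:3]:
    print("Query:   ", result.query)
    print("Passing: ", result.passing)
    print("Feedback:", result.feedback)  # the judge model's explanation, when provided
    print("-" * 40)
```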
770
- {
771
- "cell_type": "code",
772
- "execution_count": null,
773
- "metadata": {},
774
- "outputs": [],
775
- "source": []
776
- }
777
- ],
778
- "metadata": {
779
- "colab": {
780
- "authorship_tag": "ABX9TyOnRtEA1r5V6nZnTDjOEHPs",
781
- "include_colab_link": true,
782
- "provenance": []
783
- },
784
- "kernelspec": {
785
- "display_name": "Python 3",
786
- "name": "python3"
787
- },
788
- "language_info": {
789
- "codemirror_mode": {
790
- "name": "ipython",
791
- "version": 3
792
- },
793
- "file_extension": ".py",
794
- "mimetype": "text/x-python",
795
- "name": "python",
796
- "nbconvert_exporter": "python",
797
- "pygments_lexer": "ipython3",
798
- "version": "3.11.8"
799
- },
800
- "widgets": {
801
- "application/vnd.jupyter.widget-state+json": {
802
- "01d27fdbe86a4ca2830b9bf3ccbf1ae9": {
803
- "model_module": "@jupyter-widgets/controls",
804
- "model_module_version": "1.5.0",
805
- "model_name": "ProgressStyleModel",
806
- "state": {
807
- "_model_module": "@jupyter-widgets/controls",
808
- "_model_module_version": "1.5.0",
809
- "_model_name": "ProgressStyleModel",
810
- "_view_count": null,
811
- "_view_module": "@jupyter-widgets/base",
812
- "_view_module_version": "1.2.0",
813
- "_view_name": "StyleView",
814
- "bar_color": null,
815
- "description_width": ""
816
- }
817
- },
818
- "076728d713254b49935c7938d18014f2": {
819
- "model_module": "@jupyter-widgets/controls",
820
- "model_module_version": "1.5.0",
821
- "model_name": "HTMLModel",
822
- "state": {
823
- "_dom_classes": [],
824
- "_model_module": "@jupyter-widgets/controls",
825
- "_model_module_version": "1.5.0",
826
- "_model_name": "HTMLModel",
827
- "_view_count": null,
828
- "_view_module": "@jupyter-widgets/controls",
829
- "_view_module_version": "1.5.0",
830
- "_view_name": "HTMLView",
831
- "description": "",
832
- "description_tooltip": null,
833
- "layout": "IPY_MODEL_121dbf44a222434cbc57ebe6beb83e2a",
834
- "placeholder": "​",
835
- "style": "IPY_MODEL_2af0821ebb7e47988d134d4ec2776e87",
836
- "value": " 108/108 [00:34&lt;00:00, 3.66it/s]"
837
- }
838
- },
839
- "10340f8e7c8e482c8d35047a3e43ee7f": {
840
- "model_module": "@jupyter-widgets/controls",
841
- "model_module_version": "1.5.0",
842
- "model_name": "DescriptionStyleModel",
843
- "state": {
844
- "_model_module": "@jupyter-widgets/controls",
845
- "_model_module_version": "1.5.0",
846
- "_model_name": "DescriptionStyleModel",
847
- "_view_count": null,
848
- "_view_module": "@jupyter-widgets/base",
849
- "_view_module_version": "1.2.0",
850
- "_view_name": "StyleView",
851
- "description_width": ""
852
- }
853
- },
854
- "1095efa793804a3fb625855e715a5317": {
855
- "model_module": "@jupyter-widgets/base",
856
- "model_module_version": "1.2.0",
857
- "model_name": "LayoutModel",
858
- "state": {
859
- "_model_module": "@jupyter-widgets/base",
860
- "_model_module_version": "1.2.0",
861
- "_model_name": "LayoutModel",
862
- "_view_count": null,
863
- "_view_module": "@jupyter-widgets/base",
864
- "_view_module_version": "1.2.0",
865
- "_view_name": "LayoutView",
866
- "align_content": null,
867
- "align_items": null,
868
- "align_self": null,
869
- "border": null,
870
- "bottom": null,
871
- "display": null,
872
- "flex": null,
873
- "flex_flow": null,
874
- "grid_area": null,
875
- "grid_auto_columns": null,
876
- "grid_auto_flow": null,
877
- "grid_auto_rows": null,
878
- "grid_column": null,
879
- "grid_gap": null,
880
- "grid_row": null,
881
- "grid_template_areas": null,
882
- "grid_template_columns": null,
883
- "grid_template_rows": null,
884
- "height": null,
885
- "justify_content": null,
886
- "justify_items": null,
887
- "left": null,
888
- "margin": null,
889
- "max_height": null,
890
- "max_width": null,
891
- "min_height": null,
892
- "min_width": null,
893
- "object_fit": null,
894
- "object_position": null,
895
- "order": null,
896
- "overflow": null,
897
- "overflow_x": null,
898
- "overflow_y": null,
899
- "padding": null,
900
- "right": null,
901
- "top": null,
902
- "visibility": null,
903
- "width": null
904
- }
905
- },
906
- "121dbf44a222434cbc57ebe6beb83e2a": {
907
- "model_module": "@jupyter-widgets/base",
908
- "model_module_version": "1.2.0",
909
- "model_name": "LayoutModel",
910
- "state": {
911
- "_model_module": "@jupyter-widgets/base",
912
- "_model_module_version": "1.2.0",
913
- "_model_name": "LayoutModel",
914
- "_view_count": null,
915
- "_view_module": "@jupyter-widgets/base",
916
- "_view_module_version": "1.2.0",
917
- "_view_name": "LayoutView",
918
- "align_content": null,
919
- "align_items": null,
920
- "align_self": null,
921
- "border": null,
922
- "bottom": null,
923
- "display": null,
924
- "flex": null,
925
- "flex_flow": null,
926
- "grid_area": null,
927
- "grid_auto_columns": null,
928
- "grid_auto_flow": null,
929
- "grid_auto_rows": null,
930
- "grid_column": null,
931
- "grid_gap": null,
932
- "grid_row": null,
933
- "grid_template_areas": null,
934
- "grid_template_columns": null,
935
- "grid_template_rows": null,
936
- "height": null,
937
- "justify_content": null,
938
- "justify_items": null,
939
- "left": null,
940
- "margin": null,
941
- "max_height": null,
942
- "max_width": null,
943
- "min_height": null,
944
- "min_width": null,
945
- "object_fit": null,
946
- "object_position": null,
947
- "order": null,
948
- "overflow": null,
949
- "overflow_x": null,
950
- "overflow_y": null,
951
- "padding": null,
952
- "right": null,
953
- "top": null,
954
- "visibility": null,
955
- "width": null
956
- }
957
- },
958
- "2073b65c0db045aa8e86d91a4fea2e2b": {
959
- "model_module": "@jupyter-widgets/controls",
960
- "model_module_version": "1.5.0",
961
- "model_name": "DescriptionStyleModel",
962
- "state": {
963
- "_model_module": "@jupyter-widgets/controls",
964
- "_model_module_version": "1.5.0",
965
- "_model_name": "DescriptionStyleModel",
966
- "_view_count": null,
967
- "_view_module": "@jupyter-widgets/base",
968
- "_view_module_version": "1.2.0",
969
- "_view_name": "StyleView",
970
- "description_width": ""
971
- }
972
- },
973
- "2af0821ebb7e47988d134d4ec2776e87": {
974
- "model_module": "@jupyter-widgets/controls",
975
- "model_module_version": "1.5.0",
976
- "model_name": "DescriptionStyleModel",
977
- "state": {
978
- "_model_module": "@jupyter-widgets/controls",
979
- "_model_module_version": "1.5.0",
980
- "_model_name": "DescriptionStyleModel",
981
- "_view_count": null,
982
- "_view_module": "@jupyter-widgets/base",
983
- "_view_module_version": "1.2.0",
984
- "_view_name": "StyleView",
985
- "description_width": ""
986
- }
987
- },
988
- "665b9b5e85a34be8a20d40c51e57cfe0": {
989
- "model_module": "@jupyter-widgets/controls",
990
- "model_module_version": "1.5.0",
991
- "model_name": "HTMLModel",
992
- "state": {
993
- "_dom_classes": [],
994
- "_model_module": "@jupyter-widgets/controls",
995
- "_model_module_version": "1.5.0",
996
- "_model_name": "HTMLModel",
997
- "_view_count": null,
998
- "_view_module": "@jupyter-widgets/controls",
999
- "_view_module_version": "1.5.0",
1000
- "_view_name": "HTMLView",
1001
- "description": "",
1002
- "description_tooltip": null,
1003
- "layout": "IPY_MODEL_85f23ab21c3b404aaa146cfcaefc85d8",
1004
- "placeholder": "​",
1005
- "style": "IPY_MODEL_10340f8e7c8e482c8d35047a3e43ee7f",
1006
- "value": "Generating embeddings: 100%"
1007
- }
1008
- },
1009
- "6c575687c8f1468a803b88eea3d26b7b": {
1010
- "model_module": "@jupyter-widgets/controls",
1011
- "model_module_version": "1.5.0",
1012
- "model_name": "HTMLModel",
1013
- "state": {
1014
- "_dom_classes": [],
1015
- "_model_module": "@jupyter-widgets/controls",
1016
- "_model_module_version": "1.5.0",
1017
- "_model_name": "HTMLModel",
1018
- "_view_count": null,
1019
- "_view_module": "@jupyter-widgets/controls",
1020
- "_view_module_version": "1.5.0",
1021
- "_view_name": "HTMLView",
1022
- "description": "",
1023
- "description_tooltip": null,
1024
- "layout": "IPY_MODEL_eb057e56f0f94e4993b8ae960c78b0ad",
1025
- "placeholder": "​",
1026
- "style": "IPY_MODEL_2073b65c0db045aa8e86d91a4fea2e2b",
1027
- "value": "Parsing nodes: 100%"
1028
- }
1029
- },
1030
- "70e17db8fc2f490f85b7af8aa664f0c7": {
1031
- "model_module": "@jupyter-widgets/controls",
1032
- "model_module_version": "1.5.0",
1033
- "model_name": "DescriptionStyleModel",
1034
- "state": {
1035
- "_model_module": "@jupyter-widgets/controls",
1036
- "_model_module_version": "1.5.0",
1037
- "_model_name": "DescriptionStyleModel",
1038
- "_view_count": null,
1039
- "_view_module": "@jupyter-widgets/base",
1040
- "_view_module_version": "1.2.0",
1041
- "_view_name": "StyleView",
1042
- "description_width": ""
1043
- }
1044
- },
1045
- "76fea2dabfea42aa8bc7ae719f2a22ee": {
1046
- "model_module": "@jupyter-widgets/controls",
1047
- "model_module_version": "1.5.0",
1048
- "model_name": "HBoxModel",
1049
- "state": {
1050
- "_dom_classes": [],
1051
- "_model_module": "@jupyter-widgets/controls",
1052
- "_model_module_version": "1.5.0",
1053
- "_model_name": "HBoxModel",
1054
- "_view_count": null,
1055
- "_view_module": "@jupyter-widgets/controls",
1056
- "_view_module_version": "1.5.0",
1057
- "_view_name": "HBoxView",
1058
- "box_style": "",
1059
- "children": [
1060
- "IPY_MODEL_6c575687c8f1468a803b88eea3d26b7b",
1061
- "IPY_MODEL_c266531dafcf4624af5fe9bcbc9d8df9",
1062
- "IPY_MODEL_e20a27a2f7764cb4b9537e34a3659c9a"
1063
- ],
1064
- "layout": "IPY_MODEL_bba307f545cd4533be6f0489f95b9895"
1065
- }
1066
- },
1067
- "8141417665024172a4baa78c497acb69": {
1068
- "model_module": "@jupyter-widgets/base",
1069
- "model_module_version": "1.2.0",
1070
- "model_name": "LayoutModel",
1071
- "state": {
1072
- "_model_module": "@jupyter-widgets/base",
1073
- "_model_module_version": "1.2.0",
1074
- "_model_name": "LayoutModel",
1075
- "_view_count": null,
1076
- "_view_module": "@jupyter-widgets/base",
1077
- "_view_module_version": "1.2.0",
1078
- "_view_name": "LayoutView",
1079
- "align_content": null,
1080
- "align_items": null,
1081
- "align_self": null,
1082
- "border": null,
1083
- "bottom": null,
1084
- "display": null,
1085
- "flex": null,
1086
- "flex_flow": null,
1087
- "grid_area": null,
1088
- "grid_auto_columns": null,
1089
- "grid_auto_flow": null,
1090
- "grid_auto_rows": null,
1091
- "grid_column": null,
1092
- "grid_gap": null,
1093
- "grid_row": null,
1094
- "grid_template_areas": null,
1095
- "grid_template_columns": null,
1096
- "grid_template_rows": null,
1097
- "height": null,
1098
- "justify_content": null,
1099
- "justify_items": null,
1100
- "left": null,
1101
- "margin": null,
1102
- "max_height": null,
1103
- "max_width": null,
1104
- "min_height": null,
1105
- "min_width": null,
1106
- "object_fit": null,
1107
- "object_position": null,
1108
- "order": null,
1109
- "overflow": null,
1110
- "overflow_x": null,
1111
- "overflow_y": null,
1112
- "padding": null,
1113
- "right": null,
1114
- "top": null,
1115
- "visibility": null,
1116
- "width": null
1117
- }
1118
- },
1119
- "85f23ab21c3b404aaa146cfcaefc85d8": {
1120
- "model_module": "@jupyter-widgets/base",
1121
- "model_module_version": "1.2.0",
1122
- "model_name": "LayoutModel",
1123
- "state": {
1124
- "_model_module": "@jupyter-widgets/base",
1125
- "_model_module_version": "1.2.0",
1126
- "_model_name": "LayoutModel",
1127
- "_view_count": null,
1128
- "_view_module": "@jupyter-widgets/base",
1129
- "_view_module_version": "1.2.0",
1130
- "_view_name": "LayoutView",
1131
- "align_content": null,
1132
- "align_items": null,
1133
- "align_self": null,
1134
- "border": null,
1135
- "bottom": null,
1136
- "display": null,
1137
- "flex": null,
1138
- "flex_flow": null,
1139
- "grid_area": null,
1140
- "grid_auto_columns": null,
1141
- "grid_auto_flow": null,
1142
- "grid_auto_rows": null,
1143
- "grid_column": null,
1144
- "grid_gap": null,
1145
- "grid_row": null,
1146
- "grid_template_areas": null,
1147
- "grid_template_columns": null,
1148
- "grid_template_rows": null,
1149
- "height": null,
1150
- "justify_content": null,
1151
- "justify_items": null,
1152
- "left": null,
1153
- "margin": null,
1154
- "max_height": null,
1155
- "max_width": null,
1156
- "min_height": null,
1157
- "min_width": null,
1158
- "object_fit": null,
1159
- "object_position": null,
1160
- "order": null,
1161
- "overflow": null,
1162
- "overflow_x": null,
1163
- "overflow_y": null,
1164
- "padding": null,
1165
- "right": null,
1166
- "top": null,
1167
- "visibility": null,
1168
- "width": null
1169
- }
1170
- },
1171
- "b43a5a6a65034a16927700e442dde52a": {
1172
- "model_module": "@jupyter-widgets/controls",
1173
- "model_module_version": "1.5.0",
1174
- "model_name": "ProgressStyleModel",
1175
- "state": {
1176
- "_model_module": "@jupyter-widgets/controls",
1177
- "_model_module_version": "1.5.0",
1178
- "_model_name": "ProgressStyleModel",
1179
- "_view_count": null,
1180
- "_view_module": "@jupyter-widgets/base",
1181
- "_view_module_version": "1.2.0",
1182
- "_view_name": "StyleView",
1183
- "bar_color": null,
1184
- "description_width": ""
1185
- }
1186
- },
1187
- "b604cef3deca4847afcc459e5c8a9e0f": {
1188
- "model_module": "@jupyter-widgets/controls",
1189
- "model_module_version": "1.5.0",
1190
- "model_name": "FloatProgressModel",
1191
- "state": {
1192
- "_dom_classes": [],
1193
- "_model_module": "@jupyter-widgets/controls",
1194
- "_model_module_version": "1.5.0",
1195
- "_model_name": "FloatProgressModel",
1196
- "_view_count": null,
1197
- "_view_module": "@jupyter-widgets/controls",
1198
- "_view_module_version": "1.5.0",
1199
- "_view_name": "ProgressView",
1200
- "bar_style": "success",
1201
- "description": "",
1202
- "description_tooltip": null,
1203
- "layout": "IPY_MODEL_1095efa793804a3fb625855e715a5317",
1204
- "max": 108,
1205
- "min": 0,
1206
- "orientation": "horizontal",
1207
- "style": "IPY_MODEL_b43a5a6a65034a16927700e442dde52a",
1208
- "value": 108
1209
- }
1210
- },
1211
- "bba307f545cd4533be6f0489f95b9895": {
1212
- "model_module": "@jupyter-widgets/base",
1213
- "model_module_version": "1.2.0",
1214
- "model_name": "LayoutModel",
1215
- "state": {
1216
- "_model_module": "@jupyter-widgets/base",
1217
- "_model_module_version": "1.2.0",
1218
- "_model_name": "LayoutModel",
1219
- "_view_count": null,
1220
- "_view_module": "@jupyter-widgets/base",
1221
- "_view_module_version": "1.2.0",
1222
- "_view_name": "LayoutView",
1223
- "align_content": null,
1224
- "align_items": null,
1225
- "align_self": null,
1226
- "border": null,
1227
- "bottom": null,
1228
- "display": null,
1229
- "flex": null,
1230
- "flex_flow": null,
1231
- "grid_area": null,
1232
- "grid_auto_columns": null,
1233
- "grid_auto_flow": null,
1234
- "grid_auto_rows": null,
1235
- "grid_column": null,
1236
- "grid_gap": null,
1237
- "grid_row": null,
1238
- "grid_template_areas": null,
1239
- "grid_template_columns": null,
1240
- "grid_template_rows": null,
1241
- "height": null,
1242
- "justify_content": null,
1243
- "justify_items": null,
1244
- "left": null,
1245
- "margin": null,
1246
- "max_height": null,
1247
- "max_width": null,
1248
- "min_height": null,
1249
- "min_width": null,
1250
- "object_fit": null,
1251
- "object_position": null,
1252
- "order": null,
1253
- "overflow": null,
1254
- "overflow_x": null,
1255
- "overflow_y": null,
1256
- "padding": null,
1257
- "right": null,
1258
- "top": null,
1259
- "visibility": null,
1260
- "width": null
1261
- }
1262
- },
1263
- "be591abb84a24c4b9903087501ebb0e5": {
1264
- "model_module": "@jupyter-widgets/base",
1265
- "model_module_version": "1.2.0",
1266
- "model_name": "LayoutModel",
1267
- "state": {
1268
- "_model_module": "@jupyter-widgets/base",
1269
- "_model_module_version": "1.2.0",
1270
- "_model_name": "LayoutModel",
1271
- "_view_count": null,
1272
- "_view_module": "@jupyter-widgets/base",
1273
- "_view_module_version": "1.2.0",
1274
- "_view_name": "LayoutView",
1275
- "align_content": null,
1276
- "align_items": null,
1277
- "align_self": null,
1278
- "border": null,
1279
- "bottom": null,
1280
- "display": null,
1281
- "flex": null,
1282
- "flex_flow": null,
1283
- "grid_area": null,
1284
- "grid_auto_columns": null,
1285
- "grid_auto_flow": null,
1286
- "grid_auto_rows": null,
1287
- "grid_column": null,
1288
- "grid_gap": null,
1289
- "grid_row": null,
1290
- "grid_template_areas": null,
1291
- "grid_template_columns": null,
1292
- "grid_template_rows": null,
1293
- "height": null,
1294
- "justify_content": null,
1295
- "justify_items": null,
1296
- "left": null,
1297
- "margin": null,
1298
- "max_height": null,
1299
- "max_width": null,
1300
- "min_height": null,
1301
- "min_width": null,
1302
- "object_fit": null,
1303
- "object_position": null,
1304
- "order": null,
1305
- "overflow": null,
1306
- "overflow_x": null,
1307
- "overflow_y": null,
1308
- "padding": null,
1309
- "right": null,
1310
- "top": null,
1311
- "visibility": null,
1312
- "width": null
1313
- }
1314
- },
1315
- "c0a70bcdf3fb4bbfb2675b8012b2ef24": {
1316
- "model_module": "@jupyter-widgets/controls",
1317
- "model_module_version": "1.5.0",
1318
- "model_name": "HBoxModel",
1319
- "state": {
1320
- "_dom_classes": [],
1321
- "_model_module": "@jupyter-widgets/controls",
1322
- "_model_module_version": "1.5.0",
1323
- "_model_name": "HBoxModel",
1324
- "_view_count": null,
1325
- "_view_module": "@jupyter-widgets/controls",
1326
- "_view_module_version": "1.5.0",
1327
- "_view_name": "HBoxView",
1328
- "box_style": "",
1329
- "children": [
1330
- "IPY_MODEL_665b9b5e85a34be8a20d40c51e57cfe0",
1331
- "IPY_MODEL_b604cef3deca4847afcc459e5c8a9e0f",
1332
- "IPY_MODEL_076728d713254b49935c7938d18014f2"
1333
- ],
1334
- "layout": "IPY_MODEL_be591abb84a24c4b9903087501ebb0e5"
1335
- }
1336
- },
1337
- "c266531dafcf4624af5fe9bcbc9d8df9": {
1338
- "model_module": "@jupyter-widgets/controls",
1339
- "model_module_version": "1.5.0",
1340
- "model_name": "FloatProgressModel",
1341
- "state": {
1342
- "_dom_classes": [],
1343
- "_model_module": "@jupyter-widgets/controls",
1344
- "_model_module_version": "1.5.0",
1345
- "_model_name": "FloatProgressModel",
1346
- "_view_count": null,
1347
- "_view_module": "@jupyter-widgets/controls",
1348
- "_view_module_version": "1.5.0",
1349
- "_view_name": "ProgressView",
1350
- "bar_style": "success",
1351
- "description": "",
1352
- "description_tooltip": null,
1353
- "layout": "IPY_MODEL_8141417665024172a4baa78c497acb69",
1354
- "max": 14,
1355
- "min": 0,
1356
- "orientation": "horizontal",
1357
- "style": "IPY_MODEL_01d27fdbe86a4ca2830b9bf3ccbf1ae9",
1358
- "value": 14
1359
- }
1360
- },
1361
- "e20a27a2f7764cb4b9537e34a3659c9a": {
1362
- "model_module": "@jupyter-widgets/controls",
1363
- "model_module_version": "1.5.0",
1364
- "model_name": "HTMLModel",
1365
- "state": {
1366
- "_dom_classes": [],
1367
- "_model_module": "@jupyter-widgets/controls",
1368
- "_model_module_version": "1.5.0",
1369
- "_model_name": "HTMLModel",
1370
- "_view_count": null,
1371
- "_view_module": "@jupyter-widgets/controls",
1372
- "_view_module_version": "1.5.0",
1373
- "_view_name": "HTMLView",
1374
- "description": "",
1375
- "description_tooltip": null,
1376
- "layout": "IPY_MODEL_e4fe85a095e64d52b6a53c2a4bba8aeb",
1377
- "placeholder": "​",
1378
- "style": "IPY_MODEL_70e17db8fc2f490f85b7af8aa664f0c7",
1379
- "value": " 14/14 [00:00&lt;00:00, 26.60it/s]"
1380
- }
1381
- },
1382
- "e4fe85a095e64d52b6a53c2a4bba8aeb": {
1383
- "model_module": "@jupyter-widgets/base",
1384
- "model_module_version": "1.2.0",
1385
- "model_name": "LayoutModel",
1386
- "state": {
1387
- "_model_module": "@jupyter-widgets/base",
1388
- "_model_module_version": "1.2.0",
1389
- "_model_name": "LayoutModel",
1390
- "_view_count": null,
1391
- "_view_module": "@jupyter-widgets/base",
1392
- "_view_module_version": "1.2.0",
1393
- "_view_name": "LayoutView",
1394
- "align_content": null,
1395
- "align_items": null,
1396
- "align_self": null,
1397
- "border": null,
1398
- "bottom": null,
1399
- "display": null,
1400
- "flex": null,
1401
- "flex_flow": null,
1402
- "grid_area": null,
1403
- "grid_auto_columns": null,
1404
- "grid_auto_flow": null,
1405
- "grid_auto_rows": null,
1406
- "grid_column": null,
1407
- "grid_gap": null,
1408
- "grid_row": null,
1409
- "grid_template_areas": null,
1410
- "grid_template_columns": null,
1411
- "grid_template_rows": null,
1412
- "height": null,
1413
- "justify_content": null,
1414
- "justify_items": null,
1415
- "left": null,
1416
- "margin": null,
1417
- "max_height": null,
1418
- "max_width": null,
1419
- "min_height": null,
1420
- "min_width": null,
1421
- "object_fit": null,
1422
- "object_position": null,
1423
- "order": null,
1424
- "overflow": null,
1425
- "overflow_x": null,
1426
- "overflow_y": null,
1427
- "padding": null,
1428
- "right": null,
1429
- "top": null,
1430
- "visibility": null,
1431
- "width": null
1432
- }
1433
- },
1434
- "eb057e56f0f94e4993b8ae960c78b0ad": {
1435
- "model_module": "@jupyter-widgets/base",
1436
- "model_module_version": "1.2.0",
1437
- "model_name": "LayoutModel",
1438
- "state": {
1439
- "_model_module": "@jupyter-widgets/base",
1440
- "_model_module_version": "1.2.0",
1441
- "_model_name": "LayoutModel",
1442
- "_view_count": null,
1443
- "_view_module": "@jupyter-widgets/base",
1444
- "_view_module_version": "1.2.0",
1445
- "_view_name": "LayoutView",
1446
- "align_content": null,
1447
- "align_items": null,
1448
- "align_self": null,
1449
- "border": null,
1450
- "bottom": null,
1451
- "display": null,
1452
- "flex": null,
1453
- "flex_flow": null,
1454
- "grid_area": null,
1455
- "grid_auto_columns": null,
1456
- "grid_auto_flow": null,
1457
- "grid_auto_rows": null,
1458
- "grid_column": null,
1459
- "grid_gap": null,
1460
- "grid_row": null,
1461
- "grid_template_areas": null,
1462
- "grid_template_columns": null,
1463
- "grid_template_rows": null,
1464
- "height": null,
1465
- "justify_content": null,
1466
- "justify_items": null,
1467
- "left": null,
1468
- "margin": null,
1469
- "max_height": null,
1470
- "max_width": null,
1471
- "min_height": null,
1472
- "min_width": null,
1473
- "object_fit": null,
1474
- "object_position": null,
1475
- "order": null,
1476
- "overflow": null,
1477
- "overflow_x": null,
1478
- "overflow_y": null,
1479
- "padding": null,
1480
- "right": null,
1481
- "top": null,
1482
- "visibility": null,
1483
- "width": null
1484
- }
1485
- }
1486
- }
1487
- }
1488
- },
1489
- "nbformat": 4,
1490
- "nbformat_minor": 0
1491
- }
 
notebooks/07-RAG_Improve_Chunking.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
notebooks/08-Finetune_Embedding.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
notebooks/09-Better_Embedding_Model.ipynb DELETED
@@ -1,1575 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "colab_type": "text",
7
- "id": "view-in-github"
8
- },
9
- "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/09-Better_Embedding_Model.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
- ]
12
- },
13
- {
14
- "cell_type": "markdown",
15
- "metadata": {
16
- "id": "-zE1h0uQV7uT"
17
- },
18
- "source": [
19
- "# Install Packages and Setup Variables"
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": 14,
25
- "metadata": {
26
- "colab": {
27
- "base_uri": "https://localhost:8080/"
28
- },
29
- "id": "QPJzr-I9XQ7l",
30
- "outputId": "8e808cc4-4c21-474b-c5b7-f6841ee08020"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-embeddings-cohere llama-index-readers-web cohere==4.47 tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic"
35
- ]
36
- },
37
- {
38
- "cell_type": "code",
39
- "execution_count": 11,
40
- "metadata": {
41
- "id": "riuXwpSPcvWC"
42
- },
43
- "outputs": [],
44
- "source": [
45
- "import os\n",
46
- "\n",
47
- "# Set the \"OPENAI_API_KEY\" and the \"CO_API_KEY\" (Cohere) in the Python environment. Will be used by OpenAI client later.\n",
48
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\"\n",
49
- "os.environ[\"CO_API_KEY\"] = \"<YOUR_COHERE_KEY>\""
50
- ]
51
- },
52
- {
53
- "cell_type": "code",
54
- "execution_count": 2,
55
- "metadata": {
56
- "id": "jIEeZzqLbz0J"
57
- },
58
- "outputs": [],
59
- "source": [
60
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
61
- "\n",
62
- "import nest_asyncio\n",
63
- "\n",
64
- "nest_asyncio.apply()"
65
- ]
66
- },
67
- {
68
- "cell_type": "markdown",
69
- "metadata": {
70
- "id": "Bkgi2OrYzF7q"
71
- },
72
- "source": [
73
- "# Load a Model"
74
- ]
75
- },
76
- {
77
- "cell_type": "code",
78
- "execution_count": 3,
79
- "metadata": {
80
- "id": "9oGT6crooSSj"
81
- },
82
- "outputs": [
83
- {
84
- "name": "stderr",
85
- "output_type": "stream",
86
- "text": [
87
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
88
- " from .autonotebook import tqdm as notebook_tqdm\n"
89
- ]
90
- }
91
- ],
92
- "source": [
93
- "from llama_index.llms.openai import OpenAI\n",
94
- "\n",
95
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
96
- ]
97
- },
98
- {
99
- "cell_type": "markdown",
100
- "metadata": {
101
- "id": "0BwVuJXlzHVL"
102
- },
103
- "source": [
104
- "# Create a VectoreStore"
105
- ]
106
- },
107
- {
108
- "cell_type": "code",
109
- "execution_count": 4,
110
- "metadata": {
111
- "id": "SQP87lHczHKc"
112
- },
113
- "outputs": [],
114
- "source": [
115
- "import chromadb\n",
116
- "\n",
117
- "# create client and a new collection\n",
118
- "# chromadb.EphemeralClient saves data in-memory.\n",
119
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
120
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
121
- ]
122
- },
123
- {
124
- "cell_type": "code",
125
- "execution_count": 5,
126
- "metadata": {
127
- "id": "zAaGcYMJzHAN"
128
- },
129
- "outputs": [],
130
- "source": [
131
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
132
- "\n",
133
- "# Define a storage context object using the created vector database.\n",
134
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
135
- ]
136
- },
137
- {
138
- "cell_type": "markdown",
139
- "metadata": {
140
- "id": "I9JbAzFcjkpn"
141
- },
142
- "source": [
143
- "# Load the Dataset (CSV)"
144
- ]
145
- },
146
- {
147
- "cell_type": "markdown",
148
- "metadata": {
149
- "id": "ceveDuYdWCYk"
150
- },
151
- "source": [
152
- "## Download"
153
- ]
154
- },
155
- {
156
- "cell_type": "markdown",
157
- "metadata": {
158
- "id": "eZwf6pv7WFmD"
159
- },
160
- "source": [
161
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
162
- ]
163
- },
164
- {
165
- "cell_type": "code",
166
- "execution_count": 6,
167
- "metadata": {
168
- "colab": {
169
- "base_uri": "https://localhost:8080/"
170
- },
171
- "id": "wl_pbPvMlv1h",
172
- "outputId": "bc9a0415-a1fb-4e89-a2b4-165420106b34"
173
- },
174
- "outputs": [
175
- {
176
- "name": "stdout",
177
- "output_type": "stream",
178
- "text": [
179
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
180
- " Dload Upload Total Spent Left Speed\n",
181
- "100 169k 100 169k 0 0 856k 0 --:--:-- --:--:-- --:--:-- 860k\n"
182
- ]
183
- }
184
- ],
185
- "source": [
186
- "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
187
- ]
188
- },
189
- {
190
- "cell_type": "markdown",
191
- "metadata": {
192
- "id": "VWBLtDbUWJfA"
193
- },
194
- "source": [
195
- "## Read File"
196
- ]
197
- },
198
- {
199
- "cell_type": "code",
200
- "execution_count": 7,
201
- "metadata": {
202
- "colab": {
203
- "base_uri": "https://localhost:8080/"
204
- },
205
- "id": "0Q9sxuW0g3Gd",
206
- "outputId": "a8361aa6-522d-4def-e49b-ed08d9c8e7d1"
207
- },
208
- "outputs": [
209
- {
210
- "data": {
211
- "text/plain": [
212
- "14"
213
- ]
214
- },
215
- "execution_count": 7,
216
- "metadata": {},
217
- "output_type": "execute_result"
218
- }
219
- ],
220
- "source": [
221
- "import csv\n",
222
- "\n",
223
- "rows = []\n",
224
- "\n",
225
- "# Load the file as a JSON\n",
226
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
227
- " csv_reader = csv.reader(file)\n",
228
- "\n",
229
- " for idx, row in enumerate( csv_reader ):\n",
230
- " if idx == 0: continue; # Skip header row\n",
231
- " rows.append( row )\n",
232
- "\n",
233
- "# The number of characters in the dataset.\n",
234
- "len( rows )"
235
- ]
236
- },
237
- {
238
- "cell_type": "markdown",
239
- "metadata": {
240
- "id": "S17g2RYOjmf2"
241
- },
242
- "source": [
243
- "# Convert to Document obj"
244
- ]
245
- },
246
- {
247
- "cell_type": "code",
248
- "execution_count": 8,
249
- "metadata": {
250
- "id": "YizvmXPejkJE"
251
- },
252
- "outputs": [],
253
- "source": [
254
- "from llama_index.core import Document\n",
255
- "\n",
256
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
257
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
258
- ]
259
- },
260
- {
261
- "cell_type": "markdown",
262
- "metadata": {
263
- "id": "qjuLbmFuWsyl"
264
- },
265
- "source": [
266
- "# Transforming"
267
- ]
268
- },
269
- {
270
- "cell_type": "code",
271
- "execution_count": 9,
272
- "metadata": {
273
- "id": "9z3t70DGWsjO"
274
- },
275
- "outputs": [],
276
- "source": [
277
- "from llama_index.core.text_splitter import TokenTextSplitter\n",
278
- "\n",
279
- "# Define the splitter object that split the text into segments with 512 tokens,\n",
280
- "# with a 128 overlap between the segments.\n",
281
- "text_splitter = TokenTextSplitter(\n",
282
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
283
- ")"
284
- ]
285
- },
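A quick sanity check (an illustrative sketch, not part of the original notebook): calling the splitter directly on one article shows what it produces; split_text returns a list of overlapping string chunks of at most 512 tokens each.

    # Illustrative only: rows[0][1] holds the first article's text, as loaded above.
    chunks = text_splitter.split_text(rows[0][1])
    print(len(chunks), chunks[0][:80])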
286
- {
287
- "cell_type": "markdown",
288
- "metadata": {
289
- "id": "y28yMy0GxfGR"
290
- },
291
- "source": [
292
- "There are two options to use the Cohere embeddings:\n",
293
- "\n",
294
- "- input_type=\"search_document\": Employ this option for texts (documents) intended for storage in your vector database.\n",
295
- "\n",
296
- "- input_type=\"search_query\": Use this when issuing search queries to locate the most related documents within your vector database."
297
- ]
298
- },
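To make the two roles concrete, here is a minimal sketch (not from the original notebook) that instantiates the same model once per role; it assumes the CO_API_KEY environment variable is set as in the setup cell.

    from llama_index.embeddings.cohere import CohereEmbedding

    # Used at indexing time: embeds the documents stored in the vector database.
    doc_embedder = CohereEmbedding(model_name="embed-english-v3.0", input_type="search_document")

    # Used at query time: embeds the user's search queries.
    query_embedder = CohereEmbedding(model_name="embed-english-v3.0", input_type="search_query")

    doc_vec = doc_embedder.get_text_embedding("LLaMA 2 was trained on 2T tokens.")
    query_vec = query_embedder.get_query_embedding("How was LLaMA 2 trained?")

Using the matching input type on each side keeps document and query vectors in a shared space, which similarity search relies on.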
299
- {
300
- "cell_type": "code",
301
- "execution_count": 12,
302
- "metadata": {
303
- "colab": {
304
- "base_uri": "https://localhost:8080/",
305
- "height": 385,
306
- "referenced_widgets": [
307
- "2b1095050bb847c48855e3b74ae18b19",
308
- "a0a1c543115c4764b4150c5d0216370c",
309
- "23675bffa00749849ec944f84986ff52",
310
- "9e86b288110f4d418fd9761f59f5637f",
311
- "d6a4fd2a9cf7431b8bf738d9da0e2a7c",
312
- "700a1ffb298c4dd799c44fcee540b74c",
313
- "06e7a0370c8c46dd9a47c72a474212d1",
314
- "268f6f0800164e0ab7f8f31718f7f9be",
315
- "4001b95bd48147fb876b37a644e70dec",
316
- "22024efa09cb4330ab68a8c2bdbf92ac",
317
- "c14678e2b8c546fc9123c94fa47b924d",
318
- "9dda1537424142e0b7f2fdd5f9c1b98d",
319
- "1db171d1920d432283f9e1795c4c0c80",
320
- "23e0caeaf15546f0b5c62aa263c99e09",
321
- "03b8aded009343f288f0945b64d1f41c",
322
- "4d922a99035d45c59ce9868a4ef73d68",
323
- "aea6b63cbced40619bf32b1a2c350259",
324
- "c89c9dd46b454181aadaf82c7296cdae",
325
- "bec71553390b44879accb638a5b4873f",
326
- "97e4316196e84c7a82a2dd3e4698bc55",
327
- "b2ab2dc287a9421ca812074389ee31a7",
328
- "fa5c2f509ec54c5695a406160ab0626a"
329
- ]
330
- },
331
- "id": "P9LDJ7o-Wsc-",
332
- "outputId": "cd49bff2-b0da-4722-8baa-6a07f1023b39"
333
- },
334
- "outputs": [
335
- {
336
- "name": "stderr",
337
- "output_type": "stream",
338
- "text": [
339
- "Parsing nodes: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 14/14 [00:00<00:00, 30.35it/s]\n",
340
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [01:01<00:00, 1.76it/s]\n",
341
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [01:13<00:00, 1.47it/s]\n",
342
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:30<00:00, 3.59it/s]\n",
343
- "Generating embeddings: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:04<00:00, 26.11it/s]\n"
344
- ]
345
- }
346
- ],
347
- "source": [
348
- "from llama_index.core.extractors import (\n",
349
- " SummaryExtractor,\n",
350
- " QuestionsAnsweredExtractor,\n",
351
- " KeywordExtractor,\n",
352
- ")\n",
353
- "from llama_index.embeddings.cohere import CohereEmbedding\n",
354
- "from llama_index.core.ingestion import IngestionPipeline\n",
355
- "\n",
356
- "# Create the pipeline to apply the transformation on each chunk,\n",
357
- "# and store the transformed text in the chroma vector store.\n",
358
- "pipeline = IngestionPipeline(\n",
359
- " transformations=[\n",
360
- " text_splitter,\n",
361
- " QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
362
- " SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
363
- " KeywordExtractor(keywords=10, llm=llm),\n",
364
- " CohereEmbedding(model_name=\"embed-english-v3.0\", input_type=\"search_document\"),\n",
365
- " ],\n",
366
- " vector_store=vector_store\n",
367
- ")\n",
368
- "\n",
369
- "# Run the transformation pipeline.\n",
370
- "nodes = pipeline.run(documents=documents, show_progress=True);"
371
- ]
372
- },
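The three extractors write their outputs into each node's metadata rather than its text. A quick way to inspect this (illustrative, not part of the original notebook; the key names are the ones these extractors typically produce):

    # Illustrative only: extractor outputs land in each node's metadata.
    print(nodes[0].metadata.keys())
    # Typically includes 'questions_this_excerpt_can_answer', 'section_summary', and 'excerpt_keywords'.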
373
- {
374
- "cell_type": "code",
375
- "execution_count": 13,
376
- "metadata": {
377
- "colab": {
378
- "base_uri": "https://localhost:8080/"
379
- },
380
- "id": "mPGa85hM2P3P",
381
- "outputId": "9d7811ba-1e10-4098-b6eb-77a4e7d37457"
382
- },
383
- "outputs": [
384
- {
385
- "data": {
386
- "text/plain": [
387
- "108"
388
- ]
389
- },
390
- "execution_count": 13,
391
- "metadata": {},
392
- "output_type": "execute_result"
393
- }
394
- ],
395
- "source": [
396
- "len( nodes )"
397
- ]
398
- },
399
- {
400
- "cell_type": "code",
401
- "execution_count": 14,
402
- "metadata": {
403
- "colab": {
404
- "base_uri": "https://localhost:8080/"
405
- },
406
- "id": "jjnmscmq2cXK",
407
- "outputId": "5f6fa176-4e09-4cc7-bd17-8236b061ad17"
408
- },
409
- "outputs": [
410
- {
411
- "data": {
412
- "text/plain": [
413
- "1024"
414
- ]
415
- },
416
- "execution_count": 14,
417
- "metadata": {},
418
- "output_type": "execute_result"
419
- }
420
- ],
421
- "source": [
422
- "len( nodes[0].embedding )"
423
- ]
424
- },
425
- {
426
- "cell_type": "code",
427
- "execution_count": 15,
428
- "metadata": {
429
- "colab": {
430
- "base_uri": "https://localhost:8080/"
431
- },
432
- "id": "hV9G0lSUJJSa",
433
- "outputId": "453a4ea3-dfda-4da1-ac29-929834c83b40"
434
- },
435
- "outputs": [
436
- {
437
- "name": "stdout",
438
- "output_type": "stream",
439
- "text": [
440
- " adding: mini-llama-articles/ (stored 0%)\n",
441
- " adding: mini-llama-articles/63fe4276-8624-43c7-8c23-32dbfedb2285/ (stored 0%)\n",
442
- " adding: mini-llama-articles/63fe4276-8624-43c7-8c23-32dbfedb2285/data_level0.bin (deflated 100%)\n",
443
- " adding: mini-llama-articles/63fe4276-8624-43c7-8c23-32dbfedb2285/length.bin (deflated 25%)\n",
444
- " adding: mini-llama-articles/63fe4276-8624-43c7-8c23-32dbfedb2285/link_lists.bin (stored 0%)\n",
445
- " adding: mini-llama-articles/63fe4276-8624-43c7-8c23-32dbfedb2285/header.bin (deflated 61%)\n",
446
- " adding: mini-llama-articles/chroma.sqlite3 (deflated 70%)\n"
447
- ]
448
- }
449
- ],
450
- "source": [
451
- "# Compress the vector store directory to a zip file to be able to download and use later.\n",
452
- "!zip -r vectorstore_cohere.zip mini-llama-articles"
453
- ]
454
- },
455
- {
456
- "cell_type": "markdown",
457
- "metadata": {
458
- "id": "OWaT6rL7ksp8"
459
- },
460
- "source": [
461
- "# Load Indexes"
462
- ]
463
- },
464
- {
465
- "cell_type": "markdown",
466
- "metadata": {
467
- "id": "B4w8xP2Ggrvf"
468
- },
469
- "source": [
470
- "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
471
- ]
472
- },
473
- {
474
- "cell_type": "code",
475
- "execution_count": 16,
476
- "metadata": {
477
- "id": "EF-wobGAJRgL"
478
- },
479
- "outputs": [],
480
- "source": [
481
- "# !unzip vectorstore_cohere.zip"
482
- ]
483
- },
484
- {
485
- "cell_type": "code",
486
- "execution_count": 17,
487
- "metadata": {
488
- "id": "mXi56KTXk2sp"
489
- },
490
- "outputs": [],
491
- "source": [
492
- "# Load the vector store from the local storage.\n",
493
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
494
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
495
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
496
- ]
497
- },
498
- {
499
- "cell_type": "code",
500
- "execution_count": 19,
501
- "metadata": {
502
- "id": "9l0PaY230syE"
503
- },
504
- "outputs": [
505
- {
506
- "name": "stderr",
507
- "output_type": "stream",
508
- "text": [
509
- "/var/folders/l7/9qcp7g5x5rl9x8ltw0t85qym0000gn/T/ipykernel_74455/3981499771.py:11: DeprecationWarning: Call to deprecated class method from_defaults. (ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.) -- Deprecated since version 0.10.0.\n",
510
- " service_context = ServiceContext.from_defaults(\n"
511
- ]
512
- }
513
- ],
514
- "source": [
515
- "from llama_index.core import ServiceContext\n",
516
- "\n",
517
- "# Define the Cohere Embedding Model\n",
518
- "embed_model = CohereEmbedding(\n",
519
- " model_name=\"embed-english-v3.0\",\n",
520
- " input_type=\"search_query\",\n",
521
- ")\n",
522
- "\n",
523
- "# Define the ServiceCotext object to tie the LLM for generating final answer,\n",
524
- "# and the embedding model to help with retrieving related nodes.\n",
525
- "service_context = ServiceContext.from_defaults(\n",
526
- " llm=llm, embed_model=embed_model\n",
527
- ")"
528
- ]
529
- },
530
- {
531
- "cell_type": "code",
532
- "execution_count": 21,
533
- "metadata": {
534
- "id": "jKXURvLtkuTS"
535
- },
536
- "outputs": [],
537
- "source": [
538
- "from llama_index.core import VectorStoreIndex\n",
539
- "\n",
540
- "# Create the index based on the vector store.\n",
541
- "index = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context)"
542
- ]
543
- },
544
- {
545
- "cell_type": "markdown",
546
- "metadata": {
547
- "id": "8JPD8yAinVSq"
548
- },
549
- "source": [
550
- "# Query Dataset"
551
- ]
552
- },
553
- {
554
- "cell_type": "code",
555
- "execution_count": 22,
556
- "metadata": {
557
- "id": "b0gue7cyctt1"
558
- },
559
- "outputs": [],
560
- "source": [
561
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
562
- "# and using a LLM to formulate the final answer.\n",
563
- "query_engine = index.as_query_engine()\n",
564
- "\n",
565
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
566
- ]
567
- },
568
- {
569
- "cell_type": "code",
570
- "execution_count": 23,
571
- "metadata": {
572
- "colab": {
573
- "base_uri": "https://localhost:8080/",
574
- "height": 53
575
- },
576
- "id": "VKK3jMprctre",
577
- "outputId": "cb85d598-d1bc-49e9-818f-c7bbde465864"
578
- },
579
- "outputs": [
580
- {
581
- "data": {
582
- "text/plain": [
583
- "'LLaMA2 model has a total of 2 trillion parameters.'"
584
- ]
585
- },
586
- "execution_count": 23,
587
- "metadata": {},
588
- "output_type": "execute_result"
589
- }
590
- ],
591
- "source": [
592
- "res.response"
593
- ]
594
- },
595
- {
596
- "cell_type": "code",
597
- "execution_count": 24,
598
- "metadata": {
599
- "colab": {
600
- "base_uri": "https://localhost:8080/"
601
- },
602
- "id": "465dH4yQc7Ct",
603
- "outputId": "3d2b3ce2-7705-41bb-80e3-4fe6b390dcef"
604
- },
605
- "outputs": [
606
- {
607
- "name": "stdout",
608
- "output_type": "stream",
609
- "text": [
610
- "Node ID\t 0a3368de-02cc-4cb2-8579-3379e9c68101\n",
611
- "Title\t Fine-Tuning a Llama-2 7B Model for Python Code Generation\n",
612
- "Text\t New Llama-2 model In mid-July, Meta released its new family of pre-trained and finetuned models called Llama-2, with an open source and commercial character to facilitate its use and expansion. The base model was released with a chat version and sizes 7B, 13B, and 70B. Together with the models, the corresponding papers were published describing their characteristics and relevant points of the learning process, which provide very interesting information on the subject. For pre-training, 40% more tokens were used, reaching 2T, the context length was doubled and the grouped-query attention (GQA) technique was applied to speed up inference on the heavier 70B model. On the standard transformer architecture, RMSNorm normalization, SwiGLU activation, and rotatory positional embedding are used, the context length reaches 4096 tokens, and an Adam optimizer is applied with a cosine learning rate schedule, a weight decay of 0.1 and gradient clipping. The dataset for tuning For our tuning process, we will take a dataset containing about 18,000 examples where the model is asked to build a Python code that solves a given task. This is an extraction of the original dataset [2], where only the Python language examples are selected. Each row contains the description of the task to be solved, an example of data input to the task if applicable, and the generated code fragment that solves the task is provided [3]. Creating the prompt To carry out an instruction fine-tuning, we must transform each one of our data examples as if it were an instruction, outlining its main sections as follows: Output: Fine-tuning the model To carry out this stage, we have used the Google Colab environment, where we have developed a notebook that allows us to run the training in an interactive way and also a Python script to run the training in unattended mode. For the first test runs, a T4 instance with a high RAM capacity is enough, but when it comes to running the whole dataset and epochs, we have opted to use an A100 instance in order to speed up the training and ensure that its execution time is reasonable. In order to be able to\n",
613
- "Score\t 0.4173821910560196\n",
614
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
615
- "Node ID\t b2b33887-2da1-4838-903e-8e126224095d\n",
616
- "Title\t Fine-Tuning a Llama-2 7B Model for Python Code Generation\n",
617
- "Text\t if it were an instruction, outlining its main sections as follows: Output: Fine-tuning the model To carry out this stage, we have used the Google Colab environment, where we have developed a notebook that allows us to run the training in an interactive way and also a Python script to run the training in unattended mode. For the first test runs, a T4 instance with a high RAM capacity is enough, but when it comes to running the whole dataset and epochs, we have opted to use an A100 instance in order to speed up the training and ensure that its execution time is reasonable. In order to be able to share the model, we will log in to the Huggingface hub using the appropriate token, so that at the end of the whole process, we will upload the model files so that they can be shared with the rest of the users. Fine-tuning techniques: PEFT, Lora, and QLora In recent months, some papers have appeared showing how PEFT techniques can be used to train large language models with a drastic reduction of RAM requirements and consequently allowing fine-tuning of these models on a single GPU of reasonable size. The usual steps to train an LLM consist, first, an intensive pre-training on billions or trillions of tokens to obtain a foundation model, and then a fine-tuning is performed on this model to specialize it on a downstream task. In this fine-tuning phase is where the PEFT technique has its purpose. Parameter Efficient Fine-Tuning (PEFT) allows us to considerably reduce RAM and storage requirements by only fine-tuning a small number of additional parameters, with virtually all model parameters remaining frozen. PEFT has been found to produce good generalization with relatively low-volume datasets. Furthermore, it enhances the reusability and portability of the model, as the small checkpoints obtained can be easily added to the base model, and the base model can be easily fine-tuned and reused in multiple scenarios by adding the PEFT parameters. Finally, since the base model is not adjusted, all the knowledge acquired in the pre-training phase is preserved, thus avoiding catastrophic forgetting. Most widely used PEFT techniques aim to keep the pre-trained base model untouched\n",
618
- "Score\t 0.4013547787636657\n",
619
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
620
- ]
621
- }
622
- ],
623
- "source": [
624
- "# Show the retrieved nodes\n",
625
- "for src in res.source_nodes:\n",
626
- " print(\"Node ID\\t\", src.node_id)\n",
627
- " print(\"Title\\t\", src.metadata['title'])\n",
628
- " print(\"Text\\t\", src.text)\n",
629
- " print(\"Score\\t\", src.score)\n",
630
- " print(\"-_\"*20)"
631
- ]
632
- },
633
- {
634
- "cell_type": "markdown",
635
- "metadata": {
636
- "id": "iMkpzH7vvb09"
637
- },
638
- "source": [
639
- "# Evaluate"
640
- ]
641
- },
642
- {
643
- "cell_type": "code",
644
- "execution_count": 26,
645
- "metadata": {
646
- "colab": {
647
- "base_uri": "https://localhost:8080/"
648
- },
649
- "id": "H8a3eKgKvckU",
650
- "outputId": "85b0765e-5a42-4f60-ccff-fc4bc688f65a"
651
- },
652
- "outputs": [
653
- {
654
- "name": "stderr",
655
- "output_type": "stream",
656
- "text": [
657
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [06:43<00:00, 3.74s/it]\n"
658
- ]
659
- }
660
- ],
661
- "source": [
662
- "from llama_index.core.evaluation import generate_question_context_pairs\n",
663
- "from llama_index.llms.openai import OpenAI\n",
664
- "\n",
665
- "# Create questions for each segment. These questions will be used to\n",
666
- "# assess whether the retriever can accurately identify and return the\n",
667
- "# corresponding segment when queried.\n",
668
- "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
669
- "rag_eval_dataset = generate_question_context_pairs(\n",
670
- " nodes,\n",
671
- " llm=llm,\n",
672
- " num_questions_per_chunk=1\n",
673
- ")\n",
674
- "\n",
675
- "# We can save the evaluation dataset as a json file for later use.\n",
676
- "rag_eval_dataset.save_json(\"./rag_eval_dataset_cohere.json\")"
677
- ]
678
- },
679
- {
680
- "cell_type": "markdown",
681
- "metadata": {
682
- "id": "998nNEGYhKhu"
683
- },
684
- "source": [
685
- "If you have uploaded the generated question JSON file, please uncomment the code in the next cell block. This will avoid the need to generate the questions manually, saving you time and effort."
686
- ]
687
- },
688
- {
689
- "cell_type": "code",
690
- "execution_count": 27,
691
- "metadata": {
692
- "id": "3sA1K84U254o"
693
- },
694
- "outputs": [],
695
- "source": [
696
- "# from llama_index.finetuning.embeddings.common import (\n",
697
- "# EmbeddingQAFinetuneDataset,\n",
698
- "# )\n",
699
- "# rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
700
- "# \"./rag_eval_dataset_cohere.json\"\n",
701
- "# )"
702
- ]
703
- },
704
- {
705
- "cell_type": "code",
706
- "execution_count": 28,
707
- "metadata": {
708
- "id": "H7ubvcbk27vr"
709
- },
710
- "outputs": [],
711
- "source": [
712
- "import pandas as pd\n",
713
- "\n",
714
- "# A simple function to show the evaluation result.\n",
715
- "def display_results_retriever(name, eval_results):\n",
716
- " \"\"\"Display results from evaluate.\"\"\"\n",
717
- "\n",
718
- " metric_dicts = []\n",
719
- " for eval_result in eval_results:\n",
720
- " metric_dict = eval_result.metric_vals_dict\n",
721
- " metric_dicts.append(metric_dict)\n",
722
- "\n",
723
- " full_df = pd.DataFrame(metric_dicts)\n",
724
- "\n",
725
- " hit_rate = full_df[\"hit_rate\"].mean()\n",
726
- " mrr = full_df[\"mrr\"].mean()\n",
727
- "\n",
728
- " metric_df = pd.DataFrame(\n",
729
- " {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
730
- " )\n",
731
- "\n",
732
- " return metric_df"
733
- ]
734
- },
735
- {
736
- "cell_type": "code",
737
- "execution_count": 29,
738
- "metadata": {
739
- "colab": {
740
- "base_uri": "https://localhost:8080/"
741
- },
742
- "id": "uNLxDxoc2-Ac",
743
- "outputId": "8a2df94d-99b5-4aa4-a31e-b6c94256d1bb"
744
- },
745
- "outputs": [
746
- {
747
- "name": "stdout",
748
- "output_type": "stream",
749
- "text": [
750
- " Retriever Name Hit Rate MRR\n",
751
- "0 Retriever top_2 0.677355 0.562124\n",
752
- " Retriever Name Hit Rate MRR\n",
753
- "0 Retriever top_4 0.815631 0.606045\n",
754
- " Retriever Name Hit Rate MRR\n",
755
- "0 Retriever top_6 0.865731 0.615331\n",
756
- " Retriever Name Hit Rate MRR\n",
757
- "0 Retriever top_8 0.887776 0.618301\n",
758
- " Retriever Name Hit Rate MRR\n",
759
- "0 Retriever top_10 0.8998 0.619592\n"
760
- ]
761
- }
762
- ],
763
- "source": [
764
- "from llama_index.core.evaluation import RetrieverEvaluator\n",
765
- "\n",
766
- "# We can evaluate the retievers with different top_k values.\n",
767
- "for i in [2, 4, 6, 8, 10]:\n",
768
- " retriever = index.as_retriever(similarity_top_k=i)\n",
769
- " retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
770
- " [\"mrr\", \"hit_rate\"], retriever=retriever\n",
771
- " )\n",
772
- " eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
773
- " print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
774
- ]
775
- },
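For reference, the two metrics printed above are: hit rate, the fraction of queries whose ground-truth chunk appears anywhere in the top-k retrieved results, and MRR (mean reciprocal rank), the average of 1/rank of the first correct chunk, counted as 0 when it is missed. A hand-rolled sketch (illustrative only, not the evaluator's actual implementation):

    # Illustrative only: rank of the correct chunk for three queries; None = not retrieved.
    ranks = [1, 3, None]
    hit_rate = sum(r is not None for r in ranks) / len(ranks)        # 2/3 ~ 0.67
    mrr = sum(1.0 / r for r in ranks if r is not None) / len(ranks)  # (1 + 1/3) / 3 ~ 0.44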
776
- {
777
- "cell_type": "code",
778
- "execution_count": 30,
779
- "metadata": {
780
- "colab": {
781
- "base_uri": "https://localhost:8080/"
782
- },
783
- "id": "3ukkWC9R2_0J",
784
- "outputId": "d177c25d-a163-4b71-97f4-2af468737bbb"
785
- },
786
- "outputs": [
787
- {
788
- "name": "stderr",
789
- "output_type": "stream",
790
- "text": [
791
- "/var/folders/l7/9qcp7g5x5rl9x8ltw0t85qym0000gn/T/ipykernel_74455/1546854213.py:11: DeprecationWarning: Call to deprecated class method from_defaults. (ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.) -- Deprecated since version 0.10.0.\n",
792
- " service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)\n"
793
- ]
794
- },
795
- {
796
- "name": "stdout",
797
- "output_type": "stream",
798
- "text": [
799
- "top_2 faithfulness_score: 1.0\n",
800
- "top_2 relevancy_score: 1.0\n",
801
- "-_-_-_-_-_-_-_-_-_-_\n",
802
- "top_4 faithfulness_score: 1.0\n",
803
- "top_4 relevancy_score: 1.0\n",
804
- "-_-_-_-_-_-_-_-_-_-_\n",
805
- "top_6 faithfulness_score: 1.0\n",
806
- "top_6 relevancy_score: 1.0\n",
807
- "-_-_-_-_-_-_-_-_-_-_\n",
808
- "top_8 faithfulness_score: 0.45\n",
809
- "top_8 relevancy_score: 0.45\n",
810
- "-_-_-_-_-_-_-_-_-_-_\n",
811
- "top_10 faithfulness_score: 0.65\n",
812
- "top_10 relevancy_score: 0.65\n",
813
- "-_-_-_-_-_-_-_-_-_-_\n"
814
- ]
815
- }
816
- ],
817
- "source": [
818
- "from llama_index.core.evaluation import RelevancyEvaluator, FaithfulnessEvaluator, BatchEvalRunner\n",
819
- "from llama_index.core import ServiceContext\n",
820
- "from llama_index.llms.openai import OpenAI\n",
821
- "\n",
822
- "for i in [2, 4, 6, 8, 10]:\n",
823
- " # Set Faithfulness and Relevancy evaluators\n",
824
- " query_engine = index.as_query_engine(similarity_top_k=i)\n",
825
- "\n",
826
- " # While we use GPT3.5-Turbo to answer questions, we can use GPT4 to evaluate the answers.\n",
827
- " llm_gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
828
- " service_context_gpt4 = ServiceContext.from_defaults(llm=llm_gpt4)\n",
829
- "\n",
830
- " faithfulness_evaluator = FaithfulnessEvaluator(service_context=service_context_gpt4)\n",
831
- " relevancy_evaluator = RelevancyEvaluator(service_context=service_context_gpt4)\n",
832
- "\n",
833
- " # Run evaluation\n",
834
- " queries = list(rag_eval_dataset.queries.values())\n",
835
- " batch_eval_queries = queries[:20]\n",
836
- "\n",
837
- " runner = BatchEvalRunner(\n",
838
- " {\"faithfulness\": faithfulness_evaluator, \"relevancy\": relevancy_evaluator},\n",
839
- " workers=8,\n",
840
- " )\n",
841
- " eval_results = await runner.aevaluate_queries(\n",
842
- " query_engine, queries=batch_eval_queries\n",
843
- " )\n",
844
- " faithfulness_score = sum(result.passing for result in eval_results['faithfulness']) / len(eval_results['faithfulness'])\n",
845
- " print(f\"top_{i} faithfulness_score: {faithfulness_score}\")\n",
846
- "\n",
847
- " relevancy_score = sum(result.passing for result in eval_results['faithfulness']) / len(eval_results['relevancy'])\n",
848
- " print(f\"top_{i} relevancy_score: {relevancy_score}\")\n",
849
- " print(\"-_\"*10)"
850
- ]
851
- },
852
- {
853
- "cell_type": "code",
854
- "execution_count": null,
855
- "metadata": {
856
- "id": "1MB1YD1E3EKM"
857
- },
858
- "outputs": [],
859
- "source": []
860
- }
861
- ],
862
- "metadata": {
863
- "colab": {
864
- "authorship_tag": "ABX9TyMx3DkzJEgLiO/6oTdKzS6v",
865
- "include_colab_link": true,
866
- "provenance": []
867
- },
868
- "kernelspec": {
869
- "display_name": "Python 3",
870
- "name": "python3"
871
- },
872
- "language_info": {
873
- "codemirror_mode": {
874
- "name": "ipython",
875
- "version": 3
876
- },
877
- "file_extension": ".py",
878
- "mimetype": "text/x-python",
879
- "name": "python",
880
- "nbconvert_exporter": "python",
881
- "pygments_lexer": "ipython3",
882
- "version": "3.11.8"
883
- },
884
- "widgets": {
885
- "application/vnd.jupyter.widget-state+json": {
886
- "03b8aded009343f288f0945b64d1f41c": {
887
- "model_module": "@jupyter-widgets/controls",
888
- "model_module_version": "1.5.0",
889
- "model_name": "HTMLModel",
890
- "state": {
891
- "_dom_classes": [],
892
- "_model_module": "@jupyter-widgets/controls",
893
- "_model_module_version": "1.5.0",
894
- "_model_name": "HTMLModel",
895
- "_view_count": null,
896
- "_view_module": "@jupyter-widgets/controls",
897
- "_view_module_version": "1.5.0",
898
- "_view_name": "HTMLView",
899
- "description": "",
900
- "description_tooltip": null,
901
- "layout": "IPY_MODEL_b2ab2dc287a9421ca812074389ee31a7",
902
- "placeholder": "​",
903
- "style": "IPY_MODEL_fa5c2f509ec54c5695a406160ab0626a",
904
- "value": " 108/108 [00:03&lt;00:00, 30.08it/s]"
905
- }
906
- },
907
- "06e7a0370c8c46dd9a47c72a474212d1": {
908
- "model_module": "@jupyter-widgets/controls",
909
- "model_module_version": "1.5.0",
910
- "model_name": "DescriptionStyleModel",
911
- "state": {
912
- "_model_module": "@jupyter-widgets/controls",
913
- "_model_module_version": "1.5.0",
914
- "_model_name": "DescriptionStyleModel",
915
- "_view_count": null,
916
- "_view_module": "@jupyter-widgets/base",
917
- "_view_module_version": "1.2.0",
918
- "_view_name": "StyleView",
919
- "description_width": ""
920
- }
921
- },
922
- "1db171d1920d432283f9e1795c4c0c80": {
923
- "model_module": "@jupyter-widgets/controls",
924
- "model_module_version": "1.5.0",
925
- "model_name": "HTMLModel",
926
- "state": {
927
- "_dom_classes": [],
928
- "_model_module": "@jupyter-widgets/controls",
929
- "_model_module_version": "1.5.0",
930
- "_model_name": "HTMLModel",
931
- "_view_count": null,
932
- "_view_module": "@jupyter-widgets/controls",
933
- "_view_module_version": "1.5.0",
934
- "_view_name": "HTMLView",
935
- "description": "",
936
- "description_tooltip": null,
937
- "layout": "IPY_MODEL_aea6b63cbced40619bf32b1a2c350259",
938
- "placeholder": "​",
939
- "style": "IPY_MODEL_c89c9dd46b454181aadaf82c7296cdae",
940
- "value": "Generating embeddings: 100%"
941
- }
942
- },
943
- "22024efa09cb4330ab68a8c2bdbf92ac": {
944
- "model_module": "@jupyter-widgets/base",
945
- "model_module_version": "1.2.0",
946
- "model_name": "LayoutModel",
947
- "state": {
948
- "_model_module": "@jupyter-widgets/base",
949
- "_model_module_version": "1.2.0",
950
- "_model_name": "LayoutModel",
951
- "_view_count": null,
952
- "_view_module": "@jupyter-widgets/base",
953
- "_view_module_version": "1.2.0",
954
- "_view_name": "LayoutView",
955
- "align_content": null,
956
- "align_items": null,
957
- "align_self": null,
958
- "border": null,
959
- "bottom": null,
960
- "display": null,
961
- "flex": null,
962
- "flex_flow": null,
963
- "grid_area": null,
964
- "grid_auto_columns": null,
965
- "grid_auto_flow": null,
966
- "grid_auto_rows": null,
967
- "grid_column": null,
968
- "grid_gap": null,
969
- "grid_row": null,
970
- "grid_template_areas": null,
971
- "grid_template_columns": null,
972
- "grid_template_rows": null,
973
- "height": null,
974
- "justify_content": null,
975
- "justify_items": null,
976
- "left": null,
977
- "margin": null,
978
- "max_height": null,
979
- "max_width": null,
980
- "min_height": null,
981
- "min_width": null,
982
- "object_fit": null,
983
- "object_position": null,
984
- "order": null,
985
- "overflow": null,
986
- "overflow_x": null,
987
- "overflow_y": null,
988
- "padding": null,
989
- "right": null,
990
- "top": null,
991
- "visibility": null,
992
- "width": null
993
- }
994
- },
995
- "23675bffa00749849ec944f84986ff52": {
996
- "model_module": "@jupyter-widgets/controls",
997
- "model_module_version": "1.5.0",
998
- "model_name": "FloatProgressModel",
999
- "state": {
1000
- "_dom_classes": [],
1001
- "_model_module": "@jupyter-widgets/controls",
1002
- "_model_module_version": "1.5.0",
1003
- "_model_name": "FloatProgressModel",
1004
- "_view_count": null,
1005
- "_view_module": "@jupyter-widgets/controls",
1006
- "_view_module_version": "1.5.0",
1007
- "_view_name": "ProgressView",
1008
- "bar_style": "success",
1009
- "description": "",
1010
- "description_tooltip": null,
1011
- "layout": "IPY_MODEL_268f6f0800164e0ab7f8f31718f7f9be",
1012
- "max": 14,
1013
- "min": 0,
1014
- "orientation": "horizontal",
1015
- "style": "IPY_MODEL_4001b95bd48147fb876b37a644e70dec",
1016
- "value": 14
1017
- }
1018
- },
1019
- "23e0caeaf15546f0b5c62aa263c99e09": {
1020
- "model_module": "@jupyter-widgets/controls",
1021
- "model_module_version": "1.5.0",
1022
- "model_name": "FloatProgressModel",
1023
- "state": {
1024
- "_dom_classes": [],
1025
- "_model_module": "@jupyter-widgets/controls",
1026
- "_model_module_version": "1.5.0",
1027
- "_model_name": "FloatProgressModel",
1028
- "_view_count": null,
1029
- "_view_module": "@jupyter-widgets/controls",
1030
- "_view_module_version": "1.5.0",
1031
- "_view_name": "ProgressView",
1032
- "bar_style": "success",
1033
- "description": "",
1034
- "description_tooltip": null,
1035
- "layout": "IPY_MODEL_bec71553390b44879accb638a5b4873f",
1036
- "max": 108,
1037
- "min": 0,
1038
- "orientation": "horizontal",
1039
- "style": "IPY_MODEL_97e4316196e84c7a82a2dd3e4698bc55",
1040
- "value": 108
1041
- }
1042
- },
1043
- "268f6f0800164e0ab7f8f31718f7f9be": {
1044
- "model_module": "@jupyter-widgets/base",
1045
- "model_module_version": "1.2.0",
1046
- "model_name": "LayoutModel",
1047
- "state": {
1048
- "_model_module": "@jupyter-widgets/base",
1049
- "_model_module_version": "1.2.0",
1050
- "_model_name": "LayoutModel",
1051
- "_view_count": null,
1052
- "_view_module": "@jupyter-widgets/base",
1053
- "_view_module_version": "1.2.0",
1054
- "_view_name": "LayoutView",
1055
- "align_content": null,
1056
- "align_items": null,
1057
- "align_self": null,
1058
- "border": null,
1059
- "bottom": null,
1060
- "display": null,
1061
- "flex": null,
1062
- "flex_flow": null,
1063
- "grid_area": null,
1064
- "grid_auto_columns": null,
1065
- "grid_auto_flow": null,
1066
- "grid_auto_rows": null,
1067
- "grid_column": null,
1068
- "grid_gap": null,
1069
- "grid_row": null,
1070
- "grid_template_areas": null,
1071
- "grid_template_columns": null,
1072
- "grid_template_rows": null,
1073
- "height": null,
1074
- "justify_content": null,
1075
- "justify_items": null,
1076
- "left": null,
1077
- "margin": null,
1078
- "max_height": null,
1079
- "max_width": null,
1080
- "min_height": null,
1081
- "min_width": null,
1082
- "object_fit": null,
1083
- "object_position": null,
1084
- "order": null,
1085
- "overflow": null,
1086
- "overflow_x": null,
1087
- "overflow_y": null,
1088
- "padding": null,
1089
- "right": null,
1090
- "top": null,
1091
- "visibility": null,
1092
- "width": null
1093
- }
1094
- },
1095
- "2b1095050bb847c48855e3b74ae18b19": {
1096
- "model_module": "@jupyter-widgets/controls",
1097
- "model_module_version": "1.5.0",
1098
- "model_name": "HBoxModel",
1099
- "state": {
1100
- "_dom_classes": [],
1101
- "_model_module": "@jupyter-widgets/controls",
1102
- "_model_module_version": "1.5.0",
1103
- "_model_name": "HBoxModel",
1104
- "_view_count": null,
1105
- "_view_module": "@jupyter-widgets/controls",
1106
- "_view_module_version": "1.5.0",
1107
- "_view_name": "HBoxView",
1108
- "box_style": "",
1109
- "children": [
1110
- "IPY_MODEL_a0a1c543115c4764b4150c5d0216370c",
1111
- "IPY_MODEL_23675bffa00749849ec944f84986ff52",
1112
- "IPY_MODEL_9e86b288110f4d418fd9761f59f5637f"
1113
- ],
1114
- "layout": "IPY_MODEL_d6a4fd2a9cf7431b8bf738d9da0e2a7c"
1115
- }
1116
- },
1117
- "4001b95bd48147fb876b37a644e70dec": {
1118
- "model_module": "@jupyter-widgets/controls",
1119
- "model_module_version": "1.5.0",
1120
- "model_name": "ProgressStyleModel",
1121
- "state": {
1122
- "_model_module": "@jupyter-widgets/controls",
1123
- "_model_module_version": "1.5.0",
1124
- "_model_name": "ProgressStyleModel",
1125
- "_view_count": null,
1126
- "_view_module": "@jupyter-widgets/base",
1127
- "_view_module_version": "1.2.0",
1128
- "_view_name": "StyleView",
1129
- "bar_color": null,
1130
- "description_width": ""
1131
- }
1132
- },
1133
- "4d922a99035d45c59ce9868a4ef73d68": {
1134
- "model_module": "@jupyter-widgets/base",
1135
- "model_module_version": "1.2.0",
1136
- "model_name": "LayoutModel",
1137
- "state": {
1138
- "_model_module": "@jupyter-widgets/base",
1139
- "_model_module_version": "1.2.0",
1140
- "_model_name": "LayoutModel",
1141
- "_view_count": null,
1142
- "_view_module": "@jupyter-widgets/base",
1143
- "_view_module_version": "1.2.0",
1144
- "_view_name": "LayoutView",
1145
- "align_content": null,
1146
- "align_items": null,
1147
- "align_self": null,
1148
- "border": null,
1149
- "bottom": null,
1150
- "display": null,
1151
- "flex": null,
1152
- "flex_flow": null,
1153
- "grid_area": null,
1154
- "grid_auto_columns": null,
1155
- "grid_auto_flow": null,
1156
- "grid_auto_rows": null,
1157
- "grid_column": null,
1158
- "grid_gap": null,
1159
- "grid_row": null,
1160
- "grid_template_areas": null,
1161
- "grid_template_columns": null,
1162
- "grid_template_rows": null,
1163
- "height": null,
1164
- "justify_content": null,
1165
- "justify_items": null,
1166
- "left": null,
1167
- "margin": null,
1168
- "max_height": null,
1169
- "max_width": null,
1170
- "min_height": null,
1171
- "min_width": null,
1172
- "object_fit": null,
1173
- "object_position": null,
1174
- "order": null,
1175
- "overflow": null,
1176
- "overflow_x": null,
1177
- "overflow_y": null,
1178
- "padding": null,
1179
- "right": null,
1180
- "top": null,
1181
- "visibility": null,
1182
- "width": null
1183
- }
1184
- },
1185
- "700a1ffb298c4dd799c44fcee540b74c": {
1186
- "model_module": "@jupyter-widgets/base",
1187
- "model_module_version": "1.2.0",
1188
- "model_name": "LayoutModel",
1189
- "state": {
1190
- "_model_module": "@jupyter-widgets/base",
1191
- "_model_module_version": "1.2.0",
1192
- "_model_name": "LayoutModel",
1193
- "_view_count": null,
1194
- "_view_module": "@jupyter-widgets/base",
1195
- "_view_module_version": "1.2.0",
1196
- "_view_name": "LayoutView",
1197
- "align_content": null,
1198
- "align_items": null,
1199
- "align_self": null,
1200
- "border": null,
1201
- "bottom": null,
1202
- "display": null,
1203
- "flex": null,
1204
- "flex_flow": null,
1205
- "grid_area": null,
1206
- "grid_auto_columns": null,
1207
- "grid_auto_flow": null,
1208
- "grid_auto_rows": null,
1209
- "grid_column": null,
1210
- "grid_gap": null,
1211
- "grid_row": null,
1212
- "grid_template_areas": null,
1213
- "grid_template_columns": null,
1214
- "grid_template_rows": null,
1215
- "height": null,
1216
- "justify_content": null,
1217
- "justify_items": null,
1218
- "left": null,
1219
- "margin": null,
1220
- "max_height": null,
1221
- "max_width": null,
1222
- "min_height": null,
1223
- "min_width": null,
1224
- "object_fit": null,
1225
- "object_position": null,
1226
- "order": null,
1227
- "overflow": null,
1228
- "overflow_x": null,
1229
- "overflow_y": null,
1230
- "padding": null,
1231
- "right": null,
1232
- "top": null,
1233
- "visibility": null,
1234
- "width": null
1235
- }
1236
- },
1237
- "97e4316196e84c7a82a2dd3e4698bc55": {
1238
- "model_module": "@jupyter-widgets/controls",
1239
- "model_module_version": "1.5.0",
1240
- "model_name": "ProgressStyleModel",
1241
- "state": {
1242
- "_model_module": "@jupyter-widgets/controls",
1243
- "_model_module_version": "1.5.0",
1244
- "_model_name": "ProgressStyleModel",
1245
- "_view_count": null,
1246
- "_view_module": "@jupyter-widgets/base",
1247
- "_view_module_version": "1.2.0",
1248
- "_view_name": "StyleView",
1249
- "bar_color": null,
1250
- "description_width": ""
1251
- }
1252
- },
1253
- "9dda1537424142e0b7f2fdd5f9c1b98d": {
1254
- "model_module": "@jupyter-widgets/controls",
1255
- "model_module_version": "1.5.0",
1256
- "model_name": "HBoxModel",
1257
- "state": {
1258
- "_dom_classes": [],
1259
- "_model_module": "@jupyter-widgets/controls",
1260
- "_model_module_version": "1.5.0",
1261
- "_model_name": "HBoxModel",
1262
- "_view_count": null,
1263
- "_view_module": "@jupyter-widgets/controls",
1264
- "_view_module_version": "1.5.0",
1265
- "_view_name": "HBoxView",
1266
- "box_style": "",
1267
- "children": [
1268
- "IPY_MODEL_1db171d1920d432283f9e1795c4c0c80",
1269
- "IPY_MODEL_23e0caeaf15546f0b5c62aa263c99e09",
1270
- "IPY_MODEL_03b8aded009343f288f0945b64d1f41c"
1271
- ],
1272
- "layout": "IPY_MODEL_4d922a99035d45c59ce9868a4ef73d68"
1273
- }
1274
- },
1275
- "9e86b288110f4d418fd9761f59f5637f": {
1276
- "model_module": "@jupyter-widgets/controls",
1277
- "model_module_version": "1.5.0",
1278
- "model_name": "HTMLModel",
1279
- "state": {
1280
- "_dom_classes": [],
1281
- "_model_module": "@jupyter-widgets/controls",
1282
- "_model_module_version": "1.5.0",
1283
- "_model_name": "HTMLModel",
1284
- "_view_count": null,
1285
- "_view_module": "@jupyter-widgets/controls",
1286
- "_view_module_version": "1.5.0",
1287
- "_view_name": "HTMLView",
1288
- "description": "",
1289
- "description_tooltip": null,
1290
- "layout": "IPY_MODEL_22024efa09cb4330ab68a8c2bdbf92ac",
1291
- "placeholder": "​",
1292
- "style": "IPY_MODEL_c14678e2b8c546fc9123c94fa47b924d",
1293
- "value": " 14/14 [00:00&lt;00:00, 13.27it/s]"
1294
- }
1295
- },
1296
- "a0a1c543115c4764b4150c5d0216370c": {
1297
- "model_module": "@jupyter-widgets/controls",
1298
- "model_module_version": "1.5.0",
1299
- "model_name": "HTMLModel",
1300
- "state": {
1301
- "_dom_classes": [],
1302
- "_model_module": "@jupyter-widgets/controls",
1303
- "_model_module_version": "1.5.0",
1304
- "_model_name": "HTMLModel",
1305
- "_view_count": null,
1306
- "_view_module": "@jupyter-widgets/controls",
1307
- "_view_module_version": "1.5.0",
1308
- "_view_name": "HTMLView",
1309
- "description": "",
1310
- "description_tooltip": null,
1311
- "layout": "IPY_MODEL_700a1ffb298c4dd799c44fcee540b74c",
1312
- "placeholder": "​",
1313
- "style": "IPY_MODEL_06e7a0370c8c46dd9a47c72a474212d1",
1314
- "value": "Parsing nodes: 100%"
1315
- }
1316
- },
1317
- "aea6b63cbced40619bf32b1a2c350259": {
1318
- "model_module": "@jupyter-widgets/base",
1319
- "model_module_version": "1.2.0",
1320
- "model_name": "LayoutModel",
1321
- "state": {
1322
- "_model_module": "@jupyter-widgets/base",
1323
- "_model_module_version": "1.2.0",
1324
- "_model_name": "LayoutModel",
1325
- "_view_count": null,
1326
- "_view_module": "@jupyter-widgets/base",
1327
- "_view_module_version": "1.2.0",
1328
- "_view_name": "LayoutView",
1329
- "align_content": null,
1330
- "align_items": null,
1331
- "align_self": null,
1332
- "border": null,
1333
- "bottom": null,
1334
- "display": null,
1335
- "flex": null,
1336
- "flex_flow": null,
1337
- "grid_area": null,
1338
- "grid_auto_columns": null,
1339
- "grid_auto_flow": null,
1340
- "grid_auto_rows": null,
1341
- "grid_column": null,
1342
- "grid_gap": null,
1343
- "grid_row": null,
1344
- "grid_template_areas": null,
1345
- "grid_template_columns": null,
1346
- "grid_template_rows": null,
1347
- "height": null,
1348
- "justify_content": null,
1349
- "justify_items": null,
1350
- "left": null,
1351
- "margin": null,
1352
- "max_height": null,
1353
- "max_width": null,
1354
- "min_height": null,
1355
- "min_width": null,
1356
- "object_fit": null,
1357
- "object_position": null,
1358
- "order": null,
1359
- "overflow": null,
1360
- "overflow_x": null,
1361
- "overflow_y": null,
1362
- "padding": null,
1363
- "right": null,
1364
- "top": null,
1365
- "visibility": null,
1366
- "width": null
1367
- }
1368
- },
1369
- "b2ab2dc287a9421ca812074389ee31a7": {
1370
- "model_module": "@jupyter-widgets/base",
1371
- "model_module_version": "1.2.0",
1372
- "model_name": "LayoutModel",
1373
- "state": {
1374
- "_model_module": "@jupyter-widgets/base",
1375
- "_model_module_version": "1.2.0",
1376
- "_model_name": "LayoutModel",
1377
- "_view_count": null,
1378
- "_view_module": "@jupyter-widgets/base",
1379
- "_view_module_version": "1.2.0",
1380
- "_view_name": "LayoutView",
1381
- "align_content": null,
1382
- "align_items": null,
1383
- "align_self": null,
1384
- "border": null,
1385
- "bottom": null,
1386
- "display": null,
1387
- "flex": null,
1388
- "flex_flow": null,
1389
- "grid_area": null,
1390
- "grid_auto_columns": null,
1391
- "grid_auto_flow": null,
1392
- "grid_auto_rows": null,
1393
- "grid_column": null,
1394
- "grid_gap": null,
1395
- "grid_row": null,
1396
- "grid_template_areas": null,
1397
- "grid_template_columns": null,
1398
- "grid_template_rows": null,
1399
- "height": null,
1400
- "justify_content": null,
1401
- "justify_items": null,
1402
- "left": null,
1403
- "margin": null,
1404
- "max_height": null,
1405
- "max_width": null,
1406
- "min_height": null,
1407
- "min_width": null,
1408
- "object_fit": null,
1409
- "object_position": null,
1410
- "order": null,
1411
- "overflow": null,
1412
- "overflow_x": null,
1413
- "overflow_y": null,
1414
- "padding": null,
1415
- "right": null,
1416
- "top": null,
1417
- "visibility": null,
1418
- "width": null
1419
- }
1420
- },
1421
- "bec71553390b44879accb638a5b4873f": {
1422
- "model_module": "@jupyter-widgets/base",
1423
- "model_module_version": "1.2.0",
1424
- "model_name": "LayoutModel",
1425
- "state": {
1426
- "_model_module": "@jupyter-widgets/base",
1427
- "_model_module_version": "1.2.0",
1428
- "_model_name": "LayoutModel",
1429
- "_view_count": null,
1430
- "_view_module": "@jupyter-widgets/base",
1431
- "_view_module_version": "1.2.0",
1432
- "_view_name": "LayoutView",
1433
- "align_content": null,
1434
- "align_items": null,
1435
- "align_self": null,
1436
- "border": null,
1437
- "bottom": null,
1438
- "display": null,
1439
- "flex": null,
1440
- "flex_flow": null,
1441
- "grid_area": null,
1442
- "grid_auto_columns": null,
1443
- "grid_auto_flow": null,
1444
- "grid_auto_rows": null,
1445
- "grid_column": null,
1446
- "grid_gap": null,
1447
- "grid_row": null,
1448
- "grid_template_areas": null,
1449
- "grid_template_columns": null,
1450
- "grid_template_rows": null,
1451
- "height": null,
1452
- "justify_content": null,
1453
- "justify_items": null,
1454
- "left": null,
1455
- "margin": null,
1456
- "max_height": null,
1457
- "max_width": null,
1458
- "min_height": null,
1459
- "min_width": null,
1460
- "object_fit": null,
1461
- "object_position": null,
1462
- "order": null,
1463
- "overflow": null,
1464
- "overflow_x": null,
1465
- "overflow_y": null,
1466
- "padding": null,
1467
- "right": null,
1468
- "top": null,
1469
- "visibility": null,
1470
- "width": null
1471
- }
1472
- },
1473
- "c14678e2b8c546fc9123c94fa47b924d": {
1474
- "model_module": "@jupyter-widgets/controls",
1475
- "model_module_version": "1.5.0",
1476
- "model_name": "DescriptionStyleModel",
1477
- "state": {
1478
- "_model_module": "@jupyter-widgets/controls",
1479
- "_model_module_version": "1.5.0",
1480
- "_model_name": "DescriptionStyleModel",
1481
- "_view_count": null,
1482
- "_view_module": "@jupyter-widgets/base",
1483
- "_view_module_version": "1.2.0",
1484
- "_view_name": "StyleView",
1485
- "description_width": ""
1486
- }
1487
- },
1488
- "c89c9dd46b454181aadaf82c7296cdae": {
1489
- "model_module": "@jupyter-widgets/controls",
1490
- "model_module_version": "1.5.0",
1491
- "model_name": "DescriptionStyleModel",
1492
- "state": {
1493
- "_model_module": "@jupyter-widgets/controls",
1494
- "_model_module_version": "1.5.0",
1495
- "_model_name": "DescriptionStyleModel",
1496
- "_view_count": null,
1497
- "_view_module": "@jupyter-widgets/base",
1498
- "_view_module_version": "1.2.0",
1499
- "_view_name": "StyleView",
1500
- "description_width": ""
1501
- }
1502
- },
1503
- "d6a4fd2a9cf7431b8bf738d9da0e2a7c": {
1504
- "model_module": "@jupyter-widgets/base",
1505
- "model_module_version": "1.2.0",
1506
- "model_name": "LayoutModel",
1507
- "state": {
1508
- "_model_module": "@jupyter-widgets/base",
1509
- "_model_module_version": "1.2.0",
1510
- "_model_name": "LayoutModel",
1511
- "_view_count": null,
1512
- "_view_module": "@jupyter-widgets/base",
1513
- "_view_module_version": "1.2.0",
1514
- "_view_name": "LayoutView",
1515
- "align_content": null,
1516
- "align_items": null,
1517
- "align_self": null,
1518
- "border": null,
1519
- "bottom": null,
1520
- "display": null,
1521
- "flex": null,
1522
- "flex_flow": null,
1523
- "grid_area": null,
1524
- "grid_auto_columns": null,
1525
- "grid_auto_flow": null,
1526
- "grid_auto_rows": null,
1527
- "grid_column": null,
1528
- "grid_gap": null,
1529
- "grid_row": null,
1530
- "grid_template_areas": null,
1531
- "grid_template_columns": null,
1532
- "grid_template_rows": null,
1533
- "height": null,
1534
- "justify_content": null,
1535
- "justify_items": null,
1536
- "left": null,
1537
- "margin": null,
1538
- "max_height": null,
1539
- "max_width": null,
1540
- "min_height": null,
1541
- "min_width": null,
1542
- "object_fit": null,
1543
- "object_position": null,
1544
- "order": null,
1545
- "overflow": null,
1546
- "overflow_x": null,
1547
- "overflow_y": null,
1548
- "padding": null,
1549
- "right": null,
1550
- "top": null,
1551
- "visibility": null,
1552
- "width": null
1553
- }
1554
- },
1555
- "fa5c2f509ec54c5695a406160ab0626a": {
1556
- "model_module": "@jupyter-widgets/controls",
1557
- "model_module_version": "1.5.0",
1558
- "model_name": "DescriptionStyleModel",
1559
- "state": {
1560
- "_model_module": "@jupyter-widgets/controls",
1561
- "_model_module_version": "1.5.0",
1562
- "_model_name": "DescriptionStyleModel",
1563
- "_view_count": null,
1564
- "_view_module": "@jupyter-widgets/base",
1565
- "_view_module_version": "1.2.0",
1566
- "_view_name": "StyleView",
1567
- "description_width": ""
1568
- }
1569
- }
1570
- }
1571
- }
1572
- },
1573
- "nbformat": 4,
1574
- "nbformat_minor": 0
1575
- }
 
 
 
 
notebooks/10-Adding_Reranking.ipynb DELETED
@@ -1,1462 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "colab_type": "text",
7
- "id": "view-in-github"
8
- },
9
- "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/10-Adding_Reranking.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
- ]
12
- },
13
- {
14
- "cell_type": "markdown",
15
- "metadata": {
16
- "id": "-zE1h0uQV7uT"
17
- },
18
- "source": [
19
- "# Install Packages and Setup Variables"
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": 1,
25
- "metadata": {
26
- "colab": {
27
- "base_uri": "https://localhost:8080/"
28
- },
29
- "id": "QPJzr-I9XQ7l",
30
- "outputId": "440f5d93-1cac-4a70-e244-5e8af314464e"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-embeddings-cohere llama-index-readers-web cohere==4.47 tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic"
35
- ]
36
- },
37
- {
38
- "cell_type": "code",
39
- "execution_count": 21,
40
- "metadata": {
41
- "id": "riuXwpSPcvWC"
42
- },
43
- "outputs": [],
44
- "source": [
45
- "import os\n",
46
- "\n",
47
- "# Set the \"OPENAI_API_KEY\" and \"CO_API_KEY\" (Cohere) in the Python environment.\n",
48
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\"\n",
49
- "os.environ[\"CO_API_KEY\"] = \"<YOUR_COHERE_KEY>\"\n",
50
- "cohere_key = os.environ[\"CO_API_KEY\"]"
51
- ]
52
- },
53
- {
54
- "cell_type": "code",
55
- "execution_count": 2,
56
- "metadata": {
57
- "id": "jIEeZzqLbz0J"
58
- },
59
- "outputs": [],
60
- "source": [
61
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
62
- "\n",
63
- "import nest_asyncio\n",
64
- "\n",
65
- "nest_asyncio.apply()"
66
- ]
67
- },
68
- {
69
- "cell_type": "markdown",
70
- "metadata": {
71
- "id": "Bkgi2OrYzF7q"
72
- },
73
- "source": [
74
- "# Load a Model"
75
- ]
76
- },
77
- {
78
- "cell_type": "code",
79
- "execution_count": 3,
80
- "metadata": {
81
- "id": "9oGT6crooSSj"
82
- },
83
- "outputs": [
84
- {
85
- "name": "stderr",
86
- "output_type": "stream",
87
- "text": [
88
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
89
- " from .autonotebook import tqdm as notebook_tqdm\n"
90
- ]
91
- }
92
- ],
93
- "source": [
94
- "from llama_index.llms.openai import OpenAI\n",
95
- "\n",
96
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
97
- ]
98
- },
99
- {
100
- "cell_type": "markdown",
101
- "metadata": {
102
- "id": "0BwVuJXlzHVL"
103
- },
104
- "source": [
105
- "# Create a VectoreStore"
106
- ]
107
- },
108
- {
109
- "cell_type": "code",
110
- "execution_count": 4,
111
- "metadata": {
112
- "id": "SQP87lHczHKc"
113
- },
114
- "outputs": [],
115
- "source": [
116
- "import chromadb\n",
117
- "\n",
118
- "# create client and a new collection\n",
119
- "# chromadb.EphemeralClient saves data in-memory.\n",
120
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
121
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
122
- ]
123
- },
124
- {
125
- "cell_type": "code",
126
- "execution_count": 5,
127
- "metadata": {
128
- "id": "zAaGcYMJzHAN"
129
- },
130
- "outputs": [],
131
- "source": [
132
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
133
- "\n",
134
- "# Define a storage context object using the created vector database.\n",
135
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
136
- ]
137
- },
138
- {
139
- "cell_type": "markdown",
140
- "metadata": {
141
- "id": "I9JbAzFcjkpn"
142
- },
143
- "source": [
144
- "# Load the Dataset (CSV)"
145
- ]
146
- },
147
- {
148
- "cell_type": "markdown",
149
- "metadata": {
150
- "id": "ceveDuYdWCYk"
151
- },
152
- "source": [
153
- "## Download"
154
- ]
155
- },
156
- {
157
- "cell_type": "markdown",
158
- "metadata": {
159
- "id": "eZwf6pv7WFmD"
160
- },
161
- "source": [
162
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
163
- ]
164
- },
165
- {
166
- "cell_type": "code",
167
- "execution_count": 6,
168
- "metadata": {
169
- "colab": {
170
- "base_uri": "https://localhost:8080/"
171
- },
172
- "id": "wl_pbPvMlv1h",
173
- "outputId": "f844a7a8-484b-4693-8715-42506778b1de"
174
- },
175
- "outputs": [
176
- {
177
- "name": "stdout",
178
- "output_type": "stream",
179
- "text": [
180
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
181
- " Dload Upload Total Spent Left Speed\n",
182
- "100 169k 100 169k 0 0 768k 0 --:--:-- --:--:-- --:--:-- 770k\n"
183
- ]
184
- }
185
- ],
186
- "source": [
187
- "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
188
- ]
189
- },
190
- {
191
- "cell_type": "markdown",
192
- "metadata": {
193
- "id": "VWBLtDbUWJfA"
194
- },
195
- "source": [
196
- "## Read File"
197
- ]
198
- },
199
- {
200
- "cell_type": "code",
201
- "execution_count": 7,
202
- "metadata": {
203
- "colab": {
204
- "base_uri": "https://localhost:8080/"
205
- },
206
- "id": "0Q9sxuW0g3Gd",
207
- "outputId": "473050f8-0640-4e7c-91e7-3ea3485cfb51"
208
- },
209
- "outputs": [
210
- {
211
- "data": {
212
- "text/plain": [
213
- "14"
214
- ]
215
- },
216
- "execution_count": 7,
217
- "metadata": {},
218
- "output_type": "execute_result"
219
- }
220
- ],
221
- "source": [
222
- "import csv\n",
223
- "\n",
224
- "rows = []\n",
225
- "\n",
226
- "# Load the file as a JSON\n",
227
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
228
- " csv_reader = csv.reader(file)\n",
229
- "\n",
230
- " for idx, row in enumerate( csv_reader ):\n",
231
- " if idx == 0: continue; # Skip header row\n",
232
- " rows.append( row )\n",
233
- "\n",
234
- "# The number of characters in the dataset.\n",
235
- "len( rows )"
236
- ]
237
- },
238
- {
239
- "cell_type": "markdown",
240
- "metadata": {
241
- "id": "S17g2RYOjmf2"
242
- },
243
- "source": [
244
- "# Convert to Document obj"
245
- ]
246
- },
247
- {
248
- "cell_type": "code",
249
- "execution_count": 8,
250
- "metadata": {
251
- "id": "YizvmXPejkJE"
252
- },
253
- "outputs": [],
254
- "source": [
255
- "from llama_index.core import Document\n",
256
- "\n",
257
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
258
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
259
- ]
260
- },
261
- {
262
- "cell_type": "markdown",
263
- "metadata": {
264
- "id": "qjuLbmFuWsyl"
265
- },
266
- "source": [
267
- "# Transforming"
268
- ]
269
- },
270
- {
271
- "cell_type": "code",
272
- "execution_count": 9,
273
- "metadata": {
274
- "id": "9z3t70DGWsjO"
275
- },
276
- "outputs": [],
277
- "source": [
278
- "from llama_index.core.text_splitter import TokenTextSplitter\n",
279
- "\n",
280
- "# Define the splitter object that split the text into segments with 512 tokens,\n",
281
- "# with a 128 overlap between the segments.\n",
282
- "text_splitter = TokenTextSplitter(\n",
283
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
284
- ")"
285
- ]
286
- },
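For intuition about the overlap setting above, here is a minimal sketch of the splitter on a toy string (the tiny chunk sizes and the sample text are illustrative assumptions, not the notebook's settings):

    from llama_index.core.text_splitter import TokenTextSplitter

    # Illustrative settings: very small chunks so the overlap is easy to see.
    toy_splitter = TokenTextSplitter(separator=" ", chunk_size=16, chunk_overlap=4)

    sample = " ".join(f"w{i}" for i in range(50))  # a hypothetical 50-word text
    chunks = toy_splitter.split_text(sample)

    # Consecutive chunks share roughly `chunk_overlap` tokens, so content that
    # straddles a chunk boundary is not cut off from its context.
    for chunk in chunks[:2]:
        print(chunk)

The 512/128 values used in the notebook apply the same idea at a scale suited to article-length documents.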
287
- {
288
- "cell_type": "code",
289
- "execution_count": 10,
290
- "metadata": {
291
- "colab": {
292
- "base_uri": "https://localhost:8080/",
293
- "height": 413,
294
- "referenced_widgets": [
295
- "4bb1e341a77d41c9aca0e6680911fb43",
296
- "1d1faa15f5564b68b948eaffa58626b3",
297
- "df22a67ae80b4673b708eea74646be61",
298
- "3657dc19b6ac477b9f05bb6519271473",
299
- "9045e402f0344428acc085d63df7ff03",
300
- "f57a9ac0d924408fbaaac795c172862e",
301
- "4cb8ba074b254e91b8877cc87ae0d279",
302
- "cbd3e1411b2c4eeb943243c9d45245c4",
303
- "04af736f84044e37aa6599aa708a77bc",
304
- "8d35ab8c65ba47e1be446b98f0942ac4",
305
- "75e40756175f463e874630f229ef4066",
306
- "a0dd5f2c99b2407f9f5705587976ae76",
307
- "8728ca516bd0474586b19e0c9b457499",
308
- "aac433a9a64c48dfb18d7a01f64d3b27",
309
- "4802a63f700e48fca16b5d89fbab333d",
310
- "3f55aef52aee4e77864d53e3197c3cc3",
311
- "f41df4b6ab4c4132b0d20232002f0294",
312
- "3a621edd23354ea5924189885c97dee4",
313
- "73d34cae940e4748a7b3127351925e65",
314
- "2dc4a6c935ac4ef38ed9030608bd4b2f",
315
- "4fcebf4a9ef54729889cc6ad4cbe5d10",
316
- "195aa202b03a42a3a674e9da2f13d878"
317
- ]
318
- },
319
- "id": "P9LDJ7o-Wsc-",
320
- "outputId": "72b67575-2d55-4145-90be-a367f128fa44"
321
- },
322
- "outputs": [
323
- {
324
- "name": "stderr",
325
- "output_type": "stream",
326
- "text": [
327
- "Parsing nodes: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 14/14 [00:00<00:00, 28.69it/s]\n",
328
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [01:02<00:00, 1.72it/s]\n",
329
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [01:09<00:00, 1.55it/s]\n",
330
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [01:24<00:00, 1.29it/s]\n",
331
- "Generating embeddings: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:01<00:00, 56.53it/s]\n"
332
- ]
333
- }
334
- ],
335
- "source": [
336
- "from llama_index.core.extractors import (\n",
337
- " SummaryExtractor,\n",
338
- " QuestionsAnsweredExtractor,\n",
339
- " KeywordExtractor,\n",
340
- ")\n",
341
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
342
- "from llama_index.core.ingestion import IngestionPipeline\n",
343
- "\n",
344
- "# Create the pipeline to apply the transformation on each chunk,\n",
345
- "# and store the transformed text in the chroma vector store.\n",
346
- "pipeline = IngestionPipeline(\n",
347
- " transformations=[\n",
348
- " text_splitter,\n",
349
- " QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
350
- " SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
351
- " KeywordExtractor(keywords=10, llm=llm),\n",
352
- " OpenAIEmbedding(),\n",
353
- " ],\n",
354
- " vector_store=vector_store\n",
355
- ")\n",
356
- "\n",
357
- "# Run the transformation pipeline.\n",
358
- "nodes = pipeline.run(documents=documents, show_progress=True);"
359
- ]
360
- },
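Each of the three extractors above issues one LLM call per chunk (hence the three 108-step progress bars in the output), so metadata extraction dominates the ingestion time and cost. As a hedged sketch, a cheaper variant could keep only chunking and embedding, reusing the objects already defined in this notebook:

    # Leaner ingestion: skip the LLM-based metadata extractors and only
    # chunk + embed. Much cheaper and faster, at the cost of poorer metadata.
    cheap_pipeline = IngestionPipeline(
        transformations=[text_splitter, OpenAIEmbedding()],
        vector_store=vector_store,
    )
    # nodes = cheap_pipeline.run(documents=documents, show_progress=True)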
361
- {
362
- "cell_type": "code",
363
- "execution_count": 12,
364
- "metadata": {
365
- "colab": {
366
- "base_uri": "https://localhost:8080/"
367
- },
368
- "id": "mPGa85hM2P3P",
369
- "outputId": "4586ad85-71bd-4407-a584-326941a5f474"
370
- },
371
- "outputs": [
372
- {
373
- "data": {
374
- "text/plain": [
375
- "108"
376
- ]
377
- },
378
- "execution_count": 12,
379
- "metadata": {},
380
- "output_type": "execute_result"
381
- }
382
- ],
383
- "source": [
384
- "len( nodes )"
385
- ]
386
- },
387
- {
388
- "cell_type": "code",
389
- "execution_count": 13,
390
- "metadata": {
391
- "colab": {
392
- "base_uri": "https://localhost:8080/"
393
- },
394
- "id": "OeeG3jxT0taW",
395
- "outputId": "8a2e3c63-c346-4034-8147-f2f1f996c326"
396
- },
397
- "outputs": [
398
- {
399
- "name": "stdout",
400
- "output_type": "stream",
401
- "text": [
402
- "updating: mini-llama-articles/ (stored 0%)\n",
403
- "updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
404
- " adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/ (stored 0%)\n",
405
- " adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/data_level0.bin (deflated 100%)\n",
406
- " adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/length.bin (deflated 48%)\n",
407
- " adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/link_lists.bin (stored 0%)\n",
408
- " adding: mini-llama-articles/0e0852fc-d2a0-47e2-9824-f77f2f6d1b14/header.bin (deflated 61%)\n"
409
- ]
410
- }
411
- ],
412
- "source": [
413
- "# Compress the vector store directory to a zip file to be able to download and use later.\n",
414
- "!zip -r vectorstore.zip mini-llama-articles"
415
- ]
416
- },
417
- {
418
- "cell_type": "markdown",
419
- "metadata": {
420
- "id": "OWaT6rL7ksp8"
421
- },
422
- "source": [
423
- "# Load Indexes"
424
- ]
425
- },
426
- {
427
- "cell_type": "markdown",
428
- "metadata": {
429
- "id": "6fFGWiz3hoTd"
430
- },
431
- "source": [
432
- "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
433
- ]
434
- },
435
- {
436
- "cell_type": "code",
437
- "execution_count": 14,
438
- "metadata": {
439
- "colab": {
440
- "base_uri": "https://localhost:8080/"
441
- },
442
- "id": "XxPMJ4tq06qx",
443
- "outputId": "8445e40a-b3c6-44ff-dfde-37cd4c73ffa2"
444
- },
445
- "outputs": [],
446
- "source": [
447
- "# !unzip vectorstore.zip"
448
- ]
449
- },
450
- {
451
- "cell_type": "code",
452
- "execution_count": 15,
453
- "metadata": {
454
- "id": "mXi56KTXk2sp"
455
- },
456
- "outputs": [],
457
- "source": [
458
- "# Load the vector store from the local storage.\n",
459
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
460
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
461
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
462
- ]
463
- },
464
- {
465
- "cell_type": "code",
466
- "execution_count": 17,
467
- "metadata": {
468
- "id": "jKXURvLtkuTS"
469
- },
470
- "outputs": [],
471
- "source": [
472
- "from llama_index.core import VectorStoreIndex\n",
473
- "\n",
474
- "# Create the index based on the vector store.\n",
475
- "index = VectorStoreIndex.from_vector_store(vector_store)"
476
- ]
477
- },
478
- {
479
- "cell_type": "markdown",
480
- "metadata": {
481
- "id": "8JPD8yAinVSq"
482
- },
483
- "source": [
484
- "# Query Dataset"
485
- ]
486
- },
487
- {
488
- "cell_type": "code",
489
- "execution_count": 22,
490
- "metadata": {
491
- "id": "BsFfFpVgn01h"
492
- },
493
- "outputs": [],
494
- "source": [
495
- "from llama_index.postprocessor.cohere_rerank import CohereRerank\n",
496
- "\n",
497
- "# Define the Cohere Reranking object to return only the first two highest ranking chunks.\n",
498
- "cohere_rerank = CohereRerank(top_n=2, api_key=cohere_key)"
499
- ]
500
- },
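The reranker can also be exercised on its own, outside a query engine. A minimal sketch, assuming `postprocess_nodes` (the standard LlamaIndex node-postprocessor entry point) and reusing the index defined above:

    # Standalone reranking sketch: fetch 10 candidates from the vector store,
    # then let Cohere keep only the top_n=2 most relevant ones for the query.
    retriever = index.as_retriever(similarity_top_k=10)
    candidates = retriever.retrieve("How many parameters LLaMA2 model has?")

    reranked = cohere_rerank.postprocess_nodes(
        candidates, query_str="How many parameters LLaMA2 model has?"
    )
    print(len(reranked))  # expected: 2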
501
- {
502
- "cell_type": "code",
503
- "execution_count": 23,
504
- "metadata": {
505
- "id": "b0gue7cyctt1"
506
- },
507
- "outputs": [],
508
- "source": [
509
- "# Define the ServiceCotext object to tie the LLM for generating final answer,\n",
510
- "# and the embedding model to help with retrieving related nodes.\n",
511
- "# The `node_postprocessors` function will be applied to the retrieved nodes.\n",
512
- "query_engine = index.as_query_engine(\n",
513
- " similarity_top_k=10,\n",
514
- " node_postprocessors=[cohere_rerank]\n",
515
- ")\n",
516
- "\n",
517
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
518
- ]
519
- },
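For reference, `as_query_engine` with `node_postprocessors` is roughly equivalent to composing the pieces by hand: retrieve 10 candidates, rerank them down to 2, then synthesize an answer from those 2 chunks. A sketch of that composition (treat it as illustrative rather than the notebook's method):

    from llama_index.core import get_response_synthesizer
    from llama_index.core.query_engine import RetrieverQueryEngine

    # Hand-rolled equivalent of the query engine built above.
    manual_engine = RetrieverQueryEngine(
        retriever=index.as_retriever(similarity_top_k=10),
        response_synthesizer=get_response_synthesizer(llm=llm),
        node_postprocessors=[cohere_rerank],
    )
    # res = manual_engine.query("How many parameters LLaMA2 model has?")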
520
- {
521
- "cell_type": "code",
522
- "execution_count": 24,
523
- "metadata": {
524
- "colab": {
525
- "base_uri": "https://localhost:8080/",
526
- "height": 53
527
- },
528
- "id": "VKK3jMprctre",
529
- "outputId": "3acce09e-faa2-4acd-ac8f-f62380d91567"
530
- },
531
- "outputs": [
532
- {
533
- "data": {
534
- "text/plain": [
535
- "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
536
- ]
537
- },
538
- "execution_count": 24,
539
- "metadata": {},
540
- "output_type": "execute_result"
541
- }
542
- ],
543
- "source": [
544
- "res.response"
545
- ]
546
- },
547
- {
548
- "cell_type": "code",
549
- "execution_count": 25,
550
- "metadata": {
551
- "colab": {
552
- "base_uri": "https://localhost:8080/"
553
- },
554
- "id": "nvSmOtqBoCY2",
555
- "outputId": "052a70df-d98d-4a87-bb7c-9e56d34db7f7"
556
- },
557
- "outputs": [
558
- {
559
- "name": "stdout",
560
- "output_type": "stream",
561
- "text": [
562
- "Node ID\t 6fea54fa-138b-4931-9e37-42fe16fca62a\n",
563
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
564
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
565
- "Score\t 0.90582335\n",
566
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
567
- "Node ID\t 99774ac6-5d8e-492b-8c94-4e9717afd2fc\n",
568
- "Title\t Exploring Large Language Models -Part 3\n",
569
- "Text\t LM model training via UnSupervised learning). Note that this model was loaded in 4-bit, making it runnable on a single T4 GPU and trained with QLoRa. With QLoRA, only a fraction of the adapter weights are trained and summed with the existing frozen pre-trained weights of the model during inference. Here is an illustrative Colab notebook. You can see that training the model with just the text as is, does not result in proper output to questions. The answers are not affected by the training data. Take 2: Instruct Fine-tuning with QLoRa Instruction Tuning concept is a higher-level training concept introduced by this paper FineTuned Language Models Are Zero shot Learners (FLAN) We leverage the intuition that NLP tasks can be described via natural language instructions, such as \"Is the sentiment of this movie review positive or negative?\" or \"Translate 'how are you' into Chinese.\" We take a pre-trained language model of 137B parameters and perform instruction tuning ... Since we use QLoRa we are effectively closely following this paper - QLORA: Efficient Finetuning of Quantized LLMs concerning the training data set, the format that the authors used to train their Gauanco model This is the format for the Llama2 model and will be different for others. One of the hardest problems of training is finding or creating a good quality data set to train. In our case, converting the available training data set to the instruction data set. Since our use case is Closed Book QA, we need to convert this to a QA format. Using older NLP methods like NER (Named Entity Recognition) and then using that to create a QA dataset was not effective. This is where the Self-instruct concept could be used However previous to Llama2, the best-performing model was the GPT 3/4 model via ChatGPT or its API and using these models to do the same was expensive. The 7 billion model of Llama2 has sufficient NLU (Natural Language Understanding) to create output based on a particular format. Running this in 4-bit mode via Quantisation makes it feasible compute-wise to run this on a large data set and convert it to a QA dataset. This was the prompt used. The\n",
570
- "Score\t 0.88363826\n",
571
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
572
- ]
573
- }
574
- ],
575
- "source": [
576
- "# Show the retrieved nodes\n",
577
- "for src in res.source_nodes:\n",
578
- " print(\"Node ID\\t\", src.node_id)\n",
579
- " print(\"Title\\t\", src.metadata['title'])\n",
580
- " print(\"Text\\t\", src.text)\n",
581
- " print(\"Score\\t\", src.score)\n",
582
- " print(\"-_\"*20)"
583
- ]
584
- },
585
- {
586
- "cell_type": "markdown",
587
- "metadata": {
588
- "id": "iMkpzH7vvb09"
589
- },
590
- "source": [
591
- "# Evaluate"
592
- ]
593
- },
594
- {
595
- "cell_type": "code",
596
- "execution_count": 26,
597
- "metadata": {
598
- "colab": {
599
- "base_uri": "https://localhost:8080/"
600
- },
601
- "id": "H8a3eKgKvckU",
602
- "outputId": "cb004dc9-6b49-4d10-a790-1d5257318cd7"
603
- },
604
- "outputs": [
605
- {
606
- "name": "stderr",
607
- "output_type": "stream",
608
- "text": [
609
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [04:30<00:00, 2.51s/it]\n"
610
- ]
611
- }
612
- ],
613
- "source": [
614
- "from llama_index.core.evaluation import generate_question_context_pairs\n",
615
- "from llama_index.llms.openai import OpenAI\n",
616
- "\n",
617
- "# Create questions for each segment. These questions will be used to\n",
618
- "# assess whether the retriever can accurately identify and return the\n",
619
- "# corresponding segment when queried.\n",
620
- "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
621
- "rag_eval_dataset = generate_question_context_pairs(\n",
622
- " nodes,\n",
623
- " llm=llm,\n",
624
- " num_questions_per_chunk=1\n",
625
- ")\n",
626
- "\n",
627
- "# We can save the evaluation dataset as a json file for later use.\n",
628
- "rag_eval_dataset.save_json(\"./rag_eval_dataset_rerank.json\")"
629
- ]
630
- },
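The generated dataset maps each synthetic question to the node it was created from. A quick way to inspect one pair (the attribute names follow the EmbeddingQAFinetuneDataset schema, stated here as an assumption):

    # Inspect one generated (question, expected-node) pair.
    first_id = next(iter(rag_eval_dataset.queries))
    print(rag_eval_dataset.queries[first_id])        # a generated question
    print(rag_eval_dataset.relevant_docs[first_id])  # id(s) of the source chunk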
631
- {
632
- "cell_type": "markdown",
633
- "metadata": {
634
- "id": "QvZBMpsXiWEw"
635
- },
636
- "source": [
637
- "If you have uploaded the generated question JSON file, please uncomment the code in the next cell block. This will avoid the need to generate the questions manually, saving you time and effort."
638
- ]
639
- },
640
- {
641
- "cell_type": "code",
642
- "execution_count": 27,
643
- "metadata": {
644
- "id": "3sA1K84U254o"
645
- },
646
- "outputs": [],
647
- "source": [
648
- "# from llama_index.finetuning.embeddings.common import (\n",
649
- "# EmbeddingQAFinetuneDataset,\n",
650
- "# )\n",
651
- "# rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
652
- "# \"./rag_eval_dataset_rerank.json\"\n",
653
- "# )"
654
- ]
655
- },
656
- {
657
- "cell_type": "code",
658
- "execution_count": 28,
659
- "metadata": {
660
- "id": "H7ubvcbk27vr"
661
- },
662
- "outputs": [],
663
- "source": [
664
- "import pandas as pd\n",
665
- "\n",
666
- "# A simple function to show the evaluation result.\n",
667
- "def display_results_retriever(name, eval_results):\n",
668
- " \"\"\"Display results from evaluate.\"\"\"\n",
669
- "\n",
670
- " metric_dicts = []\n",
671
- " for eval_result in eval_results:\n",
672
- " metric_dict = eval_result.metric_vals_dict\n",
673
- " metric_dicts.append(metric_dict)\n",
674
- "\n",
675
- " full_df = pd.DataFrame(metric_dicts)\n",
676
- "\n",
677
- " hit_rate = full_df[\"hit_rate\"].mean()\n",
678
- " mrr = full_df[\"mrr\"].mean()\n",
679
- "\n",
680
- " metric_df = pd.DataFrame(\n",
681
- " {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
682
- " )\n",
683
- "\n",
684
- " return metric_df"
685
- ]
686
- },
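For intuition about the two metrics: hit rate is the fraction of queries whose expected chunk appears anywhere in the retrieved list, and MRR averages the reciprocal rank of the first relevant hit (counting 0 for a miss). A tiny self-contained example with made-up ranks:

    # Toy illustration of hit rate and MRR over four queries.
    ranks = [1, 3, None, 2]  # rank of the expected chunk; None = not retrieved

    hit_rate = sum(r is not None for r in ranks) / len(ranks)        # 0.75
    mrr = sum(1.0 / r for r in ranks if r is not None) / len(ranks)  # ~0.458
    print(hit_rate, mrr)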
687
- {
688
- "cell_type": "code",
689
- "execution_count": 29,
690
- "metadata": {
691
- "colab": {
692
- "base_uri": "https://localhost:8080/"
693
- },
694
- "id": "uNLxDxoc2-Ac",
695
- "outputId": "f42dc98d-789f-4779-c693-0603cd43e4c9"
696
- },
697
- "outputs": [
698
- {
699
- "name": "stdout",
700
- "output_type": "stream",
701
- "text": [
702
- " Retriever Name Hit Rate MRR\n",
703
- "0 Retriever top_2 0.665975 0.54668\n",
704
- " Retriever Name Hit Rate MRR\n",
705
- "0 Retriever top_4 0.782158 0.582815\n",
706
- " Retriever Name Hit Rate MRR\n",
707
- "0 Retriever top_6 0.8361 0.59305\n",
708
- " Retriever Name Hit Rate MRR\n",
709
- "0 Retriever top_8 0.854772 0.595606\n",
710
- " Retriever Name Hit Rate MRR\n",
711
- "0 Retriever top_10 0.871369 0.597404\n"
712
- ]
713
- }
714
- ],
715
- "source": [
716
- "from llama_index.core.evaluation import RetrieverEvaluator\n",
717
- "\n",
718
- "# We can evaluate the retievers with different top_k values.\n",
719
- "for i in [2, 4, 6, 8, 10]:\n",
720
- " retriever = index.as_retriever(similarity_top_k=i, node_postprocessors=[cohere_rerank])\n",
721
- " retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
722
- " [\"mrr\", \"hit_rate\"], retriever=retriever\n",
723
- " )\n",
724
- " eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
725
- " print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
726
- ]
727
- },
728
- {
729
- "cell_type": "markdown",
730
- "metadata": {
731
- "id": "ikMYkBATFY3l"
732
- },
733
- "source": [
734
- "It's important to keep in mind that all the results above are based on only two samples even when the retriever fetch 10 items from the vector store. So, it means that instead of passing 10 chunks of data which translates into more API usage and higher cost, we will get the same quality by passing 2 chunk of data.\n",
735
- "\n",
736
- "The bot's hit rate without Cohere Reranking using two chunks is 0.65, while we get the 0.87 hit rate using two chunks after the Cohere's post processing."
737
- ]
738
- },
739
- {
740
- "cell_type": "code",
741
- "execution_count": null,
742
- "metadata": {
743
- "id": "-DMSFJI8F6jl"
744
- },
745
- "outputs": [],
746
- "source": []
747
- }
748
- ],
749
- "metadata": {
750
- "colab": {
751
- "authorship_tag": "ABX9TyNPhIDuwnBNGZxkxkMnLtTw",
752
- "include_colab_link": true,
753
- "provenance": []
754
- },
755
- "kernelspec": {
756
- "display_name": "Python 3",
757
- "name": "python3"
758
- },
759
- "language_info": {
760
- "codemirror_mode": {
761
- "name": "ipython",
762
- "version": 3
763
- },
764
- "file_extension": ".py",
765
- "mimetype": "text/x-python",
766
- "name": "python",
767
- "nbconvert_exporter": "python",
768
- "pygments_lexer": "ipython3",
769
- "version": "3.11.8"
770
- },
771
- "widgets": {
772
- "application/vnd.jupyter.widget-state+json": {
773
- "04af736f84044e37aa6599aa708a77bc": {
774
- "model_module": "@jupyter-widgets/controls",
775
- "model_module_version": "1.5.0",
776
- "model_name": "ProgressStyleModel",
777
- "state": {
778
- "_model_module": "@jupyter-widgets/controls",
779
- "_model_module_version": "1.5.0",
780
- "_model_name": "ProgressStyleModel",
781
- "_view_count": null,
782
- "_view_module": "@jupyter-widgets/base",
783
- "_view_module_version": "1.2.0",
784
- "_view_name": "StyleView",
785
- "bar_color": null,
786
- "description_width": ""
787
- }
788
- },
789
- "195aa202b03a42a3a674e9da2f13d878": {
790
- "model_module": "@jupyter-widgets/controls",
791
- "model_module_version": "1.5.0",
792
- "model_name": "DescriptionStyleModel",
793
- "state": {
794
- "_model_module": "@jupyter-widgets/controls",
795
- "_model_module_version": "1.5.0",
796
- "_model_name": "DescriptionStyleModel",
797
- "_view_count": null,
798
- "_view_module": "@jupyter-widgets/base",
799
- "_view_module_version": "1.2.0",
800
- "_view_name": "StyleView",
801
- "description_width": ""
802
- }
803
- },
804
- "1d1faa15f5564b68b948eaffa58626b3": {
805
- "model_module": "@jupyter-widgets/controls",
806
- "model_module_version": "1.5.0",
807
- "model_name": "HTMLModel",
808
- "state": {
809
- "_dom_classes": [],
810
- "_model_module": "@jupyter-widgets/controls",
811
- "_model_module_version": "1.5.0",
812
- "_model_name": "HTMLModel",
813
- "_view_count": null,
814
- "_view_module": "@jupyter-widgets/controls",
815
- "_view_module_version": "1.5.0",
816
- "_view_name": "HTMLView",
817
- "description": "",
818
- "description_tooltip": null,
819
- "layout": "IPY_MODEL_f57a9ac0d924408fbaaac795c172862e",
820
- "placeholder": "​",
821
- "style": "IPY_MODEL_4cb8ba074b254e91b8877cc87ae0d279",
822
- "value": "Parsing nodes: 100%"
823
- }
824
- },
825
- "2dc4a6c935ac4ef38ed9030608bd4b2f": {
826
- "model_module": "@jupyter-widgets/controls",
827
- "model_module_version": "1.5.0",
828
- "model_name": "ProgressStyleModel",
829
- "state": {
830
- "_model_module": "@jupyter-widgets/controls",
831
- "_model_module_version": "1.5.0",
832
- "_model_name": "ProgressStyleModel",
833
- "_view_count": null,
834
- "_view_module": "@jupyter-widgets/base",
835
- "_view_module_version": "1.2.0",
836
- "_view_name": "StyleView",
837
- "bar_color": null,
838
- "description_width": ""
839
- }
840
- },
841
- "3657dc19b6ac477b9f05bb6519271473": {
842
- "model_module": "@jupyter-widgets/controls",
843
- "model_module_version": "1.5.0",
844
- "model_name": "HTMLModel",
845
- "state": {
846
- "_dom_classes": [],
847
- "_model_module": "@jupyter-widgets/controls",
848
- "_model_module_version": "1.5.0",
849
- "_model_name": "HTMLModel",
850
- "_view_count": null,
851
- "_view_module": "@jupyter-widgets/controls",
852
- "_view_module_version": "1.5.0",
853
- "_view_name": "HTMLView",
854
- "description": "",
855
- "description_tooltip": null,
856
- "layout": "IPY_MODEL_8d35ab8c65ba47e1be446b98f0942ac4",
857
- "placeholder": "​",
858
- "style": "IPY_MODEL_75e40756175f463e874630f229ef4066",
859
- "value": " 14/14 [00:01&lt;00:00, 10.94it/s]"
860
- }
861
- },
862
- "3a621edd23354ea5924189885c97dee4": {
863
- "model_module": "@jupyter-widgets/controls",
864
- "model_module_version": "1.5.0",
865
- "model_name": "DescriptionStyleModel",
866
- "state": {
867
- "_model_module": "@jupyter-widgets/controls",
868
- "_model_module_version": "1.5.0",
869
- "_model_name": "DescriptionStyleModel",
870
- "_view_count": null,
871
- "_view_module": "@jupyter-widgets/base",
872
- "_view_module_version": "1.2.0",
873
- "_view_name": "StyleView",
874
- "description_width": ""
875
- }
876
- },
877
- "3f55aef52aee4e77864d53e3197c3cc3": {
878
- "model_module": "@jupyter-widgets/base",
879
- "model_module_version": "1.2.0",
880
- "model_name": "LayoutModel",
881
- "state": {
882
- "_model_module": "@jupyter-widgets/base",
883
- "_model_module_version": "1.2.0",
884
- "_model_name": "LayoutModel",
885
- "_view_count": null,
886
- "_view_module": "@jupyter-widgets/base",
887
- "_view_module_version": "1.2.0",
888
- "_view_name": "LayoutView",
889
- "align_content": null,
890
- "align_items": null,
891
- "align_self": null,
892
- "border": null,
893
- "bottom": null,
894
- "display": null,
895
- "flex": null,
896
- "flex_flow": null,
897
- "grid_area": null,
898
- "grid_auto_columns": null,
899
- "grid_auto_flow": null,
900
- "grid_auto_rows": null,
901
- "grid_column": null,
902
- "grid_gap": null,
903
- "grid_row": null,
904
- "grid_template_areas": null,
905
- "grid_template_columns": null,
906
- "grid_template_rows": null,
907
- "height": null,
908
- "justify_content": null,
909
- "justify_items": null,
910
- "left": null,
911
- "margin": null,
912
- "max_height": null,
913
- "max_width": null,
914
- "min_height": null,
915
- "min_width": null,
916
- "object_fit": null,
917
- "object_position": null,
918
- "order": null,
919
- "overflow": null,
920
- "overflow_x": null,
921
- "overflow_y": null,
922
- "padding": null,
923
- "right": null,
924
- "top": null,
925
- "visibility": null,
926
- "width": null
927
- }
928
- },
929
- "4802a63f700e48fca16b5d89fbab333d": {
930
- "model_module": "@jupyter-widgets/controls",
931
- "model_module_version": "1.5.0",
932
- "model_name": "HTMLModel",
933
- "state": {
934
- "_dom_classes": [],
935
- "_model_module": "@jupyter-widgets/controls",
936
- "_model_module_version": "1.5.0",
937
- "_model_name": "HTMLModel",
938
- "_view_count": null,
939
- "_view_module": "@jupyter-widgets/controls",
940
- "_view_module_version": "1.5.0",
941
- "_view_name": "HTMLView",
942
- "description": "",
943
- "description_tooltip": null,
944
- "layout": "IPY_MODEL_4fcebf4a9ef54729889cc6ad4cbe5d10",
945
- "placeholder": "​",
946
- "style": "IPY_MODEL_195aa202b03a42a3a674e9da2f13d878",
947
- "value": " 108/108 [00:07&lt;00:00, 10.36it/s]"
948
- }
949
- },
950
- "4bb1e341a77d41c9aca0e6680911fb43": {
951
- "model_module": "@jupyter-widgets/controls",
952
- "model_module_version": "1.5.0",
953
- "model_name": "HBoxModel",
954
- "state": {
955
- "_dom_classes": [],
956
- "_model_module": "@jupyter-widgets/controls",
957
- "_model_module_version": "1.5.0",
958
- "_model_name": "HBoxModel",
959
- "_view_count": null,
960
- "_view_module": "@jupyter-widgets/controls",
961
- "_view_module_version": "1.5.0",
962
- "_view_name": "HBoxView",
963
- "box_style": "",
964
- "children": [
965
- "IPY_MODEL_1d1faa15f5564b68b948eaffa58626b3",
966
- "IPY_MODEL_df22a67ae80b4673b708eea74646be61",
967
- "IPY_MODEL_3657dc19b6ac477b9f05bb6519271473"
968
- ],
969
- "layout": "IPY_MODEL_9045e402f0344428acc085d63df7ff03"
970
- }
971
- },
972
- "4cb8ba074b254e91b8877cc87ae0d279": {
973
- "model_module": "@jupyter-widgets/controls",
974
- "model_module_version": "1.5.0",
975
- "model_name": "DescriptionStyleModel",
976
- "state": {
977
- "_model_module": "@jupyter-widgets/controls",
978
- "_model_module_version": "1.5.0",
979
- "_model_name": "DescriptionStyleModel",
980
- "_view_count": null,
981
- "_view_module": "@jupyter-widgets/base",
982
- "_view_module_version": "1.2.0",
983
- "_view_name": "StyleView",
984
- "description_width": ""
985
- }
986
- },
987
- "4fcebf4a9ef54729889cc6ad4cbe5d10": {
988
- "model_module": "@jupyter-widgets/base",
989
- "model_module_version": "1.2.0",
990
- "model_name": "LayoutModel",
991
- "state": {
992
- "_model_module": "@jupyter-widgets/base",
993
- "_model_module_version": "1.2.0",
994
- "_model_name": "LayoutModel",
995
- "_view_count": null,
996
- "_view_module": "@jupyter-widgets/base",
997
- "_view_module_version": "1.2.0",
998
- "_view_name": "LayoutView",
999
- "align_content": null,
1000
- "align_items": null,
1001
- "align_self": null,
1002
- "border": null,
1003
- "bottom": null,
1004
- "display": null,
1005
- "flex": null,
1006
- "flex_flow": null,
1007
- "grid_area": null,
1008
- "grid_auto_columns": null,
1009
- "grid_auto_flow": null,
1010
- "grid_auto_rows": null,
1011
- "grid_column": null,
1012
- "grid_gap": null,
1013
- "grid_row": null,
1014
- "grid_template_areas": null,
1015
- "grid_template_columns": null,
1016
- "grid_template_rows": null,
1017
- "height": null,
1018
- "justify_content": null,
1019
- "justify_items": null,
1020
- "left": null,
1021
- "margin": null,
1022
- "max_height": null,
1023
- "max_width": null,
1024
- "min_height": null,
1025
- "min_width": null,
1026
- "object_fit": null,
1027
- "object_position": null,
1028
- "order": null,
1029
- "overflow": null,
1030
- "overflow_x": null,
1031
- "overflow_y": null,
1032
- "padding": null,
1033
- "right": null,
1034
- "top": null,
1035
- "visibility": null,
1036
- "width": null
1037
- }
1038
- },
1039
- "73d34cae940e4748a7b3127351925e65": {
1040
- "model_module": "@jupyter-widgets/base",
1041
- "model_module_version": "1.2.0",
1042
- "model_name": "LayoutModel",
1043
- "state": {
1044
- "_model_module": "@jupyter-widgets/base",
1045
- "_model_module_version": "1.2.0",
1046
- "_model_name": "LayoutModel",
1047
- "_view_count": null,
1048
- "_view_module": "@jupyter-widgets/base",
1049
- "_view_module_version": "1.2.0",
1050
- "_view_name": "LayoutView",
1051
- "align_content": null,
1052
- "align_items": null,
1053
- "align_self": null,
1054
- "border": null,
1055
- "bottom": null,
1056
- "display": null,
1057
- "flex": null,
1058
- "flex_flow": null,
1059
- "grid_area": null,
1060
- "grid_auto_columns": null,
1061
- "grid_auto_flow": null,
1062
- "grid_auto_rows": null,
1063
- "grid_column": null,
1064
- "grid_gap": null,
1065
- "grid_row": null,
1066
- "grid_template_areas": null,
1067
- "grid_template_columns": null,
1068
- "grid_template_rows": null,
1069
- "height": null,
1070
- "justify_content": null,
1071
- "justify_items": null,
1072
- "left": null,
1073
- "margin": null,
1074
- "max_height": null,
1075
- "max_width": null,
1076
- "min_height": null,
1077
- "min_width": null,
1078
- "object_fit": null,
1079
- "object_position": null,
1080
- "order": null,
1081
- "overflow": null,
1082
- "overflow_x": null,
1083
- "overflow_y": null,
1084
- "padding": null,
1085
- "right": null,
1086
- "top": null,
1087
- "visibility": null,
1088
- "width": null
1089
- }
1090
- },
1091
- "75e40756175f463e874630f229ef4066": {
1092
- "model_module": "@jupyter-widgets/controls",
1093
- "model_module_version": "1.5.0",
1094
- "model_name": "DescriptionStyleModel",
1095
- "state": {
1096
- "_model_module": "@jupyter-widgets/controls",
1097
- "_model_module_version": "1.5.0",
1098
- "_model_name": "DescriptionStyleModel",
1099
- "_view_count": null,
1100
- "_view_module": "@jupyter-widgets/base",
1101
- "_view_module_version": "1.2.0",
1102
- "_view_name": "StyleView",
1103
- "description_width": ""
1104
- }
1105
- },
1106
- "8728ca516bd0474586b19e0c9b457499": {
1107
- "model_module": "@jupyter-widgets/controls",
1108
- "model_module_version": "1.5.0",
1109
- "model_name": "HTMLModel",
1110
- "state": {
1111
- "_dom_classes": [],
1112
- "_model_module": "@jupyter-widgets/controls",
1113
- "_model_module_version": "1.5.0",
1114
- "_model_name": "HTMLModel",
1115
- "_view_count": null,
1116
- "_view_module": "@jupyter-widgets/controls",
1117
- "_view_module_version": "1.5.0",
1118
- "_view_name": "HTMLView",
1119
- "description": "",
1120
- "description_tooltip": null,
1121
- "layout": "IPY_MODEL_f41df4b6ab4c4132b0d20232002f0294",
1122
- "placeholder": "​",
1123
- "style": "IPY_MODEL_3a621edd23354ea5924189885c97dee4",
1124
- "value": "Generating embeddings: 100%"
1125
- }
1126
- },
1127
- "8d35ab8c65ba47e1be446b98f0942ac4": {
1128
- "model_module": "@jupyter-widgets/base",
1129
- "model_module_version": "1.2.0",
1130
- "model_name": "LayoutModel",
1131
- "state": {
1132
- "_model_module": "@jupyter-widgets/base",
1133
- "_model_module_version": "1.2.0",
1134
- "_model_name": "LayoutModel",
1135
- "_view_count": null,
1136
- "_view_module": "@jupyter-widgets/base",
1137
- "_view_module_version": "1.2.0",
1138
- "_view_name": "LayoutView",
1139
- "align_content": null,
1140
- "align_items": null,
1141
- "align_self": null,
1142
- "border": null,
1143
- "bottom": null,
1144
- "display": null,
1145
- "flex": null,
1146
- "flex_flow": null,
1147
- "grid_area": null,
1148
- "grid_auto_columns": null,
1149
- "grid_auto_flow": null,
1150
- "grid_auto_rows": null,
1151
- "grid_column": null,
1152
- "grid_gap": null,
1153
- "grid_row": null,
1154
- "grid_template_areas": null,
1155
- "grid_template_columns": null,
1156
- "grid_template_rows": null,
1157
- "height": null,
1158
- "justify_content": null,
1159
- "justify_items": null,
1160
- "left": null,
1161
- "margin": null,
1162
- "max_height": null,
1163
- "max_width": null,
1164
- "min_height": null,
1165
- "min_width": null,
1166
- "object_fit": null,
1167
- "object_position": null,
1168
- "order": null,
1169
- "overflow": null,
1170
- "overflow_x": null,
1171
- "overflow_y": null,
1172
- "padding": null,
1173
- "right": null,
1174
- "top": null,
1175
- "visibility": null,
1176
- "width": null
1177
- }
1178
- },
1179
- "9045e402f0344428acc085d63df7ff03": {
1180
- "model_module": "@jupyter-widgets/base",
1181
- "model_module_version": "1.2.0",
1182
- "model_name": "LayoutModel",
1183
- "state": {
1184
- "_model_module": "@jupyter-widgets/base",
1185
- "_model_module_version": "1.2.0",
1186
- "_model_name": "LayoutModel",
1187
- "_view_count": null,
1188
- "_view_module": "@jupyter-widgets/base",
1189
- "_view_module_version": "1.2.0",
1190
- "_view_name": "LayoutView",
1191
- "align_content": null,
1192
- "align_items": null,
1193
- "align_self": null,
1194
- "border": null,
1195
- "bottom": null,
1196
- "display": null,
1197
- "flex": null,
1198
- "flex_flow": null,
1199
- "grid_area": null,
1200
- "grid_auto_columns": null,
1201
- "grid_auto_flow": null,
1202
- "grid_auto_rows": null,
1203
- "grid_column": null,
1204
- "grid_gap": null,
1205
- "grid_row": null,
1206
- "grid_template_areas": null,
1207
- "grid_template_columns": null,
1208
- "grid_template_rows": null,
1209
- "height": null,
1210
- "justify_content": null,
1211
- "justify_items": null,
1212
- "left": null,
1213
- "margin": null,
1214
- "max_height": null,
1215
- "max_width": null,
1216
- "min_height": null,
1217
- "min_width": null,
1218
- "object_fit": null,
1219
- "object_position": null,
1220
- "order": null,
1221
- "overflow": null,
1222
- "overflow_x": null,
1223
- "overflow_y": null,
1224
- "padding": null,
1225
- "right": null,
1226
- "top": null,
1227
- "visibility": null,
1228
- "width": null
1229
- }
1230
- },
1231
- "a0dd5f2c99b2407f9f5705587976ae76": {
1232
- "model_module": "@jupyter-widgets/controls",
1233
- "model_module_version": "1.5.0",
1234
- "model_name": "HBoxModel",
1235
- "state": {
1236
- "_dom_classes": [],
1237
- "_model_module": "@jupyter-widgets/controls",
1238
- "_model_module_version": "1.5.0",
1239
- "_model_name": "HBoxModel",
1240
- "_view_count": null,
1241
- "_view_module": "@jupyter-widgets/controls",
1242
- "_view_module_version": "1.5.0",
1243
- "_view_name": "HBoxView",
1244
- "box_style": "",
1245
- "children": [
1246
- "IPY_MODEL_8728ca516bd0474586b19e0c9b457499",
1247
- "IPY_MODEL_aac433a9a64c48dfb18d7a01f64d3b27",
1248
- "IPY_MODEL_4802a63f700e48fca16b5d89fbab333d"
1249
- ],
1250
- "layout": "IPY_MODEL_3f55aef52aee4e77864d53e3197c3cc3"
1251
- }
1252
- },
1253
- "aac433a9a64c48dfb18d7a01f64d3b27": {
1254
- "model_module": "@jupyter-widgets/controls",
1255
- "model_module_version": "1.5.0",
1256
- "model_name": "FloatProgressModel",
1257
- "state": {
1258
- "_dom_classes": [],
1259
- "_model_module": "@jupyter-widgets/controls",
1260
- "_model_module_version": "1.5.0",
1261
- "_model_name": "FloatProgressModel",
1262
- "_view_count": null,
1263
- "_view_module": "@jupyter-widgets/controls",
1264
- "_view_module_version": "1.5.0",
1265
- "_view_name": "ProgressView",
1266
- "bar_style": "success",
1267
- "description": "",
1268
- "description_tooltip": null,
1269
- "layout": "IPY_MODEL_73d34cae940e4748a7b3127351925e65",
1270
- "max": 108,
1271
- "min": 0,
1272
- "orientation": "horizontal",
1273
- "style": "IPY_MODEL_2dc4a6c935ac4ef38ed9030608bd4b2f",
1274
- "value": 108
1275
- }
1276
- },
1277
- "cbd3e1411b2c4eeb943243c9d45245c4": {
1278
- "model_module": "@jupyter-widgets/base",
1279
- "model_module_version": "1.2.0",
1280
- "model_name": "LayoutModel",
1281
- "state": {
1282
- "_model_module": "@jupyter-widgets/base",
1283
- "_model_module_version": "1.2.0",
1284
- "_model_name": "LayoutModel",
1285
- "_view_count": null,
1286
- "_view_module": "@jupyter-widgets/base",
1287
- "_view_module_version": "1.2.0",
1288
- "_view_name": "LayoutView",
1289
- "align_content": null,
1290
- "align_items": null,
1291
- "align_self": null,
1292
- "border": null,
1293
- "bottom": null,
1294
- "display": null,
1295
- "flex": null,
1296
- "flex_flow": null,
1297
- "grid_area": null,
1298
- "grid_auto_columns": null,
1299
- "grid_auto_flow": null,
1300
- "grid_auto_rows": null,
1301
- "grid_column": null,
1302
- "grid_gap": null,
1303
- "grid_row": null,
1304
- "grid_template_areas": null,
1305
- "grid_template_columns": null,
1306
- "grid_template_rows": null,
1307
- "height": null,
1308
- "justify_content": null,
1309
- "justify_items": null,
1310
- "left": null,
1311
- "margin": null,
1312
- "max_height": null,
1313
- "max_width": null,
1314
- "min_height": null,
1315
- "min_width": null,
1316
- "object_fit": null,
1317
- "object_position": null,
1318
- "order": null,
1319
- "overflow": null,
1320
- "overflow_x": null,
1321
- "overflow_y": null,
1322
- "padding": null,
1323
- "right": null,
1324
- "top": null,
1325
- "visibility": null,
1326
- "width": null
1327
- }
1328
- },
1329
- "df22a67ae80b4673b708eea74646be61": {
1330
- "model_module": "@jupyter-widgets/controls",
1331
- "model_module_version": "1.5.0",
1332
- "model_name": "FloatProgressModel",
1333
- "state": {
1334
- "_dom_classes": [],
1335
- "_model_module": "@jupyter-widgets/controls",
1336
- "_model_module_version": "1.5.0",
1337
- "_model_name": "FloatProgressModel",
1338
- "_view_count": null,
1339
- "_view_module": "@jupyter-widgets/controls",
1340
- "_view_module_version": "1.5.0",
1341
- "_view_name": "ProgressView",
1342
- "bar_style": "success",
1343
- "description": "",
1344
- "description_tooltip": null,
1345
- "layout": "IPY_MODEL_cbd3e1411b2c4eeb943243c9d45245c4",
1346
- "max": 14,
1347
- "min": 0,
1348
- "orientation": "horizontal",
1349
- "style": "IPY_MODEL_04af736f84044e37aa6599aa708a77bc",
1350
- "value": 14
1351
- }
1352
- },
1353
- "f41df4b6ab4c4132b0d20232002f0294": {
1354
- "model_module": "@jupyter-widgets/base",
1355
- "model_module_version": "1.2.0",
1356
- "model_name": "LayoutModel",
1357
- "state": {
1358
- "_model_module": "@jupyter-widgets/base",
1359
- "_model_module_version": "1.2.0",
1360
- "_model_name": "LayoutModel",
1361
- "_view_count": null,
1362
- "_view_module": "@jupyter-widgets/base",
1363
- "_view_module_version": "1.2.0",
1364
- "_view_name": "LayoutView",
1365
- "align_content": null,
1366
- "align_items": null,
1367
- "align_self": null,
1368
- "border": null,
1369
- "bottom": null,
1370
- "display": null,
1371
- "flex": null,
1372
- "flex_flow": null,
1373
- "grid_area": null,
1374
- "grid_auto_columns": null,
1375
- "grid_auto_flow": null,
1376
- "grid_auto_rows": null,
1377
- "grid_column": null,
1378
- "grid_gap": null,
1379
- "grid_row": null,
1380
- "grid_template_areas": null,
1381
- "grid_template_columns": null,
1382
- "grid_template_rows": null,
1383
- "height": null,
1384
- "justify_content": null,
1385
- "justify_items": null,
1386
- "left": null,
1387
- "margin": null,
1388
- "max_height": null,
1389
- "max_width": null,
1390
- "min_height": null,
1391
- "min_width": null,
1392
- "object_fit": null,
1393
- "object_position": null,
1394
- "order": null,
1395
- "overflow": null,
1396
- "overflow_x": null,
1397
- "overflow_y": null,
1398
- "padding": null,
1399
- "right": null,
1400
- "top": null,
1401
- "visibility": null,
1402
- "width": null
1403
- }
1404
- },
1405
- "f57a9ac0d924408fbaaac795c172862e": {
1406
- "model_module": "@jupyter-widgets/base",
1407
- "model_module_version": "1.2.0",
1408
- "model_name": "LayoutModel",
1409
- "state": {
1410
- "_model_module": "@jupyter-widgets/base",
1411
- "_model_module_version": "1.2.0",
1412
- "_model_name": "LayoutModel",
1413
- "_view_count": null,
1414
- "_view_module": "@jupyter-widgets/base",
1415
- "_view_module_version": "1.2.0",
1416
- "_view_name": "LayoutView",
1417
- "align_content": null,
1418
- "align_items": null,
1419
- "align_self": null,
1420
- "border": null,
1421
- "bottom": null,
1422
- "display": null,
1423
- "flex": null,
1424
- "flex_flow": null,
1425
- "grid_area": null,
1426
- "grid_auto_columns": null,
1427
- "grid_auto_flow": null,
1428
- "grid_auto_rows": null,
1429
- "grid_column": null,
1430
- "grid_gap": null,
1431
- "grid_row": null,
1432
- "grid_template_areas": null,
1433
- "grid_template_columns": null,
1434
- "grid_template_rows": null,
1435
- "height": null,
1436
- "justify_content": null,
1437
- "justify_items": null,
1438
- "left": null,
1439
- "margin": null,
1440
- "max_height": null,
1441
- "max_width": null,
1442
- "min_height": null,
1443
- "min_width": null,
1444
- "object_fit": null,
1445
- "object_position": null,
1446
- "order": null,
1447
- "overflow": null,
1448
- "overflow_x": null,
1449
- "overflow_y": null,
1450
- "padding": null,
1451
- "right": null,
1452
- "top": null,
1453
- "visibility": null,
1454
- "width": null
1455
- }
1456
- }
1457
- }
1458
- }
1459
- },
1460
- "nbformat": 4,
1461
- "nbformat_minor": 0
1462
- }
notebooks/11-Adding_Hybrid_Search.ipynb DELETED
@@ -1,1645 +0,0 @@
- {
- "cells": [
- {
- "cell_type": "markdown",
- "metadata": {
- "colab_type": "text",
- "id": "view-in-github"
- },
- "source": [
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/11-Adding_Hybrid_Search.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "-zE1h0uQV7uT"
- },
- "source": [
- "# Install Packages and Setup Variables"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "QPJzr-I9XQ7l",
- "outputId": "3115889a-14ee-457c-c0d5-271c1053a1e9"
- },
- "outputs": [],
- "source": [
- "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-readers-web tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 1,
- "metadata": {
- "id": "riuXwpSPcvWC"
- },
- "outputs": [],
- "source": [
- "import os\n",
- "\n",
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {
- "id": "jIEeZzqLbz0J"
- },
- "outputs": [],
- "source": [
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
- "\n",
- "import nest_asyncio\n",
- "\n",
- "nest_asyncio.apply()"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "Bkgi2OrYzF7q"
- },
- "source": [
- "# Load a Model"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {
- "id": "9oGT6crooSSj"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
- " from .autonotebook import tqdm as notebook_tqdm\n"
- ]
- }
- ],
- "source": [
- "from llama_index.llms.openai import OpenAI\n",
- "\n",
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0BwVuJXlzHVL"
- },
- "source": [
- "# Create a VectorStore"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 4,
- "metadata": {
- "id": "SQP87lHczHKc"
- },
- "outputs": [],
- "source": [
- "import chromadb\n",
- "\n",
- "# create client and a new collection\n",
- "# chromadb.EphemeralClient saves data in-memory.\n",
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 6,
- "metadata": {
- "id": "zAaGcYMJzHAN"
- },
- "outputs": [],
- "source": [
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
- "\n",
- "# Define a storage context object using the created vector database.\n",
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "I9JbAzFcjkpn"
- },
- "source": [
- "# Load the Dataset (CSV)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "ceveDuYdWCYk"
- },
- "source": [
- "## Download"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "eZwf6pv7WFmD"
- },
- "source": [
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Download the CSV file and read it row by row."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 7,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "wl_pbPvMlv1h",
- "outputId": "24342259-24f0-44fa-bd0d-21da798d0555"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
- " Dload Upload Total Spent Left Speed\n",
- "100 169k 100 169k 0 0 864k 0 --:--:-- --:--:-- --:--:-- 865k\n"
- ]
- }
- ],
- "source": [
- "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "VWBLtDbUWJfA"
- },
- "source": [
- "## Read File"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 8,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "0Q9sxuW0g3Gd",
- "outputId": "889c1127-cf04-4ce7-d99c-d60826ffe92f"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "14"
- ]
- },
- "execution_count": 8,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "import csv\n",
- "\n",
- "rows = []\n",
- "\n",
- "# Load the file as a CSV\n",
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
- " csv_reader = csv.reader(file)\n",
- "\n",
- " for idx, row in enumerate( csv_reader ):\n",
- " if idx == 0: continue # Skip the header row\n",
- " rows.append( row )\n",
- "\n",
- "# The number of rows (articles) in the dataset.\n",
- "len( rows )"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "S17g2RYOjmf2"
- },
- "source": [
- "# Convert to Document obj"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 9,
- "metadata": {
- "id": "YizvmXPejkJE"
- },
- "outputs": [],
- "source": [
- "from llama_index.core import Document\n",
- "\n",
- "# Convert the articles to Document objects so the LlamaIndex framework can process them.\n",
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "qjuLbmFuWsyl"
- },
- "source": [
- "# Transforming"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 10,
- "metadata": {
- "id": "9z3t70DGWsjO"
- },
- "outputs": [],
- "source": [
- "from llama_index.core.text_splitter import TokenTextSplitter\n",
- "\n",
- "# Define the splitter object that splits the text into segments of 512 tokens,\n",
- "# with a 128-token overlap between the segments.\n",
- "text_splitter = TokenTextSplitter(\n",
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
- ")"
- ]
- },
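The 128-token overlap means consecutive chunks share context, so ideas are less likely to be cut mid-thought before retrieval. A minimal sanity check of the splitter (illustrative only; it calls `split_text` on the first article from the notebook's `documents` list):

```python
# Split the first article and inspect the resulting 512-token chunks.
chunks = text_splitter.split_text(documents[0].text)
print(len(chunks))        # number of chunks produced for this article
print(chunks[0][-200:])   # the tail of chunk 0 should reappear...
print(chunks[1][:200])    # ...near the head of chunk 1, thanks to the overlap
```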
- {
- "cell_type": "code",
- "execution_count": 12,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 331,
- "referenced_widgets": [
- "3fbabd8a8660461ba5e7bc08ef39139a",
- "df2365556ae242a2ab1a119f9a31a561",
- "5f4b9d32df8f446e858e4c289dc282f9",
- "5b588f83a15d42d9aca888e06bbd95ff",
- "ad073bca655540809e39f26538d2ec0d",
- "13b9c5395bca4c3ba21265240cb936cf",
- "47a4586384274577a726c57605e7f8d9",
- "96a3bdece738481db57e811ccb74a974",
- "5c7973afd79349ed997a69120d0629b2",
- "af9b6ae927dd4764b9692507791bc67e",
- "134210510d49476e959dd7d032bbdbdc",
- "5f9bb065c2b74d2e8ded32e1306a7807",
- "73a06bc546a64f7f99a9e4a135319dcd",
- "ce48deaf4d8c49cdae92bfdbb3a78df0",
- "4a172e8c6aa44e41a42fc1d9cf714fd0",
- "0245f2604e4d49c8bd0210302746c47b",
- "e956dfab55084a9cbe33c8e331b511e7",
- "cb394578badd43a89850873ad2526542",
- "193aef33d9184055bb9223f56d456de6",
- "abfc9aa911ce4a5ea81c7c451f08295f",
- "e7937a1bc68441a080374911a6563376",
- "e532ed7bfef34f67b5fcacd9534eb789"
- ]
- },
- "id": "P9LDJ7o-Wsc-",
- "outputId": "01070c1f-dffa-4ab7-ad71-b07b76b12e03"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Parsing nodes: 0%| | 0/14 [00:00<?, ?it/s]"
- ]
- },
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Parsing nodes: 100%|██████████| 14/14 [00:00<00:00, 27.40it/s]\n",
- "100%|██████████| 108/108 [00:59<00:00, 1.81it/s]\n",
- "100%|██████████| 108/108 [01:08<00:00, 1.58it/s]\n",
- "100%|██████████| 108/108 [00:27<00:00, 3.88it/s]\n",
- "Generating embeddings: 100%|██████████| 108/108 [00:01<00:00, 77.68it/s]\n"
- ]
- }
- ],
- "source": [
- "from llama_index.core.extractors import (\n",
- " SummaryExtractor,\n",
- " QuestionsAnsweredExtractor,\n",
- " KeywordExtractor,\n",
- ")\n",
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
- "from llama_index.core.ingestion import IngestionPipeline\n",
- "\n",
- "# Create the pipeline to apply the transformation on each chunk,\n",
- "# and store the transformed text in the chroma vector store.\n",
- "pipeline = IngestionPipeline(\n",
- " transformations=[\n",
- " text_splitter,\n",
- " QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
- " SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
- " KeywordExtractor(keywords=10, llm=llm),\n",
- " OpenAIEmbedding(),\n",
- " ],\n",
- " vector_store=vector_store\n",
- ")\n",
- "\n",
- "# Run the transformation pipeline.\n",
- "nodes = pipeline.run(documents=documents, show_progress=True);"
- ]
- },
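Each transformation in the pipeline enriches the chunks: the three extractors attach LLM-generated metadata, and `OpenAIEmbedding` computes the vector that Chroma stores. A quick way to verify this (a sketch; the metadata key names below are the llama-index defaults and may vary by version):

```python
# Inspect the metadata the extractors added to the first chunk.
print(nodes[0].metadata.keys())
# Expected keys include the original CSV fields plus, e.g.:
#   'questions_this_excerpt_can_answer'        (QuestionsAnsweredExtractor)
#   'prev_section_summary', 'section_summary'  (SummaryExtractor)
#   'excerpt_keywords'                         (KeywordExtractor)
```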
- {
- "cell_type": "code",
- "execution_count": 13,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "mPGa85hM2P3P",
- "outputId": "c106c463-2459-4b11-bbae-5bd5e2246011"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "108"
- ]
- },
- "execution_count": 13,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "len( nodes )"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 14,
- "metadata": {
- "id": "23x20bL3_jRb"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "updating: mini-llama-articles/ (stored 0%)\n",
- "updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
- " adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/ (stored 0%)\n",
- " adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/data_level0.bin (deflated 97%)\n",
- " adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/length.bin (deflated 23%)\n",
- " adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/link_lists.bin (stored 0%)\n",
- " adding: mini-llama-articles/6059cb71-7dfb-4096-aaab-f06eaf1d0ace/header.bin (deflated 61%)\n"
- ]
- }
- ],
- "source": [
- "# Compress the vector store directory to a zip file to be able to download and use later.\n",
- "!zip -r vectorstore.zip mini-llama-articles"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "OWaT6rL7ksp8"
- },
- "source": [
- "# Load Indexes"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "d7mY7AdLjs4F"
- },
- "source": [
- "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 15,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "SodY2Xpf_kxg",
- "outputId": "701258b4-ea35-46d1-df33-536a45752a28"
- },
- "outputs": [],
- "source": [
- "# !unzip vectorstore.zip"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 16,
- "metadata": {
- "id": "mXi56KTXk2sp"
- },
- "outputs": [],
- "source": [
- "import chromadb\n",
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
- "\n",
- "# Load the vector store from the local storage.\n",
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 17,
- "metadata": {
- "id": "jKXURvLtkuTS"
- },
- "outputs": [],
- "source": [
- "from llama_index.core import VectorStoreIndex\n",
- "\n",
- "# Create the index based on the vector store.\n",
- "vector_index = VectorStoreIndex.from_vector_store(vector_store)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "XjIQGo11j5N-"
- },
- "source": [
- "# Retrieving All the Nodes"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "RZBPFntrj8tp"
- },
- "source": [
- "To build a custom retriever that combines a keyword index with the vector index, we need access to all the nodes. By using the index as a retriever and asking it to fetch a very large number of documents, we can ensure that it returns every document stored in the vector store. (This is a temporary workaround, since LlamaIndex currently cannot fetch all documents from a chromadb collection directly; a sketch of an alternative follows the next cells. The limitation may be addressed in future updates.)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 18,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "Za6m06wpcJpN",
- "outputId": "98806ea5-5c2d-4a87-97ea-ee37a890c7bf"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "Number of requested results 100000000 is greater than number of elements in index 108, updating n_results = 108\n"
- ]
- }
- ],
- "source": [
- "# Set similarity_top_k to a large number to retrieve all the nodes\n",
- "retriever = vector_index.as_retriever(similarity_top_k=100000000)\n",
- "\n",
- "# Retrieve all nodes\n",
- "all_nodes = retriever.retrieve('Hello!')"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 19,
- "metadata": {
- "id": "2Tz_n2MLj62B"
- },
- "outputs": [],
- "source": [
- "all_nodes = [item.node for item in all_nodes]"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 20,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "mquOgF8UnXZi",
- "outputId": "cd41e132-237e-4e4f-bb35-464dba9307ba"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "108"
- ]
- },
- "execution_count": 20,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "len( all_nodes )"
- ]
- },
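As an alternative to the large-`similarity_top_k` workaround, the underlying chromadb collection can be read directly. This is a sketch under the assumption that rebuilding lightweight nodes from the raw documents and metadata is acceptable for the keyword index (it bypasses LlamaIndex's node serialization, so some node fields are not restored):

```python
from llama_index.core.schema import TextNode

# Pull every stored document straight from the Chroma collection.
records = chroma_collection.get(include=["documents", "metadatas"])

# Rebuild plain TextNode objects from the raw records.
all_nodes_alt = [
    TextNode(id_=id_, text=doc, metadata=meta or {})
    for id_, doc, meta in zip(
        records["ids"], records["documents"], records["metadatas"]
    )
]
print(len(all_nodes_alt))  # should also report 108
```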
- {
- "cell_type": "code",
- "execution_count": 21,
- "metadata": {
- "id": "hcmwBAsCZIwR"
- },
- "outputs": [],
- "source": [
- "from llama_index.core import SimpleKeywordTableIndex\n",
- "\n",
- "# Define the KeywordTableIndex using all the nodes.\n",
- "keyword_index = SimpleKeywordTableIndex(nodes=all_nodes)"
- ]
- },
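`SimpleKeywordTableIndex` extracts keywords from every chunk with simple regex-based frequency heuristics (rather than the LLM), which keeps index construction cheap, and maps each keyword to the IDs of the chunks that contain it. A quick peek at that mapping (a sketch; `index_struct.table` is an internal attribute and may change across llama-index versions):

```python
# keyword -> set of node IDs containing that keyword
table = keyword_index.index_struct.table
print(len(table), "keywords extracted")
print(list(table.items())[:3])
```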
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "K3wtAa7Lo2Vh"
- },
- "source": [
- "# Custom Retriever"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {
- "id": "txPFNOkUo2Kj"
- },
- "outputs": [],
- "source": [
- "from llama_index.core import QueryBundle\n",
- "from llama_index.core.schema import NodeWithScore\n",
- "from llama_index.core.retrievers import (\n",
- " BaseRetriever,\n",
- " VectorIndexRetriever,\n",
- " KeywordTableSimpleRetriever,\n",
- ")\n",
- "from typing import List\n",
- "\n",
- "# The custom retriever that can use both the vector index and the keyword index to retrieve documents.\n",
- "# It has two modes: \"AND\", which keeps only nodes retrieved by both indexes,\n",
- "# and \"OR\", which merges the nodes retrieved by either index.\n",
- "class CustomRetriever(BaseRetriever):\n",
- " \"\"\"Custom retriever that combines semantic (vector) search and keyword search.\"\"\"\n",
- "\n",
- " def __init__(\n",
- " self,\n",
- " vector_retriever: VectorIndexRetriever,\n",
- " keyword_retriever: KeywordTableSimpleRetriever,\n",
- " mode: str = \"AND\",\n",
- " ) -> None:\n",
- " \"\"\"Init params.\"\"\"\n",
- "\n",
- " self._vector_retriever = vector_retriever\n",
- " self._keyword_retriever = keyword_retriever\n",
- " if mode not in (\"AND\", \"OR\"):\n",
- " raise ValueError(\"Invalid mode.\")\n",
- " self._mode = mode\n",
- " super().__init__()\n",
- "\n",
- " def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:\n",
- " \"\"\"Retrieve nodes given query.\"\"\"\n",
- "\n",
- " vector_nodes = self._vector_retriever.retrieve(query_bundle)\n",
- " keyword_nodes = self._keyword_retriever.retrieve(query_bundle)\n",
- "\n",
- " vector_ids = {n.node.node_id for n in vector_nodes}\n",
- " keyword_ids = {n.node.node_id for n in keyword_nodes}\n",
- "\n",
- " combined_dict = {n.node.node_id: n for n in vector_nodes}\n",
- " combined_dict.update({n.node.node_id: n for n in keyword_nodes})\n",
- "\n",
- " if self._mode == \"AND\":\n",
- " retrieve_ids = vector_ids.intersection(keyword_ids)\n",
- " else:\n",
- " retrieve_ids = vector_ids.union(keyword_ids)\n",
- "\n",
- " retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids]\n",
- "\n",
- " return retrieve_nodes"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 23,
- "metadata": {
- "id": "YWLckX40pii-"
- },
- "outputs": [],
- "source": [
- "from llama_index.core import get_response_synthesizer\n",
- "from llama_index.core.query_engine import RetrieverQueryEngine\n",
- "\n",
- "# define custom retriever\n",
- "vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2)\n",
- "keyword_retriever = KeywordTableSimpleRetriever(index=keyword_index, max_keywords_per_query=2)\n",
- "custom_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"OR\")\n",
- "\n",
- "# define response synthesizer\n",
- "response_synthesizer = get_response_synthesizer()"
- ]
- },
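With the retrievers instantiated, the two modes are easy to compare: "OR" maximizes recall by merging both result sets, while "AND" keeps only the chunks both retrievers agree on. A hypothetical comparison (note that nodes contributed only by the keyword retriever carry a score of `None`, which is visible in the retrieval printout later on):

```python
# Precision-oriented variant: intersect the two result sets instead of merging.
and_retriever = CustomRetriever(vector_retriever, keyword_retriever, "AND")

for n in and_retriever.retrieve("How many parameters does LLaMA2 have?"):
    print(n.node.node_id, n.score)
```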
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "8JPD8yAinVSq"
- },
- "source": [
- "# Query Dataset"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 24,
- "metadata": {
- "id": "b0gue7cyctt1"
- },
- "outputs": [],
- "source": [
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
- "# and using an LLM to formulate the final answer.\n",
- "custom_query_engine = RetrieverQueryEngine(\n",
- " retriever=custom_retriever,\n",
- " response_synthesizer=response_synthesizer,\n",
- ")\n",
- "\n",
- "res = custom_query_engine.query(\"How many parameters LLaMA2 model has?\")"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 35
- },
- "id": "VKK3jMprctre",
- "outputId": "370a6a1a-133d-428f-80c7-28777f4349b3"
- },
- "outputs": [
- {
- "data": {
- "text/plain": [
- "'The LLaMA2 model has 52 billion parameters.'"
- ]
- },
- "execution_count": 25,
- "metadata": {},
- "output_type": "execute_result"
- }
- ],
- "source": [
- "res.response"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/"
- },
- "id": "465dH4yQc7Ct",
- "outputId": "8f43f543-40b1-4f63-a433-d59b33545774"
- },
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Node ID\t 322a5cb0-5b0c-413f-bc5e-e72747b385d1\n",
- "Title\t Building Intuition on the Concepts behind LLMs like ChatGPT - Part 1- Neural Networks, Transformers, Pretraining, and Fine Tuning\n",
- "Text\t backpropagation, the degree of the error of the model (the loss value) is propagated backward through the neural network. It computes the derivative to the output of each individual weight and bias i.e. how sensitive the output is to changes in each specific parameter. For my people who didn't take on differential calculus in school (such as myself), think of the model parameters (weights/biases) as adjustable knobs. These knobs are arbitrary - in the sense that you can't tell in what specific way it governs the prediction ability of the model. The knobs, which can be rotated clockwise or counterclockwise have different effects on the behavior of the output. Knob A might increase the loss 3x when turned clockwise, knob B reduces the loss by 1/8 when turned counterclockwise (and so on). All these knobs are checked (all billions of them) and to get information on how sensitive the output is to adjustments of each knob - this numerical value is their derivative with respect to the output. Calculating these derivatives is called backpropagation. The output of backpropagation is a vector (a list of numbers) whose elements or dimensions consist of the parameters' individual derivatives. This vector is the gradient of the error with respect to the existing parameter values (or the current learnings) of the neural network. A vector has two properties: length or magnitude and direction. The gradient vector contains information on the direction in which the error or loss is increasing. The magnitude of the vector signifies the steepness or rate of increase. Think of the gradient vector as the map of a foggy hill you're descending from - gradient descent optimization is using the information about direction and steepness from the gradient vector to reach the bottom of the hill (the minimum loss value) as efficiently as possible by navigating to the path with the greatest downward incline (the opposite direction of the gradient vector). This involves iteratively adjusting the values of the weights and biases of the network (by subtracting small values to it i.e. the learning rate) en masse to reach this optimal state. After these steps, the hope\n",
- "Score\t None\n",
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
- "Node ID\t f097d19f-45bd-402b-9547-5482f57110ea\n",
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
- "Score\t 0.7156515131319103\n",
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
- "Node ID\t 22cea8a0-aea7-4405-b7e1-a2cb02ff10e8\n",
- "Title\t The Generative AI Revolution: Exploring the Current Landscape\n",
- "Text\t Cloud announced its partnership with Cohere. The company intends to use Cloud's TPU for the development and deployment of its products, and Sagemaker by Amazon also gives access to Cohere's language AI. Cohere powers Hyperwrite, which helps in quickly generating articles. AWS has also announced a partnership with Cohere AI. To date, Cohere has raised $170 million, and with the ongoing rush of funding in AI platforms, the Canadian startup is expected to be valued at $6 billion. Cohere is set to introduce a new dialogue model to aid enterprise users in generating text while engaging with the model to fine-tune the output. Cohere's Xlarge model resembles ChatGPT but provides developers and businesses with access to this technology. Cohere's base model has 52 billion parameters compared to OpenAI's GPT-3 DaVinci model, which has 175B parameters. Cohere stresses on accuracy, speed, safety, cost, and ease of use for its users and has paid much attention to the product and its design, developing a cohesive model. 8. Anthropic AI's Claude Anthropic is an American AI startup and public benefit corporation founded in 2021 by Daniela Amodei and Dario Amodei, former members of OpenAI. The company specializes in developing AI systems and language models, with a particular focus on transformer architecture. Anthropic's research on the interpretability of machine learning systems covers fields ranging from natural language and interpretability to human feedback, scaling laws, reinforcement learning, and code generation, among others. The company stresses the application of responsible AI and presents itself as an AI safety and research company working towards building reliable, steerable, and interpretable AI systems. By 2022, Google had invested nearly $400 million in Anthropic, resulting in a formal partnership between the two companies and giving Google a 10% stake in Anthropic. Outside backing amounted to $580 million, with total investments in Anthropic exceeding $1 billion to date. Anthropic has developed a conversational large language model AI chatbot named Claude, which uses a messaging interface and a technique called constitutional AI to better align AI systems with human intentions. AnthropicLM v4-s3 is a\n",
- "Score\t None\n",
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
- "Node ID\t 603fb039-960c-4c3e-a98a-a65c57ab6761\n",
- "Title\t Building Intuition on the Concepts behind LLMs like ChatGPT - Part 1- Neural Networks, Transformers, Pretraining, and Fine Tuning\n",
- "Text\t published by OpenAI, to train better models, increasing the number of parameters is 3x more important than increasing the size of the training data. (Note: DeepMind has since published a paper with a differing view.) This translates to a significant increase in computational requirements, as handling a larger number of parameters demands more complex calculations. Parallelization, which is the process of dividing a single task into multiple sub-tasks that can be processed simultaneously across multiple compute resources, becomes essential in dealing with this problem. Parallelization is difficult to achieve with RNNs given their sequential nature. This is not an issue for transformers as it computes relationships between all elements in a sequence simultaneously, rather than sequentially. It also means that they work well with GPUs or video cards. Graphics rendering requires a large number of simple calculations happening concurrently. The numerous, small, and efficient processing cores that a GPU has, which are designed for simultaneous operations, make it a good fit for tasks such as matrix and vector operations that are central to deep learning. AI going 'mainstream' and the mad scramble to build larger and better models is a boon to GPU manufacturers. NVIDIA- specifically - whose stock price has grown 200% YTD as of this writing, has made them the highest-performing stock this year and pushed their market cap to USD 1 trillion. They join megacaps like Apple, Google, Microsoft, and Amazon in this exclusive club. The Transformer is a decidedly complex topic and the explanation above wholesale left out important concepts in order to be more digestible to a broader audience. If you want to know more, I found these gentle yet significantly more fleshed-out introductions to the topic: Jay Allamar's illustrated transformer, Lili Jiang's potion analogy, or if you want something more advanced - Karpathy's nanoGPT that babbles in Shakepear-ish. Fine-tuning 'chat' models like ChatGPT The output of pretrainings are base models or foundation models. Examples of recently released text-generation foundation models are GPT-4, Bard, LLaMa 1 & 2, and Claude 1\n",
- "Score\t None\n",
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
- "Node ID\t 56881e5c-1c47-48bd-be19-df7ada6ab593\n",
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
- "Score\t 0.7009231750702649\n",
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
- "Node ID\t 4aada7f3-39f9-4911-ae2a-fb57876ee4a4\n",
- "Title\t Exploring Large Language Models -Part 3\n",
- "Text\t concept with toy datasets. The real trouble is making the model 'understand' the data first and not just parrot it out. Without understanding, it will parrot out the answer based on the similarity of the question in the training set, or both the question and answer. To prevent this, the authors have an intermediate step called 'Recite' where the model is made to recite/output the relevant passages and, after that, output the answer. Just to be clear, there is no doubt now (2023), especially with GPT3/4, LLAMA2 and similar models about the feasibility of this use case, that a model can understand the question, has some ability for causal reasoning, and can generalize to learn a world model from its training data, and to use both to create a well-formed answer to the question. Let's see the difficulties one by one however, of training a large model. First is the importance of the model size. This GIF from the Google AI blog illustrates this beautifully. It is relatively easy and cost-efficient to train or fine-tune a small model with our custom data, as the GPU and infrastructure requirements are very less. On the contrary, it needs huge fleets of GPUs and training infrastructure to load very large language models and fine-tune them (without quantisation) in a distributed way (e.g. see libraries like DeepSpeed) LLMs come in various sizes, based on the number of trainable parameters or weights. The smaller ones, which have less than 1 billion parameters (GPT2 124 M, Bloom 560M, Flan-T5 783 M ) etc can be trained on a laptop GPU with 8 to 15 GB GPU RAM ) For quite some time, this is what I tried. I tried to overfit a small test data set on decoder models like GPP2-small, GPT-Medium, and Bloom and encoder-decoder models like Flan-T5, thinking somehow that the understanding we see in ChatGPT ( see- unsupervised learning Part 1) may come in some form if we train on these smaller models. ( less than one billion parameters). As per the paper, I tried both Causal training, where the model is presented with only previous tokens, and Masked\n",
- "Score\t None\n",
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
- ]
- }
- ],
- "source": [
- "# Show the retrieved nodes\n",
- "for src in res.source_nodes:\n",
- " print(\"Node ID\\t\", src.node_id)\n",
- " print(\"Title\\t\", src.metadata['title'])\n",
- " print(\"Text\\t\", src.text)\n",
- " print(\"Score\\t\", src.score)\n",
- " print(\"-_\"*20)"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "iMkpzH7vvb09"
- },
- "source": [
- "# Evaluate"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 27,
- "metadata": {
- "id": "H8a3eKgKvckU"
- },
- "outputs": [
- {
- "name": "stderr",
- "output_type": "stream",
- "text": [
- "100%|██████████| 108/108 [06:17<00:00, 3.49s/it]\n"
- ]
- }
- ],
- "source": [
- "from llama_index.core.evaluation import generate_question_context_pairs\n",
- "from llama_index.llms.openai import OpenAI\n",
- "\n",
- "# Create questions for each segment. These questions will be used to\n",
- "# assess whether the retriever can accurately identify and return the\n",
- "# corresponding segment when queried.\n",
- "llm = OpenAI(model=\"gpt-3.5-turbo-0125\")\n",
- "rag_eval_dataset = generate_question_context_pairs(\n",
- " nodes,\n",
- " llm=llm,\n",
- " num_questions_per_chunk=1\n",
- ")\n",
- "\n",
- "# We can save the evaluation dataset as a json file for later use.\n",
- "rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {
- "id": "0O7cLF_TlnZV"
- },
- "source": [
- "If you have uploaded the generated question JSON file, please uncomment the code in the next cell block. This will avoid the need to generate the questions manually, saving you time and effort."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "3sA1K84U254o"
- },
- "outputs": [],
- "source": [
- "# from llama_index.finetuning.embeddings.common import (\n",
- "# EmbeddingQAFinetuneDataset,\n",
- "# )\n",
- "# rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
- "# \"./rag_eval_dataset.json\"\n",
- "# )"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 28,
- "metadata": {
- "id": "H7ubvcbk27vr"
- },
- "outputs": [],
- "source": [
- "import pandas as pd\n",
- "\n",
- "# A simple function to show the evaluation result.\n",
- "def display_results_retriever(name, eval_results):\n",
- " \"\"\"Display results from evaluate.\"\"\"\n",
- "\n",
- " metric_dicts = []\n",
- " for eval_result in eval_results:\n",
- " metric_dict = eval_result.metric_vals_dict\n",
- " metric_dicts.append(metric_dict)\n",
- "\n",
- " full_df = pd.DataFrame(metric_dicts)\n",
- "\n",
- " hit_rate = full_df[\"hit_rate\"].mean()\n",
- " mrr = full_df[\"mrr\"].mean()\n",
- "\n",
- " metric_df = pd.DataFrame(\n",
- " {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
- " )\n",
- "\n",
- " return metric_df"
- ]
- },
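Both metrics come from checking, for each generated question, where the question's source chunk lands in the retrieved list. A small worked example of the arithmetic (hypothetical ranks, not notebook output):

```python
# Ranks of the expected chunk across three hypothetical queries; None = not retrieved.
ranks = [1, 3, None]
hit_rate = sum(r is not None for r in ranks) / len(ranks)       # 2/3 ≈ 0.667
mrr = sum(1 / r for r in ranks if r is not None) / len(ranks)   # (1 + 1/3) / 3 ≈ 0.444
```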
- {
- "cell_type": "code",
- "execution_count": 29,
- "metadata": {
- "colab": {
- "base_uri": "https://localhost:8080/",
- "height": 435
- },
- "id": "uNLxDxoc2-Ac",
- "outputId": "93f03e7e-2590-46f0-fce0-3e8b29852a88"
- },
- "outputs": [],
- "source": [
- "from llama_index.core.evaluation import RetrieverEvaluator\n",
- "\n",
- "# We can evaluate the retrievers with different top_k values.\n",
- "for i in [2, 4, 6, 8, 10]:\n",
- " vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=i)\n",
- " custom_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"OR\")\n",
- " # RetrieverEvaluator expects a BaseRetriever, not a query engine.\n",
- " retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
- " [\"mrr\", \"hit_rate\"], retriever=custom_retriever\n",
- " )\n",
- " eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
- " print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": null,
- "metadata": {
- "id": "1MB1YD1E3EKM"
- },
- "outputs": [],
- "source": []
- }
- ],
- "metadata": {
- "colab": {
- "authorship_tag": "ABX9TyO362/noWgs82KNvLAlRlkT",
- "include_colab_link": true,
- "provenance": []
- },
- "kernelspec": {
- "display_name": "Python 3",
- "name": "python3"
- },
- "language_info": {
- "codemirror_mode": {
- "name": "ipython",
- "version": 3
- },
- "file_extension": ".py",
- "mimetype": "text/x-python",
- "name": "python",
- "nbconvert_exporter": "python",
- "pygments_lexer": "ipython3",
- "version": "3.11.8"
- },
- "widgets": {
- ... (auto-generated Jupyter widget state for the notebook's progress bars, omitted) ...
1144
- "IPY_MODEL_5b588f83a15d42d9aca888e06bbd95ff"
1145
- ],
1146
- "layout": "IPY_MODEL_ad073bca655540809e39f26538d2ec0d"
1147
- }
1148
- },
1149
- "47a4586384274577a726c57605e7f8d9": {
1150
- "model_module": "@jupyter-widgets/controls",
1151
- "model_module_version": "1.5.0",
1152
- "model_name": "DescriptionStyleModel",
1153
- "state": {
1154
- "_model_module": "@jupyter-widgets/controls",
1155
- "_model_module_version": "1.5.0",
1156
- "_model_name": "DescriptionStyleModel",
1157
- "_view_count": null,
1158
- "_view_module": "@jupyter-widgets/base",
1159
- "_view_module_version": "1.2.0",
1160
- "_view_name": "StyleView",
1161
- "description_width": ""
1162
- }
1163
- },
1164
- "4a172e8c6aa44e41a42fc1d9cf714fd0": {
1165
- "model_module": "@jupyter-widgets/controls",
1166
- "model_module_version": "1.5.0",
1167
- "model_name": "HTMLModel",
1168
- "state": {
1169
- "_dom_classes": [],
1170
- "_model_module": "@jupyter-widgets/controls",
1171
- "_model_module_version": "1.5.0",
1172
- "_model_name": "HTMLModel",
1173
- "_view_count": null,
1174
- "_view_module": "@jupyter-widgets/controls",
1175
- "_view_module_version": "1.5.0",
1176
- "_view_name": "HTMLView",
1177
- "description": "",
1178
- "description_tooltip": null,
1179
- "layout": "IPY_MODEL_e7937a1bc68441a080374911a6563376",
1180
- "placeholder": "​",
1181
- "style": "IPY_MODEL_e532ed7bfef34f67b5fcacd9534eb789",
1182
- "value": " 108/108 [00:03&lt;00:00, 33.70it/s]"
1183
- }
1184
- },
1185
- "5b588f83a15d42d9aca888e06bbd95ff": {
1186
- "model_module": "@jupyter-widgets/controls",
1187
- "model_module_version": "1.5.0",
1188
- "model_name": "HTMLModel",
1189
- "state": {
1190
- "_dom_classes": [],
1191
- "_model_module": "@jupyter-widgets/controls",
1192
- "_model_module_version": "1.5.0",
1193
- "_model_name": "HTMLModel",
1194
- "_view_count": null,
1195
- "_view_module": "@jupyter-widgets/controls",
1196
- "_view_module_version": "1.5.0",
1197
- "_view_name": "HTMLView",
1198
- "description": "",
1199
- "description_tooltip": null,
1200
- "layout": "IPY_MODEL_af9b6ae927dd4764b9692507791bc67e",
1201
- "placeholder": "​",
1202
- "style": "IPY_MODEL_134210510d49476e959dd7d032bbdbdc",
1203
- "value": " 14/14 [00:00&lt;00:00, 21.41it/s]"
1204
- }
1205
- },
1206
- "5c7973afd79349ed997a69120d0629b2": {
1207
- "model_module": "@jupyter-widgets/controls",
1208
- "model_module_version": "1.5.0",
1209
- "model_name": "ProgressStyleModel",
1210
- "state": {
1211
- "_model_module": "@jupyter-widgets/controls",
1212
- "_model_module_version": "1.5.0",
1213
- "_model_name": "ProgressStyleModel",
1214
- "_view_count": null,
1215
- "_view_module": "@jupyter-widgets/base",
1216
- "_view_module_version": "1.2.0",
1217
- "_view_name": "StyleView",
1218
- "bar_color": null,
1219
- "description_width": ""
1220
- }
1221
- },
1222
- "5f4b9d32df8f446e858e4c289dc282f9": {
1223
- "model_module": "@jupyter-widgets/controls",
1224
- "model_module_version": "1.5.0",
1225
- "model_name": "FloatProgressModel",
1226
- "state": {
1227
- "_dom_classes": [],
1228
- "_model_module": "@jupyter-widgets/controls",
1229
- "_model_module_version": "1.5.0",
1230
- "_model_name": "FloatProgressModel",
1231
- "_view_count": null,
1232
- "_view_module": "@jupyter-widgets/controls",
1233
- "_view_module_version": "1.5.0",
1234
- "_view_name": "ProgressView",
1235
- "bar_style": "success",
1236
- "description": "",
1237
- "description_tooltip": null,
1238
- "layout": "IPY_MODEL_96a3bdece738481db57e811ccb74a974",
1239
- "max": 14,
1240
- "min": 0,
1241
- "orientation": "horizontal",
1242
- "style": "IPY_MODEL_5c7973afd79349ed997a69120d0629b2",
1243
- "value": 14
1244
- }
1245
- },
1246
- "5f9bb065c2b74d2e8ded32e1306a7807": {
1247
- "model_module": "@jupyter-widgets/controls",
1248
- "model_module_version": "1.5.0",
1249
- "model_name": "HBoxModel",
1250
- "state": {
1251
- "_dom_classes": [],
1252
- "_model_module": "@jupyter-widgets/controls",
1253
- "_model_module_version": "1.5.0",
1254
- "_model_name": "HBoxModel",
1255
- "_view_count": null,
1256
- "_view_module": "@jupyter-widgets/controls",
1257
- "_view_module_version": "1.5.0",
1258
- "_view_name": "HBoxView",
1259
- "box_style": "",
1260
- "children": [
1261
- "IPY_MODEL_73a06bc546a64f7f99a9e4a135319dcd",
1262
- "IPY_MODEL_ce48deaf4d8c49cdae92bfdbb3a78df0",
1263
- "IPY_MODEL_4a172e8c6aa44e41a42fc1d9cf714fd0"
1264
- ],
1265
- "layout": "IPY_MODEL_0245f2604e4d49c8bd0210302746c47b"
1266
- }
1267
- },
1268
- "73a06bc546a64f7f99a9e4a135319dcd": {
1269
- "model_module": "@jupyter-widgets/controls",
1270
- "model_module_version": "1.5.0",
1271
- "model_name": "HTMLModel",
1272
- "state": {
1273
- "_dom_classes": [],
1274
- "_model_module": "@jupyter-widgets/controls",
1275
- "_model_module_version": "1.5.0",
1276
- "_model_name": "HTMLModel",
1277
- "_view_count": null,
1278
- "_view_module": "@jupyter-widgets/controls",
1279
- "_view_module_version": "1.5.0",
1280
- "_view_name": "HTMLView",
1281
- "description": "",
1282
- "description_tooltip": null,
1283
- "layout": "IPY_MODEL_e956dfab55084a9cbe33c8e331b511e7",
1284
- "placeholder": "​",
1285
- "style": "IPY_MODEL_cb394578badd43a89850873ad2526542",
1286
- "value": "Generating embeddings: 100%"
1287
- }
1288
- },
1289
- "96a3bdece738481db57e811ccb74a974": {
1290
- "model_module": "@jupyter-widgets/base",
1291
- "model_module_version": "1.2.0",
1292
- "model_name": "LayoutModel",
1293
- "state": {
1294
- "_model_module": "@jupyter-widgets/base",
1295
- "_model_module_version": "1.2.0",
1296
- "_model_name": "LayoutModel",
1297
- "_view_count": null,
1298
- "_view_module": "@jupyter-widgets/base",
1299
- "_view_module_version": "1.2.0",
1300
- "_view_name": "LayoutView",
1301
- "align_content": null,
1302
- "align_items": null,
1303
- "align_self": null,
1304
- "border": null,
1305
- "bottom": null,
1306
- "display": null,
1307
- "flex": null,
1308
- "flex_flow": null,
1309
- "grid_area": null,
1310
- "grid_auto_columns": null,
1311
- "grid_auto_flow": null,
1312
- "grid_auto_rows": null,
1313
- "grid_column": null,
1314
- "grid_gap": null,
1315
- "grid_row": null,
1316
- "grid_template_areas": null,
1317
- "grid_template_columns": null,
1318
- "grid_template_rows": null,
1319
- "height": null,
1320
- "justify_content": null,
1321
- "justify_items": null,
1322
- "left": null,
1323
- "margin": null,
1324
- "max_height": null,
1325
- "max_width": null,
1326
- "min_height": null,
1327
- "min_width": null,
1328
- "object_fit": null,
1329
- "object_position": null,
1330
- "order": null,
1331
- "overflow": null,
1332
- "overflow_x": null,
1333
- "overflow_y": null,
1334
- "padding": null,
1335
- "right": null,
1336
- "top": null,
1337
- "visibility": null,
1338
- "width": null
1339
- }
1340
- },
1341
- "abfc9aa911ce4a5ea81c7c451f08295f": {
1342
- "model_module": "@jupyter-widgets/controls",
1343
- "model_module_version": "1.5.0",
1344
- "model_name": "ProgressStyleModel",
1345
- "state": {
1346
- "_model_module": "@jupyter-widgets/controls",
1347
- "_model_module_version": "1.5.0",
1348
- "_model_name": "ProgressStyleModel",
1349
- "_view_count": null,
1350
- "_view_module": "@jupyter-widgets/base",
1351
- "_view_module_version": "1.2.0",
1352
- "_view_name": "StyleView",
1353
- "bar_color": null,
1354
- "description_width": ""
1355
- }
1356
- },
1357
- "ad073bca655540809e39f26538d2ec0d": {
1358
- "model_module": "@jupyter-widgets/base",
1359
- "model_module_version": "1.2.0",
1360
- "model_name": "LayoutModel",
1361
- "state": {
1362
- "_model_module": "@jupyter-widgets/base",
1363
- "_model_module_version": "1.2.0",
1364
- "_model_name": "LayoutModel",
1365
- "_view_count": null,
1366
- "_view_module": "@jupyter-widgets/base",
1367
- "_view_module_version": "1.2.0",
1368
- "_view_name": "LayoutView",
1369
- "align_content": null,
1370
- "align_items": null,
1371
- "align_self": null,
1372
- "border": null,
1373
- "bottom": null,
1374
- "display": null,
1375
- "flex": null,
1376
- "flex_flow": null,
1377
- "grid_area": null,
1378
- "grid_auto_columns": null,
1379
- "grid_auto_flow": null,
1380
- "grid_auto_rows": null,
1381
- "grid_column": null,
1382
- "grid_gap": null,
1383
- "grid_row": null,
1384
- "grid_template_areas": null,
1385
- "grid_template_columns": null,
1386
- "grid_template_rows": null,
1387
- "height": null,
1388
- "justify_content": null,
1389
- "justify_items": null,
1390
- "left": null,
1391
- "margin": null,
1392
- "max_height": null,
1393
- "max_width": null,
1394
- "min_height": null,
1395
- "min_width": null,
1396
- "object_fit": null,
1397
- "object_position": null,
1398
- "order": null,
1399
- "overflow": null,
1400
- "overflow_x": null,
1401
- "overflow_y": null,
1402
- "padding": null,
1403
- "right": null,
1404
- "top": null,
1405
- "visibility": null,
1406
- "width": null
1407
- }
1408
- },
1409
- "af9b6ae927dd4764b9692507791bc67e": {
1410
- "model_module": "@jupyter-widgets/base",
1411
- "model_module_version": "1.2.0",
1412
- "model_name": "LayoutModel",
1413
- "state": {
1414
- "_model_module": "@jupyter-widgets/base",
1415
- "_model_module_version": "1.2.0",
1416
- "_model_name": "LayoutModel",
1417
- "_view_count": null,
1418
- "_view_module": "@jupyter-widgets/base",
1419
- "_view_module_version": "1.2.0",
1420
- "_view_name": "LayoutView",
1421
- "align_content": null,
1422
- "align_items": null,
1423
- "align_self": null,
1424
- "border": null,
1425
- "bottom": null,
1426
- "display": null,
1427
- "flex": null,
1428
- "flex_flow": null,
1429
- "grid_area": null,
1430
- "grid_auto_columns": null,
1431
- "grid_auto_flow": null,
1432
- "grid_auto_rows": null,
1433
- "grid_column": null,
1434
- "grid_gap": null,
1435
- "grid_row": null,
1436
- "grid_template_areas": null,
1437
- "grid_template_columns": null,
1438
- "grid_template_rows": null,
1439
- "height": null,
1440
- "justify_content": null,
1441
- "justify_items": null,
1442
- "left": null,
1443
- "margin": null,
1444
- "max_height": null,
1445
- "max_width": null,
1446
- "min_height": null,
1447
- "min_width": null,
1448
- "object_fit": null,
1449
- "object_position": null,
1450
- "order": null,
1451
- "overflow": null,
1452
- "overflow_x": null,
1453
- "overflow_y": null,
1454
- "padding": null,
1455
- "right": null,
1456
- "top": null,
1457
- "visibility": null,
1458
- "width": null
1459
- }
1460
- },
1461
- "cb394578badd43a89850873ad2526542": {
1462
- "model_module": "@jupyter-widgets/controls",
1463
- "model_module_version": "1.5.0",
1464
- "model_name": "DescriptionStyleModel",
1465
- "state": {
1466
- "_model_module": "@jupyter-widgets/controls",
1467
- "_model_module_version": "1.5.0",
1468
- "_model_name": "DescriptionStyleModel",
1469
- "_view_count": null,
1470
- "_view_module": "@jupyter-widgets/base",
1471
- "_view_module_version": "1.2.0",
1472
- "_view_name": "StyleView",
1473
- "description_width": ""
1474
- }
1475
- },
1476
- "ce48deaf4d8c49cdae92bfdbb3a78df0": {
1477
- "model_module": "@jupyter-widgets/controls",
1478
- "model_module_version": "1.5.0",
1479
- "model_name": "FloatProgressModel",
1480
- "state": {
1481
- "_dom_classes": [],
1482
- "_model_module": "@jupyter-widgets/controls",
1483
- "_model_module_version": "1.5.0",
1484
- "_model_name": "FloatProgressModel",
1485
- "_view_count": null,
1486
- "_view_module": "@jupyter-widgets/controls",
1487
- "_view_module_version": "1.5.0",
1488
- "_view_name": "ProgressView",
1489
- "bar_style": "success",
1490
- "description": "",
1491
- "description_tooltip": null,
1492
- "layout": "IPY_MODEL_193aef33d9184055bb9223f56d456de6",
1493
- "max": 108,
1494
- "min": 0,
1495
- "orientation": "horizontal",
1496
- "style": "IPY_MODEL_abfc9aa911ce4a5ea81c7c451f08295f",
1497
- "value": 108
1498
- }
1499
- },
1500
- "df2365556ae242a2ab1a119f9a31a561": {
1501
- "model_module": "@jupyter-widgets/controls",
1502
- "model_module_version": "1.5.0",
1503
- "model_name": "HTMLModel",
1504
- "state": {
1505
- "_dom_classes": [],
1506
- "_model_module": "@jupyter-widgets/controls",
1507
- "_model_module_version": "1.5.0",
1508
- "_model_name": "HTMLModel",
1509
- "_view_count": null,
1510
- "_view_module": "@jupyter-widgets/controls",
1511
- "_view_module_version": "1.5.0",
1512
- "_view_name": "HTMLView",
1513
- "description": "",
1514
- "description_tooltip": null,
1515
- "layout": "IPY_MODEL_13b9c5395bca4c3ba21265240cb936cf",
1516
- "placeholder": "​",
1517
- "style": "IPY_MODEL_47a4586384274577a726c57605e7f8d9",
1518
- "value": "Parsing nodes: 100%"
1519
- }
1520
- },
1521
- "e532ed7bfef34f67b5fcacd9534eb789": {
1522
- "model_module": "@jupyter-widgets/controls",
1523
- "model_module_version": "1.5.0",
1524
- "model_name": "DescriptionStyleModel",
1525
- "state": {
1526
- "_model_module": "@jupyter-widgets/controls",
1527
- "_model_module_version": "1.5.0",
1528
- "_model_name": "DescriptionStyleModel",
1529
- "_view_count": null,
1530
- "_view_module": "@jupyter-widgets/base",
1531
- "_view_module_version": "1.2.0",
1532
- "_view_name": "StyleView",
1533
- "description_width": ""
1534
- }
1535
- },
1536
- "e7937a1bc68441a080374911a6563376": {
1537
- "model_module": "@jupyter-widgets/base",
1538
- "model_module_version": "1.2.0",
1539
- "model_name": "LayoutModel",
1540
- "state": {
1541
- "_model_module": "@jupyter-widgets/base",
1542
- "_model_module_version": "1.2.0",
1543
- "_model_name": "LayoutModel",
1544
- "_view_count": null,
1545
- "_view_module": "@jupyter-widgets/base",
1546
- "_view_module_version": "1.2.0",
1547
- "_view_name": "LayoutView",
1548
- "align_content": null,
1549
- "align_items": null,
1550
- "align_self": null,
1551
- "border": null,
1552
- "bottom": null,
1553
- "display": null,
1554
- "flex": null,
1555
- "flex_flow": null,
1556
- "grid_area": null,
1557
- "grid_auto_columns": null,
1558
- "grid_auto_flow": null,
1559
- "grid_auto_rows": null,
1560
- "grid_column": null,
1561
- "grid_gap": null,
1562
- "grid_row": null,
1563
- "grid_template_areas": null,
1564
- "grid_template_columns": null,
1565
- "grid_template_rows": null,
1566
- "height": null,
1567
- "justify_content": null,
1568
- "justify_items": null,
1569
- "left": null,
1570
- "margin": null,
1571
- "max_height": null,
1572
- "max_width": null,
1573
- "min_height": null,
1574
- "min_width": null,
1575
- "object_fit": null,
1576
- "object_position": null,
1577
- "order": null,
1578
- "overflow": null,
1579
- "overflow_x": null,
1580
- "overflow_y": null,
1581
- "padding": null,
1582
- "right": null,
1583
- "top": null,
1584
- "visibility": null,
1585
- "width": null
1586
- }
1587
- },
1588
- "e956dfab55084a9cbe33c8e331b511e7": {
1589
- "model_module": "@jupyter-widgets/base",
1590
- "model_module_version": "1.2.0",
1591
- "model_name": "LayoutModel",
1592
- "state": {
1593
- "_model_module": "@jupyter-widgets/base",
1594
- "_model_module_version": "1.2.0",
1595
- "_model_name": "LayoutModel",
1596
- "_view_count": null,
1597
- "_view_module": "@jupyter-widgets/base",
1598
- "_view_module_version": "1.2.0",
1599
- "_view_name": "LayoutView",
1600
- "align_content": null,
1601
- "align_items": null,
1602
- "align_self": null,
1603
- "border": null,
1604
- "bottom": null,
1605
- "display": null,
1606
- "flex": null,
1607
- "flex_flow": null,
1608
- "grid_area": null,
1609
- "grid_auto_columns": null,
1610
- "grid_auto_flow": null,
1611
- "grid_auto_rows": null,
1612
- "grid_column": null,
1613
- "grid_gap": null,
1614
- "grid_row": null,
1615
- "grid_template_areas": null,
1616
- "grid_template_columns": null,
1617
- "grid_template_rows": null,
1618
- "height": null,
1619
- "justify_content": null,
1620
- "justify_items": null,
1621
- "left": null,
1622
- "margin": null,
1623
- "max_height": null,
1624
- "max_width": null,
1625
- "min_height": null,
1626
- "min_width": null,
1627
- "object_fit": null,
1628
- "object_position": null,
1629
- "order": null,
1630
- "overflow": null,
1631
- "overflow_x": null,
1632
- "overflow_y": null,
1633
- "padding": null,
1634
- "right": null,
1635
- "top": null,
1636
- "visibility": null,
1637
- "width": null
1638
- }
1639
- }
1640
- }
1641
- }
1642
- },
1643
- "nbformat": 4,
1644
- "nbformat_minor": 0
1645
- }
notebooks/12-Improve_Query.ipynb DELETED
@@ -1,1786 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "colab_type": "text",
7
- "id": "view-in-github"
8
- },
9
- "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/12-Improve_Query.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
- ]
12
- },
13
- {
14
- "cell_type": "markdown",
15
- "metadata": {
16
- "id": "-zE1h0uQV7uT"
17
- },
18
- "source": [
19
- "# Install Packages and Set Up Variables"
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": 1,
25
- "metadata": {
26
- "colab": {
27
- "base_uri": "https://localhost:8080/"
28
- },
29
- "id": "QPJzr-I9XQ7l",
30
- "outputId": "5d48c88b-a0a9-49ff-d788-e076d1cb4ead"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-readers-web tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic kaleido==0.2.1"
35
- ]
36
- },
37
- {
38
- "cell_type": "code",
39
- "execution_count": 1,
40
- "metadata": {
41
- "id": "riuXwpSPcvWC"
42
- },
43
- "outputs": [],
44
- "source": [
45
- "import os\n",
46
- "\n",
47
- "# Set the \"OPENAI_API_KEY\" in the Python environment. It will be used by the OpenAI client later.\n",
48
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
49
- ]
50
- },
51
- {
52
- "cell_type": "code",
53
- "execution_count": 2,
54
- "metadata": {
55
- "id": "jIEeZzqLbz0J"
56
- },
57
- "outputs": [],
58
- "source": [
59
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
60
- "\n",
61
- "import nest_asyncio\n",
62
- "\n",
63
- "nest_asyncio.apply()"
64
- ]
65
- },
66
- {
67
- "cell_type": "markdown",
68
- "metadata": {
69
- "id": "Bkgi2OrYzF7q"
70
- },
71
- "source": [
72
- "# Load a Model"
73
- ]
74
- },
75
- {
76
- "cell_type": "code",
77
- "execution_count": 4,
78
- "metadata": {
79
- "id": "9oGT6crooSSj"
80
- },
81
- "outputs": [
82
- {
83
- "name": "stderr",
84
- "output_type": "stream",
85
- "text": [
86
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
87
- " from .autonotebook import tqdm as notebook_tqdm\n"
88
- ]
89
- }
90
- ],
91
- "source": [
92
- "from llama_index.llms.openai import OpenAI\n",
93
- "\n",
94
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
95
- ]
96
- },
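
As a quick sanity check that the API key and model name are wired up correctly, a one-off completion can be run before building anything else. A minimal sketch, assuming `llm` was created as above; the prompt is arbitrary:

```python
# Assumes OPENAI_API_KEY is set and `llm` is the OpenAI wrapper defined above.
response = llm.complete("In one sentence, what is LLaMA 2?")
print(response.text)
```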
97
- {
98
- "cell_type": "markdown",
99
- "metadata": {
100
- "id": "0BwVuJXlzHVL"
101
- },
102
- "source": [
103
- "# Create a VectorStore"
104
- ]
105
- },
106
- {
107
- "cell_type": "code",
108
- "execution_count": 5,
109
- "metadata": {
110
- "id": "SQP87lHczHKc"
111
- },
112
- "outputs": [],
113
- "source": [
114
- "import chromadb\n",
115
- "\n",
116
- "# Create a client and a new collection.\n",
117
- "# chromadb.PersistentClient persists data to disk at the given path; EphemeralClient would keep it in-memory.\n",
118
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
119
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
120
- ]
121
- },
122
- {
123
- "cell_type": "code",
124
- "execution_count": 7,
125
- "metadata": {
126
- "id": "zAaGcYMJzHAN"
127
- },
128
- "outputs": [],
129
- "source": [
130
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
131
- "\n",
132
- "# Define a storage context object using the created vector database.\n",
133
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
134
- ]
135
- },
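
When indexing documents directly (rather than through the ingestion pipeline used later in this notebook), the vector store is typically wrapped in a `StorageContext` so an index can write into it. A minimal sketch of that alternative path, assuming `documents` have already been loaded:

```python
from llama_index.core import StorageContext, VectorStoreIndex

# Wrap the Chroma-backed vector store so an index can persist nodes into it.
storage_context = StorageContext.from_defaults(vector_store=vector_store)
index = VectorStoreIndex.from_documents(documents, storage_context=storage_context)
```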
136
- {
137
- "cell_type": "markdown",
138
- "metadata": {
139
- "id": "I9JbAzFcjkpn"
140
- },
141
- "source": [
142
- "# Load the Dataset (CSV)"
143
- ]
144
- },
145
- {
146
- "cell_type": "markdown",
147
- "metadata": {
148
- "id": "ceveDuYdWCYk"
149
- },
150
- "source": [
151
- "## Download"
152
- ]
153
- },
154
- {
155
- "cell_type": "markdown",
156
- "metadata": {
157
- "id": "eZwf6pv7WFmD"
158
- },
159
- "source": [
160
- "The dataset includes several articles from the TowardsAI blog that provide an in-depth explanation of the LLaMA2 model. The command below downloads it as a CSV file."
161
- ]
162
- },
163
- {
164
- "cell_type": "code",
165
- "execution_count": 8,
166
- "metadata": {
167
- "colab": {
168
- "base_uri": "https://localhost:8080/"
169
- },
170
- "id": "wl_pbPvMlv1h",
171
- "outputId": "a453b612-20a8-4396-d22b-b19d2bc47816"
172
- },
173
- "outputs": [
174
- {
175
- "name": "stdout",
176
- "output_type": "stream",
177
- "text": [
178
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
179
- " Dload Upload Total Spent Left Speed\n",
180
- "100 169k 100 169k 0 0 915k 0 --:--:-- --:--:-- --:--:-- 911k\n"
181
- ]
182
- }
183
- ],
184
- "source": [
185
- "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
186
- ]
187
- },
188
- {
189
- "cell_type": "markdown",
190
- "metadata": {
191
- "id": "VWBLtDbUWJfA"
192
- },
193
- "source": [
194
- "## Read File"
195
- ]
196
- },
197
- {
198
- "cell_type": "code",
199
- "execution_count": 9,
200
- "metadata": {
201
- "colab": {
202
- "base_uri": "https://localhost:8080/"
203
- },
204
- "id": "0Q9sxuW0g3Gd",
205
- "outputId": "49b27d8a-1f96-4e8d-fa0f-27afbf2c395c"
206
- },
207
- "outputs": [
208
- {
209
- "data": {
210
- "text/plain": [
211
- "14"
212
- ]
213
- },
214
- "execution_count": 9,
215
- "metadata": {},
216
- "output_type": "execute_result"
217
- }
218
- ],
219
- "source": [
220
- "import csv\n",
221
- "\n",
222
- "rows = []\n",
223
- "\n",
224
- "# Read the CSV file and collect its rows.\n",
225
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
226
- " csv_reader = csv.reader(file)\n",
227
- "\n",
228
- " for idx, row in enumerate( csv_reader ):\n",
229
- " if idx == 0: continue  # Skip the header row\n",
230
- " rows.append( row )\n",
231
- "\n",
232
- "# The number of articles in the dataset.\n",
233
- "len( rows )"
234
- ]
235
- },
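
Since pandas is already in the installed requirements, the same file could also be loaded in a couple of lines. A small sketch of that alternative:

```python
import pandas as pd

# Equivalent load: pandas parses the header row automatically.
df = pd.read_csv("./mini-llama-articles.csv")
rows = df.values.tolist()
print(len(rows))  # 14 articles
```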
236
- {
237
- "cell_type": "markdown",
238
- "metadata": {
239
- "id": "S17g2RYOjmf2"
240
- },
241
- "source": [
242
- "# Convert to Document Objects"
243
- ]
244
- },
245
- {
246
- "cell_type": "code",
247
- "execution_count": 10,
248
- "metadata": {
249
- "id": "YizvmXPejkJE"
250
- },
251
- "outputs": [],
252
- "source": [
253
- "from llama_index.core import Document\n",
254
- "\n",
255
- "# Convert the rows to Document objects so the LlamaIndex framework can process them.\n",
256
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
257
- ]
258
- },
259
- {
260
- "cell_type": "markdown",
261
- "metadata": {
262
- "id": "qjuLbmFuWsyl"
263
- },
264
- "source": [
265
- "# Transforming"
266
- ]
267
- },
268
- {
269
- "cell_type": "code",
270
- "execution_count": 11,
271
- "metadata": {
272
- "id": "9z3t70DGWsjO"
273
- },
274
- "outputs": [],
275
- "source": [
276
- "from llama_index.core.text_splitter import TokenTextSplitter\n",
277
- "\n",
278
- "text_splitter = TokenTextSplitter(\n",
279
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
280
- ")"
281
- ]
282
- },
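
The 128-token overlap means consecutive chunks share their boundary text, which helps keep context intact across splits. A quick way to see the effect, sketched on one of the loaded articles:

```python
# Split one article and inspect the chunk boundaries.
chunks = text_splitter.split_text(documents[0].text)
print(f"{len(chunks)} chunks")
# The tail of chunk 0 overlaps with the head of chunk 1 (~128 tokens).
print(chunks[0][-200:])
print(chunks[1][:200])
```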
283
- {
284
- "cell_type": "code",
285
- "execution_count": 12,
286
- "metadata": {
287
- "colab": {
288
- "base_uri": "https://localhost:8080/",
289
- "height": 331,
290
- "referenced_widgets": [
291
- "3fbabd8a8660461ba5e7bc08ef39139a",
292
- "df2365556ae242a2ab1a119f9a31a561",
293
- "5f4b9d32df8f446e858e4c289dc282f9",
294
- "5b588f83a15d42d9aca888e06bbd95ff",
295
- "ad073bca655540809e39f26538d2ec0d",
296
- "13b9c5395bca4c3ba21265240cb936cf",
297
- "47a4586384274577a726c57605e7f8d9",
298
- "96a3bdece738481db57e811ccb74a974",
299
- "5c7973afd79349ed997a69120d0629b2",
300
- "af9b6ae927dd4764b9692507791bc67e",
301
- "134210510d49476e959dd7d032bbdbdc",
302
- "5f9bb065c2b74d2e8ded32e1306a7807",
303
- "73a06bc546a64f7f99a9e4a135319dcd",
304
- "ce48deaf4d8c49cdae92bfdbb3a78df0",
305
- "4a172e8c6aa44e41a42fc1d9cf714fd0",
306
- "0245f2604e4d49c8bd0210302746c47b",
307
- "e956dfab55084a9cbe33c8e331b511e7",
308
- "cb394578badd43a89850873ad2526542",
309
- "193aef33d9184055bb9223f56d456de6",
310
- "abfc9aa911ce4a5ea81c7c451f08295f",
311
- "e7937a1bc68441a080374911a6563376",
312
- "e532ed7bfef34f67b5fcacd9534eb789"
313
- ]
314
- },
315
- "id": "P9LDJ7o-Wsc-",
316
- "outputId": "01070c1f-dffa-4ab7-ad71-b07b76b12e03"
317
- },
318
- "outputs": [
319
- {
320
- "name": "stderr",
321
- "output_type": "stream",
322
- "text": [
323
- "Parsing nodes: 0%| | 0/14 [00:00<?, ?it/s]"
324
- ]
325
- },
326
- {
327
- "name": "stderr",
328
- "output_type": "stream",
329
- "text": [
330
- "Parsing nodes: 100%|██████████| 14/14 [00:00<00:00, 28.28it/s]\n",
331
- "100%|██████████| 108/108 [01:36<00:00, 1.12it/s]\n",
332
- "100%|██████████| 108/108 [01:22<00:00, 1.30it/s]\n",
333
- "100%|██████████| 108/108 [00:29<00:00, 3.72it/s]\n",
334
- "Generating embeddings: 100%|██████████| 108/108 [00:02<00:00, 38.77it/s]\n"
335
- ]
336
- }
337
- ],
338
- "source": [
339
- "from llama_index.core.extractors import (\n",
340
- " SummaryExtractor,\n",
341
- " QuestionsAnsweredExtractor,\n",
342
- " KeywordExtractor,\n",
343
- ")\n",
344
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
345
- "from llama_index.core.ingestion import IngestionPipeline\n",
346
- "\n",
347
- "pipeline = IngestionPipeline(\n",
348
- " transformations=[\n",
349
- " text_splitter,\n",
350
- " QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
351
- " SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
352
- " KeywordExtractor(keywords=10, llm=llm),\n",
353
- " OpenAIEmbedding(),\n",
354
- " ],\n",
355
- " vector_store=vector_store\n",
356
- ")\n",
357
- "\n",
358
- "nodes = pipeline.run(documents=documents, show_progress=True);"
359
- ]
360
- },
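
Each extractor in the pipeline writes its output into the node metadata. To verify what was attached, the keys of a processed node can be inspected. A sketch; the key names listed in the comments (such as `questions_this_excerpt_can_answer` and `excerpt_keywords`) are the defaults these extractors used at the time and may differ across library versions:

```python
# Inspect the metadata the extractors added to the first node.
node = nodes[0]
print(node.metadata.keys())
# Expected to include entries like:
#   'questions_this_excerpt_can_answer'  (QuestionsAnsweredExtractor)
#   'prev_section_summary', 'section_summary'  (SummaryExtractor)
#   'excerpt_keywords'  (KeywordExtractor)
```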
361
- {
362
- "cell_type": "code",
363
- "execution_count": 13,
364
- "metadata": {
365
- "colab": {
366
- "base_uri": "https://localhost:8080/"
367
- },
368
- "id": "mPGa85hM2P3P",
369
- "outputId": "c106c463-2459-4b11-bbae-5bd5e2246011"
370
- },
371
- "outputs": [
372
- {
373
- "data": {
374
- "text/plain": [
375
- "108"
376
- ]
377
- },
378
- "execution_count": 13,
379
- "metadata": {},
380
- "output_type": "execute_result"
381
- }
382
- ],
383
- "source": [
384
- "len( nodes )"
385
- ]
386
- },
387
- {
388
- "cell_type": "code",
389
- "execution_count": 14,
390
- "metadata": {
391
- "id": "23x20bL3_jRb"
392
- },
393
- "outputs": [
394
- {
395
- "name": "stdout",
396
- "output_type": "stream",
397
- "text": [
398
- "updating: mini-llama-articles/ (stored 0%)\n",
399
- "updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
400
- " adding: mini-llama-articles/aaac4d54-4f82-40da-b769-a6aecfa59eb0/ (stored 0%)\n",
401
- " adding: mini-llama-articles/aaac4d54-4f82-40da-b769-a6aecfa59eb0/data_level0.bin (deflated 96%)\n",
402
- " adding: mini-llama-articles/aaac4d54-4f82-40da-b769-a6aecfa59eb0/length.bin (deflated 35%)\n",
403
- " adding: mini-llama-articles/aaac4d54-4f82-40da-b769-a6aecfa59eb0/link_lists.bin (stored 0%)\n",
404
- " adding: mini-llama-articles/aaac4d54-4f82-40da-b769-a6aecfa59eb0/header.bin (deflated 61%)\n"
405
- ]
406
- }
407
- ],
408
- "source": [
409
- "!zip -r vectorstore.zip mini-llama-articles"
410
- ]
411
- },
412
- {
413
- "cell_type": "markdown",
414
- "metadata": {
415
- "id": "OWaT6rL7ksp8"
416
- },
417
- "source": [
418
- "# Load Indexes"
419
- ]
420
- },
421
- {
422
- "cell_type": "code",
423
- "execution_count": 16,
424
- "metadata": {
425
- "colab": {
426
- "base_uri": "https://localhost:8080/"
427
- },
428
- "id": "SodY2Xpf_kxg",
429
- "outputId": "9f8b7153-ea58-4824-8363-c47e922612a8"
430
- },
431
- "outputs": [],
432
- "source": [
433
- "# !unzip vectorstore.zip"
434
- ]
435
- },
436
- {
437
- "cell_type": "code",
438
- "execution_count": 17,
439
- "metadata": {
440
- "id": "mXi56KTXk2sp"
441
- },
442
- "outputs": [],
443
- "source": [
444
- "import chromadb\n",
445
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
446
- "\n",
447
- "# Reconnect to the persisted Chroma collection.\n",
448
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
449
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
450
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
451
- ]
452
- },
453
- {
454
- "cell_type": "code",
455
- "execution_count": 18,
456
- "metadata": {
457
- "id": "jKXURvLtkuTS"
458
- },
459
- "outputs": [],
460
- "source": [
461
- "# Create your index\n",
462
- "from llama_index.core import VectorStoreIndex\n",
463
- "\n",
464
- "vector_index = VectorStoreIndex.from_vector_store(vector_store)"
465
- ]
466
- },
467
- {
468
- "cell_type": "markdown",
469
- "metadata": {
470
- "id": "SLrn8A3jckmW"
471
- },
472
- "source": [
473
- "# Multi-Step Query Engine"
474
- ]
475
- },
476
- {
477
- "cell_type": "markdown",
478
- "metadata": {
479
- "id": "UmpfpVCje8h3"
480
- },
481
- "source": [
482
- "## GPT-4"
483
- ]
484
- },
485
- {
486
- "cell_type": "code",
487
- "execution_count": 19,
488
- "metadata": {
489
- "id": "CaxFzDz4cRMd"
490
- },
491
- "outputs": [
492
- {
493
- "name": "stderr",
494
- "output_type": "stream",
495
- "text": [
496
- "/var/folders/l7/9qcp7g5x5rl9x8ltw0t85qym0000gn/T/ipykernel_1424/226941912.py:4: DeprecationWarning: Call to deprecated class method from_defaults. (ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.) -- Deprecated since version 0.10.0.\n",
497
- " service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4)\n"
498
- ]
499
- }
500
- ],
501
- "source": [
502
- "from llama_index.core import ServiceContext\n",
503
- "\n",
504
- "gpt4 = OpenAI(temperature=0, model=\"gpt-4-0125-preview\")\n",
505
- "service_context_gpt4 = ServiceContext.from_defaults(llm=gpt4)"
506
- ]
507
- },
508
- {
509
- "cell_type": "code",
510
- "execution_count": 20,
511
- "metadata": {
512
- "id": "8y-Ya3GyfcAk"
513
- },
514
- "outputs": [],
515
- "source": [
516
- "from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform\n",
517
- "\n",
518
- "step_decompose_transform_gpt4 = StepDecomposeQueryTransform(llm=gpt4, verbose=True)"
519
- ]
520
- },
521
- {
522
- "cell_type": "code",
523
- "execution_count": 21,
524
- "metadata": {
525
- "id": "zntXdSbGf_qF"
526
- },
527
- "outputs": [],
528
- "source": [
529
- "from llama_index.core.query_engine.multistep_query_engine import MultiStepQueryEngine\n",
530
- "\n",
531
- "query_engine_gpt4 = vector_index.as_query_engine(service_context=service_context_gpt4)\n",
532
- "query_engine_gpt4 = MultiStepQueryEngine(\n",
533
- " query_engine=query_engine_gpt4,\n",
534
- " query_transform=step_decompose_transform_gpt4,\n",
535
- " index_summary=\"Used to answer questions about the LLaMA2 Model\",\n",
536
- ")"
537
- ]
538
- },
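
The `DeprecationWarning` printed above notes that `ServiceContext` is deprecated in favor of `Settings`. A sketch of the equivalent setup without the deprecated class; the multi-step wrapper itself is unchanged:

```python
from llama_index.core import Settings

# Route components through the GPT-4 model instead of a ServiceContext.
Settings.llm = gpt4
query_engine_gpt4 = vector_index.as_query_engine(llm=gpt4)
query_engine_gpt4 = MultiStepQueryEngine(
    query_engine=query_engine_gpt4,
    query_transform=step_decompose_transform_gpt4,
    index_summary="Used to answer questions about the LLaMA2 Model",
)
```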
539
- {
540
- "cell_type": "markdown",
541
- "metadata": {
542
- "id": "8JPD8yAinVSq"
543
- },
544
- "source": [
545
- "# Query Dataset"
546
- ]
547
- },
548
- {
549
- "cell_type": "markdown",
550
- "metadata": {
551
- "id": "D2IByQ5-ox9U"
552
- },
553
- "source": [
554
- "## Default"
555
- ]
556
- },
557
- {
558
- "cell_type": "code",
559
- "execution_count": 22,
560
- "metadata": {
561
- "id": "b0gue7cyctt1"
562
- },
563
- "outputs": [],
564
- "source": [
565
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
566
- "# and using an LLM to formulate the final answer.\n",
567
- "query_engine = vector_index.as_query_engine()\n",
568
- "\n",
569
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
570
- ]
571
- },
572
- {
573
- "cell_type": "code",
574
- "execution_count": 23,
575
- "metadata": {
576
- "colab": {
577
- "base_uri": "https://localhost:8080/",
578
- "height": 53
579
- },
580
- "id": "VKK3jMprctre",
581
- "outputId": "b6ed346c-714b-44a6-b8fa-bfaca1b38deb"
582
- },
583
- "outputs": [
584
- {
585
- "data": {
586
- "text/plain": [
587
- "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
588
- ]
589
- },
590
- "execution_count": 23,
591
- "metadata": {},
592
- "output_type": "execute_result"
593
- }
594
- ],
595
- "source": [
596
- "res.response"
597
- ]
598
- },
599
- {
600
- "cell_type": "code",
601
- "execution_count": 24,
602
- "metadata": {
603
- "colab": {
604
- "base_uri": "https://localhost:8080/"
605
- },
606
- "id": "465dH4yQc7Ct",
607
- "outputId": "6f7eb440-cc24-4d20-ac35-fa747265d18d"
608
- },
609
- "outputs": [
610
- {
611
- "name": "stdout",
612
- "output_type": "stream",
613
- "text": [
614
- "Node ID\t 63380d3f-7aff-47cd-b2c1-e4baaed70a7e\n",
615
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
616
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
617
- "Score\t 0.7167442801500137\n",
618
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
619
- "Node ID\t 77d0679b-6de4-4467-bc39-7932e18ae282\n",
620
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
621
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
622
- "Score\t 0.6967843740521363\n",
623
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
624
- ]
625
- }
626
- ],
627
- "source": [
628
- "for src in res.source_nodes:\n",
629
- " print(\"Node ID\\t\", src.node_id)\n",
630
- " print(\"Title\\t\", src.metadata['title'])\n",
631
- " print(\"Text\\t\", src.text)\n",
632
- " print(\"Score\\t\", src.score)\n",
633
- " print(\"-_\"*20)"
634
- ]
635
- },
636
- {
637
- "cell_type": "markdown",
638
- "metadata": {
639
- "id": "2y2AiInmpz7g"
640
- },
641
- "source": [
642
- "## GPT-4 Multi-Step"
643
- ]
644
- },
645
- {
646
- "cell_type": "code",
647
- "execution_count": 25,
648
- "metadata": {
649
- "colab": {
650
- "base_uri": "https://localhost:8080/"
651
- },
652
- "id": "69kADAFilW1n",
653
- "outputId": "8a847a58-539f-4ba7-ca07-ef80ceb8b3e2"
654
- },
655
- "outputs": [
656
- {
657
- "name": "stdout",
658
- "output_type": "stream",
659
- "text": [
660
- "\u001b[1;3;33m> Current query: How many parameters LLaMA2 model has?\n",
661
- "\u001b[0m\u001b[1;3;38;5;200m> New query: What is the LLaMA2 Model?\n",
662
- "\u001b[0m\u001b[1;3;33m> Current query: How many parameters LLaMA2 model has?\n",
663
- "\u001b[0m\u001b[1;3;38;5;200m> New query: None\n",
664
- "\u001b[0m"
665
- ]
666
- }
667
- ],
668
- "source": [
669
- "response_gpt4 = query_engine_gpt4.query(\"How many parameters LLaMA2 model has?\")"
670
- ]
671
- },
672
- {
673
- "cell_type": "code",
674
- "execution_count": 26,
675
- "metadata": {
676
- "colab": {
677
- "base_uri": "https://localhost:8080/",
678
- "height": 35
679
- },
680
- "id": "_ul5p3AMldzk",
681
- "outputId": "8c5cadda-8e06-4398-81bc-8571d4710b2a"
682
- },
683
- "outputs": [
684
- {
685
- "data": {
686
- "text/plain": [
687
- "'The LLaMA2 model has parameters ranging from 7 billion to 70 billion.'"
688
- ]
689
- },
690
- "execution_count": 26,
691
- "metadata": {},
692
- "output_type": "execute_result"
693
- }
694
- ],
695
- "source": [
696
- "response_gpt4.response"
697
- ]
698
- },
699
- {
700
- "cell_type": "code",
701
- "execution_count": 27,
702
- "metadata": {
703
- "colab": {
704
- "base_uri": "https://localhost:8080/"
705
- },
706
- "id": "k5pJPBPRqjbG",
707
- "outputId": "0bdd8382-8392-483d-bb6a-51e7a146eeb3"
708
- },
709
- "outputs": [
710
- {
711
- "name": "stdout",
712
- "output_type": "stream",
713
- "text": [
714
- "Node ID\t 3f7709ed-985e-417f-b88d-e3eac6ae8a06\n",
715
- "Text\t \n",
716
- "Question: What is the LLaMA2 Model?\n",
717
- "Answer: The Llama 2 model is an open-source commercial language model developed by Meta, available in different sizes ranging from 7 billion to 70 billion parameters. It is designed to be integrated into AI-powered applications for businesses, with a focus on safety considerations in its design. The model's Ghost Attention feature enhances conversational continuity, and it possesses a groundbreaking temporal capability for organizing information based on time relevance to deliver contextually accurate responses.\n",
718
- "Score\t None\n",
719
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
720
- "Node ID\t 63380d3f-7aff-47cd-b2c1-e4baaed70a7e\n",
721
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
722
- "Score\t 0.7149311149257048\n",
723
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
724
- "Node ID\t 8a4bda7f-9e2a-44ff-a59b-84f36b8b3431\n",
725
- "Text\t with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering strong competition to closed-source models. V. Ghost Attention: Enhancing Conversational Continuity One unique feature in Llama 2 is Ghost Attention, which ensures continuity in conversations. This means that even after multiple interactions, the model remembers its initial instructions, ensuring more coherent and consistent responses throughout the conversation. This feature significantly enhances the user experience and makes Llama 2 a more reliable language model for interactive applications. In the example below, on the left, it forgets to use an emoji after a few conversations. On the right, with Ghost Attention, even after having many conversations, it will remember the context and continue to use emojis in its response. VI. Temporal Capability: A Leap in Information Organization Meta reported a groundbreaking temporal capability, where the model organizes information based on time relevance. Each question posed to the model is associated with a date, and it responds accordingly by considering the event date before which the question becomes irrelevant. For example, if you ask the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete\n",
726
- "Score\t 0.714000324321046\n",
727
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
728
- ]
729
- }
730
- ],
731
- "source": [
732
- "for src in response_gpt4.source_nodes:\n",
733
- " print(\"Node ID\\t\", src.node_id)\n",
734
- " print(\"Text\\t\", src.text)\n",
735
- " print(\"Score\\t\", src.score)\n",
736
- " print(\"-_\"*20)"
737
- ]
738
- },
739
- {
740
- "cell_type": "markdown",
741
- "metadata": {
742
- "id": "jwcSCiMhp4Uh"
743
- },
744
- "source": [
745
- "# Test GPT-3 Multi-Step"
746
- ]
747
- },
748
- {
749
- "cell_type": "code",
750
- "execution_count": 28,
751
- "metadata": {
752
- "id": "uH9gNfZuslHK"
753
- },
754
- "outputs": [
755
- {
756
- "name": "stderr",
757
- "output_type": "stream",
758
- "text": [
759
- "/var/folders/l7/9qcp7g5x5rl9x8ltw0t85qym0000gn/T/ipykernel_1424/1136257440.py:6: DeprecationWarning: Call to deprecated class method from_defaults. (ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.) -- Deprecated since version 0.10.0.\n",
760
- " service_context_gpt3 = ServiceContext.from_defaults(llm=gpt3)\n"
761
- ]
762
- }
763
- ],
764
- "source": [
765
- "from llama_index.core import ServiceContext\n",
766
- "from llama_index.core.indices.query.query_transform.base import StepDecomposeQueryTransform\n",
767
- "from llama_index.core.query_engine.multistep_query_engine import MultiStepQueryEngine\n",
768
- "\n",
769
- "gpt3 = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\")\n",
770
- "service_context_gpt3 = ServiceContext.from_defaults(llm=gpt3)\n",
771
- "\n",
772
- "step_decompose_transform_gpt3 = StepDecomposeQueryTransform(llm=gpt3, verbose=True)\n",
773
- "\n",
774
- "query_engine_gpt3 = vector_index.as_query_engine(service_context=service_context_gpt3)\n",
775
- "query_engine_gpt3 = MultiStepQueryEngine(\n",
776
- " query_engine=query_engine_gpt3,\n",
777
- " query_transform=step_decompose_transform_gpt3,\n",
778
- " index_summary=\"Used to answer questions about the LLaMA2 Model\",\n",
779
- ")"
780
- ]
781
- },
782
- {
783
- "cell_type": "code",
784
- "execution_count": 29,
785
- "metadata": {
786
- "colab": {
787
- "base_uri": "https://localhost:8080/"
788
- },
789
- "id": "9s6SkHI0p6VZ",
790
- "outputId": "1c87dbda-e026-4e28-f7eb-b01145c62b77"
791
- },
792
- "outputs": [
793
- {
794
- "name": "stdout",
795
- "output_type": "stream",
796
- "text": [
797
- "\u001b[1;3;33m> Current query: How many parameters LLaMA2 model has?\n",
798
- "\u001b[0m\u001b[1;3;38;5;200m> New query: What are the main components or features of the LLaMA2 model?\n",
799
- "\u001b[0m\u001b[1;3;33m> Current query: How many parameters LLaMA2 model has?\n",
800
- "\u001b[0m\u001b[1;3;38;5;200m> New query: What is the range of model sizes available for the LLaMA2 model?\n",
801
- "\u001b[0m\u001b[1;3;33m> Current query: How many parameters LLaMA2 model has?\n",
802
- "\u001b[0m\u001b[1;3;38;5;200m> New query: What are the safety considerations in the LLaMA2 model?\n",
803
- "\u001b[0m"
804
- ]
805
- }
806
- ],
807
- "source": [
808
- "response_gpt3 = query_engine_gpt3.query(\"How many parameters LLaMA2 model has?\")"
809
- ]
810
- },
811
- {
812
- "cell_type": "code",
813
- "execution_count": 30,
814
- "metadata": {
815
- "colab": {
816
- "base_uri": "https://localhost:8080/",
817
- "height": 35
818
- },
819
- "id": "FlgMkAhQsTIY",
820
- "outputId": "0996e879-3914-44b1-cdec-e4f0b0ba7a4e"
821
- },
822
- "outputs": [
823
- {
824
- "data": {
825
- "text/plain": [
826
- "'The LLaMA2 model has model sizes ranging from 7 billion to 70 billion parameters.'"
827
- ]
828
- },
829
- "execution_count": 30,
830
- "metadata": {},
831
- "output_type": "execute_result"
832
- }
833
- ],
834
- "source": [
835
- "response_gpt3.response"
836
- ]
837
- },
838
- {
839
- "cell_type": "markdown",
840
- "metadata": {
841
- "id": "DxOF2qth1gUC"
842
- },
843
- "source": [
844
- "# Test the Retriever on the Multi-Step Engine"
845
- ]
846
- },
847
- {
848
- "cell_type": "code",
849
- "execution_count": 31,
850
- "metadata": {
851
- "id": "In9BZbU10KAz"
852
- },
853
- "outputs": [],
854
- "source": [
855
- "import llama_index"
856
- ]
857
- },
858
- {
859
- "cell_type": "code",
860
- "execution_count": 32,
861
- "metadata": {
862
- "id": "_-fBK2g2zkKb"
863
- },
864
- "outputs": [],
865
- "source": [
866
- "from llama_index.core.indices.query.schema import QueryBundle"
867
- ]
868
- },
869
- {
870
- "cell_type": "code",
871
- "execution_count": 33,
872
- "metadata": {
873
- "id": "wqT7mlhx1KGB"
874
- },
875
- "outputs": [],
876
- "source": [
877
- "t = QueryBundle(\"How many parameters LLaMA2 model has?\")"
878
- ]
879
- },
880
- {
881
- "cell_type": "code",
882
- "execution_count": 34,
883
- "metadata": {
884
- "colab": {
885
- "base_uri": "https://localhost:8080/",
886
- "height": 304
887
- },
888
- "id": "OHpa3MqXyyvd",
889
- "outputId": "d9b39a47-751d-48a1-ce68-ebf0a50b938d"
890
- },
891
- "outputs": [
892
- {
893
- "ename": "NotImplementedError",
894
- "evalue": "This query engine does not support retrieve, use query directly",
895
- "output_type": "error",
896
- "traceback": [
897
- "\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
898
- "\u001b[0;31mNotImplementedError\u001b[0m Traceback (most recent call last)",
899
- "Cell \u001b[0;32mIn[34], line 1\u001b[0m\n\u001b[0;32m----> 1\u001b[0m \u001b[43mquery_engine_gpt3\u001b[49m\u001b[38;5;241;43m.\u001b[39;49m\u001b[43mretrieve\u001b[49m\u001b[43m(\u001b[49m\u001b[43mt\u001b[49m\u001b[43m)\u001b[49m\n",
900
- "File \u001b[0;32m~/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/llama_index/core/base/base_query_engine.py:49\u001b[0m, in \u001b[0;36mBaseQueryEngine.retrieve\u001b[0;34m(self, query_bundle)\u001b[0m\n\u001b[1;32m 48\u001b[0m \u001b[38;5;28;01mdef\u001b[39;00m \u001b[38;5;21mretrieve\u001b[39m(\u001b[38;5;28mself\u001b[39m, query_bundle: QueryBundle) \u001b[38;5;241m-\u001b[39m\u001b[38;5;241m>\u001b[39m List[NodeWithScore]:\n\u001b[0;32m---> 49\u001b[0m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mNotImplementedError\u001b[39;00m(\n\u001b[1;32m 50\u001b[0m \u001b[38;5;124m\"\u001b[39m\u001b[38;5;124mThis query engine does not support retrieve, use query directly\u001b[39m\u001b[38;5;124m\"\u001b[39m\n\u001b[1;32m 51\u001b[0m )\n",
901
- "\u001b[0;31mNotImplementedError\u001b[0m: This query engine does not support retrieve, use query directly"
902
- ]
903
- }
904
- ],
905
- "source": [
906
- "query_engine_gpt3.retrieve(t)"
907
- ]
908
- },
909
- {
910
- "cell_type": "markdown",
911
- "metadata": {
912
- "id": "FCdPwVAQ6ixg"
913
- },
914
- "source": [
915
- "# HyDE Transform"
916
- ]
917
- },
918
- {
919
- "cell_type": "code",
920
- "execution_count": 35,
921
- "metadata": {
922
- "id": "1x6He0T961Kg"
923
- },
924
- "outputs": [],
925
- "source": [
926
- "query_engine = vector_index.as_query_engine()"
927
- ]
928
- },
929
- {
930
- "cell_type": "code",
931
- "execution_count": 36,
932
- "metadata": {
933
- "id": "0GgtfeBC6m0H"
934
- },
935
- "outputs": [],
936
- "source": [
937
- "from llama_index.core.indices.query.query_transform import HyDEQueryTransform\n",
938
- "from llama_index.core.query_engine.transform_query_engine import TransformQueryEngine\n",
939
- "\n",
940
- "hyde = HyDEQueryTransform(include_original=True)\n",
941
- "hyde_query_engine = TransformQueryEngine(query_engine, hyde)"
942
- ]
943
- },
944
- {
945
- "cell_type": "code",
946
- "execution_count": 37,
947
- "metadata": {
948
- "id": "mm3nYnIE6mwl"
949
- },
950
- "outputs": [],
951
- "source": [
952
- "response = hyde_query_engine.query(\"How many parameters LLaMA2 model has?\")"
953
- ]
954
- },
955
- {
956
- "cell_type": "code",
957
- "execution_count": 38,
958
- "metadata": {
959
- "colab": {
960
- "base_uri": "https://localhost:8080/",
961
- "height": 53
962
- },
963
- "id": "PjTJ2poc6mt5",
964
- "outputId": "32fc89c2-474d-4791-e4b0-2a1de262b571"
965
- },
966
- "outputs": [
967
- {
968
- "data": {
969
- "text/plain": [
970
- "'The LLaMA 2 model has four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
971
- ]
972
- },
973
- "execution_count": 38,
974
- "metadata": {},
975
- "output_type": "execute_result"
976
- }
977
- ],
978
- "source": [
979
- "response.response"
980
- ]
981
- },
982
- {
983
- "cell_type": "code",
984
- "execution_count": 39,
985
- "metadata": {
986
- "colab": {
987
- "base_uri": "https://localhost:8080/"
988
- },
989
- "id": "StgikqWZ6mrl",
990
- "outputId": "f0552af4-524e-444b-b8cb-67a665fad474"
991
- },
992
- "outputs": [
993
- {
994
- "name": "stdout",
995
- "output_type": "stream",
996
- "text": [
997
- "Node ID\t 63380d3f-7aff-47cd-b2c1-e4baaed70a7e\n",
998
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
999
- "Score\t 0.7504822493620628\n",
1000
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
1001
- "Node ID\t 5f575ba5-e68d-4c50-90bf-1125c4bd51f8\n",
1002
- "Text\t Models Meta AI, formerly known as Facebook Artificial Intelligence Research (FAIR), is an artificial intelligence laboratory that aims to share open-source frameworks, tools, libraries, and models for research exploration and large-scale production deployment. In 2018, they released the open-source PyText, a modeling framework focused on NLP systems. Then, in August 2022, they announced the release of BlenderBot 3, a chatbot designed to improve conversational skills and safety. In November 2022, Meta developed a large language model called Galactica, which assists scientists with tasks such as summarizing academic papers and annotating molecules and proteins. Released in February 2023, LLaMA (Large Language Model Meta AI) is a transformer-based foundational large language model by Meta that ventures into both the AI and academic spaces. The model aims to help researchers, scientists, and engineers advance their work in exploring AI applications. It will be released under a non-commercial license to prevent misuse, and access will be granted to academic researchers, individuals, and organizations affiliated with the government, civil society, academia, and industry research facilities on a selective case-by-case basis. The sharing of codes and weights allows other researchers to test new approaches in LLMs. The LLaMA models have a range of 7 billion to 65 billion parameters. LLaMA-65B can be compared to DeepMind's Chinchilla and Google's PaLM. Publicly available unlabeled data was used to train these models, and training smaller foundational models require less computing power and resources. LLaMA 65B and 33B have been trained on 1.4 trillion tokens in 20 different languages, and according to the Facebook Artificial Intelligence Research (FAIR) team, the model's performance varies across languages. The data sources used for training included CCNet (67%), GitHub, Wikipedia, ArXiv, Stack Exchange, and books. LLaMA, like other large scale language models, has issues related to biased & toxic generation and hallucination. 6. Eleuther's GPT-Neo Models Founded in July 2020 by Connor Leahy, Sid Black, and Leo Gao, EleutherAI is a non-profit AI research lab\n",
1003
- "Score\t 0.7375396701691563\n",
1004
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
1005
- ]
1006
- }
1007
- ],
1008
- "source": [
1009
- "for src in response.source_nodes:\n",
1010
- " print(\"Node ID\\t\", src.node_id)\n",
1011
- " print(\"Text\\t\", src.text)\n",
1012
- " print(\"Score\\t\", src.score)\n",
1013
- " print(\"-_\"*20)"
1014
- ]
1015
- },
1016
- {
1017
- "cell_type": "code",
1018
- "execution_count": 40,
1019
- "metadata": {
1020
- "id": "17Jbo1FH6mjH"
1021
- },
1022
- "outputs": [],
1023
- "source": [
1024
- "query_bundle = hyde(\"How many parameters LLaMA2 model has?\")"
1025
- ]
1026
- },
1027
- {
1028
- "cell_type": "code",
1029
- "execution_count": 41,
1030
- "metadata": {
1031
- "id": "UZEK63K77W7X"
1032
- },
1033
- "outputs": [],
1034
- "source": [
1035
- "hyde_doc = query_bundle.embedding_strs[0]"
1036
- ]
1037
- },
1038
- {
1039
- "cell_type": "code",
1040
- "execution_count": 42,
1041
- "metadata": {
1042
- "colab": {
1043
- "base_uri": "https://localhost:8080/",
1044
- "height": 214
1045
- },
1046
- "id": "wyzwkpSn7Yi1",
1047
- "outputId": "9b03f8dc-a26e-45e4-eec1-22366bd68dd2"
1048
- },
1049
- "outputs": [
1050
- {
1051
- "data": {
1052
- "text/plain": [
1053
- "'The LLaMA2 model has a total of 12 parameters. These parameters include the weights and biases of the neural network layers, as well as the hyperparameters such as learning rate, batch size, and number of epochs. Additionally, the model may also include regularization parameters such as L1 or L2 regularization coefficients. Overall, these parameters are crucial in determining the performance and behavior of the LLaMA2 model in various machine learning tasks.\"'"
1054
- ]
1055
- },
1056
- "execution_count": 42,
1057
- "metadata": {},
1058
- "output_type": "execute_result"
1059
- }
1060
- ],
1061
- "source": [
1062
- "hyde_doc"
1063
- ]
1064
- },
1065
- {
1066
- "cell_type": "code",
1067
- "execution_count": null,
1068
- "metadata": {},
1069
- "outputs": [],
1070
- "source": []
1071
- }
1072
- ],
1073
- "metadata": {
1074
- "colab": {
1075
- "authorship_tag": "ABX9TyMcBonOXFUEEHJsKREchiOp",
1076
- "include_colab_link": true,
1077
- "provenance": []
1078
- },
1079
- "kernelspec": {
1080
- "display_name": "Python 3",
1081
- "name": "python3"
1082
- },
1083
- "language_info": {
1084
- "codemirror_mode": {
1085
- "name": "ipython",
1086
- "version": 3
1087
- },
1088
- "file_extension": ".py",
1089
- "mimetype": "text/x-python",
1090
- "name": "python",
1091
- "nbconvert_exporter": "python",
1092
- "pygments_lexer": "ipython3",
1093
- "version": "3.11.8"
1094
- },
1095
- "widgets": {
1096
- "application/vnd.jupyter.widget-state+json": {
1097
- "0245f2604e4d49c8bd0210302746c47b": {
1098
- "model_module": "@jupyter-widgets/base",
1099
- "model_module_version": "1.2.0",
1100
- "model_name": "LayoutModel",
1101
- "state": {
1102
- "_model_module": "@jupyter-widgets/base",
1103
- "_model_module_version": "1.2.0",
1104
- "_model_name": "LayoutModel",
1105
- "_view_count": null,
1106
- "_view_module": "@jupyter-widgets/base",
1107
- "_view_module_version": "1.2.0",
1108
- "_view_name": "LayoutView",
1109
- "align_content": null,
1110
- "align_items": null,
1111
- "align_self": null,
1112
- "border": null,
1113
- "bottom": null,
1114
- "display": null,
1115
- "flex": null,
1116
- "flex_flow": null,
1117
- "grid_area": null,
1118
- "grid_auto_columns": null,
1119
- "grid_auto_flow": null,
1120
- "grid_auto_rows": null,
1121
- "grid_column": null,
1122
- "grid_gap": null,
1123
- "grid_row": null,
1124
- "grid_template_areas": null,
1125
- "grid_template_columns": null,
1126
- "grid_template_rows": null,
1127
- "height": null,
1128
- "justify_content": null,
1129
- "justify_items": null,
1130
- "left": null,
1131
- "margin": null,
1132
- "max_height": null,
1133
- "max_width": null,
1134
- "min_height": null,
1135
- "min_width": null,
1136
- "object_fit": null,
1137
- "object_position": null,
1138
- "order": null,
1139
- "overflow": null,
1140
- "overflow_x": null,
1141
- "overflow_y": null,
1142
- "padding": null,
1143
- "right": null,
1144
- "top": null,
1145
- "visibility": null,
1146
- "width": null
1147
- }
1148
- },
1149
- "134210510d49476e959dd7d032bbdbdc": {
1150
- "model_module": "@jupyter-widgets/controls",
1151
- "model_module_version": "1.5.0",
1152
- "model_name": "DescriptionStyleModel",
1153
- "state": {
1154
- "_model_module": "@jupyter-widgets/controls",
1155
- "_model_module_version": "1.5.0",
1156
- "_model_name": "DescriptionStyleModel",
1157
- "_view_count": null,
1158
- "_view_module": "@jupyter-widgets/base",
1159
- "_view_module_version": "1.2.0",
1160
- "_view_name": "StyleView",
1161
- "description_width": ""
1162
- }
1163
- },
1164
- "13b9c5395bca4c3ba21265240cb936cf": {
1165
- "model_module": "@jupyter-widgets/base",
1166
- "model_module_version": "1.2.0",
1167
- "model_name": "LayoutModel",
1168
- "state": {
1169
- "_model_module": "@jupyter-widgets/base",
1170
- "_model_module_version": "1.2.0",
1171
- "_model_name": "LayoutModel",
1172
- "_view_count": null,
1173
- "_view_module": "@jupyter-widgets/base",
1174
- "_view_module_version": "1.2.0",
1175
- "_view_name": "LayoutView",
1176
- "align_content": null,
1177
- "align_items": null,
1178
- "align_self": null,
1179
- "border": null,
1180
- "bottom": null,
1181
- "display": null,
1182
- "flex": null,
1183
- "flex_flow": null,
1184
- "grid_area": null,
1185
- "grid_auto_columns": null,
1186
- "grid_auto_flow": null,
1187
- "grid_auto_rows": null,
1188
- "grid_column": null,
1189
- "grid_gap": null,
1190
- "grid_row": null,
1191
- "grid_template_areas": null,
1192
- "grid_template_columns": null,
1193
- "grid_template_rows": null,
1194
- "height": null,
1195
- "justify_content": null,
1196
- "justify_items": null,
1197
- "left": null,
1198
- "margin": null,
1199
- "max_height": null,
1200
- "max_width": null,
1201
- "min_height": null,
1202
- "min_width": null,
1203
- "object_fit": null,
1204
- "object_position": null,
1205
- "order": null,
1206
- "overflow": null,
1207
- "overflow_x": null,
1208
- "overflow_y": null,
1209
- "padding": null,
1210
- "right": null,
1211
- "top": null,
1212
- "visibility": null,
1213
- "width": null
1214
- }
1215
- },
1216
- "193aef33d9184055bb9223f56d456de6": {
1217
- "model_module": "@jupyter-widgets/base",
1218
- "model_module_version": "1.2.0",
1219
- "model_name": "LayoutModel",
1220
- "state": {
1221
- "_model_module": "@jupyter-widgets/base",
1222
- "_model_module_version": "1.2.0",
1223
- "_model_name": "LayoutModel",
1224
- "_view_count": null,
1225
- "_view_module": "@jupyter-widgets/base",
1226
- "_view_module_version": "1.2.0",
1227
- "_view_name": "LayoutView",
1228
- "align_content": null,
1229
- "align_items": null,
1230
- "align_self": null,
1231
- "border": null,
1232
- "bottom": null,
1233
- "display": null,
1234
- "flex": null,
1235
- "flex_flow": null,
1236
- "grid_area": null,
1237
- "grid_auto_columns": null,
1238
- "grid_auto_flow": null,
1239
- "grid_auto_rows": null,
1240
- "grid_column": null,
1241
- "grid_gap": null,
1242
- "grid_row": null,
1243
- "grid_template_areas": null,
1244
- "grid_template_columns": null,
1245
- "grid_template_rows": null,
1246
- "height": null,
1247
- "justify_content": null,
1248
- "justify_items": null,
1249
- "left": null,
1250
- "margin": null,
1251
- "max_height": null,
1252
- "max_width": null,
1253
- "min_height": null,
1254
- "min_width": null,
1255
- "object_fit": null,
1256
- "object_position": null,
1257
- "order": null,
1258
- "overflow": null,
1259
- "overflow_x": null,
1260
- "overflow_y": null,
1261
- "padding": null,
1262
- "right": null,
1263
- "top": null,
1264
- "visibility": null,
1265
- "width": null
1266
- }
1267
- },
1268
- "3fbabd8a8660461ba5e7bc08ef39139a": {
1269
- "model_module": "@jupyter-widgets/controls",
1270
- "model_module_version": "1.5.0",
1271
- "model_name": "HBoxModel",
1272
- "state": {
1273
- "_dom_classes": [],
1274
- "_model_module": "@jupyter-widgets/controls",
1275
- "_model_module_version": "1.5.0",
1276
- "_model_name": "HBoxModel",
1277
- "_view_count": null,
1278
- "_view_module": "@jupyter-widgets/controls",
1279
- "_view_module_version": "1.5.0",
1280
- "_view_name": "HBoxView",
1281
- "box_style": "",
1282
- "children": [
1283
- "IPY_MODEL_df2365556ae242a2ab1a119f9a31a561",
1284
- "IPY_MODEL_5f4b9d32df8f446e858e4c289dc282f9",
1285
- "IPY_MODEL_5b588f83a15d42d9aca888e06bbd95ff"
1286
- ],
1287
- "layout": "IPY_MODEL_ad073bca655540809e39f26538d2ec0d"
1288
- }
1289
- },
1290
- "47a4586384274577a726c57605e7f8d9": {
1291
- "model_module": "@jupyter-widgets/controls",
1292
- "model_module_version": "1.5.0",
1293
- "model_name": "DescriptionStyleModel",
1294
- "state": {
1295
- "_model_module": "@jupyter-widgets/controls",
1296
- "_model_module_version": "1.5.0",
1297
- "_model_name": "DescriptionStyleModel",
1298
- "_view_count": null,
1299
- "_view_module": "@jupyter-widgets/base",
1300
- "_view_module_version": "1.2.0",
1301
- "_view_name": "StyleView",
1302
- "description_width": ""
1303
- }
1304
- },
1305
- "4a172e8c6aa44e41a42fc1d9cf714fd0": {
1306
- "model_module": "@jupyter-widgets/controls",
1307
- "model_module_version": "1.5.0",
1308
- "model_name": "HTMLModel",
1309
- "state": {
1310
- "_dom_classes": [],
1311
- "_model_module": "@jupyter-widgets/controls",
1312
- "_model_module_version": "1.5.0",
1313
- "_model_name": "HTMLModel",
1314
- "_view_count": null,
1315
- "_view_module": "@jupyter-widgets/controls",
1316
- "_view_module_version": "1.5.0",
1317
- "_view_name": "HTMLView",
1318
- "description": "",
1319
- "description_tooltip": null,
1320
- "layout": "IPY_MODEL_e7937a1bc68441a080374911a6563376",
1321
- "placeholder": "​",
1322
- "style": "IPY_MODEL_e532ed7bfef34f67b5fcacd9534eb789",
1323
- "value": " 108/108 [00:03&lt;00:00, 33.70it/s]"
1324
- }
1325
- },
1326
- "5b588f83a15d42d9aca888e06bbd95ff": {
1327
- "model_module": "@jupyter-widgets/controls",
1328
- "model_module_version": "1.5.0",
1329
- "model_name": "HTMLModel",
1330
- "state": {
1331
- "_dom_classes": [],
1332
- "_model_module": "@jupyter-widgets/controls",
1333
- "_model_module_version": "1.5.0",
1334
- "_model_name": "HTMLModel",
1335
- "_view_count": null,
1336
- "_view_module": "@jupyter-widgets/controls",
1337
- "_view_module_version": "1.5.0",
1338
- "_view_name": "HTMLView",
1339
- "description": "",
1340
- "description_tooltip": null,
1341
- "layout": "IPY_MODEL_af9b6ae927dd4764b9692507791bc67e",
1342
- "placeholder": "​",
1343
- "style": "IPY_MODEL_134210510d49476e959dd7d032bbdbdc",
1344
- "value": " 14/14 [00:00&lt;00:00, 21.41it/s]"
1345
- }
1346
- },
1347
- "5c7973afd79349ed997a69120d0629b2": {
1348
- "model_module": "@jupyter-widgets/controls",
1349
- "model_module_version": "1.5.0",
1350
- "model_name": "ProgressStyleModel",
1351
- "state": {
1352
- "_model_module": "@jupyter-widgets/controls",
1353
- "_model_module_version": "1.5.0",
1354
- "_model_name": "ProgressStyleModel",
1355
- "_view_count": null,
1356
- "_view_module": "@jupyter-widgets/base",
1357
- "_view_module_version": "1.2.0",
1358
- "_view_name": "StyleView",
1359
- "bar_color": null,
1360
- "description_width": ""
1361
- }
1362
- },
1363
- "5f4b9d32df8f446e858e4c289dc282f9": {
1364
- "model_module": "@jupyter-widgets/controls",
1365
- "model_module_version": "1.5.0",
1366
- "model_name": "FloatProgressModel",
1367
- "state": {
1368
- "_dom_classes": [],
1369
- "_model_module": "@jupyter-widgets/controls",
1370
- "_model_module_version": "1.5.0",
1371
- "_model_name": "FloatProgressModel",
1372
- "_view_count": null,
1373
- "_view_module": "@jupyter-widgets/controls",
1374
- "_view_module_version": "1.5.0",
1375
- "_view_name": "ProgressView",
1376
- "bar_style": "success",
1377
- "description": "",
1378
- "description_tooltip": null,
1379
- "layout": "IPY_MODEL_96a3bdece738481db57e811ccb74a974",
1380
- "max": 14,
1381
- "min": 0,
1382
- "orientation": "horizontal",
1383
- "style": "IPY_MODEL_5c7973afd79349ed997a69120d0629b2",
1384
- "value": 14
1385
- }
1386
- },
1387
- "5f9bb065c2b74d2e8ded32e1306a7807": {
1388
- "model_module": "@jupyter-widgets/controls",
1389
- "model_module_version": "1.5.0",
1390
- "model_name": "HBoxModel",
1391
- "state": {
1392
- "_dom_classes": [],
1393
- "_model_module": "@jupyter-widgets/controls",
1394
- "_model_module_version": "1.5.0",
1395
- "_model_name": "HBoxModel",
1396
- "_view_count": null,
1397
- "_view_module": "@jupyter-widgets/controls",
1398
- "_view_module_version": "1.5.0",
1399
- "_view_name": "HBoxView",
1400
- "box_style": "",
1401
- "children": [
1402
- "IPY_MODEL_73a06bc546a64f7f99a9e4a135319dcd",
1403
- "IPY_MODEL_ce48deaf4d8c49cdae92bfdbb3a78df0",
1404
- "IPY_MODEL_4a172e8c6aa44e41a42fc1d9cf714fd0"
1405
- ],
1406
- "layout": "IPY_MODEL_0245f2604e4d49c8bd0210302746c47b"
1407
- }
1408
- },
1409
- "73a06bc546a64f7f99a9e4a135319dcd": {
1410
- "model_module": "@jupyter-widgets/controls",
1411
- "model_module_version": "1.5.0",
1412
- "model_name": "HTMLModel",
1413
- "state": {
1414
- "_dom_classes": [],
1415
- "_model_module": "@jupyter-widgets/controls",
1416
- "_model_module_version": "1.5.0",
1417
- "_model_name": "HTMLModel",
1418
- "_view_count": null,
1419
- "_view_module": "@jupyter-widgets/controls",
1420
- "_view_module_version": "1.5.0",
1421
- "_view_name": "HTMLView",
1422
- "description": "",
1423
- "description_tooltip": null,
1424
- "layout": "IPY_MODEL_e956dfab55084a9cbe33c8e331b511e7",
1425
- "placeholder": "​",
1426
- "style": "IPY_MODEL_cb394578badd43a89850873ad2526542",
1427
- "value": "Generating embeddings: 100%"
1428
- }
1429
- },
1430
- "96a3bdece738481db57e811ccb74a974": {
1431
- "model_module": "@jupyter-widgets/base",
1432
- "model_module_version": "1.2.0",
1433
- "model_name": "LayoutModel",
1434
- "state": {
1435
- "_model_module": "@jupyter-widgets/base",
1436
- "_model_module_version": "1.2.0",
1437
- "_model_name": "LayoutModel",
1438
- "_view_count": null,
1439
- "_view_module": "@jupyter-widgets/base",
1440
- "_view_module_version": "1.2.0",
1441
- "_view_name": "LayoutView",
1442
- "align_content": null,
1443
- "align_items": null,
1444
- "align_self": null,
1445
- "border": null,
1446
- "bottom": null,
1447
- "display": null,
1448
- "flex": null,
1449
- "flex_flow": null,
1450
- "grid_area": null,
1451
- "grid_auto_columns": null,
1452
- "grid_auto_flow": null,
1453
- "grid_auto_rows": null,
1454
- "grid_column": null,
1455
- "grid_gap": null,
1456
- "grid_row": null,
1457
- "grid_template_areas": null,
1458
- "grid_template_columns": null,
1459
- "grid_template_rows": null,
1460
- "height": null,
1461
- "justify_content": null,
1462
- "justify_items": null,
1463
- "left": null,
1464
- "margin": null,
1465
- "max_height": null,
1466
- "max_width": null,
1467
- "min_height": null,
1468
- "min_width": null,
1469
- "object_fit": null,
1470
- "object_position": null,
1471
- "order": null,
1472
- "overflow": null,
1473
- "overflow_x": null,
1474
- "overflow_y": null,
1475
- "padding": null,
1476
- "right": null,
1477
- "top": null,
1478
- "visibility": null,
1479
- "width": null
1480
- }
1481
- },
1482
- "abfc9aa911ce4a5ea81c7c451f08295f": {
1483
- "model_module": "@jupyter-widgets/controls",
1484
- "model_module_version": "1.5.0",
1485
- "model_name": "ProgressStyleModel",
1486
- "state": {
1487
- "_model_module": "@jupyter-widgets/controls",
1488
- "_model_module_version": "1.5.0",
1489
- "_model_name": "ProgressStyleModel",
1490
- "_view_count": null,
1491
- "_view_module": "@jupyter-widgets/base",
1492
- "_view_module_version": "1.2.0",
1493
- "_view_name": "StyleView",
1494
- "bar_color": null,
1495
- "description_width": ""
1496
- }
1497
- },
1498
- "ad073bca655540809e39f26538d2ec0d": {
1499
- "model_module": "@jupyter-widgets/base",
1500
- "model_module_version": "1.2.0",
1501
- "model_name": "LayoutModel",
1502
- "state": {
1503
- "_model_module": "@jupyter-widgets/base",
1504
- "_model_module_version": "1.2.0",
1505
- "_model_name": "LayoutModel",
1506
- "_view_count": null,
1507
- "_view_module": "@jupyter-widgets/base",
1508
- "_view_module_version": "1.2.0",
1509
- "_view_name": "LayoutView",
1510
- "align_content": null,
1511
- "align_items": null,
1512
- "align_self": null,
1513
- "border": null,
1514
- "bottom": null,
1515
- "display": null,
1516
- "flex": null,
1517
- "flex_flow": null,
1518
- "grid_area": null,
1519
- "grid_auto_columns": null,
1520
- "grid_auto_flow": null,
1521
- "grid_auto_rows": null,
1522
- "grid_column": null,
1523
- "grid_gap": null,
1524
- "grid_row": null,
1525
- "grid_template_areas": null,
1526
- "grid_template_columns": null,
1527
- "grid_template_rows": null,
1528
- "height": null,
1529
- "justify_content": null,
1530
- "justify_items": null,
1531
- "left": null,
1532
- "margin": null,
1533
- "max_height": null,
1534
- "max_width": null,
1535
- "min_height": null,
1536
- "min_width": null,
1537
- "object_fit": null,
1538
- "object_position": null,
1539
- "order": null,
1540
- "overflow": null,
1541
- "overflow_x": null,
1542
- "overflow_y": null,
1543
- "padding": null,
1544
- "right": null,
1545
- "top": null,
1546
- "visibility": null,
1547
- "width": null
1548
- }
1549
- },
1550
- "af9b6ae927dd4764b9692507791bc67e": {
1551
- "model_module": "@jupyter-widgets/base",
1552
- "model_module_version": "1.2.0",
1553
- "model_name": "LayoutModel",
1554
- "state": {
1555
- "_model_module": "@jupyter-widgets/base",
1556
- "_model_module_version": "1.2.0",
1557
- "_model_name": "LayoutModel",
1558
- "_view_count": null,
1559
- "_view_module": "@jupyter-widgets/base",
1560
- "_view_module_version": "1.2.0",
1561
- "_view_name": "LayoutView",
1562
- "align_content": null,
1563
- "align_items": null,
1564
- "align_self": null,
1565
- "border": null,
1566
- "bottom": null,
1567
- "display": null,
1568
- "flex": null,
1569
- "flex_flow": null,
1570
- "grid_area": null,
1571
- "grid_auto_columns": null,
1572
- "grid_auto_flow": null,
1573
- "grid_auto_rows": null,
1574
- "grid_column": null,
1575
- "grid_gap": null,
1576
- "grid_row": null,
1577
- "grid_template_areas": null,
1578
- "grid_template_columns": null,
1579
- "grid_template_rows": null,
1580
- "height": null,
1581
- "justify_content": null,
1582
- "justify_items": null,
1583
- "left": null,
1584
- "margin": null,
1585
- "max_height": null,
1586
- "max_width": null,
1587
- "min_height": null,
1588
- "min_width": null,
1589
- "object_fit": null,
1590
- "object_position": null,
1591
- "order": null,
1592
- "overflow": null,
1593
- "overflow_x": null,
1594
- "overflow_y": null,
1595
- "padding": null,
1596
- "right": null,
1597
- "top": null,
1598
- "visibility": null,
1599
- "width": null
1600
- }
1601
- },
1602
- "cb394578badd43a89850873ad2526542": {
1603
- "model_module": "@jupyter-widgets/controls",
1604
- "model_module_version": "1.5.0",
1605
- "model_name": "DescriptionStyleModel",
1606
- "state": {
1607
- "_model_module": "@jupyter-widgets/controls",
1608
- "_model_module_version": "1.5.0",
1609
- "_model_name": "DescriptionStyleModel",
1610
- "_view_count": null,
1611
- "_view_module": "@jupyter-widgets/base",
1612
- "_view_module_version": "1.2.0",
1613
- "_view_name": "StyleView",
1614
- "description_width": ""
1615
- }
1616
- },
1617
- "ce48deaf4d8c49cdae92bfdbb3a78df0": {
1618
- "model_module": "@jupyter-widgets/controls",
1619
- "model_module_version": "1.5.0",
1620
- "model_name": "FloatProgressModel",
1621
- "state": {
1622
- "_dom_classes": [],
1623
- "_model_module": "@jupyter-widgets/controls",
1624
- "_model_module_version": "1.5.0",
1625
- "_model_name": "FloatProgressModel",
1626
- "_view_count": null,
1627
- "_view_module": "@jupyter-widgets/controls",
1628
- "_view_module_version": "1.5.0",
1629
- "_view_name": "ProgressView",
1630
- "bar_style": "success",
1631
- "description": "",
1632
- "description_tooltip": null,
1633
- "layout": "IPY_MODEL_193aef33d9184055bb9223f56d456de6",
1634
- "max": 108,
1635
- "min": 0,
1636
- "orientation": "horizontal",
1637
- "style": "IPY_MODEL_abfc9aa911ce4a5ea81c7c451f08295f",
1638
- "value": 108
1639
- }
1640
- },
1641
- "df2365556ae242a2ab1a119f9a31a561": {
1642
- "model_module": "@jupyter-widgets/controls",
1643
- "model_module_version": "1.5.0",
1644
- "model_name": "HTMLModel",
1645
- "state": {
1646
- "_dom_classes": [],
1647
- "_model_module": "@jupyter-widgets/controls",
1648
- "_model_module_version": "1.5.0",
1649
- "_model_name": "HTMLModel",
1650
- "_view_count": null,
1651
- "_view_module": "@jupyter-widgets/controls",
1652
- "_view_module_version": "1.5.0",
1653
- "_view_name": "HTMLView",
1654
- "description": "",
1655
- "description_tooltip": null,
1656
- "layout": "IPY_MODEL_13b9c5395bca4c3ba21265240cb936cf",
1657
- "placeholder": "​",
1658
- "style": "IPY_MODEL_47a4586384274577a726c57605e7f8d9",
1659
- "value": "Parsing nodes: 100%"
1660
- }
1661
- },
1662
- "e532ed7bfef34f67b5fcacd9534eb789": {
1663
- "model_module": "@jupyter-widgets/controls",
1664
- "model_module_version": "1.5.0",
1665
- "model_name": "DescriptionStyleModel",
1666
- "state": {
1667
- "_model_module": "@jupyter-widgets/controls",
1668
- "_model_module_version": "1.5.0",
1669
- "_model_name": "DescriptionStyleModel",
1670
- "_view_count": null,
1671
- "_view_module": "@jupyter-widgets/base",
1672
- "_view_module_version": "1.2.0",
1673
- "_view_name": "StyleView",
1674
- "description_width": ""
1675
- }
1676
- },
1677
- "e7937a1bc68441a080374911a6563376": {
1678
- "model_module": "@jupyter-widgets/base",
1679
- "model_module_version": "1.2.0",
1680
- "model_name": "LayoutModel",
1681
- "state": {
1682
- "_model_module": "@jupyter-widgets/base",
1683
- "_model_module_version": "1.2.0",
1684
- "_model_name": "LayoutModel",
1685
- "_view_count": null,
1686
- "_view_module": "@jupyter-widgets/base",
1687
- "_view_module_version": "1.2.0",
1688
- "_view_name": "LayoutView",
1689
- "align_content": null,
1690
- "align_items": null,
1691
- "align_self": null,
1692
- "border": null,
1693
- "bottom": null,
1694
- "display": null,
1695
- "flex": null,
1696
- "flex_flow": null,
1697
- "grid_area": null,
1698
- "grid_auto_columns": null,
1699
- "grid_auto_flow": null,
1700
- "grid_auto_rows": null,
1701
- "grid_column": null,
1702
- "grid_gap": null,
1703
- "grid_row": null,
1704
- "grid_template_areas": null,
1705
- "grid_template_columns": null,
1706
- "grid_template_rows": null,
1707
- "height": null,
1708
- "justify_content": null,
1709
- "justify_items": null,
1710
- "left": null,
1711
- "margin": null,
1712
- "max_height": null,
1713
- "max_width": null,
1714
- "min_height": null,
1715
- "min_width": null,
1716
- "object_fit": null,
1717
- "object_position": null,
1718
- "order": null,
1719
- "overflow": null,
1720
- "overflow_x": null,
1721
- "overflow_y": null,
1722
- "padding": null,
1723
- "right": null,
1724
- "top": null,
1725
- "visibility": null,
1726
- "width": null
1727
- }
1728
- },
1729
- "e956dfab55084a9cbe33c8e331b511e7": {
1730
- "model_module": "@jupyter-widgets/base",
1731
- "model_module_version": "1.2.0",
1732
- "model_name": "LayoutModel",
1733
- "state": {
1734
- "_model_module": "@jupyter-widgets/base",
1735
- "_model_module_version": "1.2.0",
1736
- "_model_name": "LayoutModel",
1737
- "_view_count": null,
1738
- "_view_module": "@jupyter-widgets/base",
1739
- "_view_module_version": "1.2.0",
1740
- "_view_name": "LayoutView",
1741
- "align_content": null,
1742
- "align_items": null,
1743
- "align_self": null,
1744
- "border": null,
1745
- "bottom": null,
1746
- "display": null,
1747
- "flex": null,
1748
- "flex_flow": null,
1749
- "grid_area": null,
1750
- "grid_auto_columns": null,
1751
- "grid_auto_flow": null,
1752
- "grid_auto_rows": null,
1753
- "grid_column": null,
1754
- "grid_gap": null,
1755
- "grid_row": null,
1756
- "grid_template_areas": null,
1757
- "grid_template_columns": null,
1758
- "grid_template_rows": null,
1759
- "height": null,
1760
- "justify_content": null,
1761
- "justify_items": null,
1762
- "left": null,
1763
- "margin": null,
1764
- "max_height": null,
1765
- "max_width": null,
1766
- "min_height": null,
1767
- "min_width": null,
1768
- "object_fit": null,
1769
- "object_position": null,
1770
- "order": null,
1771
- "overflow": null,
1772
- "overflow_x": null,
1773
- "overflow_y": null,
1774
- "padding": null,
1775
- "right": null,
1776
- "top": null,
1777
- "visibility": null,
1778
- "width": null
1779
- }
1780
- }
1781
- }
1782
- }
1783
- },
1784
- "nbformat": 4,
1785
- "nbformat_minor": 0
1786
- }

notebooks/13-Adding_Router.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
notebooks/14-Adding_Chat.ipynb DELETED
@@ -1,1618 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {
6
- "colab_type": "text",
7
- "id": "view-in-github"
8
- },
9
- "source": [
10
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/14-Adding_Chat.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
11
- ]
12
- },
13
- {
14
- "cell_type": "markdown",
15
- "metadata": {
16
- "id": "-zE1h0uQV7uT"
17
- },
18
- "source": [
19
- "# Install Packages and Setup Variables"
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": 1,
25
- "metadata": {
26
- "colab": {
27
- "base_uri": "https://localhost:8080/"
28
- },
29
- "id": "QPJzr-I9XQ7l",
30
- "outputId": "19864102-680b-446b-fb38-7fad066cee09"
31
- },
32
- "outputs": [],
33
- "source": [
34
- "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-embeddings-huggingface llama-index-readers-web tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic kaleido==0.2.1"
35
- ]
36
- },
37
- {
38
- "cell_type": "code",
39
- "execution_count": 1,
40
- "metadata": {
41
- "id": "riuXwpSPcvWC"
42
- },
43
- "outputs": [],
44
- "source": [
45
- "import os\n",
46
- "\n",
47
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
48
- "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
49
- ]
50
- },
51
- {
52
- "cell_type": "code",
53
- "execution_count": 2,
54
- "metadata": {
55
- "id": "jIEeZzqLbz0J"
56
- },
57
- "outputs": [],
58
- "source": [
59
- "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
60
- "\n",
61
- "import nest_asyncio\n",
62
- "\n",
63
- "nest_asyncio.apply()"
64
- ]
65
- },
66
- {
67
- "cell_type": "markdown",
68
- "metadata": {
69
- "id": "Bkgi2OrYzF7q"
70
- },
71
- "source": [
72
- "# Load a Model"
73
- ]
74
- },
75
- {
76
- "cell_type": "code",
77
- "execution_count": 3,
78
- "metadata": {
79
- "id": "9oGT6crooSSj"
80
- },
81
- "outputs": [
82
- {
83
- "name": "stderr",
84
- "output_type": "stream",
85
- "text": [
86
- "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
87
- " from .autonotebook import tqdm as notebook_tqdm\n"
88
- ]
89
- }
90
- ],
91
- "source": [
92
- "from llama_index.llms.openai import OpenAI\n",
93
- "\n",
94
- "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=512)"
95
- ]
96
- },
97
- {
98
- "cell_type": "markdown",
99
- "metadata": {
100
- "id": "0BwVuJXlzHVL"
101
- },
102
- "source": [
103
- "# Create a VectoreStore"
104
- ]
105
- },
106
- {
107
- "cell_type": "code",
108
- "execution_count": 4,
109
- "metadata": {
110
- "id": "SQP87lHczHKc"
111
- },
112
- "outputs": [],
113
- "source": [
114
- "import chromadb\n",
115
- "\n",
116
- "# create client and a new collection\n",
117
- "# chromadb.EphemeralClient saves data in-memory.\n",
118
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
119
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
120
- ]
121
- },
122
- {
123
- "cell_type": "code",
124
- "execution_count": 5,
125
- "metadata": {
126
- "id": "zAaGcYMJzHAN"
127
- },
128
- "outputs": [],
129
- "source": [
130
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
131
- "\n",
132
- "# Define a storage context object using the created vector database.\n",
133
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
134
- ]
135
- },
136
- {
137
- "cell_type": "markdown",
138
- "metadata": {
139
- "id": "I9JbAzFcjkpn"
140
- },
141
- "source": [
142
- "# Load the Dataset (CSV)"
143
- ]
144
- },
145
- {
146
- "cell_type": "markdown",
147
- "metadata": {
148
- "id": "ceveDuYdWCYk"
149
- },
150
- "source": [
151
- "## Download"
152
- ]
153
- },
154
- {
155
- "cell_type": "markdown",
156
- "metadata": {
157
- "id": "eZwf6pv7WFmD"
158
- },
159
- "source": [
160
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
161
- ]
162
- },
163
- {
164
- "cell_type": "code",
165
- "execution_count": 6,
166
- "metadata": {
167
- "colab": {
168
- "base_uri": "https://localhost:8080/"
169
- },
170
- "id": "wl_pbPvMlv1h",
171
- "outputId": "5418de57-b95b-4b90-b7d0-a801ea3c73f7"
172
- },
173
- "outputs": [
174
- {
175
- "name": "stdout",
176
- "output_type": "stream",
177
- "text": [
178
- " % Total % Received % Xferd Average Speed Time Time Time Current\n",
179
- " Dload Upload Total Spent Left Speed\n",
180
- "100 169k 100 169k 0 0 784k 0 --:--:-- --:--:-- --:--:-- 785k\n"
181
- ]
182
- }
183
- ],
184
- "source": [
185
- "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
186
- ]
187
- },
188
- {
189
- "cell_type": "markdown",
190
- "metadata": {
191
- "id": "VWBLtDbUWJfA"
192
- },
193
- "source": [
194
- "## Read File"
195
- ]
196
- },
197
- {
198
- "cell_type": "code",
199
- "execution_count": 7,
200
- "metadata": {
201
- "colab": {
202
- "base_uri": "https://localhost:8080/"
203
- },
204
- "id": "0Q9sxuW0g3Gd",
205
- "outputId": "801f2ba8-b498-4923-c1cc-c17d3208850c"
206
- },
207
- "outputs": [
208
- {
209
- "data": {
210
- "text/plain": [
211
- "14"
212
- ]
213
- },
214
- "execution_count": 7,
215
- "metadata": {},
216
- "output_type": "execute_result"
217
- }
218
- ],
219
- "source": [
220
- "import csv\n",
221
- "\n",
222
- "rows = []\n",
223
- "\n",
224
- "# Load the file as a JSON\n",
225
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
226
- " csv_reader = csv.reader(file)\n",
227
- "\n",
228
- " for idx, row in enumerate( csv_reader ):\n",
229
- " if idx == 0: continue; # Skip header row\n",
230
- " rows.append( row )\n",
231
- "\n",
232
- "# The number of characters in the dataset.\n",
233
- "len( rows )"
234
- ]
235
- },
236
- {
237
- "cell_type": "markdown",
238
- "metadata": {
239
- "id": "S17g2RYOjmf2"
240
- },
241
- "source": [
242
- "# Convert to Document obj"
243
- ]
244
- },
245
- {
246
- "cell_type": "code",
247
- "execution_count": 8,
248
- "metadata": {
249
- "id": "YizvmXPejkJE"
250
- },
251
- "outputs": [],
252
- "source": [
253
- "from llama_index.core import Document\n",
254
- "\n",
255
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
256
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
257
- ]
258
- },
259
- {
260
- "cell_type": "markdown",
261
- "metadata": {
262
- "id": "qjuLbmFuWsyl"
263
- },
264
- "source": [
265
- "# Transforming"
266
- ]
267
- },
268
- {
269
- "cell_type": "code",
270
- "execution_count": 9,
271
- "metadata": {
272
- "id": "9z3t70DGWsjO"
273
- },
274
- "outputs": [],
275
- "source": [
276
- "from llama_index.core.text_splitter import TokenTextSplitter\n",
277
- "\n",
278
- "# Define the splitter object that split the text into segments with 512 tokens,\n",
279
- "# with a 128 overlap between the segments.\n",
280
- "text_splitter = TokenTextSplitter(\n",
281
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
282
- ")"
283
- ]
284
- },
285
- {
286
- "cell_type": "code",
287
- "execution_count": 10,
288
- "metadata": {
289
- "colab": {
290
- "base_uri": "https://localhost:8080/",
291
- "height": 331,
292
- "referenced_widgets": [
293
- "3fbabd8a8660461ba5e7bc08ef39139a",
294
- "df2365556ae242a2ab1a119f9a31a561",
295
- "5f4b9d32df8f446e858e4c289dc282f9",
296
- "5b588f83a15d42d9aca888e06bbd95ff",
297
- "ad073bca655540809e39f26538d2ec0d",
298
- "13b9c5395bca4c3ba21265240cb936cf",
299
- "47a4586384274577a726c57605e7f8d9",
300
- "96a3bdece738481db57e811ccb74a974",
301
- "5c7973afd79349ed997a69120d0629b2",
302
- "af9b6ae927dd4764b9692507791bc67e",
303
- "134210510d49476e959dd7d032bbdbdc",
304
- "5f9bb065c2b74d2e8ded32e1306a7807",
305
- "73a06bc546a64f7f99a9e4a135319dcd",
306
- "ce48deaf4d8c49cdae92bfdbb3a78df0",
307
- "4a172e8c6aa44e41a42fc1d9cf714fd0",
308
- "0245f2604e4d49c8bd0210302746c47b",
309
- "e956dfab55084a9cbe33c8e331b511e7",
310
- "cb394578badd43a89850873ad2526542",
311
- "193aef33d9184055bb9223f56d456de6",
312
- "abfc9aa911ce4a5ea81c7c451f08295f",
313
- "e7937a1bc68441a080374911a6563376",
314
- "e532ed7bfef34f67b5fcacd9534eb789"
315
- ]
316
- },
317
- "id": "P9LDJ7o-Wsc-",
318
- "outputId": "01070c1f-dffa-4ab7-ad71-b07b76b12e03"
319
- },
320
- "outputs": [
321
- {
322
- "name": "stderr",
323
- "output_type": "stream",
324
- "text": [
325
- "Parsing nodes: 0%| | 0/14 [00:00<?, ?it/s]"
326
- ]
327
- },
328
- {
329
- "name": "stderr",
330
- "output_type": "stream",
331
- "text": [
332
- "Parsing nodes: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 14/14 [00:00<00:00, 28.48it/s]\n",
333
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:59<00:00, 1.82it/s]\n",
334
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [01:46<00:00, 1.02it/s]\n",
335
- "100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:28<00:00, 3.75it/s]\n",
336
- "Generating embeddings: 100%|β–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆβ–ˆ| 108/108 [00:01<00:00, 59.76it/s]\n"
337
- ]
338
- }
339
- ],
340
- "source": [
341
- "from llama_index.core.extractors import (\n",
342
- " SummaryExtractor,\n",
343
- " QuestionsAnsweredExtractor,\n",
344
- " KeywordExtractor,\n",
345
- ")\n",
346
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
347
- "from llama_index.core.ingestion import IngestionPipeline\n",
348
- "\n",
349
- "# Create the pipeline to apply the transformation on each chunk,\n",
350
- "# and store the transformed text in the chroma vector store.\n",
351
- "pipeline = IngestionPipeline(\n",
352
- " transformations=[\n",
353
- " text_splitter,\n",
354
- " QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
355
- " SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
356
- " KeywordExtractor(keywords=10, llm=llm),\n",
357
- " OpenAIEmbedding(),\n",
358
- " ],\n",
359
- " vector_store=vector_store\n",
360
- ")\n",
361
- "\n",
362
- "nodes = pipeline.run(documents=documents, show_progress=True);"
363
- ]
364
- },
365
- {
366
- "cell_type": "code",
367
- "execution_count": 11,
368
- "metadata": {
369
- "colab": {
370
- "base_uri": "https://localhost:8080/"
371
- },
372
- "id": "mPGa85hM2P3P",
373
- "outputId": "c106c463-2459-4b11-bbae-5bd5e2246011"
374
- },
375
- "outputs": [
376
- {
377
- "data": {
378
- "text/plain": [
379
- "108"
380
- ]
381
- },
382
- "execution_count": 11,
383
- "metadata": {},
384
- "output_type": "execute_result"
385
- }
386
- ],
387
- "source": [
388
- "len( nodes )"
389
- ]
390
- },
391
- {
392
- "cell_type": "code",
393
- "execution_count": 12,
394
- "metadata": {
395
- "id": "23x20bL3_jRb"
396
- },
397
- "outputs": [
398
- {
399
- "name": "stdout",
400
- "output_type": "stream",
401
- "text": [
402
- "updating: mini-llama-articles/ (stored 0%)\n",
403
- "updating: mini-llama-articles/chroma.sqlite3 (deflated 65%)\n",
404
- " adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/ (stored 0%)\n",
405
- " adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/data_level0.bin (deflated 100%)\n",
406
- " adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/length.bin (deflated 63%)\n",
407
- " adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/link_lists.bin (stored 0%)\n",
408
- " adding: mini-llama-articles/1a47984b-079a-4e72-809a-387c43e980b6/header.bin (deflated 61%)\n"
409
- ]
410
- }
411
- ],
412
- "source": [
413
- "# Compress the vector store directory to a zip file to be able to download and use later.\n",
414
- "!zip -r vectorstore.zip mini-llama-articles"
415
- ]
416
- },
417
- {
418
- "cell_type": "markdown",
419
- "metadata": {
420
- "id": "OWaT6rL7ksp8"
421
- },
422
- "source": [
423
- "# Load Indexes"
424
- ]
425
- },
426
- {
427
- "cell_type": "markdown",
428
- "metadata": {
429
- "id": "BLkmv3Yxp9mu"
430
- },
431
- "source": [
432
- "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
433
- ]
434
- },
435
- {
436
- "cell_type": "code",
437
- "execution_count": 13,
438
- "metadata": {
439
- "colab": {
440
- "base_uri": "https://localhost:8080/"
441
- },
442
- "id": "SodY2Xpf_kxg",
443
- "outputId": "a6f7ae4a-447c-4222-e400-0fe55e7e26d9"
444
- },
445
- "outputs": [],
446
- "source": [
447
- "# !unzip vectorstore.zip"
448
- ]
449
- },
450
- {
451
- "cell_type": "code",
452
- "execution_count": 14,
453
- "metadata": {
454
- "id": "mXi56KTXk2sp"
455
- },
456
- "outputs": [],
457
- "source": [
458
- "import chromadb\n",
459
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
460
- "\n",
461
- "# Load the vector store from the local storage.\n",
462
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
463
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
464
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
465
- ]
466
- },
467
- {
468
- "cell_type": "code",
469
- "execution_count": 15,
470
- "metadata": {
471
- "id": "jKXURvLtkuTS"
472
- },
473
- "outputs": [],
474
- "source": [
475
- "from llama_index.core import VectorStoreIndex\n",
476
- "\n",
477
- "# Create the index based on the vector store.\n",
478
- "vector_index = VectorStoreIndex.from_vector_store(vector_store)"
479
- ]
480
- },
481
- {
482
- "cell_type": "markdown",
483
- "metadata": {
484
- "id": "q0m5rl195bcz"
485
- },
486
- "source": [
487
- "# Disply result"
488
- ]
489
- },
490
- {
491
- "cell_type": "code",
492
- "execution_count": 16,
493
- "metadata": {
494
- "id": "4JpaHEmF5dSS"
495
- },
496
- "outputs": [],
497
- "source": [
498
- "# A simple function to show the response and the sources.\n",
499
- "def display_res(response):\n",
500
- " print(\"Response:\\n\\t\", response.response.replace(\"\\n\", \"\") )\n",
501
- "\n",
502
- " print(\"Sources:\")\n",
503
- " if response.source_nodes:\n",
504
- " for src in response.source_nodes:\n",
505
- " print(\"\\tNode ID\\t\", src.node_id)\n",
506
- " print(\"\\tText\\t\", src.text)\n",
507
- " print(\"\\tScore\\t\", src.score)\n",
508
- " print(\"\\t\" + \"-_\"*20)\n",
509
- " else:\n",
510
- " print(\"\\tNo sources used!\")"
511
- ]
512
- },
513
- {
514
- "cell_type": "markdown",
515
- "metadata": {
516
- "id": "hbStjvUJ1cft"
517
- },
518
- "source": [
519
- "# Chat Engine"
520
- ]
521
- },
522
- {
523
- "cell_type": "code",
524
- "execution_count": 17,
525
- "metadata": {
526
- "id": "kwWlDpoR1cRI"
527
- },
528
- "outputs": [],
529
- "source": [
530
- "# define the chat_engine by using the index\n",
531
- "chat_engine = vector_index.as_chat_engine() #chat_mode=\"best\""
532
- ]
533
- },
534
- {
535
- "cell_type": "code",
536
- "execution_count": 18,
537
- "metadata": {
538
- "colab": {
539
- "base_uri": "https://localhost:8080/"
540
- },
541
- "id": "ER3Lb-oN46lJ",
542
- "outputId": "8b34da39-622f-43f2-cb45-01a1ff37efd7"
543
- },
544
- "outputs": [
545
- {
546
- "name": "stdout",
547
- "output_type": "stream",
548
- "text": [
549
- "Response:\n",
550
- "\t The LLaMA2 model has four different model sizes with varying parameters: 7 billion, 13 billion, 34 billion, and 70 billion parameters.\n",
551
- "Sources:\n",
552
- "\tNode ID\t c3239b40-e206-4a80-b020-eea87cf471cc\n",
553
- "\tText\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
554
- "\tScore\t 0.7031083612095066\n",
555
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
556
- "\tNode ID\t bc123b3d-b031-4c09-9400-d60ba9a161d6\n",
557
- "\tText\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
558
- "\tScore\t 0.7004323686791223\n",
559
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
560
- ]
561
- }
562
- ],
563
- "source": [
564
- "# First Question:\n",
565
- "response = chat_engine.chat(\"Use the tool to answer, How many parameters LLaMA2 model has?\")\n",
566
- "display_res(response)"
567
- ]
568
- },
569
- {
570
- "cell_type": "code",
571
- "execution_count": 19,
572
- "metadata": {
573
- "colab": {
574
- "base_uri": "https://localhost:8080/"
575
- },
576
- "id": "3RRmiJEQ5R1Q",
577
- "outputId": "15efcc9b-583f-4efe-8e36-fa8b5160da16"
578
- },
579
- "outputs": [
580
- {
581
- "name": "stdout",
582
- "output_type": "stream",
583
- "text": [
584
- "Response:\n",
585
- "\t Why did the scarecrow win an award? Because he was outstanding in his field!\n",
586
- "Sources:\n",
587
- "\tNode ID\t 8685e48d-1fdb-4f55-8f62-6f2ea4cfaf5d\n",
588
- "\tText\t with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering strong competition to closed-source models. V. Ghost Attention: Enhancing Conversational Continuity One unique feature in Llama 2 is Ghost Attention, which ensures continuity in conversations. This means that even after multiple interactions, the model remembers its initial instructions, ensuring more coherent and consistent responses throughout the conversation. This feature significantly enhances the user experience and makes Llama 2 a more reliable language model for interactive applications. In the example below, on the left, it forgets to use an emoji after a few conversations. On the right, with Ghost Attention, even after having many conversations, it will remember the context and continue to use emojis in its response. VI. Temporal Capability: A Leap in Information Organization Meta reported a groundbreaking temporal capability, where the model organizes information based on time relevance. Each question posed to the model is associated with a date, and it responds accordingly by considering the event date before which the question becomes irrelevant. For example, if you ask the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete\n",
589
- "\tScore\t 0.5624851990178006\n",
590
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
591
- "\tNode ID\t e03eb322-1360-4c76-b461-236ec8312de1\n",
592
- "\tText\t Introduce GPT4All GPT4All is a large language model (LLM) chatbot developed by Nomic AI, the world's first information cartography company. It was fine-tuned from LLaMA 7B model, the leaked large language model from Meta (aka Facebook). GPT4All is trained on a massive dataset of text and code, and it can generate text, translate languages, write different kinds of creative content, and answer your questions in an informative way. GPT4All is available to the public on GitHub. LLaMA is available for commercial use under the GPL-3.0 license - while the LLaMA code is available for commercial use, the WEIGHTS are not. This effectively puts it in the same license class as GPT4All. Nomic is working on a GPT-J-based version of GPT4All with an open commercial license. GPT4All is not going to have a subscription fee ever. GPT4All is Free4All. Although GPT4All is still in its early stages, it has already left a notable mark on the AI landscape. Its popularity and capabilities are expected to expand further in the future. How to Run GPT4All Locally GPT4All Readme provides some details about its usage. Here will briefly demonstrate to run GPT4All locally on M1 CPU Mac. Download gpt4all-lora-quantized.bin from the-eye.Clone this repository, navigate to chat, and place the downloaded file there. Simply run the following command for M1 Mac: Now, it's ready to run locally. Please see a few snapshots below: Similar to ChatGPT, GPT4All has the ability to comprehend Chinese, a feature that Bard lacks. If you want to interact with GPT4All programmatically, you can install the nomic client as follows. Install the nomic client using pip install nomic.Use the following Python script to interact with GPT4All: Chat4All Demystified GPT4All aims to provide a cost-effective and fine-tuned model for high-quality LLM results. The GPT4All model was fine-tuned using an instance of LLaMA\n",
593
- "\tScore\t 0.5615408071202241\n",
594
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
595
- ]
596
- }
597
- ],
598
- "source": [
599
- "# Second Question:\n",
600
- "response = chat_engine.chat(\"Tell me a joke?\")\n",
601
- "display_res(response)"
602
- ]
603
- },
604
- {
605
- "cell_type": "code",
606
- "execution_count": 20,
607
- "metadata": {
608
- "colab": {
609
- "base_uri": "https://localhost:8080/"
610
- },
611
- "id": "8eOzp5Xc5Vbj",
612
- "outputId": "13bc6714-dd89-45b3-a86b-759806245241"
613
- },
614
- "outputs": [
615
- {
616
- "name": "stdout",
617
- "output_type": "stream",
618
- "text": [
619
- "Response:\n",
620
- "\t The first question you asked was \"How many parameters LLaMA2 model has?\"\n",
621
- "Sources:\n",
622
- "\tNo sources used!\n"
623
- ]
624
- }
625
- ],
626
- "source": [
627
- "# Third Question: (check if it can recall previous interactions)\n",
628
- "response = chat_engine.chat(\"What was the first question I asked?\")\n",
629
- "display_res(response)"
630
- ]
631
- },
632
- {
633
- "cell_type": "code",
634
- "execution_count": 21,
635
- "metadata": {
636
- "id": "7jfiLpru5VZT"
637
- },
638
- "outputs": [],
639
- "source": [
640
- "# Reset the session to clear the memory\n",
641
- "chat_engine.reset()"
642
- ]
643
- },
644
- {
645
- "cell_type": "code",
646
- "execution_count": 22,
647
- "metadata": {
648
- "colab": {
649
- "base_uri": "https://localhost:8080/"
650
- },
651
- "id": "Jt0q8RW25VXN",
652
- "outputId": "0e2d0d4e-c0ff-48bf-8df3-478fcdc66abd"
653
- },
654
- "outputs": [
655
- {
656
- "name": "stdout",
657
- "output_type": "stream",
658
- "text": [
659
- "Response:\n",
660
- "\t The first question you asked was \"How can a Q&A bot be built over private documents using OpenAI and LangChain?\"\n",
661
- "Sources:\n",
662
- "\tNode ID\t baa8a99c-f38b-4818-b854-5741598c0776\n",
663
- "\tText\t Private data to be used The example provided can be used with any dataset. I am using a data set that has Analyst recommendations from various stocks. For the purpose of demonstration, I have gathered publicly available analyst recommendations to showcase its capabilities. You can replace this with your own information to try this. Below is a partial extract of the information commonly found in these documents. If you wish to try it yourself, you can download analyst recommendations for your preferred stocks from online sources or access them through subscription platforms like Barron's. Although the example provided focuses on analyst recommendations, the underlying structure can be utilized to query various other types of documents in any industry as well. I have assembled such data for a few stocks for demonstration purposes. This includes Google, Microsoft, Meta, and Tesla. To facilitate easy access and updating of analysts' recommendations, all the recommendations can be organized into a designated folder. Each stock corresponds to a separate file within this folder. For example, if there are recommendations for 20 stocks, there will be 20 individual files. This organization enables convenient updating of information for each stock as new recommendations arrive, streamlining the process of managing and maintaining the most up-to-date data for each stock. Questions this Q&A bot application can answer The data we have for this application is stock market analyst recommendations for many stocks. Let's say you are looking for insight about Microsoft stock. You can ask any of the following questions as an example: What is the median target price for Microsoft (MSFT)?What is the highest price estimate for Microsoft (MSFT)?What is the lowest price estimate for Microsoft (MSFT)?How much percentage increase is expected in the stock price of Microsoft (MSFT)?How many analysts provided price forecasts for Microsoft (MSFT)?What is the current consensus among investment analysts regarding Microsoft (MSFT)?Has the consensus rating for Microsoft (MSFT) changed recently?When was the consensus rating last updated for Microsoft (MSFT)?Is the current recommendation for Microsoft (MSFT) to buy, sell, or hold the stock?Are there any recent analyst reports available for Microsoft (MSFT)? These questions cover various aspects of the stock analysis, including price forecasts, analyst recommendations, and recent changes in ratings. The\n",
664
- "\tScore\t 0.5990934490336279\n",
665
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
666
- "\tNode ID\t d03de2fa-70aa-4b32-8760-3af0dd0ebb24\n",
667
- "\tText\t you want to specify exact documents, you can do it the following way. To load the files you want to ingest, you can specify the path to each file individually. The loaded files can then be saved into a list. This list serves as the input that is sent to the vector database to store the data. The alternative approach is a more versatile method in which we can load all pertinent documents from a designated folder and store the file locations in a list for subsequent processing. This approach offers flexibility and allows for the efficient handling of multiple documents by capturing their locations in a centralized list, enabling seamless data retrieval and analysis. Load the documents into the vector store. When dealing with a vast number of documents, it becomes inefficient to send all documents (analyst recommendations) to your large language model (LLM) when seeking answers to specific questions. For instance, if your question pertains to MSFT, it would be more cost-effective to only send document extracts that reference MSFT to your LLM for answering the question. This approach helps optimize resource utilization. To achieve this, all documents are split into chunks and stored in a vector database in a numeric format (embeddings). When a new question is posed, the system queries the vector database for relevant text chunks related to this question, which is then shared with the LLM to generate an appropriate response. Within the LangChain framework, the VectorstoreIndexCreator class serves as a utility for creating a vector store index. This index stores vector representations of the documents (in chromadb), enabling various text operations, such as finding similar documents based on a specific question. When a user asks a question, a similarity search is performed in the vector store to get document chunks relevant to the question. The question, along with the chunks are sent to OpenAI to get the response back. Now we are ready to query these documents. Setting up the web application The application is presented in the browser using Streamlit, providing a user-friendly interface. Within the application, a text box is available for users to enter their questions. Upon submitting the question by pressing enter, the application processes the input and generates a corresponding response. This response is then displayed below the text box, allowing users to conveniently view the relevant\n",
668
- "\tScore\t 0.5904441993661576\n",
669
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
670
- ]
671
- }
672
- ],
673
- "source": [
674
- "# Fourth Question: (don't recall the previous interactions.)\n",
675
- "response = chat_engine.chat(\"What was the first question I asked?\")\n",
676
- "display_res(response)"
677
- ]
678
- },
679
- {
680
- "cell_type": "markdown",
681
- "metadata": {
682
- "id": "0Egsib7yPJGR"
683
- },
684
- "source": [
685
- "# Streaming"
686
- ]
687
- },
688
- {
689
- "cell_type": "code",
690
- "execution_count": 23,
691
- "metadata": {
692
- "colab": {
693
- "base_uri": "https://localhost:8080/"
694
- },
695
- "id": "zanJeMbaPJcq",
696
- "outputId": "de7f0905-c1b1-49ac-fb66-d1578da35cad"
697
- },
698
- "outputs": [
699
- {
700
- "name": "stdout",
701
- "output_type": "stream",
702
- "text": [
703
- "Here is a paragraph about the LLaMA2 model's capabilities:\n",
704
- "\n",
705
- "\"The Llama 2 model showcases impressive capabilities in the realm of open-source language models. It introduces innovative features like Ghost Attention, which enhances conversational continuity by ensuring consistent responses throughout interactions. Additionally, Llama 2 boasts a groundbreaking temporal capability that organizes information based on time relevance, leading to more contextually accurate responses. Despite facing challenges in coding and math problems compared to larger models like Chat GPT 4, Llama 2 demonstrates efficiency and potential in the market, competing well with both open-source and closed-source models. Its ability to balance helpfulness and safety in optimizing responses further solidifies its position as a reliable and advanced language model for commercial use.\""
706
- ]
707
- }
708
- ],
709
- "source": [
710
- "# Stream the words as soon as they are available instead of waiting for the model to finish generation.\n",
711
- "streaming_response = chat_engine.stream_chat(\"Write a paragraph about the LLaMA2 model's capabilities.\")\n",
712
- "for token in streaming_response.response_gen:\n",
713
- " print(token, end=\"\")"
714
- ]
715
- },
716
- {
717
- "cell_type": "markdown",
718
- "metadata": {
719
- "id": "DuRgOJ2AHMJh"
720
- },
721
- "source": [
722
- "## Condense Question"
723
- ]
724
- },
725
- {
726
- "cell_type": "markdown",
727
- "metadata": {
728
- "id": "Yb2Lt41jq145"
729
- },
730
- "source": [
731
- "Enhance the input prompt by looking at the previous chat history along with the present question. The refined prompt can then be used to fetch the nodes."
732
- ]
733
- },
734
- {
735
- "cell_type": "code",
736
- "execution_count": 24,
737
- "metadata": {
738
- "id": "v0gmM5LGIaRl"
739
- },
740
- "outputs": [],
741
- "source": [
742
- "# Define GPT-4 model that will be used by the chat_engine to improve the query.\n",
743
- "gpt4 = OpenAI(temperature=0.9, model=\"gpt-4-0125-preview\")"
744
- ]
745
- },
746
- {
747
- "cell_type": "code",
748
- "execution_count": 25,
749
- "metadata": {
750
- "id": "EDWsaBTBIhK7"
751
- },
752
- "outputs": [],
753
- "source": [
754
- "chat_engine = vector_index.as_chat_engine(chat_mode=\"condense_question\", llm=gpt4, verbose=True)"
755
- ]
756
- },
757
- {
758
- "cell_type": "code",
759
- "execution_count": 26,
760
- "metadata": {
761
- "colab": {
762
- "base_uri": "https://localhost:8080/"
763
- },
764
- "id": "h4c--hJ75VU2",
765
- "outputId": "e80fd9bf-e6d5-4532-8771-8cbf781e782e"
766
- },
767
- "outputs": [
768
- {
769
- "name": "stdout",
770
- "output_type": "stream",
771
- "text": [
772
- "Querying with: Using the tool at your disposal, can you please determine which company released the LLaMA2 model and explain what specific functionality or purpose this model is known for?\n",
773
- "Response:\n",
774
- "\t The LLaMA2 model was released by Meta. The model is known for its temporal awareness feature which enhances the accuracy of its responses by delivering more contextually accurate responses based on time relevance. For example, for the question, \"How long ago did Barack Obama become president?\", it only considers information relevant after 2008. Meta's open-sourcing of LLaMA2 provides developers and researchers with commercial access to the advanced language model, which represents a significant shift in the AI industry.\n",
775
- "Sources:\n",
776
- "\tNode ID\t 7adec56f-6714-4376-8ebf-180b694c4d59\n",
777
- "\tText\t LLaMA: Meta's new AI tool According to the official release, LLaMA is a foundational language model developed to assist 'researchers and academics' in their work (as opposed to the average web user) to understand and study these NLP models. Leveraging AI in such a way could give researchers an edge in terms of time spent. You may not know this, but this would be Meta's third LLM after Blender Bot 3 and Galactica. However, the two LLMs were shut down soon, and Meta stopped their further development, as it produced erroneous results. Before moving further, it is important to emphasize that LLaMA is NOT a chatbot like ChatGPT. As I mentioned before, it is a 'research tool' for researchers. We can expect the initial versions of LLaMA to be a bit more technical and indirect to use as opposed to the case with ChatGPT, which was very direct, interactive, and a lot easy to use. \"Smaller, more performant models such as LLaMA enable ... research community who don't have access to large amounts of infrastructure to study these models.. further democratizing access in this important, fast-changing field,\" said Meta in its official blog. Meta's effort of \"democratizing\" access to the public could shed light on one of the critical issues of Generative AI - toxicity and bias. ChatGPT and other LLMs (obviously, I am referring to Bing) have a track record of responding in a way that is toxic and, well... evil. The Verge and major critics have covered it in much detail. Oh and the community did get the access, but not in the way Meta anticipated. On March 3rd, a downloadable torrent of the LLaMA system was posted on 4chan. 4chan is an anonymous online forum known for its controversial content and diverse range of discussions, which has nearly 222 million unique monthly visitors. LLaMA is currently not in use on any of Meta's products. But Meta has plans to make it available to researchers before they can use them in their own products. It's worth mentioning that Meta did not release\n",
778
- "\tScore\t 0.696738130166742\n",
779
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
780
- "\tNode ID\t d19bbfc9-9ff8-4c21-ba5a-ef78e5db2d87\n",
781
- "\tText\t the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete with Llama 2 or join hands with the open-source community to make the open-source models better? Meanwhile, Microsoft's move to host Llama 2 on Azure despite having significant investment in ChatGPT raises interesting questions. Will users prefer the capabilities and transparency of an open-source model like Llama 2 over closed, proprietary options? The stakes are high, as Meta's bold democratization play stands to reshape preferences and partnerships in the AI space. One thing is certain - the era of open language model competition has begun. VIII. Conclusion With the launch of Llama 2, Meta has achieved a landmark breakthrough in open-source language models, unleashing new potential through its commercial accessibility. Llama 2's formidable capabilities in natural language processing, along with robust safety protocols and temporal reasoning, set new benchmarks for the field. While select limitations around math and coding exist presently, Llama 2's strengths far outweigh its weaknesses. As Meta continues honing Llama technology, this latest innovation promises to be truly transformative. By open-sourcing such an advanced model, Meta is propelling democratization and proliferation of AI across industries. From healthcare to education and beyond, Llama 2 stands to shape the landscape by putting groundbreaking language modeling into the hands of all developers and researchers. The possibilities unlocked by this open-source approach signal a shift towards a more collaborative, creative AI future.\n",
782
- "\tScore\t 0.692383316770113\n",
783
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
784
- ]
785
- }
786
- ],
787
- "source": [
788
- "response = chat_engine.chat(\"Use the tool to answer, which company released LLaMA2 model? What is the model useful for?\")\n",
789
- "display_res(response)"
790
- ]
791
- },
792
- {
793
- "cell_type": "markdown",
794
- "metadata": {
795
- "id": "ysL9ONePOsGB"
796
- },
797
- "source": [
798
- "## REACT"
799
- ]
800
- },
801
- {
802
- "cell_type": "markdown",
803
- "metadata": {
804
- "id": "KiEFmxAtrmF-"
805
- },
806
- "source": [
807
- "ReAct is an agent-based chat mode that uses a loop to decide on querying a data engine during interactions, offering flexibility but relying on the Large Language Model's quality for effective responses, requiring careful management to avoid inaccurate answers."
808
- ]
809
- },
810
- {
811
- "cell_type": "code",
812
- "execution_count": 27,
813
- "metadata": {
814
- "id": "-M1jWoKXOs2t"
815
- },
816
- "outputs": [],
817
- "source": [
818
- "chat_engine = vector_index.as_chat_engine(chat_mode=\"react\", verbose=True)"
819
- ]
820
- },
821
- {
822
- "cell_type": "code",
823
- "execution_count": 28,
824
- "metadata": {
825
- "colab": {
826
- "base_uri": "https://localhost:8080/"
827
- },
828
- "id": "UZkEW1SSOs0H",
829
- "outputId": "4869c5fc-e0e1-44c6-e7f0-87db92bb2eb6"
830
- },
831
- "outputs": [
832
- {
833
- "name": "stdout",
834
- "output_type": "stream",
835
- "text": [
836
- "Added user message to memory: Which company released LLaMA2 model? What is the model useful for?\n",
837
- "=== Calling Function ===\n",
838
- "Calling function: query_engine_tool with args: {\"input\": \"Which company released LLaMA2 model?\"}\n",
839
- "Got output: Meta released the LLaMA2 model.\n",
840
- "========================\n",
841
- "\n",
842
- "=== Calling Function ===\n",
843
- "Calling function: query_engine_tool with args: {\"input\": \"What is the LLaMA2 model useful for?\"}\n",
844
- "Got output: The Llama 2 model is useful for businesses to integrate into products to create AI-powered applications.\n",
845
- "========================\n",
846
- "\n"
847
- ]
848
- }
849
- ],
850
- "source": [
851
- "response = chat_engine.chat(\"Which company released LLaMA2 model? What is the model useful for?\")"
852
- ]
853
- },
854
- {
855
- "cell_type": "code",
856
- "execution_count": 29,
857
- "metadata": {
858
- "colab": {
859
- "base_uri": "https://localhost:8080/"
860
- },
861
- "id": "eW5P1lD4Osxf",
862
- "outputId": "b128bc94-081b-49aa-c549-7d7d7be90b63"
863
- },
864
- "outputs": [
865
- {
866
- "name": "stdout",
867
- "output_type": "stream",
868
- "text": [
869
- "Response:\n",
870
- "\t The LLaMA2 model was released by Meta. It is useful for businesses to integrate into products to create AI-powered applications.\n",
871
- "Sources:\n",
872
- "\tNode ID\t 7adec56f-6714-4376-8ebf-180b694c4d59\n",
873
- "\tText\t LLaMA: Meta's new AI tool According to the official release, LLaMA is a foundational language model developed to assist 'researchers and academics' in their work (as opposed to the average web user) to understand and study these NLP models. Leveraging AI in such a way could give researchers an edge in terms of time spent. You may not know this, but this would be Meta's third LLM after Blender Bot 3 and Galactica. However, the two LLMs were shut down soon, and Meta stopped their further development, as it produced erroneous results. Before moving further, it is important to emphasize that LLaMA is NOT a chatbot like ChatGPT. As I mentioned before, it is a 'research tool' for researchers. We can expect the initial versions of LLaMA to be a bit more technical and indirect to use as opposed to the case with ChatGPT, which was very direct, interactive, and a lot easy to use. \"Smaller, more performant models such as LLaMA enable ... research community who don't have access to large amounts of infrastructure to study these models.. further democratizing access in this important, fast-changing field,\" said Meta in its official blog. Meta's effort of \"democratizing\" access to the public could shed light on one of the critical issues of Generative AI - toxicity and bias. ChatGPT and other LLMs (obviously, I am referring to Bing) have a track record of responding in a way that is toxic and, well... evil. The Verge and major critics have covered it in much detail. Oh and the community did get the access, but not in the way Meta anticipated. On March 3rd, a downloadable torrent of the LLaMA system was posted on 4chan. 4chan is an anonymous online forum known for its controversial content and diverse range of discussions, which has nearly 222 million unique monthly visitors. LLaMA is currently not in use on any of Meta's products. But Meta has plans to make it available to researchers before they can use them in their own products. It's worth mentioning that Meta did not release\n",
874
- "\tScore\t 0.6701682333186606\n",
875
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
876
- "\tNode ID\t d19bbfc9-9ff8-4c21-ba5a-ef78e5db2d87\n",
877
- "\tText\t the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete with Llama 2 or join hands with the open-source community to make the open-source models better? Meanwhile, Microsoft's move to host Llama 2 on Azure despite having significant investment in ChatGPT raises interesting questions. Will users prefer the capabilities and transparency of an open-source model like Llama 2 over closed, proprietary options? The stakes are high, as Meta's bold democratization play stands to reshape preferences and partnerships in the AI space. One thing is certain - the era of open language model competition has begun. VIII. Conclusion With the launch of Llama 2, Meta has achieved a landmark breakthrough in open-source language models, unleashing new potential through its commercial accessibility. Llama 2's formidable capabilities in natural language processing, along with robust safety protocols and temporal reasoning, set new benchmarks for the field. While select limitations around math and coding exist presently, Llama 2's strengths far outweigh its weaknesses. As Meta continues honing Llama technology, this latest innovation promises to be truly transformative. By open-sourcing such an advanced model, Meta is propelling democratization and proliferation of AI across industries. From healthcare to education and beyond, Llama 2 stands to shape the landscape by putting groundbreaking language modeling into the hands of all developers and researchers. The possibilities unlocked by this open-source approach signal a shift towards a more collaborative, creative AI future.\n",
878
- "\tScore\t 0.6696485090138802\n",
879
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
880
- "\tNode ID\t bc123b3d-b031-4c09-9400-d60ba9a161d6\n",
881
- "\tText\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
882
- "\tScore\t 0.7141285410107295\n",
883
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
884
- "\tNode ID\t c3239b40-e206-4a80-b020-eea87cf471cc\n",
885
- "\tText\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
886
- "\tScore\t 0.7116485926146265\n",
887
- "\t-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
888
- ]
889
- }
890
- ],
891
- "source": [
892
- "display_res(response)"
893
- ]
894
- },
895
- {
896
- "cell_type": "code",
897
- "execution_count": null,
898
- "metadata": {
899
- "id": "zf6r2AmFOsca"
900
- },
901
- "outputs": [],
902
- "source": []
903
- }
904
- ],
905
- "metadata": {
906
- "colab": {
907
- "authorship_tag": "ABX9TyNlfE1aMk+m6avCgDavT2ZF",
908
- "include_colab_link": true,
909
- "provenance": []
910
- },
911
- "kernelspec": {
912
- "display_name": "Python 3",
913
- "name": "python3"
914
- },
915
- "language_info": {
916
- "codemirror_mode": {
917
- "name": "ipython",
918
- "version": 3
919
- },
920
- "file_extension": ".py",
921
- "mimetype": "text/x-python",
922
- "name": "python",
923
- "nbconvert_exporter": "python",
924
- "pygments_lexer": "ipython3",
925
- "version": "3.11.8"
926
- },
927
- "widgets": {
928
- "application/vnd.jupyter.widget-state+json": {
929
- "0245f2604e4d49c8bd0210302746c47b": {
930
- "model_module": "@jupyter-widgets/base",
931
- "model_module_version": "1.2.0",
932
- "model_name": "LayoutModel",
933
- "state": {
934
- "_model_module": "@jupyter-widgets/base",
935
- "_model_module_version": "1.2.0",
936
- "_model_name": "LayoutModel",
937
- "_view_count": null,
938
- "_view_module": "@jupyter-widgets/base",
939
- "_view_module_version": "1.2.0",
940
- "_view_name": "LayoutView",
941
- "align_content": null,
942
- "align_items": null,
943
- "align_self": null,
944
- "border": null,
945
- "bottom": null,
946
- "display": null,
947
- "flex": null,
948
- "flex_flow": null,
949
- "grid_area": null,
950
- "grid_auto_columns": null,
951
- "grid_auto_flow": null,
952
- "grid_auto_rows": null,
953
- "grid_column": null,
954
- "grid_gap": null,
955
- "grid_row": null,
956
- "grid_template_areas": null,
957
- "grid_template_columns": null,
958
- "grid_template_rows": null,
959
- "height": null,
960
- "justify_content": null,
961
- "justify_items": null,
962
- "left": null,
963
- "margin": null,
964
- "max_height": null,
965
- "max_width": null,
966
- "min_height": null,
967
- "min_width": null,
968
- "object_fit": null,
969
- "object_position": null,
970
- "order": null,
971
- "overflow": null,
972
- "overflow_x": null,
973
- "overflow_y": null,
974
- "padding": null,
975
- "right": null,
976
- "top": null,
977
- "visibility": null,
978
- "width": null
979
- }
980
- },
981
- "134210510d49476e959dd7d032bbdbdc": {
982
- "model_module": "@jupyter-widgets/controls",
983
- "model_module_version": "1.5.0",
984
- "model_name": "DescriptionStyleModel",
985
- "state": {
986
- "_model_module": "@jupyter-widgets/controls",
987
- "_model_module_version": "1.5.0",
988
- "_model_name": "DescriptionStyleModel",
989
- "_view_count": null,
990
- "_view_module": "@jupyter-widgets/base",
991
- "_view_module_version": "1.2.0",
992
- "_view_name": "StyleView",
993
- "description_width": ""
994
- }
995
- },
996
- "13b9c5395bca4c3ba21265240cb936cf": {
997
- "model_module": "@jupyter-widgets/base",
998
- "model_module_version": "1.2.0",
999
- "model_name": "LayoutModel",
1000
- "state": {
1001
- "_model_module": "@jupyter-widgets/base",
1002
- "_model_module_version": "1.2.0",
1003
- "_model_name": "LayoutModel",
1004
- "_view_count": null,
1005
- "_view_module": "@jupyter-widgets/base",
1006
- "_view_module_version": "1.2.0",
1007
- "_view_name": "LayoutView",
1008
- "align_content": null,
1009
- "align_items": null,
1010
- "align_self": null,
1011
- "border": null,
1012
- "bottom": null,
1013
- "display": null,
1014
- "flex": null,
1015
- "flex_flow": null,
1016
- "grid_area": null,
1017
- "grid_auto_columns": null,
1018
- "grid_auto_flow": null,
1019
- "grid_auto_rows": null,
1020
- "grid_column": null,
1021
- "grid_gap": null,
1022
- "grid_row": null,
1023
- "grid_template_areas": null,
1024
- "grid_template_columns": null,
1025
- "grid_template_rows": null,
1026
- "height": null,
1027
- "justify_content": null,
1028
- "justify_items": null,
1029
- "left": null,
1030
- "margin": null,
1031
- "max_height": null,
1032
- "max_width": null,
1033
- "min_height": null,
1034
- "min_width": null,
1035
- "object_fit": null,
1036
- "object_position": null,
1037
- "order": null,
1038
- "overflow": null,
1039
- "overflow_x": null,
1040
- "overflow_y": null,
1041
- "padding": null,
1042
- "right": null,
1043
- "top": null,
1044
- "visibility": null,
1045
- "width": null
1046
- }
1047
- },
1048
- "193aef33d9184055bb9223f56d456de6": {
1049
- "model_module": "@jupyter-widgets/base",
1050
- "model_module_version": "1.2.0",
1051
- "model_name": "LayoutModel",
1052
- "state": {
1053
- "_model_module": "@jupyter-widgets/base",
1054
- "_model_module_version": "1.2.0",
1055
- "_model_name": "LayoutModel",
1056
- "_view_count": null,
1057
- "_view_module": "@jupyter-widgets/base",
1058
- "_view_module_version": "1.2.0",
1059
- "_view_name": "LayoutView",
1060
- "align_content": null,
1061
- "align_items": null,
1062
- "align_self": null,
1063
- "border": null,
1064
- "bottom": null,
1065
- "display": null,
1066
- "flex": null,
1067
- "flex_flow": null,
1068
- "grid_area": null,
1069
- "grid_auto_columns": null,
1070
- "grid_auto_flow": null,
1071
- "grid_auto_rows": null,
1072
- "grid_column": null,
1073
- "grid_gap": null,
1074
- "grid_row": null,
1075
- "grid_template_areas": null,
1076
- "grid_template_columns": null,
1077
- "grid_template_rows": null,
1078
- "height": null,
1079
- "justify_content": null,
1080
- "justify_items": null,
1081
- "left": null,
1082
- "margin": null,
1083
- "max_height": null,
1084
- "max_width": null,
1085
- "min_height": null,
1086
- "min_width": null,
1087
- "object_fit": null,
1088
- "object_position": null,
1089
- "order": null,
1090
- "overflow": null,
1091
- "overflow_x": null,
1092
- "overflow_y": null,
1093
- "padding": null,
1094
- "right": null,
1095
- "top": null,
1096
- "visibility": null,
1097
- "width": null
1098
- }
1099
- },
1100
- "3fbabd8a8660461ba5e7bc08ef39139a": {
1101
- "model_module": "@jupyter-widgets/controls",
1102
- "model_module_version": "1.5.0",
1103
- "model_name": "HBoxModel",
1104
- "state": {
1105
- "_dom_classes": [],
1106
- "_model_module": "@jupyter-widgets/controls",
1107
- "_model_module_version": "1.5.0",
1108
- "_model_name": "HBoxModel",
1109
- "_view_count": null,
1110
- "_view_module": "@jupyter-widgets/controls",
1111
- "_view_module_version": "1.5.0",
1112
- "_view_name": "HBoxView",
1113
- "box_style": "",
1114
- "children": [
1115
- "IPY_MODEL_df2365556ae242a2ab1a119f9a31a561",
1116
- "IPY_MODEL_5f4b9d32df8f446e858e4c289dc282f9",
1117
- "IPY_MODEL_5b588f83a15d42d9aca888e06bbd95ff"
1118
- ],
1119
- "layout": "IPY_MODEL_ad073bca655540809e39f26538d2ec0d"
1120
- }
1121
- },
1122
- "47a4586384274577a726c57605e7f8d9": {
1123
- "model_module": "@jupyter-widgets/controls",
1124
- "model_module_version": "1.5.0",
1125
- "model_name": "DescriptionStyleModel",
1126
- "state": {
1127
- "_model_module": "@jupyter-widgets/controls",
1128
- "_model_module_version": "1.5.0",
1129
- "_model_name": "DescriptionStyleModel",
1130
- "_view_count": null,
1131
- "_view_module": "@jupyter-widgets/base",
1132
- "_view_module_version": "1.2.0",
1133
- "_view_name": "StyleView",
1134
- "description_width": ""
1135
- }
1136
- },
1137
- "4a172e8c6aa44e41a42fc1d9cf714fd0": {
1138
- "model_module": "@jupyter-widgets/controls",
1139
- "model_module_version": "1.5.0",
1140
- "model_name": "HTMLModel",
1141
- "state": {
1142
- "_dom_classes": [],
1143
- "_model_module": "@jupyter-widgets/controls",
1144
- "_model_module_version": "1.5.0",
1145
- "_model_name": "HTMLModel",
1146
- "_view_count": null,
1147
- "_view_module": "@jupyter-widgets/controls",
1148
- "_view_module_version": "1.5.0",
1149
- "_view_name": "HTMLView",
1150
- "description": "",
1151
- "description_tooltip": null,
1152
- "layout": "IPY_MODEL_e7937a1bc68441a080374911a6563376",
1153
- "placeholder": "​",
1154
- "style": "IPY_MODEL_e532ed7bfef34f67b5fcacd9534eb789",
1155
- "value": " 108/108 [00:03&lt;00:00, 33.70it/s]"
1156
- }
1157
- },
1158
- "5b588f83a15d42d9aca888e06bbd95ff": {
1159
- "model_module": "@jupyter-widgets/controls",
1160
- "model_module_version": "1.5.0",
1161
- "model_name": "HTMLModel",
1162
- "state": {
1163
- "_dom_classes": [],
1164
- "_model_module": "@jupyter-widgets/controls",
1165
- "_model_module_version": "1.5.0",
1166
- "_model_name": "HTMLModel",
1167
- "_view_count": null,
1168
- "_view_module": "@jupyter-widgets/controls",
1169
- "_view_module_version": "1.5.0",
1170
- "_view_name": "HTMLView",
1171
- "description": "",
1172
- "description_tooltip": null,
1173
- "layout": "IPY_MODEL_af9b6ae927dd4764b9692507791bc67e",
1174
- "placeholder": "​",
1175
- "style": "IPY_MODEL_134210510d49476e959dd7d032bbdbdc",
1176
- "value": " 14/14 [00:00&lt;00:00, 21.41it/s]"
1177
- }
1178
- },
1179
- "5c7973afd79349ed997a69120d0629b2": {
1180
- "model_module": "@jupyter-widgets/controls",
1181
- "model_module_version": "1.5.0",
1182
- "model_name": "ProgressStyleModel",
1183
- "state": {
1184
- "_model_module": "@jupyter-widgets/controls",
1185
- "_model_module_version": "1.5.0",
1186
- "_model_name": "ProgressStyleModel",
1187
- "_view_count": null,
1188
- "_view_module": "@jupyter-widgets/base",
1189
- "_view_module_version": "1.2.0",
1190
- "_view_name": "StyleView",
1191
- "bar_color": null,
1192
- "description_width": ""
1193
- }
1194
- },
1195
- "5f4b9d32df8f446e858e4c289dc282f9": {
1196
- "model_module": "@jupyter-widgets/controls",
1197
- "model_module_version": "1.5.0",
1198
- "model_name": "FloatProgressModel",
1199
- "state": {
1200
- "_dom_classes": [],
1201
- "_model_module": "@jupyter-widgets/controls",
1202
- "_model_module_version": "1.5.0",
1203
- "_model_name": "FloatProgressModel",
1204
- "_view_count": null,
1205
- "_view_module": "@jupyter-widgets/controls",
1206
- "_view_module_version": "1.5.0",
1207
- "_view_name": "ProgressView",
1208
- "bar_style": "success",
1209
- "description": "",
1210
- "description_tooltip": null,
1211
- "layout": "IPY_MODEL_96a3bdece738481db57e811ccb74a974",
1212
- "max": 14,
1213
- "min": 0,
1214
- "orientation": "horizontal",
1215
- "style": "IPY_MODEL_5c7973afd79349ed997a69120d0629b2",
1216
- "value": 14
1217
- }
1218
- },
1219
- "5f9bb065c2b74d2e8ded32e1306a7807": {
1220
- "model_module": "@jupyter-widgets/controls",
1221
- "model_module_version": "1.5.0",
1222
- "model_name": "HBoxModel",
1223
- "state": {
1224
- "_dom_classes": [],
1225
- "_model_module": "@jupyter-widgets/controls",
1226
- "_model_module_version": "1.5.0",
1227
- "_model_name": "HBoxModel",
1228
- "_view_count": null,
1229
- "_view_module": "@jupyter-widgets/controls",
1230
- "_view_module_version": "1.5.0",
1231
- "_view_name": "HBoxView",
1232
- "box_style": "",
1233
- "children": [
1234
- "IPY_MODEL_73a06bc546a64f7f99a9e4a135319dcd",
1235
- "IPY_MODEL_ce48deaf4d8c49cdae92bfdbb3a78df0",
1236
- "IPY_MODEL_4a172e8c6aa44e41a42fc1d9cf714fd0"
1237
- ],
1238
- "layout": "IPY_MODEL_0245f2604e4d49c8bd0210302746c47b"
1239
- }
1240
- },
1241
- "73a06bc546a64f7f99a9e4a135319dcd": {
1242
- "model_module": "@jupyter-widgets/controls",
1243
- "model_module_version": "1.5.0",
1244
- "model_name": "HTMLModel",
1245
- "state": {
1246
- "_dom_classes": [],
1247
- "_model_module": "@jupyter-widgets/controls",
1248
- "_model_module_version": "1.5.0",
1249
- "_model_name": "HTMLModel",
1250
- "_view_count": null,
1251
- "_view_module": "@jupyter-widgets/controls",
1252
- "_view_module_version": "1.5.0",
1253
- "_view_name": "HTMLView",
1254
- "description": "",
1255
- "description_tooltip": null,
1256
- "layout": "IPY_MODEL_e956dfab55084a9cbe33c8e331b511e7",
1257
- "placeholder": "​",
1258
- "style": "IPY_MODEL_cb394578badd43a89850873ad2526542",
1259
- "value": "Generating embeddings: 100%"
1260
- }
1261
- },
1262
- "96a3bdece738481db57e811ccb74a974": {
1263
- "model_module": "@jupyter-widgets/base",
1264
- "model_module_version": "1.2.0",
1265
- "model_name": "LayoutModel",
1266
- "state": {
1267
- "_model_module": "@jupyter-widgets/base",
1268
- "_model_module_version": "1.2.0",
1269
- "_model_name": "LayoutModel",
1270
- "_view_count": null,
1271
- "_view_module": "@jupyter-widgets/base",
1272
- "_view_module_version": "1.2.0",
1273
- "_view_name": "LayoutView",
1274
- "align_content": null,
1275
- "align_items": null,
1276
- "align_self": null,
1277
- "border": null,
1278
- "bottom": null,
1279
- "display": null,
1280
- "flex": null,
1281
- "flex_flow": null,
1282
- "grid_area": null,
1283
- "grid_auto_columns": null,
1284
- "grid_auto_flow": null,
1285
- "grid_auto_rows": null,
1286
- "grid_column": null,
1287
- "grid_gap": null,
1288
- "grid_row": null,
1289
- "grid_template_areas": null,
1290
- "grid_template_columns": null,
1291
- "grid_template_rows": null,
1292
- "height": null,
1293
- "justify_content": null,
1294
- "justify_items": null,
1295
- "left": null,
1296
- "margin": null,
1297
- "max_height": null,
1298
- "max_width": null,
1299
- "min_height": null,
1300
- "min_width": null,
1301
- "object_fit": null,
1302
- "object_position": null,
1303
- "order": null,
1304
- "overflow": null,
1305
- "overflow_x": null,
1306
- "overflow_y": null,
1307
- "padding": null,
1308
- "right": null,
1309
- "top": null,
1310
- "visibility": null,
1311
- "width": null
1312
- }
1313
- },
1314
- "abfc9aa911ce4a5ea81c7c451f08295f": {
1315
- "model_module": "@jupyter-widgets/controls",
1316
- "model_module_version": "1.5.0",
1317
- "model_name": "ProgressStyleModel",
1318
- "state": {
1319
- "_model_module": "@jupyter-widgets/controls",
1320
- "_model_module_version": "1.5.0",
1321
- "_model_name": "ProgressStyleModel",
1322
- "_view_count": null,
1323
- "_view_module": "@jupyter-widgets/base",
1324
- "_view_module_version": "1.2.0",
1325
- "_view_name": "StyleView",
1326
- "bar_color": null,
1327
- "description_width": ""
1328
- }
1329
- },
1330
- "ad073bca655540809e39f26538d2ec0d": {
1331
- "model_module": "@jupyter-widgets/base",
1332
- "model_module_version": "1.2.0",
1333
- "model_name": "LayoutModel",
1334
- "state": {
1335
- "_model_module": "@jupyter-widgets/base",
1336
- "_model_module_version": "1.2.0",
1337
- "_model_name": "LayoutModel",
1338
- "_view_count": null,
1339
- "_view_module": "@jupyter-widgets/base",
1340
- "_view_module_version": "1.2.0",
1341
- "_view_name": "LayoutView",
1342
- "align_content": null,
1343
- "align_items": null,
1344
- "align_self": null,
1345
- "border": null,
1346
- "bottom": null,
1347
- "display": null,
1348
- "flex": null,
1349
- "flex_flow": null,
1350
- "grid_area": null,
1351
- "grid_auto_columns": null,
1352
- "grid_auto_flow": null,
1353
- "grid_auto_rows": null,
1354
- "grid_column": null,
1355
- "grid_gap": null,
1356
- "grid_row": null,
1357
- "grid_template_areas": null,
1358
- "grid_template_columns": null,
1359
- "grid_template_rows": null,
1360
- "height": null,
1361
- "justify_content": null,
1362
- "justify_items": null,
1363
- "left": null,
1364
- "margin": null,
1365
- "max_height": null,
1366
- "max_width": null,
1367
- "min_height": null,
1368
- "min_width": null,
1369
- "object_fit": null,
1370
- "object_position": null,
1371
- "order": null,
1372
- "overflow": null,
1373
- "overflow_x": null,
1374
- "overflow_y": null,
1375
- "padding": null,
1376
- "right": null,
1377
- "top": null,
1378
- "visibility": null,
1379
- "width": null
1380
- }
1381
- },
1382
- "af9b6ae927dd4764b9692507791bc67e": {
1383
- "model_module": "@jupyter-widgets/base",
1384
- "model_module_version": "1.2.0",
1385
- "model_name": "LayoutModel",
1386
- "state": {
1387
- "_model_module": "@jupyter-widgets/base",
1388
- "_model_module_version": "1.2.0",
1389
- "_model_name": "LayoutModel",
1390
- "_view_count": null,
1391
- "_view_module": "@jupyter-widgets/base",
1392
- "_view_module_version": "1.2.0",
1393
- "_view_name": "LayoutView",
1394
- "align_content": null,
1395
- "align_items": null,
1396
- "align_self": null,
1397
- "border": null,
1398
- "bottom": null,
1399
- "display": null,
1400
- "flex": null,
1401
- "flex_flow": null,
1402
- "grid_area": null,
1403
- "grid_auto_columns": null,
1404
- "grid_auto_flow": null,
1405
- "grid_auto_rows": null,
1406
- "grid_column": null,
1407
- "grid_gap": null,
1408
- "grid_row": null,
1409
- "grid_template_areas": null,
1410
- "grid_template_columns": null,
1411
- "grid_template_rows": null,
1412
- "height": null,
1413
- "justify_content": null,
1414
- "justify_items": null,
1415
- "left": null,
1416
- "margin": null,
1417
- "max_height": null,
1418
- "max_width": null,
1419
- "min_height": null,
1420
- "min_width": null,
1421
- "object_fit": null,
1422
- "object_position": null,
1423
- "order": null,
1424
- "overflow": null,
1425
- "overflow_x": null,
1426
- "overflow_y": null,
1427
- "padding": null,
1428
- "right": null,
1429
- "top": null,
1430
- "visibility": null,
1431
- "width": null
1432
- }
1433
- },
1434
- "cb394578badd43a89850873ad2526542": {
1435
- "model_module": "@jupyter-widgets/controls",
1436
- "model_module_version": "1.5.0",
1437
- "model_name": "DescriptionStyleModel",
1438
- "state": {
1439
- "_model_module": "@jupyter-widgets/controls",
1440
- "_model_module_version": "1.5.0",
1441
- "_model_name": "DescriptionStyleModel",
1442
- "_view_count": null,
1443
- "_view_module": "@jupyter-widgets/base",
1444
- "_view_module_version": "1.2.0",
1445
- "_view_name": "StyleView",
1446
- "description_width": ""
1447
- }
1448
- },
1449
- "ce48deaf4d8c49cdae92bfdbb3a78df0": {
1450
- "model_module": "@jupyter-widgets/controls",
1451
- "model_module_version": "1.5.0",
1452
- "model_name": "FloatProgressModel",
1453
- "state": {
1454
- "_dom_classes": [],
1455
- "_model_module": "@jupyter-widgets/controls",
1456
- "_model_module_version": "1.5.0",
1457
- "_model_name": "FloatProgressModel",
1458
- "_view_count": null,
1459
- "_view_module": "@jupyter-widgets/controls",
1460
- "_view_module_version": "1.5.0",
1461
- "_view_name": "ProgressView",
1462
- "bar_style": "success",
1463
- "description": "",
1464
- "description_tooltip": null,
1465
- "layout": "IPY_MODEL_193aef33d9184055bb9223f56d456de6",
1466
- "max": 108,
1467
- "min": 0,
1468
- "orientation": "horizontal",
1469
- "style": "IPY_MODEL_abfc9aa911ce4a5ea81c7c451f08295f",
1470
- "value": 108
1471
- }
1472
- },
1473
- "df2365556ae242a2ab1a119f9a31a561": {
1474
- "model_module": "@jupyter-widgets/controls",
1475
- "model_module_version": "1.5.0",
1476
- "model_name": "HTMLModel",
1477
- "state": {
1478
- "_dom_classes": [],
1479
- "_model_module": "@jupyter-widgets/controls",
1480
- "_model_module_version": "1.5.0",
1481
- "_model_name": "HTMLModel",
1482
- "_view_count": null,
1483
- "_view_module": "@jupyter-widgets/controls",
1484
- "_view_module_version": "1.5.0",
1485
- "_view_name": "HTMLView",
1486
- "description": "",
1487
- "description_tooltip": null,
1488
- "layout": "IPY_MODEL_13b9c5395bca4c3ba21265240cb936cf",
1489
- "placeholder": "​",
1490
- "style": "IPY_MODEL_47a4586384274577a726c57605e7f8d9",
1491
- "value": "Parsing nodes: 100%"
1492
- }
1493
- },
1494
- "e532ed7bfef34f67b5fcacd9534eb789": {
1495
- "model_module": "@jupyter-widgets/controls",
1496
- "model_module_version": "1.5.0",
1497
- "model_name": "DescriptionStyleModel",
1498
- "state": {
1499
- "_model_module": "@jupyter-widgets/controls",
1500
- "_model_module_version": "1.5.0",
1501
- "_model_name": "DescriptionStyleModel",
1502
- "_view_count": null,
1503
- "_view_module": "@jupyter-widgets/base",
1504
- "_view_module_version": "1.2.0",
1505
- "_view_name": "StyleView",
1506
- "description_width": ""
1507
- }
1508
- },
1509
- "e7937a1bc68441a080374911a6563376": {
1510
- "model_module": "@jupyter-widgets/base",
1511
- "model_module_version": "1.2.0",
1512
- "model_name": "LayoutModel",
1513
- "state": {
1514
- "_model_module": "@jupyter-widgets/base",
1515
- "_model_module_version": "1.2.0",
1516
- "_model_name": "LayoutModel",
1517
- "_view_count": null,
1518
- "_view_module": "@jupyter-widgets/base",
1519
- "_view_module_version": "1.2.0",
1520
- "_view_name": "LayoutView",
1521
- "align_content": null,
1522
- "align_items": null,
1523
- "align_self": null,
1524
- "border": null,
1525
- "bottom": null,
1526
- "display": null,
1527
- "flex": null,
1528
- "flex_flow": null,
1529
- "grid_area": null,
1530
- "grid_auto_columns": null,
1531
- "grid_auto_flow": null,
1532
- "grid_auto_rows": null,
1533
- "grid_column": null,
1534
- "grid_gap": null,
1535
- "grid_row": null,
1536
- "grid_template_areas": null,
1537
- "grid_template_columns": null,
1538
- "grid_template_rows": null,
1539
- "height": null,
1540
- "justify_content": null,
1541
- "justify_items": null,
1542
- "left": null,
1543
- "margin": null,
1544
- "max_height": null,
1545
- "max_width": null,
1546
- "min_height": null,
1547
- "min_width": null,
1548
- "object_fit": null,
1549
- "object_position": null,
1550
- "order": null,
1551
- "overflow": null,
1552
- "overflow_x": null,
1553
- "overflow_y": null,
1554
- "padding": null,
1555
- "right": null,
1556
- "top": null,
1557
- "visibility": null,
1558
- "width": null
1559
- }
1560
- },
1561
- "e956dfab55084a9cbe33c8e331b511e7": {
1562
- "model_module": "@jupyter-widgets/base",
1563
- "model_module_version": "1.2.0",
1564
- "model_name": "LayoutModel",
1565
- "state": {
1566
- "_model_module": "@jupyter-widgets/base",
1567
- "_model_module_version": "1.2.0",
1568
- "_model_name": "LayoutModel",
1569
- "_view_count": null,
1570
- "_view_module": "@jupyter-widgets/base",
1571
- "_view_module_version": "1.2.0",
1572
- "_view_name": "LayoutView",
1573
- "align_content": null,
1574
- "align_items": null,
1575
- "align_self": null,
1576
- "border": null,
1577
- "bottom": null,
1578
- "display": null,
1579
- "flex": null,
1580
- "flex_flow": null,
1581
- "grid_area": null,
1582
- "grid_auto_columns": null,
1583
- "grid_auto_flow": null,
1584
- "grid_auto_rows": null,
1585
- "grid_column": null,
1586
- "grid_gap": null,
1587
- "grid_row": null,
1588
- "grid_template_areas": null,
1589
- "grid_template_columns": null,
1590
- "grid_template_rows": null,
1591
- "height": null,
1592
- "justify_content": null,
1593
- "justify_items": null,
1594
- "left": null,
1595
- "margin": null,
1596
- "max_height": null,
1597
- "max_width": null,
1598
- "min_height": null,
1599
- "min_width": null,
1600
- "object_fit": null,
1601
- "object_position": null,
1602
- "order": null,
1603
- "overflow": null,
1604
- "overflow_x": null,
1605
- "overflow_y": null,
1606
- "padding": null,
1607
- "right": null,
1608
- "top": null,
1609
- "visibility": null,
1610
- "width": null
1611
- }
1612
- }
1613
- }
1614
- }
1615
- },
1616
- "nbformat": 4,
1617
- "nbformat_minor": 0
1618
- }
 
notebooks/15-Use_OpenSource_Models.ipynb DELETED
The diff for this file is too large to render. See raw diff
 
notebooks/17-Using_LLMs_to_rank_chunks_as_the_Judge.ipynb DELETED
@@ -1,830 +0,0 @@
1
- {
2
- "nbformat": 4,
3
- "nbformat_minor": 0,
4
- "metadata": {
5
- "colab": {
6
- "provenance": [],
7
- "authorship_tag": "ABX9TyMhd0xkjZD3StMhSoQIPv+w",
8
- "include_colab_link": true
9
- },
10
- "kernelspec": {
11
- "name": "python3",
12
- "display_name": "Python 3"
13
- },
14
- "language_info": {
15
- "name": "python"
16
- }
17
- },
18
- "cells": [
19
- {
20
- "cell_type": "markdown",
21
- "metadata": {
22
- "id": "view-in-github",
23
- "colab_type": "text"
24
- },
25
- "source": [
26
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/17-Using_LLMs_to_rank_chunks_as_the_Judge.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
27
- ]
28
- },
29
- {
30
- "cell_type": "markdown",
31
- "source": [
32
- "# Install Packages and Setup Variables"
33
- ],
34
- "metadata": {
35
- "id": "0FbELaf7TrW7"
36
- }
37
- },
38
- {
39
- "cell_type": "code",
40
- "execution_count": null,
41
- "metadata": {
42
- "id": "Yubz8AanRRSW",
43
- "colab": {
44
- "base_uri": "https://localhost:8080/"
45
- },
46
- "outputId": "2487c4fd-0fb5-4894-ffe6-c747f4adb952"
47
- },
48
- "outputs": [
49
- {
50
- "output_type": "stream",
51
- "name": "stdout",
52
- "text": [
53
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m226.7/226.7 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
54
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.8/1.8 MB\u001b[0m \u001b[31m11.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
55
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.1/211.1 kB\u001b[0m \u001b[31m6.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
56
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m508.6/508.6 kB\u001b[0m \u001b[31m7.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
57
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.4/15.4 MB\u001b[0m \u001b[31m15.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
58
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m23.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
59
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.6/75.6 kB\u001b[0m \u001b[31m2.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
60
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m81.3/81.3 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
61
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m97.6/97.6 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
62
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
63
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.4/7.4 MB\u001b[0m \u001b[31m28.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
64
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
65
- " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
66
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m55.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
67
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m91.8/91.8 kB\u001b[0m \u001b[31m7.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
68
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.8/60.8 kB\u001b[0m \u001b[31m5.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
69
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.3/41.3 kB\u001b[0m \u001b[31m3.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
70
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m56.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
71
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m47.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
72
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.1/60.1 kB\u001b[0m \u001b[31m4.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
73
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m106.1/106.1 kB\u001b[0m \u001b[31m1.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
74
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m4.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
75
- "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
76
- " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
77
- " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
78
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m698.9/698.9 kB\u001b[0m \u001b[31m55.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
79
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m80.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
80
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.6/67.6 kB\u001b[0m \u001b[31m9.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
81
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.9/71.9 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
82
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
83
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.9/77.9 kB\u001b[0m \u001b[31m10.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
84
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
85
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m141.9/141.9 kB\u001b[0m \u001b[31m16.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
86
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m290.4/290.4 kB\u001b[0m \u001b[31m26.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
87
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m5.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
88
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.8/50.8 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
89
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m341.4/341.4 kB\u001b[0m \u001b[31m35.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
90
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m92.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
91
- "\u001b[2K \u001b[90m━━���━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m78.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
92
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m16.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
93
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m11.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
94
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.4/49.4 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
95
- "\u001b[?25h Building wheel for tinysegmenter (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
96
- " Building wheel for feedfinder2 (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
97
- " Building wheel for jieba3k (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
98
- " Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
99
- " Building wheel for sgmllib3k (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
100
- ]
101
- }
102
- ],
103
- "source": [
104
- "!pip install -q llama-index==0.10.30 openai==1.12.0 tiktoken==0.6.0 chromadb==0.4.21 llama-index-vector-stores-chroma==0.1.7"
105
- ]
106
- },
107
- {
108
- "cell_type": "code",
109
- "source": [
110
- "import os\n",
111
- "\n",
112
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
113
- "os.environ[\"OPENAI_API_KEY\"] = \"[OPENAI_API_KEY]\""
114
- ],
115
- "metadata": {
116
- "id": "xLXFuRW-TpUu"
117
- },
118
- "execution_count": null,
119
- "outputs": []
120
- },
121
- {
122
- "cell_type": "markdown",
123
- "source": [
124
- "# Load a Model"
125
- ],
126
- "metadata": {
127
- "id": "r6GCYYqqTuMc"
128
- }
129
- },
130
- {
131
- "cell_type": "code",
132
- "source": [
133
- "from llama_index.llms.openai import OpenAI\n",
134
- "\n",
135
- "llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
136
- ],
137
- "metadata": {
138
- "id": "pupJpdZaTu5m"
139
- },
140
- "execution_count": null,
141
- "outputs": []
142
- },
143
- {
144
- "cell_type": "markdown",
145
- "source": [
146
- "# Create a Vector Store"
147
- ],
148
- "metadata": {
149
- "id": "gaKYO-KrTwsn"
150
- }
151
- },
152
- {
153
- "cell_type": "code",
154
- "source": [
155
- "import chromadb\n",
156
- "\n",
157
- "# create client and a new collection\n",
158
- "# chromadb.EphemeralClient saves data in-memory.\n",
159
- "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
160
- "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
161
- ],
162
- "metadata": {
163
- "id": "npCqCZSPZKR0"
164
- },
165
- "execution_count": null,
166
- "outputs": []
167
- },
168
- {
169
- "cell_type": "code",
170
- "source": [
171
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
172
- "\n",
173
- "# Define a storage context object using the created vector database.\n",
174
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
175
- ],
176
- "metadata": {
177
- "id": "dG9eKSVrZMs1"
178
- },
179
- "execution_count": null,
180
- "outputs": []
181
- },
182
- {
183
- "cell_type": "markdown",
184
- "source": [
185
- "# Load the Dataset (CSV)"
186
- ],
187
- "metadata": {
188
- "id": "HmiFENBdZMAk"
189
- }
190
- },
191
- {
192
- "cell_type": "markdown",
193
- "source": [
194
- "## Download"
195
- ],
196
- "metadata": {
197
- "id": "X-20isiTZRIa"
198
- }
199
- },
200
- {
201
- "cell_type": "markdown",
202
- "source": [
203
- "The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
204
- ],
205
- "metadata": {
206
- "id": "-lWKX814ZURc"
207
- }
208
- },
209
- {
210
- "cell_type": "code",
211
- "source": [
212
- "!wget https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
213
- ],
214
- "metadata": {
215
- "id": "fmlEL849ZPrH",
216
- "colab": {
217
- "base_uri": "https://localhost:8080/"
218
- },
219
- "outputId": "63039988-ab7a-4ecf-deb0-d9510628ecb8"
220
- },
221
- "execution_count": null,
222
- "outputs": [
223
- {
224
- "output_type": "stream",
225
- "name": "stdout",
226
- "text": [
227
- "--2024-04-30 18:37:36-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
228
- "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.109.133, 185.199.111.133, ...\n",
229
- "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... connected.\n",
230
- "HTTP request sent, awaiting response... 200 OK\n",
231
- "Length: 173646 (170K) [text/plain]\n",
232
- "Saving to: β€˜mini-llama-articles.csv’\n",
233
- "\n",
234
- "mini-llama-articles 100%[===================>] 169.58K --.-KB/s in 0.01s \n",
235
- "\n",
236
- "2024-04-30 18:37:37 (11.3 MB/s) - β€˜mini-llama-articles.csv’ saved [173646/173646]\n",
237
- "\n"
238
- ]
239
- }
240
- ]
241
- },
242
- {
243
- "cell_type": "markdown",
244
- "source": [
245
- "# Read File"
246
- ],
247
- "metadata": {
248
- "id": "r9PL_eiTZW7y"
249
- }
250
- },
251
- {
252
- "cell_type": "code",
253
- "source": [
254
- "import csv\n",
255
- "\n",
256
- "rows = []\n",
257
- "\n",
258
- "# Load the file as a JSON\n",
259
- "with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
260
- " csv_reader = csv.reader(file)\n",
261
- "\n",
262
- " for idx, row in enumerate( csv_reader ):\n",
263
- " if idx == 0: continue; # Skip header row\n",
264
- " rows.append( row )\n",
265
- "\n",
266
- "# The number of characters in the dataset.\n",
267
- "len( rows )"
268
- ],
269
- "metadata": {
270
- "id": "x5IwXJi8ZQGh"
271
- },
272
- "execution_count": null,
273
- "outputs": []
274
- },
275
- {
276
- "cell_type": "markdown",
277
- "source": [
278
- "# Convert to Document obj"
279
- ],
280
- "metadata": {
281
- "id": "ktYUZzzSZaDW"
282
- }
283
- },
284
- {
285
- "cell_type": "code",
286
- "source": [
287
- "from llama_index.core.schema import Document\n",
288
- "\n",
289
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
290
- "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
291
- ],
292
- "metadata": {
293
- "id": "oO10Q-UyZQEB"
294
- },
295
- "execution_count": null,
296
- "outputs": []
297
- },
298
- {
299
- "cell_type": "markdown",
300
- "source": [
301
- "# Transforming"
302
- ],
303
- "metadata": {
304
- "id": "0PnovZ0tZdAT"
305
- }
306
- },
307
- {
308
- "cell_type": "code",
309
- "source": [
310
- "from llama_index.core.node_parser import TokenTextSplitter\n",
311
- "\n",
312
- "# Define the splitter object that split the text into segments with 512 tokens,\n",
313
- "# with a 128 overlap between the segments.\n",
314
- "text_splitter = TokenTextSplitter(\n",
315
- " separator=\" \", chunk_size=512, chunk_overlap=128\n",
316
- ")"
317
- ],
318
- "metadata": {
319
- "id": "wzOQZH6VZQBm"
320
- },
321
- "execution_count": null,
322
- "outputs": []
323
- },
324
- {
325
- "cell_type": "code",
326
- "source": [
327
- "from llama_index.core.extractors import (\n",
328
- " SummaryExtractor,\n",
329
- " QuestionsAnsweredExtractor,\n",
330
- " KeywordExtractor,\n",
331
- ")\n",
332
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
333
- "from llama_index.core.ingestion import IngestionPipeline\n",
334
- "\n",
335
- "# Create the pipeline to apply the transformation on each chunk,\n",
336
- "# and store the transformed text in the chroma vector store.\n",
337
- "pipeline = IngestionPipeline(\n",
338
- " transformations=[\n",
339
- " text_splitter,\n",
340
- " QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
341
- " SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
342
- " KeywordExtractor(keywords=10, llm=llm),\n",
343
- " OpenAIEmbedding(),\n",
344
- " ],\n",
345
- " vector_store=vector_store\n",
346
- ")\n",
347
- "\n",
348
- "# Run the transformation pipeline.\n",
349
- "nodes = pipeline.run(documents=documents, show_progress=True);"
350
- ],
351
- "metadata": {
352
- "id": "l6UP7M_rZeXS"
353
- },
354
- "execution_count": null,
355
- "outputs": []
356
- },
357
- {
358
- "cell_type": "code",
359
- "source": [
360
- "len( nodes )"
361
- ],
362
- "metadata": {
363
- "id": "GcUUhs88ZeUs"
364
- },
365
- "execution_count": null,
366
- "outputs": []
367
- },
368
- {
369
- "cell_type": "code",
370
- "source": [
371
- "# Compress the vector store directory to a zip file to be able to download and use later.\n",
372
- "!zip -r vectorstore.zip mini-llama-articles"
373
- ],
374
- "metadata": {
375
- "id": "B_P8Cil-ZeQM"
376
- },
377
- "execution_count": null,
378
- "outputs": []
379
- },
380
- {
381
- "cell_type": "markdown",
382
- "source": [
383
- "# Load Indexes"
384
- ],
385
- "metadata": {
386
- "id": "YSGHsZMMZj4E"
387
- }
388
- },
389
- {
390
- "cell_type": "markdown",
391
- "source": [
392
- "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
393
- ],
394
- "metadata": {
395
- "id": "J81Yvj0AZlvK"
396
- }
397
- },
398
- {
399
- "cell_type": "code",
400
- "source": [
401
- "# !unzip vectorstore.zip"
402
- ],
403
- "metadata": {
404
- "id": "M8iaOOGyZeNp",
405
- "colab": {
406
- "base_uri": "https://localhost:8080/"
407
- },
408
- "outputId": "6a117a0b-161a-4889-daf6-baf94ae00d2a"
409
- },
410
- "execution_count": null,
411
- "outputs": [
412
- {
413
- "output_type": "stream",
414
- "name": "stdout",
415
- "text": [
416
- "Archive: vectorstore.zip\n",
417
- " creating: mini-llama-articles/\n",
418
- " creating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/\n",
419
- " inflating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/data_level0.bin \n",
420
- " inflating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/header.bin \n",
421
- " extracting: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/link_lists.bin \n",
422
- " inflating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/length.bin \n",
423
- " inflating: mini-llama-articles/chroma.sqlite3 \n"
424
- ]
425
- }
426
- ]
427
- },
428
- {
429
- "cell_type": "code",
430
- "source": [
431
- "# Load the vector store from the local storage.\n",
432
- "db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
433
- "chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
434
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
435
- ],
436
- "metadata": {
437
- "id": "6tzS_EKPZeLS"
438
- },
439
- "execution_count": null,
440
- "outputs": []
441
- },
442
- {
443
- "cell_type": "code",
444
- "source": [
445
- "from llama_index.core import VectorStoreIndex\n",
446
- "\n",
447
- "# Create the index based on the vector store.\n",
448
- "index = VectorStoreIndex.from_vector_store(vector_store)"
449
- ],
450
- "metadata": {
451
- "id": "0T6FL7J3ZrNK"
452
- },
453
- "execution_count": null,
454
- "outputs": []
455
- },
456
- {
457
- "cell_type": "markdown",
458
- "source": [
459
- "# RankGPT"
460
- ],
461
- "metadata": {
462
- "id": "w2XBkzNwLle5"
463
- }
464
- },
465
- {
466
- "cell_type": "code",
467
- "source": [
468
- "from llama_index.core.postprocessor.rankGPT_rerank import RankGPTRerank\n",
469
- "\n",
470
- "rankGPT = RankGPTRerank(top_n=3, llm=OpenAI(model=\"gpt-3.5-turbo\"))"
471
- ],
472
- "metadata": {
473
- "id": "_it2CxTtLmHT"
474
- },
475
- "execution_count": null,
476
- "outputs": []
477
- },
478
- {
479
- "cell_type": "code",
480
- "source": [
481
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
482
- "# and using a LLM to formulate the final answer.\n",
483
- "# The `node_postprocessors` function will be applied to the retrieved nodes.\n",
484
- "query_engine = index.as_query_engine(\n",
485
- " similarity_top_k=10,\n",
486
- " node_postprocessors=[rankGPT]\n",
487
- ")\n",
488
- "\n",
489
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
490
- ],
491
- "metadata": {
492
- "id": "YA3M9m9CL6AJ"
493
- },
494
- "execution_count": null,
495
- "outputs": []
496
- },
497
- {
498
- "cell_type": "code",
499
- "source": [
500
- "res.response"
501
- ],
502
- "metadata": {
503
- "colab": {
504
- "base_uri": "https://localhost:8080/",
505
- "height": 53
506
- },
507
- "id": "wgyjv9e6MCVm",
508
- "outputId": "70723d5e-9d16-4123-884b-0d65cd91a405"
509
- },
510
- "execution_count": null,
511
- "outputs": [
512
- {
513
- "output_type": "execute_result",
514
- "data": {
515
- "text/plain": [
516
- "'The Llama 2 model has four different parameter sizes: 7 billion, 13 billion, 34 billion, and 70 billion.'"
517
- ],
518
- "application/vnd.google.colaboratory.intrinsic+json": {
519
- "type": "string"
520
- }
521
- },
522
- "metadata": {},
523
- "execution_count": 12
524
- }
525
- ]
526
- },
527
- {
528
- "cell_type": "code",
529
- "source": [
530
- "# Show the retrieved nodes\n",
531
- "for src in res.source_nodes:\n",
532
- " print(\"Node ID\\t\", src.node_id)\n",
533
- " print(\"Title\\t\", src.metadata['title'])\n",
534
- " print(\"Text\\t\", src.text)\n",
535
- " print(\"Score\\t\", src.score)\n",
536
- " print(\"-_\"*20)"
537
- ],
538
- "metadata": {
539
- "colab": {
540
- "base_uri": "https://localhost:8080/"
541
- },
542
- "id": "wUhOlwWcMEUT",
543
- "outputId": "eae3754b-5cb8-4c5d-d739-c42c9686006d"
544
- },
545
- "execution_count": null,
546
- "outputs": [
547
- {
548
- "output_type": "stream",
549
- "name": "stdout",
550
- "text": [
551
- "Node ID\t d6f533e5-fef8-469c-a313-def19fd38efe\n",
552
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
553
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
554
- "Score\t 0.7077337819711658\n",
555
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
556
- "Node ID\t 2f3b7c34-8fd0-4134-af38-ef1b77e32cd8\n",
557
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
558
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
559
- "Score\t 0.7025566634608498\n",
560
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
561
- "Node ID\t 1c7a8637-6f65-401e-be33-26886c828a34\n",
562
- "Title\t Inside Code Llama: Meta AI's Entrance in the Code LLM Space\n",
563
- "Text\t Inside Code Llama The release of Code Llama does not include a single model but three different variants, characterized by their parameter sizes of 7B, 13B, and 34B. Each of these models has been trained on an extensive pool of 500B tokens encompassing code and code-related information. Notably, the 7B and 13B base and instruct models have been endowed with fill-in-the-middle (FIM) competence, empowering them to seamlessly insert code into existing code structures. This attribute equips them to handle tasks like code completion right from the outset.The trio of models caters to distinct requisites concerning serving and latency. For instance, the 7B model boasts the ability to operate on a single GPU. While the 34B model stands out for yielding optimal outcomes and elevating coding assistance, the smaller 7B and 13B versions excel in speed, making them fitting for low-latency tasks such as real-time code completion. Meta AI's innovations further extend to two nuanced adaptations of Code Llama: Code Llama - Python and Code Llama - Instruct. Code Llama - Python is a specialized derivation, meticulously honed on a substantial volume of Python code spanning 100B tokens. Given Python's central role in code generation benchmarks and its significance within the AI community, this focused model augments utility.Code Llama - Instruct represents an alignment and refinement of Code Llama through instructional fine-tuning. This novel training approach entails furnishing the model with \"natural language instruction\" inputs paired with anticipated outputs. This strategic methodology enhances the model's capacity to grasp human expectations in prompts. For endeavors involving code generation, it is advised to opt for Code Llama - Instruct versions, as they have been calibrated to yield useful and secure natural language responses. Deep diving into the Code Llama training and fine-tuning, there are a few aspects that are worth highlighting 1) DatasetLlama's training rests on a meticulously curated dataset enriched with publicly available code, offering a near-duplicate-free landscape. The dataset consists of 500B tokens during the initial phase, starting from the 7B, 13B, and 34B\n",
564
- "Score\t 0.6889534709415898\n",
565
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
566
- ]
567
- }
568
- ]
569
- },
570
- {
571
- "cell_type": "markdown",
572
- "source": [
573
- "# Custom Postprocessor"
574
- ],
575
- "metadata": {
576
- "id": "5mcAcZqhQluE"
577
- }
578
- },
579
- {
580
- "cell_type": "markdown",
581
- "source": [
582
- "## The `Judger` Function"
583
- ],
584
- "metadata": {
585
- "id": "7v7vmJblQrN6"
586
- }
587
- },
588
- {
589
- "cell_type": "markdown",
590
- "source": [
591
- "The following function will query GPT-4 to retrieve the top three nodes that has highest similarity to the asked question."
592
- ],
593
- "metadata": {
594
- "id": "6k8IKlN9QvU7"
595
- }
596
- },
597
- {
598
- "cell_type": "code",
599
- "source": [
600
- "from pydantic import BaseModel\n",
601
- "from llama_index.llms.openai import OpenAI\n",
602
- "from llama_index.core.prompts import PromptTemplate\n",
603
- "\n",
604
- "\n",
605
- "def judger(nodes, query):\n",
606
- "\n",
607
- " # The model's output template\n",
608
- " class OrderedNodes(BaseModel):\n",
609
- " \"\"\"A node with the id and assigned score.\"\"\"\n",
610
- " node_id: list\n",
611
- " score: list\n",
612
- "\n",
613
- " # Prepare the nodes and wrap them in <NODE></NODE> identifier, as well as the query\n",
614
- " the_nodes=\"\"\n",
615
- " for idx, item in enumerate(nodes):\n",
616
- " the_nodes += f\"<NODE{idx+1}>\\nNode ID: {item.node_id}\\nText: {item.text}\\n</NODE{idx+1}>\\n\"\n",
617
- "\n",
618
- " query = \"<QUERY>\\n{}\\n</QUERY>\".format(query)\n",
619
- "\n",
620
- " # Define the prompt template\n",
621
- " prompt_tmpl = PromptTemplate(\n",
622
- " \"\"\"\n",
623
- " You receive a qurey along with a list of nodes' text and their ids. Your task is to assign score\n",
624
- " to each node based on its contextually closeness to the given query. The final output is each\n",
625
- " node id along with its proximity score.\n",
626
- " Here is the list of nodes:\n",
627
- " {nodes_list}\n",
628
- "\n",
629
- " And the following is the query:\n",
630
- " {user_query}\n",
631
- "\n",
632
- " Score each of the nodes based on their text and their relevancy to the provided query.\n",
633
- " The score must be a decimal number between 0 an 1 so we can rank them.\"\"\"\n",
634
- " )\n",
635
- "\n",
636
- " # Define the an instance of GPT-4 and send the request\n",
637
- " llm = OpenAI(model=\"gpt-4\")\n",
638
- " ordered_nodes = llm.structured_predict(\n",
639
- " OrderedNodes, prompt_tmpl, nodes_list=the_nodes, user_query=query\n",
640
- " )\n",
641
- "\n",
642
- " return ordered_nodes"
643
- ],
644
- "metadata": {
645
- "id": "WhtJ1OeF9L3G"
646
- },
647
- "execution_count": null,
648
- "outputs": []
649
- },
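Before wiring `judger` into a postprocessor, it can be called directly on retrieved nodes to sanity-check the scores it assigns. A minimal sketch, assuming the `index` object built above (the query wording is only an example):

```python
# Retrieve candidate nodes, then ask GPT-4 to score their relevance.
retriever = index.as_retriever(similarity_top_k=10)
nodes = retriever.retrieve("How many parameters does the LLaMA 2 model have?")

ranked = judger(nodes, "How many parameters does the LLaMA 2 model have?")
for node_id, score in zip(ranked.node_id, ranked.score):
    print(node_id, score)
```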
650
- {
651
- "cell_type": "markdown",
652
- "source": [
653
- "## Define Postprocessor"
654
- ],
655
- "metadata": {
656
- "id": "Q5f1GrBKZprO"
657
- }
658
- },
659
- {
660
- "cell_type": "markdown",
661
- "source": [
662
- "The following class will use the `judger` function to rank the nodes, and filter them based on the ranks."
663
- ],
664
- "metadata": {
665
- "id": "yZujUJTvQ6Yu"
666
- }
667
- },
668
- {
669
- "cell_type": "code",
670
- "source": [
671
- "from typing import (\n",
672
- " List,\n",
673
- " Optional\n",
674
- ")\n",
675
- "from llama_index.core import QueryBundle\n",
676
- "from llama_index.core.postprocessor.types import BaseNodePostprocessor\n",
677
- "from llama_index.core.schema import NodeWithScore\n",
678
- "\n",
679
- "\n",
680
- "class OpenaiAsJudgePostprocessor(BaseNodePostprocessor):\n",
681
- " def _postprocess_nodes(\n",
682
- " self, nodes: List[NodeWithScore], query_bundle: Optional[QueryBundle]\n",
683
- " ) -> List[NodeWithScore]:\n",
684
- "\n",
685
- " r = judger(nodes, query_bundle)\n",
686
- "\n",
687
- " node_ids = r.node_id\n",
688
- " scores = r.score\n",
689
- "\n",
690
- " sorted_scores = sorted(enumerate(scores), key=lambda x: x[1], reverse=True)\n",
691
- " top_three_nodes = [sorted_scores[i][0] for i in range(3)]\n",
692
- "\n",
693
- " selected_nodes_id = [node_ids[item] for item in top_three_nodes]\n",
694
- "\n",
695
- " final_nodes = []\n",
696
- " for item in nodes:\n",
697
- " if item.node_id in selected_nodes_id:\n",
698
- " final_nodes.append( item )\n",
699
- "\n",
700
- " return final_nodes"
701
- ],
702
- "metadata": {
703
- "id": "-QtyuC8fZun0"
704
- },
705
- "execution_count": null,
706
- "outputs": []
707
- },
708
- {
709
- "cell_type": "code",
710
- "source": [
711
- "judge = OpenaiAsJudgePostprocessor()"
712
- ],
713
- "metadata": {
714
- "id": "jk-lqYlYLipi"
715
- },
716
- "execution_count": null,
717
- "outputs": []
718
- },
719
- {
720
- "cell_type": "markdown",
721
- "source": [
722
- "## Query Engine with Postprocessor"
723
- ],
724
- "metadata": {
725
- "id": "cgtsvxR7SflP"
726
- }
727
- },
728
- {
729
- "cell_type": "code",
730
- "source": [
731
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
732
- "# and using a LLM to formulate the final answer.\n",
733
- "# The `node_postprocessors` function will be applied to the retrieved nodes.\n",
734
- "query_engine = index.as_query_engine(\n",
735
- " similarity_top_k=10,\n",
736
- " node_postprocessors=[judge]\n",
737
- ")\n",
738
- "\n",
739
- "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
740
- ],
741
- "metadata": {
742
- "id": "1Hh3RLCeLfXZ"
743
- },
744
- "execution_count": null,
745
- "outputs": []
746
- },
747
- {
748
- "cell_type": "code",
749
- "source": [
750
- "res.response"
751
- ],
752
- "metadata": {
753
- "colab": {
754
- "base_uri": "https://localhost:8080/",
755
- "height": 53
756
- },
757
- "id": "zmZv0EIyF0wG",
758
- "outputId": "7ff1b3bf-1b5f-4985-ea0d-3048d94c8da1"
759
- },
760
- "execution_count": null,
761
- "outputs": [
762
- {
763
- "output_type": "execute_result",
764
- "data": {
765
- "text/plain": [
766
- "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
767
- ],
768
- "application/vnd.google.colaboratory.intrinsic+json": {
769
- "type": "string"
770
- }
771
- },
772
- "metadata": {},
773
- "execution_count": 29
774
- }
775
- ]
776
- },
777
- {
778
- "cell_type": "code",
779
- "source": [
780
- "# Show the retrieved nodes\n",
781
- "for src in res.source_nodes:\n",
782
- " print(\"Node ID\\t\", src.node_id)\n",
783
- " print(\"Title\\t\", src.metadata['title'])\n",
784
- " print(\"Text\\t\", src.text)\n",
785
- " print(\"Score\\t\", src.score)\n",
786
- " print(\"-_\"*20)"
787
- ],
788
- "metadata": {
789
- "id": "bBMaG6yaZzjA",
790
- "colab": {
791
- "base_uri": "https://localhost:8080/"
792
- },
793
- "outputId": "8a173ef7-e66f-4f9b-a979-c88a17028ef0"
794
- },
795
- "execution_count": null,
796
- "outputs": [
797
- {
798
- "output_type": "stream",
799
- "name": "stdout",
800
- "text": [
801
- "Node ID\t d6f533e5-fef8-469c-a313-def19fd38efe\n",
802
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
803
- "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
804
- "Score\t 0.7077337819711658\n",
805
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
806
- "Node ID\t 2f3b7c34-8fd0-4134-af38-ef1b77e32cd8\n",
807
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
808
- "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
809
- "Score\t 0.7025566634608498\n",
810
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
811
- "Node ID\t 021c859e-809b-49b8-8d0d-38cc326c1203\n",
812
- "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
813
- "Text\t with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering strong competition to closed-source models. V. Ghost Attention: Enhancing Conversational Continuity One unique feature in Llama 2 is Ghost Attention, which ensures continuity in conversations. This means that even after multiple interactions, the model remembers its initial instructions, ensuring more coherent and consistent responses throughout the conversation. This feature significantly enhances the user experience and makes Llama 2 a more reliable language model for interactive applications. In the example below, on the left, it forgets to use an emoji after a few conversations. On the right, with Ghost Attention, even after having many conversations, it will remember the context and continue to use emojis in its response. VI. Temporal Capability: A Leap in Information Organization Meta reported a groundbreaking temporal capability, where the model organizes information based on time relevance. Each question posed to the model is associated with a date, and it responds accordingly by considering the event date before which the question becomes irrelevant. For example, if you ask the question, \"How long ago did Barack Obama become president?\", its only relevant after 2008. This temporal awareness allows Llama 2 to deliver more contextually accurate responses, enriching the user experience further. VII. Open Questions and Future Outlook Meta's open-sourcing of Llama 2 represents a seismic shift, now offering developers and researchers commercial access to a leading language model. With Llama 2 outperforming MosaicML's current MPT models, all eyes are on how Databricks will respond. Can MosaicML's next MPT iteration beat Llama 2? Is it worthwhile to compete\n",
814
- "Score\t 0.691486848320407\n",
815
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
816
- ]
817
- }
818
- ]
819
- },
820
- {
821
- "cell_type": "code",
822
- "source": [],
823
- "metadata": {
824
- "id": "J7sIPpFFTep3"
825
- },
826
- "execution_count": null,
827
- "outputs": []
828
- }
829
- ]
830
- }
 
notebooks/Crawl_a_Website.ipynb DELETED
@@ -1,574 +0,0 @@
1
- {
2
- "nbformat": 4,
3
- "nbformat_minor": 0,
4
- "metadata": {
5
- "colab": {
6
- "provenance": [],
7
- "toc_visible": true,
8
- "authorship_tag": "ABX9TyOUem37lhhg0mJYauho+pvb",
9
- "include_colab_link": true
10
- },
11
- "kernelspec": {
12
- "name": "python3",
13
- "display_name": "Python 3"
14
- },
15
- "language_info": {
16
- "name": "python"
17
- }
18
- },
19
- "cells": [
20
- {
21
- "cell_type": "markdown",
22
- "metadata": {
23
- "id": "view-in-github",
24
- "colab_type": "text"
25
- },
26
- "source": [
27
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/Crawl_a_Website.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
28
- ]
29
- },
30
- {
31
- "cell_type": "code",
32
- "source": [
33
- "!pip install -q llama-index==0.10.30 openai==1.12.0 cohere==4.47 tiktoken==0.6.0 newspaper3k==0.2.8"
34
- ],
35
- "metadata": {
36
- "id": "4CW8ux1RSdem",
37
- "colab": {
38
- "base_uri": "https://localhost:8080/"
39
- },
40
- "outputId": "155feab4-8ae6-43da-a07f-8a1f4b677c2b"
41
- },
42
- "execution_count": null,
43
- "outputs": [
44
- {
45
- "output_type": "stream",
46
- "name": "stdout",
47
- "text": [
48
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.1/211.1 kB\u001b[0m \u001b[31m4.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
49
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m81.3/81.3 kB\u001b[0m \u001b[31m8.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
50
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m97.6/97.6 kB\u001b[0m \u001b[31m9.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
51
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
52
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.4/7.4 MB\u001b[0m \u001b[31m43.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
53
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
54
- " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
55
- " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
56
- " Building wheel for tinysegmenter (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
57
- " Building wheel for feedfinder2 (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
58
- " Building wheel for jieba3k (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
59
- " Building wheel for sgmllib3k (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
60
- ]
61
- }
62
- ]
63
- },
64
- {
65
- "cell_type": "code",
66
- "source": [
67
- "import os\n",
68
- "\n",
69
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
70
- "os.environ[\"OPENAI_API_KEY\"] = \"[OPENAI_API_KEY]\"\n",
71
- "USESCRAPER_API_KEY = \"[USESCRAPER_API_KEY]\""
72
- ],
73
- "metadata": {
74
- "id": "wxDPsVXSAj6_"
75
- },
76
- "execution_count": null,
77
- "outputs": []
78
- },
79
- {
80
- "cell_type": "markdown",
81
- "source": [
82
- "There are two primary methods for extracting webpage content. The first method involves having a list of URLs; one can iterate through this list to retrieve the content of each page. The second method, web crawling, requires using a script or service to extract page URLs from a sitemap or manually following links on the page to access all the content. Initially, we will explore web scraping techniques before discussing how to use a service like usescraper.com to perform web crawling."
83
- ],
84
- "metadata": {
85
- "id": "VSc7-1mljmrp"
86
- }
87
- },
88
- {
89
- "cell_type": "markdown",
90
- "source": [
91
- "# 1. Scraping using `newspaper` Library"
92
- ],
93
- "metadata": {
94
- "id": "D3r2tYHgeIK9"
95
- }
96
- },
97
- {
98
- "cell_type": "markdown",
99
- "source": [
100
- "## Define URLs"
101
- ],
102
- "metadata": {
103
- "id": "it43ZQf8jatw"
104
- }
105
- },
106
- {
107
- "cell_type": "code",
108
- "source": [
109
- "urls = [\n",
110
- " \"https://docs.llamaindex.ai/en/stable/understanding\",\n",
111
- " \"https://docs.llamaindex.ai/en/stable/understanding/using_llms/using_llms/\",\n",
112
- " \"https://docs.llamaindex.ai/en/stable/understanding/indexing/indexing/\",\n",
113
- " \"https://docs.llamaindex.ai/en/stable/understanding/querying/querying/\"\n",
114
- "]"
115
- ],
116
- "metadata": {
117
- "id": "x74PqfQ7eIzD"
118
- },
119
- "execution_count": null,
120
- "outputs": []
121
- },
122
- {
123
- "cell_type": "markdown",
124
- "source": [
125
- "## Get Page Contents"
126
- ],
127
- "metadata": {
128
- "id": "tgxfpfSsjcMC"
129
- }
130
- },
131
- {
132
- "cell_type": "code",
133
- "source": [
134
- "import newspaper\n",
135
- "\n",
136
- "pages_content = []\n",
137
- "\n",
138
- "# Retrieve the Content\n",
139
- "for url in urls:\n",
140
- "\ttry:\n",
141
- "\t\tarticle = newspaper.Article( url )\n",
142
- "\t\tarticle.download()\n",
143
- "\t\tarticle.parse()\n",
144
- "\t\tif len(article.text) > 0:\n",
145
- "\t\t\tpages_content.append({ \"url\": url, \"title\": article.title, \"text\": article.text })\n",
146
- "\texcept:\n",
147
- "\t\tcontinue"
148
- ],
149
- "metadata": {
150
- "id": "Q6Xs1OhUfVQV"
151
- },
152
- "execution_count": null,
153
- "outputs": []
154
- },
155
- {
156
- "cell_type": "code",
157
- "source": [
158
- "pages_content[0]"
159
- ],
160
- "metadata": {
161
- "colab": {
162
- "base_uri": "https://localhost:8080/"
163
- },
164
- "id": "3cNdJNi2g1ly",
165
- "outputId": "f5184c15-6b55-47ee-98ee-646a06290a4c"
166
- },
167
- "execution_count": null,
168
- "outputs": [
169
- {
170
- "output_type": "execute_result",
171
- "data": {
172
- "text/plain": [
173
- "{'url': 'https://docs.llamaindex.ai/en/stable/understanding',\n",
174
- " 'title': 'Building an LLM Application',\n",
175
- " 'text': \"Building an LLM application#\\n\\nWelcome to the beginning of Understanding LlamaIndex. This is a series of short, bite-sized tutorials on every stage of building an LLM application to get you acquainted with how to use LlamaIndex before diving into more advanced and subtle strategies. If you're an experienced programmer new to LlamaIndex, this is the place to start.\\n\\nKey steps in building an LLM application#\\n\\nTip If you've already read our high-level concepts page you'll recognize several of these steps.\\n\\nThere are a series of key steps involved in building any LLM-powered application, whether it's answering questions about your data, creating a chatbot, or an autonomous agent. Throughout our documentation, you'll notice sections are arranged roughly in the order you'll perform these steps while building your app. You'll learn about:\\n\\nUsing LLMs : whether it's OpenAI or any number of hosted LLMs or a locally-run model of your own, LLMs are used at every step of the way, from indexing and storing to querying and parsing your data. LlamaIndex comes with a huge number of reliable, tested prompts and we'll also show you how to customize your own.\\n\\nLoading : getting your data from wherever it lives, whether that's unstructured text, PDFs, databases, or APIs to other applications. LlamaIndex has hundreds of connectors to every data source over at LlamaHub.\\n\\nIndexing : once you've got your data there are an infinite number of ways to structure access to that data to ensure your applications is always working with the most relevant data. LlamaIndex has a huge number of these strategies built-in and can help you select the best ones.\\n\\nStoring : you will probably find it more efficient to store your data in indexed form, or pre-processed summaries provided by an LLM, often in a specialized database known as a Vector Store (see below). You can also store your indexes, metadata and more.\\n\\nQuerying : every indexing strategy has a corresponding querying strategy and there are lots of ways to improve the relevance, speed and accuracy of what you retrieve and what the LLM does with it before returning it to you, including turning it into structured responses such as an API.\\n\\nPutting it all together : whether you are building question & answering, chatbots, an API, or an autonomous agent, we show you how to get your application into production.\\n\\nTracing and debugging : also called observability , it's especially important with LLM applications to be able to look into the inner workings of what's going on to help you debug problems and spot places to improve.\\n\\nEvaluating: every strategy has pros and cons and a key part of building, shipping and evolving your application is evaluating whether your change has improved your application in terms of accuracy, performance, clarity, cost and more. Reliably evaluating your changes is a crucial part of LLM application development.\\n\\nReady to dive in? Head to using LLMs.\"}"
176
- ]
177
- },
178
- "metadata": {},
179
- "execution_count": 57
180
- }
181
- ]
182
- },
183
- {
184
- "cell_type": "code",
185
- "source": [
186
- "len( pages_content )"
187
- ],
188
- "metadata": {
189
- "colab": {
190
- "base_uri": "https://localhost:8080/"
191
- },
192
- "id": "WleP60A3gkQM",
193
- "outputId": "8c79ab53-e47b-4227-eb6f-0286b8ba2d15"
194
- },
195
- "execution_count": null,
196
- "outputs": [
197
- {
198
- "output_type": "execute_result",
199
- "data": {
200
- "text/plain": [
201
- "5"
202
- ]
203
- },
204
- "metadata": {},
205
- "execution_count": 38
206
- }
207
- ]
208
- },
209
- {
210
- "cell_type": "markdown",
211
- "source": [
212
- "## Convert to Document"
213
- ],
214
- "metadata": {
215
- "id": "i5mCiRfGjfNx"
216
- }
217
- },
218
- {
219
- "cell_type": "code",
220
- "source": [
221
- "from llama_index.core.schema import Document\n",
222
- "\n",
223
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
224
- "documents = [Document(text=row['text'], metadata={\"title\": row['title'], \"url\": row['url']}) for row in pages_content]"
225
- ],
226
- "metadata": {
227
- "id": "TOJ3K-CBfVDR"
228
- },
229
- "execution_count": null,
230
- "outputs": []
231
- },
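From here, the scraped pages can be indexed and queried just like the CSV articles in the other notebooks. A minimal sketch (an illustrative continuation, assuming the `OPENAI_API_KEY` set earlier; the question is only an example):

```python
from llama_index.core import VectorStoreIndex

# Build an in-memory index over the scraped documentation pages and query it.
index = VectorStoreIndex.from_documents(documents)
res = index.as_query_engine().query("What are the key steps in building an LLM application?")
print(res.response)
```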
232
- {
233
- "cell_type": "markdown",
234
- "source": [
235
- "# 2. Submit the Crawler Job"
236
- ],
237
- "metadata": {
238
- "id": "CkjEyEmkJevT"
239
- }
240
- },
241
- {
242
- "cell_type": "code",
243
- "execution_count": null,
244
- "metadata": {
245
- "colab": {
246
- "base_uri": "https://localhost:8080/"
247
- },
248
- "id": "tYpchBo5-brp",
249
- "outputId": "927f84c5-c13a-408c-8802-df90bc05c733"
250
- },
251
- "outputs": [
252
- {
253
- "output_type": "stream",
254
- "name": "stdout",
255
- "text": [
256
- "{'org': '581', 'id': '7YE3T8VSPJVSCYE6EDQ90DJNFT', 'urls': ['https://docs.llamaindex.ai/en/stable/understanding/'], 'exclude_globs': [], 'exclude_elements': 'nav, header, footer, script, style, noscript, svg, [role=\"alert\"], [role=\"banner\"], [role=\"dialog\"], [role=\"alertdialog\"], [role=\"region\"][aria-label*=\"skip\" i], [aria-modal=\"true\"]', 'output_format': 'markdown', 'output_expiry': 604800, 'min_length': 50, 'page_limit': 10000, 'force_crawling_mode': 'link', 'block_resources': True, 'include_linked_files': False, 'createdAt': 1713883978029, 'status': 'starting', 'use_browser': True, 'sitemapPageCount': 0, 'notices': []}\n"
257
- ]
258
- }
259
- ],
260
- "source": [
261
- "import requests\n",
262
- "import json\n",
263
- "\n",
264
- "payload = {\n",
265
- " \"urls\": [\"https://docs.llamaindex.ai/en/stable/understanding/\"], # list of urls to crawl\n",
266
- " \"output_format\": \"markdown\", # text, html, markdown\n",
267
- " \"output_expiry\": 604800, # Automatically delete after X seconds\n",
268
- " \"min_length\": 50, # Skip pages with less than X characters\n",
269
- " \"page_limit\": 10000, # Maximum number of pages to crawl\n",
270
- " \"force_crawling_mode\": \"link\", # \"link\" follows links in the page reccursively, or \"sitemap\" to find pages from website's sitemap\n",
271
- " \"block_resources\": True, # skip loading images, stylesheets, or scripts\n",
272
- " \"include_linked_files\": False # include files (PDF, text, ...) in output\n",
273
- "}\n",
274
- "headers = {\n",
275
- " \"Authorization\": \"Bearer \" + USESCRAPER_API_KEY,\n",
276
- " \"Content-Type\": \"application/json\"\n",
277
- "}\n",
278
- "\n",
279
- "response = requests.request(\"POST\", \"https://api.usescraper.com/crawler/jobs\", json=payload, headers=headers)\n",
280
- "\n",
281
- "response = json.loads( response.text )\n",
282
- "\n",
283
- "print(response)"
284
- ]
285
- },
286
- {
287
- "cell_type": "markdown",
288
- "source": [
289
- "## Get the Status"
290
- ],
291
- "metadata": {
292
- "id": "nx_4MjHxJgxh"
293
- }
294
- },
295
- {
296
- "cell_type": "code",
297
- "source": [
298
- "url = \"https://api.usescraper.com/crawler/jobs/{}\".format(response['id'])\n",
299
- "\n",
300
- "status_res = requests.request(\"GET\", url, headers=headers)\n",
301
- "\n",
302
- "status_res = json.loads( status_res.text )\n",
303
- "\n",
304
- "print( status_res['status'] )\n",
305
- "print( status_res['progress'] )"
306
- ],
307
- "metadata": {
308
- "colab": {
309
- "base_uri": "https://localhost:8080/"
310
- },
311
- "id": "ZLJ0BUR8c1a8",
312
- "outputId": "cfd3aee9-68bf-4171-9340-abe2d03fa5ac"
313
- },
314
- "execution_count": null,
315
- "outputs": [
316
- {
317
- "output_type": "stream",
318
- "name": "stdout",
319
- "text": [
320
- "running\n",
321
- "{'scraped': 9, 'discarded': 0, 'failed': 0}\n"
322
- ]
323
- }
324
- ]
325
- },
326
- {
327
- "cell_type": "markdown",
328
- "source": [
329
- "## Get the Data"
330
- ],
331
- "metadata": {
332
- "id": "vHcRJIDsJh2i"
333
- }
334
- },
335
- {
336
- "cell_type": "code",
337
- "source": [
338
- "url = \"https://api.usescraper.com/crawler/jobs/{}/data\".format(response['id'])\n",
339
- "\n",
340
- "data_res = requests.request(\"GET\", url, headers=headers)\n",
341
- "\n",
342
- "data_res = json.loads( data_res.text )\n",
343
- "\n",
344
- "print( data_res )"
345
- ],
346
- "metadata": {
347
- "id": "J4dUn4cmGGab"
348
- },
349
- "execution_count": null,
350
- "outputs": []
351
- },
352
- {
353
- "cell_type": "code",
354
- "source": [
355
- "print( \"URL:\", data_res['data'][0]['meta']['url'] )\n",
356
- "print( \"Title:\", data_res['data'][0]['meta']['meta']['title'] )\n",
357
- "print( \"Content:\", data_res['data'][0]['text'][0:500], \"...\" )"
358
- ],
359
- "metadata": {
360
- "colab": {
361
- "base_uri": "https://localhost:8080/"
362
- },
363
- "id": "F8VEQvJkITLJ",
364
- "outputId": "b54ec108-7221-4230-8b61-d0a4be503a66"
365
- },
366
- "execution_count": null,
367
- "outputs": [
368
- {
369
- "output_type": "stream",
370
- "name": "stdout",
371
- "text": [
372
- "URL: https://docs.llamaindex.ai/en/stable/understanding/putting_it_all_together/graphs/\n",
373
- "Title: Knowledge Graphs - LlamaIndex\n",
374
- "Content: \n",
375
- "[ Skip to content ](https://docs.llamaindex.ai/en/stable/understanding/putting_it_all_together/graphs/#knowledge-graphs)\n",
376
- "#Knowledge Graphs[#](https://docs.llamaindex.ai/en/stable/understanding/putting_it_all_together/graphs/#knowledge-graphs)\n",
377
- "LlamaIndex contains some fantastic guides for building with knowledge graphs.\n",
378
- "\n",
379
- "Check out the end-to-end tutorials/workshops below. Also check out our [knowledge graph query engine guides](https://docs.llamaindex.ai/en/stable/module_guides/deploying/query_ ...\n"
380
- ]
381
- }
382
- ]
383
- },
384
- {
385
- "cell_type": "markdown",
386
- "source": [
387
- "## Convert to Document"
388
- ],
389
- "metadata": {
390
- "id": "rt2nyuLhSYLR"
391
- }
392
- },
393
- {
394
- "cell_type": "code",
395
- "source": [
396
- "from llama_index.core.schema import Document\n",
397
- "\n",
398
- "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
399
- "documents = [Document(text=row['text'], metadata={\"title\": row['meta']['meta']['title'], \"url\": row['meta']['url']}) for row in data_res['data']]"
400
- ],
401
- "metadata": {
402
- "id": "YEieGzSFSXas"
403
- },
404
- "execution_count": null,
405
- "outputs": []
406
- },
407
- {
408
- "cell_type": "markdown",
409
- "source": [
410
- "# Create RAG Pipeline"
411
- ],
412
- "metadata": {
413
- "id": "vqbJG5a1i3Jo"
414
- }
415
- },
416
- {
417
- "cell_type": "code",
418
- "source": [
419
- "from llama_index.llms.openai import OpenAI\n",
420
- "\n",
421
- "llm = OpenAI(model=\"gpt-3.5-turbo\")"
422
- ],
423
- "metadata": {
424
- "id": "wxmiQDv3SXV6"
425
- },
426
- "execution_count": null,
427
- "outputs": []
428
- },
429
- {
430
- "cell_type": "code",
431
- "source": [
432
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
433
- "\n",
434
- "embed_model = OpenAIEmbedding(model=\"text-embedding-3-large\")"
435
- ],
436
- "metadata": {
437
- "id": "tCVhv4OkSXTV"
438
- },
439
- "execution_count": null,
440
- "outputs": []
441
- },
442
- {
443
- "cell_type": "code",
444
- "source": [
445
- "from llama_index.core.node_parser import SentenceSplitter\n",
446
- "\n",
447
- "text_splitter = SentenceSplitter(chunk_size=512, chunk_overlap=30)"
448
- ],
449
- "metadata": {
450
- "id": "quwJI61dNVr-"
451
- },
452
- "execution_count": null,
453
- "outputs": []
454
- },
455
- {
456
- "cell_type": "code",
457
- "source": [
458
- "from llama_index.core import Settings\n",
459
- "\n",
460
- "Settings.llm = llm\n",
461
- "Settings.embed_model = embed_model\n",
462
- "Settings.text_splitter = text_splitter"
463
- ],
464
- "metadata": {
465
- "id": "6KpeCRMBUgup"
466
- },
467
- "execution_count": null,
468
- "outputs": []
469
- },
470
- {
471
- "cell_type": "code",
472
- "source": [
473
- "from llama_index.core import VectorStoreIndex\n",
474
- "\n",
475
- "index = VectorStoreIndex.from_documents( documents )"
476
- ],
477
- "metadata": {
478
- "id": "nWTBidwoZSO0"
479
- },
480
- "execution_count": null,
481
- "outputs": []
482
- },
483
- {
484
- "cell_type": "code",
485
- "source": [
486
- "query_engine = index.as_query_engine()"
487
- ],
488
- "metadata": {
489
- "id": "RUuJO0IIYSeU"
490
- },
491
- "execution_count": null,
492
- "outputs": []
493
- },
494
- {
495
- "cell_type": "code",
496
- "source": [
497
- "res = query_engine.query(\"What is a query engine?\")"
498
- ],
499
- "metadata": {
500
- "id": "6_s2LkH6YX1V"
501
- },
502
- "execution_count": null,
503
- "outputs": []
504
- },
505
- {
506
- "cell_type": "code",
507
- "source": [
508
- "res.response"
509
- ],
510
- "metadata": {
511
- "colab": {
512
- "base_uri": "https://localhost:8080/",
513
- "height": 71
514
- },
515
- "id": "02zdJNqIZKep",
516
- "outputId": "76340610-0d98-4fd0-d237-ddb9f1752391"
517
- },
518
- "execution_count": null,
519
- "outputs": [
520
- {
521
- "output_type": "execute_result",
522
- "data": {
523
- "text/plain": [
524
- "'A query engine is a fundamental component used in querying processes. It is responsible for retrieving the most relevant documents from an index based on a query, postprocessing the retrieved nodes if needed, and then synthesizing a response by combining the query, relevant data, and prompt to be sent to the language model for generating an answer.'"
525
- ],
526
- "application/vnd.google.colaboratory.intrinsic+json": {
527
- "type": "string"
528
- }
529
- },
530
- "metadata": {},
531
- "execution_count": 28
532
- }
533
- ]
534
- },
535
- {
536
- "cell_type": "code",
537
- "source": [
538
- "# Show the retrieved nodes\n",
539
- "for src in res.source_nodes:\n",
540
- " print(\"Node ID\\t\", src.node_id)\n",
541
- " print(\"Title\\t\", src.metadata['title'])\n",
542
- " print(\"URL\\t\", src.metadata['url'])\n",
543
- " print(\"Score\\t\", src.score)\n",
544
- " print(\"-_\"*20)"
545
- ],
546
- "metadata": {
547
- "colab": {
548
- "base_uri": "https://localhost:8080/"
549
- },
550
- "id": "PuCcgP0nZSIl",
551
- "outputId": "e136cdbb-2ee4-4dfb-f532-f6c9365e519e"
552
- },
553
- "execution_count": null,
554
- "outputs": [
555
- {
556
- "output_type": "stream",
557
- "name": "stdout",
558
- "text": [
559
- "Node ID\t 081b6c8c-d9ea-4476-bac0-1008facd3db8\n",
560
- "Title\t Querying - LlamaIndex\n",
561
- "URL\t https://docs.llamaindex.ai/en/stable/understanding/querying/querying/\n",
562
- "Score\t 0.46212738505767387\n",
563
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
564
- "Node ID\t 3786c195-c5de-4bba-98b6-996031349a88\n",
565
- "Title\t Querying - LlamaIndex\n",
566
- "URL\t https://docs.llamaindex.ai/en/stable/understanding/querying/querying/\n",
567
- "Score\t 0.43141762602042416\n",
568
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
569
- ]
570
- }
571
- ]
572
- }
573
- ]
574
- }
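
A note on the crawler flow above: the status cell polls the job once by hand. Below is a minimal polling sketch, assuming the same usescraper.com endpoint and headers used in the cells above; the 30-second interval is an arbitrary choice.

import time
import requests

def wait_for_crawl(job_id, headers, interval=30):
    # Poll the crawler job until it leaves the "starting"/"running" states.
    url = "https://api.usescraper.com/crawler/jobs/{}".format(job_id)
    while True:
        status = requests.get(url, headers=headers).json()
        print(status["status"], status.get("progress"))
        if status["status"] not in ("starting", "running"):
            return status
        time.sleep(interval)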
notebooks/Web_Search_API.ipynb DELETED
@@ -1,491 +0,0 @@
1
- {
2
- "nbformat": 4,
3
- "nbformat_minor": 0,
4
- "metadata": {
5
- "colab": {
6
- "provenance": [],
7
- "authorship_tag": "ABX9TyNH2OsWaT8fcT3tgDhO3NQn",
8
- "include_colab_link": true
9
- },
10
- "kernelspec": {
11
- "name": "python3",
12
- "display_name": "Python 3"
13
- },
14
- "language_info": {
15
- "name": "python"
16
- }
17
- },
18
- "cells": [
19
- {
20
- "cell_type": "markdown",
21
- "metadata": {
22
- "id": "view-in-github",
23
- "colab_type": "text"
24
- },
25
- "source": [
26
- "<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/Web_Search_API.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
27
- ]
28
- },
29
- {
30
- "cell_type": "code",
31
- "execution_count": null,
32
- "metadata": {
33
- "colab": {
34
- "base_uri": "https://localhost:8080/"
35
- },
36
- "id": "JboB5VaCJUrb",
37
- "outputId": "b7221d06-8783-4586-f98a-72af45cae54f"
38
- },
39
- "outputs": [
40
- {
41
- "output_type": "stream",
42
- "name": "stdout",
43
- "text": [
44
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m211.1/211.1 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
45
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m81.3/81.3 kB\u001b[0m \u001b[31m8.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
46
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m97.6/97.6 kB\u001b[0m \u001b[31m10.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
47
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
48
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m7.4/7.4 MB\u001b[0m \u001b[31m24.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
49
- "\u001b[?25h Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
50
- " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
51
- " Preparing metadata (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
52
- " Building wheel for tinysegmenter (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
53
- " Building wheel for feedfinder2 (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
54
- " Building wheel for jieba3k (setup.py) ... \u001b[?25l\u001b[?25hdone\n",
55
- " Building wheel for sgmllib3k (setup.py) ... \u001b[?25l\u001b[?25hdone\n"
56
- ]
57
- }
58
- ],
59
- "source": [
60
- "!pip install -q llama-index==0.10.5 openai==1.12.0 tiktoken==0.6.0 llama-index-tools-google==0.1.3 newspaper3k==0.2.8"
61
- ]
62
- },
63
- {
64
- "cell_type": "code",
65
- "source": [
66
- "import os\n",
67
- "\n",
68
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
69
- "os.environ[\"OPENAI_API_KEY\"] = \"[OPENAI_API_KEY]\"\n",
70
- "GOOGLE_SEARCH_KEY = \"[GOOGLE_SEARCH_KEY]\"\n",
71
- "GOOGLE_SEARCH_ENGINE = \"[GOOGLE_SEARCH_ENGINE]\""
72
- ],
73
- "metadata": {
74
- "id": "1NKAn5scN_g9"
75
- },
76
- "execution_count": null,
77
- "outputs": []
78
- },
79
- {
80
- "cell_type": "markdown",
81
- "source": [
82
- "# Using Agents/Tools"
83
- ],
84
- "metadata": {
85
- "id": "ex1gQVHvITMI"
86
- }
87
- },
88
- {
89
- "cell_type": "markdown",
90
- "source": [
91
- "## Define Google Search Tool"
92
- ],
93
- "metadata": {
94
- "id": "0LMypoqUyuXq"
95
- }
96
- },
97
- {
98
- "cell_type": "code",
99
- "source": [
100
- "from llama_index.tools.google import GoogleSearchToolSpec\n",
101
- "\n",
102
- "tool_spec = GoogleSearchToolSpec(key=GOOGLE_SEARCH_KEY, engine=GOOGLE_SEARCH_ENGINE)"
103
- ],
104
- "metadata": {
105
- "id": "4Q7sc69nJvWI"
106
- },
107
- "execution_count": null,
108
- "outputs": []
109
- },
110
- {
111
- "cell_type": "code",
112
- "source": [
113
- "# Import and initialize our tool spec\n",
114
- "from llama_index.core.tools.tool_spec.load_and_search import LoadAndSearchToolSpec\n",
115
- "\n",
116
- "# Wrap the google search tool to create an index on top of the returned Google search\n",
117
- "wrapped_tool = LoadAndSearchToolSpec.from_defaults(\n",
118
- " tool_spec.to_tool_list()[0],\n",
119
- ").to_tool_list()"
120
- ],
121
- "metadata": {
122
- "id": "VrbuIOaMeOIf"
123
- },
124
- "execution_count": null,
125
- "outputs": []
126
- },
127
- {
128
- "cell_type": "markdown",
129
- "source": [
130
- "## Create the Agent"
131
- ],
132
- "metadata": {
133
- "id": "T3ENpLyBy7UL"
134
- }
135
- },
136
- {
137
- "cell_type": "code",
138
- "source": [
139
- "from llama_index.agent.openai import OpenAIAgent\n",
140
- "\n",
141
- "agent = OpenAIAgent.from_tools(wrapped_tool, verbose=False)"
142
- ],
143
- "metadata": {
144
- "id": "-_Ab47ppK8b2"
145
- },
146
- "execution_count": null,
147
- "outputs": []
148
- },
149
- {
150
- "cell_type": "code",
151
- "source": [
152
- "res = agent.chat(\"How many parameters LLaMA2 model has?\")"
153
- ],
154
- "metadata": {
155
- "id": "YcUyz1-FlCQ8"
156
- },
157
- "execution_count": null,
158
- "outputs": []
159
- },
160
- {
161
- "cell_type": "code",
162
- "source": [
163
- "res.response"
164
- ],
165
- "metadata": {
166
- "colab": {
167
- "base_uri": "https://localhost:8080/",
168
- "height": 35
169
- },
170
- "id": "w4wK5sY-lOOv",
171
- "outputId": "8090a106-6fac-4514-fdbd-c72a01b28169"
172
- },
173
- "execution_count": null,
174
- "outputs": [
175
- {
176
- "output_type": "execute_result",
177
- "data": {
178
- "text/plain": [
179
- "'The LLaMA2 model has parameters available in three different sizes: 7 billion, 13 billion, and 70 billion.'"
180
- ],
181
- "application/vnd.google.colaboratory.intrinsic+json": {
182
- "type": "string"
183
- }
184
- },
185
- "metadata": {},
186
- "execution_count": 72
187
- }
188
- ]
189
- },
190
- {
191
- "cell_type": "code",
192
- "source": [
193
- "res.sources"
194
- ],
195
- "metadata": {
196
- "colab": {
197
- "base_uri": "https://localhost:8080/"
198
- },
199
- "id": "TM_cvBA1nTJM",
200
- "outputId": "0bf3533a-c62d-4d0d-bd76-76c043477042"
201
- },
202
- "execution_count": null,
203
- "outputs": [
204
- {
205
- "output_type": "execute_result",
206
- "data": {
207
- "text/plain": [
208
- "[ToolOutput(content='Content loaded! You can now search the information using read_google_search', tool_name='google_search', raw_input={'args': (), 'kwargs': {'query': 'parameters of LLaMA2 model'}}, raw_output='Content loaded! You can now search the information using read_google_search', is_error=False),\n",
209
- " ToolOutput(content='Answer: The parameters of the LLaMA2 model are available in three different sizes: 7 billion, 13 billion, and 70 billion.', tool_name='read_google_search', raw_input={'args': (), 'kwargs': {'query': 'parameters of LLaMA2 model'}}, raw_output='Answer: The parameters of the LLaMA2 model are available in three different sizes: 7 billion, 13 billion, and 70 billion.', is_error=False)]"
210
- ]
211
- },
212
- "metadata": {},
213
- "execution_count": 73
214
- }
215
- ]
216
- },
217
- {
218
- "cell_type": "markdown",
219
- "source": [
220
- "# Using Tools w/ VectorStoreIndex"
221
- ],
222
- "metadata": {
223
- "id": "who-NM4pIhPn"
224
- }
225
- },
226
- {
227
- "cell_type": "markdown",
228
- "source": [
229
- "A limitation of the current agent/tool in LlamaIndex is that it **relies solely on the page description from the retrieved pages** to answer questions. This approach will miss answers that are not visible in the page's description tag. To address this, a possible workaround is to fetch the page results, extract the page content using the newspaper3k library, and then create an index based on the downloaded content. Also, the previous method stacks all retrieved items from the search engine into a single document, making it **difficult to pinpoint the exact source** of the response. However, the following method will enable us to present the sources easily."
230
- ],
231
- "metadata": {
232
- "id": "9g9cTM9GI-19"
233
- }
234
- },
235
- {
236
- "cell_type": "markdown",
237
- "source": [
238
- "## Define Google Search Tool"
239
- ],
240
- "metadata": {
241
- "id": "31G_fxxJIsbC"
242
- }
243
- },
244
- {
245
- "cell_type": "code",
246
- "source": [
247
- "from llama_index.tools.google import GoogleSearchToolSpec\n",
248
- "\n",
249
- "tool_spec = GoogleSearchToolSpec(key=GOOGLE_SEARCH_KEY, engine=GOOGLE_SEARCH_ENGINE)"
250
- ],
251
- "metadata": {
252
- "id": "lwRmj2odIHxt"
253
- },
254
- "execution_count": null,
255
- "outputs": []
256
- },
257
- {
258
- "cell_type": "code",
259
- "source": [
260
- "search_results = tool_spec.google_search(\"LLaMA2 model details\")"
261
- ],
262
- "metadata": {
263
- "id": "UVIxdj04Bsf2"
264
- },
265
- "execution_count": null,
266
- "outputs": []
267
- },
268
- {
269
- "cell_type": "code",
270
- "source": [
271
- "import json\n",
272
- "\n",
273
- "search_results = json.loads( search_results[0].text )"
274
- ],
275
- "metadata": {
276
- "id": "AlYDNfg2BsdQ"
277
- },
278
- "execution_count": null,
279
- "outputs": []
280
- },
281
- {
282
- "cell_type": "markdown",
283
- "source": [
284
- "## Read Each URL Contents"
285
- ],
286
- "metadata": {
287
- "id": "pHALd3uhIxtQ"
288
- }
289
- },
290
- {
291
- "cell_type": "code",
292
- "source": [
293
- "import newspaper\n",
294
- "pages_content = []\n",
295
- "\n",
296
- "for item in search_results['items']:\n",
297
- "\n",
298
- " try:\n",
299
- " article = newspaper.Article( item['link'] )\n",
300
- " article.download()\n",
301
- " article.parse()\n",
302
- " if len(article.text) > 0:\n",
303
- " pages_content.append({ \"url\": item['link'], \"text\": article.text, \"title\": item['title'] })\n",
304
- " except:\n",
305
- " continue\n",
306
- "\n",
307
- "print(len(pages_content))"
308
- ],
309
- "metadata": {
310
- "colab": {
311
- "base_uri": "https://localhost:8080/"
312
- },
313
- "id": "jXz3JFduBsaq",
314
- "outputId": "1b795423-26a6-4a61-a878-cca5e27dd5d1"
315
- },
316
- "execution_count": null,
317
- "outputs": [
318
- {
319
- "output_type": "stream",
320
- "name": "stdout",
321
- "text": [
322
- "8\n"
323
- ]
324
- }
325
- ]
326
- },
327
- {
328
- "cell_type": "markdown",
329
- "source": [
330
- "## Create the Index"
331
- ],
332
- "metadata": {
333
- "id": "iqxa_qRVI3G0"
334
- }
335
- },
336
- {
337
- "cell_type": "code",
338
- "source": [
339
- "from llama_index.core import Document\n",
340
- "\n",
341
- "# Convert the texts to Document objects so the LlamaIndex framework can process them.\n",
342
- "documents = [Document(text=row[\"text\"], metadata={\"title\": row[\"title\"], \"url\": row[\"url\"]}) for row in pages_content]"
343
- ],
344
- "metadata": {
345
- "id": "O4PkK8DuBsZT"
346
- },
347
- "execution_count": null,
348
- "outputs": []
349
- },
350
- {
351
- "cell_type": "code",
352
- "source": [
353
- "from llama_index.core import VectorStoreIndex\n",
354
- "from llama_index.core.node_parser import SentenceSplitter\n",
355
- "\n",
356
- "# Build index / generate embeddings using OpenAI.\n",
357
- "index = VectorStoreIndex.from_documents(\n",
358
- " documents,\n",
359
- " transformations=[SentenceSplitter(chunk_size=512, chunk_overlap=64)],\n",
360
- ")"
361
- ],
362
- "metadata": {
363
- "id": "2RtMBWpgBsWX"
364
- },
365
- "execution_count": null,
366
- "outputs": []
367
- },
368
- {
369
- "cell_type": "code",
370
- "source": [
371
- "# Define a query engine that is responsible for retrieving related pieces of text,\n",
372
- "# and using a LLM to formulate the final answer.\n",
373
- "query_engine = index.as_query_engine()"
374
- ],
375
- "metadata": {
376
- "id": "xV_ibEZ_BsM4"
377
- },
378
- "execution_count": null,
379
- "outputs": []
380
- },
381
- {
382
- "cell_type": "markdown",
383
- "source": [
384
- "## Query"
385
- ],
386
- "metadata": {
387
- "id": "nziwu27MI6ih"
388
- }
389
- },
390
- {
391
- "cell_type": "code",
392
- "source": [
393
- "response = query_engine.query(\n",
394
- " \"How many parameters LLaMA2 model has?\"\n",
395
- ")\n",
396
- "print(response)"
397
- ],
398
- "metadata": {
399
- "colab": {
400
- "base_uri": "https://localhost:8080/"
401
- },
402
- "id": "5K1h2_t-HNPe",
403
- "outputId": "58ce5d66-eddc-43fe-e7c8-d78bc0cb8c32"
404
- },
405
- "execution_count": null,
406
- "outputs": [
407
- {
408
- "output_type": "stream",
409
- "name": "stdout",
410
- "text": [
411
- "LLaMA2 model has sizes ranging from 7 to 70 billion parameters.\n"
412
- ]
413
- }
414
- ]
415
- },
416
- {
417
- "cell_type": "code",
418
- "source": [
419
- "response = query_engine.query(\n",
420
- " \"How many parameters LLaMA2 model has? list exact sizes.\"\n",
421
- ")\n",
422
- "print(response)"
423
- ],
424
- "metadata": {
425
- "colab": {
426
- "base_uri": "https://localhost:8080/"
427
- },
428
- "id": "Xea7ZeidH27i",
429
- "outputId": "d455c379-9c91-4c9e-e9c1-6bd2deb7342e"
430
- },
431
- "execution_count": null,
432
- "outputs": [
433
- {
434
- "output_type": "stream",
435
- "name": "stdout",
436
- "text": [
437
- "The LLaMA2 model comes in several sizes with different numbers of parameters:\n",
438
- "- LLaMA2 7B\n",
439
- "- LLaMA2 13B\n",
440
- "- LLaMA2 33B\n",
441
- "- LLaMA2 65B\n"
442
- ]
443
- }
444
- ]
445
- },
446
- {
447
- "cell_type": "code",
448
- "source": [
449
- "# Show the retrieved nodes\n",
450
- "for src in response.source_nodes:\n",
451
- " print(\"Title\\t\", src.metadata['title'])\n",
452
- " print(\"Source\\t\", src.metadata['url'])\n",
453
- " print(\"Score\\t\", src.score)\n",
454
- " print(\"-_\"*20)"
455
- ],
456
- "metadata": {
457
- "colab": {
458
- "base_uri": "https://localhost:8080/"
459
- },
460
- "id": "4QpGPD5nHORP",
461
- "outputId": "8f9fc185-7745-4357-8471-25d34726cdd8"
462
- },
463
- "execution_count": null,
464
- "outputs": [
465
- {
466
- "output_type": "stream",
467
- "name": "stdout",
468
- "text": [
469
- "Title\t Introducing LLaMA: A foundational, 65-billion-parameter language ...\n",
470
- "Source\t https://ai.meta.com/blog/large-language-model-llama-meta-ai/\n",
471
- "Score\t 0.8124383491026671\n",
472
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
473
- "Title\t Llama 2 follow-up: too much RLHF, GPU sizing, technical details\n",
474
- "Source\t https://www.interconnects.ai/p/llama-2-part-2\n",
475
- "Score\t 0.8046542892214631\n",
476
- "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
477
- ]
478
- }
479
- ]
480
- },
481
- {
482
- "cell_type": "code",
483
- "source": [],
484
- "metadata": {
485
- "id": "B5b4nZ-qHpdP"
486
- },
487
- "execution_count": null,
488
- "outputs": []
489
- }
490
- ]
491
- }
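
The index built from full page contents can also be handed back to an agent, combining the two approaches in this notebook. A short sketch reusing the query_engine built above; the tool name and description are illustrative, not part of the original notebook.

from llama_index.core.tools import QueryEngineTool, ToolMetadata
from llama_index.agent.openai import OpenAIAgent

# Wrap the page-content index as an agent tool so answers can cite exact pages
# instead of relying on search-result descriptions.
web_index_tool = QueryEngineTool(
    query_engine=query_engine,
    metadata=ToolMetadata(
        name="web_search_index",
        description="Answers questions using the full text of fetched web pages.",
    ),
)
agent = OpenAIAgent.from_tools([web_index_tool], verbose=False)
res = agent.chat("How many parameters does the LLaMA2 model have?")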
requirements.txt DELETED
@@ -1,18 +0,0 @@
1
- openai
2
- llama-index
3
- llama-index-vector-stores-chroma
4
- pydantic
5
- numpy
6
- cohere
7
- tiktoken
8
- chromadb
9
- kaleido
10
- python-multipart
11
- html2text
12
- sentence_transformers
13
- ipykernel
14
- gradio
15
- instructor
16
- pydantic
17
- pyarrow
18
- pymongo
scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/data_level0.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c166eddba3fea5db2fd56f15c5b6f636af05cfd21c53a51dbd5aba2b7f321443
3
- size 323128000
 
 
 
 
scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/header.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:769f783936cbbc7bfe3e10ceeceb7a61217ec660c248538b2a39b593a6fa3597
3
- size 100
 
 
 
 
scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/index_metadata.pickle DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:c0f53b339a58fbc4ded2e3fd27b34e79b134f170e0ba38128999fd98d5586287
3
- size 1506322
 
 
 
 
scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/length.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:80e4ff7f48d82ec77e97e1fdf0e996055b3c8ecd928849747ba4e5342e3818e6
3
- size 104000
 
 
 
 
scripts/ai-tutor-db/898a7730-07aa-44c2-bc54-c45631b1ecb5/link_lists.bin DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:8be7d69cc54af074ab12a38919c770733923fe9d52194f0c4205833b0ff8a0ad
3
- size 227420
 
 
 
 
scripts/ai-tutor-db/chroma.sqlite3 DELETED
@@ -1,3 +0,0 @@
1
- version https://git-lfs.github.com/spec/v1
2
- oid sha256:d8d6014251cd1594dcc634e036a89915ce978424d45e29cd9486b15fca77f4c6
3
- size 594399232
 
 
 
 
scripts/basic_tutor.py DELETED
@@ -1,60 +0,0 @@
1
- import sys
2
- import os
3
- from openai import OpenAI
4
-
5
- # Retrieve your OpenAI API key from the environment variables and initialize the OpenAI client
6
- openai_api_key = os.environ.get("OPENAI_API_KEY")
7
- client = OpenAI(api_key=openai_api_key)
8
-
9
-
10
- def ask_ai_tutor(question):
11
-
12
- # Check if OpenAI key has been correctly added
13
- if not openai_api_key:
14
- return "OpenAI API key not found in environment variables."
15
-
16
- try:
17
-
18
- # Formulating the system prompt
19
- system_prompt = (
20
- "You are an AI tutor specialized in answering artificial intelligence-related questions. "
21
- "Only answer AI-related question, else say that you cannot answer this question."
22
- )
23
-
24
- # Combining the system prompt with the user's question
25
- prompt = f"Please provide an informative and accurate answer to the following question.\nQuestion: {question}\nAnswer:"
26
-
27
- # Call the OpenAI API
28
- response = client.chat.completions.create(
29
- model="gpt-3.5-turbo-0125",
30
- messages=[
31
- {"role": "system", "content": system_prompt},
32
- {"role": "user", "content": prompt},
33
- ],
34
- )
35
-
36
- # Return the AI's response
37
- return response.choices[0].message.content.strip()
38
-
39
- except Exception as e:
40
- return f"An error occurred: {e}"
41
-
42
-
43
- def main():
44
- # Check if a question was provided as a command-line argument
45
- if len(sys.argv) != 2:
46
- print("Usage: python script_name.py 'Your AI-related question'")
47
- sys.exit(1)
48
-
49
- # The user's question is the first command-line argument
50
- user_question = sys.argv[1]
51
-
52
- # Get the AI's response
53
- ai_response = ask_ai_tutor(user_question)
54
-
55
- # Print the AI's response
56
- print(f"AI Tutor says: {ai_response}")
57
-
58
-
59
- if __name__ == "__main__":
60
- main()
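
A minimal usage sketch for the script above (assumes OPENAI_API_KEY is exported and the script is run from, or importable in, the scripts directory; the question is illustrative):

# From the command line:
#   python basic_tutor.py "What is a transformer model?"
# Or programmatically:
from basic_tutor import ask_ai_tutor

print(ask_ai_tutor("What is a transformer model?"))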
scripts/call_openai.py DELETED
@@ -1,79 +0,0 @@
1
- import os
2
- import logging
3
-
4
- import instructor
5
- import openai
6
- from openai import OpenAI, AsyncOpenAI
7
- from dotenv import load_dotenv
8
-
9
- logger = logging.getLogger(__name__)
10
- logging.basicConfig(level=logging.INFO)
11
-
12
- load_dotenv(".env")
13
- OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
14
-
15
-
16
- def api_function_call(
17
- system_message,
18
- query: str,
19
- model: str = "gpt-4-0125-preview",
20
- response_model=None,
21
- max_retries: int = 0,
22
- stream: bool = False,
23
- ):
24
-
25
- client = instructor.patch(OpenAI())
26
- try:
27
- message_data = {
28
- "model": model,
29
- "messages": [
30
- {"role": "system", "content": system_message},
31
- {"role": "user", "content": query},
32
- ],
33
- "max_retries": max_retries,
34
- "stream": stream,
35
- }
36
- if response_model is not None:
37
- message_data["response_model"] = response_model
38
-
39
- response = client.chat.completions.create(**message_data)
40
- error = False
41
-
42
- except openai.BadRequestError:
43
- error = True
44
- logger.exception("Invalid request to OpenAI API. See traceback:")
45
- error_message = (
46
- "Something went wrong while connecting with OpenAI, try again soon!"
47
- )
48
- return error_message, error
49
-
50
- except openai.RateLimitError:
51
- error = True
52
- logger.exception("RateLimit error from OpenAI. See traceback:")
53
- error_message = "OpenAI servers seem to be overloaded, try again later!"
54
- return error_message, error
55
-
56
- except Exception as e:
57
- error = True
58
- logger.exception(
59
- "Some kind of error happened trying to generate the response. See traceback:"
60
- )
61
- error_message = (
62
- "Something went wrong with connecting with OpenAI, try again soon!"
63
- )
64
- return error_message, error
65
-
66
- if stream is True and response_model is None:
67
-
68
- def answer_generator():
69
- for chunk in response:
70
- token = chunk.choices[0].delta.content
71
-
72
- token = "" if token is None else token
73
-
74
- yield token
75
-
76
- return answer_generator(), error
77
-
78
- else:
79
- return response, error
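
A sketch of how the streaming path of api_function_call above is meant to be consumed (assumes a valid OPENAI_API_KEY in .env; the messages are illustrative):

from call_openai import api_function_call

# With stream=True and no response_model, the first return value is a token generator.
response, error = api_function_call(
    system_message="You are a concise AI tutor.",
    query="What is an embedding?",
    stream=True,
)
if not error:
    for token in response:
        print(token, end="", flush=True)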
scripts/create_db.ipynb DELETED
@@ -1,380 +0,0 @@
1
- {
2
- "cells": [
3
- {
4
- "cell_type": "markdown",
5
- "metadata": {},
6
- "source": [
7
- "# Create AI-Tutor vector database"
8
- ]
9
- },
10
- {
11
- "cell_type": "code",
12
- "execution_count": null,
13
- "metadata": {},
14
- "outputs": [],
15
- "source": [
16
- "import os\n",
17
- "\n",
18
- "# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
19
- "os.environ[\"OPENAI_API_KEY\"] = \"sk-...\""
20
- ]
21
- },
22
- {
23
- "cell_type": "code",
24
- "execution_count": null,
25
- "metadata": {},
26
- "outputs": [],
27
- "source": [
28
- "import nest_asyncio\n",
29
- "\n",
30
- "nest_asyncio.apply()"
31
- ]
32
- },
33
- {
34
- "cell_type": "code",
35
- "execution_count": null,
36
- "metadata": {},
37
- "outputs": [],
38
- "source": [
39
- "import chromadb\n",
40
- "\n",
41
- "# create client and a new collection\n",
42
- "# chromadb.EphemeralClient saves data in-memory.\n",
43
- "chroma_client = chromadb.PersistentClient(path=\"./ai-tutor-db\")\n",
44
- "chroma_collection = chroma_client.create_collection(\"ai-tutor-db\")"
45
- ]
46
- },
47
- {
48
- "cell_type": "code",
49
- "execution_count": null,
50
- "metadata": {},
51
- "outputs": [],
52
- "source": [
53
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
54
- "from llama_index.core import StorageContext\n",
55
- "\n",
56
- "# Define a storage context object using the created vector database.\n",
57
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
58
- "storage_context = StorageContext.from_defaults(vector_store=vector_store)"
59
- ]
60
- },
61
- {
62
- "cell_type": "code",
63
- "execution_count": null,
64
- "metadata": {},
65
- "outputs": [],
66
- "source": [
67
- "import json\n",
68
- "from llama_index.core.schema import TextNode\n",
69
- "\n",
70
- "\n",
71
- "def load_jsonl_create_nodes(filepath):\n",
72
- " nodes = [] # List to hold the created node objects\n",
73
- " with open(filepath, \"r\") as file:\n",
74
- " for line in file:\n",
75
- " # Load each line as a JSON object\n",
76
- " json_obj = json.loads(line)\n",
77
- " # Extract required information\n",
78
- " title = json_obj.get(\"title\")\n",
79
- " url = json_obj.get(\"url\")\n",
80
- " content = json_obj.get(\"content\")\n",
81
- " source = json_obj.get(\"source\")\n",
82
- " # Create a TextNode object and append to the list\n",
83
- " node = TextNode(\n",
84
- " text=content,\n",
85
- " metadata={\"title\": title, \"url\": url, \"source\": source},\n",
86
- " excluded_embed_metadata_keys=[\"title\", \"url\", \"source\"],\n",
87
- " excluded_llm_metadata_keys=[\"title\", \"url\", \"source\"],\n",
88
- " )\n",
89
- " nodes.append(node)\n",
90
- " return nodes"
91
- ]
92
- },
93
- {
94
- "cell_type": "code",
95
- "execution_count": null,
96
- "metadata": {},
97
- "outputs": [],
98
- "source": [
99
- "filepath = \"../data/ai-tutor-csv-files/combined_data_lines.jsonl\"\n",
100
- "nodes = load_jsonl_create_nodes(filepath)\n",
101
- "\n",
102
- "print(f\"Loaded {len(nodes)} nodes/chunks from the JSONL file\\n \")\n",
103
- "\n",
104
- "node = nodes[0]\n",
105
- "print(f\"ID: {node.id_} \\nText: {node.text}, \\nMetadata: {node.metadata}\")\n",
106
- "\n",
107
- "print(\"\\n\")\n",
108
- "\n",
109
- "node = nodes[-10000]\n",
110
- "print(f\"ID: {node.id_} \\nText: {node.text}, \\nMetadata: {node.metadata}\")"
111
- ]
112
- },
113
- {
114
- "cell_type": "code",
115
- "execution_count": null,
116
- "metadata": {},
117
- "outputs": [],
118
- "source": [
119
- "# # Create the pipeline to apply the transformation on each chunk,\n",
120
- "# # and store the transformed text in the chroma vector store.\n",
121
- "# pipeline = IngestionPipeline(\n",
122
- "# transformations=[\n",
123
- "# text_splitter,\n",
124
- "# QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
125
- "# SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
126
- "# KeywordExtractor(keywords=10, llm=llm),\n",
127
- "# OpenAIEmbedding(),\n",
128
- "# ],\n",
129
- "# vector_store=vector_store\n",
130
- "# )\n",
131
- "\n",
132
- "# nodes = pipeline.run(documents=documents, show_progress=True);"
133
- ]
134
- },
135
- {
136
- "cell_type": "code",
137
- "execution_count": null,
138
- "metadata": {},
139
- "outputs": [],
140
- "source": [
141
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
142
- "from llama_index.core import VectorStoreIndex\n",
143
- "\n",
144
- "# embeds = OpenAIEmbedding(model=\"text-embedding-3-small\", mode=\"similarity\")\n",
145
- "# embeds = OpenAIEmbedding(model=\"text-embedding-3-large\", mode=\"similarity\")\n",
146
- "embeds = OpenAIEmbedding(model=\"text-embedding-3-large\", mode=\"text_search\")\n",
147
- "# embeds = OpenAIEmbedding(model=\"text-embedding-ada-002\", mode=\"similarity\")\n",
148
- "\n",
149
- "# Build index / generate embeddings using OpenAI.\n",
150
- "index = VectorStoreIndex(nodes=nodes, show_progress=True, use_async=True, storage_context=storage_context, embed_model=embeds, insert_batch_size=3000,)"
151
- ]
152
- },
153
- {
154
- "cell_type": "code",
155
- "execution_count": null,
156
- "metadata": {},
157
- "outputs": [],
158
- "source": [
159
- "from llama_index.llms.openai import OpenAI\n",
160
- "\n",
161
- "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=None)\n",
162
- "query_engine = index.as_query_engine(llm=llm, similarity_top_k=5, embed_model=embeds)"
163
- ]
164
- },
165
- {
166
- "cell_type": "code",
167
- "execution_count": null,
168
- "metadata": {},
169
- "outputs": [],
170
- "source": [
171
- "res = query_engine.query(\"What is the LLama model?\")"
172
- ]
173
- },
174
- {
175
- "cell_type": "code",
176
- "execution_count": null,
177
- "metadata": {},
178
- "outputs": [],
179
- "source": [
180
- "res.response"
181
- ]
182
- },
183
- {
184
- "cell_type": "code",
185
- "execution_count": null,
186
- "metadata": {},
187
- "outputs": [],
188
- "source": [
189
- "for src in res.source_nodes:\n",
190
- " print(\"Node ID\\t\", src.node_id)\n",
191
- " print(\"Title\\t\", src.metadata['title'])\n",
192
- " print(\"Text\\t\", src.text)\n",
193
- " print(\"Score\\t\", src.score)\n",
194
- " print(\"Metadata\\t\", src.metadata) \n",
195
- " print(\"-_\"*20)"
196
- ]
197
- },
198
- {
199
- "cell_type": "markdown",
200
- "metadata": {},
201
- "source": [
202
- "# Load DB from disk"
203
- ]
204
- },
205
- {
206
- "cell_type": "code",
207
- "execution_count": null,
208
- "metadata": {},
209
- "outputs": [],
210
- "source": [
211
- "import chromadb\n",
212
- "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
213
- "# Create your index\n",
214
- "db2 = chromadb.PersistentClient(path=\"./ai-tutor-db\")\n",
215
- "chroma_collection = db2.get_or_create_collection(\"ai-tutor-db\")\n",
216
- "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
217
- ]
218
- },
219
- {
220
- "cell_type": "code",
221
- "execution_count": null,
222
- "metadata": {},
223
- "outputs": [],
224
- "source": [
225
- "# Create your index\n",
226
- "from llama_index.core import VectorStoreIndex\n",
227
- "index = VectorStoreIndex.from_vector_store(vector_store=vector_store)"
228
- ]
229
- },
230
- {
231
- "cell_type": "code",
232
- "execution_count": null,
233
- "metadata": {},
234
- "outputs": [],
235
- "source": [
236
- "from llama_index.embeddings.openai import OpenAIEmbedding\n",
237
- "from llama_index.llms.openai import OpenAI\n",
238
- "from llama_index.core.vector_stores import (\n",
239
- " ExactMatchFilter,\n",
240
- " MetadataFilters,\n",
241
- " MetadataFilter,\n",
242
- " FilterOperator,\n",
243
- " FilterCondition,\n",
244
- ")\n",
245
- "\n",
246
- "\n",
247
- "filters = MetadataFilters(\n",
248
- " filters=[\n",
249
- " MetadataFilter(key=\"source\", value=\"lanchain_course\"),\n",
250
- " MetadataFilter(key=\"source\", value=\"langchain_docs\"),\n",
251
- " ],\n",
252
- " condition=FilterCondition.OR,\n",
253
- ")\n",
254
- "\n",
255
- "llm = OpenAI(temperature=0, model=\"gpt-3.5-turbo-0125\", max_tokens=None)\n",
256
- "embeds = OpenAIEmbedding(model=\"text-embedding-3-large\", mode=\"text_search\")\n",
257
- "# query_engine = index.as_query_engine(\n",
258
- "# llm=llm, similarity_top_k=5, embed_model=embeds, verbose=True, streaming=True, filters=filters\n",
259
- "# )\n",
260
- "query_engine = index.as_query_engine(\n",
261
- " llm=llm, similarity_top_k=5, embed_model=embeds, verbose=True,\n",
262
- ")"
263
- ]
264
- },
265
- {
266
- "cell_type": "code",
267
- "execution_count": null,
268
- "metadata": {},
269
- "outputs": [],
270
- "source": [
271
- "res = query_engine.query(\"What is the LLama model?\")\n",
272
- "\n",
273
- "# history = \"\" \n",
274
- "# for token in res.response_gen:\n",
275
- "# history += token\n",
276
- "# print(history)"
277
- ]
278
- },
279
- {
280
- "cell_type": "code",
281
- "execution_count": null,
282
- "metadata": {},
283
- "outputs": [],
284
- "source": [
285
- "res.response"
286
- ]
287
- },
288
- {
289
- "cell_type": "code",
290
- "execution_count": null,
291
- "metadata": {},
292
- "outputs": [],
293
- "source": [
294
- "for src in res.source_nodes:\n",
295
- " print(\"Node ID\\t\", src.node_id)\n",
296
- " print(\"Source\\t\", src.metadata['source'])\n",
297
- " print(\"Title\\t\", src.metadata['title'])\n",
298
- " print(\"Text\\t\", src.text)\n",
299
- " print(\"Score\\t\", src.score)\n",
300
- " print(\"-_\"*20)"
301
- ]
302
- },
303
- {
304
- "cell_type": "code",
305
- "execution_count": null,
306
- "metadata": {},
307
- "outputs": [],
308
- "source": [
309
- "from IPython.display import Markdown, display\n",
310
- "# define prompt viewing function\n",
311
- "def display_prompt_dict(prompts_dict):\n",
312
- " for k, p in prompts_dict.items():\n",
313
- " text_md = f\"**Prompt Key**: {k}<br>\" f\"**Text:** <br>\"\n",
314
- " display(Markdown(text_md))\n",
315
- " print(p.get_template())\n",
316
- " display(Markdown(\"<br><br>\"))"
317
- ]
318
- },
319
- {
320
- "cell_type": "code",
321
- "execution_count": null,
322
- "metadata": {},
323
- "outputs": [],
324
- "source": [
325
- "prompts_dict = query_engine.get_prompts()"
326
- ]
327
- },
328
- {
329
- "cell_type": "code",
330
- "execution_count": null,
331
- "metadata": {},
332
- "outputs": [],
333
- "source": [
334
- "display_prompt_dict(prompts_dict)"
335
- ]
336
- },
337
- {
338
- "cell_type": "code",
339
- "execution_count": null,
340
- "metadata": {},
341
- "outputs": [],
342
- "source": []
343
- },
344
- {
345
- "cell_type": "code",
346
- "execution_count": null,
347
- "metadata": {},
348
- "outputs": [],
349
- "source": []
350
- },
351
- {
352
- "cell_type": "code",
353
- "execution_count": null,
354
- "metadata": {},
355
- "outputs": [],
356
- "source": []
357
- }
358
- ],
359
- "metadata": {
360
- "kernelspec": {
361
- "display_name": "env",
362
- "language": "python",
363
- "name": "python3"
364
- },
365
- "language_info": {
366
- "codemirror_mode": {
367
- "name": "ipython",
368
- "version": 3
369
- },
370
- "file_extension": ".py",
371
- "mimetype": "text/x-python",
372
- "name": "python",
373
- "nbconvert_exporter": "python",
374
- "pygments_lexer": "ipython3",
375
- "version": "3.11.8"
376
- }
377
- },
378
- "nbformat": 4,
379
- "nbformat_minor": 2
380
- }
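
One loose end in the notebook above: the metadata filters object is built but never passed to the query engine (only the commented-out call uses it). A sketch of applying it, using the objects already defined in the notebook; the source values are assumed to match the ingested metadata:

# Restrict retrieval to the LangChain course and docs sources.
filtered_query_engine = index.as_query_engine(
    llm=llm,
    similarity_top_k=5,
    embed_model=embeds,
    filters=filters,
)
res = filtered_query_engine.query("How do LangChain chains work?")
print(res.response)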
scripts/gradio-ui.py DELETED
@@ -1,295 +0,0 @@
1
- import os
2
- import logging
3
- from typing import Optional
4
- from datetime import datetime
5
-
6
- import chromadb
7
- from llama_index.core.tools import QueryEngineTool, FunctionTool, ToolMetadata
8
- from llama_index.agent.openai import OpenAIAgent
9
- from llama_index.vector_stores.chroma import ChromaVectorStore
10
- from llama_index.core import VectorStoreIndex
11
- from llama_index.embeddings.openai import OpenAIEmbedding
12
- from llama_index.llms.openai import OpenAI
13
- from llama_index.core.vector_stores import (
14
- MetadataFilters,
15
- MetadataFilter,
16
- FilterCondition,
17
- )
18
- import gradio as gr
19
- from gradio.themes.utils import (
20
- fonts,
21
- )
22
-
23
- from utils import init_mongo_db
24
- from tutor_prompts import (
25
- TEXT_QA_TEMPLATE,
26
- QueryValidation,
27
- system_message_validation,
28
- system_message_openai_agent,
29
- )
30
- from call_openai import api_function_call
31
-
32
- logger = logging.getLogger(__name__)
33
- logging.basicConfig(level=logging.INFO)
34
- logging.getLogger("httpx").setLevel(logging.WARNING)
35
-
36
- # # These variables are used to intercept API calls
37
- # # launch mitmweb
38
- # cert_file = "/Users/omar/Downloads/mitmproxy-ca-cert.pem"
39
- # os.environ["REQUESTS_CA_BUNDLE"] = cert_file
40
- # os.environ["SSL_CERT_FILE"] = cert_file
41
- # os.environ["HTTPS_PROXY"] = "http://127.0.0.1:8080"
42
-
43
- CONCURRENCY_COUNT = int(os.getenv("CONCURRENCY_COUNT", 64))
44
- MONGODB_URI = os.getenv("MONGODB_URI")
45
-
46
- AVAILABLE_SOURCES_UI = [
47
- "Gen AI 360: LLMs",
48
- "Gen AI 360: LangChain",
49
- "Gen AI 360: Advanced RAG",
50
- "Towards AI Blog",
51
- "Activeloop Docs",
52
- "HF Transformers Docs",
53
- "Wikipedia",
54
- "OpenAI Docs",
55
- "LangChain Docs",
56
- ]
57
-
58
- AVAILABLE_SOURCES = [
59
- "llm_course",
60
- "langchain_course",
61
- "advanced_rag_course",
62
- "towards_ai",
63
- "activeloop",
64
- "hf_transformers",
65
- "wikipedia",
66
- "openai",
67
- "langchain_docs",
68
- ]
69
-
70
- # Initialize MongoDB
71
- mongo_db = (
72
- init_mongo_db(uri=MONGODB_URI, db_name="towardsai-buster")
73
- if MONGODB_URI
74
- else logger.warning("No mongodb uri found, you will not be able to save data.")
75
- )
76
-
77
- # Initialize vector store and index
78
- db2 = chromadb.PersistentClient(path="scripts/ai-tutor-db")
79
- chroma_collection = db2.get_or_create_collection("ai-tutor-db")
80
- vector_store = ChromaVectorStore(chroma_collection=chroma_collection)
81
- index = VectorStoreIndex.from_vector_store(vector_store=vector_store)
82
-
83
- # Initialize OpenAI models
84
- llm = OpenAI(temperature=0, model="gpt-3.5-turbo-0125", max_tokens=None)
85
- # embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="text_search")
86
- embeds = OpenAIEmbedding(model="text-embedding-3-large", mode="similarity")
87
-
88
- query_engine = index.as_query_engine(
89
- llm=llm,
90
- similarity_top_k=5,
91
- embed_model=embeds,
92
- streaming=True,
93
- text_qa_template=TEXT_QA_TEMPLATE,
94
- )
95
-
96
- query_engine_tools = [
97
- QueryEngineTool(
98
- query_engine=query_engine,
99
- metadata=ToolMetadata(
100
- name="AI_information",
101
- description="""The 'AI_information' tool serves as a comprehensive repository for insights into the field of artificial intelligence. When utilizing this tool, the input should be the user's complete question. The input can also be adapted to focus on specific aspects or further details of the current topic under discussion. This dynamic input approach allows for a tailored exploration of AI subjects, ensuring that responses are relevant and informative. Employ this tool to fetch nuanced information on topics such as model training, fine-tuning, LLM augmentation, and more, thereby facilitating a rich, context-aware dialogue.""",
102
- ),
103
- )
104
- ]
105
-
106
-
107
- def initialize_agent():
108
- agent = OpenAIAgent.from_tools(
109
- query_engine_tools,
110
- llm=llm,
111
- verbose=True,
112
- system_prompt=system_message_openai_agent,
113
- )
114
- return agent
115
-
116
-
117
- def reset_agent(agent_state):
118
- agent_state = initialize_agent() # Reset the agent by reassigning a new instance
119
- chatbot = [[None, None]]
120
- return "Agent has been reset.", chatbot
121
-
122
-
123
- def log_emails(email: gr.Textbox):
124
- collection = "email_data-test"
125
-
126
- logger.info(f"User reported {email=}")
127
- email_document = {"email": email}
128
-
129
- try:
130
- mongo_db[collection].insert_one(email_document)
131
- logger.info("")
132
- except Exception:
133
- logger.info("Something went wrong logging")
134
-
135
- return ""
136
-
137
-
138
- def format_sources(completion) -> str:
139
- if len(completion.source_nodes) == 0:
140
- return ""
141
-
142
- # Mapping of source system names to user-friendly names
143
- display_source_to_ui = {
144
- src: ui for src, ui in zip(AVAILABLE_SOURCES, AVAILABLE_SOURCES_UI)
145
- }
146
-
147
- documents_answer_template: str = (
148
- "πŸ“ Here are the sources I used to answer your question:\n\n{documents}"
149
- )
150
- document_template: str = "[πŸ”— {source}: {title}]({url}), relevance: {score:2.2f}"
151
-
152
- documents = "\n".join(
153
- [
154
- document_template.format(
155
- title=src.metadata["title"],
156
- score=src.score,
157
- source=display_source_to_ui.get(
158
- src.metadata["source"], src.metadata["source"]
159
- ),
160
- url=src.metadata["url"],
161
- )
162
- for src in completion.source_nodes
163
- ]
164
- )
165
-
166
- return documents_answer_template.format(documents=documents)
167
-
168
-
169
- def add_sources(history, completion):
170
- if completion is None:
171
- yield history
172
-
173
- formatted_sources = format_sources(completion)
174
- if formatted_sources == "":
175
- yield history
176
-
177
- history[-1][1] += "\n\n" + formatted_sources
178
- yield history
179
-
180
-
181
- def user(user_input, history, agent_state):
182
- agent = agent_state
183
- return "", history + [[user_input, None]]
184
-
185
-
186
- def get_answer(history, agent_state):
187
- user_input = history[-1][0]
188
- history[-1][1] = ""
189
-
190
- completion = agent_state.stream_chat(user_input)
191
-
192
- for token in completion.response_gen:
193
- history[-1][1] += token
194
- yield history, completion
195
-
196
- logger.info(f"completion: {history[-1][1]=}")
197
-
198
-
199
- example_questions = [
200
- "What is the LLama model?",
201
- "What is a Large Language Model?",
202
- "What is an embedding?",
203
- ]
204
-
205
- theme = gr.themes.Soft()
206
- with gr.Blocks(
207
- theme=gr.themes.Soft(
208
- primary_hue="blue",
209
- secondary_hue="blue",
210
- font=fonts.GoogleFont("Source Sans Pro"),
211
- font_mono=fonts.GoogleFont("IBM Plex Mono"),
212
- ),
213
- fill_height=True,
214
- ) as demo:
215
-
216
- agent_state = gr.State(initialize_agent())
217
-
218
- with gr.Row():
219
- gr.HTML(
220
- "<h3><center>Towards AI πŸ€–: A Question-Answering Bot for anything AI-related</center></h3>"
221
- )
222
-
223
- chatbot = gr.Chatbot(
224
- elem_id="chatbot",
225
- show_copy_button=True,
226
- scale=2,
227
- likeable=True,
228
- show_label=False,
229
- )
230
-
231
- with gr.Row():
232
- question = gr.Textbox(
233
- label="What's your question?",
234
- placeholder="Ask a question to the AI tutor here...",
235
- lines=1,
236
- scale=7,
237
- show_label=False,
238
- )
239
- submit = gr.Button(value="Send", variant="primary", scale=1)
240
- reset_button = gr.Button("Reset Chat", variant="secondary", scale=1)
241
-
242
- with gr.Row():
243
- examples = gr.Examples(
244
- examples=example_questions,
245
- inputs=question,
246
- )
247
- with gr.Row():
248
- email = gr.Textbox(
249
- label="Want to receive updates about our AI tutor?",
250
- placeholder="Enter your email here...",
251
- lines=1,
252
- scale=6,
253
- )
254
- submit_email = gr.Button(value="Submit", variant="secondary", scale=1)
255
-
256
- gr.Markdown(
257
- "This application uses GPT3.5-Turbo to search the docs for relevant information and answer questions."
258
- )
259
-
260
- completion = gr.State()
261
-
262
- submit.click(
263
- user, [question, chatbot, agent_state], [question, chatbot], queue=False
264
- ).then(
265
- get_answer,
266
- inputs=[chatbot, agent_state],
267
- outputs=[chatbot, completion],
268
- ).then(
269
- add_sources, inputs=[chatbot, completion], outputs=[chatbot]
270
- )
271
- # .then(
272
- # save_completion, inputs=[completion, chatbot]
273
- # )
274
-
275
- question.submit(
276
- user, [question, chatbot, agent_state], [question, chatbot], queue=False
277
- ).then(
278
- get_answer,
279
- inputs=[chatbot, agent_state],
280
- outputs=[chatbot, completion],
281
- ).then(
282
- add_sources, inputs=[chatbot, completion], outputs=[chatbot]
283
- )
284
- # .then(
285
- # save_completion, inputs=[completion, chatbot]
286
- # )
287
-
288
- reset_button.click(
289
- reset_agent, inputs=[agent_state], outputs=[agent_state, chatbot]
290
- )
291
- submit_email.click(log_emails, email, email)
292
- email.submit(log_emails, email, email)
293
-
294
- demo.queue(default_concurrency_limit=CONCURRENCY_COUNT)
295
- demo.launch(debug=False, share=False)
scripts/tutor_prompts.py DELETED
@@ -1,100 +0,0 @@
1
- from llama_index.core.llms import ChatMessage, MessageRole
2
- from llama_index.core import ChatPromptTemplate
3
- from pydantic import BaseModel, Field
4
-
5
- default_user_prompt = (
6
- "Context information is below.\n"
7
- "---------------------\n"
8
- "{context_str}\n"
9
- "---------------------\n"
10
- "Given the context information and not prior knowledge, "
11
- "answer the question: {query_str}\n"
12
- )
13
-
14
- system_prompt = (
15
- "You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context."
16
- "You are provided information found in the json documentation. "
17
- "Only respond with information inside the json documentation. DO NOT use additional information, even if you know the answer. "
18
- "If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation, answer in 5 paragraphs."
19
- "If the documentation does not discuss the topic related to the question, kindly respond that you cannot answer the question because it is not part of your knowledge. "
20
- "Here is the information you can use in order: \n"
21
- "---------------------\n"
22
- "{context_str}\n"
23
- "---------------------\n"
24
- "REMEMBER:\n"
25
- "You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context."
26
- "You are provided information found in the json documentation. "
27
- "Here are the rules you must follow:\n"
28
- "* Only respond with information inside the json documentation. DO NOT provide additional information, even if you know the answer. "
29
- "* If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation. Your answer needs to be pertinent and not redundant giving a clear explanation as if you were a teacher. "
30
- "* If the documentation does not discuss the topic related to the question, kindly respond that you cannot answer the question because it is not part of your knowledge. "
31
- "* Only use information summarized from the json documentation, do not respond otherwise. "
32
- "* Do not refer to the json documentation directly, but use the instructions provided within it to answer questions. "
33
- "* Do not reference any links, urls or hyperlinks in your answers.\n"
34
- "* Make sure to format your answers in Markdown format, including code block and snippets.\n"
35
- "* If you do not know the answer to a question, or if it is completely irrelevant to the AI courses, simply reply with:\n"
36
- "'I'm sorry, but I couldn't find the information that answers you question. Is there anything else I can assist you with?'"
37
- "For example:\n"
38
- "What is the meaning of life for a qa bot?\n"
39
- "I'm sorry, but I couldn't find the information that answers you question. Is there anything else I can assist you with?"
40
- "Now answer the following question: \n"
41
- )
42
-
43
- chat_text_qa_msgs: list[ChatMessage] = [
44
- ChatMessage(role=MessageRole.SYSTEM, content=system_prompt),
45
- ChatMessage(
46
- role=MessageRole.USER,
47
- content="{query_str}",
48
- ),
49
- ]
50
-
51
- TEXT_QA_TEMPLATE = ChatPromptTemplate(chat_text_qa_msgs)
52
-
53
-
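Note: `TEXT_QA_TEMPLATE` leaves `{context_str}` and `{query_str}` as placeholders that LlamaIndex fills at query time. A minimal sketch of rendering such a template directly (the context and question values below are made up for illustration):

    from llama_index.core import ChatPromptTemplate
    from llama_index.core.llms import ChatMessage, MessageRole

    messages = [
        ChatMessage(role=MessageRole.SYSTEM, content="Use only this context: {context_str}"),
        ChatMessage(role=MessageRole.USER, content="{query_str}"),
    ]
    qa_template = ChatPromptTemplate(messages)

    # format_messages substitutes the placeholders and returns ChatMessage
    # objects ready to pass to an LLM's chat endpoint.
    chat_messages = qa_template.format_messages(
        context_str="LoRA inserts small low-rank adapter matrices into frozen weights.",
        query_str="What is LoRA?",
    )
    print(chat_messages)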
- system_message_validation = """- You are a witty AI teacher, helpfully answering questions from students studying the field of applied artificial intelligence.
- - Your job is to determine whether the user's question is valid or not. Users will not always submit an actual question.
- - Users will ask all sorts of questions, and some might be tangentially related to artificial intelligence (AI), machine learning (ML), natural language processing (NLP), computer vision (CV) or generative AI.
- - Users can ask how to build LLM-powered apps with LangChain, LlamaIndex, Deep Lake and Chroma DB, among other technologies including OpenAI, RAG and more.
- - As long as a question is somewhat related to the topic of AI, ML, NLP, RAG, data and techniques used in AI like vector embeddings, memories, embeddings, tokenization, encoding, databases, RAG (Retrieval-Augmented Generation), Langchain, LlamaIndex, LLMs (Large Language Models), Preprocessing techniques, Document loading, Chunking, Indexing of document segments, Embedding models, Chains, Memory modules, Vector stores, Chat models, Sequential chains, Information Retrieval, Data connectors, LlamaHub, Node objects, Query engines, Fine-tuning, Activeloop’s Deep Memory, Prompt engineering, Synthetic training dataset, Inference, Recall rates, Query construction, Query expansion, Query transformation, Re-ranking, Cohere Reranker, Recursive retrieval, Small-to-big retrieval, Hybrid searches, Hit Rate, Mean Reciprocal Rank (MRR), GPT-4, Agents, OpenGPTs, Zero-shot ReAct, Conversational Agent, OpenAI Assistants API, Hugging Face Inference API, Code Interpreter, Knowledge Retrieval, Function Calling, Whisper, Dall-E 3, GPT-4 Vision, Unstructured, Deep Lake, FaithfulnessEvaluator, RAGAS, LangSmith, LangChain Hub, LangServe, REST API, respond 'true'. If a question is on a different subject or unrelated, respond 'false'.
- - Make sure the question is a valid question.
-
- Here is a list of acronyms and concepts related to Artificial Intelligence (AI) that are valid. The following terms can be uppercase or lowercase:
- Treat them as case insensitive.
- 'TQL', 'Deep Memory', 'LLM', 'Llama', 'llamaindex', 'llama-index', 'lang chain', 'langchain', 'llama index', 'GPT', 'NLP', 'RLHF', 'RLAIF', 'Mistral', 'SFT', 'Cohere', 'NanoGPT', 'ReAct', 'LoRA', 'QLoRA', 'LMMOps', 'Alpaca', 'Flan', 'Weights and Biases', 'W&B', 'IDEFICS', 'Flamingo', 'LLaVA', 'BLIP', 'Falcon', 'Gemini'
-
- """
-
-
- class QueryValidation(BaseModel):
-     """
-     Validate the user query. Use the guidelines given to you.
-     """
-
-     user_query: str = Field(
-         description="The user query to validate.",
-     )
-     chain_of_thought: str = Field(
-         description="Is the user query valid given the above guidelines? Think step-by-step. Write down your reasoning here.",
-     )
-     is_valid: bool = Field(
-         description="Based on the previous reasoning, answer with True if the query is related to AI. Answer False otherwise.",
-     )
-     reason: str = Field(
-         description="Explain why the query was valid or not. What are the keywords that make it valid or invalid?",
-     )
-
-
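Note: a schema like `QueryValidation` is typically bound to the model through function/tool calling so the LLM must answer in that structure. A hedged sketch using OpenAI's structured-output helper (the client setup, model name, and condensed system prompt are assumptions, not the original wiring):

    from openai import OpenAI
    from pydantic import BaseModel, Field

    class QueryValidation(BaseModel):
        """Validate the user query."""
        chain_of_thought: str = Field(description="Step-by-step reasoning.")
        is_valid: bool = Field(description="True if the query is AI-related.")
        reason: str = Field(description="Why the query is valid or invalid.")

    client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment
    completion = client.beta.chat.completions.parse(
        model="gpt-4o-mini",  # placeholder model name
        messages=[
            {"role": "system", "content": "Decide whether the question is about AI."},
            {"role": "user", "content": "How does RLHF work?"},
        ],
        response_format=QueryValidation,
    )
    verdict = completion.choices[0].message.parsed
    print(verdict.is_valid, verdict.reason)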
- system_message_openai_agent = """You are a witty AI teacher, adeptly responding to students' inquiries within the realm of applied artificial intelligence. The scope encompasses training models, fine-tuning models, augmenting LLMs with memory, crafting effective prompts, addressing hallucinations and biases, exploring vector databases, understanding transformer architectures, utilizing embeddings, discovering Langchain, integrating tool use in LLMs, deploying AI agents, and employing reinforcement learning with human feedback. To navigate these discussions:
-
- Utilize the AI_information tool to gather insights pertinent to the field of AI. This function accepts a string (the complete user question) and returns informative content regarding the domain of AI.
-
- AI_information: A tool for acquiring knowledge about AI. Directly forward the user's question, or a refined version focusing on the current discussion topic, to this tool.
-
- Your responses are exclusively based on the output provided by the AI_information tool. Refrain from incorporating external knowledge or information not directly obtained from the tool's responses.
-
- When the conversation deepens or shifts focus within a topic, adapt your inquiries to the AI_information tool to reflect these nuances. This means if a user requests further elaboration on a specific aspect of a previously discussed topic, you should reformulate your input to the tool to capture this new angle or more profound layer of inquiry.
-
- Provide comprehensive answers, ideally structured in up to ten paragraphs, drawing from the variety of relevant details furnished by the tool. The depth and breadth of your responses should align with the scope and specificity of the information retrieved.
-
- Should the AI_information tool's repository lack information on the queried topic, politely inform the user that the question transcends the bounds of your current knowledge base, citing the absence of relevant content in the tool's documentation.
- """
 
scripts/utils.py DELETED
@@ -1,16 +0,0 @@
- from pymongo.mongo_client import MongoClient
- from pymongo.server_api import ServerApi
-
-
- def init_mongo_db(uri: str, db_name: str):
-     """Initialize the MongoDB database and return a handle to it."""
-
-     try:
-         assert uri is not None, "No URI passed"
-         client = MongoClient(uri, server_api=ServerApi("1"))
-         database = client[db_name]
-         print("Connected to MongoDB")
-         return database
-     except Exception as e:
-         print(f"Something went wrong connecting to MongoDB: {e}")
-         return None
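Note: with the fix above, `init_mongo_db` returns the `Database` handle on success and `None` on failure, so callers should compare against `None` explicitly (PyMongo's `Database` objects do not support truth-value testing). An illustrative usage sketch (the URI environment variable and collection name are placeholders):

    import os

    db = init_mongo_db(uri=os.environ.get("MONGODB_URI"), db_name="ai-tutor")
    if db is not None:
        db["logs"].insert_one({"event": "app_started"})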