worked on lesson How to deploy an open source LLM

notebooks/15-Use_OpenSource_Models.ipynb  CHANGED  (+256 −535)
--- notebooks/15-Use_OpenSource_Models.ipynb (old version; removed lines are prefixed with "-"; "…" marks text lost in the page capture)

@@ -21,18 +21,18 @@
    },
    {
     "cell_type": "code",
-    "execution_count": …,
     "metadata": {
      "id": "QPJzr-I9XQ7l"
     },
     "outputs": [],
     "source": [
-     "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-llms-…
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": …,
     "metadata": {
      "id": "riuXwpSPcvWC"
     },
@@ -40,121 +40,44 @@
     "source": [
      "import os\n",
      "\n",
-     "# Set …
-     "…
-     "os.environ[\"…
-     "os.environ[\"…
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 2,
-    "metadata": {
-     "id": "jIEeZzqLbz0J"
-    },
-    "outputs": [],
-    "source": [
-     "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
      "\n",
      "import nest_asyncio\n",
      "\n",
      "nest_asyncio.apply()"
     ]
    },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "id": "Bkgi2OrYzF7q"
-    },
-    "source": [
-     "# Load a Model"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 3,
-    "metadata": {
-     "id": "A1yVgic9DeJ6"
-    },
-    "outputs": [
-     {
-      "name": "stderr",
-      "output_type": "stream",
-      "text": [
-       "/Users/louis/Documents/GitHub/ai-tutor-rag-system/.conda/lib/python3.11/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
-       " from .autonotebook import tqdm as notebook_tqdm\n"
-      ]
-     }
-    ],
-    "source": [
-     "from llama_index.core.prompts import PromptTemplate\n",
-     "from llama_index.llms.replicate import Replicate\n",
-     "\n",
-     "# Use the repicate service to access the LLaMA2-70B chat model\n",
-     "llm = Replicate(\n",
-     " model=\"meta/llama-2-70b-chat:2796ee9483c3fd7aa2e171d38f4ca12251a30609463dcfd4cd76703f22e96cdf\",\n",
-     " is_chat_model=True,\n",
-     " additional_kwargs={\"max_new_tokens\": 512}\n",
-     ")\n"
-    ]
-   },
    {
     "cell_type": "markdown",
     "metadata": {
      "id": "0BwVuJXlzHVL"
     },
     "source": [
-     "# Create a …
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": …,
     "metadata": {
      "id": "SQP87lHczHKc"
     },
     "outputs": [],
     "source": [
      "import chromadb\n",
-     "\n",
-     "# create client and a new collection\n",
-     "# chromadb.EphemeralClient saves data in-memory.\n",
-     "chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
-     "chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 11,
-    "metadata": {
-     "id": "zAaGcYMJzHAN"
-    },
-    "outputs": [],
-    "source": [
      "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
      "\n",
-     "# …
      "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
     ]
    },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "id": "I9JbAzFcjkpn"
-    },
-    "source": [
-     "# Load the Dataset (CSV)"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "id": "ceveDuYdWCYk"
-    },
-    "source": [
-     "## Download"
-    ]
-   },
    {
     "cell_type": "markdown",
     "metadata": {
@@ -166,7 +89,7 @@
    },
    {
     "cell_type": "code",
-    "execution_count": …,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/"
@@ -174,17 +97,7 @@
      "id": "wl_pbPvMlv1h",
      "outputId": "244bb539-f023-46f9-bc25-a05d0e526c14"
     },
-    "outputs": [
-     {
-      "name": "stdout",
-      "output_type": "stream",
-      "text": [
-       " % Total % Received % Xferd Average Speed Time Time Time Current\n",
-       " Dload Upload Total Spent Left Speed\n",
-       "100 169k 100 169k 0 0 921k 0 --:--:-- --:--:-- --:--:-- 926k\n"
-      ]
-     }
-    ],
     "source": [
      "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
     ]
@@ -195,12 +108,12 @@
      "id": "VWBLtDbUWJfA"
     },
     "source": [
-     "## Read …
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": …,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/"
@@ -208,18 +121,7 @@
      "id": "0Q9sxuW0g3Gd",
      "outputId": "8b475ed3-23b1-43e1-c8fc-027fc26455c3"
     },
-    "outputs": [
-     {
-      "data": {
-       "text/plain": [
-        "14"
-       ]
-      },
-      "execution_count": 6,
-      "metadata": {},
-      "output_type": "execute_result"
-     }
-    ],
     "source": [
      "import csv\n",
      "\n",
@@ -231,10 +133,7 @@
      "\n",
      " for idx, row in enumerate( csv_reader ):\n",
      "   if idx == 0: continue; # Skip header row\n",
-     "   rows.append(…
-     "\n",
-     "# The number of characters in the dataset.\n",
-     "len( rows )"
     ]
    },
    {
@@ -243,168 +142,30 @@
      "id": "S17g2RYOjmf2"
     },
     "source": [
-     "…
     ]
    },
    {
     "cell_type": "code",
-    "execution_count": …,
     "metadata": {
      "id": "YizvmXPejkJE"
     },
     "outputs": [],
     "source": [
      "from llama_index.core import Document\n",
      "\n",
      "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
-     "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
-    ]
-   },
-   {
-    "cell_type": "markdown",
-    "metadata": {
-     "id": "qjuLbmFuWsyl"
-    },
-    "source": [
-     "# Transforming"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 8,
-    "metadata": {
-     "id": "9z3t70DGWsjO"
-    },
-    "outputs": [],
-    "source": [
-     "from llama_index.core.text_splitter import TokenTextSplitter\n",
      "\n",
      "# Define the splitter object that split the text into segments with 512 tokens,\n",
      "# with a 128 overlap between the segments.\n",
      "text_splitter = TokenTextSplitter(\n",
      " separator=\" \", chunk_size=512, chunk_overlap=128\n",
-     ")"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 12,
-    "metadata": {
-     "colab": {
-      "base_uri": "https://localhost:8080/",
-      "height": 650,
-      "referenced_widgets": [ … roughly 80 Colab widget-state IDs … ]
-     },
-     "id": "P9LDJ7o-Wsc-",
-     "outputId": "08a795a9-53e3-4a2b-89d2-c0a8912d66b9"
-    },
-    "outputs": [
-     {
-      "name": "stderr",
-      "output_type": "stream",
-      "text": [
-       "Parsing nodes: 100%|██████████| 14/14 [00:00<00:00, 27.58it/s]\n",
-       "Generating embeddings: 100%|██████████| 108/108 [00:05<00:00, 21.15it/s]\n"
-      ]
-     }
-    ],
-    "source": [
-     "from llama_index.core.extractors import (\n",
-     " SummaryExtractor,\n",
-     " QuestionsAnsweredExtractor,\n",
-     " KeywordExtractor,\n",
      ")\n",
-     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
-     "from llama_index.core.ingestion import IngestionPipeline\n",
      "\n",
      "# Create the pipeline to apply the transformation on each chunk,\n",
      "# and store the transformed text in the chroma vector store.\n",
@@ -416,80 +177,7 @@
      " vector_store=vector_store\n",
      ")\n",
      "\n",
-     "nodes = pipeline.run(documents=documents, show_progress=True)…
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 13,
-    "metadata": {
-     "colab": {
-      "base_uri": "https://localhost:8080/"
-     },
-     "id": "mPGa85hM2P3P",
-     "outputId": "56c3980a-38a4-40e7-abdd-84ec1f26cb95"
-    },
-    "outputs": [
-     {
-      "data": {
-       "text/plain": [
-        "108"
-       ]
-      },
-      "execution_count": 13,
-      "metadata": {},
-      "output_type": "execute_result"
-     }
-    ],
-    "source": [
-     "len( nodes )"
-    ]
-   },
-   {
-    "cell_type": "code",
-    "execution_count": 14,
-    "metadata": {
-     "colab": {
-      "base_uri": "https://localhost:8080/"
-     },
-     "id": "OeeG3jxT0taW",
-     "outputId": "d1938534-9a12-4f5e-b7e1-5fd58d687d60"
-    },
-    "outputs": [
-     {
-      "name": "stdout",
-      "output_type": "stream",
-      "text": [
-       " adding: mini-llama-articles/ (stored 0%)\n",
-       " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/ (stored 0%)\n",
-       " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/data_level0.bin"
-      ]
-     },
-     {
-      "name": "stderr",
-      "output_type": "stream",
-      "text": [
-       "huggingface/tokenizers: The current process just got forked, after parallelism has already been used. Disabling parallelism to avoid deadlocks...\n",
-       "To disable this warning, you can either:\n",
-       "\t- Avoid using `tokenizers` before the fork if possible\n",
-       "\t- Explicitly set the environment variable TOKENIZERS_PARALLELISM=(true | false)\n"
-      ]
-     },
-     {
-      "name": "stdout",
-      "output_type": "stream",
-      "text": [
-       " (deflated 57%)\n",
-       " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/length.bin (deflated 48%)\n",
-       " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/link_lists.bin (stored 0%)\n",
-       " adding: mini-llama-articles/01877efc-b4a2-4da3-80b3-93cc40b27067/header.bin (deflated 61%)\n",
-       " adding: mini-llama-articles/chroma.sqlite3 (deflated 66%)\n"
-      ]
-     }
-    ],
-    "source": [
-     "# Compress the vector store directory to a zip file to be able to download and use later.\n",
-     "!zip -r vectorstore-bge-embedding.zip mini-llama-articles"
     ]
    },
    {
@@ -498,50 +186,34 @@
     "id": "OWaT6rL7ksp8"
    },
    "source": [
-    "# Load …
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {
-    "id": "RF4U62oMr-iW"
-   },
-   "source": [
-    "If you have already uploaded the zip file for the vector store checkpoint, please uncomment the code in the following cell block to extract its contents. After doing so, you will be able to load the dataset from local storage."
    ]
   },
   {
    "cell_type": "code",
-   "execution_count": …,
-   "metadata": {
-    "colab": {
-     "base_uri": "https://localhost:8080/"
-    },
-    "id": "XxPMJ4tq06qx",
-    "outputId": "8445e40a-b3c6-44ff-dfde-37cd4c73ffa2"
-   },
-   "outputs": [],
-   "source": [
-    "# !unzip vectorstore.zip"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": 16,
    "metadata": {
    "id": "mXi56KTXk2sp"
   },
   "outputs": [],
   "source": [
-   "…
-   "…
-   "…
-   "vector_store…
   ]
  },
  {
   "cell_type": "code",
-  "execution_count": …,
  "metadata": {
   "colab": {
    "base_uri": "https://localhost:8080/",
@@ -618,36 +290,23 @@
   "id": "RZ5iQ_KkJufJ",
   "outputId": "dd6029ee-10ed-4bf8-95d1-88ac5c636c47"
  },
- "outputs": [
-  {
-   "name": "stderr",
-   "output_type": "stream",
-   "text": [
-    "/var/folders/l7/9qcp7g5x5rl9x8ltw0t85qym0000gn/T/ipykernel_8187/3245113941.py:5: DeprecationWarning: Call to deprecated class method from_defaults. (ServiceContext is deprecated, please use `llama_index.settings.Settings` instead.) -- Deprecated since version 0.10.0.\n",
-    " service_context = ServiceContext.from_defaults(llm=llm, embed_model=\"local:BAAI/bge-small-en-v1.5\")\n"
-   ]
-  }
- ],
- "source": [
-  "from llama_index.core import ServiceContext\n",
-  "\n",
-  "# Define a ServiceContext that uses the BGE model for embedding which will be loads from Huggingface.\n",
-  "# The model will be downloaded in your local machine.\n",
-  "service_context = ServiceContext.from_defaults(llm=llm, embed_model=\"local:BAAI/bge-small-en-v1.5\")"
- ]
- },
- {
-  "cell_type": "code",
-  "execution_count": 27,
-  "metadata": {
-   "id": "jKXURvLtkuTS"
-  },
  "outputs": [],
  "source": [
-  "from llama_index.core import …
  "\n",
-  "# …
-  "index = …
  ]
 },
 {
@@ -656,63 +315,25 @@
  "id": "8JPD8yAinVSq"
 },
 "source": [
- "# …
 ]
},
{
 "cell_type": "code",
- "execution_count": …,
- "metadata": {
-  "id": "8lBu8V7tJ2_8"
- },
- "outputs": [],
- "source": [
-  "# Define a query engine that is responsible for retrieving related pieces of text,\n",
-  "# and using a LLM to formulate the final answer.\n",
-  "query_engine = index.as_query_engine()"
- ]
- },
- {
-  "cell_type": "code",
-  "execution_count": 29,
 "metadata": {
  "id": "rWAI0jUhJ7qH"
 },
"outputs": [],
"source": [
- "res = query_engine.query(\"How many parameters LLaMA2 has?\")"
- ]
- },
- {
-  "cell_type": "code",
-  "execution_count": 30,
-  "metadata": {
-   "colab": {
-    "base_uri": "https://localhost:8080/",
-    "height": 53
-   },
-   "id": "VKK3jMprctre",
-   "outputId": "06d62444-5e04-4e99-abf0-0ff1c76c0a82"
-  },
-  "outputs": [
-   {
-    "data": {
-     "text/plain": [
-      "'LLaMA2 has four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
-     ]
-    },
-    "execution_count": 30,
-    "metadata": {},
-    "output_type": "execute_result"
-   }
-  ],
-  "source": [
-   "res.response"
]
},
{
 "cell_type": "code",
- "execution_count": …,
 "metadata": {
  "colab": {
   "base_uri": "https://localhost:8080/"
@@ -720,25 +341,9 @@
  "id": "nvSmOtqBoCY2",
  "outputId": "21a60031-4f39-4d1a-fbfa-ddba7d267936"
 },
- "outputs": [
-  {
-   "name": "stdout",
-   "output_type": "stream",
-   "text": [
-    "Node ID\t 737ac31e-077f-448e-a777-a096ae0550c0\n",
-    "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
-    "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
-    "Score\t 0.6191229199592215\n",
-    "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
-    "Node ID\t c02a107f-b946-48c6-9180-98b058334b92\n",
-    "Title\t Beyond GPT-4: What's New?\n",
-    "Text\t LLM Variants and Meta's Open Source Before shedding light on four major trends, I'd share the latest Meta's Llama 2 and Code Llama. Meta's Llama 2 represents a sophisticated evolution in LLMs. This suite spans models pretrained and fine-tuned across a parameter spectrum of 7 billion to 70 billion. A specialized derivative, Llama 2-Chat, has been engineered explicitly for dialogue-centric applications. Benchmarking revealed Llama 2's superior performance over most extant open-source chat models. Human-centric evaluations, focusing on safety and utility metrics, positioned Llama 2-Chat as a potential contender against proprietary, closed-source counterparts. The development trajectory of Llama 2 emphasized rigorous fine-tuning methodologies. Meta's transparent delineation of these processes aims to catalyze community-driven advancements in LLMs, underscoring a commitment to collaborative and responsible AI development. Code Llama is built on top of Llama 2 and is available in three models: Code Llama, the foundational code model;Codel Llama - Python specialized for Python;and Code Llama - Instruct, which is fine-tuned for understanding natural language instructions. Based on its benchmark testing, Code Llama outperformed state-of-the-art publicly available LLMs (except GPT-4) on code tasks. Llama 2, Llama 2-Chat, and Code Llama are key steps in LLM development but still have a way to go compared to GPT-4. Meta's open access and commitment to improving these models promise transparent and faster LLM progress in the future. Please refer to the LLM and Llama variants below: From LLMs to Multimodal LLMs, like OpenAI's ChatGPT (GPT-3.5), primarily focus on understanding and generating human language. They've been instrumental in tasks like text generation, translation, and even creative writing. However, their scope is limited to text. Enter multimodal models like GPT-4. These are a new breed of AI models that can understand and generate not just text, but also images, sounds, and potentially other types of data. The term \"multimodal\" refers to their ability to process multiple modes or\n",
-    "Score\t 0.5952622757578383\n",
-    "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
-   ]
-  }
- ],
 "source": [
  "for src in res.source_nodes:\n",
  " print(\"Node ID\\t\", src.node_id)\n",
  " print(\"Title\\t\", src.metadata['title'])\n",
@@ -753,12 +358,12 @@
  "id": "iMkpzH7vvb09"
 },
 "source": [
- "# Evaluate"
 ]
},
{
 "cell_type": "code",
- "execution_count": …,
 "metadata": {
  "colab": {
   "base_uri": "https://localhost:8080/"
@@ -766,15 +371,7 @@
  "id": "H8a3eKgKvckU",
  "outputId": "a0e0b170-a49e-4c83-fb5a-579046056af5"
 },
- "outputs": [
-  {
-   "name": "stderr",
-   "output_type": "stream",
-   "text": [
-    "100%|██████████| 108/108 [04:46<00:00, 2.65s/it]\n"
-   ]
-  }
- ],
"source": [
 "from llama_index.core.evaluation import generate_question_context_pairs\n",
 "from llama_index.llms.openai import OpenAI\n",
@@ -790,7 +387,7 @@
 ")\n",
 "\n",
 "# We can save the evaluation dataset as a json file for later use.\n",
- "rag_eval_dataset.save_json(\"./…
]
},
{
@@ -804,100 +401,223 @@
 },
 {
  "cell_type": "code",
- "execution_count": …,
 "metadata": {
  "id": "3sA1K84U254o"
 },
 "outputs": [],
 "source": [
- "…
- "…
- "…
- "…
- "…
- "…
 ]
},
{
 "cell_type": "code",
- "execution_count": …,
 "metadata": {
- "…
 },
 "outputs": [],
 "source": [
- "import …
 "\n",
- "…
- "…
- "…
 "\n",
- "…
- "…
- "…
- " metric_dicts.append(metric_dict)\n",
 "\n",
- "…
 "\n",
- "…
- "…
 "\n",
- "…
- "…
 " )\n",
 "\n",
- " return …
]
},
{
 "cell_type": "code",
- "execution_count": …,
- "metadata": {
-  "colab": {
-   "base_uri": "https://localhost:8080/"
-  },
-  "id": "uNLxDxoc2-Ac",
-  "outputId": "f5762021-37d3-423e-b4d6-23b1066a0e97"
- },
- "outputs": [
-  {
-   "name": "stdout",
-   "output_type": "stream",
-   "text": [
-    " Retriever Name Hit Rate MRR\n",
-    "0 Retriever top_2 0.6625 0.551042\n",
-    " Retriever Name Hit Rate MRR\n",
-    "0 Retriever top_4 0.822917 0.600694\n",
-    " Retriever Name Hit Rate MRR\n",
-    "0 Retriever top_6 0.879167 0.611111\n",
-    " Retriever Name Hit Rate MRR\n",
-    "0 Retriever top_8 0.902083 0.614199\n",
-    " Retriever Name Hit Rate MRR\n",
-    "0 Retriever top_10 0.922917 0.616467\n"
-   ]
-  }
- ],
 "source": [
- "…
 "\n",
- "# …
- "…
- "…
- "…
- "…
- "…
- "…
-  " print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
]
},
{
 "cell_type": "code",
 "execution_count": null,
- "metadata": {
-  …
-  …
 "outputs": [],
- "source": [
}
],
"metadata": {
@@ -907,7 +627,8 @@
 "provenance": []
},
"kernelspec": {
- "display_name": "…
 "name": "python3"
},
"language_info": {
@@ -920,7 +641,7 @@
 "name": "python",
 "nbconvert_exporter": "python",
 "pygments_lexer": "ipython3",
- "version": "3.11.…
},
"widgets": {
 "application/vnd.jupyter.widget-state+json": {
+++ notebooks/15-Use_OpenSource_Models.ipynb (new version; added lines are prefixed with "+"; "⋮" marks unchanged lines skipped by the page)

    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "QPJzr-I9XQ7l"
     },
     "outputs": [],
     "source": [
+     "!pip install -q llama-index==0.10.11 openai==1.12.0 llama-index-finetuning llama-index-llms-together llama-index-llms-gemini llama-index-embeddings-huggingface llama-index-readers-web llama-index-vector-stores-chroma tiktoken==0.6.0 chromadb==0.4.22 pandas==2.2.0 html2text sentence_transformers pydantic kaleido==0.2.1"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "riuXwpSPcvWC"
     },
⋮
     "source": [
      "import os\n",
      "\n",
+     "# Set environment variables for the API keys\n",
+     "os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_API_KEY>\"\n",
+     "os.environ[\"TOGETHER_AI_API_TOKEN\"] = \"<YOUR_API_KEY>\"\n",
+     "os.environ[\"GOOGLE_API_KEY\"] = \"<YOUR_API_KEY>\"\n",
      "\n",
+     "# Allows running asyncio in environments with an existing event loop, like Jupyter notebooks.\n",
      "import nest_asyncio\n",
      "\n",
      "nest_asyncio.apply()"
     ]
    },
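Hardcoding placeholder keys in the cell keeps the lesson simple, but if you adapt this notebook it is safer to read the keys at runtime. A minimal sketch using only the standard library (the interactive prompt is an assumption, not part of the lesson):

    import os
    from getpass import getpass

    # Prompt for each key only if it is not already set in the environment.
    for var in ("OPENAI_API_KEY", "TOGETHER_AI_API_TOKEN", "GOOGLE_API_KEY"):
        if not os.environ.get(var):
            os.environ[var] = getpass(f"Enter {var}: ")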
    {
     "cell_type": "markdown",
     "metadata": {
      "id": "0BwVuJXlzHVL"
     },
     "source": [
+     "# Create a vector store and ingest articles"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "SQP87lHczHKc"
     },
     "outputs": [],
     "source": [
      "import chromadb\n",
      "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
      "\n",
+     "# create vector store\n",
+     "vector_store_name = \"mini-llama-articles\"\n",
+     "chroma_client = chromadb.PersistentClient(path=vector_store_name)\n",
+     "chroma_collection = chroma_client.get_or_create_collection(vector_store_name)\n",
      "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
     ]
    },
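Switching from create_collection to get_or_create_collection makes this cell safe to re-run: the first run creates the persisted collection on disk, and later runs simply reopen it. A small sketch of the idea (the assertion is illustrative):

    import chromadb

    client = chromadb.PersistentClient(path="mini-llama-articles")
    first = client.get_or_create_collection("mini-llama-articles")
    second = client.get_or_create_collection("mini-llama-articles")  # no error on a second call
    assert first.name == second.name  # both handles refer to the same persisted collection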
    {
     "cell_type": "markdown",
     "metadata": {
⋮
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/"
⋮
      "id": "wl_pbPvMlv1h",
      "outputId": "244bb539-f023-46f9-bc25-a05d0e526c14"
     },
+    "outputs": [],
     "source": [
      "!curl -o ./mini-llama-articles.csv https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
     ]
⋮
      "id": "VWBLtDbUWJfA"
     },
     "source": [
+     "## Read articles from file"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/"
⋮
      "id": "0Q9sxuW0g3Gd",
      "outputId": "8b475ed3-23b1-43e1-c8fc-027fc26455c3"
     },
+    "outputs": [],
     "source": [
      "import csv\n",
      "\n",
⋮
      "\n",
      " for idx, row in enumerate( csv_reader ):\n",
      "   if idx == 0: continue; # Skip header row\n",
+     "   rows.append(row)"
     ]
    },
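The csv loop keeps this step dependency-light, but pandas (already pinned in the install cell) does the same job in two lines. A sketch, assuming the CSV carries the four columns used below (title, content, url, source_name):

    import pandas as pd

    # read_csv consumes the header row itself, so no manual skip is needed;
    # each row becomes a list shaped like the ones built by the csv loop.
    df = pd.read_csv("./mini-llama-articles.csv")
    rows = df.values.tolist()
    print(len(rows))  # number of articles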
    {
⋮
      "id": "S17g2RYOjmf2"
     },
     "source": [
+     "## Ingest documents into vector store"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "YizvmXPejkJE"
     },
     "outputs": [],
     "source": [
      "from llama_index.core import Document\n",
+     "from llama_index.core.text_splitter import TokenTextSplitter\n",
+     "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
+     "from llama_index.core.ingestion import IngestionPipeline\n",
      "\n",
      "# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
+     "documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]\n",
      "\n",
      "# Define the splitter object that split the text into segments with 512 tokens,\n",
      "# with a 128 overlap between the segments.\n",
      "text_splitter = TokenTextSplitter(\n",
      " separator=\" \", chunk_size=512, chunk_overlap=128\n",
      ")\n",
      "\n",
      "# Create the pipeline to apply the transformation on each chunk,\n",
      "# and store the transformed text in the chroma vector store.\n",
⋮
      " vector_store=vector_store\n",
      ")\n",
      "\n",
+     "nodes = pipeline.run(documents=documents, show_progress=True)"
     ]
    },
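The pipeline construction itself is unchanged by this commit, so the diff elides it. Under the imports shown above, the standard wiring would look like this sketch; the exact transformation list is an assumption inferred from those imports, not the notebook's confirmed code:

    from llama_index.core.ingestion import IngestionPipeline
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    # Chunk each Document with the splitter, embed each chunk with the BGE model,
    # and write the embedded chunks into the Chroma vector store.
    pipeline = IngestionPipeline(
        transformations=[
            text_splitter,
            HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5"),
        ],
        vector_store=vector_store,
    )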
    {
⋮
      "id": "OWaT6rL7ksp8"
     },
     "source": [
+     "# Load vector store and create query engine"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "mXi56KTXk2sp"
     },
     "outputs": [],
     "source": [
+     "from llama_index.core import ServiceContext\n",
+     "from llama_index.core import VectorStoreIndex\n",
+     "\n",
+     "def from_vector_store_to_index(vector_store, llm, embed_model):\n",
+     "  # Define a ServiceContext that uses the BGE model for embedding which will be loaded from Huggingface.\n",
+     "  # The model will be downloaded to your local machine.\n",
+     "  service_context = ServiceContext.from_defaults(llm=llm, embed_model=embed_model)\n",
+     "\n",
+     "  # Create the index based on the vector store.\n",
+     "  index = VectorStoreIndex.from_vector_store(vector_store, service_context=service_context)\n",
+     "\n",
+     "  return index"
     ]
    },
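The removed output cell in the old version recorded a DeprecationWarning for ServiceContext, deprecated since llama-index 0.10 in favor of the global Settings object. A sketch of how the same helper might be migrated to the newer API (the function name and default model are choices made here, not the lesson's code):

    from llama_index.core import Settings, VectorStoreIndex
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding

    def from_vector_store_to_index_v2(vector_store, llm, embed_model_name="BAAI/bge-small-en-v1.5"):
        # Settings is global state: configure the LLM and embedding model once,
        # then build the index directly from the persisted vector store.
        Settings.llm = llm
        Settings.embed_model = HuggingFaceEmbedding(model_name=embed_model_name)
        return VectorStoreIndex.from_vector_store(vector_store)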
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/",
⋮
      "id": "RZ5iQ_KkJufJ",
      "outputId": "dd6029ee-10ed-4bf8-95d1-88ac5c636c47"
     },
     "outputs": [],
     "source": [
+     "from llama_index.core.prompts import PromptTemplate\n",
+     "from llama_index.llms.together import TogetherLLM\n",
+     "\n",
+     "# Use the Together AI service to access the Llama 3.1 70B Instruct Turbo model\n",
+     "llm = TogetherLLM(\n",
+     "  model=\"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo\",\n",
+     "  api_key=os.environ[\"TOGETHER_AI_API_TOKEN\"]\n",
+     ")\n",
      "\n",
+     "# create index from vector store\n",
+     "index = from_vector_store_to_index(vector_store, llm, \"local:BAAI/bge-small-en-v1.5\")\n",
+     "\n",
+     "# Define a query engine that is responsible for retrieving related pieces of text,\n",
+     "# and using a LLM to formulate the final answer.\n",
+     "query_engine = index.as_query_engine()"
     ]
    },
    {
⋮
      "id": "8JPD8yAinVSq"
     },
     "source": [
+     "# Test query engine"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "rWAI0jUhJ7qH"
     },
     "outputs": [],
     "source": [
+     "res = query_engine.query(\"How many parameters LLaMA2 has?\")\n",
+     "print(res.response)\n",
+     "# 'Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/"
⋮
      "id": "nvSmOtqBoCY2",
      "outputId": "21a60031-4f39-4d1a-fbfa-ddba7d267936"
     },
+    "outputs": [],
     "source": [
+     "# print the source nodes used to write the answer\n",
      "for src in res.source_nodes:\n",
      " print(\"Node ID\\t\", src.node_id)\n",
      " print(\"Title\\t\", src.metadata['title'])\n",
⋮
      "id": "iMkpzH7vvb09"
     },
     "source": [
+     "# Evaluate the retriever"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "colab": {
       "base_uri": "https://localhost:8080/"
⋮
      "id": "H8a3eKgKvckU",
      "outputId": "a0e0b170-a49e-4c83-fb5a-579046056af5"
     },
+    "outputs": [],
     "source": [
      "from llama_index.core.evaluation import generate_question_context_pairs\n",
      "from llama_index.llms.openai import OpenAI\n",
⋮
      ")\n",
      "\n",
      "# We can save the evaluation dataset as a json file for later use.\n",
+     "rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")"
     ]
    },
    {
⋮
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
      "id": "3sA1K84U254o"
     },
     "outputs": [],
     "source": [
+     "from llama_index.finetuning.embeddings.common import (\n",
+     " EmbeddingQAFinetuneDataset,\n",
+     ")\n",
+     "rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
+     " \"./rag_eval_dataset.json\"\n",
+     ")"
     ]
    },
    {
     "cell_type": "code",
+    "execution_count": null,
     "metadata": {
+     "colab": {
+      "base_uri": "https://localhost:8080/"
+     },
+     "id": "uNLxDxoc2-Ac",
+     "outputId": "f5762021-37d3-423e-b4d6-23b1066a0e97"
     },
     "outputs": [],
     "source": [
+     "from llama_index.core.evaluation import RetrieverEvaluator, RelevancyEvaluator, FaithfulnessEvaluator, BatchEvalRunner\n",
+     "from llama_index.llms.openai import OpenAI\n",
+     "\n",
+     "async def run_evaluation(index, rag_eval_dataset, top_k_values, llm_judge, n_queries_to_evaluate=20):\n",
+     "  evaluation_results = {}\n",
+     "\n",
+     "  # ------------------- MRR and Hit Rate -------------------\n",
      "\n",
+     "  for top_k in top_k_values:\n",
+     "    # Get MRR and Hit Rate\n",
+     "    retriever = index.as_retriever(similarity_top_k=top_k)\n",
+     "    retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
+     "      [\"mrr\", \"hit_rate\"], retriever=retriever\n",
+     "    )\n",
+     "    eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
+     "    avg_mrr = sum(res.metric_vals_dict[\"mrr\"] for res in eval_results) / len(eval_results)\n",
+     "    avg_hit_rate = sum(res.metric_vals_dict[\"hit_rate\"] for res in eval_results) / len(eval_results)\n",
      "\n",
+     "    # Collect the evaluation results\n",
+     "    evaluation_results[f\"mrr_@_{top_k}\"] = avg_mrr\n",
+     "    evaluation_results[f\"hit_rate_@_{top_k}\"] = avg_hit_rate\n",
      "\n",
+     "  # ------------------- Faithfulness and Relevancy -------------------\n",
      "\n",
+     "  # Extract the questions from the dataset\n",
+     "  queries = list(rag_eval_dataset.queries.values())\n",
+     "  batch_eval_queries = queries[:n_queries_to_evaluate]\n",
      "\n",
+     "  # Initiate the faithfulness and relevancy evaluator objects\n",
+     "  faithfulness_evaluator = FaithfulnessEvaluator(llm=llm_judge)\n",
+     "  relevancy_evaluator = RelevancyEvaluator(llm=llm_judge)\n",
+     "\n",
+     "  # The batch evaluator runs the evaluation in batches\n",
+     "  runner = BatchEvalRunner(\n",
+     "    {\n",
+     "      \"faithfulness\": faithfulness_evaluator,\n",
+     "      \"relevancy\": relevancy_evaluator\n",
+     "    },\n",
+     "    workers=16,\n",
+     "  )\n",
+     "\n",
+     "  # Get faithfulness and relevancy scores\n",
+     "  query_engine = index.as_query_engine()\n",
+     "  eval_results = await runner.aevaluate_queries(\n",
+     "    query_engine, queries=batch_eval_queries\n",
+     "  )\n",
+     "  faithfulness_score = sum(result.passing for result in eval_results['faithfulness']) / len(eval_results['faithfulness'])\n",
+     "  relevancy_score = sum(result.passing for result in eval_results['relevancy']) / len(eval_results['relevancy'])\n",
+     "  evaluation_results[\"faithfulness\"] = faithfulness_score\n",
+     "  evaluation_results[\"relevancy\"] = relevancy_score\n",
      "\n",
+     "  return evaluation_results"
     ]
    },
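run_evaluation returns a flat dict keyed mrr_@_k, hit_rate_@_k, faithfulness, and relevancy. A small sketch for eyeballing it as a table, using the pandas already pinned in the install cell (the reshaping helper is illustrative, not part of the lesson):

    import pandas as pd

    def results_to_frame(evaluation_results, top_k_values):
        # One row per top_k, with its MRR and hit-rate columns side by side.
        rows = [
            {
                "top_k": k,
                "mrr": evaluation_results[f"mrr_@_{k}"],
                "hit_rate": evaluation_results[f"hit_rate_@_{k}"],
            }
            for k in top_k_values
        ]
        return pd.DataFrame(rows)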
    {
     "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
     "source": [
+     "# We evaluate the retrievers with different top_k values.\n",
+     "top_k_values = [2, 4, 6, 8, 10]\n",
+     "llm_judge = OpenAI(temperature=0, model=\"gpt-4o\")\n",
+     "evaluation_results = await run_evaluation(index, rag_eval_dataset, top_k_values, llm_judge, n_queries_to_evaluate=20)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "print(evaluation_results)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "# Use GPT-3.5-turbo as the LLM model\n",
+     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
      "\n",
+     "# create index from vector store\n",
+     "index = from_vector_store_to_index(vector_store, llm, \"local:BAAI/bge-small-en-v1.5\")\n",
+     "\n",
+     "# run evaluation with GPT-3.5-turbo\n",
+     "top_k_values = [2, 4, 6, 8, 10]\n",
+     "llm_judge = OpenAI(temperature=0, model=\"gpt-4o\")\n",
+     "evaluation_results = await run_evaluation(index, rag_eval_dataset, top_k_values, llm_judge, n_queries_to_evaluate=20)"
     ]
    },
    {
     "cell_type": "code",
     "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "print(evaluation_results)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "from llama_index.llms.gemini import Gemini\n",
+     "\n",
+     "# Use Gemini as the LLM model\n",
+     "llm = Gemini(model=\"models/gemini-1.5-flash\")\n",
+     "\n",
+     "# create index from vector store\n",
+     "index = from_vector_store_to_index(vector_store, llm, \"local:BAAI/bge-small-en-v1.5\")\n",
+     "\n",
+     "# run evaluation with Gemini\n",
+     "top_k_values = [2, 4, 6, 8, 10]\n",
+     "llm_judge = OpenAI(temperature=0, model=\"gpt-4o\")\n",
+     "evaluation_results = await run_evaluation(index, rag_eval_dataset, top_k_values, llm_judge, n_queries_to_evaluate=20)"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "print(evaluation_results)"
+    ]
+   },
+   {
+    "cell_type": "markdown",
+    "metadata": {},
+    "source": [
+     "# Inference speed comparison"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "import time"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "llm = TogetherLLM(\n",
+     " model=\"meta-llama/Meta-Llama-3.1-70B-Instruct-Turbo\",\n",
+     " api_key=os.environ[\"TOGETHER_AI_API_TOKEN\"]\n",
+     ")\n",
+     "\n",
+     "time_start = time.time()\n",
+     "llm.complete(\"List the 50 states in the United States of America. Write their names in a comma-separated list and nothing else.\")\n",
+     "time_end = time.time()\n",
+     "print(\"Time taken for Llama 3.1 70B on Together AI: {0:.2f} seconds\".format(time_end - time_start))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
     "outputs": [],
+    "source": [
+     "llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
+     "\n",
+     "time_start = time.time()\n",
+     "llm.complete(\"List the 50 states in the United States of America. Write their names in a comma-separated list and nothing else.\")\n",
+     "time_end = time.time()\n",
+     "print(\"Time taken for GPT 3.5 Turbo: {0:.2f} seconds\".format(time_end - time_start))"
+    ]
+   },
+   {
+    "cell_type": "code",
+    "execution_count": null,
+    "metadata": {},
+    "outputs": [],
+    "source": [
+     "llm = Gemini(model=\"models/gemini-1.5-flash\")\n",
+     "\n",
+     "time_start = time.time()\n",
+     "llm.complete(\"List the 50 states in the United States of America. Write their names in a comma-separated list and nothing else.\")\n",
+     "time_end = time.time()\n",
+     "print(\"Time taken for Gemini 1.5 Flash: {0:.2f} seconds\".format(time_end - time_start))"
+    ]
   }
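A single wall-clock measurement like the cells above is noisy (network jitter, cold caches). A sketch of a slightly fairer comparison that repeats each call and reports the average; the helper name and repeat count are choices made here, not part of the lesson:

    import time

    def time_llm(llm, prompt, n_runs=3):
        # Average end-to-end latency of llm.complete over n_runs calls.
        durations = []
        for _ in range(n_runs):
            start = time.time()
            llm.complete(prompt)
            durations.append(time.time() - start)
        return sum(durations) / n_runs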
  ],
  "metadata": {
⋮
   "provenance": []
  },
  "kernelspec": {
+  "display_name": "venv_ai_tutor",
+  "language": "python",
   "name": "python3"
  },
  "language_info": {
⋮
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
+  "version": "3.11.4"
  },
  "widgets": {
   "application/vnd.jupyter.widget-state+json": {