AlaFalaki commited on
Commit
4a073d6
β€’
1 Parent(s): 6359fda

Created using Colaboratory

Browse files
notebooks/05-Improve_Prompts_+_Add_Source.ipynb CHANGED
@@ -4,7 +4,7 @@
4
  "metadata": {
5
  "colab": {
6
  "provenance": [],
7
- "authorship_tag": "ABX9TyNxw0V/DQWZAMJINjB72z2U",
8
  "include_colab_link": true
9
  },
10
  "kernelspec": {
@@ -16,7 +16,7 @@
16
  },
17
  "widgets": {
18
  "application/vnd.jupyter.widget-state+json": {
19
- "c7b1637af4d0493db086e497774c8773": {
20
  "model_module": "@jupyter-widgets/controls",
21
  "model_name": "HBoxModel",
22
  "model_module_version": "1.5.0",
@@ -31,14 +31,14 @@
31
  "_view_name": "HBoxView",
32
  "box_style": "",
33
  "children": [
34
- "IPY_MODEL_fcc55e43f3eb48bc88ff242fa07e119d",
35
- "IPY_MODEL_de7130762b214b6693ea0edf9d7198fe",
36
- "IPY_MODEL_cfb653a2b9a64fb792c8f2670e2c544e"
37
  ],
38
- "layout": "IPY_MODEL_d2768ecc72ae4683950345c4649a20ce"
39
  }
40
  },
41
- "fcc55e43f3eb48bc88ff242fa07e119d": {
42
  "model_module": "@jupyter-widgets/controls",
43
  "model_name": "HTMLModel",
44
  "model_module_version": "1.5.0",
@@ -53,13 +53,13 @@
53
  "_view_name": "HTMLView",
54
  "description": "",
55
  "description_tooltip": null,
56
- "layout": "IPY_MODEL_7df6e3771db74ad5805415ee4807285b",
57
  "placeholder": "​",
58
- "style": "IPY_MODEL_f4419077f973496abd0f7d411fd27db6",
59
  "value": "Parsing nodes: 100%"
60
  }
61
  },
62
- "de7130762b214b6693ea0edf9d7198fe": {
63
  "model_module": "@jupyter-widgets/controls",
64
  "model_name": "FloatProgressModel",
65
  "model_module_version": "1.5.0",
@@ -75,15 +75,15 @@
75
  "bar_style": "success",
76
  "description": "",
77
  "description_tooltip": null,
78
- "layout": "IPY_MODEL_6f70532228db428aa668d2f02d3d0a21",
79
  "max": 14,
80
  "min": 0,
81
  "orientation": "horizontal",
82
- "style": "IPY_MODEL_214544d45926426b90fa5c4a6d932dad",
83
  "value": 14
84
  }
85
  },
86
- "cfb653a2b9a64fb792c8f2670e2c544e": {
87
  "model_module": "@jupyter-widgets/controls",
88
  "model_name": "HTMLModel",
89
  "model_module_version": "1.5.0",
@@ -98,13 +98,13 @@
98
  "_view_name": "HTMLView",
99
  "description": "",
100
  "description_tooltip": null,
101
- "layout": "IPY_MODEL_76c1addefa1f4a3b959c97bfb08f2ee6",
102
  "placeholder": "​",
103
- "style": "IPY_MODEL_e6beb9313dd54b0aab34dbff1d34cd26",
104
- "value": " 14/14 [00:00<00:00, 21.59it/s]"
105
  }
106
  },
107
- "d2768ecc72ae4683950345c4649a20ce": {
108
  "model_module": "@jupyter-widgets/base",
109
  "model_name": "LayoutModel",
110
  "model_module_version": "1.2.0",
@@ -156,7 +156,7 @@
156
  "width": null
157
  }
158
  },
159
- "7df6e3771db74ad5805415ee4807285b": {
160
  "model_module": "@jupyter-widgets/base",
161
  "model_name": "LayoutModel",
162
  "model_module_version": "1.2.0",
@@ -208,7 +208,7 @@
208
  "width": null
209
  }
210
  },
211
- "f4419077f973496abd0f7d411fd27db6": {
212
  "model_module": "@jupyter-widgets/controls",
213
  "model_name": "DescriptionStyleModel",
214
  "model_module_version": "1.5.0",
@@ -223,7 +223,7 @@
223
  "description_width": ""
224
  }
225
  },
226
- "6f70532228db428aa668d2f02d3d0a21": {
227
  "model_module": "@jupyter-widgets/base",
228
  "model_name": "LayoutModel",
229
  "model_module_version": "1.2.0",
@@ -275,7 +275,7 @@
275
  "width": null
276
  }
277
  },
278
- "214544d45926426b90fa5c4a6d932dad": {
279
  "model_module": "@jupyter-widgets/controls",
280
  "model_name": "ProgressStyleModel",
281
  "model_module_version": "1.5.0",
@@ -291,7 +291,7 @@
291
  "description_width": ""
292
  }
293
  },
294
- "76c1addefa1f4a3b959c97bfb08f2ee6": {
295
  "model_module": "@jupyter-widgets/base",
296
  "model_name": "LayoutModel",
297
  "model_module_version": "1.2.0",
@@ -343,7 +343,349 @@
343
  "width": null
344
  }
345
  },
346
- "e6beb9313dd54b0aab34dbff1d34cd26": {
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
347
  "model_module": "@jupyter-widgets/controls",
348
  "model_name": "DescriptionStyleModel",
349
  "model_module_version": "1.5.0",
@@ -383,58 +725,55 @@
383
  },
384
  {
385
  "cell_type": "code",
386
- "execution_count": null,
387
  "metadata": {
388
  "id": "QPJzr-I9XQ7l",
389
  "colab": {
390
  "base_uri": "https://localhost:8080/"
391
  },
392
- "outputId": "3661f3f0-bf4c-461c-bbab-bbe9dbe233b8"
393
  },
394
  "outputs": [
395
  {
396
  "output_type": "stream",
397
  "name": "stdout",
398
  "text": [
399
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m34.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
400
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m225.4/225.4 kB\u001b[0m \u001b[31m7.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
401
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m20.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
402
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m508.6/508.6 kB\u001b[0m \u001b[31m32.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
403
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.9/79.9 MB\u001b[0m \u001b[31m10.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
404
  "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.7/45.7 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
405
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.7/51.7 kB\u001b[0m \u001b[31m3.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
406
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.9/75.9 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
407
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m73.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
408
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.1/92.1 kB\u001b[0m \u001b[31m9.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
409
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.7/60.7 kB\u001b[0m \u001b[31m5.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
410
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.1/41.1 kB\u001b[0m \u001b[31m3.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
411
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m56.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
412
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m61.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
413
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.9/57.9 kB\u001b[0m \u001b[31m6.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
414
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m105.6/105.6 kB\u001b[0m \u001b[31m11.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
415
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m6.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
416
  "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
417
  " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
418
  " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
419
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m698.9/698.9 kB\u001b[0m \u001b[31m45.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
420
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m71.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
421
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.6/67.6 kB\u001b[0m \u001b[31m4.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
422
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m72.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
423
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.5/71.5 kB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
424
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.9/76.9 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
425
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m5.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
426
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m3.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
427
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.8/50.8 kB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
428
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m341.4/341.4 kB\u001b[0m \u001b[31m29.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
429
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m51.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
430
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m45.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
431
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m12.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
432
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.4/49.4 kB\u001b[0m \u001b[31m4.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
433
- "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
434
- "\u001b[?25h Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
435
- "\u001b[31mERROR: pip's dependency resolver does not currently take into account all the packages that are installed. This behaviour is the source of the following dependency conflicts.\n",
436
- "tensorflow-probability 0.22.0 requires typing-extensions<4.6.0, but you have typing-extensions 4.9.0 which is incompatible.\u001b[0m\u001b[31m\n",
437
- "\u001b[0m"
438
  ]
439
  }
440
  ],
@@ -453,7 +792,7 @@
453
  "metadata": {
454
  "id": "riuXwpSPcvWC"
455
  },
456
- "execution_count": null,
457
  "outputs": []
458
  },
459
  {
@@ -466,7 +805,7 @@
466
  "metadata": {
467
  "id": "km-KQOrgr3VB"
468
  },
469
- "execution_count": null,
470
  "outputs": []
471
  },
472
  {
@@ -488,7 +827,7 @@
488
  "metadata": {
489
  "id": "9oGT6crooSSj"
490
  },
491
- "execution_count": null,
492
  "outputs": []
493
  },
494
  {
@@ -513,7 +852,7 @@
513
  "metadata": {
514
  "id": "SQP87lHczHKc"
515
  },
516
- "execution_count": null,
517
  "outputs": []
518
  },
519
  {
@@ -527,7 +866,7 @@
527
  "metadata": {
528
  "id": "zAaGcYMJzHAN"
529
  },
530
- "execution_count": null,
531
  "outputs": []
532
  },
533
  {
@@ -567,24 +906,24 @@
567
  "base_uri": "https://localhost:8080/"
568
  },
569
  "id": "fQtpDvUzKNzI",
570
- "outputId": "b23ca99d-db45-42a8-f1d1-271f19f30148"
571
  },
572
- "execution_count": null,
573
  "outputs": [
574
  {
575
  "output_type": "stream",
576
  "name": "stdout",
577
  "text": [
578
- "--2024-02-05 19:52:41-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
579
- "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.108.133, 185.199.109.133, 185.199.110.133, ...\n",
580
- "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.108.133|:443... connected.\n",
581
  "HTTP request sent, awaiting response... 200 OK\n",
582
  "Length: 173646 (170K) [text/plain]\n",
583
  "Saving to: β€˜mini-llama-articles.csv’\n",
584
  "\n",
585
- "mini-llama-articles 100%[===================>] 169.58K --.-KB/s in 0.03s \n",
586
  "\n",
587
- "2024-02-05 19:52:42 (6.50 MB/s) - β€˜mini-llama-articles.csv’ saved [173646/173646]\n",
588
  "\n"
589
  ]
590
  }
@@ -622,9 +961,9 @@
622
  "base_uri": "https://localhost:8080/"
623
  },
624
  "id": "_WER5lt0N7c5",
625
- "outputId": "274c4160-cec2-48c2-9ab3-b40252c5ae9b"
626
  },
627
- "execution_count": null,
628
  "outputs": [
629
  {
630
  "output_type": "execute_result",
@@ -634,7 +973,7 @@
634
  ]
635
  },
636
  "metadata": {},
637
- "execution_count": 8
638
  }
639
  ]
640
  },
@@ -658,7 +997,7 @@
658
  "metadata": {
659
  "id": "lFvW_886dxKX"
660
  },
661
- "execution_count": null,
662
  "outputs": []
663
  },
664
  {
@@ -671,9 +1010,9 @@
671
  "base_uri": "https://localhost:8080/"
672
  },
673
  "id": "Njoc3XEVkKkf",
674
- "outputId": "80ddc864-e60f-4058-8aed-1cfa0c202d75"
675
  },
676
- "execution_count": null,
677
  "outputs": [
678
  {
679
  "output_type": "execute_result",
@@ -683,7 +1022,7 @@
683
  ]
684
  },
685
  "metadata": {},
686
- "execution_count": 10
687
  }
688
  ]
689
  },
@@ -710,7 +1049,7 @@
710
  "metadata": {
711
  "id": "STACTMUR1z9N"
712
  },
713
- "execution_count": null,
714
  "outputs": []
715
  },
716
  {
@@ -736,24 +1075,35 @@
736
  "id": "CtdsIUQ81_hT",
737
  "colab": {
738
  "base_uri": "https://localhost:8080/",
739
- "height": 299,
740
  "referenced_widgets": [
741
- "c7b1637af4d0493db086e497774c8773",
742
- "fcc55e43f3eb48bc88ff242fa07e119d",
743
- "de7130762b214b6693ea0edf9d7198fe",
744
- "cfb653a2b9a64fb792c8f2670e2c544e",
745
- "d2768ecc72ae4683950345c4649a20ce",
746
- "7df6e3771db74ad5805415ee4807285b",
747
- "f4419077f973496abd0f7d411fd27db6",
748
- "6f70532228db428aa668d2f02d3d0a21",
749
- "214544d45926426b90fa5c4a6d932dad",
750
- "76c1addefa1f4a3b959c97bfb08f2ee6",
751
- "e6beb9313dd54b0aab34dbff1d34cd26"
 
 
 
 
 
 
 
 
 
 
 
752
  ]
753
  },
754
- "outputId": "80818201-256e-4b22-badc-5216e8b73199"
755
  },
756
- "execution_count": null,
757
  "outputs": [
758
  {
759
  "output_type": "display_data",
@@ -764,7 +1114,7 @@
764
  "application/vnd.jupyter.widget-view+json": {
765
  "version_major": 2,
766
  "version_minor": 0,
767
- "model_id": "c7b1637af4d0493db086e497774c8773"
768
  }
769
  },
770
  "metadata": {}
@@ -788,32 +1138,20 @@
788
  "431\n",
789
  "453\n"
790
  ]
791
- }
792
- ]
793
- },
794
- {
795
- "cell_type": "code",
796
- "source": [
797
- "b[0]"
798
- ],
799
- "metadata": {
800
- "colab": {
801
- "base_uri": "https://localhost:8080/"
802
  },
803
- "id": "lv4ShZ870zGu",
804
- "outputId": "b132ab13-92f7-4517-a7d8-433794a8eb13"
805
- },
806
- "execution_count": null,
807
- "outputs": [
808
  {
809
- "output_type": "execute_result",
810
  "data": {
811
  "text/plain": [
812
- "TextNode(id_='3171b3fc-4731-431f-9db2-789191bc4ad9', embedding=None, metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, excluded_embed_metadata_keys=[], excluded_llm_metadata_keys=[], relationships={<NodeRelationship.SOURCE: '1'>: RelatedNodeInfo(node_id='69c3e834-02f6-4d4a-b982-ae6fde0826c2', node_type=<ObjectType.DOCUMENT: '4'>, metadata={'title': \"Beyond GPT-4: What's New?\", 'url': 'https://pub.towardsai.net/beyond-gpt-4-whats-new-cbd61a448eb9#dda8', 'source_name': 'towards_ai'}, hash='3b095b0e25cdf965d950cdbd7feb8024030e7645998c1a33dc4427affca624ab'), <NodeRelationship.NEXT: '3'>: RelatedNodeInfo(node_id='ac4f7a77-a66a-449b-a78d-2ea34546ab9b', node_type=<ObjectType.TEXT: '1'>, metadata={}, hash='71418de3d50e604c2581574f1abf2248e5cc3ab7c74a3182c37cb1152d0cfd21')}, hash='cd88ca98c885d949a6e0a9d5c0a30a72c989d72cabd5c5cd73c9797b668c0dc2', text='LLM Variants and Meta\\'s Open Source Before shedding light on four major trends, I\\'d share the latest Meta\\'s Llama 2 and Code Llama. Meta\\'s Llama 2 represents a sophisticated evolution in LLMs. This suite spans models pretrained and fine-tuned across a parameter spectrum of 7 billion to 70 billion. A specialized derivative, Llama 2-Chat, has been engineered explicitly for dialogue-centric applications. Benchmarking revealed Llama 2\\'s superior performance over most extant open-source chat models. Human-centric evaluations, focusing on safety and utility metrics, positioned Llama 2-Chat as a potential contender against proprietary, closed-source counterparts. The development trajectory of Llama 2 emphasized rigorous fine-tuning methodologies. Meta\\'s transparent delineation of these processes aims to catalyze community-driven advancements in LLMs, underscoring a commitment to collaborative and responsible AI development. 
Code Llama is built on top of Llama 2 and is available in three models: Code Llama, the foundational code model;Codel Llama - Python specialized for Python;and Code Llama - Instruct, which is fine-tuned for understanding natural language instructions. Based on its benchmark testing, Code Llama outperformed state-of-the-art publicly available LLMs (except GPT-4) on code tasks. Llama 2, Llama 2-Chat, and Code Llama are key steps in LLM development but still have a way to go compared to GPT-4. Meta\\'s open access and commitment to improving these models promise transparent and faster LLM progress in the future. Please refer to the LLM and Llama variants below: From LLMs to Multimodal LLMs, like OpenAI\\'s ChatGPT (GPT-3.5), primarily focus on understanding and generating human language. They\\'ve been instrumental in tasks like text generation, translation, and even creative writing. However, their scope is limited to text. Enter multimodal models like GPT-4. These are a new breed of AI models that can understand and generate not just text, but also images, sounds, and potentially other types of data. The term \"multimodal\" refers to their ability to process multiple modes or', start_char_idx=0, end_char_idx=2117, text_template='{metadata_str}\\n\\n{content}', metadata_template='{key}: {value}', metadata_seperator='\\n')"
813
- ]
 
 
 
 
 
814
  },
815
- "metadata": {},
816
- "execution_count": 18
817
  }
818
  ]
819
  },
@@ -837,7 +1175,7 @@
837
  "metadata": {
838
  "id": "PS215gCGkGD-"
839
  },
840
- "execution_count": null,
841
  "outputs": []
842
  },
843
  {
@@ -851,7 +1189,7 @@
851
  "metadata": {
852
  "id": "HbT3-kRO4Qpt"
853
  },
854
- "execution_count": null,
855
  "outputs": []
856
  },
857
  {
@@ -864,7 +1202,7 @@
864
  "metadata": {
865
  "id": "sb61DWU84bHP"
866
  },
867
- "execution_count": null,
868
  "outputs": []
869
  },
870
  {
@@ -875,7 +1213,7 @@
875
  "metadata": {
876
  "id": "G32W2LMMCmnv"
877
  },
878
- "execution_count": null,
879
  "outputs": []
880
  },
881
  {
@@ -889,9 +1227,9 @@
889
  "height": 35
890
  },
891
  "id": "obc20cU5Cxf2",
892
- "outputId": "837babce-9edf-4a3f-f996-c0c407ae027c"
893
  },
894
- "execution_count": null,
895
  "outputs": [
896
  {
897
  "output_type": "execute_result",
@@ -904,7 +1242,7 @@
904
  }
905
  },
906
  "metadata": {},
907
- "execution_count": 19
908
  }
909
  ]
910
  },
@@ -924,36 +1262,247 @@
924
  "base_uri": "https://localhost:8080/"
925
  },
926
  "id": "oIAO-saJCzYe",
927
- "outputId": "bce85c7c-502c-4a7b-f3e2-f721f3d6b5a4"
928
  },
929
- "execution_count": null,
930
  "outputs": [
931
  {
932
  "output_type": "stream",
933
  "name": "stdout",
934
  "text": [
935
- "Node ID\t b0773f70-844b-4153-a946-7903dcf1498d\n",
936
  "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
937
  "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. 
However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
938
- "Score\t 0.7126396048057712\n",
939
  "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
940
- "Node ID\t 331c0812-27e7-4f08-b00d-a626925b3bcd\n",
941
  "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
942
  "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. 
However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
943
- "Score\t 0.7051752833671283\n",
944
  "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
945
  ]
946
  }
947
  ]
948
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
949
  {
950
  "cell_type": "code",
951
- "source": [],
 
 
 
952
  "metadata": {
953
  "id": "d4xxZHbdN0lK"
954
  },
955
- "execution_count": null,
 
 
 
 
 
 
 
 
 
 
 
956
  "outputs": []
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
957
  }
958
  ]
959
  }
 
4
  "metadata": {
5
  "colab": {
6
  "provenance": [],
7
+ "authorship_tag": "ABX9TyPHUCVR9OPVGnLj3XoIzKS4",
8
  "include_colab_link": true
9
  },
10
  "kernelspec": {
 
16
  },
17
  "widgets": {
18
  "application/vnd.jupyter.widget-state+json": {
19
+ "9b38fd520d1a4700bbc596b260a9a96f": {
20
  "model_module": "@jupyter-widgets/controls",
21
  "model_name": "HBoxModel",
22
  "model_module_version": "1.5.0",
 
31
  "_view_name": "HBoxView",
32
  "box_style": "",
33
  "children": [
34
+ "IPY_MODEL_5320a84d7a00443e86af8f031d71685d",
35
+ "IPY_MODEL_4f3f1f990d244eb290482be55525daec",
36
+ "IPY_MODEL_9a4eb44d43dc42d9acdb606b6d55ad9f"
37
  ],
38
+ "layout": "IPY_MODEL_51de9732c1e04961b16351d3f410ac1d"
39
  }
40
  },
41
+ "5320a84d7a00443e86af8f031d71685d": {
42
  "model_module": "@jupyter-widgets/controls",
43
  "model_name": "HTMLModel",
44
  "model_module_version": "1.5.0",
 
53
  "_view_name": "HTMLView",
54
  "description": "",
55
  "description_tooltip": null,
56
+ "layout": "IPY_MODEL_b40ee74dabec45ce842bcfb983d3fa75",
57
  "placeholder": "​",
58
+ "style": "IPY_MODEL_0c0ba53346954abc85f0921b682e7279",
59
  "value": "Parsing nodes: 100%"
60
  }
61
  },
62
+ "4f3f1f990d244eb290482be55525daec": {
63
  "model_module": "@jupyter-widgets/controls",
64
  "model_name": "FloatProgressModel",
65
  "model_module_version": "1.5.0",
 
75
  "bar_style": "success",
76
  "description": "",
77
  "description_tooltip": null,
78
+ "layout": "IPY_MODEL_9372c35dcfc04e16a97c0eb63003520e",
79
  "max": 14,
80
  "min": 0,
81
  "orientation": "horizontal",
82
+ "style": "IPY_MODEL_c6f3cd2404ef4a3096a61c1fcdbddd8f",
83
  "value": 14
84
  }
85
  },
86
+ "9a4eb44d43dc42d9acdb606b6d55ad9f": {
87
  "model_module": "@jupyter-widgets/controls",
88
  "model_name": "HTMLModel",
89
  "model_module_version": "1.5.0",
 
98
  "_view_name": "HTMLView",
99
  "description": "",
100
  "description_tooltip": null,
101
+ "layout": "IPY_MODEL_181bd6b10e9e4ec693ece948fd432302",
102
  "placeholder": "​",
103
+ "style": "IPY_MODEL_0c55e54063ea44ab8ea83466d9603a6d",
104
+ "value": " 14/14 [00:01&lt;00:00, 15.95it/s]"
105
  }
106
  },
107
+ "51de9732c1e04961b16351d3f410ac1d": {
108
  "model_module": "@jupyter-widgets/base",
109
  "model_name": "LayoutModel",
110
  "model_module_version": "1.2.0",
 
156
  "width": null
157
  }
158
  },
159
+ "b40ee74dabec45ce842bcfb983d3fa75": {
160
  "model_module": "@jupyter-widgets/base",
161
  "model_name": "LayoutModel",
162
  "model_module_version": "1.2.0",
 
208
  "width": null
209
  }
210
  },
211
+ "0c0ba53346954abc85f0921b682e7279": {
212
  "model_module": "@jupyter-widgets/controls",
213
  "model_name": "DescriptionStyleModel",
214
  "model_module_version": "1.5.0",
 
223
  "description_width": ""
224
  }
225
  },
226
+ "9372c35dcfc04e16a97c0eb63003520e": {
227
  "model_module": "@jupyter-widgets/base",
228
  "model_name": "LayoutModel",
229
  "model_module_version": "1.2.0",
 
275
  "width": null
276
  }
277
  },
278
+ "c6f3cd2404ef4a3096a61c1fcdbddd8f": {
279
  "model_module": "@jupyter-widgets/controls",
280
  "model_name": "ProgressStyleModel",
281
  "model_module_version": "1.5.0",
 
291
  "description_width": ""
292
  }
293
  },
294
+ "181bd6b10e9e4ec693ece948fd432302": {
295
  "model_module": "@jupyter-widgets/base",
296
  "model_name": "LayoutModel",
297
  "model_module_version": "1.2.0",
 
343
  "width": null
344
  }
345
  },
346
+ "0c55e54063ea44ab8ea83466d9603a6d": {
347
+ "model_module": "@jupyter-widgets/controls",
348
+ "model_name": "DescriptionStyleModel",
349
+ "model_module_version": "1.5.0",
350
+ "state": {
351
+ "_model_module": "@jupyter-widgets/controls",
352
+ "_model_module_version": "1.5.0",
353
+ "_model_name": "DescriptionStyleModel",
354
+ "_view_count": null,
355
+ "_view_module": "@jupyter-widgets/base",
356
+ "_view_module_version": "1.2.0",
357
+ "_view_name": "StyleView",
358
+ "description_width": ""
359
+ }
360
+ },
361
+ "739a7d470a024bc2806e2ea998bf1dac": {
362
+ "model_module": "@jupyter-widgets/controls",
363
+ "model_name": "HBoxModel",
364
+ "model_module_version": "1.5.0",
365
+ "state": {
366
+ "_dom_classes": [],
367
+ "_model_module": "@jupyter-widgets/controls",
368
+ "_model_module_version": "1.5.0",
369
+ "_model_name": "HBoxModel",
370
+ "_view_count": null,
371
+ "_view_module": "@jupyter-widgets/controls",
372
+ "_view_module_version": "1.5.0",
373
+ "_view_name": "HBoxView",
374
+ "box_style": "",
375
+ "children": [
376
+ "IPY_MODEL_299757dc40394c3287beea74c40dec27",
377
+ "IPY_MODEL_6c111aa1d43a4af9b04355a65c8fccb2",
378
+ "IPY_MODEL_4926bed77e464729b902c20bd7874a03"
379
+ ],
380
+ "layout": "IPY_MODEL_5c1eaae6cf2840ab96f1a1d6a1f91881"
381
+ }
382
+ },
383
+ "299757dc40394c3287beea74c40dec27": {
384
+ "model_module": "@jupyter-widgets/controls",
385
+ "model_name": "HTMLModel",
386
+ "model_module_version": "1.5.0",
387
+ "state": {
388
+ "_dom_classes": [],
389
+ "_model_module": "@jupyter-widgets/controls",
390
+ "_model_module_version": "1.5.0",
391
+ "_model_name": "HTMLModel",
392
+ "_view_count": null,
393
+ "_view_module": "@jupyter-widgets/controls",
394
+ "_view_module_version": "1.5.0",
395
+ "_view_name": "HTMLView",
396
+ "description": "",
397
+ "description_tooltip": null,
398
+ "layout": "IPY_MODEL_d4b409c70f3f4398ad88ede8f438e32a",
399
+ "placeholder": "​",
400
+ "style": "IPY_MODEL_85fa4db33aa8427ba18d43f9a529529b",
401
+ "value": "Generating embeddings: 100%"
402
+ }
403
+ },
404
+ "6c111aa1d43a4af9b04355a65c8fccb2": {
405
+ "model_module": "@jupyter-widgets/controls",
406
+ "model_name": "FloatProgressModel",
407
+ "model_module_version": "1.5.0",
408
+ "state": {
409
+ "_dom_classes": [],
410
+ "_model_module": "@jupyter-widgets/controls",
411
+ "_model_module_version": "1.5.0",
412
+ "_model_name": "FloatProgressModel",
413
+ "_view_count": null,
414
+ "_view_module": "@jupyter-widgets/controls",
415
+ "_view_module_version": "1.5.0",
416
+ "_view_name": "ProgressView",
417
+ "bar_style": "success",
418
+ "description": "",
419
+ "description_tooltip": null,
420
+ "layout": "IPY_MODEL_a9e8371d627a48e69c7a725646f689d5",
421
+ "max": 108,
422
+ "min": 0,
423
+ "orientation": "horizontal",
424
+ "style": "IPY_MODEL_e8a00080ca684fcc97189f5f3ea325e3",
425
+ "value": 108
426
+ }
427
+ },
428
+ "4926bed77e464729b902c20bd7874a03": {
429
+ "model_module": "@jupyter-widgets/controls",
430
+ "model_name": "HTMLModel",
431
+ "model_module_version": "1.5.0",
432
+ "state": {
433
+ "_dom_classes": [],
434
+ "_model_module": "@jupyter-widgets/controls",
435
+ "_model_module_version": "1.5.0",
436
+ "_model_name": "HTMLModel",
437
+ "_view_count": null,
438
+ "_view_module": "@jupyter-widgets/controls",
439
+ "_view_module_version": "1.5.0",
440
+ "_view_name": "HTMLView",
441
+ "description": "",
442
+ "description_tooltip": null,
443
+ "layout": "IPY_MODEL_d7213ef5bbb7409cbe40437bde51b5c9",
444
+ "placeholder": "​",
445
+ "style": "IPY_MODEL_652d2e07d8be4f1f87c2f258cf288f1a",
446
+ "value": " 108/108 [00:05&lt;00:00, 28.51it/s]"
447
+ }
448
+ },
449
+ "5c1eaae6cf2840ab96f1a1d6a1f91881": {
450
+ "model_module": "@jupyter-widgets/base",
451
+ "model_name": "LayoutModel",
452
+ "model_module_version": "1.2.0",
453
+ "state": {
454
+ "_model_module": "@jupyter-widgets/base",
455
+ "_model_module_version": "1.2.0",
456
+ "_model_name": "LayoutModel",
457
+ "_view_count": null,
458
+ "_view_module": "@jupyter-widgets/base",
459
+ "_view_module_version": "1.2.0",
460
+ "_view_name": "LayoutView",
461
+ "align_content": null,
462
+ "align_items": null,
463
+ "align_self": null,
464
+ "border": null,
465
+ "bottom": null,
466
+ "display": null,
467
+ "flex": null,
468
+ "flex_flow": null,
469
+ "grid_area": null,
470
+ "grid_auto_columns": null,
471
+ "grid_auto_flow": null,
472
+ "grid_auto_rows": null,
473
+ "grid_column": null,
474
+ "grid_gap": null,
475
+ "grid_row": null,
476
+ "grid_template_areas": null,
477
+ "grid_template_columns": null,
478
+ "grid_template_rows": null,
479
+ "height": null,
480
+ "justify_content": null,
481
+ "justify_items": null,
482
+ "left": null,
483
+ "margin": null,
484
+ "max_height": null,
485
+ "max_width": null,
486
+ "min_height": null,
487
+ "min_width": null,
488
+ "object_fit": null,
489
+ "object_position": null,
490
+ "order": null,
491
+ "overflow": null,
492
+ "overflow_x": null,
493
+ "overflow_y": null,
494
+ "padding": null,
495
+ "right": null,
496
+ "top": null,
497
+ "visibility": null,
498
+ "width": null
499
+ }
500
+ },
501
+ "d4b409c70f3f4398ad88ede8f438e32a": {
502
+ "model_module": "@jupyter-widgets/base",
503
+ "model_name": "LayoutModel",
504
+ "model_module_version": "1.2.0",
505
+ "state": {
506
+ "_model_module": "@jupyter-widgets/base",
507
+ "_model_module_version": "1.2.0",
508
+ "_model_name": "LayoutModel",
509
+ "_view_count": null,
510
+ "_view_module": "@jupyter-widgets/base",
511
+ "_view_module_version": "1.2.0",
512
+ "_view_name": "LayoutView",
513
+ "align_content": null,
514
+ "align_items": null,
515
+ "align_self": null,
516
+ "border": null,
517
+ "bottom": null,
518
+ "display": null,
519
+ "flex": null,
520
+ "flex_flow": null,
521
+ "grid_area": null,
522
+ "grid_auto_columns": null,
523
+ "grid_auto_flow": null,
524
+ "grid_auto_rows": null,
525
+ "grid_column": null,
526
+ "grid_gap": null,
527
+ "grid_row": null,
528
+ "grid_template_areas": null,
529
+ "grid_template_columns": null,
530
+ "grid_template_rows": null,
531
+ "height": null,
532
+ "justify_content": null,
533
+ "justify_items": null,
534
+ "left": null,
535
+ "margin": null,
536
+ "max_height": null,
537
+ "max_width": null,
538
+ "min_height": null,
539
+ "min_width": null,
540
+ "object_fit": null,
541
+ "object_position": null,
542
+ "order": null,
543
+ "overflow": null,
544
+ "overflow_x": null,
545
+ "overflow_y": null,
546
+ "padding": null,
547
+ "right": null,
548
+ "top": null,
549
+ "visibility": null,
550
+ "width": null
551
+ }
552
+ },
553
+ "85fa4db33aa8427ba18d43f9a529529b": {
554
+ "model_module": "@jupyter-widgets/controls",
555
+ "model_name": "DescriptionStyleModel",
556
+ "model_module_version": "1.5.0",
557
+ "state": {
558
+ "_model_module": "@jupyter-widgets/controls",
559
+ "_model_module_version": "1.5.0",
560
+ "_model_name": "DescriptionStyleModel",
561
+ "_view_count": null,
562
+ "_view_module": "@jupyter-widgets/base",
563
+ "_view_module_version": "1.2.0",
564
+ "_view_name": "StyleView",
565
+ "description_width": ""
566
+ }
567
+ },
568
+ "a9e8371d627a48e69c7a725646f689d5": {
569
+ "model_module": "@jupyter-widgets/base",
570
+ "model_name": "LayoutModel",
571
+ "model_module_version": "1.2.0",
572
+ "state": {
573
+ "_model_module": "@jupyter-widgets/base",
574
+ "_model_module_version": "1.2.0",
575
+ "_model_name": "LayoutModel",
576
+ "_view_count": null,
577
+ "_view_module": "@jupyter-widgets/base",
578
+ "_view_module_version": "1.2.0",
579
+ "_view_name": "LayoutView",
580
+ "align_content": null,
581
+ "align_items": null,
582
+ "align_self": null,
583
+ "border": null,
584
+ "bottom": null,
585
+ "display": null,
586
+ "flex": null,
587
+ "flex_flow": null,
588
+ "grid_area": null,
589
+ "grid_auto_columns": null,
590
+ "grid_auto_flow": null,
591
+ "grid_auto_rows": null,
592
+ "grid_column": null,
593
+ "grid_gap": null,
594
+ "grid_row": null,
595
+ "grid_template_areas": null,
596
+ "grid_template_columns": null,
597
+ "grid_template_rows": null,
598
+ "height": null,
599
+ "justify_content": null,
600
+ "justify_items": null,
601
+ "left": null,
602
+ "margin": null,
603
+ "max_height": null,
604
+ "max_width": null,
605
+ "min_height": null,
606
+ "min_width": null,
607
+ "object_fit": null,
608
+ "object_position": null,
609
+ "order": null,
610
+ "overflow": null,
611
+ "overflow_x": null,
612
+ "overflow_y": null,
613
+ "padding": null,
614
+ "right": null,
615
+ "top": null,
616
+ "visibility": null,
617
+ "width": null
618
+ }
619
+ },
620
+ "e8a00080ca684fcc97189f5f3ea325e3": {
621
+ "model_module": "@jupyter-widgets/controls",
622
+ "model_name": "ProgressStyleModel",
623
+ "model_module_version": "1.5.0",
624
+ "state": {
625
+ "_model_module": "@jupyter-widgets/controls",
626
+ "_model_module_version": "1.5.0",
627
+ "_model_name": "ProgressStyleModel",
628
+ "_view_count": null,
629
+ "_view_module": "@jupyter-widgets/base",
630
+ "_view_module_version": "1.2.0",
631
+ "_view_name": "StyleView",
632
+ "bar_color": null,
633
+ "description_width": ""
634
+ }
635
+ },
636
+ "d7213ef5bbb7409cbe40437bde51b5c9": {
637
+ "model_module": "@jupyter-widgets/base",
638
+ "model_name": "LayoutModel",
639
+ "model_module_version": "1.2.0",
640
+ "state": {
641
+ "_model_module": "@jupyter-widgets/base",
642
+ "_model_module_version": "1.2.0",
643
+ "_model_name": "LayoutModel",
644
+ "_view_count": null,
645
+ "_view_module": "@jupyter-widgets/base",
646
+ "_view_module_version": "1.2.0",
647
+ "_view_name": "LayoutView",
648
+ "align_content": null,
649
+ "align_items": null,
650
+ "align_self": null,
651
+ "border": null,
652
+ "bottom": null,
653
+ "display": null,
654
+ "flex": null,
655
+ "flex_flow": null,
656
+ "grid_area": null,
657
+ "grid_auto_columns": null,
658
+ "grid_auto_flow": null,
659
+ "grid_auto_rows": null,
660
+ "grid_column": null,
661
+ "grid_gap": null,
662
+ "grid_row": null,
663
+ "grid_template_areas": null,
664
+ "grid_template_columns": null,
665
+ "grid_template_rows": null,
666
+ "height": null,
667
+ "justify_content": null,
668
+ "justify_items": null,
669
+ "left": null,
670
+ "margin": null,
671
+ "max_height": null,
672
+ "max_width": null,
673
+ "min_height": null,
674
+ "min_width": null,
675
+ "object_fit": null,
676
+ "object_position": null,
677
+ "order": null,
678
+ "overflow": null,
679
+ "overflow_x": null,
680
+ "overflow_y": null,
681
+ "padding": null,
682
+ "right": null,
683
+ "top": null,
684
+ "visibility": null,
685
+ "width": null
686
+ }
687
+ },
688
+ "652d2e07d8be4f1f87c2f258cf288f1a": {
689
  "model_module": "@jupyter-widgets/controls",
690
  "model_name": "DescriptionStyleModel",
691
  "model_module_version": "1.5.0",
 
725
  },
726
  {
727
  "cell_type": "code",
728
+ "execution_count": 1,
729
  "metadata": {
730
  "id": "QPJzr-I9XQ7l",
731
  "colab": {
732
  "base_uri": "https://localhost:8080/"
733
  },
734
+ "outputId": "b6cb3d46-9ad9-4658-be9c-a24bcab98c7c"
735
  },
736
  "outputs": [
737
  {
738
  "output_type": "stream",
739
  "name": "stdout",
740
  "text": [
741
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m36.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
742
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m225.4/225.4 kB\u001b[0m \u001b[31m8.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
743
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m44.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
744
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m508.6/508.6 kB\u001b[0m \u001b[31m28.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
745
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.9/79.9 MB\u001b[0m \u001b[31m7.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
746
  "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.7/45.7 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
747
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.7/51.7 kB\u001b[0m \u001b[31m5.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
748
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.9/75.9 kB\u001b[0m \u001b[31m8.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
749
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m21.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
750
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━���━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.1/92.1 kB\u001b[0m \u001b[31m10.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
751
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.8/60.8 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
752
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.1/41.1 kB\u001b[0m \u001b[31m3.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
753
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m19.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
754
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m18.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
755
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.9/57.9 kB\u001b[0m \u001b[31m5.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
756
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m105.6/105.6 kB\u001b[0m \u001b[31m8.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
757
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m6.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
758
  "\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
759
  " Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
760
  " Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
761
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m698.9/698.9 kB\u001b[0m \u001b[31m11.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
762
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m11.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
763
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.6/67.6 kB\u001b[0m \u001b[31m6.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
764
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m12.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
765
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.5/71.5 kB\u001b[0m \u001b[31m7.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
766
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m77.0/77.0 kB\u001b[0m \u001b[31m8.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
767
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
768
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m4.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
769
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.8/50.8 kB\u001b[0m \u001b[31m4.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
770
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m341.4/341.4 kB\u001b[0m \u001b[31m14.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
771
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m12.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
772
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m10.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
773
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m8.4 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
774
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.4/49.4 kB\u001b[0m \u001b[31m4.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
775
+ "\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
776
+ "\u001b[?25h Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"
 
 
 
777
  ]
778
  }
779
  ],
 
792
  "metadata": {
793
  "id": "riuXwpSPcvWC"
794
  },
795
+ "execution_count": 2,
796
  "outputs": []
797
  },
798
  {
 
805
  "metadata": {
806
  "id": "km-KQOrgr3VB"
807
  },
808
+ "execution_count": 3,
809
  "outputs": []
810
  },
811
  {
 
827
  "metadata": {
828
  "id": "9oGT6crooSSj"
829
  },
830
+ "execution_count": 4,
831
  "outputs": []
832
  },
833
  {
 
852
  "metadata": {
853
  "id": "SQP87lHczHKc"
854
  },
855
+ "execution_count": 9,
856
  "outputs": []
857
  },
858
  {
 
866
  "metadata": {
867
  "id": "zAaGcYMJzHAN"
868
  },
869
+ "execution_count": 10,
870
  "outputs": []
871
  },
872
  {
 
906
  "base_uri": "https://localhost:8080/"
907
  },
908
  "id": "fQtpDvUzKNzI",
909
+ "outputId": "829f8e63-7767-43a1-b3c9-95ae099012e7"
910
  },
911
+ "execution_count": 5,
912
  "outputs": [
913
  {
914
  "output_type": "stream",
915
  "name": "stdout",
916
  "text": [
917
+ "--2024-02-14 20:06:10-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
918
+ "Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.108.133, 185.199.109.133, ...\n",
919
+ "Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... connected.\n",
920
  "HTTP request sent, awaiting response... 200 OK\n",
921
  "Length: 173646 (170K) [text/plain]\n",
922
  "Saving to: β€˜mini-llama-articles.csv’\n",
923
  "\n",
924
+ "\rmini-llama-articles 0%[ ] 0 --.-KB/s \rmini-llama-articles 100%[===================>] 169.58K --.-KB/s in 0.02s \n",
925
  "\n",
926
+ "2024-02-14 20:06:10 (6.80 MB/s) - β€˜mini-llama-articles.csv’ saved [173646/173646]\n",
927
  "\n"
928
  ]
929
  }
 
961
  "base_uri": "https://localhost:8080/"
962
  },
963
  "id": "_WER5lt0N7c5",
964
+ "outputId": "2e4eae71-fa3a-4faf-a4e2-d3efaeaa591a"
965
  },
966
+ "execution_count": 6,
967
  "outputs": [
968
  {
969
  "output_type": "execute_result",
 
973
  ]
974
  },
975
  "metadata": {},
976
+ "execution_count": 6
977
  }
978
  ]
979
  },
 
997
  "metadata": {
998
  "id": "lFvW_886dxKX"
999
  },
1000
+ "execution_count": 7,
1001
  "outputs": []
1002
  },
1003
  {
 
1010
  "base_uri": "https://localhost:8080/"
1011
  },
1012
  "id": "Njoc3XEVkKkf",
1013
+ "outputId": "bab3878d-252d-4f9a-8a65-d2933e8dc891"
1014
  },
1015
+ "execution_count": 8,
1016
  "outputs": [
1017
  {
1018
  "output_type": "execute_result",
 
1022
  ]
1023
  },
1024
  "metadata": {},
1025
+ "execution_count": 8
1026
  }
1027
  ]
1028
  },
 
1049
  "metadata": {
1050
  "id": "STACTMUR1z9N"
1051
  },
1052
+ "execution_count": 11,
1053
  "outputs": []
1054
  },
1055
  {
 
1075
  "id": "CtdsIUQ81_hT",
1076
  "colab": {
1077
  "base_uri": "https://localhost:8080/",
1078
+ "height": 331,
1079
  "referenced_widgets": [
1080
+ "9b38fd520d1a4700bbc596b260a9a96f",
1081
+ "5320a84d7a00443e86af8f031d71685d",
1082
+ "4f3f1f990d244eb290482be55525daec",
1083
+ "9a4eb44d43dc42d9acdb606b6d55ad9f",
1084
+ "51de9732c1e04961b16351d3f410ac1d",
1085
+ "b40ee74dabec45ce842bcfb983d3fa75",
1086
+ "0c0ba53346954abc85f0921b682e7279",
1087
+ "9372c35dcfc04e16a97c0eb63003520e",
1088
+ "c6f3cd2404ef4a3096a61c1fcdbddd8f",
1089
+ "181bd6b10e9e4ec693ece948fd432302",
1090
+ "0c55e54063ea44ab8ea83466d9603a6d",
1091
+ "739a7d470a024bc2806e2ea998bf1dac",
1092
+ "299757dc40394c3287beea74c40dec27",
1093
+ "6c111aa1d43a4af9b04355a65c8fccb2",
1094
+ "4926bed77e464729b902c20bd7874a03",
1095
+ "5c1eaae6cf2840ab96f1a1d6a1f91881",
1096
+ "d4b409c70f3f4398ad88ede8f438e32a",
1097
+ "85fa4db33aa8427ba18d43f9a529529b",
1098
+ "a9e8371d627a48e69c7a725646f689d5",
1099
+ "e8a00080ca684fcc97189f5f3ea325e3",
1100
+ "d7213ef5bbb7409cbe40437bde51b5c9",
1101
+ "652d2e07d8be4f1f87c2f258cf288f1a"
1102
  ]
1103
  },
1104
+ "outputId": "6a48a887-be9e-4bf3-d54d-3e0575a24e52"
1105
  },
1106
+ "execution_count": 12,
1107
  "outputs": [
1108
  {
1109
  "output_type": "display_data",
 
1114
  "application/vnd.jupyter.widget-view+json": {
1115
  "version_major": 2,
1116
  "version_minor": 0,
1117
+ "model_id": "9b38fd520d1a4700bbc596b260a9a96f"
1118
  }
1119
  },
1120
  "metadata": {}
 
1138
  "431\n",
1139
  "453\n"
1140
  ]
 
 
 
 
 
 
 
 
 
 
 
1141
  },
 
 
 
 
 
1142
  {
1143
+ "output_type": "display_data",
1144
  "data": {
1145
  "text/plain": [
1146
+ "Generating embeddings: 0%| | 0/108 [00:00<?, ?it/s]"
1147
+ ],
1148
+ "application/vnd.jupyter.widget-view+json": {
1149
+ "version_major": 2,
1150
+ "version_minor": 0,
1151
+ "model_id": "739a7d470a024bc2806e2ea998bf1dac"
1152
+ }
1153
  },
1154
+ "metadata": {}
 
1155
  }
1156
  ]
1157
  },
 
1175
  "metadata": {
1176
  "id": "PS215gCGkGD-"
1177
  },
1178
+ "execution_count": 14,
1179
  "outputs": []
1180
  },
1181
  {
 
1189
  "metadata": {
1190
  "id": "HbT3-kRO4Qpt"
1191
  },
1192
+ "execution_count": 15,
1193
  "outputs": []
1194
  },
1195
  {
 
1202
  "metadata": {
1203
  "id": "sb61DWU84bHP"
1204
  },
1205
+ "execution_count": 24,
1206
  "outputs": []
1207
  },
1208
  {
 
1213
  "metadata": {
1214
  "id": "G32W2LMMCmnv"
1215
  },
1216
+ "execution_count": 25,
1217
  "outputs": []
1218
  },
1219
  {
 
1227
  "height": 35
1228
  },
1229
  "id": "obc20cU5Cxf2",
1230
+ "outputId": "6f89e848-da19-40db-90bb-777a5483af04"
1231
  },
1232
+ "execution_count": 26,
1233
  "outputs": [
1234
  {
1235
  "output_type": "execute_result",
 
1242
  }
1243
  },
1244
  "metadata": {},
1245
+ "execution_count": 26
1246
  }
1247
  ]
1248
  },
 
1262
  "base_uri": "https://localhost:8080/"
1263
  },
1264
  "id": "oIAO-saJCzYe",
1265
+ "outputId": "985a5eca-9e1c-45e7-e650-63f90f7df964"
1266
  },
1267
+ "execution_count": 27,
1268
  "outputs": [
1269
  {
1270
  "output_type": "stream",
1271
  "name": "stdout",
1272
  "text": [
1273
+ "Node ID\t c01d6f82-95b5-4e21-aab5-e1312528904b\n",
1274
  "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
1275
  "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. 
However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
1276
+ "Score\t 0.7122353844435011\n",
1277
  "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
1278
+ "Node ID\t 650124b4-d067-44c6-a45c-0d2454245971\n",
1279
  "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
1280
  "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. 
However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
1281
+ "Score\t 0.7047038661031441\n",
1282
  "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
1283
  ]
1284
  }
1285
  ]
1286
  },
1287
+ {
1288
+ "cell_type": "markdown",
1289
+ "source": [
1290
+ "# Response Modes\n",
1291
+ "\n"
1292
+ ],
1293
+ "metadata": {
1294
+ "id": "pVJif4uhPNXM"
1295
+ }
1296
+ },
1297
+ {
1298
+ "cell_type": "markdown",
1299
+ "source": [
1300
+ "The behavior of the query engine during response generation can be adjusted. Several modes are available for consideration, including the following:\n",
1301
+ "\n",
1302
+ "- compact (default): Concatenate all the retrieved chunks and use them in the prompt to generate an answer.\n",
1303
+ "- refine: Generate an answer based on the first retrieved chunk, then improve the answer based on the other retrieved chunks one at a time. (will send one request for each chunk to refine the response)\n",
1304
+ "- tree summarize: Concatenate the retrieved chunks until they fit the context window and summarize them. The summarized chunks will then recursively be fed back to the LLM for summarization until one chunk remains, which would be the final answer.\n",
1305
+ "\n",
1306
+ "\n",
1307
+ "Refer to [documentation](https://docs.llamaindex.ai/en/stable/module_guides/querying/response_synthesizers/root.html#configuring-the-response-mode) for a comprehensive list.\n",
1308
+ "\n",
1309
+ "Due to the limited size of the sample dataset, the examples provided will yield identical responses. It's crucial to evaluate these methods in the context of your specific use case and cost considerations."
1310
+ ],
1311
+ "metadata": {
1312
+ "id": "ykZOaQYvPWMj"
1313
+ }
1314
+ },
1315
  {
1316
  "cell_type": "code",
1317
+ "source": [
1318
+ "query_engine = index.as_query_engine(response_mode=\"refine\")\n",
1319
+ "# query_engine = index.as_query_engine(response_mode=\"tree_summarize\")"
1320
+ ],
1321
  "metadata": {
1322
  "id": "d4xxZHbdN0lK"
1323
  },
1324
+ "execution_count": 38,
1325
+ "outputs": []
1326
+ },
1327
+ {
1328
+ "cell_type": "code",
1329
+ "source": [
1330
+ "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
1331
+ ],
1332
+ "metadata": {
1333
+ "id": "uNKJfIn-SDLm"
1334
+ },
1335
+ "execution_count": 39,
1336
  "outputs": []
1337
+ },
1338
+ {
1339
+ "cell_type": "code",
1340
+ "source": [
1341
+ "res.response"
1342
+ ],
1343
+ "metadata": {
1344
+ "colab": {
1345
+ "base_uri": "https://localhost:8080/",
1346
+ "height": 35
1347
+ },
1348
+ "id": "Z1XmLBEoSFzB",
1349
+ "outputId": "53ee59b9-a2ad-4700-e8c9-7f450d650242"
1350
+ },
1351
+ "execution_count": 40,
1352
+ "outputs": [
1353
+ {
1354
+ "output_type": "execute_result",
1355
+ "data": {
1356
+ "text/plain": [
1357
+ "'The Llama 2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
1358
+ ],
1359
+ "application/vnd.google.colaboratory.intrinsic+json": {
1360
+ "type": "string"
1361
+ }
1362
+ },
1363
+ "metadata": {},
1364
+ "execution_count": 40
1365
+ }
1366
+ ]
1367
+ },
1368
+ {
1369
+ "cell_type": "code",
1370
+ "source": [
1371
+ "# Show the retrieved nodes\n",
1372
+ "for src in res.source_nodes:\n",
1373
+ " print(\"Node ID\\t\", src.node_id)\n",
1374
+ " print(\"Title\\t\", src.metadata['title'])\n",
1375
+ " print(\"Text\\t\", src.text)\n",
1376
+ " print(\"Score\\t\", src.score)\n",
1377
+ " print(\"-_\"*20)"
1378
+ ],
1379
+ "metadata": {
1380
+ "colab": {
1381
+ "base_uri": "https://localhost:8080/"
1382
+ },
1383
+ "id": "pZUgM-mSST4X",
1384
+ "outputId": "6803179b-95f5-46d1-ad98-d799ea1b6289"
1385
+ },
1386
+ "execution_count": 41,
1387
+ "outputs": [
1388
+ {
1389
+ "output_type": "stream",
1390
+ "name": "stdout",
1391
+ "text": [
1392
+ "Node ID\t c01d6f82-95b5-4e21-aab5-e1312528904b\n",
1393
+ "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
1394
+ "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. 
However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
1395
+ "Score\t 0.7122353844435011\n",
1396
+ "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
1397
+ "Node ID\t 650124b4-d067-44c6-a45c-0d2454245971\n",
1398
+ "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
1399
+ "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. 
However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
1400
+ "Score\t 0.7047038661031441\n",
1401
+ "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
1402
+ ]
1403
+ }
1404
+ ]
1405
+ },
1406
+ {
1407
+ "cell_type": "markdown",
1408
+ "source": [
1409
+ "The `no_text` mode will retrieve the documents, but will not send the request to the API to synthesize the final response. It is a great approach to debug the retrieved documents."
1410
+ ],
1411
+ "metadata": {
1412
+ "id": "697hg9YWTAoq"
1413
+ }
1414
+ },
1415
+ {
1416
+ "cell_type": "code",
1417
+ "source": [
1418
+ "query_engine = index.as_query_engine(response_mode=\"no_text\")\n",
1419
+ "res = query_engine.query(\"How many parameters LLaMA2 model has?\")"
1420
+ ],
1421
+ "metadata": {
1422
+ "colab": {
1423
+ "base_uri": "https://localhost:8080/"
1424
+ },
1425
+ "id": "H2x55KW0S1Jg",
1426
+ "outputId": "39e8924c-c445-4658-d39f-7a300e8d516f"
1427
+ },
1428
+ "execution_count": 44,
1429
+ "outputs": [
1430
+ {
1431
+ "output_type": "stream",
1432
+ "name": "stdout",
1433
+ "text": [
1434
+ "\n"
1435
+ ]
1436
+ }
1437
+ ]
1438
+ },
1439
+ {
1440
+ "cell_type": "code",
1441
+ "source": [
1442
+ "res.response"
1443
+ ],
1444
+ "metadata": {
1445
+ "colab": {
1446
+ "base_uri": "https://localhost:8080/",
1447
+ "height": 35
1448
+ },
1449
+ "id": "gvvtYQcBS-Ug",
1450
+ "outputId": "85dd7301-6d12-4758-86b0-652396d6fe39"
1451
+ },
1452
+ "execution_count": 47,
1453
+ "outputs": [
1454
+ {
1455
+ "output_type": "execute_result",
1456
+ "data": {
1457
+ "text/plain": [
1458
+ "''"
1459
+ ],
1460
+ "application/vnd.google.colaboratory.intrinsic+json": {
1461
+ "type": "string"
1462
+ }
1463
+ },
1464
+ "metadata": {},
1465
+ "execution_count": 47
1466
+ }
1467
+ ]
1468
+ },
1469
+ {
1470
+ "cell_type": "code",
1471
+ "source": [
1472
+ "# Show the retrieved nodes\n",
1473
+ "for src in res.source_nodes:\n",
1474
+ " print(\"Node ID\\t\", src.node_id)\n",
1475
+ " print(\"Title\\t\", src.metadata['title'])\n",
1476
+ " print(\"Text\\t\", src.text)\n",
1477
+ " print(\"Score\\t\", src.score)\n",
1478
+ " print(\"-_\"*20)"
1479
+ ],
1480
+ "metadata": {
1481
+ "colab": {
1482
+ "base_uri": "https://localhost:8080/"
1483
+ },
1484
+ "id": "o9ijBEkXS5LC",
1485
+ "outputId": "616c8315-15c5-47cd-a9ed-2830b2f88d5d"
1486
+ },
1487
+ "execution_count": 45,
1488
+ "outputs": [
1489
+ {
1490
+ "output_type": "stream",
1491
+ "name": "stdout",
1492
+ "text": [
1493
+ "Node ID\t c01d6f82-95b5-4e21-aab5-e1312528904b\n",
1494
+ "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
1495
+ "Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. 
However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
1496
+ "Score\t 0.7122353844435011\n",
1497
+ "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
1498
+ "Node ID\t 650124b4-d067-44c6-a45c-0d2454245971\n",
1499
+ "Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
1500
+ "Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. 
However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
1501
+ "Score\t 0.7047038661031441\n",
1502
+ "-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
1503
+ ]
1504
+ }
1505
+ ]
1506
  }
1507
  ]
1508
  }