Created using Colaboratory
Browse files
notebooks/11_Adding_Hybrid_Search.ipynb
ADDED
@@ -0,0 +1,1666 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"nbformat": 4,
|
3 |
+
"nbformat_minor": 0,
|
4 |
+
"metadata": {
|
5 |
+
"colab": {
|
6 |
+
"provenance": [],
|
7 |
+
"authorship_tag": "ABX9TyPWas2InUknw9W33/VhCZaK",
|
8 |
+
"include_colab_link": true
|
9 |
+
},
|
10 |
+
"kernelspec": {
|
11 |
+
"name": "python3",
|
12 |
+
"display_name": "Python 3"
|
13 |
+
},
|
14 |
+
"language_info": {
|
15 |
+
"name": "python"
|
16 |
+
},
|
17 |
+
"widgets": {
|
18 |
+
"application/vnd.jupyter.widget-state+json": {
|
19 |
+
"3fbabd8a8660461ba5e7bc08ef39139a": {
|
20 |
+
"model_module": "@jupyter-widgets/controls",
|
21 |
+
"model_name": "HBoxModel",
|
22 |
+
"model_module_version": "1.5.0",
|
23 |
+
"state": {
|
24 |
+
"_dom_classes": [],
|
25 |
+
"_model_module": "@jupyter-widgets/controls",
|
26 |
+
"_model_module_version": "1.5.0",
|
27 |
+
"_model_name": "HBoxModel",
|
28 |
+
"_view_count": null,
|
29 |
+
"_view_module": "@jupyter-widgets/controls",
|
30 |
+
"_view_module_version": "1.5.0",
|
31 |
+
"_view_name": "HBoxView",
|
32 |
+
"box_style": "",
|
33 |
+
"children": [
|
34 |
+
"IPY_MODEL_df2365556ae242a2ab1a119f9a31a561",
|
35 |
+
"IPY_MODEL_5f4b9d32df8f446e858e4c289dc282f9",
|
36 |
+
"IPY_MODEL_5b588f83a15d42d9aca888e06bbd95ff"
|
37 |
+
],
|
38 |
+
"layout": "IPY_MODEL_ad073bca655540809e39f26538d2ec0d"
|
39 |
+
}
|
40 |
+
},
|
41 |
+
"df2365556ae242a2ab1a119f9a31a561": {
|
42 |
+
"model_module": "@jupyter-widgets/controls",
|
43 |
+
"model_name": "HTMLModel",
|
44 |
+
"model_module_version": "1.5.0",
|
45 |
+
"state": {
|
46 |
+
"_dom_classes": [],
|
47 |
+
"_model_module": "@jupyter-widgets/controls",
|
48 |
+
"_model_module_version": "1.5.0",
|
49 |
+
"_model_name": "HTMLModel",
|
50 |
+
"_view_count": null,
|
51 |
+
"_view_module": "@jupyter-widgets/controls",
|
52 |
+
"_view_module_version": "1.5.0",
|
53 |
+
"_view_name": "HTMLView",
|
54 |
+
"description": "",
|
55 |
+
"description_tooltip": null,
|
56 |
+
"layout": "IPY_MODEL_13b9c5395bca4c3ba21265240cb936cf",
|
57 |
+
"placeholder": "",
|
58 |
+
"style": "IPY_MODEL_47a4586384274577a726c57605e7f8d9",
|
59 |
+
"value": "Parsing nodes: 100%"
|
60 |
+
}
|
61 |
+
},
|
62 |
+
"5f4b9d32df8f446e858e4c289dc282f9": {
|
63 |
+
"model_module": "@jupyter-widgets/controls",
|
64 |
+
"model_name": "FloatProgressModel",
|
65 |
+
"model_module_version": "1.5.0",
|
66 |
+
"state": {
|
67 |
+
"_dom_classes": [],
|
68 |
+
"_model_module": "@jupyter-widgets/controls",
|
69 |
+
"_model_module_version": "1.5.0",
|
70 |
+
"_model_name": "FloatProgressModel",
|
71 |
+
"_view_count": null,
|
72 |
+
"_view_module": "@jupyter-widgets/controls",
|
73 |
+
"_view_module_version": "1.5.0",
|
74 |
+
"_view_name": "ProgressView",
|
75 |
+
"bar_style": "success",
|
76 |
+
"description": "",
|
77 |
+
"description_tooltip": null,
|
78 |
+
"layout": "IPY_MODEL_96a3bdece738481db57e811ccb74a974",
|
79 |
+
"max": 14,
|
80 |
+
"min": 0,
|
81 |
+
"orientation": "horizontal",
|
82 |
+
"style": "IPY_MODEL_5c7973afd79349ed997a69120d0629b2",
|
83 |
+
"value": 14
|
84 |
+
}
|
85 |
+
},
|
86 |
+
"5b588f83a15d42d9aca888e06bbd95ff": {
|
87 |
+
"model_module": "@jupyter-widgets/controls",
|
88 |
+
"model_name": "HTMLModel",
|
89 |
+
"model_module_version": "1.5.0",
|
90 |
+
"state": {
|
91 |
+
"_dom_classes": [],
|
92 |
+
"_model_module": "@jupyter-widgets/controls",
|
93 |
+
"_model_module_version": "1.5.0",
|
94 |
+
"_model_name": "HTMLModel",
|
95 |
+
"_view_count": null,
|
96 |
+
"_view_module": "@jupyter-widgets/controls",
|
97 |
+
"_view_module_version": "1.5.0",
|
98 |
+
"_view_name": "HTMLView",
|
99 |
+
"description": "",
|
100 |
+
"description_tooltip": null,
|
101 |
+
"layout": "IPY_MODEL_af9b6ae927dd4764b9692507791bc67e",
|
102 |
+
"placeholder": "",
|
103 |
+
"style": "IPY_MODEL_134210510d49476e959dd7d032bbdbdc",
|
104 |
+
"value": " 14/14 [00:00<00:00, 21.41it/s]"
|
105 |
+
}
|
106 |
+
},
|
107 |
+
"ad073bca655540809e39f26538d2ec0d": {
|
108 |
+
"model_module": "@jupyter-widgets/base",
|
109 |
+
"model_name": "LayoutModel",
|
110 |
+
"model_module_version": "1.2.0",
|
111 |
+
"state": {
|
112 |
+
"_model_module": "@jupyter-widgets/base",
|
113 |
+
"_model_module_version": "1.2.0",
|
114 |
+
"_model_name": "LayoutModel",
|
115 |
+
"_view_count": null,
|
116 |
+
"_view_module": "@jupyter-widgets/base",
|
117 |
+
"_view_module_version": "1.2.0",
|
118 |
+
"_view_name": "LayoutView",
|
119 |
+
"align_content": null,
|
120 |
+
"align_items": null,
|
121 |
+
"align_self": null,
|
122 |
+
"border": null,
|
123 |
+
"bottom": null,
|
124 |
+
"display": null,
|
125 |
+
"flex": null,
|
126 |
+
"flex_flow": null,
|
127 |
+
"grid_area": null,
|
128 |
+
"grid_auto_columns": null,
|
129 |
+
"grid_auto_flow": null,
|
130 |
+
"grid_auto_rows": null,
|
131 |
+
"grid_column": null,
|
132 |
+
"grid_gap": null,
|
133 |
+
"grid_row": null,
|
134 |
+
"grid_template_areas": null,
|
135 |
+
"grid_template_columns": null,
|
136 |
+
"grid_template_rows": null,
|
137 |
+
"height": null,
|
138 |
+
"justify_content": null,
|
139 |
+
"justify_items": null,
|
140 |
+
"left": null,
|
141 |
+
"margin": null,
|
142 |
+
"max_height": null,
|
143 |
+
"max_width": null,
|
144 |
+
"min_height": null,
|
145 |
+
"min_width": null,
|
146 |
+
"object_fit": null,
|
147 |
+
"object_position": null,
|
148 |
+
"order": null,
|
149 |
+
"overflow": null,
|
150 |
+
"overflow_x": null,
|
151 |
+
"overflow_y": null,
|
152 |
+
"padding": null,
|
153 |
+
"right": null,
|
154 |
+
"top": null,
|
155 |
+
"visibility": null,
|
156 |
+
"width": null
|
157 |
+
}
|
158 |
+
},
|
159 |
+
"13b9c5395bca4c3ba21265240cb936cf": {
|
160 |
+
"model_module": "@jupyter-widgets/base",
|
161 |
+
"model_name": "LayoutModel",
|
162 |
+
"model_module_version": "1.2.0",
|
163 |
+
"state": {
|
164 |
+
"_model_module": "@jupyter-widgets/base",
|
165 |
+
"_model_module_version": "1.2.0",
|
166 |
+
"_model_name": "LayoutModel",
|
167 |
+
"_view_count": null,
|
168 |
+
"_view_module": "@jupyter-widgets/base",
|
169 |
+
"_view_module_version": "1.2.0",
|
170 |
+
"_view_name": "LayoutView",
|
171 |
+
"align_content": null,
|
172 |
+
"align_items": null,
|
173 |
+
"align_self": null,
|
174 |
+
"border": null,
|
175 |
+
"bottom": null,
|
176 |
+
"display": null,
|
177 |
+
"flex": null,
|
178 |
+
"flex_flow": null,
|
179 |
+
"grid_area": null,
|
180 |
+
"grid_auto_columns": null,
|
181 |
+
"grid_auto_flow": null,
|
182 |
+
"grid_auto_rows": null,
|
183 |
+
"grid_column": null,
|
184 |
+
"grid_gap": null,
|
185 |
+
"grid_row": null,
|
186 |
+
"grid_template_areas": null,
|
187 |
+
"grid_template_columns": null,
|
188 |
+
"grid_template_rows": null,
|
189 |
+
"height": null,
|
190 |
+
"justify_content": null,
|
191 |
+
"justify_items": null,
|
192 |
+
"left": null,
|
193 |
+
"margin": null,
|
194 |
+
"max_height": null,
|
195 |
+
"max_width": null,
|
196 |
+
"min_height": null,
|
197 |
+
"min_width": null,
|
198 |
+
"object_fit": null,
|
199 |
+
"object_position": null,
|
200 |
+
"order": null,
|
201 |
+
"overflow": null,
|
202 |
+
"overflow_x": null,
|
203 |
+
"overflow_y": null,
|
204 |
+
"padding": null,
|
205 |
+
"right": null,
|
206 |
+
"top": null,
|
207 |
+
"visibility": null,
|
208 |
+
"width": null
|
209 |
+
}
|
210 |
+
},
|
211 |
+
"47a4586384274577a726c57605e7f8d9": {
|
212 |
+
"model_module": "@jupyter-widgets/controls",
|
213 |
+
"model_name": "DescriptionStyleModel",
|
214 |
+
"model_module_version": "1.5.0",
|
215 |
+
"state": {
|
216 |
+
"_model_module": "@jupyter-widgets/controls",
|
217 |
+
"_model_module_version": "1.5.0",
|
218 |
+
"_model_name": "DescriptionStyleModel",
|
219 |
+
"_view_count": null,
|
220 |
+
"_view_module": "@jupyter-widgets/base",
|
221 |
+
"_view_module_version": "1.2.0",
|
222 |
+
"_view_name": "StyleView",
|
223 |
+
"description_width": ""
|
224 |
+
}
|
225 |
+
},
|
226 |
+
"96a3bdece738481db57e811ccb74a974": {
|
227 |
+
"model_module": "@jupyter-widgets/base",
|
228 |
+
"model_name": "LayoutModel",
|
229 |
+
"model_module_version": "1.2.0",
|
230 |
+
"state": {
|
231 |
+
"_model_module": "@jupyter-widgets/base",
|
232 |
+
"_model_module_version": "1.2.0",
|
233 |
+
"_model_name": "LayoutModel",
|
234 |
+
"_view_count": null,
|
235 |
+
"_view_module": "@jupyter-widgets/base",
|
236 |
+
"_view_module_version": "1.2.0",
|
237 |
+
"_view_name": "LayoutView",
|
238 |
+
"align_content": null,
|
239 |
+
"align_items": null,
|
240 |
+
"align_self": null,
|
241 |
+
"border": null,
|
242 |
+
"bottom": null,
|
243 |
+
"display": null,
|
244 |
+
"flex": null,
|
245 |
+
"flex_flow": null,
|
246 |
+
"grid_area": null,
|
247 |
+
"grid_auto_columns": null,
|
248 |
+
"grid_auto_flow": null,
|
249 |
+
"grid_auto_rows": null,
|
250 |
+
"grid_column": null,
|
251 |
+
"grid_gap": null,
|
252 |
+
"grid_row": null,
|
253 |
+
"grid_template_areas": null,
|
254 |
+
"grid_template_columns": null,
|
255 |
+
"grid_template_rows": null,
|
256 |
+
"height": null,
|
257 |
+
"justify_content": null,
|
258 |
+
"justify_items": null,
|
259 |
+
"left": null,
|
260 |
+
"margin": null,
|
261 |
+
"max_height": null,
|
262 |
+
"max_width": null,
|
263 |
+
"min_height": null,
|
264 |
+
"min_width": null,
|
265 |
+
"object_fit": null,
|
266 |
+
"object_position": null,
|
267 |
+
"order": null,
|
268 |
+
"overflow": null,
|
269 |
+
"overflow_x": null,
|
270 |
+
"overflow_y": null,
|
271 |
+
"padding": null,
|
272 |
+
"right": null,
|
273 |
+
"top": null,
|
274 |
+
"visibility": null,
|
275 |
+
"width": null
|
276 |
+
}
|
277 |
+
},
|
278 |
+
"5c7973afd79349ed997a69120d0629b2": {
|
279 |
+
"model_module": "@jupyter-widgets/controls",
|
280 |
+
"model_name": "ProgressStyleModel",
|
281 |
+
"model_module_version": "1.5.0",
|
282 |
+
"state": {
|
283 |
+
"_model_module": "@jupyter-widgets/controls",
|
284 |
+
"_model_module_version": "1.5.0",
|
285 |
+
"_model_name": "ProgressStyleModel",
|
286 |
+
"_view_count": null,
|
287 |
+
"_view_module": "@jupyter-widgets/base",
|
288 |
+
"_view_module_version": "1.2.0",
|
289 |
+
"_view_name": "StyleView",
|
290 |
+
"bar_color": null,
|
291 |
+
"description_width": ""
|
292 |
+
}
|
293 |
+
},
|
294 |
+
"af9b6ae927dd4764b9692507791bc67e": {
|
295 |
+
"model_module": "@jupyter-widgets/base",
|
296 |
+
"model_name": "LayoutModel",
|
297 |
+
"model_module_version": "1.2.0",
|
298 |
+
"state": {
|
299 |
+
"_model_module": "@jupyter-widgets/base",
|
300 |
+
"_model_module_version": "1.2.0",
|
301 |
+
"_model_name": "LayoutModel",
|
302 |
+
"_view_count": null,
|
303 |
+
"_view_module": "@jupyter-widgets/base",
|
304 |
+
"_view_module_version": "1.2.0",
|
305 |
+
"_view_name": "LayoutView",
|
306 |
+
"align_content": null,
|
307 |
+
"align_items": null,
|
308 |
+
"align_self": null,
|
309 |
+
"border": null,
|
310 |
+
"bottom": null,
|
311 |
+
"display": null,
|
312 |
+
"flex": null,
|
313 |
+
"flex_flow": null,
|
314 |
+
"grid_area": null,
|
315 |
+
"grid_auto_columns": null,
|
316 |
+
"grid_auto_flow": null,
|
317 |
+
"grid_auto_rows": null,
|
318 |
+
"grid_column": null,
|
319 |
+
"grid_gap": null,
|
320 |
+
"grid_row": null,
|
321 |
+
"grid_template_areas": null,
|
322 |
+
"grid_template_columns": null,
|
323 |
+
"grid_template_rows": null,
|
324 |
+
"height": null,
|
325 |
+
"justify_content": null,
|
326 |
+
"justify_items": null,
|
327 |
+
"left": null,
|
328 |
+
"margin": null,
|
329 |
+
"max_height": null,
|
330 |
+
"max_width": null,
|
331 |
+
"min_height": null,
|
332 |
+
"min_width": null,
|
333 |
+
"object_fit": null,
|
334 |
+
"object_position": null,
|
335 |
+
"order": null,
|
336 |
+
"overflow": null,
|
337 |
+
"overflow_x": null,
|
338 |
+
"overflow_y": null,
|
339 |
+
"padding": null,
|
340 |
+
"right": null,
|
341 |
+
"top": null,
|
342 |
+
"visibility": null,
|
343 |
+
"width": null
|
344 |
+
}
|
345 |
+
},
|
346 |
+
"134210510d49476e959dd7d032bbdbdc": {
|
347 |
+
"model_module": "@jupyter-widgets/controls",
|
348 |
+
"model_name": "DescriptionStyleModel",
|
349 |
+
"model_module_version": "1.5.0",
|
350 |
+
"state": {
|
351 |
+
"_model_module": "@jupyter-widgets/controls",
|
352 |
+
"_model_module_version": "1.5.0",
|
353 |
+
"_model_name": "DescriptionStyleModel",
|
354 |
+
"_view_count": null,
|
355 |
+
"_view_module": "@jupyter-widgets/base",
|
356 |
+
"_view_module_version": "1.2.0",
|
357 |
+
"_view_name": "StyleView",
|
358 |
+
"description_width": ""
|
359 |
+
}
|
360 |
+
},
|
361 |
+
"5f9bb065c2b74d2e8ded32e1306a7807": {
|
362 |
+
"model_module": "@jupyter-widgets/controls",
|
363 |
+
"model_name": "HBoxModel",
|
364 |
+
"model_module_version": "1.5.0",
|
365 |
+
"state": {
|
366 |
+
"_dom_classes": [],
|
367 |
+
"_model_module": "@jupyter-widgets/controls",
|
368 |
+
"_model_module_version": "1.5.0",
|
369 |
+
"_model_name": "HBoxModel",
|
370 |
+
"_view_count": null,
|
371 |
+
"_view_module": "@jupyter-widgets/controls",
|
372 |
+
"_view_module_version": "1.5.0",
|
373 |
+
"_view_name": "HBoxView",
|
374 |
+
"box_style": "",
|
375 |
+
"children": [
|
376 |
+
"IPY_MODEL_73a06bc546a64f7f99a9e4a135319dcd",
|
377 |
+
"IPY_MODEL_ce48deaf4d8c49cdae92bfdbb3a78df0",
|
378 |
+
"IPY_MODEL_4a172e8c6aa44e41a42fc1d9cf714fd0"
|
379 |
+
],
|
380 |
+
"layout": "IPY_MODEL_0245f2604e4d49c8bd0210302746c47b"
|
381 |
+
}
|
382 |
+
},
|
383 |
+
"73a06bc546a64f7f99a9e4a135319dcd": {
|
384 |
+
"model_module": "@jupyter-widgets/controls",
|
385 |
+
"model_name": "HTMLModel",
|
386 |
+
"model_module_version": "1.5.0",
|
387 |
+
"state": {
|
388 |
+
"_dom_classes": [],
|
389 |
+
"_model_module": "@jupyter-widgets/controls",
|
390 |
+
"_model_module_version": "1.5.0",
|
391 |
+
"_model_name": "HTMLModel",
|
392 |
+
"_view_count": null,
|
393 |
+
"_view_module": "@jupyter-widgets/controls",
|
394 |
+
"_view_module_version": "1.5.0",
|
395 |
+
"_view_name": "HTMLView",
|
396 |
+
"description": "",
|
397 |
+
"description_tooltip": null,
|
398 |
+
"layout": "IPY_MODEL_e956dfab55084a9cbe33c8e331b511e7",
|
399 |
+
"placeholder": "",
|
400 |
+
"style": "IPY_MODEL_cb394578badd43a89850873ad2526542",
|
401 |
+
"value": "Generating embeddings: 100%"
|
402 |
+
}
|
403 |
+
},
|
404 |
+
"ce48deaf4d8c49cdae92bfdbb3a78df0": {
|
405 |
+
"model_module": "@jupyter-widgets/controls",
|
406 |
+
"model_name": "FloatProgressModel",
|
407 |
+
"model_module_version": "1.5.0",
|
408 |
+
"state": {
|
409 |
+
"_dom_classes": [],
|
410 |
+
"_model_module": "@jupyter-widgets/controls",
|
411 |
+
"_model_module_version": "1.5.0",
|
412 |
+
"_model_name": "FloatProgressModel",
|
413 |
+
"_view_count": null,
|
414 |
+
"_view_module": "@jupyter-widgets/controls",
|
415 |
+
"_view_module_version": "1.5.0",
|
416 |
+
"_view_name": "ProgressView",
|
417 |
+
"bar_style": "success",
|
418 |
+
"description": "",
|
419 |
+
"description_tooltip": null,
|
420 |
+
"layout": "IPY_MODEL_193aef33d9184055bb9223f56d456de6",
|
421 |
+
"max": 108,
|
422 |
+
"min": 0,
|
423 |
+
"orientation": "horizontal",
|
424 |
+
"style": "IPY_MODEL_abfc9aa911ce4a5ea81c7c451f08295f",
|
425 |
+
"value": 108
|
426 |
+
}
|
427 |
+
},
|
428 |
+
"4a172e8c6aa44e41a42fc1d9cf714fd0": {
|
429 |
+
"model_module": "@jupyter-widgets/controls",
|
430 |
+
"model_name": "HTMLModel",
|
431 |
+
"model_module_version": "1.5.0",
|
432 |
+
"state": {
|
433 |
+
"_dom_classes": [],
|
434 |
+
"_model_module": "@jupyter-widgets/controls",
|
435 |
+
"_model_module_version": "1.5.0",
|
436 |
+
"_model_name": "HTMLModel",
|
437 |
+
"_view_count": null,
|
438 |
+
"_view_module": "@jupyter-widgets/controls",
|
439 |
+
"_view_module_version": "1.5.0",
|
440 |
+
"_view_name": "HTMLView",
|
441 |
+
"description": "",
|
442 |
+
"description_tooltip": null,
|
443 |
+
"layout": "IPY_MODEL_e7937a1bc68441a080374911a6563376",
|
444 |
+
"placeholder": "",
|
445 |
+
"style": "IPY_MODEL_e532ed7bfef34f67b5fcacd9534eb789",
|
446 |
+
"value": " 108/108 [00:03<00:00, 33.70it/s]"
|
447 |
+
}
|
448 |
+
},
|
449 |
+
"0245f2604e4d49c8bd0210302746c47b": {
|
450 |
+
"model_module": "@jupyter-widgets/base",
|
451 |
+
"model_name": "LayoutModel",
|
452 |
+
"model_module_version": "1.2.0",
|
453 |
+
"state": {
|
454 |
+
"_model_module": "@jupyter-widgets/base",
|
455 |
+
"_model_module_version": "1.2.0",
|
456 |
+
"_model_name": "LayoutModel",
|
457 |
+
"_view_count": null,
|
458 |
+
"_view_module": "@jupyter-widgets/base",
|
459 |
+
"_view_module_version": "1.2.0",
|
460 |
+
"_view_name": "LayoutView",
|
461 |
+
"align_content": null,
|
462 |
+
"align_items": null,
|
463 |
+
"align_self": null,
|
464 |
+
"border": null,
|
465 |
+
"bottom": null,
|
466 |
+
"display": null,
|
467 |
+
"flex": null,
|
468 |
+
"flex_flow": null,
|
469 |
+
"grid_area": null,
|
470 |
+
"grid_auto_columns": null,
|
471 |
+
"grid_auto_flow": null,
|
472 |
+
"grid_auto_rows": null,
|
473 |
+
"grid_column": null,
|
474 |
+
"grid_gap": null,
|
475 |
+
"grid_row": null,
|
476 |
+
"grid_template_areas": null,
|
477 |
+
"grid_template_columns": null,
|
478 |
+
"grid_template_rows": null,
|
479 |
+
"height": null,
|
480 |
+
"justify_content": null,
|
481 |
+
"justify_items": null,
|
482 |
+
"left": null,
|
483 |
+
"margin": null,
|
484 |
+
"max_height": null,
|
485 |
+
"max_width": null,
|
486 |
+
"min_height": null,
|
487 |
+
"min_width": null,
|
488 |
+
"object_fit": null,
|
489 |
+
"object_position": null,
|
490 |
+
"order": null,
|
491 |
+
"overflow": null,
|
492 |
+
"overflow_x": null,
|
493 |
+
"overflow_y": null,
|
494 |
+
"padding": null,
|
495 |
+
"right": null,
|
496 |
+
"top": null,
|
497 |
+
"visibility": null,
|
498 |
+
"width": null
|
499 |
+
}
|
500 |
+
},
|
501 |
+
"e956dfab55084a9cbe33c8e331b511e7": {
|
502 |
+
"model_module": "@jupyter-widgets/base",
|
503 |
+
"model_name": "LayoutModel",
|
504 |
+
"model_module_version": "1.2.0",
|
505 |
+
"state": {
|
506 |
+
"_model_module": "@jupyter-widgets/base",
|
507 |
+
"_model_module_version": "1.2.0",
|
508 |
+
"_model_name": "LayoutModel",
|
509 |
+
"_view_count": null,
|
510 |
+
"_view_module": "@jupyter-widgets/base",
|
511 |
+
"_view_module_version": "1.2.0",
|
512 |
+
"_view_name": "LayoutView",
|
513 |
+
"align_content": null,
|
514 |
+
"align_items": null,
|
515 |
+
"align_self": null,
|
516 |
+
"border": null,
|
517 |
+
"bottom": null,
|
518 |
+
"display": null,
|
519 |
+
"flex": null,
|
520 |
+
"flex_flow": null,
|
521 |
+
"grid_area": null,
|
522 |
+
"grid_auto_columns": null,
|
523 |
+
"grid_auto_flow": null,
|
524 |
+
"grid_auto_rows": null,
|
525 |
+
"grid_column": null,
|
526 |
+
"grid_gap": null,
|
527 |
+
"grid_row": null,
|
528 |
+
"grid_template_areas": null,
|
529 |
+
"grid_template_columns": null,
|
530 |
+
"grid_template_rows": null,
|
531 |
+
"height": null,
|
532 |
+
"justify_content": null,
|
533 |
+
"justify_items": null,
|
534 |
+
"left": null,
|
535 |
+
"margin": null,
|
536 |
+
"max_height": null,
|
537 |
+
"max_width": null,
|
538 |
+
"min_height": null,
|
539 |
+
"min_width": null,
|
540 |
+
"object_fit": null,
|
541 |
+
"object_position": null,
|
542 |
+
"order": null,
|
543 |
+
"overflow": null,
|
544 |
+
"overflow_x": null,
|
545 |
+
"overflow_y": null,
|
546 |
+
"padding": null,
|
547 |
+
"right": null,
|
548 |
+
"top": null,
|
549 |
+
"visibility": null,
|
550 |
+
"width": null
|
551 |
+
}
|
552 |
+
},
|
553 |
+
"cb394578badd43a89850873ad2526542": {
|
554 |
+
"model_module": "@jupyter-widgets/controls",
|
555 |
+
"model_name": "DescriptionStyleModel",
|
556 |
+
"model_module_version": "1.5.0",
|
557 |
+
"state": {
|
558 |
+
"_model_module": "@jupyter-widgets/controls",
|
559 |
+
"_model_module_version": "1.5.0",
|
560 |
+
"_model_name": "DescriptionStyleModel",
|
561 |
+
"_view_count": null,
|
562 |
+
"_view_module": "@jupyter-widgets/base",
|
563 |
+
"_view_module_version": "1.2.0",
|
564 |
+
"_view_name": "StyleView",
|
565 |
+
"description_width": ""
|
566 |
+
}
|
567 |
+
},
|
568 |
+
"193aef33d9184055bb9223f56d456de6": {
|
569 |
+
"model_module": "@jupyter-widgets/base",
|
570 |
+
"model_name": "LayoutModel",
|
571 |
+
"model_module_version": "1.2.0",
|
572 |
+
"state": {
|
573 |
+
"_model_module": "@jupyter-widgets/base",
|
574 |
+
"_model_module_version": "1.2.0",
|
575 |
+
"_model_name": "LayoutModel",
|
576 |
+
"_view_count": null,
|
577 |
+
"_view_module": "@jupyter-widgets/base",
|
578 |
+
"_view_module_version": "1.2.0",
|
579 |
+
"_view_name": "LayoutView",
|
580 |
+
"align_content": null,
|
581 |
+
"align_items": null,
|
582 |
+
"align_self": null,
|
583 |
+
"border": null,
|
584 |
+
"bottom": null,
|
585 |
+
"display": null,
|
586 |
+
"flex": null,
|
587 |
+
"flex_flow": null,
|
588 |
+
"grid_area": null,
|
589 |
+
"grid_auto_columns": null,
|
590 |
+
"grid_auto_flow": null,
|
591 |
+
"grid_auto_rows": null,
|
592 |
+
"grid_column": null,
|
593 |
+
"grid_gap": null,
|
594 |
+
"grid_row": null,
|
595 |
+
"grid_template_areas": null,
|
596 |
+
"grid_template_columns": null,
|
597 |
+
"grid_template_rows": null,
|
598 |
+
"height": null,
|
599 |
+
"justify_content": null,
|
600 |
+
"justify_items": null,
|
601 |
+
"left": null,
|
602 |
+
"margin": null,
|
603 |
+
"max_height": null,
|
604 |
+
"max_width": null,
|
605 |
+
"min_height": null,
|
606 |
+
"min_width": null,
|
607 |
+
"object_fit": null,
|
608 |
+
"object_position": null,
|
609 |
+
"order": null,
|
610 |
+
"overflow": null,
|
611 |
+
"overflow_x": null,
|
612 |
+
"overflow_y": null,
|
613 |
+
"padding": null,
|
614 |
+
"right": null,
|
615 |
+
"top": null,
|
616 |
+
"visibility": null,
|
617 |
+
"width": null
|
618 |
+
}
|
619 |
+
},
|
620 |
+
"abfc9aa911ce4a5ea81c7c451f08295f": {
|
621 |
+
"model_module": "@jupyter-widgets/controls",
|
622 |
+
"model_name": "ProgressStyleModel",
|
623 |
+
"model_module_version": "1.5.0",
|
624 |
+
"state": {
|
625 |
+
"_model_module": "@jupyter-widgets/controls",
|
626 |
+
"_model_module_version": "1.5.0",
|
627 |
+
"_model_name": "ProgressStyleModel",
|
628 |
+
"_view_count": null,
|
629 |
+
"_view_module": "@jupyter-widgets/base",
|
630 |
+
"_view_module_version": "1.2.0",
|
631 |
+
"_view_name": "StyleView",
|
632 |
+
"bar_color": null,
|
633 |
+
"description_width": ""
|
634 |
+
}
|
635 |
+
},
|
636 |
+
"e7937a1bc68441a080374911a6563376": {
|
637 |
+
"model_module": "@jupyter-widgets/base",
|
638 |
+
"model_name": "LayoutModel",
|
639 |
+
"model_module_version": "1.2.0",
|
640 |
+
"state": {
|
641 |
+
"_model_module": "@jupyter-widgets/base",
|
642 |
+
"_model_module_version": "1.2.0",
|
643 |
+
"_model_name": "LayoutModel",
|
644 |
+
"_view_count": null,
|
645 |
+
"_view_module": "@jupyter-widgets/base",
|
646 |
+
"_view_module_version": "1.2.0",
|
647 |
+
"_view_name": "LayoutView",
|
648 |
+
"align_content": null,
|
649 |
+
"align_items": null,
|
650 |
+
"align_self": null,
|
651 |
+
"border": null,
|
652 |
+
"bottom": null,
|
653 |
+
"display": null,
|
654 |
+
"flex": null,
|
655 |
+
"flex_flow": null,
|
656 |
+
"grid_area": null,
|
657 |
+
"grid_auto_columns": null,
|
658 |
+
"grid_auto_flow": null,
|
659 |
+
"grid_auto_rows": null,
|
660 |
+
"grid_column": null,
|
661 |
+
"grid_gap": null,
|
662 |
+
"grid_row": null,
|
663 |
+
"grid_template_areas": null,
|
664 |
+
"grid_template_columns": null,
|
665 |
+
"grid_template_rows": null,
|
666 |
+
"height": null,
|
667 |
+
"justify_content": null,
|
668 |
+
"justify_items": null,
|
669 |
+
"left": null,
|
670 |
+
"margin": null,
|
671 |
+
"max_height": null,
|
672 |
+
"max_width": null,
|
673 |
+
"min_height": null,
|
674 |
+
"min_width": null,
|
675 |
+
"object_fit": null,
|
676 |
+
"object_position": null,
|
677 |
+
"order": null,
|
678 |
+
"overflow": null,
|
679 |
+
"overflow_x": null,
|
680 |
+
"overflow_y": null,
|
681 |
+
"padding": null,
|
682 |
+
"right": null,
|
683 |
+
"top": null,
|
684 |
+
"visibility": null,
|
685 |
+
"width": null
|
686 |
+
}
|
687 |
+
},
|
688 |
+
"e532ed7bfef34f67b5fcacd9534eb789": {
|
689 |
+
"model_module": "@jupyter-widgets/controls",
|
690 |
+
"model_name": "DescriptionStyleModel",
|
691 |
+
"model_module_version": "1.5.0",
|
692 |
+
"state": {
|
693 |
+
"_model_module": "@jupyter-widgets/controls",
|
694 |
+
"_model_module_version": "1.5.0",
|
695 |
+
"_model_name": "DescriptionStyleModel",
|
696 |
+
"_view_count": null,
|
697 |
+
"_view_module": "@jupyter-widgets/base",
|
698 |
+
"_view_module_version": "1.2.0",
|
699 |
+
"_view_name": "StyleView",
|
700 |
+
"description_width": ""
|
701 |
+
}
|
702 |
+
}
|
703 |
+
}
|
704 |
+
}
|
705 |
+
},
|
706 |
+
"cells": [
|
707 |
+
{
|
708 |
+
"cell_type": "markdown",
|
709 |
+
"metadata": {
|
710 |
+
"id": "view-in-github",
|
711 |
+
"colab_type": "text"
|
712 |
+
},
|
713 |
+
"source": [
|
714 |
+
"<a href=\"https://colab.research.google.com/github/towardsai/ai-tutor-rag-system/blob/main/notebooks/11_Adding_Hybrid_Search.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
|
715 |
+
]
|
716 |
+
},
|
717 |
+
{
|
718 |
+
"cell_type": "markdown",
|
719 |
+
"source": [
|
720 |
+
"# Install Packages and Setup Variables"
|
721 |
+
],
|
722 |
+
"metadata": {
|
723 |
+
"id": "-zE1h0uQV7uT"
|
724 |
+
}
|
725 |
+
},
|
726 |
+
{
|
727 |
+
"cell_type": "code",
|
728 |
+
"execution_count": 1,
|
729 |
+
"metadata": {
|
730 |
+
"id": "QPJzr-I9XQ7l",
|
731 |
+
"colab": {
|
732 |
+
"base_uri": "https://localhost:8080/"
|
733 |
+
},
|
734 |
+
"outputId": "3115889a-14ee-457c-c0d5-271c1053a1e9"
|
735 |
+
},
|
736 |
+
"outputs": [
|
737 |
+
{
|
738 |
+
"output_type": "stream",
|
739 |
+
"name": "stdout",
|
740 |
+
"text": [
|
741 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m15.7/15.7 MB\u001b[0m \u001b[31m35.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
742 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m225.4/225.4 kB\u001b[0m \u001b[31m15.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
743 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.0/2.0 MB\u001b[0m \u001b[31m33.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
744 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m508.6/508.6 kB\u001b[0m \u001b[31m36.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
745 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m79.9/79.9 MB\u001b[0m \u001b[31m12.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
746 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m45.7/45.7 kB\u001b[0m \u001b[31m4.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
747 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m51.7/51.7 kB\u001b[0m \u001b[31m6.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
748 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m75.9/75.9 kB\u001b[0m \u001b[31m7.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
749 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m2.4/2.4 MB\u001b[0m \u001b[31m68.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
750 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m92.1/92.1 kB\u001b[0m \u001b[31m7.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
751 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m60.7/60.7 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
752 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m41.1/41.1 kB\u001b[0m \u001b[31m3.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
753 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m5.4/5.4 MB\u001b[0m \u001b[31m85.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
754 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m6.8/6.8 MB\u001b[0m \u001b[31m98.1 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
755 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m57.9/57.9 kB\u001b[0m \u001b[31m6.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
756 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m105.6/105.6 kB\u001b[0m \u001b[31m10.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
757 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.3/67.3 kB\u001b[0m \u001b[31m7.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
758 |
+
"\u001b[?25h Installing build dependencies ... \u001b[?25l\u001b[?25hdone\n",
|
759 |
+
" Getting requirements to build wheel ... \u001b[?25l\u001b[?25hdone\n",
|
760 |
+
" Preparing metadata (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n",
|
761 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m698.9/698.9 kB\u001b[0m \u001b[31m61.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
762 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.6/1.6 MB\u001b[0m \u001b[31m90.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
763 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m67.6/67.6 kB\u001b[0m \u001b[31m8.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
764 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.1/3.1 MB\u001b[0m \u001b[31m105.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
765 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m71.5/71.5 kB\u001b[0m \u001b[31m7.5 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
766 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m76.9/76.9 kB\u001b[0m \u001b[31m9.2 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
767 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m58.3/58.3 kB\u001b[0m \u001b[31m6.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
768 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m46.0/46.0 kB\u001b[0m \u001b[31m5.8 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
769 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m50.8/50.8 kB\u001b[0m \u001b[31m6.9 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
770 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m341.4/341.4 kB\u001b[0m \u001b[31m35.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
771 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m3.4/3.4 MB\u001b[0m \u001b[31m105.7 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
772 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m1.3/1.3 MB\u001b[0m \u001b[31m57.6 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
773 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m130.2/130.2 kB\u001b[0m \u001b[31m16.3 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
774 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m49.4/49.4 kB\u001b[0m \u001b[31m723.8 kB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
775 |
+
"\u001b[2K \u001b[90m━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\u001b[0m \u001b[32m86.8/86.8 kB\u001b[0m \u001b[31m10.0 MB/s\u001b[0m eta \u001b[36m0:00:00\u001b[0m\n",
|
776 |
+
"\u001b[?25h Building wheel for pypika (pyproject.toml) ... \u001b[?25l\u001b[?25hdone\n"
|
777 |
+
]
|
778 |
+
}
|
779 |
+
],
|
780 |
+
"source": [
|
781 |
+
"!pip install -q llama-index==0.9.21 openai==1.6.0 tiktoken==0.5.2 chromadb==0.4.21 kaleido==0.2.1 python-multipart==0.0.6 cohere==4.39"
|
782 |
+
]
|
783 |
+
},
|
784 |
+
{
|
785 |
+
"cell_type": "code",
|
786 |
+
"source": [
|
787 |
+
"import os\n",
|
788 |
+
"\n",
|
789 |
+
"# Set the \"OPENAI_API_KEY\" in the Python environment. Will be used by OpenAI client later.\n",
|
790 |
+
"os.environ[\"OPENAI_API_KEY\"] = \"<YOUR_OPENAI_KEY>\""
|
791 |
+
],
|
792 |
+
"metadata": {
|
793 |
+
"id": "riuXwpSPcvWC"
|
794 |
+
},
|
795 |
+
"execution_count": 2,
|
796 |
+
"outputs": []
|
797 |
+
},
|
798 |
+
{
|
799 |
+
"cell_type": "code",
|
800 |
+
"source": [
|
801 |
+
"import nest_asyncio\n",
|
802 |
+
"\n",
|
803 |
+
"nest_asyncio.apply()"
|
804 |
+
],
|
805 |
+
"metadata": {
|
806 |
+
"id": "jIEeZzqLbz0J"
|
807 |
+
},
|
808 |
+
"execution_count": 3,
|
809 |
+
"outputs": []
|
810 |
+
},
|
811 |
+
{
|
812 |
+
"cell_type": "markdown",
|
813 |
+
"source": [
|
814 |
+
"# Load a Model"
|
815 |
+
],
|
816 |
+
"metadata": {
|
817 |
+
"id": "Bkgi2OrYzF7q"
|
818 |
+
}
|
819 |
+
},
|
820 |
+
{
|
821 |
+
"cell_type": "code",
|
822 |
+
"source": [
|
823 |
+
"from llama_index.llms import OpenAI\n",
|
824 |
+
"\n",
|
825 |
+
"llm = OpenAI(temperature=0.9, model=\"gpt-3.5-turbo\", max_tokens=512)"
|
826 |
+
],
|
827 |
+
"metadata": {
|
828 |
+
"id": "9oGT6crooSSj"
|
829 |
+
},
|
830 |
+
"execution_count": 4,
|
831 |
+
"outputs": []
|
832 |
+
},
|
833 |
+
{
|
834 |
+
"cell_type": "markdown",
|
835 |
+
"source": [
|
836 |
+
"# Create a VectorStore"
|
837 |
+
],
|
838 |
+
"metadata": {
|
839 |
+
"id": "0BwVuJXlzHVL"
|
840 |
+
}
|
841 |
+
},
|
842 |
+
{
|
843 |
+
"cell_type": "code",
|
844 |
+
"source": [
|
845 |
+
"import chromadb\n",
|
846 |
+
"\n",
|
847 |
+
"# create client and a new collection\n",
|
848 |
+
"# chromadb.EphemeralClient saves data in-memory.\n",
|
849 |
+
"chroma_client = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
|
850 |
+
"chroma_collection = chroma_client.create_collection(\"mini-llama-articles\")"
|
851 |
+
],
|
852 |
+
"metadata": {
|
853 |
+
"id": "SQP87lHczHKc"
|
854 |
+
},
|
855 |
+
"execution_count": null,
|
856 |
+
"outputs": []
|
857 |
+
},
|
858 |
+
{
|
859 |
+
"cell_type": "code",
|
860 |
+
"source": [
|
861 |
+
"from llama_index.vector_stores import ChromaVectorStore\n",
|
862 |
+
"\n",
|
863 |
+
"# Define a storage context object using the created vector database.\n",
|
864 |
+
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
|
865 |
+
],
|
866 |
+
"metadata": {
|
867 |
+
"id": "zAaGcYMJzHAN"
|
868 |
+
},
|
869 |
+
"execution_count": null,
|
870 |
+
"outputs": []
|
871 |
+
},
|
872 |
+
{
|
873 |
+
"cell_type": "markdown",
|
874 |
+
"source": [
|
875 |
+
"# Load the Dataset (CSV)"
|
876 |
+
],
|
877 |
+
"metadata": {
|
878 |
+
"id": "I9JbAzFcjkpn"
|
879 |
+
}
|
880 |
+
},
|
881 |
+
{
|
882 |
+
"cell_type": "markdown",
|
883 |
+
"source": [
|
884 |
+
"## Download"
|
885 |
+
],
|
886 |
+
"metadata": {
|
887 |
+
"id": "ceveDuYdWCYk"
|
888 |
+
}
|
889 |
+
},
|
890 |
+
{
|
891 |
+
"cell_type": "markdown",
|
892 |
+
"source": [
|
893 |
+
"The dataset includes several articles from the TowardsAI blog, which provide an in-depth explanation of the LLaMA2 model. Read the dataset as a long string."
|
894 |
+
],
|
895 |
+
"metadata": {
|
896 |
+
"id": "eZwf6pv7WFmD"
|
897 |
+
}
|
898 |
+
},
|
899 |
+
{
|
900 |
+
"cell_type": "code",
|
901 |
+
"source": [
|
902 |
+
"!wget https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv"
|
903 |
+
],
|
904 |
+
"metadata": {
|
905 |
+
"colab": {
|
906 |
+
"base_uri": "https://localhost:8080/"
|
907 |
+
},
|
908 |
+
"id": "wl_pbPvMlv1h",
|
909 |
+
"outputId": "24342259-24f0-44fa-bd0d-21da798d0555"
|
910 |
+
},
|
911 |
+
"execution_count": 5,
|
912 |
+
"outputs": [
|
913 |
+
{
|
914 |
+
"output_type": "stream",
|
915 |
+
"name": "stdout",
|
916 |
+
"text": [
|
917 |
+
"--2024-02-07 17:30:07-- https://raw.githubusercontent.com/AlaFalaki/tutorial_notebooks/main/data/mini-llama-articles.csv\n",
|
918 |
+
"Resolving raw.githubusercontent.com (raw.githubusercontent.com)... 185.199.110.133, 185.199.108.133, 185.199.109.133, ...\n",
|
919 |
+
"Connecting to raw.githubusercontent.com (raw.githubusercontent.com)|185.199.110.133|:443... connected.\n",
|
920 |
+
"HTTP request sent, awaiting response... 200 OK\n",
|
921 |
+
"Length: 173646 (170K) [text/plain]\n",
|
922 |
+
"Saving to: ‘mini-llama-articles.csv’\n",
|
923 |
+
"\n",
|
924 |
+
"\rmini-llama-articles 0%[ ] 0 --.-KB/s \rmini-llama-articles 100%[===================>] 169.58K --.-KB/s in 0.02s \n",
|
925 |
+
"\n",
|
926 |
+
"2024-02-07 17:30:07 (7.03 MB/s) - ‘mini-llama-articles.csv’ saved [173646/173646]\n",
|
927 |
+
"\n"
|
928 |
+
]
|
929 |
+
}
|
930 |
+
]
|
931 |
+
},
|
932 |
+
{
|
933 |
+
"cell_type": "markdown",
|
934 |
+
"source": [
|
935 |
+
"## Read File"
|
936 |
+
],
|
937 |
+
"metadata": {
|
938 |
+
"id": "VWBLtDbUWJfA"
|
939 |
+
}
|
940 |
+
},
|
941 |
+
{
|
942 |
+
"cell_type": "code",
|
943 |
+
"source": [
|
944 |
+
"import csv\n",
|
945 |
+
"\n",
|
946 |
+
"rows = []\n",
|
947 |
+
"\n",
|
948 |
+
"# Load the file as a CSV\n",
|
949 |
+
"with open(\"./mini-llama-articles.csv\", mode=\"r\", encoding=\"utf-8\") as file:\n",
|
950 |
+
" csv_reader = csv.reader(file)\n",
|
951 |
+
"\n",
|
952 |
+
" for idx, row in enumerate( csv_reader ):\n",
|
953 |
+
" if idx == 0: continue; # Skip header row\n",
|
954 |
+
" rows.append( row )\n",
|
955 |
+
"\n",
|
956 |
+
"# The number of rows (articles) in the dataset.\n",
|
957 |
+
"len( rows )"
|
958 |
+
],
|
959 |
+
"metadata": {
|
960 |
+
"id": "0Q9sxuW0g3Gd",
|
961 |
+
"colab": {
|
962 |
+
"base_uri": "https://localhost:8080/"
|
963 |
+
},
|
964 |
+
"outputId": "889c1127-cf04-4ce7-d99c-d60826ffe92f"
|
965 |
+
},
|
966 |
+
"execution_count": 6,
|
967 |
+
"outputs": [
|
968 |
+
{
|
969 |
+
"output_type": "execute_result",
|
970 |
+
"data": {
|
971 |
+
"text/plain": [
|
972 |
+
"14"
|
973 |
+
]
|
974 |
+
},
|
975 |
+
"metadata": {},
|
976 |
+
"execution_count": 6
|
977 |
+
}
|
978 |
+
]
|
979 |
+
},
|
980 |
+
{
|
981 |
+
"cell_type": "markdown",
|
982 |
+
"source": [
|
983 |
+
"# Convert to Document obj"
|
984 |
+
],
|
985 |
+
"metadata": {
|
986 |
+
"id": "S17g2RYOjmf2"
|
987 |
+
}
|
988 |
+
},
|
989 |
+
{
|
990 |
+
"cell_type": "code",
|
991 |
+
"source": [
|
992 |
+
"from llama_index import Document\n",
|
993 |
+
"\n",
|
994 |
+
"# Convert the chunks to Document objects so the LlamaIndex framework can process them.\n",
|
995 |
+
"documents = [Document(text=row[1], metadata={\"title\": row[0], \"url\": row[2], \"source_name\": row[3]}) for row in rows]"
|
996 |
+
],
|
997 |
+
"metadata": {
|
998 |
+
"id": "YizvmXPejkJE"
|
999 |
+
},
|
1000 |
+
"execution_count": 7,
|
1001 |
+
"outputs": []
|
1002 |
+
},
|
1003 |
+
{
|
1004 |
+
"cell_type": "markdown",
|
1005 |
+
"source": [
|
1006 |
+
"# Transforming"
|
1007 |
+
],
|
1008 |
+
"metadata": {
|
1009 |
+
"id": "qjuLbmFuWsyl"
|
1010 |
+
}
|
1011 |
+
},
|
1012 |
+
{
|
1013 |
+
"cell_type": "code",
|
1014 |
+
"source": [
|
1015 |
+
"from llama_index.text_splitter import TokenTextSplitter\n",
|
1016 |
+
"\n",
|
1017 |
+
"text_splitter = TokenTextSplitter(\n",
|
1018 |
+
" separator=\" \", chunk_size=512, chunk_overlap=128\n",
|
1019 |
+
")"
|
1020 |
+
],
|
1021 |
+
"metadata": {
|
1022 |
+
"id": "9z3t70DGWsjO"
|
1023 |
+
},
|
1024 |
+
"execution_count": 8,
|
1025 |
+
"outputs": []
|
1026 |
+
},
|
1027 |
+
{
|
1028 |
+
"cell_type": "code",
|
1029 |
+
"source": [
|
1030 |
+
"from llama_index.extractors import (\n",
|
1031 |
+
" SummaryExtractor,\n",
|
1032 |
+
" QuestionsAnsweredExtractor,\n",
|
1033 |
+
" KeywordExtractor,\n",
|
1034 |
+
")\n",
|
1035 |
+
"from llama_index.embeddings import OpenAIEmbedding\n",
|
1036 |
+
"from llama_index.ingestion import IngestionPipeline\n",
|
1037 |
+
"\n",
|
1038 |
+
"pipeline = IngestionPipeline(\n",
|
1039 |
+
" transformations=[\n",
|
1040 |
+
" text_splitter,\n",
|
1041 |
+
" QuestionsAnsweredExtractor(questions=3, llm=llm),\n",
|
1042 |
+
" SummaryExtractor(summaries=[\"prev\", \"self\"], llm=llm),\n",
|
1043 |
+
" KeywordExtractor(keywords=10, llm=llm),\n",
|
1044 |
+
" OpenAIEmbedding(),\n",
|
1045 |
+
" ],\n",
|
1046 |
+
" vector_store=vector_store\n",
|
1047 |
+
")\n",
|
1048 |
+
"\n",
|
1049 |
+
"nodes = pipeline.run(documents=documents, show_progress=True);"
|
1050 |
+
],
|
1051 |
+
"metadata": {
|
1052 |
+
"colab": {
|
1053 |
+
"base_uri": "https://localhost:8080/",
|
1054 |
+
"height": 331,
|
1055 |
+
"referenced_widgets": [
|
1056 |
+
"3fbabd8a8660461ba5e7bc08ef39139a",
|
1057 |
+
"df2365556ae242a2ab1a119f9a31a561",
|
1058 |
+
"5f4b9d32df8f446e858e4c289dc282f9",
|
1059 |
+
"5b588f83a15d42d9aca888e06bbd95ff",
|
1060 |
+
"ad073bca655540809e39f26538d2ec0d",
|
1061 |
+
"13b9c5395bca4c3ba21265240cb936cf",
|
1062 |
+
"47a4586384274577a726c57605e7f8d9",
|
1063 |
+
"96a3bdece738481db57e811ccb74a974",
|
1064 |
+
"5c7973afd79349ed997a69120d0629b2",
|
1065 |
+
"af9b6ae927dd4764b9692507791bc67e",
|
1066 |
+
"134210510d49476e959dd7d032bbdbdc",
|
1067 |
+
"5f9bb065c2b74d2e8ded32e1306a7807",
|
1068 |
+
"73a06bc546a64f7f99a9e4a135319dcd",
|
1069 |
+
"ce48deaf4d8c49cdae92bfdbb3a78df0",
|
1070 |
+
"4a172e8c6aa44e41a42fc1d9cf714fd0",
|
1071 |
+
"0245f2604e4d49c8bd0210302746c47b",
|
1072 |
+
"e956dfab55084a9cbe33c8e331b511e7",
|
1073 |
+
"cb394578badd43a89850873ad2526542",
|
1074 |
+
"193aef33d9184055bb9223f56d456de6",
|
1075 |
+
"abfc9aa911ce4a5ea81c7c451f08295f",
|
1076 |
+
"e7937a1bc68441a080374911a6563376",
|
1077 |
+
"e532ed7bfef34f67b5fcacd9534eb789"
|
1078 |
+
]
|
1079 |
+
},
|
1080 |
+
"id": "P9LDJ7o-Wsc-",
|
1081 |
+
"outputId": "01070c1f-dffa-4ab7-ad71-b07b76b12e03"
|
1082 |
+
},
|
1083 |
+
"execution_count": 71,
|
1084 |
+
"outputs": [
|
1085 |
+
{
|
1086 |
+
"output_type": "display_data",
|
1087 |
+
"data": {
|
1088 |
+
"text/plain": [
|
1089 |
+
"Parsing nodes: 0%| | 0/14 [00:00<?, ?it/s]"
|
1090 |
+
],
|
1091 |
+
"application/vnd.jupyter.widget-view+json": {
|
1092 |
+
"version_major": 2,
|
1093 |
+
"version_minor": 0,
|
1094 |
+
"model_id": "3fbabd8a8660461ba5e7bc08ef39139a"
|
1095 |
+
}
|
1096 |
+
},
|
1097 |
+
"metadata": {}
|
1098 |
+
},
|
1099 |
+
{
|
1100 |
+
"output_type": "stream",
|
1101 |
+
"name": "stdout",
|
1102 |
+
"text": [
|
1103 |
+
"464\n",
|
1104 |
+
"452\n",
|
1105 |
+
"457\n",
|
1106 |
+
"465\n",
|
1107 |
+
"448\n",
|
1108 |
+
"468\n",
|
1109 |
+
"434\n",
|
1110 |
+
"447\n",
|
1111 |
+
"455\n",
|
1112 |
+
"445\n",
|
1113 |
+
"449\n",
|
1114 |
+
"455\n",
|
1115 |
+
"431\n",
|
1116 |
+
"453\n"
|
1117 |
+
]
|
1118 |
+
},
|
1119 |
+
{
|
1120 |
+
"output_type": "display_data",
|
1121 |
+
"data": {
|
1122 |
+
"text/plain": [
|
1123 |
+
"Generating embeddings: 0%| | 0/108 [00:00<?, ?it/s]"
|
1124 |
+
],
|
1125 |
+
"application/vnd.jupyter.widget-view+json": {
|
1126 |
+
"version_major": 2,
|
1127 |
+
"version_minor": 0,
|
1128 |
+
"model_id": "5f9bb065c2b74d2e8ded32e1306a7807"
|
1129 |
+
}
|
1130 |
+
},
|
1131 |
+
"metadata": {}
|
1132 |
+
}
|
1133 |
+
]
|
1134 |
+
},
|
1135 |
+
{
|
1136 |
+
"cell_type": "code",
|
1137 |
+
"source": [
|
1138 |
+
"len( nodes )"
|
1139 |
+
],
|
1140 |
+
"metadata": {
|
1141 |
+
"colab": {
|
1142 |
+
"base_uri": "https://localhost:8080/"
|
1143 |
+
},
|
1144 |
+
"id": "mPGa85hM2P3P",
|
1145 |
+
"outputId": "c106c463-2459-4b11-bbae-5bd5e2246011"
|
1146 |
+
},
|
1147 |
+
"execution_count": null,
|
1148 |
+
"outputs": [
|
1149 |
+
{
|
1150 |
+
"output_type": "execute_result",
|
1151 |
+
"data": {
|
1152 |
+
"text/plain": [
|
1153 |
+
"108"
|
1154 |
+
]
|
1155 |
+
},
|
1156 |
+
"metadata": {},
|
1157 |
+
"execution_count": 109
|
1158 |
+
}
|
1159 |
+
]
|
1160 |
+
},
|
1161 |
+
{
|
1162 |
+
"cell_type": "code",
|
1163 |
+
"source": [
|
1164 |
+
"!zip -r vectorstore.zip mini-llama-articles"
|
1165 |
+
],
|
1166 |
+
"metadata": {
|
1167 |
+
"id": "23x20bL3_jRb"
|
1168 |
+
},
|
1169 |
+
"execution_count": null,
|
1170 |
+
"outputs": []
|
1171 |
+
},
|
1172 |
+
{
|
1173 |
+
"cell_type": "markdown",
|
1174 |
+
"source": [
|
1175 |
+
"# Load Indexes"
|
1176 |
+
],
|
1177 |
+
"metadata": {
|
1178 |
+
"id": "OWaT6rL7ksp8"
|
1179 |
+
}
|
1180 |
+
},
|
1181 |
+
{
|
1182 |
+
"cell_type": "code",
|
1183 |
+
"source": [
|
1184 |
+
"!unzip vectorstore.zip"
|
1185 |
+
],
|
1186 |
+
"metadata": {
|
1187 |
+
"colab": {
|
1188 |
+
"base_uri": "https://localhost:8080/"
|
1189 |
+
},
|
1190 |
+
"id": "SodY2Xpf_kxg",
|
1191 |
+
"outputId": "701258b4-ea35-46d1-df33-536a45752a28"
|
1192 |
+
},
|
1193 |
+
"execution_count": 9,
|
1194 |
+
"outputs": [
|
1195 |
+
{
|
1196 |
+
"output_type": "stream",
|
1197 |
+
"name": "stdout",
|
1198 |
+
"text": [
|
1199 |
+
"Archive: vectorstore.zip\n",
|
1200 |
+
" creating: mini-llama-articles/\n",
|
1201 |
+
" creating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/\n",
|
1202 |
+
" inflating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/data_level0.bin \n",
|
1203 |
+
" inflating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/header.bin \n",
|
1204 |
+
" extracting: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/link_lists.bin \n",
|
1205 |
+
" inflating: mini-llama-articles/a361e92f-9895-41b6-ba72-4ad38e9875bd/length.bin \n",
|
1206 |
+
" inflating: mini-llama-articles/chroma.sqlite3 \n"
|
1207 |
+
]
|
1208 |
+
}
|
1209 |
+
]
|
1210 |
+
},
|
1211 |
+
{
|
1212 |
+
"cell_type": "code",
|
1213 |
+
"source": [
|
1214 |
+
"import chromadb\n",
|
1215 |
+
"from llama_index.vector_stores import ChromaVectorStore\n",
|
1216 |
+
"\n",
|
1217 |
+
"# Create your index\n",
|
1218 |
+
"db = chromadb.PersistentClient(path=\"./mini-llama-articles\")\n",
|
1219 |
+
"chroma_collection = db.get_or_create_collection(\"mini-llama-articles\")\n",
|
1220 |
+
"vector_store = ChromaVectorStore(chroma_collection=chroma_collection)"
|
1221 |
+
],
|
1222 |
+
"metadata": {
|
1223 |
+
"id": "mXi56KTXk2sp"
|
1224 |
+
},
|
1225 |
+
"execution_count": 10,
|
1226 |
+
"outputs": []
|
1227 |
+
},
|
1228 |
+
{
|
1229 |
+
"cell_type": "code",
|
1230 |
+
"source": [
|
1231 |
+
"# Create your index\n",
|
1232 |
+
"from llama_index import VectorStoreIndex\n",
|
1233 |
+
"\n",
|
1234 |
+
"vector_index = VectorStoreIndex.from_vector_store(vector_store)"
|
1235 |
+
],
|
1236 |
+
"metadata": {
|
1237 |
+
"id": "jKXURvLtkuTS"
|
1238 |
+
},
|
1239 |
+
"execution_count": 20,
|
1240 |
+
"outputs": []
|
1241 |
+
},
|
1242 |
+
{
|
1243 |
+
"cell_type": "code",
|
1244 |
+
"source": [
|
1245 |
+
"# Set similarity_top_k to a large number to retrieve all the nodes\n",
|
1246 |
+
"retriever = vector_index.as_retriever(similarity_top_k=100000000)\n",
|
1247 |
+
"\n",
|
1248 |
+
"# Retrieve all nodes\n",
|
1249 |
+
"all_nodes = retriever.retrieve('Hello!')"
|
1250 |
+
],
|
1251 |
+
"metadata": {
|
1252 |
+
"colab": {
|
1253 |
+
"base_uri": "https://localhost:8080/"
|
1254 |
+
},
|
1255 |
+
"id": "Za6m06wpcJpN",
|
1256 |
+
"outputId": "98806ea5-5c2d-4a87-97ea-ee37a890c7bf"
|
1257 |
+
},
|
1258 |
+
"execution_count": 12,
|
1259 |
+
"outputs": [
|
1260 |
+
{
|
1261 |
+
"output_type": "stream",
|
1262 |
+
"name": "stderr",
|
1263 |
+
"text": [
|
1264 |
+
"WARNING:chromadb.segment.impl.vector.local_persistent_hnsw:Number of requested results 100000000 is greater than number of elements in index 108, updating n_results = 108\n"
|
1265 |
+
]
|
1266 |
+
}
|
1267 |
+
]
|
1268 |
+
},
|
1269 |
+
{
|
1270 |
+
"cell_type": "code",
|
1271 |
+
"source": [
|
1272 |
+
"all_nodes = [item.node for item in all_nodes]"
|
1273 |
+
],
|
1274 |
+
"metadata": {
|
1275 |
+
"id": "2Tz_n2MLj62B"
|
1276 |
+
},
|
1277 |
+
"execution_count": 15,
|
1278 |
+
"outputs": []
|
1279 |
+
},
|
1280 |
+
{
|
1281 |
+
"cell_type": "code",
|
1282 |
+
"source": [
|
1283 |
+
"len( all_nodes )"
|
1284 |
+
],
|
1285 |
+
"metadata": {
|
1286 |
+
"colab": {
|
1287 |
+
"base_uri": "https://localhost:8080/"
|
1288 |
+
},
|
1289 |
+
"id": "mquOgF8UnXZi",
|
1290 |
+
"outputId": "cd41e132-237e-4e4f-bb35-464dba9307ba"
|
1291 |
+
},
|
1292 |
+
"execution_count": 16,
|
1293 |
+
"outputs": [
|
1294 |
+
{
|
1295 |
+
"output_type": "execute_result",
|
1296 |
+
"data": {
|
1297 |
+
"text/plain": [
|
1298 |
+
"108"
|
1299 |
+
]
|
1300 |
+
},
|
1301 |
+
"metadata": {},
|
1302 |
+
"execution_count": 16
|
1303 |
+
}
|
1304 |
+
]
|
1305 |
+
},
|
1306 |
+
{
|
1307 |
+
"cell_type": "code",
|
1308 |
+
"source": [
|
1309 |
+
"from llama_index import SimpleKeywordTableIndex\n",
|
1310 |
+
"\n",
|
1311 |
+
"keyword_index = SimpleKeywordTableIndex(nodes=all_nodes)"
|
1312 |
+
],
|
1313 |
+
"metadata": {
|
1314 |
+
"id": "hcmwBAsCZIwR"
|
1315 |
+
},
|
1316 |
+
"execution_count": 17,
|
1317 |
+
"outputs": []
|
1318 |
+
},
|
1319 |
+
{
|
1320 |
+
"cell_type": "markdown",
|
1321 |
+
"source": [
|
1322 |
+
"# Custom Retriever"
|
1323 |
+
],
|
1324 |
+
"metadata": {
|
1325 |
+
"id": "K3wtAa7Lo2Vh"
|
1326 |
+
}
|
1327 |
+
},
|
1328 |
+
{
|
1329 |
+
"cell_type": "code",
|
1330 |
+
"source": [
|
1331 |
+
"# import QueryBundle\n",
|
1332 |
+
"from llama_index import QueryBundle\n",
|
1333 |
+
"\n",
|
1334 |
+
"# import NodeWithScore\n",
|
1335 |
+
"from llama_index.schema import NodeWithScore\n",
|
1336 |
+
"\n",
|
1337 |
+
"# Retrievers\n",
|
1338 |
+
"from llama_index.retrievers import (\n",
|
1339 |
+
" BaseRetriever,\n",
|
1340 |
+
" VectorIndexRetriever,\n",
|
1341 |
+
" KeywordTableSimpleRetriever,\n",
|
1342 |
+
")\n",
|
1343 |
+
"\n",
|
1344 |
+
"from typing import List"
|
1345 |
+
],
|
1346 |
+
"metadata": {
|
1347 |
+
"id": "8ZxO3cW-o8sA"
|
1348 |
+
},
|
1349 |
+
"execution_count": 18,
|
1350 |
+
"outputs": []
|
1351 |
+
},
|
1352 |
+
{
|
1353 |
+
"cell_type": "code",
|
1354 |
+
"source": [
|
1355 |
+
"class CustomRetriever(BaseRetriever):\n",
|
1356 |
+
" \"\"\"Custom retriever that combines semantic (vector) search and keyword search for hybrid retrieval.\"\"\"\n",
|
1357 |
+
"\n",
|
1358 |
+
" def __init__(\n",
|
1359 |
+
" self,\n",
|
1360 |
+
" vector_retriever: VectorIndexRetriever,\n",
|
1361 |
+
" keyword_retriever: KeywordTableSimpleRetriever,\n",
|
1362 |
+
" mode: str = \"AND\",\n",
|
1363 |
+
" ) -> None:\n",
|
1364 |
+
" \"\"\"Init params.\"\"\"\n",
|
1365 |
+
"\n",
|
1366 |
+
" self._vector_retriever = vector_retriever\n",
|
1367 |
+
" self._keyword_retriever = keyword_retriever\n",
|
1368 |
+
" if mode not in (\"AND\", \"OR\"):\n",
|
1369 |
+
" raise ValueError(\"Invalid mode.\")\n",
|
1370 |
+
" self._mode = mode\n",
|
1371 |
+
" super().__init__()\n",
|
1372 |
+
"\n",
|
1373 |
+
" def _retrieve(self, query_bundle: QueryBundle) -> List[NodeWithScore]:\n",
|
1374 |
+
" \"\"\"Retrieve nodes given query.\"\"\"\n",
|
1375 |
+
"\n",
|
1376 |
+
" vector_nodes = self._vector_retriever.retrieve(query_bundle)\n",
|
1377 |
+
" keyword_nodes = self._keyword_retriever.retrieve(query_bundle)\n",
|
1378 |
+
"\n",
|
1379 |
+
" vector_ids = {n.node.node_id for n in vector_nodes}\n",
|
1380 |
+
" keyword_ids = {n.node.node_id for n in keyword_nodes}\n",
|
1381 |
+
"\n",
|
1382 |
+
" combined_dict = {n.node.node_id: n for n in vector_nodes}\n",
|
1383 |
+
" combined_dict.update({n.node.node_id: n for n in keyword_nodes})\n",
|
1384 |
+
"\n",
|
1385 |
+
" if self._mode == \"AND\":\n",
|
1386 |
+
" retrieve_ids = vector_ids.intersection(keyword_ids)\n",
|
1387 |
+
" else:\n",
|
1388 |
+
" retrieve_ids = vector_ids.union(keyword_ids)\n",
|
1389 |
+
"\n",
|
1390 |
+
" retrieve_nodes = [combined_dict[rid] for rid in retrieve_ids]\n",
|
1391 |
+
"\n",
|
1392 |
+
" return retrieve_nodes"
|
1393 |
+
],
|
1394 |
+
"metadata": {
|
1395 |
+
"id": "txPFNOkUo2Kj"
|
1396 |
+
},
|
1397 |
+
"execution_count": 49,
|
1398 |
+
"outputs": []
|
1399 |
+
},
|
1400 |
+
{
|
1401 |
+
"cell_type": "code",
|
1402 |
+
"source": [
|
1403 |
+
"from llama_index import get_response_synthesizer\n",
|
1404 |
+
"from llama_index.query_engine import RetrieverQueryEngine\n",
|
1405 |
+
"\n",
|
1406 |
+
"# define custom retriever\n",
|
1407 |
+
"vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=2)\n",
|
1408 |
+
"keyword_retriever = KeywordTableSimpleRetriever(index=keyword_index, max_keywords_per_query=2)\n",
|
1409 |
+
"custom_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"OR\")\n",
|
1410 |
+
"\n",
|
1411 |
+
"# define response synthesizer\n",
|
1412 |
+
"response_synthesizer = get_response_synthesizer()"
|
1413 |
+
],
|
1414 |
+
"metadata": {
|
1415 |
+
"id": "YWLckX40pii-"
|
1416 |
+
},
|
1417 |
+
"execution_count": 59,
|
1418 |
+
"outputs": []
|
1419 |
+
},
|
1420 |
+
{
|
1421 |
+
"cell_type": "markdown",
|
1422 |
+
"source": [
|
1423 |
+
"# Query Dataset"
|
1424 |
+
],
|
1425 |
+
"metadata": {
|
1426 |
+
"id": "8JPD8yAinVSq"
|
1427 |
+
}
|
1428 |
+
},
|
1429 |
+
{
|
1430 |
+
"cell_type": "code",
|
1431 |
+
"source": [
|
1432 |
+
"# Define a query engine that is responsible for retrieving related pieces of text,\n",
|
1433 |
+
"# and using a LLM to formulate the final answer.\n",
|
1434 |
+
"custom_query_engine = RetrieverQueryEngine(\n",
|
1435 |
+
" retriever=custom_retriever,\n",
|
1436 |
+
" response_synthesizer=response_synthesizer,\n",
|
1437 |
+
")\n",
|
1438 |
+
"\n",
|
1439 |
+
"res = custom_query_engine.query(\"How many parameters LLaMA2 model has?\")"
|
1440 |
+
],
|
1441 |
+
"metadata": {
|
1442 |
+
"id": "b0gue7cyctt1"
|
1443 |
+
},
|
1444 |
+
"execution_count": 60,
|
1445 |
+
"outputs": []
|
1446 |
+
},
|
1447 |
+
{
|
1448 |
+
"cell_type": "code",
|
1449 |
+
"source": [
|
1450 |
+
"res.response"
|
1451 |
+
],
|
1452 |
+
"metadata": {
|
1453 |
+
"colab": {
|
1454 |
+
"base_uri": "https://localhost:8080/",
|
1455 |
+
"height": 35
|
1456 |
+
},
|
1457 |
+
"id": "VKK3jMprctre",
|
1458 |
+
"outputId": "370a6a1a-133d-428f-80c7-28777f4349b3"
|
1459 |
+
},
|
1460 |
+
"execution_count": 61,
|
1461 |
+
"outputs": [
|
1462 |
+
{
|
1463 |
+
"output_type": "execute_result",
|
1464 |
+
"data": {
|
1465 |
+
"text/plain": [
|
1466 |
+
"'The LLaMA2 model is available in four different sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters.'"
|
1467 |
+
],
|
1468 |
+
"application/vnd.google.colaboratory.intrinsic+json": {
|
1469 |
+
"type": "string"
|
1470 |
+
}
|
1471 |
+
},
|
1472 |
+
"metadata": {},
|
1473 |
+
"execution_count": 61
|
1474 |
+
}
|
1475 |
+
]
|
1476 |
+
},
|
1477 |
+
{
|
1478 |
+
"cell_type": "code",
|
1479 |
+
"source": [
|
1480 |
+
"for src in res.source_nodes:\n",
|
1481 |
+
" print(\"Node ID\\t\", src.node_id)\n",
|
1482 |
+
" print(\"Title\\t\", src.metadata['title'])\n",
|
1483 |
+
" print(\"Text\\t\", src.text)\n",
|
1484 |
+
" print(\"Score\\t\", src.score)\n",
|
1485 |
+
" print(\"-_\"*20)"
|
1486 |
+
],
|
1487 |
+
"metadata": {
|
1488 |
+
"colab": {
|
1489 |
+
"base_uri": "https://localhost:8080/"
|
1490 |
+
},
|
1491 |
+
"id": "465dH4yQc7Ct",
|
1492 |
+
"outputId": "8f43f543-40b1-4f63-a433-d59b33545774"
|
1493 |
+
},
|
1494 |
+
"execution_count": 53,
|
1495 |
+
"outputs": [
|
1496 |
+
{
|
1497 |
+
"output_type": "stream",
|
1498 |
+
"name": "stdout",
|
1499 |
+
"text": [
|
1500 |
+
"Node ID\t 4eeedc9b-c7c5-4c38-84f3-acfa7be825f1\n",
|
1501 |
+
"Title\t Building Intuition on the Concepts behind LLMs like ChatGPT - Part 1- Neural Networks, Transformers, Pretraining, and Fine Tuning\n",
|
1502 |
+
"Text\t published by OpenAI, to train better models, increasing the number of parameters is 3x more important than increasing the size of the training data. (Note: DeepMind has since published a paper with a differing view.) This translates to a significant increase in computational requirements, as handling a larger number of parameters demands more complex calculations. Parallelization, which is the process of dividing a single task into multiple sub-tasks that can be processed simultaneously across multiple compute resources, becomes essential in dealing with this problem. Parallelization is difficult to achieve with RNNs given their sequential nature. This is not an issue for transformers as it computes relationships between all elements in a sequence simultaneously, rather than sequentially. It also means that they work well with GPUs or video cards. Graphics rendering requires a large number of simple calculations happening concurrently. The numerous, small, and efficient processing cores that a GPU has, which are designed for simultaneous operations, make it a good fit for tasks such as matrix and vector operations that are central to deep learning. AI going 'mainstream' and the mad scramble to build larger and better models is a boon to GPU manufacturers. NVIDIA- specifically - whose stock price has grown 200% YTD as of this writing, has made them the highest-performing stock this year and pushed their market cap to USD 1 trillion. They join megacaps like Apple, Google, Microsoft, and Amazon in this exclusive club. The Transformer is a decidedly complex topic and the explanation above wholesale left out important concepts in order to be more digestible to a broader audience. If you want to know more, I found these gentle yet significantly more fleshed-out introductions to the topic: Jay Allamar's illustrated transformer, Lili Jiang's potion analogy, or if you want something more advanced - Karpathy's nanoGPT that babbles in Shakepear-ish. 
Fine-tuning 'chat' models like ChatGPT The output of pretrainings are base models or foundation models. Examples of recently released text-generation foundation models are GPT-4, Bard, LLaMa 1 & 2, and Claude 1\n",
|
1503 |
+
"Score\t None\n",
|
1504 |
+
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
|
1505 |
+
"Node ID\t 2f3b7c34-8fd0-4134-af38-ef1b77e32cd8\n",
|
1506 |
+
"Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
|
1507 |
+
"Text\t The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving an optimum balance that allows the model to be both helpful and safe is of utmost importance. To strike the right balance between helpfulness and safety, Meta employed two reward models - one for helpfulness and another for safety - to optimize the model's responses. The 34B parameter model has reported higher safety violations than other variants, possibly contributing to the delay in its release. IV. Helpfulness Comparison: Llama 2 Outperforms Competitors Llama 2 emerges as a strong contender in the open-source language model arena, outperforming its competitors in most categories. The 70B parameter model outperforms all other open-source models, while the 7B and 34B models outshine Falcon in all categories and MPT in all categories except coding. Despite being smaller, Llam a2's performance rivals that of Chat GPT 3.5, a significantly larger closed-source model. While GPT 4 and PalM-2-L, with their larger size, outperform Llama 2, this is expected due to their capacity for handling complex language tasks. Llama 2's impressive ability to compete with larger models highlights its efficiency and potential in the market. 
However, Llama 2 does face challenges in coding and math problems, where models like Chat GPT 4 excel, given their significantly larger size. Chat GPT 4 performed significantly better than Llama 2 for coding (HumanEval benchmark)and math problem tasks (GSM8k benchmark). Open-source AI technologies, like Llama 2, continue to advance, offering\n",
|
1508 |
+
"Score\t 0.7027467208195612\n",
|
1509 |
+
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
|
1510 |
+
"Node ID\t 6cf9e4ad-4137-436d-bdff-c823dcc289f9\n",
|
1511 |
+
"Title\t Fine-Tuning a Llama-2 7B Model for Python Code Generation\n",
|
1512 |
+
"Text\t if it were an instruction, outlining its main sections as follows: Output: Fine-tuning the model To carry out this stage, we have used the Google Colab environment, where we have developed a notebook that allows us to run the training in an interactive way and also a Python script to run the training in unattended mode. For the first test runs, a T4 instance with a high RAM capacity is enough, but when it comes to running the whole dataset and epochs, we have opted to use an A100 instance in order to speed up the training and ensure that its execution time is reasonable. In order to be able to share the model, we will log in to the Huggingface hub using the appropriate token, so that at the end of the whole process, we will upload the model files so that they can be shared with the rest of the users. Fine-tuning techniques: PEFT, Lora, and QLora In recent months, some papers have appeared showing how PEFT techniques can be used to train large language models with a drastic reduction of RAM requirements and consequently allowing fine-tuning of these models on a single GPU of reasonable size. The usual steps to train an LLM consist, first, an intensive pre-training on billions or trillions of tokens to obtain a foundation model, and then a fine-tuning is performed on this model to specialize it on a downstream task. In this fine-tuning phase is where the PEFT technique has its purpose. Parameter Efficient Fine-Tuning (PEFT) allows us to considerably reduce RAM and storage requirements by only fine-tuning a small number of additional parameters, with virtually all model parameters remaining frozen. PEFT has been found to produce good generalization with relatively low-volume datasets. Furthermore, it enhances the reusability and portability of the model, as the small checkpoints obtained can be easily added to the base model, and the base model can be easily fine-tuned and reused in multiple scenarios by adding the PEFT parameters. 
Finally, since the base model is not adjusted, all the knowledge acquired in the pre-training phase is preserved, thus avoiding catastrophic forgetting. Most widely used PEFT techniques aim to keep the pre-trained base model untouched\n",
|
1513 |
+
"Score\t None\n",
|
1514 |
+
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
|
1515 |
+
"Node ID\t d6f533e5-fef8-469c-a313-def19fd38efe\n",
|
1516 |
+
"Title\t Meta's Llama 2: Revolutionizing Open Source Language Models for Commercial Use\n",
|
1517 |
+
"Text\t I. Llama 2: Revolutionizing Commercial Use Unlike its predecessor Llama 1, which was limited to research use, Llama 2 represents a major advancement as an open-source commercial model. Businesses can now integrate Llama 2 into products to create AI-powered applications. Availability on Azure and AWS facilitates fine-tuning and adoption. However, restrictions apply to prevent exploitation. Companies with over 700 million active daily users cannot use Llama 2. Additionally, its output cannot be used to improve other language models. II. Llama 2 Model Flavors Llama 2 is available in four different model sizes: 7 billion, 13 billion, 34 billion, and 70 billion parameters. While 7B, 13B, and 70B have already been released, the 34B model is still awaited. The pretrained variant, trained on a whopping 2 trillion tokens, boasts a context window of 4096 tokens, twice the size of its predecessor Llama 1. Meta also released a Llama 2 fine-tuned model for chat applications that was trained on over 1 million human annotations. Such extensive training comes at a cost, with the 70B model taking a staggering 1720320 GPU hours to train. The context window's length determines the amount of content the model can process at once, making Llama 2 a powerful language model in terms of scale and efficiency. III. Safety Considerations: A Top Priority for Meta Meta's commitment to safety and alignment shines through in Llama 2's design. The model demonstrates exceptionally low AI safety violation percentages, surpassing even ChatGPT in safety benchmarks. Finding the right balance between helpfulness and safety when optimizing a model poses significant challenges. While a highly helpful model may be capable of answering any question, including sensitive ones like \"How do I build a bomb?\", it also raises concerns about potential misuse. Thus, striking the perfect equilibrium between providing useful information and ensuring safety is paramount. 
However, prioritizing safety to an extreme extent can lead to a model that struggles to effectively address a diverse range of questions. This limitation could hinder the model's practical applicability and user experience. Thus, achieving\n",
|
1518 |
+
"Score\t 0.7079579000296077\n",
|
1519 |
+
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
|
1520 |
+
"Node ID\t ee2a78d4-9dc5-4c75-a468-2776b64d5879\n",
|
1521 |
+
"Title\t GPTQ Quantization on a Llama 2 7B Fine-Tuned Model With HuggingFace\n",
|
1522 |
+
"Text\t GPTQ: Post-training quantization on generative models In a groundbreaking paper [1], researchers unveiled GPTQ, a novel post-training quantization method that has the potential to reshape the world of language model compression. GPTQ is not only efficient enough to be applied to models boasting hundreds of billions of parameters, but it can also achieve remarkable precision by compressing these models to a mere 2, 3, or 4 bits per parameter without sacrificing significant accuracy. This cutting-edge technique is showcased by its ability to quantize massive models, such as OPT-175B and BLOOM-176B, in just a matter of a few GPU hours while maintaining minimal perplexity, a stringent measure of accuracy. On the practical front, the researchers have developed an execution harness that enables efficient operation of the compressed models for generative tasks. Remarkably, they achieved the milestone of running the compressed OPT-175B model on a single NVIDIA A100 GPU, or with only two more cost-effective NVIDIA A6000 GPUs. Additionally, bespoke GPU kernels optimized for compression result in significant speedups, further enhancing the practicality of these compressed models. What makes GPTQ stand out is its ability to quantize language models with hundreds of billions of parameters to the 34 bits/component range. This is a remarkable leap, as prior methods struggled to maintain accuracy below 8 bits and typically focused on smaller models. However, the study also highlights the complex tradeoffs between perplexity, bit-width, and model size induced by compression. But it comes with limitations. GPTQ does not currently offer speedups for actual multiplications due to the lack of hardware support for mixed-precision operands on mainstream architectures. Activation quantization is also not included in the current results but can be addressed through orthogonal techniques. 
In sum, GPTQ's ability to compress extremely accurate language models to unprecedented levels marks a significant milestone in the field of machine learning and language modeling. It paves the way for more efficient and accessible applications of these colossal models while pointing toward further research possibilities in the realm of model compression. ¿When you should use GPTQ? The\n",
|
1523 |
+
"Score\t None\n",
|
1524 |
+
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n",
|
1525 |
+
"Node ID\t 13ad2390-a78c-493f-af1c-013351243578\n",
|
1526 |
+
"Title\t Fine-Tuning a Llama-2 7B Model for Python Code Generation\n",
|
1527 |
+
"Text\t only fine-tuning a small number of additional parameters, with virtually all model parameters remaining frozen. PEFT has been found to produce good generalization with relatively low-volume datasets. Furthermore, it enhances the reusability and portability of the model, as the small checkpoints obtained can be easily added to the base model, and the base model can be easily fine-tuned and reused in multiple scenarios by adding the PEFT parameters. Finally, since the base model is not adjusted, all the knowledge acquired in the pre-training phase is preserved, thus avoiding catastrophic forgetting. Most widely used PEFT techniques aim to keep the pre-trained base model untouched and add new layers or parameters on top of it. These layers are called \"Adapters\" and the technique of their adjustment \"adapter-tuning\", we add these layers to the pre-trained base model and only train the parameters of these new layers. However, a serious problem with this approach is that these layers lead to increased latency in the inference phase, which makes the process inefficient in many scenarios.In the LoRa technique, a Low-Rank Adaptation of Large Language Models, the idea is not to include new layers but to add values to the parameters in a way that avoids this scary problem of latency in the inference phase. LoRa trains and stores the changes of the additional weights while freezing all the weights of the pre-trained model. Therefore, we train a new weights matrix with the changes in the pre-trained model matrix, and this new matrix is decomposed into 2 Low-rank matrices as explained here: Merge the base model and the adapter weights As we mention, we have trained \"modification weights\" on the base model, our final model requires merging the pretrained model and the adapters in a single model. You can find and download the model in my Hugging Face account edumunozsala/llama-27b-int4-python-code-20k. Give it a try! 
Inferencing or generating Python code And finally, we will show you how you can download the model from the Hugging Face Hub and call the model to generate an accurate result: Thanks to Maxime Labonne for an excellent article [9] and Philipp Schmid who provides an inspiring\n",
|
1528 |
+
"Score\t None\n",
|
1529 |
+
"-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_-_\n"
|
1530 |
+
]
|
1531 |
+
}
|
1532 |
+
]
|
1533 |
+
},
|
1534 |
+
{
|
1535 |
+
"cell_type": "markdown",
|
1536 |
+
"source": [
|
1537 |
+
"# Evaluate"
|
1538 |
+
],
|
1539 |
+
"metadata": {
|
1540 |
+
"id": "iMkpzH7vvb09"
|
1541 |
+
}
|
1542 |
+
},
|
1543 |
+
{
|
1544 |
+
"cell_type": "code",
|
1545 |
+
"source": [
|
1546 |
+
"from llama_index.evaluation import generate_question_context_pairs\n",
|
1547 |
+
"from llama_index.llms import OpenAI\n",
|
1548 |
+
"\n",
|
1549 |
+
"llm = OpenAI(model=\"gpt-3.5-turbo\")\n",
|
1550 |
+
"rag_eval_dataset = generate_question_context_pairs(\n",
|
1551 |
+
" nodes,\n",
|
1552 |
+
" llm=llm,\n",
|
1553 |
+
" num_questions_per_chunk=1\n",
|
1554 |
+
")\n",
|
1555 |
+
"\n",
|
1556 |
+
"# We can save the dataset as a json file for later use.\n",
|
1557 |
+
"rag_eval_dataset.save_json(\"./rag_eval_dataset.json\")"
|
1558 |
+
],
|
1559 |
+
"metadata": {
|
1560 |
+
"id": "H8a3eKgKvckU"
|
1561 |
+
},
|
1562 |
+
"execution_count": null,
|
1563 |
+
"outputs": []
|
1564 |
+
},
|
1565 |
+
{
|
1566 |
+
"cell_type": "code",
|
1567 |
+
"source": [
|
1568 |
+
"from llama_index.finetuning.embeddings.common import (\n",
|
1569 |
+
" EmbeddingQAFinetuneDataset,\n",
|
1570 |
+
")\n",
|
1571 |
+
"rag_eval_dataset = EmbeddingQAFinetuneDataset.from_json(\n",
|
1572 |
+
" \"./rag_eval_dataset.json\"\n",
|
1573 |
+
")"
|
1574 |
+
],
|
1575 |
+
"metadata": {
|
1576 |
+
"id": "3sA1K84U254o"
|
1577 |
+
},
|
1578 |
+
"execution_count": 54,
|
1579 |
+
"outputs": []
|
1580 |
+
},
|
1581 |
+
{
|
1582 |
+
"cell_type": "code",
|
1583 |
+
"source": [
|
1584 |
+
"import pandas as pd\n",
|
1585 |
+
"\n",
|
1586 |
+
"def display_results_retriever(name, eval_results):\n",
|
1587 |
+
" \"\"\"Display results from evaluate.\"\"\"\n",
|
1588 |
+
"\n",
|
1589 |
+
" metric_dicts = []\n",
|
1590 |
+
" for eval_result in eval_results:\n",
|
1591 |
+
" metric_dict = eval_result.metric_vals_dict\n",
|
1592 |
+
" metric_dicts.append(metric_dict)\n",
|
1593 |
+
"\n",
|
1594 |
+
" full_df = pd.DataFrame(metric_dicts)\n",
|
1595 |
+
"\n",
|
1596 |
+
" hit_rate = full_df[\"hit_rate\"].mean()\n",
|
1597 |
+
" mrr = full_df[\"mrr\"].mean()\n",
|
1598 |
+
"\n",
|
1599 |
+
" metric_df = pd.DataFrame(\n",
|
1600 |
+
" {\"Retriever Name\": [name], \"Hit Rate\": [hit_rate], \"MRR\": [mrr]}\n",
|
1601 |
+
" )\n",
|
1602 |
+
"\n",
|
1603 |
+
" return metric_df"
|
1604 |
+
],
|
1605 |
+
"metadata": {
|
1606 |
+
"id": "H7ubvcbk27vr"
|
1607 |
+
},
|
1608 |
+
"execution_count": 55,
|
1609 |
+
"outputs": []
|
1610 |
+
},
|
1611 |
+
{
|
1612 |
+
"cell_type": "code",
|
1613 |
+
"source": [
|
1614 |
+
"from llama_index.evaluation import RetrieverEvaluator\n",
|
1615 |
+
"\n",
|
1616 |
+
"# We can evaluate the retievers with different top_k values.\n",
|
1617 |
+
"for i in [2, 4, 6, 8, 10]:\n",
|
1618 |
+
" vector_retriever = VectorIndexRetriever(index=vector_index, similarity_top_k=i)\n",
|
1619 |
+
" custom_retriever = CustomRetriever(vector_retriever, keyword_retriever, \"OR\")\n",
|
1620 |
+
" custom_query_engine = RetrieverQueryEngine(\n",
|
1621 |
+
" retriever=custom_retriever,\n",
|
1622 |
+
" response_synthesizer=response_synthesizer,\n",
|
1623 |
+
" )\n",
|
1624 |
+
" retriever_evaluator = RetrieverEvaluator.from_metric_names(\n",
|
1625 |
+
" [\"mrr\", \"hit_rate\"], retriever=custom_query_engine\n",
|
1626 |
+
" )\n",
|
1627 |
+
" eval_results = await retriever_evaluator.aevaluate_dataset(rag_eval_dataset)\n",
|
1628 |
+
" print(display_results_retriever(f\"Retriever top_{i}\", eval_results))"
|
1629 |
+
],
|
1630 |
+
"metadata": {
|
1631 |
+
"colab": {
|
1632 |
+
"base_uri": "https://localhost:8080/",
|
1633 |
+
"height": 435
|
1634 |
+
},
|
1635 |
+
"id": "uNLxDxoc2-Ac",
|
1636 |
+
"outputId": "93f03e7e-2590-46f0-fce0-3e8b29852a88"
|
1637 |
+
},
|
1638 |
+
"execution_count": 63,
|
1639 |
+
"outputs": [
|
1640 |
+
{
|
1641 |
+
"output_type": "error",
|
1642 |
+
"ename": "ValidationError",
|
1643 |
+
"evalue": "1 validation error for RetrieverEvaluator\nretriever\n instance of BaseRetriever expected (type=type_error.arbitrary_type; expected_arbitrary_type=BaseRetriever)",
|
1644 |
+
"traceback": [
|
1645 |
+
"\u001b[0;31m---------------------------------------------------------------------------\u001b[0m",
|
1646 |
+
"\u001b[0;31mValidationError\u001b[0m Traceback (most recent call last)",
|
1647 |
+
"\u001b[0;32m<ipython-input-63-b809d06970c4>\u001b[0m in \u001b[0;36m<cell line: 4>\u001b[0;34m()\u001b[0m\n\u001b[1;32m 9\u001b[0m \u001b[0mresponse_synthesizer\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mresponse_synthesizer\u001b[0m\u001b[0;34m,\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 10\u001b[0m )\n\u001b[0;32m---> 11\u001b[0;31m retriever_evaluator = RetrieverEvaluator.from_metric_names(\n\u001b[0m\u001b[1;32m 12\u001b[0m \u001b[0;34m[\u001b[0m\u001b[0;34m\"mrr\"\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m\"hit_rate\"\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretriever\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mcustom_query_engine\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 13\u001b[0m )\n",
|
1648 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/evaluation/retrieval/base.py\u001b[0m in \u001b[0;36mfrom_metric_names\u001b[0;34m(cls, metric_names, **kwargs)\u001b[0m\n\u001b[1;32m 95\u001b[0m \"\"\"\n\u001b[1;32m 96\u001b[0m \u001b[0mmetric_types\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mresolve_metrics\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmetric_names\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 97\u001b[0;31m \u001b[0;32mreturn\u001b[0m \u001b[0mcls\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmetrics\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0;34m[\u001b[0m\u001b[0mmetric\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m \u001b[0;32mfor\u001b[0m \u001b[0mmetric\u001b[0m \u001b[0;32min\u001b[0m \u001b[0mmetric_types\u001b[0m\u001b[0;34m]\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 98\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 99\u001b[0m \u001b[0;34m@\u001b[0m\u001b[0mabstractmethod\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
1649 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/llama_index/evaluation/retrieval/evaluator.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(self, metrics, retriever, **kwargs)\u001b[0m\n\u001b[1;32m 36\u001b[0m ) -> None:\n\u001b[1;32m 37\u001b[0m \u001b[0;34m\"\"\"Init params.\"\"\"\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m---> 38\u001b[0;31m \u001b[0msuper\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__init__\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0mmetrics\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mmetrics\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mretriever\u001b[0m\u001b[0;34m=\u001b[0m\u001b[0mretriever\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m**\u001b[0m\u001b[0mkwargs\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 39\u001b[0m \u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 40\u001b[0m async def _aget_retrieved_ids_and_texts(\n",
|
1650 |
+
"\u001b[0;32m/usr/local/lib/python3.10/dist-packages/pydantic/v1/main.py\u001b[0m in \u001b[0;36m__init__\u001b[0;34m(__pydantic_self__, **data)\u001b[0m\n\u001b[1;32m 339\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mfields_set\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalidation_error\u001b[0m \u001b[0;34m=\u001b[0m \u001b[0mvalidate_model\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m__pydantic_self__\u001b[0m\u001b[0;34m.\u001b[0m\u001b[0m__class__\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mdata\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 340\u001b[0m \u001b[0;32mif\u001b[0m \u001b[0mvalidation_error\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0;32m--> 341\u001b[0;31m \u001b[0;32mraise\u001b[0m \u001b[0mvalidation_error\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[0m\u001b[1;32m 342\u001b[0m \u001b[0;32mtry\u001b[0m\u001b[0;34m:\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n\u001b[1;32m 343\u001b[0m \u001b[0mobject_setattr\u001b[0m\u001b[0;34m(\u001b[0m\u001b[0m__pydantic_self__\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0;34m'__dict__'\u001b[0m\u001b[0;34m,\u001b[0m \u001b[0mvalues\u001b[0m\u001b[0;34m)\u001b[0m\u001b[0;34m\u001b[0m\u001b[0;34m\u001b[0m\u001b[0m\n",
|
1651 |
+
"\u001b[0;31mValidationError\u001b[0m: 1 validation error for RetrieverEvaluator\nretriever\n instance of BaseRetriever expected (type=type_error.arbitrary_type; expected_arbitrary_type=BaseRetriever)"
|
1652 |
+
]
|
1653 |
+
}
|
1654 |
+
]
|
1655 |
+
},
|
1656 |
+
{
|
1657 |
+
"cell_type": "code",
|
1658 |
+
"source": [],
|
1659 |
+
"metadata": {
|
1660 |
+
"id": "1MB1YD1E3EKM"
|
1661 |
+
},
|
1662 |
+
"execution_count": null,
|
1663 |
+
"outputs": []
|
1664 |
+
}
|
1665 |
+
]
|
1666 |
+
}
|