thng292 committed f54439d (parent: 380bc16)

Hope final
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ *.json filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1,2 @@
+ **/.env
+ **/.venv
.python-version ADDED
@@ -0,0 +1 @@
+ 3.13
Dockerfile ADDED
@@ -0,0 +1,44 @@
+ FROM python:3.13-slim
+
+ COPY --from=ghcr.io/astral-sh/uv:latest /uv /uvx /bin/
+
+ RUN apt-get update && \
+ apt-get install -y --no-install-recommends gcc g++ cmake && \
+ rm -rf /var/lib/apt/lists/*
+
+ WORKDIR /app
+
+ COPY src/ src/
+ COPY packages/data_prep/generated/ packages/data_prep/generated/
+
+ RUN uv pip install --system \
+ --extra-index-url https://abetlen.github.io/llama-cpp-python/whl/cpu \
+ "gradio>=6.12.0" \
+ "huggingface-hub>=0.30.0" \
+ "llama-cpp-python>=0.3.19" \
+ "llama-index>=0.14.20" \
+ "llama-index-embeddings-huggingface>=0.7.0" \
+ "numpy>=2.0.0"
+
+ RUN useradd -m -u 1000 user
+ USER user
+
+ ENV HOME=/home/user \
+ PYTHONPATH=/app/src \
+ GRADIO_SERVER_NAME=0.0.0.0
+
+ RUN python -c "\
+ from huggingface_hub import hf_hub_download, list_repo_files; \
+ files = list_repo_files('Jackrong/Qwen3.5-4B-Neo-GGUF'); \
+ gguf = [f for f in files if f.endswith('.gguf')]; \
+ target = next((f for f in gguf if 'Q4_K_M' in f.upper()), gguf[0]); \
+ print(f'Downloading {target}...'); \
+ hf_hub_download('Jackrong/Qwen3.5-4B-Neo-GGUF', target)"
+
+ RUN python -c "\
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding; \
+ HuggingFaceEmbedding(model_name='BAAI/bge-small-en-v1.5')"
+
+ EXPOSE 7860
+
+ CMD ["python", "-m", "big_data_application"]
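The two `RUN python -c` steps bake the LLM weights and the embedding model into the image so the Space boots without downloading anything at runtime. The first is easier to audit when expanded; a sketch of the same logic, using the repo id and quantization preference from the RUN line above:

```python
from huggingface_hub import hf_hub_download, list_repo_files

REPO = "Jackrong/Qwen3.5-4B-Neo-GGUF"

# List the repo contents and keep only GGUF weight files.
files = list_repo_files(REPO)
gguf = [f for f in files if f.endswith(".gguf")]

# Prefer a Q4_K_M quantization; otherwise fall back to the first GGUF.
target = next((f for f in gguf if "Q4_K_M" in f.upper()), gguf[0])

print(f"Downloading {target}...")
hf_hub_download(REPO, target)  # stored in the HF cache inside the image
```

Because these steps run after `USER user` with `HOME=/home/user`, the cache lands where the unprivileged runtime user can read it.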
README.md CHANGED
@@ -9,3 +9,5 @@ license: mit
  ---
 
  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+ - uv add llama-cpp-python --index https://abetlen.github.io/llama-cpp-python/whl/cpu
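The `--index` URL above points at prebuilt CPU wheels, which avoids compiling llama.cpp from source (the Dockerfile uses the equivalent `--extra-index-url`). Once installed, loading a GGUF model takes one call; a minimal sketch, where the model path and prompt are illustrative assumptions rather than part of this commit:

```python
from llama_cpp import Llama

# Path to a locally downloaded GGUF file (illustrative).
llm = Llama(model_path="models/Qwen3.5-4B-Neo.Q4_K_M.gguf", n_ctx=4096)

out = llm("Q: What is a property graph? A:", max_tokens=64)
print(out["choices"][0]["text"])
```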
packages/data_prep/.ipynb_checkpoints/main-checkpoint.ipynb ADDED
@@ -0,0 +1,294 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 1,
+ "id": "6137a317",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 1,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from dotenv import load_dotenv\n",
+ "load_dotenv()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "4a7dc84b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nest_asyncio\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "a47086ec",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "SyntaxError",
+ "evalue": "invalid syntax (188846707.py, line 6)",
+ "output_type": "error",
+ "traceback": [
+ " \u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[3]\u001b[39m\u001b[32m, line 6\u001b[39m\n\u001b[31m \u001b[39m\u001b[31mfrom llama_index.llms.openai\u001b[39m\n ^\n\u001b[31mSyntaxError\u001b[39m\u001b[31m:\u001b[39m invalid syntax\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "import datasets\n",
+ "from llama_index.core import PropertyGraphIndex, Document, Settings, load_index_from_storage\n",
+ "from llama_index.core.graph_stores import SimplePropertyGraphStore\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
+ "from llama_index.llms.openailike"
+ ]
+ },
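The SyntaxError in the cell above comes from the dangling last import: the module is named `openai_like`, not `openailike`, and the `from ... import` clause is left unfinished. A corrected import block, matching what enhance.ipynb in this same commit uses:

```python
import os
import datasets
from llama_index.core import (
    PropertyGraphIndex,
    Document,
    Settings,
    load_index_from_storage,
)
from llama_index.core.graph_stores import SimplePropertyGraphStore
from llama_index.embeddings.huggingface import HuggingFaceEmbedding
from llama_index.llms.openai_like import OpenAILike  # was: from llama_index.llms.openailike
```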
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "28b17221",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Warning: You are sending unauthenticated requests to the HF Hub. Please set a HF_TOKEN to enable higher rate limits and faster downloads.\n"
+ ]
+ }
+ ],
+ "source": [
+ "og_data = datasets.load_dataset(\"gamino/wiki_medical_terms\", split=\"train[:100]\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "ca0aa288",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "data = [\n",
+ " Document(id_=str(idx), text=text) for idx, text in enumerate(og_data[\"page_text\"])\n",
+ "]"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "53a4d694",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "100"
+ ]
+ },
+ "execution_count": 6,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(data)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "6d77d82f",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "graph_store = SimplePropertyGraphStore()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "e08d931b",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "llm = OpenAI(\n",
+ " model=\"DeepSeek-V3.2\",\n",
+ " api_key=os.getenv(\"AZURE_API_KEY\"),\n",
+ " api_base=\"https://thong-api-hub.services.ai.azure.com/models\",\n",
+ ")\n",
+ "Settings.llm = llm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "8d7d9e4a",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "ValueError",
+ "evalue": "Unknown model 'DeepSeek-V3.2'. Please provide a valid OpenAI model name in: o1, o1-2024-12-17, o1-pro, o1-pro-2025-03-19, o1-preview, o1-preview-2024-09-12, o1-mini, o1-mini-2024-09-12, o3-mini, o3-mini-2025-01-31, o3, o3-2025-04-16, o3-pro, o3-pro-2025-06-10, o4-mini, o4-mini-2025-04-16, gpt-5, gpt-5-2025-08-07, gpt-5-chat, gpt-5-chat-latest, gpt-5-mini, gpt-5-mini-2025-08-07, gpt-5-nano, gpt-5-nano-2025-08-07, gpt-5-pro, gpt-5-pro-2025-10-06, gpt-5.1, gpt-5.1-2025-11-13, gpt-5.1-chat-latest, gpt-5.2, gpt-5.2-2025-12-11, gpt-5.2-chat-latest, gpt-5.3, gpt-5.3-chat-latest, gpt-5.4, gpt-5.4-2026-03-05, gpt-5.4-mini, gpt-5.4-nano, gpt-5.4-chat-latest, gpt-4, gpt-4-32k, gpt-4-1106-preview, gpt-4-0125-preview, gpt-4-turbo-preview, gpt-4-vision-preview, gpt-4-1106-vision-preview, gpt-4-turbo-2024-04-09, gpt-4-turbo, gpt-4o, gpt-4o-audio-preview, gpt-4o-audio-preview-2024-12-17, gpt-4o-audio-preview-2024-10-01, gpt-4o-mini-audio-preview, gpt-4o-mini-audio-preview-2024-12-17, gpt-4o-2024-05-13, gpt-4o-2024-08-06, gpt-4o-2024-11-20, gpt-4.5-preview, gpt-4.5-preview-2025-02-27, chatgpt-4o-latest, gpt-4o-mini, gpt-4o-mini-2024-07-18, gpt-4-0613, gpt-4-32k-0613, gpt-4-0314, gpt-4-32k-0314, gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.1-2025-04-14, gpt-4.1-mini-2025-04-14, gpt-4.1-nano-2025-04-14, gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-3.5-turbo-0125, gpt-3.5-turbo-1106, gpt-3.5-turbo-0613, gpt-3.5-turbo-16k-0613, gpt-3.5-turbo-0301, text-davinci-003, text-davinci-002, gpt-3.5-turbo-instruct, text-ada-001, text-babbage-001, text-curie-001, ada, babbage, curie, davinci, gpt-35-turbo-16k, gpt-35-turbo, gpt-35-turbo-0125, gpt-35-turbo-1106, gpt-35-turbo-0613, gpt-35-turbo-16k-0613",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
+ "\u001b[31mValueError\u001b[39m Traceback (most recent call last)",
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[12]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m Settings.llm.complete(\u001b[33m\"Hello\"\u001b[39m)\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index_instrumentation/dispatcher.py:413\u001b[39m, in \u001b[36mDispatcher.span.<locals>.wrapper\u001b[39m\u001b[34m(func, instance, args, kwargs)\u001b[39m\n\u001b[32m 410\u001b[39m _logger.debug(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mFailed to reset active_span_id: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 412\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m413\u001b[39m result = \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 414\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(result, asyncio.Future):\n\u001b[32m 415\u001b[39m \u001b[38;5;66;03m# If the result is a Future, wrap it\u001b[39;00m\n\u001b[32m 416\u001b[39m new_future = asyncio.ensure_future(result)\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/llms/callbacks.py:447\u001b[39m, in \u001b[36mllm_completion_callback.<locals>.wrap.<locals>.wrapped_llm_predict\u001b[39m\u001b[34m(_self, *args, **kwargs)\u001b[39m\n\u001b[32m 444\u001b[39m _self.rate_limiter.acquire()\n\u001b[32m 446\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m447\u001b[39m f_return_val = \u001b[43mf\u001b[49m\u001b[43m(\u001b[49m\u001b[43m_self\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 448\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n\u001b[32m 449\u001b[39m callback_manager.on_event_end(\n\u001b[32m 450\u001b[39m CBEventType.LLM,\n\u001b[32m 451\u001b[39m payload={EventPayload.EXCEPTION: e},\n\u001b[32m 452\u001b[39m event_id=event_id,\n\u001b[32m 453\u001b[39m )\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/llms/openai/base.py:425\u001b[39m, in \u001b[36mOpenAI.complete\u001b[39m\u001b[34m(self, prompt, formatted, **kwargs)\u001b[39m\n\u001b[32m 420\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.modalities \u001b[38;5;129;01mand\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33maudio\u001b[39m\u001b[33m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.modalities:\n\u001b[32m 421\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 422\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mAudio is not supported for completion. Use chat/achat instead.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 423\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m425\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_use_chat_completions\u001b[49m\u001b[43m(\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m:\n\u001b[32m 426\u001b[39m complete_fn = chat_to_completion_decorator(\u001b[38;5;28mself\u001b[39m._chat)\n\u001b[32m 427\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/llms/openai/base.py:444\u001b[39m, in \u001b[36mOpenAI._use_chat_completions\u001b[39m\u001b[34m(self, kwargs)\u001b[39m\n\u001b[32m 442\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[33m\"\u001b[39m\u001b[33muse_chat_completions\u001b[39m\u001b[33m\"\u001b[39m \u001b[38;5;129;01min\u001b[39;00m kwargs:\n\u001b[32m 443\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m kwargs[\u001b[33m\"\u001b[39m\u001b[33muse_chat_completions\u001b[39m\u001b[33m\"\u001b[39m]\n\u001b[32m--> \u001b[39m\u001b[32m444\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mmetadata\u001b[49m.is_chat_model\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/llms/openai/base.py:385\u001b[39m, in \u001b[36mOpenAI.metadata\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 382\u001b[39m \u001b[38;5;129m@property\u001b[39m\n\u001b[32m 383\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mmetadata\u001b[39m(\u001b[38;5;28mself\u001b[39m) -> LLMMetadata:\n\u001b[32m 384\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m LLMMetadata(\n\u001b[32m--> \u001b[39m\u001b[32m385\u001b[39m context_window=\u001b[43mopenai_modelname_to_contextsize\u001b[49m\u001b[43m(\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_get_model_name\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m)\u001b[49m,\n\u001b[32m 386\u001b[39m num_output=\u001b[38;5;28mself\u001b[39m.max_tokens \u001b[38;5;129;01mor\u001b[39;00m -\u001b[32m1\u001b[39m,\n\u001b[32m 387\u001b[39m is_chat_model=is_chat_model(model=\u001b[38;5;28mself\u001b[39m._get_model_name()),\n\u001b[32m 388\u001b[39m is_function_calling_model=is_function_calling_model(\n\u001b[32m 389\u001b[39m model=\u001b[38;5;28mself\u001b[39m._get_model_name()\n\u001b[32m 390\u001b[39m ),\n\u001b[32m 391\u001b[39m model_name=\u001b[38;5;28mself\u001b[39m.model,\n\u001b[32m 392\u001b[39m \u001b[38;5;66;03m# TODO: Temp for O1 beta\u001b[39;00m\n\u001b[32m 393\u001b[39m system_role=MessageRole.USER\n\u001b[32m 394\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.model \u001b[38;5;129;01min\u001b[39;00m O1_MODELS\n\u001b[32m 395\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m MessageRole.SYSTEM,\n\u001b[32m 396\u001b[39m )\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/llms/openai/utils.py:383\u001b[39m, in \u001b[36mopenai_modelname_to_contextsize\u001b[39m\u001b[34m(modelname)\u001b[39m\n\u001b[32m 378\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 379\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mOpenAI model \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodelname\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m has been discontinued. \u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 380\u001b[39m \u001b[33m\"\u001b[39m\u001b[33mPlease choose another model.\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 381\u001b[39m )\n\u001b[32m 382\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m modelname \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;129;01min\u001b[39;00m ALL_AVAILABLE_MODELS:\n\u001b[32m--> \u001b[39m\u001b[32m383\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;167;01mValueError\u001b[39;00m(\n\u001b[32m 384\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mUnknown model \u001b[39m\u001b[38;5;132;01m{\u001b[39;00mmodelname\u001b[38;5;132;01m!r}\u001b[39;00m\u001b[33m. Please provide a valid OpenAI model name in:\u001b[39m\u001b[33m\"\u001b[39m\n\u001b[32m 385\u001b[39m \u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33m \u001b[39m\u001b[38;5;132;01m{\u001b[39;00m\u001b[33m'\u001b[39m\u001b[33m, \u001b[39m\u001b[33m'\u001b[39m.join(ALL_AVAILABLE_MODELS.keys())\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m\n\u001b[32m 386\u001b[39m )\n\u001b[32m 387\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m ALL_AVAILABLE_MODELS[modelname]\n",
+ "\u001b[31mValueError\u001b[39m: Unknown model 'DeepSeek-V3.2'. Please provide a valid OpenAI model name in: o1, o1-2024-12-17, o1-pro, o1-pro-2025-03-19, o1-preview, o1-preview-2024-09-12, o1-mini, o1-mini-2024-09-12, o3-mini, o3-mini-2025-01-31, o3, o3-2025-04-16, o3-pro, o3-pro-2025-06-10, o4-mini, o4-mini-2025-04-16, gpt-5, gpt-5-2025-08-07, gpt-5-chat, gpt-5-chat-latest, gpt-5-mini, gpt-5-mini-2025-08-07, gpt-5-nano, gpt-5-nano-2025-08-07, gpt-5-pro, gpt-5-pro-2025-10-06, gpt-5.1, gpt-5.1-2025-11-13, gpt-5.1-chat-latest, gpt-5.2, gpt-5.2-2025-12-11, gpt-5.2-chat-latest, gpt-5.3, gpt-5.3-chat-latest, gpt-5.4, gpt-5.4-2026-03-05, gpt-5.4-mini, gpt-5.4-nano, gpt-5.4-chat-latest, gpt-4, gpt-4-32k, gpt-4-1106-preview, gpt-4-0125-preview, gpt-4-turbo-preview, gpt-4-vision-preview, gpt-4-1106-vision-preview, gpt-4-turbo-2024-04-09, gpt-4-turbo, gpt-4o, gpt-4o-audio-preview, gpt-4o-audio-preview-2024-12-17, gpt-4o-audio-preview-2024-10-01, gpt-4o-mini-audio-preview, gpt-4o-mini-audio-preview-2024-12-17, gpt-4o-2024-05-13, gpt-4o-2024-08-06, gpt-4o-2024-11-20, gpt-4.5-preview, gpt-4.5-preview-2025-02-27, chatgpt-4o-latest, gpt-4o-mini, gpt-4o-mini-2024-07-18, gpt-4-0613, gpt-4-32k-0613, gpt-4-0314, gpt-4-32k-0314, gpt-4.1, gpt-4.1-mini, gpt-4.1-nano, gpt-4.1-2025-04-14, gpt-4.1-mini-2025-04-14, gpt-4.1-nano-2025-04-14, gpt-3.5-turbo, gpt-3.5-turbo-16k, gpt-3.5-turbo-0125, gpt-3.5-turbo-1106, gpt-3.5-turbo-0613, gpt-3.5-turbo-16k-0613, gpt-3.5-turbo-0301, text-davinci-003, text-davinci-002, gpt-3.5-turbo-instruct, text-ada-001, text-babbage-001, text-curie-001, ada, babbage, curie, davinci, gpt-35-turbo-16k, gpt-35-turbo, gpt-35-turbo-0125, gpt-35-turbo-1106, gpt-35-turbo-0613, gpt-35-turbo-16k-0613"
+ ]
+ }
+ ],
+ "source": [
+ "Settings.llm.complete(\"Hello\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "711fd553",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Loading weights: 100%|██████████| 199/199 [00:00<00:00, 2394.67it/s]\n",
+ "\u001b[1mBertModel LOAD REPORT\u001b[0m from: BAAI/bge-small-en-v1.5\n",
+ "Key | Status | | \n",
+ "------------------------+------------+--+-\n",
+ "embeddings.position_ids | UNEXPECTED | | \n",
+ "\n",
+ "Notes:\n",
+ "- UNEXPECTED:\tcan be ignored when loading from different task/architecture; not ok if you expect identical arch.\n"
+ ]
+ }
+ ],
+ "source": [
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
+ "\n",
+ "Settings.embed_model = embed_model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "6403ff0b",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Applying transformations: 0%| | 0/1 [00:00<?, ?it/s]Task was destroyed but it is pending!\n",
+ "task: <Task pending name='Task-1319' coro=<_async_in_context.<locals>.run_in_context() running at /media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/ipykernel/utils.py:60> wait_for=<Task pending name='Task-1321' coro=<Kernel.shell_main() running at /media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/ipykernel/kernelbase.py:597> cb=[Task.__wakeup()]> cb=[ZMQStream._run_callback.<locals>._log_error() at /media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/zmq/eventloop/zmqstream.py:563]>\n",
+ "/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/pydantic/main.py:112: RuntimeWarning: coroutine 'Kernel.shell_main' was never awaited\n",
+ " 'validate_assignment': lambda model, name, val: model.__pydantic_validator__.validate_assignment(model, name, val), # pyright: ignore[reportAssignmentType]\n",
+ "RuntimeWarning: Enable tracemalloc to get the object allocation traceback\n",
+ "Task was destroyed but it is pending!\n",
+ "task: <Task pending name='Task-1321' coro=<Kernel.shell_main() running at /media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/ipykernel/kernelbase.py:597> cb=[Task.__wakeup()]>\n",
+ "Applying transformations: 100%|██████████| 1/1 [00:01<00:00, 1.66s/it]\n",
+ "Applying transformations: 100%|██████████| 2/2 [09:57<00:00, 298.86s/it]\n",
+ "Generating embeddings: 2%|▏ | 10/499 [00:01<01:10, 6.99it/s]"
+ ]
+ },
+ {
+ "ename": "KeyboardInterrupt",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
+ "\u001b[31mKeyboardInterrupt\u001b[39m Traceback (most recent call last)",
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[11]\u001b[39m\u001b[32m, line 1\u001b[39m\n\u001b[32m----> \u001b[39m\u001b[32m1\u001b[39m index = \u001b[43mPropertyGraphIndex\u001b[49m\u001b[43m.\u001b[49m\u001b[43mfrom_documents\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 2\u001b[39m \u001b[43m \u001b[49m\u001b[43mdata\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mshow_progress\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mTrue\u001b[39;49;00m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mproperty_graph_store\u001b[49m\u001b[43m=\u001b[49m\u001b[43mgraph_store\u001b[49m\u001b[43m,\u001b[49m\u001b[43muse_async\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43;01mFalse\u001b[39;49;00m\n\u001b[32m 3\u001b[39m \u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/indices/base.py:122\u001b[39m, in \u001b[36mBaseIndex.from_documents\u001b[39m\u001b[34m(cls, documents, storage_context, show_progress, callback_manager, transformations, **kwargs)\u001b[39m\n\u001b[32m 113\u001b[39m docstore.set_document_hash(doc.id_, doc.hash)\n\u001b[32m 115\u001b[39m nodes = run_transformations(\n\u001b[32m 116\u001b[39m documents, \u001b[38;5;66;03m# type: ignore\u001b[39;00m\n\u001b[32m 117\u001b[39m transformations,\n\u001b[32m 118\u001b[39m show_progress=show_progress,\n\u001b[32m 119\u001b[39m **kwargs,\n\u001b[32m 120\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m122\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mcls\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[32m 123\u001b[39m \u001b[43m \u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m=\u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 124\u001b[39m \u001b[43m \u001b[49m\u001b[43mstorage_context\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstorage_context\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 125\u001b[39m \u001b[43m \u001b[49m\u001b[43mcallback_manager\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcallback_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 126\u001b[39m \u001b[43m \u001b[49m\u001b[43mshow_progress\u001b[49m\u001b[43m=\u001b[49m\u001b[43mshow_progress\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 127\u001b[39m \u001b[43m \u001b[49m\u001b[43mtransformations\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtransformations\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 128\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 129\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/indices/property_graph/base.py:136\u001b[39m, in \u001b[36mPropertyGraphIndex.__init__\u001b[39m\u001b[34m(self, nodes, llm, kg_extractors, property_graph_store, vector_store, use_async, embed_model, embed_kg_nodes, callback_manager, transformations, storage_context, show_progress, **kwargs)\u001b[39m\n\u001b[32m 130\u001b[39m \u001b[38;5;28mself\u001b[39m._embed_kg_nodes = embed_kg_nodes\n\u001b[32m 131\u001b[39m \u001b[38;5;28mself\u001b[39m._override_vector_store = (\n\u001b[32m 132\u001b[39m vector_store \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 133\u001b[39m \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m storage_context.property_graph_store.supports_vector_queries\n\u001b[32m 134\u001b[39m )\n\u001b[32m--> \u001b[39m\u001b[32m136\u001b[39m \u001b[38;5;28;43msuper\u001b[39;49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\u001b[43m.\u001b[49m\u001b[34;43m__init__\u001b[39;49m\u001b[43m(\u001b[49m\n\u001b[32m 137\u001b[39m \u001b[43m \u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m=\u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 138\u001b[39m \u001b[43m \u001b[49m\u001b[43mcallback_manager\u001b[49m\u001b[43m=\u001b[49m\u001b[43mcallback_manager\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 139\u001b[39m \u001b[43m \u001b[49m\u001b[43mstorage_context\u001b[49m\u001b[43m=\u001b[49m\u001b[43mstorage_context\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 140\u001b[39m \u001b[43m \u001b[49m\u001b[43mtransformations\u001b[49m\u001b[43m=\u001b[49m\u001b[43mtransformations\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 141\u001b[39m \u001b[43m \u001b[49m\u001b[43mshow_progress\u001b[49m\u001b[43m=\u001b[49m\u001b[43mshow_progress\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 142\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 143\u001b[39m \u001b[43m\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/indices/base.py:79\u001b[39m, in \u001b[36mBaseIndex.__init__\u001b[39m\u001b[34m(self, nodes, objects, index_struct, storage_context, callback_manager, transformations, show_progress, **kwargs)\u001b[39m\n\u001b[32m 77\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m index_struct \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 78\u001b[39m nodes = nodes \u001b[38;5;129;01mor\u001b[39;00m []\n\u001b[32m---> \u001b[39m\u001b[32m79\u001b[39m index_struct = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mbuild_index_from_nodes\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 80\u001b[39m \u001b[43m \u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m \u001b[49m\u001b[43m+\u001b[49m\u001b[43m \u001b[49m\u001b[43mobjects\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore\u001b[39;49;00m\n\u001b[32m 81\u001b[39m \u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;66;43;03m# type: ignore\u001b[39;49;00m\n\u001b[32m 82\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 83\u001b[39m \u001b[38;5;28mself\u001b[39m._index_struct = index_struct\n\u001b[32m 84\u001b[39m \u001b[38;5;28mself\u001b[39m._storage_context.index_store.add_index_struct(\u001b[38;5;28mself\u001b[39m._index_struct)\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/indices/base.py:189\u001b[39m, in \u001b[36mBaseIndex.build_index_from_nodes\u001b[39m\u001b[34m(self, nodes, **build_kwargs)\u001b[39m\n\u001b[32m 187\u001b[39m \u001b[38;5;250m\u001b[39m\u001b[33;03m\"\"\"Build the index from nodes.\"\"\"\u001b[39;00m\n\u001b[32m 188\u001b[39m \u001b[38;5;28mself\u001b[39m._docstore.add_documents(nodes, allow_update=\u001b[38;5;28;01mTrue\u001b[39;00m)\n\u001b[32m--> \u001b[39m\u001b[32m189\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_build_index_from_nodes\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mbuild_kwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/indices/property_graph/base.py:336\u001b[39m, in \u001b[36mPropertyGraphIndex._build_index_from_nodes\u001b[39m\u001b[34m(self, nodes, **build_kwargs)\u001b[39m\n\u001b[32m 332\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m_build_index_from_nodes\u001b[39m(\n\u001b[32m 333\u001b[39m \u001b[38;5;28mself\u001b[39m, nodes: Optional[Sequence[BaseNode]], **build_kwargs: Any\n\u001b[32m 334\u001b[39m ) -> IndexLPG:\n\u001b[32m 335\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"Build index from nodes.\"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m336\u001b[39m nodes = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_insert_nodes\u001b[49m\u001b[43m(\u001b[49m\u001b[43mnodes\u001b[49m\u001b[43m \u001b[49m\u001b[38;5;129;43;01mor\u001b[39;49;00m\u001b[43m \u001b[49m\u001b[43m[\u001b[49m\u001b[43m]\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 338\u001b[39m \u001b[38;5;66;03m# this isn't really used or needed\u001b[39;00m\n\u001b[32m 339\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m IndexLPG()\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/indices/property_graph/base.py:265\u001b[39m, in \u001b[36mPropertyGraphIndex._insert_nodes\u001b[39m\u001b[34m(self, nodes)\u001b[39m\n\u001b[32m 259\u001b[39m embeddings = asyncio.run(\n\u001b[32m 260\u001b[39m \u001b[38;5;28mself\u001b[39m._embed_model.aget_text_embedding_batch(\n\u001b[32m 261\u001b[39m node_texts, show_progress=\u001b[38;5;28mself\u001b[39m._show_progress\n\u001b[32m 262\u001b[39m )\n\u001b[32m 263\u001b[39m )\n\u001b[32m 264\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m265\u001b[39m embeddings = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_embed_model\u001b[49m\u001b[43m.\u001b[49m\u001b[43mget_text_embedding_batch\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 266\u001b[39m \u001b[43m \u001b[49m\u001b[43mnode_texts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mshow_progress\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_show_progress\u001b[49m\n\u001b[32m 267\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 269\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m node, embedding \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mzip\u001b[39m(nodes, embeddings):\n\u001b[32m 270\u001b[39m node.embedding = embedding\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index_instrumentation/dispatcher.py:413\u001b[39m, in \u001b[36mDispatcher.span.<locals>.wrapper\u001b[39m\u001b[34m(func, instance, args, kwargs)\u001b[39m\n\u001b[32m 410\u001b[39m _logger.debug(\u001b[33mf\u001b[39m\u001b[33m\"\u001b[39m\u001b[33mFailed to reset active_span_id: \u001b[39m\u001b[38;5;132;01m{\u001b[39;00me\u001b[38;5;132;01m}\u001b[39;00m\u001b[33m\"\u001b[39m)\n\u001b[32m 412\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m413\u001b[39m result = \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 414\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(result, asyncio.Future):\n\u001b[32m 415\u001b[39m \u001b[38;5;66;03m# If the result is a Future, wrap it\u001b[39;00m\n\u001b[32m 416\u001b[39m new_future = asyncio.ensure_future(result)\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/core/base/embeddings/base.py:508\u001b[39m, in \u001b[36mBaseEmbedding.get_text_embedding_batch\u001b[39m\u001b[34m(self, texts, show_progress, **kwargs)\u001b[39m\n\u001b[32m 506\u001b[39m \u001b[38;5;28mself\u001b[39m.rate_limiter.acquire()\n\u001b[32m 507\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28mself\u001b[39m.embeddings_cache:\n\u001b[32m--> \u001b[39m\u001b[32m508\u001b[39m embeddings = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_get_text_embeddings\u001b[49m\u001b[43m(\u001b[49m\u001b[43mcur_batch\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 509\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.embeddings_cache \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 510\u001b[39m embeddings = \u001b[38;5;28mself\u001b[39m._get_text_embeddings_cached(cur_batch)\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/embeddings/huggingface/base.py:333\u001b[39m, in \u001b[36mHuggingFaceEmbedding._get_text_embeddings\u001b[39m\u001b[34m(self, texts)\u001b[39m\n\u001b[32m 322\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m_get_text_embeddings\u001b[39m(\u001b[38;5;28mself\u001b[39m, texts: List[\u001b[38;5;28mstr\u001b[39m]) -> List[List[\u001b[38;5;28mfloat\u001b[39m]]:\n\u001b[32m 323\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 324\u001b[39m \u001b[33;03m Generates Embeddings for text.\u001b[39;00m\n\u001b[32m 325\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 331\u001b[39m \n\u001b[32m 332\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m333\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_embed\u001b[49m\u001b[43m(\u001b[49m\u001b[43mtexts\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprompt_name\u001b[49m\u001b[43m=\u001b[49m\u001b[33;43m\"\u001b[39;49m\u001b[33;43mtext\u001b[39;49m\u001b[33;43m\"\u001b[39;49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/embeddings/huggingface/base.py:268\u001b[39m, in \u001b[36mHuggingFaceEmbedding._embed\u001b[39m\u001b[34m(self, inputs, prompt_name)\u001b[39m\n\u001b[32m 248\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m_embed\u001b[39m(\n\u001b[32m 249\u001b[39m \u001b[38;5;28mself\u001b[39m,\n\u001b[32m 250\u001b[39m inputs: List[Union[\u001b[38;5;28mstr\u001b[39m, BytesIO]],\n\u001b[32m 251\u001b[39m prompt_name: Optional[\u001b[38;5;28mstr\u001b[39m] = \u001b[38;5;28;01mNone\u001b[39;00m,\n\u001b[32m 252\u001b[39m ) -> List[List[\u001b[38;5;28mfloat\u001b[39m]]:\n\u001b[32m 253\u001b[39m \u001b[38;5;250m \u001b[39m\u001b[33;03m\"\"\"\u001b[39;00m\n\u001b[32m 254\u001b[39m \u001b[33;03m Generates Embeddings with input validation and retry mechanism.\u001b[39;00m\n\u001b[32m 255\u001b[39m \n\u001b[32m (...)\u001b[39m\u001b[32m 266\u001b[39m \n\u001b[32m 267\u001b[39m \u001b[33;03m \"\"\"\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m268\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_embed_with_retry\u001b[49m\u001b[43m(\u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43mprompt_name\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/tenacity/__init__.py:331\u001b[39m, in \u001b[36mBaseRetrying.wraps.<locals>.wrapped_f\u001b[39m\u001b[34m(*args, **kw)\u001b[39m\n\u001b[32m 329\u001b[39m copy = \u001b[38;5;28mself\u001b[39m.copy()\n\u001b[32m 330\u001b[39m wrapped_f.statistics = copy.statistics \u001b[38;5;66;03m# type: ignore[attr-defined]\u001b[39;00m\n\u001b[32m--> \u001b[39m\u001b[32m331\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mcopy\u001b[49m\u001b[43m(\u001b[49m\u001b[43mf\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkw\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/tenacity/__init__.py:470\u001b[39m, in \u001b[36mRetrying.__call__\u001b[39m\u001b[34m(self, fn, *args, **kwargs)\u001b[39m\n\u001b[32m 468\u001b[39m retry_state = RetryCallState(retry_object=\u001b[38;5;28mself\u001b[39m, fn=fn, args=args, kwargs=kwargs)\n\u001b[32m 469\u001b[39m \u001b[38;5;28;01mwhile\u001b[39;00m \u001b[38;5;28;01mTrue\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m470\u001b[39m do = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43miter\u001b[49m\u001b[43m(\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m=\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 471\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(do, DoAttempt):\n\u001b[32m 472\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/tenacity/__init__.py:371\u001b[39m, in \u001b[36mBaseRetrying.iter\u001b[39m\u001b[34m(self, retry_state)\u001b[39m\n\u001b[32m 369\u001b[39m result = \u001b[38;5;28;01mNone\u001b[39;00m\n\u001b[32m 370\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m action \u001b[38;5;129;01min\u001b[39;00m \u001b[38;5;28mself\u001b[39m.iter_state.actions:\n\u001b[32m--> \u001b[39m\u001b[32m371\u001b[39m result = \u001b[43maction\u001b[49m\u001b[43m(\u001b[49m\u001b[43mretry_state\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 372\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m result\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/tenacity/__init__.py:393\u001b[39m, in \u001b[36mBaseRetrying._post_retry_check_actions.<locals>.<lambda>\u001b[39m\u001b[34m(rs)\u001b[39m\n\u001b[32m 391\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34m_post_retry_check_actions\u001b[39m(\u001b[38;5;28mself\u001b[39m, retry_state: \u001b[33m\"\u001b[39m\u001b[33mRetryCallState\u001b[39m\u001b[33m\"\u001b[39m) -> \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 392\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m (\u001b[38;5;28mself\u001b[39m.iter_state.is_explicit_retry \u001b[38;5;129;01mor\u001b[39;00m \u001b[38;5;28mself\u001b[39m.iter_state.retry_run_result):\n\u001b[32m--> \u001b[39m\u001b[32m393\u001b[39m \u001b[38;5;28mself\u001b[39m._add_action_func(\u001b[38;5;28;01mlambda\u001b[39;00m rs: \u001b[43mrs\u001b[49m\u001b[43m.\u001b[49m\u001b[43moutcome\u001b[49m\u001b[43m.\u001b[49m\u001b[43mresult\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m)\n\u001b[32m 394\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m\n\u001b[32m 396\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m.after \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m~/.local/share/uv/python/cpython-3.13.5-linux-x86_64-gnu/lib/python3.13/concurrent/futures/_base.py:449\u001b[39m, in \u001b[36mFuture.result\u001b[39m\u001b[34m(self, timeout)\u001b[39m\n\u001b[32m 447\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m CancelledError()\n\u001b[32m 448\u001b[39m \u001b[38;5;28;01melif\u001b[39;00m \u001b[38;5;28mself\u001b[39m._state == FINISHED:\n\u001b[32m--> \u001b[39m\u001b[32m449\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m__get_result\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 451\u001b[39m \u001b[38;5;28mself\u001b[39m._condition.wait(timeout)\n\u001b[32m 453\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m._state \u001b[38;5;129;01min\u001b[39;00m [CANCELLED, CANCELLED_AND_NOTIFIED]:\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m~/.local/share/uv/python/cpython-3.13.5-linux-x86_64-gnu/lib/python3.13/concurrent/futures/_base.py:401\u001b[39m, in \u001b[36mFuture.__get_result\u001b[39m\u001b[34m(self)\u001b[39m\n\u001b[32m 399\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28mself\u001b[39m._exception \u001b[38;5;129;01mis\u001b[39;00m \u001b[38;5;129;01mnot\u001b[39;00m \u001b[38;5;28;01mNone\u001b[39;00m:\n\u001b[32m 400\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m401\u001b[39m \u001b[38;5;28;01mraise\u001b[39;00m \u001b[38;5;28mself\u001b[39m._exception\n\u001b[32m 402\u001b[39m \u001b[38;5;28;01mfinally\u001b[39;00m:\n\u001b[32m 403\u001b[39m \u001b[38;5;66;03m# Break a reference cycle with the exception in self._exception\u001b[39;00m\n\u001b[32m 404\u001b[39m \u001b[38;5;28mself\u001b[39m = \u001b[38;5;28;01mNone\u001b[39;00m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/tenacity/__init__.py:473\u001b[39m, in \u001b[36mRetrying.__call__\u001b[39m\u001b[34m(self, fn, *args, **kwargs)\u001b[39m\n\u001b[32m 471\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[38;5;28misinstance\u001b[39m(do, DoAttempt):\n\u001b[32m 472\u001b[39m \u001b[38;5;28;01mtry\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m473\u001b[39m result = \u001b[43mfn\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 474\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mBaseException\u001b[39;00m: \u001b[38;5;66;03m# noqa: B902\u001b[39;00m\n\u001b[32m 475\u001b[39m retry_state.set_exception(sys.exc_info()) \u001b[38;5;66;03m# type: ignore[arg-type]\u001b[39;00m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/llama_index/embeddings/huggingface/base.py:236\u001b[39m, in \u001b[36mHuggingFaceEmbedding._embed_with_retry\u001b[39m\u001b[34m(self, inputs, prompt_name)\u001b[39m\n\u001b[32m 234\u001b[39m \u001b[38;5;28mself\u001b[39m._model.stop_multi_process_pool(pool=pool)\n\u001b[32m 235\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m--> \u001b[39m\u001b[32m236\u001b[39m emb = \u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43m_model\u001b[49m\u001b[43m.\u001b[49m\u001b[43mencode\u001b[49m\u001b[43m(\u001b[49m\n\u001b[32m 237\u001b[39m \u001b[43m \u001b[49m\u001b[43minputs\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 238\u001b[39m \u001b[43m \u001b[49m\u001b[43mbatch_size\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43membed_batch_size\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 239\u001b[39m \u001b[43m \u001b[49m\u001b[43mprompt_name\u001b[49m\u001b[43m=\u001b[49m\u001b[43mprompt_name\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 240\u001b[39m \u001b[43m \u001b[49m\u001b[43mnormalize_embeddings\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mnormalize\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 241\u001b[39m \u001b[43m \u001b[49m\u001b[43mshow_progress_bar\u001b[49m\u001b[43m=\u001b[49m\u001b[38;5;28;43mself\u001b[39;49m\u001b[43m.\u001b[49m\u001b[43mshow_progress_bar\u001b[49m\u001b[43m,\u001b[49m\n\u001b[32m 242\u001b[39m \u001b[43m \u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 243\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m emb.tolist()\n\u001b[32m 244\u001b[39m \u001b[38;5;28;01mexcept\u001b[39;00m \u001b[38;5;167;01mException\u001b[39;00m \u001b[38;5;28;01mas\u001b[39;00m e:\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/torch/utils/_contextlib.py:124\u001b[39m, in \u001b[36mcontext_decorator.<locals>.decorate_context\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m 120\u001b[39m \u001b[38;5;129m@functools\u001b[39m.wraps(func)\n\u001b[32m 121\u001b[39m \u001b[38;5;28;01mdef\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34mdecorate_context\u001b[39m(*args, **kwargs):\n\u001b[32m 122\u001b[39m \u001b[38;5;66;03m# pyrefly: ignore [bad-context-manager]\u001b[39;00m\n\u001b[32m 123\u001b[39m \u001b[38;5;28;01mwith\u001b[39;00m ctx_factory():\n\u001b[32m--> \u001b[39m\u001b[32m124\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/sentence_transformers/util/decorators.py:41\u001b[39m, in \u001b[36mdeprecated_kwargs.<locals>.decorator.<locals>.wrapper\u001b[39m\u001b[34m(*args, **kwargs)\u001b[39m\n\u001b[32m 39\u001b[39m \u001b[38;5;28;01melse\u001b[39;00m:\n\u001b[32m 40\u001b[39m kwargs.pop(old_name)\n\u001b[32m---> \u001b[39m\u001b[32m41\u001b[39m \u001b[38;5;28;01mreturn\u001b[39;00m \u001b[43mfunc\u001b[49m\u001b[43m(\u001b[49m\u001b[43m*\u001b[49m\u001b[43margs\u001b[49m\u001b[43m,\u001b[49m\u001b[43m \u001b[49m\u001b[43m*\u001b[49m\u001b[43m*\u001b[49m\u001b[43mkwargs\u001b[49m\u001b[43m)\u001b[49m\n",
+ "\u001b[36mFile \u001b[39m\u001b[32m/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/sentence_transformers/sentence_transformer/model.py:682\u001b[39m, in \u001b[36mSentenceTransformer.encode\u001b[39m\u001b[34m(self, inputs, prompt_name, prompt, batch_size, show_progress_bar, output_value, precision, convert_to_numpy, convert_to_tensor, device, normalize_embeddings, truncate_dim, pool, chunk_size, **kwargs)\u001b[39m\n\u001b[32m 680\u001b[39m embeddings = torch.nn.functional.normalize(embeddings, p=\u001b[32m2\u001b[39m, dim=\u001b[32m1\u001b[39m)\n\u001b[32m 681\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m convert_to_numpy:\n\u001b[32m--> \u001b[39m\u001b[32m682\u001b[39m embeddings = \u001b[43membeddings\u001b[49m\u001b[43m.\u001b[49m\u001b[43mcpu\u001b[49m\u001b[43m(\u001b[49m\u001b[43m)\u001b[49m\n\u001b[32m 684\u001b[39m all_embeddings.extend(embeddings)\n\u001b[32m 686\u001b[39m all_embeddings = [all_embeddings[idx] \u001b[38;5;28;01mfor\u001b[39;00m idx \u001b[38;5;129;01min\u001b[39;00m np.argsort(length_sorted_idx)]\n",
+ "\u001b[31mKeyboardInterrupt\u001b[39m: "
+ ]
+ }
+ ],
+ "source": [
+ "index = PropertyGraphIndex.from_documents(\n",
+ " data, show_progress=True, property_graph_store=graph_store,use_async=False\n",
+ ")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 11,
+ "id": "abedc381",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "index.storage_context.persist(\"./storage2\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 12,
+ "id": "b5a54ce9",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "index.property_graph_store.save_networkx_graph(name=\"./kg.html\")"
+ ]
+ },
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "Python 3 (ipykernel)",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
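Two failures run through this checkpoint: `OpenAI(...)` rejects "DeepSeek-V3.2" because that class validates model names against OpenAI's own catalog, and the graph build is later interrupted mid-embedding. enhance.ipynb below fixes the first problem by switching to `OpenAILike`, which skips the name check; a condensed sketch of that working configuration, with the endpoint and env var taken from that notebook:

```python
import os
from llama_index.core import Settings
from llama_index.llms.openai_like import OpenAILike

# OpenAILike does not validate the model name against OpenAI's catalog,
# so the Azure-hosted DeepSeek deployment works where OpenAI(...) raised
# the ValueError above; is_chat_model=True routes calls via the chat API.
Settings.llm = OpenAILike(
    model="DeepSeek-V3.2",
    api_key=os.getenv("AZURE_API_KEY"),
    api_base="https://thong-api-hub.openai.azure.com/openai/v1",
    is_chat_model=True,
    timeout=300,
)
```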
packages/data_prep/.ipynb_checkpoints/main2-checkpoint.ipynb ADDED
@@ -0,0 +1,176 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "c23fd4cd",
+ "metadata": {},
+ "outputs": [
+ {
+ "ename": "",
+ "evalue": "",
+ "output_type": "error",
+ "traceback": [
+ "\u001b[1;31mRunning cells with '.venv (Python 3.13.5)' requires the ipykernel package.\n",
+ "\u001b[1;31mInstall 'ipykernel' into the Python environment. \n",
+ "\u001b[1;31mCommand: '/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/packages/data_prep/.venv/bin/python -m pip install ipykernel -U --force-reinstall'"
+ ]
+ }
+ ],
+ "source": [
+ "from dotenv import load_dotenv\n",
+ "load_dotenv()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "d45ba734",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nest_asyncio\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "0c69d6c4",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/.venv/lib/python3.13/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+ " from .autonotebook import tqdm as notebook_tqdm\n"
+ ]
+ }
+ ],
+ "source": [
+ "import os\n",
+ "import datasets\n",
+ "from llama_index.core import PropertyGraphIndex, Document, Settings, load_index_from_storage, StorageContext\n",
+ "from llama_index.core.graph_stores import SimplePropertyGraphStore\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
+ "from llama_index.llms.openai import OpenAI"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 4,
+ "id": "1eb7b186",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "llm = OpenAI(\n",
+ " model=\"DeepSeek-V3.2\",\n",
+ " api_key=os.getenv(\"AZURE_API_KEY\"),\n",
+ " api_base=\"https://thong-api-hub.services.ai.azure.com/models\",\n",
+ ")\n",
+ "Settings.llm = llm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "351a1c7a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Warning: You are sending unauthenticated requests to the HF Hub. Please set a HF_TOKEN to enable higher rate limits and faster downloads.\n",
+ "Loading weights: 100%|██████████| 199/199 [00:00<00:00, 11205.98it/s]\n",
+ "\u001b[1mBertModel LOAD REPORT\u001b[0m from: BAAI/bge-small-en-v1.5\n",
+ "Key | Status | | \n",
+ "------------------------+------------+--+-\n",
+ "embeddings.position_ids | UNEXPECTED | | \n",
+ "\n",
+ "Notes:\n",
+ "- UNEXPECTED:\tcan be ignored when loading from different task/architecture; not ok if you expect identical arch.\n"
+ ]
+ }
+ ],
+ "source": [
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
+ "\n",
+ "Settings.embed_model = embed_model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "36defa9e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "storage_context = StorageContext.from_defaults(persist_dir=\"./storage\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 7,
+ "id": "8d5f7f31",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "index = load_index_from_storage(storage_context)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 8,
+ "id": "dfdccca1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "index.property_graph_store.save_networkx_graph(name=\"./kg.html\")"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 9,
+ "id": "46bc10ba",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from typing import cast\n",
+ "tmp = cast(SimplePropertyGraphStore, index.property_graph_store)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "dfb604fd",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "tmp.save_networkx_graph"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": ".venv",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.13.5"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+ }
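This checkpoint only reloads a previously built graph and re-exports the HTML view. The persist/reload round trip it depends on is easiest to see in one place; a sketch assembled from the calls in these two notebooks (directory names follow theirs):

```python
from llama_index.core import StorageContext, load_index_from_storage

# main-checkpoint.ipynb persists the freshly built index with:
#   index.storage_context.persist("./storage2")

# Reloading needs only the persist directory; note that Settings.embed_model
# should match the model used at build time (BAAI/bge-small-en-v1.5 here).
storage_context = StorageContext.from_defaults(persist_dir="./storage")
index = load_index_from_storage(storage_context)

# Export the property graph as an interactive HTML visualization.
index.property_graph_store.save_networkx_graph(name="./kg.html")
```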
packages/data_prep/README.md ADDED
File without changes
packages/data_prep/enhance.ipynb ADDED
@@ -0,0 +1,510 @@
+ {
+ "cells": [
+ {
+ "cell_type": "code",
+ "execution_count": 2,
+ "id": "c23fd4cd",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "True"
+ ]
+ },
+ "execution_count": 2,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "from dotenv import load_dotenv\n",
+ "load_dotenv()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 3,
+ "id": "d45ba734",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import nest_asyncio\n",
+ "nest_asyncio.apply()"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 18,
+ "id": "0c69d6c4",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from llama_index.core import PropertyGraphIndex, Settings, load_index_from_storage, StorageContext\n",
+ "from llama_index.core.graph_stores import SimplePropertyGraphStore\n",
+ "from llama_index.core.graph_stores.types import KG_SOURCE_REL, Relation, LabelledPropertyGraph\n",
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
+ "from llama_index.llms.openai_like import OpenAILike"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "id": "7b6858c1",
+ "metadata": {},
+ "source": [
+ "### Load the generated Graph"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 5,
+ "id": "1eb7b186",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "llm = OpenAILike(\n",
+ " model=\"DeepSeek-V3.2\",\n",
+ " api_key=os.getenv(\"AZURE_API_KEY\"),\n",
+ " api_base=\"https://thong-api-hub.openai.azure.com/openai/v1\",\n",
+ " is_chat_model=True,\n",
+ " timeout=300,\n",
+ ")\n",
+ "Settings.llm = llm"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 6,
+ "id": "351a1c7a",
+ "metadata": {},
+ "outputs": [
+ {
+ "name": "stderr",
+ "output_type": "stream",
+ "text": [
+ "Warning: You are sending unauthenticated requests to the HF Hub. Please set a HF_TOKEN to enable higher rate limits and faster downloads.\n",
+ "Loading weights: 100%|██████████| 199/199 [00:00<00:00, 2288.13it/s]\n",
+ "\u001b[1mBertModel LOAD REPORT\u001b[0m from: BAAI/bge-small-en-v1.5\n",
+ "Key | Status | | \n",
+ "------------------------+------------+--+-\n",
+ "embeddings.position_ids | UNEXPECTED | | \n",
+ "\n",
+ "Notes:\n",
+ "- UNEXPECTED:\tcan be ignored when loading from different task/architecture; not ok if you expect identical arch.\n"
+ ]
+ }
+ ],
+ "source": [
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
+ "\n",
+ "Settings.embed_model = embed_model"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "36defa9e",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "storage_context = StorageContext.from_defaults(persist_dir=\"./generated\")\n",
+ "\n",
+ "index = load_index_from_storage(storage_context)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 29,
+ "id": "8615e444",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from typing import cast\n",
+ "\n",
+ "index = cast(PropertyGraphIndex, index)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 30,
+ "id": "0cb1ccf6",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "property_graph_store = cast(SimplePropertyGraphStore, index.property_graph_store)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 31,
+ "id": "01c9ab47",
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "graph = cast(LabelledPropertyGraph, property_graph_store.graph)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": 32,
+ "id": "c455850d",
+ "metadata": {},
+ "outputs": [
+ {
+ "data": {
+ "text/plain": [
+ "1039"
+ ]
+ },
+ "execution_count": 32,
+ "metadata": {},
+ "output_type": "execute_result"
+ }
+ ],
+ "source": [
+ "len(graph.triplets)"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "id": "93f60fe1",
+ "metadata": {},
+ "outputs": [],
+ "source": [
176
+ "# Add source relationship, I forgor to add ImplicitExtract in the previous script\n",
177
+ "for node in graph.get_all_nodes():\n",
178
+ " if \"triplet_source_id\" in node.properties:\n",
179
+ " graph.add_relation(\n",
180
+ " Relation(\n",
181
+ " label=KG_SOURCE_REL,\n",
182
+ " source_id=node.id,\n",
183
+ " target_id=node.properties[\"triplet_source_id\"],\n",
184
+ " )\n",
185
+ " )"
186
+ ]
187
+ },
188
+ {
189
+ "cell_type": "code",
190
+ "execution_count": 34,
191
+ "id": "17606922",
192
+ "metadata": {},
193
+ "outputs": [
194
+ {
195
+ "data": {
196
+ "text/plain": [
197
+ "2383"
198
+ ]
199
+ },
200
+ "execution_count": 34,
201
+ "metadata": {},
202
+ "output_type": "execute_result"
203
+ }
204
+ ],
205
+ "source": [
206
+ "len(graph.triplets)"
207
+ ]
208
+ },
209
+ {
210
+ "cell_type": "code",
211
+ "execution_count": 53,
212
+ "id": "b893ab85",
213
+ "metadata": {},
214
+ "outputs": [],
215
+ "source": [
216
+ "# Save it back\n",
217
+ "index.storage_context.persist(\"./shitass\")"
218
+ ]
219
+ },
220
+ {
221
+ "cell_type": "code",
222
+ "execution_count": null,
223
+ "id": "b911da97",
224
+ "metadata": {},
225
+ "outputs": [],
226
+ "source": [
227
+ "id_to_int = { node: i for i, node in enumerate(graph.nodes)}"
228
+ ]
229
+ },
230
+ {
231
+ "cell_type": "code",
232
+ "execution_count": 66,
233
+ "id": "310a330e",
234
+ "metadata": {},
235
+ "outputs": [],
236
+ "source": [
237
+ "int_to_id = { v: k for k, v in id_to_int.items()}"
238
+ ]
239
+ },
240
+ {
241
+ "cell_type": "code",
242
+ "execution_count": null,
243
+ "id": "5e71094c",
244
+ "metadata": {},
245
+ "outputs": [],
246
+ "source": [
247
+ "import json\n",
248
+ "json.dump(id_to_int, open(\"generated/id_to_int.json\", 'w'))\n",
249
+ "json.dump(id_to_int, open(\"generated/int_to_id.json\", 'w'))"
250
+ ]
251
+ },
252
+ {
253
+ "cell_type": "markdown",
254
+ "id": "8c4d7d6e",
255
+ "metadata": {},
256
+ "source": [
257
+ "### Construct model + training"
258
+ ]
259
+ },
260
+ {
261
+ "cell_type": "code",
262
+ "execution_count": 40,
263
+ "id": "fab9274c",
264
+ "metadata": {},
265
+ "outputs": [],
266
+ "source": [
267
+ "import torch\n",
268
+ "import torch.nn.functional as F\n",
269
+ "from torch_geometric.data import Data\n",
270
+ "from torch_geometric.nn import LightGCN"
271
+ ]
272
+ },
273
+ {
274
+ "cell_type": "code",
275
+ "execution_count": 38,
276
+ "id": "dc054e89",
277
+ "metadata": {},
278
+ "outputs": [],
279
+ "source": [
280
+ "edges = [\n",
281
+ " [id_to_int[rel.source_id], id_to_int[rel.target_id]]\n",
282
+ " for rel in graph.relations.values()\n",
283
+ "]"
284
+ ]
285
+ },
286
+ {
287
+ "cell_type": "code",
288
+ "execution_count": 41,
289
+ "id": "bd9ed0ed",
290
+ "metadata": {},
291
+ "outputs": [],
292
+ "source": [
293
+ "# [2, num_edges]\n",
294
+ "edge_index = torch.tensor(edges, dtype=torch.long)"
295
+ ]
296
+ },
297
+ {
298
+ "cell_type": "code",
299
+ "execution_count": 43,
300
+ "id": "b6162da5",
301
+ "metadata": {},
302
+ "outputs": [],
303
+ "source": [
304
+ "initial_semantic_features = torch.tensor(embed_model.get_text_embedding_batch([\n",
305
+ " getattr(node, \"text\", node.label) for node in graph.nodes.values()\n",
306
+ "]), dtype=torch.float)"
307
+ ]
308
+ },
309
+ {
310
+ "cell_type": "code",
311
+ "execution_count": 44,
312
+ "id": "37b00c3e",
313
+ "metadata": {},
314
+ "outputs": [],
315
+ "source": [
316
+ "data = Data(x=initial_semantic_features, edge_index=edge_index)"
317
+ ]
318
+ },
319
+ {
320
+ "cell_type": "code",
321
+ "execution_count": 49,
322
+ "id": "78592e84",
323
+ "metadata": {},
324
+ "outputs": [
325
+ {
326
+ "data": {
327
+ "text/plain": [
328
+ "torch.Size([1449, 384])"
329
+ ]
330
+ },
331
+ "execution_count": 49,
332
+ "metadata": {},
333
+ "output_type": "execute_result"
334
+ }
335
+ ],
336
+ "source": [
337
+ "initial_semantic_features.shape"
338
+ ]
339
+ },
340
+ {
341
+ "cell_type": "code",
342
+ "execution_count": null,
343
+ "id": "1d138911",
344
+ "metadata": {},
345
+ "outputs": [],
346
+ "source": [
347
+ "torch.save(data, \"generated/graph.pt\")"
348
+ ]
349
+ },
350
+ {
351
+ "cell_type": "code",
352
+ "execution_count": 62,
353
+ "id": "f7ba02e9",
354
+ "metadata": {},
355
+ "outputs": [],
356
+ "source": [
357
+ "light_gcn = LightGCN(num_nodes=initial_semantic_features.shape[0], embedding_dim=initial_semantic_features.shape[1], num_layers=3).to(\"cuda\")\n",
358
+ "\n",
359
+ "# copy embedding qua\n",
360
+ "light_gcn.embedding = light_gcn.embedding.from_pretrained(initial_semantic_features).to(\"cuda\").requires_grad_()"
361
+ ]
362
+ },
363
+ {
364
+ "cell_type": "code",
365
+ "execution_count": 54,
366
+ "id": "6d2b44af",
367
+ "metadata": {},
368
+ "outputs": [],
369
+ "source": [
370
+ "edge_index_pyg = edge_index.t().contiguous().to(\"cuda\")"
371
+ ]
372
+ },
373
+ {
374
+ "cell_type": "code",
375
+ "execution_count": 63,
376
+ "id": "c1015cd8",
377
+ "metadata": {},
378
+ "outputs": [
379
+ {
380
+ "name": "stdout",
381
+ "output_type": "stream",
382
+ "text": [
383
+ "Epoch 20/300, Loss: 0.6267\n",
384
+ "Epoch 40/300, Loss: 0.5856\n",
385
+ "Epoch 60/300, Loss: 0.5324\n",
386
+ "Epoch 80/300, Loss: 0.4638\n",
387
+ "Epoch 100/300, Loss: 0.3999\n",
388
+ "Epoch 120/300, Loss: 0.3355\n",
389
+ "Epoch 140/300, Loss: 0.2812\n",
390
+ "Epoch 160/300, Loss: 0.2454\n",
391
+ "Epoch 180/300, Loss: 0.2091\n",
392
+ "Epoch 200/300, Loss: 0.1841\n",
393
+ "Epoch 220/300, Loss: 0.1624\n",
394
+ "Epoch 240/300, Loss: 0.1443\n",
395
+ "Epoch 260/300, Loss: 0.1302\n",
396
+ "Epoch 280/300, Loss: 0.1144\n",
397
+ "Epoch 300/300, Loss: 0.1047\n",
398
+ "Training complete.\n"
399
+ ]
400
+ }
401
+ ],
402
+ "source": [
403
+ "from torch_geometric.utils import negative_sampling\n",
404
+ "\n",
405
+ "optimizer = torch.optim.Adam(light_gcn.parameters(), lr=1e-3)\n",
406
+ "num_epochs = 300\n",
407
+ "\n",
408
+ "for epoch in range(num_epochs):\n",
409
+ " light_gcn.train()\n",
410
+ " optimizer.zero_grad()\n",
411
+ "\n",
412
+ " emb = light_gcn.get_embedding(edge_index_pyg)\n",
413
+ "\n",
414
+ " pos_score = (emb[edge_index_pyg[0]] * emb[edge_index_pyg[1]]).sum(dim=-1)\n",
415
+ "\n",
416
+ " neg_edge = negative_sampling(\n",
417
+ " edge_index=edge_index_pyg,\n",
418
+ " num_nodes=data.num_nodes,\n",
419
+ " num_neg_samples=edge_index_pyg.size(1),\n",
420
+ " )\n",
421
+ " neg_score = (emb[neg_edge[0]] * emb[neg_edge[1]]).sum(dim=-1)\n",
422
+ "\n",
423
+ " loss = -F.logsigmoid(pos_score - neg_score).mean()\n",
424
+ "\n",
425
+ " loss.backward()\n",
426
+ " optimizer.step()\n",
427
+ "\n",
428
+ " if (epoch + 1) % 20 == 0:\n",
429
+ " print(f\"Epoch {epoch+1:>3}/{num_epochs}, Loss: {loss.item():.4f}\")\n",
430
+ "\n",
431
+ "print(\"Training complete.\")"
432
+ ]
433
+ },
434
+ {
435
+ "cell_type": "code",
436
+ "execution_count": null,
437
+ "id": "2cafe26c",
438
+ "metadata": {},
439
+ "outputs": [
440
+ {
441
+ "name": "stdout",
442
+ "output_type": "stream",
443
+ "text": [
444
+ "Model saved.\n"
445
+ ]
446
+ }
447
+ ],
448
+ "source": [
449
+ "torch.save(light_gcn.state_dict(), \"generated/lightgcn.pth\")\n",
450
+ "print(\"Model saved.\")"
451
+ ]
452
+ },
453
+ {
454
+ "cell_type": "code",
455
+ "execution_count": null,
456
+ "id": "342967bd",
457
+ "metadata": {},
458
+ "outputs": [
459
+ {
460
+ "name": "stdout",
461
+ "output_type": "stream",
462
+ "text": [
463
+ "Saved embeddings: torch.Size([1449, 384])\n"
464
+ ]
465
+ }
466
+ ],
467
+ "source": [
468
+ "light_gcn.eval()\n",
469
+ "with torch.no_grad():\n",
470
+ " updated_embeddings = light_gcn.get_embedding(edge_index_pyg).cpu()\n",
471
+ "\n",
472
+ "torch.save(updated_embeddings, \"generated/lightgcn_embeddings.pt\")\n",
473
+ "print(f\"Saved embeddings: {updated_embeddings.shape}\")"
474
+ ]
475
+ },
476
+ {
477
+ "cell_type": "code",
478
+ "execution_count": null,
479
+ "id": "5461e373",
480
+ "metadata": {},
481
+ "outputs": [],
482
+ "source": [
483
+ "import numpy as np\n",
484
+ "\n",
485
+ "np.save(\"generated/lightgcn_embeddings\", updated_embeddings.numpy())"
486
+ ]
487
+ }
488
+ ],
489
+ "metadata": {
490
+ "kernelspec": {
491
+ "display_name": ".venv",
492
+ "language": "python",
493
+ "name": "python3"
494
+ },
495
+ "language_info": {
496
+ "codemirror_mode": {
497
+ "name": "ipython",
498
+ "version": 3
499
+ },
500
+ "file_extension": ".py",
501
+ "mimetype": "text/x-python",
502
+ "name": "python",
503
+ "nbconvert_exporter": "python",
504
+ "pygments_lexer": "ipython3",
505
+ "version": "3.13.5"
506
+ }
507
+ },
508
+ "nbformat": 4,
509
+ "nbformat_minor": 5
510
+ }
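The notebook above exports two artifacts the application depends on: `generated/lightgcn_embeddings.npy` (the graph-refined node vectors) and `generated/id_to_int.json` (node id to row index). A minimal sketch of using them together for a similarity lookup, assuming the file layout produced above (the helper name and the `k` default are illustrative):

```python
import json
import numpy as np

# Load the exported artifacts (paths follow the generated/ layout above).
emb = np.load("generated/lightgcn_embeddings.npy")       # [num_nodes, 384]
id_to_int = json.load(open("generated/id_to_int.json"))  # node id -> row index
int_to_id = {v: node_id for node_id, v in id_to_int.items()}

# Normalize once so dot products become cosine similarities.
emb_normed = emb / (np.linalg.norm(emb, axis=1, keepdims=True) + 1e-8)

def top_k_similar(node_id: str, k: int = 5) -> list[tuple[str, float]]:
    """Return the k nodes whose refined embeddings are closest to node_id's."""
    scores = emb_normed @ emb_normed[id_to_int[node_id]]
    best = np.argsort(scores)[::-1][1 : k + 1]  # skip the node itself
    return [(int_to_id[int(i)], float(scores[i])) for i in best]
```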
packages/data_prep/generate.ipynb ADDED
@@ -0,0 +1,266 @@
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "id": "6137a317",
7
+ "metadata": {},
8
+ "outputs": [
9
+ {
10
+ "data": {
11
+ "text/plain": [
12
+ "True"
13
+ ]
14
+ },
15
+ "execution_count": 2,
16
+ "metadata": {},
17
+ "output_type": "execute_result"
18
+ }
19
+ ],
20
+ "source": [
21
+ "from dotenv import load_dotenv\n",
22
+ "load_dotenv()"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 3,
28
+ "id": "4a7dc84b",
29
+ "metadata": {},
30
+ "outputs": [],
31
+ "source": [
32
+ "import nest_asyncio\n",
33
+ "nest_asyncio.apply()"
34
+ ]
35
+ },
36
+ {
37
+ "cell_type": "code",
38
+ "execution_count": 4,
39
+ "id": "a47086ec",
40
+ "metadata": {},
41
+ "outputs": [
42
+ {
43
+ "name": "stderr",
44
+ "output_type": "stream",
45
+ "text": [
46
+ "/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/packages/data_prep/.venv/lib/python3.13/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
47
+ " from .autonotebook import tqdm as notebook_tqdm\n"
48
+ ]
49
+ }
50
+ ],
51
+ "source": [
52
+ "import os\n",
53
+ "import datasets\n",
54
+ "from llama_index.core import PropertyGraphIndex, Document, Settings, load_index_from_storage, VectorStoreIndex\n",
55
+ "from llama_index.core.graph_stores import SimplePropertyGraphStore\n",
56
+ "from llama_index.embeddings.huggingface import HuggingFaceEmbedding\n",
57
+ "from llama_index.llms.openai_like import OpenAILike"
58
+ ]
59
+ },
60
+ {
61
+ "cell_type": "code",
62
+ "execution_count": 5,
63
+ "id": "28b17221",
64
+ "metadata": {},
65
+ "outputs": [
66
+ {
67
+ "name": "stderr",
68
+ "output_type": "stream",
69
+ "text": [
70
+ "Warning: You are sending unauthenticated requests to the HF Hub. Please set a HF_TOKEN to enable higher rate limits and faster downloads.\n"
71
+ ]
72
+ }
73
+ ],
74
+ "source": [
75
+ "og_data = datasets.load_dataset(\"gamino/wiki_medical_terms\", split=\"train[:20]\")"
76
+ ]
77
+ },
78
+ {
79
+ "cell_type": "code",
80
+ "execution_count": 6,
81
+ "id": "ca0aa288",
82
+ "metadata": {},
83
+ "outputs": [],
84
+ "source": [
85
+ "data = [\n",
86
+ " Document(id_=str(idx), text=text) for idx, text in enumerate(og_data[\"page_text\"])\n",
87
+ "]"
88
+ ]
89
+ },
90
+ {
91
+ "cell_type": "code",
92
+ "execution_count": 7,
93
+ "id": "53a4d694",
94
+ "metadata": {},
95
+ "outputs": [
96
+ {
97
+ "data": {
98
+ "text/plain": [
99
+ "20"
100
+ ]
101
+ },
102
+ "execution_count": 7,
103
+ "metadata": {},
104
+ "output_type": "execute_result"
105
+ }
106
+ ],
107
+ "source": [
108
+ "len(data)"
109
+ ]
110
+ },
111
+ {
112
+ "cell_type": "code",
113
+ "execution_count": 14,
114
+ "id": "e08d931b",
115
+ "metadata": {},
116
+ "outputs": [],
117
+ "source": [
118
+ "llm = OpenAILike(\n",
119
+ " model=\"DeepSeek-V3.2\",\n",
120
+ " api_key=os.getenv(\"AZURE_API_KEY\"),\n",
121
+ " api_base=\"https://thong-api-hub.openai.azure.com/openai/v1\",\n",
122
+ " is_chat_model=True,\n",
123
+ " timeout=300,\n",
124
+ " is_function_calling_model=True, \n",
125
+ " should_use_structured_outputs=True,\n",
126
+ ")\n",
127
+ "Settings.llm = llm"
128
+ ]
129
+ },
130
+ {
131
+ "cell_type": "code",
132
+ "execution_count": 20,
133
+ "id": "8d7d9e4a",
134
+ "metadata": {},
135
+ "outputs": [
136
+ {
137
+ "data": {
138
+ "text/plain": [
139
+ "CompletionResponse(text='你好!😊 很高兴见到你!\\n\\n有什么我可以帮助你的吗?无论是回答问题、聊天、协助解决问题,还是其他任何事情,我都很乐意为你提供帮助。请随时告诉我你需要什么!', additional_kwargs={'prompt_tokens': 5, 'completion_tokens': 41, 'total_tokens': 46}, raw=ChatCompletion(id='48c590e60d6e472da59fddacdeda1cd2', choices=[Choice(finish_reason='stop', index=0, logprobs=None, message=ChatCompletionMessage(content='你好!😊 很高兴见到你!\\n\\n有什么我可以帮助你的吗?无论是回答问题、聊天、协助解决问题,还是其他任何事情,我都很乐意为你提供帮助。请随时告诉我你需要什么!', refusal=None, role='assistant', annotations=None, audio=None, function_call=None, tool_calls=None, reasoning_content=None), content_filter_results={'hate': {'filtered': False, 'severity': 'safe'}, 'protected_material_code': {'filtered': False, 'detected': False}, 'protected_material_text': {'filtered': False, 'detected': False}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}, stop_reason=None)], created=1777086170, model='deepseek-v3.2', object='chat.completion', service_tier=None, system_fingerprint=None, usage=CompletionUsage(completion_tokens=41, prompt_tokens=5, total_tokens=46, completion_tokens_details=None, prompt_tokens_details=None, audio_prompt_tokens=0, reasoning_tokens=0), prompt_filter_results=[{'prompt_index': 0, 'content_filter_results': {'hate': {'filtered': False, 'severity': 'safe'}, 'jailbreak': {'filtered': False, 'detected': False}, 'self_harm': {'filtered': False, 'severity': 'safe'}, 'sexual': {'filtered': False, 'severity': 'safe'}, 'violence': {'filtered': False, 'severity': 'safe'}}}]), logprobs=None, delta=None)"
140
+ ]
141
+ },
142
+ "execution_count": 20,
143
+ "metadata": {},
144
+ "output_type": "execute_result"
145
+ }
146
+ ],
147
+ "source": [
148
+ "Settings.llm.complete(\"Hello\")"
149
+ ]
150
+ },
151
+ {
152
+ "cell_type": "code",
153
+ "execution_count": 10,
154
+ "id": "711fd553",
155
+ "metadata": {},
156
+ "outputs": [
157
+ {
158
+ "name": "stderr",
159
+ "output_type": "stream",
160
+ "text": [
161
+ "/media/data/BaiTap/Code/Nam4/DLLUD/Lab03/big-data-application/packages/data_prep/.venv/lib/python3.13/site-packages/torch/cuda/__init__.py:180: UserWarning: CUDA initialization: CUDA unknown error - this may be due to an incorrectly set up environment, e.g. changing env variable CUDA_VISIBLE_DEVICES after program start. Setting the available devices to be zero. (Triggered internally at /pytorch/c10/cuda/CUDAFunctions.cpp:119.)\n",
162
+ " return torch._C._cuda_getDeviceCount() > 0\n",
163
+ "Loading weights: 100%|██████████| 199/199 [00:00<00:00, 2822.69it/s]\n",
164
+ "\u001b[1mBertModel LOAD REPORT\u001b[0m from: BAAI/bge-small-en-v1.5\n",
165
+ "Key | Status | | \n",
166
+ "------------------------+------------+--+-\n",
167
+ "embeddings.position_ids | UNEXPECTED | | \n",
168
+ "\n",
169
+ "Notes:\n",
170
+ "- UNEXPECTED:\tcan be ignored when loading from different task/architecture; not ok if you expect identical arch.\n"
171
+ ]
172
+ }
173
+ ],
174
+ "source": [
175
+ "embed_model = HuggingFaceEmbedding(model_name=\"BAAI/bge-small-en-v1.5\")\n",
176
+ "\n",
177
+ "Settings.embed_model = embed_model"
178
+ ]
179
+ },
180
+ {
181
+ "cell_type": "code",
182
+ "execution_count": null,
183
+ "id": "a5b54280",
184
+ "metadata": {},
185
+ "outputs": [],
186
+ "source": [
187
+ "from llama_index.core.indices.property_graph import DynamicLLMPathExtractor, ImplicitPathExtractor\n",
188
+ "\n",
189
+ "kg_extractor = DynamicLLMPathExtractor(\n",
190
+ " llm=llm,\n",
191
+ ")"
192
+ ]
193
+ },
194
+ {
195
+ "cell_type": "code",
196
+ "execution_count": null,
197
+ "id": "6403ff0b",
198
+ "metadata": {},
199
+ "outputs": [
200
+ {
201
+ "name": "stderr",
202
+ "output_type": "stream",
203
+ "text": [
204
+ "Applying transformations: 100%|██████████| 1/1 [00:00<00:00, 5.57it/s]\n",
205
+ "Applying transformations: 100%|██████████| 1/1 [02:28<00:00, 148.94s/it]\n",
206
+ "Generating embeddings: 100%|██████████| 11/11 [00:13<00:00, 1.24s/it]\n",
207
+ "Generating embeddings: 100%|██████████| 210/210 [00:24<00:00, 8.57it/s]\n"
208
+ ]
209
+ }
210
+ ],
211
+ "source": [
212
+ "graph_store = SimplePropertyGraphStore()\n",
213
+ "\n",
214
+ "index = PropertyGraphIndex.from_documents(\n",
215
+ " data,\n",
216
+ " show_progress=True,\n",
217
+ " property_graph_store=graph_store,\n",
218
+ " llm=llm,\n",
219
+ " embed_model=embed_model,\n",
220
+ " kg_extractors=[kg_extractor, ImplicitPathExtractor()],\n",
221
+ ")"
222
+ ]
223
+ },
224
+ {
225
+ "cell_type": "code",
226
+ "execution_count": 23,
227
+ "id": "b5a54ce9",
228
+ "metadata": {},
229
+ "outputs": [],
230
+ "source": [
231
+ "index.property_graph_store.save_networkx_graph(name=\"./kg.html\")"
232
+ ]
233
+ },
234
+ {
235
+ "cell_type": "code",
236
+ "execution_count": null,
237
+ "id": "abedc381",
238
+ "metadata": {},
239
+ "outputs": [],
240
+ "source": [
241
+ "index.storage_context.persist(\"./generated\")"
242
+ ]
243
+ }
244
+ ],
245
+ "metadata": {
246
+ "kernelspec": {
247
+ "display_name": ".venv",
248
+ "language": "python",
249
+ "name": "python3"
250
+ },
251
+ "language_info": {
252
+ "codemirror_mode": {
253
+ "name": "ipython",
254
+ "version": 3
255
+ },
256
+ "file_extension": ".py",
257
+ "mimetype": "text/x-python",
258
+ "name": "python",
259
+ "nbconvert_exporter": "python",
260
+ "pygments_lexer": "ipython3",
261
+ "version": "3.13.5"
262
+ }
263
+ },
264
+ "nbformat": 4,
265
+ "nbformat_minor": 5
266
+ }
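generate.ipynb persists the freshly extracted property graph to `./generated`, which is what the LFS-tracked JSON files below hold. As a sanity check, the index can be reloaded and queried without re-running extraction; a sketch using the same llama-index APIs imported above (the question string is illustrative, and `Settings.embed_model` must be configured as in the notebook):

```python
from llama_index.core import StorageContext, load_index_from_storage

# Reload the persisted PropertyGraphIndex from disk.
storage_context = StorageContext.from_defaults(persist_dir="./generated")
index = load_index_from_storage(storage_context)

# Retrieve graph context for a test question with the default retriever.
retriever = index.as_retriever()
for node in retriever.retrieve("What are the symptoms of sepsis?"):
    print(node.score, node.text[:80])
```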
packages/data_prep/generated/default__vector_store.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:c0c7f4210f14c206400b066e59279a3fdd29ea38c8a475f75a75bf0117fcf083
3
+ size 11773589
packages/data_prep/generated/docstore.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:6a73fa5772fee1f6f964029a3d38634f864600cc0de7db553b8dae0bcdd919fd
3
+ size 574331
packages/data_prep/generated/graph.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:10d161965eb144eb9e7851a3373fc10e40cf2bee95f65e39489ef77bf8b5dfe7
3
+ size 2265813
packages/data_prep/generated/graph_store.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:8e0a77744010862225c69da83c585f4f8a42fd551b044ce530dbb1eb6e16742c
3
+ size 18
packages/data_prep/generated/id_to_int.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a356d44914a150f95f5d1db996f8874961d5323ab8f2fe9d3e91e4f75f30438
3
+ size 44218
packages/data_prep/generated/image__vector_store.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d17ed74c1649a438e518a8dc56a7772913dfe1ea7a7605bce069c63872431455
3
+ size 72
packages/data_prep/generated/index_store.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:5d3c3dd402c1fe9eed6658c9ff43a7e360df4046b08103c21ec53f74efd895c3
3
+ size 181
packages/data_prep/generated/int_to_id.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1a356d44914a150f95f5d1db996f8874961d5323ab8f2fe9d3e91e4f75f30438
3
+ size 44218
packages/data_prep/generated/lightgcn.pth ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:aec6644cf92f4a49f4ae3416bef018498da11b8f73e67712d94e0df6b64eee39
3
+ size 2227821
packages/data_prep/generated/lightgcn_embeddings.npy ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:e2cff0781fee7d2ea40b19e867e1c07b32012ca1fcb53203a5bfac10da32b8f8
3
+ size 2225792
packages/data_prep/generated/lightgcn_embeddings.pt ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:315c00d6317b813cb02b5ca682be2fcd70b5349d44e88a43ec587e3221ddc3df
3
+ size 2227325
packages/data_prep/generated/property_graph_store.json ADDED
@@ -0,0 +1,3 @@
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1f15eb29ec3ff3b4b8049cd5ff58ec8071e5400de464b3ee54a283d147a52c0f
3
+ size 2301082
packages/data_prep/kg.html ADDED
The diff for this file is too large to render. See raw diff
packages/data_prep/pyproject.toml ADDED
@@ -0,0 +1,29 @@
1
+ [project]
2
+ name = "data-prep"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "thng292", email = "nguyenquangthong292@gmail.com" }
8
+ ]
9
+ requires-python = ">=3.13"
10
+ dependencies = [
11
+ "datasets>=4.8.4",
12
+ "ipykernel>=7.2.0",
13
+ "llama-index>=0.14.20",
14
+ "llama-index-embeddings-huggingface>=0.7.0",
15
+ "llama-index-llms-openai-like>=0.7.1",
16
+ "nest-asyncio>=1.6.0",
17
+ "notebook>=7.5.5",
18
+ "python-dotenv>=1.2.2",
19
+ "pyvis>=0.3.2",
20
+ "torch>=2.11.0",
21
+ "torch-geometric>=2.7.0",
22
+ ]
23
+
24
+ [project.scripts]
25
+ data-prep = "data_prep:main"
26
+
27
+ [build-system]
28
+ requires = ["uv_build>=0.10.4,<0.11.0"]
29
+ build-backend = "uv_build"
packages/data_prep/src/data_prep/__init__.py ADDED
@@ -0,0 +1,12 @@
1
+ def main() -> None:
2
+ print("Hello from data-prep!")
3
+
4
+
5
+ # Load datasets
6
+ # Build + index to SimplePropertyGraphStore
7
+ # Save it `original`
8
+ # Load to torch geometric
9
+ # Message passing somehow?
10
+ # Save it `enhanced`
11
+ # Find weighted linear formulation by subclassing BaseRetriever
12
+ # Build front-end with gradio, allow chat, uses llama.cpp
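The "weighted linear formulation" step in this roadmap is implemented below in `src/big_data_application/__init__.py` as `score = alpha * sim_original + (1 - alpha) * sim_lightgcn`. A tiny worked example of the trade-off (the similarity numbers are made up for illustration):

```python
# Hypothetical similarities of one candidate node to a query:
sim_original = 0.90  # plain text-embedding similarity
sim_lightgcn = 0.40  # LightGCN graph-refined similarity

for alpha in (0.0, 0.5, 1.0):
    score = alpha * sim_original + (1 - alpha) * sim_lightgcn
    print(f"alpha={alpha}: score={score:.2f}")
# alpha=0.0 -> 0.40 (pure graph), alpha=0.5 -> 0.65 (blend), alpha=1.0 -> 0.90 (pure text)
```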
pyproject.toml ADDED
@@ -0,0 +1,32 @@
1
+ [project]
2
+ name = "big-data-application"
3
+ version = "0.1.0"
4
+ description = "Add your description here"
5
+ readme = "README.md"
6
+ authors = [
7
+ { name = "thng292", email = "nguyenquangthong292@gmail.com" }
8
+ ]
9
+ requires-python = ">=3.13"
10
+ dependencies = [
11
+ "gradio>=6.12.0",
12
+ "huggingface-hub>=0.30.0",
13
+ "ipykernel>=7.2.0",
14
+ "llama-cpp-python>=0.3.19",
15
+ "llama-index>=0.14.20",
16
+ "llama-index-embeddings-huggingface>=0.7.0",
17
+ "numpy>=2.0.0",
18
+ "python-dotenv>=1.2.2",
19
+ ]
20
+
21
+ [project.scripts]
22
+ big-data-application = "big_data_application:main"
23
+
24
+ [[tool.uv.index]]
25
+ url = "https://abetlen.github.io/llama-cpp-python/whl/cpu"
26
+
27
+ [build-system]
28
+ requires = ["uv_build>=0.10.4,<0.11.0"]
29
+ build-backend = "uv_build"
30
+
31
+ [tool.uv.workspace]
32
+ members = ["packages/*"]
src/big_data_application/__init__.py ADDED
@@ -0,0 +1,189 @@
1
+ import os
2
+ import re
3
+ import json
4
+
5
+ import numpy as np
6
+ import gradio as gr
7
+ from llama_index.core import StorageContext, load_index_from_storage, Settings, QueryBundle
8
+ from llama_index.core.retrievers import BaseRetriever
9
+ from llama_index.core.schema import NodeWithScore, TextNode
10
+ from llama_index.embeddings.huggingface import HuggingFaceEmbedding
11
+ from llama_cpp import Llama
12
+ from huggingface_hub import hf_hub_download, list_repo_files
13
+
14
+
15
+ DATA_DIR = os.environ.get("DATA_DIR", "packages/data_prep/generated")
16
+ MODEL_REPO = "Jackrong/Qwen3.5-4B-Neo-GGUF"
17
+
18
+
19
+ def download_gguf_model(repo_id: str) -> str:
20
+ files = list_repo_files(repo_id)
21
+ gguf_files = [f for f in files if f.endswith(".gguf")]
22
+ target = next((f for f in gguf_files if "Q4_K_M" in f.upper()), gguf_files[0])
23
+ print(f"Downloading {target} from {repo_id}...")
24
+ return hf_hub_download(repo_id=repo_id, filename=target)
25
+
26
+
27
+ class HybridGraphRetriever(BaseRetriever):
28
+ """Combines original embedding similarity and LightGCN-enhanced embedding
29
+ similarity using a weighted linear formulation for hybrid ranking."""
30
+
31
+ def __init__(
32
+ self,
33
+ data_dir: str,
34
+ embed_model: HuggingFaceEmbedding,
35
+ alpha: float = 0.5,
36
+ top_k: int = 5,
37
+ ):
38
+ super().__init__()
39
+ self._embed_model = embed_model
40
+ self._alpha = alpha
41
+ self._top_k = top_k
42
+
43
+ with open(os.path.join(data_dir, "property_graph_store.json")) as f:
44
+ pg_data = json.load(f)
45
+ with open(os.path.join(data_dir, "id_to_int.json")) as f:
46
+ id_to_int = json.load(f)
47
+ lightgcn_all = np.load(os.path.join(data_dir, "lightgcn_embeddings.npy"))
48
+
49
+ node_ids: list[str] = []
50
+ node_texts: list[str] = []
51
+ node_labels: list[str] = []
52
+ orig_list: list[list[float]] = []
53
+ lgcn_list: list[np.ndarray] = []
54
+
55
+ for node_id, node_data in pg_data["nodes"].items():
56
+ if node_id not in id_to_int:
57
+ continue
58
+ emb = node_data.get("embedding")
59
+ if not emb:
60
+ continue
61
+ idx = id_to_int[node_id]
62
+ node_ids.append(node_id)
63
+ node_texts.append(node_data.get("text", ""))
64
+ node_labels.append(node_data.get("label", ""))
65
+ orig_list.append(emb)
66
+ lgcn_list.append(lightgcn_all[idx])
67
+
68
+ self._node_ids = node_ids
69
+ self._node_texts = node_texts
70
+ self._node_labels = node_labels
71
+
72
+ orig = np.array(orig_list, dtype=np.float32)
73
+ lgcn = np.stack(lgcn_list).astype(np.float32)
74
+ self._orig_normed = orig / (np.linalg.norm(orig, axis=1, keepdims=True) + 1e-8)
75
+ self._lgcn_normed = lgcn / (np.linalg.norm(lgcn, axis=1, keepdims=True) + 1e-8)
76
+
77
+ print(
78
+ f"HybridGraphRetriever ready: {len(node_ids)} nodes, "
79
+ f"alpha={alpha}, top_k={top_k}"
80
+ )
81
+
82
+ def _retrieve(self, query_bundle: QueryBundle) -> list[NodeWithScore]:
83
+ query_emb = np.array(
84
+ self._embed_model.get_query_embedding(query_bundle.query_str),
85
+ dtype=np.float32,
86
+ )
87
+ query_normed = query_emb / (np.linalg.norm(query_emb) + 1e-8)
88
+
89
+ sim_orig = self._orig_normed @ query_normed
90
+ sim_lgcn = self._lgcn_normed @ query_normed
91
+
92
+ # Weighted linear combination: score = alpha * sim_original + (1 - alpha) * sim_lightgcn
93
+ scores = self._alpha * sim_orig + (1 - self._alpha) * sim_lgcn
94
+
95
+ top_indices = np.argsort(scores)[::-1][: self._top_k]
96
+
97
+ return [
98
+ NodeWithScore(
99
+ node=TextNode(
100
+ text=self._node_texts[i],
101
+ id_=self._node_ids[i],
102
+ metadata={"label": self._node_labels[i]},
103
+ ),
104
+ score=float(scores[i]),
105
+ )
106
+ for i in top_indices
107
+ ]
108
+
109
+
110
+ def _strip_think_tags(text: str) -> str:
111
+ text = re.sub(r"<think>.*?</think>", "", text, flags=re.DOTALL)
112
+ if "<think>" in text:
113
+ text = text[: text.index("<think>")]
114
+ return text.strip()
115
+
116
+
117
+ def main() -> None:
118
+ print("Loading embedding model...")
119
+ embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")
120
+ Settings.embed_model = embed_model
121
+ Settings.llm = None
122
+
123
+ print("Loading property graph index...")
124
+ storage_context = StorageContext.from_defaults(persist_dir=DATA_DIR)
125
+ index = load_index_from_storage(storage_context)
126
+ print(f"Index loaded: {index.index_id}")
127
+
128
+ print("Building hybrid retriever...")
129
+ retriever = HybridGraphRetriever(
130
+ data_dir=DATA_DIR,
131
+ embed_model=embed_model,
132
+ alpha=0.5,
133
+ top_k=5,
134
+ )
135
+
136
+ print("Loading LLM...")
137
+ model_path = download_gguf_model(MODEL_REPO)
138
+ llm = Llama(
139
+ model_path=model_path,
140
+ n_ctx=4096,
141
+ n_threads=os.cpu_count() or 4,
142
+ verbose=False,
143
+ )
144
+
145
+ def chat(message: str, history: list[dict]):
146
+ nodes = retriever.retrieve(message)
147
+ context = "\n\n".join(
148
+ f"[{n.metadata.get('label', '')}] {n.text}" for n in nodes
149
+ )
150
+
151
+ messages: list[dict] = [
152
+ {
153
+ "role": "system",
154
+ "content": (
155
+ "You are a helpful knowledge assistant. "
156
+ "Answer questions based on the provided context from a knowledge graph. "
157
+ "If the context doesn't contain relevant information, say so.\n\n"
158
+ f"Context:\n{context}"
159
+ ),
160
+ }
161
+ ]
162
+ for msg in history:
163
+ messages.append({"role": msg["role"], "content": msg["content"]})
164
+ messages.append({"role": "user", "content": message})
165
+
166
+ response = llm.create_chat_completion(
167
+ messages=messages,
168
+ stream=True,
169
+ max_tokens=1024,
170
+ )
171
+
172
+ partial = ""
173
+ for chunk in response:
174
+ delta = chunk["choices"][0]["delta"]
175
+ content = delta.get("content", "")
176
+ if content:
177
+ partial += content
178
+ display = _strip_think_tags(partial)
179
+ if display:
180
+ yield display
181
+
182
+ print("Starting Gradio app...")
183
+ demo = gr.ChatInterface(
184
+ fn=chat,
185
+ title="Knowledge Graph Chat",
186
+ description="Chat with an LLM powered by a knowledge graph with hybrid retrieval (original + LightGCN embeddings).",
187
+ type="messages",
188
+ )
189
+ demo.launch(server_name="0.0.0.0", server_port=7860)
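For tuning `alpha`, the retriever can be exercised on its own outside Gradio. A minimal sketch, assuming the default `DATA_DIR` layout from this commit (the query and alpha values are illustrative):

```python
from llama_index.embeddings.huggingface import HuggingFaceEmbedding

from big_data_application import HybridGraphRetriever

embed_model = HuggingFaceEmbedding(model_name="BAAI/bge-small-en-v1.5")

# Compare pure-text ranking (alpha=1.0) against an even blend (alpha=0.5).
for alpha in (1.0, 0.5):
    retriever = HybridGraphRetriever(
        data_dir="packages/data_prep/generated",
        embed_model=embed_model,
        alpha=alpha,
        top_k=3,
    )
    nodes = retriever.retrieve("What causes hypertension?")
    print(f"alpha={alpha}:", [(n.metadata.get("label"), n.score) for n in nodes])
```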
src/big_data_application/__main__.py ADDED
@@ -0,0 +1,3 @@
1
+ from big_data_application import main
2
+
3
+ main()
uv.lock ADDED
The diff for this file is too large to render. See raw diff