Omar Solano committed
Commit • 129499e
Parent(s): cd37733
add upload to hf script
data/scraping_scripts/create_db.ipynb
DELETED
@@ -1,353 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Create HF vector database\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from dotenv import load_dotenv\n",
-    "\n",
-    "load_dotenv(\"../../.env\")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import nest_asyncio\n",
-    "\n",
-    "nest_asyncio.apply()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Create a set of Llama-index Documents with each section in the jsonl file\n"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import Document\n",
-    "from llama_index.core.schema import MetadataMode\n",
-    "import json\n",
-    "import pickle\n",
-    "\n",
-    "\n",
-    "def create_docs(input_file):\n",
-    "    with open(input_file, \"r\") as f:\n",
-    "        documents = []\n",
-    "        for i, line in enumerate(f):\n",
-    "            data = json.loads(line)\n",
-    "            documents.append(\n",
-    "                Document(\n",
-    "                    doc_id=data[\"doc_id\"],\n",
-    "                    text=data[\"content\"],\n",
-    "                    metadata={\n",
-    "                        \"url\": data[\"url\"],\n",
-    "                        \"title\": data[\"name\"],\n",
-    "                        \"tokens\": data[\"tokens\"],\n",
-    "                        \"retrieve_doc\": data[\"retrieve_doc\"],\n",
-    "                        \"source\": data[\"source\"],\n",
-    "                    },\n",
-    "                    # LLM will see the 'url' of each chunk\n",
-    "                    excluded_llm_metadata_keys=[\n",
-    "                        # \"url\",\n",
-    "                        \"title\",\n",
-    "                        \"tokens\",\n",
-    "                        \"retrieve_doc\",\n",
-    "                        \"source\",\n",
-    "                    ],\n",
-    "                    # Embedding model will embed the 'title' of each chunk\n",
-    "                    excluded_embed_metadata_keys=[\n",
-    "                        \"url\",\n",
-    "                        # \"title\",\n",
-    "                        \"tokens\",\n",
-    "                        \"retrieve_doc\",\n",
-    "                        \"source\",\n",
-    "                    ],\n",
-    "                )\n",
-    "            )\n",
-    "    return documents\n",
-    "\n",
-    "\n",
-    "# documents = create_docs(\"../transformers_data.jsonl\")\n",
-    "# documents = create_docs(\"../peft_data.jsonl\")\n",
-    "# documents = create_docs(\"../trl_data.jsonl\")\n",
-    "# documents = create_docs(\"../llama_index_data.jsonl\")\n",
-    "documents = create_docs(\"../openai-cookbook_data.jsonl\")\n",
-    "print(documents[0])\n",
-    "print(documents[0].metadata)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# print(\n",
-    "#     \"The LLM sees this: \\n\",\n",
-    "#     documents[0].get_content(metadata_mode=MetadataMode.LLM),\n",
-    "# )\n",
-    "print(\n",
-    "    \"The Embedding model sees this: \\n\",\n",
-    "    documents[0].get_content(metadata_mode=MetadataMode.EMBED),\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "import chromadb\n",
-    "\n",
-    "# create client and a new collection\n",
-    "DB_COLLECTION = \"chroma-db-openai-cookbooks\"\n",
-    "chroma_client = chromadb.PersistentClient(path=f\"../{DB_COLLECTION}\")\n",
-    "chroma_collection = chroma_client.create_collection(DB_COLLECTION)\n",
-    "\n",
-    "\n",
-    "from llama_index.vector_stores.chroma import ChromaVectorStore\n",
-    "from llama_index.core import StorageContext\n",
-    "\n",
-    "# Define a storage context object using the created vector database.\n",
-    "vector_store = ChromaVectorStore(chroma_collection=chroma_collection)\n",
-    "storage_context = StorageContext.from_defaults(vector_store=vector_store)\n",
-    "\n",
-    "document_dict = {doc.doc_id: doc for doc in documents}\n",
-    "DOCUMENT_NAME = f\"../{DB_COLLECTION}/document_dict_openai.pkl\"\n",
-    "\n",
-    "with open(DOCUMENT_NAME, \"wb\") as f:\n",
-    "    pickle.dump(document_dict, f)\n",
-    "\n",
-    "# with open(DOCUMENT_NAME, \"rb\") as f:\n",
-    "#     document_dict = pickle.load(f)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import VectorStoreIndex\n",
-    "from llama_index.core.node_parser import SentenceSplitter\n",
-    "from llama_index.embeddings.openai import OpenAIEmbedding\n",
-    "\n",
-    "index = VectorStoreIndex.from_documents(\n",
-    "    documents,\n",
-    "    embed_model=OpenAIEmbedding(model=\"text-embedding-3-large\", mode=\"similarity\"),\n",
-    "    transformations=[SentenceSplitter(chunk_size=800, chunk_overlap=400)],\n",
-    "    show_progress=True,\n",
-    "    use_async=True,\n",
-    "    storage_context=storage_context,\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "### Test the DB"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "retriever = index.as_retriever(\n",
-    "    similarity_top_k=10,\n",
-    "    use_async=True,\n",
-    "    embed_model=OpenAIEmbedding(model=\"text-embedding-3-large\", mode=\"similarity\"),\n",
-    ")"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core.data_structs import Node\n",
-    "from llama_index.core.schema import NodeWithScore, BaseNode, TextNode\n",
-    "\n",
-    "\n",
-    "# query = \"fine-tune a pretrained model\"\n",
-    "# query = \"fine-tune an llm\"\n",
-    "query = \"how to fine-tune an llm?\"\n",
-    "\n",
-    "nodes_context = []\n",
-    "nodes = retriever.retrieve(query)\n",
-    "\n",
-    "\n",
-    "# Filter nodes with the same ref_doc_id\n",
-    "def filter_nodes_by_unique_doc_id(nodes):\n",
-    "    unique_nodes = {}\n",
-    "    for node in nodes:\n",
-    "        doc_id = node.node.ref_doc_id\n",
-    "        if doc_id is not None and doc_id not in unique_nodes:\n",
-    "            unique_nodes[doc_id] = node\n",
-    "    return list(unique_nodes.values())\n",
-    "\n",
-    "\n",
-    "nodes = filter_nodes_by_unique_doc_id(nodes)\n",
-    "print(len(nodes))\n",
-    "\n",
-    "for node in nodes:\n",
-    "    print(\"Node ID\\t\", node.node_id)\n",
-    "    print(\"Title\\t\", node.metadata[\"title\"])\n",
-    "    print(\"Text\\t\", node.text)\n",
-    "    print(\"Score\\t\", node.score)\n",
-    "    print(\"Metadata\\t\", node.metadata)\n",
-    "    print(\"-_\" * 20)\n",
-    "    if node.metadata[\"retrieve_doc\"] == True:\n",
-    "        print(\"This node will be replaced by the document\")\n",
-    "        doc = document_dict[node.node.ref_doc_id]\n",
-    "        # print(doc.text)\n",
-    "        new_node = NodeWithScore(\n",
-    "            node=TextNode(text=doc.text, metadata=node.metadata), score=node.score\n",
-    "        )\n",
-    "        print(new_node.text)\n",
-    "        nodes_context.append(new_node)\n",
-    "    else:\n",
-    "        nodes_context.append(node)\n",
-    "\n",
-    "print(len(nodes_context))"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from llama_index.core import ChatPromptTemplate\n",
-    "from llama_index.core.llms import ChatMessage, MessageRole\n",
-    "from pydantic import BaseModel, Field\n",
-    "\n",
-    "system_prompt = (\n",
-    "    \"You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine-tuning models, giving 'memory' to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, RAG frameworks, Langchain, Llama-Index, LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context.\"\n",
-    "    \"You are provided information found in Hugging Face's documentation and the RAG course. \"\n",
-    "    \"Only some information might be relevant to the question, so ignore the irrelevant part and use the relevant part to answer the question.\"\n",
-    "    \"Only respond with information given to you documentation. DO NOT use additional information, even if you know the answer. \"\n",
-    "    \"If the answer is somewhere in the documentation, answer the question (depending on the questions and the variety of relevant information in the documentation, give complete and helpful answers.\"\n",
-    "    \"Here is the information you can use, the order is not important: \\n\\n\"\n",
-    "    \"---------------------\\n\"\n",
-    "    \"{context_str}\\n\"\n",
-    "    \"---------------------\\n\\n\"\n",
-    "    \"REMEMBER:\\n\"\n",
-    "    \"You are a witty AI teacher, helpfully answering questions from students of an applied artificial intelligence course on Large Language Models (LLMs or llm). Topics covered include training models, fine tuning models, giving memory to LLMs, prompting, hallucinations and bias, vector databases, transformer architectures, embeddings, RAG frameworks, Langchain, making LLMs interact with tool use, AI agents, reinforcement learning with human feedback. Questions should be understood with this context.\"\n",
-    "    \"You are provided information found in Hugging Face's documentation and the RAG course. \"\n",
-    "    \"Here are the rules you must follow:\\n\"\n",
-    "    \"* Only respond with information inside the documentation. DO NOT provide additional information, even if you know the answer. \"\n",
-    "    \"* If the answer is in the documentation, answer the question (depending on the questions and the variety of relevant information in the json documentation. Your answer needs to be pertinent and not redundant giving a clear explanation as if you were a teacher. \"\n",
-    "    \"* Only use information summarized from the documentation, do not respond otherwise. \"\n",
-    "    \"* Do not refer to the documentation directly, but use the instructions provided within it to answer questions. \"\n",
-    "    \"* Do not reference any links, urls or hyperlinks in your answers.\\n\"\n",
-    "    \"* Make sure to format your answers in Markdown format, including code block and snippets.\\n\"\n",
-    "    \"Now answer the following question: \\n\"\n",
-    ")\n",
-    "\n",
-    "chat_text_qa_msgs: list[ChatMessage] = [\n",
-    "    ChatMessage(role=MessageRole.SYSTEM, content=system_prompt),\n",
-    "    ChatMessage(\n",
-    "        role=MessageRole.USER,\n",
-    "        content=\"{query_str}\",\n",
-    "    ),\n",
-    "]\n",
-    "\n",
-    "TEXT_QA_TEMPLATE = ChatPromptTemplate(chat_text_qa_msgs)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "from IPython.display import Markdown\n",
-    "from llama_index.core.data_structs import Node\n",
-    "from llama_index.core.schema import NodeWithScore\n",
-    "from llama_index.core import get_response_synthesizer\n",
-    "from llama_index.llms.gemini import Gemini\n",
-    "from llama_index.llms.openai import OpenAI\n",
-    "\n",
-    "# llm = Gemini(model=\"models/gemini-1.5-flash\", temperature=1, max_tokens=None)\n",
-    "# llm = Gemini(model=\"models/gemini-1.5-pro\", temperature=1, max_tokens=None)\n",
-    "# llm = OpenAI(temperature=1, model=\"gpt-3.5-turbo\", max_tokens=None)\n",
-    "llm = OpenAI(temperature=1, model=\"gpt-4o-mini\", max_tokens=None)\n",
-    "\n",
-    "response_synthesizer = get_response_synthesizer(\n",
-    "    llm=llm, response_mode=\"simple_summarize\", text_qa_template=TEXT_QA_TEMPLATE\n",
-    ")\n",
-    "\n",
-    "response = response_synthesizer.synthesize(query, nodes=nodes_context)\n",
-    "# print(response.response)\n",
-    "display(Markdown(response.response))\n",
-    "\n",
-    "# for src in response.source_nodes:\n",
-    "#     print(src.node.ref_doc_id)\n",
-    "#     print(\"Node ID\\t\", src.node_id)\n",
-    "#     print(\"Title\\t\", src.metadata[\"title\"])\n",
-    "#     print(\"Text\\t\", src.text)\n",
-    "#     print(\"Score\\t\", src.score)\n",
-    "#     print(\"Metadata\\t\", src.metadata)\n",
-    "#     print(\"-_\" * 20)"
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": []
-  }
- ],
- "metadata": {
-  "kernelspec": {
-   "display_name": "env",
-   "language": "python",
-   "name": "python3"
-  },
-  "language_info": {
-   "codemirror_mode": {
-    "name": "ipython",
-    "version": 3
-   },
-   "file_extension": ".py",
-   "mimetype": "text/x-python",
-   "name": "python",
-   "nbconvert_exporter": "python",
-   "pygments_lexer": "ipython3",
-   "version": "3.12.4"
-  }
- },
- "nbformat": 4,
- "nbformat_minor": 2
-}
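The deleted notebook persisted each Chroma collection (plus a pickled doc_id-to-Document map) to a local folder; those folders are what the new upload script ships to the Hub. For reference, a minimal sketch of how such a persisted collection is typically reopened with LlamaIndex, reusing the collection name and paths from the notebook above (this loader is a sketch, not part of the commit):

import pickle

import chromadb
from llama_index.core import VectorStoreIndex
from llama_index.embeddings.openai import OpenAIEmbedding
from llama_index.vector_stores.chroma import ChromaVectorStore

DB_COLLECTION = "chroma-db-openai-cookbooks"  # collection name used in the notebook

# Reopen the persisted collection instead of re-embedding everything.
chroma_client = chromadb.PersistentClient(path=f"../{DB_COLLECTION}")
chroma_collection = chroma_client.get_collection(DB_COLLECTION)
vector_store = ChromaVectorStore(chroma_collection=chroma_collection)

# Build an index view over the existing vectors; no documents are re-ingested.
index = VectorStoreIndex.from_vector_store(
    vector_store,
    embed_model=OpenAIEmbedding(model="text-embedding-3-large", mode="similarity"),
)

# The notebook also pickled a doc_id -> Document map for full-document retrieval.
with open(f"../{DB_COLLECTION}/document_dict_openai.pkl", "rb") as f:
    document_dict = pickle.load(f)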
data/scraping_scripts/upload_dbs_to_hf.py
ADDED
@@ -0,0 +1,34 @@
+"""
+Hugging Face Data Upload Script
+
+Purpose:
+This script uploads a local folder to a Hugging Face dataset repository. It's designed to
+update or create a dataset on the Hugging Face Hub by uploading the contents of a specified
+local folder.
+
+Usage:
+- Run the script: python data/scraping_scripts/upload_dbs_to_hf.py
+
+The script will:
+- Upload the contents of the 'data' folder to the specified Hugging Face dataset repository.
+- https://huggingface.co/datasets/towardsai-buster/ai-tutor-vector-db
+
+Configuration:
+- The script is set to upload to the "towardsai-buster/ai-tutor-vector-db" dataset repository.
+- It ignores files with extensions .jsonl, .py, .txt, and .ipynb.
+- It deletes all existing files in the repository before uploading (due to delete_patterns=["*"]).
+"""
+
+from huggingface_hub import HfApi
+
+api = HfApi()
+
+api.upload_folder(
+    folder_path="data",
+    repo_id="towardsai-buster/ai-tutor-vector-db",
+    repo_type="dataset",
+    multi_commits=True,
+    multi_commits_verbose=True,
+    delete_patterns=["*"],
+    ignore_patterns=["*.jsonl", "*.py", "*.txt", "*.ipynb"],
+)
scripts/setup.py
CHANGED
@@ -11,8 +11,7 @@ from llama_index.core.node_parser import SentenceSplitter
 from llama_index.core.retrievers import VectorIndexRetriever
 from llama_index.embeddings.openai import OpenAIEmbedding
 from llama_index.vector_stores.chroma import ChromaVectorStore
-
-# from utils import init_mongo_db
+from utils import init_mongo_db
 
 load_dotenv()
 
@@ -108,11 +107,11 @@ AVAILABLE_SOURCES = [
     # "rag_course",
 ]
 
-
-
-
-
-
+mongo_db = (
+    init_mongo_db(uri=MONGODB_URI, db_name="towardsai-buster")
+    if MONGODB_URI
+    else logfire.warn("No mongodb uri found, you will not be able to save data.")
+)
 
 __all__ = [
     "custom_retriever_transformers",
@@ -121,8 +120,8 @@ __all__ = [
     "custom_retriever_llama_index",
     "custom_retriever_openai_cookbooks",
    "custom_retriever_langchain",
+    "mongo_db",
     "CONCURRENCY_COUNT",
-    "MONGODB_URI",
     "AVAILABLE_SOURCES_UI",
     "AVAILABLE_SOURCES",
 ]
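The diff imports init_mongo_db from utils, but its body lies outside these hunks. A plausible shape for it, assuming a standard pymongo client (hypothetical, for illustration only; the real helper may differ):

# Hypothetical sketch of utils.init_mongo_db; its actual body is not in this diff.
from pymongo import MongoClient


def init_mongo_db(uri: str, db_name: str):
    # Connect to the cluster and return a handle to the named database.
    client = MongoClient(uri)
    return client[db_name]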