kaykyramos committed on
Commit fd60686 · verified · 1 Parent(s): 7b54ef1

Upload create-dataset.ipynb

Files changed (1)
  1. create-dataset.ipynb +459 -0
create-dataset.ipynb ADDED
@@ -0,0 +1,459 @@
+ {
+ "cells": [
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Install dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "%pip install pandas pyarrow tqdm requests ipywidgets huggingface_hub"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Import dependencies"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import os\n",
+ "from pathlib import Path\n",
+ "from datetime import datetime\n",
+ "import requests\n",
+ "import zipfile\n",
+ "import concurrent.futures\n",
+ "import pandas as pd\n",
+ "from functools import partial\n",
+ "from tqdm.auto import tqdm\n",
+ "import ipywidgets as widgets\n",
+ "from IPython.display import display\n",
+ "import time"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Download the Binance dataset"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Generate every month between the start and end dates\n",
+ "def generate_months(start_date, end_date):\n",
+ "    current = start_date\n",
+ "    while current <= end_date:\n",
+ "        yield current\n",
+ "        # Advance to the next month\n",
+ "        if current.month == 12:\n",
+ "            current = datetime(current.year + 1, 1, 1)\n",
+ "        else:\n",
+ "            current = datetime(current.year, current.month + 1, 1)\n",
+ "\n",
+ "# Download a single file, updating its progress bar and speed label\n",
+ "def download_file(url, dest_path, progress_bar, speed_label):\n",
+ "    try:\n",
+ "        with requests.get(url, stream=True) as response:\n",
+ "            response.raise_for_status()  # Raise an exception for HTTP errors\n",
+ "            total_size = int(response.headers.get('content-length', 0))\n",
+ "            block_size = 1024  # 1 kibibyte\n",
+ "            downloaded = 0\n",
+ "            start_time = time.time()\n",
+ "            with open(dest_path, 'wb') as f:\n",
+ "                for data in response.iter_content(block_size):\n",
+ "                    if data:\n",
+ "                        f.write(data)\n",
+ "                        downloaded += len(data)\n",
+ "                        progress_bar.value = downloaded\n",
+ "                        elapsed_time = time.time() - start_time\n",
+ "                        speed = downloaded / elapsed_time if elapsed_time > 0 else 0\n",
+ "                        speed_label.value = f\"Speed: {speed/1024:.2f} KB/s\"\n",
+ "        progress_bar.description = f\"✅ {dest_path.name}\"\n",
+ "    except requests.exceptions.HTTPError as http_err:\n",
+ "        progress_bar.description = f\"❌ {dest_path.name}\"\n",
+ "        speed_label.value = f\"HTTP error: {http_err}\"\n",
+ "    except Exception as err:\n",
+ "        progress_bar.description = f\"❌ {dest_path.name}\"\n",
+ "        speed_label.value = f\"Error: {err}\"\n",
+ "\n",
+ "# Start and end dates\n",
+ "start_date = datetime(2017, 8, 1)  # August 2017\n",
+ "end_date = datetime(2024, 9, 1)  # September 2024\n",
+ "\n",
+ "# Base URL pattern\n",
+ "base_url = \"https://data.binance.vision/data/spot/monthly/trades/BTCUSDT/BTCUSDT-trades-{year}-{month:02d}.zip\"\n",
+ "\n",
+ "# Download directory\n",
+ "download_dir = Path(\"./dataset-raw\")\n",
+ "download_dir.mkdir(parents=True, exist_ok=True)\n",
+ "\n",
+ "# Build the list of download tasks\n",
+ "download_tasks = []\n",
+ "for single_date in generate_months(start_date, end_date):\n",
+ "    year = single_date.year\n",
+ "    month = single_date.month\n",
+ "    file_suffix = f\"{year}-{month:02d}\"\n",
+ "\n",
+ "    # File paths\n",
+ "    csv_file = download_dir / f\"BTCUSDT-trades-{file_suffix}.csv\"\n",
+ "    zip_file = download_dir / f\"BTCUSDT-trades-{file_suffix}.zip\"\n",
+ "\n",
+ "    # Skip if the CSV already exists\n",
+ "    if csv_file.exists():\n",
+ "        print(f\"📄 CSV already exists: {csv_file.name}. Skipping download.\")\n",
+ "        continue\n",
+ "    # Skip if the ZIP already exists\n",
+ "    elif zip_file.exists():\n",
+ "        print(f\"📦 ZIP already exists: {zip_file.name}. Skipping download.\")\n",
+ "        continue\n",
+ "    else:\n",
+ "        # Build the download URL\n",
+ "        url = base_url.format(year=year, month=month)\n",
+ "        download_tasks.append((url, zip_file))\n",
+ "\n",
+ "# Maximum number of threads\n",
+ "max_workers = 1  # Adjust to your system and network capacity\n",
+ "\n",
+ "# Main function that manages the downloads\n",
+ "def main_download(download_tasks, max_workers=5):\n",
+ "    if not download_tasks:\n",
+ "        print(\"✅ No download tasks to run.\")\n",
+ "        return\n",
+ "\n",
+ "    # Create one progress widget per download\n",
+ "    download_widgets = []\n",
+ "    for url, dest_path in download_tasks:\n",
+ "        speed_label = widgets.Label(value=\"Speed: 0 KB/s\")\n",
+ "        progress_bar = widgets.IntProgress(\n",
+ "            value=0,\n",
+ "            min=0,\n",
+ "            max=1,  # Updated once the total size is known\n",
+ "            description=dest_path.name,\n",
+ "            bar_style='',  # 'success', 'info', 'warning', 'danger' or ''\n",
+ "            orientation='horizontal'\n",
+ "        )\n",
+ "        download_widgets.append(widgets.VBox([progress_bar, speed_label]))\n",
+ "\n",
+ "    # Display all download widgets\n",
+ "    container = widgets.VBox(download_widgets)\n",
+ "    display(container)\n",
+ "\n",
+ "    # Initialize the ThreadPoolExecutor\n",
+ "    with concurrent.futures.ThreadPoolExecutor(max_workers=max_workers) as executor:\n",
+ "        # Submit every download task\n",
+ "        futures = []\n",
+ "        for i, (url, dest_path) in enumerate(download_tasks):\n",
+ "            # Set the progress bar maximum once the total size is known\n",
+ "            try:\n",
+ "                head = requests.head(url, allow_redirects=True)\n",
+ "                total_size = int(head.headers.get('content-length', 0))\n",
+ "                if total_size == 0:\n",
+ "                    # Fall back to a GET request to obtain the size\n",
+ "                    with requests.get(url, stream=True) as response:\n",
+ "                        response.raise_for_status()\n",
+ "                        total_size = int(response.headers.get('content-length', 0))\n",
+ "            except Exception as e:\n",
+ "                print(f\"❌ Could not determine the size of {dest_path.name}: {e}\")\n",
+ "                continue\n",
+ "\n",
+ "            progress_bar = download_widgets[i].children[0]\n",
+ "            speed_label = download_widgets[i].children[1]\n",
+ "            progress_bar.max = total_size\n",
+ "\n",
+ "            future = executor.submit(download_file, url, dest_path, progress_bar, speed_label)\n",
+ "            futures.append(future)\n",
+ "\n",
+ "        # Wait for every task to finish\n",
+ "        for future in concurrent.futures.as_completed(futures):\n",
+ "            pass  # All updates happen inside download_file\n",
+ "\n",
+ "    print(\"🎉 All downloads finished.\")\n",
+ "\n",
+ "# Run the downloads\n",
+ "main_download(download_tasks, max_workers)\n"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Process the dataset from .zip to .parquet"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Column names based on the structure of the data\n",
+ "COLUMN_NAMES = [\n",
+ "    'trade_id',\n",
+ "    'price',\n",
+ "    'qty',\n",
+ "    'quoteQty',\n",
+ "    'time',\n",
+ "    'isBuyerMaker',\n",
+ "    'isBestMatch'\n",
+ "]\n",
+ "\n",
+ "import os\n",
+ "import zipfile\n",
+ "import pandas as pd\n",
+ "import pyarrow as pa\n",
+ "import pyarrow.parquet as pq\n",
+ "\n",
+ "def extract_zip(file_path, extract_to):\n",
+ "    \"\"\"\n",
+ "    Extracts a ZIP file and removes it after a successful extraction.\n",
+ "\n",
+ "    :param file_path: Full path to the ZIP file.\n",
+ "    :param extract_to: Directory where the files will be extracted.\n",
+ "    \"\"\"\n",
+ "    try:\n",
+ "        with zipfile.ZipFile(file_path, 'r') as zip_ref:\n",
+ "            zip_ref.extractall(extract_to)\n",
+ "        os.remove(file_path)\n",
+ "        print(f\"Extraction finished: {os.path.basename(file_path)}\")\n",
+ "    except zipfile.BadZipFile:\n",
+ "        print(f\"Corrupted file: {os.path.basename(file_path)}\")\n",
+ "    except Exception as e:\n",
+ "        print(f\"Error processing {os.path.basename(file_path)}: {e}\")\n",
+ "\n",
+ "def extract_and_delete(zip_dir_path):\n",
+ "    \"\"\"\n",
+ "    Extracts every ZIP file in a directory and removes them after extraction.\n",
+ "\n",
+ "    :param zip_dir_path: Directory containing the ZIP files.\n",
+ "    \"\"\"\n",
+ "    # List every ZIP file in the given directory\n",
+ "    zip_files = [\n",
+ "        os.path.join(zip_dir_path, f)\n",
+ "        for f in os.listdir(zip_dir_path)\n",
+ "        if f.lower().endswith('.zip')\n",
+ "    ]\n",
+ "\n",
+ "    if not zip_files:\n",
+ "        print(\"No ZIP files found to extract.\")\n",
+ "        return\n",
+ "\n",
+ "    print(f\"Starting extraction of {len(zip_files)} ZIP files...\")\n",
+ "\n",
+ "    for zip_file in zip_files:\n",
+ "        extract_zip(zip_file, zip_dir_path)\n",
+ "\n",
+ "    print(\"ZIP extraction finished.\")\n",
+ "\n",
+ "def process_csv_directory(directory_path, output_parquet_path):\n",
+ "    \"\"\"\n",
+ "    Processes every CSV file in a directory and writes the combined data to a single Parquet file,\n",
+ "    reading in chunks to stay within the available memory.\n",
+ "\n",
+ "    :param directory_path: Directory containing the CSV files.\n",
+ "    :param output_parquet_path: Path where the Parquet file will be written.\n",
+ "    \"\"\"\n",
+ "    csv_files = [\n",
+ "        os.path.join(directory_path, f)\n",
+ "        for f in sorted(os.listdir(directory_path))\n",
+ "        if f.lower().endswith('.csv')\n",
+ "    ]\n",
+ "\n",
+ "    if not csv_files:\n",
+ "        print(\"No CSV files found to process.\")\n",
+ "        return\n",
+ "\n",
+ "    print(f\"Starting to process {len(csv_files)} CSV files...\")\n",
+ "\n",
+ "    # Initialize the ParquetWriter\n",
+ "    writer = None\n",
+ "\n",
+ "    for idx, file_path in enumerate(csv_files):\n",
+ "        try:\n",
+ "            # Read the CSV in chunks to save memory\n",
+ "            for df_chunk in pd.read_csv(file_path, header=None, names=COLUMN_NAMES, chunksize=100000):\n",
+ "                # Convert 'time' from milliseconds to datetime\n",
+ "                df_chunk['time'] = pd.to_datetime(df_chunk['time'], unit='ms')\n",
+ "                table = pa.Table.from_pandas(df_chunk)\n",
+ "                if writer is None:\n",
+ "                    # Create the ParquetWriter on the first chunk\n",
+ "                    writer = pq.ParquetWriter(output_parquet_path, table.schema, compression='snappy')\n",
+ "                writer.write_table(table)\n",
+ "            print(f\"Data from {os.path.basename(file_path)} processed.\")\n",
+ "        except Exception as e:\n",
+ "            print(f\"Error processing {os.path.basename(file_path)}: {e}\")\n",
+ "\n",
+ "    # Close the ParquetWriter\n",
+ "    if writer:\n",
+ "        writer.close()\n",
+ "        print(f\"Combined data saved successfully to {output_parquet_path}\")\n",
+ "    else:\n",
+ "        print(\"No data was written to the Parquet file.\")\n",
+ "\n",
+ "    print(\"Processing finished.\")\n"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "# Path to the directory containing the ZIP and CSV files\n",
+ "zip_dir_path = './dataset-raw'  # Replace with your directory path\n",
+ "\n",
+ "# Path for the output Parquet file\n",
+ "output_parquet_path = './BTCUSDT-Dataset.parquet'  # Replace with your desired output path\n",
+ "\n",
+ "# Step 1: Extract the ZIP files and delete them afterwards\n",
+ "extract_and_delete(zip_dir_path)\n",
+ "\n",
+ "# Step 2: Process the CSV files and save them as a single Parquet file\n",
+ "process_csv_directory(zip_dir_path, output_parquet_path)"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Partition the dataset into smaller files of at most 4 GB each"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "import pyarrow.parquet as pq\n",
+ "import pyarrow as pa\n",
+ "\n",
+ "# Path of the original file\n",
+ "input_file = \"/Users/lordramos/Desktop/binance spot data/BTCUSDT-Dataset.parquet\"\n",
+ "\n",
+ "# Path where the split files will be saved\n",
+ "output_dir = \"/Users/lordramos/Desktop/binance spot data/\"\n",
+ "\n",
+ "# Maximum size of each file in bytes (4 GB = 4 * 1024^3 bytes)\n",
+ "max_size = 4 * 1024**3\n",
+ "\n",
+ "# Initialize the state variables\n",
+ "part_number = 1\n",
+ "current_size = 0\n",
+ "output_file = f\"{output_dir}BTCUSDT-Dataset-part-{part_number}.parquet\"\n",
+ "writer = None\n",
+ "\n",
+ "# Read the original file in batches\n",
+ "for batch in pq.ParquetFile(input_file).iter_batches(batch_size=10000):\n",
+ "    table = pa.Table.from_batches([batch])\n",
+ "\n",
+ "    # Size of the current batch\n",
+ "    batch_size = table.nbytes\n",
+ "\n",
+ "    # Check whether the current size plus the new batch would exceed the 4 GB limit\n",
+ "    if current_size + batch_size > max_size:\n",
+ "        # Close the current file and start a new one\n",
+ "        if writer:\n",
+ "            writer.close()\n",
+ "        part_number += 1\n",
+ "        output_file = f\"{output_dir}BTCUSDT-Dataset-part-{part_number}.parquet\"\n",
+ "        current_size = 0  # Reset the running size for the new file\n",
+ "        writer = None\n",
+ "\n",
+ "    # If no writer is open yet, start a new one\n",
+ "    if writer is None:\n",
+ "        writer = pq.ParquetWriter(output_file, table.schema)\n",
+ "\n",
+ "    # Write the batch to the current file and update the running size\n",
+ "    writer.write_table(table)\n",
+ "    current_size += batch_size\n",
+ "\n",
+ "# Close the last file\n",
+ "if writer:\n",
+ "    writer.close()"
+ ]
+ },
+ {
+ "cell_type": "markdown",
+ "metadata": {},
+ "source": [
+ "# Upload to HuggingFace"
+ ]
+ },
+ {
+ "cell_type": "code",
+ "execution_count": null,
+ "metadata": {},
+ "outputs": [],
+ "source": [
+ "from huggingface_hub import login, HfApi, HfFolder\n",
+ "import os\n",
+ "\n",
+ "login(token=\"\")\n",
+ "\n",
+ "# Authentication settings and dataset details\n",
+ "token = HfFolder.get_token()  # Assumes the token is already saved locally\n",
+ "api = HfApi()\n",
+ "dataset_id = \"orion-research/btcusdt-spot-dataset\"  # Replace with the name of your dataset\n",
+ "\n",
+ "# Folder where the partitioned files are stored\n",
+ "output_dir = \"/Users/lordramos/Desktop/binance spot data/\"\n",
+ "\n",
+ "# Create the repository on the Hugging Face Hub (in case it has not been created yet)\n",
+ "api.create_repo(repo_id=dataset_id, repo_type=\"dataset\", token=token, private=False, exist_ok=True)\n",
+ "\n",
+ "# Upload each partitioned file\n",
+ "for file_name in os.listdir(output_dir):\n",
+ "    if file_name.startswith(\"BTCUSDT-Dataset-part-\") and file_name.endswith(\".parquet\"):\n",
+ "        file_path = os.path.join(output_dir, file_name)\n",
+ "\n",
+ "        # Upload the file\n",
+ "        api.upload_file(\n",
+ "            path_or_fileobj=file_path,\n",
+ "            path_in_repo=file_name,\n",
+ "            repo_id=dataset_id,\n",
+ "            repo_type=\"dataset\",\n",
+ "            token=token,\n",
+ "        )\n",
+ "        print(f\"{file_name} upload completed.\")\n"
+ ]
+ }
+ ],
+ "metadata": {
+ "kernelspec": {
+ "display_name": "mlfinlab",
+ "language": "python",
+ "name": "python3"
+ },
+ "language_info": {
+ "codemirror_mode": {
+ "name": "ipython",
+ "version": 3
+ },
+ "file_extension": ".py",
+ "mimetype": "text/x-python",
+ "name": "python",
+ "nbconvert_exporter": "python",
+ "pygments_lexer": "ipython3",
+ "version": "3.8.20"
+ }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+ }
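
After the upload, it can be worth sanity-checking the published files. The snippet below is a minimal sketch that is not part of the committed notebook: it assumes the repo id and part naming used above, that part 1 exists (each part can be up to 4 GB, so this download is not small), and that huggingface_hub and pandas are installed.

# Hypothetical post-upload check (not in the committed notebook):
# fetch one partition from the Hub and confirm its schema and time range.
import pandas as pd
from huggingface_hub import hf_hub_download

# Repo id and file naming follow the notebook above; part 1 is an arbitrary choice.
path = hf_hub_download(
    repo_id="orion-research/btcusdt-spot-dataset",
    filename="BTCUSDT-Dataset-part-1.parquet",
    repo_type="dataset",
)

df = pd.read_parquet(path)
print(df.shape)   # rows x 7 columns: trade_id, price, qty, quoteQty, time, isBuyerMaker, isBestMatch
print(df.dtypes)  # 'time' should be datetime64[ns] after the millisecond conversion
print(df["time"].min(), df["time"].max())  # expected to fall within 2017-08 .. 2024-09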