kaykyramos
committed
Upload 18 files
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.0.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.1.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.10.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.11.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.12.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.13.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.14.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.2.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.3.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.4.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.5.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.6.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.7.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.8.parquet +3 -0
- output/dollar-bars-[10000000]/dollar_bars.parquet/part.9.parquet +3 -0
- src/0 - Estruturação dos Dados.ipynb +152 -0
- src/1 - Análise Exploratória.ipynb +1140 -0
- src/utils.py +372 -0
output/dollar-bars-[10000000]/dollar_bars.parquet/part.0.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9fbb9b77421f98adbcde2a8facdf7d17fc6b48b407b29383982b1891c6dc55b8
+size 636558
output/dollar-bars-[10000000]/dollar_bars.parquet/part.1.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:05be83ace8006b0e6a0ee6c830255478f55802c0c2c3a620cba413307cfb35fe
+size 1295026
output/dollar-bars-[10000000]/dollar_bars.parquet/part.10.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:302de7cb3fc6c45b49e12231497d28a5814587a0c442f6e47bfa1257a059e8d9
+size 1286041
output/dollar-bars-[10000000]/dollar_bars.parquet/part.11.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3a512d57ab6e12fb808892a3a2a95487cb5fe1de7c3de52f12f713ab026647d8
+size 651646
output/dollar-bars-[10000000]/dollar_bars.parquet/part.12.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e9a3d9cbe38b6837115dc33c00902dfda663ad8aefb5f8426ce5a566601f768b
+size 1268095
output/dollar-bars-[10000000]/dollar_bars.parquet/part.13.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:565647dd1545e21978d718299760e44ed5646b9d7fa9748c60f08b73797e29e4
+size 1287974
output/dollar-bars-[10000000]/dollar_bars.parquet/part.14.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4e5c7e8ceae9f83e0ef1d8cc005c1234c1db3df058bd0fcab899479114d9dc2e
+size 1269338
output/dollar-bars-[10000000]/dollar_bars.parquet/part.2.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6d5c3ef8fcb9a9a7c39eef8e712ba77cbd873f03adef700e7e98cbbd4755135c
+size 1294684
output/dollar-bars-[10000000]/dollar_bars.parquet/part.3.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:06d39b0143016df4e66ed7dc764c64530b924f1e26a2a42013abed0193cbdd14
+size 626818
output/dollar-bars-[10000000]/dollar_bars.parquet/part.4.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:659d99025a8e13a32541b15dcc78c4196e3dfb9ff042c2db5d05827f92516a2e
+size 1274874
output/dollar-bars-[10000000]/dollar_bars.parquet/part.5.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8b7b7f28d46871cc104a9d0444abac63ac7e9021244d7a1d192ca5f1efeda1d4
+size 1278170
output/dollar-bars-[10000000]/dollar_bars.parquet/part.6.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:11fd0f383e8ad93f9574c8251adc9c2748283f39b387496fd09550d432f3bc40
+size 1250555
output/dollar-bars-[10000000]/dollar_bars.parquet/part.7.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8106d2d703420b72e5e46bcbc6411bab00eb675506c05c0f9a2fb0ca021d24f8
+size 622727
output/dollar-bars-[10000000]/dollar_bars.parquet/part.8.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:cf0961bed2374826fb4750e8238a408bf95bc8e87649b67c830ac87fc06e4c1b
+size 1279706
output/dollar-bars-[10000000]/dollar_bars.parquet/part.9.parquet
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:18c3fca1c2641f3f8b3d158544a9e5946aec96c974fb95d2b590f8dcc3c85c3b
+size 1278043
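
The fifteen part.N.parquet files above are Git LFS pointers to a single partitioned Parquet dataset (the dollar bars sampled at the 10,000,000 threshold). A minimal sketch of how it can be read back, assuming the LFS objects were materialized with `git lfs pull` and that dask is installed (the same stack the notebooks below use):

import dask.dataframe as dd

# Glob all fifteen partitions into one Dask DataFrame, then materialize
# it as a single in-memory pandas DataFrame.
bars = dd.read_parquet("output/dollar-bars-[10000000]/dollar_bars.parquet/*.parquet")
dollar_bars = bars.compute()
print(dollar_bars.shape)
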
src/0 - Estruturação dos Dados.ipynb
ADDED
@@ -0,0 +1,152 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Transform the raw data (Binance BTCUSDT, all trades) -> OHLC"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "raw_dataset_path = '../datasets/BTCUSDT-Trades/'\n",
+    "output_path = '../output'"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "from loguru import logger\n",
+    "import dask.dataframe as dd\n",
+    "from dask.diagnostics import ProgressBar\n",
+    "import pandas as pd\n",
+    "\n",
+    "logger.remove()\n",
+    "logger.add(lambda msg: print(msg, end=\"\"), level=\"INFO\")\n",
+    "\n",
+    "from utils import create_dollar_bars\n",
+    "\n",
+    "def dataSampler(barType=\"standard\", samplerType=\"time\", samplerAmount=100, maxRecords=None):\n",
+    "    barTypeDictionary = {\n",
+    "        \"standard\": \"Standard\",\n",
+    "        \"imbalance\": \"Imbalance\",\n",
+    "        \"run\": \"Run\"\n",
+    "    }\n",
+    "    samplerDictionary = {\n",
+    "        \"time\": \"Time\",\n",
+    "        \"ticks\": \"Tick\",\n",
+    "        \"volume\": \"Volume\",\n",
+    "        \"dollar\": \"Dollar\"\n",
+    "    }\n",
+    "\n",
+    "    output_directory = os.path.join(output_path, f\"{samplerType}-bars-[{samplerAmount}]\")\n",
+    "    if not os.path.exists(output_directory):\n",
+    "        os.makedirs(output_directory)\n",
+    "        logger.info(f\"Directory created: {output_directory}\")\n",
+    "\n",
+    "    print(f\"Creating {barTypeDictionary[barType]} {samplerDictionary[samplerType]} bars, one bar per {samplerAmount}...\")\n",
+    "\n",
+    "    # Check whether .parquet files already exist in the output directory\n",
+    "    parquet_files_output = [f for f in os.listdir(output_directory) if f.endswith('.parquet')]\n",
+    "    if parquet_files_output:\n",
+    "        logger.info(f\"'{output_directory}' already exists and contains .parquet files. Loading from {output_directory}...\")\n",
+    "\n",
+    "        # Load all .parquet files with Dask\n",
+    "        try:\n",
+    "            bars = dd.read_parquet(os.path.join(output_directory, '*.parquet')).compute()\n",
+    "            logger.info(\"'dollar_bars' loaded successfully.\")\n",
+    "            return bars\n",
+    "        except Exception as e:\n",
+    "            logger.error(f\"Error loading .parquet files: {e}\")\n",
+    "\n",
+    "    logger.info(\"Creating 'dollar_bars'...\")\n",
+    "\n",
+    "    dollar_bars_path = os.path.join(output_directory, 'dollar_bars.parquet')\n",
+    "\n",
+    "    # List every Parquet file under raw_dataset_path\n",
+    "    parquet_files = [os.path.join(raw_dataset_path, f) for f in os.listdir(raw_dataset_path) if f.endswith('.parquet')]\n",
+    "    parquet_files.sort()\n",
+    "\n",
+    "    if not parquet_files:\n",
+    "        logger.warning(f\"No .parquet files found in '{raw_dataset_path}'.\")\n",
+    "        return []\n",
+    "\n",
+    "    logger.info(f\"Total number of .parquet files to process: {len(parquet_files)}\")\n",
+    "\n",
+    "    # Load all raw .parquet files directly with Dask\n",
+    "    try:\n",
+    "        df_dask = dd.read_parquet(os.path.join(raw_dataset_path, '*.parquet'))\n",
+    "        logger.info(\"All .parquet files were loaded successfully.\")\n",
+    "    except Exception as e:\n",
+    "        logger.error(f\"Error loading .parquet files: {e}\")\n",
+    "        return []\n",
+    "\n",
+    "    # If maxRecords is set, cap the DataFrame\n",
+    "    if maxRecords is not None:\n",
+    "        df_dask = df_dask.head(maxRecords, compute=False)\n",
+    "        logger.info(f\"Record limit set to {maxRecords}.\")\n",
+    "\n",
+    "    # Build and save 'dollar_bars'\n",
+    "    try:\n",
+    "        dollar_bars = create_dollar_bars(df_dask, samplerAmount, dollar_bars_path)\n",
+    "        logger.info(\"'dollar_bars' created and saved successfully.\")\n",
+    "        return dollar_bars\n",
+    "    except Exception as e:\n",
+    "        logger.error(f\"Error creating 'dollar_bars': {e}\")\n",
+    "        return []\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Run the Sampling"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "serieBars = dataSampler(\n",
+    "    barType=\"standard\",\n",
+    "    samplerType=\"dollar\",\n",
+    "    samplerAmount=10_000_000\n",
+    ")\n",
+    "\n",
+    "sample_bars = serieBars.head()\n",
+    "display(sample_bars)"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "base",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.12.4"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
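
The notebook above imports create_dollar_bars from src/utils.py, which is part of this upload but not shown in this view, so its real signature, column names, and Dask handling may differ. As a rough, hypothetical sketch of what a dollar-bar builder of that shape might do: one bar closes each time the cumulative traded dollar value crosses the threshold. Here 'price' and 'qty' are assumed trade-dump column names, the function works on a plain pandas frame rather than the Dask frame the notebook passes, and it does not write to dollar_bars_path as the real function apparently does:

import numpy as np
import pandas as pd

def make_dollar_bars_sketch(trades: pd.DataFrame, threshold: float) -> pd.DataFrame:
    # Cumulative dollar value traded; a new bar starts whenever the
    # running total crosses another multiple of the threshold.
    dollar_value = (trades['price'] * trades['qty']).cumsum()
    bar_id = (dollar_value // threshold).astype(np.int64)
    grouped = trades.groupby(bar_id)
    # OHLC per bar, using the column names the analysis notebook expects.
    return pd.DataFrame({
        'price_open': grouped['price'].first(),
        'price_high': grouped['price'].max(),
        'price_low': grouped['price'].min(),
        'price_close': grouped['price'].last(),
        'volume': grouped['qty'].sum(),
    })
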
src/1 - Análise Exploratória.ipynb
ADDED
@@ -0,0 +1,1140 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import numpy as np\n",
+    "import pandas as pd\n",
+    "from numba import njit\n",
+    "from statsmodels.tsa.stattools import adfuller\n",
+    "from sklearn.decomposition import PCA\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "from sklearn.linear_model import LinearRegression\n",
+    "from sklearn.model_selection import cross_val_score\n",
+    "import matplotlib.pyplot as plt\n",
+    "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
+    "import statsmodels.api as sm\n",
+    "\n",
+    "@njit\n",
+    "def calculate_weights_numba(d, thresh):\n",
+    "    w = [1.0]\n",
+    "    k = 1\n",
+    "    while True:\n",
+    "        w_ = -w[-1] * (d - k + 1) / k\n",
+    "        if abs(w_) < thresh:\n",
+    "            break\n",
+    "        w.append(w_)\n",
+    "        k += 1\n",
+    "    return np.array(w[::-1])\n",
+    "\n",
+    "@njit\n",
+    "def frac_diff_numba(series_values, w, width):\n",
+    "    n = len(series_values)\n",
+    "    diff_series = np.empty(n)\n",
+    "    diff_series[:] = np.nan\n",
+    "    for i in range(width, n):\n",
+    "        diff_value = 0.0\n",
+    "        for j in range(width):\n",
+    "            diff_value += w[j] * series_values[i - width + j]\n",
+    "        diff_series[i] = diff_value\n",
+    "    return diff_series\n",
+    "\n",
+    "def frac_diff_optimized(series, d, thresh=1e-5):\n",
+    "    \"\"\"\n",
+    "    Applies fractional differencing to a time series, using Numba for speed.\n",
+    "\n",
+    "    Parameters:\n",
+    "    - series: pd.Series, the time series to difference.\n",
+    "    - d: float, fractional differencing order (0 < d < 1).\n",
+    "    - thresh: float, cutoff threshold for the binomial coefficients.\n",
+    "\n",
+    "    Returns:\n",
+    "    - pd.Series, the fractionally differenced series.\n",
+    "    \"\"\"\n",
+    "    # Compute the binomial weights\n",
+    "    w = calculate_weights_numba(d, thresh)\n",
+    "    width = len(w)\n",
+    "\n",
+    "    # Convert the series to a numpy array\n",
+    "    series_values = series.values\n",
+    "\n",
+    "    # Apply fractional differencing with Numba\n",
+    "    diff_series = frac_diff_numba(series_values, w, width)\n",
+    "\n",
+    "    return pd.Series(diff_series, index=series.index)\n",
+    "\n",
+    "def adf_test(series, title='ADF Test'):\n",
+    "    \"\"\"\n",
+    "    Runs the augmented Dickey-Fuller test and prints the results.\n",
+    "\n",
+    "    Parameters:\n",
+    "    - series: pd.Series, the time series to test.\n",
+    "    - title: str, a label for the test.\n",
+    "\n",
+    "    Returns:\n",
+    "    - None\n",
+    "    \"\"\"\n",
+    "    print(f'\\n==== {title} ====')\n",
+    "    result = adfuller(series.dropna(), autolag='AIC')\n",
+    "    labels = ['ADF Statistic', 'p-value', 'Number of Lags Used', 'Number of Observations Used']\n",
+    "    out = pd.Series(result[0:4], index=labels)\n",
+    "    for key, value in result[4].items():\n",
+    "        out[f'Critical Value ({key})'] = value\n",
+    "    print(out.to_string())\n",
+    "    if result[1] <= 0.05:\n",
+    "        print(\"Strong evidence against the null hypothesis (presence of a unit root): we reject the null.\")\n",
+    "    else:\n",
+    "        print(\"Weak evidence against the null hypothesis: we fail to reject the null.\")\n",
+    "\n",
+    "def create_lag_matrix(series, n_lags):\n",
+    "    \"\"\"\n",
+    "    Builds a lag matrix for a time series.\n",
+    "\n",
+    "    Parameters:\n",
+    "    - series: pd.Series, the time series.\n",
+    "    - n_lags: int, number of lags to create.\n",
+    "\n",
+    "    Returns:\n",
+    "    - X: np.ndarray, lag matrix.\n",
+    "    - y: np.ndarray, aligned target variable.\n",
+    "    \"\"\"\n",
+    "    # Build a DataFrame of lags\n",
+    "    df_lags = pd.concat([series.shift(i) for i in range(1, n_lags + 1)], axis=1)\n",
+    "    df_lags.columns = [f'lag_{i}' for i in range(1, n_lags + 1)]\n",
+    "\n",
+    "    # Drop rows with NaN introduced by the shifts\n",
+    "    df_lags = df_lags.dropna()\n",
+    "\n",
+    "    # Target variable\n",
+    "    y = series.loc[df_lags.index].values\n",
+    "\n",
+    "    # Convert to a NumPy array\n",
+    "    X = df_lags.values\n",
+    "\n",
+    "    return X, y\n",
+    "\n",
+    "def monteCarlo(serie_predita, constant, params, order):\n",
+    "    out = len(serie_predita)\n",
+    "    Y = np.zeros(out)\n",
+    "    Y[:order] = serie_predita.iloc[:order]\n",
+    "\n",
+    "    for i in range(order, out):\n",
+    "        # Extract the relevant window and transpose the coefficients\n",
+    "        data = serie_predita.iloc[i-order:i].values\n",
+    "        phi_transpose = np.transpose(params)\n",
+    "\n",
+    "        Y[i] = data @ phi_transpose + constant\n",
+    "\n",
+    "    Y = pd.DataFrame(Y)\n",
+    "    Y.index = serie_predita.index\n",
+    "    Y.rename(columns={0: 'fraqdiff_pred'}, inplace=True)\n",
+    "    return Y\n",
+    "\n",
+    "def auto_reg(order, serie_predita):\n",
+    "    \"\"\"\n",
+    "    Fits an autoregressive model and returns predictions aligned with the original series.\n",
+    "\n",
+    "    Parameters:\n",
+    "    - order (int): Order of the autoregressive model.\n",
+    "    - serie_predita (pd.Series): Time series to fit.\n",
+    "\n",
+    "    Returns:\n",
+    "    - Y_pred_aligned (pd.Series): Predictions aligned with the original series.\n",
+    "    - constant (float): Model constant.\n",
+    "    - params (pd.Series): Model coefficients.\n",
+    "    \"\"\"\n",
+    "    # Make sure serie_predita is a pandas Series\n",
+    "    if not isinstance(serie_predita, pd.Series):\n",
+    "        serie_predita = pd.Series(serie_predita)\n",
+    "\n",
+    "    # Build the lag matrix without np.roll, to avoid circular shifts\n",
+    "    X = pd.concat([serie_predita.shift(i+1) for i in range(order)], axis=1)\n",
+    "    X.columns = [f'lag_{i+1}' for i in range(order)]\n",
+    "\n",
+    "    # Drop rows with NaN caused by the lags\n",
+    "    X = X.dropna()\n",
+    "    y = serie_predita.loc[X.index]\n",
+    "\n",
+    "    # Add a constant term\n",
+    "    X = sm.add_constant(X)\n",
+    "\n",
+    "    # Fit the autoregressive model via OLS\n",
+    "    model = sm.OLS(y, X)\n",
+    "    result = model.fit()\n",
+    "\n",
+    "    # Show the model summary\n",
+    "    print(result.summary())\n",
+    "\n",
+    "    # Extract the parameters\n",
+    "    constant = result.params['const']\n",
+    "    params = result.params.drop('const')\n",
+    "\n",
+    "    # In-sample predictions\n",
+    "    Y_pred = result.predict(X)\n",
+    "\n",
+    "    # Make sure Y_pred is aligned with y\n",
+    "    Y_pred_aligned = Y_pred.copy()\n",
+    "    Y_pred_aligned.index = y.index\n",
+    "\n",
+    "    return Y_pred_aligned, constant, params\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "series = pd.read_parquet(\"../output/dollar-bars-[10000000]/dollar_bars.parquet/part.0.parquet\")\n",
+    "series.head()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "from numba import njit\n",
+    "from statsmodels.tsa.stattools import adfuller\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "from sklearn.decomposition import PCA\n",
+    "from sklearn.linear_model import LinearRegression\n",
+    "from sklearn.model_selection import cross_val_score\n",
+    "import matplotlib.pyplot as plt\n",
+    "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
+    "\n",
+    "# Fractional differencing, Numba-optimized\n",
+    "@njit\n",
+    "def frac_diff_numba(series, w, width):\n",
+    "    diff_series = np.zeros(len(series))\n",
+    "    for i in range(width, len(series)):\n",
+    "        value = 0.0\n",
+    "        for j in range(width):\n",
+    "            value += w[j] * series[i - j]\n",
+    "        diff_series[i] = value\n",
+    "    return diff_series\n",
+    "\n",
+    "def frac_diff_optimized(series, d, max_lag=100):\n",
+    "    # Compute the fractional differencing weights\n",
+    "    w = [1.0]\n",
+    "    for k in range(1, max_lag):\n",
+    "        w_ = -w[-1] * (d - k + 1) / k\n",
+    "        w.append(w_)\n",
+    "        if abs(w[-1]) < 1e-5:\n",
+    "            break\n",
+    "    width = len(w)\n",
+    "    series_values = series.values.astype(np.float64)  # Make sure the values are float64\n",
+    "    diff_series = frac_diff_numba(series_values, np.array(w, dtype=np.float64), width)\n",
+    "    return pd.Series(diff_series, index=series.index)\n",
+    "\n",
+    "# Run the ADF test\n",
+    "def adf_test(series, title=''):\n",
+    "    result = adfuller(series, autolag='AIC')\n",
+    "    print(f'== {title} ==')\n",
+    "    print(f'ADF Statistic: {result[0]}')\n",
+    "    print(f'p-value: {result[1]}')\n",
+    "    for key, value in result[4].items():\n",
+    "        print(f'Critical Value {key}: {value}')\n",
+    "    print('')\n",
+    "\n",
+    "# Build a lag matrix\n",
+    "def create_lag_matrix(series, n_lags):\n",
+    "    X = pd.concat([series.shift(i) for i in range(1, n_lags + 1)], axis=1)\n",
+    "    y = series\n",
+    "    X = X.dropna()\n",
+    "    y = y.loc[X.index]\n",
+    "    return X.values, y.values\n",
+    "\n",
+    "# Autoregressive helper (simple example)\n",
+    "def auto_reg(order, series):\n",
+    "    from statsmodels.tsa.ar_model import AutoReg\n",
+    "    model = AutoReg(series, lags=order, old_names=False)\n",
+    "    model_fit = model.fit()\n",
+    "    Y_pred = model_fit.predict(start=order, end=len(series)-1)\n",
+    "    constant = model_fit.params[0]\n",
+    "    params = model_fit.params[1:]\n",
+    "    return Y_pred, constant, params\n",
+    "\n",
+    "# Read the series\n",
+    "series = pd.read_parquet(\"../output/dollar-bars-[10000000]/dollar_bars.parquet/part.0.parquet\")\n",
+    "print(series.head())\n",
+    "\n",
+    "# Set the fractional differencing order\n",
+    "d = 0.3\n",
+    "\n",
+    "# Apply fractional differencing to the 'price_close' column rather than the whole DataFrame\n",
+    "print(\"Applying fractional differencing to the 'price_close' series...\")\n",
+    "series_frac_diff = frac_diff_optimized(series['price_close'], d).dropna()\n",
+    "print(\"Fractional differencing complete.\")\n",
+    "\n",
+    "# Run the ADF test on the full differenced series\n",
+    "adf_test(series_frac_diff, title=f'Close Price (price_close) - Fractional Differencing d={d}')\n",
+    "\n",
+    "# Show the first rows of the differenced series\n",
+    "display(series_frac_diff.head())\n",
+    "\n",
+    "# Set the sample size (5% of the data)\n",
+    "sample_size = int(len(series_frac_diff) * 0.05)  # Increased to 5%\n",
+    "series_frac_diff_sample = series_frac_diff.sample(n=sample_size, random_state=42).sort_index()\n",
+    "\n",
+    "# Run the ADF test on the 5% sample\n",
+    "adf_test(series_frac_diff_sample, title=f'Close Price (price_close) - Fractional Differencing d={d} (5% Sample)')\n",
+    "\n",
+    "# Show the first rows of the differenced sample\n",
+    "display(series_frac_diff_sample.head())\n",
+    "\n",
+    "# Build the lag matrix\n",
+    "n_lags = 5\n",
+    "X, y = create_lag_matrix(series_frac_diff, n_lags)\n",
+    "print(f\"Lag matrix created with shape: {X.shape}\")\n",
+    "print(f\"Target variable (y) created with shape: {y.shape}\")\n",
+    "\n",
+    "# Apply PCA\n",
+    "scaler = StandardScaler()\n",
+    "X_scaled = scaler.fit_transform(X)\n",
+    "pca = PCA()\n",
+    "X_pca = pca.fit_transform(X_scaled)\n",
+    "cumulative_variance = np.cumsum(pca.explained_variance_ratio_)\n",
+    "print(\"Cumulative explained variance:\", cumulative_variance)\n",
+    "\n",
+    "# Choose the number of components\n",
+    "n_components_var = np.argmax(cumulative_variance >= 0.95) + 1\n",
+    "print(f\"Optimal number of components (95% of explained variance): {n_components_var}\")\n",
+    "\n",
+    "# Reduce the dimensionality\n",
+    "n_components = n_components_var\n",
+    "pca_reduced = PCA(n_components=n_components)\n",
+    "X_reduced = pca_reduced.fit_transform(X_scaled)\n",
+    "print(f\"Matrix shape after PCA: {X_reduced.shape}\")\n",
+    "\n",
+    "# Use the reduced matrix in a model\n",
+    "model = LinearRegression()\n",
+    "scores = cross_val_score(model, X_reduced, y, cv=5, scoring='neg_mean_squared_error')\n",
+    "print(f\"Mean MSE (5-fold CV): {-np.mean(scores):.4f}\")\n",
+    "\n",
+    "# Plot ACF and PACF\n",
+    "desired_lags = 40\n",
+    "sample_size = len(series_frac_diff_sample)\n",
+    "max_allowed_lags = sample_size // 2 - 1\n",
+    "adjusted_lags = min(desired_lags, max_allowed_lags)\n",
+    "\n",
+    "print(f\"Using lags={adjusted_lags} for the sample of size {sample_size}\")\n",
+    "\n",
+    "fig, axes = plt.subplots(2, 2, figsize=(15, 10))\n",
+    "\n",
+    "# Full series\n",
+    "plot_acf(series_frac_diff, ax=axes[0, 0], lags=desired_lags, zero=False)\n",
+    "axes[0, 0].set_title(f'ACF - Full Series (d={d})')\n",
+    "\n",
+    "plot_pacf(series_frac_diff, ax=axes[1, 0], lags=desired_lags, zero=False, method='ywm')\n",
+    "axes[1, 0].set_title(f'PACF - Full Series (d={d})')\n",
+    "\n",
+    "# 5% sample\n",
+    "plot_acf(series_frac_diff_sample, ax=axes[0, 1], lags=adjusted_lags, zero=False)\n",
+    "axes[0, 1].set_title(f'ACF - 5% Sample (d={d})')\n",
+    "\n",
+    "plot_pacf(series_frac_diff_sample, ax=axes[1, 1], lags=adjusted_lags, zero=False, method='ywm')\n",
+    "axes[1, 1].set_title(f'PACF - 5% Sample (d={d})')\n",
+    "\n",
+    "plt.tight_layout()\n",
+    "plt.show()\n",
+    "\n",
+    "# Fit the autoregressive model\n",
+    "order = 1  # Set the order of the autoregressive model\n",
+    "Y_pred, constant, params = auto_reg(order, series_frac_diff)\n",
+    "\n",
+    "# Plot the predicted series\n",
+    "plt.figure(figsize=(14,6))\n",
+    "plt.plot(series_frac_diff, label='Fractionally Differenced Series', alpha=0.5)\n",
+    "plt.plot(Y_pred, label='Predicted Series', alpha=0.7)\n",
+    "plt.legend()\n",
+    "plt.title('Fractionally Differenced Series vs. Predicted Series')\n",
+    "plt.xlabel('Date')\n",
+    "plt.ylabel('Differenced Price')\n",
+    "plt.show()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Residuals"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "\n",
+    "# Convert y to a pandas Series\n",
+    "y_series = pd.Series(y, name='y_true')\n",
+    "\n",
+    "# Check whether Y_pred is a single-column DataFrame\n",
+    "if isinstance(Y_pred, pd.DataFrame):\n",
+    "    if Y_pred.shape[1] == 1:\n",
+    "        # Convert to a Series\n",
+    "        Y_pred_series = Y_pred.iloc[:, 0].reset_index(drop=True)\n",
+    "        Y_pred_series.name = 'y_pred'\n",
+    "        print(f\"Converted Y_pred to a Series of length {len(Y_pred_series)}.\")\n",
+    "    else:\n",
+    "        raise ValueError(\"Y_pred has more than one column. Please select the correct prediction column.\")\n",
+    "elif isinstance(Y_pred, pd.Series):\n",
+    "    Y_pred_series = Y_pred.reset_index(drop=True)\n",
+    "    Y_pred_series.name = 'y_pred'\n",
+    "    print(f\"Y_pred is already a Series of length {len(Y_pred_series)}.\")\n",
+    "else:\n",
+    "    raise TypeError(\"Y_pred must be a pandas Series or DataFrame.\")\n",
+    "\n",
+    "# Compare lengths\n",
+    "len_y = len(y_series)\n",
+    "len_Y_pred = len(Y_pred_series)\n",
+    "\n",
+    "print(f\"Length of y_series: {len_y}\")\n",
+    "print(f\"Length of Y_pred_series: {len_Y_pred}\")\n",
+    "\n",
+    "# Compute the length difference\n",
+    "diff = len_Y_pred - len_y\n",
+    "\n",
+    "if diff > 0:\n",
+    "    print(f\"Y_pred_series is {diff} points longer than y_series. Truncating the last {diff} points of Y_pred_series.\")\n",
+    "    Y_pred_aligned = Y_pred_series.iloc[:-diff].reset_index(drop=True)\n",
+    "    y_aligned = y_series.reset_index(drop=True)\n",
+    "elif diff < 0:\n",
+    "    print(f\"Y_pred_series is {abs(diff)} points shorter than y_series. Truncating the last {abs(diff)} points of y_series.\")\n",
+    "    y_aligned = y_series.iloc[:len_Y_pred].reset_index(drop=True)\n",
+    "    Y_pred_aligned = Y_pred_series.reset_index(drop=True)\n",
+    "else:\n",
+    "    print(\"y_series and Y_pred_series are already aligned in length.\")\n",
+    "    y_aligned = y_series.reset_index(drop=True)\n",
+    "    Y_pred_aligned = Y_pred_series.reset_index(drop=True)\n",
+    "\n",
+    "print(f\"Length of y_aligned: {len(y_aligned)}\")\n",
+    "print(f\"Length of Y_pred_aligned: {len(Y_pred_aligned)}\")\n",
+    "\n",
+    "# Compute the residuals\n",
+    "residuals = y_aligned - Y_pred_aligned\n",
+    "\n",
+    "# Show the first residuals\n",
+    "display(residuals.head())\n",
+    "\n",
+    "import matplotlib.pyplot as plt\n",
+    "import seaborn as sns\n",
+    "from scipy.stats import shapiro\n",
+    "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
+    "\n",
+    "# Plot the residuals over time\n",
+    "plt.figure(figsize=(14,6))\n",
+    "plt.plot(residuals, label='Residuals (y_true - Y_pred)')\n",
+    "plt.axhline(0, color='red', linestyle='--')\n",
+    "plt.legend()\n",
+    "plt.title('Residuals of the Autoregressive Prediction')\n",
+    "plt.xlabel('Observations')\n",
+    "plt.ylabel('Residual')\n",
+    "plt.show()\n",
+    "\n",
+    "# Descriptive statistics of the residuals\n",
+    "print(\"Residual statistics:\")\n",
+    "print(residuals.describe())\n",
+    "\n",
+    "# Histogram of the residuals\n",
+    "plt.figure(figsize=(10,5))\n",
+    "sns.histplot(residuals, kde=True, bins=50)\n",
+    "plt.title('Residual Distribution')\n",
+    "plt.xlabel('Residual')\n",
+    "plt.ylabel('Frequency')\n",
+    "plt.show()\n",
+    "\n",
+    "# Normality test (Shapiro-Wilk)\n",
+    "# Shapiro-Wilk has a sample-size limit (normally up to 5000 observations)\n",
+    "sample_size = min(5000, len(residuals))\n",
+    "sample_residuals = residuals.sample(n=sample_size, random_state=42)\n",
+    "stat, p_value = shapiro(sample_residuals)\n",
+    "print(f\"Shapiro-Wilk statistic: {stat:.4f}, p-value: {p_value:.4f}\")\n",
+    "if p_value > 0.05:\n",
+    "    print(\"The residuals appear normally distributed (fail to reject H0).\")\n",
+    "else:\n",
+    "    print(\"The residuals are not normally distributed (reject H0).\")\n",
+    "\n",
+    "# Autocorrelation of the residuals\n",
+    "fig, ax = plt.subplots(1, 2, figsize=(15,5))\n",
+    "plot_acf(residuals, ax=ax[0], lags=40, zero=False)\n",
+    "ax[0].set_title('ACF of the Residuals')\n",
+    "plot_pacf(residuals, ax=ax[1], lags=40, zero=False, method='ywm')\n",
+    "ax[1].set_title('PACF of the Residuals')\n",
+    "plt.show()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Symmetric CUSUM"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import matplotlib.pyplot as plt\n",
+    "import numpy as np\n",
+    "\n",
+    "# 'residuals' is the pandas Series obtained earlier\n",
+    "# Make sure the residuals are correctly ordered\n",
+    "\n",
+    "# Initialize the positive and negative CUSUM columns\n",
+    "cusum_positive = [0]\n",
+    "cusum_negative = [0]\n",
+    "\n",
+    "# Iterate over the residuals to compute the CUSUM\n",
+    "for residual in residuals:\n",
+    "    # Positive CUSUM: cumulative sum of the positive deviations\n",
+    "    s_pos = max(0, cusum_positive[-1] + residual)\n",
+    "    cusum_positive.append(s_pos)\n",
+    "\n",
+    "    # Negative CUSUM: cumulative sum of the negative deviations\n",
+    "    s_neg = min(0, cusum_negative[-1] + residual)\n",
+    "    cusum_negative.append(s_neg)\n",
+    "\n",
+    "# Drop the first element (initialization)\n",
+    "cusum_positive = cusum_positive[1:]\n",
+    "cusum_negative = cusum_negative[1:]\n",
+    "\n",
+    "# Collect everything in a DataFrame for easier handling\n",
+    "residuals_df = pd.DataFrame({\n",
+    "    'Residuals': residuals,\n",
+    "    'CUSUM_Positive': cusum_positive,\n",
+    "    'CUSUM_Negative': cusum_negative\n",
+    "}, index=residuals.index)\n",
+    "\n",
+    "# Show the first rows as a sanity check\n",
+    "display(residuals_df.head())\n"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Plot the positive and negative CUSUM\n",
+    "plt.figure(figsize=(14, 7))\n",
+    "plt.plot(residuals_df['CUSUM_Positive'], label='Positive CUSUM', color='green')\n",
+    "plt.plot(residuals_df['CUSUM_Negative'], label='Negative CUSUM', color='red')\n",
+    "plt.axhline(0, color='black', linestyle='--', linewidth=1)\n",
+    "plt.title('CUSUM of the Residuals')\n",
+    "plt.xlabel('Observations')\n",
+    "plt.ylabel('Cumulative Sum')\n",
+    "plt.legend()\n",
+    "plt.show()\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Volatility Indicator"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import seaborn as sns\n",
+    "from scipy.stats import shapiro\n",
+    "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
+    "from sklearn.linear_model import LinearRegression\n",
+    "from sklearn.model_selection import cross_val_score\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "from sklearn.decomposition import PCA\n",
+    "\n",
+    "# --- Previous step: residual computation ---\n",
+    "# Convert y to a pandas Series (make sure 'y' is defined)\n",
+    "y_series = pd.Series(y, name='y_true')\n",
+    "\n",
+    "# Check whether Y_pred is a single-column DataFrame\n",
+    "if isinstance(Y_pred, pd.DataFrame):\n",
+    "    if Y_pred.shape[1] == 1:\n",
+    "        # Convert to a Series without resetting the index\n",
+    "        Y_pred_series = Y_pred.iloc[:, 0]\n",
+    "        Y_pred_series.name = 'y_pred'\n",
+    "        print(f\"Converted Y_pred to a Series of length {len(Y_pred_series)}.\")\n",
+    "    else:\n",
+    "        raise ValueError(\"Y_pred has more than one column. Please select the correct prediction column.\")\n",
+    "elif isinstance(Y_pred, pd.Series):\n",
+    "    Y_pred_series = Y_pred\n",
+    "    Y_pred_series.name = 'y_pred'\n",
+    "    print(f\"Y_pred is already a Series of length {len(Y_pred_series)}.\")\n",
+    "else:\n",
+    "    raise TypeError(\"Y_pred must be a pandas Series or DataFrame.\")\n",
+    "\n",
+    "# Compare lengths\n",
+    "len_y = len(y_series)\n",
+    "len_Y_pred = len(Y_pred_series)\n",
+    "\n",
+    "print(f\"Length of y_series: {len_y}\")\n",
+    "print(f\"Length of Y_pred_series: {len_Y_pred}\")\n",
+    "\n",
+    "# Compute the length difference\n",
+    "diff = len_Y_pred - len_y\n",
+    "\n",
+    "if diff > 0:\n",
+    "    print(f\"Y_pred_series is {diff} points longer than y_series. Truncating the last {diff} points of Y_pred_series.\")\n",
+    "    Y_pred_aligned = Y_pred_series.iloc[:-diff]\n",
+    "    y_aligned = y_series\n",
+    "elif diff < 0:\n",
+    "    print(f\"Y_pred_series is {abs(diff)} points shorter than y_series. Truncating the last {abs(diff)} points of y_series.\")\n",
+    "    y_aligned = y_series.iloc[:len_Y_pred]\n",
+    "    Y_pred_aligned = Y_pred_series\n",
+    "else:\n",
+    "    print(\"y_series and Y_pred_series are already aligned in length.\")\n",
+    "    y_aligned = y_series\n",
+    "    Y_pred_aligned = Y_pred_series\n",
+    "\n",
+    "print(f\"Length of y_aligned: {len(y_aligned)}\")\n",
+    "print(f\"Length of Y_pred_aligned: {len(Y_pred_aligned)}\")\n",
+    "\n",
+    "# Compute the residuals\n",
+    "residuals = y_aligned - Y_pred_aligned\n",
+    "\n",
+    "# Show the first residuals\n",
+    "display(residuals.head())\n",
+    "\n",
+    "# --- Volatility indicator implementation ---\n",
+    "# Parameters for the indicators\n",
+    "window_size_std = 20   # Window for the rolling standard deviation\n",
+    "window_size_bb = 20    # Window for the Bollinger Bands\n",
+    "num_std_dev_bb = 2     # Number of standard deviations for the Bollinger Bands\n",
+    "span_ewma = 20         # Span for the EWMA\n",
+    "window_size_atr = 14   # Window for the ATR\n",
+    "\n",
+    "# --- 1. Rolling standard deviation ---\n",
+    "series['Rolling_STD'] = series['price_close'].rolling(window=window_size_std).std()\n",
+    "\n",
+    "# Check that the column was created\n",
+    "if 'Rolling_STD' in series.columns:\n",
+    "    print(\"Rolling standard deviation computed successfully.\")\n",
+    "else:\n",
+    "    print(\"Error computing the rolling standard deviation.\")\n",
+    "\n",
+    "# --- 2. Bollinger Bands ---\n",
+    "rolling_mean = series['price_close'].rolling(window=window_size_bb).mean()\n",
+    "rolling_std_bb = series['price_close'].rolling(window=window_size_bb).std()\n",
+    "\n",
+    "bollinger_upper = rolling_mean + (rolling_std_bb * num_std_dev_bb)\n",
+    "bollinger_lower = rolling_mean - (rolling_std_bb * num_std_dev_bb)\n",
+    "\n",
+    "series['Rolling_Mean'] = rolling_mean\n",
+    "series['Bollinger_Upper'] = bollinger_upper\n",
+    "series['Bollinger_Lower'] = bollinger_lower\n",
+    "\n",
+    "# Check that the columns were created\n",
+    "required_bb_columns = ['Rolling_Mean', 'Bollinger_Upper', 'Bollinger_Lower']\n",
+    "if all(col in series.columns for col in required_bb_columns):\n",
+    "    print(\"Bollinger Bands computed successfully.\")\n",
+    "else:\n",
+    "    missing_bb = [col for col in required_bb_columns if col not in series.columns]\n",
+    "    print(f\"Error computing the Bollinger Bands. Missing columns: {missing_bb}\")\n",
+    "\n",
+    "# --- 3. EWMA standard deviation ---\n",
+    "series['EWMA_STD'] = series['price_close'].ewm(span=span_ewma, adjust=False).std()\n",
+    "\n",
+    "if 'EWMA_STD' in series.columns:\n",
+    "    print(\"EWMA standard deviation computed successfully.\")\n",
+    "else:\n",
+    "    print(\"Error computing the EWMA standard deviation.\")\n",
+    "\n",
+    "# --- 4. Average True Range (ATR) ---\n",
+    "required_atr_columns = ['price_high', 'price_low', 'price_close']\n",
+    "if all(col in series.columns for col in required_atr_columns):\n",
+    "    high_low = series['price_high'] - series['price_low']\n",
+    "    high_prev_close = (series['price_high'] - series['price_close'].shift()).abs()\n",
+    "    low_prev_close = (series['price_low'] - series['price_close'].shift()).abs()\n",
+    "    true_range = pd.concat([high_low, high_prev_close, low_prev_close], axis=1).max(axis=1)\n",
+    "    series['ATR'] = true_range.rolling(window=window_size_atr).mean()\n",
+    "    print(\"Average True Range (ATR) computed successfully.\")\n",
+    "else:\n",
+    "    missing_atr = [col for col in required_atr_columns if col not in series.columns]\n",
+    "    print(f\"Could not compute the ATR. Missing columns: {missing_atr}\")\n",
+    "\n",
+    "# --- 5. Plot the volatility indicators ---\n",
+    "\n",
+    "# a. Rolling standard deviation\n",
+    "plt.figure(figsize=(14, 7))\n",
+    "plt.plot(series['price_close'], label='Close Price', color='blue')\n",
+    "plt.plot(series['Rolling_STD'], label=f'Rolling Std ({window_size_std} periods)', color='orange')\n",
+    "plt.title('Rolling Standard Deviation of the Price Series')\n",
+    "plt.xlabel('Date')\n",
+    "plt.ylabel('Price / Volatility')\n",
+    "plt.legend()\n",
+    "plt.show()\n",
+    "\n",
+    "# b. Bollinger Bands\n",
+    "plt.figure(figsize=(14, 7))\n",
+    "plt.plot(series['price_close'], label='Close Price', color='blue')\n",
+    "plt.plot(series['Rolling_Mean'], label=f'Rolling Mean ({window_size_bb} periods)', color='orange')\n",
+    "plt.plot(series['Bollinger_Upper'], label='Bollinger Upper', color='green')\n",
+    "plt.plot(series['Bollinger_Lower'], label='Bollinger Lower', color='red')\n",
+    "plt.fill_between(series.index, series['Bollinger_Lower'], series['Bollinger_Upper'], color='lightgray')\n",
+    "plt.title('Bollinger Bands of the Price Series')\n",
+    "plt.xlabel('Date')\n",
+    "plt.ylabel('Price')\n",
+    "plt.legend()\n",
+    "plt.show()\n",
+    "\n",
+    "# c. EWMA standard deviation\n",
+    "plt.figure(figsize=(14, 7))\n",
+    "plt.plot(series['price_close'], label='Close Price', color='blue')\n",
+    "plt.plot(series['EWMA_STD'], label=f'EWMA Std (span={span_ewma})', color='purple')\n",
+    "plt.title('EWMA Standard Deviation of the Price Series')\n",
+    "plt.xlabel('Date')\n",
+    "plt.ylabel('Price / Volatility')\n",
+    "plt.legend()\n",
+    "plt.show()\n",
+    "\n",
+    "# d. ATR (optional)\n",
+    "if 'ATR' in series.columns:\n",
+    "    plt.figure(figsize=(14, 7))\n",
+    "    plt.plot(series['price_close'], label='Close Price', color='blue')\n",
+    "    plt.plot(series['ATR'], label=f'ATR ({window_size_atr} periods)', color='magenta')\n",
+    "    plt.title('Average True Range (ATR) of the Price Series')\n",
+    "    plt.xlabel('Date')\n",
+    "    plt.ylabel('Price / ATR')\n",
+    "    plt.legend()\n",
+    "    plt.show()\n",
+    "\n",
+    "# --- 6. Feed the indicators into the regression model ---\n",
+    "\n",
+    "# a. Select the available features\n",
+    "features = ['Rolling_STD', 'Rolling_Mean', 'Bollinger_Upper', 'Bollinger_Lower', 'EWMA_STD']\n",
+    "\n",
+    "# Add 'ATR' when it is available\n",
+    "if 'ATR' in series.columns:\n",
+    "    features.append('ATR')\n",
+    "\n",
+    "# Check which features are present in the DataFrame\n",
+    "available_features = [feature for feature in features if feature in series.columns]\n",
+    "missing_features = [feature for feature in features if feature not in series.columns]\n",
+    "\n",
+    "if missing_features:\n",
+    "    print(f\"The following features are missing and will be ignored: {missing_features}\")\n",
+    "\n",
+    "print(f\"Features available for modeling: {available_features}\")\n",
+    "\n",
+    "# b. Prepare the data for the model\n",
+    "X_features = series[available_features].dropna()\n",
+    "print(f\"\\nNumber of rows in X_features after dropna(): {len(X_features)}\")\n",
+    "\n",
+    "# Find the indices shared by X_features and y_aligned\n",
+    "common_indices = X_features.index.intersection(y_aligned.index)\n",
+    "print(f\"Number of common indices: {len(common_indices)}\")\n",
+    "\n",
+    "# Keep only the common indices\n",
+    "X_features_aligned = X_features.loc[common_indices]\n",
+    "y_features_aligned = y_aligned.loc[common_indices]\n",
+    "\n",
+    "print(f\"Length of X_features_aligned: {len(X_features_aligned)}\")\n",
+    "print(f\"Length of y_features_aligned: {len(y_features_aligned)}\")\n",
+    "\n",
+    "# Check that the indices are aligned\n",
+    "if len(X_features_aligned) != len(y_features_aligned):\n",
+    "    print(\"The feature data and the target variable are still not aligned.\")\n",
+    "else:\n",
+    "    print(\"X_features and y_features are correctly aligned.\")\n",
+    "\n",
+    "# c. Preprocessing and PCA\n",
+    "scaler = StandardScaler()\n",
+    "X_scaled = scaler.fit_transform(X_features_aligned)\n",
+    "\n",
+    "pca = PCA()\n",
+    "X_pca = pca.fit_transform(X_scaled)\n",
+    "cumulative_variance = np.cumsum(pca.explained_variance_ratio_)\n",
+    "print(\"\\nCumulative explained variance:\", cumulative_variance)\n",
+    "\n",
+    "# Number of components for 95% of the variance\n",
+    "n_components_var = np.argmax(cumulative_variance >= 0.95) + 1\n",
+    "print(f\"Optimal number of components (95% of explained variance): {n_components_var}\")\n",
+    "\n",
+    "# Reduce the dimensionality\n",
+    "pca_reduced = PCA(n_components=n_components_var)\n",
+    "X_reduced = pca_reduced.fit_transform(X_scaled)\n",
+    "print(f\"Matrix shape after PCA: {X_reduced.shape}\")\n",
+    "\n",
+    "# d. Linear regression modeling\n",
+    "model = LinearRegression()\n",
+    "scores = cross_val_score(model, X_reduced, y_features_aligned, cv=5, scoring='neg_mean_squared_error')\n",
+    "mse_mean = -np.mean(scores)\n",
+    "print(f\"Mean MSE (5-fold CV): {mse_mean:.4f}\")\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Triple-Barrier Policy"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import pandas as pd\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt\n",
+    "import seaborn as sns\n",
+    "from scipy.stats import shapiro\n",
+    "from statsmodels.graphics.tsaplots import plot_acf, plot_pacf\n",
+    "from sklearn.linear_model import LinearRegression\n",
+    "from sklearn.model_selection import cross_val_score\n",
+    "from sklearn.preprocessing import StandardScaler\n",
+    "from sklearn.decomposition import PCA\n",
+    "\n",
+    "# --- Previous step: residual computation ---\n",
+    "# Convert y to a pandas Series (make sure 'y' is defined)\n",
+    "y_series = pd.Series(y, name='y_true')\n",
+    "\n",
+    "# Check whether Y_pred is a single-column DataFrame\n",
+    "if isinstance(Y_pred, pd.DataFrame):\n",
+    "    if Y_pred.shape[1] == 1:\n",
+    "        # Convert to a Series without resetting the index\n",
+    "        Y_pred_series = Y_pred.iloc[:, 0]\n",
+    "        Y_pred_series.name = 'y_pred'\n",
+    "        print(f\"Converted Y_pred to a Series of length {len(Y_pred_series)}.\")\n",
+    "    else:\n",
+    "        raise ValueError(\"Y_pred has more than one column. Please select the correct prediction column.\")\n",
+    "elif isinstance(Y_pred, pd.Series):\n",
+    "    Y_pred_series = Y_pred\n",
+    "    Y_pred_series.name = 'y_pred'\n",
+    "    print(f\"Y_pred is already a Series of length {len(Y_pred_series)}.\")\n",
+    "else:\n",
+    "    raise TypeError(\"Y_pred must be a pandas Series or DataFrame.\")\n",
+    "\n",
+    "# Compare lengths\n",
+    "len_y = len(y_series)\n",
+    "len_Y_pred = len(Y_pred_series)\n",
+    "\n",
+    "print(f\"Length of y_series: {len_y}\")\n",
+    "print(f\"Length of Y_pred_series: {len_Y_pred}\")\n",
+    "\n",
+    "# Compute the length difference\n",
+    "diff = len_Y_pred - len_y\n",
+    "\n",
+    "if diff > 0:\n",
+    "    print(f\"Y_pred_series is {diff} points longer than y_series. Truncating the last {diff} points of Y_pred_series.\")\n",
+    "    Y_pred_aligned = Y_pred_series.iloc[:-diff]\n",
+    "    y_aligned = y_series\n",
+    "elif diff < 0:\n",
+    "    print(f\"Y_pred_series is {abs(diff)} points shorter than y_series. Truncating the last {abs(diff)} points of y_series.\")\n",
+    "    y_aligned = y_series.iloc[:len_Y_pred]\n",
+    "    Y_pred_aligned = Y_pred_series\n",
+    "else:\n",
+    "    print(\"y_series and Y_pred_series are already aligned in length.\")\n",
+    "    y_aligned = y_series\n",
+    "    Y_pred_aligned = Y_pred_series\n",
+    "\n",
+    "print(f\"Length of y_aligned: {len(y_aligned)}\")\n",
+    "print(f\"Length of Y_pred_aligned: {len(Y_pred_aligned)}\")\n",
+    "\n",
+    "# Compute the residuals\n",
+    "residuals = y_aligned - Y_pred_aligned\n",
+    "\n",
+    "# Add the residuals to the 'series' DataFrame\n",
+    "series['Residuals'] = residuals\n",
+    "\n",
+    "# Check that every residuals index is present in series\n",
+    "missing_indices = residuals.index.difference(series.index)\n",
+    "if not missing_indices.empty:\n",
+    "    print(f\"\\nIndices present in 'residuals' but missing from 'series': {missing_indices.tolist()}\")\n",
+    "    # Drop those indices from residuals\n",
+    "    residuals = residuals.drop(index=missing_indices)\n",
+    "    # Update 'series' to reflect the removal\n",
+    "    series = series.drop(index=missing_indices)\n",
+    "    print(f\"Residuals after dropping the missing indices: {len(residuals)}\")\n",
+    "else:\n",
+    "    print(\"\\nAll 'residuals' indices are present in 'series'.\")\n",
+    "\n",
+    "# --- Volatility indicator implementation ---\n",
+    "# Parameters for the indicators\n",
+    "window_size_std = 20   # Window for the rolling standard deviation\n",
+    "window_size_bb = 20    # Window for the Bollinger Bands\n",
+    "num_std_dev_bb = 2     # Number of standard deviations for the Bollinger Bands\n",
+    "span_ewma = 20         # Span for the EWMA\n",
+    "window_size_atr = 14   # Window for the ATR\n",
+    "\n",
+    "# --- 1. Rolling standard deviation ---\n",
+    "series['Rolling_STD'] = series['price_close'].rolling(window=window_size_std).std()\n",
+    "\n",
+    "# Check that the column was created\n",
+    "if 'Rolling_STD' in series.columns:\n",
+    "    print(\"Rolling standard deviation computed successfully.\")\n",
+    "else:\n",
+    "    print(\"Error computing the rolling standard deviation.\")\n",
+    "\n",
+    "# --- 2. Bollinger Bands ---\n",
+    "rolling_mean = series['price_close'].rolling(window=window_size_bb).mean()\n",
+    "rolling_std_bb = series['price_close'].rolling(window=window_size_bb).std()\n",
+    "\n",
+    "bollinger_upper = rolling_mean + (rolling_std_bb * num_std_dev_bb)\n",
+    "bollinger_lower = rolling_mean - (rolling_std_bb * num_std_dev_bb)\n",
+    "\n",
+    "series['Rolling_Mean'] = rolling_mean\n",
+    "series['Bollinger_Upper'] = bollinger_upper\n",
+    "series['Bollinger_Lower'] = bollinger_lower\n",
+    "\n",
+    "# Check that the columns were created\n",
+    "required_bb_columns = ['Rolling_Mean', 'Bollinger_Upper', 'Bollinger_Lower']\n",
+    "if all(col in series.columns for col in required_bb_columns):\n",
+    "    print(\"Bollinger Bands computed successfully.\")\n",
+    "else:\n",
+    "    missing_bb = [col for col in required_bb_columns if col not in series.columns]\n",
+    "    print(f\"Error computing the Bollinger Bands. Missing columns: {missing_bb}\")\n",
+    "\n",
+    "# --- 3. EWMA standard deviation ---\n",
+    "series['EWMA_STD'] = series['price_close'].ewm(span=span_ewma, adjust=False).std()\n",
+    "\n",
+    "if 'EWMA_STD' in series.columns:\n",
+    "    print(\"EWMA standard deviation computed successfully.\")\n",
+    "else:\n",
+    "    print(\"Error computing the EWMA standard deviation.\")\n",
+    "\n",
+    "# --- 4. Average True Range (ATR) ---\n",
+    "required_atr_columns = ['price_high', 'price_low', 'price_close']\n",
+    "if all(col in series.columns for col in required_atr_columns):\n",
+    "    high_low = series['price_high'] - series['price_low']\n",
+    "    high_prev_close = (series['price_high'] - series['price_close'].shift()).abs()\n",
+    "    low_prev_close = (series['price_low'] - series['price_close'].shift()).abs()\n",
|
930 |
+
" true_range = pd.concat([high_low, high_prev_close, low_prev_close], axis=1).max(axis=1)\n",
|
931 |
+
" series['ATR'] = true_range.rolling(window=window_size_atr).mean()\n",
|
932 |
+
" print(\"Average True Range (ATR) calculado com sucesso.\")\n",
|
933 |
+
"else:\n",
|
934 |
+
" missing_atr = [col for col in required_atr_columns if col not in series.columns]\n",
|
935 |
+
" print(f\"Não foi possível calcular o ATR. Colunas faltantes: {missing_atr}\")\n",
|
936 |
+
"\n",
|
937 |
+
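"# Note: Wilder's classic ATR smooths the true range with an exponential\n",
"# moving average of alpha = 1/window rather than a simple rolling mean.\n",
"# A sketch of that variant (an alternative, not what the cell above uses):\n",
"#   series['ATR_wilder'] = true_range.ewm(alpha=1/window_size_atr, adjust=False).mean()\n",
"\n",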
"# --- 5. Visualizar os Indicadores de Volatilidade ---\n",
|
938 |
+
"# a. Desvio Padrão Móvel\n",
|
939 |
+
"plt.figure(figsize=(14, 7))\n",
|
940 |
+
"plt.plot(series['price_close'], label='Preço de Fechamento', color='blue')\n",
|
941 |
+
"plt.plot(series['Rolling_STD'], label=f'Desvio Padrão Móvel ({window_size_std} períodos)', color='orange')\n",
|
942 |
+
"plt.title('Desvio Padrão Móvel da Série de Preços')\n",
|
943 |
+
"plt.xlabel('Data')\n",
|
944 |
+
"plt.ylabel('Preço / Volatilidade')\n",
|
945 |
+
"plt.legend()\n",
|
946 |
+
"plt.show()\n",
|
947 |
+
"\n",
|
948 |
+
"# b. Bollinger Bands\n",
|
949 |
+
"plt.figure(figsize=(14, 7))\n",
|
950 |
+
"plt.plot(series['price_close'], label='Preço de Fechamento', color='blue')\n",
|
951 |
+
"plt.plot(series['Rolling_Mean'], label=f'Média Móvel ({window_size_bb} períodos)', color='orange')\n",
|
952 |
+
"plt.plot(series['Bollinger_Upper'], label='Bollinger Upper', color='green')\n",
|
953 |
+
"plt.plot(series['Bollinger_Lower'], label='Bollinger Lower', color='red')\n",
|
954 |
+
"plt.fill_between(series.index, series['Bollinger_Lower'], series['Bollinger_Upper'], color='lightgray')\n",
|
955 |
+
"plt.title('Bollinger Bands da Série de Preços')\n",
|
956 |
+
"plt.xlabel('Data')\n",
|
957 |
+
"plt.ylabel('Preço')\n",
|
958 |
+
"plt.legend()\n",
|
959 |
+
"plt.show()\n",
|
960 |
+
"\n",
|
961 |
+
"# c. EWMA Desvio Padrão\n",
|
962 |
+
"plt.figure(figsize=(14, 7))\n",
|
963 |
+
"plt.plot(series['price_close'], label='Preço de Fechamento', color='blue')\n",
|
964 |
+
"plt.plot(series['EWMA_STD'], label=f'EWMA Desvio Padrão (span={span_ewma})', color='purple')\n",
|
965 |
+
"plt.title('EWMA Desvio Padrão da Série de Preços')\n",
|
966 |
+
"plt.xlabel('Data')\n",
|
967 |
+
"plt.ylabel('Preço / Volatilidade')\n",
|
968 |
+
"plt.legend()\n",
|
969 |
+
"plt.show()\n",
|
970 |
+
"\n",
|
971 |
+
"# d. ATR (Opcional)\n",
|
972 |
+
"if 'ATR' in series.columns:\n",
|
973 |
+
" plt.figure(figsize=(14, 7))\n",
|
974 |
+
" plt.plot(series['price_close'], label='Preço de Fechamento', color='blue')\n",
|
975 |
+
" plt.plot(series['ATR'], label=f'ATR ({window_size_atr} períodos)', color='magenta')\n",
|
976 |
+
" plt.title('Average True Range (ATR) da Série de Preços')\n",
|
977 |
+
" plt.xlabel('Data')\n",
|
978 |
+
" plt.ylabel('Preço / ATR')\n",
|
979 |
+
" plt.legend()\n",
|
980 |
+
" plt.show()\n",
|
981 |
+
"\n",
|
982 |
+
"# --- 6. Calcular o CUSUM dos Resíduos ---\n",
|
983 |
+
"# Parâmetros para o CUSUM\n",
|
984 |
+
"k = 0.5 # Tolerância, ajuste conforme necessário\n",
|
985 |
+
"\n",
|
986 |
+
"# Inicializar as colunas de CUSUM positivo e negativo como float\n",
|
987 |
+
"series['CUSUM_Positive'] = 0.0\n",
|
988 |
+
"series['CUSUM_Negative'] = 0.0\n",
|
989 |
+
"\n",
|
990 |
+
"# Calcular o CUSUM usando iterrows()\n",
|
991 |
+
"for i in range(1, len(series)):\n",
|
992 |
+
" current_index = series.index[i]\n",
|
993 |
+
" previous_index = series.index[i-1]\n",
|
994 |
+
" \n",
|
995 |
+
" try:\n",
|
996 |
+
" # Calcular CUSUM Positivo\n",
|
997 |
+
" series.at[current_index, 'CUSUM_Positive'] = max(\n",
|
998 |
+
" 0.0, \n",
|
999 |
+
" series.at[previous_index, 'CUSUM_Positive'] + residuals.loc[current_index] - k\n",
|
1000 |
+
" )\n",
|
1001 |
+
" \n",
|
1002 |
+
" # Calcular CUSUM Negativo\n",
|
1003 |
+
" series.at[current_index, 'CUSUM_Negative'] = min(\n",
|
1004 |
+
" 0.0, \n",
|
1005 |
+
" series.at[previous_index, 'CUSUM_Negative'] + residuals.loc[current_index] + k\n",
|
1006 |
+
" )\n",
|
1007 |
+
" except KeyError as e:\n",
|
1008 |
+
" print(f\"Erro ao acessar o índice {current_index}: {e}\")\n",
|
1009 |
+
" except Exception as e:\n",
|
1010 |
+
" print(f\"Erro inesperado ao calcular CUSUM para o índice {current_index}: {e}\")\n",
|
1011 |
+
"\n",
|
1012 |
+
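"# Note: the Python-level loop with .at/.loc scalar access above is the slow\n",
"# path. A sketch of the same one-sided CUSUM recursion over plain numpy\n",
"# arrays (assuming residuals can be aligned to series and NaNs treated as 0):\n",
"#   res = residuals.reindex(series.index).fillna(0.0).to_numpy()\n",
"#   pos = np.zeros(len(res)); neg = np.zeros(len(res))\n",
"#   for t in range(1, len(res)):\n",
"#       pos[t] = max(0.0, pos[t-1] + res[t] - k)\n",
"#       neg[t] = min(0.0, neg[t-1] + res[t] + k)\n",
"#   series['CUSUM_Positive'] = pos; series['CUSUM_Negative'] = neg\n",
"# The recursion stays sequential, but removing pandas indexing from the\n",
"# inner loop is usually a large constant-factor speedup.\n",
"\n",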
"# --- 7. Definir as Barreiras ---\n",
|
1013 |
+
"# Definir os múltiplos para as barreiras\n",
|
1014 |
+
"barrier_levels = {\n",
|
1015 |
+
" 'alert': 1, # 1x a volatilidade\n",
|
1016 |
+
" 'action': 2, # 2x a volatilidade\n",
|
1017 |
+
" 'critical': 3 # 3x a volatilidade\n",
|
1018 |
+
"}\n",
|
1019 |
+
"\n",
|
1020 |
+
"# Calcular as barreiras positivas e negativas\n",
|
1021 |
+
"for level, multiplier in barrier_levels.items():\n",
|
1022 |
+
" series[f'Barreira_Pos_{level}'] = series['Rolling_STD'] * multiplier\n",
|
1023 |
+
" series[f'Barreira_Neg_{level}'] = -series['Rolling_STD'] * multiplier\n",
|
1024 |
+
"\n",
|
1025 |
+
"# --- 8. Aplicar a Política das 3 Barreiras ---\n",
|
1026 |
+
"# Inicializar uma coluna para o gatilho\n",
|
1027 |
+
"series['Gatilho'] = None\n",
|
1028 |
+
"\n",
|
1029 |
+
"# Função para aplicar a política das 3 barreiras\n",
|
1030 |
+
"def aplicar_politica(row):\n",
|
1031 |
+
" # CUSUM Positivo\n",
|
1032 |
+
" if row['CUSUM_Positive'] > row['Barreira_Pos_alert']:\n",
|
1033 |
+
" if row['CUSUM_Positive'] > row['Barreira_Pos_critical']:\n",
|
1034 |
+
" return 'Crítica Positiva'\n",
|
1035 |
+
" elif row['CUSUM_Positive'] > row['Barreira_Pos_action']:\n",
|
1036 |
+
" return 'Ação Positiva'\n",
|
1037 |
+
" else:\n",
|
1038 |
+
" return 'Alerta Positivo'\n",
|
1039 |
+
" # CUSUM Negativo\n",
|
1040 |
+
" elif row['CUSUM_Negative'] < row['Barreira_Neg_alert']:\n",
|
1041 |
+
" if row['CUSUM_Negative'] < row['Barreira_Neg_critical']:\n",
|
1042 |
+
" return 'Crítica Negativa'\n",
|
1043 |
+
" elif row['CUSUM_Negative'] < row['Barreira_Neg_action']:\n",
|
1044 |
+
" return 'Ação Negativa'\n",
|
1045 |
+
" else:\n",
|
1046 |
+
" return 'Alerta Negativo'\n",
|
1047 |
+
" else:\n",
|
1048 |
+
" return None\n",
|
1049 |
+
"\n",
|
1050 |
+
"# Aplicar a política das 3 barreiras\n",
|
1051 |
+
"series['Gatilho'] = series.apply(aplicar_politica, axis=1)\n",
|
1052 |
+
"\n",
|
1053 |
+
"# --- 9. Visualizar os Gatilhos ---\n",
|
1054 |
+
"# a. CUSUM Positivo com Barreiras e Gatilhos\n",
|
1055 |
+
"plt.figure(figsize=(14, 7))\n",
|
1056 |
+
"plt.plot(series['CUSUM_Positive'], label='CUSUM Positivo', color='green')\n",
|
1057 |
+
"plt.plot(series['Barreira_Pos_alert'], label='Barreira Positiva - Alerta', linestyle='--', color='orange')\n",
|
1058 |
+
"plt.plot(series['Barreira_Pos_action'], label='Barreira Positiva - Ação', linestyle='--', color='red')\n",
|
1059 |
+
"plt.plot(series['Barreira_Pos_critical'], label='Barreira Positiva - Crítica', linestyle='--', color='purple')\n",
|
1060 |
+
"\n",
|
1061 |
+
"# Marcadores para os gatilhos positivos\n",
|
1062 |
+
"gatilhos_pos = series[series['Gatilho'].isin(['Alerta Positivo', 'Ação Positiva', 'Crítica Positiva'])]\n",
|
1063 |
+
"plt.scatter(\n",
|
1064 |
+
" gatilhos_pos.index, \n",
|
1065 |
+
" gatilhos_pos['CUSUM_Positive'], \n",
|
1066 |
+
" c=gatilhos_pos['Gatilho'].map({\n",
|
1067 |
+
" 'Alerta Positivo': 'yellow', \n",
|
1068 |
+
" 'Ação Positiva': 'orange', \n",
|
1069 |
+
" 'Crítica Positiva': 'red'\n",
|
1070 |
+
" }), \n",
|
1071 |
+
" label='Gatilho Positivo', \n",
|
1072 |
+
" marker='o'\n",
|
1073 |
+
")\n",
|
1074 |
+
"\n",
|
1075 |
+
"plt.axhline(0, color='black', linestyle='--', linewidth=1)\n",
|
1076 |
+
"plt.title('CUSUM Positivo com Barreiras e Gatilhos')\n",
|
1077 |
+
"plt.xlabel('Data')\n",
|
1078 |
+
"plt.ylabel('Cumulative Sum')\n",
|
1079 |
+
"plt.legend()\n",
|
1080 |
+
"plt.show()\n",
|
1081 |
+
"\n",
|
1082 |
+
"# b. CUSUM Negativo com Barreiras e Gatilhos\n",
|
1083 |
+
"plt.figure(figsize=(14, 7))\n",
|
1084 |
+
"plt.plot(series['CUSUM_Negative'], label='CUSUM Negativo', color='red')\n",
|
1085 |
+
"plt.plot(series['Barreira_Neg_alert'], label='Barreira Negativa - Alerta', linestyle='--', color='orange')\n",
|
1086 |
+
"plt.plot(series['Barreira_Neg_action'], label='Barreira Negativa - Ação', linestyle='--', color='blue')\n",
|
1087 |
+
"plt.plot(series['Barreira_Neg_critical'], label='Barreira Negativa - Crítica', linestyle='--', color='purple')\n",
|
1088 |
+
"\n",
|
1089 |
+
"# Marcadores para os gatilhos negativos\n",
|
1090 |
+
"gatilhos_neg = series[series['Gatilho'].isin(['Alerta Negativo', 'Ação Negativa', 'Crítica Negativa'])]\n",
|
1091 |
+
"plt.scatter(\n",
|
1092 |
+
" gatilhos_neg.index, \n",
|
1093 |
+
" gatilhos_neg['CUSUM_Negative'], \n",
|
1094 |
+
" c=gatilhos_neg['Gatilho'].map({\n",
|
1095 |
+
" 'Alerta Negativo': 'yellow', \n",
|
1096 |
+
" 'Ação Negativa': 'blue', \n",
|
1097 |
+
" 'Crítica Negativa': 'purple'\n",
|
1098 |
+
" }), \n",
|
1099 |
+
" label='Gatilho Negativo', \n",
|
1100 |
+
" marker='o'\n",
|
1101 |
+
")\n",
|
1102 |
+
"\n",
|
1103 |
+
"plt.axhline(0, color='black', linestyle='--', linewidth=1)\n",
|
1104 |
+
"plt.title('CUSUM Negativo com Barreiras e Gatilhos')\n",
|
1105 |
+
"plt.xlabel('Data')\n",
|
1106 |
+
"plt.ylabel('Cumulative Sum')\n",
|
1107 |
+
"plt.legend()\n",
|
1108 |
+
"plt.show()\n",
|
1109 |
+
"\n",
|
1110 |
+
"# --- 10. Exibir os Eventos de Gatilho ---\n",
|
1111 |
+
"# Filtrar os eventos de gatilho\n",
|
1112 |
+
"eventos_gatilho = series[series['Gatilho'].notnull()][['Gatilho']]\n",
|
1113 |
+
"\n",
|
1114 |
+
"print(\"Eventos de Gatilho Detectados:\")\n",
|
1115 |
+
"display(eventos_gatilho)\n"
|
1116 |
+
]
}
],
"metadata": {
"kernelspec": {
"display_name": "base",
"language": "python",
"name": "python3"
},
"language_info": {
"codemirror_mode": {
"name": "ipython",
"version": 3
},
"file_extension": ".py",
"mimetype": "text/x-python",
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.12.4"
}
},
"nbformat": 4,
"nbformat_minor": 2
}
src/utils.py
ADDED
@@ -0,0 +1,372 @@
from loguru import logger
import pandas as pd
from dask import delayed
import dask.dataframe as dd
from dask.diagnostics import ProgressBar
import os
import traceback
import numpy as np

logger.remove()
logger.add(lambda msg: print(msg, end=""), level="INFO")

def calculate_dollar_directions(df):
    try:
        logger.info("Computing 'trade_dollar', 'price_change' and 'dollar_direction'")
        df['trade_dollar'] = df['price'] * df['qty']
        df['price_change'] = df['price'].diff().fillna(0)
        df['dollar_direction'] = df['price_change'].apply(
            lambda x: 1 if x > 0 else (-1 if x < 0 else 0)
        ).astype(int)
        logger.info("'dollar_direction' computed successfully")
        return df
    except KeyError as e:
        logger.error(f"Error computing 'dollar_direction': {e}")
        return df
    except Exception as e:
        logger.error(f"Unexpected error computing 'dollar_direction': {e}")
        return df

def calculate_dollar_imbalance(df):
    try:
        logger.info("Computing 'trade_dollar', 'dollar_side' and 'dollar_imbalance'")
        df['trade_dollar'] = df['price'] * df['qty']
        # isBuyerMaker=True means the aggressor was a seller, so it counts as sell-side (-1)
        df['dollar_side'] = df['isBuyerMaker'].map({True: -1, False: 1})
        df['dollar_imbalance'] = df['trade_dollar'] * df['dollar_side']
        logger.info("'dollar_imbalance' computed successfully")
        return df
    except KeyError as e:
        logger.error(f"Error computing 'dollar_imbalance': {e}")
        return df
    except Exception as e:
        logger.error(f"Unexpected error computing 'dollar_imbalance': {e}")
        return df

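# A minimal vectorized sketch of the direction logic used in
# calculate_dollar_directions above (an alternative, not called anywhere in
# this module): np.sign maps positive/negative/zero price changes to 1/-1/0,
# which is exactly what the lambda in .apply() does.
def calculate_dollar_directions_vectorized(df):
    df['trade_dollar'] = df['price'] * df['qty']
    df['price_change'] = df['price'].diff().fillna(0)
    # np.sign returns floats (-1.0/0.0/1.0); cast to int to match the original column
    df['dollar_direction'] = np.sign(df['price_change']).astype(int)
    return df
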
def check_column_types(df, expected_types, file):
    for column, expected in expected_types.items():
        if column not in df.columns:
            logger.error(f"Column '{column}' is missing from file {file}.")
            return False

        actual_dtype = df[column].dtype

        if expected == int:
            if not np.issubdtype(actual_dtype, np.integer):
                logger.error(f"Column '{column}' has unexpected dtype: {actual_dtype}, expected: int")
                return False
        elif expected == float:
            if not np.issubdtype(actual_dtype, np.floating):
                logger.error(f"Column '{column}' has unexpected dtype: {actual_dtype}, expected: float")
                return False
        elif expected == str:
            if not (np.issubdtype(actual_dtype, np.object_) or np.issubdtype(actual_dtype, np.str_)):
                logger.error(f"Column '{column}' has unexpected dtype: {actual_dtype}, expected: str")
                return False
        elif expected == bool:
            if not np.issubdtype(actual_dtype, np.bool_):
                logger.error(f"Column '{column}' has unexpected dtype: {actual_dtype}, expected: bool")
                return False
        elif expected == pd.Timestamp:
            if not np.issubdtype(actual_dtype, np.datetime64):
                logger.error(f"Column '{column}' has unexpected dtype: {actual_dtype}, expected: datetime64")
                return False
        else:
            logger.error(f"Unrecognized expected type for column '{column}': {expected}")
            return False
    return True

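# Usage sketch for check_column_types (the frame and file name below are
# hypothetical, for illustration only):
#
#     frame = pd.DataFrame({'price': [1.0], 'qty': [2.0]})
#     ok = check_column_types(frame, {'price': float, 'qty': float}, 'example.parquet')
#     assert ok
#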
def create_dollar_bars(df, dollar_threshold, output_path):
    if os.path.exists(output_path):
        logger.info(f"'dollar_bars' already exists. Loading from {output_path}...")
        dollar_bars = dd.read_parquet(output_path).compute()
        logger.info("'dollar_bars' loaded successfully.")
    else:
        logger.info("Creating 'dollar_bars'...")
        with ProgressBar():
            # Dollar value of each trade
            df = df.assign(trade_dollar=df['price'] * df['qty'])

            # Running total of traded dollar value
            df = df.assign(cumulative_dollar=df['trade_dollar'].cumsum())

            # Bar number: a new bar every 'dollar_threshold' dollars
            df = df.assign(bar_number=(df['cumulative_dollar'] // dollar_threshold).astype(int))

            # Group by bar_number and run all aggregations in a single call
            grouped = df.groupby('bar_number').agg(
                trade_count=('trade_id', 'count'),
                price_open=('price', 'first'),
                price_high=('price', 'max'),
                price_low=('price', 'min'),
                price_close=('price', 'last'),
                qty_sum=('qty', 'sum'),
                quoteQty_sum=('quoteQty', 'sum'),
                time=('time', 'max'),
                isBuyerMaker_avg=('isBuyerMaker', 'mean'),
                isBestMatch_avg=('isBestMatch', 'mean')
            )

            # Write directly to Parquet via Dask
            grouped.to_parquet(output_path, engine='pyarrow', compression='snappy')

        logger.info("'dollar_bars' created and saved successfully.")
        # Reload the data to return it as a pandas DataFrame
        dollar_bars = dd.read_parquet(output_path).compute()

    return dollar_bars

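# Usage sketch for create_dollar_bars (the path and threshold below are
# hypothetical; the function expects a Dask DataFrame, matching the
# dd/assign operations above):
#
#     trades = dd.read_parquet('data/trades/*.parquet')
#     bars = create_dollar_bars(trades, 10_000_000, 'output/dollar_bars.parquet')
#
# Worked arithmetic: with dollar_threshold = 10_000_000, a trade that lifts
# cumulative_dollar to 24_500_000 lands in bar 24_500_000 // 10_000_000 = 2,
# so each bar closes after roughly 10M dollars of traded value.
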
@delayed
def process_file_run_bars(file, dollar_threshold, max_records):
    try:
        df = pd.read_parquet(file)
        logger.info(f"Processing file: {file}")
        logger.info(f"Available columns: {df.columns.tolist()}")

        # Expected column types
        expected_types = {
            'price': float,        # assuming 'price' is a float
            'qty': float,          # assuming 'qty' is a float
            'trade_id': int,       # 'trade_id' as an integer
            'isBuyerMaker': bool,
            'isBestMatch': bool,
            'time': pd.Timestamp   # assuming 'time' is a datetime
        }

        # Validate the column types
        if not check_column_types(df, expected_types, file):
            logger.error(f"Invalid column types in file {file}. Skipping.")
            return pd.DataFrame()

        # Convert 'time' to datetime if it is not already
        if not np.issubdtype(df['time'].dtype, np.datetime64):
            logger.info(f"Converting 'time' column to datetime in file {file}")
            df['time'] = pd.to_datetime(df['time'], unit='s', errors='coerce')
            if df['time'].isnull().any():
                logger.error(f"Failed to convert the 'time' column to datetime in file {file}.")
                return pd.DataFrame()

        # Make sure the DataFrame has enough rows
        if len(df) < 2:
            logger.warning(f"File {file} has fewer than two rows. Skipping.")
            return pd.DataFrame()

        # Compute tick directions
        df = calculate_dollar_directions(df)

        # Make sure 'price_change' was created
        if 'price_change' not in df.columns:
            logger.error(f"The 'price_change' column was not created in file {file}.")
            return pd.DataFrame()

        # Runs: start a new group whenever the direction flips; dollar value
        # accumulates within a run, and the 1e6 offset keeps bar numbers from
        # different runs disjoint
        df['direction_change'] = (df['dollar_direction'] != df['dollar_direction'].shift()).cumsum()
        df['cumulative_dollar'] = df.groupby('direction_change')['trade_dollar'].cumsum()
        df['bar_number'] = (df['cumulative_dollar'] // dollar_threshold).astype(int) + (df['direction_change'] * 1e6).astype(int)

        # Cap the DataFrame at the maximum number of records
        df = df.head(max_records)

        # Group by bar_number and aggregate
        grouped = df.groupby('bar_number').agg(
            trade_count=('trade_id', 'count'),
            price_open=('price', 'first'),
            price_high=('price', 'max'),
            price_low=('price', 'min'),
            price_close=('price', 'last'),
            qty_sum=('qty', 'sum'),
            quoteQty_sum=('quoteQty', 'sum'),
            time=('time', 'max'),
            isBuyerMaker_avg=('isBuyerMaker', 'mean'),
            isBestMatch_avg=('isBestMatch', 'mean')
        ).reset_index(drop=True)

        logger.info(f"File {file} processed successfully.")
        return grouped
    except Exception as e:
        logger.error(f"Error processing file {file}: {e}")
        logger.error(traceback.format_exc())
        return pd.DataFrame()

def create_dollar_run_bars(dataset_path, dollar_threshold, output_path, max_records):
    if os.path.exists(output_path):
        logger.info(f"'dollar_run_bars' already exists. Loading from {output_path}...")
        dollar_run_bars = dd.read_parquet(output_path).compute()
        logger.info("'dollar_run_bars' loaded successfully.")
    else:
        logger.info("Creating 'dollar_run_bars'...")
        # List the Parquet files under dataset_path
        parquet_files = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path) if f.endswith('.parquet')]
        parquet_files.sort()

        # Record counters
        total_records = 0
        limited_files = []
        records_per_file = {}

        # Walk the files, keeping only as many as needed to reach max_records
        for file in parquet_files:
            df_temp = pd.read_parquet(file)
            num_records = len(df_temp)
            if total_records + num_records <= max_records:
                limited_files.append(file)
                records_per_file[file] = num_records
                total_records += num_records
            else:
                remaining = max_records - total_records
                if remaining > 0:
                    limited_files.append(file)
                    records_per_file[file] = remaining
                    total_records += remaining
                break
            if total_records >= max_records:
                break

        if not limited_files:
            logger.warning("No files to process within the record limit.")
            return pd.DataFrame()

        logger.info(f"Total records to process: {total_records}")

        # Apply the delayed processing function to every selected file
        delayed_dfs = [process_file_run_bars(file, dollar_threshold, records_per_file[file]) for file in limited_files]

        # Compute in parallel
        with ProgressBar():
            run_bars_dask = dd.from_delayed(delayed_dfs)
            dollar_run_bars = run_bars_dask.compute()

        # Make sure the resulting DataFrame is not empty
        if not dollar_run_bars.empty:
            # Save as Parquet
            dollar_run_bars.to_parquet(output_path, engine='pyarrow', compression='snappy')
            logger.info("'dollar_run_bars' created and saved successfully.")
        else:
            logger.warning("'dollar_run_bars' is empty. Nothing was saved.")

    return dollar_run_bars

@delayed
def process_file_imbalance_bars(file, dollar_threshold, max_records):
    try:
        df = pd.read_parquet(file)
        logger.info(f"Processing file: {file}")
        logger.info(f"Available columns: {df.columns.tolist()}")

        # Expected column types
        expected_types = {
            'price': float,        # assuming 'price' is a float
            'qty': float,          # assuming 'qty' is a float
            'trade_id': int,       # 'trade_id' as an integer
            'isBuyerMaker': bool,
            'isBestMatch': bool,
            'time': pd.Timestamp   # assuming 'time' is a datetime
        }

        # Validate the column types
        if not check_column_types(df, expected_types, file):
            logger.error(f"Invalid column types in file {file}. Skipping.")
            return pd.DataFrame()

        # Convert 'time' to datetime if it is not already
        if not np.issubdtype(df['time'].dtype, np.datetime64):
            logger.info(f"Converting 'time' column to datetime in file {file}")
            df['time'] = pd.to_datetime(df['time'], unit='s', errors='coerce')
            if df['time'].isnull().any():
                logger.error(f"Failed to convert the 'time' column to datetime in file {file}.")
                return pd.DataFrame()

        # Make sure the DataFrame is not empty
        if len(df) < 1:
            logger.warning(f"File {file} is empty. Skipping.")
            return pd.DataFrame()

        # Compute the signed dollar imbalance
        df = calculate_dollar_imbalance(df)

        # Running total of the imbalance
        df['cumulative_imbalance'] = df['dollar_imbalance'].cumsum()

        # Bar number based on the threshold
        df['bar_number'] = (df['cumulative_imbalance'].abs() // dollar_threshold).astype(int)

        # Cap the DataFrame at the maximum number of records
        df = df.head(max_records)

        # Group by bar_number and aggregate
        grouped = df.groupby('bar_number').agg(
            trade_count=('trade_id', 'count'),
            price_open=('price', 'first'),
            price_high=('price', 'max'),
            price_low=('price', 'min'),
            price_close=('price', 'last'),
            qty_sum=('qty', 'sum'),
            quoteQty_sum=('quoteQty', 'sum'),
            time=('time', 'max'),
            isBuyerMaker_avg=('isBuyerMaker', 'mean'),
            isBestMatch_avg=('isBestMatch', 'mean')
        ).reset_index(drop=True)

        logger.info(f"File {file} processed successfully.")
        return grouped
    except Exception as e:
        logger.error(f"Error processing file {file}: {e}")
        logger.error(traceback.format_exc())
        return pd.DataFrame()

def create_dollar_imbalance_bars(dataset_path, dollar_threshold, output_path, max_records):
    if os.path.exists(output_path):
        logger.info(f"'dollar_imbalance_bars' already exists. Loading from {output_path}...")
        dollar_imbalance_bars = dd.read_parquet(output_path).compute()
        logger.info("'dollar_imbalance_bars' loaded successfully.")
    else:
        logger.info("Creating 'dollar_imbalance_bars'...")
        # List the Parquet files under dataset_path
        parquet_files = [os.path.join(dataset_path, f) for f in os.listdir(dataset_path) if f.endswith('.parquet')]
        parquet_files.sort()

        # Record counters
        total_records = 0
        limited_files = []
        records_per_file = {}

        # Walk the files, keeping only as many as needed to reach max_records
        for file in parquet_files:
            df_temp = pd.read_parquet(file)
            num_records = len(df_temp)
            if total_records + num_records <= max_records:
                limited_files.append(file)
                records_per_file[file] = num_records
                total_records += num_records
            else:
                remaining = max_records - total_records
                if remaining > 0:
                    limited_files.append(file)
                    records_per_file[file] = remaining
                    total_records += remaining
                break
            if total_records >= max_records:
                break

        if not limited_files:
            logger.warning("No files to process within the record limit.")
            return pd.DataFrame()

        logger.info(f"Total records to process: {total_records}")

        # Apply the delayed processing function to every selected file
        delayed_dfs = [process_file_imbalance_bars(file, dollar_threshold, records_per_file[file]) for file in limited_files]

        # Compute in parallel
        with ProgressBar():
            imbalance_bars_dask = dd.from_delayed(delayed_dfs)
            dollar_imbalance_bars = imbalance_bars_dask.compute()

        # Make sure the resulting DataFrame is not empty
        if not dollar_imbalance_bars.empty:
            # Save as Parquet
            dollar_imbalance_bars.to_parquet(output_path, engine='pyarrow', compression='snappy')
            logger.info("'dollar_imbalance_bars' created and saved successfully.")
        else:
            logger.warning("'dollar_imbalance_bars' is empty. Nothing was saved.")

    return dollar_imbalance_bars
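
# Driver sketch for the run/imbalance builders (a hypothetical example; the
# dataset path, output paths, threshold and record limit are assumptions,
# not part of this module):
#
#     if __name__ == "__main__":
#         run_bars = create_dollar_run_bars(
#             dataset_path='data/trades',
#             dollar_threshold=10_000_000,
#             output_path='output/dollar_run_bars.parquet',
#             max_records=50_000_000,
#         )
#         imbalance_bars = create_dollar_imbalance_bars(
#             dataset_path='data/trades',
#             dollar_threshold=10_000_000,
#             output_path='output/dollar_imbalance_bars.parquet',
#             max_records=50_000_000,
#         )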