update: scraper and machine learning model
Files changed:
- .github/workflows/gru_pipeline.yaml      +8 -4
- .github/workflows/lstm_gru_pipeline.yaml +8 -4
- .github/workflows/lstm_pipeline.yaml     +8 -4
- postman/symbols_test.json                +9 -0
- scraper.go                               +6 -1
- training.py                              +13 -6
.github/workflows/gru_pipeline.yaml
CHANGED

@@ -48,9 +48,8 @@ jobs:
        if: env.match != 'true'
        run: |
          mkdir datasets
-
-
-          go run scraper.go
+          go run scraper.go \
+            --symbols-file=./postman/symbols_test.json

      - name: Install Libraries
        if: env.match != 'true'

@@ -62,7 +61,12 @@ jobs:
          mkdir models
          mkdir pickles
          mkdir posttrained
-
+
+          python training.py \
+            --epochs=200 \
+            --batchs=32 \
+            --sequences=5 \
+            --algorithm=GRU

      - name: Set Pipeline Schedule
        if: env.match != 'true'
.github/workflows/lstm_gru_pipeline.yaml
CHANGED

@@ -48,9 +48,8 @@ jobs:
        if: env.match != 'true'
        run: |
          mkdir datasets
-
-
-          go run scraper.go
+          go run scraper.go \
+            --symbols-file=./postman/symbols_test.json

      - name: Install Libraries
        if: env.match != 'true'

@@ -62,7 +61,12 @@ jobs:
          mkdir models
          mkdir pickles
          mkdir posttrained
-
+
+          python training.py \
+            --epochs=200 \
+            --batchs=32 \
+            --sequences=5 \
+            --algorithm=LSTM_GRU

      - name: Set Pipeline Schedule
        if: env.match != 'true'
.github/workflows/lstm_pipeline.yaml
CHANGED

@@ -48,9 +48,8 @@ jobs:
        if: env.match != 'true'
        run: |
          mkdir datasets
-
-
-          go run scraper.go
+          go run scraper.go \
+            --symbols-file=./postman/symbols_test.json

      - name: Install Libraries
        if: env.match != 'true'

@@ -62,7 +61,12 @@ jobs:
          mkdir models
          mkdir pickles
          mkdir posttrained
-
+
+          python training.py \
+            --epochs=200 \
+            --batchs=32 \
+            --sequences=5 \
+            --algorithm=LSTM

      - name: Set Pipeline Schedule
        if: env.match != 'true'
postman/symbols_test.json
ADDED

@@ -0,0 +1,9 @@
+{
+  "symbols": [
+    "BTC-USD",
+    "ETH-USD",
+    "USDT-USD",
+    "BNB-USD",
+    "SOL-USD"
+  ]
+}
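For reference, a minimal Go sketch of how a file with this shape can be decoded. The struct and field names below are illustrative assumptions; the diff does not show how scraper.go actually models the file.

package main

import (
    "encoding/json"
    "fmt"
    "log"
    "os"
)

// SymbolsFile mirrors the shape of postman/symbols_test.json.
// Name and field are illustrative; the real type in scraper.go is not shown here.
type SymbolsFile struct {
    Symbols []string `json:"symbols"`
}

func main() {
    // Open and decode the symbols list.
    f, err := os.Open("./postman/symbols_test.json")
    if err != nil {
        log.Fatalf("[ERROR] failed to open JSON file: %v", err)
    }
    defer f.Close()

    var symbols SymbolsFile
    if err := json.NewDecoder(f).Decode(&symbols); err != nil {
        log.Fatalf("[ERROR] failed to decode JSON file: %v", err)
    }

    fmt.Println(symbols.Symbols) // [BTC-USD ETH-USD USDT-USD BNB-USD SOL-USD]
}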
scraper.go
CHANGED

@@ -11,6 +11,7 @@ import (
    "strconv"
    "sync"
    "time"
+   "flag"
)


@@ -88,7 +89,11 @@ func getCurrentUnixTimestamp() int64 {


func main() {
-   jsonFile, err := os.Open("./postman/symbols.json")
+   symbols_file := flag.String("symbols-file", "default", "symbols file")
+   flag.Parse()
+
+   // jsonFile, err := os.Open("./postman/symbols.json")
+   jsonFile, err := os.Open(*symbols_file)
    if err != nil {
        log.Fatalf("[ERROR] failed to open JSON file: %v", err)
    }
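Assembled from the hunk above, the start of main() now reads roughly as follows. This is a sketch: everything after the error check is unchanged by the commit and omitted, and the flag's default value "default" is not a usable path, so callers are expected to pass --symbols-file explicitly, as the updated workflows do.

package main

import (
    "flag"
    "log"
    "os"
)

func main() {
    // New CLI flag; "default" is a placeholder, not a real path.
    symbols_file := flag.String("symbols-file", "default", "symbols file")
    flag.Parse()

    // The previously hard-coded path is kept as a comment in the commit:
    // jsonFile, err := os.Open("./postman/symbols.json")
    jsonFile, err := os.Open(*symbols_file)
    if err != nil {
        log.Fatalf("[ERROR] failed to open JSON file: %v", err)
    }
    defer jsonFile.Close()

    // Remainder of main() (JSON decoding and scraping) is unchanged and omitted.
}

Local invocation then mirrors the workflow step: go run scraper.go --symbols-file=./postman/symbols_test.json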
training.py
CHANGED

@@ -167,14 +167,12 @@ class PostProcessor:
        with open(filename, 'w') as f:
            json.dump(data, f)

-def main(algorithm: str):
+def main(algorithm: str, sequence_length: int, epochs: int, batch_size: int):
    datasets_path = './datasets'
    models_path = './models'
    posttrained = './posttrained'
    pickle_file = './pickles'

-    sequence_length = 60
-    epochs = 200
    batch_size = 32

    data_processor = DataProcessor(datasets_path)

@@ -230,10 +228,19 @@ def main(algorithm: str):
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description = "Tebakaja Model Trainer")

-    parser.add_argument('-a', '--algorithm',
-        type = str, required = True,
+    parser.add_argument('-a', '--algorithm', type = str, required = True,
        help = 'select the algorithm to be trained (LSTM, GRU, LSTM_GRU)')

+    parser.add_argument('-e', '--epochs', type = int, required = True, help = 'epochs')
+    parser.add_argument('-b', '--batchs', type = int, required = True, help = 'batch length')
+    parser.add_argument('-s', '--sequences', type = int, required = True, help = 'sequences length')
+
    args = parser.parse_args()

-    main(
+    main(
+        epochs = args.epochs,
+        batch_size = args.batchs,
+        algorithm = args.algorithm,
+        sequence_length = args.sequences
+    )
+