Fix issues
Signed-off-by: Aadhitya A <aadhitya864@gmail.com>
- app-cuda.py +6 -1
- app.py +6 -3
app-cuda.py (CHANGED)

@@ -112,6 +112,7 @@ def modelCNNLSTM(csv_file, prax):
     trek = df.iloc[len(df)-100:,1:23]
     #print(temp_data)
     data = temp_data
+    data = data.values.astype("float64")
     sc = MinMaxScaler()
     # Split the data into training and testing sets
     train_size = int(len(data) * 0.8)
@@ -221,6 +222,8 @@ def modelCNNLSTM(csv_file, prax):
     trek = df.iloc[0:len(df), 1:23]
     Y = trek[0:len(trek)]
     YP = trek[1:len(trek)]
+    Y = Y.values.astype("float64")
+    YP = YP.values.astype("float64")
     Y1 = Y['Close']
     Y2 = YP['Close']
     Yx = pd.DataFrame(YP, index=YP.index, columns=YP.columns)
@@ -255,7 +258,6 @@ def modelCNNLSTM(csv_file, prax):
 def modelCNNLSTM_OpenGap(csv_file, prax):
     # Read the data
     df = csv_file
-    df = df['Date/Time'].values.astype("float64")
     datLength = len(df)
     df['O-C'] = 0
     for i in range(datLength):
@@ -268,6 +270,7 @@ def modelCNNLSTM_OpenGap(csv_file, prax):
     trek = df.iloc[datLength-100:,1:24]
     #print(temp_data)
     data = temp_data
+    data = data.values.astype("float64")
     sc = MinMaxScaler()
     # Split the data into training and testing sets
     train_size = int(len(data) * 0.8)
@@ -378,6 +381,8 @@ def modelCNNLSTM_OpenGap(csv_file, prax):
     trek = df.iloc[0:len(df), 1:24]
     Y = trek[0:len(trek)]
     YP = trek[1:len(trek)]
+    Y = Y.values.astype("float64")
+    YP = YP.values.astype("float64")
     Y1 = Y['Close']
     Y2 = YP['Close']
     Yx = pd.DataFrame(YP, index=YP.index, columns=YP.columns)
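The hunks above converge on one pattern: take a pandas slice, coerce it to a float64 NumPy array, then scale it and split it 80/20. A minimal sketch of that flow is below; the toy frame and its columns are assumptions for illustration, not this repo's schema.

```python
import numpy as np
import pandas as pd
from sklearn.preprocessing import MinMaxScaler

# Toy stand-in for the CSV-backed df (columns are illustrative assumptions).
df = pd.DataFrame({
    "Date/Time": pd.date_range("2023-01-01", periods=200, freq="D"),
    "Open": np.random.rand(200),
    "Close": np.random.rand(200),
})

trek = df.iloc[len(df) - 100:, 1:]    # last 100 rows, numeric columns only
close = trek["Close"].to_numpy()      # label-based access while still a DataFrame

data = trek.values.astype("float64")  # plain float64 ndarray, as in the added lines
sc = MinMaxScaler()
data = sc.fit_transform(data)

# 80/20 split, mirroring train_size = int(len(data) * 0.8)
train_size = int(len(data) * 0.8)
train_data, test_data = data[:train_size], data[train_size:]
```

Note that once `.values.astype("float64")` has run, the object is a NumPy array, so label lookups such as `['Close']` and attributes like `.index`/`.columns` have to be taken from the DataFrame before the conversion, as the sketch does with `close`.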
app.py (CHANGED)

@@ -67,6 +67,8 @@ MAX_EPOCHS = 3
 LEARNING_RATE = 0.01
 OPTUNA = True
 ACCELERATOR = "cpu"
+# This below line is only for GPU. Don't use it for CPU
+#os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"
 
 # Variables to count the number of files
 w = 7
@@ -106,6 +108,7 @@ def objective(trial, X_train, y_train, X_test, y_test):
 def modelCNNLSTM(csv_file, prax):
     # Read the data
     df = csv_file
+    df = df['Date/Time'].values.astype("float64")
     temp_data = df.iloc[0:len(df)-100, 1:23]
     trek = df.iloc[len(df)-100:,1:23]
     #print(temp_data)
@@ -114,7 +117,6 @@ def modelCNNLSTM(csv_file, prax):
     # Split the data into training and testing sets
     train_size = int(len(data) * 0.8)
     train_data, test_data = data[:train_size], data[train_size:]
-
     # Separate the input features and target variable
     X_train, y_train = train_data, train_data['Close']
     X_test, y_test = test_data, test_data['Close']
@@ -266,6 +268,7 @@ def modelCNNLSTM_OpenGap(csv_file, prax):
     trek = df.iloc[datLength-100:,1:24]
     #print(temp_data)
     data = temp_data
+    #data = data.values.astype("float64")
     sc = MinMaxScaler()
     # Split the data into training and testing sets
     train_size = int(len(data) * 0.8)
@@ -603,7 +606,7 @@ def modelTFT(csv_file, prax):
     #torch.cuda.empty_cache()
     best_model_path = trainer.checkpoint_callback.best_model_path
     best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
-    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])
+    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])#.cuda()
     predictions = best_tft.predict(val_dataloader, mode="prediction")
     raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)
 
@@ -835,7 +838,7 @@ def modelTFT_OpenGap(csv_file, prax):
     #torch.cuda.empty_cache()
     best_model_path = trainer.checkpoint_callback.best_model_path
     best_tft = TemporalFusionTransformer.load_from_checkpoint(best_model_path)
-    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])
+    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])#.cuda()
     predictions = best_tft.predict(val_dataloader, mode="prediction")
     raw_predictions = best_tft.predict(val_dataloader, mode="raw", return_x=True)
 
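The two recurring pieces in the app.py hunks are the GPU-only allocator setting (kept commented out for CPU) and the actuals/predictions gathering around the trained TemporalFusionTransformer. Below is a rough sketch of how they fit together, assuming a pytorch_forecasting model and validation dataloader like the ones built in modelTFT / modelTFT_OpenGap; the `evaluate_tft` helper and its MAE check are illustrative, not code from this repo.

```python
import os
import torch

# GPU-only knob mirrored from the diff: cap the CUDA allocator's split size.
# Set it before any CUDA work starts; leave it unset on CPU runs.
if torch.cuda.is_available():
    os.environ["PYTORCH_CUDA_ALLOC_CONF"] = "max_split_size_mb:1024"

def evaluate_tft(best_tft, val_dataloader):
    """Illustrative helper: compare validation actuals with TFT point forecasts."""
    # pytorch_forecasting dataloaders yield (x, (target, weight)); y[0] is the target.
    actuals = torch.cat([y[0] for x, y in iter(val_dataloader)])
    # mode="prediction" returns point forecasts aligned with the actuals.
    predictions = best_tft.predict(val_dataloader, mode="prediction")
    # Simple sanity metric; move predictions to CPU in case the model ran on GPU.
    return (actuals - predictions.cpu()).abs().mean()
```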