#!/usr/bin/env python
# coding: utf-8

import pandas as pd
import time, datetime
# Load the 5-minute bar data and split it chronologically into train/test.
df_data_5minute = pd.read_csv('5m.csv')

# Drop the stray index column written by a previous to_csv().
# errors='ignore' makes the script robust to files saved without it.
df_data_5minute.drop(columns=['Unnamed: 0'], errors='ignore', inplace=True)

df = df_data_5minute

# Move 'close' to column 0 so it can serve as the prediction target
# (the windowing code below reads the target from column 0).
close = df.pop('close')
df.insert(0, 'close', close)

# Chronological 80/20 split -- no shuffling, to avoid look-ahead leakage.
split = int(df.shape[0] * 0.8)
data_train = df.iloc[:split, :]
data_test = df.iloc[split:, :]
print(data_train.shape, data_test.shape)


# NOTE: redundant coding declaration left over from a merged notebook cell;
# Python only honors such declarations on line 1 or 2 of a file.
import pandas as pd
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.preprocessing import MinMaxScaler
import time
# Scale every feature into [0, 1].  The scaler is fitted on the training
# split only, so no information from the test period leaks into training.
scaler = MinMaxScaler(feature_range=(0, 1))
data_train = scaler.fit_transform(data_train)
data_test = scaler.transform(data_test)

from keras.layers import Input, Dense, LSTM
from keras.models import Model
from keras.layers import *
from keras.models import *
from keras.optimizers import Adam

# ---- Hyperparameters -------------------------------------------------------
output_dim = 1
batch_size = 256
epochs = 50
seq_len = 5          # look-back window: number of past 5-minute bars per sample
hidden_size = 128

# Keep the model's input-shape constants in sync with the data instead of
# hard-coding them (they were independently hard-coded to 5 and 6 before,
# which silently breaks if seq_len or the column count changes).
TIME_STEPS = seq_len
INPUT_DIM = data_train.shape[1]

lstm_units = 64


def _make_windows(data, window):
    """Build supervised samples from a scaled 2-D array.

    Returns (X, y) where X[i] = data[i:i+window, :] and
    y[i] = data[i+window, 0] -- column 0 is the scaled close price,
    i.e. each window predicts the next bar's close.
    """
    n = data.shape[0] - window
    X = np.array([data[i:i + window, :] for i in range(n)])
    y = np.array([data[i + window, 0] for i in range(n)])
    return X, y


X_train, y_train = _make_windows(data_train, seq_len)
X_test, y_test = _make_windows(data_test, seq_len)

print(X_train.shape, y_train.shape, X_test.shape, y_test.shape)


# ---- Model: Conv1D front-end -> bidirectional LSTM -> dense head -----------
inputs = Input(shape=(TIME_STEPS, INPUT_DIM))

# Pointwise (kernel_size=1) convolution: mixes the features at each time
# step without looking across time.
x = Conv1D(filters=64, kernel_size=1, activation='relu')(inputs)
# pool_size equals the sequence length, so pooling collapses the time axis
# to a single step.  NOTE(review): this leaves the BiLSTM a sequence of
# length 1 -- confirm that is intended.
x = MaxPooling1D(pool_size=5)(x)
x = Dropout(0.2)(x)
print(x.shape)

lstm_out = Bidirectional(LSTM(lstm_units, activation='relu'), name='bilstm')(x)
print(lstm_out.shape)

# Sigmoid bounds the prediction to (0, 1), matching the MinMax-scaled target.
output = Dense(1, activation='sigmoid')(lstm_out)

model = Model(inputs=inputs, outputs=output)
# summary() prints the table itself and returns None, so wrapping it in
# print() (as the original did) emits a spurious "None" line.
model.summary()


# In[30]:


# Train on mean-squared error and visualise predictions against the
# held-out test targets.  shuffle=False preserves temporal order.
model.compile(optimizer='adam', loss='mean_squared_error')
model.fit(X_train, y_train, batch_size=batch_size, epochs=epochs, shuffle=False)

y_pred = model.predict(X_test)

train_loss = model.evaluate(X_train, y_train, batch_size=batch_size)
test_loss = model.evaluate(X_test, y_test, batch_size=batch_size)
print('MSE Train loss:', train_loss)
print('MSE Test loss:', test_loss)

plt.plot(y_test, label='test')
plt.plot(y_pred, label='pred')
plt.legend()
plt.show()


# In[32]:


from math import sqrt
from numpy import concatenate
from pandas import DataFrame
from sklearn.metrics import mean_squared_error
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import r2_score

# R^2 and MAE are computed in the scaled (0-1) space.  The original
# inverse-scaling of y_pred / y_test was entirely commented out, so the
# dead code has been removed; re-introduce scaler.inverse_transform here
# if metrics in price units are needed.
error_mae = []
error_r2 = []

r2 = r2_score(y_test, y_pred)
mae = mean_absolute_error(y_test, y_pred)
error_r2.append(r2)
error_mae.append(mae)

print('Test r2:%.3f' % r2)
print('Test MAE:%.3f' % mae)
print(error_r2)
print(error_mae)


# In[ ]:





# In[ ]:




