#!/usr/bin/env python
# coding: utf-8

# In[5]:

if __name__ == '__main__':
    # Part 1 - Data Preprocessing
    # Importing the libraries
    import numpy as np
    import matplotlib.pyplot as plt
    import pandas as pd
    import tushare as ts


    # In[6]:


    # Importing the training set
    # Data API introduction: https://mp.weixin.qq.com/s/XoyACntxEXX3ZEqvECUbBg
    data=ts.get_k_data('600000',start='2014-01-01',end='2023-01-01')# fetch historical daily bars for SPD Bank (600000) via the tushare API
    print(data.shape)


    # In[7]:


    # Show the first rows to inspect the layout, then keep columns 1-5
    # (dropping the leading date column) as the model features.
    # NOTE(review): presumably open/close/high/low/volume -- verify
    # against the frame printed above.
    print(data.head())
    all_data = data.iloc[:, 1:6]
    print(all_data.head())


    # # Feature Scaling (normalise the features)
    #

    # In[9]:


    from sklearn.preprocessing import MinMaxScaler

    # Scale every feature column into [0, 1] so the LSTM trains on
    # comparable magnitudes; the scaler learns per-column min/max.
    scaler = MinMaxScaler(feature_range=(0, 1))
    all_data_scaled = scaler.fit_transform(all_data)
    print(all_data_scaled)
    print('训练数据长度是:',len(all_data_scaled))


    # In[10]:


    # Creating a data structure with 60 timesteps and 1 output: each
    # sample holds the previous 60 days of all five scaled features, and
    # the target is column 1 of the scaled data on the following day
    # (presumably the close price -- verify against the tushare columns).
    window = 60
    n_rows = len(all_data_scaled)
    samples = [all_data_scaled[end - window:end, ] for end in range(window, n_rows)]
    targets = [all_data_scaled[end, 1] for end in range(window, n_rows)]
    features = np.reshape(np.array(samples), (len(samples), window, -1))
    labels = np.array(targets)
    # Chronological split: first 1600 windows train, the remainder test.
    x_train,x_test,y_train,y_test= features[:1600],features[1600:],labels[:1600],labels[1600:]
    print('shape of x_train:',x_train.shape)
    print('shape of x_test:',x_test.shape)
    print('shape of y_train:',y_train.shape)
    print('shape of y_test:',y_test.shape)


    # In[11]:


    # Part 2 - Building the LSTM
    # Importing the Keras libraries and packages
    import warnings
    # Silence Keras/TF deprecation chatter so the training log stays readable.
    warnings.filterwarnings("ignore")
    from keras.models import Sequential
    from keras.layers import Dense
    # NOTE(review): SimpleRNN is imported but never used below.
    from keras.layers import LSTM,SimpleRNN
    from keras.layers import Dropout


    # In[12]:


    # Stacked LSTM regressor: four LSTM layers of 50 units, each followed
    # by 20% dropout, finished with a single linear output neuron.
    regressor = Sequential()

    # The first recurrent layer declares the input shape:
    # (timesteps, n_features) = (60, 5).
    regressor.add(LSTM(units=50, return_sequences=True, input_shape=(x_train.shape[1], 5)))
    regressor.add(Dropout(0.2))

    # Two more sequence-returning LSTM layers with the same width.
    for _ in range(2):
        regressor.add(LSTM(units=50, return_sequences=True))
        regressor.add(Dropout(0.2))

    # Final LSTM layer collapses the sequence to a single vector.
    regressor.add(LSTM(units=50))
    regressor.add(Dropout(0.2))

    import keras
    from sklearn.model_selection import train_test_split
    # Single linear unit: this is a regression head.
    regressor.add(Dense(units=1))
    # Compiling the LSTM: Adam optimizer, mean-squared-error loss.
    regressor.compile(optimizer='adam', loss='mean_squared_error')

    # Fitting the LSTM to the Training set
    class LossHistory(keras.callbacks.Callback):
        """Record per-batch and per-epoch training metrics for later plotting.

        NOTE(review): this callback is defined but never passed to
        ``regressor.fit`` below, so as written it never runs; pass an
        instance via ``callbacks=[...]`` to activate it.
        """

        def on_train_begin(self, logs=None):
            # Reset the metric containers at the start of each training run.
            self.losses = {'batch':[], 'epoch':[]}
            self.accuracy = {'batch':[], 'epoch':[]}
            self.val_loss = {'batch':[], 'epoch':[]}
            self.val_acc = {'batch':[], 'epoch':[]}

        def on_batch_end(self, batch, logs=None):
            # Fixed: the original used a mutable default (``logs={}``),
            # which is shared across every call of the method; use None
            # and normalise inside the body instead.
            logs = logs or {}
            # val_* keys are normally absent from batch logs, so these
            # appends typically record None -- kept for compatibility.
            self.losses['batch'].append(logs.get('loss'))
            self.accuracy['batch'].append(logs.get('acc'))
            self.val_loss['batch'].append(logs.get('val_loss'))
            self.val_acc['batch'].append(logs.get('val_acc'))

        def on_epoch_end(self, batch, logs=None):
            # ``batch`` actually receives the epoch index from Keras; the
            # parameter name is kept to preserve the original signature.
            logs = logs or {}
            self.losses['epoch'].append(logs.get('loss'))
            self.accuracy['epoch'].append(logs.get('acc'))
            self.val_loss['epoch'].append(logs.get('val_loss'))
            self.val_acc['epoch'].append(logs.get('val_acc'))

        def loss_plot(self, loss_type):
            """Plot the recorded loss curve; ``loss_type`` is 'batch' or 'epoch'."""
            iters = range(len(self.losses[loss_type]))
            plt.figure()
            # Training loss in green.
            plt.plot(iters, self.losses[loss_type], 'g', label='train loss')
            if loss_type == 'epoch':
                # Validation loss is only recorded at epoch granularity.
                plt.plot(iters, self.val_loss[loss_type], 'k', label='val loss')
            plt.grid(True)
            plt.xlabel(loss_type)
            plt.ylabel('acc-loss')
            plt.legend(loc="upper right")
            plt.show()
    # Train for 5 epochs (batch size 32) using the held-out windows as the
    # validation set; fit() returns a History object with per-epoch metrics.
    history=regressor.fit(x_train, y_train, epochs = 5, batch_size = 32,validation_data=(x_test, y_test))


    # In[17]:


    # Plot the training vs. validation loss per epoch.
    loss = history.history['loss']
    val_loss = history.history['val_loss']
    epochs = range(1, len(loss) + 1)
    plt.title('Loss curve')
    plt.plot(epochs, loss, 'red', label='Training loss')
    plt.plot(epochs, val_loss, 'blue', label='Validation loss')
    plt.legend()
    plt.show()


    # In[16]:


    # Build a single-column scaler so the 1-D predictions can be mapped
    # back to the original price scale.  MinMaxScaler scales per column,
    # so fitting on all_data column 1 alone reproduces the same min/max
    # that column had inside the 5-column scaler used for training data.
    # NOTE(review): column 1 of all_data is presumably the close price
    # (tushare k-data layout) -- verify against the printed frame.
    sc_one = MinMaxScaler(feature_range = (0, 1))
    sc_one.fit_transform(all_data.iloc[:, 1:2])
    # Predict on the train/test windows, then invert the scaling.
    predicted_stock_train = regressor.predict(x_train)
    predicted_stock_train = sc_one.inverse_transform(predicted_stock_train)
    predicted_stock_test = regressor.predict(x_test)
    predicted_stock_test = sc_one.inverse_transform(predicted_stock_test)
    # Targets are 1-D; reshape to (n, 1) before inverse-transforming.
    real_price_train=sc_one.inverse_transform(np.reshape(y_train,(-1,1)))
    real_price_test=sc_one.inverse_transform(np.reshape(y_test,(-1,1)))


    # In[17]:


    # Visualising the train results: real series first (red), predicted
    # series second (blue), so legend order matches the original plot.
    for series, colour, tag in (
        (real_price_train, 'red', 'Real Stock Price'),
        (predicted_stock_train, 'blue', 'Predicted TAT Stock Price'),
    ):
        plt.plot(series, color=colour, label=tag)
    plt.title('train Stock Price Prediction')
    plt.xlabel('Time')
    plt.ylabel('Stock Price')
    plt.legend()
    plt.show()


    # In[18]:


    # Visualising the test results: real series first (red), predicted
    # series second (blue), same ordering as the training plot.
    for series, colour, tag in (
        (real_price_test, 'red', 'Real Stock Price'),
        (predicted_stock_test, 'blue', 'Predicted TAT Stock Price'),
    ):
        plt.plot(series, color=colour, label=tag)
    plt.title('test Stock Price Prediction')
    plt.xlabel('Time')
    plt.ylabel('Stock Price')
    plt.legend()
    plt.show()


    # In[19]:


    # Report test-set error in the original price scale: mean squared
    # error and mean absolute error.
    from sklearn.metrics import mean_absolute_error, mean_squared_error
    mse_score = mean_squared_error(real_price_test, predicted_stock_test)
    mae_score = mean_absolute_error(real_price_test, predicted_stock_test)
    print('测试集的均方误差是:',mse_score)
    print('测试集的平方绝对误差是:',mae_score)


    # In[ ]:





    # In[ ]:





    # In[ ]:




