import numpy as np
from keras.layers.core import Dense, Activation, Dropout
from keras.layers.recurrent import LSTM
from keras.models import Sequential
import time
import pandas as pd
from statsmodels.tsa.arima.model import ARIMA

import matplotlib.pylab as plt

from keras.models import load_model

# 归一化
from numpy import newaxis
from pandas import DataFrame




# def normalization(data):
#     minVals = data.min(0)
#     maxVals = data.max(0)
#     ranges = maxVals - minVals
#     normData = np.zeros(np.shape(data))
#     m = data.shape[0]
#     normData = data - np.tile(minVals, (m, 1))
#     normData = normData/np.tile(ranges, (m, 1))
#     print("r:")
#     print(ranges)
#     print("min")
#     print(minVals)
#     return normData, ranges, minVals

# def revers_normalization(data, ranges, minVals):
#     normData = np.zeros(np.shape(data))
#     m = data.shape[0]
#     normData = data * np.tile(ranges, (m, 1))
#     normData = normData + np.tile(minVals, (m, 1))
#     print(normData)
#     return normData

def normalise_windows(window_data):
    """Scale each window relative to its first element: p -> p / window[0] - 1.

    The first value of every normalised window is therefore 0; the rest are
    relative changes against that base. Returns a list of lists.
    """
    scaled = []
    for window in window_data:
        base = float(window[0])
        scaled.append([float(value) / base - 1 for value in window])
    return scaled

def reverse_normalise(data, row, pro_data):
    """Invert the window normalisation: value -> (value + 1) * data[row + i].

    `data` is the original raw series and `row` the offset where `pro_data`'s
    windows start. The input is copied first, so the caller's sequence is
    never mutated.
    """
    restored = pro_data.copy()
    for i in range(len(restored)):
        restored[i] = (restored[i] + 1) * float(data[row + i])
    return restored

def load_data(filename, seq_len, normalise_window):
    """Read a single-column Excel series and build LSTM train/test windows.

    Each sample is a sliding window of seq_len consecutive values; the value
    immediately after the window is its label. Split is 70% train / 30% test
    (only the training portion is shuffled, to keep the test set in time
    order). Returns [raw_series, x_train, y_train, x_test, y_test, split_row].
    """
    frame = pd.read_excel(filename)
    # Keep only the first column as a flat Python list.
    series = [record[0] for record in np.array(frame).tolist()]
    print(series)
    window_length = seq_len + 1  # seq_len inputs plus one trailing label
    windows = []
    for start in range(len(series) - window_length):
        windows.append(series[start: start + window_length])
    if normalise_window:
        windows = normalise_windows(windows)
    windows = np.array(windows)  # shape: (num_windows, seq_len + 1)
    split_row = round(0.7 * windows.shape[0])  # 70/30 train/test boundary
    train = windows[:int(split_row), :]
    np.random.shuffle(train)  # shuffle training samples only
    x_train = train[:, :-1]   # all but the last column are inputs
    y_train = train[:, -1]    # last column is the label
    x_test = windows[int(split_row):, :-1]
    y_test = windows[int(split_row):, -1]
    # Append a trailing feature dimension of 1 for the LSTM input shape.
    x_train = np.reshape(x_train, (x_train.shape[0], x_train.shape[1], 1))
    x_test = np.reshape(x_test, (x_test.shape[0], x_test.shape[1], 1))
    return [series, x_train, y_train, x_test, y_test, split_row]

def plot_results(predicted_data, true_data):
    """Show prediction against ground truth on one white-background figure."""
    figure = plt.figure(facecolor='white')
    axes = figure.add_subplot(111)
    axes.plot(true_data, label='True Data')
    plt.plot(predicted_data, label='Prediction')
    plt.legend()
    plt.show()

def predict_point_by_point(model, data):
    """Predict every sample in a single call and flatten the output to 1-D."""
    raw = model.predict(data)
    return np.reshape(raw, (raw.size,))

def predict_sequences_multiple(model, data, window_size, prediction_len, interval):
    """Roll the model forward prediction_len steps from every interval-th window.

    Each predicted point is appended as the newest element of the sliding
    window (the oldest is dropped), so later steps are predicted from earlier
    predictions. Returns a list of prediction sequences, one per start window.
    """
    sequences = []
    for start in range(int(len(data) / interval)):
        frame = data[start * interval]
        seq = []
        for _ in range(prediction_len):
            next_value = model.predict(frame[newaxis, :, :])[0, 0]
            seq.append(next_value)
            # Drop the oldest value and append the new prediction at the end.
            frame = np.insert(frame[1:], [window_size - 1], next_value, axis=0)
        sequences.append(seq)
    return sequences

def plot_results_multiple(predicted_data, true_data, interval):
    """Plot the true series plus each multi-step forecast, offset to its start.

    Forecast i began at index i * interval, so it is left-padded with None
    values to line up with the true data on the x axis.
    """
    figure = plt.figure(facecolor='white')
    axes = figure.add_subplot(111)
    axes.plot(true_data)
    for idx, seq in enumerate(predicted_data):
        padding = [None for _ in range(idx * interval)]
        plt.plot(padding + seq)
        # plt.legend()
    plt.show()

# Script entry point: fit ARIMA on the raw series, load a pre-trained LSTM,
# and plot both forecasts against the real data.
# (Original note: run via the green button in the IDE gutter.)
if __name__ == '__main__':
    filename = 'gold'
    data, x_train, y_train, x_test, y_test, row = load_data('data/'+filename+'.xlsx', 20, True)
    print('shape_x_train', np.array(x_train).shape)  # shape_x_train (3709, 50, 1)
    print('shape_y_train', np.array(y_train).shape)  # shape_y_train (3709,)
    print('shape_x_test', np.array(x_test).shape)  # shape_x_test (412, 50, 1)
    print('shape_y_test', np.array(y_test).shape)  # shape_y_test (412,)
    # # design network
    # model = Sequential()
    # model.add(LSTM(100, input_shape=(x_train.shape[1], x_train.shape[2]), return_sequences=True))
    # model.add(Dropout(0.2))
    # # for i in range(1):
    # #     model.add(LSTM(100, return_sequences=True))
    # #     model.add(Dropout(0.2))
    # model.add(LSTM(100, return_sequences=False))
    # model.add(Dropout(0.2))
    # model.add(Dense(1))
    # start = time.time()
    # model.compile(loss='mae', optimizer='adam')
    # print('compilation time : ', time.time() - start)
    # # fit network
    # model.fit(x_train, y_train, epochs=50, batch_size=512, validation_data=(x_test, y_test), verbose=1,
    #                     shuffle=False)
    #
    # model.save('model/ARIMA-LSTM-'+filename+'-predictor_layer2.h5')

    # ARIMA: fit on the raw series and take the in-sample one-step predictions.
    # NOTE(review): order (5, 1, 8) looks hand-tuned for this dataset —
    # confirm against AIC/BIC diagnostics before reusing on another series.
    model1 = ARIMA(data, order=(5, 1, 8)).fit()
    arima_predict = model1.predict()

    # LSTM: load the previously trained model and predict the test windows.
    model = load_model('model/ARIMA-LSTM-'+filename+'-predictor_layer2.h5')
    test_predictions = predict_point_by_point(model, x_test)
    # De-normalise back to the price scale (inverse of normalise_windows).
    # NOTE(review): `row` is the train/test split index from load_data; this
    # assumes test window i was normalised by data[row + i] — verify the
    # seq_len offset between window base and label is intended here.
    y_test_reverse = reverse_normalise(data, row, y_test)
    test_predictions = reverse_normalise(data, row, test_predictions)
    # # plot_results(test_predictions[-365:-1], y_test_reverse[-365:-1])

    # Full-range comparison plot: last ~year of ARIMA, LSTM and real prices.
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    ax.plot(arima_predict[-365:-1], label="ARIMA forecast")
    # NOTE(review): add_subplot(111) returns the same axes as `ax` above;
    # `ax1` is redundant but harmless.
    ax1 = fig.add_subplot(111)
    ax1.plot(test_predictions[-365:-1], label="LSTM forecast")
    plt.plot(data[-365:-1], label="real")
    plt.xlabel('Date', fontsize=12, verticalalignment='top')
    plt.ylabel('Prices', fontsize=14, horizontalalignment='center')
    plt.legend()
    plt.savefig('save/' + filename + '/predictions.png', bbox_inches='tight')
    # plt.show()

    # Zoomed-in comparison plot over a 100-point slice of the same series.
    fig = plt.figure(figsize=(10, 6))
    ax = fig.add_subplot(111)
    ax.plot(arima_predict[-225:-125], label="ARIMA forecast")
    ax1 = fig.add_subplot(111)
    ax1.plot(test_predictions[-225:-125], label="LSTM forecast")
    plt.plot(data[-225:-125], label="real")
    plt.xlabel('Date', fontsize=12, verticalalignment='top')
    plt.ylabel('Prices', fontsize=14, horizontalalignment='center')
    plt.legend()
    plt.savefig('save/' + filename + '/part_predictions.png', bbox_inches='tight')
    plt.show()

    # test_predictions = predict_point_by_point(model, x_all)
    #
    # plot_results(test_predictions, y_all)

    # plt.plot(test_predictions)
    # plt.show()

    # interval = 1
    # pre_len = 7
    # predict_expot = []
    # predictions = predict_sequences_multiple(model, x_test, 20, pre_len, interval)
    # for i in range(len(predictions)):
    #     predictions[i] = reverse_normalise(data, row+i*interval, predictions[i])
    # for i in range(len(predictions)):
    #     predict_expot.append([predictions[i][pre_len-1]])
    # plot_results_multiple(predictions, y_test_reverse, interval)
    # # print(model.summary())
    # print(predict_expot)
    # predict_expot = pd.DataFrame(predict_expot[-365:], columns=['Prediction'])
    # predict_expot.to_excel(filename+'_predict.xlsx')








