import os

# Must be set BEFORE TensorFlow/Keras is imported, otherwise it has no
# effect: '2' suppresses TensorFlow C++ INFO and WARNING messages.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'

import keras
import numpy as np
from keras import backend as K
from keras.layers import LSTM, Dense, Dropout
from keras.models import Sequential
from matplotlib import pyplot
from sklearn import preprocessing


def predict_one_dict(train_data, predict_data, n_days=5, time_step=48, epochs=50, split=0.8):
    """Train an LSTM on ``train_data``, then roll it forward to forecast ``n_days`` values.

    Parameters:
        train_data: 2-D array (n_samples, >=1); column 0 holds the time series.
        predict_data: 1-D array of recent values seeding the rolling forecast;
            assumes it contains at least ``time_step`` values -- TODO confirm
            with callers.
        n_days: number of future steps to predict.
        time_step: window length (past values) fed to the LSTM per sample.
        epochs: number of training epochs.
        split: fraction of samples used for training; the remainder serves as
            the validation set during ``fit()``.

    Returns:
        np.ndarray of ``n_days`` predictions, each truncated to int.
    """
    dataset = train_data

    # Build (window -> next value) supervised pairs from the raw series.
    origin_data_x, origin_data_y = create_dataset(dataset, time_step)
    # ---- hyper-parameters ----
    split_point = int(len(origin_data_x) * split)
    input_size = 1   # feature dimension of each time step
    batch_size = 72  # samples per gradient update

    # Training inputs: standardize, then reshape to (samples, time_step, input_size).
    train_x = origin_data_x[:split_point]
    scaler = preprocessing.StandardScaler().fit(train_x)
    train_x = scaler.transform(train_x)
    train_x = train_x.reshape([-1, input_size, time_step])
    train_x = np.transpose(train_x, [0, 2, 1])

    # Training targets: standardized with their own scaler so mean/std can
    # later map predictions back to the original scale.
    train_y = origin_data_y[:split_point]
    train_y = train_y.reshape([-1, 1])
    scaler1 = preprocessing.StandardScaler().fit(train_y)
    train_y = scaler1.transform(train_y)
    mean = scaler1.mean_
    std = np.sqrt(scaler1.var_)

    # --- evaluation helpers (operate on standardized values) ---
    def calculate_mape(data_x, data_y):
        # Mean absolute percentage error on the original scale; rows whose
        # standardized target is exactly 0 are dropped to avoid division issues.
        index = list(np.nonzero(data_y)[0])
        data_y = np.array([data_y[i] for i in index])
        predict = model.predict(data_x)
        predict = np.array([predict[i] for i in index])
        return np.mean(np.abs(data_y - predict) * std / (np.abs(data_y * std + mean)))

    def calculate_mae(data_x, data_y):
        # Mean absolute error, rescaled back to the original units.
        index = list(np.nonzero(data_y)[0])
        data_y = np.array([data_y[i] for i in index])
        predict = model.predict(data_x)
        predict = np.array([predict[i] for i in index])
        return np.mean(np.abs(data_y - predict) * std)

    def mape(y_true, y_pred):
        # Keras-backend MAPE, usable as a custom metric during fit().
        return K.mean(K.abs(y_true - y_pred) * std / (K.abs(y_true * std + mean)))

    # Validation data, transformed with the scalers fit on the training set.
    test_x = origin_data_x[split_point:]
    test_x = scaler.transform(test_x)
    test_x = test_x.reshape([-1, input_size, time_step])
    test_x = np.transpose(test_x, [0, 2, 1])
    test_y = origin_data_y[split_point:]
    test_y = test_y.reshape([-1, 1])
    origin_data_test_y = test_y
    test_y = scaler1.transform(test_y)

    # design network: single LSTM layer -> dropout -> scalar output
    model = Sequential()
    model.add(LSTM(30, input_shape=(train_x.shape[1], train_x.shape[2]), return_sequences=False))
    model.add(Dropout(0.5))
    model.add(Dense(units=1))
    model.summary()
    model.compile(loss='mae', optimizer='adam', metrics=['mape'])
    model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size, validation_data=(test_x, test_y),
              verbose=2, shuffle=True)
    # Optionally save the trained model / report test-set metrics:
    # model.save('./models/lstm_model_number.h5')
    # print("Testdatasets mape:", calculate_mape(test_x, test_y))
    # print("Testdatasets mae:", calculate_mae(test_x, test_y))

    # Roll the model forward `days` steps, feeding each prediction back in.
    def predict_days(old_data, days):
        res_days = []
        for i in range(days):
            if i != 0:
                old_data = np.append(old_data, res)
            old_data = old_data[-time_step:]
            pre_data = old_data.reshape([-1, 1])
            # NOTE(review): a fresh scaler is fit on every window and its
            # inverse is applied to the model output, which differs from the
            # training-time scalers (`scaler` for X, `scaler1` for y) --
            # verify this rescaling is intentional.
            scaler2 = preprocessing.StandardScaler().fit(pre_data)
            pre_data_trans = scaler2.transform(pre_data)
            pre_data_trans = pre_data_trans.reshape([-1, input_size, time_step])
            pre_data_trans = np.transpose(pre_data_trans, [0, 2, 1])
            res_trans = model.predict(pre_data_trans)
            res = scaler2.inverse_transform(res_trans)
            res_days.append(int(res[0][0]))
        return np.asarray(res_days)

    res = predict_days(predict_data, n_days)
    return res


# 创建数据集
def create_dataset(dataset, look_back=1):
    """Slice a series into (window, next-value) supervised-learning pairs.

    ``dataset`` is a 2-D array whose first column holds the series. Each
    row of X is ``look_back`` consecutive values; the matching entry of Y
    is the value immediately following that window.
    """
    n_samples = len(dataset) - look_back
    data_x = [dataset[i:i + look_back, 0] for i in range(n_samples)]
    data_y = [dataset[i + look_back, 0] for i in range(n_samples)]
    return np.array(data_x), np.array(data_y)


