import os

import numpy as np
import pandas as pd
import tensorflow as tf
from keras.layers import Dense, Dropout
from keras.layers import LSTM
from keras.models import Sequential
from sklearn.metrics import mean_squared_error
from matplotlib import pyplot
from sklearn.preprocessing import LabelEncoder

pyplot.rcParams['font.sans-serif'] = ['SimHei']  # use a CJK font so Chinese labels render correctly
pyplot.rcParams['axes.unicode_minus'] = False  # render the minus sign correctly with a CJK font

# NOTE: module-level side effect — the pollution dataset is read at import time.
_dataset = pd.read_csv('data/pollution.csv', header=0, index_col=0)


# Slice a time-ordered matrix into supervised (history -> future) sample pairs.
def create_dataset(data, pre_nx=1, n_next=1, tar_pos=0):
    """
    Build windowed training samples from a 2-D time series.

    :param data: 2-D array of shape (timesteps, features)
    :param pre_nx: number of past timesteps used as model input
    :param n_next: number of future timesteps to predict
    :param tar_pos: column index of the target value (currently unused;
                    the full feature row is kept as the label)
    :return: (train_X, train_Y) float64 arrays with shapes
             (samples, pre_nx, features) and (samples, n_next, features)
    """
    train_X, train_Y = [], []
    # Stop early enough that every label window contains the full n_next
    # rows. The previous bound (shape[0] - pre_nx) produced ragged labels
    # whenever n_next > 1; for the default n_next=1 the range is unchanged.
    for i in range(data.shape[0] - pre_nx - n_next + 1):
        train_X.append(data[i:i + pre_nx])
        train_Y.append(data[i + pre_nx:i + pre_nx + n_next])
    train_X = np.array(train_X, dtype='float64')
    train_Y = np.array(train_Y, dtype='float64')

    return train_X, train_Y

# Column-wise min-max normalization of the training data; the returned
# `normalize` table is reused to transform new inputs / invert predictions.
def NormalizeMult(data):
    """
    Scale every column of `data` to [0, 1] via min-max normalization.

    :param data: 2-D array-like of raw values
    :return: (normalized float64 array, normalize) where `normalize` has
             shape (n_features, 2) holding each column's [min, max]
    """
    # Copy and force float64: in-place division into an int array would
    # silently truncate results.
    data = np.array(data, dtype='float64')
    normalize = np.empty((data.shape[1], 2), dtype='float64')
    for i in range(data.shape[1]):
        col = data[:, i]
        listlow, listhigh = col.min(), col.max()
        normalize[i, 0] = listlow
        normalize[i, 1] = listhigh
        delta = listhigh - listlow
        # Constant columns (delta == 0) are left untouched, matching the
        # expectations of NormalizeMultUseData / FNormalizeMult.
        if delta != 0:
            data[:, i] = (col - listlow) / delta
    return data, normalize


# Apply a previously computed min-max table to out-of-sample input data
# (used to prepare fresh observations before feeding a trained network).
def NormalizeMultUseData(data, normalize):
    """
    Normalize `data` in place using the per-column [min, max] rows of
    `normalize`; the (mutated) input array is also returned.
    """
    for col in range(data.shape[1]):
        low = normalize[col, 0]
        span = normalize[col, 1] - low
        if span == 0:
            continue  # constant training column: leave values untouched
        for row in range(data.shape[0]):
            data[row, col] = (data[row, col] - low) / span

    return data


# Invert the column-wise min-max normalization on model output.
def FNormalizeMult(_data, _normalize):
    """
    Map normalized values back to the original scale (x * (max-min) + min
    per column). Returns a new float64 array; the input is not modified.
    """
    restored = np.array(_data, dtype='float64')
    for col in range(restored.shape[1]):
        low = _normalize[col, 0]
        span = _normalize[col, 1] - low
        if span == 0:
            continue  # constant training column: nothing to rescale
        for row in range(restored.shape[0]):
            restored[row, col] = restored[row, col] * span + low

    return restored


# Build a differenced series: each value minus the one `interval` steps back.
def difference(_data, interval=1):
    """Return x[i] - x[i - interval] for i in [interval, len(x)) as float64."""
    deltas = [_data[i] - _data[i - interval] for i in range(interval, len(_data))]
    return np.array(deltas, dtype='float64')


# Inverse differencing: rebuild the original series from its differences.
def Fdifference(_data, interval=1, initial=None):
    """
    Reconstruct the original series from `interval`-step differences
    (the inverse of `difference`).

    :param _data: differenced values, i.e. the output of `difference`
    :param interval: lag used when differencing
    :param initial: the first `interval` values of the original series;
                    defaults to zeros when unknown
    :return: float64 array of the restored series (`interval` seed values
             followed by len(_data) reconstructed ones)
    """
    if initial is None:
        initial = np.zeros(interval, dtype='float64')
    restored = list(np.asarray(initial, dtype='float64'))
    # x[i] = diff[i - interval] + x[i - interval]
    for step in np.asarray(_data, dtype='float64'):
        restored.append(step + restored[-interval])
    return np.array(restored, dtype='float64')


# Build a stacked two-layer LSTM regressor.
def create_model(train_X, train_Y, set_units=50, set_dropout=0.5):
    """
    Assemble and compile (but do not train) the forecasting network.

    :param train_X: training inputs, shape (samples, timesteps, features)
    :param train_Y: training targets; dimension 1 sizes the output layer
    :param set_units: hidden units for each LSTM layer
    :param set_dropout: dropout rate applied after each LSTM layer
    :return: compiled Keras model (MSE loss, Adam optimizer)
    """
    timesteps, features = train_X.shape[1], train_X.shape[2]
    model = Sequential([
        LSTM(set_units, input_shape=(timesteps, features), return_sequences=True),
        Dropout(set_dropout),  # regularization between the stacked LSTMs
        LSTM(set_units, return_sequences=False),
        Dropout(set_dropout),
        Dense(train_Y.shape[1]),
    ])
    model.compile(loss='mse', optimizer='adam')
    return model


# Load a previously saved Keras model from disk.
def get_model(modelname='_my-analysis.model'):
    """
    :param modelname: name/path of the saved model
    :return: the deserialized Keras model
    """
    return tf.keras.models.load_model(modelname)


# Fit the network and chart how training/validation loss evolve per epoch.
def train_model(model, train_X, train_Y, test_X, test_Y, set_epochs=50, set_batch=72, save=True):
    """
    Train `model` and plot the loss / val_loss curves.

    :param model: compiled Keras model to train
    :param train_X: training inputs
    :param train_Y: training targets
    :param test_X: held-out inputs, used as validation data
    :param test_Y: held-out targets, used as validation data
    :param set_epochs: number of training epochs
    :param set_batch: batch size per gradient step
    :param save: when True, checkpoint the fitted model to disk
    :return: None
    """
    history = model.fit(train_X, train_Y,
                        epochs=set_epochs,
                        batch_size=set_batch,
                        validation_data=(test_X, test_Y),
                        verbose=1)
    if save:
        model.save('auto-save-model.model')

    train_loss = history.history['loss']
    valid_loss = history.history['val_loss']
    epochs_axis = range(len(train_loss))
    pyplot.plot(epochs_axis, train_loss, 'b-', label='loss')
    pyplot.plot(epochs_axis, valid_loss, 'r-', label='val_loss')
    pyplot.legend(loc='best')
    pyplot.show()


# Evaluate on the test split: report RMSE and plot predicted vs. actual.
def evaluate_model(model, test_X, test_Y, normalize):
    """
    Predict on the test set, de-normalize, print RMSE, and plot actual
    (red) vs. predicted (blue) curves.

    NOTE: the chart intentionally uses the still-normalized series, so both
    curves share the same transformed scale; RMSE is computed on the
    de-normalized first column.

    :param model: trained model
    :param test_X: test inputs
    :param test_Y: test targets
    :param normalize: per-column [min, max] table from NormalizeMult
    :return: None
    """
    yhat = model.predict(test_X)

    print("\n反归一化")
    # De-normalize, then keep only the first (target) column of each.
    inv_yhat = FNormalizeMult(_data=yhat, _normalize=normalize)[:, 0]
    inv_y = FNormalizeMult(_data=test_Y, _normalize=normalize)[:, 0]

    print("\n实际inv_y:\n", inv_y, "\n预测inv_yhat:", inv_yhat)

    rmse = np.sqrt(mean_squared_error(inv_y, inv_yhat))
    print("\nTest RMSE : %.3f" % rmse)

    # Plot only the leftover partial month (length % 31) of each series.
    actual_len = test_Y.shape[0] % 31
    pred_len = yhat.shape[0] % 31
    pyplot.plot(range(actual_len), test_Y[:actual_len, 0], 'r-', label='实际图')
    pyplot.plot(range(pred_len), yhat[:pred_len, 0], 'b-', label='预测图')
    pyplot.legend(loc='best')

    pyplot.show()


# Run the model on prepared input and return de-normalized predictions.
def do_predict(model, input_X, normalize):
    """
    :param model: trained forecasting model
    :param input_X: already-normalized input, shape (samples, timesteps, features)
    :param normalize: per-column [min, max] table used for de-normalization
    :return: 1-D array of de-normalized predictions (first output column)
    """
    raw_output = model.predict(input_X)

    # Map back to the original scale and keep the target column only.
    prediction = FNormalizeMult(_data=raw_output, _normalize=normalize)[:, 0]
    print("\n预测结果:", prediction)
    return prediction


# Smoke-test inference: feed 12 hand-written observations through a saved model.
def test_predict():
    """Load a saved model plus its normalization table and predict on fixed rows."""
    model = get_model('my-analysis-1k-2.model')
    normalize = np.load('normalizeSet.npy')
    sample = np.array([[159.0, -11, -5.0, 1021.0, 'SE', 3.57, 0, 0],
                       [181.0, -7, -5.0, 1022.0, 'SE', 5.36, 1, 0],
                       [138.0, -7, -5.0, 1022.0, 'SE', 6.25, 2, 0],
                       [109.0, -7, -6.0, 1022.0, 'SE', 7.14, 3, 0],
                       [105.0, -7, -6.0, 1023.0, 'SE', 8.93, 4, 0],
                       [124.0, -7, -5.0, 1024.0, 'SE', 10.72, 0, 0],
                       [120.0, -8, -6.0, 1024.0, 'SE', 12.51, 0, 0],
                       [132.0, -7, -5.0, 1025.0, 'SE', 14.3, 0, 0],
                       [140.0, -7, -5.0, 1026.0, 'SE', 17.43, 1, 0],
                       [152.0, -8, -5.0, 1026.0, 'SE', 20.56, 0, 0],
                       [148.0, -8, -5.0, 1026.0, 'SE', 23.69, 0, 0],
                       [164.0, -8, -5.0, 1025.0, 'SE', 27.71, 0, 0]])
    # Column 4 is a categorical string (wind direction); encode to integers.
    sample[:, 4] = LabelEncoder().fit_transform(sample[:, 4])
    sample = sample.astype('float64')

    sample = NormalizeMultUseData(sample, normalize=normalize)
    # The model expects a batch dimension: (1, timesteps, features).
    sample = sample.reshape(1, sample.shape[0], sample.shape[1])
    print(sample.shape)

    do_predict(model=model, input_X=sample, normalize=normalize)


# End-to-end training run on the pollution dataset.
def test_main():
    """Encode, normalize, window, split, build, train, and evaluate the model."""
    # Normalization must happen before windowing via create_dataset.
    values = _dataset.values
    # Column 4 (wind direction) is categorical text; map it to integers.
    values[:, 4] = LabelEncoder().fit_transform(values[:, 4])
    values = values.astype('float64')

    # Min-max normalize and persist the table for later inference runs.
    values, normalize = NormalizeMult(values)
    np.save('normalizeSet.npy', normalize)

    X, Y = create_dataset(values, pre_nx=12)
    print(Y.shape)

    # First 30% of the samples is the test split; the remainder trains.
    split = int(X.shape[0] * 0.3)
    test_X, test_Y = X[:split, :], Y[:split, :]
    train_X, train_Y = X[split:, :], Y[split:, :]

    model = create_model(train_X, train_Y, set_units=256, set_dropout=0.75)
    # Loss curves flatten out around epoch 50.
    train_model(model, train_X, train_Y, test_X, test_Y, set_epochs=50, set_batch=2, save=False)
    evaluate_model(model, test_X, test_Y, normalize)


if __name__ == '__main__':
    # test_predict()
    # exit(0)
    test_main()
    exit(0)

    # NOTE(review): everything below is unreachable because of the exit(0)
    # above -- it is a parked experiment for the ChinaCO2 dataset.
    data = pd.read_csv('data/ChinaCO2_19-20.csv', header=0, index_col=0)
    data = data.values
    data = data.astype('float64')
    data, mynormalize = NormalizeMult(data)
    # np.save('ChinaCO2.npy', mynormalize)
    X, Y = create_dataset(data=data, pre_nx=24)
    test_X, test_Y = X[:int(X.shape[0] * 0.3), :], Y[:int(Y.shape[0] * 0.3), :]
    train_X, train_Y = X[int(X.shape[0] * 0.3):, :], Y[int(Y.shape[0] * 0.3):, :]
    model = get_model('ChinaCO2_analysis_pre24.model')
    # model = create_model(train_X, train_Y, set_units=256)  # NICE
    # Loss had stabilized by roughly epoch 50.
    train_model(model, train_X, train_Y, test_X, test_Y, set_epochs=50, set_batch=1, save=False)
    evaluate_model(model, test_X, test_Y, mynormalize)
    model.save('ChinaCO2_analysis_pre24_new.model')

    pass
