import numpy as np
import os
from keras.models import Sequential
from keras.layers import Dense,Dropout
from keras.layers import LSTM,GRU
from matplotlib import pyplot
from sklearn import preprocessing
import pandas as pd
# '0' is the least-suppressive TensorFlow C++ log level (all messages shown);
# NOTE(review): TF usually reads this before its first import — keras is
# imported above, so this may take effect too late; confirm placement.
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'

# Model training


def train_model_one_cell(train_data, cell_num, time_step, epochs, split):
    """Train a GRU forecaster on a single cell's series, plot and save it.

    Parameters
    ----------
    train_data : 2-D array-like, one column per cell; column ``cell_num`` is used.
    cell_num : int, column index of the cell to model.
    time_step : int, look-back window length fed to the network.
    epochs : int, number of training epochs.
    split : float in (0, 1), fraction of the series used for training.

    Side effects: shows a matplotlib figure and saves the trained model to
    ``./models/lstm_model_one_cell_<cell_num>.h5``.
    """
    input_size = 1   # feature dimension of each time step
    batch_size = 12  # mini-batch size used during fitting

    # Split the selected column into train / test segments.
    dataset = train_data[:, cell_num]
    dataset = dataset.reshape([len(dataset), 1])
    split_point = int(len(dataset) * split)
    train_part = dataset[:split_point]
    test_part = dataset[split_point:]
    # NOTE(review): each segment is scaled by its own MinMaxScaler fitted inside
    # create_dataset; fitting one scaler on the training segment and reusing it
    # on the test segment would avoid test-set leakage — confirm intent.
    train_x, train_y, _ = create_dataset(train_part, time_step)
    test_x, test_y, scaler = create_dataset(test_part, time_step)

    # Shape data for Keras: (samples, time_steps, features).
    train_x = train_x.reshape([-1, time_step, input_size])
    train_y = train_y.reshape([-1, input_size])
    test_x = test_x.reshape([-1, time_step, input_size])
    test_y = test_y.reshape([-1, input_size])

    # Build the network: GRU encoder -> dense head with light dropout.
    dropout_rate = 0.01
    model = Sequential()
    model.add(GRU(256, activation='relu', input_shape=(train_x.shape[1], train_x.shape[2]), return_sequences=False))
    model.add(Dense(units=48, activation='relu'))
    model.add(Dropout(dropout_rate))
    model.add(Dense(units=1, activation='relu'))
    model.summary()
    model.compile(loss='mae', optimizer='adam', metrics=['mse'])
    model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
              validation_data=(test_x, test_y), verbose=2, shuffle=True)

    # Predict on the test windows and map back to the original scale.
    predict = model.predict(test_x)
    predict_t = scaler.inverse_transform(predict)

    # Plot train / test / prediction on one shared x axis.
    pyplot.figure(1)
    train_d = pd.DataFrame(dataset[:split_point])
    train_d.index = range(1, len(train_d) + 1)  # 1-based axis for the training segment
    test_d = pd.DataFrame(dataset[split_point:])
    test_d.index = range(split_point, len(test_d) + split_point)  # continues after the training segment
    predict_d = pd.DataFrame(predict_t[:])
    # Bug fix: create_dataset drops the first `time_step` points of the test
    # segment, so the first prediction targets dataset index
    # split_point + time_step — offset the x axis accordingly.
    predict_d.index = range(split_point + time_step, len(predict_d) + split_point + time_step)
    pyplot.plot(train_d, label='train_data')
    pyplot.plot(test_d, label='test_data')
    pyplot.plot(predict_d, label='predict_data')
    pyplot.legend()
    pyplot.title('单地区预测结果', fontproperties='SimHei', fontsize=12)
    pyplot.xlabel('小时数', fontproperties='SimHei', fontsize=12)
    pyplot.ylabel('需求量', fontproperties='SimHei', fontsize=12)
    pyplot.show()

    # Save the trained model, creating the target directory if needed.
    model_save_path = './models/lstm_model_one_cell_' + str(cell_num) + '.h5'
    os.makedirs(os.path.dirname(model_save_path), exist_ok=True)
    model.save(model_save_path)


# Build a supervised (window, next-value) dataset from a scaled series
def create_dataset(dataset, look_back):
    """Scale *dataset* to [0, 1] and slice it into (window, next value) pairs.

    Returns a tuple ``(X, y, scaler)``: ``X`` holds the ``look_back``-long
    input windows, ``y`` the value immediately following each window, and
    ``scaler`` is the fitted MinMaxScaler so callers can invert the scaling.
    """
    scaler = preprocessing.MinMaxScaler().fit(dataset)
    scaled = scaler.transform(dataset)
    n_samples = len(scaled) - look_back
    windows = [scaled[start:start + look_back, :] for start in range(n_samples)]
    targets = [scaled[start + look_back, :] for start in range(n_samples)]
    return np.array(windows), np.array(targets), scaler
