import numpy as np
import os
from keras.models import Sequential
from keras.layers import Dense, Dropout
from keras.layers import LSTM, GRU
from sklearn import preprocessing
import pandas as pd
from matplotlib import pyplot
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '0'  # 0 = show all TensorFlow C++ log messages (INFO and above)

# Model training


def train_model_all_cells(train_data, rows, columns, model_save_path, time_step=48, epochs=20, split=0.7):
    """Train a GRU forecaster over all grid cells and save it to disk.

    Each training sample is a window of `time_step` consecutive rows of
    the flattened (rows * columns) feature vector; the target is the row
    immediately following the window.

    Args:
        train_data: 2-D array of shape (samples, rows * columns).
        rows: number of grid rows; with `columns` defines the feature size.
        columns: number of grid columns.
        model_save_path: path handed to `model.save`.
        time_step: length of the input window (default 48).
        epochs: number of training epochs (default 20).
        split: fraction of the data used for training; the chronological
            remainder becomes the validation set (default 0.7).
    """
    # Chronological train/validation split — no shuffling across the cut.
    dataset = train_data
    split_point = int(len(dataset) * split)
    train_part = dataset[:split_point]
    test_part = dataset[split_point:]
    # NOTE(review): create_dataset fits an independent MinMaxScaler on each
    # split, so train and validation data are scaled with different
    # statistics (leakage-prone). Prefer fitting one scaler on the training
    # split and reusing it for validation.
    train_x, train_y, _ = create_dataset(train_part, time_step)
    test_x, test_y, _ = create_dataset(test_part, time_step)

    # ---- Hyper-parameters ----
    input_size = rows * columns  # dimensionality of each time step's features
    batch_size = 12  # samples per gradient update

    # Shape data as (samples, time_step, features) for the GRU layer.
    train_x = train_x.reshape([-1, time_step, input_size])
    train_y = train_y.reshape([-1, input_size])
    test_x = test_x.reshape([-1, time_step, input_size])
    test_y = test_y.reshape([-1, input_size])

    # Model: single GRU layer -> dropout -> dense output over all cells.
    model = Sequential()
    model.add(GRU(350, activation='relu', input_shape=(train_x.shape[1], train_x.shape[2]), return_sequences=False))
    # NOTE(review): 0.1% dropout has almost no regularizing effect — confirm intent.
    model.add(Dropout(0.001))
    model.add(Dense(units=input_size, activation='relu'))
    model.summary()
    model.compile(loss='mae', optimizer='adam', metrics=['mape'])
    model.fit(train_x, train_y, epochs=epochs, batch_size=batch_size,
              validation_data=(test_x, test_y), verbose=2, shuffle=True)

    # Persist the trained model.
    model.save(model_save_path)


# 创建训练集、测试集
# Build the supervised-learning training/validation windows
def create_dataset(dataset, look_back, scaler=None):
    """Scale `dataset` and slice it into supervised-learning windows.

    Args:
        dataset: 2-D array of shape (samples, features).
        look_back: window length; each X sample is `look_back` consecutive
            rows and the matching y target is the row that follows.
        scaler: optional pre-fitted scaler exposing `transform`. When None
            (the default, preserving the old behavior) a new MinMaxScaler
            is fitted on `dataset` itself. Pass the scaler fitted on the
            training split when transforming the validation split, so both
            are scaled with the same statistics (avoids data leakage).

    Returns:
        Tuple (dataX, dataY, scaler): windows of shape
        (n, look_back, features), targets of shape (n, features), and the
        scaler used — callers can `inverse_transform` predictions with it.
        When len(dataset) <= look_back, dataX and dataY are empty.
    """
    if scaler is None:
        scaler = preprocessing.MinMaxScaler().fit(dataset)
    dataset = scaler.transform(dataset)
    dataX, dataY = [], []
    for i in range(len(dataset) - look_back):
        dataX.append(dataset[i:(i + look_back), :])
        dataY.append(dataset[i + look_back, :])
    return np.array(dataX), np.array(dataY), scaler
