#!/usr/bin/env python
# coding: utf-8

import sys
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt

import os
import datetime
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import MinMaxScaler
from sklearn.metrics import r2_score
import tensorflow as tf
from tensorflow.keras import Sequential, layers, utils, losses
from tensorflow.keras.callbacks import ModelCheckpoint, TensorBoard

# Matplotlib: use the SimHei font so CJK labels render, and keep the minus
# sign displayable (SimHei lacks the Unicode minus glyph).
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False
from sklearn.metrics import mean_absolute_error
from sklearn.metrics import mean_squared_error
# NOTE(review): tensorflow was already imported above (L14) — this re-import
# is harmless but redundant.
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import layers
import warnings

# Globally silence library warnings (sklearn / TF deprecation chatter).
warnings.filterwarnings('ignore')

def category(data):
    """Encode every object (string) column of *data* in place as integer codes.

    Each object-dtype column is converted to a pandas categorical and replaced
    by its ``cat.codes`` (ints starting at 0, categories in sorted order;
    missing values become -1). Numeric columns are left untouched.

    Bug fix: the original called ``np.issubdtype(data[col], "object")`` with a
    Series where a dtype is required; the dtype itself must be inspected.

    Args:
        data: DataFrame to encode. Mutated in place; nothing is returned.
    """
    for col in data.columns:
        if data[col].dtype == object:
            data[col] = data[col].astype('category').cat.codes

def _build_recurrent_model(usingModel, layerArgv, dropoutArgv, denseArgv, input_shape):
    """Build a stacked 3-layer recurrent regressor with inter-layer dropout.

    ``usingModel == 'LSTM'`` selects LSTM cells; any other value falls back to
    GRU (this matches the original if/else behaviour).
    """
    rnn = layers.LSTM if usingModel == 'LSTM' else layers.GRU
    model = keras.Sequential()
    model.add(rnn(layerArgv, input_shape=input_shape, return_sequences=True))
    model.add(layers.Dropout(dropoutArgv))
    model.add(rnn(layerArgv, return_sequences=True))
    model.add(layers.Dropout(dropoutArgv))
    model.add(rnn(layerArgv))
    model.add(layers.Dense(denseArgv))
    return model


def preprocessor(inputDataSet, usingModel, outputModelFile, layerArgv, dropoutArgv, denseArgv, lossArgv, monitorArgv, patienceArgv, factorArgv, min_lrArgv, batch_sizeArgv, epochsArgv):
    """Load the sensor CSV, window it, train an LSTM/GRU model and save it.

    Pipeline: read the CSV (indexed by the timestamp column), keep one
    device's records, category-encode string columns, build 24-step sliding
    windows that predict the next 6 device temperatures, normalize with
    train-set statistics (persisted as .npy for inference), then train and
    save the model. If ``outputModelFile`` already exists, training resumes
    from the saved model instead of a freshly built one.

    Args:
        inputDataSet: path of the input CSV file.
        usingModel: 'LSTM' for LSTM cells, anything else means GRU.
        outputModelFile: path the trained model is loaded from / saved to.
        layerArgv: units per recurrent layer.
        dropoutArgv: dropout rate between recurrent layers.
        denseArgv: units of the final Dense output layer.
        lossArgv: Keras loss identifier (e.g. 'mae').
        monitorArgv / patienceArgv / factorArgv / min_lrArgv:
            ReduceLROnPlateau settings.
        batch_sizeArgv / epochsArgv: training settings.
    """
    data = pd.read_csv(inputDataSet, index_col="检测时间", parse_dates=True)
    # Keep a single device's records (runtime strings must stay exact).
    data = data[(data['设备名称'] == '2号主变35KV套管接线夹B相')]
    data = data[['设备温度(℃)', '环境温度(℃)', '环境湿度', '风速(m/s)', '风向', '负载电流(A)']]
    category(data)  # encode object columns (e.g. wind direction) as ints
    print('*********************************')
    print(data)
    data.sort_index(inplace=True)

    # Sliding windows: 24 input steps predict the following 6 target steps.
    sequence_length = 24
    delay = 6
    windows = []
    for i in range(len(data) - sequence_length - delay):
        windows.append(data.iloc[i: i + sequence_length + delay].values)
    windows = np.array(windows)
    X = windows[:, :-delay, :]
    Y = windows[:, -delay:, 0]  # column 0 is 设备温度(℃), the target

    # Chronological 70/30 train/validation split (no shuffling).
    split = int(X.shape[0] * 0.7)
    X_train, X_val = X[:split], X[split:]
    Y_train, Y_val = Y[:split], Y[split:]

    # Standardize using training statistics only, and persist them so
    # inference can apply the identical transform.
    X_mean = X_train.mean(axis=0)
    X_std = X_train.std(axis=0)
    Y_mean = Y_train.mean(axis=0)
    Y_std = Y_train.std(axis=0)
    np.save("./X_mean.npy", X_mean)
    np.save("./X_std.npy", X_std)
    np.save("./Y_mean.npy", Y_mean)
    np.save("./Y_std.npy", Y_std)

    X_train_norm = (X_train - X_mean) / X_std
    Y_train_norm = (Y_train - Y_mean) / Y_std
    X_val_norm = (X_val - X_mean) / X_std
    Y_val_norm = (Y_val - Y_mean) / Y_std

    # Resume from a previously saved model if one exists; otherwise build a
    # new one (the original always built a model and then discarded it when
    # loading — building only on demand is behaviour-identical but cheaper).
    if os.path.exists(outputModelFile):
        print('+++++++++++++++++++++++++++++++++++++++++++++++++++')
        model = tf.keras.models.load_model(outputModelFile)
    else:
        model = _build_recurrent_model(usingModel, layerArgv, dropoutArgv,
                                       denseArgv, X_train_norm.shape[1:])

    # Bug fix: lossArgv was accepted from the CLI but ignored (loss was
    # hard-coded to 'mae'); honor the caller-supplied loss.
    model.compile(optimizer=keras.optimizers.Adam(), loss=lossArgv)
    learning_rate_reduction = keras.callbacks.ReduceLROnPlateau(
        monitor=monitorArgv, patience=patienceArgv, factor=factorArgv,
        min_lr=min_lrArgv)
    model.fit(X_train_norm, Y_train_norm,
              batch_size=batch_sizeArgv,
              epochs=epochsArgv,
              validation_data=(X_val_norm, Y_val_norm),
              callbacks=[learning_rate_reduction])

    model.save(outputModelFile)  # persist trained weights for the next run



if __name__ == '__main__':
    # The script needs exactly 13 CLI arguments; sys.argv[0] is the script
    # path itself, hence the check against 14.
    if len(sys.argv) != 14:
        print("TrainModel.py need 13 argvs, please check your input!")
    else:
        dataset_path, model_kind, model_out = sys.argv[1:4]
        rnn_units = int(sys.argv[4])
        dropout_rate = float(sys.argv[5])
        dense_units = int(sys.argv[6])
        loss_name = sys.argv[7]
        monitor_name = sys.argv[8]
        patience = int(sys.argv[9])
        lr_factor = float(sys.argv[10])
        lr_floor = float(sys.argv[11])
        batch_size = int(sys.argv[12])
        epochs = int(sys.argv[13])
        preprocessor(dataset_path, model_kind, model_out, rnn_units,
                     dropout_rate, dense_units, loss_name, monitor_name,
                     patience, lr_factor, lr_floor, batch_size, epochs)
