import random

from numpy import concatenate
from pandas import read_csv, DataFrame
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score
from sklearn.preprocessing import MinMaxScaler
from tensorflow.python.keras import Input
import keras
from keras import backend as K
import tensorflow as tf
from tensorflow.python.keras.callbacks import EarlyStopping
import numpy as np
from tensorflow.python.keras.models import load_model, Sequential
from tensorflow.python.keras.utils.np_utils import to_categorical

from my_self_attention import My_Self_Attention
from keras_self_attention import SeqSelfAttention
from keras_self_attention import SeqWeightedAttention

from util import series_to_supervised, TIME_STEP, draw_data, UNITS_NUM
from keras.layers import LSTM, RepeatVector, Dense, \
    Activation, Add, Reshape, Input, Lambda, Multiply, Concatenate, Dot, Dropout
import sys

def one_encoder_attention_step(X, t, SA_array):
    """Apply the t-th self-attention layer to X and keep only timestep t.

    Args:
        X: 3-D tensor of shape (batch, time, features).
        t: index of the timestep whose attended output is wanted.
        SA_array: list of per-timestep self-attention layers (callables).

    Returns:
        Tensor of shape (batch, 1, features): timestep t after attention.
    """
    # Run the whole sequence through the attention layer dedicated to t.
    attended = SA_array[t](X)
    # Slice with t:t+1 so the time axis is kept (length 1), not dropped.
    return attended[:, t:t + 1, :]


def encoder_attention(T, X):
    """Apply an independent self-attention layer per timestep and reassemble.

    For each of the T timesteps a dedicated SeqSelfAttention layer is applied
    to the full sequence and only that timestep's attended vector is kept;
    the T slices are then concatenated back along the time axis.

    Args:
        T: number of timesteps in X.
        X: 3-D tensor of shape (batch, T, features).

    Returns:
        Tensor of shape (batch, T, features) of attention-weighted timesteps.
    """
    def _new_attention_layer():
        # One independently-parameterized attention layer per timestep.
        return SeqSelfAttention(attention_width=15,
                                attention_activation='sigmoid',
                                history_only=True,
                                attention_type=SeqSelfAttention.ATTENTION_TYPE_MUL,
                                kernel_regularizer=keras.regularizers.l2(1e-4),
                                bias_regularizer=keras.regularizers.l1(1e-4),
                                attention_regularizer_weight=1e-4,
                                )

    # The original hand-wrote seven identical constructions (SA0 was even
    # assigned twice, leaving one layer unused). Building the list in a loop
    # removes the duplicate and supports any T, not only T <= 6.
    SA_array = [_new_attention_layer() for _ in range(T)]

    # X_SA collects the attended (batch, 1, features) slice for each timestep.
    X_SA = [one_encoder_attention_step(X, t, SA_array) for t in range(T)]
    return tf.concat(X_SA, axis=1)

def get_model(PARAMETER_NUM):
    """Build the Bi-LSTM + per-timestep self-attention prediction model.

    Args:
        PARAMETER_NUM: number of input features per timestep.

    Returns:
        A keras Model mapping a (6, PARAMETER_NUM) input window to a single
        scalar prediction.
    """
    # Six-step input window of PARAMETER_NUM features.
    seq_in = Input(shape=(6, PARAMETER_NUM))

    # Two stacked bidirectional LSTMs, both returning full sequences.
    bi_lstm = keras.layers.Bidirectional(
        keras.layers.LSTM(units=UNITS_NUM, activation='sigmoid', return_sequences=True))(seq_in)
    bi_lstm = keras.layers.Bidirectional(
        keras.layers.LSTM(units=UNITS_NUM, activation='sigmoid', return_sequences=True))(bi_lstm)

    # Per-timestep self-attention over the 6 LSTM outputs.
    attended = encoder_attention(6, bi_lstm)

    # Collapse features per timestep, flatten across time, predict one value.
    out = Dense(1)(attended)
    out = Reshape((-1,))(out)
    out = Dense(1)(out)

    return keras.models.Model(inputs=seq_in, outputs=out)


def get_inflow_predict():
    """Evaluate the saved inflow model (./inflowModel.h5) on the test split.

    Reads ./data/2019allday.csv, frames it as a supervised-learning problem
    (TIME_STEP past steps predict the next step), predicts the held-out
    slice and inverts the min-max scaling.

    Returns:
        Tuple (mse, mae) of the predictions in the original data scale.
    """
    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)
    day_num = 26  # number of days contained in the data
    # Alternative dataset (31 days):
    # multi_dataset = read_csv('./data/多源数据总表.csv', header=0, index_col=None)
    # day_num = 31

    dataset = DataFrame()
    # Features used to predict 'in_flow' (passengers entering the airport):
    # flight passenger counts plus hour-of-day, weather and workday flags.
    dataset['in_flow'] = multi_dataset['in_flow']
    dataset['arr_ALDT_passenger'] = multi_dataset['arr_ALDT_passenger']
    dataset['dep_ATOT_passenger'] = multi_dataset['dep_ATOT_passenger']
    dataset['dep_SOBT_passenger'] = multi_dataset['dep_SOBT_passenger']
    dataset['hour'] = multi_dataset['hour']
    dataset['weather'] = multi_dataset['weather']
    dataset['workday'] = multi_dataset['workday']
    dataset.fillna(0, inplace=True)

    PARAMETER_NUM = dataset.shape[1]  # number of feature columns used
    # Ensure all data is float.
    values = dataset.values.astype('float32')

    # Min-max normalize each feature column to [0, 1].
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = scaler.fit_transform(values)

    # Frame as supervised learning: TIME_STEP (6) past steps -> next step.
    reframed = series_to_supervised(values, TIME_STEP, 1)
    # Drop the time-t columns we do not want to predict, keeping only the
    # in_flow(t) target column.
    reframed.drop(reframed.columns[[-1, -2, -3, -4, -5, -6]], axis=1, inplace=True)
    print(reframed.head())

    # Split into train (first day_num-1 days, 100 slices per day) and test.
    values = reframed.values
    n_train_time_slice = (day_num - 1) * 100
    train = values[:n_train_time_slice, :]
    test = values[n_train_time_slice:, :]
    # Last column is the target; the rest are the lagged inputs.
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape inputs to 3-D [samples, timesteps, features].
    train_X = train_X.reshape((train_X.shape[0], TIME_STEP, int(train_X.shape[1] / TIME_STEP)))
    test_X = test_X.reshape((test_X.shape[0], TIME_STEP, int(test_X.shape[1] / TIME_STEP)))

    # NOTE: the model was trained with get_model() elsewhere and saved, e.g.:
    #   model = get_model(PARAMETER_NUM)
    #   model.compile(loss='mse', optimizer='adam')
    #   model.fit(train_X, train_y, ..., callbacks=[EarlyStopping(...)])
    #   model.save("./inflowModel.h5")
    # Here we only load and evaluate it.
    model = load_model("./inflowModel.h5")
    yhat = model.predict(test_X, verbose=1)
    yhat = np.array(yhat).reshape(yhat.shape[0], -1)

    # Invert scaling: pad the prediction with the remaining feature columns
    # so the scaler sees PARAMETER_NUM columns, then keep column 0.
    temp_test_X = test_X.reshape((test_X.shape[0], -1))
    inv_yhat = concatenate((yhat, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:, 0]
    # Same inversion for the ground truth.
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y, temp_test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
    inv_y = inv_y[:, 0]

    mse = mean_squared_error(inv_y, inv_yhat)
    mae = mean_absolute_error(inv_y, inv_yhat)
    # The original had print/draw_data calls AFTER this return — unreachable
    # dead code — which have been removed.
    return mse, mae


def my_model():
    """Evaluate the saved outflow Bi-LSTM model (./outflowModel.h5).

    Reads ./data/2019allday.csv, frames it as a supervised-learning problem
    (TIME_STEP past steps predict the next step), predicts the held-out
    slice and inverts the min-max scaling. Ground truth and predictions are
    also saved to test.npy / lstm.npy.

    Returns:
        Tuple (mse, mae, r2) of the predictions in the original data scale.
    """
    # Switching between the 2019 and 2020 data only needs these two lines.
    multi_dataset = read_csv('./data/2019allday.csv', header=0, index_col=0)
    day_num = 26  # number of days contained in the data
    # multi_dataset = read_csv('./data/多源数据总表.csv', header=0, index_col=None)
    # day_num = 31

    dataset = DataFrame()
    # Features used to predict 'out_flow' (passengers leaving the airport):
    # flight passenger counts plus hour-of-day, weather and workday flags.
    dataset['out_flow'] = multi_dataset['out_flow']
    dataset['arr_SIBT_passenger'] = multi_dataset['arr_SIBT_passenger']
    dataset['dep_ATOT_passenger'] = multi_dataset['dep_ATOT_passenger']
    dataset['dep_SOBT_passenger'] = multi_dataset['dep_SOBT_passenger']
    dataset['hour'] = multi_dataset['hour']
    dataset['weather'] = multi_dataset['weather']
    dataset['workday'] = multi_dataset['workday']
    # (Removed: an unused `decode = to_categorical(dataset['hour'])` call —
    # the one-hot result was never read.)

    # Replace NaN with 0.
    dataset.fillna(0, inplace=True)

    PARAMETER_NUM = dataset.shape[1]  # number of feature columns used
    # Ensure all data is float.
    values = dataset.values.astype('float32')

    # Min-max normalize each feature column to [0, 1].
    scaler = MinMaxScaler(feature_range=(0, 1))
    values = scaler.fit_transform(values)

    # Frame as supervised learning: TIME_STEP (6) past steps -> next step.
    reframed = series_to_supervised(values, TIME_STEP, 1)
    # Drop the time-t columns we do not want to predict, keeping only the
    # out_flow(t) target column.
    reframed.drop(reframed.columns[[-1, -2, -3, -4, -5, -6]], axis=1, inplace=True)

    # Split into train (first day_num-1 days, 100 slices per day) and test.
    values = reframed.values
    n_train_time_slice = (day_num - 1) * 100
    train = values[:n_train_time_slice, :]
    test = values[n_train_time_slice:, :]
    # Last column is the target; the rest are the lagged inputs.
    train_X, train_y = train[:, :-1], train[:, -1]
    test_X, test_y = test[:, :-1], test[:, -1]
    # Reshape inputs to 3-D [samples, timesteps, features].
    train_X = train_X.reshape((train_X.shape[0], TIME_STEP, int(train_X.shape[1] / TIME_STEP)))
    test_X = test_X.reshape((test_X.shape[0], TIME_STEP, int(test_X.shape[1] / TIME_STEP)))

    # NOTE: the model was trained elsewhere (stacked bidirectional LSTMs +
    # Dropout + Dense, loss='mse', optimizer='adam') and saved; here we only
    # load and evaluate it.
    model = load_model("./outflowModel.h5")
    yhat = model.predict(test_X)

    # Invert scaling: pad the prediction with the remaining feature columns
    # so the scaler sees PARAMETER_NUM columns, then keep column 0.
    test_X = test_X.reshape((test_X.shape[0], -1))
    inv_yhat = concatenate((yhat, test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_yhat = scaler.inverse_transform(inv_yhat)
    inv_yhat = inv_yhat[:, 0]
    # Same inversion for the ground truth.
    test_y = test_y.reshape((len(test_y), 1))
    inv_y = concatenate((test_y, test_X[:, 1:PARAMETER_NUM]), axis=1)
    inv_y = scaler.inverse_transform(inv_y)
    inv_y = inv_y[:, 0]

    # Persist for later comparison/plotting.
    np.save("test", inv_y)
    np.save("lstm", inv_yhat)

    mse = mean_squared_error(inv_y, inv_yhat)
    mae = mean_absolute_error(inv_y, inv_yhat)
    r2 = r2_score(inv_y, inv_yhat)

    return mse, mae, r2

def parse_str(text):
    """Parse a comma-separated string of integers into a numpy array.

    The original parameter was named ``str``, shadowing the builtin; it has
    been renamed (in-file callers pass it positionally).

    Args:
        text: comma-separated integers, e.g. "1,2,3".

    Returns:
        np.ndarray of int values, one per comma-separated field.
    """
    return np.array([int(part) for part in text.split(',')])


if __name__ == '__main__':

    # Script entry: predict one outflow value from 6 timestep vectors passed
    # as comma-separated integer strings in argv[1]..argv[6].
    X = []
    for i in range(1, 7):
        X.append(parse_str(sys.argv[i]))
        # X.append(parse_str(inputs[i]))
    X = np.array(X)

    # Shape into a single sample: (1, TIME_STEP, features).
    X = X.reshape(1, TIME_STEP, -1)
    # NOTE(review): strips a fixed 17 characters (the script's filename) off
    # __file__ to get its directory — fragile; breaks if the file is renamed.
    model_path = __file__[:-17] + "outflowModel.h5"
    # abs = abs + "data/2019allday.csv"
    model = load_model(model_path)
    yhat = model.predict(X)
    # Clamp out-of-range predictions (> 1 in normalized space) to a random
    # value in {0.5, ..., 0.9}.
    if yhat > 1:
        r = random.randint(5, 9)
        yhat = r / 10
    # Inverse min-max normalization back to a passenger count.
    min_num = 1
    max_num = 88
    yhat = int(yhat * (max_num - min_num) + min_num)
    print(yhat)
