# Attention mechanism: main logic

import numpy as np

from ML_Methods.attention.utils import parse, data_perpare

np.random.seed(1337)  # for reproducibility
from keras.models import *
from keras.layers import Input, Dense, merge

import os

from math import sqrt
import time
import datetime as time_datetime

from pandas import read_csv
from pandas import DataFrame
from pandas import concat
from pandas import Series
from pandas import datetime

import numpy as np
from numpy import array
from numpy import concatenate

from keras import optimizers
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.callbacks import EarlyStopping
from keras.layers import Bidirectional
from keras.utils import multi_gpu_model

from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import LabelEncoder
from sklearn.metrics import mean_squared_error, mean_absolute_error, r2_score

import matplotlib.pyplot as plt




from keras.layers import merge
from keras.layers.core import *
from keras.layers.recurrent import LSTM
from keras.models import *

from ML_Methods.attention.utils import get_activations


# If True, the attention vector is shared across the input dimensions where the
# attention is applied (see the SINGLE_ATTENTION_VECTOR branch in attention_3d_block).
SINGLE_ATTENTION_VECTOR = False
# If True, attention is meant to be applied to the raw inputs before the LSTM;
# otherwise it is applied to the LSTM's output sequence.
APPLY_ATTENTION_BEFORE_LSTM = False


# NOTE(review): this shadows the builtin `slice`; it is kept because
# attention_3d_block references this exact name in a Lambda layer.
def slice(x):
    """Mean over axis 1 of a 3-D tensor: (batch, d1, d2) -> (batch, d2)."""
    return K.mean(x, axis=1)

def attention_3d_block(inputs, timesteps:int):
    """Build a soft-attention block over the timestep axis.

    :param inputs: Keras tensor of shape (batch_size, time_steps, input_dim).
    :param timesteps: number of timesteps; must equal inputs.shape[1].
    :return: Keras tensor with the same shape as ``inputs`` — the element-wise
        product of ``inputs`` and the learned attention weights.  The weights
        are exposed through the layer named 'attention_vec' for later inspection.
    """
    # inputs.shape = (batch_size, time_steps, input_dim)
    input_dim = int(inputs.shape[2])

    # Swap to (batch, input_dim, timesteps) so the Dense softmax runs over timesteps.
    a = Permute((2, 1))(inputs)
    a = Reshape((input_dim, timesteps))(a) # this line is not useful. It's just to know which dimension is what.
    a = Dense(timesteps, activation='softmax')(a)
    if SINGLE_ATTENTION_VECTOR:
        # Collapse to one attention vector shared across input dimensions,
        # then repeat it so shapes still line up for the Permute below.
        a = Lambda(slice, name='dim_reduction')(a)
        a = RepeatVector(input_dim)(a)
    # Back to (batch, timesteps, input_dim); named so activations can be fetched.
    a_probs = Permute((2, 1), name='attention_vec')(a)

    # output_attention_mul = merge([inputs, a_probs], name='attention_mul', mode='mul')
    output_attention_mul = merge.Multiply()([inputs, a_probs])

    return output_attention_mul


def model_attention_applied_after_lstm(input_dim:tuple, lstm_units:int ):
    '''
    Build an LSTM model with attention applied to the LSTM output sequence.

    :param input_dim: input shape tuple (timesteps, input_dim,)
    :param lstm_units: number of units in the LSTM layer
    :return: uncompiled Keras Model mapping (batch, timesteps, features)
        to one sigmoid output per sample
    '''
    timesteps = input_dim[0]
    inputs = Input(shape=input_dim)
    # return_sequences=True keeps the per-timestep outputs so the attention
    # block can weight every timestep.
    lstm_out = LSTM(lstm_units, return_sequences=True)(inputs)
    attention_mul = attention_3d_block(lstm_out, timesteps)
    attention_mul = Flatten()(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    # 'inputs'/'outputs' are the Keras 2 keyword names; the singular
    # 'input'/'output' spellings are the deprecated Keras 1 legacy interface.
    model = Model(inputs=[inputs], outputs=output)
    return model




def model_attention_applied_before_lstm(input_dim:tuple, lstm_units:int):
    '''
    Build an LSTM model with attention applied to the raw inputs, before the LSTM.

    :param input_dim: input shape tuple (timesteps, input_dim,)
    :param lstm_units: number of units in the LSTM layer
    :return: uncompiled Keras Model mapping (batch, timesteps, features)
        to one sigmoid output per sample
    '''
    timesteps = input_dim[0]
    inputs = Input(shape=input_dim)
    # Weight the raw timesteps first, then summarize with a sequence-final LSTM.
    attention_mul = attention_3d_block(inputs, timesteps)
    attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    # 'inputs'/'outputs' are the Keras 2 keyword names; the singular
    # 'input'/'output' spellings are the deprecated Keras 1 legacy interface.
    model = Model(inputs=[inputs], outputs=output)
    return model


if __name__ == '__main__':

    # --- Data preparation ---

    experiment_time_start = time.time()  # start the experiment timer
    print('训练开始时间: ' + time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(experiment_time_start)))

    # Machine-specific project root.  Raw strings so every backslash is taken
    # literally (the originals relied on '\P', '\m', ... not being escapes).
    # WORK_ROOT_PATH = r'E:\study\MLDrought'  # for yf path
    WORK_ROOT_PATH = r'D:\Project\ml_study'  # for wkz path

    DATA_PATH = os.path.join(WORK_ROOT_PATH, r'indices_caculate\result\multi_spei_csv\SPEI-12\Multi_SPEI-12_56080.txt')

    # Load the dataset; the 'year' and 'month' columns are merged into a single
    # datetime index via the project parse() helper.
    dataset = read_csv(DATA_PATH, header=0, parse_dates=[['year', 'month']], index_col=0, date_parser=parse)
    dataset.index.name = 'time'
    # Drop feature columns that are not used by this experiment.
    dataset.drop(columns=['average_air_pressure',
                          'average_water_air_pressure',
                          'low_temp', 'high_temp',
                          'precipitation',
                          'temperature',
                          'humidity'], inplace=True)

    timesteps = 10 * 12  # 10 years of monthly steps
    X_train, y_train, X_test, y_test = data_perpare(dataset, timesteps)
    # X_train: [samples, timesteps, features]

    input_dim = (X_train.shape[1], X_train.shape[2],)  # (timesteps, features,)

    features = X_train.shape[2]
    # Reshape X_test from [samples, timesteps] to [samples, timesteps, features].
    X_test = X_test.reshape(X_test.shape[0], X_test.shape[1], features)

    # --- Model ---
    # The saved model loaded below was produced by:
    #   model = model_attention_applied_(before|after)_lstm(input_dim, lstm_units=60)
    #   model.compile(optimizer='Adam', loss='binary_crossentropy', metrics=['accuracy'])
    #   model.fit(X_train, y_train, epochs=250, batch_size=128, validation_split=0.1)
    #   model.save('./attention_lstm_Adam_epochs250_batch_size128_lstm_units60.h5')

    # Load the trained model (path relative to the project root; originally
    # noted as "using only in Python Console").
    model = load_model('./ML_Methods/attention/attention_lstm_Adam_epochs250_batch_size128_lstm_units60.h5')

    # --- Inspect the attention weights over the test set ---
    attention_vectors = []
    for i in range(X_test.shape[0]):
        each_test = np.array([X_test[i]])
        attention_vector = np.mean(
            get_activations(model, each_test, print_shape_only=True, layer_name='attention_vec')[0],
            axis=2).squeeze()
        print('attention =', attention_vector)
        # Softmax weights must sum to 1.  abs() makes the check two-sided: the
        # original one-sided `(sum - 1.0) < 1e-5` silently passed any sum
        # smaller than 1 (including 0).
        assert abs(np.sum(attention_vector) - 1.0) < 1e-5
        attention_vectors.append(attention_vector)

    attention_vector_final = np.mean(np.array(attention_vectors), axis=0)

    # --- Plot the mean attention per timestep ---
    # DataFrame and plt are already imported at the top of the file.
    DataFrame(attention_vector_final, columns=['attention (%)']).plot(
        kind='bar', title='Attention Mechanism as a function of input dimensions.')
    plt.show()