# Attention mechanism — main logic.

from keras.layers import merge
from keras.layers.core import *
from keras.layers.recurrent import LSTM
from keras.models import *

from ML_Methods.attention_test.attention_utils import get_activations, get_data_recurrent

# INPUT_DIM = 2
# TIME_STEPS = 20

# Hyper-parameters for the synthetic recurrent data set.
TIME_STEPS = 6  # length of every input sequence
INPUT_DIM = 3  # number of features per time step
ATTENTION_DIM = 3  # passed to get_data_recurrent as attention_column — presumably the time step the label depends on; verify against attention_utils


# If True, the attention vector is shared across the input_dimensions where the attention is applied.
SINGLE_ATTENTION_VECTOR = False
# If True, attention is applied to the raw inputs before the LSTM; otherwise after it.
APPLY_ATTENTION_BEFORE_LSTM = False


def attention_3d_block(inputs):
    """Build a soft-attention sub-graph over the time axis.

    Args:
        inputs: 3D Keras tensor of shape (batch_size, time_steps, input_dim).

    Returns:
        A tensor of the same shape as ``inputs``, element-wise scaled by
        per-time-step attention weights. The weights themselves are exposed
        through the layer named ``'attention_vec'`` (see get_activations).
    """
    input_dim = int(inputs.shape[2])
    # (batch, time, dim) -> (batch, dim, time) so that the Dense softmax
    # below produces one weight per TIME STEP for every feature dimension.
    # (The original Reshape((input_dim, TIME_STEPS)) after this Permute was a
    # no-op — the Permute already yields exactly that shape — so it is gone.)
    a = Permute((2, 1))(inputs)
    a = Dense(TIME_STEPS, activation='softmax')(a)
    if SINGLE_ATTENTION_VECTOR:
        # Collapse to a single attention vector shared by all dimensions.
        a = Lambda(lambda x: K.mean(x, axis=1), name='dim_reduction')(a)
        a = RepeatVector(input_dim)(a)
    # Back to (batch, time, dim); named so activations can be fetched later.
    a_probs = Permute((2, 1), name='attention_vec')(a)
    # Scale the inputs by their attention weights.
    output_attention_mul = merge.Multiply()([inputs, a_probs])
    return output_attention_mul


def model_attention_applied_after_lstm():
    """LSTM first, then attention over its per-time-step outputs.

    Returns:
        An uncompiled Keras Model mapping a (TIME_STEPS, INPUT_DIM) input
        to a single sigmoid probability.
    """
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    lstm_units = 32
    lstm_out = LSTM(lstm_units, return_sequences=True)(inputs)
    attention_mul = attention_3d_block(lstm_out)
    attention_mul = Flatten()(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    # 'inputs'/'outputs': the old 'input='/'output=' keyword spellings are
    # deprecated and removed in current Keras releases.
    model = Model(inputs=[inputs], outputs=output)
    return model


def model_attention_applied_before_lstm():
    """Attention applied to the raw inputs, then an LSTM.

    Returns:
        An uncompiled Keras Model mapping a (TIME_STEPS, INPUT_DIM) input
        to a single sigmoid probability.
    """
    inputs = Input(shape=(TIME_STEPS, INPUT_DIM,))
    attention_mul = attention_3d_block(inputs)
    lstm_units = 32
    attention_mul = LSTM(lstm_units, return_sequences=False)(attention_mul)
    output = Dense(1, activation='sigmoid')(attention_mul)
    # 'inputs'/'outputs': the old 'input='/'output=' keyword spellings are
    # deprecated and removed in current Keras releases.
    model = Model(inputs=[inputs], outputs=output)
    return model


if __name__ == '__main__':

    # Number of training samples; N around 300 is roughly the minimum —
    # with fewer samples the model does not train usefully.
    N = 300
    inputs_1, outputs = get_data_recurrent(n=N, time_steps=TIME_STEPS,
                                           input_dim=INPUT_DIM,
                                           attention_column=ATTENTION_DIM)

    if APPLY_ATTENTION_BEFORE_LSTM:
        m = model_attention_applied_before_lstm()
    else:
        m = model_attention_applied_after_lstm()

    m.compile(optimizer='adam', loss='binary_crossentropy', metrics=['accuracy'])
    print(m.summary())

    m.fit([inputs_1], outputs, epochs=250, batch_size=64, validation_split=0.1)

    # Average the attention weights over 50 fresh test samples so per-sample
    # noise cancels out and the attended time step stands out clearly.
    attention_vectors = []
    for i in range(50):
        testing_inputs_1, testing_outputs = get_data_recurrent(
            1, TIME_STEPS, INPUT_DIM, attention_column=ATTENTION_DIM)
        # get_activations returns activations of shape (1, TIME_STEPS, units);
        # average over the units axis to get one weight per time step.
        attention_vector = np.mean(get_activations(m,
                                                   testing_inputs_1,
                                                   print_shape_only=True,
                                                   layer_name='attention_vec')[0], axis=2).squeeze()
        print('attention =', attention_vector)
        # The softmax weights must sum to 1. abs() is required: without it
        # any sum BELOW 1 (negative difference) would silently pass.
        assert abs(np.sum(attention_vector) - 1.0) < 1e-5
        attention_vectors.append(attention_vector)

    print('attention_vectors is :')
    print(attention_vectors)

    print('attention_vector_final is :')
    attention_vector_final = np.mean(np.array(attention_vectors), axis=0)
    print(attention_vector_final)

    # Plot the averaged attention weight of each time step as a bar chart.
    import matplotlib.pyplot as plt
    import pandas as pd

    pd.DataFrame(attention_vector_final, columns=['attention (%)']).plot(kind='bar',
                                                                         title='Attention Mechanism as '
                                                                               'a function of input'
                                                                               ' dimensions.')
    plt.show()


# model summary :

'''

Model: "model_1"
__________________________________________________________________________________________________
Layer (type)                    Output Shape         Param #     Connected to                     
==================================================================================================
input_1 (InputLayer)            (None, 6, 3)         0                                            
__________________________________________________________________________________________________
lstm_1 (LSTM)                   (None, 6, 32)        4608        input_1[0][0]                    
__________________________________________________________________________________________________
permute_1 (Permute)             (None, 32, 6)        0           lstm_1[0][0]                     
__________________________________________________________________________________________________
reshape_1 (Reshape)             (None, 32, 6)        0           permute_1[0][0]                  
__________________________________________________________________________________________________
dense_1 (Dense)                 (None, 32, 6)        42          reshape_1[0][0]                  
__________________________________________________________________________________________________
attention_vec (Permute)         (None, 6, 32)        0           dense_1[0][0]                    
__________________________________________________________________________________________________
multiply_1 (Multiply)           (None, 6, 32)        0           lstm_1[0][0]                     
                                                                 attention_vec[0][0]              
__________________________________________________________________________________________________
flatten_1 (Flatten)             (None, 192)          0           multiply_1[0][0]                 
__________________________________________________________________________________________________
dense_2 (Dense)                 (None, 1)            193         flatten_1[0][0]                  
==================================================================================================
Total params: 4,843
Trainable params: 4,843
Non-trainable params: 0
__________________________________________________________________________________________________
None
'''


# the activation vector's width of 32 depends on the model's number of units,
# and for each vector we take the mean over the units at each time step. ----wkz
# ----- activations -----
# (1, 6, 32)
# (test_number, timesteps, attention_layer_units)


'''
----- activations -----
(1, 6, 32)
attention = [0.04944547 0.0503965  0.06899929 0.29234383 0.46103185 0.07778306]
----- activations -----
(1, 6, 32)
attention = [0.05824947 0.05696339 0.07248975 0.3035223  0.4503396  0.05843551]
----- activations -----
(1, 6, 32)
attention = [0.03731741 0.03831849 0.05223504 0.3399899  0.46736455 0.0647746 ]
----- activations -----
(1, 6, 32)
attention = [0.04799121 0.05627837 0.07009952 0.31711483 0.43523794 0.0732781 ]
----- activations -----
(1, 6, 32)
attention = [0.05210477 0.04445565 0.06017521 0.29489326 0.488456   0.0599151 ]
----- activations -----
(1, 6, 32)
attention = [0.05253304 0.06138708 0.07932198 0.327818   0.42207086 0.05686906]
----- activations -----
(1, 6, 32)
attention = [0.05467027 0.06568004 0.07873715 0.3408351  0.3932315  0.06684594]
----- activations -----
(1, 6, 32)
attention = [0.05960238 0.06794271 0.08688962 0.277176   0.43653557 0.07185373]
----- activations -----
(1, 6, 32)
attention = [0.05053864 0.04363982 0.06282894 0.285424   0.4862376  0.07133105]
----- activations -----
(1, 6, 32)
attention = [0.05303946 0.07027581 0.07529501 0.32032084 0.40802154 0.07304735]
----- activations -----
(1, 6, 32)
attention = [0.04998688 0.04053639 0.06118158 0.31737095 0.467706   0.06321821]
----- activations -----
(1, 6, 32)
attention = [0.04296206 0.05263569 0.05798183 0.2635864  0.5019875  0.08084652]
----- activations -----
(1, 6, 32)
attention = [0.05016574 0.05041624 0.07380162 0.29500324 0.45307726 0.07753591]
----- activations -----
(1, 6, 32)
attention = [0.05872375 0.07711616 0.09253556 0.31887007 0.3836655  0.06908897]
----- activations -----
(1, 6, 32)
attention = [0.05287126 0.07181155 0.08046886 0.3089204  0.4141389  0.071789  ]
----- activations -----
(1, 6, 32)
attention = [0.05068967 0.04637453 0.0658455  0.28604537 0.47514436 0.07590058]
----- activations -----
(1, 6, 32)
attention = [0.04130671 0.03640082 0.05625134 0.32363665 0.47522324 0.06718124]
----- activations -----
(1, 6, 32)
attention = [0.06305046 0.0660326  0.09390202 0.27238944 0.42507857 0.07954693]
----- activations -----
(1, 6, 32)
attention = [0.05585364 0.06436647 0.07804628 0.3120584  0.4258651  0.06381014]
----- activations -----
(1, 6, 32)
attention = [0.04668836 0.05111461 0.06497878 0.3007431  0.45615363 0.08032151]
----- activations -----
(1, 6, 32)
attention = [0.04224592 0.04175546 0.06331363 0.31277063 0.46685952 0.07305484]
----- activations -----
(1, 6, 32)
attention = [0.04733574 0.04249424 0.06606113 0.27788857 0.5008515  0.06536876]
----- activations -----
(1, 6, 32)
attention = [0.05021419 0.06791922 0.07769972 0.3243281  0.41055366 0.06928509]
----- activations -----
(1, 6, 32)
attention = [0.05347637 0.06031065 0.07567936 0.34919438 0.3958335  0.06550572]
----- activations -----
(1, 6, 32)
attention = [0.05231143 0.04481003 0.06055702 0.2851275  0.48895678 0.06823727]
----- activations -----
(1, 6, 32)
attention = [0.0500264  0.05281472 0.07033128 0.28406185 0.46379936 0.07896638]
----- activations -----
(1, 6, 32)
attention = [0.04784581 0.05528247 0.06107371 0.29204112 0.45975316 0.08400373]
----- activations -----
(1, 6, 32)
attention = [0.04316173 0.03917085 0.06244899 0.32079127 0.4688919  0.06553527]
----- activations -----
(1, 6, 32)
attention = [0.05047902 0.05039375 0.07283056 0.26352    0.48390728 0.07886936]
----- activations -----
(1, 6, 32)
attention = [0.05019001 0.05097374 0.06663258 0.31137985 0.44630867 0.07451513]
----- activations -----
(1, 6, 32)
attention = [0.04729434 0.06575289 0.06563834 0.24923241 0.47524172 0.09684032]
----- activations -----
(1, 6, 32)
attention = [0.05076164 0.0528443  0.07062879 0.3254047  0.43405005 0.06631048]
----- activations -----
(1, 6, 32)
attention = [0.03893761 0.03681614 0.04734201 0.35041502 0.4760078  0.05048141]
----- activations -----
(1, 6, 32)
attention = [0.06332025 0.06273049 0.09120578 0.2797963  0.42183498 0.08111215]
----- activations -----
(1, 6, 32)
attention = [0.05386409 0.0620643  0.07659841 0.32437775 0.4230488  0.06004667]
----- activations -----
(1, 6, 32)
attention = [0.04557488 0.04513834 0.0611873  0.26934755 0.49788955 0.08086239]
----- activations -----
(1, 6, 32)
attention = [0.04935108 0.07104638 0.07587403 0.29720455 0.4287817  0.07774225]
----- activations -----
(1, 6, 32)
attention = [0.05029336 0.05954173 0.07952875 0.33351338 0.42090052 0.05622227]
----- activations -----
(1, 6, 32)
attention = [0.04720484 0.04442001 0.06479879 0.29487008 0.46958232 0.07912396]
----- activations -----
(1, 6, 32)
attention = [0.05208185 0.06361209 0.07668111 0.34674674 0.40122116 0.05965701]
----- activations -----
(1, 6, 32)
attention = [0.04702765 0.04896446 0.07068118 0.27055252 0.4851752  0.077599  ]
----- activations -----
(1, 6, 32)
attention = [0.05234558 0.06874028 0.08026873 0.2712231  0.4429934  0.08442891]
----- activations -----
(1, 6, 32)
attention = [0.04683508 0.05124109 0.06970835 0.2522201  0.49742547 0.08256989]
----- activations -----
(1, 6, 32)
attention = [0.05915146 0.06954353 0.08424568 0.29667097 0.422358   0.06803034]
----- activations -----
(1, 6, 32)
attention = [0.06017489 0.06448431 0.08408311 0.33027285 0.39602143 0.06496339]
----- activations -----
(1, 6, 32)
attention = [0.04632423 0.04869407 0.06772847 0.25739998 0.49888185 0.08097138]
----- activations -----
(1, 6, 32)
attention = [0.06790626 0.08017486 0.1000886  0.23009872 0.4227044  0.09902716]
----- activations -----
(1, 6, 32)
attention = [0.04446068 0.05007035 0.05920289 0.249634   0.520203   0.07642904]
----- activations -----
(1, 6, 32)
attention = [0.05902701 0.06236188 0.07279908 0.28005177 0.43709707 0.08866318]
----- activations -----
(1, 6, 32)
attention = [0.04762034 0.06631778 0.07059789 0.26334932 0.46747708 0.08463758]

'''


# put all attention_vector in to vectors :
'''
attention_vectors is :
[
array([0.04944547, 0.0503965 , 0.06899929, 0.29234383, 0.46103185, 0.07778306], dtype=float32),
array([0.05824947, 0.05696339, 0.07248975, 0.3035223 , 0.4503396 , 0.05843551], dtype=float32),
array([0.03731741, 0.03831849, 0.05223504, 0.3399899 , 0.46736455, 0.0647746 ], dtype=float32),
array([0.04799121, 0.05627837, 0.07009952, 0.31711483, 0.43523794, 0.0732781 ], dtype=float32),
array([0.05210477, 0.04445565, 0.06017521, 0.29489326, 0.488456  , 0.0599151 ], dtype=float32),
array([0.05253304, 0.06138708, 0.07932198, 0.327818  , 0.42207086, 0.05686906], dtype=float32),
array([0.05467027, 0.06568004, 0.07873715, 0.3408351 , 0.3932315 , 0.06684594], dtype=float32),
array([0.05960238, 0.06794271, 0.08688962, 0.277176  , 0.43653557, 0.07185373], dtype=float32),
array([0.05053864, 0.04363982, 0.06282894, 0.285424  , 0.4862376 , 0.07133105], dtype=float32),
array([0.05303946, 0.07027581, 0.07529501, 0.32032084, 0.40802154, 0.07304735], dtype=float32),
array([0.04998688, 0.04053639, 0.06118158, 0.31737095, 0.467706  , 0.06321821], dtype=float32),
array([0.04296206, 0.05263569, 0.05798183, 0.2635864 , 0.5019875 , 0.08084652], dtype=float32),
array([0.05016574, 0.05041624, 0.07380162, 0.29500324, 0.45307726, 0.07753591], dtype=float32),
array([0.05872375, 0.07711616, 0.09253556, 0.31887007, 0.3836655 , 0.06908897], dtype=float32),
array([0.05287126, 0.07181155, 0.08046886, 0.3089204 , 0.4141389 , 0.071789  ], dtype=float32),
array([0.05068967, 0.04637453, 0.0658455 , 0.28604537, 0.47514436, 0.07590058], dtype=float32),
array([0.04130671, 0.03640082, 0.05625134, 0.32363665, 0.47522324, 0.06718124], dtype=float32),
array([0.06305046, 0.0660326 , 0.09390202, 0.27238944, 0.42507857, 0.07954693], dtype=float32),
array([0.05585364, 0.06436647, 0.07804628, 0.3120584 , 0.4258651 , 0.06381014], dtype=float32),
array([0.04668836, 0.05111461, 0.06497878, 0.3007431 , 0.45615363, 0.08032151], dtype=float32),
array([0.04224592, 0.04175546, 0.06331363, 0.31277063, 0.46685952, 0.07305484], dtype=float32),
array([0.04733574, 0.04249424, 0.06606113, 0.27788857, 0.5008515 , 0.06536876], dtype=float32),
array([0.05021419, 0.06791922, 0.07769972, 0.3243281 , 0.41055366, 0.06928509], dtype=float32),
array([0.05347637, 0.06031065, 0.07567936, 0.34919438, 0.3958335 , 0.06550572], dtype=float32),
array([0.05231143, 0.04481003, 0.06055702, 0.2851275 , 0.48895678, 0.06823727], dtype=float32),
array([0.0500264 , 0.05281472, 0.07033128, 0.28406185, 0.46379936, 0.07896638], dtype=float32),
array([0.04784581, 0.05528247, 0.06107371, 0.29204112, 0.45975316, 0.08400373], dtype=float32),
array([0.04316173, 0.03917085, 0.06244899, 0.32079127, 0.4688919 , 0.06553527], dtype=float32),
array([0.05047902, 0.05039375, 0.07283056, 0.26352   , 0.48390728, 0.07886936], dtype=float32),
array([0.05019001, 0.05097374, 0.06663258, 0.31137985, 0.44630867, 0.07451513], dtype=float32),
array([0.04729434, 0.06575289, 0.06563834, 0.24923241, 0.47524172, 0.09684032], dtype=float32),
array([0.05076164, 0.0528443 , 0.07062879, 0.3254047 , 0.43405005, 0.06631048], dtype=float32),
array([0.03893761, 0.03681614, 0.04734201, 0.35041502, 0.4760078 , 0.05048141], dtype=float32),
array([0.06332025, 0.06273049, 0.09120578, 0.2797963 , 0.42183498, 0.08111215], dtype=float32),
array([0.05386409, 0.0620643 , 0.07659841, 0.32437775, 0.4230488 , 0.06004667], dtype=float32),
array([0.04557488, 0.04513834, 0.0611873 , 0.26934755, 0.49788955, 0.08086239], dtype=float32),
array([0.04935108, 0.07104638, 0.07587403, 0.29720455, 0.4287817 , 0.07774225], dtype=float32),
array([0.05029336, 0.05954173, 0.07952875, 0.33351338, 0.42090052, 0.05622227], dtype=float32),
array([0.04720484, 0.04442001, 0.06479879, 0.29487008, 0.46958232, 0.07912396], dtype=float32),
array([0.05208185, 0.06361209, 0.07668111, 0.34674674, 0.40122116, 0.05965701], dtype=float32),
array([0.04702765, 0.04896446, 0.07068118, 0.27055252, 0.4851752 , 0.077599  ], dtype=float32),
array([0.05234558, 0.06874028, 0.08026873, 0.2712231 , 0.4429934 , 0.08442891], dtype=float32),
array([0.04683508, 0.05124109, 0.06970835, 0.2522201 , 0.49742547, 0.08256989], dtype=float32),
array([0.05915146, 0.06954353, 0.08424568, 0.29667097, 0.422358  , 0.06803034], dtype=float32),
array([0.06017489, 0.06448431, 0.08408311, 0.33027285, 0.39602143, 0.06496339], dtype=float32),
array([0.04632423, 0.04869407, 0.06772847, 0.25739998, 0.49888185, 0.08097138], dtype=float32),
array([0.06790626, 0.08017486, 0.1000886 , 0.23009872, 0.4227044 , 0.09902716], dtype=float32),
array([0.04446068, 0.05007035, 0.05920289, 0.249634  , 0.520203  , 0.07642904], dtype=float32),
array([0.05902701, 0.06236188, 0.07279908, 0.28005177, 0.43709707, 0.08866318], dtype=float32),
array([0.04762034, 0.06631778, 0.07059789, 0.26334932, 0.46747708, 0.08463758], dtype=float32)
]
'''

# calculate the means of all the attention_vector for each timesteps
'''

attention_vector_final is :
[0.05089269 0.05585255 0.0715516  0.29783097 0.45102355 0.07284866]


'''
