from sklearn.base import BaseEstimator, TransformerMixin
from keras.models import Model, Input
from keras.layers import Dense, LSTM, Dropout, Embedding, SpatialDropout1D, Bidirectional, concatenate, InputSpec, Dot, \
    Add, add, RepeatVector
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder
import pickle
import numpy as np
from keras import regularizers, constraints
from keras.engine.topology import Layer
from keras import initializers as initializers, regularizers, constraints
from keras import backend as K


# self.trainable_weights = [self.w_s]
# self.trainable_weights = [self.u]
# self.trainable_weights = [self.v]
# self.trainable_weights = [self.w]

class AttentionLayer(Layer):
    """Additive (Bahdanau-style) attention over a 3-D sequence tensor.

    Given an input ``h`` of shape ``(batch, timesteps, features)``, the layer
    builds a query from the first timestep's hidden state
    (``query = tanh(h[:, 0, :] . w_s)``), scores every timestep with
    ``v . tanh(h . w + query . u)``, normalizes the scores with a softmax over
    the time axis, and returns the attention-weighted sum of ``h``.

    Args:
        return_attention: If True (default), ``call`` returns
            ``[output, alpha]`` where ``output`` is ``(batch, features)`` and
            ``alpha`` is the ``(batch, timesteps)`` attention weights;
            otherwise only ``output`` is returned.
        w_s_regularizer / w_regularizer / u_regularizer / v_regularizer:
            Optional regularizers for the corresponding weight matrices.
        w_s_constraint / w_constraint / u_constraint / v_constraint:
            Optional constraints for the corresponding weight matrices.
    """

    def __init__(self, return_attention=True,
                 w_s_regularizer=None, w_regularizer=None, u_regularizer=None, v_regularizer=None,
                 w_s_constraint=None, w_constraint=None, u_constraint=None, v_constraint=None,
                 **kwargs):
        # Weight initializer shared by all four parameter matrices.
        self.init = initializers.get("RandomNormal")

        # Regularizers (deserialized from identifiers if strings were passed).
        self.w_s_regularizer = regularizers.get(w_s_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.w_regularizer = regularizers.get(w_regularizer)
        self.v_regularizer = regularizers.get(v_regularizer)

        # Constraints.
        self.w_s_constraint = constraints.get(w_s_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.w_constraint = constraints.get(w_constraint)
        self.v_constraint = constraints.get(v_constraint)

        self.return_attention = return_attention
        super(AttentionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the four trainable matrices; input must be 3-D."""
        self.input_spec = [InputSpec(ndim=3)]
        assert len(input_shape) == 3

        emb = input_shape[2]  # feature dimension of the incoming sequence
        # Projects the first-timestep state into the query space.
        self.w_s = self.add_weight(shape=[emb, emb],
                                   name='{}_w_s'.format(self.name),
                                   initializer=self.init,
                                   regularizer=self.w_s_regularizer,
                                   constraint=self.w_s_constraint)
        # Projects the (repeated) query for the additive score.
        self.u = self.add_weight(shape=[emb, emb],
                                 name='{}_u'.format(self.name),
                                 initializer=self.init,
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)
        # Projects every timestep of the input sequence for the additive score.
        self.w = self.add_weight(shape=[emb, emb],
                                 name='{}_w'.format(self.name),
                                 initializer=self.init,
                                 regularizer=self.w_regularizer,
                                 constraint=self.w_constraint)
        # Collapses the score vector to a single scalar per timestep.
        self.v = self.add_weight(shape=[emb, 1],
                                 name='{}_v'.format(self.name),
                                 initializer=self.init,
                                 regularizer=self.v_regularizer,
                                 constraint=self.v_constraint)
        super(AttentionLayer, self).build(input_shape)

    def call(self, h, **kwargs):
        """Compute the attention-weighted summary of ``h``.

        Args:
            h: Tensor of shape ``(batch, timesteps, features)``.

        Returns:
            ``[output, alpha]`` if ``return_attention`` is True, where
            ``output`` is ``(batch, features)`` and ``alpha`` is
            ``(batch, timesteps)``; otherwise just ``output``.
        """
        maxlen = K.shape(h)[1]

        # First timestep's hidden state for each sample: (batch, features).
        # NOTE: the previous reshape-based extraction
        # (K.reshape(h, [maxlen, batch, emb])[0]) was a reshape, not a
        # transpose, and scrambled samples whenever maxlen != batch_size.
        h_0 = h[:, 0, :]

        # Query, broadcast across every timestep: (batch, maxlen, features).
        query = K.tanh(K.dot(h_0, self.w_s))
        query = K.repeat(query, maxlen)

        # Additive score per timestep: v . tanh(h.w + query.u)
        scores = K.dot(K.tanh(K.dot(h, self.w) + K.dot(query, self.u)), self.v)
        # Drop the trailing singleton axis so the softmax runs over TIME.
        # (Previously softmax was applied to the size-1 last axis, which made
        # every attention weight exactly 1.0 — i.e. no attention at all. The
        # squeeze also makes alpha 2-D, matching compute_output_shape.)
        scores = K.squeeze(scores, axis=-1)          # (batch, maxlen)
        alpha = K.softmax(scores)                    # attention weights

        # Weighted sum over the time axis: (batch, features).
        output = K.sum(h * K.expand_dims(alpha, axis=-1), axis=1)

        if self.return_attention:
            return [output, alpha]
        return output

    def get_output_shape_for(self, input_shape):
        # Keras 1.x compatibility alias for compute_output_shape.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            # [(batch, features), (batch, timesteps)]
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1])]
        return (input_shape[0], output_len)
