from sklearn.base import BaseEstimator, TransformerMixin
from keras.models import Model, Input
from keras.layers import Dense, LSTM, Dropout, Embedding, SpatialDropout1D, Bidirectional, concatenate, InputSpec,Dot,Add,add,RepeatVector
from keras.layers import GlobalAveragePooling1D, GlobalMaxPooling1D
from keras.optimizers import Adam, RMSprop
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from sklearn.metrics import accuracy_score
from sklearn.preprocessing import MultiLabelBinarizer, LabelEncoder
import pickle
import numpy as np
from keras import regularizers,constraints
from keras.engine.topology import Layer
from keras import initializers as initializers, regularizers, constraints
from keras import backend as K
from keras import initializers
import tensorflow as tf

# NOTE: legacy attempts to register weights via direct assignment to
# trainable_weights; superseded by add_weight() calls in AttentionLayer.build(),
# which registers the variables with the layer automatically.

class AttentionLayer(Layer):
    """Additive (Bahdanau-style) attention pooling over a sequence.

    Expects a 3-D input ``h`` of shape ``(batch, timesteps, features)``,
    e.g. RNN/LSTM outputs.  The hidden state of the FIRST timestep is
    projected through ``w_s`` and used as the query; per-timestep scores
    are computed as ``v^T tanh(h . w + query . u)``, softmax-normalised
    over time, and used to form a weighted sum of ``h``.

    Args:
        return_attention: if True, ``call`` returns ``[pooled, alpha]``
            where ``alpha`` has shape ``(batch, timesteps, 1)``; otherwise
            only ``pooled`` of shape ``(batch, features)``.
        init_weight: unused; kept for backward signature compatibility.
        w_s_regularizer / w_regularizer / u_regularizer / v_regularizer:
            regularizers for the corresponding trainable matrices.
        w_s_constraint / w_constraint / u_constraint / v_constraint:
            constraints for the corresponding trainable matrices.
    """

    def __init__(self, return_attention=True, init_weight=None,
                 w_s_regularizer=None, w_regularizer=None, u_regularizer=None, v_regularizer=None,
                 w_s_constraint=None, w_constraint=None, u_constraint=None, v_constraint=None,
                 **kwargs):
        # NOTE(review): initializers.get("RandomNormal") yields the default
        # stddev (0.05) while u/w/v in build() use stddev=1.0 — presumably
        # unintentional, but preserved to keep training behavior unchanged.
        self.init = initializers.get("RandomNormal")

        self.w_s_regularizer = regularizers.get(w_s_regularizer)
        self.u_regularizer = regularizers.get(u_regularizer)
        self.w_regularizer = regularizers.get(w_regularizer)
        self.v_regularizer = regularizers.get(v_regularizer)

        self.w_s_constraint = constraints.get(w_s_constraint)
        self.u_constraint = constraints.get(u_constraint)
        self.w_constraint = constraints.get(w_constraint)
        self.v_constraint = constraints.get(v_constraint)

        self.return_attention = return_attention

        super(AttentionLayer, self).__init__(**kwargs)

    def build(self, input_shape):
        """Create the four trainable weights; input_shape = (batch, timesteps, features)."""
        self.input_spec = [InputSpec(ndim=3)]

        assert len(input_shape) == 3

        # Query projection applied to the first timestep's hidden state.
        self.w_s = self.add_weight(shape=[input_shape[2], input_shape[2]],
                                   name='{}_w_s'.format(self.name),
                                   initializer=self.init,
                                   regularizer=self.w_s_regularizer,
                                   constraint=self.w_s_constraint)
        # Projection of the (repeated) query inside the additive score.
        self.u = self.add_weight(shape=[input_shape[2], input_shape[2]],
                                 name='{}_u'.format(self.name),
                                 initializer=initializers.RandomNormal(mean=0.0, stddev=1.0, seed=None),
                                 regularizer=self.u_regularizer,
                                 constraint=self.u_constraint)
        # Projection of every hidden state inside the additive score.
        self.w = self.add_weight(shape=[input_shape[2], input_shape[2]],
                                 name='{}_w'.format(self.name),
                                 initializer=initializers.RandomNormal(mean=0.0, stddev=1.0, seed=None),
                                 regularizer=self.w_regularizer,
                                 constraint=self.w_constraint)
        # Score vector collapsing the feature axis to a single logit.
        self.v = self.add_weight(shape=[input_shape[2], 1],
                                 name='{}_v'.format(self.name),
                                 initializer=initializers.RandomNormal(mean=0.0, stddev=1.0, seed=None),
                                 regularizer=self.v_regularizer,
                                 constraint=self.v_constraint)

        # BUGFIX: the original appended self.u/self.w/self.v to self.weights.
        # Layer.weights is a computed property that returns a NEW list
        # (trainable + non-trainable weights), so appending to it was a
        # silent no-op, and add_weight() above already registers every
        # variable with the layer.  The appends were removed.
        self.built = True

        super(AttentionLayer, self).build(input_shape)

    def call(self, h, **kwargs):
        """Return the attention-weighted sum of h (and optionally the weights)."""
        h_shape = K.shape(h)
        batch_size, maxlen, emb = h_shape[0], h_shape[1], h_shape[2]

        # Take the hidden state of the first timestep as the query.
        h_0 = tf.transpose(h, [1, 0, 2])[0]            # (batch, features)
        h_0 = K.reshape(h_0, (batch_size, emb))        # restore static rank

        # Project the query and broadcast it across every timestep.
        encoder_final_state = K.tanh(K.dot(h_0, self.w_s))
        encoder_final_state = K.repeat(encoder_final_state, maxlen)   # (batch, timesteps, features)
        encoder_final_state = K.reshape(encoder_final_state, [-1, maxlen, emb])

        # Additive score: v^T tanh(h . w + query . u).
        a1 = K.dot(h, self.w)
        a2 = K.dot(encoder_final_state, self.u)
        a = a1 + a2
        auto_attention_rate = K.dot(K.tanh(a), self.v)   # (batch, timesteps, 1)
        alpha = K.softmax(auto_attention_rate, 1)        # normalise over the time axis
        output = K.sum(h * alpha, axis=1)                # (batch, features)

        if self.return_attention:
            return [output, alpha]
        return output

    def get_output_shape_for(self, input_shape):
        # Keras 1.x API name; delegate to the Keras 2.x method.
        return self.compute_output_shape(input_shape)

    def compute_output_shape(self, input_shape):
        output_len = input_shape[2]
        if self.return_attention:
            # BUGFIX: alpha is (batch, timesteps, 1) — K.dot with self.v
            # keeps a trailing singleton axis and softmax preserves shape —
            # but the original declared (batch, timesteps), so downstream
            # static shape inference disagreed with the actual tensor.
            return [(input_shape[0], output_len), (input_shape[0], input_shape[1], 1)]
        return (input_shape[0], output_len)


