from os import name
import tensorflow as tf
import numpy as np
from keras import layers
from keras.regularizers import l2
from keras import initializers
from keras.models import Model
from keras.optimizers import Adam
import keras.backend as K
def f1(y_true, y_pred):
    """Batch-wise F1 score metric.

    Computes precision and recall over the whole batch (predictions are
    rounded to {0, 1} first) and combines them with the harmonic mean.
    ``K.epsilon()`` guards every division against a zero denominator.

    Note: this is only a batch-wise approximation of the dataset-level F1.
    """
    # True positives plus the two marginals, after thresholding at 0.5.
    tp = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
    predicted_pos = K.sum(K.round(K.clip(y_pred, 0, 1)))
    possible_pos = K.sum(K.round(K.clip(y_true, 0, 1)))

    # Precision: fraction of selected items that are relevant.
    p = tp / (predicted_pos + K.epsilon())
    # Recall: fraction of relevant items that are selected.
    r = tp / (possible_pos + K.epsilon())

    return 2 * ((p * r) / (p + r + K.epsilon()))

class CapsuleNorm(layers.Layer):
    """Compute the Euclidean length of each capsule vector.

    inputs: shape=[None, num_vectors, dim_vector]
    output: shape=[None, num_vectors]

    The per-capsule norm is used downstream as the class score, so this
    layer turns the final capsule tensor into the prediction vector.
    """
    def call(self, inputs, **kwargs):
        # epsilon inside the sqrt keeps the gradient finite for zero vectors
        return K.sqrt(K.sum(K.square(inputs), -1) + K.epsilon())

    def compute_output_shape(self, input_shape):
        # drop the trailing vector-dimension axis
        return input_shape[:-1]

    def get_config(self):
        # No extra hyper-parameters beyond the base layer's, so the base
        # config is sufficient for serialization.
        # (Removed a leftover debug print and commented-out code.)
        return super().get_config()


class Routing(layers.Layer):
    """Capsule routing layer: maps input capsules to output (class) capsules.

    Input : [None, input_num_capsule, input_dim_capsule]
    Output: [None, num_capsule, dim_capsule]

    With ``routing=True`` the coupling coefficients are refined by dynamic
    routing-by-agreement for ``num_routing`` iterations; otherwise the
    transformed input capsules are simply summed (static routing).
    """

    def __init__(self, num_capsule,
                 dim_capsule,
                 routing=False,
                 num_routing=3,
                 l2_constant=0.0001,
                 kernel_initializer='glorot_uniform', **kwargs):
        """
        :param num_capsule: number of output capsules (number of classes)
        :param dim_capsule: length of each output capsule vector
        :param routing: True -> dynamic routing, False -> static routing
        :param num_routing: number of dynamic-routing iterations
        :param l2_constant: l2 regularization strength for the transform matrix
        :param kernel_initializer: initializer spec for the transform matrix
        """
        super(Routing, self).__init__(**kwargs)
        self.num_capsule = num_capsule    # number of output capsules (classes)
        self.dim_capsule = dim_capsule    # output capsule vector length
        self.routing = routing            # routing mode flag
        self.num_routing = num_routing    # routing iteration count
        self.l2_constant = l2_constant    # l2 regularization strength
        # resolve the initializer spec to an initializer instance
        self.kernel_initializer = initializers.get(kernel_initializer)

    def build(self, input_shape):
        self.input_num_capsule = input_shape[1]  # e.g. 6
        self.input_dim_capsule = input_shape[2]  # e.g. 10

        # Transform matrix W, shape (num_capsule, input_num_capsule,
        # dim_capsule, input_dim_capsule), e.g. (2, 6, 16, 10)
        self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,
                                        self.dim_capsule, self.input_dim_capsule],
                                 initializer=self.kernel_initializer,
                                 regularizer=l2(self.l2_constant),
                                 name='capsule_weight')
        self.built = True

    def call(self, inputs, training=True):
        # (None, in_num, in_dim) -> (None, 1, in_num, in_dim)
        inputs_expand = K.expand_dims(inputs, 1)

        # Repeat once per output capsule:
        # (None, 1, in_num, in_dim) -> (None, num_capsule, in_num, in_dim)
        inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])

        # Prediction vectors u_hat:
        # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]
        inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)

        # dynamic routing
        if self.routing:
            # routing logits b, shape [batch, num_capsule, input_num_capsule]
            b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])

            for i in range(self.num_routing):
                # Coupling coefficients: softmax over the output-capsule axis.
                # FIX: tf.nn.softmax's `dim=` kwarg was removed; use `axis=`.
                c = tf.nn.softmax(b, axis=1)
                # outputs = [batch_size, num_capsule, dim_capsule]
                outputs = squash(K.batch_dot(c, inputs_hat, [2, 2]))

                # Update the logits by agreement on every iteration but the
                # last. FIX: the original compared `i` against the *boolean*
                # `self.routing` (True - 1 == 0), so `b` was never updated and
                # dynamic routing degenerated to a single uniform pass.
                if i < self.num_routing - 1:
                    b += K.batch_dot(outputs, inputs_hat, [2, 3])

        # static routing: sum the prediction vectors and squash
        else:
            # outputs = [batch_size, num_capsule, dim_capsule]
            outputs = K.sum(inputs_hat, axis=2)
            outputs = squash(outputs)
        return outputs

    def compute_output_shape(self, input_shape):
        return tuple([None, self.num_capsule, self.dim_capsule])

    def get_config(self):
        # serialize layer hyper-parameters on top of the base config
        config = {
            'num_capsule': self.num_capsule,
            'dim_capsule': self.dim_capsule,
            'routing': self.routing,
            'num_routing': self.num_routing,
            'l2_constant': self.l2_constant
        }
        base_config = super(Routing, self).get_config()
        return dict(list(base_config.items()) + list(config.items()))


def squash(vectors, axis=-1):
    """Capsule squashing non-linearity.

    Shrinks long vectors toward unit length and short vectors toward zero,
    preserving direction.

    :param vectors: N-dim tensor of capsule vectors
    :param axis: axis along which to squash
    :return: tensor with the same shape as ``vectors``
    """
    norm_sq = K.sum(K.square(vectors), axis, keepdims=True)
    # |v|^2 / (1 + |v|^2) shrinks the magnitude into [0, 1) ...
    shrink = norm_sq / (1 + norm_sq)
    # ... and dividing by |v| (epsilon-stabilized) normalizes the direction.
    factor = shrink / K.sqrt(norm_sq + K.epsilon())
    return factor * vectors


def margin_loss(y_true, y_pred):
    """CapsNet margin loss.

    Penalizes present classes whose capsule norm falls below 0.9 and absent
    classes whose norm rises above 0.1 (the latter down-weighted by 0.5).

    :param y_true: [None, n_classes] one-hot labels
    :param y_pred: [None, num_capsule] capsule norms
    :return: a scalar loss value
    """
    # Present classes: push the norm above the 0.9 margin.
    present_term = y_true * K.square(K.maximum(0., 0.9 - y_pred))
    # Absent classes: push the norm below 0.1, down-weighted by 0.5.
    absent_term = 0.5 * (1 - y_true) * K.square(K.maximum(0., y_pred - 0.1))
    per_class = present_term + absent_term

    # Sum over classes, then average over the batch.
    return K.mean(K.sum(per_class, 1))


def get_model(self, summary=True):
    """Build and compile the capsule-network text classifier.

    NOTE(review): defined at module level but takes ``self`` — presumably
    attached to (or called through) a config-holding object that provides
    sequence_length, vocab_size, embedding_size, etc.; confirm against caller.

    :param summary: if True, print the Keras model summary.
    :return: a compiled Model mapping token-id sequences to per-class capsule norms.
    """

    # Select routing mode from config (truthy flag -> dynamic routing).
    if self.routing:
        use_routing = True
    else:
        use_routing = False

    # Input: a batch of token-id sequences, e.g. (None, 800).
    input_tokens = layers.Input((self.sequence_length,))
    # Embedding, e.g. (None, 800) -> (None, 800, 300).
    # NOTE(review): compares against the *string* "None" — presumably the
    # config stores the sentinel as text; confirm.
    if self.pretrain_vec == "None":
        print("不使用预训练词向量")
        # Randomly-initialized embedding (no pretrained weights).
        embedding = layers.Embedding(self.vocab_size, self.embedding_size,
                                    dropout=self.dropout_ratio,
                                    #weights=[self.pretrain_vec],
                                    trainable=True,
                                    embeddings_regularizer=l2(self.l2), mask_zero=True)(input_tokens)
    else:
        print("使用预训练词向量")
        # Embedding initialized from pretrained word vectors, still trainable.
        embedding = layers.Embedding(self.vocab_size, self.embedding_size,
                                        dropout=self.dropout_ratio,
                                        weights=[self.pretrain_vec],
                                        trainable=True,
                                        embeddings_regularizer=l2(self.l2), mask_zero=True)(input_tokens)        
    # Add a trailing channel axis: (None, 800, 300) -> (None, 800, 300, 1).
    embedding = layers.Lambda(lambda x: K.expand_dims(x, axis=-1))(embedding)

    # Non-linear gate branch (ELU): (None, 800, 300, 1) -> (None, 798, 1, 256).
    elu_layer = layers.Conv2D(self.num_filter, kernel_size=(self.filter_size, self.embedding_size),
                              use_bias=False,
                              kernel_regularizer=l2(self.l2), activation=None)(embedding)
    elu_layer = layers.BatchNormalization()(elu_layer)
    elu_layer = layers.Activation('elu')(elu_layer)

    # Linear branch over the same input, matching shape (None, 798, 1, 256).
    conv_layer = layers.Conv2D(self.num_filter, kernel_size=(self.filter_size, self.embedding_size),
                               use_bias=False,
                               kernel_regularizer=l2(self.l2), activation=None)(embedding)
    conv_layer = layers.BatchNormalization()(conv_layer)

    # Element-wise product of the two branches acts as a gating mechanism.
    gate_layer = layers.Multiply()([elu_layer, conv_layer])

    # Dropout on the gated feature map, shape (None, 798, 1, 256).
    gate_layer = layers.Dropout(self.dropout_ratio)(gate_layer)

    # Convolutional (primary) capsule layer:
    # (None, 798, 1, 256) -> (None, 1, 1, num_capsule * len_ui).
    # self.len_ui: length of each primary-capsule vector (e.g. 10)
    # self.num_capsule: number of primary capsules (e.g. 6)
    h_i = layers.Conv2D(self.num_capsule * self.len_ui,
                        kernel_size=(K.int_shape(gate_layer)[1], 1),
                        use_bias=False,
                        kernel_regularizer=l2(self.l2), activation=None)(gate_layer)
    # Reshape the flat output into capsule vectors: -> (None, num_capsule, len_ui).
    h_i = layers.Reshape((self.num_capsule, self.len_ui))(h_i)


    h_i = layers.BatchNormalization()(h_i)

    h_i = layers.Activation('relu')(h_i)

    # dropout on the primary capsules
    h_i = layers.Dropout(self.dropout_ratio)(h_i)

    # Routing to class capsules (num_classes output capsules of length len_vj):
    # (None, num_capsule, len_ui) -> (None, num_classes, len_vj).
    text_caps = Routing(num_capsule=self.num_classes,
                        l2_constant=self.l2,
                        dim_capsule=self.len_vj,
                        routing=use_routing,
                        num_routing=3)(h_i)

    # Per-class capsule norms serve as the prediction scores.
    output = CapsuleNorm()(text_caps)

    model = Model(input_tokens, output, name='text-capsnet')

    if summary:
        model.summary()

    # Compile with the CapsNet margin loss and the batch-wise f1 metric.
    model.compile(loss=[margin_loss], optimizer=Adam(self.init_lr, beta_1=0.7, beta_2=0.999, amsgrad=True),
                  metrics=['accuracy',f1])

    return model

def get_GRU_model(self, summary=True):
    """Build and compile the dual-embedding capsule classifier.

    NOTE(review): despite the name, no GRU layer appears in this graph —
    it mirrors ``get_model`` but gates the word-embedding branch with a
    second ("cw2c") embedding branch. Confirm the intended architecture.

    :param summary: if True, print the Keras model summary.
    :return: a compiled Model.
    """

    input_tokens = layers.Input((self.sequence_length,))

    # Second embedding branch, always initialized from self.pretrain_cw.
    cw_embedding = layers.Embedding(
        name="sougo_cw2c",
        input_dim=self.vocab_size,output_dim=self.embedding_size,
        dropout=self.dropout_ratio,
        weights=[self.pretrain_cw],
        trainable=True
    )(input_tokens)
    # NOTE(review): debug prints left in — consider removing for production.
    print("_"*10)
    print(type(cw_embedding),cw_embedding)
    print(type(self.pretrain_cw),self.pretrain_cw.shape)
    # Main word-embedding branch; pretrained weights are optional.
    # NOTE(review): compares against the *string* "None" — presumably the
    # config stores the sentinel as text; confirm.
    if self.pretrain_vec == "None":
        print("不使用预训练词向量")
        embedding = layers.Embedding(
            name="random_w2c",
            input_dim=self.vocab_size, output_dim=self.embedding_size,
            dropout=self.dropout_ratio,
            trainable=True,
            embeddings_regularizer=l2(self.l2), 
            mask_zero=True)(input_tokens)
    else:
        print("使用预训练词向量")
        embedding = layers.Embedding(
            name="sougo_w2c",
            input_dim=self.vocab_size, output_dim=self.embedding_size,
            dropout=self.dropout_ratio,
            weights=[self.pretrain_vec],
            trainable=True,
            embeddings_regularizer=l2(self.l2), mask_zero=True)(input_tokens)  
    print("-"*10)
    print(type(embedding),embedding)
    # Add a trailing channel axis so Conv2D sees a single-channel "image".
    embedding = layers.Lambda(lambda x: K.expand_dims(x, axis=-1))(embedding)   
    cw_embedding = layers.Lambda(lambda x: K.expand_dims(x, axis=-1))(cw_embedding)   

    # ELU-activated convolution over the cw2c branch.
    cw_layer = layers.Conv2D(name="cw2c_conv",filters=self.num_filter, kernel_size=(self.filter_size, self.embedding_size),
                              use_bias=False,
                              kernel_regularizer=l2(self.l2), activation=None)(cw_embedding)
    cw_layer = layers.BatchNormalization()(cw_layer)
    cw_layer = layers.Activation('elu')(cw_layer)

    # ELU-activated convolution over the word-embedding branch.
    # NOTE(review): unlike get_model, BOTH branches here are ELU-activated
    # before the gate — confirm this asymmetry with get_model is intended.
    conv_layer = layers.Conv2D(name="w2c_conv",filters=self.num_filter, kernel_size=(self.filter_size, self.embedding_size),
                                use_bias=False,
                                kernel_regularizer=l2(self.l2), activation=None)(embedding)

    conv_layer = layers.BatchNormalization()(conv_layer)
    conv_layer = layers.Activation("elu")(conv_layer)
    
    # Element-wise product of the two branches acts as a gating mechanism.
    gate_layer = layers.Multiply()([cw_layer, conv_layer])
    gate_layer = layers.Dropout(self.dropout_ratio)(gate_layer)

    # Primary capsule layer: collapse the time axis into num_capsule * len_ui features.
    h_i = layers.Conv2D(name="main_capsule",filters=self.num_capsule * self.len_ui,
                        kernel_size=(K.int_shape(gate_layer)[1], 1),
                        use_bias=False,
                        kernel_regularizer=l2(self.l2), activation=None)(gate_layer)
    # Reshape into capsule vectors: -> (None, num_capsule, len_ui).
    h_i = layers.Reshape((self.num_capsule, self.len_ui))(h_i)

    h_i = layers.BatchNormalization()(h_i)

    h_i = layers.Activation('relu')(h_i)

    h_i = layers.Dropout(self.dropout_ratio)(h_i)

    # Dynamic routing to class capsules (routing hard-coded to True here).
    text_caps = Routing(num_capsule=self.num_classes,
                        l2_constant=self.l2,
                        dim_capsule=self.len_vj,
                        routing=True,
                        num_routing=3)(h_i)
    # Per-class capsule norms serve as the prediction scores.
    output = CapsuleNorm()(text_caps)
    model = Model(input_tokens, output, name=self.model_name)

    if summary:
        model.summary()
    # Compile with the CapsNet margin loss and the batch-wise f1 metric.
    model.compile(loss=[margin_loss], optimizer=Adam(self.init_lr, beta_1=0.7, beta_2=0.999, amsgrad=True),
                  metrics=['accuracy',f1])
    return model
    