# -*- encoding: utf-8 -*-
"""
@File    : model_layers.py
@Author  : lilong
@Time    : 2022/5/3 6:48 下午
"""

import tensorflow as tf
from tensorflow.keras.layers import Dense, Layer
from tensorflow.keras import backend as K

# from keras_layer_normalization import LayerNormalization as lnl

# NOTE(review): forces TF1-style (graph-mode) semantics for the whole module.
# Presumably required because OurLayer.reuse manipulates Keras internals
# (_trainable_weights / _updates) that behave differently under eager
# execution — confirm before removing.
tf.compat.v1.disable_v2_behavior()


class ScaleShift(Layer):
    """Scale-and-shift (affine) transform layer.

    Learns an element-wise prior over the output distribution:
    ``out = exp(log_scale) * x + shift``. Useful as a "prior distribution"
    layer, e.g. words in a headline are very likely to appear in the
    source article. See: https://www.zhihu.com/question/263856024

    example:
        x_sample = np.array(
            [
                [[0, 0, 0, 1, 0, 1, 0, 0]],
                [[0, 0, 1, 0, 0, 0, 0, 0]]
            ])
        x_sample = K.cast(x_sample, 'float32')
        x = ScaleShift()(x_sample)
        print(x.numpy())
        # [[[0. 0. 0. 1. 0. 1. 0. 0.]]
        #  [[0. 0. 1. 0. 0. 0. 0. 0.]]]
    """

    def __init__(self, **kwargs):
        super(ScaleShift, self).__init__(**kwargs)

    def build(self, input_shape):
        # Broadcastable shape: singleton on every axis except the last
        # (feature) axis, e.g. (1, 1, dim) for a 3-D input.
        weight_shape = (1,) * (len(input_shape) - 1) + (input_shape[-1],)
        self.log_scale = self.add_weight(
            name='log_scale', shape=weight_shape, initializer='zeros')
        self.shift = self.add_weight(
            name='shift', shape=weight_shape, initializer='zeros')

    def call(self, inputs, **kwargs):
        """Apply the affine transform.

        The learned scale is parameterized in log-space: tf.exp keeps it
        strictly positive while preserving ordering, which stabilizes the
        transform and its gradients.
        """
        return tf.exp(self.log_scale) * inputs + self.shift


class OurLayer(Layer):
    """Layer subclass adding a `reuse` helper so composite layers can call
    off-the-shelf sub-layers while keeping weights and updates tracked on
    the wrapper.
    """

    def reuse(self, layer, *args, **kwargs):
        # Build the sub-layer lazily on first use. `layer.built` guards
        # against rebuilding (and re-creating weights) on later calls.
        if not layer.built:
            inputs = args[0] if args else kwargs['inputs']
            if isinstance(inputs, list):
                # K.int_shape returns the static shape as a tuple; note a
                # bare K.variable reports shape ().
                shape = [K.int_shape(tensor) for tensor in inputs]
            else:
                shape = K.int_shape(inputs)
            # Build the sub-layer against the inferred input shape.
            layer.build(shape)
        # Invoke the sub-layer's forward computation directly.
        outputs = layer.call(*args, **kwargs)

        # Keras 2.3.x tracks sub-layer variables automatically; on other
        # versions, propagate them onto this wrapper by hand.
        if not tf.keras.__version__.startswith('2.3.'):
            for weight in layer.trainable_weights:
                if weight not in self._trainable_weights:
                    self._trainable_weights.append(weight)
            for weight in layer.non_trainable_weights:
                if weight not in self._non_trainable_weights:
                    self._non_trainable_weights.append(weight)
            for update in layer.updates:
                # `_updates` is created on demand, only if there is
                # anything to record.
                if not hasattr(self, '_updates'):
                    self._updates = []
                if update not in self._updates:
                    self._updates.append(update)

        return outputs


class OurBidirectional(OurLayer):
    """Hand-rolled bidirectional RNN wrapper that accepts an explicit mask
    so the forward and backward passes stay aligned.

    A bidirectional RNN runs the sequence forwards and backwards, then
    concatenates (or otherwise merges) the two results. Running the reverse
    pass naively on e.g. [1,0,3,4,5,0,0,0] would feed the trailing padding
    zeros into the recursion first, and their effect cannot be removed
    afterwards — padding must be excluded *before* the RNN, not after.

    The trick: reverse only the non-padding prefix ([1,0,3,4,5,0,0,0] ->
    [5,4,3,0,1,0,0,0], padding left in place), run a *forward* RNN, then
    reverse the result back the same way. Padding thus never participates
    in the recursion and the output stays aligned with the forward RNN.
    Reference: https://www.spaces.ac.cn/archives/6810
    """

    def __init__(self, layer, **args):
        super(OurBidirectional, self).__init__(**args)
        # Two independent copies of the wrapped RNN — weights are NOT shared
        # between the forward and backward directions.
        self.forward_layer = layer.__class__.from_config(layer.get_config())
        self.backward_layer = layer.__class__.from_config(layer.get_config())
        self.forward_layer_name = 'forward_' + self.forward_layer.name
        self.backward_layer_name = 'backward_' + self.backward_layer.name

    def compute_output_shape(self, input_shape):
        """Shape arithmetic: (batch, seq) + (units * 2,) -> (batch, seq, units * 2)."""
        return input_shape[0][:-1] + (self.forward_layer.units * 2,)

    def reverse_sequence(self, x_sample, mask):
        """Reverse each sample over its non-padding prefix only.

        `mask` has shape [batch_size, seq_len, 1]; summing it over the time
        axis gives each sample's true length. This assumes all padding IDs
        sit at the tail of the sequence. tf.reverse_sequence then flips only
        the first `length` steps of each row, leaving padding untouched.

        example (batch of 2, seq_len 4, token dim 5):
            # mask sums -> lengths [2, 3]
            x = [[[1,0,2,2,4], [2,2,0,3,3], [1,9,2,2,2], [1,0,8,2,2]],
                 [[1,4,2,2,2], [5,2,0,3,3], [1,9,0,2,2], [1,2,8,3,2]]]
            # reverse_sequence(x, mask) ->
            #   row 0: first 2 steps swapped, steps 3-4 (padding) kept;
            #   row 1: first 3 steps reversed, step 4 (padding) kept.
            # [[[2,2,0,3,3], [1,0,2,2,4], [1,9,2,2,2], [1,0,8,2,2]],
            #  [[1,9,0,2,2], [5,2,0,3,3], [1,4,2,2,2], [1,2,8,3,2]]]
        """
        lengths = tf.round(tf.reduce_sum(mask, axis=1)[:, 0])
        lengths = tf.cast(lengths, 'int32')
        return tf.reverse_sequence(x_sample, lengths, seq_axis=1, batch_axis=0)

    def call(self, inputs, **kwargs):
        x, mask = inputs
        # Forward direction: plain RNN over the original order.
        forward_out = self.reuse(self.forward_layer, x)
        # Backward direction: reverse (non-padding prefix only), run a
        # forward RNN, then reverse back so both outputs are aligned.
        reversed_in = self.reverse_sequence(x, mask)
        backward_out = self.reuse(self.backward_layer, reversed_in)
        backward_out = self.reverse_sequence(backward_out, mask)
        merged = K.concatenate([forward_out, backward_out], -1)
        if merged.shape.rank == 3:
            # Zero out padding positions in the merged output.
            return merged * mask
        else:
            return merged


class SelfModulatedLayerNormalization(OurLayer):
    """Conditional layer normalization, modeled on Self-Modulated Batch
    Normalization but with Layer Normalization substituted for BN.

    The condition tensor is passed through two small MLPs to produce
    per-sample gamma and beta, which then rescale and shift the
    layer-normalized inputs: ``ln(x) * (gamma + 1) + beta``.

    (Batch/layer normalization stabilizes training — mitigating vanishing/
    exploding gradients and speeding convergence — while also acting as a
    mild regularizer; here the normalization is per layer, not per batch.)
    """

    def __init__(self, num_hidden, **kwargs):
        super(SelfModulatedLayerNormalization, self).__init__(**kwargs)
        # Hidden width of the two conditioning MLPs.
        self.num_hidden = num_hidden

    def build(self, input_shape):
        super(SelfModulatedLayerNormalization, self).build(input_shape)
        output_dim = input_shape[0][-1]
        # Plain LN without its own affine parameters — gamma and beta are
        # generated from the condition instead.
        self.layernorm = LayerNormalization(center=False, scale=False)
        self.beta_dense_1 = Dense(self.num_hidden, activation='relu')
        self.beta_dense_2 = Dense(output_dim)
        self.gamma_dense_1 = Dense(self.num_hidden, activation='relu')
        self.gamma_dense_2 = Dense(output_dim)

    def compute_output_shape(self, input_shape):
        return input_shape[0]

    def call(self, inputs, **kwargs):
        inputs, cond = inputs
        normalized = self.reuse(self.layernorm, inputs)
        # Condition -> (beta, gamma) via two independent 2-layer MLPs.
        beta = self.reuse(self.beta_dense_2, self.reuse(self.beta_dense_1, cond))
        gamma = self.reuse(self.gamma_dense_2, self.reuse(self.gamma_dense_1, cond))

        # Insert singleton axes so the condition broadcasts over any extra
        # (e.g. time) dimensions of the normalized input.
        for _ in range(K.ndim(normalized) - K.ndim(cond)):
            beta = K.expand_dims(beta, 1)
            gamma = K.expand_dims(gamma, 1)
        return normalized * (gamma + 1) + beta


class Attention(OurLayer):
    """Multi-head scaled-dot-product attention.

    "Multi-head" simply means doing the same attention computation several
    times with independent (non-shared) projections and concatenating the
    per-head results.

    :param heads: number of attention heads.
    :param size_per_head: value dimension of each head; output dim is
        ``heads * size_per_head``.
    :param key_size: query/key dimension per head (defaults to
        ``size_per_head``).
    :param mask_right: if True, apply a causal (lower-triangular) mask so a
        position cannot attend to positions to its right.
    """

    def __init__(self, heads, size_per_head, key_size=None, mask_right=False, **kwargs):
        super(Attention, self).__init__(**kwargs)
        self.heads = heads
        self.size_per_head = size_per_head
        self.out_dim = heads * size_per_head
        self.key_size = key_size if key_size else size_per_head
        self.mask_right = mask_right

    def build(self, input_shape):
        super(Attention, self).build(input_shape)
        # Bias-free linear projections for queries, keys and values.
        self.q_dense = Dense(self.key_size * self.heads, use_bias=False)
        self.k_dense = Dense(self.key_size * self.heads, use_bias=False)
        self.v_dense = Dense(self.out_dim, use_bias=False)

    def mask(self, x, mask, mode='mul'):
        """Apply a padding mask to ``x``.

        ``1 - mask`` takes two values:
        1: 0 — a real token exists at this position, so in 'add' mode x is
           unchanged (x - 0);
        2: 1 — the position is padding, so in 'add' mode x is pushed to
           -1e10 (effectively -inf before the softmax).
        In 'mul' mode padded positions are simply zeroed. The purpose is to
        exclude padding when computing attention scores.
        """
        if mask is None:
            return x
        else:
            # Append singleton axes so the mask broadcasts against x.
            for _ in range(K.ndim(x) - K.ndim(mask)):
                mask = K.expand_dims(mask, K.ndim(mask))
            if mode == 'mul':
                return x * mask
            else:
                return x - (1 - mask) * 1e10

    def call(self, inputs, **kwargs):
        """Compute the attention output.

        Formula: softmax(q·kᵀ / sqrt(dk))·v, where dk is ``self.key_size``.
        See: https://pic1.zhimg.com/v2-e698e0083f4cc8d0fae45c501fb9aef8_r.jpg
        """

        # inputs -> [y, x, x, x_mask]
        # In a seq2seq setting, attention reuses the encoder output as both
        # keys and values, hence the repeated x.
        q, k, v = inputs[:3]
        v_mask, q_mask = None, None
        if len(inputs) > 3:
            v_mask = inputs[3]
            if len(inputs) > 4:
                q_mask = inputs[4]

        # Linear projections.
        qw = self.reuse(self.q_dense, q)
        kw = self.reuse(self.k_dense, k)
        vw = self.reuse(self.v_dense, v)

        # Split into heads: (batch, seq, heads*dim) -> (batch, seq, heads, dim).
        # Projecting into several lower-dimensional subspaces and
        # concatenating at the end enriches the representation while keeping
        # the overall output dimension (and cost) the same.
        qw = K.reshape(qw, (-1, K.shape(qw)[1], self.heads, self.key_size))
        kw = K.reshape(kw, (-1, K.shape(kw)[1], self.heads, self.key_size))
        vw = K.reshape(vw, (-1, K.shape(vw)[1], self.heads, self.size_per_head))

        # Transpose to (batch, heads, seq, dim): (0, 2, 1, 3) swaps the
        # seq and heads axes so per-head matmuls operate on (seq, dim).
        qw = K.permute_dimensions(qw, (0, 2, 1, 3))
        kw = K.permute_dimensions(kw, (0, 2, 1, 3))
        vw = K.permute_dimensions(vw, (0, 2, 1, 3))

        # Scaled dot-product attention scores, shape (batch, heads, q_len, k_len).
        a = tf.einsum('ijkl,ijml->ijkm', qw, kw) / self.key_size ** 0.5
        # Bring the key axis to position 1 so v_mask (batch, k_len, 1)
        # broadcasts correctly, mask, then restore the layout.
        a = K.permute_dimensions(a, (0, 3, 2, 1))
        a = self.mask(a, v_mask, 'add')
        a = K.permute_dimensions(a, (0, 3, 2, 1))
        if self.mask_right:
            ones = K.ones_like(a[:1, :1])
            # Causal mask. band_part(ones, -1, 0) keeps the lower triangle
            # (num_lower=-1: keep all sub-diagonals; num_upper=0: drop all
            # super-diagonals), e.g.:
            # [[1, 1, 1, 1]]      [[1, 0, 0, 0]]
            # [[1, 1, 1, 1]]  ->  [[1, 1, 0, 0]]
            # [[1, 1, 1, 1]]      [[1, 1, 1, 0]]
            # [[1, 1, 1, 1]]      [[1, 1, 1, 1]]
            # Subtracting 1e10 above the diagonal forbids attending to the
            # right. NOTE: was `K.tf.matrix_band_part`, but tf.keras.backend
            # has no `.tf` attribute (that only existed in old multi-backend
            # Keras), which made mask_right=True raise AttributeError;
            # tf.linalg.band_part is the TF2 equivalent.
            mask = (ones - tf.linalg.band_part(ones, -1, 0)) * 1e10
            a = a - mask

        # Softmax over keys, weight the values, merge heads back.
        a = K.softmax(a)
        o = tf.einsum('ijkl,ijlm->ijkm', a, vw)
        o = K.permute_dimensions(o, (0, 2, 1, 3))
        o = K.reshape(o, (-1, K.shape(o)[1], self.out_dim))
        o = self.mask(o, q_mask, 'mul')
        return o

    def compute_output_shape(self, input_shape):
        return [input_shape[0][0], input_shape[0][1], self.out_dim]


class LayerNormalization(Layer):
    """Layer normalization layer.

    See: [Layer Normalization](https://arxiv.org/pdf/1607.06450.pdf)

    Re-implementation of `keras_layer_normalization.LayerNormalization`
    using only tf.keras primitives, because the original package is tied to
    TF 1.x-era Keras.
    """

    def __init__(self,
                 center=True,
                 scale=True,
                 epsilon=None,
                 gamma_initializer='ones',
                 beta_initializer='zeros',
                 gamma_regularizer=None,
                 beta_regularizer=None,
                 gamma_constraint=None,
                 beta_constraint=None,
                 **kwargs):
        """Configure the layer.

        :param center: Add an offset parameter (beta) if it is True.
        :param scale: Add a scale parameter (gamma) if it is True.
        :param epsilon: Epsilon for calculating variance.
        :param gamma_initializer: Initializer for the gamma weight.
        :param beta_initializer: Initializer for the beta weight.
        :param gamma_regularizer: Optional regularizer for the gamma weight.
        :param beta_regularizer: Optional regularizer for the beta weight.
        :param gamma_constraint: Optional constraint for the gamma weight.
        :param beta_constraint: Optional constraint for the beta weight.
        :param kwargs: Forwarded to the base Layer.
        """
        super(LayerNormalization, self).__init__(**kwargs)
        self.supports_masking = True
        self.center = center
        self.scale = scale
        if epsilon is None:
            # Default: square of the backend fuzz factor (~1e-14).
            epsilon = tf.keras.backend.epsilon() * tf.keras.backend.epsilon()
        self.epsilon = epsilon
        self.gamma_initializer = tf.keras.initializers.get(gamma_initializer)
        self.beta_initializer = tf.keras.initializers.get(beta_initializer)
        self.gamma_regularizer = tf.keras.regularizers.get(gamma_regularizer)
        self.beta_regularizer = tf.keras.regularizers.get(beta_regularizer)
        self.gamma_constraint = tf.keras.constraints.get(gamma_constraint)
        self.beta_constraint = tf.keras.constraints.get(beta_constraint)
        # Created in build() depending on `scale` / `center`.
        self.gamma, self.beta = None, None

    def get_config(self):
        base_config = super(LayerNormalization, self).get_config()
        base_config.update({
            'center': self.center,
            'scale': self.scale,
            'epsilon': self.epsilon,
            'gamma_initializer': tf.keras.initializers.serialize(self.gamma_initializer),
            'beta_initializer': tf.keras.initializers.serialize(self.beta_initializer),
            'gamma_regularizer': tf.keras.regularizers.serialize(self.gamma_regularizer),
            'beta_regularizer': tf.keras.regularizers.serialize(self.beta_regularizer),
            'gamma_constraint': tf.keras.constraints.serialize(self.gamma_constraint),
            'beta_constraint': tf.keras.constraints.serialize(self.beta_constraint),
        })
        return base_config

    def compute_output_shape(self, input_shape):
        # Normalization does not change the shape.
        return input_shape

    def compute_mask(self, inputs, input_mask=None):
        # Pass any incoming mask through unchanged.
        return input_mask

    def build(self, input_shape):
        # One gamma/beta value per feature (last axis).
        feature_shape = input_shape[-1:]
        if self.scale:
            self.gamma = self.add_weight(
                shape=feature_shape,
                initializer=self.gamma_initializer,
                regularizer=self.gamma_regularizer,
                constraint=self.gamma_constraint,
                name='gamma',
            )
        if self.center:
            self.beta = self.add_weight(
                shape=feature_shape,
                initializer=self.beta_initializer,
                regularizer=self.beta_regularizer,
                constraint=self.beta_constraint,
                name='beta',
            )
        super(LayerNormalization, self).build(input_shape)

    def call(self, inputs, training=None):
        # Standardize over the last (feature) axis, then optionally apply
        # the learned affine parameters.
        mean = K.mean(inputs, axis=-1, keepdims=True)
        variance = K.mean(K.square(inputs - mean), axis=-1, keepdims=True)
        outputs = (inputs - mean) / K.sqrt(variance + self.epsilon)
        if self.scale:
            outputs *= self.gamma
        if self.center:
            outputs += self.beta
        return outputs
