# -*- encoding: utf-8 -*-
"""
@File    : layers.py
@Author  : lilong
@Time    : 2023/2/16 12:45 下午
"""

import numpy as np


import tensorflow as tf
from tensorflow.keras.layers import Input, Dense, Layer, LSTM, Lambda
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model

tf.compat.v1.disable_v2_behavior()


def reverse_sequence(input, mask):
    """Reverse each sequence in `input` along the time axis, but only over
    its true (unpadded) length, leaving trailing padded positions in place.

    Args:
        input: Tensor of shape (batch_size, seq_len, ...) — sequences to
            reverse. (Parameter name kept for caller compatibility even
            though it shadows the builtin.)
        mask: Float tensor of shape (batch_size, seq_len, 1); 1.0 marks a
            valid timestep, 0.0 marks padding. The per-sample effective
            length is the sum of the mask over the time axis.

    Returns:
        Tensor with the same shape as `input` where, for each sample, only
        the first `sum(mask)` timesteps are reversed.

    Example:
        x    = [[1, 0, 2, 2],
                [2, 2, 0, 3]]
        mask marks entries > 0, so both rows have effective length 3 and
        tf.reverse_sequence flips only the first 3 positions per row:
        out  = [[2, 0, 1, 2],
                [0, 2, 2, 3]]
    """
    # Effective length per sample = count of 1s in the mask; K.round guards
    # against float accumulation error before the integer cast.
    seq_len = K.round(tf.reduce_sum(mask, axis=1)[:, 0])
    seq_len = K.cast(seq_len, 'int32')
    return tf.reverse_sequence(input, seq_len, seq_axis=1)


class OurLayer(Layer):
    """Layer subclass that adds a `reuse` method, allowing an existing
    (externally constructed) layer to be invoked inside this layer while
    its weights and updates are registered on this layer.
    """

    def reuse(self, layer, *args, **kwargs):
        """Call `layer` on the given inputs, building it first if needed,
        and fold its weights/updates into this layer's bookkeeping.

        Args:
            layer: the inner Keras layer to invoke.
            *args/**kwargs: forwarded to `layer.call`; the input tensor(s)
                must be the first positional arg or `kwargs['inputs']`.

        Returns:
            The output tensor(s) of `layer.call`.
        """
        # layer.built tells us whether build() has already created weights.
        print('==reuse:', layer.name, layer.built, args)
        if not layer.built:
            if len(args) > 0:
                inputs = args[0]
            else:
                inputs = kwargs['inputs']
            if isinstance(inputs, list):
                print('==reuse inputs:', inputs)
                # K.int_shape returns the shape of a tensor or variable
                # (tf.keras.backend.variable) as a tuple; for a backend
                # variable the shape can be ().
                input_shape = [K.int_shape(x) for x in inputs]
            else:
                input_shape = K.int_shape(inputs)
            print('==reuse input_shape:', input_shape)

            layer.build(input_shape)  # create the inner layer's weights for this input shape
        outputs = layer.call(*args, **kwargs)  # invoke the wrapped layer's call directly

        if not tf.keras.__version__.startswith('2.3.'):
            # On Keras versions other than 2.3.x, manually register the inner
            # layer's weights and updates on this layer so they participate
            # in training/saving. (2.3.x presumably tracks them itself —
            # NOTE(review): confirm against the targeted Keras versions.)
            for w in layer.trainable_weights:
                if w not in self._trainable_weights:
                    self._trainable_weights.append(w)
            for w in layer.non_trainable_weights:
                if w not in self._non_trainable_weights:
                    self._non_trainable_weights.append(w)
            for u in layer.updates:
                if not hasattr(self, '_updates'):
                    self._updates = []
                if u not in self._updates:
                    self._updates.append(u)

        print('==outputs:', outputs)
        return outputs


class OurBidirectional(OurLayer):
    """Hand-rolled bidirectional RNN wrapper that takes an explicit mask,
    so the backward pass reverses only the valid (unpadded) part of each
    sequence and stays aligned with the forward pass.
    """

    def __init__(self, layer, **args):
        super(OurBidirectional, self).__init__(**args)
        # Clone the given RNN layer twice from its config — independent
        # weight sets for the forward and backward directions.
        self.forward_layer = layer.__class__.from_config(layer.get_config())
        self.backward_layer = layer.__class__.from_config(layer.get_config())
        self.forward_layer_name = 'forward_' + self.forward_layer.name
        self.backward_layer_name = 'backward_' + self.backward_layer.name

    def compute_output_shape(self, input_shape):
        """Shape change: (2, 2) + (4,) -> (2, 2, 4), i.e. the leading dims
        of the first input (x) with the feature dim doubled to units*2."""
        return input_shape[0][:-1] + (self.forward_layer.units * 2,)

    def call(self, inputs, **kwargs):
        # inputs = [x, mask]; mask is expected as (batch, seq_len, 1) with
        # 1.0 at valid timesteps — see reverse_sequence.
        x, mask = inputs
        print("==call:", x.shape, mask.shape)

        # Backward direction: reverse x over each sample's true length, run
        # the RNN, then reverse the output back into original time order.
        x_backward = reverse_sequence(x, mask)
        print("==x_backward reverse 1:", x_backward)
        x_backward = self.reuse(self.backward_layer, x_backward)
        print("==x_backward reuse:", x_backward)
        x_backward = reverse_sequence(x_backward, mask)
        print("==x_backward reverse 2:", x_backward)

        # Forward direction runs on x as-is; concatenate on the feature axis.
        x_forward = self.reuse(self.forward_layer, x)
        print("==call x_forward:", x_forward)
        x = K.concatenate([x_forward, x_backward], -1)
        print("==call:", x)

        # if K.ndim(x) == 3:
        if x.shape.rank == 3:
            print("==return dim:", x, mask)
            return x * mask  # zero out padded timesteps (mask broadcasts over the feature dim)
        else:
            return x


def test_revise():
    """Smoke tests for mask-based sequence reversal (the mechanism used by
    reverse_sequence / OurBidirectional)."""

    def tt_1():
        # Test 1: build a Model around the reversal ops and run it on data.
        x = Input(shape=(None, ))
        x_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x)
        print('x_mask:', x_mask)

        # Per-sample valid length = number of 1s in the mask.
        seq_len = tf.round(tf.reduce_sum(x_mask, axis=1)[:, 0])
        print('seq_len:', seq_len)
        seq_len = tf.cast(seq_len, 'int32')

        out = tf.reverse_sequence(x, seq_len, seq_axis=1)
        print(out)

        model = Model(inputs=x, outputs=[out])

        # Example input.
        x_sample = np.array(
            [[1, 1, 2, 2],
             [2, 2, 3, 3]])
        rst = model.predict(x_sample)
        print(rst)

    def tt_2():
        # Test 2: direct check on concrete data.
        x_sample = np.array(
            [[1, 0, 2, 2],
             [2, 2, 0, 3]])
        # BUGFIX: the lambda previously closed over x_sample and ignored its
        # own argument, so the Lambda layer would compute the wrong mask for
        # any other input; use the lambda's argument instead.
        x_mask = Lambda(lambda x: K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32'))(x_sample)
        print('x_mask:', x_mask)

        # Reverse each row only over its valid (nonzero-masked) length.
        seq_len = tf.round(tf.reduce_sum(x_mask, axis=1)[:, 0])
        print('seq_len:', seq_len)
        seq_len = tf.cast(seq_len, 'int32')
        print('seq_len:', seq_len)

        out = tf.reverse_sequence(x_sample, seq_len, seq_axis=1)
        print(out)

    tt_2()


if __name__ == '__main__':

    # Test 1
    # test_revise()

    # # Test 2: symbolic Inputs through the custom bidirectional layer.
    # x = Input(shape=(2, 4))  # (btz, seq_len_x, seq_dim)
    # x_mask = Input(shape=(2, 1))  # (btz, seq_len_y, seq_dim)
    #
    # rst = OurBidirectional(LSTM(6, return_sequences=True))([x, x_mask])
    # print(rst)

    # Test 3: run the layer on concrete tensors and evaluate in a v1 session.
    x = np.array(
        [[[1, 1, 2, 2],
         [2, 2, 3, 3]]])
    x = tf.convert_to_tensor(x, dtype=tf.float32)
    x_mask = np.array(
        [[[1],
          [2]]])
    # NOTE(review): reverse_sequence expects a 0/1 mask; the value 2 here
    # makes the summed length 3 exceed the actual seq_len of 2 — confirm
    # whether this input is intentional.
    x_mask = tf.convert_to_tensor(x_mask, dtype=tf.float32)

    bi_layer = OurBidirectional(LSTM(6, return_sequences=True))([x, x_mask])
    print('rst:', bi_layer)
    tf.compat.v1.disable_eager_execution()

    from tensorflow.compat.v1 import ConfigProto
    from tensorflow.compat.v1 import InteractiveSession
    config = ConfigProto()
    session = InteractiveSession(config=config)
    print(session.run(bi_layer))

    # with tf.compat.v1.Session() as sess:
    #     numpy_data = bi_layer.eval()
    #     print(sess.run(bi_layer))
    #     print('gg:', tf.print(bi_layer))

    # # Test 4: mask derived from the input via a Lambda, wrapped in a Model.
    # x = Input(shape=(None,))
    # x_mask = Lambda(lambda x: tf.cast(tf.greater(tf.expand_dims(x, 2), 0), tf.float32))(x)
    # print('x_mask::', x_mask)
    # yy = OurBidirectional(LSTM(6, return_sequences=True))([x, x_mask])
    # model = Model(inputs=x, outputs=yy)
    # x = np.array(
    #     [[[1, 1, 2, 2],
    #      [2, 2, 3, 3]]])
    # tf.compat.v1.disable_eager_execution()
    # rst = model.predict(x)
    # # with tf.compat.v1.Session() as sess:
    # #     numpy_data = rst.eval()

    # # Test 5: same as Test 4 but feeding a raw numpy array as model input.
    # x = np.array(
    #     [[1, 1, 2, 2],
    #       [2, 2, 3, 3]])
    # x_mask = Lambda(lambda x: tf.cast(tf.greater(tf.expand_dims(x, 2), 0), tf.float32))(x)
    # print('x_mask::', x_mask)
    # yy = OurBidirectional(LSTM(6, return_sequences=True))([x, x_mask])
    # model = Model(inputs=x, outputs=yy)
    #
    # tf.compat.v1.disable_eager_execution()
    # rst = model.predict(x)
    # # with tf.compat.v1.Session() as sess:
    # #     numpy_data = rst.eval()


