# -*- encoding: utf-8 -*-
"""
@File    : layer_process.py
@Author  : lilong
@Time    : 2023/3/1 10:36 上午
"""

import os
import json
import numpy as np
from tqdm import tqdm
from typing import Dict, List


import tensorflow as tf
from tensorflow.keras.layers import (Input, Lambda, Embedding, LSTM,
        Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D,
        Flatten, Activation, GlobalAveragePooling2D, GlobalMaxPooling2D,
        add, Layer, InputSpec, BatchNormalization, Concatenate, LeakyReLU)

from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras import initializers
from tensorflow.keras.callbacks import Callback
from tensorflow.keras.optimizers import Adam
from tensorflow.keras.utils import get_source_inputs


class LayerProcess:
    """Keras/TF tensor helpers meant to be wrapped in ``Lambda`` layers.

    Each method takes a ``(tensor, mask)`` pair produced elsewhere in the
    model graph and returns a new tensor; nothing here owns weights.
    """

    def __init__(self, pre_obj):
        # pre_obj: preprocessing object exposing a `chars` vocabulary;
        # only len(pre_obj.chars) is read here (vocab size for one-hot).
        # NOTE(review): assumed contract — confirm against the caller.
        self.pre_obj = pre_obj

    def to_one_hot(self, input):  # noqa: A002 — `input` kept for caller compatibility
        """Return a vocabulary-sized 0/1 vector marking which tokens occur.

        Args:
            input: pair ``(x, x_mask)`` where
                x: (btz, seq_len) integer token-id tensor,
                x_mask: (btz, seq_len, 1) float mask of 0s and 1s
                (1 for real tokens, 0 for padding).

        Returns:
            (btz, 1, vocab_size) float32 tensor; entry is 1.0 iff the
            corresponding token id appears at an unmasked position.

        Example (shapes only, values illustrative)::

            x = [[5, 3, 0, 0],
                 [2, 2, 8, 0]]                      # (2, 4)
            x_mask = cast(greater(expand_dims(x, 2), 0))  # (2, 4, 1)
            oh = one_hot(x, 8)                      # (2, 4, 8)
            s = sum(x_mask * oh, axis=1, keepdims=True)   # (2, 1, 8)
            # s counts occurrences; greater(s, 0.5) binarizes counts >= 1.
        """
        x, x_mask = input
        x = K.cast(x, 'int32')                          # (btz, seq_len)
        # +4 reserves extra slots (e.g. special tokens) beyond the raw
        # vocabulary size — the depth must cover every possible token id.
        x = K.one_hot(x, len(self.pre_obj.chars) + 4)   # (btz, seq_len, vocab)
        # Mask out padding positions, then collapse the sequence axis:
        # the result counts how often each id occurs at valid positions.
        x = K.sum(x_mask * x, axis=1, keepdims=True)    # (btz, 1, vocab)
        # Binarize: any count >= 1 becomes 1.0, zero stays 0.0.
        x = K.cast(K.greater(x, 0.5), 'float32')
        return x

    def seq_maxpool(self, x):
        """Max-pool over the sequence axis, ignoring masked positions.

        Args:
            x: pair ``(seq, mask)`` where
                seq: (None, seq_len, s_size) float tensor,
                mask: (None, seq_len, 1) float mask of 0s and 1s.

        Returns:
            (None, s_size) tensor: per-feature max over unmasked steps.

        Masked steps are pushed to -1e10 before the max so they can never
        win; with at least one valid step per row the result is exact.
        """
        seq, mask = x
        # Rebind rather than `seq -= ...`: the augmented form mutates a
        # caller-owned NumPy array in place (and raises for int dtypes),
        # while plain subtraction builds a new tensor in both NumPy and TF.
        seq = seq - (1 - mask) * 1e10
        return K.max(seq, 1)
