# -*- encoding: utf-8 -*-
"""
@File    : seq_test.py
@Author  : lilong
@Time    : 2022/5/3 9:45 下午
"""

import os
import traceback
from typing import Dict, List
import numpy as np


import tensorflow as tf
from tensorflow.keras.layers import Input, Lambda, Embedding, LSTM, \
        Dense, Conv2D, MaxPooling2D, AveragePooling2D, ZeroPadding2D, \
        Flatten, Activation, GlobalAveragePooling2D, GlobalMaxPooling2D, \
        add, Layer, InputSpec, BatchNormalization
from tensorflow.keras import backend as K
from tensorflow.keras.models import Model
from tensorflow.keras import initializers
from tensorflow.keras.utils import get_source_inputs

from keras_layer_normalization import LayerNormalization
from autoTitleSeq2seq.seq2seq import PreProcess, Seq2SeqAutoSummary
from autoTitleSeq2seq.model_layers_test import ScaleShift, OurBidirectional, SelfModulatedLayerNormalization, Attention


# This project builds graphs the TF1 way; when running on TF 2.x, fall
# back to graph mode / v1 behavior.  Compare the major version number
# numerically: the original lexicographic string compare would misorder
# future versions (e.g. '10.0.0' >= '2.0.0' is False as strings).
if int(tf.__version__.split('.')[0]) >= 2:
    tf.compat.v1.disable_eager_execution()
    tf.compat.v1.disable_v2_behavior()


class Seq2seqTest:
    """Test harness for building/inspecting the seq2seq auto-summary model."""

    def __init__(self, pre_obj):
        # Preprocessing object; must expose `chars` (the char vocabulary)
        # used to size the one-hot / embedding layers below.
        self.pre_process = pre_obj

    def stepTest(self, x, y):
        """Step test: build the input mask and its one-hot projection.

        NOTE(review): `y` is currently unused here.
        """
        # Bug fix: K.cast requires an explicit dtype — the original call
        # omitted it (TypeError at graph-build time).  'float32' matches
        # the mask lambdas in modelMain().
        x_mask = K.cast(K.greater(K.expand_dims(x, 2), 0), 'float32')
        x_one_hot = self.to_one_hot([x, x_mask])
        # Return the result so callers can inspect it (previously None).
        return x_one_hot

    def to_one_hot(self, x_and_mask):
        """Emit a vocab-sized binary vector marking which tokens occur in the text.

        (Learns the prior that title words tend to appear in the article.)
        """
        x, x_mask = x_and_mask
        x = K.cast(x, 'int32')
        # +4 accounts for the reserved placeholder ids on top of the vocab.
        x = K.one_hot(x, len(self.pre_process.chars) + 4)
        # Mask out padding positions, then collapse over time.
        x = K.sum(x_mask * x, 1, keepdims=True)
        # Any positive count -> 1.0 (presence indicator, not frequency).
        x = K.cast(K.greater(x, 0.5), 'float32')
        return x

    def modelMain(self):
        """Assemble the (partial) seq2seq model graph.

        NOTE(review): construction is unfinished — the Model creation and
        return below are still commented out, so this currently returns None.
        """
        x_in = Input(shape=(None,))
        y_in = Input(shape=(None,))
        x, y = x_in, y_in

        # Padding masks: 1.0 where the token id is > 0, else 0.0.
        x_mask = Lambda(lambda t: K.cast(K.greater(K.expand_dims(t, 2), 0), 'float32'))(x)
        y_mask = Lambda(lambda t: K.cast(K.greater(K.expand_dims(t, 2), 0), 'float32'))(y)

        # Prior-distribution layer: learn the output prior (title words
        # are likely to have appeared in the article).
        x_one_hot = Lambda(self.to_one_hot)
        x_one_hot = x_one_hot([x, x_mask])
        x_prior = ScaleShift()(x_one_hot)

        # Shared embedding for source and target (128-dim).
        embedding = Embedding(len(self.pre_process.chars) + 4, 128)
        x = embedding(x)
        y = embedding(y)

        # Encoder: bidirectional LSTM (second layer still disabled below).
        z_dim = 128
        x = LayerNormalization()(x)
        x = OurBidirectional(LSTM(z_dim // 2, return_sequences=True))([x, x_mask])
        # x = LayerNormalization()(x)
        # x = OurBidirectional(LSTM(z_dim // 2, return_sequences=True))([x, x_mask])
        #
        # model = Model(x_in, x_one_hot)
        #
        # return model


if __name__ == '__main__':
    pre_obj = PreProcess()
    pre_obj.statis()

    # Cache a single (x, y) sample on disk so repeated runs can reuse it.
    example_path = 'example.npz'
    if not os.path.exists(example_path):
        data_sample: List[np.ndarray] = pre_obj.get_one_sample()
        for sam in data_sample:
            print(sam)
            np.savez(example_path, x=sam[0], y=sam[1])
            break  # only the first sample is kept
    # Always read back through np.load: the NpzFile supports the 'x'/'y'
    # key lookup below.  (The original kept the raw array on the cache-miss
    # path, which cannot be indexed by string and crashed at sample['x'].)
    sample = np.load(example_path, allow_pickle=True)

    x_content = sample['x']
    y_label = sample['y']
    # print(x_content, y_label)

    s2s = Seq2seqTest(pre_obj)

    model = None  # keep the name bound even if model building fails
    try:
        model = s2s.modelMain()
    except Exception:
        traceback.print_exc()

    # print(model.predict(x_content))

