# -*- coding: utf-8 -*-  
'''
transformer的预处理

    - x_preprocess
        输入：frist_couplet: list[str1, str2...] 上联
             second_couplet: list[str1, str2...] 下联
        输出：ndarray(batch_size, 2, sentence_maxlen+1)
                 encoder_inputs: ndarray(batch_size, sentence_maxlen+1) 编码器词编码向量
                                    batch_size：批量大小
                                    sentence_maxlen + 1：最大句子长度。末尾追加<PAD>
                 decoder_inputs: ndarray(batch_size, sentence_maxlen+1) 解码器词编码向量
                                    batch_size：批量大小
                                    sentence_maxlen + 1：最大句子长度。开头追加<GO>，其他的词整体右移1个位置
            
    - y_preprocess
        输入：second_couplet: list[str1, str2...] 下联
        输出：ndarray(batch_size, sentence_maxlen+1)
                batch_size：批量大小
                sentence_maxlen + 1：最大句子长度。末尾追加<PAD>
    
@author: luoyi
Created on 2021年3月22日
'''
import tensorflow as tf

import utils.dictionaries as dictionaries
import utils.conf as conf


#    x数据预处理
def x_preprocess(frist_couplet, second_couplet, sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen() + 2):
    '''Build the encoder / decoder input id vectors for one couplet pair.
        @param frist_couplet: list[str] tokens of the first (upper) line
        @param second_couplet: list[str] tokens of the second (lower) line
        @param sentence_maxlen: maximum sequence length (markers included)
        @return: Tensor(2, sentence_maxlen)
                     row 0: encoder ids  <GO> + tokens + <EOS>, right-padded / tail-truncated
                     row 1: decoder ids  <GO> + tokens,         right-padded / tail-truncated
    '''
    #    first line -> word ids with <GO>/<EOS> markers
    encoder_ids = dictionaries.word2idx_slist(['<GO>'] + frist_couplet + ['<EOS>'])
    #    BUGFIX: the original checked len() of the outer batch list (always 1),
    #    so over-long sentences were never truncated here and pad_sequences'
    #    default truncating='pre' would silently drop the leading <GO> token.
    #    Truncate the id sequence itself, keeping the head as intended.
    encoder_ids = encoder_ids[:sentence_maxlen]
    encoder_inputs = tf.keras.preprocessing.sequence.pad_sequences([encoder_ids], padding='post', maxlen=sentence_maxlen)
    
    #    second line -> word ids, shifted right by prepending <GO>
    decoder_ids = dictionaries.word2idx_slist(['<GO>'] + second_couplet)
    decoder_ids = decoder_ids[:sentence_maxlen]
    decoder_inputs = tf.keras.preprocessing.sequence.pad_sequences([decoder_ids], padding='post', maxlen=sentence_maxlen)
    
    #    stack as (2, sentence_maxlen): encoder row first, decoder row second
    return tf.concat([encoder_inputs, decoder_inputs], axis=0)


#    y数据预处理
def y_preprocess(second_couplet, sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen() + 2):
    '''Build the target id vector for the second (lower) line.
        @param second_couplet: list[str] tokens of the second (lower) line
        @param sentence_maxlen: maximum sequence length (markers included)
        @return: Tensor(sentence_maxlen, )
                    word ids of tokens + <EOS>, right-padded / tail-truncated
                    (note: only <EOS> is appended here — the matching <GO>-shifted
                    sequence is produced by x_preprocess as the decoder input)
    '''
    target_ids = dictionaries.word2idx_slist(second_couplet + ['<EOS>'])
    #    BUGFIX: the original checked len() of the outer batch list (always 1),
    #    so truncation never fired and pad_sequences' default truncating='pre'
    #    would drop leading tokens. Truncate the id sequence itself instead.
    target_ids = target_ids[:sentence_maxlen]
    decoder_inputs = tf.keras.preprocessing.sequence.pad_sequences([target_ids], padding='post', maxlen=sentence_maxlen)
    #    drop the batch dimension added for pad_sequences
    decoder_inputs = tf.squeeze(decoder_inputs, axis=0)
    return decoder_inputs


#    根据词编码向量计算padding掩码矩阵
def padding_mask_matrix(seq_src, mutil_groups=conf.TRANSFORMER.get_mutil_head_attention_groups()):
    ''' Compute the padding mask for a batch of token-id sequences.
        @param seq_src: Tensor(batch_size, seq_len) token ids; id 0 marks padding
        @param mutil_groups: number of attention heads
        @return: Tensor(batch_size, mutil_groups, seq_len, seq_len) float32
                    0.0 where a real token is present, 1.0 at padded positions
    '''
    #    1.0 at padded (id == 0) positions, 0.0 at real tokens —
    #    equivalent to the original compare/tile/where chain, in one cast
    mask = tf.cast(tf.math.equal(seq_src, 0), tf.float32)
    mask = mask[:, tf.newaxis, tf.newaxis, :]            #    (batch_size, 1, 1, seq_len)
    #    tf.shape keeps this correct for dynamic sequence lengths
    #    (the original read seq_src.shape[-1], which needs a static shape)
    seq_len = tf.shape(seq_src)[-1]
    return tf.tile(mask, multiples=[1, mutil_groups, seq_len, 1])
#    根据给定的句子长度计算sequence掩码矩阵
def sequence_mask_matrix(batch_size=conf.DATASET.get_batch_size(), sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen() + 2, mutil_groups=conf.TRANSFORMER.get_mutil_head_attention_groups()):
    '''Build the look-ahead (sequence) mask for decoder self-attention.
        @param batch_size: batch size
        @param sentence_maxlen: sequence length
        @param mutil_groups: number of attention heads
        @return: Tensor(batch_size, mutil_groups, sentence_maxlen, sentence_maxlen) float32
                    0.0 at positions a query may attend to (current and earlier),
                    1.0 at future positions that must be masked
    '''
    #    lower triangle (diagonal included) marks the visible positions
    ones = tf.ones(shape=(batch_size, sentence_maxlen, sentence_maxlen), dtype=tf.int8)
    visible = tf.linalg.band_part(ones, -1, 0) > 0
    #    invert: visible -> 0.0, future -> 1.0
    mask = tf.where(visible, tf.zeros_like(visible, dtype=tf.float32), tf.ones_like(visible, dtype=tf.float32))
    #    replicate once per attention head
    mask = tf.repeat(mask[:, tf.newaxis, :, :], repeats=mutil_groups, axis=1)
    return mask

