# -*- coding: utf-8 -*-  
'''
GPT-2 network structure.

@author: luoyi
Created on 2021-04-07
'''
import tensorflow as tf
import numpy as np

import utils.conf as conf
import utils.dictionaries as dictionaries
from models.abstract_nets import AbstractModel
from models.gpt2.part import EmbeddingLayer, EmbeddingOpType, PositionEmbeddingLayer, TransformerBlockLayer, PreOutputLayer
from models.gpt2.preprocess import padding_mask, position_mask
from models.gpt2.losses import Gpt2PreLoss
from models.gpt2.metrics import Gpt2PreMetric


#    GPT-2 network
class Gpt2Model(AbstractModel):
    '''GPT-2 style transformer language model for pre-training.

        Pipeline: word embedding + position embedding -> n stacked
        transformer blocks -> final LayerNormalization -> pre-training
        output head (which reuses the word-embedding weights).
    '''
    def __init__(self,
                 name='Gpt2Model',
                 batch_size=conf.GPT2.get_batch_size(),
                 n_head_attention=conf.GPT2.get_n_head_attention(),
                 sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                 f_model=conf.GPT2.get_f_model(),
                 d_model=conf.GPT2.get_d_model(),
                 dropout_rate=conf.GPT2.get_dropout_rate(),
                 learning_rate=conf.GPT2.get_learning_rate(),
                 n_block=conf.GPT2.get_n_block(),
                 vocab_size=dictionaries.dict_size(),
                 input_shape=None,
                 pre_embedding_weights=None):
        '''
            @param name: model name
            @param batch_size: number of samples per batch
            @param n_head_attention: number of attention heads per transformer block
            @param sentence_maxlen: fixed sentence length (tokens per sample)
            @param f_model: feed-forward inner dimension of each block
            @param d_model: embedding / hidden dimension
            @param dropout_rate: dropout rate used inside the blocks
            @param learning_rate: optimizer learning rate
            @param n_block: number of stacked transformer blocks
            @param vocab_size: vocabulary size
            @param input_shape: expected input shape; defaults to
                                [batch_size, sentence_maxlen] (avoids a
                                shared mutable default argument)
            @param pre_embedding_weights: optional pre-trained word-embedding weights
        '''
        self._name = name
        self._batch_size = batch_size
        self._n_head_attention = n_head_attention
        self._sentence_maxlen = sentence_maxlen
        self._f_model = f_model
        self._d_model = d_model
        self._dropout_rate = dropout_rate
        self._learning_rate = learning_rate
        self._n_block = n_block
        self._vocab_size = vocab_size
        self._pre_embedding_weights = pre_embedding_weights             #    pre-trained word-embedding weights
        
        #    default input shape derives from the configured sizes
        if input_shape is None:
            input_shape = [batch_size, sentence_maxlen]
        super(Gpt2Model, self).__init__(learning_rate=learning_rate, name=name, input_shape=input_shape)
    
    
    #    Create optimizer (Adam with GPT-2's beta settings)
    def create_optimizer(self, learning_rate=0.001):
        return tf.optimizers.Adam(learning_rate=learning_rate, beta_1=0.9, beta_2=0.98)
    #    Create pre-training loss
    def create_loss(self):
        return Gpt2PreLoss(sentence_maxlen=self._sentence_maxlen)
    #    Create pre-training metrics
    def create_metrics(self):
        return [Gpt2PreMetric(sentence_maxlen=self._sentence_maxlen)]
    
    def assembling(self):
        '''Build all sub-layers of the network.'''
        #    word embedding and position embedding
        self._word_embedding = EmbeddingLayer(name='word_embedding', 
                                              vocab_size=self._vocab_size, 
                                              d_model=self._d_model, 
                                              op=EmbeddingOpType.embedding, 
                                              pre_weights=self._pre_embedding_weights)
        self._position_embedding = PositionEmbeddingLayer(name='pos_embedding', 
                                                          sentence_maxlen=self._sentence_maxlen, 
                                                          batch_size=self._batch_size, 
                                                          d_model=self._d_model)
        
        #    n transformer block layers
        self._transformer_blocks = [TransformerBlockLayer(name='transformer_block_' + str(i), 
                                                          batch_size=self._batch_size, 
                                                          n_head_attention=self._n_head_attention, 
                                                          sentence_maxlen=self._sentence_maxlen, 
                                                          f_model=self._f_model, 
                                                          d_model=self._d_model, 
                                                          dropout_rate=self._dropout_rate) for i in range(self._n_block)]
        #    final layer_norm
        self._out_norm = tf.keras.layers.LayerNormalization()
        
        #    pre-training output head
        self._pre_output_layer = PreOutputLayer(name='pre_output', word_embedding=self._word_embedding)
    
    def call(self, inputs, training=None, mask=None):
        '''
            @param inputs: token ids of the sentences    Tensor(batch_size, sentence_maxlen)
            @param training: standard Keras training flag (not used directly here)
            @param mask: ignored -- the mask is always rebuilt internally by
                         combining the padding mask and the look-ahead
                         (position) mask
            @return: pre-training output of self._pre_output_layer
        '''
        inputs = tf.cast(inputs, dtype=tf.int64)
        
        #    combined mask: element-wise max of padding mask and look-ahead mask
        mask_pad = padding_mask(inputs)
        mask_pos = position_mask(self._sentence_maxlen)
        mask = tf.maximum(mask_pad, mask_pos)
        
        #    word embedding vector + position embedding vector
        word_vec = self._word_embedding(inputs, op=EmbeddingOpType.embedding)
        pos_vec = self._position_embedding(inputs)
        x = word_vec + pos_vec              #    Tensor(batch_size, sentence_maxlen, d_model)
        
        #    pass through the n transformer blocks
        for transformer_block in self._transformer_blocks:
            x = transformer_block(x, mask=mask)
        
        #    final layer_norm
        x = self._out_norm(x)
        
        #    pre-training output
        return self._pre_output_layer(x)
    
    
    #    Convert prediction scores to readable text
    def translate(self, preds):
        '''
            @param preds: ndarray(batch_size, sentence_maxlen, vocab_size)
            @return: list[predicted text 1, predicted text 2, ...batch_size items...]
        '''
        res = []
        for pred in preds:
            pred = np.argmax(pred, axis=-1)         #    ndarray(sentence_maxlen, )
            pred = pred.tolist()                    #    list of token ids
            
            pred = dictionaries.id2word_idlist(pred)
            #    truncate at the first end-of-sentence marker; if the model
            #    never emitted <EOS>, keep the whole sequence instead of
            #    crashing (list.index raises ValueError when absent)
            if '<EOS>' in pred:
                pred = pred[:pred.index('<EOS>')]
            res.append(''.join(pred))
        return res
    
    
    #    Test output
    def test(self, context):
        '''
            @param context: test text (str or list of characters)
            @return: predicted output text
        '''
        #    convert the text to token ids, prepend <GO>, and normalize length
        if isinstance(context, str): context = list(context)
        x = dictionaries.word2idx_slist(['<GO>'] + context)
        if (len(x) > self._sentence_maxlen): x = x[:self._sentence_maxlen]
        if (len(x) < self._sentence_maxlen): x = x + [0] * (self._sentence_maxlen - len(x))
        #    shape into Tensor(1, sentence_maxlen)
        x = tf.cast(tf.convert_to_tensor(x), dtype=tf.int64)
        x = tf.expand_dims(x, axis=0)
        
        #    predict, then convert to readable text
        preds = self.predict(x)
        res = self.translate(preds)
        return res[0]
    
    
#     #    预测
#     def divination(self, context, max_sen_len=conf.GPT2.get_pre_training_sentence_maxlen()):
#         '''
#             @param context: 上文文本
#             @return: 预测输出文本
#         '''
#         #    测试文本转为词编码，并且前面追加<GO>，并且统一长度
#         if (type(context) is str): context = list(context)
#         x = dictionaries.word2idx_slist(['<GO>'] + context)
#         if (len(x) > self._sentence_maxlen): x = x[:self._sentence_maxlen]
#         if (len(x) < self._sentence_maxlen): x = x + [0] * (self._sentence_maxlen - len(x))
#         
#         #    开始预测
#         prev = context          #    上文
#         i = 0                   #    当前以及生成的词数
#         while (i < max_sen_len):
#             #    上文过前向，拿到针对上文的预测
#             pred = self.call(prev)
#             
#             #    取针对上文预测的最后1个词
#             
#             
#             i += 1
#             pass
#         pass
    
