# -*- coding: utf-8 -*-  
'''
GPT2.0的损失函数

@author: luoyi
Created on 2021年4月7日
'''
import tensorflow as tf

import utils.conf as conf
import utils.logger_factory as logf
from models.gpt2.preprocess import loss_mask


#    GPT-2 pre-training loss
class Gpt2PreLoss(tf.keras.losses.Loss):
    '''Per-sample negative log-likelihood over non-<PAD> target words.

    A <PAD> mask is taken from y_true: positions holding a real word count
    toward the loss, <PAD> positions (id 0) are ignored entirely.
    '''
    def __init__(self, 
                 name='Gpt2PreLoss', 
                 sentence_maxlen=conf.GPT2.get_pre_training_sentence_maxlen(),
                 **kwargs):
        #    Forward name to the Keras base class: assigning self._name alone
        #    does not register the loss name with tf.keras.losses.Loss.
        super(Gpt2PreLoss, self).__init__(name=name, **kwargs)
        
        #    kept for backward compatibility with any external reader of _name
        self._name = name
        self._sentence_maxlen = sentence_maxlen
        pass

    def call(self, y_true, y_pred):
        '''Compute the mean NLL of each sample's effective (non-PAD) words.

            @param y_true: Tensor(batch_size, sentence_maxlen) — target word
                           ids; 0 is treated as <PAD> below (y_true > 0 filter)
            @param y_pred: Tensor(batch_size, sentence_maxlen, vocab_size) —
                           per-position word probabilities (assumed already
                           softmax-ed, since log is applied directly — confirm)
            @return: Tensor(batch_size,) — mean negative log-probability of
                     the effective words of each sample
        '''
        y_true = tf.cast(y_true, dtype=tf.int64)
        
        #    effective_count: number of non-PAD words per sample (from the
        #    project loss-mask helper); used below as ragged row lengths
        _, effective_count = loss_mask(y_true)
        
        #    (batch_idx, seq_idx) pairs of every non-PAD position, extended
        #    with the target word id -> full gather index into y_pred
        idx = tf.where(y_true > 0)
        idx = tf.concat([idx[:, :2], 
                         tf.expand_dims(tf.gather_nd(y_true, indices=idx), axis=-1)], axis=-1)
        
        #    predicted probability of each correct word
        y_pred = tf.gather_nd(y_pred, indices=idx)          #    Tensor(sum(effective_count),)
        #    floor the probabilities so log(0) cannot produce inf/NaN losses
        y_pred = tf.clip_by_value(y_pred, 1e-12, 1.0)
        loss = -tf.math.log(y_pred)                         #    Tensor(sum(effective_count),)
        #    regroup the flat losses per sample, then average within each row
        loss = tf.RaggedTensor.from_row_lengths(loss, row_lengths=effective_count)      #    RaggedTensor(batch_size, None)
        loss = tf.math.reduce_mean(loss, axis=-1)           #    Tensor(batch_size,)
        
        tf.print('loss:', loss, output_stream=logf.get_logger_filepath('gpt2_losses'))
        
        return loss

    pass


