# -*- coding: utf-8 -*-  
'''
BERT-related loss functions (pre-training: NSP + MLM).

@author: luoyi
Created on 2021-04-15
'''
import tensorflow as tf

import utils.conf as conf
import utils.logger_factory as logf
from utils.dictionary import Dictionaries


#    Combined pre-training loss
class PreLoss(tf.keras.losses.Loss):
    '''Weighted sum of the NSP and MLM pre-training losses.

        loss = lamud_loss_pre_nsp * NSP_loss + lamud_loss_pre_mlm * MLM_loss

        Component losses are also written to the 'bert_losses' log stream
        on every call for debugging.
    '''
    def __init__(self,
                 name='PreLoss',
                 lamud_loss_pre_nsp=conf.BERT.get_lamud_loss_pre_nsp(),
                 lamud_loss_pre_mlm=conf.BERT.get_lamud_loss_pre_mlm(),
                 max_sen_len=conf.NLU.get_max_sen_len(),
                 vocab_size=Dictionaries.instance().size(),
                 **kwargs):
        '''
            @param name: loss name; also used to derive sub-loss names
            @param lamud_loss_pre_nsp: weight of the NSP component
            @param lamud_loss_pre_mlm: weight of the MLM component
            @param max_sen_len: maximum sentence length (forwarded to the MLM loss)
            @param vocab_size: vocabulary size (forwarded to the MLM loss)
        '''
        super(PreLoss, self).__init__(name=name, **kwargs)
        
        self._max_sen_len = max_sen_len
        self._vocab_size = vocab_size
        
        self._lamud_loss_pre_nsp = lamud_loss_pre_nsp
        self._loss_nsp = PreNSPLoss(name=name + '_nsp')
        
        self._lamud_loss_pre_mlm = lamud_loss_pre_mlm
        self._loss_mlm = PreMLMLoss(name=name + '_mlm', max_sen_len=max_sen_len, vocab_size=vocab_size)
        
        #    hoisted out of call(): the log file path is invariant per instance,
        #    the original looked it up four times per call
        self._log_stream = logf.get_logger_filepath('bert_losses')
        pass
    
    def call(self, y_true, y_pred):
        '''Compute the weighted total loss and log each component.

            @param y_true: label tensor as expected by PreNSPLoss / PreMLMLoss
            @param y_pred: prediction RaggedTensor as expected by the sub-losses
            @return: per-sample total loss (NSP part + MLM part)
        '''
        #    sub-losses are invoked via .call() deliberately, bypassing
        #    Keras reduction so per-sample values can be combined here
        loss_nsp = self._lamud_loss_pre_nsp * self._loss_nsp.call(y_true, y_pred)
        loss_mlm = self._lamud_loss_pre_mlm * self._loss_mlm.call(y_true, y_pred)
        
        loss = loss_nsp + loss_mlm
        
        tf.print('------------------------------------------------------------', output_stream=self._log_stream)
        tf.print('loss:', loss, output_stream=self._log_stream)
        tf.print('loss_nsp:', loss_nsp, output_stream=self._log_stream)
        tf.print('loss_mlm:', loss_mlm, output_stream=self._log_stream)
                
        return loss
    pass


#    NSP (next-sentence-prediction) loss
class PreNSPLoss(tf.keras.losses.Loss):
    '''Cross-entropy loss for predicting whether sentence 2 follows sentence 1.'''
    def __init__(self,
                 name='PreNSPLoss',
                 **kwargs):
        super(PreNSPLoss, self).__init__(name=name, **kwargs)
        pass
    
    def call(self, y_true, y_pred):
        '''
            @param y_pred: RaggedTensor with two parts:
                                [0]: Tensor(batch_size * 1, 2)                       softmax probabilities from [CLS]
                                    column 0: probability of a negative sample
                                    column 1: probability of a positive sample
                                [1]: Tensor(batch_size * max_sen_len, vocab_size)    per-token probabilities (unused here)
            @param y_true: Tensor(batch_size, 1+1+1, rewrite_max)
                                    row 0: positive sample flag (all 1 = yes, all 0 = no)
                                    row 1: indices of [MASK]ed tokens (unused here)
                                    row 2: original ids of [MASK]ed tokens (unused here)
            @return: Tensor(batch_size,) per-sample cross-entropy
        '''
        #    recover the 0/1 label per sample (row 0 is constant along the last axis)
        y_true = y_true[:, 0, :]
        y_true = tf.math.reduce_mean(y_true, axis=-1)       #    Tensor(batch_size, )
        #    BUGFIX: cast labels to int64 so they can be stacked with the int64
        #    output of tf.where and used as tf.gather_nd indices; the original
        #    stacked mixed dtypes, which tf.stack rejects
        label = tf.cast(y_true, tf.int64)
        
        #    probability the model assigned to each sample's true class
        y_pred = y_pred[0].to_tensor()                      #    Tensor(batch_size, 2)
        idx = tf.where(label >= 0)                          #    labels are 0/1, so this keeps every row
        idx = tf.stack([idx[:, 0], label], axis=-1)
        y_pred = tf.gather_nd(y_pred, indices=idx)          #    Tensor(batch_size, )
        
        #    cross-entropy; clamp to avoid log(0) -> -inf on a hard-zero probability
        loss = -tf.math.log(tf.clip_by_value(y_pred, 1e-9, 1.0))
        
        return loss
    pass


#    MLM (masked-language-model) loss
class PreMLMLoss(tf.keras.losses.Loss):
    '''Cross-entropy loss for predicting the original token at each [MASK] position.'''
    def __init__(self,
                 name='PreMLMLoss',
                 max_sen_len=conf.BERT.get_max_sen_len(),
                 vocab_size=Dictionaries.instance().size(),
                 **kwargs):
        '''
            @param max_sen_len: maximum sentence length used to reshape the flat predictions
            @param vocab_size: vocabulary size (last axis of the per-token predictions)
        '''
        super(PreMLMLoss, self).__init__(name=name, **kwargs)
        
        self._max_sen_len = max_sen_len
        self._vocab_size = vocab_size
        pass
    
    def call(self, y_true, y_pred):
        '''
            @param y_pred: RaggedTensor with two parts:
                                [0]: Tensor(batch_size * 1, 2)                       [CLS] softmax probabilities (unused here)
                                [1]: Tensor(batch_size * max_sen_len, vocab_size)    per-token prediction probabilities
            @param y_true: Tensor(batch_size, 1+1+1, rewrite_max)
                                    row 0: positive sample flag (unused here)
                                    row 1: indices of [MASK]ed tokens, -1 marks [PAD]
                                    row 2: original ids of [MASK]ed tokens, -1 marks [PAD]
            @return: Tensor(batch_size,) mean cross-entropy over each sample's masked tokens
        '''
        #    per-token prediction probabilities        Tensor(batch_size, max_sen_len-1, vocab_size)
        #    NOTE(review): reshape uses max_sen_len-1, i.e. the model output is
        #    assumed to exclude one position (presumably [CLS]) — TODO confirm
        y_pred = y_pred[1].to_tensor()
        y_pred = tf.reshape(y_pred, shape=(-1, self._max_sen_len-1, self._vocab_size))
        
        #    probabilities predicted for the true tokens at masked positions
        y_true_idx = y_true[:, 1, :]                            #    masked-token indices    Tensor(batch_size, rewrite_max)
        y_true_label = y_true[:, 2, :]                          #    masked-token ids        Tensor(batch_size, rewrite_max)
        #    number of real (non -1 padded) masked tokens per sample
        nums = tf.math.count_nonzero(y_true_idx + 1, axis=-1)
        idx = tf.where(y_true_idx >= 0)
        #    BUGFIX: cast to int64 so they can be stacked with the int64 output
        #    of tf.where and used as tf.gather_nd indices; the original stacked
        #    mixed dtypes, which tf.stack rejects
        y_true_idx = tf.cast(tf.gather_nd(y_true_idx, indices=idx), tf.int64)
        y_true_label = tf.cast(tf.gather_nd(y_true_label, indices=idx), tf.int64)
        idx = tf.stack([idx[:, 0], y_true_idx, y_true_label], axis=-1)
        #    predicted probability of the true token    Tensor(total masked tokens across the batch,)
        y_pred = tf.gather_nd(y_pred, indices=idx)
        
        #    cross-entropy; clamp to avoid log(0) -> -inf on a hard-zero probability
        loss = -tf.math.log(tf.clip_by_value(y_pred, 1e-9, 1.0))
        #    NOTE(review): a sample with zero masked tokens yields an empty ragged
        #    row here, so reduce_mean produces NaN for it — presumably the data
        #    pipeline guarantees at least one mask per sample; verify upstream
        loss = tf.RaggedTensor.from_row_lengths(loss, row_lengths=nums)
        loss = tf.math.reduce_mean(loss, axis=-1)
        
        return loss
    pass