# -*- coding: utf-8 -*-  
'''
Transformer loss function:
    multi-class cross entropy (masked so <PAD> positions are ignored)

@author: luoyi
Created on 2021-03-22
'''
import tensorflow as tf

import utils.conf as conf
import utils.logger_factory as logf


#    Transformer loss function
class TransformerLoss(tf.keras.losses.Loss):
    """Masked multi-class cross-entropy loss for transformer outputs.

    For every non-padding target word the loss is -log(p), where p is the
    probability the model assigned to that word; the per-word losses are
    averaged within each sentence. <PAD> positions (word index 0) are
    excluded from the loss entirely.
    """

    def __init__(self,
                 name='TransformerLoss',
                 batch_size=conf.DATASET.get_batch_size(),
                 sentence_maxlen=conf.TRANSFORMER.get_sentence_maxlen() + 1,
                 **kwargs):
        '''
            @param name: loss name forwarded to tf.keras.losses.Loss
            @param batch_size: fixed batch size (gather indices are precomputed from it,
                               so the loss only works with full batches of this size)
            @param sentence_maxlen: padded sentence length; configured maxlen + 1
                                    (presumably for an appended start/end token — TODO confirm)
        '''
        super(TransformerLoss, self).__init__(name=name, **kwargs)

        self._batch_size = batch_size
        self._sentence_maxlen = sentence_maxlen
        pass

    def call(self, y_true, y_pred):
        '''
            @param y_true: Tensor(batch_size, sentence_maxlen) target word indices; 0 == <PAD>
            @param y_pred: Tensor(batch_size, sentence_maxlen, dict_size) predicted word probabilities
            @return: Tensor(batch_size, ) mean cross entropy over the non-<PAD> words of each sentence
        '''
        #    <PAD> positions are not counted towards the loss.
        #    Number of valid (non-zero) words in each sentence of the batch.
        num_words = tf.math.count_nonzero(y_true, axis=-1)

        #    Build (batch, position, word) index triples so gather_nd can pull,
        #    for every target word, the probability the model assigned to it.
        idx_batch = tf.range(self._batch_size)
        idx_batch = tf.repeat(idx_batch, repeats=self._sentence_maxlen, axis=-1)
        idx_sentence = tf.range(self._sentence_maxlen)
        idx_sentence = tf.repeat(tf.expand_dims(idx_sentence, axis=0), repeats=self._batch_size, axis=0)
        idx_sentence = tf.reshape(idx_sentence, shape=(self._batch_size * self._sentence_maxlen, ))
        idx_dict = tf.reshape(tf.cast(y_true, dtype=tf.int32), shape=(self._batch_size * self._sentence_maxlen, ))
        idx = tf.stack([idx_batch, idx_sentence, idx_dict], axis=-1)                    #    Tensor(batch_size * sentence_maxlen, 3)
        #    Drop triples whose word index is 0 (<PAD> == 0).
        idx = tf.boolean_mask(idx, idx[:, 2] > 0)                                       #    Tensor(sum(num_words), 3)
        y_pred = tf.gather_nd(y_pred, indices=idx)                                      #    Tensor(sum(num_words), )

        #    Cross entropy: -log(p) per word, then mean per sentence.
        #    NOTE(review): p == 0 produces inf here — consider clipping y_pred
        #    away from zero if the model can emit exact zeros.
        loss = -tf.math.log(y_pred)                                                     #    Tensor(sum(num_words), )
        loss = tf.RaggedTensor.from_row_lengths(loss, row_lengths=num_words)            #    RaggedTensor(batch_size, None)
        #    NOTE(review): a sentence with zero valid words yields NaN (mean of an
        #    empty row) — verify callers never pass an all-<PAD> row.
        loss = tf.math.reduce_mean(loss, axis=-1)                                       #    Tensor(batch_size, )
        tf.print('loss:', loss, output_stream=logf.get_logger_filepath('transformer_losses'))

        return loss

    pass
