# -*- coding: utf-8 -*-  
'''
Loss functions.

@author: luoyi
Created on 2021-03-18
'''
import tensorflow as tf

import utils.conf as conf


#    seq2seq loss: per-token multi-class cross-entropy
class Seq2SeqLoss(tf.keras.losses.Loss):
    """Sequence-to-sequence loss.

    For every token position, gathers the predicted probability of the true
    word index from the model's distribution over the word dictionary and
    returns the categorical cross-entropy ``-log(p)`` per token.
    """

    def __init__(self,
                 name='Seq2SeqLoss',
                 batch_size=conf.DATASET.get_batch_size(),
                 max_len=conf.TEXT.get_max_len(),
                 **kwargs):
        '''
            @param name: loss name
            @param batch_size: batch size
            @param max_len: maximum sentence length (shorter sentences are padded with 0)
        '''
        super(Seq2SeqLoss, self).__init__(name=name, **kwargs)

        self._batch_size = batch_size
        self._max_len = max_len
        pass


    def call(self, y_true, y_pred):
        '''
            @param y_true: Tensor (batch_size, max_len)
                                    true word indices; sentences shorter than
                                    max_len are padded with 0
            @param y_pred: Tensor (batch_size, max_len, word_dict_size)
                                    predicted probability distribution over the
                                    word dictionary for every token position

            @return: Tensor (batch_size, max_len)
                                per-token loss (reduction is left to Keras)
        '''
        #    Build (batch, position, word) index triples to gather, for each
        #    token, the predicted probability of the true word.
        idx_batch_size = tf.range(self._batch_size)
        idx_batch_size = tf.repeat(idx_batch_size, repeats=self._max_len, axis=-1)                          #    Tensor(batch_size * max_len, )
        idx_max_len = tf.range(self._max_len)
        idx_max_len = tf.repeat(tf.expand_dims(idx_max_len, axis=0), repeats=self._batch_size, axis=0)
        idx_max_len = tf.reshape(idx_max_len, shape=(self._batch_size * self._max_len,))                    #    Tensor(batch_size * max_len, )
        idx_word = tf.reshape(tf.cast(y_true, dtype=tf.int32), shape=(self._batch_size * self._max_len,))   #    Tensor(batch_size * max_len, )
        idx = tf.stack([idx_batch_size, idx_max_len, idx_word], axis=-1)                                    #    Tensor(batch_size * max_len, 3)
        p_pred = tf.gather_nd(y_pred, indices=idx)                                                          #    Tensor(batch_size * max_len, )

        #    BUG FIX: multi-class cross-entropy for the true class is -log(p).
        #    The original computed -log(p) - log(1 - p): the second binary-CE
        #    term was written with coefficient (1 - 0) instead of (1 - y) = 0,
        #    which wrongly penalizes confident CORRECT predictions (p -> 1
        #    drives -log(1 - p) -> inf).
        #    Clip to avoid log(0) -> inf/NaN on hard-0 probabilities.
        p_pred = tf.clip_by_value(p_pred, 1e-7, 1.0)
        loss = -tf.math.log(p_pred)                                                                         #    Tensor(batch_size * max_len, )
        #    NOTE(review): padded positions (word index 0) are NOT masked here —
        #    presumably handled upstream via sample weights; confirm, otherwise
        #    padding contributes to the loss.
        loss = tf.reshape(loss, shape=(self._batch_size, self._max_len))
        return loss

    pass
