# -*- coding: utf-8 -*-  
'''
TPLinker loss function.

@author: luoyi
Created on 2021-06-24
'''
import tensorflow as tf

import utils.logger_factory as logf
import utils.conf as conf
import utils.relationships as rel
from models.tplinker.process import parse_y, relative_idx_et, relative_idx_sht


#    TPLinker loss
class TPLinkerLosser(tf.keras.losses.Loss):
    '''Joint TPLinker loss: weighted sum of the NER (entity head-to-tail)
    loss and the RE (subject/object head & tail handshake) loss.

        loss = λ[NER] * loss_et_to_et + λ[RE] * loss_sht_to_sht
    '''
    def __init__(self, 
                 fc_layer=None, 
                 
                 max_sen_len=conf.TPLINKER.get_max_sentence_len(),
                 rel_size = len(rel.id_rel),
                 
                 loss_lamda_ner = conf.TPLINKER.get_loss_lamda_ner(),
                 loss_lamda_re = conf.TPLINKER.get_loss_lamda_re(),
                 **kwargs):
        '''
            @param fc_layer: model layer exposing get_et_to_et() / get_sht_to_sht() predictions
            @param max_sen_len: maximum sentence length, including the [CLS]/[EOS] tokens
            @param rel_size: number of relation types
            @param loss_lamda_ner: weight λ[NER] applied to the entity loss
            @param loss_lamda_re: weight λ[RE] applied to the relation loss
        '''
        super(TPLinkerLosser, self).__init__(**kwargs)
        
        self._max_sen_len = max_sen_len - 2                                     #    drop the leading [CLS] and trailing [EOS]
        self._rel_size = rel_size
        
        self._fc_layer = fc_layer
        
        self._loss_lamda_ner = loss_lamda_ner
        self._loss_lamda_re = loss_lamda_re
        
        #    Both branches of the original piecewise formula
        #    int(((1+m-2)(m-2)/2 [+ (m-2)/2]) / (m-2)) reduce algebraically to
        #    floor(m/2) for integer m (and this form avoids a division by zero
        #    at max_sen_len == 2).
        self._fold_num = max_sen_len // 2
    
    def call(self, y_true, y_pred):
        '''Compute the total weighted loss.
            @param y_true: packed label tensor; decoded by parse_y
            @param y_pred: unused — predictions are read from self._fc_layer
            @return: Tensor(batch_size, ) per-sample loss
        '''
        #    fetch predictions from the model's fc layer
        X_et_to_et = self._fc_layer.get_et_to_et()                              #    Tensor(batch_size, ∑max_sen_len, 2)
        X_sht_to_sht = self._fc_layer.get_sht_to_sht()                          #    Tensor(batch_size, ∑max_sen_len, rel_size, 2, 3)
        
        tf.print('----------------------------------------------------------------', output_stream=logf.get_logger_filepath('tplinker_losses'))
        
        #    decode labels    Tensor(batch_size, max_handshake_size, 2), padded with -1
        Y_et_to_et, Y_et_no, \
            Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no, \
            Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no = parse_y(y_true)
        
        loss_et_to_et = self.loss_et_to_et(X_et_to_et, Y_et_to_et, Y_et_no)
        loss_sht_to_sht = self.loss_sht_to_sht(X_sht_to_sht, 
                                               Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no,
                                               Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no
                                               )
        
        tf.print('loss_et_to_et:', loss_et_to_et, ' loss_sht_to_sht:', loss_sht_to_sht, output_stream=logf.get_logger_filepath('tplinker_losses'))
        
        #    weighted sum
        #    loss = λ[NER] * loss_ner + λ[RE] * loss_re
        #    loss_ner = loss_et_pos + loss_et_neg
        #    loss_re = loss_sh + loss_st + loss_sht_no
        loss = self._loss_lamda_ner * loss_et_to_et \
                 + self._loss_lamda_re * loss_sht_to_sht
        
        return loss
    
    #    NER loss: entity head-to-tail handshakes
    def loss_et_to_et(self, X_et_to_et, Y_et_to_et, Y_et_no):
        '''loss_et = positive-handshake loss + negative-handshake loss
            @param X_et_to_et: Tensor(batch_size, ∑max_sen_len, 2) predicted {no-handshake, handshake} distribution
            @param Y_et_to_et: Tensor(batch_size, max_sen_len) positive handshake indices per batch (-1 padded)
            @param Y_et_no: Tensor(batch_size, max_sen_len) negative (no-handshake) indices per batch (-1 padded)
            @return: Tensor(batch_size, )
        '''
        #    positive handshakes: -log P(class 1) at each labelled index
        num_et, idx_et = relative_idx_et(Y_et_to_et)
        pred_et = tf.gather_nd(X_et_to_et, indices=idx_et)                      #    Tensor(sum_et, 2)
        loss_et = -tf.math.log(pred_et[:, 1])
        loss_et = tf.RaggedTensor.from_row_lengths(loss_et, row_lengths=num_et) #    RaggedTensor(batch_size, None)
        loss_et = tf.math.reduce_mean(loss_et, axis=-1)                         #    Tensor(batch_size, )
        #    guard empty rows — reduce_mean over an empty ragged row yields NaN
        #    (same guard as loss_sht_to_sht_from_X_Yidx)
        loss_et = tf.where(num_et == 0, tf.zeros_like(loss_et), loss_et)
        
        #    negative handshakes: -log P(class 0) at each labelled index
        num_no, idx_no = relative_idx_et(Y_et_no)
        pred_no = tf.gather_nd(X_et_to_et, indices=idx_no)                      #    Tensor(sum_et, 2)
        loss_no = -tf.math.log(pred_no[:, 0])
        loss_no = tf.RaggedTensor.from_row_lengths(loss_no, row_lengths=num_no) #    RaggedTensor(batch_size, None)
        loss_no = tf.math.reduce_mean(loss_no, axis=-1)                         #    Tensor(batch_size, )
        loss_no = tf.where(num_no == 0, tf.zeros_like(loss_no), loss_no)
        
        tf.print('loss_et:', loss_et, ' loss_no:', loss_no, output_stream=logf.get_logger_filepath('tplinker_losses'))
        
        return loss_et + loss_no
    
    #    RE loss: subject/object head and tail handshakes
    def loss_sht_to_sht(self, 
                        X_sht_to_sht, 
                        Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no,
                        Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no
                        ):
        '''
            loss_sht_to_sht = loss_sh_to_sh + loss_st_to_st + loss_no_sht
            loss_sh_to_sh = loss_sh_to_sh_1 + loss_sh_to_sh_2
            loss_st_to_st = loss_st_to_st_1 + loss_st_to_st_2
            
            NOTE(review): the comment in call() describes X_sht_to_sht as
            (batch_size, ∑max_sen_len, rel_size, 2, 3) while this docstring
            originally said (batch_size, rel_size, ∑max_sen_len, 2, 3) —
            the slicing below only relies on axis 3 being the sh/st slot
            and axis 4 being the 3-way class, so both layouts work here.
            
            @param X_sht_to_sht: Tensor(batch_size, rel_size, ∑max_sen_len, 2, 3)
            @param Y_sh_to_sh_1: RaggedTensor(batch_size, None, 2)
            @param Y_sh_to_sh_2: RaggedTensor(batch_size, None, 2)
            @param Y_sh_to_sh_no: RaggedTensor(batch_size, None, 2)
            @param Y_st_to_st_1: RaggedTensor(batch_size, None, 2)
            @param Y_st_to_st_2: RaggedTensor(batch_size, None, 2)
            @param Y_st_to_st_no: RaggedTensor(batch_size, None, 2)
            @return: Tensor(batch_size, )
        '''
        #    loss_sh_to_sh = loss_sh_to_sh_1 + loss_sh_to_sh_2 + loss_sh_no  (sh slot = index 0 on axis 3)
        loss_sh_to_sh_1 = self.loss_sht_to_sht_from_X_Yidx(X_sht_to_sht[:, :, :, 0, :], Y_sh_to_sh_1, 1, 'sh_to_sh')
        loss_sh_to_sh_2 = self.loss_sht_to_sht_from_X_Yidx(X_sht_to_sht[:, :, :, 0, :], Y_sh_to_sh_2, 2, 'sh_to_sh') 
        loss_sh_no = self.loss_sht_to_sht_from_X_Yidx(X_sht_to_sht[:, :, :, 0, :], Y_sh_to_sh_no, 0, 'sh_to_sh') 

        #    loss_st_to_st = loss_st_to_st_1 + loss_st_to_st_2 + loss_st_no  (st slot = index 1 on axis 3)
        loss_st_to_st_1 = self.loss_sht_to_sht_from_X_Yidx(X_sht_to_sht[:, :, :, 1, :], Y_st_to_st_1, 1, 'st_to_st')
        loss_st_to_st_2 = self.loss_sht_to_sht_from_X_Yidx(X_sht_to_sht[:, :, :, 1, :], Y_st_to_st_2, 2, 'st_to_st') 
        #    BUGFIX: originally sliced slot 0 (sh) here — the st no-handshake
        #    loss must be computed against the st predictions (slot 1),
        #    matching loss_st_to_st_1/2 above.
        loss_st_no = self.loss_sht_to_sht_from_X_Yidx(X_sht_to_sht[:, :, :, 1, :], Y_st_to_st_no, 0, 'st_to_st') 
        
        loss = loss_sh_to_sh_1 + loss_sh_to_sh_2 + loss_sh_no \
                + loss_st_to_st_1 + loss_st_to_st_2 + loss_st_no
        
        tf.print('loss_sh_to_sh_1:', loss_sh_to_sh_1, ' loss_sh_to_sh_2:', loss_sh_to_sh_2, ' loss_sh_no:', loss_sh_no, output_stream=logf.get_logger_filepath('tplinker_losses'))
        tf.print('loss_st_to_st_1:', loss_st_to_st_1, ' loss_st_to_st_2:', loss_st_to_st_2, ' loss_st_no:', loss_st_no, output_stream=logf.get_logger_filepath('tplinker_losses'))
        
        return loss

    #    per-slot cross-entropy: mean -log P(class = val) over each batch's labelled indices
    def loss_sht_to_sht_from_X_Yidx(self, 
                        x, 
                        y,
                        val, 
                        log_tag):
        '''
            @param x: Tensor(batch_size, rel_size, ∑max_sen_len, 3)
            @param y: RaggedTensor(batch_size, None, 2) labelled indices into x
            @param val: int — which class index of x's last axis is the target
            @param log_tag: logging tag (currently unused in the body)
            @return: Tensor(batch_size, )
        '''
        #    per-batch count of valid indices, and the flattened gather indices
        num, idx = relative_idx_sht(y)
        
        #    gather predictions at the labelled positions and take -log of the target class
        pred = tf.gather_nd(x, indices=idx)                         #    Tensor(sum_x, 3)
        pred = pred[:, val]                                         #    Tensor(sum_x, )
        loss = -tf.math.log(pred)
        loss = tf.RaggedTensor.from_row_lengths(loss, row_lengths=num)  #    RaggedTensor(batch_size, None)
        loss = tf.math.reduce_mean(loss, axis=-1)
        
        #    a batch with no labelled indices contributes zero loss
        loss = tf.where(num == 0, tf.zeros_like(loss), loss)
        return loss


