# -*- coding: utf-8 -*-  
'''
crf-linker 损失函数
    crf损失 + tplinker的sht损失

@author: luoyi
Created on 2021年7月4日
'''
import tensorflow as tf

import utils.conf as conf
import utils.logger_factory as logf
from models.crflinker.process import parse_y, relative_idx_sht
from models.crflinker.process import score, score_full_path


#    crf-linker损失函数
#    crf-linker loss function
class CRFLinkerLosser(tf.keras.losses.Loss):
    '''Joint loss for the CRF-linker model.

        loss = lambda_ner * loss_crf + lambda_re * loss_tplinker
        loss_crf = log(Z(i)) - score(i)        (negative log-likelihood; see CRF docs)
        loss_tplinker = loss_sh_1 + loss_sh_2 + loss_sh_no
                        + loss_st_1 + loss_st_2 + loss_st_no

        Predictions are NOT taken from y_pred; they are pulled from the given
        crf/tplinker layers via their get_out() accessors.
    '''
    def __init__(self,
                 crf_layer=None,                                     #    CRF layer (kept only to read its output and transition matrix)
                 tplinker_layer=None,                                #    tplinker layer (kept only to read its output)
                 # NOTE(review): these defaults are evaluated once, when the class
                 # statement runs — later changes to conf are not picked up.
                 # TODO confirm this is intended.
                 loss_lamda_ner=conf.CRFLINKER.get_loss_lamda_ner(),
                 loss_lamda_re=conf.CRFLINKER.get_loss_lamda_re(),
                 **kwargs):
        '''
            @param crf_layer: layer exposing get_out() -> emission scores and
                              get_transfer() -> CRF transition matrix
            @param tplinker_layer: layer exposing get_out() -> tplinker scores
            @param loss_lamda_ner: weight of the CRF (NER) loss term
            @param loss_lamda_re: weight of the tplinker (RE) loss term
        '''
        super(CRFLinkerLosser, self).__init__(**kwargs)

        self._crf_layer = crf_layer
        self._tplinker_layer = tplinker_layer

        self._loss_lamda_ner = loss_lamda_ner
        self._loss_lamda_re = loss_lamda_re

    def call(self, y_true, y_pred):
        '''Compute loss = lambda_ner * loss_crf + lambda_re * loss_tplinker.

            @param y_true: Tensor(batch_size, 7, max_sen_len, 2)
            @param y_pred: unused — model outputs are read from the layers directly
            @return: per-batch loss tensor
        '''
        #    Hoisted: every tf.print in this method targets the same log file.
        log_stream = logf.get_logger_filepath('crflinker_losses')
        tf.print('============================================================', output_stream=log_stream)

        #    model predictions, read straight from the layers
        x_crf = self._crf_layer.get_out()
        x_tplinker = self._tplinker_layer.get_out()

        #    split y_true into the CRF tag sequence and the six tplinker index groups
        Y_id_pos, \
            Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no, \
            Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no = parse_y(y_true)

        #    loss = lambda_ner * loss_crf + lambda_re * loss_tplinker
        loss_crf = self._loss_crf(x_crf, Y_id_pos)

        tf.print('------------------------------------------------------------', output_stream=log_stream)

        loss_tplinker = self._loss_tplinker(x_tplinker,
                                            Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no,
                                            Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no)

        loss = self._loss_lamda_ner * loss_crf + self._loss_lamda_re * loss_tplinker

        tf.print('------------------------------------------------------------', output_stream=log_stream)
        tf.print('loss_crf:', loss_crf, output_stream=log_stream)
        tf.print('loss_tplinker:', loss_tplinker, output_stream=log_stream)
        tf.print('loss:', loss, output_stream=log_stream)
        return loss

    #    CRF negative log-likelihood
    def _loss_crf(self, x_crf, Y_id_pos):
        '''loss_crf = log(Z(i)) - score(i)

            @param x_crf: Tensor(batch_size, max_sen_len, pos_size) emission scores
            @param Y_id_pos: Tensor(batch_size, max_sen_len, )  gold tag ids,
                             including the [CLS] and [EOS] positions
        '''
        transfer = self._crf_layer.get_transfer()
        #    score of the gold path vs. log-partition over all paths
        score_i = score(x_crf, Y_id_pos, transfer)
        Z_i = score_full_path(x_crf, Y_id_pos, transfer)
        loss = Z_i - score_i

        log_stream = logf.get_logger_filepath('crflinker_losses')
        tf.print('score_i:', score_i, output_stream=log_stream)
        tf.print('Z_i:', Z_i, output_stream=log_stream)
        tf.print('loss:', loss, output_stream=log_stream)
        return loss

    #    tplinker loss over the six head/tail link groups
    def _loss_tplinker(self,
                      x_tplinker,
                      Y_sh_to_sh_1, Y_sh_to_sh_2, Y_sh_to_sh_no,
                      Y_st_to_st_1, Y_st_to_st_2, Y_st_to_st_no):
        '''loss = loss_sh_1 + loss_sh_2 + loss_sh_no
                    + loss_st_1 + loss_st_2 + loss_st_no

            @param x_tplinker: Tensor(batch_size, rel_size, max_vec_len, 2, 3)
            @param Y_sh_to_sh_1: Tensor(batch_size, max_vec_len, 2)
            @param Y_sh_to_sh_2: Tensor(batch_size, max_vec_len, 2)
            @param Y_sh_to_sh_no: Tensor(batch_size, max_vec_len, 2)
            @param Y_st_to_st_1: Tensor(batch_size, max_vec_len, 2)
            @param Y_st_to_st_2: Tensor(batch_size, max_vec_len, 2)
            @param Y_st_to_st_no: Tensor(batch_size, max_vec_len, 2)
        '''
        #    subject-head related losses (axis 3 index 0 = sh channel)
        loss_sh_1 = self._loss_sht_to_sht_from_X_Yidx(x_tplinker[:, :, :, 0, :], Y_sh_to_sh_1, 1, 'sh_to_sh')
        loss_sh_2 = self._loss_sht_to_sht_from_X_Yidx(x_tplinker[:, :, :, 0, :], Y_sh_to_sh_2, 2, 'sh_to_sh')
        loss_sh_no = self._loss_sht_to_sht_from_X_Yidx(x_tplinker[:, :, :, 0, :], Y_sh_to_sh_no, 0, 'sh_to_sh')

        #    subject-tail related losses (axis 3 index 1 = st channel)
        loss_st_1 = self._loss_sht_to_sht_from_X_Yidx(x_tplinker[:, :, :, 1, :], Y_st_to_st_1, 1, 'st_to_st')
        loss_st_2 = self._loss_sht_to_sht_from_X_Yidx(x_tplinker[:, :, :, 1, :], Y_st_to_st_2, 2, 'st_to_st')
        loss_st_no = self._loss_sht_to_sht_from_X_Yidx(x_tplinker[:, :, :, 1, :], Y_st_to_st_no, 0, 'st_to_st')

        loss = loss_sh_1 + loss_sh_2 + loss_sh_no + \
                loss_st_1 + loss_st_2 + loss_st_no

        #    BUGFIX: every label below used to print loss_sh_1; each label now
        #    logs its own component loss.
        log_stream = logf.get_logger_filepath('crflinker_losses')
        tf.print('loss_sh_1:', loss_sh_1, output_stream=log_stream)
        tf.print('loss_sh_2:', loss_sh_2, output_stream=log_stream)
        tf.print('loss_sh_no:', loss_sh_no, output_stream=log_stream)
        tf.print('loss_st_1:', loss_st_1, output_stream=log_stream)
        tf.print('loss_st_2:', loss_st_2, output_stream=log_stream)
        tf.print('loss_st_no:', loss_st_no, output_stream=log_stream)
        tf.print('loss:', loss, output_stream=log_stream)

        return loss

    #    per-group cross-entropy: mean of -log p(val) over each batch's valid indices
    def _loss_sht_to_sht_from_X_Yidx(self,
                        x,
                        y,
                        val,
                        log_tag):
        '''
            @param x: Tensor(batch_size, rel_size, max_vec_len, 3)
            @param y: RaggedTensor(batch_size, None, 2)  valid (rel, pos) indices
            @param val: int  which class index of x's last dim is the target
            @param log_tag: log marker (currently unused; kept for interface
                            compatibility — TODO confirm it can be dropped)
        '''
        #    per-batch count of valid indices, and the flattened gather indices
        num, idx = relative_idx_sht(y)

        #    gather predicted distributions at the valid indices and take -log p(val)
        pred = tf.gather_nd(x, indices=idx)                         #    Tensor(sum_x, 3)
        pred = pred[:, val]                                         #    Tensor(sum_x, )
        #    NOTE(review): pred == 0 yields inf here; no epsilon/clipping is
        #    applied — confirm upstream guarantees pred > 0 (e.g. softmax output).
        loss = -tf.math.log(pred)
        loss = tf.RaggedTensor.from_row_lengths(loss, row_lengths=num)  #    RaggedTensor(batch_size, None)
        loss = tf.math.reduce_mean(loss, axis=-1)
        #    batches with zero valid indices produce NaN from the empty mean;
        #    zero them out so they contribute nothing
        loss = tf.where(num == 0, tf.zeros_like(loss), loss)

        return loss


