# -*- coding: utf-8 -*-  
'''
Evaluation metrics for BERT pre-training (MLM and NSP).

@author: luoyi
Created on 2021-04-16
'''
import tensorflow as tf

import utils.conf as conf
import utils.logger_factory as logf
import utils.dictionaries_bert as dict_bert


#    MLM (masked language model) evaluation metric
class PreMLMMetric(tf.metrics.Metric):
    '''Masked-language-model accuracy for BERT pre-training.

    Accumulates T (masked tokens predicted correctly) and P (total masked
    tokens) across batches; result() reports T / P over everything seen
    since the last reset.

    Fix: the previous implementation overwrote the stored value with the
    last batch's accuracy on every update, so result() only ever reflected
    a single batch instead of the running epoch accuracy.
    NOTE(review): the state-variable names changed, so this metric's state
    will not restore from old checkpoints — normally acceptable for a
    metric; confirm if checkpointed metric state matters here.
    '''
    def __init__(self,
                 name='PreMLMMetric',
                 max_sen_len=conf.BERT.get_pre_training_sentence_maxlen(),
                 vocab_size=dict_bert.dict_size(),
                 **kwargs):
        super(PreMLMMetric, self).__init__(name=name, **kwargs)
        
        self._max_sen_len = max_sen_len
        self._vocab_size = vocab_size
        #    Running totals ('zeros' is the documented initializer alias;
        #    the original 'zero' is non-canonical).
        self._total_T = self.add_weight('mlm_metrics_T', initializer='zeros', dtype=tf.float64)
        self._total_P = self.add_weight('mlm_metrics_P', initializer='zeros', dtype=tf.float64)
        pass
    
    def update_state(self, y_true, y_pred, sample_weight=None):
        '''Fold one batch into the running totals.
            @param y_pred: RaggedTensor with two pieces:
                                Tensor(batch_size * 1, 2)                        softmax probabilities for the [CLS] position
                                Tensor(batch_size * max_sen_len, vocab_size)     softmax probabilities for every token of the sentence
            @param y_true: Tensor(batch_size, 1+1+1, rewrite_max)
                                    row 0: positive-sample flag (all 1 = positive, all 0 = negative)
                                    row 1: indices of the [MASK]ed positions, -1 = [PAD]
                                    row 2: original token ids of the [MASK]ed positions, -1 = [PAD]
        '''
        #    Per-token prediction probabilities    Tensor(batch_size, max_sen_len - 1, vocab_size)
        #    NOTE(review): seq dim is max_sen_len - 1, presumably because the
        #    [CLS] position is carried separately in y_pred[0] — confirm.
        token_probs = y_pred[1].to_tensor()
        token_probs = tf.reshape(token_probs, shape=(-1, self._max_sen_len - 1, self._vocab_size))
        
        mask_pos = y_true[:, 1, :]                              #    masked positions      Tensor(batch_size, rewrite_max)
        mask_label = y_true[:, 2, :]                            #    original token ids    Tensor(batch_size, rewrite_max)
        #    number of real (non -1, i.e. non-[PAD]) masked tokens per sample
        nums = tf.math.count_nonzero(mask_pos + 1, axis=-1)
        #    drop the -1 padding entries
        valid = tf.where(mask_pos >= 0)
        mask_pos = tf.gather_nd(mask_pos, indices=valid)
        mask_label = tf.gather_nd(mask_label, indices=valid)
        #    (batch_row, seq_pos) index of every masked token; the cast aligns
        #    mask_pos with tf.where's int64 output before stacking
        gather_idx = tf.stack([valid[:, 0], tf.cast(mask_pos, tf.int64)], axis=-1)
        #    predicted token id at each masked position
        #    Tensor(total masked tokens in batch,)
        pred_ids = tf.math.argmax(tf.gather_nd(token_probs, indices=gather_idx), axis=-1)
        
        #    correctly predicted masked tokens (cast keeps dtypes comparable
        #    with argmax's int64 output)
        T = tf.math.count_nonzero(tf.equal(pred_ids, tf.cast(mask_label, pred_ids.dtype)))
        #    total masked tokens in the batch
        P = tf.math.reduce_sum(nums)
        #    accumulate instead of overwriting so result() covers every batch
        self._total_T.assign_add(tf.cast(T, tf.float64))
        self._total_P.assign_add(tf.cast(P, tf.float64))
        
        acc = tf.cast(T, tf.float64) / tf.cast(P, tf.float64)
        tf.print('------------------------------------------------------------', output_stream=logf.get_logger_filepath('bert_metrics'))
        tf.print('MLM acc:', acc, ' T:', T, ' P:', P, output_stream=logf.get_logger_filepath('bert_metrics'))
        
        pass
    
    def reset_states(self):
        #    clear the running totals (Keras calls this at epoch boundaries)
        self._total_T.assign(0.)
        self._total_P.assign(0.)
        pass
    def result(self):
        #    divide_no_nan yields 0.0 before any update (P == 0)
        return tf.math.divide_no_nan(self._total_T, self._total_P)
    
    pass


#    NSP (next sentence prediction) evaluation metric
class PreNSPMetric(tf.metrics.Metric):
    '''Next-sentence-prediction accuracy for BERT pre-training.

    Accumulates T (samples classified correctly) and P (total samples)
    across batches; result() reports T / P since the last reset.

    Fixes: (1) the previous implementation overwrote the stored value with
    the last batch's accuracy, so result() reflected only a single batch;
    (2) the state-variable name was the copy-pasted 'mlm_metric' — renamed
    to NSP-specific names.
    NOTE(review): renaming the state variables means this metric's state
    will not restore from old checkpoints — confirm that is acceptable.
    '''
    def __init__(self,
                 name='PreNSPMetric',
                 **kwargs):
        super(PreNSPMetric, self).__init__(name=name, **kwargs)
        
        #    Running totals ('zeros' is the documented initializer alias).
        self._total_T = self.add_weight('nsp_metric_T', initializer='zeros', dtype=tf.float64)
        self._total_P = self.add_weight('nsp_metric_P', initializer='zeros', dtype=tf.float64)
        pass
    
    def update_state(self, y_true, y_pred, sample_weight=None):
        '''Fold one batch into the running totals.
            @param y_pred: RaggedTensor with two pieces:
                                Tensor(batch_size * 1, 2)                        softmax probabilities for the [CLS] position
                                Tensor(batch_size * max_sen_len, vocab_size)     softmax probabilities for every token of the sentence
            @param y_true: Tensor(batch_size, 1+1+1, rewrite_max)
                                    row 0: positive-sample flag (all 1 = positive, all 0 = negative)
                                    row 1: indices of the [MASK]ed positions, -1 = [PAD]
                                    row 2: original token ids of the [MASK]ed positions, -1 = [PAD]
        '''
        #    [CLS] prediction: index 0 = negative sample, index 1 = positive
        cls_probs = y_pred[0].to_tensor()                       #    Tensor(batch_size, 2)
        pred_cls = tf.math.argmax(cls_probs, axis=-1)           #    Tensor(batch_size,), int64
        
        #    row 0 of y_true is all-1 (positive) or all-0 (negative), so the
        #    mean over the row recovers the scalar label
        labels = tf.math.reduce_mean(y_true[:, 0, :], axis=-1)  #    Tensor(batch_size,)
        
        #    correct predictions (cast keeps the label dtype comparable with
        #    argmax's int64 output)
        T = tf.math.count_nonzero(tf.equal(pred_cls, tf.cast(labels, pred_cls.dtype)))
        #    labels are 0/1, so labels + 1 is never zero: P == batch size
        P = tf.math.count_nonzero(labels + 1)
        
        #    accumulate instead of overwriting so result() covers every batch
        self._total_T.assign_add(tf.cast(T, tf.float64))
        self._total_P.assign_add(tf.cast(P, tf.float64))
        
        acc = tf.cast(T, tf.float64) / tf.cast(P, tf.float64)
        tf.print('NSP acc:', acc, ' T:', T, ' P:', P, output_stream=logf.get_logger_filepath('bert_metrics'))
        
        pass
    
    def reset_states(self):
        #    clear the running totals (Keras calls this at epoch boundaries)
        self._total_T.assign(0.)
        self._total_P.assign(0.)
        pass
    def result(self):
        #    divide_no_nan yields 0.0 before any update (P == 0)
        return tf.math.divide_no_nan(self._total_T, self._total_P)
    pass