# -*- coding: utf-8 -*-  
'''
tbert评价函数

Created on 2021年9月12日
@author: luoyi
'''
import tensorflow as tf

import utils.logger_factory as logf
import utils.conf as conf


#    tbert评价函数
#    tbert evaluation metric
class TBertMetric(tf.metrics.Metric):
    '''Macro-averaged F1 score over K topic classes.
        precision[k] = TP[k] / (TP[k] + FP[k]) = TP[k] / PN[k]
        recall[k]    = TP[k] / (TP[k] + FN[k]) = TP[k] / TN[k]
        F1_score = 2 * (precision * recall) / (precision + recall)
        where:
            TP[k]: number of samples whose true class k was predicted correctly
            PN[k]: number of samples predicted as class k
            TN[k]: number of samples whose true label is class k
        Classes that never occur in the labels of a batch are excluded from
        the macro average (marked with -1 internally).
    '''
    def __init__(self,
                 name='tbert_f1_score',
                 K=conf.TBERT.get_k(),
                 **kwargs):
        '''
            @param name: metric name reported to Keras
            @param K: number of topic classes (default taken from project config)
        '''
        self._K = K

        super(TBertMetric, self).__init__(name=name, **kwargs)

        #    'zeros' is the canonical Keras initializer identifier; the original
        #    'zero' is not guaranteed to resolve in every tf.keras version
        self._f1_score = self.add_weight(name='f1_score', initializer='zeros', dtype=tf.float64)

    def update_state(self, y_true, y_pred, sample_weight=None):
        '''Compute the macro F1 of the current batch and store it.
            NOTE(review): the value is assigned, not accumulated — result()
            reflects only the most recent batch, not a running average over
            all batches seen since reset_states(). Confirm this is intended.
            @param y_true: Tensor(batch_size, 1)   true topic index per sentence
                           (the squeeze below implies a trailing size-1 axis,
                            although the original doc said (batch_size,) — TODO confirm)
            @param y_pred: Tensor(batch_size, K)   per-topic probabilities
        '''
        y_true = tf.squeeze(y_true, axis=-1)
        #    predicted topic per sentence    Tensor(batch_size, )
        y_pred = tf.argmax(input=y_pred, axis=-1)

        #    correct predictions per topic          Tensor(K, )
        TP = self.tp(y_true, y_pred)
        #    occurrences of each topic in labels    Tensor(K, )
        TN = self.count_k(y_true)
        #    occurrences of each topic in preds     Tensor(K, )
        PN = self.count_k(y_pred)

        #    recall[k] = TP[k] / TN[k]  (int64/int64 division yields float64)
        recall = TP / TN
        recall = tf.where(TN == 0, -tf.ones_like(recall), recall)               # topic absent from labels: mark -1, excluded from average
        #    precision[k] = TP[k] / PN[k]
        precision = TP / PN
        precision = tf.where(TP == 0, tf.zeros_like(precision), precision)      # guards the 0/0 case when nothing was predicted as k
        precision = tf.where(TN == 0, -tf.ones_like(precision), precision)      # topic absent from labels: mark -1, excluded from average

        #    F1 = 2 * precision * recall / (precision + recall)
        f1_score = 2 * (precision * recall) / (precision + recall)
        f1_score = tf.where(TP == 0, tf.zeros_like(f1_score), f1_score)         # numerator 0 => F1 is 0 (also guards 0/0)
        f1_score = tf.where(TN == 0, -tf.ones_like(f1_score), f1_score)         # topic absent from labels: mark -1, excluded from average

        #    per-topic diagnostic logging
        tf.print('------------------------------------------------', output_stream=logf.get_logger_filepath('tbert_metrics'))
        tt = tf.stack([tf.cast(TP, dtype=tf.float64),
                       tf.cast(TN, dtype=tf.float64),
                       tf.cast(PN, dtype=tf.float64),
                       precision, recall, f1_score], axis=-1)
        for i, t in enumerate(tt):
            tf.print('k:', i, 'detail:', t, output_stream=logf.get_logger_filepath('tbert_metrics'))

        #    macro average over topics that actually appear in the labels
        #    NOTE(review): if no topic appears (num_k == 0) this divides by zero
        #    and stores NaN — same as the original behavior; confirm acceptable
        num_k = tf.math.count_nonzero(f1_score >= 0)
        f1_score = tf.where(f1_score >= 0, f1_score, tf.zeros_like(f1_score))
        self._f1_score.assign(tf.math.reduce_sum(f1_score) / tf.cast(num_k, dtype=tf.float64))

    #    number of correct predictions per topic
    def tp(self, y_true, y_pred):
        '''TP[k]: count of positions where prediction == label == k.
            @return: Tensor(K, )
        '''
        #    class ids at the correctly-predicted positions
        TP = tf.gather_nd(params=y_true, indices=tf.where(tf.equal(y_true, y_pred)))
        return self.count_k(TP)

    #    occurrence count of every class id in a 1-D vector
    def count_k(self, vec):
        '''Count occurrences of each class id 0..K-1 in vec.
            assumes vec holds int64 class ids in [0, K) — TODO confirm;
            the tf.stack below would fail for other integer dtypes.
            @return: Tensor(K, )
        '''
        uy, _, uc = tf.unique_with_counts(vec)
        u = tf.stack([uy, tf.cast(uc, dtype=tf.int64)], axis=-1)
        #    SparseTensor requires its indices in ascending order
        u = tf.gather(params=u, indices=tf.argsort(uy), axis=0)

        t_idx = tf.cast(u[:, 0], dtype=tf.int64)
        t_idx = tf.stack([t_idx, tf.zeros_like(t_idx)], axis=-1)          #    Tensor(num(unique), 2)
        t_val = u[:, 1]                                                   #    Tensor(num(unique), )
        #    scatter counts into a dense length-K vector (absent classes -> 0)
        t_incr = tf.SparseTensor(indices=t_idx, values=t_val, dense_shape=(self._K, 1))
        return tf.squeeze(tf.sparse.to_dense(t_incr), axis=-1)            #    Tensor(K, )

    #    number of mispredicted samples per true topic
    def fp(self, y_true, y_pred):
        '''Counts, per true class k, the samples of class k predicted as some
            other class (FN[k] in standard terminology; name kept for
            compatibility — currently unused by update_state).
            @return: Tensor(K, )
        '''
        FP = tf.gather_nd(params=y_true, indices=tf.where(tf.not_equal(y_true, y_pred)))
        return self.count_k(FP)

    def reset_states(self):
        '''Reset the stored F1 to 0 (e.g. between epochs).'''
        self._f1_score.assign(0.)

    def result(self):
        '''Return the stored macro F1 (last batch only — see update_state NOTE).'''
        return self._f1_score
