# -*- coding: utf-8 -*-  
'''
GSDMM evaluation metric (perplexity).

Created on 2021-09-04
@author: luoyi
'''
import tensorflow as tf


#    gsdmm评价函数
class GSDMMMetric(tf.keras.metrics.Metric):
    '''Perplexity metric for GSDMM (Gibbs-sampling Dirichlet Multinomial Mixture).

    Word-level perplexity (used by default):
        P(w=t) = sum_K phi[k,t] * theta[k]
        ppl    = exp( -1/(sum_D sum_Nd) * sum_D sum_Nd log(sum_K phi[k,t] * theta[k]) )

    Document-level perplexity (kept for reference, currently disabled):
        log P(d) = sum_{w in d} log(sum_K phi[k,v] * theta[k])
        ppl      = exp( -1/D * sum_D log P(d) )

    Unlike the original implementation (which overwrote the value on every
    batch, so result() only reflected the LAST batch), this version follows
    the Keras metric contract: update_state() accumulates running totals and
    the reported perplexity covers every batch seen since the last reset.
    '''
    def __init__(self,
                 name='perplexity_gsdmm_gibbs',
                 prob_layer=None,
                 **kwargs):
        '''
        @param name: metric name reported by Keras
        @param prob_layer: layer exposing topic_word_prob() -> Tensor(K, V)
                           and docs_topic_prob() -> Tensor(K,)
                           (assumed interface — confirm against the layer impl)
        '''
        self._prob_layer = prob_layer
        super(GSDMMMetric, self).__init__(name=name, **kwargs)

        # Running totals so the perplexity aggregates over the whole epoch.
        # _sum_log_prob: sum over all seen tokens of log(sum_K phi[k,t]*theta[k])
        # _num_tokens:   total number of valid (non-padding) tokens seen
        # NOTE: 'zeros' is the canonical Keras initializer id (was 'zero').
        self._sum_log_prob = self.add_weight('metrics_gsdmm_sum_log_prob', initializer='zeros', dtype=tf.float32)
        self._num_tokens = self.add_weight('metrics_gsdmm_num_tokens', initializer='zeros', dtype=tf.float32)
        self._perplexity = self.add_weight('metrics_gsdmm_ppl', initializer='zeros', dtype=tf.float32)

    def update_state(self, y_true, y_pred, sample_weight=None):
        '''
        @param y_true: ignored
        @param y_pred: Tensor(batch_size, max_doc_words) of word ids,
                       padded with -1
        '''
        # Number of non-empty documents in the batch (first word id >= 0).
        batch_size = tf.math.count_nonzero(y_pred[:, 0] >= 0)
        # Number of valid words per document.                Tensor(batch_size,)
        num_d = tf.math.count_nonzero(y_pred >= 0, axis=-1)

        # topic-word distribution phi: Tensor(K, V); doc-topic theta: Tensor(K,)
        p_tw = self._prob_layer.topic_word_prob()
        p_dt = self._prob_layer.docs_topic_prob()

        # Word-level perplexity.
        self.ppl_word(y_pred, batch_size, num_d, p_tw, p_dt)
        # Document-level perplexity is disabled: for long documents the
        # per-document log-likelihood sum underflows/overflows in exp().
#         self.ppl_doc(y_pred, batch_size, num_d, p_tw, p_dt)

    #    Word-level perplexity
    def ppl_word(self, y_pred, batch_size, num_d, p_tw, p_dt):
        '''ppl = exp( -1/sum(Nd) * sum_D sum_Nd log(sum_K phi[k,t] * theta[k]) )

        Accumulates the batch's log-probability mass and token count into the
        running totals, then refreshes the reported perplexity.
        '''
        # Total valid-token count in this batch: sum_D sum_Nd
        D = tf.math.reduce_sum(num_d)

        # Flatten all valid word ids.                        Tensor(sum(num_d),)
        wids_vec = tf.gather_nd(y_pred, indices=tf.where(y_pred >= 0))
        # phi[k, t] for every token.                       Tensor(K, sum(num_d))
        tw = tf.gather(params=p_tw, indices=tf.cast(wids_vec, dtype=tf.int64), axis=-1)
        # Marginal P(w=t) = sum_K phi[k,t] * theta[k].       Tensor(sum(num_d),)
        tw_dt = tf.math.reduce_sum(tw * p_dt[:, tf.newaxis], axis=0)
        # Floor at a tiny epsilon so log(0) cannot inject -inf/NaN.
        log_tw_dt = tf.math.log(tf.maximum(tw_dt, 1e-12))

        # Fold this batch into the running totals, then recompute ppl.
        self._sum_log_prob.assign_add(tf.math.reduce_sum(log_tw_dt))
        self._num_tokens.assign_add(tf.cast(D, dtype=tf.float32))
        self._perplexity.assign(
            tf.math.exp(- self._sum_log_prob / tf.maximum(self._num_tokens, 1.)))

    #    Document-level perplexity (unused; see update_state)
    def ppl_doc(self, y_pred, batch_size, num_d, p_tw, p_dt):
        '''ppl = exp( -1/D * sum_D sum_{w in d} log(sum_K phi[k,v] * theta[k]) )'''
        # Flatten all valid word ids.                        Tensor(sum(num_d),)
        wids_vec = tf.gather_nd(y_pred, indices=tf.where(y_pred >= 0))

        # phi[k, v] for every token.                       Tensor(K, sum(num_d))
        tw = tf.gather(params=p_tw, indices=tf.cast(wids_vec, dtype=tf.int64), axis=-1)
        # log(sum_K phi[k,v] * theta[k]) per token.          Tensor(sum(num_d),)
        log_tw_dt = tf.math.log(tf.math.reduce_sum(tw * p_dt[:, tf.newaxis], axis=0))
        # Regroup per document and sum within each.
        tw_dt = tf.RaggedTensor.from_row_lengths(values=log_tw_dt, row_lengths=num_d)
        tw_dt = tf.RaggedTensor.to_tensor(tw_dt, default_value=0)   # (batch_size, max(num_d))
        sum_d_tw_dt = tf.math.reduce_sum(tw_dt, axis=-1)            # (batch_size,)
        # exp( -1/N * sum_N log P(d) )
        ppl = tf.math.exp(-tf.math.reduce_sum(sum_d_tw_dt) / tf.cast(batch_size, dtype=tf.float32))

        self._perplexity.assign(ppl)

    def reset_states(self):
        # Clear the running totals along with the reported value so a new
        # epoch starts from scratch.
        self._sum_log_prob.assign(0.)
        self._num_tokens.assign(0.)
        self._perplexity.assign(0.)

    def result(self):
        return self._perplexity
