# -*- coding: utf-8 -*-  
'''
LDA evaluation metric (perplexity).

Created on 2021-08-22
@author: luoyi
'''
import tensorflow as tf

import utils.conf as conf


#    LDA evaluation metric (perplexity)
class LDAMetric(tf.keras.metrics.Metric):
    '''Perplexity metric for an LDA model.

        Perplexity = exp(- 1/N * ∑(N) log(q(xn)))
                   = exp(- 1/∑(M)Nm * ∑(Nm) log(q(x[m,n])))
            Nm: number of (non-padding) words in document m
            q(x[m,n]): probability of the n-th word of document m
            q(x[m,n]) = p(w) = ∫z p(w|z) * p(z) dz
                      = ∑(Z) φ[w,z] * θm(z)
                      = ∑(Z) topic_word[z,w] * doc_topic[m,z]
            topic_word: topic-word distribution   Tensor(K, W)
            doc_topic:  doc-topic distribution    Tensor(batch_size, K)

        The metric accumulates ∑ log q(x) and the word count N across all
        `update_state` calls since the last reset, so `result()` reports the
        perplexity over everything seen so far (standard Keras metric
        semantics), not just the most recent batch.

        @param prob_layer: layer providing the probability distributions via:
                            1 topic_word_prob()   topic-word distribution  Tensor(K, W)
                            2 docs_topic_prob()   doc-topic distribution   Tensor(batch_size, K)
        @param K: number of topics
        @param max_doc_words: padded document length (kept for interface
                              compatibility; no longer needed internally)
    '''
    def __init__(self,
                 name='perplexity_lda_gibbs',
                 prob_layer=None,
                 K=conf.LDA.get_k(),
                 max_doc_words=conf.LDA.get_max_doc_words(),
                 **kwargs):
        self._prob_layer = prob_layer
        self._K = K
        self._max_doc_words = max_doc_words

        super(LDAMetric, self).__init__(name=name, **kwargs)

        #    Running accumulators: sum of log-probabilities and total word
        #    count. 'zeros' is the canonical Keras initializer alias.
        self._log_prob_sum = self.add_weight('metrics_lda_log_prob_sum', initializer='zeros', dtype=tf.float32)
        self._total_words = self.add_weight('metrics_lda_total_words', initializer='zeros', dtype=tf.float32)

    def update_state(self, y_true, y_pred, sample_weight=None):
        '''Accumulate log-likelihood statistics for one batch.

            @param y_true: ignored
            @param y_pred: Tensor(batch_size, max_doc_words), word ids padded with -1
        '''
        #    Fetch the topic-word distribution Tensor(K, W) and the
        #    doc-topic distribution Tensor(batch_size, K).
        topic_word = self._prob_layer.topic_word_prob()
        doc_topic = self._prob_layer.docs_topic_prob()

        #    Total number of real (non-padding) words in the batch.
        N = tf.math.count_nonzero(y_pred >= 0)

        #    Indices of real words: idx[:, 0] is the document row,
        #    idx[:, 1] the position within the document.
        idx = tf.where(y_pred >= 0)
        wids = tf.gather_nd(y_pred, indices=idx)                                 #    Tensor(sum(Nm))
        #    Probability of each word under every topic.
        word_every_topic = tf.gather(params=topic_word, indices=wids, axis=-1)   #    Tensor(K, sum(Nm))
        word_every_topic = tf.transpose(word_every_topic, perm=[1, 0])           #    Tensor(sum(Nm), K)

        #    Doc-topic row of the document each word belongs to. Gathering by
        #    the document index directly is equivalent to (and much cheaper
        #    than) tiling doc_topic max_doc_words times and re-indexing.
        doc_topic_per_word = tf.gather(doc_topic, indices=idx[:, 0], axis=0)     #    Tensor(sum(Nm), K)

        #    q(x[m,n]) = ∑(Z) topic_word[z,w] * doc_topic[m,z]    Tensor(sum(Nm), )
        p = tf.math.reduce_sum(word_every_topic * doc_topic_per_word, axis=-1)

        #    Fold this batch into the running totals.
        self._log_prob_sum.assign_add(tf.math.reduce_sum(tf.math.log(p)))
        self._total_words.assign_add(tf.cast(N, dtype=tf.float32))

    def reset_states(self):
        '''Clear the accumulators at the start of a new evaluation pass.'''
        self._log_prob_sum.assign(0.)
        self._total_words.assign(0.)

    def result(self):
        '''perplexity = exp(- ∑ log q(x) / N) over everything accumulated.'''
        #    Guard against division by zero before any word has been seen.
        n = tf.math.maximum(self._total_words, 1.)
        return tf.math.exp(- self._log_prob_sum / n)