# -*- coding: utf-8 -*-  
'''
gsdmm相关处理

Created on 2021年9月9日
@author: luoyi
'''
import tensorflow as tf
import numpy as np

import utils.conf as conf
from utils.iexicon import LiteWordsWarehouse


#    Infer each document's topic using already-trained parameters
def divination(docs, wn_topic_words, alpha=None, beta=None, batch_words=conf.LDA.get_batch_words(), epochs=10):
    '''Gibbs-sampling inference of document topics against fixed trained counts.
        @param docs: Tensor(batch_size, max_doc_words)    -1 marks padding word ids
        @param wn_topic_words: Tensor(K, V) trained per-topic word counts
        @param alpha: Tensor(K, ) Dirichlet prior on topics; defaults to 50/K per topic
        @param beta: Tensor(V, ) Dirichlet prior on words; defaults to 0.2 per word
        @param batch_words: not referenced in this body — NOTE(review): confirm whether callers rely on it
        @param epochs: number of sampling rounds
        @return Tensor(batch_size, ) topic id sampled most often for each document
    '''
    #    Derive K (topic count) and V (vocabulary size) from the trained counts
    (K, V) = wn_topic_words.shape
    #    B: number of real documents — a row whose first word id is -1 is treated as padding
    B = tf.math.count_nonzero(docs[:, 0] >= 0, axis=-1)
    if (alpha is None): alpha = tf.convert_to_tensor([50. / K] * K)
    if (beta is None): beta = tf.convert_to_tensor([0.2] * V)

    #    Document count per topic
    dn_topic_doc = tf.zeros(shape=(K, ), dtype=tf.float32)
    #    Word count per (topic, word); starts from the trained counts
    tmp_wn_topic_words = wn_topic_words
    #    How many times each document was assigned each topic
    #    NOTE(review): num_dt has B rows but tf.one_hot(new_topics) below has one row per
    #    row of `docs` — if fully padded rows can occur the shapes diverge; confirm upstream.
    num_dt = tf.zeros(shape=(B, K), dtype=tf.float32)

    for _ in range(epochs):
        #    Per-document log topic distribution given current counts
        p = log_topic_distribution(K=K,
                                   docs=docs, 
                                   dn_topic_doc=dn_topic_doc, 
                                   wn_topic_word=tmp_wn_topic_words, 
                                   alpha=alpha, beta=beta)
        
        #    Sample one topic per document from the distribution
        new_topics = tf.random.categorical(p, num_samples=1)
        new_topics = tf.squeeze(new_topics, axis=-1)
        
        #    Update the counts
        #    topic -> document counts
        incr_topic_doc = incr_num_topic_doc(new_topics, K=K)
        dn_topic_doc = dn_topic_doc + incr_topic_doc
        #    topic -> word counts
        incr_topic_words = stat_nw_docs(new_topics, docs, K=K, V=V)
        incr_topic_words = tf.math.reduce_sum(incr_topic_words, axis=0)         #    Tensor(K, V)
        incr_topic_words = tf.cast(incr_topic_words, dtype=tf.float32)
        tmp_wn_topic_words = tmp_wn_topic_words + incr_topic_words
        #    document -> topic counts
        new_topics = tf.one_hot(new_topics, depth=K)
        num_dt = num_dt + new_topics
        pass
    
    #    The topic that selected a document most often is taken as its topic
    topics = tf.math.argmax(num_dt, axis=-1)
    return topics


#    Compute the per-document topic probability distribution
def log_topic_distribution(K, docs, dn_topic_doc, wn_topic_word, alpha, beta):
    '''Per-document log topic distribution from the marginal probability:
        logP(z[i]=k | W,α,β) = part1 - part2 + part3 - part4
        part1 = log( dn_topic_doc[k] + α[k] ) 
        part2 = log( ∑(K)( dn_topic_doc[k] + α[k] ) )
        part3 = ∑(w∈W[d])log( wn_topic_word[k,w] + β[w] )                        ------ W[d] is the multiset of words in doc d (may repeat)
        part4 = ∑(j=1->n[d])log( ∑(V)(wn_topic_word[k,v] + β[v]) + j - 1 )
                            
        @param K: number of topics
        @param docs: Tensor(batch_size, max_doc_words)    -1 marks padding
        @param dn_topic_doc: Tensor(K, ) document count per topic
        @param wn_topic_word: Tensor(K, V) word count per topic
        @param alpha: Tensor(K, ) topic prior
        @param beta: Tensor(V, ) word prior
        @return Tensor(batch_size, K) unnormalized log probabilities
    '''
    #    part1    Tensor(K, )
    part1 = op_part1(docs, dn_topic_doc, alpha)
    #    part2    Tensor()
    part2 = op_part2(docs, dn_topic_doc, alpha)
    #    part3    Tensor(batch_size, K)
    part3 = op_part3(docs, wn_topic_word, beta)
    #    part4    Tensor(batch_size, K)
    part4 = op_part4(K, docs, wn_topic_word, beta)
    
    #    BUGFIX: removed leftover debug tf.print calls that spammed every invocation
    #    and computed tf.math.exp(part3 - part4) solely to print it.
    #    part1 broadcasts over the batch; part2 is a scalar
    return part1[tf.newaxis, :] - part2 + part3 - part4
#    part1 of the topic distribution
def op_part1(docs, dn_topic_doc, alpha):
    '''part1 = log( dn_topic_doc[k] + α[k] )
        @param docs: Tensor(batch_size, max_docs_words)    -1 marks padding (unused here)
        @param dn_topic_doc: Tensor(K, ) document count per topic
        @param alpha: Tensor(K, ) topic prior
        @return Tensor(K, )
    '''
    smoothed_counts = dn_topic_doc + alpha
    return tf.math.log(smoothed_counts)
#    part2 of the topic distribution
def op_part2(docs, dn_topic_doc, alpha):
    '''part2 = log( ∑(K)( dn_topic_doc[k] + α[k] ) )
        @param docs: Tensor(batch_size, max_docs_words)    -1 marks padding (unused here)
        @param dn_topic_doc: Tensor(K, ) document count per topic
        @param alpha: Tensor(K, ) topic prior
        @return Tensor() scalar
    '''
    total = tf.math.reduce_sum(dn_topic_doc + alpha, axis=-1)
    return tf.math.log(total)
#    part3 of the topic distribution
def op_part3(docs, wn_topic_word, beta):
    '''part3 = ∑(w∈W[d])log( wn_topic_word[k,w] + β[w] )    ------ W[d] is the multiset of words in doc d (may repeat)
        @param docs: Tensor(batch_size, max_docs_words)    -1 marks padding
        @param wn_topic_word: Tensor(K, V) word count per topic
        @param beta: Tensor(V, ) word prior
        @return Tensor(batch_size, K)
    '''
    #    Number of real words per document    Tensor(batch_size, )
    words_per_doc = tf.math.count_nonzero(docs >= 0, axis=-1)

    #    Flatten all real word ids across the batch    Tensor(sum(words_per_doc), )
    flat_wids = tf.gather_nd(params=docs, indices=tf.where(docs >= 0))
    flat_wids = tf.cast(flat_wids, dtype=tf.int64)
    #    Current count of each flattened word under every topic    Tensor(sum(words_per_doc), K)
    word_counts = tf.gather(params=wn_topic_word, indices=flat_wids, axis=-1)
    word_counts = tf.transpose(word_counts, perm=[1, 0])
    #    β prior of each flattened word    Tensor(sum(words_per_doc), )
    word_prior = tf.gather(params=beta, indices=flat_wids, axis=-1)
    #    log(wn_topic_word[k,w] + β[w]) for every (word, topic)
    log_terms = tf.math.log(word_counts + word_prior[:, tf.newaxis])

    #    Regroup the flat word axis into per-document rows, pad with 0, sum within each doc
    grouped = tf.RaggedTensor.from_row_lengths(log_terms, row_lengths=words_per_doc)    #    RaggedTensor(None(words_per_doc), K)
    padded = tf.RaggedTensor.to_tensor(grouped, default_value=0)                        #    Tensor(batch_size, max(words_per_doc), K)
    return tf.math.reduce_sum(padded, axis=1)                                           #    Tensor(batch_size, K)
#    part4 of the topic distribution
def op_part4(K, docs, wn_topic_word, beta):
    '''part4 = ∑(j=1->n[d])log( ∑(V)(wn_topic_word[k,v] + β[v]) + j - 1 )
        @param K: number of topics
        @param docs: Tensor(batch_size, max_docs_words)    -1 marks padding
        @param wn_topic_word: Tensor(K, V) word count per topic
        @param beta: Tensor(V, ) word prior
        @return Tensor(batch_size, K)
    '''
    #    Number of real (non-padded) documents
    n_docs = tf.math.count_nonzero(docs[:, 0] >= 0)

    #    Words per document and the longest document length
    words_per_doc = tf.math.count_nonzero(docs >= 0, axis=-1)
    longest = tf.math.reduce_max(words_per_doc)

    #    ∑(V)(wn_topic_word[k,v] + β[v]) per topic    Tensor(K, )
    topic_totals = tf.math.reduce_sum(wn_topic_word + beta[tf.newaxis, :], axis=-1)
    #    The j-1 offsets 0..longest-1, one row per document    Tensor(n_docs, longest)
    offsets = tf.range(longest)
    offsets = tf.tile(offsets[tf.newaxis, :], multiples=[n_docs, 1])
    #    Mask: True where the offset indexes a real word of that document
    valid = offsets < words_per_doc[:, tf.newaxis]
    valid = tf.repeat(valid[:, tf.newaxis, :], repeats=K, axis=1)               #    Tensor(n_docs, K, longest)
    #    log( topic_total + j - 1 ) for every (doc, topic, j)
    terms = tf.cast(offsets[:, tf.newaxis, :], dtype=tf.float32) + topic_totals[tf.newaxis, :, tf.newaxis]
    terms = tf.math.log(terms)                                                  #    Tensor(n_docs, K, longest)
    #    Zero out padded positions, then accumulate over j
    terms = tf.where(valid, terms, tf.zeros_like(terms))
    return tf.math.reduce_sum(terms, axis=-1)                                   #    Tensor(n_docs, K)


#    Increment for the topic -> document counts
def incr_num_topic_doc(new_topics, K):
    '''Count how many documents were assigned each topic in [0, K).

        The previous implementation built the histogram by hand
        (unique_with_counts -> sort -> SparseTensor -> to_dense); a single
        tf.math.bincount performs exactly that "occurrences of each id in
        [0, K)" computation, with minlength guaranteeing a full (K, ) result.

        @param new_topics: int Tensor(batch_size, ) sampled topic id per document
        @param K: number of topics
        @return Tensor(K, ) float32 increment, directly addable to the running counts
    '''
    #    bincount requires int32 input; new_topics comes from tf.random.categorical as int64
    return tf.math.bincount(tf.cast(new_topics, dtype=tf.int32), minlength=K, dtype=tf.float32)

#    Per-document word-count statistics
def stat_nw_docs(new_topics, docs, K, V):
    '''Build, for every document, a (K, V) increment matrix holding its word
        counts on the row of its sampled topic.
        @param new_topics: Tensor(batch_size, ) topic id per document
        @param docs: Tensor(batch_size, max_words_docs)    -1 marks padding
        @return Tensor(batch_size, K, V)
    '''
    #    Prepend each document's topic id so map_fn sees [topic, word ids...]
    topic_col = tf.cast(new_topics[:, tf.newaxis], dtype=tf.int64)
    tagged_docs = tf.concat([topic_col, tf.cast(docs, dtype=tf.int64)], axis=-1)    #    Tensor(batch_size, 1 + max_docs_words)
    return tf.map_fn(fn=lambda row: stat_wn_doc(row, K, V), elems=tagged_docs)
def stat_wn_doc(doc, K, V):
    '''Word-count increment matrix for a single topic-tagged document.
        @param doc: Tensor(1 + max_words_docs, )
                        index 0: sampled topic id
                        index 1~: word ids, -1 marks padding
        @return Tensor(K, V) — zeros everywhere except row doc[0], which holds
                the occurrence count of every word in the document
    '''
    #    Assigned topic
    topic_id = doc[0]
    #    Drop padding words
    word_ids = tf.gather(params=doc[1:], indices=tf.where(doc[1:] >= 0))
    word_ids = tf.squeeze(word_ids, axis=-1)

    #    Occurrence count per distinct word, ordered by word id
    distinct, _, occurrences = tf.unique_with_counts(word_ids)
    pairs = tf.stack([distinct, tf.cast(occurrences, dtype=tf.int64)], axis=-1)
    pairs = tf.gather(pairs, indices=tf.argsort(pairs[:, 0]), axis=0)

    #    Scatter the counts onto row topic_id of a (K, V) matrix via a sparse tensor
    col_idx = pairs[:, 0]
    vals = pairs[:, 1]
    row_idx = tf.expand_dims(topic_id, axis=-1)
    row_idx = tf.repeat(row_idx, repeats=tf.shape(col_idx)[0], axis=-1)
    sparse_idx = tf.stack([row_idx, col_idx, tf.zeros_like(col_idx)], axis=-1)
    dense = tf.sparse.to_dense(tf.SparseTensor(indices=sparse_idx, values=vals, dense_shape=(K, V, 1)))
    return tf.squeeze(dense, axis=-1)


#    Compute the initial distribution over all documents
def initial_count(db, 
                  num_docs,
                  K=conf.DATASET_SOHU_THUCNEWS.get_k(), 
                  V=LiteWordsWarehouse.instance().words_count()):
    '''Randomly assign an initial topic to every document and accumulate counts.
        @param db: tf.data.Dataset yielding Tensor(batch_size, max_doc_words), -1 marks padding
                   note: the dataset's repeat count must be 1
        @param num_docs: total number of documents in the dataset
        @param K: number of topics
        @param V: vocabulary size
    '''
    #    BUGFIX: the check was inverted (`!= 1`), raising precisely when the repeat
    #    count was the one legal value the comment demands.
    #    NOTE(review): relies on the private attribute `_count` — confirm db is a RepeatDataset.
    assert db._count == 1, '初始化计数时，db的repeat只能是1.'
    
    #    Initial distribution: uniform over K topics, in log space for tf.random.categorical
    p = tf.convert_to_tensor([1./K] * K, dtype=tf.float32)
    p = tf.math.log(p)[tf.newaxis, :]
    
    #    Document count per topic
    dn_topic_doc = tf.zeros(shape=(K, ), dtype=tf.int32)
    #    Word count per (topic, word)
    wn_topic_doc = tf.zeros(shape=(K, V), dtype=tf.int32)
    
    #    Topic assigned to each document (-1 = not yet assigned)
    doc_topic = -tf.ones(shape=(num_docs), dtype=tf.int32)
    
    #    Walk every batch of documents
    step = 0
    for x, _ in db:
        '''
            x: Tensor(batch_size, max_doc_words)    -1 marks padding
        '''
        #    Words per document / real documents in this batch
        num_d = tf.math.count_nonzero(x >= 0, axis=-1)
        batch_size = tf.math.count_nonzero(x[:, 0] >= 0)
        
        #    Sample a topic for every document in the batch
        new_topics = tf.random.categorical(logits=p, num_samples=batch_size)
        #    BUGFIX: squeeze only axis 0 so a single-document batch keeps rank 1
        #    (a bare squeeze would collapse it to a scalar)
        new_topics = tf.squeeze(new_topics, axis=0)
        
        #    Update the per-topic document counts
        uy, _, uc = tf.unique_with_counts(new_topics)
        sort_idx = tf.argsort(uy, axis=0)
        uy = tf.gather(params=uy, indices=sort_idx, axis=-1)
        uc = tf.gather(params=uc, indices=sort_idx, axis=-1)
        incr_idx = tf.stack([uy, tf.zeros_like(uy)], axis=-1)
        #    BUGFIX: the indices are rank-2 pairs (topic, 0), so dense_shape must be
        #    2-D as well; the original dense_shape=(K,) was invalid for these indices.
        incr_dn = tf.SparseTensor(indices=incr_idx, values=uc, dense_shape=(K, 1))
        incr_dn = tf.squeeze(tf.sparse.to_dense(incr_dn, default_value=0), axis=-1)
        dn_topic_doc += incr_dn
        #    TODO: update the per-topic word counts (wn_topic_doc) and doc_topic,
        #          then return the accumulated statistics — left unfinished in SOURCE.
        
        step += 1
        pass
    pass




