# -*- coding: utf-8 -*-  
'''
lda相关处理过程

Created on 2021年9月9日
@author: luoyi
'''
import tensorflow as tf

import utils.conf as conf


#    用已经训练好的参数推理文章中每个词的主题
#    Infer the topic of every word in each document using already-trained parameters.
def divination(docs, wn_topic_words, alpha=None, beta=None, batch_words=conf.LDA.get_batch_words(), epochs=10):
    '''Gibbs-sampling style topic inference over a batch of documents.

        Repeatedly (epochs times) samples a topic for every word from the
        conditional distribution computed by block_divination_words, then
        predicts each document's topic and each word's topic by argmax over
        the accumulated counts.

        @param docs: Tensor(batch_size, max_doc_words)    word ids; -1 marks padding
        @param wn_topic_words: Tensor(K, V) trained topic-word count matrix
        @param alpha: Tensor(K, ) Dirichlet prior on doc-topic; defaults to 50/K per topic
        @param beta: Tensor(V, ) Dirichlet prior on topic-word; defaults to 0.02 per word
        @param batch_words: number of word columns processed per block
        @param epochs: number of sampling sweeps
        
        @return: topic_docs: Tensor(batch_size, )        predicted topic of each document
                 topic_words: Tensor(sum(num_d), )       predicted topic of each word
    '''
    max_doc_words = docs.shape[1]
    (K, V) = wn_topic_words.shape
    assert max_doc_words % batch_words == 0, 'max_doc_words:({})必须是batch_words:({})的整数倍.'.format(max_doc_words, batch_words)
    #    number of real (non-padded) documents; assumes a real document's first word id is >= 0
    B = tf.math.count_nonzero(docs[:, 0] >= 0)
    
    X = tf.reshape(docs, shape=(-1, max_doc_words // batch_words, batch_words))   #    Tensor(batch_size, max_doc_words // batch_words, batch_words)
    X = tf.transpose(a=X, perm=[1, 0, 2])                                         #    Tensor(max_doc_words // batch_words, batch_size, batch_words)
    
    #    word count of every document    Tensor(batch_size, )
    num_d = tf.math.count_nonzero(docs >= 0, axis=-1)
    sum_d = tf.math.reduce_sum(num_d)
    
    if (alpha is None): 
        alpha = tf.convert_to_tensor([50. / K], dtype=tf.float32)
        alpha = tf.tile(alpha, multiples=[K])
        pass
    if (beta is None):
        beta = tf.convert_to_tensor([0.02], dtype=tf.float32)
        beta = tf.tile(beta, multiples=[V])
        pass
    
    #    per-document per-topic word counts    Tensor(batch_size, K)
    wn_docs_topic = tf.zeros(shape=(B, K), dtype=tf.float32)
    #    per-word per-topic selection counts    Tensor(sum(num_d), K)
    tn_words = tf.zeros(shape=(sum_d, K), dtype=tf.float32)
    #    ∑(W)(wn_topic_word[k,w] + β[w]) — constant across the loop, hoisted once
    sum_wn_topic_word = tf.math.reduce_sum(wn_topic_words + beta[tf.newaxis, :])
    #    sample a topic for every word of every document, epochs times
    for _ in range(epochs):
        topics = None
        for block in X:
            #    blocks that are entirely padding still need a -1 placeholder so shapes line up
            if (tf.math.reduce_all(block == -1)): 
                if (topics is None): topics = -tf.ones_like(block)[tf.newaxis, ::]
                else: topics = tf.concat([topics, -tf.ones_like(block)[tf.newaxis, ::]], axis=0)
                continue
            
            #    words per document inside this block
            num_block = tf.math.count_nonzero(block >= 0, axis=-1)
            
            #    topic probability distribution of every word    Tensor(sum(num_block), K)
            p = block_divination_words(num_d=num_block,
                                       block=block, 
                                       wn_doc_topic=wn_docs_topic,
                                       wn_topic_word=wn_topic_words,
                                       alpha=alpha,
                                       beta=beta,
                                       sum_wn_topic_word=sum_wn_topic_word)
            #    sample a new topic per word    Tensor(sum(num_block), )
            new_topics = tf.random.categorical(logits=tf.math.log(p), num_samples=1, dtype=tf.int32)[:, 0]
            
            #    fold the newly sampled topics into the per-document topic counts
            incr = incr_wn_docs_topic(num_block, new_topics, K)
            wn_docs_topic = wn_docs_topic + incr
            #    pad the flat topic vector back to block layout and remember it
            new_topics = tf.RaggedTensor.from_row_lengths(new_topics, row_lengths=num_block)
            new_topics = tf.RaggedTensor.to_tensor(new_topics, default_value=-1, shape=block.shape)     #    Tensor(batch_size, batch_words)
            if (topics is None): topics = new_topics[tf.newaxis, ::]
            else: topics = tf.concat([topics, new_topics[tf.newaxis, ::]], axis=0)
            
            pass
        #    accumulate this sweep's per-word topic selections
        topics = tf.transpose(topics, perm=[1, 0, 2])               #    Tensor(batch_size, max_doc_words // batch_words, batch_words)
        topics = tf.reshape(topics, shape=(-1, max_doc_words))      #    Tensor(batch_size, max_doc_words)
        topics = tf.gather_nd(topics, indices=tf.where(topics >= 0))#    Tensor(sum(num_d), )
        topics = tf.one_hot(topics, depth=K)                        #    Tensor(sum(num_d), K)
        tn_words = tn_words + topics
        pass
    
    #    the most frequently counted topic per document is its prediction
    topic_docs = tf.math.argmax(wn_docs_topic, axis=-1)
    #    the most frequently sampled topic per word is its prediction
    topic_words = tf.math.argmax(tn_words, axis=-1)
    return topic_docs, topic_words
def block_divination_words(num_d, block, wn_doc_topic, wn_topic_word, alpha, beta, sum_wn_topic_word):
    '''Per-word topic distribution for one block of words.

        p[k] = part1 * part2
        part1 = (wn_doc_topic[k] + α[k]) / ∑(K) (wn_doc_topic[k] + α[k])
        part2 = (wn_topic_word[tf.newaxis, k,w] + nwn_topic_word[:, k, w] + β[w]) / ∑(W)(wn_topic_word[tf.newaxis, k,w] + nwn_topic_word[:, k, w] + β[w])
        To simplify the computation the nwn_topic_word[:, k, w] term is dropped
        (if the model has been trained long enough, wn_topic_word dominates it).

        @param num_d: Tensor(batch_size, )           word count per document in this block
        @param block: Tensor(batch_size, batch_words) word ids; -1 marks padding
        @param wn_doc_topic: Tensor(batch_size, K)    per-document topic counts
        @param wn_topic_word: Tensor(K, W)            trained topic-word counts
        @param alpha: Tensor(K, )                     doc-topic Dirichlet prior
        @param beta: Tensor(W, )                      topic-word Dirichlet prior
        @param sum_wn_topic_word: scalar              precomputed ∑(W)(wn_topic_word[k,w] + β[w])
        @return: Tensor(sum(num_d), K)                topic distribution of every real word
    '''
    #    part1:    (wn_doc_topic[k] + α[k]) / ∑(K) (wn_doc_topic[k] + α[k])
    part1_molecule = wn_doc_topic + alpha[tf.newaxis, :]                #    Tensor(batch_size, K)    (wn_doc_topic[k] + α[k])
    part1_denominator = tf.math.reduce_sum(part1_molecule, axis=-1)     #    Tensor(batch_size, )    ∑(K) (wn_doc_topic[k] + α[k])
    part1 = part1_molecule / part1_denominator[:, tf.newaxis]           #    Tensor(batch_size, K)
    
    #    part2:    (wn_topic_word[tf.newaxis, k,w] + nwn_topic_word[:, k, w] + β[w]) / ∑(W)(wn_topic_word[tf.newaxis, k,w] + nwn_topic_word[:, k, w] + β[w])
    #    actually computed as: (wn_topic_word[k,w] + β[w]) / ∑(W)(wn_topic_word[k,w] + β[w])
    #    numerator
    wids_vec = tf.gather_nd(params=block, indices=tf.where(block >= 0))     #    Tensor(sum(num_d), )    flat ids of the real (non-padded) words
    wn = tf.gather(params=wn_topic_word, indices=wids_vec, axis=-1)         #    Tensor(K, sum(num_d))
    wn = tf.transpose(wn, perm=[1, 0])                                      #    Tensor(sum(num_d), K)    wn_topic_word[k,w] per word
    bwn = tf.gather(params=beta, indices=wids_vec, axis=-1)                 #    Tensor(sum(num_d), )    β[w] per word
    part2_molecule = wn + bwn[:, tf.newaxis]                                #    Tensor(sum(num_d), K)    wn_topic_word[k,w] + β[w] per word
    part2 = part2_molecule / sum_wn_topic_word
    part2 = tf.RaggedTensor.from_row_lengths(values=part2, row_lengths=num_d)
    part2 = tf.RaggedTensor.to_tensor(part2, default_value=-1)              #    Tensor(batch_size, max(num_d), K)    -1 pads short rows
    
    #    probability distribution per word; padded rows go negative (part1 > 0, pad = -1)
    p = part1[:, tf.newaxis, :] * part2  
    p = tf.gather_nd(p, indices=tf.where(p[:, :, 0] >= 0))                     #    Tensor(sum(num_d), K)    drop padded rows
    return p
def incr_wn_docs_topic(num_d, new_topics, K):
    '''Per-document topic-count increments for one block of sampled topics.

        @param num_d: Tensor(batch_size, )        word count of every document in the block
        @param new_topics: Tensor(sum(num_d), )   newly sampled topic of every word
        @param K: number of topics
        @return: Tensor(batch_size, K)            float32 count of words per topic per document
    '''
    #    pad the flat topic vector back to (batch_size, max(num_d)); -1 marks padding
    padded = tf.RaggedTensor.from_row_lengths(new_topics, row_lengths=num_d)
    padded = tf.RaggedTensor.to_tensor(padded, default_value=-1)                    #    Tensor(batch_size, max(num_d))
    
    #    tf.one_hot maps the -1 padding to an all-zero row, so summing over the word
    #    axis yields per-document topic histograms in one vectorized op (replaces the
    #    former per-row tf.map_fn + SparseTensor construction)
    return tf.math.reduce_sum(tf.one_hot(padded, depth=K, dtype=tf.float32), axis=1)
def topic_incr(new_topic, K):
    '''Count how many times each topic appears in one (padded) topic row.

        @param new_topic: Tensor(max(num_d), )    sampled topics; -1 marks padding
        @param K: number of topics
        @return: Tensor(K, )                      float32 count of each topic
    '''
    #    drop the -1 padding entries
    new_topic = tf.boolean_mask(new_topic, new_topic >= 0)     #    Tensor(num_d, )
    #    tf.math.bincount is the built-in histogram; minlength/maxlength pin the
    #    output to exactly (K, ) — replaces the hand-rolled unique_with_counts +
    #    argsort + SparseTensor + to_dense round-trip, which produced the same counts
    return tf.math.bincount(new_topic, minlength=K, maxlength=K, dtype=tf.float32)

