# -*- coding: utf-8 -*-  
'''
gsdmm模型，numpy实现

Created on 2021年9月11日
@author: luoyi
'''
import pickle
import numpy as np
import tqdm

import utils.conf as conf
from utils.iexicon import LiteWordsWarehouse


#    GSDMM topic model (Gibbs Sampling Dirichlet Multinomial Mixture), numpy implementation
class GSDMM:
    '''Collapsed Gibbs sampler for the Dirichlet Multinomial Mixture (GSDMM) model.

        Per-document topic posterior. The original paper's product form underflows
        (or overflows) as soon as documents get long, so everything is computed in
        the log domain:
            logP(z[d]=k | W,α,β) = part1 - part2 + part3 - part4
                part1 = log( dn_topic[k] + α )                          ------ α is a scalar (symmetric prior)
                part2 = log( num_docs - 1 + K*α )                       ------ identical for every k (num_docs = corpus size)
                part3 = ∑(w ∈ W[d]) log( wn_topic_word[k,w] + β )       ------ W[d] is the document's word multiset (repeats allowed)
                part4 = ∑(j=1->n[d]) log( wn_topic[k] + V*β + j - 1 )
        The K values are normalised into a multinomial θ, the document's new topic
        is sampled from θ, and the counters are updated:
            wn_topic_word[new_topic, v] += n[d, v]
            wn_topic[new_topic]         += n[d]
            dn_topic[new_topic]         += 1
    '''
    def __init__(self,
                 K=None,
                 V=None,
                 alpha=None,
                 beta=None,
                 save_doc_inteval=200,                                #    checkpoint every N documents. <=0 disables
                 save_epoch_inteval=1,                                #    checkpoint every N epochs. <=0 disables
                 wn_topic_word_path=None,
                 wn_topic_path=None,
                 dn_topic_path=None
                 ):
        '''
            @param K: number of topics. None -> conf.GSDMM.get_k()
            @param V: vocabulary size. None -> LiteWordsWarehouse word count
            @param alpha: symmetric Dirichlet prior over topics. None -> 50/K
            @param beta: symmetric Dirichlet prior over words. None -> 0.1
            @param save_doc_inteval: checkpoint every N documents. <=0 disables
            @param save_epoch_inteval: checkpoint every N epochs. <=0 disables
            @param wn_topic_word_path: pickle path for the topic-word count matrix
            @param wn_topic_path: pickle path for the per-topic word counts
            @param dn_topic_path: pickle path for the per-topic document counts
        '''
        #    Config/lexicon-dependent defaults are resolved lazily here rather than
        #    in the signature, where they would execute at class-definition time
        #    (running I/O on module import and freezing the values forever).
        if (K is None): K = conf.GSDMM.get_k()
        if (V is None): V = LiteWordsWarehouse.instance().words_count()
        if (wn_topic_word_path is None): wn_topic_word_path = conf.GSDMM.get_wn_topic_word_path()
        if (wn_topic_path is None): wn_topic_path = conf.GSDMM.get_wn_topic_path()
        if (dn_topic_path is None): dn_topic_path = conf.GSDMM.get_dn_topic_path()
        
        self._K = K
        self._V = V
        
        #    hyper-parameter defaults (symmetric priors)
        if (alpha is None): alpha = 50. / K
        self._alpha = alpha
        if (beta is None): beta = 0.1
        self._beta = beta
        
        self._save_doc_inteval = save_doc_inteval
        self._save_epoch_inteval = save_epoch_inteval
        
        self._wn_topic_word_path = wn_topic_word_path
        self._wn_topic_path = wn_topic_path
        self._dn_topic_path = dn_topic_path
        
        #    common scaling factor (kept for compatibility; not used in this block)
        self._zoom = 100.
        pass
    
    
    #    initialise the Gibbs-sampling counters
    def initial_count(self, K, V, docs):
        '''Assign every document a random initial topic and build the count tables.
            @param K: number of topics
            @param V: vocabulary size
            @param docs: iterable of records (doc_id, [word ids])
        '''
        print('初始化相关计数...')
        #    documents per topic
        self._dn_topic = np.zeros(shape=(K,), dtype=np.int32)
        #    words per topic
        self._wn_topic = np.zeros(shape=(K, ), dtype=np.int32)
        #    per-topic, per-word counts
        self._wn_topic_word = np.zeros(shape=(K, V), dtype=np.int32)
        #    topic currently assigned to each document
        self._doc_topic = []
        #    uniform sampling weights
        p = [1./K] * K
        
        print('初始化每个文档的主题，并计数...')
        #    sample each document's initial topic and accumulate the counters
        for _, doc in docs:
            #    draw the topic from a uniform distribution over the K topics
            topic = np.random.choice(a=K, size=1, p=p)[0]
            
            #    one more document under this topic
            self._dn_topic[topic] += 1
            #    len(doc) more words under this topic
            self._wn_topic[topic] += len(doc)
            #    add the document's per-word counts to the topic's word table
            u = np.unique(doc, return_counts=True)
            self._wn_topic_word[topic, u[0]] += u[1]
            
            self._doc_topic.append(topic)
            pass
        
        self._doc_topic = np.array(self._doc_topic)
        self._num_docs = len(self._doc_topic)
        print('初始化完成. num_docs:', self._num_docs)
        pass
    
    #    persist the count tables
    def save_weight(self):
        #    topic-word totals
        conf.mkfiledir_ifnot_exises(self._wn_topic_path)
        with open(file=self._wn_topic_path, mode='wb') as fw: pickle.dump(self._wn_topic, fw)
        
        #    topic-word per-word counts
        conf.mkfiledir_ifnot_exises(self._wn_topic_word_path)
        with open(file=self._wn_topic_word_path, mode='wb') as fw: pickle.dump(self._wn_topic_word, fw)
        
        #    topic-document counts
        conf.mkfiledir_ifnot_exises(self._dn_topic_path)
        with open(file=self._dn_topic_path, mode='wb') as fw: pickle.dump(self._dn_topic, fw)
        pass
    #    load the count tables (K and V are re-derived from the matrix shape)
    def load_weight(self):
        #    topic-word totals
        with open(file=self._wn_topic_path, mode='rb') as fr: self._wn_topic = pickle.load(fr)
        
        #    topic-word per-word counts
        with open(file=self._wn_topic_word_path, mode='rb') as fr: self._wn_topic_word = pickle.load(fr)
        self._K = self._wn_topic_word.shape[0]
        self._V = self._wn_topic_word.shape[1]
        
        #    topic-document counts
        with open(file=self._dn_topic_path, mode='rb') as fr: self._dn_topic = pickle.load(fr)
        pass
    
    #    train the model by collapsed Gibbs sampling
    def training(self, gsdmm_ds, epochs=10):
        '''
            @param gsdmm_ds: dataset exposing words_iterator() -> (doc_id, [word ids]) records
            @param epochs: number of full passes over the corpus
        '''
        #    initialise counters with a random topic per document
        self.initial_count(K=self._K, V=self._V, docs=gsdmm_ds.words_iterator())
        
        print('开始遍历每一篇文档.')
        for epoch in range(epochs):
            for i, (_, doc) in tqdm.tqdm(iterable=enumerate(gsdmm_ds.words_iterator()), desc='Epoch={}'.format(epoch), total=self._num_docs, leave=True, ncols=100):
                #    remove document i from its current topic's counters
                old_topic = self._doc_topic[i]
                self._dn_topic[old_topic] -= 1
                self._wn_topic[old_topic] -= len(doc)
                u = np.unique(doc, return_counts=True)
                self._wn_topic_word[old_topic, u[0]] -= u[1]
                
                #    posterior over topics for this document
                p = self.topic_distribution(doc,
                                            dn_topic=self._dn_topic, 
                                            num_docs=self._num_docs, 
                                            wn_topic_word=self._wn_topic_word, 
                                            wn_topic=self._wn_topic)
                
                #    sample the new topic (as a scalar) from the posterior
                new_topic = np.random.choice(self._K, size=1, p=p)[0]
                
                #    add the document back under its new topic
                self._doc_topic[i] = new_topic
                self._dn_topic[new_topic] += 1
                self._wn_topic[new_topic] += len(doc)
                self._wn_topic_word[new_topic, u[0]] += u[1]
                
                #    checkpoint every _save_doc_inteval documents.
                #    FIX: was `i+1 % n == 0`, which parses as `i + (1 % n)` and never fired
                if (self._save_doc_inteval > 0 and (i + 1) % self._save_doc_inteval == 0): self.save_weight()
                pass
            
            #    checkpoint every _save_epoch_inteval epochs (same precedence fix as above)
            if (self._save_epoch_inteval > 0 and (epoch + 1) % self._save_epoch_inteval == 0): self.save_weight()
            pass
        pass
    
    #    posterior distribution over topics for one document
    def topic_distribution(self, doc, dn_topic, num_docs, wn_topic_word, wn_topic):
        '''logP(z[d]=k | W,α,β) = part1 - part2 + part3 - part4
                part1 = log( dn_topic[k] + α )                          ------ α is a scalar (symmetric prior)
                part2 = log( num_docs - 1 + K*α )                       ------ identical for every k
                part3 = ∑(w ∈ W[d]) log( wn_topic_word[k,w] + β )       ------ β is a scalar (symmetric prior)
                part4 = ∑(j=1->n[d]) log( wn_topic[k] + V*β + j - 1 )
            @param doc: Ndarray(len(doc))    word ids of the document
            @param dn_topic: Ndarray(K, )    per-topic document counts
            @param num_docs: int             total number of documents
            @param wn_topic_word: Ndarray(K, V)    per-topic per-word counts
            @param wn_topic: Ndarray(K, )    per-topic word totals
            @return Ndarray(K, )             normalised topic probabilities
        '''
        part1 = self.op_part1(doc, dn_topic)
        part2 = self.op_part2(doc, num_docs)
        part3 = self.op_part3(doc, wn_topic_word)
        part4 = self.op_part4(doc, wn_topic)
        
        p = part1 - part2 + part3 - part4
        
        #    log-sum-exp style stabilisation: subtracting max(p) before exp() only
        #    multiplies every term by the same constant, which normalisation removes
        max_p = np.max(p)
        p = p - max_p
        p = np.exp(p)
        #    normalise into a probability vector
        p = p / np.sum(p)
        return p
    #    part1
    def op_part1(self, doc, dn_topic):
        '''part1 = log( dn_topic[k] + α )
            @return Ndarray(K, )
        '''
        return np.log(dn_topic + self._alpha)
    #    part2 (constant across topics; shifts all log-probabilities equally)
    def op_part2(self, doc, num_docs):
        '''part2 = log( ∑(K)( dn_topic[k] + α[k] ) )
                 = log( num_docs - 1 + K*α )
            @return float
        '''
        return np.log(num_docs - 1 + self._K * self._alpha)
    #    part3
    def op_part3(self, doc, wn_topic_word):
        '''part3 = ∑(w ∈ W[d]) log( wn_topic_word[k,w] + β )
            @return Ndarray(K, )
        '''
        #    log( wn_topic_word[k,w] + β ) for every word occurrence in the document
        wn = wn_topic_word[:, doc]
        wn = wn + self._beta                                    #    Ndarray(K, num_d)
        wn = np.log(wn)                                         #    Ndarray(K, num_d)
        #    sum over the document's word occurrences
        return np.sum(wn, axis=-1)
    #    part4
    def op_part4(self, doc, wn_topic):
        '''part4 = ∑(j=1->n[d]) log( wn_topic[k] + V*β + j - 1 )
            @return Ndarray(K, )
        '''
        #    wn_topic[k] + V*β + j - 1 for j = 1..n[d] (arange(n) supplies j-1)
        wn = wn_topic + self._V * self._beta                    #    Ndarray(K, )
        n_range = np.arange(len(doc))
        wn = wn[:, np.newaxis] + n_range[np.newaxis, :]         #    Ndarray(K, num_d)
        wn = np.log(wn)
        #    sum over j
        return np.sum(wn, axis=-1)
    
    
    #    inference
    def divination(self, docs, epochs=10):
        '''Predict the most likely topic for each document (single posterior pass;
            the trained counters are NOT updated).
            @param docs: Ndarray(batch_size, max_doc_words), padded with -1
            @param epochs: unused; kept for interface compatibility
            @return Ndarray(batch_size, ) topic index per document
        '''
        dn_topic = self._dn_topic
        num_docs = np.sum(self._dn_topic)
        wn_topic_word = self._wn_topic_word 
        wn_topic = self._wn_topic
        topics = []
        for doc in docs:
            #    strip padding values
            doc = doc[doc >= 0]
            p = self.topic_distribution(doc, dn_topic, num_docs, wn_topic_word, wn_topic)
            topic = np.argmax(p)
            topics.append(topic)
            pass
        
        topics = np.array(topics)
        return topics
    pass
