# -*- coding: utf-8 -*-  
'''
np版lda网络

Created on 2021年9月11日
@author: luoyi
'''
import pickle
import tqdm
import numpy as np

import utils.logger_factory as logf
import utils.conf as conf
from utils.iexicon import LiteWordsWarehouse


log = logf.get_logger('lda_train')


#    LDA topic model implemented with numpy
class LDA:
    '''LDA topic model trained with collapsed Gibbs sampling.

    Count state (created by initial_count() or load_weight()):
        _wn_topic       Ndarray(K,)    word count per topic
        _wn_topic_word  Ndarray(K, V)  word count per (topic, word)
        _dn_topic       Ndarray(D, K)  word count per (document, topic)
        _doc_word_topic list[Ndarray]  sampled topic of every word of every document
    '''
    def __init__(self, 
                 K=conf.LDA.get_k(),
                 V=LiteWordsWarehouse.instance().words_count(),
                 alpha=None,
                 beta=None,
                 save_doc_inteval=200,                                #    save weights every N documents. <=0 disables
                 save_epoch_inteval=1,                                #    save weights every N epochs. <=0 disables
                 wn_topic_word_path=conf.LDA.get_wn_topic_word_path(),
                 wn_topic_path=conf.LDA.get_wn_topic_path(),
                 dn_topic_path=conf.LDA.get_dn_topic_path(),
                 doc_word_topic_path=conf.LDA.get_doc_word_topic_path(),
                 ):
        '''
            @param K: number of topics
            @param V: vocabulary size
            @param alpha: document-topic Dirichlet prior; defaults to the 50/K heuristic
            @param beta: topic-word Dirichlet prior; defaults to 0.02
            @param save_doc_inteval: checkpoint interval in documents (<=0 disables)
            @param save_epoch_inteval: checkpoint interval in epochs (<=0 disables)
            @param wn_topic_word_path / wn_topic_path / dn_topic_path / doc_word_topic_path:
                   pickle file locations for the four count structures
        '''
        self._K = K
        self._V = V
        
        #    standard LDA heuristics for the priors when not supplied
        if (alpha is None): alpha = 50. / K
        self._alpha = alpha
        if (beta is None): beta = 0.02
        self._beta = beta
        
        self._save_doc_inteval = save_doc_inteval
        self._save_epoch_inteval = save_epoch_inteval
        
        self._wn_topic_word_path = wn_topic_word_path
        self._wn_topic_path = wn_topic_path
        self._dn_topic_path = dn_topic_path
        self._doc_word_topic_path = doc_word_topic_path
    
    #    pickle one object to path, creating the parent directory if needed
    def _dump(self, obj, path):
        conf.mkfiledir_ifnot_exises(path)
        with open(file=path, mode='wb') as fw:
            pickle.dump(obj, fw)
    #    unpickle one object from path
    def _load(self, path):
        with open(file=path, mode='rb') as fr:
            return pickle.load(fr)
    
    #    save all counters
    def save_weights(self):
        '''Persist the four count structures to their configured paths.'''
        self._dump(self._wn_topic_word, self._wn_topic_word_path)       #    topic-word counts
        self._dump(self._wn_topic, self._wn_topic_path)                 #    topic totals
        self._dump(self._dn_topic, self._dn_topic_path)                 #    document-topic counts
        self._dump(self._doc_word_topic, self._doc_word_topic_path)     #    per-word topic assignments
    #    load all counters
    def load_weight(self):
        '''Restore the count structures; K, V and num_docs are re-derived from them.'''
        #    topic-word counts define K and V
        self._wn_topic_word = self._load(self._wn_topic_word_path)
        self._K = self._wn_topic_word.shape[0]
        self._V = self._wn_topic_word.shape[1]
        
        #    topic totals
        self._wn_topic = self._load(self._wn_topic_path)
        
        #    document-topic counts define the corpus size
        self._dn_topic = self._load(self._dn_topic_path)
        self._num_docs = len(self._dn_topic)
        
        #    per-word topic assignment of every document
        self._doc_word_topic = self._load(self._doc_word_topic_path)
    
    #    initialize all counters with a uniform topic draw for every word
    def initial_count(self, K, V, docs):
        '''
            @param K: number of topics
            @param V: vocabulary size
            @param docs: document iterator; each record is (doc_id, [word_id])
        '''
        print('初始化相关计数...')
        #    topic totals    Ndarray(K,)
        self._wn_topic = np.zeros(shape=(K, ), dtype=np.int32)
        #    topic-word counts    Ndarray(K, V)
        self._wn_topic_word = np.zeros(shape=(K, V), dtype=np.int32)
        #    per-document topic counts (stacked into an Ndarray(num_docs, K) below)
        dn_topic = []
        #    per-document, per-word topic assignment
        doc_word_topic = []
        
        #    uniform distribution for the initial topic draw
        p = [1. / K] * K
        
        print('初始化每篇文档，每个词相关计数')
        #    walk every document, draw a topic for every word, accumulate counts
        for _, doc in docs:
            #    this document's topic counts    Ndarray(K,)
            dn_topic_i = np.zeros(shape=(K, ), dtype=np.int32)
            
            #    one uniform topic per word
            new_topics = np.random.choice(a=K, size=len(doc), p=p)
            topic_ids, topic_counts = np.unique(new_topics, return_counts=True)
            
            #    bulk-update topic totals and this document's topic counts
            self._wn_topic[topic_ids] += topic_counts
            dn_topic_i[topic_ids] += topic_counts
            for t, w in zip(new_topics, doc):
                self._wn_topic_word[t][w] += 1
            
            dn_topic.append(dn_topic_i)
            doc_word_topic.append(new_topics)
        
        self._dn_topic = np.array(dn_topic)
        self._num_docs = len(dn_topic)
        self._doc_word_topic = doc_word_topic
        print('初始化完成. num_docs:', self._num_docs)
    
    
    #    model perplexity
    def ppl(self, docs):
        '''Perplexity of the current model over docs.
            ppl = exp(- 1/N * ∑(D)∑(Nd) log(∑(K)prob_topic_word[k,w] * prob_doc_topic[k]))
        '''
        #    current topic-word distribution Ndarray(K, V) and doc-topic distribution Ndarray(D, K)
        prob_topic_word = self._wn_topic_word / np.sum(self._wn_topic_word, axis=-1)[:, np.newaxis]
        prob_doc_topic = self._dn_topic / np.sum(self._dn_topic, axis=-1)[:, np.newaxis]
        
        sum_p = 0
        N = 0
        #    accumulate the log-likelihood of every word of every document
        for i, (_, doc) in enumerate(docs):
            #    mixture likelihood of each word of document i, then its log
            p = prob_topic_word[:, doc] * prob_doc_topic[i][:, np.newaxis]
            p = np.log(np.sum(p, axis=0))
            #    document log-likelihood
            sum_p += np.sum(p, axis=-1)
            N += len(doc)
        
        #    exp of the negative mean per-word log-likelihood
        return np.exp(-1 * sum_p / N)
    
    
    #    train the model
    def training(self, lda_ds, epochs=100):
        '''Run collapsed Gibbs sampling over the data source.
            @param lda_ds: data source; words_iterator() yields (doc_id, [word_id])
            @param epochs: number of full passes over the corpus
        '''
        #    if every counter already exists we are resuming from a checkpoint;
        #    otherwise initialize from scratch
        resumable = all(hasattr(self, attr) for attr in
                        ('_wn_topic', '_wn_topic_word', '_dn_topic', '_num_docs', '_doc_word_topic'))
        if (not resumable):
            self.initial_count(self._K, self._V, lda_ds.words_iterator())
        
        #    topic-word distribution of the previous epoch (for convergence logging)
        wt_prob_prev = None
        
        print('遍历每篇文档，每个词')
        for epoch in range(epochs):
            #    walk every document
            for i, (_, doc) in tqdm.tqdm(iterable=enumerate(lda_ds.words_iterator()), desc='Epoch={}'.format(epoch), total=self._num_docs, leave=True, ncols=100):
                num_d = len(doc)
                #    walk every word
                for j, wid in enumerate(doc):
                    #    remove the word's current assignment from every counter
                    old_topic = self._doc_word_topic[i][j]
                    self._wn_topic[old_topic] -= 1
                    self._wn_topic_word[old_topic][wid] -= 1
                    self._dn_topic[i][old_topic] -= 1
                    
                    #    full conditional topic distribution for this word
                    p = self.topic_distribution(wid=wid, 
                                                dn_topic=self._dn_topic[i], 
                                                alpha=self._alpha, 
                                                num_d=num_d, 
                                                wn_topic_word=self._wn_topic_word, 
                                                beta=self._beta, 
                                                wn_topic=self._wn_topic)
                    #    resample the word's topic
                    new_topic = np.random.choice(a=self._K, size=1, p=p)[0]
                    
                    #    add the new assignment back into every counter
                    self._wn_topic[new_topic] += 1
                    self._wn_topic_word[new_topic][wid] += 1
                    self._dn_topic[i][new_topic] += 1
                    self._doc_word_topic[i][j] = new_topic
                
                #    BUGFIX: (i + 1) must be parenthesized — '%' binds tighter than '+',
                #    so the original 'i+1 % interval == 0' never triggered a checkpoint
                if (self._save_doc_inteval > 0 and (i + 1) % self._save_doc_inteval == 0): self.save_weights()
            
            #    BUGFIX: same precedence fix; the original saved only at epoch 0
            if (self._save_epoch_inteval > 0 and (epoch + 1) % self._save_epoch_inteval == 0): self.save_weights()
            
            #    after each epoch, log the mean absolute change of the topic-word
            #    distribution versus the previous epoch, plus current perplexity
            wt_prob = self._wn_topic_word / np.sum(self._wn_topic_word, axis=-1)[:, np.newaxis]
            if (wt_prob_prev is None): wt_prob_prev = wt_prob
            else:
                log.info('epoch:{} avg_diff_wn_prob:{} ppl:{}'.format(epoch, np.average(np.abs(wt_prob - wt_prob_prev)), self.ppl(lda_ds.words_iterator())))
                wt_prob_prev = wt_prob
    
    #    full conditional topic distribution of one word
    def topic_distribution(self, wid, dn_topic, alpha, num_d, wn_topic_word, beta, wn_topic):
        '''p[k] ∝ part1 * part2, normalized over k
            part1 = (dn_topic[k] + α) / (num_d + K*α)
            part2 = (wn_topic_word[k,w] + β) / (wn_topic[k] + V*β)
           @return Ndarray(K, )
        '''
        part1 = self.op_part1(dn_topic, alpha, num_d)
        part2 = self.op_part2(wid, wn_topic_word, beta, wn_topic)
        
        p = part1 * part2
        #    normalize so np.random.choice receives a proper distribution
        return p / np.sum(p)
    #    smoothed document-topic factor
    def op_part1(self, dn_topic, alpha, num_d):
        '''part1 = (dn_topic[k] + α) / (num_d + K*α)'''
        return (dn_topic + alpha) / (num_d + self._K * alpha)
    #    smoothed topic-word factor
    def op_part2(self, wid, wn_topic_word, beta, wn_topic):
        '''part2 = (wn_topic_word[k,w] + β) / (wn_topic[k] + V*β)'''
        return (wn_topic_word[:, wid] + beta) / (wn_topic + self._V * beta)
    
    
    #    inference
    def divination(self, docs, max_doc_words, epochs=20):
        '''Infer the topic of each document and each word, keeping the trained model fixed.
            @param docs: Ndarray(batch_size, max_doc_words); -1 marks padding
            @param max_doc_words: output width of words_topic
            @param epochs: Gibbs iterations per document
            
            @return docs_topic: Ndarray(batch_size,)    topic of each document
                    words_topic: Ndarray(batch_size, max_doc_words)    topic of each word; -1 marks padding
        '''
        docs_topic = []     #    topic of each document
        words_topic = []    #    topic of each word of each document
        for doc in docs:
            #    strip padding values
            doc = doc[doc >= 0]
            
            dn_topic = np.zeros(shape=(self._K, ), dtype=np.int32)      #    Ndarray(K,) this doc's per-topic word count
            num_words = len(doc)                                        #    total words in the document
            #    BUGFIX: work on copies — the original aliased the trained counters
            #    and permanently corrupted the model with every prediction
            wn_topic_word = self._wn_topic_word.copy()                  #    topic-word counts    Ndarray(K, V)
            wn_topic = self._wn_topic.copy()                            #    topic totals         Ndarray(K,)
            
            #    uniform initial topic draw for every word
            p = [1. / self._K] * self._K
            topics = np.random.choice(a=self._K, size=len(doc), p=p)
            topic_ids, topic_counts = np.unique(topics, return_counts=True)
            
            #    seed the counters with the initial assignment
            dn_topic[topic_ids] += topic_counts
            wn_topic[topic_ids] += topic_counts
            doc_word_topic = topics
            for t, w in zip(topics, doc):
                wn_topic_word[t][w] += 1

            #    Gibbs iterations with greedy (argmax) reassignment
            for _ in range(epochs):
                for i, wid in enumerate(doc):
                    old_topic = doc_word_topic[i]
                    #    remove the current assignment
                    dn_topic[old_topic] -= 1
                    wn_topic[old_topic] -= 1
                    wn_topic_word[old_topic][wid] -= 1
                    
                    #    conditional topic distribution for this word
                    p = self.topic_distribution(wid, 
                                                dn_topic=dn_topic, 
                                                alpha=self._alpha, 
                                                num_d=num_words, 
                                                wn_topic_word=wn_topic_word, 
                                                beta=self._beta, 
                                                wn_topic=wn_topic)
                    #    deterministic choice at inference time (instead of sampling)
                    new_topic = np.argmax(p)
                    
                    #    add the new assignment back
                    dn_topic[new_topic] += 1
                    wn_topic[new_topic] += 1
                    wn_topic_word[new_topic][wid] += 1
                    doc_word_topic[i] = new_topic
            
            #    document topic = most populated topic
            docs_topic.append(np.argmax(dn_topic))
            #    pad with -1 (or truncate) to max_doc_words so results stack into a tensor
            wt = doc_word_topic
            if (len(doc_word_topic) < max_doc_words): wt = np.pad(doc_word_topic, (0, max_doc_words - len(doc_word_topic)), 'constant', constant_values=(0, -1))
            if (len(doc_word_topic) > max_doc_words): wt = doc_word_topic[:max_doc_words]
            words_topic.append(wt)
        
        words_topic = np.array(words_topic)
        docs_topic = np.array(docs_topic)
        return docs_topic, words_topic
