#encoding=utf8
'''
Created on 2019年8月22日

@author: sida
'''
from gensim import corpora, models
from gensim.models.coherencemodel import CoherenceModel
import logging
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.DEBUG)

def computeDic(self):
    """Build a gensim Dictionary from the training documents.

    Reads the tokenized training documents from sunNodes[0] and returns
    a corpora.Dictionary built over them.
    """
    # obtain the dictionary's source documents from the training-data node
    train_dt_iter = self.sunNodes[0].getData()
    print(train_dt_iter[0])
    dic = corpora.Dictionary(train_dt_iter)
    # gensim fills id2token lazily: it stays {} until an id lookup such as
    # dic[0] forces it to be built from token2id.  The original debug print
    # therefore always showed an empty mapping ("wtf" in the old comment).
    if len(dic) > 0:
        _ = dic[0]  # force id2token population so the print below is meaningful
    print('dic id2token: ', dic.id2token)
    return dic

def computeCorpus(self):
    """Convert the training documents into a bag-of-words corpus.

    Pulls the tokenized documents from sunNodes[0] and the dictionary from
    sunNodes[1], and returns the list of doc2bow vectors, one per document.
    """
    documents = self.sunNodes[0].getData()
    dictionary = self.sunNodes[1].getData()
    return [dictionary.doc2bow(document) for document in documents]

def topNlabels(self, topN=12000):
    """Collect every vocabulary term that ranks in the top-N of any topic.

    Reads the trained LDA model from sunNodes[0] and the dictionary from
    sunNodes[1]; returns the set of term strings whose weight is among the
    topN largest within at least one topic.
    """
    import numpy as np
    lda = self.sunNodes[0].getData()
    dic = self.sunNodes[1].getData()
    # topic-word weight matrix, shape (num_topics, vocabulary_size)
    topic_word_weights = lda.get_topics()
    # per-topic token ids sorted by ascending weight; the last topN columns
    # hold each topic's highest-weighted tokens
    ranked_ids = topic_word_weights.argsort(axis=1)
    top_ids = set(ranked_ids[:, -topN:].flatten())
    labels = [dic[token_id] for token_id in top_ids]
    print('auto labels:', labels[:15], labels[-15:], len(labels), sep='\n')
    return set(labels)

num_topics = 512
def compute_lda(self):
    """Train an LDA model on the corpus and dictionary nodes.

    Reads the bag-of-words corpus from sunNodes[0] and the gensim
    Dictionary from sunNodes[1], trains an LdaModel with the module-level
    `num_topics`, and returns the trained model.
    """
    print('start training lda')
    corpus = self.sunNodes[0].getData()
    dic = self.sunNodes[1].getData()
    lda = models.LdaModel(corpus, id2word=dic, num_topics=num_topics,
                          passes=10, iterations=25, chunksize=200, random_state=1)
    # log_perplexity returns a per-word likelihood bound, NOT topic
    # coherence -- the original message mislabelled the value.
    log_perplexity = lda.log_perplexity(corpus)
    print(num_topics, 'Log perplexity (per-word bound): %.4f.' % log_perplexity, sep=' ')
    return lda

def predict(test_doc, lda, dic, TopN=5, methodToGetTopNterms=0):
    """Infer the dominant topic of a new document and describe it with terms.

    Parameters
    ----------
    test_doc : list of token strings for the document.
    lda : trained gensim LdaModel (indexing with a bow yields the topic
        distribution).
    dic : gensim Dictionary (doc2bow and id -> token lookup).
    TopN : how many descriptive terms to return.
    methodToGetTopNterms : 0 -> top terms of the single most probable topic;
        1 -> Bayesian mixture: weight every topic's word distribution by the
        document's topic probabilities and take the overall top-N words.

    Returns
    -------
    (topicId, topNterms) : id of the most probable topic (0 when the
        distribution is empty or method 1 is used) and a list of term strings
        (empty when nothing could be selected).
    """
    doc_bow = dic.doc2bow(test_doc)  # document -> bag-of-words
    doc_topics_distribution = lda[doc_bow]  # topic distribution of the new document
    print('\n', test_doc)
    topicId = 0
    # BUGFIX: topNterms was unbound when no branch assigned it, raising
    # NameError at the return statement; default to an empty list.
    topNterms = []
    if methodToGetTopNterms == 0:
        if doc_topics_distribution:
            # single most probable topic, then its TopN terms
            topicId = max(doc_topics_distribution, key=lambda tp: tp[1])[0]
            topNtermsKeys = lda.get_topic_terms(topicid=topicId, topn=TopN)  # [(id, prob)]
            topNterms = [dic[term_id] for term_id, _prob in topNtermsKeys]
        else:
            # BUGFIX: the old `topicId != None` guard was dead (topicId starts
            # at 0); an empty distribution silently used topic 0's terms.
            print('empty topic distribution for document; no terms returned')
    elif methodToGetTopNterms == 1:
        # Bayesian method: treat the topic distribution as likelihood weights
        # over the per-topic word distributions and rank words by the mixture.
        import numpy as np
        beta = lda.get_topics()  # shape (num_topics, vocabulary_size)
        # BUGFIX: original seeded this with list(range(num_topics)), so every
        # topic absent from the distribution contributed its *index* as a
        # weight, corrupting the mixture.  Sizing from beta also drops the
        # dependency on the module-level num_topics constant.
        pOfK = np.zeros(beta.shape[0])
        for topicID, pOfTopic in doc_topics_distribution:
            pOfK[topicID] = pOfTopic
        pOfV = pOfK.dot(beta)  # mixture probability of every vocabulary word
        topNterms = [dic[word_id] for word_id in pOfV.argsort()[-TopN:]]
    return topicId, topNterms
    
    
    
    