# gensim LDA related

import os
import subprocess
import time

from gensim.models import CoherenceModel
from gensim.models.ldamodel import LdaModel
import numpy as np

from . import independent_script_folder
from .utils import cut_doc, filter_words, convert_sparse2dense


def get_corpus(txt_corpus_fp):
    """Read a whitespace-tokenized corpus file into a list of token lists.

    Each line of the file becomes one document: it is split on whitespace,
    every token is stripped, and the tokens are passed through filter_words.

    :param txt_corpus_fp: path of the UTF-8 text corpus, one document per line
    :return: list of filtered token lists, one per line
    """
    documents = []
    with open(txt_corpus_fp, encoding='utf-8') as reader:
        for raw_line in reader:
            tokens = [token.strip() for token in raw_line.strip().split()]
            documents.append(filter_words(tokens))
    return documents


def generate_corpus(txt_corpus_fp):
    """Lazily yield one filtered token list per line of the corpus file.

    Streaming counterpart of get_corpus: same per-line processing
    (whitespace split, strip, filter_words) but yields documents one at
    a time instead of materializing the whole corpus.

    :param txt_corpus_fp: path of the UTF-8 text corpus, one document per line
    """
    with open(txt_corpus_fp, encoding='utf-8') as reader:
        for raw_line in reader:
            tokens = [token.strip() for token in raw_line.strip().split()]
            yield filter_words(tokens)


def train(corpus_fp, save_model_fp, topic_num=100):
    """Train an LDA topic model in a separate process and time the run.

    Delegates to the standalone multicore training script under
    ``independent_script_folder``, executed with the platform-specific
    virtualenv Python interpreter.

    :param corpus_fp: path of the tokenized corpus file
    :param save_model_fp: path where the trained model will be saved
    :param topic_num: number of LDA topics to train (default 100)
    """
    start = time.perf_counter()
    # Pick the virtualenv interpreter for the current platform.
    if os.name == 'nt':
        python_bin = os.path.join('venvpy', 'Scripts', 'python')
    else:
        python_bin = os.path.join('venv', 'bin', 'python')
    script_fp = os.path.join(independent_script_folder, 'multicore-train-lda-model.py')
    # Use an argument list (no shell) instead of the former os.system()
    # string: paths containing spaces or shell metacharacters can no longer
    # break the command or be interpreted by the shell.
    subprocess.run([python_bin, script_fp, corpus_fp, save_model_fp, str(topic_num)])
    tm_cost = time.perf_counter() - start
    print('Train LDA model: %fs' % tm_cost)


def get_lda_model(model_fp):
    """Load a previously trained LdaModel from disk.

    :param model_fp: path of the saved model file
    :return: the loaded gensim LdaModel
    """
    lda_model = LdaModel.load(model_fp)
    return lda_model


def show_lda_topic(lda_model, topic_id, top_words=10):
    """Print the top terms of one topic together with their probabilities.

    :param lda_model: a trained LdaModel
    :param topic_id: id of the topic to display
    :param top_words: how many top terms to print (default 10)
    """
    top_terms = lda_model.get_topic_terms(topic_id, topn=top_words)
    print('Topic = %d\t%d' % (topic_id, top_words))
    print('==========================================================')
    for word_id, probability in top_terms:
        print('%s\t%f' % (lda_model.id2word[word_id], probability))


def show_lda_topics(lda_model, top_words=10):
    """Print the top terms of every topic in the model.

    :param lda_model: a trained LdaModel
    :param top_words: how many top terms to print per topic (default 10)
    """
    for topic_id in range(lda_model.num_topics):
        show_lda_topic(lda_model, topic_id, top_words=top_words)


def compute_coherence_value(model_fp, corpus_fp):
    """Load an LDA model and compute its coherence over a text corpus.

    :param model_fp: path of a saved LdaModel
    :param corpus_fp: path of the tokenized corpus file
    :return: the coherence score (float)
    """
    print('Loading model from %s..' % model_fp)
    lda_model = LdaModel.load(model_fp)
    start = time.perf_counter()
    print('Computing model coherence..')
    # Materialize the corpus instead of passing a generator: the default
    # c_v coherence measure iterates `texts` more than once (segmentation
    # plus sliding-window probability estimation), so a one-shot generator
    # would be exhausted after the first pass and skew the result.
    texts = get_corpus(corpus_fp)
    co_model = CoherenceModel(lda_model, texts=texts, dictionary=lda_model.id2word)
    co_value = co_model.get_coherence()
    print('Compute model coherence: %f/%fs' % (co_value, time.perf_counter() - start))
    return co_value


def infer_doc_topics(doc, lda_model, sort=True):
    """Infer the topic distribution of a raw document.

    The document is tokenized with cut_doc, converted to a bag-of-words,
    and fed through the model.

    :param doc: raw document text
    :param lda_model: a trained LdaModel
    :param sort: when true, return (topic_id, prob) pairs ordered by
        probability, highest first; otherwise return the model's order
    :return: list of (topic_id, probability) pairs
    """
    bow = lda_model.id2word.doc2bow(cut_doc(doc))
    topic_vector = lda_model[bow]
    if not sort:
        return topic_vector
    return sorted(topic_vector, key=lambda pair: pair[1], reverse=True)


def get_topic_word_matrix(lda_model):
    """Return the topic*word matrix phi: row t holds P(word | topic=t).

    :param lda_model: a trained LdaModel
    :return: the model's topic-word probability matrix
    """
    phi = lda_model.get_topics()
    return phi


def get_word_topic_matrix(lda_model):
    """Build the word*topic matrix: row w holds the topic weights of word w.

    Queries get_term_topics with a tiny threshold (1e-8) so nearly all
    topic weights are retained; entries the model omits stay zero.
    Progress is printed in place as a running counter.

    :param lda_model: a trained LdaModel
    :return: numpy array of shape (vocab_size, num_topics)
    """
    print('Fetching word*topic matrix..')
    vocab = lda_model.id2word
    matrix = np.zeros((len(vocab), lda_model.num_topics))
    for word_id in range(len(vocab)):
        for topic_id, weight in lda_model.get_term_topics(word_id, 1e-8):
            matrix[word_id][topic_id] = weight
        print('\r%d..' % (word_id + 1), end='')
    return matrix


def calculate_doc_generation_prob_vector(text, lda_model, topic_word_matrix=None):
    """Compute, per word of the document, its generation probability under the model.

    For every word in the document, the per-topic phi weights are
    normalized, combined with the document's topic distribution and the
    global topic-word matrix, and scaled by the word's count in the
    document.

    :param text: the document as a list of tokens
    :param lda_model: a trained LdaModel
    :param topic_word_matrix: optional precomputed phi matrix
        (topic x word); fetched from the model when omitted
    :return: dense probability vector over the whole vocabulary
    """
    if topic_word_matrix is None:
        topic_word_matrix = get_topic_word_matrix(lda_model)
    doc_bow = lda_model.id2word.doc2bow(text)
    doc_bow_dict = dict(doc_bow)
    topic_probs, word_topic, word_phi = lda_model.get_document_topics(doc_bow, per_word_topics=True)
    topic_probs = dict(topic_probs)

    word_probs = []
    for word_id, topic_weights in word_phi:
        word_count = doc_bow_dict[word_id]
        sum_weights = sum(weight for _, weight in topic_weights)
        prob = 0.
        for topic_id, phi in topic_weights:
            normed_weight = phi / sum_weights
            # Topics absent from the document distribution contribute 0.
            prob += (normed_weight * topic_probs.get(topic_id, 0.) * topic_word_matrix[topic_id][word_id])
        prob *= word_count
        word_probs.append((word_id, prob))
    # BUG FIX: the sparse vector is indexed by *word id*, so the dense
    # length must be the vocabulary size, not the topic count — word ids
    # routinely exceed num_topics, and calculate_mass_article_probability_vector
    # already densifies the same kind of vector with the vocabulary size.
    return convert_sparse2dense(word_probs, len(lda_model.id2word))


def calculate_mass_article_probability_vector(corpus_fp, lda_model, topic_word_matrix=None):
    """Compute the corpus-averaged word probability vector.

    First averages each topic's document probability over the whole
    corpus, then mixes the topic-word matrix with those averaged topic
    weights to obtain one probability per vocabulary word. Progress is
    printed in place as a running document counter.

    :param corpus_fp: path of the tokenized corpus file
    :param lda_model: a trained LdaModel
    :param topic_word_matrix: optional precomputed phi matrix
        (topic x word); fetched from the model when omitted
    :return: dense probability vector over the whole vocabulary
    """
    if topic_word_matrix is None:
        topic_word_matrix = get_topic_word_matrix(lda_model)
    accumulated_probs = {}
    doc_count = 0
    for tokens in generate_corpus(corpus_fp):
        bow = lda_model.id2word.doc2bow(tokens)
        for topic_id, prob in lda_model.get_document_topics(bow):
            accumulated_probs.setdefault(topic_id, []).append(prob)
        doc_count += 1
        print('\r%d..' % doc_count, end='')
    averaged_probs = {
        topic_id: sum(probs) / doc_count for topic_id, probs in accumulated_probs.items()
    }

    mapv = []
    for word_id in lda_model.id2word.keys():
        # Topics never observed in the corpus contribute 0.
        word_prob = sum(
            topic_word_matrix[topic_id][word_id] * averaged_probs.get(topic_id, 0.)
            for topic_id in range(lda_model.num_topics)
        )
        mapv.append((word_id, word_prob))
    return convert_sparse2dense(mapv, len(lda_model.id2word.keys()))
