# GibbsLDA++ related

import os
import pickle
import subprocess
import time
from itertools import islice

from . import independent_script_folder
from .familia_wrapper import InferenceEngineWrapper
from .utils import cut_doc, convert_sparse2dense


def gpu_train(corpus_fp, save_model_fp, topic_num=1024, iterations=50):
    """
    Train an LDA model on the GPU via an external helper script.

    The glda-trained model is converted to Familia model format by the
    script, so inference can later run through the Familia inference
    engine.

    :param corpus_fp: path to the training corpus file
    :param save_model_fp: path the trained model is written to
    :param topic_num: number of topics to train (default 1024)
    :param iterations: number of training iterations (default 50)
    """
    start = time.perf_counter()
    # Pick the virtualenv interpreter for the current platform.
    if os.name == 'nt':
        python_bin = os.path.join('venvpy', 'Scripts', 'python')
    else:
        python_bin = os.path.join('venv', 'bin', 'python')
    script_fp = os.path.join(independent_script_folder, 'gpu-train-lda-model.py')
    # Use an argument list with shell=False instead of os.system on a
    # %-formatted string, so paths containing spaces or shell
    # metacharacters cannot break (or inject into) the command line.
    # check=False mirrors os.system, which ignored the exit status.
    subprocess.run(
        [python_bin, script_fp, corpus_fp, save_model_fp,
         str(topic_num), str(iterations)],
        check=False)
    tm_cost = time.perf_counter() - start
    print('Train LDA model: %fs' % tm_cost)


def get_glda_infer_engine(glda_model_fp):
    """Build a Familia inference engine for the model at *glda_model_fp*.

    The engine is constructed from the model's directory together with
    the ``<model>.conf`` configuration file expected to live next to it.
    """
    config_name = os.path.basename('%s.conf' % glda_model_fp)
    return InferenceEngineWrapper(os.path.dirname(glda_model_fp), config_name)


def infer_doc_topics_by_glda(doc, infer_engine):
    """Infer the topic distribution of *doc* with the given engine.

    The document is tokenized by ``cut_doc`` and the tokens are joined
    with single spaces before being handed to ``lda_infer``.
    """
    tokenized = ' '.join(cut_doc(doc))
    return infer_engine.lda_infer(tokenized)


def read_glda_phi_file(glda_model_fp):
    """Load the topic-word matrix (phi) of a glda model.

    Reads ``<model>.phi`` — one whitespace-separated row of floats per
    topic — and caches the parsed matrix as ``<model>.phi.pkl`` so that
    subsequent calls skip the text parsing entirely.
    """
    phi_fp = '%s.phi' % glda_model_fp
    cache_fp = '%s.pkl' % phi_fp
    # Fast path: pickle cache written by a previous call.
    if os.path.exists(cache_fp):
        with open(cache_fp, 'rb') as fi:
            return pickle.load(fi)
    print('Reading glda phi file from %s..' % phi_fp)
    phis = []
    with open(phi_fp, encoding='utf-8') as fi:
        for row_no, row in enumerate(fi, 1):
            phis.append([float(tok) for tok in row.strip().split()])
            print('\r%d..' % row_no, end='')
    # Persist the parsed matrix for the fast path above.
    with open(cache_fp, 'wb') as fo:
        pickle.dump(phis, fo)
    return phis


def read_glda_theta_file(glda_model_fp):
    """Stream document-topic rows (theta) from ``<model>.theta``.

    Yields one list of floats per document line; the file is consumed
    lazily so a huge theta file never sits in memory at once.
    """
    theta_fp = '%s.theta' % glda_model_fp
    print('Reading glda theta file from %s..' % theta_fp)
    with open(theta_fp, encoding='utf-8') as fi:
        yield from ([float(tok) for tok in row.strip().split()] for row in fi)


def read_glda_conf(glda_model_fp):
    """Parse the ``<model>.conf`` key/value configuration file.

    Each line has the form ``key:value``; double quotes around values
    are removed.  Returns a dict mapping keys to string values.

    Only the FIRST colon separates key from value, so values containing
    colons (e.g. Windows paths) survive intact, and lines without a
    colon — including blank lines — are skipped instead of raising
    ``IndexError``.
    """
    conf_fp = '%s.conf' % glda_model_fp
    conf = {}
    with open(conf_fp, encoding='utf-8') as fi:
        for line in fi:
            line = line.strip().replace('"', '')
            if ':' not in line:
                continue  # blank or malformed line
            key, value = line.split(':', 1)  # split on the first colon only
            conf[key.strip()] = value.strip()
    return conf


def read_glda_vocab(glda_model_fp):
    """Load the word -> id mapping stored beside the glda model.

    ``vocab_info.txt`` is tab-separated: column 1 holds the word and
    column 2 its integer id.
    """
    vocab_file = '%s/vocab_info.txt' % os.path.dirname(glda_model_fp)
    with open(vocab_file, encoding='utf-8') as fi:
        rows = (row.strip().split('\t') for row in fi)
        return {fields[1]: int(fields[2]) for fields in rows}


def calculate_glda_mapv(glda_model_fp):
    """Compute the marginal word-probability vector of a glda model.

    For each vocabulary word ``w``: ``p(w) = sum_t phi[t][w] * p(t)``,
    where ``p(t)`` is each topic's probability mass accumulated over all
    theta rows and normalized by the total mass.  Returns the dense
    vector produced by ``convert_sparse2dense``.
    """
    conf = read_glda_conf(glda_model_fp)
    topic_num = int(conf['num_topics'])
    word2id = read_glda_vocab(glda_model_fp)
    vocab_size = len(word2id)
    print('Calculating glda mapv: %s' % glda_model_fp)
    phis = read_glda_phi_file(glda_model_fp)
    # Accumulate total probability mass per topic across all documents.
    topic_mass_probs = [0.] * topic_num
    theta_sum = 0.
    doc_no = 0
    for doc_topic_probs in read_glda_theta_file(glda_model_fp):
        theta_sum += sum(doc_topic_probs)
        for t, p in enumerate(doc_topic_probs):
            topic_mass_probs[t] += p
        doc_no += 1
        print('\r%d..' % doc_no, end='')
    topic_mass_probs = [mass / theta_sum for mass in topic_mass_probs]
    # Marginalize over topics for every word id.
    mapv = [
        (word_id,
         sum(phis[t][word_id] * topic_mass_probs[t] for t in range(topic_num)))
        for word_id in range(vocab_size)
    ]
    return convert_sparse2dense(mapv, vocab_size)


def calculate_glda_doc_generation_prob_vector(text, doc_topic_probs, phi, word2id):
    """Compute generation probabilities of the distinct words in *text*.

    For each in-vocabulary word ``w``: ``p(w|doc) = sum prob *
    phi[topic][id(w)]`` over the ``(topic, prob)`` pairs in
    *doc_topic_probs*.  Returns the dense vector from
    ``convert_sparse2dense`` sized to the vocabulary.
    """
    word_ids = {word2id[w] for w in set(text) if w in word2id}
    word_probs = [
        (wid, sum(prob * phi[topic_id][wid] for topic_id, prob in doc_topic_probs))
        for wid in word_ids
    ]
    return convert_sparse2dense(word_probs, len(word2id))


def read_glda_mapv(model_fp):
    """Load the pickled marginal word-probability vector ``<model>.mapv``."""
    with open('%s.mapv' % model_fp, 'rb') as fi:
        return pickle.load(fi)


def read_topic_words(model_fp):
    """Parse the ``<model>.twords`` top-words file.

    The file alternates ``Topic <n>th:`` header lines with indented
    ``word probability`` lines.  Returns
    ``{topic_id: [[word, prob], ...]}``.

    Blank/short lines and word lines appearing before the first topic
    header are skipped; the original code raised ``IndexError`` on a
    blank line and ``KeyError`` on a word line without a prior header.
    """
    topic_words = {}
    topic_id = None  # no header seen yet
    with open('%s.twords' % model_fp, encoding='utf-8') as fi:
        for line in fi:
            if line.startswith('Topic'):
                # e.g. "Topic 12th:" -> 12
                topic_id = int(line.strip().split()[1].replace('th:', ''))
                topic_words[topic_id] = []
                continue
            cnt = line.strip().split()
            if topic_id is None or len(cnt) < 2:
                continue  # blank/malformed line or word before any header
            topic_words[topic_id].append([cnt[0], float(cnt[1])])
    return topic_words
