import time
import os
import sys
import shutil

from gensim.corpora.dictionary import Dictionary
from gensim.corpora import TextCorpus
from gensim.corpora import LowCorpus

current_folder = os.path.dirname(os.path.realpath(__file__))
sys.path.insert(0, os.path.dirname(os.path.dirname(current_folder)))

from news_classification import glda_bin
from news_classification.utils import filter_words


def doc_generator(corpus_fp, do_filter=False):
    """Yield tokenized documents from a whitespace-delimited corpus file.

    Each non-empty line of *corpus_fp* becomes one document (a list of word
    strings). Empty lines (and lines whose words are all filtered out) are
    skipped. Progress is printed every 1000 yielded documents.

    :param corpus_fp: path to a UTF-8 text file, one document per line.
    :param do_filter: if True, pass each document through ``filter_words``
        (project helper) before yielding.
    :return: generator of ``list[str]`` documents.
    """
    start = time.perf_counter()
    counter = 0
    with open(corpus_fp, encoding='utf-8') as fi:
        for line in fi:
            # str.split() with no argument already drops empty tokens;
            # no comprehension needed.
            words = line.split()
            if do_filter:
                words = filter_words(words)
            if not words:
                continue
            counter += 1
            if counter % 1000 == 0:
                print('\r%d/%fs..' % (counter, time.perf_counter() - start), end='')
            yield words


def count_corpus(corpus_fp):
    """Return the number of lines in *corpus_fp*, printing progress while counting."""
    print('Counting corpus..')
    total = 0
    t0 = time.perf_counter()
    with open(corpus_fp, encoding='utf-8') as handle:
        for _line in handle:
            total += 1
            # Lightweight progress indicator every 100 lines.
            if total % 100 == 0:
                print('\r%d/%fs..' % (total, time.perf_counter() - t0), end='')
    print()
    return total


def __convert_txt_corpus_with_filtered_dictionary(corpus_fp, gpu_corpus_fp, word_num=400000):
    """Build a frequency-filtered gensim Dictionary over *corpus_fp* and
    serialize the corpus in GibbsLDA++ (LOW) format to *gpu_corpus_fp*.

    Side effects: writes ``<corpus_fp>.dictionary`` (plain-text dictionary)
    and the serialized corpus at *gpu_corpus_fp*.

    :param corpus_fp: path to the whitespace-tokenized corpus, one doc per line.
    :param gpu_corpus_fp: output path for the GibbsLDA++-format corpus.
    :param word_num: vocabulary cap applied while streaming the corpus.
    :return: *gpu_corpus_fp* (for caller convenience).
    """
    print('Generating dictionary..')
    # NOTE(review): Dictionary's second positional parameter is `prune_at`,
    # so `word_num` acts as an in-stream vocabulary cap — confirm that is the
    # intent rather than a `keep_n` limit.
    dictionary = Dictionary(doc_generator(corpus_fp), word_num)
    # Drop tokens in fewer than 5 docs or more than 50% of docs; keep_n=None
    # keeps everything that survives those thresholds.
    dictionary.filter_extremes(no_below=5, no_above=0.5, keep_n=None)
    dictionary.save_as_text('%s.dictionary' % corpus_fp, sort_by_word=False)
    # token_filters=[] disables TextCorpus's default token filtering.
    # NOTE(review): TextCorpus's default tokenizer/character filters may still
    # normalize differently from the plain line.split() used to build the
    # dictionary above — verify the token streams line up.
    corpus = TextCorpus(corpus_fp, dictionary=dictionary, token_filters=[])
    print('Converting to GibbsLDA++ format..')
    # Third positional argument is id2word, so word ids map back to strings.
    LowCorpus.serialize(gpu_corpus_fp, corpus, dictionary)
    return gpu_corpus_fp


def convert_txt_corpus(corpus_fp, filter_dictionary=False):
    """Convert a one-document-per-line text corpus to GibbsLDA++ input format.

    GibbsLDA++ expects the document count on the first line, followed by the
    documents themselves. The converted file is written next to *corpus_fp*
    with a ``-gpu.txt`` suffix; if it already exists it is reused as-is.

    :param corpus_fp: path to the whitespace-tokenized source corpus.
    :param filter_dictionary: if True, build a frequency-filtered dictionary
        and serialize through gensim instead of a plain header-prepend.
    :return: path of the converted corpus file.
    """
    corpus4gpu_fp = os.path.splitext(corpus_fp)[0] + '-gpu.txt'
    if os.path.exists(corpus4gpu_fp):
        return corpus4gpu_fp
    if filter_dictionary:
        return __convert_txt_corpus_with_filtered_dictionary(corpus_fp, corpus4gpu_fp)
    counter = count_corpus(corpus_fp)
    # Prepend the document count in pure Python. The previous
    # os.system('sed "1i%d" ...') was non-portable (BSD sed rejects the
    # GNU "1i<text>" form) and interpolated unquoted paths into a shell.
    with open(corpus4gpu_fp, 'w', encoding='utf-8') as fo, \
            open(corpus_fp, encoding='utf-8') as fi:
        fo.write('%d\n' % counter)
        shutil.copyfileobj(fi, fo)
    return corpus4gpu_fp


def convert_wordmap2vocab_info(word_map_fp):
    """Convert a GibbsLDA++ ``wordmap.txt`` into a ``vocab_info.txt`` file.

    The wordmap's first line (vocabulary size header) is skipped; every
    following ``<word> <term_id>`` line becomes a tab-separated
    ``word\\t<word>\\t<term_id>\\tnull\\tnull`` row. The output is written as
    ``vocab_info.txt`` in the same directory as *word_map_fp*.

    :param word_map_fp: path to the GibbsLDA++ wordmap file.
    """
    # os.path.join also handles a bare relative filename correctly, where
    # '%s/vocab_info.txt' % '' would have produced '/vocab_info.txt'.
    vocab_fp = os.path.join(os.path.dirname(word_map_fp), 'vocab_info.txt')
    # Both files are managed by `with` so they are closed even if a malformed
    # line raises mid-loop (the output was previously left open on error).
    with open(word_map_fp, encoding='utf-8') as fi, \
            open(vocab_fp, 'w', encoding='utf-8') as fo:
        next(fi)  # skip the vocabulary-size header line
        for line in fi:
            word, term_id = line.split()
            fo.write('\t'.join(['word', word, term_id, 'null', 'null']) + '\n')


def convert_others2conf(other_fp, model_name):
    """Translate a GibbsLDA++ ``.others`` parameter file into a ``.conf`` file.

    Reads ``key=value`` pairs from *other_fp* and writes a config referencing
    the model's word-topic counts (``<model_name>.nw``) and ``vocab_info.txt``.
    The output path is *other_fp* with ``.others`` replaced by ``.conf``.

    :param other_fp: path to the ``model-final.others``-style file.
    :param model_name: base name used for the ``word_topic_file`` entry.
    :raises KeyError: if ``ntopics``, ``alpha`` or ``beta`` is missing.
    """
    conf = {}
    with open(other_fp, encoding='utf-8') as fi:
        for line in fi:
            line = line.strip()
            if not line:
                continue  # tolerate blank/trailing lines
            # partition instead of split: never raises, and keeps any '='
            # characters inside the value intact.
            k, _, v = line.partition('=')
            conf[k] = v
    with open(other_fp.replace('.others', '.conf'), 'w', encoding='utf-8') as fo:
        fo.write('type: LDA\n')
        fo.write('num_topics: %s\n' % conf['ntopics'])
        fo.write('alpha: %s\n' % conf['alpha'])
        fo.write('beta: %s\n' % conf['beta'])
        fo.write('word_topic_file: "%s.nw"\n' % model_name)
        fo.write('vocab_file: "vocab_info.txt"\n')


def train(corpus_fp, save_model_fp, topic_num=1024, iterations=50):
    """Train a GibbsLDA++ LDA model on *corpus_fp* and collect its outputs.

    Converts the corpus to GibbsLDA++ format, runs the external estimator
    binary (``glda_bin``), then moves the ``model-final.*`` artifacts from the
    corpus directory into the model directory under *model_name*, and derives
    ``vocab_info.txt`` and a ``.conf`` file from them.

    :param corpus_fp: path to the raw one-document-per-line corpus.
    :param save_model_fp: target path prefix; its dirname is the model dir
        and its basename the model name for the renamed artifacts.
    :param topic_num: number of LDA topics (``-ntopics``).
    :param iterations: Gibbs sampling iterations (``-niters``).
    """
    corpus_fp = convert_txt_corpus(corpus_fp, filter_dictionary=True)
    # -savestep is set past -niters so only the final model is written,
    # with no intermediate checkpoints.
    cmd = '%s -est -niters %d -savestep %d -twords 10 -dfile %s -ntopics %d' % (
        glda_bin, iterations, iterations + 1, corpus_fp, topic_num)
    # NOTE(review): paths are interpolated unquoted into a shell command and
    # os.system's exit status is not checked — paths with spaces or a failed
    # run will silently break the moves below; consider subprocess.run([...]).
    os.system(cmd)
    save_model_fd = os.path.dirname(save_model_fp)
    model_name = os.path.basename(save_model_fp)
    corpus_fd = os.path.dirname(corpus_fp)
    # GibbsLDA++ writes its model-final.* files next to the corpus; move each
    # artifact into the model directory, renamed under the model name.
    shutil.move('%s/model-final.twords' % corpus_fd, '%s/%s.twords' % (save_model_fd, model_name))
    shutil.move('%s/model-final.phi' % corpus_fd, '%s/%s.phi' % (save_model_fd, model_name))
    shutil.move('%s/model-final.theta' % corpus_fd, '%s/%s.theta' % (save_model_fd, model_name))
    others_fp = '%s/%s.others' % (save_model_fd, model_name)
    shutil.move('%s/model-final.others' % corpus_fd, others_fp)
    shutil.move('%s/model-final.tassign' % corpus_fd, '%s/%s.tassign' % (save_model_fd, model_name))
    shutil.move('%s/model-final.nw' % corpus_fd, '%s/%s.nw' % (save_model_fd, model_name))
    wordmap_fp = '%s/%s-wordmap.txt' % (save_model_fd, model_name)
    shutil.move('%s/wordmap.txt' % corpus_fd, wordmap_fp)
    # Derive the serving-side vocab/config files from the moved artifacts.
    convert_wordmap2vocab_info(wordmap_fp)
    convert_others2conf(others_fp, model_name)


if __name__ == '__main__':
    # CLI usage: <corpus_fp> <save_model_fp> [topic_num] [iterations]
    # The last two arguments are now optional and fall back to train()'s
    # defaults (1024 topics, 50 iterations); passing all four behaves exactly
    # as before.
    cli_topic_num = int(sys.argv[3]) if len(sys.argv) > 3 else 1024
    cli_iterations = int(sys.argv[4]) if len(sys.argv) > 4 else 50
    train(sys.argv[1], sys.argv[2], cli_topic_num, cli_iterations)
