# Analyze the topic distribution of test data using a trained LDA model

import json
from collections import Counter
import sys

from ..convert_dataset2corpus import read_jsonl_dataset
from ..lda import get_lda_model, infer_doc_topics


def run():
    """Tag each document in a JSONL dataset with its dominant LDA topics.

    Command-line arguments:
        sys.argv[1]: path to a trained LDA model
                     (e.g. './model/1M-news-50-topics-lda.model')
        sys.argv[2]: path to the input JSONL dataset
                     (e.g. './data/1w-data.jsonl')
        sys.argv[3]: output file path
                     (e.g. 'data/topic-classes/1M-news-50-topics-1w-news-test.txt')

    Writes one line per (document, topic) pair whose inferred topic
    probability is >= 0.2, formatted as '<topic_id>\\t<json>'.  Afterwards
    prints a per-topic summary: topic id, document count, and top words.
    """
    model_fp = sys.argv[1]
    lda_model = get_lda_model(model_fp)
    dataset_fp = sys.argv[2]
    output_fp = sys.argv[3]
    topic_counts = []
    counter = 0
    # Context manager guarantees the output file is closed even if an
    # exception occurs mid-loop (the original leaked the handle on error).
    with open(output_fp, 'w', encoding='utf-8') as fo:
        for keyno, text in read_jsonl_dataset(dataset_fp):
            vector = infer_doc_topics(text, lda_model)
            for topic_id, prob in vector:
                # Keep only topics that carry substantial weight in this doc.
                if prob >= 0.2:
                    topic_counts.append(topic_id)
                    record = json.dumps({'text': text, 'keyno': keyno}, ensure_ascii=False)
                    fo.write('%d\t%s' % (topic_id, record) + '\n')
            counter += 1
            # Simple in-place progress indicator.
            print('\r%d..' % counter, end='')
    print()
    # Summary: topic id, number of matched docs, and the topic's top words.
    for tid, c in Counter(topic_counts).most_common():
        word_ids = [r[0] for r in lda_model.get_topic_terms(tid)]
        words = [lda_model.id2word[wid] for wid in word_ids]
        print('%d\t%d\t%s' % (tid, c, ','.join(words)))
