"""
打开文件，取question,分词，按照词频从高到低排序。
"""

import json
import os
from collections import Counter, defaultdict

import jieba


def sentence2words(sentence, cut_all=False):
    """
    Tokenize a sentence with jieba.

    The original docstring claimed full mode was the default; the code
    actually defaults to ``cut_all=False`` (jieba's accurate mode).

    :param sentence: the sentence to tokenize
    :param cut_all: True for jieba's full mode, False (the default) for
                    accurate mode
    :return: a lazy generator of tokens (``jieba.cut`` does not build a list)
    """
    return jieba.cut(sentence, cut_all=cut_all)


def read_file(path):
    """
    Lazily yield the lines of a UTF-8 text file with the trailing newline removed.

    Note: the file is opened in text mode, so universal-newline translation
    already converts ``\\r\\n`` (and lone ``\\r``) to ``\\n`` before the line
    reaches us — the previous explicit ``'\\r\\n'`` check was dead code.
    A final line without a newline is yielded unchanged.

    :param path: path of the file to read
    :return: generator of newline-stripped lines
    """
    with open(path, 'r', encoding='utf8') as f:
        for line in f:
            # Each line carries at most one trailing '\n'; drop it if present.
            if line.endswith('\n'):
                line = line[:-1]
            yield line


def analyze_doc(path):
    """
    Read a JSON-lines file, tokenize each record's ``question`` field and
    build word statistics.

    :param path: path to a file with one JSON object per line; each object
                 must contain a ``question`` key
    :return: a 4-tuple of
             - word2count: mapping word -> occurrence count (a ``Counter``,
               which is a ``dict`` subclass)
             - word2idx_set: mapping word -> set of 0-based line indices
               where the word appears
             - sorted_word_cnt_pairs: list of (word, count) pairs sorted by
               count, descending
             - questions: the raw question strings in file order
    """
    word2count = Counter()
    word2idx_set = defaultdict(set)
    questions = []

    for i, line in enumerate(read_file(path)):
        # Parse one JSON record and keep its question text.
        question = json.loads(line)['question']
        questions.append(question)
        # Count every token and remember which lines it occurred on.
        for w in sentence2words(question):
            word2count[w] += 1
            word2idx_set[w].add(i)

    # Sort by count, descending. reverse=True keeps equal-count words in
    # insertion order (the old sorted(...)[::-1] reversed ties instead).
    sorted_word_cnt_pairs = sorted(
        word2count.items(), key=lambda pair: pair[1], reverse=True
    )

    # Convert the defaultdict so missing-key lookups by callers raise
    # KeyError instead of silently creating empty entries.
    return word2count, dict(word2idx_set), sorted_word_cnt_pairs, questions


if __name__ == '__main__':

    def main():
        """Analyze the sample file and print the most/least frequent words."""
        # Renamed from 'dir', which shadowed the builtin of the same name.
        script_dir = os.path.dirname(os.path.abspath(__file__))
        path = os.path.join(script_dir, 'submit_v2_1.tmp.json')
        word2count, word2idx_set, sorted_word_cnt_pairs, questions = analyze_doc(path)

        top_k = 5
        print(f'出现最频繁的{top_k}个词及出现个数：')
        print(sorted_word_cnt_pairs[:top_k])

        print(f'出现最少的{top_k}个词及出现个数：')
        print(sorted_word_cnt_pairs[-top_k:])

        def get_source(word, top_k=3):
            """Print the first ``top_k`` questions (by line index) containing ``word``."""
            print('--------')
            print(f'出现"{word}"的前{top_k}个句子：')
            # Indices are sorted so the earliest occurrences are shown first.
            idx_list = sorted(word2idx_set[word])[:top_k]
            print('\n'.join(f'{idx}: {questions[idx]}' for idx in idx_list))

        # Show source sentences for both the most and the least frequent words.
        for word, _ in sorted_word_cnt_pairs[:top_k] + sorted_word_cnt_pairs[-top_k:]:
            get_source(word)

    main()
