# Compute tf-idf keyword weights for a corpus of documents
import math
import os

import jieba

from collections import defaultdict


def build_tf_idf_dict(corpus):
    """Build term-frequency and document-frequency tables for a corpus.

    Note: the original signature annotated the parameter as
    ``corpus: list(list())`` — that expression is *evaluated* at def time
    and yields an empty list as the annotation, which is meaningless.
    The annotation is dropped; the expected shape is documented here.

    Args:
        corpus: list of tokenized documents, each a list (or iterable)
            of word strings.

    Returns:
        tf_dict: {doc_index: {word: raw count}} term counts per document.
        idf_dict: {word: number of documents containing that word}.
    """
    tf_dict = defaultdict(dict)  # doc index -> {word: count}
    idf_dict = defaultdict(set)  # word -> set of doc indices containing it
    for index, text in enumerate(corpus):
        for word in text:
            # dict.get replaces the original "if word not in" + init dance
            tf_dict[index][word] = tf_dict[index].get(word, 0) + 1
            idf_dict[word].add(index)
    # collapse each word's index set to a document-frequency count
    idf_dict = {word: len(doc_ids) for word, doc_ids in idf_dict.items()}
    return tf_dict, idf_dict

def calculate_tf_idf(tf_dict, idf_dict):
    """Compute the tf-idf score for every word in every document.

    Args:
        tf_dict: {doc_index: {word: raw count}} as built by build_tf_idf_dict.
        idf_dict: {word: document frequency}.

    Returns:
        {doc_index: {word: tf-idf score}}.
    """
    doc_count = len(tf_dict)  # total number of documents (loop-invariant)
    tfidf = defaultdict(dict)
    for doc_index, counts in tf_dict.items():
        total_words = sum(counts.values())
        for word, freq in counts.items():
            # tf = relative frequency; idf is smoothed with +1 in the denominator
            term_freq = freq / total_words
            tfidf[doc_index][word] = term_freq * math.log(doc_count / (idf_dict[word] + 1))
    return tfidf

def calculate_tfidf(corpus):
    """Tokenize raw texts with jieba, then build the full tf-idf table.

    Args:
        corpus: list of raw text strings, one per document.

    Returns:
        {doc_index: {word: tf-idf score}}.
    """
    # lcut returns a concrete list (equivalent here, since each document's
    # tokens are consumed exactly once downstream)
    tokenized = [jieba.lcut(text) for text in corpus]
    tf_dict, idf_dict = build_tf_idf_dict(tokenized)
    return calculate_tf_idf(tf_dict, idf_dict)

# Given a tf-idf table, show the top-K keywords of each document/domain
def tf_idf_topk(tfidf_dict, paths=None, top=10, print_word=True):
    """Select (and optionally print) the top-K tf-idf words per document.

    Fixes vs. the original:
      * ``for i in range(top)`` raised IndexError whenever a document had
        fewer than ``top`` distinct words — now iterates the truncated list.
      * mutable default ``paths=[]`` combined with unguarded
        ``paths[text_index]`` crashed when no paths were supplied — the
        label lookup is now bounds-checked.

    Args:
        tfidf_dict: {doc_index: {word: tf-idf score}}.
        paths: optional list of document labels, indexed by doc index.
        top: number of keywords to keep per document.
        print_word: if True, print each document's keywords to stdout.

    Returns:
        {doc_index: [(word, score), ...]} sorted by score, descending.
    """
    if paths is None:  # avoid a shared mutable default argument
        paths = []
    topk_dict = {}
    for text_index, text_tfidf_dict in tfidf_dict.items():
        word_list = sorted(text_tfidf_dict.items(), key=lambda x: x[1], reverse=True)
        topk_dict[text_index] = word_list[:top]
        if print_word:
            # label is optional: fall back to empty string if no path given
            label = paths[text_index] if 0 <= text_index < len(paths) else ""
            print(text_index, label)
            for pair in topk_dict[text_index]:
                print(pair)
            print("----------")
    return topk_dict

def main():
    """Read every .txt file under category_corpus/ and print its top
    tf-idf keywords.

    Fix vs. the original: file handles were opened via ``open(...).read()``
    and never closed (a resource leak); they are now closed via ``with``.
    """
    dir_path = r"category_corpus/"
    corpus = []
    paths = []
    for filename in os.listdir(dir_path):
        path = os.path.join(dir_path, filename)
        if path.endswith("txt"):
            with open(path, encoding="utf8") as f:
                corpus.append(f.read())
            paths.append(os.path.basename(path))
    tf_idf_dict = calculate_tfidf(corpus)
    tf_idf_topk(tf_idf_dict, paths)

# Run only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()