"""
生成词云图：预处理（分词、词频统计）
"""


from collections import Counter

import jieba
import nltk
import pandas as pd
from nltk import word_tokenize, FreqDist
# from jieba import analyse


# Input paths: the novel chapter, a stop-word list, and a custom jieba dictionary.
novel_path = '../../notebook/ch-2.txt'
stopwords_path = '../../notebook/stop-words.txt'
user_dict_path = '../../notebook/user-dict.txt'

"""读取小说内容"""
# Read the whole chapter into one UTF-8 string.
with open(novel_path, 'r', encoding='utf-8') as f:
    ch2 = f.read()

# print(sentence[:500])

"""分词"""
# Load the user-defined dictionary so jieba keeps domain-specific terms
# (e.g. character names) as single tokens instead of splitting them.
jieba.load_userdict(user_dict_path)

# Alternative: keyword extraction with stop-word filtering (currently unused).
# analyse.set_stop_words(stopwords_path)
# words = analyse.extract_tags(ch2)

# Segment the full text into a flat list of tokens.
words = jieba.lcut(ch2)
# print(words[:20])
# print(words[:20])


"""词频统计"""

# Manual counting
def manual_count(words: list) -> dict[str, int]:
    """Count how many times each word occurs.

    Args:
        words: tokenized words (may be empty).

    Returns:
        Mapping from each distinct word to its occurrence count.
    """
    # collections.Counter tallies at C speed — no need to hand-roll the
    # dict.get(..., 0) + 1 loop. dict() keeps the declared return type.
    return dict(Counter(words))

# print(manual_count(words))

# pandas-based counting
def pd_count(words: list) -> pd.Series:
    """Count word frequencies with pandas.

    Args:
        words: tokenized words.

    Returns:
        A Series indexed by word (index name ``'word'``) with occurrence
        counts, sorted in descending order.
    """
    frame = pd.DataFrame({'word': words})
    # Group identical words together and count group sizes.
    counts = frame.groupby(['word']).size()
    # Most frequent words first.
    return counts.sort_values(ascending=False)

# print(pd_count(words))

# nltk-based counting
def nltk_count(words: list):
    """Return the 10 most common ``(word, count)`` pairs using nltk's FreqDist."""
    return FreqDist(words).most_common(10)

# Show the top-10 most frequent words from the nltk-based count.
print(nltk_count(words))