# 本文件用于执行词频分析（按词性分类 + N-gram短语）
# 功能：
#   1. 提取动词前50项、名词前50项
#   2. 提取2-gram短语前10项
#   3. 提取3-gram短语前10项
# 返回内容说明（JSON格式）：
#   - verbs: 动词词频（前50）
#       - key: 动词 (str)
#       - value: 出现次数 (int)
#   - nouns: 名词词频（前50）
#       - key: 名词 (str)
#       - value: 出现次数 (int)
#   - bigrams: 2-gram短语（前10）
#       - key: 二元短语 (str)
#       - value: 出现次数 (int)
#   - trigrams: 3-gram短语（前10）
#       - key: 三元短语 (str)
#       - value: 出现次数 (int)
#   - top_30: 总体词频（前30）
#       - key: 词语 (str)
#       - value: 出现次数 (int)
import json
from collections import Counter
import jieba.posseg as pseg
from data_load import load_data,load_stopwords
import jieba


def generate_ngrams(words, n):
    """Return all n-gram phrases of *words*, each joined into one string.

    Yields an empty list when fewer than n tokens are available.
    """
    last_start = len(words) - n + 1
    grams = []
    for start in range(last_start):
        grams.append("".join(words[start:start + n]))
    return grams


def get_word_analysis(data):
    """Generate word-frequency analysis (POS classes + N-gram phrases).

    Args:
        data: mapping whose values are dicts with a 'text' field (str).

    Returns:
        dict with keys:
            verbs:    top-50 verb frequencies   {word: count}
            nouns:    top-50 noun frequencies   {word: count}
            bigrams:  top-10 2-gram phrases     {phrase: count}
            trigrams: top-10 3-gram phrases     {phrase: count}
            top_30:   top-30 overall word freqs {word: count}
    """
    texts = [item['text'] for item in data.values()]
    stopwords = load_stopwords()

    # Frequency accumulators
    verb_counter = Counter()
    noun_counter = Counter()
    bigram_counter = Counter()
    trigram_counter = Counter()
    word_counter = Counter()  # overall frequencies for the top-30 table

    for text in texts:
        # POS-tag once per text; the original called pseg.cut(text) twice
        # (once for the n-gram word list, once for the verb/noun loop),
        # doubling the cost of the most expensive step.
        tagged = list(pseg.cut(text))

        # Tokens kept for n-gram phrases: not a stopword, length > 1.
        words = [w for w, _ in tagged
                 if w not in stopwords and len(w) > 1]

        # Verb / noun counts ('v*' and 'n*' POS flags).
        for word, flag in tagged:
            if word in stopwords or len(word) <= 1:
                continue
            if flag.startswith('v'):
                verb_counter[word] += 1
            elif flag.startswith('n'):
                noun_counter[word] += 1

        # 2-gram / 3-gram phrase counts.
        if len(words) >= 2:
            bigram_counter.update(generate_ngrams(words, 2))
        if len(words) >= 3:
            trigram_counter.update(generate_ngrams(words, 3))

        # Overall frequencies use a plain jieba.lcut pass (no POS tagging),
        # kept separate because its segmentation can differ from pseg's.
        word_counter.update(w for w in jieba.lcut(text)
                            if w not in stopwords and len(w) > 1)

    return {
        "verbs": dict(verb_counter.most_common(50)),
        "nouns": dict(noun_counter.most_common(50)),
        "bigrams": dict(bigram_counter.most_common(10)),
        "trigrams": dict(trigram_counter.most_common(10)),
        "top_30": dict(word_counter.most_common(30)),
    }


if __name__ == "__main__":

    data = load_data()
    analysis = get_word_analysis(data)
    print(json.dumps(analysis, indent=2, ensure_ascii=False))