import pandas as pd
import jieba
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import os
from collections import Counter

from utils.config import Config

config = Config('E:/Python+AI/group4_nlp_project')  # NOTE(review): appears unused in this module — confirm before removing

# Configure matplotlib for Chinese text: SimHei provides CJK glyphs, and
# disabling unicode_minus keeps the minus sign from rendering as a box.
plt.rcParams['font.sans-serif'] = ['SimHei']
plt.rcParams['axes.unicode_minus'] = False

# Mapping from predicted category id (model output) to the human-readable
# product category name used in titles and output filenames.
CATEGORY_MAPPING = {
    0: "书籍",
    1: "平板",
    2: "手机",
    3: "水果",
    4: "洗发水",
    5: "衣服",
    6: "酒店",
    7: "计算机",
    8: "蒙牛",
    9: "热水器"
}

def load_stopwords(stopwords_path='E:/Python+AI/group4_nlp_project/data/origin_data/stopwords.txt'):
    """Load the stop-word set used to filter word-cloud tokens.

    Args:
        stopwords_path: Path to a UTF-8 text file with one stop word per
            line. Defaults to the project's bundled stop-word file, so
            existing callers keep their behavior.

    Returns:
        set[str]: stop words from the file plus a hand-picked set of
        generic high-frequency review words. Returns an empty set (after
        printing a warning) when the file does not exist.
    """
    if not os.path.exists(stopwords_path):
        print("警告: 停用词文件不存在")
        return set()

    with open(stopwords_path, 'r', encoding='utf-8') as f:
        # strip() removes the trailing newline; blank lines are skipped.
        stopwords = {line.strip() for line in f if line.strip()}

    # Generic high-frequency review words that would dominate every
    # category's cloud without adding category-specific signal.
    common_words = {'没有', '不错', '很好', '可以', '感觉', '东西', '价格', '质量', '非常', '比较', '一般', '还是', '就是', '应该', '可能', '不会', '不能', '不要', '不要钱', '不会用', '不好用', '不好看', '不好吃', '不好喝', '不新鲜', '不耐用'}
    stopwords.update(common_words)

    return stopwords

def load_predictions(predictions_path='data/predictions_with_text.csv'):
    """Load the model's prediction results from CSV.

    Args:
        predictions_path: CSV file containing (at least) the columns
            'text', 'predicted_category' and 'predicted_label' — assumed
            from downstream usage; TODO confirm against the writer of
            this file. Defaults to the project's standard location, so
            existing callers keep their behavior.

    Returns:
        pandas.DataFrame: the parsed prediction table.
    """
    return pd.read_csv(predictions_path)

def count_words_in_texts(texts, stopwords):
    """Tokenize a collection of texts and return word frequencies.

    NaN entries are dropped, the remaining texts are segmented with
    jieba, and tokens that are stop words or shorter than two characters
    are discarded before counting.

    Args:
        texts: iterable of texts (may contain NaN values).
        stopwords: set of words to exclude from the count.

    Returns:
        collections.Counter: token -> occurrence count.
    """
    # Join everything into one string so jieba segments in a single pass.
    corpus = ' '.join(str(entry) for entry in texts if pd.notna(entry))

    # Chinese word segmentation.
    tokens = jieba.lcut(corpus)

    # Keep only multi-character tokens that are not stop words.
    kept = (tok for tok in tokens if tok not in stopwords and len(tok) > 1)
    return Counter(kept)

def get_category_specific_words(category_texts, all_texts, stopwords, top_n=100):
    """Score words that are distinctive for one category.

    Words that are frequent in ``category_texts`` but rare in the full
    corpus get high scores, so category-specific vocabulary rises to the
    top of the resulting word cloud.

    Args:
        category_texts: texts belonging to the target category.
        all_texts: texts from every category (reference corpus).
        stopwords: words to exclude from counting.
        top_n: maximum number of scored words to return.

    Returns:
        dict[str, int]: word -> integer weight, at most ``top_n``
        entries, ordered from most to least distinctive.
    """
    # Frequencies within the category vs. across the whole corpus.
    category_word_freq = count_words_in_texts(category_texts, stopwords)
    all_word_freq = count_words_in_texts(all_texts, stopwords)

    specific_words = {}
    for word, freq in category_word_freq.items():
        all_freq = all_word_freq.get(word, 0)

        # Heuristic, TF-IDF-like score: freq^2 / corpus_freq emphasizes
        # words concentrated in this category; multiplying by freq again
        # keeps absolute frequency relevant. Guard against division by
        # zero for words absent from the reference count.
        if all_freq > 0:
            weight = (freq ** 2) / all_freq
            specific_words[word] = int(freq * weight)

    # Highest-weighted words first, truncated to top_n.
    sorted_words = sorted(specific_words.items(), key=lambda x: x[1], reverse=True)
    return dict(sorted_words[:top_n])

def generate_wordcloud_for_category(category_id, category_name, texts, label_type, stopwords, all_texts):
    """Generate, save and display a word cloud for one category/label pair.

    Args:
        category_id: numeric category id (kept for interface
            compatibility; not used in rendering).
        category_name: human-readable category name, used in the title
            and output filename.
        texts: review texts of this category carrying the given label.
        label_type: label description shown in the title, e.g. "好评"
            (positive) or "差评" (negative).
        stopwords: words excluded from counting.
        all_texts: full corpus, used to score category-distinctive words.
    """
    # Prefer words distinctive to this category; fall back to plain
    # frequency counts when distinctive-word scoring yields nothing.
    specific_words = get_category_specific_words(texts, all_texts, stopwords)
    if not specific_words:
        specific_words = count_words_in_texts(texts, stopwords)

    if not specific_words:
        print(f"警告: 类别'{category_name}'的{label_type}评价中没有有效的词")
        return

    # Build the word cloud, preferring the SimHei font so CJK glyphs
    # render. Was a bare `except:` — narrowed so Ctrl-C/SystemExit are
    # not swallowed.
    try:
        wordcloud = WordCloud(
            font_path='C:/Windows/Fonts/simhei.ttf',
            width=800,
            height=600,
            background_color='white',
            max_words=200,
            colormap='viridis'
        ).generate_from_frequencies(specific_words)
    except Exception:
        # Font likely missing (non-Windows host); retry with the default
        # font — CJK glyphs may not render correctly in that case.
        wordcloud = WordCloud(
            width=800,
            height=600,
            background_color='white',
            max_words=200,
            colormap='viridis'
        ).generate_from_frequencies(specific_words)

    # Render, save, and show the figure.
    plt.figure(figsize=(10, 8))
    plt.imshow(wordcloud, interpolation='bilinear')
    plt.axis('off')
    title = f'{category_name}{label_type}词云图'
    plt.title(title, fontsize=20)
    plt.tight_layout()

    save_path = f'data/{category_name}_{label_type}_词云图.png'
    plt.savefig(save_path, dpi=300, bbox_inches='tight')
    plt.show()
    # Release the figure so repeated calls (20 clouds per run) don't
    # accumulate open figures and leak memory.
    plt.close()
    print(f"已生成: {title}")

def generate_category_wordclouds():
    """Generate positive- and negative-review word clouds for every category."""
    # Shared inputs: stop-word set and the prediction table.
    stopwords = load_stopwords()
    predictions_df = load_predictions()

    # Full corpus, used as the reference when scoring distinctive words.
    all_texts = predictions_df['text'].tolist()

    for category_id, category_name in CATEGORY_MAPPING.items():
        # Rows the model assigned to this category.
        subset = predictions_df[predictions_df['predicted_category'] == category_id]

        if subset.empty:
            print(f"跳过: 类别'{category_name}'没有预测数据")
            continue

        # One cloud per sentiment: positive (label 1) first, then
        # negative (label 0).
        for label_value, label_name in ((1, "好评"), (0, "差评")):
            label_texts = subset[subset['predicted_label'] == label_value]['text'].tolist()

            if label_texts:
                generate_wordcloud_for_category(category_id, category_name, label_texts, label_name, stopwords, all_texts)
            else:
                print(f"跳过: 类别'{category_name}'没有预测为{label_name}的数据")

if __name__ == '__main__':
    # Script entry point: build word clouds for every category/label pair.
    generate_category_wordclouds()
