import pandas as pd
import jieba
import re
from sklearn.feature_extraction.text import TfidfVectorizer
from wordcloud import WordCloud
import matplotlib.pyplot as plt
import os
# Configure matplotlib to render CJK glyphs; without this, Chinese plot
# titles show up as empty boxes on most systems.
plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei ("black body") Chinese sans-serif

# 1. Load the raw review data.
# NOTE(review): assumes the CSV has a numeric 'rating' column and a text
# 'comment' column — confirm against the actual file schema.
data = pd.read_csv('elderly_product_reviews.csv')

# 2. Keep only negative reviews: rating <= 3 with a non-null comment.
# .copy() avoids SettingWithCopyWarning when 'cut_comment' is assigned later.
negative_reviews = data[(data['rating'] <= 3) & (data['comment'].notna())].copy()

# 3. Inline Chinese stopword set (avoids a dependency on an external
# stopword file). Covers common particles, pronouns, and interjections.
chinese_stopwords = {
    '的', '了', '和', '是', '在', '我', '你', '他', '这', '那', '就',
    '要', '也', '都', '不', '没有', '很', '可以', '会', '有', '好',
    '啊', '呀', '哦', '嗯', '啦', '吧', '吗', '呢', '着', '过'
}


# 4. Enhanced segmentation function
def enhanced_word_cut(text):
    """Segment a Chinese review into issue-focused keywords.

    First tries to extract the clauses that describe the actual problem:
    text following complaint markers (不满意/质量差/...), suggestion markers
    (希望/建议 followed by a colon), or adversative conjunctions
    (但是/不过/然而). If no pattern matches, the whole comment is used.
    The extracted clauses are segmented with jieba and filtered against
    the module-level stopword set, keeping only multi-character tokens.

    Args:
        text: Raw comment value (any type; coerced to str).

    Returns:
        Space-joined keyword string, or None when no token survives
        filtering (the caller drops those rows).
    """
    text = str(text)
    # Normalize: keep word characters, CJK ideographs, and exactly the
    # punctuation the extraction patterns below rely on.
    # BUG FIX: the original whitelist omitted colons (：/:) and semicolons
    # (；/;), replacing them with spaces before pattern matching — which
    # made the 希望/建议[：:] pattern unmatchable and removed the ；
    # terminator that all three patterns' ending classes look for.
    text = re.sub(r'[^\w\u4e00-\u9fff，。！？、；：:;]', ' ', text)

    # Clause-extraction patterns (complaint / suggestion / adversative).
    patterns = [
        r'(?:有待改进|问题严重|不满意|不好用|设计不合理|质量差)[，,]*(.*?)[，。！；]',
        r'(?:希望|建议)[：:](.*?)[，。！；]',
        r'(?:但是|不过|然而)(.*?)[，。！；]'
    ]

    issues = []
    for pattern in patterns:
        issues.extend(re.findall(pattern, text))

    if not issues:  # no pattern matched -> fall back to the whole text
        issues = [text]

    # Segment each clause, dropping stopwords and single characters.
    words = []
    for issue in issues:
        words.extend([word for word in jieba.cut(issue)
                      if (word not in chinese_stopwords) and (len(word) > 1)])

    return ' '.join(words) if words else None


# Tokenize every negative comment into issue keywords; rows that yield
# no usable tokens come back as None.
negative_reviews['cut_comment'] = negative_reviews['comment'].map(enhanced_word_cut)

# 5. Discard rows whose segmentation produced nothing analyzable.
negative_reviews = negative_reviews.dropna(subset=['cut_comment'])

if len(negative_reviews) == 0:
    # Nothing left to analyze — tell the operator what to check.
    print("警告: 没有可分析的负面评论数据! 请检查:")
    print("1. 原始数据中是否有rating<=3的评论")
    print("2. 评论内容是否包含中文文本")
else:
    print(f"准备分析 {len(negative_reviews)} 条负面评论")

    # 6. TF-IDF vectorization over the pre-segmented, space-joined comments.
    tfidf = TfidfVectorizer(
        max_features=500,
        # Accept any \w+ token (the sklearn default drops 1-char tokens;
        # single characters were already filtered during segmentation).
        token_pattern=r'(?u)\b\w+\b',
        stop_words=list(chinese_stopwords)  # keep consistent with preprocessing
    )

    try:
        X = tfidf.fit_transform(negative_reviews['cut_comment'])
        print("成功提取特征词汇:", len(tfidf.vocabulary_))

        # 7. Render a word cloud of the most frequent pain-point terms.
        all_text = ' '.join(negative_reviews['cut_comment'])
        wordcloud = WordCloud(
            font_path='msyh.ttc',  # Microsoft YaHei, needed for CJK glyphs — TODO confirm the font file exists on this machine
            width=800,
            height=600,
            background_color='white',
            max_words=50,
            stopwords=chinese_stopwords
        ).generate(all_text)

        plt.figure(figsize=(10, 8))
        plt.imshow(wordcloud, interpolation='bilinear')
        plt.axis('off')
        plt.title('老年产品用户痛点词云图')
        plt.show()

    except ValueError as e:
        # fit_transform raises ValueError when the vocabulary ends up empty
        # (e.g. every token was a stop word); print diagnostics instead of
        # crashing the script.
        print(f"错误: {e}")
        print("可能原因和解决方案:")
        print("1. 检查分词结果示例:", negative_reviews['cut_comment'].head(3).values)
        print("2. 尝试放宽过滤条件，如允许单字词")
        print("3. 检查原始评论是否包含有效中文内容")