import pandas as pd
from textblob import TextBlob
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.decomposition import LatentDirichletAllocation
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords

# 1. Load the CSV data.
# NOTE(review): encoding='gbk' implies a mainland-Chinese-encoded file;
# confirm the actual file encoding (gbk vs utf-8) against the data source.
data = pd.read_csv("new_data.csv",encoding='gbk')

# 2. Sentiment analysis over the comment text.
def sentiment_analysis(comment):
    """Return a sentiment label for one comment.

    Uses TextBlob's polarity score: > 0 -> '积极' (positive),
    < 0 -> '消极' (negative), exactly 0 -> '中立' (neutral).
    """
    # Guard: empty CSV cells arrive from pandas as float('nan'); TextBlob
    # raises TypeError on non-string input, which would abort the whole
    # .apply() over the column. Treat such rows as neutral.
    if not isinstance(comment, str):
        return '中立'
    # NOTE(review): TextBlob's default analyzer only scores English text;
    # for Chinese comments polarity is typically 0.0, so most rows will be
    # labeled '中立'. A Chinese-capable model (e.g. SnowNLP) would be needed
    # for meaningful results — confirm against real data.
    polarity = TextBlob(comment).sentiment.polarity
    if polarity > 0:
        return '积极'
    elif polarity < 0:
        return '消极'
    else:
        return '中立'

# Classify every comment; adds the new '情感倾向' (sentiment) column.
data['情感倾向'] = data['评论'].apply(sentiment_analysis)

# 3. Keyword extraction: TF-IDF over the raw comment text.
# NOTE(review): the 'chinese' stopword list only exists in recent NLTK
# stopwords corpora — older releases raise OSError here; confirm the
# installed corpus version.
stop_words = stopwords.words('chinese')  # Chinese stopword list
# NOTE(review): TfidfVectorizer's default tokenizer assumes
# whitespace-delimited tokens (regex \b\w\w+\b). Unsegmented Chinese text
# has no spaces, so "keywords" here are likely sentence-length fragments;
# segmenting with jieba first would be needed for real keyword quality.
vectorizer = TfidfVectorizer(stop_words=stop_words, max_features=10)  # keep the 10 highest-scoring terms
X = vectorizer.fit_transform(data['评论'])
keywords = vectorizer.get_feature_names_out()

# 4. Topic modeling: fit a 3-topic LDA model on the TF-IDF matrix.
# random_state is pinned so repeated runs give identical topics.
lda = LatentDirichletAllocation(n_components=3, random_state=42)
lda.fit(X)

# Show the 10 highest-weighted vocabulary terms for each topic.
for topic_idx, topic in enumerate(lda.components_):
    top_indices = topic.argsort()[::-1][:10]  # descending by topic weight
    print(f"主题 {topic_idx}:")
    print(" ".join(keywords[i] for i in top_indices))

# 5. User-profile construction from per-row engagement and sentiment.
def user_profile(row):
    """Build a small profile dict for one dataframe row.

    Sums like and reply counts into an engagement score, carries the
    precomputed sentiment label through, and tags the row as hot-topic
    when the comment mentions '全明星' (All-Star).
    """
    likes_plus_replies = row['点赞数'] + row['回复量']
    mentions_all_star = '全明星' in row['评论']
    profile = {'互动指数': likes_plus_replies}
    profile['情感倾向'] = row['情感倾向']
    profile['关注点'] = '热门话题' if mentions_all_star else '其他'
    return profile

# Build one profile dict per row (axis=1 applies row-wise).
data['用户画像'] = data.apply(user_profile, axis=1)

# 6. Print the per-comment sentiment analysis results.
print("\n用户评论情感分析结果：")
print(data[['用户id', '评论', '情感倾向', '用户画像']])

# 7. Print the top-10 keyword list for each topic.
# NOTE(review): this duplicates the topic printout already emitted in
# step 4 — consider extracting a shared helper or dropping one copy.
print("\n每个主题的前10个关键词：")
for idx, component in enumerate(lda.components_):
    ranked = component.argsort()[::-1][:10]  # descending by topic weight
    print(f"主题 {idx}:")
    print(" ".join(keywords[i] for i in ranked))
