import pandas as pd
from snownlp import SnowNLP
import jieba
from collections import Counter
# Load the comment dataset (GBK-encoded CSV).
df = pd.read_csv('new_data.csv', encoding='gbk')

# Keep only the columns the analysis below uses. The original code copied
# each Series into a dict and rebuilt a second DataFrame, which is exactly
# what a column projection does in one step. (The old comment claimed the
# data was simulated; it actually comes from the CSV above.)
columns = [
    'index',
    '用户id',
    '性别',
    '点赞数',
    '评论',
    '回复量',
    '评论日期',
    '哪吒关键词出现次数',
]
df = df[columns]

def sentiment_analysis(text):
    """Return the SnowNLP sentiment score of *text*.

    The score lies in [0, 1]; downstream code treats values above 0.5
    as positive and below 0.5 as negative.
    """
    return SnowNLP(text).sentiments

def keyword_extraction(text, topK=5):
    """Return up to *topK* most frequent jieba tokens of *text*.

    Tokens are ordered by descending frequency. Non-string input
    (e.g. NaN floats that pandas produces for empty CSV cells) yields
    an empty list instead of crashing inside jieba.
    """
    # Guard: Series.apply passes NaN (a float) for missing comments.
    if not isinstance(text, str):
        return []
    word_counts = Counter(jieba.lcut(text))
    return [word for word, _ in word_counts.most_common(topK)]

# Score every comment and attach its top keywords.
df['情感得分'] = df['评论'].apply(sentiment_analysis)
df['关键词'] = df['评论'].apply(keyword_extraction)

# Flatten all per-comment keyword lists and count global frequencies.
all_keywords = [kw for kw_list in df['关键词'] for kw in kw_list]
keyword_counts = Counter(all_keywords)

# Hot topics = the 5 most frequent keywords across all comments.
hot_topics = keyword_counts.most_common(5)

# Sentiment breakdown. The original neutral condition
# (score >= 0.5) & (score <= 0.5) reduces to an exact equality test,
# so it is written as one here: only scores of exactly 0.5 count as
# neutral, which matches the original behavior.
positive_count = int((df['情感得分'] > 0.5).sum())
negative_count = int((df['情感得分'] < 0.5).sum())
neutral_count = int((df['情感得分'] == 0.5).sum())

# Print per-comment results and the aggregate statistics.
print("每条评论的情感得分和关键词：")
print(df[['评论', '情感得分', '关键词']])
print("\n热点话题：")
print(hot_topics)
print("\n用户情感倾向：")
print(f"积极评论数量: {positive_count}")
print(f"消极评论数量: {negative_count}")
print(f"中立评论数量: {neutral_count}")