import pandas as pd
from nltk.sentiment import SentimentIntensityAnalyzer
import jieba
from nltk.corpus import stopwords
import nltk

# Download the NLTK resources this script depends on:
# - 'stopwords': Chinese stop-word list used below
# - 'vader_lexicon': required by SentimentIntensityAnalyzer (without it the
#   analyzer raises LookupError the first time it is constructed)
nltk.download('stopwords')
nltk.download('vader_lexicon')

# Read the Excel file of reviews.
file_path = '大众点评-万寿宫评价.xlsx'  # replace with the path to your Excel file
df = pd.read_excel(file_path)

# Initialize the VADER sentiment analyzer.
# NOTE(review): VADER's lexicon is English-only; applied to Chinese tokens most
# compound scores come back 0.0 — consider a Chinese sentiment model. TODO confirm.
sid = SentimentIntensityAnalyzer()

# Chinese stop-word set used to filter tokens after jieba segmentation.
stop_words = set(stopwords.words('chinese'))

def tokenize_and_remove_stopwords(text):
    """Segment Chinese text with jieba and remove stop words.

    Parameters
    ----------
    text : str or scalar
        Review text from one DataFrame cell. Null cells (NaN/None, as
        produced by pandas for empty Excel cells) yield an empty string.

    Returns
    -------
    str
        Space-joined tokens with stop words (module-level ``stop_words``)
        removed.
    """
    # Empty Excel cells are read as NaN by pandas; without this guard,
    # str(NaN) would inject the literal token "nan" into the output.
    if pd.isna(text):
        return ""
    words = jieba.cut(str(text))
    filtered_words = [word for word in words if word not in stop_words]
    return " ".join(filtered_words)

# Add a column holding the segmented (tokenized) review text.
df['分词后评论'] = df['评论'].apply(tokenize_and_remove_stopwords)

# Add a column with VADER's compound score (-1..1) for each tokenized review.
# NOTE(review): VADER's lexicon is English; on Chinese tokens most compound
# scores will be 0.0, so nearly every row classifies as neutral below —
# verify results or switch to a Chinese-capable sentiment model.
df['VADER分数'] = df['分词后评论'].apply(lambda x: sid.polarity_scores(x)['compound'])

# Threshold the compound score into positive (正面) / negative (负面) /
# neutral (中性) labels.
vader_threshold = 0.1
df['VADER情感'] = df['VADER分数'].apply(lambda x: '正面' if x > vader_threshold else ('负面' if x < -vader_threshold else '中性'))

# Count positive, negative and neutral reviews.
vader_sentiment_counts = df['VADER情感'].value_counts()

# Print the VADER sentiment summary to stdout.
print("VADER情感分析结果:")
print(vader_sentiment_counts)

# Write the augmented DataFrame (tokens, scores, labels) to a new Excel file.
output_file_path = 'vader_sentiment_analysis_result.xlsx'
df.to_excel(output_file_path, index=False)

print(f"\nVADER情感分析结果已保存到 {output_file_path}")
