{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "781a84b5-9903-4eb5-9a2d-3b345702dfc7",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "from snownlp import SnowNLP\n",
    "\n",
    "# Load the data\n",
    "data = pd.read_csv('your_data.csv')  # replace with your data file path\n",
    "\n",
    "# Ensure the text column is string-typed; fill missing values with ''\n",
    "data['text'] = data['text'].fillna('').astype(str)\n",
    "\n",
    "# Debug output: inspect the first few rows of text\n",
    "print(\"前几条文本数据：\")\n",
    "print(data['text'].head())\n",
    "\n",
    "# Sentiment labelling function. The thresholds default to the original\n",
    "# cut-offs (0.7 / 0.3) and can now be tuned per call without editing the body.\n",
    "def get_sentiment(text, pos_threshold=0.7, neg_threshold=0.3):\n",
    "    \"\"\"Return 'positive' / 'negative' / 'neutral' for a single text.\n",
    "\n",
    "    Empty (whitespace-only) or unprocessable texts fall back to 'neutral'.\n",
    "    \"\"\"\n",
    "    if not text.strip():\n",
    "        print(f\"警告：文本为空或仅包含空格：'{text}'\")\n",
    "        return 'neutral'\n",
    "    try:\n",
    "        # SnowNLP yields a positivity score in [0, 1]\n",
    "        score = SnowNLP(text).sentiments\n",
    "    except Exception as e:\n",
    "        # Best-effort: log the failure and keep labelling the remaining rows\n",
    "        print(f\"错误：无法处理文本 '{text}'，错误信息：{e}\")\n",
    "        return 'neutral'\n",
    "    if score > pos_threshold:\n",
    "        return 'positive'\n",
    "    if score < neg_threshold:\n",
    "        return 'negative'\n",
    "    return 'neutral'\n",
    "\n",
    "# Apply the sentiment labelling function to the data\n",
    "data['sentiment'] = data['text'].apply(get_sentiment)\n",
    "\n",
    "# Inspect the labelled data\n",
    "print(\"\\n标注后的数据：\")\n",
    "print(data[['text', 'sentiment']].head())\n",
    "\n",
    "# Save the labelled data\n",
    "data.to_csv('labeled_data.csv', index=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "85e2390e-4be6-45bf-a757-1b125a498d76",
   "metadata": {},
   "outputs": [],
   "source": [
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "from matplotlib import rcParams\n",
    "\n",
    "# Load the data\n",
    "data = pd.read_csv('奶茶话题评论.csv')\n",
    "\n",
    "# Handle missing values: replace missing comment text with ''\n",
    "data['text'] = data['text'].fillna('')\n",
    "\n",
    "# Positive / negative keyword lists\n",
    "positive_keywords = ['好喝', '喜欢', '爱喝', '不错', '推荐']\n",
    "negative_keywords = ['难喝', '不喜欢', '不喝', '糟糕', '失望']\n",
    "\n",
    "def count_keywords(text, keywords):\n",
    "    \"\"\"Total occurrences of any of `keywords` in `text` (plain substring count).\"\"\"\n",
    "    return sum(text.count(keyword) for keyword in keywords)\n",
    "\n",
    "# Count keyword hits per comment with a single apply per column\n",
    "# (replaces the slow iterrows() + .at accumulation loop; same counts)\n",
    "data['积极词数'] = data['text'].apply(count_keywords, args=(positive_keywords,))\n",
    "data['消极词数'] = data['text'].apply(count_keywords, args=(negative_keywords,))\n",
    "\n",
    "# Sentiment score per comment (simple difference)\n",
    "data['情感倾向得分'] = data['积极词数'] - data['消极词数']\n",
    "\n",
    "# Label by score: > 0 positive, < 0 negative, otherwise neutral.\n",
    "# Zero is the neutral cut-off here; quantile-based thresholds are an option.\n",
    "data['情感标签'] = '中性'  # default label\n",
    "data.loc[data['情感倾向得分'] > 0, '情感标签'] = '正面'\n",
    "data.loc[data['情感倾向得分'] < 0, '情感标签'] = '负面'\n",
    "\n",
    "# Descriptive statistics of the sentiment score\n",
    "print(data['情感倾向得分'].describe())\n",
    "\n",
    "# Configure Matplotlib for Chinese labels (SimHei must be installed)\n",
    "rcParams['font.sans-serif'] = ['SimHei']  # render Chinese glyphs\n",
    "rcParams['axes.unicode_minus'] = False  # render the minus sign correctly\n",
    "\n",
    "# Plot the distribution of sentiment scores\n",
    "plt.figure(figsize=(12, 6))\n",
    "sns.histplot(data['情感倾向得分'], bins=20, kde=True, color='skyblue')\n",
    "plt.title('情感倾向得分分布', fontsize=16, fontweight='bold')\n",
    "plt.xlabel('情感倾向得分', fontsize=14)\n",
    "plt.ylabel('频率', fontsize=14)\n",
    "plt.xticks(fontsize=12)\n",
    "plt.yticks(fontsize=12)\n",
    "plt.grid(axis='y', linestyle='--', alpha=0.7)\n",
    "plt.tight_layout()\n",
    "plt.show()\n",
    "\n",
    "# Label distribution\n",
    "print(data['情感标签'].value_counts())"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.4"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
