{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "dafa0301-0ad7-4d8e-a7a7-321349bd9503",
   "metadata": {},
   "outputs": [],
   "source": [
    "import requests\n",
    "import csv\n",
    "import random\n",
    "import time\n",
    "from selenium import webdriver\n",
    "from selenium.webdriver.chrome.options import Options\n",
    "from selenium.webdriver.chrome.service import Service"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "407bc78c-f7c0-49a9-a6a9-61cafd3d4046",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Browser/driver locations. NOTE(review): hardcoded absolute Windows paths — adjust per machine.\n",
    "chrome_binary_path = r\"C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe\"\n",
    "chromedriver_path = r\"E:\\Temp\\Program Files\\chromedriver-win64\\chromedriver-win64\\chromedriver.exe\" \n",
    "\n",
    "# Build the Chrome options object\n",
    "options = Options()\n",
    "options.binary_location = chrome_binary_path  # point Selenium at the local Chrome binary\n",
    "\n",
    "# Create the Service object with the explicit chromedriver path\n",
    "service = Service(executable_path=chromedriver_path)\n",
    "\n",
    "# Launch the browser, open the target post once, then quit.\n",
    "# NOTE(review): the Selenium session is closed immediately and nothing from it\n",
    "# (cookies, page content) is reused below — presumably a manual warm-up step; confirm it is needed.\n",
    "driver = webdriver.Chrome(service=service, options=options)\n",
    "time.sleep(10)\n",
    "driver.get(\"https://weibo.com/1653603955/PpWAzwNrR\")\n",
    "driver.quit()\n",
    "\n",
    "# Request headers for the comment API; replace Cookie and Referer per session.\n",
    "# SECURITY: the Cookie below is a live session credential committed in source —\n",
    "# rotate it and load it from an environment variable or secrets store instead.\n",
    "headers = {\n",
    "    \"User-Agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/135.0.0.0 Safari/537.36 Edg/135.0.0.0\",\n",
    "    \"Cookie\": \"SCF=AviQ34ctD8AqL58pVLOa5IfmXB-q9KLQ8oq5jZFVSwLq7xsxtgcdZir0MC7fT4gg1SG1r_s720Lt1TBeJQhn6B8.; SUB=_2A25FEKHeDeRhGeFM61oV8y3OzjuIHXVmb7sWrDV8PUNbmtANLWH1kW9NQPl62XvBqDhgUA8TuxcSPS5fhzQP7gxE; SUBP=0033WrSXqPxfM725Ws9jqgMF55529P9D9W5huAzIK_YyMUOCZB127KRz5NHD95QNeo5RShe0eo-NWs4DqcjMi--NiK.Xi-2Ri--ciKnRi-zNS0z71hB0e0zfS7tt; ALF=02_1748786830; SINAGLOBAL=7842825163463.952.1746194834598; XSRF-TOKEN=ygYAhRK2GyEUStWgctdmT4VA; _s_tentry=www.weibo.com; Apache=1097507397759.2042.1746253831106; ULV=1746253831170:5:5:5:1097507397759.2042.1746253831106:1746236897153; webim_unReadCount=%7B%22time%22%3A1746256893881%2C%22dm_pub_total%22%3A1%2C%22chat_group_client%22%3A0%2C%22chat_group_notice%22%3A0%2C%22allcountNum%22%3A1%2C%22msgbox%22%3A0%7D; PC_TOKEN=a5ad3972af; WBPSESS=b6QVOSx6TcL3FVXi5xI1iTQe-_vMDUh3_VWkNd2kjWKg9hn8a-SzgzJ77raS42orlqP1xr8m6VKOBO9pYYlqF2VuqZjrkqP8OI12-0ehRfWigw7lFRZtMgPwI1ISX7vq6QeZL-bLkKGdw_Z0i-LgiA==\",\n",
    "    \"Referer\": \"https://weibo.com/1715351501/PpVmZ656o\"\n",
    "}\n",
    "url = \"https://weibo.com/ajax/statuses/buildComments?\"\n",
    "\n",
    "# Output CSV in append mode; utf-8-sig so Excel auto-detects the encoding\n",
    "f = open(\"test2.csv\",\"a\",encoding=\"utf-8-sig\",newline=\"\")\n",
    "\n",
    "# Header row. NOTE(review): append mode writes a duplicate header on every re-run.\n",
    "writer = csv.writer(f)\n",
    "writer.writerow([\"comments\",\"created_at\",\"gender\",\"location\"])\n",
    "\n",
    "# 定义爬取二级评论的第一页的函数的参数\n",
    "def setFirstParams(id,max_id):\n",
    "    # 需要替换uid\n",
    "    \"\"\"\n",
    "    :param id: 一级评论的id\n",
    "    :param max_id: 一级评论的max_id\n",
    "    :return: 二级评论的参数\n",
    "    \"\"\"\n",
    "    params = {\n",
    "        \"is_reload\": \"1\",\n",
    "        \"id\":id,\n",
    "        \"is_show_bulletin\": \"2\",\n",
    "        \"is_mix\": \"1\",\n",
    "        \"fetch_level\": \"1\",\n",
    "        \"max_id\": max_id,\n",
    "        \"count\": \"20\",\n",
    "        \"uid\": \"1715351501\",\n",
    "        \"locale\": \"zh-CN\"\n",
    "    }\n",
    "    return params\n",
    "\n",
    "# Crawl the first page of second-level (reply) comments for one root comment.\n",
    "def crawl2(id, max_id):\n",
    "    \"\"\"Fetch the first page of replies under a first-level comment, then\n",
    "    recurse into later pages via crawl3.\n",
    "\n",
    "    :param id: id of the first-level comment\n",
    "    :param max_id: pagination cursor for this request\n",
    "    :return: (id, max_id) of the final state (callers currently ignore it)\n",
    "    \"\"\"\n",
    "    # Per-page progress counter\n",
    "    i = 1\n",
    "    # Request one page of replies\n",
    "    response = requests.get(url=url,params=setFirstParams(id=id,max_id=max_id), headers=headers).json()\n",
    "    data_list = response[\"data\"]\n",
    "    for data in data_list:\n",
    "        # Extract one reply record and append it to the CSV\n",
    "        comments = data[\"text_raw\"]\n",
    "        created_at = data[\"created_at\"]\n",
    "        gender = data[\"user\"][\"gender\"]\n",
    "        location = data[\"user\"][\"location\"]\n",
    "        writer.writerow([comments,created_at,gender,location])\n",
    "        print(f\"本页第{i}条评论已爬取\")\n",
    "        i += 1\n",
    "        # NOTE(review): reassigning `id` to each reply's own id looks suspicious —\n",
    "        # cursor-based reply pagination usually keeps the root comment id.\n",
    "        # Confirm against the buildComments API before relying on deep pagination.\n",
    "        id = str(data[\"id\"])\n",
    "    # BUGFIX: pass the bare cursor value (hoisted out of the loop — it is\n",
    "    # loop-invariant). The old code built \"max_id=<cursor>\" and fed it into the\n",
    "    # params dict, so requests URL-encoded it as \"max_id=max_id%3D<cursor>\",\n",
    "    # which the server cannot parse.\n",
    "    max_id = str(response[\"max_id\"])\n",
    "    # Recurse while the API reports another page of replies\n",
    "    if response[\"max_id\"] != 0:\n",
    "        try:\n",
    "            time.sleep(random.randint(1,3))\n",
    "            crawl3(id,max_id)\n",
    "        except Exception as e:\n",
    "            print(e)\n",
    "    # No more pages: hand back the last-seen state\n",
    "    return id, max_id\n",
    "\n",
    "# 定义爬取二级评论的下一页的函数的参数\n",
    "def setSecondParams(id, max_id):\n",
    "    \"\"\"\n",
    "    :param id: 二级评论的id\n",
    "    :param max_id: 二级评论的max_id\n",
    "    :return: 二级评论的参数\n",
    "    \"\"\"\n",
    "    params = {\n",
    "        \"flow\": \"1\",\n",
    "        \"is_reload\": \"1\",\n",
    "        \"id\": id,\n",
    "        \"is_show_bulletin\": \"2\",\n",
    "        \"is_mix\": \"1\",\n",
    "        \"fetch_level\": \"1\",\n",
    "        \"max_id\": max_id,\n",
    "        \"count\": \"20\",\n",
    "        \"uid\": \"1715351501\",\n",
    "        \"locale\": \"zh-CN\",\n",
    "    }\n",
    "    return params\n",
    "\n",
    "# Crawl a follow-up page of second-level comments, recursing while more remain.\n",
    "def crawl3(id, max_id):\n",
    "    \"\"\"Fetch one follow-up page of replies (flow=1 pagination).\n",
    "\n",
    "    :param id: comment id used as the pagination anchor\n",
    "    :param max_id: pagination cursor for this request\n",
    "    :return: (id, max_id) of the final state (callers currently ignore it)\n",
    "    \"\"\"\n",
    "    print(\"开始爬取二级评论的下一页!\")\n",
    "    # Request the page\n",
    "    response = requests.get(url=url,params=setSecondParams(id=id,max_id=max_id), headers=headers).json()\n",
    "    data_list = response[\"data\"]\n",
    "    for data in data_list:\n",
    "        # Extract one reply record and append it to the CSV\n",
    "        comments = data[\"text_raw\"]\n",
    "        created_at = data[\"created_at\"]\n",
    "        gender = data[\"user\"][\"gender\"]\n",
    "        location = data[\"user\"][\"location\"]\n",
    "        writer.writerow([comments,created_at,gender,location])\n",
    "        # NOTE(review): reassigning `id` to each reply's own id is dubious for\n",
    "        # cursor pagination, which usually anchors on the root comment id.\n",
    "        id = str(data[\"id\"])\n",
    "    # BUGFIX: use the bare cursor value (hoisted out of the loop). Previously\n",
    "    # \"max_id=<cursor>\" was put into the params dict and got double-encoded\n",
    "    # as \"max_id=max_id%3D<cursor>\" in the next request.\n",
    "    max_id = str(response[\"max_id\"])\n",
    "\n",
    "    # Recurse while another page exists\n",
    "    if response[\"max_id\"] != 0:\n",
    "        try:\n",
    "            time.sleep(random.randint(1,3))\n",
    "            crawl3(id,max_id)\n",
    "        except Exception as e:\n",
    "            print(e)\n",
    "    return id, max_id\n",
    "\n",
    "# Crawl first-level comments. To reuse: replace the url template below and\n",
    "# change its \"count=10\" query fragment into \"{next}\" so pagination can be injected.\n",
    "def crawl(next = \"count=10\"):\n",
    "    \"\"\"Crawl one page of first-level comments, then recurse to the next page.\n",
    "\n",
    "    :param next: pagination fragment; \"count=10\" on the first call, then the\n",
    "                 \"max_id=<cursor>\" string built from the previous response.\n",
    "                 (NOTE(review): the name shadows the builtin `next`.)\n",
    "    :return: None\n",
    "    \"\"\"\n",
    "    # Page counter shared across recursive calls\n",
    "    global page\n",
    "    try:\n",
    "        # Fetch one page of first-level comments.\n",
    "        # NOTE(review): the template hardcodes max_id=139570841121107 AND inserts\n",
    "        # {next}; from page 2 onward the URL therefore carries two max_id\n",
    "        # parameters — confirm the server honours the injected one.\n",
    "        url = f\"https://weibo.com/ajax/statuses/buildComments?is_reload=1&id=5161787331449584&is_show_bulletin=2&is_mix=0&max_id=139570841121107&{next}&uid=1715351501&fetch_level=0&locale=zh-CN\"\n",
    "        response = requests.get(url=url, headers=headers).json()\n",
    "        # Iterate over this page's comment records\n",
    "        data_list = response[\"data\"]\n",
    "        for data in data_list:\n",
    "            # Extract the fields of interest\n",
    "            comments = data[\"text_raw\"]\n",
    "            created_at = data[\"created_at\"]\n",
    "            gender = data[\"user\"][\"gender\"]\n",
    "            location = data[\"user\"][\"location\"] \n",
    "            # Append one row to the CSV\n",
    "            writer.writerow([comments,created_at,gender,location])\n",
    "            # Cursor for the next page. NOTE(review): this \"max_id=...\" string is\n",
    "            # also passed to crawl2 below, whose params dict expects the bare\n",
    "            # cursor value — verify the reply requests are not double-encoded.\n",
    "            id = data[\"id\"]\n",
    "            max_id = \"max_id=\" + str(response[\"max_id\"])\n",
    "\n",
    "            # total_number != 0 means this comment has replies: fetch them\n",
    "            if data[\"total_number\"] != 0:\n",
    "            # if len(data[\"comments\"]) != 0:\n",
    "                try:\n",
    "                    time.sleep(random.randint(1,3))\n",
    "                    crawl2(id,max_id)\n",
    "                except Exception as e:\n",
    "                    print(e)\n",
    "\n",
    "        print(f\"------第{page}页已爬取！-------\")\n",
    "        page += 1\n",
    "\n",
    "        # Recurse while the API reports more pages\n",
    "        if response[\"max_id\"] != 0:\n",
    "            try:\n",
    "                time.sleep(random.randint(1,3))\n",
    "                crawl(max_id)\n",
    "            except Exception as e:\n",
    "                print(e)\n",
    "                f.close()\n",
    "        # NOTE(review): this runs at every recursion level after the inner call\n",
    "        # returns, so the banner prints and f.close() repeats once per level.\n",
    "        print(\"----爬取结束！-----\")\n",
    "        f.close()\n",
    "    except Exception as e:\n",
    "        print(e)\n",
    "        f.close()\n",
    "\n",
    "# Entry point. In a notebook __name__ is '__main__', so this always runs.\n",
    "if __name__ == '__main__':\n",
    "    # Global page counter read and updated by crawl()\n",
    "    page = 1\n",
    "    # Start crawling first-level comments from page one\n",
    "    crawl()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "76afb157-f763-4b59-8b8d-3ebccea38adc",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/html": [
       "<div>\n",
       "<style scoped>\n",
       "    .dataframe tbody tr th:only-of-type {\n",
       "        vertical-align: middle;\n",
       "    }\n",
       "\n",
       "    .dataframe tbody tr th {\n",
       "        vertical-align: top;\n",
       "    }\n",
       "\n",
       "    .dataframe thead th {\n",
       "        text-align: right;\n",
       "    }\n",
       "</style>\n",
       "<table border=\"1\" class=\"dataframe\">\n",
       "  <thead>\n",
       "    <tr style=\"text-align: right;\">\n",
       "      <th></th>\n",
       "      <th>comments</th>\n",
       "      <th>created_at</th>\n",
       "    </tr>\n",
       "  </thead>\n",
       "  <tbody>\n",
       "    <tr>\n",
       "      <th>0</th>\n",
       "      <td>和青年演员王一博一起，唱响新时代“强国有我”的宣言，延续百年精神火种</td>\n",
       "      <td>Sat May 03 12:00:06 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>1</th>\n",
       "      <td>王一博💚💚💚</td>\n",
       "      <td>Sat May 03 12:00:50 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2</th>\n",
       "      <td>[打call]《青春赞歌-2025五四青年节特别节目》主题曲《青春探路者》由@UNIQ-王一...</td>\n",
       "      <td>Sat May 03 12:01:46 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>3</th>\n",
       "      <td>正能量青年演员王一博！[打call]</td>\n",
       "      <td>Sat May 03 12:01:15 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>4</th>\n",
       "      <td>一起来听《青春探路者》💚</td>\n",
       "      <td>Sat May 03 12:04:11 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>...</th>\n",
       "      <td>...</td>\n",
       "      <td>...</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2808</th>\n",
       "      <td>虎不虎 哈哈哈</td>\n",
       "      <td>Fri May 02 20:29:42 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2809</th>\n",
       "      <td>哇！！</td>\n",
       "      <td>Fri May 02 20:27:08 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2810</th>\n",
       "      <td>什么？幸芷蕾发微博了！[打call]</td>\n",
       "      <td>Fri May 02 20:24:35 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2811</th>\n",
       "      <td>啥？你说啥？我家辛芷蕾发微博了[憧憬]</td>\n",
       "      <td>Fri May 02 20:23:41 +0800 2025</td>\n",
       "    </tr>\n",
       "    <tr>\n",
       "      <th>2812</th>\n",
       "      <td>以为你忘记密码了，姐[允悲]</td>\n",
       "      <td>Fri May 02 20:21:49 +0800 2025</td>\n",
       "    </tr>\n",
       "  </tbody>\n",
       "</table>\n",
       "<p>2656 rows × 2 columns</p>\n",
       "</div>"
      ],
      "text/plain": [
       "                                               comments  \\\n",
       "0                    和青年演员王一博一起，唱响新时代“强国有我”的宣言，延续百年精神火种   \n",
       "1                                                王一博💚💚💚   \n",
       "2     [打call]《青春赞歌-2025五四青年节特别节目》主题曲《青春探路者》由@UNIQ-王一...   \n",
       "3                                    正能量青年演员王一博！[打call]   \n",
       "4                                          一起来听《青春探路者》💚   \n",
       "...                                                 ...   \n",
       "2808                                            虎不虎 哈哈哈   \n",
       "2809                                                哇！！   \n",
       "2810                                 什么？幸芷蕾发微博了！[打call]   \n",
       "2811                                啥？你说啥？我家辛芷蕾发微博了[憧憬]   \n",
       "2812                                     以为你忘记密码了，姐[允悲]   \n",
       "\n",
       "                          created_at  \n",
       "0     Sat May 03 12:00:06 +0800 2025  \n",
       "1     Sat May 03 12:00:50 +0800 2025  \n",
       "2     Sat May 03 12:01:46 +0800 2025  \n",
       "3     Sat May 03 12:01:15 +0800 2025  \n",
       "4     Sat May 03 12:04:11 +0800 2025  \n",
       "...                              ...  \n",
       "2808  Fri May 02 20:29:42 +0800 2025  \n",
       "2809  Fri May 02 20:27:08 +0800 2025  \n",
       "2810  Fri May 02 20:24:35 +0800 2025  \n",
       "2811  Fri May 02 20:23:41 +0800 2025  \n",
       "2812  Fri May 02 20:21:49 +0800 2025  \n",
       "\n",
       "[2656 rows x 2 columns]"
      ]
     },
     "execution_count": 2,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "## Data cleaning\n",
    "import re\n",
    "import pandas as pd\n",
    "import emoji\n",
    "import collections\n",
    "from transformers import pipeline\n",
    "\n",
    "data = pd.read_csv('test2.csv')\n",
    "## Drop the unused columns, then null rows and exact duplicates (one chain)\n",
    "data_drop = (\n",
    "    data\n",
    "    .drop(columns=['gender', 'location'])\n",
    "    .dropna()\n",
    "    .drop_duplicates()\n",
    ")\n",
    "data_drop"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "c4269316-38c0-45b8-9164-250aad99e539",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[提示] 未安装pandarallel，使用普通模式\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\Administrator\\AppData\\Local\\Temp\\ipykernel_5152\\240374640.py:86: UserWarning: Could not infer format, so each element will be parsed individually, falling back to `dateutil`. To ensure parsing is consistent and as-expected, please specify a format.\n",
      "  df['created_at'] = pd.to_datetime(df['created_at'], errors='coerce')\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "处理结果样例:\n",
      "                                                                    comments  \\\n",
      "created_at                                                                     \n",
      "2025-05-03 12:00:06+08:00                 和青年演员王一博一起，唱响新时代“强国有我”的宣言，延续百年精神火种   \n",
      "2025-05-03 12:00:50+08:00                                             王一博💚💚💚   \n",
      "2025-05-03 12:01:46+08:00  [打call]《青春赞歌-2025五四青年节特别节目》主题曲《青春探路者》由@UNIQ-王一...   \n",
      "2025-05-03 12:01:15+08:00                                 正能量青年演员王一博！[打call]   \n",
      "2025-05-03 12:04:11+08:00                                       一起来听《青春探路者》💚   \n",
      "\n",
      "                                                                      tokens  \n",
      "created_at                                                                    \n",
      "2025-05-03 12:00:06+08:00   [青年, 演员, 王一博, 唱响, 新, 时代, 强国, 宣言, 延续, 百年, 精神, 火种]  \n",
      "2025-05-03 12:00:50+08:00                                              [王一博]  \n",
      "2025-05-03 12:01:46+08:00  [青春, 赞歌, 五四, 青年节, 特别节目, 主题曲, 青春, 探路者, 由王, 一博, ...  \n",
      "2025-05-03 12:01:15+08:00                               [正, 能量, 青年, 演员, 王一博]  \n",
      "2025-05-03 12:04:11+08:00                                       [听, 青春, 探路者]  \n",
      "\n",
      "高频词TOP10:\n",
      "刘诗: 647\n",
      "诗: 647\n",
      "刘宇: 358\n",
      "东方: 303\n",
      "淮竹: 302\n",
      "淮水: 277\n",
      "竹亭: 277\n",
      "李昀锐: 229\n",
      "期待: 177\n",
      "宝宝: 144\n"
     ]
    }
   ],
   "source": [
    "import re\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from datetime import datetime\n",
    "from collections import Counter\n",
    "# --------------------------\n",
    "# Precompiled regular expressions (compiled once for speed)\n",
    "# --------------------------\n",
    "emoji_pattern = re.compile(\n",
    "    \"[\"\n",
    "    u\"\\U0001F600-\\U0001F64F\"  # emoticons\n",
    "    u\"\\U0001F300-\\U0001F5FF\"  # symbols & pictographs\n",
    "    u\"\\U0001F680-\\U0001F6FF\"  # transport & map symbols\n",
    "    u\"\\u2600-\\u2B55\"          # miscellaneous symbols\n",
    "    \"]+\", \n",
    "    flags=re.UNICODE\n",
    ")\n",
    "chinese_pattern = re.compile(r'[^\\u4e00-\\u9fa5]')  # anything that is not a CJK char\n",
    "whitespace_pattern = re.compile(r'\\s+')           # runs of whitespace\n",
    "\n",
    "# --------------------------\n",
    "# Stopword loading (with a small built-in fallback set)\n",
    "# --------------------------\n",
    "try:\n",
    "    with open('tycb.txt', 'r', encoding='utf-8') as f:\n",
    "        STOPWORDS = set(line.strip() for line in f)\n",
    "except FileNotFoundError:\n",
    "    print(\"[警告] 停用词文件未找到，使用默认停用词\")\n",
    "    STOPWORDS = {'的', '了', '是', '在', '和', '我', '你', '他', '这', '那'}\n",
    "\n",
    "# --------------------------\n",
    "# 核心处理函数\n",
    "# --------------------------\n",
    "def clean_text(text):\n",
    "    \"\"\"Cleaning pipeline: strip emoji, non-Chinese characters and whitespace.\n",
    "\n",
    "    Returns '' when cleaning fails for any reason.\n",
    "    \"\"\"\n",
    "    try:\n",
    "        cleaned = str(text)\n",
    "        # Apply the precompiled module-level patterns in the original order\n",
    "        for pattern in (emoji_pattern, chinese_pattern, whitespace_pattern):\n",
    "            cleaned = pattern.sub('', cleaned)\n",
    "        return cleaned.strip()\n",
    "    except Exception as e:\n",
    "        print(f\"文本清洗异常: {str(e)}\")\n",
    "        return ''\n",
    "\n",
    "def tokenize_pipeline(text):\n",
    "    \"\"\"Clean the text, segment it with jieba, and drop stopwords.\n",
    "\n",
    "    Returns [] for empty input or on any segmentation failure.\n",
    "    \"\"\"\n",
    "    cleaned = clean_text(text)\n",
    "    if not cleaned:\n",
    "        return []\n",
    "    try:\n",
    "        # Precise-mode segmentation, filtered against the stopword set\n",
    "        return [w for w in jieba.cut(cleaned, cut_all=False) if w and w not in STOPWORDS]\n",
    "    except Exception as e:\n",
    "        print(f\"分词异常: {str(e)}\")\n",
    "        return []\n",
    "\n",
    "# --------------------------\n",
    "# 主处理流程\n",
    "# --------------------------\n",
    "def process_data(input_df):\n",
    "    \"\"\"Full processing pipeline: clean, tokenize and time-index the frame.\n",
    "\n",
    "    :param input_df: DataFrame with at least 'comments' and 'created_at' columns\n",
    "    :return: copy of the frame with added 'cleaned' and 'tokens' columns,\n",
    "             indexed by the parsed 'created_at' timestamps\n",
    "    :raises KeyError: if a required column is missing\n",
    "    \"\"\"\n",
    "    # Work on a copy so the caller's frame is never mutated\n",
    "    df = input_df.copy()\n",
    "    \n",
    "    # Column presence check\n",
    "    required_columns = {'comments', 'created_at'}\n",
    "    if not required_columns.issubset(df.columns):\n",
    "        missing = required_columns - set(df.columns)\n",
    "        raise KeyError(f\"缺失必要列: {missing}\")\n",
    "    \n",
    "    # BUGFIX: `pandarallel` was referenced without ever being imported anywhere\n",
    "    # in this notebook, so a fresh kernel raised NameError here (the previous\n",
    "    # run only worked via hidden kernel state). Treat it as an optional\n",
    "    # dependency with a sequential fallback.\n",
    "    try:\n",
    "        from pandarallel import pandarallel\n",
    "    except ImportError:\n",
    "        pandarallel = None\n",
    "    if pandarallel:\n",
    "        pandarallel.initialize(progress_bar=True, verbose=0)\n",
    "        apply_func = lambda col, fn: col.parallel_apply(fn)\n",
    "    else:\n",
    "        print(\"[提示] 未安装pandarallel，使用普通模式\")\n",
    "        apply_func = lambda col, fn: col.apply(fn)\n",
    "    \n",
    "    # Clean, then tokenize\n",
    "    df['cleaned'] = apply_func(df['comments'], clean_text)\n",
    "    df['tokens'] = apply_func(df['cleaned'], tokenize_pipeline)\n",
    "    \n",
    "    # Time index (errors='coerce' turns unparseable values into NaT)\n",
    "    try:\n",
    "        df['created_at'] = pd.to_datetime(df['created_at'], errors='coerce')\n",
    "        df.set_index('created_at', inplace=True)\n",
    "    except Exception as e:\n",
    "        print(f\"时间处理异常: {str(e)}\")\n",
    "    \n",
    "    # Final sanity check\n",
    "    assert 'tokens' in df.columns, \"tokens列未生成！\"\n",
    "    \n",
    "    return df\n",
    "\n",
    "# --------------------------\n",
    "# Execution and output\n",
    "# --------------------------\n",
    "if __name__ == \"__main__\":\n",
    "    # Input frame from the previous cleaning cell.\n",
    "    # NOTE(review): relies on `data_drop` existing in the kernel — hidden state;\n",
    "    # a fresh Restart & Run All needs the earlier cell to have run first.\n",
    "    sample_data = data_drop\n",
    "    try:\n",
    "        # Run the full processing pipeline\n",
    "        processed_df = process_data(sample_data)\n",
    "        \n",
    "        # Show a sample (guarded by a column-presence check)\n",
    "        if {'comments', 'tokens'}.issubset(processed_df.columns):\n",
    "            print(\"\\n处理结果样例:\")\n",
    "            print(processed_df[['comments', 'tokens']].head())\n",
    "        else:\n",
    "            print(\"输出列缺失，请检查处理流程！\")\n",
    "        \n",
    "        # Word frequencies (generator expression keeps memory flat)\n",
    "        word_counts = Counter(\n",
    "            word \n",
    "            for tokens in processed_df['tokens'] \n",
    "            for word in tokens\n",
    "        )\n",
    "        print(\"\\n高频词TOP10:\")\n",
    "        for word, count in word_counts.most_common(10):\n",
    "            print(f\"{word}: {count}\")\n",
    "            \n",
    "        # Persist results (index kept: it carries the timestamps)\n",
    "        processed_df.to_csv('cleaned_data.csv', \n",
    "                          index=True, \n",
    "                          encoding='utf-8-sig')\n",
    "    except Exception as e:\n",
    "        print(f\"处理流程失败: {str(e)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5b310867-4cbd-4d83-a5d9-62575115cf3b",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 24,
   "id": "6fc39c71-78db-4bb9-b989-347f2f63fca8",
   "metadata": {},
   "outputs": [],
   "source": [
    "import re\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "import nltk\n",
    "from nltk.tokenize import RegexpTokenizer\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from gensim.models import Word2Vec\n",
    "from transformers import BertTokenizer, BertModel\n",
    "import spacy\n",
    "import random\n",
    "from spacy.tokens import Doc\n",
    "import ast"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "a42a00fe-9a95-465e-9154-18eebfbd13ed",
   "metadata": {},
   "outputs": [],
   "source": [
    "class DataPreprocessor:\n",
    "    \"\"\"Data preprocessing pipeline (nltk RegexpTokenizer based).\"\"\"\n",
    "    \n",
    "    def __init__(self, stopwords_path='tycb.txt'):\n",
    "        # Tokenizer that extracts runs of CJK characters only\n",
    "        self.chinese_tokenizer = RegexpTokenizer(r'[\\u4e00-\\u9fa5]+') \n",
    "        \n",
    "        # Load stopwords; fall back to an empty set when the file is absent\n",
    "        try:\n",
    "            with open(stopwords_path, 'r', encoding='utf-8') as f:\n",
    "                self.stopwords = set([line.strip() for line in f])\n",
    "        except FileNotFoundError:\n",
    "            self.stopwords = set()\n",
    "\n",
    "    def clean_text(self, text):\n",
    "        \"\"\"Clean raw text: strip emoji, non-Chinese characters and whitespace.\"\"\"\n",
    "        # Emoji ranges. NOTE(review): recompiled on every call; re's internal\n",
    "        # cache makes this cheap, but a module-level constant would be clearer.\n",
    "        emoji_pattern = re.compile(\"[\"\n",
    "            u\"\\U0001F600-\\U0001F64F\"  \n",
    "            u\"\\U0001F300-\\U0001F5FF\"  \n",
    "            u\"\\U0001F680-\\U0001F6FF\"  \n",
    "            u\"\\u2600-\\u2B55]+\", flags=re.UNICODE)\n",
    "        text = emoji_pattern.sub('', str(text))\n",
    "        \n",
    "        # Remove every non-CJK character, then any whitespace\n",
    "        text = re.sub(r'[^\\u4e00-\\u9fa5]', '', text)\n",
    "        text = re.sub(r'\\s+', '', text)\n",
    "        return text.strip()\n",
    "\n",
    "    def tokenize(self, text):\n",
    "        \"\"\"Tokenise cleaned text with the CJK-only RegexpTokenizer.\"\"\"\n",
    "        return self.chinese_tokenizer.tokenize(text)\n",
    "\n",
    "    def remove_stopwords(self, tokens):\n",
    "        \"\"\"Drop tokens found in the stopword set.\"\"\"\n",
    "        return [word for word in tokens if word not in self.stopwords]"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7c232536-d437-403f-9004-22884e3dcdf4",
   "metadata": {},
   "outputs": [],
   "source": [
    "## Text representation\n",
    "class TextRepresentation:\n",
    "    \"\"\"Three text-representation methods: TF-IDF, Word2Vec, BERT.\"\"\"\n",
    "    \n",
    "    @staticmethod\n",
    "    def tfidf_representation(corpus, max_features=5000):\n",
    "        \"\"\"TF-IDF vectorisation; returns a sparse document-term matrix.\"\"\"\n",
    "        vectorizer = TfidfVectorizer(max_features=max_features)\n",
    "        return vectorizer.fit_transform(corpus)\n",
    "    \n",
    "    @staticmethod\n",
    "    def word2vec_representation(tokenized_docs, vector_size=300, window=5):\n",
    "        \"\"\"Word2Vec embeddings averaged per document (skip-gram, min_count=2).\"\"\"\n",
    "        model = Word2Vec(\n",
    "            sentences=tokenized_docs,\n",
    "            vector_size=vector_size,\n",
    "            window=window,\n",
    "            min_count=2,\n",
    "            sg=1\n",
    "        )\n",
    "        \n",
    "        # Document vector = mean of its word vectors; zeros when no word is in vocab\n",
    "        doc_vectors = []\n",
    "        for doc in tokenized_docs:\n",
    "            vectors = [model.wv[word] for word in doc if word in model.wv]\n",
    "            if vectors:\n",
    "                doc_vectors.append(np.mean(vectors, axis=0))\n",
    "            else:\n",
    "                doc_vectors.append(np.zeros(vector_size))\n",
    "        return np.array(doc_vectors)\n",
    "    \n",
    "    @staticmethod\n",
    "    def bert_representation(texts, model_name='bert-base-chinese'):\n",
    "        \"\"\"Extract BERT [CLS] sentence vectors (no gradients).\"\"\"\n",
    "        tokenizer = BertTokenizer.from_pretrained(model_name)\n",
    "        model = BertModel.from_pretrained(model_name)\n",
    "        \n",
    "        # [CLS] embedding of the last hidden layer serves as the sentence vector\n",
    "        inputs = tokenizer(texts, return_tensors='pt', padding=True, truncation=True)\n",
    "        with torch.no_grad():\n",
    "            outputs = model(**inputs)\n",
    "        return outputs.last_hidden_state[:, 0, :].numpy()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "50f51667-1436-4353-ada9-5dce94fdcd20",
   "metadata": {},
   "outputs": [],
   "source": [
    "class SyntaxEnhancedModel(torch.nn.Module):\n",
    "    \"\"\"Syntax-enhanced BERT model (spaCy dependency features + BiLSTM).\"\"\"\n",
    "    \n",
    "    def __init__(self, bert_model='bert-base-chinese', syntax_dim=64):\n",
    "        super().__init__()\n",
    "        # Load the spaCy Chinese pipeline for dependency parsing\n",
    "        self.nlp = spacy.load(\"zh_core_web_sm\")\n",
    "        \n",
    "        # Base components: pretrained BERT encoder and matching tokenizer\n",
    "        self.bert = BertModel.from_pretrained(bert_model)\n",
    "        self.bert_tokenizer = BertTokenizer.from_pretrained(bert_model)\n",
    "        \n",
    "        # Fusion layer. NOTE(review): extract_syntax_features hardcodes 64\n",
    "        # feature slots, so any syntax_dim other than 64 breaks this input size.\n",
    "        self.lstm = torch.nn.LSTM(\n",
    "            input_size=768 + syntax_dim,\n",
    "            hidden_size=256,\n",
    "            bidirectional=True,\n",
    "            batch_first=True\n",
    "        )\n",
    "        # NOTE(review): named `crf`, but this is a plain Linear projection to 2 logits\n",
    "        self.crf = torch.nn.Linear(512, 2)\n",
    "\n",
    "    def extract_syntax_features(self, text):\n",
    "        \"\"\"Extract a fixed 64-value vector of spaCy dependency features.\"\"\"\n",
    "        doc = self.nlp(text)\n",
    "        features = []\n",
    "        for token in doc:\n",
    "            features.extend([\n",
    "                token.head.i,        # index of the dependency head\n",
    "                len(token.dep_),     # length of the dependency label string\n",
    "                token.i              # token position\n",
    "            ])\n",
    "        # Normalise the feature length to exactly 64 values\n",
    "        features = features[:64]    # truncate long sentences\n",
    "        features += [0]*(64-len(features))  # zero-pad short ones\n",
    "        return torch.tensor(features, dtype=torch.float32)\n",
    "\n",
    "    def forward(self, input_texts):\n",
    "        # BERT contextual token embeddings\n",
    "        inputs = self.bert_tokenizer(\n",
    "            input_texts, \n",
    "            return_tensors='pt', \n",
    "            padding=True,\n",
    "            truncation=True\n",
    "        )\n",
    "        bert_out = self.bert(**inputs).last_hidden_state\n",
    "        \n",
    "        # One syntax vector per input text\n",
    "        syntax_feats = torch.stack(\n",
    "            [self.extract_syntax_features(text) for text in input_texts]\n",
    "        )\n",
    "        \n",
    "        # Broadcast the sentence-level syntax vector across the token axis\n",
    "        syntax_feats = syntax_feats.unsqueeze(1).expand(\n",
    "            -1, bert_out.size(1), -1\n",
    "        )\n",
    "        combined = torch.cat([bert_out, syntax_feats], dim=-1)\n",
    "        \n",
    "        # Bidirectional LSTM over the fused sequence\n",
    "        lstm_out, _ = self.lstm(combined)\n",
    "        \n",
    "        # Per-token projection to 2 logits\n",
    "        return self.crf(lstm_out)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "52e2a076-38ce-412c-8f67-b9778d38fb4f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "初始化创新模型...\n",
      "模型输出维度：torch.Size([200, 117, 2])\n",
      "TF-IDF矩阵：(2656, 1903)\n",
      "Word2Vec向量：(2656, 300)\n",
      "BERT向量：(2656, 768)\n"
     ]
    }
   ],
   "source": [
    "if __name__ == \"__main__\":\n",
    "    # Load the cleaned data produced by the earlier cleaning cell\n",
    "    data = pd.read_csv('cleaned_data.csv')\n",
    "    texts = data['cleaned']\n",
    "    \n",
    "    # Build the preprocessor (CJK tokenizer + stopword list)\n",
    "    preprocessor = DataPreprocessor()\n",
    "    \n",
    "    # Preprocess: clean -> tokenize -> remove stopwords\n",
    "    cleaned_texts = [preprocessor.clean_text(t) for t in texts]\n",
    "    tokenized_docs = [preprocessor.tokenize(t) for t in cleaned_texts]\n",
    "    filtered_docs = [preprocessor.remove_stopwords(doc) for doc in tokenized_docs]\n",
    "    # NOTE(review): unseeded random.sample — this subset (and the model demo\n",
    "    # below) is not reproducible across runs; seed `random` in a config cell.\n",
    "    sampled_data = random.sample(cleaned_texts, 200)\n",
    "    # Compare the three text representations\n",
    "    tfidf_matrix = TextRepresentation.tfidf_representation(cleaned_texts)\n",
    "    w2v_vectors = TextRepresentation.word2vec_representation(filtered_docs)\n",
    "    bert_vectors = TextRepresentation.bert_representation(cleaned_texts)\n",
    "    \n",
    "    # Demo of the syntax-enhanced model on the 200-text sample\n",
    "    print(\"\\n初始化创新模型...\")\n",
    "    model = SyntaxEnhancedModel()\n",
    "    sample_text = cleaned_texts  # NOTE(review): assigned but never used below\n",
    "    output = model(sampled_data)\n",
    "    print(f\"模型输出维度：{output.shape}\")  \n",
    "    print(f\"TF-IDF矩阵：{tfidf_matrix.shape}\")\n",
    "    print(f\"Word2Vec向量：{w2v_vectors.shape}\")\n",
    "    print(f\"BERT向量：{bert_vectors.shape}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "417c143c-5a63-4447-a4e9-4ebb30c02f8f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "import re\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import torch\n",
    "import spacy\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "from gensim.corpora import Dictionary\n",
    "from gensim.models import LdaModel\n",
    "import pyLDAvis.gensim_models\n",
    "from statsmodels.tsa.seasonal import STL\n",
    "from transformers import BertTokenizer, BertForSequenceClassification\n",
    "from nltk.tokenize import RegexpTokenizer\n",
    "from datetime import datetime\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from ltp import LTP\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']  # SimHei ships with Windows; needed for CJK glyphs\n",
    "plt.rcParams['axes.unicode_minus'] = False   # keep minus signs renderable alongside CJK fonts"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "dc153190-8618-4e24-b84a-ee1d42359d03",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ----------------- Sentiment analysis module ----------------\n",
    "class AspectAnalyzer:\n",
    "    \"\"\"Fine-grained sentiment analysis with BERT plus attention visualisation.\"\"\"\n",
    "    def __init__(self):\n",
    "        self.nlp = spacy.load(\"zh_core_web_sm\")\n",
    "        self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "        # NOTE(review): the 3-class head is randomly initialised unless fine-tuned\n",
    "        # weights are loaded — its raw predictions are not meaningful yet.\n",
    "        self.model = BertForSequenceClassification.from_pretrained(\n",
    "            'bert-base-chinese', \n",
    "            num_labels=3,\n",
    "            output_attentions=True\n",
    "        )\n",
    "    def visualize_dependencies(self, text):\n",
    "        \"\"\"Render the spaCy dependency parse as a standalone HTML page.\"\"\"\n",
    "        return spacy.displacy.render(self.nlp(text), style='dep', page=True)\n",
    "\n",
    "    def analyze_sentiment(self, text):\n",
    "        \"\"\"Return softmax class probabilities; saves an attention heatmap as a side effect.\"\"\"\n",
    "        inputs = self.tokenizer(text, return_tensors='pt')\n",
    "        outputs = self.model(**inputs)\n",
    "        self._plot_attention(outputs.attentions, inputs['input_ids'][0])\n",
    "        return torch.softmax(outputs.logits, dim=1).detach().numpy()\n",
    "\n",
    "    def _plot_attention(self, attentions, input_ids):\n",
    "        \"\"\"Plot the last layer's head-averaged attention; saves attention_heatmap.png.\"\"\"\n",
    "        tokens = self.tokenizer.convert_ids_to_tokens(input_ids)\n",
    "        plt.figure(figsize=(15, 5))\n",
    "        sns.heatmap(\n",
    "            attentions[-1].mean(dim=1)[0].detach().numpy(),\n",
    "            xticklabels=tokens,\n",
    "            cmap=\"YlGnBu\",\n",
    "            annot=True,\n",
    "            fmt=\".2f\"\n",
    "        )\n",
    "        plt.xticks(rotation=45, ha='right')\n",
    "        plt.title(\"Attention Weights\")\n",
    "        plt.tight_layout()\n",
    "        plt.savefig('attention_heatmap.png', dpi=300)\n",
    "        plt.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "24256169-a7ba-422b-b18b-8cac66558eee",
   "metadata": {},
   "outputs": [],
   "source": [
    "# ----------------- 主题演化模块（已修复）-------------\n",
    "class TopicEvolution:\n",
    "    \"\"\"动态主题演化分析（时间格式修复版）\"\"\"\n",
    "    def __init__(self, num_periods=4):\n",
    "        self.num_periods = num_periods\n",
    "        self.dictionary = None\n",
    "        self.models = {}\n",
    "        self.corpus = {}\n",
    "        self.time_labels = []\n",
    "        self.num_topics = 0  # 显式初始化\n",
    "        \n",
    "    def fit(self, docs, timestamps, num_topics=5):\n",
    "        \"\"\"时间处理逻辑修复\"\"\"\n",
    "        self.num_topics = num_topics  # 确保属性存在\n",
    "        \n",
    "        try:\n",
    "            # 强化时间解析（中文兼容）\n",
    "            time_series = pd.to_datetime(\n",
    "                timestamps,\n",
    "                format='mixed' if any('年' in str(t) for t in timestamps) else None,\n",
    "                errors='coerce'\n",
    "            )\n",
    "            valid_mask = time_series.notna()\n",
    "            \n",
    "            if valid_mask.sum() < 10:\n",
    "                print(\"警告：时间数据不足，改为伪时序分析\")\n",
    "                time_series = pd.date_range(\n",
    "                    start='2023-01-01',\n",
    "                    periods=len(docs),\n",
    "                    freq='D'\n",
    "                )\n",
    "            else:\n",
    "                time_series = time_series[valid_mask]\n",
    "                docs = [d for d, m in zip(docs, valid_mask) if m]\n",
    "            \n",
    "            # 修复分箱逻辑：转换时间戳为数值型\n",
    "            time_numeric = time_series.view('int64')  # 使用实际时间值代替rank\n",
    "            self.time_labels = pd.qcut(\n",
    "                time_numeric,\n",
    "                self.num_periods,\n",
    "                labels=[f\"Phase_{i+1}\" for i in range(self.num_periods)]\n",
    "            ).astype(str)\n",
    "            \n",
    "            # 构建词典和模型\n",
    "            self.dictionary = Dictionary(docs)\n",
    "            for phase in np.unique(self.time_labels):\n",
    "                phase_docs = [docs[i] for i, label in enumerate(self.time_labels) if label == phase]\n",
    "                self.corpus[phase] = [self.dictionary.doc2bow(d) for d in phase_docs]\n",
    "                self.models[phase] = LdaModel(\n",
    "                    corpus=self.corpus[phase],\n",
    "                    num_topics=num_topics,\n",
    "                    id2word=self.dictionary,\n",
    "                    passes=15,\n",
    "                    random_state=42\n",
    "                )\n",
    "                \n",
    "        except Exception as e:\n",
    "            print(f\"时间处理失败: {str(e)}\")\n",
    "            self._fallback_fit(docs, num_topics)\n",
    "\n",
    "    def _fallback_fit(self, docs, num_topics):\n",
    "        \"\"\"降级模式修复\"\"\"\n",
    "        self.num_topics = num_topics  # 关键修复：传递参数\n",
    "        print(\"进入降级模式：统一时段分析\")\n",
    "        self.time_labels = ['Full_Period']\n",
    "        self.dictionary = Dictionary(docs)\n",
    "        self.corpus['Full_Period'] = [self.dictionary.doc2bow(d) for d in docs]\n",
    "        self.models['Full_Period'] = LdaModel(\n",
    "            corpus=self.corpus['Full_Period'],\n",
    "            num_topics=num_topics,\n",
    "            id2word=self.dictionary,\n",
    "            passes=15,\n",
    "            random_state=42\n",
    "        )\n",
    "        \n",
    "    def visualize_evolution(self):\n",
    "        \"\"\"可视化主题强度变化（修复维度匹配问题）\"\"\"\n",
    "        # 正确初始化DataFrame结构\n",
    "        strength = pd.DataFrame(\n",
    "            index=range(self.num_topics),  # 行：主题编号\n",
    "            columns=self.time_labels,       # 列：时间阶段\n",
    "            dtype=float\n",
    "        )\n",
    "        \n",
    "        # 计算主题强度\n",
    "        for phase in self.time_labels:\n",
    "            model = self.models[phase]\n",
    "            topic_dist = np.zeros(self.num_topics)\n",
    "            \n",
    "            for bow in self.corpus[phase]:\n",
    "                topics = model.get_document_topics(bow)\n",
    "                for t, p in topics:\n",
    "                    topic_dist[t] += p\n",
    "            \n",
    "            if len(self.corpus[phase]) > 0:\n",
    "                strength[phase] = topic_dist / len(self.corpus[phase])\n",
    "            else:\n",
    "                strength[phase] = 0\n",
    "        \n",
    "        # 转置数据以便绘图\n",
    "        strength = strength.T  # 行：时间阶段，列：主题\n",
    "        \n",
    "        # 绘制趋势图\n",
    "        plt.figure(figsize=(12, 6))\n",
    "        for topic in strength.columns:\n",
    "            plt.plot(\n",
    "                strength.index,\n",
    "                strength[topic],\n",
    "                marker='o',\n",
    "                linestyle='--',\n",
    "                label=f'Topic {topic+1}'\n",
    "            )\n",
    "        \n",
    "        plt.xticks(rotation=45)\n",
    "        plt.xlabel(\"Time Period\")\n",
    "        plt.ylabel(\"Normalized Strength\")\n",
    "        plt.title(\"Topic Evolution\")\n",
    "        plt.legend(bbox_to_anchor=(1.05, 1))\n",
    "        plt.tight_layout()\n",
    "        plt.savefig('topic_evolution.png', dpi=300)\n",
    "        plt.close()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 20,
   "id": "f1d8d107-4f2e-4924-a57f-8d2106ab35a4",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "                     created_at  \\\n",
      "0     2025-05-03 12:00:06+08:00   \n",
      "1     2025-05-03 12:00:50+08:00   \n",
      "2     2025-05-03 12:01:46+08:00   \n",
      "3     2025-05-03 12:01:15+08:00   \n",
      "4     2025-05-03 12:04:11+08:00   \n",
      "...                         ...   \n",
      "2651  2025-05-02 20:29:42+08:00   \n",
      "2652  2025-05-02 20:27:08+08:00   \n",
      "2653  2025-05-02 20:24:35+08:00   \n",
      "2654  2025-05-02 20:23:41+08:00   \n",
      "2655  2025-05-02 20:21:49+08:00   \n",
      "\n",
      "                                               comments  \\\n",
      "0                    和青年演员王一博一起，唱响新时代“强国有我”的宣言，延续百年精神火种   \n",
      "1                                                王一博💚💚💚   \n",
      "2     [打call]《青春赞歌-2025五四青年节特别节目》主题曲《青春探路者》由@UNIQ-王一...   \n",
      "3                                    正能量青年演员王一博！[打call]   \n",
      "4                                          一起来听《青春探路者》💚   \n",
      "...                                                 ...   \n",
      "2651                                            虎不虎 哈哈哈   \n",
      "2652                                                哇！！   \n",
      "2653                                 什么？幸芷蕾发微博了！[打call]   \n",
      "2654                                啥？你说啥？我家辛芷蕾发微博了[憧憬]   \n",
      "2655                                     以为你忘记密码了，姐[允悲]   \n",
      "\n",
      "                                cleaned  \\\n",
      "0        和青年演员王一博一起唱响新时代强国有我的宣言延续百年精神火种   \n",
      "1                                   王一博   \n",
      "2     打青春赞歌五四青年节特别节目主题曲青春探路者由王一博热血献唱送花花   \n",
      "3                           正能量青年演员王一博打   \n",
      "4                             一起来听青春探路者   \n",
      "...                                 ...   \n",
      "2651                             虎不虎哈哈哈   \n",
      "2652                                  哇   \n",
      "2653                         什么幸芷蕾发微博了打   \n",
      "2654                    啥你说啥我家辛芷蕾发微博了憧憬   \n",
      "2655                        以为你忘记密码了姐允悲   \n",
      "\n",
      "                                                 tokens  \n",
      "0     ['青年', '演员', '王一博', '唱响', '新', '时代', '强国', '宣言...  \n",
      "1                                               ['王一博']  \n",
      "2     ['青春', '赞歌', '五四', '青年节', '特别节目', '主题曲', '青春',...  \n",
      "3                        ['正', '能量', '青年', '演员', '王一博']  \n",
      "4                                    ['听', '青春', '探路者']  \n",
      "...                                                 ...  \n",
      "2651                                     ['虎不虎', '哈哈哈']  \n",
      "2652                                                 []  \n",
      "2653                                     ['幸芷蕾', '发微博']  \n",
      "2654                    ['说', '我家', '辛芷蕾', '发微博', '憧憬']  \n",
      "2655                                ['忘记', '密码', '姐允悲']  \n",
      "\n",
      "[2656 rows x 4 columns]\n"
     ]
    }
   ],
   "source": [
    "data = pd.read_csv('cleaned_data.csv')\n",
    "print(data)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 25,
   "id": "aaf22e41-8386-40ee-b9df-3f95f6624f2b",
   "metadata": {},
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Some weights of BertForSequenceClassification were not initialized from the model checkpoint at bert-base-chinese and are newly initialized: ['classifier.bias', 'classifier.weight']\n",
      "You should probably TRAIN this model on a down-stream task to be able to use it for predictions and inference.\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "情感分布: [0.53477174 0.24306567 0.22216254]\n"
     ]
    }
   ],
   "source": [
    "# ----------------- 主程序 ----------------\n",
    "if __name__ == \"__main__\":\n",
    "    try:\n",
    "        # 数据准备\n",
    "        data = pd.read_csv('cleaned_data.csv')\n",
    "        data['cleaned'] = data['cleaned'].fillna('').astype(str)  # 填充空值\n",
    "        texts = data['cleaned'].sample(n=1000, random_state=42, replace=False).reset_index(drop=True) \n",
    "        processed_docstexts = [doc.split() for doc in texts if doc.strip() != \"\"]\n",
    "        data['tokens'] = data['tokens'].apply(ast.literal_eval)\n",
    "     # 情感分析示例\n",
    "        analyzer = AspectAnalyzer()\n",
    "        sample_idx = 0\n",
    "        print(\"情感分布:\", analyzer.analyze_sentiment(texts.iloc[sample_idx])[0])\n",
    "        \n",
    "        # 主题演化分析\n",
    "        topic_model = TopicEvolution(num_periods=1)  # 因时间范围较小设为1个时段\n",
    "        topic_model.fit(data['tokens'],data['created_at'])\n",
    "        topic_model.visualize_evolution()\n",
    "        \n",
    "        # 生成LDA可视化\n",
    "        vis_data = pyLDAvis.gensim_models.prepare(\n",
    "            topic_model.models[topic_model.time_labels[0]],\n",
    "            topic_model.corpus[topic_model.time_labels[0]],\n",
    "            topic_model.dictionary\n",
    "        )\n",
    "        pyLDAvis.save_html(vis_data, 'lda_visualization.html')\n",
    "\n",
    "    except Exception as e:\n",
    "        import traceback\n",
    "        print(f\"运行错误: {str(e)}\\n{traceback.format_exc()}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fd8ba9e0-1099-444b-8f95-c2cf47b0825f",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "24f02a16-de6f-4e2c-92b1-9d23166589da",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.11.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
