{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2e174045",
   "metadata": {},
   "outputs": [],
   "source": [
    "# -*- coding: utf-8 -*-\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from transformers import BertTokenizer, BertModel\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import re\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import classification_report, confusion_matrix\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "from tqdm import tqdm\n",
    "\n",
    "# Configure matplotlib so Chinese labels/titles render correctly\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']\n",
    "plt.rcParams['axes.unicode_minus'] = False\n",
    "\n",
    "\n",
    "# ========== BERT feature extractor ==========\n",
    "class BertFeatureExtractor:\n",
    "    \"\"\"Encodes Chinese texts into fixed-size vectors with bert-base-chinese.\n",
    "\n",
    "    Each text becomes a single 768-dim embedding by mean-pooling the\n",
    "    last hidden state over the token dimension.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        self.tokenizer = BertTokenizer.from_pretrained('bert-base-chinese')\n",
    "        self.model = BertModel.from_pretrained('bert-base-chinese')\n",
    "        self.model.eval()  # inference only: disable dropout\n",
    "\n",
    "    def extract_features(self, texts, batch_size=32):\n",
    "        \"\"\"Return a (len(texts), 768) numpy array of mean-pooled embeddings.\n",
    "\n",
    "        Args:\n",
    "            texts: list of raw strings to encode.\n",
    "            batch_size: number of texts per forward pass.\n",
    "        \"\"\"\n",
    "        features = []\n",
    "        # BUGFIX: step by batch_size. The original used range(0, len(texts)),\n",
    "        # which started an overlapping batch at *every* index, so the\n",
    "        # concatenated result had far more rows than len(texts).\n",
    "        for i in tqdm(range(0, len(texts), batch_size), desc=\"提取BERT特征\"):\n",
    "            batch = texts[i:i + batch_size]\n",
    "            inputs = self.tokenizer(\n",
    "                batch,\n",
    "                padding=True,\n",
    "                truncation=True,\n",
    "                max_length=128,\n",
    "                return_tensors=\"pt\"\n",
    "            )\n",
    "            with torch.no_grad():  # no gradients needed for feature extraction\n",
    "                outputs = self.model(**inputs)\n",
    "            # Mean-pool over tokens -> one vector per text in the batch\n",
    "            features.append(outputs.last_hidden_state.mean(dim=1))\n",
    "\n",
    "        all_features = torch.cat(features, dim=0)  # (len(texts), hidden_dim)\n",
    "        return all_features.cpu().numpy()\n",
    "\n",
    "\n",
    "# ========== 多模态数据集类 ==========\n",
    "class FusionDataset(Dataset):\n",
    "    def __init__(self, bert_features, temporal_sequences, labels):\n",
    "        self.bert_features = torch.FloatTensor(bert_features)\n",
    "        self.temporal_sequences = torch.FloatTensor(temporal_sequences)\n",
    "        self.labels = torch.LongTensor(labels)\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.labels)\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        return {\n",
    "            'bert': self.bert_features[idx],\n",
    "            'temporal': self.temporal_sequences[idx],\n",
    "            'label': self.labels[idx]\n",
    "        }\n",
    "\n",
    "\n",
    "# ========== 融合模型架构 ==========\n",
    "class FusionClassifier(nn.Module):\n",
    "    def __init__(self, bert_dim=768, temporal_input_size=3, hidden_dim=128):\n",
    "        super().__init__()\n",
    "        # BERT分支\n",
    "        self.bert_fc = nn.Sequential(\n",
    "            nn.Linear(bert_dim, 256),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(0.2)\n",
    "        )\n",
    "\n",
    "        # 时序分支\n",
    "        self.temporal_lstm = nn.LSTM(\n",
    "            input_size=temporal_input_size,\n",
    "            hidden_size=64,\n",
    "            num_layers=2,\n",
    "            batch_first=True,\n",
    "            dropout=0.3\n",
    "        )\n",
    "\n",
    "        # 融合分类器\n",
    "        self.fusion = nn.Sequential(\n",
    "            nn.Linear(256 + 64, hidden_dim),\n",
    "            nn.ReLU(),\n",
    "            nn.Dropout(0.3),\n",
    "            nn.Linear(hidden_dim, 2)\n",
    "        )\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        # BERT特征处理\n",
    "        bert_out = self.bert_fc(inputs['bert'])\n",
    "\n",
    "        # 时序特征处理\n",
    "        temporal_out, _ = self.temporal_lstm(inputs['temporal'])\n",
    "        temporal_out = temporal_out[:, -1, :]  # 取最后时间步\n",
    "\n",
    "        # 特征融合\n",
    "        fused = torch.cat([bert_out, temporal_out], dim=1)\n",
    "        return self.fusion(fused)\n",
    "\n",
    "\n",
    "# ========== Data preparation ==========\n",
    "def prepare_fusion_data(window_size=5, test_size=0.3,\n",
    "                        csv_path=r'D:\\BaiduNetdiskDownload\\时空稳定分析_20250507_1224.csv'):\n",
    "    \"\"\"Build sliding-window multimodal samples from the raw CSV.\n",
    "\n",
    "    Args:\n",
    "        window_size: number of consecutive rows per temporal window.\n",
    "        test_size: fraction of samples held out for testing.\n",
    "        csv_path: source CSV path (needs 'time', 'text', 'sentiment'\n",
    "            columns). Defaults to the original hard-coded location so\n",
    "            existing callers are unaffected.\n",
    "\n",
    "    Returns:\n",
    "        (X_train, X_test, text_train, text_test, y_train, y_test) where the\n",
    "        X arrays hold scaled temporal windows, the text lists hold the last\n",
    "        text of each window, and y holds binary labels (1 = '正面').\n",
    "    \"\"\"\n",
    "    df = pd.read_csv(csv_path, parse_dates=['time'])\n",
    "    df.sort_values('time', inplace=True)\n",
    "\n",
    "    # Feature engineering\n",
    "    df['hour'] = df['time'].dt.hour\n",
    "    df['has_link'] = df['text'].str.contains('http').astype(int)\n",
    "\n",
    "    def _extract_amount(text):\n",
    "        # First number after '差', or 0 when absent.\n",
    "        # FIX: run the regex once per row (the original evaluated it twice).\n",
    "        match = re.search(r'差(\\d+)', text)\n",
    "        return int(match.group(1)) if match else 0\n",
    "\n",
    "    df['amount'] = df['text'].apply(_extract_amount)\n",
    "\n",
    "    texts = []\n",
    "    temporal_features = []\n",
    "    labels = []\n",
    "\n",
    "    # Build sliding windows over the time-sorted rows\n",
    "    scaler = StandardScaler()\n",
    "    for i in range(len(df) - window_size):\n",
    "        window = df.iloc[i:i + window_size]\n",
    "\n",
    "        # Text sample: the last message of the window\n",
    "        texts.append(window.iloc[-1]['text'])\n",
    "\n",
    "        # NOTE(review): fit_transform re-fits the scaler on *every* window,\n",
    "        # so features are standardized within each window and test windows\n",
    "        # use their own statistics. If global scaling was intended, fit\n",
    "        # once on the training portion only — confirm before changing.\n",
    "        temporal = window[['hour', 'has_link', 'amount']].values\n",
    "        temporal_features.append(scaler.fit_transform(temporal))\n",
    "\n",
    "        # Label: sentiment of the window's last row (1 = positive)\n",
    "        labels.append(1 if window.iloc[-1]['sentiment'] == '正面' else 0)\n",
    "\n",
    "    # Chronological split: shuffle=False keeps time order intact\n",
    "    X_train, X_test, text_train, text_test, y_train, y_test = train_test_split(\n",
    "        np.array(temporal_features),\n",
    "        texts,\n",
    "        np.array(labels),\n",
    "        test_size=test_size,\n",
    "        shuffle=False\n",
    "    )\n",
    "\n",
    "    return X_train, X_test, text_train, text_test, y_train, y_test\n",
    "\n",
    "\n",
    "# ========== Training loop ==========\n",
    "def train_fusion_model(model, train_loader, test_loader, epochs=100):\n",
    "    \"\"\"Train the fusion classifier and return it at its best checkpoint.\n",
    "\n",
    "    Per epoch: one pass over train_loader with Adam + cross-entropy, one\n",
    "    no-grad pass over test_loader. The weights with the highest test\n",
    "    accuracy are saved to 'best_fusion_model.pth'. Loss/accuracy curves\n",
    "    are plotted at the end.\n",
    "\n",
    "    Args:\n",
    "        model: module whose forward accepts the batch dict and returns logits.\n",
    "        train_loader / test_loader: DataLoaders yielding\n",
    "            {'bert', 'temporal', 'label'} batches.\n",
    "        epochs: number of training epochs.\n",
    "\n",
    "    Returns:\n",
    "        The model, restored to the best-checkpoint weights when one was saved.\n",
    "    \"\"\"\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001, weight_decay=1e-4)\n",
    "\n",
    "    train_losses, test_losses = [], []\n",
    "    train_accs, test_accs = [], []\n",
    "    best_acc = 0.0\n",
    "    checkpoint_saved = False  # whether a best checkpoint was ever written\n",
    "\n",
    "    for epoch in range(epochs):\n",
    "        # ---- training phase ----\n",
    "        model.train()\n",
    "        total_loss = 0\n",
    "        correct = 0\n",
    "        for batch in tqdm(train_loader, desc=f\"训练 Epoch {epoch + 1}\"):\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(batch)\n",
    "            loss = criterion(outputs, batch['label'])\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            total_loss += loss.item()\n",
    "            preds = outputs.argmax(dim=1)\n",
    "            correct += (preds == batch['label']).sum().item()\n",
    "\n",
    "        train_loss = total_loss / len(train_loader)\n",
    "        train_acc = correct / len(train_loader.dataset)\n",
    "\n",
    "        # ---- evaluation phase ----\n",
    "        model.eval()\n",
    "        test_loss = 0\n",
    "        test_correct = 0\n",
    "        with torch.no_grad():\n",
    "            for batch in test_loader:\n",
    "                outputs = model(batch)\n",
    "                test_loss += criterion(outputs, batch['label']).item()\n",
    "                preds = outputs.argmax(dim=1)\n",
    "                test_correct += (preds == batch['label']).sum().item()\n",
    "\n",
    "        test_loss /= len(test_loader)\n",
    "        test_acc = test_correct / len(test_loader.dataset)\n",
    "\n",
    "        # Record per-epoch metrics for the curves below\n",
    "        train_losses.append(train_loss)\n",
    "        train_accs.append(train_acc)\n",
    "        test_losses.append(test_loss)\n",
    "        test_accs.append(test_acc)\n",
    "\n",
    "        # Checkpoint whenever test accuracy improves\n",
    "        if test_acc > best_acc:\n",
    "            best_acc = test_acc\n",
    "            torch.save(model.state_dict(), 'best_fusion_model.pth')\n",
    "            checkpoint_saved = True\n",
    "\n",
    "        print(f\"Epoch {epoch + 1}/{epochs} | \"\n",
    "              f\"Train Loss: {train_loss:.4f} | Train Acc: {train_acc:.2%} | \"\n",
    "              f\"Test Loss: {test_loss:.4f} | Test Acc: {test_acc:.2%}\")\n",
    "\n",
    "    # BUGFIX: the original tracked and saved the best checkpoint but then\n",
    "    # returned the *last-epoch* weights; restore the best weights here so\n",
    "    # the returned model matches best_acc.\n",
    "    if checkpoint_saved:\n",
    "        model.load_state_dict(torch.load('best_fusion_model.pth'))\n",
    "\n",
    "    # Visualize the training history\n",
    "    plt.figure(figsize=(15, 5))\n",
    "    plt.subplot(1, 2, 1)\n",
    "    plt.plot(train_losses, label='训练损失')\n",
    "    plt.plot(test_losses, label='测试损失')\n",
    "    plt.title('损失曲线')\n",
    "    plt.legend()\n",
    "\n",
    "    plt.subplot(1, 2, 2)\n",
    "    plt.plot(train_accs, label='训练准确率')\n",
    "    plt.plot(test_accs, label='测试准确率')\n",
    "    plt.title('准确率曲线')\n",
    "    plt.legend()\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "    return model\n",
    "\n",
    "\n",
    "# ========== Main ==========\n",
    "if __name__ == \"__main__\":\n",
    "    # Build the multimodal samples (temporal windows + window-final texts)\n",
    "    X_train, X_test, text_train, text_test, y_train, y_test = prepare_fusion_data(window_size=6)\n",
    "\n",
    "    # Encode the texts with BERT\n",
    "    extractor = BertFeatureExtractor()\n",
    "    bert_train = extractor.extract_features(text_train)\n",
    "    bert_test = extractor.extract_features(text_test)\n",
    "\n",
    "    # Wrap everything in Datasets / DataLoaders\n",
    "    train_dataset = FusionDataset(bert_train, X_train, y_train)\n",
    "    test_dataset = FusionDataset(bert_test, X_test, y_test)\n",
    "    train_loader = DataLoader(train_dataset, batch_size=32, shuffle=True)\n",
    "    test_loader = DataLoader(test_dataset, batch_size=32)\n",
    "\n",
    "    # Model: temporal input width taken from the prepared windows\n",
    "    model = FusionClassifier(temporal_input_size=X_train.shape[-1])\n",
    "\n",
    "    # Train\n",
    "    trained_model = train_fusion_model(model, train_loader, test_loader, epochs=50)\n",
    "\n",
    "    # Final evaluation over the held-out set\n",
    "    trained_model.eval()\n",
    "    all_preds = []\n",
    "    with torch.no_grad():\n",
    "        for batch in test_loader:\n",
    "            batch_logits = trained_model(batch)\n",
    "            all_preds.extend(batch_logits.argmax(dim=1).cpu().numpy())\n",
    "\n",
    "    print(\"\\n分类报告：\")\n",
    "    print(classification_report(y_test, all_preds, target_names=['负面', '正面']))\n",
    "\n",
    "    # Confusion matrix with per-cell counts\n",
    "    cm = confusion_matrix(y_test, all_preds)\n",
    "    plt.figure(figsize=(6, 6))\n",
    "    plt.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n",
    "    plt.title('混淆矩阵')\n",
    "    plt.colorbar()\n",
    "    plt.xticks([0, 1], ['负面', '正面'])\n",
    "    plt.yticks([0, 1], ['负面', '正面'])\n",
    "    for row, col in np.ndindex(cm.shape):\n",
    "        # White text on dark cells, black on light, for readability\n",
    "        plt.text(col, row, f\"{cm[row, col]}\",\n",
    "                 ha=\"center\", va=\"center\",\n",
    "                 color=\"white\" if cm[row, col] > cm.max() / 2 else \"black\")\n",
    "    plt.show()\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "pytorch",
   "language": "python",
   "name": "env_name"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.21"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
