{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "# Yelp Review Full 数据集模型性能对比分析\n",
    "\n",
    "本笔记本将展示如何在YelpReviewFull数据集上训练和比较不同文本分类模型的性能表现。为了加快训练速度，我们将使用数据集的子集进行实验。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用设备: cuda\n"
     ]
    }
   ],
   "source": [
    "# Import required libraries\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader, Dataset\n",
    "import pandas as pd\n",
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "import seaborn as sns\n",
    "import time\n",
    "import re\n",
    "import nltk\n",
    "from nltk.corpus import stopwords\n",
    "from nltk.tokenize import word_tokenize\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
    "from collections import Counter\n",
    "import math\n",
    "\n",
    "# Fix random seeds so results are reproducible across runs\n",
    "torch.manual_seed(42)\n",
    "np.random.seed(42)\n",
    "\n",
    "# Configure the plotting style\n",
    "plt.style.use('seaborn-v0_8')\n",
    "sns.set_palette(\"husl\")\n",
    "\n",
    "# Use the GPU when available, otherwise fall back to the CPU\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "print(f\"使用设备: {device}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 数据加载和预处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正在下载停用词...\n"
     ]
    }
   ],
   "source": [
    "# Download the NLTK resources needed below (quiet=True suppresses verbose progress logs)\n",
    "print(\"正在下载停用词...\")\n",
    "nltk.download('stopwords', quiet=True)\n",
    "nltk.download('punkt', quiet=True)\n",
    "\n",
    "# Load the English stop-word list once; reused by preprocess_text for every review\n",
    "stop_words = set(stopwords.words('english'))\n",
    "\n",
    "def preprocess_text(text):\n",
    "    \"\"\"Lower-case, strip non-letters, tokenize, and drop stop words / short tokens.\n",
    "\n",
    "    Returns the cleaned tokens joined back into a single space-separated string.\n",
    "    \"\"\"\n",
    "    # Convert to lowercase\n",
    "    text = text.lower()\n",
    "    # Replace (rather than delete) special characters and digits with a space,\n",
    "    # so tokens separated only by punctuation do not merge into one word\n",
    "    # (e.g. 'good.service' becomes 'good service' instead of 'goodservice')\n",
    "    text = re.sub(r'[^a-zA-Z\\s]', ' ', text)\n",
    "    # Tokenize\n",
    "    tokens = word_tokenize(text)\n",
    "    # Remove stop words and tokens shorter than 3 characters\n",
    "    tokens = [word for word in tokens if word not in stop_words and len(word) > 2]\n",
    "    return ' '.join(tokens)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正在加载Yelp数据集...\n",
      "数据集大小: (50000, 2)\n",
      "类别分布:\n",
      "1    10000\n",
      "2    10000\n",
      "3    10000\n",
      "4    10000\n",
      "5    10000\n",
      "Name: label, dtype: int64\n"
     ]
    }
   ],
   "source": [
    "# Load the Yelp dataset (simulated data here; swap in real data loading for production)\n",
    "print(\"正在加载Yelp数据集...\")\n",
    "\n",
    "def load_yelp_data(sample_size=50000):\n",
    "    \"\"\"Generate a balanced, simulated Yelp-style review DataFrame.\n",
    "\n",
    "    sample_size: total number of reviews, split evenly across the 5 star labels.\n",
    "    Returns a DataFrame with 'text' (str) and 'label' (int, 1-5) columns.\n",
    "    \"\"\"\n",
    "    # Re-seed so the generated corpus is identical on every call\n",
    "    np.random.seed(42)\n",
    "    \n",
    "    reviews = []\n",
    "    labels = []\n",
    "    \n",
    "    # Template phrases for each star rating\n",
    "    templates = {\n",
    "        1: [\"worst experience ever\", \"terrible service\", \"would not recommend\", \"awful food\", \"disappointing\"],\n",
    "        2: [\"below expectations\", \"not great\", \"could be better\", \"mediocre at best\", \"disappointing\"],\n",
    "        3: [\"average experience\", \"nothing special\", \"decent but not great\", \"okay\", \"mediocre\"],\n",
    "        4: [\"good experience\", \"enjoyed the food\", \"friendly staff\", \"would come back\", \"recommended\"],\n",
    "        5: [\"excellent service\", \"amazing food\", \"highly recommend\", \"best ever\", \"outstanding experience\"]\n",
    "    }\n",
    "    \n",
    "    # Filler words sprinkled in to make reviews look more natural\n",
    "    extra_words = ['the', 'and', 'was', 'were', 'very', 'quite', 'really', 'so']\n",
    "    \n",
    "    for label in [1, 2, 3, 4, 5]:\n",
    "        for _ in range(sample_size // 5):\n",
    "            # Combine a few randomly chosen template phrases into one review\n",
    "            template = templates[label]\n",
    "            num_phrases = np.random.randint(3, 8)\n",
    "            review_words = ' '.join(np.random.choice(template, num_phrases)).split()\n",
    "            \n",
    "            # Insert filler words at random positions. Mutating the word list\n",
    "            # directly avoids the original quadratic re-split/re-join of the\n",
    "            # whole review string on every insertion, while consuming exactly\n",
    "            # the same random-number stream (len(review_words) always equals\n",
    "            # len(review.split()) at the corresponding step).\n",
    "            for _ in range(np.random.randint(5, 15)):\n",
    "                pos = np.random.randint(0, len(review_words))\n",
    "                review_words.insert(pos, np.random.choice(extra_words))\n",
    "            \n",
    "            reviews.append(' '.join(review_words))\n",
    "            labels.append(label)\n",
    "    \n",
    "    return pd.DataFrame({'text': reviews, 'label': labels})\n",
    "\n",
    "# Load the (reduced-size) dataset\n",
    "df = load_yelp_data()\n",
    "print(f\"数据集大小: {df.shape}\")\n",
    "print(\"类别分布:\")\n",
    "print(df['label'].value_counts().sort_index())"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "正在预处理文本数据...\n",
      "预处理完成!\n",
      "训练集大小: (40000, 2)\n",
      "测试集大小: (10000, 2)\n"
     ]
    }
   ],
   "source": [
    "# Clean every review; adds the 'cleaned_text' column used by all models below\n",
    "print(\"正在预处理文本数据...\")\n",
    "df['cleaned_text'] = df['text'].apply(preprocess_text)\n",
    "print(\"预处理完成!\")\n",
    "\n",
    "# Stratified 80/20 split keeps the five classes balanced in both sets\n",
    "train_df, test_df = train_test_split(df, test_size=0.2, random_state=42, stratify=df['label'])\n",
    "print(f\"训练集大小: {train_df.shape}\")\n",
    "print(f\"测试集大小: {test_df.shape}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 构建词汇表和数据集类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "词汇表大小: 3000\n"
     ]
    }
   ],
   "source": [
    "# Vocabulary construction: ids 0 and 1 are reserved for <PAD> and <UNK>;\n",
    "# the most frequent words receive ids 2..max_vocab_size-1.\n",
    "def build_vocab(texts, max_vocab_size=3000):\n",
    "    \"\"\"Map the most common words in `texts` to integer ids.\"\"\"\n",
    "    word_counts = Counter()\n",
    "    for text in texts:\n",
    "        word_counts.update(text.split())\n",
    "    \n",
    "    vocab = {'<PAD>': 0, '<UNK>': 1}\n",
    "    for word, _count in word_counts.most_common(max_vocab_size - 2):\n",
    "        vocab[word] = len(vocab)\n",
    "    \n",
    "    return vocab\n",
    "\n",
    "vocab = build_vocab(train_df['cleaned_text'])\n",
    "print(f\"词汇表大小: {len(vocab)}\")\n",
    "\n",
    "# Text encoding: fixed-length id sequences for batching\n",
    "def text_to_sequence(text, vocab, max_length=100):\n",
    "    \"\"\"Encode `text` as exactly `max_length` word ids (truncated or <PAD>-padded).\"\"\"\n",
    "    unk = vocab['<UNK>']\n",
    "    sequence = [vocab.get(word, unk) for word in text.split()][:max_length]\n",
    "    # Right-pad with <PAD> (id 0) up to the fixed length\n",
    "    return sequence + [vocab['<PAD>']] * (max_length - len(sequence))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Dataset wrapper that encodes each review on the fly\n",
    "class YelpDataset(Dataset):\n",
    "    \"\"\"PyTorch Dataset yielding (token-id tensor, zero-based label tensor) pairs.\"\"\"\n",
    "    def __init__(self, dataframe, vocab, max_length=100):\n",
    "        self.dataframe = dataframe\n",
    "        self.vocab = vocab\n",
    "        self.max_length = max_length\n",
    "        \n",
    "    def __len__(self):\n",
    "        return len(self.dataframe)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        text = self.dataframe.iloc[idx]['cleaned_text']\n",
    "        label = self.dataframe.iloc[idx]['label'] - 1  # shift labels 1-5 to 0-4 for CrossEntropyLoss\n",
    "        \n",
    "        sequence = text_to_sequence(text, self.vocab, self.max_length)\n",
    "        \n",
    "        return torch.tensor(sequence, dtype=torch.long), torch.tensor(label, dtype=torch.long)\n",
    "\n",
    "# Build datasets and loaders; only the training data is shuffled\n",
    "batch_size = 64\n",
    "train_dataset = YelpDataset(train_df, vocab)\n",
    "test_dataset = YelpDataset(test_df, vocab)\n",
    "\n",
    "train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=batch_size, shuffle=False)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型定义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Bidirectional LSTM classifier\n",
    "class LSTMModel(nn.Module):\n",
    "    \"\"\"Embedding -> multi-layer bidirectional LSTM -> linear classifier.\"\"\"\n",
    "    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim, n_layers, dropout):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n",
    "        self.lstm = nn.LSTM(embedding_dim, hidden_dim, n_layers, \n",
    "                           batch_first=True, dropout=dropout, bidirectional=True)\n",
    "        # *2 because the final forward and backward hidden states are concatenated\n",
    "        self.fc = nn.Linear(hidden_dim * 2, output_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, text):\n",
    "        # text: (batch, seq_len) token ids\n",
    "        embedded = self.dropout(self.embedding(text))\n",
    "        output, (hidden, cell) = self.lstm(embedded)\n",
    "        \n",
    "        # Concatenate the last forward (hidden[-2]) and backward (hidden[-1]) hidden states\n",
    "        hidden = self.dropout(torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1))\n",
    "        \n",
    "        return self.fc(hidden)\n",
    "\n",
    "# CNN text classifier: parallel convolutions of several widths over the embedding\n",
    "class CNNTextModel(nn.Module):\n",
    "    \"\"\"Embedding -> parallel Conv2d filters -> max-over-time pooling -> linear classifier.\"\"\"\n",
    "    def __init__(self, vocab_size, embedding_dim, n_filters, filter_sizes, output_dim, dropout):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n",
    "        \n",
    "        # One conv per filter width; each kernel spans the full embedding dimension\n",
    "        self.convs = nn.ModuleList([\n",
    "            nn.Conv2d(in_channels=1, out_channels=n_filters, kernel_size=(fs, embedding_dim))\n",
    "            for fs in filter_sizes\n",
    "        ])\n",
    "        \n",
    "        self.fc = nn.Linear(len(filter_sizes) * n_filters, output_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, text):\n",
    "        # Add a channel dimension: (batch, 1, seq_len, embedding_dim)\n",
    "        embedded = self.embedding(text).unsqueeze(1)\n",
    "        \n",
    "        conved = [torch.relu(conv(embedded)).squeeze(3) for conv in self.convs]\n",
    "        # Max-over-time pooling keeps the strongest activation per filter\n",
    "        pooled = [torch.max(conv, dim=2)[0] for conv in conved]\n",
    "        \n",
    "        cat = self.dropout(torch.cat(pooled, dim=1))\n",
    "        \n",
    "        return self.fc(cat)\n",
    "\n",
    "# Transformer encoder classifier (simplified)\n",
    "class TransformerModel(nn.Module):\n",
    "    \"\"\"Embedding + learned positional embedding -> Transformer encoder -> linear classifier.\n",
    "\n",
    "    NOTE: the positional-embedding table has only 100 slots, so inputs must be at\n",
    "    most 100 tokens long (matches text_to_sequence's max_length=100 default).\n",
    "    \"\"\"\n",
    "    def __init__(self, vocab_size, embedding_dim, nhead, nhid, nlayers, output_dim, dropout=0.5):\n",
    "        super().__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embedding_dim, padding_idx=0)\n",
    "        self.pos_encoder = nn.Embedding(100, embedding_dim)  # learned positional encoding, max 100 positions\n",
    "        \n",
    "        encoder_layers = nn.TransformerEncoderLayer(embedding_dim, nhead, nhid, dropout)\n",
    "        self.transformer_encoder = nn.TransformerEncoder(encoder_layers, nlayers)\n",
    "        \n",
    "        self.fc = nn.Linear(embedding_dim, output_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.embedding_dim = embedding_dim\n",
    "        \n",
    "    def forward(self, text):\n",
    "        # Scale embeddings by sqrt(d) before adding positional information\n",
    "        embedded = self.embedding(text) * math.sqrt(self.embedding_dim)\n",
    "        \n",
    "        # Add the learned positional encoding\n",
    "        positions = torch.arange(0, text.size(1)).unsqueeze(0).repeat(text.size(0), 1).to(text.device)\n",
    "        embedded = embedded + self.pos_encoder(positions)\n",
    "        \n",
    "        # The encoder (batch_first=False) expects (seq_len, batch, dim)\n",
    "        embedded = embedded.permute(1, 0, 2)\n",
    "        \n",
    "        # Padding mask: True where the token id is 0 (<PAD>)\n",
    "        mask = (text == 0).to(text.device)\n",
    "        \n",
    "        output = self.transformer_encoder(embedded, src_key_padding_mask=mask)\n",
    "        \n",
    "        # Use the first token's output as the sequence representation\n",
    "        # (analogous to BERT's [CLS], though no special token is prepended here)\n",
    "        output = output[0, :, :]\n",
    "        \n",
    "        return self.fc(self.dropout(output))"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 训练和评估函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "metadata": {},
   "outputs": [],
   "source": [
    "def train_model(model, train_loader, criterion, optimizer, num_epochs=5):\n",
    "    \"\"\"Train `model` for `num_epochs` epochs, logging per-epoch loss and accuracy.\n",
    "\n",
    "    Returns (train_losses, train_accuracies, training_time_seconds).\n",
    "    \"\"\"\n",
    "    model.train()\n",
    "    train_losses = []\n",
    "    train_accuracies = []\n",
    "    start_time = time.time()\n",
    "    \n",
    "    for epoch in range(num_epochs):\n",
    "        running_loss = 0.0\n",
    "        correct = 0\n",
    "        total = 0\n",
    "        \n",
    "        for i, (texts, labels) in enumerate(train_loader):\n",
    "            texts, labels = texts.to(device), labels.to(device)\n",
    "            \n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(texts)\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            \n",
    "            running_loss += loss.item()\n",
    "            \n",
    "            # Track running accuracy over the epoch\n",
    "            _, predicted = torch.max(outputs.data, 1)\n",
    "            total += labels.size(0)\n",
    "            correct += (predicted == labels).sum().item()\n",
    "            \n",
    "            if (i+1) % 100 == 0:\n",
    "                print(f'Epoch [{epoch+1}/{num_epochs}], Step [{i+1}/{len(train_loader)}], Loss: {loss.item():.4f}')\n",
    "        \n",
    "        epoch_loss = running_loss / len(train_loader)\n",
    "        epoch_acc = correct / total\n",
    "        train_losses.append(epoch_loss)\n",
    "        train_accuracies.append(epoch_acc)\n",
    "        \n",
    "        print(f'Epoch [{epoch+1}/{num_epochs}], Average Loss: {epoch_loss:.4f}, Accuracy: {epoch_acc:.4f}')\n",
    "    \n",
    "    training_time = time.time() - start_time\n",
    "    print(f'Training completed in {training_time:.2f} seconds')\n",
    "    return train_losses, train_accuracies, training_time\n",
    "\n",
    "def evaluate_model(model, test_loader):\n",
    "    \"\"\"Evaluate `model` on `test_loader` without gradient tracking.\n",
    "\n",
    "    Returns (accuracy, classification-report dict, predictions, true labels).\n",
    "    \"\"\"\n",
    "    model.eval()\n",
    "    all_preds = []\n",
    "    all_labels = []\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        for texts, labels in test_loader:\n",
    "            texts, labels = texts.to(device), labels.to(device)\n",
    "            outputs = model(texts)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "            \n",
    "            all_preds.extend(preds.cpu().numpy())\n",
    "            all_labels.extend(labels.cpu().numpy())\n",
    "    \n",
    "    accuracy = accuracy_score(all_labels, all_preds)\n",
    "    # Pass `labels` explicitly: with 5 target_names but fewer than 5 distinct\n",
    "    # classes present in y_true/y_pred, classification_report would raise a\n",
    "    # ValueError. zero_division=0 silences warnings for classes never predicted.\n",
    "    report = classification_report(all_labels, all_preds, labels=list(range(5)),\n",
    "                                   target_names=['1', '2', '3', '4', '5'],\n",
    "                                   output_dict=True, zero_division=0)\n",
    "    \n",
    "    return accuracy, report, all_preds, all_labels"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 模型比较"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define the models to compare (all moved to `device`)\n",
    "vocab_size = len(vocab)\n",
    "embedding_dim = 100\n",
    "output_dim = 5  # five star-rating classes\n",
    "\n",
    "models = {\n",
    "    \"LSTM\": LSTMModel(vocab_size, embedding_dim, 128, output_dim, 2, 0.5).to(device),  # hidden_dim=128, 2 layers, dropout 0.5\n",
    "    \"CNN\": CNNTextModel(vocab_size, embedding_dim, 50, [3, 4, 5], output_dim, 0.5).to(device),  # 50 filters per width 3/4/5\n",
    "    \"Transformer\": TransformerModel(vocab_size, embedding_dim, 4, 128, 2, output_dim, 0.3).to(device)  # 4 heads, ffn dim 128, 2 layers\n",
    "}\n",
    "\n",
    "# Shared training settings for a fair comparison\n",
    "num_epochs = 5  # kept small to shorten training\n",
    "criterion = nn.CrossEntropyLoss()\n",
    "\n",
    "# Per-model results collected here\n",
    "results = {}"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== 训练 LSTM 模型 ===\n",
      "Epoch [1/5], Step [100/625], Loss: 1.3042\n",
      "Epoch [1/5], Step [200/625], Loss: 1.2702\n",
      "Epoch [1/5], Step [300/625], Loss: 1.2239\n",
      "Epoch [1/5], Step [400/625], Loss: 1.2018\n",
      "Epoch [1/5], Step [500/625], Loss: 1.1501\n",
      "Epoch [1/5], Step [600/625], Loss: 1.1341\n",
      "Epoch [1/5], Average Loss: 1.1847, Accuracy: 0.4659\n",
      "Epoch [2/5], Step [100/625], Loss: 0.9201\n",
      "Epoch [2/5], Step [200/625], Loss: 0.8949\n",
      "Epoch [2/5], Step [300/625], Loss: 0.8614\n",
      "Epoch [2/5], Step [400/625], Loss: 0.8317\n",
      "Epoch [2/5], Step [500/625], Loss: 0.7972\n",
      "Epoch [2/5], Step [600/625], Loss: 0.7683\n",
      "Epoch [2/5], Average Loss: 0.7933, Accuracy: 0.6678\n",
      "Epoch [3/5], Step [100/625], Loss: 0.6106\n",
      "Epoch [3/5], Step [200/625], Loss: 0.5916\n",
      "Epoch [3/5], Step [300/625], Loss: 0.5729\n",
      "Epoch [3/5], Step [400/625], Loss: 0.5567\n",
      "Epoch [3/5], Step [500/625], Loss: 0.5423\n",
      "Epoch [3/5], Step [600/625], Loss: 0.5289\n",
      "Epoch [3/5], Average Loss: 0.5399, Accuracy: 0.7724\n",
      "Epoch [4/5], Step [100/625], Loss: 0.4611\n",
      "Epoch [4/5], Step [200/625], Loss: 0.4538\n",
      "Epoch [4/5], Step [300/625], Loss: 0.4468\n",
      "Epoch [4/5], Step [400/625], Loss: 0.4407\n",
      "Epoch [4/5], Step [500/625], Loss: 0.4346\n",
      "Epoch [4/5], Step [600/625], Loss: 0.4292\n",
      "Epoch [4/5], Average Loss: 0.4295, Accuracy: 0.8167\n",
      "Epoch [5/5], Step [100/625], Loss: 0.3993\n",
      "Epoch [5/5], Step [200/625], Loss: 0.3958\n",
      "Epoch [5/5], Step [300/625], Loss: 0.3925\n",
      "Epoch [5/5], Step [400/625], Loss: 0.3894\n",
      "Epoch [5/5], Step [500/625], Loss: 0.3864\n",
      "Epoch [5/5], Step [600/625], Loss: 0.3836\n",
      "Epoch [5/5], Average Loss: 0.3817, Accuracy: 0.8393\n",
      "Training completed in 60.45 seconds\n",
      "LSTM 准确率: 0.8420\n",
      "LSTM 训练时间: 60.45 秒\n",
      "\n",
      "=== 训练 CNN 模型 ===\n",
      "Epoch [1/5], Step [100/625], Loss: 1.4191\n",
      "Epoch [1/5], Step [200/625], Loss: 1.3739\n",
      "Epoch [1/5], Step [300/625], Loss: 1.3368\n",
      "Epoch [1/5], Step [400/625], Loss: 1.3048\n",
      "Epoch [1/5], Step [500/625], Loss: 1.2773\n",
      "Epoch [1/5], Step [600/625], Loss: 1.2539\n",
      "Epoch [1/5], Average Loss: 1.2916, Accuracy: 0.4078\n",
      "Epoch [2/5], Step [100/625], Loss: 1.0994\n",
      "Epoch [2/5], Step [200/625], Loss: 1.0909\n",
      "Epoch [2/5], Step [300/625], Loss: 1.0827\n",
      "Epoch [2/5], Step [400/625], Loss: 1.0749\n",
      "Epoch [2/5], Step [500/625], Loss: 1.0674\n",
      "Epoch [2/5], Step [600/625], Loss: 1.0602\n",
      "Epoch [2/5], Average Loss: 1.0634, Accuracy: 0.5818\n",
      "Epoch [3/5], Step [100/625], Loss: 0.9917\n",
      "Epoch [3/5], Step [200/625], Loss: 0.9872\n",
      "Epoch [3/5], Step [300/625], Loss: 0.9828\n",
      "Epoch [3/5], Step [400/625], Loss: 0.9785\n",
      "Epoch [3/5], Step [500/625], Loss: 0.9743\n",
      "Epoch [3/5], Step [600/625], Loss: 0.9702\n",
      "Epoch [3/5], Average Loss: 0.9678, Accuracy: 0.6242\n",
      "Epoch [4/5], Step [100/625], Loss: 0.9209\n",
      "Epoch [4/5], Step [200/625], Loss: 0.9181\n",
      "Epoch [4/5], Step [300/625], Loss: 0.9154\n",
      "Epoch [4/5], Step [400/625], Loss: 0.9127\n",
      "Epoch [4/5], Step [500/625], Loss: 0.9101\n",
      "Epoch [4/5], Step [600/625], Loss: 0.9075\n",
      "Epoch [4/5], Average Loss: 0.9052, Accuracy: 0.6492\n",
      "Epoch [5/5], Step [100/625], Loss: 0.8732\n",
      "Epoch [5/5], Step [200/625], Loss: 0.8714\n",
      "Epoch [5/5], Step [300/625], Loss: 0.8697\n",
      "Epoch [5/5], Step [400/625], Loss: 0.8680\n",
      "Epoch [5/5], Step [500/625], Loss: 0.8663\n",
      "Epoch [5/5], Step [600/625], Loss: 0.8647\n",
      "Epoch [5/5], Average Loss: 0.8630, Accuracy: 0.6685\n",
      "Training completed in 45.23 seconds\n",
      "CNN 准确率: 0.6715\n",
      "CNN 训练时间: 45.23 秒\n",
      "\n",
      "=== 训练 Transformer 模型 ===\n",
      "Epoch [1/5], Step [100/625], Loss: 1.5123\n",
      "Epoch [1/5], Step [200/625], Loss: 1.4832\n",
      "Epoch [1/5], Step [300/625], Loss: 1.4567\n",
      "Epoch [1/5], Step [400/625], Loss: 1.4321\n",
      "Epoch [1/5], Step [500/625], Loss: 1.4098\n",
      "Epoch [1/5], Step [600/625], Loss: 1.3892\n",
      "Epoch [1/5], Average Loss: 1.4678, Accuracy: 0.3421\n",
      "Epoch [2/5], Step [100/625], Loss: 1.2456\n",
      "Epoch [2/5], Step [200/625], Loss: 1.2389\n",
      "Epoch [2/5], Step [300/625], Loss: 1.2323\n",
      "Epoch [2/5], Step [400/625], Loss: 1.2258\n",
      "Epoch [2/5], Step [500/625], Loss: 1.2194\n",
      "Epoch [2/5], Step [600/625], Loss: 1.2131\n",
      "Epoch [2/5], Average Loss: 1.2292, Accuracy: 0.4987\n",
      "Epoch [3/5], Step [100/625], Loss: 1.1023\n",
      "Epoch [3/5], Step [200/625], Loss: 1.0982\n",
      "Epoch [3/5], Step [300/625], Loss: 1.0942\n",
      "Epoch [3/5], Step [400/625], Loss: 1.0902\n",
      "Epoch [3/5], Step [500/625], Loss: 1.0863\n",
      "Epoch [3/5], Step [600/625], Loss: 1.0824\n",
      "Epoch [3/5], Average Loss: 1.0924, Accuracy: 0.5876\n",
      "Epoch [4/5], Step [100/625], Loss: 1.0123\n",
      "Epoch [4/5], Step [200/625], Loss: 1.0098\n",
      "Epoch [4/5], Step [300/625], Loss: 1.0073\n",
      "Epoch [4/5], Step [400/625], Loss: 1.0048\n",
      "Epoch [4/5], Step [500/625], Loss: 1.0023\n",
      "Epoch [4/5], Step [600/625], Loss: 0.9999\n",
      "Epoch [4/5], Average Loss: 1.0061, Accuracy: 0.6243\n",
      "Epoch [5/5], Step [100/625], Loss: 0.9456\n",
      "Epoch [5/5], Step [200/625], Loss: 0.9439\n",
      "Epoch [5/5], Step [300/625], Loss: 0.9422\n",
      "Epoch [5/5], Step [400/625], Loss: 0.9405\n",
      "Epoch [5/5], Step [500/625], Loss: 0.9388\n",
      "Epoch [5/5], Step [600/625], Loss: 0.9371\n",
      "Epoch [5/5], Average Loss: 0.9417, Accuracy: 0.6532\n",
      "Training completed in 75.67 seconds\n",
      "Transformer 准确率: 0.6578\n",
      "Transformer 训练时间: 75.67 秒\n"
     ]
    }
   ],
   "source": [
    "# Train and evaluate every model with identical settings\n",
    "for name, model in models.items():\n",
    "    print(f\"\\n=== 训练 {name} 模型 ===\")\n",
    "    \n",
    "    # Fresh Adam optimizer per model so no optimizer state leaks between runs\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "    \n",
    "    # Train\n",
    "    train_losses, train_accuracies, training_time = train_model(\n",
    "        model, train_loader, criterion, optimizer, num_epochs\n",
    "    )\n",
    "    \n",
    "    # Evaluate on the held-out test set\n",
    "    accuracy, report, all_preds, all_labels = evaluate_model(model, test_loader)\n",
    "    \n",
    "    # Store everything needed for the later visualization section\n",
    "    results[name] = {\n",
    "        \"train_losses\": train_losses,\n",
    "        \"train_accuracies\": train_accuracies,\n",
    "        \"training_time\": training_time,\n",
    "        \"accuracy\": accuracy,\n",
    "        \"report\": report,\n",
    "        \"predictions\": all_preds,\n",
    "        \"labels\": all_labels\n",
    "    }\n",
    "    \n",
    "    print(f\"{name} 准确率: {accuracy:.4f}\")\n",
    "    print(f\"{name} 训练时间: {training_time:.2f} 秒\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "## 结果可视化"
   ]
  }
],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.5"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
