{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-06-12T01:09:22.727968Z",
     "start_time": "2025-06-12T00:45:24.341127Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from tqdm import tqdm\n",
    "import matplotlib.pyplot as plt\n",
    "from datetime import datetime\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# ------------------------------ 数据处理函数 ------------------------------\n",
    "def load_class_labels(class_path):\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                label_map[idx] = line.strip()\n",
    "    return label_map or {idx: f\"类别{idx}\" for idx in sorted(set(label_map.keys()))}\n",
    "\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"Read a UTF-8 file of \"text<TAB>label\" lines into parallel lists.\n",
    "\n",
    "    Blank lines and lines without a trailing tab-separated label are skipped.\n",
    "\n",
    "    Returns:\n",
    "        (texts, labels) where labels are ints.\n",
    "    \"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as handle:\n",
    "        for raw_line in tqdm(handle, desc=f\"加载 {file_path}\"):\n",
    "            stripped = raw_line.strip()\n",
    "            if not stripped:\n",
    "                continue\n",
    "            # Split on the last tab only: the text itself may contain tabs.\n",
    "            pieces = stripped.rsplit('\\t', 1)\n",
    "            if len(pieces) != 2:\n",
    "                continue\n",
    "            texts.append(pieces[0])\n",
    "            labels.append(int(pieces[1]))\n",
    "    return texts, labels\n",
    "\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"Tokenize each text with jieba, dropping stopwords and 1-character tokens.\n",
    "\n",
    "    Returns a list of token lists; texts whose tokens are all filtered out\n",
    "    become ['<PAD>'] so every sample keeps at least one token.\n",
    "    \"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        tokens = [\n",
    "            tok for tok in jieba.cut(text, HMM=True)\n",
    "            if tok not in stopwords and len(tok) >= 2\n",
    "        ]\n",
    "        processed.append(tokens or ['<PAD>'])\n",
    "    return processed\n",
    "\n",
    "\n",
    "def load_imdb_data(data_path):\n",
    "    \"\"\"Load the aclImdb dataset from its standard directory layout.\n",
    "\n",
    "    Expects <data_path>/{train,test}/{pos,neg}/*.txt; 'pos' maps to label 1\n",
    "    and 'neg' to label 0.\n",
    "\n",
    "    Returns:\n",
    "        (train_texts, train_labels, test_texts, test_labels)\n",
    "\n",
    "    Raises:\n",
    "        FileNotFoundError: if the dataset root directory does not exist.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels = [], []\n",
    "    test_texts, test_labels = [], []\n",
    "\n",
    "    # Fail fast if the dataset root is missing.\n",
    "    if not os.path.exists(data_path):\n",
    "        raise FileNotFoundError(f\"数据集根目录不存在: {data_path}\")\n",
    "\n",
    "    for label in ['pos', 'neg']:\n",
    "        for split in ['train', 'test']:\n",
    "            path = os.path.join(data_path, split, label)\n",
    "            # Missing sub-directories are tolerated with a warning only.\n",
    "            if not os.path.exists(path):\n",
    "                print(f\"警告: 路径不存在 - {path}\")\n",
    "                continue\n",
    "\n",
    "            for file_name in os.listdir(path):\n",
    "                if file_name.endswith('.txt'):\n",
    "                    with open(os.path.join(path, file_name), 'r', encoding='utf-8') as file:\n",
    "                        text = file.read()\n",
    "                        if split == 'train':\n",
    "                            train_texts.append(text)\n",
    "                            train_labels.append(1 if label == 'pos' else 0)\n",
    "                        else:\n",
    "                            test_texts.append(text)\n",
    "                            test_labels.append(1 if label == 'pos' else 0)\n",
    "    return train_texts, train_labels, test_texts, test_labels\n",
    "\n",
    "\n",
    "# ------------------------------ 自定义数据集类 ------------------------------\n",
    "class TextDataset(Dataset):\n",
    "    def __init__(self, texts, labels, vocab, max_length=100):\n",
    "        self.texts = texts\n",
    "        self.labels = labels\n",
    "        self.vocab = vocab\n",
    "        self.max_length = max_length\n",
    "        self.pad_idx = vocab.get('<PAD>', 0)\n",
    "        self.unk_idx = vocab.get('<UNK>', 1)\n",
    "        \n",
    "    def __len__(self):\n",
    "        return len(self.texts)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        text = self.texts[idx]\n",
    "        label = self.labels[idx]\n",
    "        \n",
    "        # 将文本转换为索引序列，确保至少有一个词\n",
    "        indices = [self.vocab.get(word, self.unk_idx) for word in text[:self.max_length]]\n",
    "        \n",
    "        # 处理空文本\n",
    "        if not indices:\n",
    "            indices = [self.pad_idx]\n",
    "            \n",
    "        return torch.tensor(indices), torch.tensor(label)\n",
    "\n",
    "\n",
    "# ------------------------------ 模型定义 ------------------------------\n",
    "class CNNClassifier(nn.Module):\n",
    "    def __init__(self, vocab_size, embed_dim, num_classes, num_filters, filter_sizes, dropout=0.5):\n",
    "        super(CNNClassifier, self).__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n",
    "        \n",
    "        # 多个卷积层，每个使用不同大小的滤波器\n",
    "        self.convs = nn.ModuleList([\n",
    "            nn.Conv1d(embed_dim, num_filters, fs) for fs in filter_sizes\n",
    "        ])\n",
    "        \n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.fc = nn.Linear(len(filter_sizes) * num_filters, num_classes)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: [batch_size, seq_len]\n",
    "        embedded = self.embedding(x)  # [batch_size, seq_len, embed_dim]\n",
    "        embedded = embedded.permute(0, 2, 1)  # [batch_size, embed_dim, seq_len]\n",
    "        \n",
    "        # 应用卷积和池化\n",
    "        pooled_outputs = []\n",
    "        for conv in self.convs:\n",
    "            conv_out = nn.functional.relu(conv(embedded))  # [batch_size, num_filters, seq_len-fs+1]\n",
    "            pooled = nn.functional.max_pool1d(conv_out, conv_out.shape[2])  # [batch_size, num_filters, 1]\n",
    "            pooled = pooled.squeeze(2)  # [batch_size, num_filters]\n",
    "            pooled_outputs.append(pooled)\n",
    "        \n",
    "        # 合并所有卷积层的输出\n",
    "        cat = self.dropout(torch.cat(pooled_outputs, dim=1))  # [batch_size, num_filters * len(filter_sizes)]\n",
    "        return self.fc(cat)\n",
    "\n",
    "\n",
    "class RNNClassifier(nn.Module):\n",
    "    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes, num_layers=1, bidirectional=True, dropout=0.5):\n",
    "        super(RNNClassifier, self).__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n",
    "        \n",
    "        self.rnn = nn.LSTM(\n",
    "            embed_dim, \n",
    "            hidden_dim, \n",
    "            num_layers=num_layers,\n",
    "            bidirectional=bidirectional,\n",
    "            batch_first=True,\n",
    "            dropout=dropout if num_layers > 1 else 0\n",
    "        )\n",
    "        \n",
    "        # 计算最终的特征维度\n",
    "        self.fc_input_dim = hidden_dim * 2 if bidirectional else hidden_dim\n",
    "        self.fc = nn.Linear(self.fc_input_dim, num_classes)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: [batch_size, seq_len]\n",
    "        embedded = self.embedding(x)  # [batch_size, seq_len, embed_dim]\n",
    "        \n",
    "        # 前向传播RNN\n",
    "        outputs, (hidden, cell) = self.rnn(embedded)\n",
    "        \n",
    "        # 取最后一个时间步的隐藏状态\n",
    "        if self.rnn.bidirectional:\n",
    "            # 双向RNN，合并两个方向的最后隐藏状态\n",
    "            hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1)\n",
    "        else:\n",
    "            hidden = hidden[-1,:,:]\n",
    "            \n",
    "        hidden = self.dropout(hidden)\n",
    "        return self.fc(hidden)\n",
    "\n",
    "\n",
    "# ------------------------------ 模型训练函数 ------------------------------\n",
    "def create_vocab(texts, min_freq=2):\n",
    "    \"\"\"创建词汇表\"\"\"\n",
    "    vocab = {'<PAD>': 0, '<UNK>': 1}\n",
    "    word_freq = {}\n",
    "    \n",
    "    for text in texts:\n",
    "        for word in text:\n",
    "            word_freq[word] = word_freq.get(word, 0) + 1\n",
    "    \n",
    "    # 只保留频率大于等于min_freq的词\n",
    "    for word, freq in word_freq.items():\n",
    "        if freq >= min_freq:\n",
    "            vocab[word] = len(vocab)\n",
    "    \n",
    "    return vocab\n",
    "\n",
    "\n",
    "def collate_fn(batch):\n",
    "    \"\"\"自定义DataLoader的批处理函数，用于填充序列\"\"\"\n",
    "    texts, labels = zip(*batch)\n",
    "    texts = pad_sequence(texts, batch_first=True, padding_value=0)\n",
    "    return texts, torch.tensor(labels)\n",
    "\n",
    "\n",
    "def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=10, device='cpu'):\n",
    "    \"\"\"Train `model` with per-epoch validation; return it with the best weights loaded.\n",
    "\n",
    "    Args:\n",
    "        model: nn.Module to optimize (moved to `device` in place).\n",
    "        train_loader / val_loader: DataLoaders yielding (texts, labels) batches.\n",
    "        criterion: loss function (e.g. CrossEntropyLoss).\n",
    "        optimizer: optimizer bound to model.parameters().\n",
    "        num_epochs: number of full passes over train_loader.\n",
    "        device: torch device (string or torch.device).\n",
    "\n",
    "    Side effect: the best checkpoint is written to 'best_model.pth'.\n",
    "    \"\"\"\n",
    "    model.to(device)\n",
    "    # Bug fix: start below any reachable accuracy so the first epoch always\n",
    "    # checkpoints; the old 0.0 baseline could leave 'best_model.pth' unwritten\n",
    "    # (and the final load crashing) if validation accuracy never exceeded zero.\n",
    "    best_val_acc = -1.0\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        # ---- training pass ----\n",
    "        model.train()\n",
    "        train_loss = 0.0\n",
    "        train_correct = 0\n",
    "\n",
    "        for texts, labels in tqdm(train_loader, desc=f\"Epoch {epoch+1}/{num_epochs} [Train]\"):\n",
    "            texts, labels = texts.to(device), labels.to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(texts)\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            # Weight batch loss by batch size so the epoch average is exact.\n",
    "            train_loss += loss.item() * texts.size(0)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "            train_correct += torch.sum(preds == labels.data)\n",
    "\n",
    "        train_loss = train_loss / len(train_loader.dataset)\n",
    "        train_acc = train_correct.double() / len(train_loader.dataset)\n",
    "\n",
    "        # ---- validation pass ----\n",
    "        model.eval()\n",
    "        val_loss = 0.0\n",
    "        val_correct = 0\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for texts, labels in tqdm(val_loader, desc=f\"Epoch {epoch+1}/{num_epochs} [Val]\"):\n",
    "                texts, labels = texts.to(device), labels.to(device)\n",
    "\n",
    "                outputs = model(texts)\n",
    "                loss = criterion(outputs, labels)\n",
    "\n",
    "                val_loss += loss.item() * texts.size(0)\n",
    "                _, preds = torch.max(outputs, 1)\n",
    "                val_correct += torch.sum(preds == labels.data)\n",
    "\n",
    "        val_loss = val_loss / len(val_loader.dataset)\n",
    "        val_acc = val_correct.double() / len(val_loader.dataset)\n",
    "\n",
    "        print(f\"Epoch {epoch+1}/{num_epochs}\")\n",
    "        print(f\"Train Loss: {train_loss:.4f} Acc: {train_acc:.4f}\")\n",
    "        print(f\"Val Loss: {val_loss:.4f} Acc: {val_acc:.4f}\")\n",
    "\n",
    "        # Checkpoint the best model seen so far.\n",
    "        if val_acc > best_val_acc:\n",
    "            best_val_acc = val_acc\n",
    "            torch.save(model.state_dict(), 'best_model.pth')\n",
    "\n",
    "    # Restore the best weights (map_location keeps this safe across devices).\n",
    "    model.load_state_dict(torch.load('best_model.pth', map_location=device))\n",
    "    return model\n",
    "\n",
    "\n",
    "def evaluate_model(model, test_loader, criterion, device='cpu', label_map=None):\n",
    "    \"\"\"Evaluate `model` on `test_loader`; returns (accuracy, mean loss).\n",
    "\n",
    "    When `label_map` is given, a per-class classification report is printed\n",
    "    using its values as class names (keys are assumed to be 0..n-1).\n",
    "    \"\"\"\n",
    "    model.to(device)\n",
    "    model.eval()\n",
    "    \n",
    "    test_loss = 0.0\n",
    "    all_preds = []\n",
    "    all_labels = []\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        for texts, labels in tqdm(test_loader, desc=\"评估模型\"):\n",
    "            texts, labels = texts.to(device), labels.to(device)\n",
    "            \n",
    "            outputs = model(texts)\n",
    "            loss = criterion(outputs, labels)\n",
    "            \n",
    "            # Weight batch loss by batch size so the final average is exact.\n",
    "            test_loss += loss.item() * texts.size(0)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "            \n",
    "            all_preds.extend(preds.cpu().numpy())\n",
    "            all_labels.extend(labels.cpu().numpy())\n",
    "    \n",
    "    test_loss = test_loss / len(test_loader.dataset)\n",
    "    test_acc = accuracy_score(all_labels, all_preds)\n",
    "    \n",
    "    print(f\"测试集准确率: {test_acc:.4f}\")\n",
    "    print(f\"测试集损失: {test_loss:.4f}\")\n",
    "    \n",
    "    # Print the per-class precision/recall/F1 report.\n",
    "    if label_map:\n",
    "        target_names = [label_map[i] for i in range(len(label_map))]\n",
    "        print(\"分类报告:\\n\", classification_report(all_labels, all_preds, target_names=target_names, digits=4))\n",
    "    \n",
    "    return test_acc, test_loss\n",
    "\n",
    "\n",
    "def run_cnn(data, stopwords, label_map):\n",
    "    \"\"\"Preprocess `data`, train a TextCNN classifier, and report test accuracy.\n",
    "\n",
    "    Args:\n",
    "        data: (train_texts, train_labels, test_texts, test_labels) tuple.\n",
    "        stopwords: set of tokens to drop during preprocessing.\n",
    "        label_map: {class_index: class_name} used for the classification report.\n",
    "\n",
    "    Returns:\n",
    "        dict with keys 'model', 'dataset', 'test_accuracy', 'train_time'.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels, test_texts, test_labels = data\n",
    "    num_classes = len(label_map)\n",
    "\n",
    "    # Tokenize / filter both splits.\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "\n",
    "    # The vocabulary is built from training data only (no test leakage).\n",
    "    vocab = create_vocab(train_processed)\n",
    "    vocab_size = len(vocab)\n",
    "\n",
    "    # Hold out 10% of the training data for validation (stratified by label).\n",
    "    train_data, val_data = train_test_split(\n",
    "        list(zip(train_processed, train_labels)), \n",
    "        test_size=0.1, \n",
    "        random_state=42,\n",
    "        stratify=train_labels\n",
    "    )\n",
    "\n",
    "    # Fix: the original built a throwaway TextDataset over the full training\n",
    "    # split and immediately overwrote it after the train/val split.\n",
    "    train_dataset = TextDataset([x[0] for x in train_data], [x[1] for x in train_data], vocab)\n",
    "    val_dataset = TextDataset([x[0] for x in val_data], [x[1] for x in val_data], vocab)\n",
    "    test_dataset = TextDataset(test_processed, test_labels, vocab)\n",
    "\n",
    "    batch_size = 64\n",
    "    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n",
    "    val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "    test_loader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "\n",
    "    # TextCNN hyperparameters.\n",
    "    embed_dim = 100\n",
    "    num_filters = 100\n",
    "    filter_sizes = [3, 4, 5]  # n-gram widths of the parallel conv branches\n",
    "    dropout = 0.5\n",
    "\n",
    "    model = CNNClassifier(\n",
    "        vocab_size=vocab_size,\n",
    "        embed_dim=embed_dim,\n",
    "        num_classes=num_classes,\n",
    "        num_filters=num_filters,\n",
    "        filter_sizes=filter_sizes,\n",
    "        dropout=dropout\n",
    "    )\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(f\"使用设备: {device}\")\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "    start_time = datetime.now()\n",
    "    model = train_model(\n",
    "        model=model,\n",
    "        train_loader=train_loader,\n",
    "        val_loader=val_loader,\n",
    "        criterion=criterion,\n",
    "        optimizer=optimizer,\n",
    "        num_epochs=5,  # kept small to bound total training time\n",
    "        device=device\n",
    "    )\n",
    "    train_time = (datetime.now() - start_time).total_seconds()\n",
    "\n",
    "    test_acc, _ = evaluate_model(model, test_loader, criterion, device, label_map)\n",
    "\n",
    "    print(f\"\\n---------------- CNN模型 ----------------\")\n",
    "    print(f\"测试集准确率: {test_acc:.4f}\")\n",
    "    print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "\n",
    "    return {\n",
    "        \"model\": \"CNN\",\n",
    "        \"dataset\": data[0][0][:5],  # crude dataset fingerprint (first 5 chars of first text), not real data\n",
    "        \"test_accuracy\": test_acc,\n",
    "        \"train_time\": train_time\n",
    "    }\n",
    "\n",
    "\n",
    "def run_rnn(data, stopwords, label_map):\n",
    "    \"\"\"Preprocess `data`, train a bi-LSTM classifier, and report test accuracy.\n",
    "\n",
    "    Args:\n",
    "        data: (train_texts, train_labels, test_texts, test_labels) tuple.\n",
    "        stopwords: set of tokens to drop during preprocessing.\n",
    "        label_map: {class_index: class_name} used for the classification report.\n",
    "\n",
    "    Returns:\n",
    "        dict with keys 'model', 'dataset', 'test_accuracy', 'train_time'.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels, test_texts, test_labels = data\n",
    "    num_classes = len(label_map)\n",
    "\n",
    "    # Tokenize / filter both splits.\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "\n",
    "    # The vocabulary is built from training data only (no test leakage).\n",
    "    vocab = create_vocab(train_processed)\n",
    "    vocab_size = len(vocab)\n",
    "\n",
    "    # Hold out 10% of the training data for validation (stratified by label).\n",
    "    train_data, val_data = train_test_split(\n",
    "        list(zip(train_processed, train_labels)), \n",
    "        test_size=0.1, \n",
    "        random_state=42,\n",
    "        stratify=train_labels\n",
    "    )\n",
    "\n",
    "    # Fix: the original built a throwaway TextDataset over the full training\n",
    "    # split and immediately overwrote it after the train/val split.\n",
    "    train_dataset = TextDataset([x[0] for x in train_data], [x[1] for x in train_data], vocab)\n",
    "    val_dataset = TextDataset([x[0] for x in val_data], [x[1] for x in val_data], vocab)\n",
    "    test_dataset = TextDataset(test_processed, test_labels, vocab)\n",
    "\n",
    "    batch_size = 64\n",
    "    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n",
    "    val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "    test_loader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "\n",
    "    # Bi-LSTM hyperparameters.\n",
    "    embed_dim = 100\n",
    "    hidden_dim = 128\n",
    "    num_layers = 2\n",
    "    bidirectional = True\n",
    "    dropout = 0.5\n",
    "\n",
    "    model = RNNClassifier(\n",
    "        vocab_size=vocab_size,\n",
    "        embed_dim=embed_dim,\n",
    "        hidden_dim=hidden_dim,\n",
    "        num_classes=num_classes,\n",
    "        num_layers=num_layers,\n",
    "        bidirectional=bidirectional,\n",
    "        dropout=dropout\n",
    "    )\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(f\"使用设备: {device}\")\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "    start_time = datetime.now()\n",
    "    model = train_model(\n",
    "        model=model,\n",
    "        train_loader=train_loader,\n",
    "        val_loader=val_loader,\n",
    "        criterion=criterion,\n",
    "        optimizer=optimizer,\n",
    "        num_epochs=5,  # kept small to bound total training time\n",
    "        device=device\n",
    "    )\n",
    "    train_time = (datetime.now() - start_time).total_seconds()\n",
    "\n",
    "    test_acc, _ = evaluate_model(model, test_loader, criterion, device, label_map)\n",
    "\n",
    "    print(f\"\\n---------------- RNN模型 ----------------\")\n",
    "    print(f\"测试集准确率: {test_acc:.4f}\")\n",
    "    print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "\n",
    "    return {\n",
    "        \"model\": \"RNN\",\n",
    "        \"dataset\": data[0][0][:5],  # crude dataset fingerprint (first 5 chars of first text), not real data\n",
    "        \"test_accuracy\": test_acc,\n",
    "        \"train_time\": train_time\n",
    "    }\n",
    "\n",
    "\n",
    "# ------------------------------ 主函数 ------------------------------\n",
    "def main():\n",
    "    \"\"\"Run the full experiment: load both datasets, train CNN/RNN models, plot comparisons.\"\"\"\n",
    "    print(f\"[{datetime.now()}] 文本分类实验开始...\")\n",
    "\n",
    "    # Use fonts with CJK glyph coverage so plot labels render correctly.\n",
    "    plt.rcParams[\"font.family\"] = [\"SimHei\", \"WenQuanYi Micro Hei\", \"Heiti TC\"]\n",
    "\n",
    "    # --- THUCNews headline dataset configuration ---\n",
    "    THUCNEWS_DATA_DIR = r\"D:\\机器学习\\THUCNews-txt\"\n",
    "    THUCNEWS_CLASS_PATH = os.path.join(THUCNEWS_DATA_DIR, \"class.txt\")\n",
    "    stopwords = {\n",
    "        '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',\n",
    "        '个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有'\n",
    "    }\n",
    "\n",
    "    # Load the news-headline dataset (skipped with a warning if absent).\n",
    "    if os.path.exists(THUCNEWS_DATA_DIR):\n",
    "        thucnews_train_texts, thucnews_train_labels = load_text_dataset(os.path.join(THUCNEWS_DATA_DIR, \"train.txt\"))\n",
    "        thucnews_test_texts, thucnews_test_labels = load_text_dataset(os.path.join(THUCNEWS_DATA_DIR, \"test.txt\"))\n",
    "        thucnews_label_map = load_class_labels(THUCNEWS_CLASS_PATH)\n",
    "        thucnews_data = (thucnews_train_texts, thucnews_train_labels, thucnews_test_texts, thucnews_test_labels)\n",
    "\n",
    "        print(f\"新闻标题训练集样本数：{len(thucnews_train_texts)}\")\n",
    "        print(f\"新闻标题测试集样本数：{len(thucnews_test_texts)}\")\n",
    "\n",
    "        thucnews_cnn_result = run_cnn(thucnews_data, stopwords, thucnews_label_map)\n",
    "        thucnews_rnn_result = run_rnn(thucnews_data, stopwords, thucnews_label_map)\n",
    "    else:\n",
    "        print(f\"警告: 新闻标题数据集路径不存在 - {THUCNEWS_DATA_DIR}\")\n",
    "        thucnews_cnn_result = thucnews_rnn_result = None\n",
    "\n",
    "    # --- IMDB movie-review dataset configuration ---\n",
    "    IMDB_DATA_DIR = r\"D:\\机器学习\\aclImdb_v1\\aclImdb\"\n",
    "\n",
    "    if os.path.exists(IMDB_DATA_DIR):\n",
    "        imdb_train_texts, imdb_train_labels, imdb_test_texts, imdb_test_labels = load_imdb_data(IMDB_DATA_DIR)\n",
    "        imdb_label_map = {0: 'neg', 1: 'pos'}\n",
    "        imdb_data = (imdb_train_texts, imdb_train_labels, imdb_test_texts, imdb_test_labels)\n",
    "\n",
    "        print(f\"电影评论训练集样本数：{len(imdb_train_texts)}\")\n",
    "        print(f\"电影评论测试集样本数：{len(imdb_test_texts)}\")\n",
    "\n",
    "        imdb_cnn_result = run_cnn(imdb_data, stopwords, imdb_label_map)\n",
    "        imdb_rnn_result = run_rnn(imdb_data, stopwords, imdb_label_map)\n",
    "    else:\n",
    "        print(f\"警告: 电影评论数据集路径不存在 - {IMDB_DATA_DIR}\")\n",
    "        imdb_cnn_result = imdb_rnn_result = None\n",
    "\n",
    "    # Collect results, tagging each with an explicit dataset name.\n",
    "    # Bug fix: the old plotting code filtered on '新闻'/'电影' appearing in the\n",
    "    # first characters of the first training text, which rarely matches and\n",
    "    # could hand plt.bar() label/value lists of mismatched length.\n",
    "    results = []\n",
    "    for res, dataset_name in (\n",
    "        (thucnews_cnn_result, '新闻'),\n",
    "        (thucnews_rnn_result, '新闻'),\n",
    "        (imdb_cnn_result, '电影'),\n",
    "        (imdb_rnn_result, '电影'),\n",
    "    ):\n",
    "        if res:\n",
    "            res['dataset_name'] = dataset_name\n",
    "            results.append(res)\n",
    "\n",
    "    if not results:\n",
    "        print(\"没有可用的数据集和结果进行可视化。\")\n",
    "        return\n",
    "\n",
    "    # ------------------------------ Visualization ------------------------------\n",
    "    bar_labels = [f\"{res['model']}\\n{res['dataset_name']}\" for res in results]\n",
    "    bar_colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728'][:len(results)]\n",
    "\n",
    "    plt.figure(figsize=(12, 8))\n",
    "\n",
    "    # Accuracy comparison.\n",
    "    plt.subplot(2, 1, 1)\n",
    "    bars = plt.bar(\n",
    "        bar_labels,\n",
    "        [res['test_accuracy'] for res in results],\n",
    "        width=0.4,\n",
    "        color=bar_colors\n",
    "    )\n",
    "    plt.title('模型准确率对比')\n",
    "    plt.ylabel('准确率')\n",
    "    plt.ylim(0.0, 1.0)  # full 0-1 range so the value labels stay visible\n",
    "    for bar in bars:\n",
    "        height = bar.get_height()\n",
    "        plt.text(bar.get_x() + bar.get_width()/2., height,\n",
    "                f'{height:.4f}',\n",
    "                ha='center', va='bottom')\n",
    "\n",
    "    # Training-time comparison.\n",
    "    plt.subplot(2, 1, 2)\n",
    "    bars = plt.bar(\n",
    "        bar_labels,\n",
    "        [res['train_time'] for res in results],\n",
    "        width=0.4,\n",
    "        color=bar_colors\n",
    "    )\n",
    "    plt.title('模型训练时间对比')\n",
    "    plt.xlabel('数据集/模型')\n",
    "    plt.ylabel('训练时间（秒）')\n",
    "    for bar in bars:\n",
    "        height = bar.get_height()\n",
    "        plt.text(bar.get_x() + bar.get_width()/2., height,\n",
    "                f'{height:.2f}s',\n",
    "                ha='center', va='bottom')\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "# Script entry point: run the full experiment when executed directly.\n",
    "if __name__ == \"__main__\":\n",
    "    main()\n"
   ],
   "id": "c3cbe4901bfc9c59",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-06-12 08:45:27.634487] 文本分类实验开始...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\机器学习\\THUCNews-txt\\train.txt: 180000it [00:00, 1205910.47it/s]\n",
      "加载 D:\\机器学习\\THUCNews-txt\\test.txt: 10000it [00:00, 1091584.43it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "新闻标题训练集样本数：180000\n",
      "新闻标题测试集样本数：10000\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理:   0%|          | 0/180000 [00:00<?, ?it/s]Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\hanji\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.670 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "文本预处理: 100%|██████████| 180000/180000 [00:16<00:00, 10907.40it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 14230.63it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用设备: cpu\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/5 [Train]: 100%|██████████| 2532/2532 [01:03<00:00, 39.98it/s]\n",
      "Epoch 1/5 [Val]: 100%|██████████| 282/282 [00:00<00:00, 351.59it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "Train Loss: 1.2850 Acc: 0.5728\n",
      "Val Loss: 0.7081 Acc: 0.7824\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/5 [Train]: 100%|██████████| 2532/2532 [00:58<00:00, 43.47it/s]\n",
      "Epoch 2/5 [Val]: 100%|██████████| 282/282 [00:00<00:00, 360.96it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/5\n",
      "Train Loss: 0.6236 Acc: 0.8031\n",
      "Val Loss: 0.5138 Acc: 0.8442\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/5 [Train]: 100%|██████████| 2532/2532 [01:01<00:00, 40.94it/s]\n",
      "Epoch 3/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 225.40it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3/5\n",
      "Train Loss: 0.4356 Acc: 0.8649\n",
      "Val Loss: 0.4518 Acc: 0.8614\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/5 [Train]: 100%|██████████| 2532/2532 [01:05<00:00, 38.49it/s]\n",
      "Epoch 4/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 227.46it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4/5\n",
      "Train Loss: 0.3366 Acc: 0.8954\n",
      "Val Loss: 0.4320 Acc: 0.8713\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5/5 [Train]: 100%|██████████| 2532/2532 [01:02<00:00, 40.68it/s]\n",
      "Epoch 5/5 [Val]: 100%|██████████| 282/282 [00:00<00:00, 325.71it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5/5\n",
      "Train Loss: 0.2725 Acc: 0.9149\n",
      "Val Loss: 0.4319 Acc: 0.8766\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "评估模型: 100%|██████████| 157/157 [00:00<00:00, 313.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试集准确率: 0.8775\n",
      "测试集损失: 0.4253\n",
      "分类报告:\n",
      "                precision    recall  f1-score   support\n",
      "\n",
      "      finance     0.9029    0.8740    0.8882      1000\n",
      "       realty     0.8647    0.9140    0.8887      1000\n",
      "       stocks     0.8379    0.8220    0.8299      1000\n",
      "    education     0.9425    0.9180    0.9301      1000\n",
      "      science     0.8182    0.8550    0.8362      1000\n",
      "      society     0.8547    0.8470    0.8508      1000\n",
      "     politics     0.8554    0.8640    0.8597      1000\n",
      "       sports     0.9424    0.9170    0.9295      1000\n",
      "         game     0.9319    0.8620    0.8956      1000\n",
      "entertainment     0.8383    0.9020    0.8690      1000\n",
      "\n",
      "     accuracy                         0.8775     10000\n",
      "    macro avg     0.8789    0.8775    0.8778     10000\n",
      " weighted avg     0.8789    0.8775    0.8778     10000\n",
      "\n",
      "\n",
      "---------------- CNN模型 ----------------\n",
      "测试集准确率: 0.8775\n",
      "训练时间: 316.59 秒\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 180000/180000 [00:11<00:00, 15940.23it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 16370.37it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用设备: cpu\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/5 [Train]: 100%|██████████| 2532/2532 [01:30<00:00, 28.08it/s]\n",
      "Epoch 1/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 165.17it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "Train Loss: 0.9040 Acc: 0.7058\n",
      "Val Loss: 0.5484 Acc: 0.8284\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/5 [Train]: 100%|██████████| 2532/2532 [01:28<00:00, 28.59it/s]\n",
      "Epoch 2/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 166.55it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/5\n",
      "Train Loss: 0.4261 Acc: 0.8689\n",
      "Val Loss: 0.4424 Acc: 0.8627\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/5 [Train]: 100%|██████████| 2532/2532 [01:30<00:00, 27.89it/s]\n",
      "Epoch 3/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 168.30it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3/5\n",
      "Train Loss: 0.2857 Acc: 0.9124\n",
      "Val Loss: 0.4331 Acc: 0.8697\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/5 [Train]: 100%|██████████| 2532/2532 [01:31<00:00, 27.78it/s]\n",
      "Epoch 4/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 169.61it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4/5\n",
      "Train Loss: 0.2025 Acc: 0.9386\n",
      "Val Loss: 0.4556 Acc: 0.8742\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5/5 [Train]: 100%|██████████| 2532/2532 [01:29<00:00, 28.25it/s]\n",
      "Epoch 5/5 [Val]: 100%|██████████| 282/282 [00:01<00:00, 173.39it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5/5\n",
      "Train Loss: 0.1447 Acc: 0.9561\n",
      "Val Loss: 0.4887 Acc: 0.8746\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "评估模型: 100%|██████████| 157/157 [00:00<00:00, 173.40it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试集准确率: 0.8725\n",
      "测试集损失: 0.4965\n",
      "分类报告:\n",
      "                precision    recall  f1-score   support\n",
      "\n",
      "      finance     0.9068    0.8270    0.8651      1000\n",
      "       realty     0.9015    0.8970    0.8992      1000\n",
      "       stocks     0.8249    0.8290    0.8269      1000\n",
      "    education     0.9067    0.9330    0.9197      1000\n",
      "      science     0.8480    0.8370    0.8425      1000\n",
      "      society     0.8672    0.8490    0.8580      1000\n",
      "     politics     0.8446    0.8640    0.8542      1000\n",
      "       sports     0.8936    0.9240    0.9086      1000\n",
      "         game     0.9188    0.8600    0.8884      1000\n",
      "entertainment     0.8227    0.9050    0.8619      1000\n",
      "\n",
      "     accuracy                         0.8725     10000\n",
      "    macro avg     0.8735    0.8725    0.8724     10000\n",
      " weighted avg     0.8735    0.8725    0.8724     10000\n",
      "\n",
      "\n",
      "---------------- RNN模型 ----------------\n",
      "测试集准确率: 0.8725\n",
      "训练时间: 458.90 秒\n",
      "电影评论训练集样本数：25000\n",
      "电影评论测试集样本数：25000\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 25000/25000 [00:54<00:00, 455.96it/s]\n",
      "文本预处理: 100%|██████████| 25000/25000 [00:53<00:00, 466.82it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用设备: cpu\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/5 [Train]: 100%|██████████| 352/352 [00:16<00:00, 21.87it/s]\n",
      "Epoch 1/5 [Val]: 100%|██████████| 40/40 [00:00<00:00, 76.38it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "Train Loss: 0.6879 Acc: 0.6014\n",
      "Val Loss: 0.5742 Acc: 0.6944\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/5 [Train]: 100%|██████████| 352/352 [00:16<00:00, 21.56it/s]\n",
      "Epoch 2/5 [Val]: 100%|██████████| 40/40 [00:00<00:00, 71.88it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/5\n",
      "Train Loss: 0.5678 Acc: 0.6991\n",
      "Val Loss: 0.5838 Acc: 0.6756\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/5 [Train]: 100%|██████████| 352/352 [00:15<00:00, 22.11it/s]\n",
      "Epoch 3/5 [Val]: 100%|██████████| 40/40 [00:00<00:00, 78.02it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3/5\n",
      "Train Loss: 0.4879 Acc: 0.7600\n",
      "Val Loss: 0.4869 Acc: 0.7700\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/5 [Train]: 100%|██████████| 352/352 [00:15<00:00, 22.50it/s]\n",
      "Epoch 4/5 [Val]: 100%|██████████| 40/40 [00:00<00:00, 78.61it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4/5\n",
      "Train Loss: 0.4103 Acc: 0.8116\n",
      "Val Loss: 0.4348 Acc: 0.7900\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5/5 [Train]: 100%|██████████| 352/352 [00:15<00:00, 22.42it/s]\n",
      "Epoch 5/5 [Val]: 100%|██████████| 40/40 [00:00<00:00, 75.34it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5/5\n",
      "Train Loss: 0.3418 Acc: 0.8509\n",
      "Val Loss: 0.4315 Acc: 0.7996\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "评估模型: 100%|██████████| 391/391 [00:04<00:00, 78.40it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试集准确率: 0.8019\n",
      "测试集损失: 0.4284\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "         neg     0.8081    0.7918    0.7999     12500\n",
      "         pos     0.7959    0.8120    0.8039     12500\n",
      "\n",
      "    accuracy                         0.8019     25000\n",
      "   macro avg     0.8020    0.8019    0.8019     25000\n",
      "weighted avg     0.8020    0.8019    0.8019     25000\n",
      "\n",
      "\n",
      "---------------- CNN模型 ----------------\n",
      "测试集准确率: 0.8019\n",
      "训练时间: 82.47 秒\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 25000/25000 [00:54<00:00, 455.52it/s]\n",
      "文本预处理: 100%|██████████| 25000/25000 [00:54<00:00, 456.79it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用设备: cpu\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/5 [Train]: 100%|██████████| 352/352 [00:53<00:00,  6.57it/s]\n",
      "Epoch 1/5 [Val]: 100%|██████████| 40/40 [00:01<00:00, 24.95it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "Train Loss: 0.6441 Acc: 0.6211\n",
      "Val Loss: 0.5902 Acc: 0.6832\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/5 [Train]: 100%|██████████| 352/352 [00:52<00:00,  6.70it/s]\n",
      "Epoch 2/5 [Val]: 100%|██████████| 40/40 [00:01<00:00, 23.03it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/5\n",
      "Train Loss: 0.5259 Acc: 0.7440\n",
      "Val Loss: 0.5018 Acc: 0.7624\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/5 [Train]: 100%|██████████| 352/352 [00:53<00:00,  6.62it/s]\n",
      "Epoch 3/5 [Val]: 100%|██████████| 40/40 [00:01<00:00, 24.87it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 3/5\n",
      "Train Loss: 0.4061 Acc: 0.8232\n",
      "Val Loss: 0.4933 Acc: 0.7688\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 4/5 [Train]: 100%|██████████| 352/352 [00:57<00:00,  6.12it/s]\n",
      "Epoch 4/5 [Val]: 100%|██████████| 40/40 [00:01<00:00, 24.18it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 4/5\n",
      "Train Loss: 0.3212 Acc: 0.8699\n",
      "Val Loss: 0.4841 Acc: 0.7932\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 5/5 [Train]: 100%|██████████| 352/352 [00:55<00:00,  6.36it/s]\n",
      "Epoch 5/5 [Val]: 100%|██████████| 40/40 [00:01<00:00, 24.60it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 5/5\n",
      "Train Loss: 0.2378 Acc: 0.9101\n",
      "Val Loss: 0.5354 Acc: 0.7976\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "评估模型: 100%|██████████| 391/391 [00:16<00:00, 24.07it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "测试集准确率: 0.7758\n",
      "测试集损失: 0.5783\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "         neg     0.7310    0.8728    0.7956     12500\n",
      "         pos     0.8422    0.6788    0.7517     12500\n",
      "\n",
      "    accuracy                         0.7758     25000\n",
      "   macro avg     0.7866    0.7758    0.7737     25000\n",
      "weighted avg     0.7866    0.7758    0.7737     25000\n",
      "\n",
      "\n",
      "---------------- RNN模型 ----------------\n",
      "测试集准确率: 0.7758\n",
      "训练时间: 280.55 秒\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "shape mismatch: objects cannot be broadcast to a single shape.  Mismatch is between arg 0 with shape (0,) and arg 1 with shape (4,).",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mValueError\u001B[0m                                Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[1], line 582\u001B[0m\n\u001B[0;32m    578\u001B[0m     plt\u001B[38;5;241m.\u001B[39mshow()\n\u001B[0;32m    581\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;18m__name__\u001B[39m \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m__main__\u001B[39m\u001B[38;5;124m\"\u001B[39m:\n\u001B[1;32m--> 582\u001B[0m     main()\n",
      "Cell \u001B[1;32mIn[1], line 543\u001B[0m, in \u001B[0;36mmain\u001B[1;34m()\u001B[0m\n\u001B[0;32m    541\u001B[0m \u001B[38;5;66;03m# 准确率对比\u001B[39;00m\n\u001B[0;32m    542\u001B[0m plt\u001B[38;5;241m.\u001B[39msubplot(\u001B[38;5;241m2\u001B[39m, \u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m1\u001B[39m)\n\u001B[1;32m--> 543\u001B[0m bars \u001B[38;5;241m=\u001B[39m plt\u001B[38;5;241m.\u001B[39mbar(\n\u001B[0;32m    544\u001B[0m     [\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mres[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mmodel\u001B[39m\u001B[38;5;124m'\u001B[39m]\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m新闻\u001B[39m\u001B[38;5;124m\"\u001B[39m \u001B[38;5;28;01mfor\u001B[39;00m res \u001B[38;5;129;01min\u001B[39;00m results \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m新闻\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01min\u001B[39;00m res[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdataset\u001B[39m\u001B[38;5;124m'\u001B[39m][:\u001B[38;5;241m5\u001B[39m]] \u001B[38;5;241m+\u001B[39m\n\u001B[0;32m    545\u001B[0m     [\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mres[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mmodel\u001B[39m\u001B[38;5;124m'\u001B[39m]\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m电影\u001B[39m\u001B[38;5;124m\"\u001B[39m \u001B[38;5;28;01mfor\u001B[39;00m res \u001B[38;5;129;01min\u001B[39;00m results \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m电影\u001B[39m\u001B[38;5;124m'\u001B[39m \u001B[38;5;129;01min\u001B[39;00m res[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mdataset\u001B[39m\u001B[38;5;124m'\u001B[39m][:\u001B[38;5;241m5\u001B[39m]],\n\u001B[0;32m    546\u001B[0m     [res[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mtest_accuracy\u001B[39m\u001B[38;5;124m'\u001B[39m] 
\u001B[38;5;28;01mfor\u001B[39;00m res \u001B[38;5;129;01min\u001B[39;00m results],\n\u001B[0;32m    547\u001B[0m     width\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m0.4\u001B[39m,\n\u001B[0;32m    548\u001B[0m     color\u001B[38;5;241m=\u001B[39m[\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m#1f77b4\u001B[39m\u001B[38;5;124m'\u001B[39m, \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m#ff7f0e\u001B[39m\u001B[38;5;124m'\u001B[39m, \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m#2ca02c\u001B[39m\u001B[38;5;124m'\u001B[39m, \u001B[38;5;124m'\u001B[39m\u001B[38;5;124m#d62728\u001B[39m\u001B[38;5;124m'\u001B[39m]\n\u001B[0;32m    549\u001B[0m )\n\u001B[0;32m    550\u001B[0m plt\u001B[38;5;241m.\u001B[39mtitle(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m模型准确率对比\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m    551\u001B[0m plt\u001B[38;5;241m.\u001B[39mylabel(\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m准确率\u001B[39m\u001B[38;5;124m'\u001B[39m)\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\matplotlib\\pyplot.py:2754\u001B[0m, in \u001B[0;36mbar\u001B[1;34m(x, height, width, bottom, align, data, **kwargs)\u001B[0m\n\u001B[0;32m   2743\u001B[0m \u001B[38;5;129m@_copy_docstring_and_deprecators\u001B[39m(Axes\u001B[38;5;241m.\u001B[39mbar)\n\u001B[0;32m   2744\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21mbar\u001B[39m(\n\u001B[0;32m   2745\u001B[0m     x: \u001B[38;5;28mfloat\u001B[39m \u001B[38;5;241m|\u001B[39m ArrayLike,\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m   2752\u001B[0m     \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs,\n\u001B[0;32m   2753\u001B[0m ) \u001B[38;5;241m-\u001B[39m\u001B[38;5;241m>\u001B[39m BarContainer:\n\u001B[1;32m-> 2754\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m gca()\u001B[38;5;241m.\u001B[39mbar(\n\u001B[0;32m   2755\u001B[0m         x,\n\u001B[0;32m   2756\u001B[0m         height,\n\u001B[0;32m   2757\u001B[0m         width\u001B[38;5;241m=\u001B[39mwidth,\n\u001B[0;32m   2758\u001B[0m         bottom\u001B[38;5;241m=\u001B[39mbottom,\n\u001B[0;32m   2759\u001B[0m         align\u001B[38;5;241m=\u001B[39malign,\n\u001B[0;32m   2760\u001B[0m         \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39m({\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mdata\u001B[39m\u001B[38;5;124m\"\u001B[39m: data} \u001B[38;5;28;01mif\u001B[39;00m data \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m \u001B[38;5;28;01melse\u001B[39;00m {}),\n\u001B[0;32m   2761\u001B[0m         \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs,\n\u001B[0;32m   2762\u001B[0m     )\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\matplotlib\\__init__.py:1465\u001B[0m, in \u001B[0;36m_preprocess_data.<locals>.inner\u001B[1;34m(ax, data, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1462\u001B[0m \u001B[38;5;129m@functools\u001B[39m\u001B[38;5;241m.\u001B[39mwraps(func)\n\u001B[0;32m   1463\u001B[0m \u001B[38;5;28;01mdef\u001B[39;00m \u001B[38;5;21minner\u001B[39m(ax, \u001B[38;5;241m*\u001B[39margs, data\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mNone\u001B[39;00m, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs):\n\u001B[0;32m   1464\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m data \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[1;32m-> 1465\u001B[0m         \u001B[38;5;28;01mreturn\u001B[39;00m func(ax, \u001B[38;5;241m*\u001B[39m\u001B[38;5;28mmap\u001B[39m(sanitize_sequence, args), \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n\u001B[0;32m   1467\u001B[0m     bound \u001B[38;5;241m=\u001B[39m new_sig\u001B[38;5;241m.\u001B[39mbind(ax, \u001B[38;5;241m*\u001B[39margs, \u001B[38;5;241m*\u001B[39m\u001B[38;5;241m*\u001B[39mkwargs)\n\u001B[0;32m   1468\u001B[0m     auto_label \u001B[38;5;241m=\u001B[39m (bound\u001B[38;5;241m.\u001B[39marguments\u001B[38;5;241m.\u001B[39mget(label_namer)\n\u001B[0;32m   1469\u001B[0m                   \u001B[38;5;129;01mor\u001B[39;00m bound\u001B[38;5;241m.\u001B[39mkwargs\u001B[38;5;241m.\u001B[39mget(label_namer))\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\matplotlib\\axes\\_axes.py:2461\u001B[0m, in \u001B[0;36mAxes.bar\u001B[1;34m(self, x, height, width, bottom, align, **kwargs)\u001B[0m\n\u001B[0;32m   2458\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m yerr \u001B[38;5;129;01mis\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m \u001B[38;5;28;01mNone\u001B[39;00m:\n\u001B[0;32m   2459\u001B[0m         yerr \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_convert_dx(yerr, y0, y, \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mconvert_yunits)\n\u001B[1;32m-> 2461\u001B[0m x, height, width, y, linewidth, hatch \u001B[38;5;241m=\u001B[39m np\u001B[38;5;241m.\u001B[39mbroadcast_arrays(\n\u001B[0;32m   2462\u001B[0m     \u001B[38;5;66;03m# Make args iterable too.\u001B[39;00m\n\u001B[0;32m   2463\u001B[0m     np\u001B[38;5;241m.\u001B[39matleast_1d(x), height, width, y, linewidth, hatch)\n\u001B[0;32m   2465\u001B[0m \u001B[38;5;66;03m# Now that units have been converted, set the tick locations.\u001B[39;00m\n\u001B[0;32m   2466\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m orientation \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mvertical\u001B[39m\u001B[38;5;124m'\u001B[39m:\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\numpy\\lib\\stride_tricks.py:540\u001B[0m, in \u001B[0;36mbroadcast_arrays\u001B[1;34m(subok, *args)\u001B[0m\n\u001B[0;32m    533\u001B[0m \u001B[38;5;66;03m# nditer is not used here to avoid the limit of 32 arrays.\u001B[39;00m\n\u001B[0;32m    534\u001B[0m \u001B[38;5;66;03m# Otherwise, something like the following one-liner would suffice:\u001B[39;00m\n\u001B[0;32m    535\u001B[0m \u001B[38;5;66;03m# return np.nditer(args, flags=['multi_index', 'zerosize_ok'],\u001B[39;00m\n\u001B[0;32m    536\u001B[0m \u001B[38;5;66;03m#                  order='C').itviews\u001B[39;00m\n\u001B[0;32m    538\u001B[0m args \u001B[38;5;241m=\u001B[39m [np\u001B[38;5;241m.\u001B[39marray(_m, copy\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mFalse\u001B[39;00m, subok\u001B[38;5;241m=\u001B[39msubok) \u001B[38;5;28;01mfor\u001B[39;00m _m \u001B[38;5;129;01min\u001B[39;00m args]\n\u001B[1;32m--> 540\u001B[0m shape \u001B[38;5;241m=\u001B[39m _broadcast_shape(\u001B[38;5;241m*\u001B[39margs)\n\u001B[0;32m    542\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mall\u001B[39m(array\u001B[38;5;241m.\u001B[39mshape \u001B[38;5;241m==\u001B[39m shape \u001B[38;5;28;01mfor\u001B[39;00m array \u001B[38;5;129;01min\u001B[39;00m args):\n\u001B[0;32m    543\u001B[0m     \u001B[38;5;66;03m# Common case where nothing needs to be broadcasted.\u001B[39;00m\n\u001B[0;32m    544\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m args\n",
      "File \u001B[1;32mD:\\python\\Lib\\site-packages\\numpy\\lib\\stride_tricks.py:422\u001B[0m, in \u001B[0;36m_broadcast_shape\u001B[1;34m(*args)\u001B[0m\n\u001B[0;32m    417\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"Returns the shape of the arrays that would result from broadcasting the\u001B[39;00m\n\u001B[0;32m    418\u001B[0m \u001B[38;5;124;03msupplied arrays against each other.\u001B[39;00m\n\u001B[0;32m    419\u001B[0m \u001B[38;5;124;03m\"\"\"\u001B[39;00m\n\u001B[0;32m    420\u001B[0m \u001B[38;5;66;03m# use the old-iterator because np.nditer does not handle size 0 arrays\u001B[39;00m\n\u001B[0;32m    421\u001B[0m \u001B[38;5;66;03m# consistently\u001B[39;00m\n\u001B[1;32m--> 422\u001B[0m b \u001B[38;5;241m=\u001B[39m np\u001B[38;5;241m.\u001B[39mbroadcast(\u001B[38;5;241m*\u001B[39margs[:\u001B[38;5;241m32\u001B[39m])\n\u001B[0;32m    423\u001B[0m \u001B[38;5;66;03m# unfortunately, it cannot handle 32 or more arguments directly\u001B[39;00m\n\u001B[0;32m    424\u001B[0m \u001B[38;5;28;01mfor\u001B[39;00m pos \u001B[38;5;129;01min\u001B[39;00m \u001B[38;5;28mrange\u001B[39m(\u001B[38;5;241m32\u001B[39m, \u001B[38;5;28mlen\u001B[39m(args), \u001B[38;5;241m31\u001B[39m):\n\u001B[0;32m    425\u001B[0m     \u001B[38;5;66;03m# ironically, np.broadcast does not properly handle np.broadcast\u001B[39;00m\n\u001B[0;32m    426\u001B[0m     \u001B[38;5;66;03m# objects (it treats them as scalars)\u001B[39;00m\n\u001B[0;32m    427\u001B[0m     \u001B[38;5;66;03m# use broadcasting to avoid allocating the full array\u001B[39;00m\n",
      "\u001B[1;31mValueError\u001B[0m: shape mismatch: objects cannot be broadcast to a single shape.  Mismatch is between arg 0 with shape (0,) and arg 1 with shape (4,)."
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n",
      "findfont: Font family 'WenQuanYi Micro Hei' not found.\n",
      "findfont: Font family 'Heiti TC' not found.\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "<Figure size 1200x800 with 1 Axes>"
      ],
      "image/png": "iVBORw0KGgoAAAANSUhEUgAAA98AAAFGCAYAAABzBSwSAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjguNCwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8fJSN1AAAACXBIWXMAAA9hAAAPYQGoP6dpAAAc70lEQVR4nO3df2jdd7348Vdy0uRGw3ItHZna3Vwms9iYlsZlqyGOWapQy6qYCf7CyXCSbYqYIlIVZ4ibZGK9ouxCKA3XVbTSdmE/lHZ3He0Qs62F20W6NiNs01jWOVNqTzA97en5/iEN9lu7nnOSd9L0PB7QPz6HzzvnFXhl23Pn5LSqUCgUAgAAAEimer4HAAAAgKud+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDESo7vEydOxJo1a2J8fLyo+59//vlYt25d3HLLLTE4OFjygAAAALDQlRTfExMT0d3dHX/+85+Lvv+ee+6J9evXx/bt2+Pxxx+P4eHhsgYFAACAhaqmlJt7enriYx/7WPzf//1fUfc/9thjce2118Z9990XVVVVce+998aOHTti9erVRT/nuXPn4uzZs1FdXR1VVVWljAsAAAAlKxQKce7cuaipqYnq6tn5be2S4ruvry+uv/76ePDBB4u6/+jRo7F69erpaF6xYkVs3ry5pAHPnj0bIyMjJZ0BAACAmWptbY3a2tpZ+Volxff1119f0hfPZrPxnve8Z/q6oaEhjh8/XtLXOP9/GZYtWzZr3zRcafL5fBw+fDiWL18emUxmvseBJOw5lcCeUwnsOZUgl8vF0aNHZ+1V74gS47tUmUzmgmCuq6uLqampkr7G+VfNa2trxTdXrXw+HxH/2HP/EuNqZc+pBPacSmDPqSSz+avPSf+qscbGxpiYmJi+npycjEWLFqV8SgAAALjiJI3v1tbWOHTo0PT1Sy+9FE1NTSmfEgAAAK44sxLf2Ww2zpw5c9Hja9asiYMHD8bw8HCcPXs2tm7dGp2dnbPxlAAAALBgzEp8b9iwIfbt23fR44sXL45vfvOb8aUvfSk6Ozvj5ZdfjnvuuWc2nhIAAAAWjLI+cO3o0aMXXO/du/eS937uc5+Lzs7OGBsbi5tvvjkaGhrKeUoAAABYsJJ+2vl5zc3N0dzcPBdPBQAAAFecpB+4BgAAAIhvAAAASE58AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIrKb5HR0ejq6sr2tvbo7+/PwqFwmXPbNmyJTo6OqKtrS2++tWvxokTJ8oeFgAAABaiouM7l8tFd3d3tLS0xM6dO2NsbCx27dr1lmdeeOGFGBoaim3btsWjjz4ap0+fjv7+/hkPDQAAAAtJTbE37t+/P7LZbGzatCnq6+ujp6cnent7o6ur65JnXnzxxbj11lvjhhtuiIiI9evXxy9/+cuyBs3n85HP58s6C1e687ttx7ma2XMqgT2nEthzKkGK/S46vo8cORIrV66M+vr6iIhYtmxZjI2NveWZG2+8MX71q1/Fpz/96Xj7298eO3bsiI6OjrIGPXz4cFnnYCEZGRmZ7xEgOXtOJbDnVAJ7DqUpOr6z2WwsXbp0+rqqqiqqq6vj5MmT0djY+C/P3Hrr
rdHc3Bwf+chHIiKitbU1vvzlL5c16PLly6O2trass3Cly+fzMTIyEq2trZHJZOZ7HEjCnlMJ7DmVwJ5TCXK53Ky/AFx0fGcymYvit66uLqampi4Z37/5zW/i2LFj8dvf/jYWL14c/f398Y1vfCN++tOfljxoJpPxw81Vz55TCew5lcCeUwnsOVezFLtddHw3NjbGyy+/fMFjk5OTsWjRokueefLJJ+Mzn/nM9O98f/vb344PfOAD8be//S2uueaaMkcGAACAhaXoTztvbW2NQ4cOTV+Pj49HLpe75KveEf94S8qbb745ff3GG29MPw4AAACVouhXvtvb2+PUqVMxNDQUn/jEJ2JgYCA6Ojoik8lENpuNurq6i14Fb2tri8HBwbjuuuvi3/7t3+J//ud/YtWqVfGOd7xj1r8RAAAAuFIVHd81NTXR19cXGzdujIceeijy+Xxs27YtIiI2bNgQ3/rWt2Lt2rUXnLnzzjvjjTfeiIcffjhOnDgRq1atigceeGB2vwMAAAC4whUd3xERa9eujT179sTIyEi0tbXF4sWLIyJi7969//L+urq6+M53vhPf+c53Zj4pAAAALFAlxXdERFNTUzQ1NaWYBQAAAK5KRX/gGgAAAFAe8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYiXF9+joaHR1dUV7e3v09/dHoVAo+uzXv/716OvrK3lAAAAAWOiKju9cLhfd3d3R0tISO3fujLGxsdi1a1dRZ5999tkYHh6Or33ta2UPCgAAAAtVTbE37t+/P7LZbGzatCnq6+ujp6cnent7o6ur6y3PTU1NRW9vb2zcuDGuueaasgfN5/ORz+fLPg9XsvO7bce5mtlzKoE9pxLYcypBiv0uOr6PHDkSK1eujPr6+oiIWLZsWYyNjV323MMPPxxTU1NRU1MTv//972P16tVRVVVV8qCHDx8u+QwsNCMjI/M9AiRnz6kE9pxKYM+hNEXHdzabjaVLl05fV1VVRXV1dZw8eTIaGxv/5Zljx47F4OBgrFixIo4dOxY///nP453vfGf87Gc/KznAly9fHrW1tSWdgYUin8/HyMhItLa2RiaTme9xIAl7TiWw51QCe04lyOVys/4CcNHxnclkLorfurq6mJqaumR879q1K5YsWRKDg4NRW1sbX/jCF2LNmjXxu9/9Ljo7O0saNJPJ+OHmqmfPqQT2nEpgz6kE9pyrWYrdLvoD1xobG2NiYuKCxyYnJ2PRokWXPHP8+PFYvXr1dLQ3NDREc3NzjI+PlzkuAAAALDxFx3dra2scOnRo+np8fDxyudwlX/WOiLjuuuvi9OnT09fnzp2L119/Pd71rneVOS4AAAAsPEXHd3t7e5w6dSqGhoYiImJgYCA6Ojoik8lENpuNM2fOXHRm3bp18cwzz8Tu3bvj9ddfjx/96EeRy+Wira1t1r4BAAAAuNIVHd81NTXR19cX999/f3R0dMTu3btj48aNERGxYcOG2Ldv30Vnbrjhhvjxj38c//3f/x0f/ehHY9++ffHwww9HQ0PD7H0HAAAAcIUr+gPXIiLWrl0be/bsiZGRkWhra4vFixdHRMTevXsveea2226L2267bUZDAgAAwEJWUnxHRDQ1NUVTU1OKWQAAAOCqVPTbzgEAAIDyiG8AAABITHwDAABA
YuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEispvkdHR6Orqyva29ujv78/CoVC0WfPnDkTt99+ezz33HMlDwkAAAALWdHxncvloru7O1paWmLnzp0xNjYWu3btKvqJtmzZEqOjo2UNCQAAAAtZTbE37t+/P7LZbGzatCnq6+ujp6cnent7o6ur67JnX3311di6dWu8+93vLnvQfD4f+Xy+7PNwJTu/23acq5k9pxLYcyqBPacSpNjvouP7yJEjsXLlyqivr4+IiGXLlsXY2FhRZ7/73e/G3XffHc8++2x5U0bE4cOHyz4LC8XIyMh8jwDJ2XMqgT2nEthzKE3R8Z3NZmPp0qXT11VVVVFdXR0nT56MxsbGS57buXNnZLPZuOuuu2YU38uXL4/a2tqyz8OVLJ/Px8jISLS2tkYmk5nvcSAJe04lsOdUAntOJcjlcrP+AnDR8Z3JZC6K37q6upiamrpkfE9MTMTmzZtjy5YtUVNT9FNd8vn9cHO1s+dUAntOJbDnVAJ7ztUsxW4X/YFrjY2NMTExccFjk5OTsWjRokueeeCBB+KOO+6I973vfeVPCAAAAAtc0fHd2toahw4dmr4eHx+PXC73lm85f+KJJ+KRRx6Jm266KW666aY4ePBgdHd3x8DAwMymBgAAgAWk6PeCt7e3x6lTp2JoaCg+8YlPxMDAQHR0dEQmk4lsNht1dXUXvQr+9NNPX3Dd09MTd955Z3zoQx+anekBAABgASj6le+ampro6+uL+++/Pzo6OmL37t2xcePGiIjYsGFD7Nu376IzS5cuveBPXV1dLFmyJK655prZ+w4AAADgClfSp6CtXbs29uzZEyMjI9HW1haLFy+OiIi9e/cWdf6RRx4pfUIAAABY4Er+CPKmpqZoampKMQsAAABclYp+2zkAAABQHvENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGLiGwAAABIT3wAAAJCY+AYAAIDExDcAAAAkJr4BAAAgMfENAAAAiYlvAAAASEx8AwAAQGIlxffo6Gh0dXVFe3t79Pf3R6FQuOyZ7du3R2dnZ7S0tMRdd90Vb7zxRtnDAgAAwEJUdHzncrno7u6OlpaW2LlzZ4yNjcWuXbve8syBAwfiJz/5STz00EPx9NNPx+nTp6O/v3/GQwMAAMBCUlPsjfv3749sNhubNm2K+vr66Onpid7e3ujq6rrkmVdeeSW+973vRUdHR0REfPKTn4yBgYGyBs3n85HP58s6C1e687ttx7ma2XMqgT2nEthzKkGK/S46vo8cORIrV66M+vr6iIhYtmxZjI2NveWZT33qUxdcv/LKK9Hc3FzGmBGHDx8u6xwsJCMjI/M9AiRnz6kE9pxKYM+hNEXHdzabjaVLl05fV1VVRXV1dZw8eTIaGxsve/7EiROxffv2+OEP
f1jWoMuXL4/a2tqyzsKVLp/Px8jISLS2tkYmk5nvcSAJe04lsOdUAntOJcjlcrP+AnDR8Z3JZC6K37q6upiamioqvnt7e2PVqlVx2223lTzk+ef3w83Vzp5TCew5lcCeUwnsOVezFLtddHw3NjbGyy+/fMFjk5OTsWjRosue3bFjRxw4cCCGhoZKHhAAAAAWuqI/7by1tTUOHTo0fT0+Ph65XO6yr3q/+OKL8eCDD8bmzZtjyZIl5U8KAAAAC1TR8d3e3h6nTp2afvV6YGAgOjo6IpPJRDabjTNnzlx05s0334zu7u64++67o6WlJSYnJ2NycnLWhgcAAICFoOj4rqmpib6+vrj//vujo6Mjdu/eHRs3boyIiA0bNsS+ffsuOvPEE0/EX//61/iv//qvaGtrm/4DAAAAlaTo3/mOiFi7dm3s2bMnRkZGoq2tLRYvXhwREXv37v2X93/xi1+ML37xizMeEgAAABaykuI7IqKpqSmamppSzAIAAABXpaLfdg4AAACUR3wDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkJj4BgAAgMTENwAAACQmvgEAACAx8Q0AAACJiW8AAABITHwDAABAYuIbAAAAEhPfAAAAkFhJ8T06OhpdXV3R3t4e/f39USgULnvm+eefj3Xr1sUtt9wSg4ODZQ8KAAAAC1XR8Z3L5aK7uztaWlpi586dMTY2Frt27XrLMxMTE3HPPffE+vXrY/v27fH444/H8PDwjIcGAACAhaSm2Bv3798f2Ww2Nm3aFPX19dHT0xO9vb3R1dV1yTOPPfZYXHvttXHfffdFVVVV3HvvvbFjx45YvXp10QOef3U9l8sVfQYWmnw+HxH/2PNMJjPP00Aa9pxKYM+pBPacSnC+P4t5t3exio7vI0eOxMqVK6O+vj4iIpYtWxZjY2Nveebo0aOxevXqqKqqioiIFStWxObNm0sa8Ny5c9NfC652hw8fnu8RIDl7TiWw51QCe04lON+js6Ho+M5ms7F06dLp66qqqqiuro6TJ09GY2PjJc+85z3vmb5uaGiI48ePlzZgTU20trZGdXX1dMQDAABAKoVCIc6dOxc1NUUn82UV/ZUymUzU1tZe8FhdXV1MTU1dMr7//zPn7y9FdXX1Rc8LAAAAC0nRH7jW2NgYExMTFzw2OTkZixYtKvrM5e4HAACAq1HR8d3a2hqHDh2avh4fH49cLnfJV73/1ZmXXnopmpqayhwVAAAAFqai47u9vT1OnToVQ0NDERExMDAQHR0dkclkIpvNxpkzZy46s2bNmjh48GAMDw/H2bNnY+vWrdHZ2TlrwwMAAMBCUFUo4bPT//d//zc2btwYb3/72yOfz8e2bdvixhtvjDVr1sS3vvWtWLt27UVnfvGLX8QPfvCDaGhoiLe97W3x61//OpYsWTKr3wQAAABcyUqK74iI48ePx8jISLS1tcXixYuLOvPaa6/F2NhY3HzzzdHQ0FDWoAAAALBQlRzfAAAAQGmK/p1vAAAAoDziGwAAABKb9/geHR2Nrq6uaG9vj/7+/ijmXfDPP/98rFu3Lm655ZYYHBycgylhZsrZ8+3bt0dnZ2e0tLTEXXfdFW+88cYcTArlK2fPzztz5kzcfvvt8dxzzyWcEGZuJnv+9a9/Pfr6+hJOB7OjnD3fsmVLdHR0RFtbW3z1q1+NEydOzMGkUL4TJ07EmjVrYnx8vKj7Z6NB5zW+c7lcdHd3R0tLS+zc
uTPGxsZi165db3lmYmIi7rnnnli/fn1s3749Hn/88RgeHp6jiaF05ez5gQMH4ic/+Uk89NBD8fTTT8fp06ejv79/jiaG0pWz5/9sy5YtMTo6mnBCmLmZ7Pmzzz4bw8PD8bWvfS3xlDAz5ez5Cy+8EENDQ7Ft27Z49NFH/XcLV7yJiYno7u6OP//5z0XfPxsNOq/xvX///shms7Fp06b4j//4j+jp6YkdO3a85ZnHHnssrr322rjvvvviP//zP+Pee++97BmYT+Xs+SuvvBLf+973oqOjI6677rr45Cc/GX/4wx/maGIoXTl7ft6rr74aW7dujXe/+92Jp4SZKXfPp6amore3NzZu3BjXXHPNHEwK5Stnz1988cW49dZb44Ybbojm5uZYv359vPrqq3MzMJShp6cnPvaxjxV9/2w16LzG95EjR2LlypVRX18fERHLli2LsbGxtzxz9OjRWL16dVRVVUVExIoVK+Lw4cPJZ4VylbPnn/rUp+KjH/3o9PUrr7wSzc3NSeeEmShnz8/77ne/G3fffbf45opX7p4//PDDMTU1FTU1NfH73/++pLeqw1wrZ89vvPHGeOqpp+KPf/xj/PWvf40dO3ZER0fHXIwLZenr64s777yz6Ptnq0HnNb6z2WwsXbp0+rqqqiqqq6vj5MmTRZ9paGiI48ePJ50TZqKcPf9nJ06ciO3bt8dnP/vZVCPCjJW75zt37oxsNht33XVX6hFhxsrZ82PHjsXg4GA0NzfHsWPH4oc//GF85StfEeBcscrZ81tvvTWam5vjIx/5SHR0dMTf//73+PKXvzwX40JZrr/++pLun60Gndf4zmQyUVtbe8FjdXV1MTU1VfSZy90P862cPf9nvb29sWrVqrjtttsSTAezo5w9n5iYiM2bN8cDDzwQNTU1qUeEGStnz3ft2hVLliyJwcHBuPfee+PnP/95vPDCC/G73/0u9bhQlnL2/De/+U0cO3Ysfvvb38Zzzz0XN954Y3zjG99IPSrMmdlq0HmN78bGxpiYmLjgscnJyVi0aFHRZy53P8y3cvb8vB07dsSBAwfiwQcfTDUezIpy9vyBBx6IO+64I973vvelHg9mRTl7fvz48Vi9evX0f7Q1NDREc3Nz0Z+uC3OtnD1/8skn4zOf+UzccMMN8e///u/x7W9/O/bs2RN/+9vfUo8Lc2K2GnRe47u1tTUOHTo0fT0+Ph65XC4aGxuLPvPSSy9FU1NT0jlhJsrZ84h/fHjJgw8+GJs3b44lS5akHhNmpJw9f+KJJ+KRRx6Jm266KW666aY4ePBgdHd3x8DAwFyMDCUrZ8+vu+66OH369PT1uXPn4vXXX493vetdSWeFcpWz5/l8Pt58883p6/N/PWo+n083KMyh2WrQeY3v9vb2OHXqVAwNDUVExMDAQHR0dEQmk4lsNhtnzpy56MyaNWvi4MGDMTw8HGfPno2tW7dGZ2fnHE8OxStnz998883o7u6Ou+++O1paWmJycjImJyfneHIoXjl7/vTTT8djjz0WQ0NDMTQ0FO9///vj+9//fnz605+e4+mhOOXs+bp16+KZZ56J3bt3x+uvvx4/+tGPIpfLRVtb2xxPD8UpZ8/b2tri17/+dfzyl7+MRx99NHp6emLVqlXxjne8Y46nh5lJ3qCFefbUU08VVqxYUfjgBz9YuPnmmwujo6OFQqFQ+PCHP1x46qmn/uWZbdu2FVpaWgq33HJL4cMf/nDhL3/5y1yODCUrdc8HBwcL733vey/6A1eycv55/s8+//nPF4aHh1OPCTNSzp4/88wzhY9//OOF1tbWwvr16wsHDhyYy5GhZKXu+dTUVKGvr6/Q2dlZaGlpKXz+858vvPbaa3M9NpTsve99b+FPf/rT9HXqBq0qFOb/4zaPHz8eIyMj0dbWFosXLy7qzGuvvRZjY2Nx8803R0NDQ+IJYebK2XNYaOw5lcCeUwnsOVxspg16RcQ3AAAAXM3m9Xe+AQAAoBKIbwAA
AEhMfAMAAEBi4hsAAAASE98AAACQmPgGAACAxMQ3AAAAJCa+AQAAIDHxDQAAAIn9P9gdyK289rmYAAAAAElFTkSuQmCC"
     },
     "metadata": {},
     "output_type": "display_data"
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {
    "jupyter": {
     "is_executing": true
    },
    "ExecuteTime": {
     "start_time": "2025-06-12T01:14:13.588887Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from tqdm import tqdm\n",
    "import matplotlib.pyplot as plt\n",
    "from datetime import datetime\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from torch.nn.utils.rnn import pad_sequence\n",
    "from sklearn.model_selection import train_test_split\n",
    "\n",
    "# ------------------------------ 数据处理函数 ------------------------------\n",
    "def load_class_labels(class_path):\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                label_map[idx] = line.strip()\n",
    "    return label_map or {idx: f\"类别{idx}\" for idx in sorted(set(label_map.keys()))}\n",
    "\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"Read a corpus file where each non-empty line is `text<TAB>label`.\n",
    "\n",
    "    Lines without a tab-separated trailing field are skipped. Returns two\n",
    "    parallel lists: (texts, integer labels).\n",
    "    \"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as handle:\n",
    "        for raw_line in tqdm(handle, desc=f\"加载 {file_path}\"):\n",
    "            stripped = raw_line.strip()\n",
    "            if not stripped:\n",
    "                continue\n",
    "            pieces = stripped.rsplit('\\t', 1)\n",
    "            if len(pieces) != 2:\n",
    "                continue\n",
    "            texts.append(pieces[0])\n",
    "            labels.append(int(pieces[1]))\n",
    "    return texts, labels\n",
    "\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"Tokenize each text with jieba, dropping stopwords and 1-char tokens.\n",
    "\n",
    "    A text whose tokens are all filtered out becomes ['<PAD>'] so every\n",
    "    sample keeps at least one token.\n",
    "    \"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        tokens = [\n",
    "            w for w in jieba.cut(text, HMM=True)\n",
    "            if w not in stopwords and len(w) >= 2\n",
    "        ]\n",
    "        processed.append(tokens or ['<PAD>'])\n",
    "    return processed\n",
    "\n",
    "\n",
    "def load_imdb_data(data_path):\n",
    "    train_texts, train_labels = [], []\n",
    "    test_texts, test_labels = [], []\n",
    "\n",
    "    # 检查根目录是否存在\n",
    "    if not os.path.exists(data_path):\n",
    "        raise FileNotFoundError(f\"数据集根目录不存在: {data_path}\")\n",
    "\n",
    "    for label in ['pos', 'neg']:\n",
    "        for split in ['train', 'test']:\n",
    "            path = os.path.join(data_path, split, label)\n",
    "            # 检查子目录是否存在\n",
    "            if not os.path.exists(path):\n",
    "                print(f\"警告: 路径不存在 - {path}\")\n",
    "                continue\n",
    "\n",
    "            for file_name in os.listdir(path):\n",
    "                if file_name.endswith('.txt'):\n",
    "                    with open(os.path.join(path, file_name), 'r', encoding='utf-8') as file:\n",
    "                        text = file.read()\n",
    "                        if split == 'train':\n",
    "                            train_texts.append(text)\n",
    "                            train_labels.append(1 if label == 'pos' else 0)\n",
    "                        else:\n",
    "                            test_texts.append(text)\n",
    "                            test_labels.append(1 if label == 'pos' else 0)\n",
    "    return train_texts, train_labels, test_texts, test_labels\n",
    "\n",
    "\n",
    "# ------------------------------ 自定义数据集类 ------------------------------\n",
    "class TextDataset(Dataset):\n",
    "    def __init__(self, texts, labels, vocab, max_length=100):\n",
    "        self.texts = texts\n",
    "        self.labels = labels\n",
    "        self.vocab = vocab\n",
    "        self.max_length = max_length\n",
    "        self.pad_idx = vocab.get('<PAD>', 0)\n",
    "        self.unk_idx = vocab.get('<UNK>', 1)\n",
    "        \n",
    "        # 过滤掉处理后为空的文本\n",
    "        valid_indices = []\n",
    "        for i, text in enumerate(texts):\n",
    "            if text:  # 确保文本不为空列表\n",
    "                valid_indices.append(i)\n",
    "        \n",
    "        self.texts = [texts[i] for i in valid_indices]\n",
    "        self.labels = [labels[i] for i in valid_indices]\n",
    "        \n",
    "        print(f\"数据集大小: {len(self.texts)} (过滤前: {len(texts)})\")\n",
    "        \n",
    "    def __len__(self):\n",
    "        return len(self.texts)\n",
    "    \n",
    "    def __getitem__(self, idx):\n",
    "        text = self.texts[idx]\n",
    "        label = self.labels[idx]\n",
    "        \n",
    "        # 将文本转换为索引序列，确保至少有一个词\n",
    "        indices = [self.vocab.get(word, self.unk_idx) for word in text[:self.max_length]]\n",
    "        \n",
    "        # 处理空文本\n",
    "        if not indices:\n",
    "            indices = [self.pad_idx]\n",
    "            \n",
    "        return torch.tensor(indices), torch.tensor(label)\n",
    "\n",
    "\n",
    "# ------------------------------ 模型定义 ------------------------------\n",
    "class CNNClassifier(nn.Module):\n",
    "    def __init__(self, vocab_size, embed_dim, num_classes, num_filters, filter_sizes, dropout=0.5):\n",
    "        super(CNNClassifier, self).__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n",
    "        \n",
    "        # 多个卷积层，每个使用不同大小的滤波器\n",
    "        self.convs = nn.ModuleList([\n",
    "            nn.Conv1d(embed_dim, num_filters, fs) for fs in filter_sizes\n",
    "        ])\n",
    "        \n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        self.fc = nn.Linear(len(filter_sizes) * num_filters, num_classes)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: [batch_size, seq_len]\n",
    "        embedded = self.embedding(x)  # [batch_size, seq_len, embed_dim]\n",
    "        embedded = embedded.permute(0, 2, 1)  # [batch_size, embed_dim, seq_len]\n",
    "        \n",
    "        # 应用卷积和池化\n",
    "        pooled_outputs = []\n",
    "        for conv in self.convs:\n",
    "            conv_out = nn.functional.relu(conv(embedded))  # [batch_size, num_filters, seq_len-fs+1]\n",
    "            pooled = nn.functional.max_pool1d(conv_out, conv_out.shape[2])  # [batch_size, num_filters, 1]\n",
    "            pooled = pooled.squeeze(2)  # [batch_size, num_filters]\n",
    "            pooled_outputs.append(pooled)\n",
    "        \n",
    "        # 合并所有卷积层的输出\n",
    "        cat = self.dropout(torch.cat(pooled_outputs, dim=1))  # [batch_size, num_filters * len(filter_sizes)]\n",
    "        return self.fc(cat)\n",
    "\n",
    "\n",
    "class RNNClassifier(nn.Module):\n",
    "    def __init__(self, vocab_size, embed_dim, hidden_dim, num_classes, num_layers=1, bidirectional=True, dropout=0.5):\n",
    "        super(RNNClassifier, self).__init__()\n",
    "        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)\n",
    "        \n",
    "        self.rnn = nn.LSTM(\n",
    "            embed_dim, \n",
    "            hidden_dim, \n",
    "            num_layers=num_layers,\n",
    "            bidirectional=bidirectional,\n",
    "            batch_first=True,\n",
    "            dropout=dropout if num_layers > 1 else 0\n",
    "        )\n",
    "        \n",
    "        # 计算最终的特征维度\n",
    "        self.fc_input_dim = hidden_dim * 2 if bidirectional else hidden_dim\n",
    "        self.fc = nn.Linear(self.fc_input_dim, num_classes)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        \n",
    "    def forward(self, x):\n",
    "        # x: [batch_size, seq_len]\n",
    "        embedded = self.embedding(x)  # [batch_size, seq_len, embed_dim]\n",
    "        \n",
    "        # 前向传播RNN\n",
    "        outputs, (hidden, cell) = self.rnn(embedded)\n",
    "        \n",
    "        # 取最后一个时间步的隐藏状态\n",
    "        if self.rnn.bidirectional:\n",
    "            # 双向RNN，合并两个方向的最后隐藏状态\n",
    "            hidden = torch.cat((hidden[-2,:,:], hidden[-1,:,:]), dim=1)\n",
    "        else:\n",
    "            hidden = hidden[-1,:,:]\n",
    "            \n",
    "        hidden = self.dropout(hidden)\n",
    "        return self.fc(hidden)\n",
    "\n",
    "\n",
    "# ------------------------------ 模型训练函数 ------------------------------\n",
    "def create_vocab(texts, min_freq=2):\n",
    "    \"\"\"创建词汇表\"\"\"\n",
    "    vocab = {'<PAD>': 0, '<UNK>': 1}\n",
    "    word_freq = {}\n",
    "    \n",
    "    for text in texts:\n",
    "        for word in text:\n",
    "            word_freq[word] = word_freq.get(word, 0) + 1\n",
    "    \n",
    "    # 只保留频率大于等于min_freq的词\n",
    "    for word, freq in word_freq.items():\n",
    "        if freq >= min_freq:\n",
    "            vocab[word] = len(vocab)\n",
    "    \n",
    "    return vocab\n",
    "\n",
    "\n",
    "def collate_fn(batch):\n",
    "    \"\"\"自定义DataLoader的批处理函数，用于填充序列\"\"\"\n",
    "    texts, labels = zip(*batch)\n",
    "    \n",
    "    # 检查是否存在空张量并处理\n",
    "    texts = [t for t in texts if t.numel() > 0]  # 过滤掉空张量\n",
    "    \n",
    "    if not texts:  # 如果所有文本都为空（极罕见情况）\n",
    "        return torch.zeros(1, 1, dtype=torch.long), torch.tensor([0])\n",
    "    \n",
    "    texts = pad_sequence(texts, batch_first=True, padding_value=0)\n",
    "    labels = torch.tensor(labels)\n",
    "    \n",
    "    return texts, labels\n",
    "\n",
    "\n",
    "def train_model(model, train_loader, val_loader, criterion, optimizer, num_epochs=10, device='cpu'):\n",
    "    \"\"\"Train `model`, checkpointing on best validation accuracy.\n",
    "\n",
    "    Runs `num_epochs` epochs of training + validation. Whenever validation\n",
    "    accuracy improves, the weights are saved to 'best_model.pth'; the best\n",
    "    checkpoint is reloaded before returning.\n",
    "\n",
    "    NOTE(review): the checkpoint path is hard-coded, so back-to-back runs\n",
    "    (e.g. CNN then RNN) overwrite each other's file -- confirm intended.\n",
    "    \"\"\"\n",
    "    model.to(device)\n",
    "    best_val_acc = 0.0\n",
    "    \n",
    "    for epoch in range(num_epochs):\n",
    "        # Training phase\n",
    "        model.train()\n",
    "        train_loss = 0.0\n",
    "        train_correct = 0\n",
    "        \n",
    "        for texts, labels in tqdm(train_loader, desc=f\"Epoch {epoch+1}/{num_epochs} [Train]\"):\n",
    "            texts, labels = texts.to(device), labels.to(device)\n",
    "            \n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(texts)\n",
    "            loss = criterion(outputs, labels)\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            \n",
    "            # Weight the batch loss by batch size so the epoch figure is a\n",
    "            # true per-sample mean after dividing by the dataset size.\n",
    "            train_loss += loss.item() * texts.size(0)\n",
    "            _, preds = torch.max(outputs, 1)\n",
    "            train_correct += torch.sum(preds == labels.data)\n",
    "        \n",
    "        train_loss = train_loss / len(train_loader.dataset)\n",
    "        train_acc = train_correct.double() / len(train_loader.dataset)\n",
    "        \n",
    "        # Validation phase\n",
    "        model.eval()\n",
    "        val_loss = 0.0\n",
    "        val_correct = 0\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            for texts, labels in tqdm(val_loader, desc=f\"Epoch {epoch+1}/{num_epochs} [Val]\"):\n",
    "                texts, labels = texts.to(device), labels.to(device)\n",
    "                \n",
    "                outputs = model(texts)\n",
    "                loss = criterion(outputs, labels)\n",
    "                \n",
    "                val_loss += loss.item() * texts.size(0)\n",
    "                _, preds = torch.max(outputs, 1)\n",
    "                val_correct += torch.sum(preds == labels.data)\n",
    "        \n",
    "        val_loss = val_loss / len(val_loader.dataset)\n",
    "        val_acc = val_correct.double() / len(val_loader.dataset)\n",
    "        \n",
    "        print(f\"Epoch {epoch+1}/{num_epochs}\")\n",
    "        print(f\"Train Loss: {train_loss:.4f} Acc: {train_acc:.4f}\")\n",
    "        print(f\"Val Loss: {val_loss:.4f} Acc: {val_acc:.4f}\")\n",
    "        \n",
    "        # Checkpoint the best model so far\n",
    "        if val_acc > best_val_acc:\n",
    "            best_val_acc = val_acc\n",
    "            torch.save(model.state_dict(), 'best_model.pth')\n",
    "    \n",
    "    # Reload the best checkpoint before returning\n",
    "    model.load_state_dict(torch.load('best_model.pth'))\n",
    "    return model\n",
    "\n",
    "\n",
    "def evaluate_model(model, test_loader, criterion, device='cpu', label_map=None):\n",
    "    \"\"\"Evaluate `model` on `test_loader` and print accuracy / loss.\n",
    "\n",
    "    When `label_map` is given, a per-class classification report is printed\n",
    "    as well. Returns (accuracy, mean loss).\n",
    "    \"\"\"\n",
    "    model.to(device)\n",
    "    model.eval()\n",
    "\n",
    "    total_loss = 0.0\n",
    "    predictions = []\n",
    "    targets = []\n",
    "\n",
    "    with torch.no_grad():\n",
    "        for texts, labels in tqdm(test_loader, desc=\"评估模型\"):\n",
    "            texts = texts.to(device)\n",
    "            labels = labels.to(device)\n",
    "\n",
    "            outputs = model(texts)\n",
    "            total_loss += criterion(outputs, labels).item() * texts.size(0)\n",
    "\n",
    "            predictions.extend(outputs.argmax(dim=1).cpu().numpy())\n",
    "            targets.extend(labels.cpu().numpy())\n",
    "\n",
    "    test_loss = total_loss / len(test_loader.dataset)\n",
    "    test_acc = accuracy_score(targets, predictions)\n",
    "\n",
    "    print(f\"测试集准确率: {test_acc:.4f}\")\n",
    "    print(f\"测试集损失: {test_loss:.4f}\")\n",
    "\n",
    "    # Optional per-class breakdown (assumes label_map keys are 0..N-1).\n",
    "    if label_map:\n",
    "        target_names = [label_map[i] for i in range(len(label_map))]\n",
    "        print(\"分类报告:\\n\", classification_report(targets, predictions, target_names=target_names, digits=4))\n",
    "\n",
    "    return test_acc, test_loss\n",
    "\n",
    "\n",
    "def run_cnn(data, stopwords, label_map):\n",
    "    \"\"\"Train and evaluate the CNN classifier on one dataset.\n",
    "\n",
    "    `data` is (train_texts, train_labels, test_texts, test_labels).\n",
    "    Returns a summary dict: model name, a crude dataset tag, test accuracy\n",
    "    and wall-clock training time in seconds.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels, test_texts, test_labels = data\n",
    "    num_classes = len(label_map)\n",
    "\n",
    "    # Tokenize / filter both splits.\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "\n",
    "    # The vocabulary comes from the training split only.\n",
    "    vocab = create_vocab(train_processed)\n",
    "    vocab_size = len(vocab)\n",
    "\n",
    "    test_dataset = TextDataset(test_processed, test_labels, vocab)\n",
    "\n",
    "    # Hold out 10% of the training data for validation. (The original also\n",
    "    # built a TextDataset from the full training split here and immediately\n",
    "    # overwrote it -- dead work, removed.)\n",
    "    train_data, val_data = train_test_split(\n",
    "        list(zip(train_processed, train_labels)),\n",
    "        test_size=0.1,\n",
    "        random_state=42,\n",
    "        stratify=train_labels\n",
    "    )\n",
    "\n",
    "    train_dataset = TextDataset([x[0] for x in train_data], [x[1] for x in train_data], vocab)\n",
    "    val_dataset = TextDataset([x[0] for x in val_data], [x[1] for x in val_data], vocab)\n",
    "\n",
    "    batch_size = 64\n",
    "    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n",
    "    val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "    test_loader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "\n",
    "    # CNN hyper-parameters.\n",
    "    embed_dim = 100\n",
    "    num_filters = 100\n",
    "    filter_sizes = [3, 4, 5]  # parallel kernel widths\n",
    "    dropout = 0.5\n",
    "\n",
    "    model = CNNClassifier(\n",
    "        vocab_size=vocab_size,\n",
    "        embed_dim=embed_dim,\n",
    "        num_classes=num_classes,\n",
    "        num_filters=num_filters,\n",
    "        filter_sizes=filter_sizes,\n",
    "        dropout=dropout\n",
    "    )\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(f\"使用设备: {device}\")\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "    start_time = datetime.now()\n",
    "    model = train_model(\n",
    "        model=model,\n",
    "        train_loader=train_loader,\n",
    "        val_loader=val_loader,\n",
    "        criterion=criterion,\n",
    "        optimizer=optimizer,\n",
    "        num_epochs=5,  # keep the wall-clock time manageable\n",
    "        device=device\n",
    "    )\n",
    "    train_time = (datetime.now() - start_time).total_seconds()\n",
    "\n",
    "    test_acc, _ = evaluate_model(model, test_loader, criterion, device, label_map)\n",
    "\n",
    "    print(f\"\\n---------------- CNN模型 ----------------\")\n",
    "    print(f\"测试集准确率: {test_acc:.4f}\")\n",
    "    print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "\n",
    "    return {\n",
    "        \"model\": \"CNN\",\n",
    "        \"dataset\": data[0][0][:5],  # crude tag: first chars of first text\n",
    "        \"test_accuracy\": test_acc,\n",
    "        \"train_time\": train_time\n",
    "    }\n",
    "\n",
    "\n",
    "def run_rnn(data, stopwords, label_map):\n",
    "    \"\"\"Train and evaluate the RNN (LSTM) classifier on one dataset.\n",
    "\n",
    "    `data` is (train_texts, train_labels, test_texts, test_labels).\n",
    "    Returns a summary dict: model name, a crude dataset tag, test accuracy\n",
    "    and wall-clock training time in seconds.\n",
    "    \"\"\"\n",
    "    train_texts, train_labels, test_texts, test_labels = data\n",
    "    num_classes = len(label_map)\n",
    "\n",
    "    # Tokenize / filter both splits.\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "\n",
    "    # The vocabulary comes from the training split only.\n",
    "    vocab = create_vocab(train_processed)\n",
    "    vocab_size = len(vocab)\n",
    "\n",
    "    test_dataset = TextDataset(test_processed, test_labels, vocab)\n",
    "\n",
    "    # Hold out 10% of the training data for validation. (The original also\n",
    "    # built a TextDataset from the full training split here and immediately\n",
    "    # overwrote it -- dead work, removed.)\n",
    "    train_data, val_data = train_test_split(\n",
    "        list(zip(train_processed, train_labels)),\n",
    "        test_size=0.1,\n",
    "        random_state=42,\n",
    "        stratify=train_labels\n",
    "    )\n",
    "\n",
    "    train_dataset = TextDataset([x[0] for x in train_data], [x[1] for x in train_data], vocab)\n",
    "    val_dataset = TextDataset([x[0] for x in val_data], [x[1] for x in val_data], vocab)\n",
    "\n",
    "    batch_size = 64\n",
    "    train_loader = DataLoader(train_dataset, batch_size=batch_size, shuffle=True, collate_fn=collate_fn)\n",
    "    val_loader = DataLoader(val_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "    test_loader = DataLoader(test_dataset, batch_size=batch_size, collate_fn=collate_fn)\n",
    "\n",
    "    # RNN hyper-parameters.\n",
    "    embed_dim = 100\n",
    "    hidden_dim = 128\n",
    "    num_layers = 2\n",
    "    bidirectional = True\n",
    "    dropout = 0.5\n",
    "\n",
    "    model = RNNClassifier(\n",
    "        vocab_size=vocab_size,\n",
    "        embed_dim=embed_dim,\n",
    "        hidden_dim=hidden_dim,\n",
    "        num_classes=num_classes,\n",
    "        num_layers=num_layers,\n",
    "        bidirectional=bidirectional,\n",
    "        dropout=dropout\n",
    "    )\n",
    "\n",
    "    device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "    print(f\"使用设备: {device}\")\n",
    "\n",
    "    criterion = nn.CrossEntropyLoss()\n",
    "    optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "    start_time = datetime.now()\n",
    "    model = train_model(\n",
    "        model=model,\n",
    "        train_loader=train_loader,\n",
    "        val_loader=val_loader,\n",
    "        criterion=criterion,\n",
    "        optimizer=optimizer,\n",
    "        num_epochs=5,  # keep the wall-clock time manageable\n",
    "        device=device\n",
    "    )\n",
    "    train_time = (datetime.now() - start_time).total_seconds()\n",
    "\n",
    "    test_acc, _ = evaluate_model(model, test_loader, criterion, device, label_map)\n",
    "\n",
    "    print(f\"\\n---------------- RNN模型 ----------------\")\n",
    "    print(f\"测试集准确率: {test_acc:.4f}\")\n",
    "    print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "\n",
    "    return {\n",
    "        \"model\": \"RNN\",\n",
    "        \"dataset\": data[0][0][:5],  # crude tag: first chars of first text\n",
    "        \"test_accuracy\": test_acc,\n",
    "        \"train_time\": train_time\n",
    "    }\n",
    "\n",
    "\n",
    "# ------------------------------ 主函数 ------------------------------\n",
    "def main():\n",
    "    \"\"\"Run CNN and RNN experiments on the THUCNews and IMDb datasets\n",
    "    (whichever are present locally) and plot accuracy / training time.\n",
    "    \"\"\"\n",
    "    print(f\"[{datetime.now()}] 文本分类实验开始...\")\n",
    "\n",
    "    # Fonts capable of rendering Chinese chart labels.\n",
    "    plt.rcParams[\"font.family\"] = [\"SimHei\", \"WenQuanYi Micro Hei\", \"Heiti TC\"]\n",
    "\n",
    "    # News headline (THUCNews) dataset configuration.\n",
    "    THUCNEWS_DATA_DIR = r\"D:\\机器学习\\THUCNews-txt\"\n",
    "    THUCNEWS_CLASS_PATH = os.path.join(THUCNEWS_DATA_DIR, \"class.txt\")\n",
    "    stopwords = {\n",
    "        '的', '了', '在', '是', '我', '有', '和', '就', '不', '人', '都', '一',\n",
    "        '个', '上', '也', '很', '到', '说', '要', '去', '你', '会', '着', '没有'\n",
    "    }\n",
    "\n",
    "    # Results and their bar labels are collected in lockstep so the charts\n",
    "    # can never mislabel a bar. (The old code inferred the dataset from the\n",
    "    # first five characters of the first training text, which generally\n",
    "    # produced empty label lists and made plt.bar fail on a shape mismatch.)\n",
    "    results = []\n",
    "    bar_labels = []\n",
    "\n",
    "    # News headline dataset.\n",
    "    if os.path.exists(THUCNEWS_DATA_DIR):\n",
    "        thucnews_train_texts, thucnews_train_labels = load_text_dataset(os.path.join(THUCNEWS_DATA_DIR, \"train.txt\"))\n",
    "        thucnews_test_texts, thucnews_test_labels = load_text_dataset(os.path.join(THUCNEWS_DATA_DIR, \"test.txt\"))\n",
    "        thucnews_label_map = load_class_labels(THUCNEWS_CLASS_PATH)\n",
    "        thucnews_data = (thucnews_train_texts, thucnews_train_labels, thucnews_test_texts, thucnews_test_labels)\n",
    "\n",
    "        print(f\"新闻标题训练集样本数：{len(thucnews_train_texts)}\")\n",
    "        print(f\"新闻标题测试集样本数：{len(thucnews_test_texts)}\")\n",
    "\n",
    "        results.append(run_cnn(thucnews_data, stopwords, thucnews_label_map))\n",
    "        bar_labels.append(\"CNN\\n新闻\")\n",
    "\n",
    "        results.append(run_rnn(thucnews_data, stopwords, thucnews_label_map))\n",
    "        bar_labels.append(\"RNN\\n新闻\")\n",
    "    else:\n",
    "        print(f\"警告: 新闻标题数据集路径不存在 - {THUCNEWS_DATA_DIR}\")\n",
    "\n",
    "    # Movie review (IMDb) dataset configuration.\n",
    "    IMDB_DATA_DIR = r\"D:\\机器学习\\aclImdb_v1\\aclImdb\"\n",
    "\n",
    "    if os.path.exists(IMDB_DATA_DIR):\n",
    "        imdb_train_texts, imdb_train_labels, imdb_test_texts, imdb_test_labels = load_imdb_data(IMDB_DATA_DIR)\n",
    "        imdb_label_map = {0: 'neg', 1: 'pos'}\n",
    "        imdb_data = (imdb_train_texts, imdb_train_labels, imdb_test_texts, imdb_test_labels)\n",
    "\n",
    "        print(f\"电影评论训练集样本数：{len(imdb_train_texts)}\")\n",
    "        print(f\"电影评论测试集样本数：{len(imdb_test_texts)}\")\n",
    "\n",
    "        # NOTE(review): the Chinese stopword set is also passed to the\n",
    "        # English IMDb run (as in the original); it filters almost nothing.\n",
    "        results.append(run_cnn(imdb_data, stopwords, imdb_label_map))\n",
    "        bar_labels.append(\"CNN\\n电影\")\n",
    "\n",
    "        results.append(run_rnn(imdb_data, stopwords, imdb_label_map))\n",
    "        bar_labels.append(\"RNN\\n电影\")\n",
    "    else:\n",
    "        print(f\"警告: 电影评论数据集路径不存在 - {IMDB_DATA_DIR}\")\n",
    "\n",
    "    if not results:\n",
    "        print(\"没有可用的数据集和结果进行可视化。\")\n",
    "        return\n",
    "\n",
    "    # ------------------------------ Visualization ------------------------------\n",
    "    plt.figure(figsize=(12, 8))\n",
    "    colors = ['#1f77b4', '#ff7f0e', '#2ca02c', '#d62728']\n",
    "\n",
    "    # Accuracy comparison.\n",
    "    plt.subplot(2, 1, 1)\n",
    "    bars = plt.bar(\n",
    "        bar_labels,\n",
    "        [res['test_accuracy'] for res in results],\n",
    "        width=0.4,\n",
    "        color=colors[:len(results)]\n",
    "    )\n",
    "    plt.title('模型准确率对比')\n",
    "    plt.ylabel('准确率')\n",
    "    plt.ylim(0.0, 1.0)\n",
    "    for bar in bars:\n",
    "        height = bar.get_height()\n",
    "        plt.text(bar.get_x() + bar.get_width()/2., height,\n",
    "                f'{height:.4f}',\n",
    "                ha='center', va='bottom')\n",
    "\n",
    "    # Training time comparison.\n",
    "    plt.subplot(2, 1, 2)\n",
    "    bars = plt.bar(\n",
    "        bar_labels,\n",
    "        [res['train_time'] for res in results],\n",
    "        width=0.4,\n",
    "        color=colors[:len(results)]\n",
    "    )\n",
    "    plt.title('模型训练时间对比')\n",
    "    plt.xlabel('数据集/模型')\n",
    "    plt.ylabel('训练时间（秒）')\n",
    "    for bar in bars:\n",
    "        height = bar.get_height()\n",
    "        plt.text(bar.get_x() + bar.get_width()/2., height,\n",
    "                f'{height:.2f}s',\n",
    "                ha='center', va='bottom')\n",
    "\n",
    "    plt.tight_layout()\n",
    "    plt.show()\n",
    "\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()\n"
   ],
   "id": "990b027d61e729a",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-06-12 09:14:13.635804] 文本分类实验开始...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\机器学习\\THUCNews-txt\\train.txt: 180000it [00:00, 991488.31it/s] \n",
      "加载 D:\\机器学习\\THUCNews-txt\\test.txt: 10000it [00:00, 611450.23it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "新闻标题训练集样本数：180000\n",
      "新闻标题测试集样本数：10000\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 180000/180000 [00:12<00:00, 14044.82it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 17137.10it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据集大小: 180000 (过滤前: 180000)\n",
      "数据集大小: 10000 (过滤前: 10000)\n",
      "数据集大小: 162000 (过滤前: 162000)\n",
      "数据集大小: 18000 (过滤前: 18000)\n",
      "使用设备: cpu\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 1/5 [Train]: 100%|██████████| 2532/2532 [01:00<00:00, 41.60it/s]\n",
      "Epoch 1/5 [Val]: 100%|██████████| 282/282 [00:00<00:00, 314.48it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 1/5\n",
      "Train Loss: 1.2822 Acc: 0.5715\n",
      "Val Loss: 0.7255 Acc: 0.7714\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 2/5 [Train]: 100%|██████████| 2532/2532 [00:59<00:00, 42.78it/s]\n",
      "Epoch 2/5 [Val]: 100%|██████████| 282/282 [00:00<00:00, 321.38it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch 2/5\n",
      "Train Loss: 0.6264 Acc: 0.8028\n",
      "Val Loss: 0.5211 Acc: 0.8370\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "Epoch 3/5 [Train]:  20%|█▉        | 499/2532 [00:11<00:49, 41.21it/s]"
     ]
    }
   ],
   "execution_count": null
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
