{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T02:06:52.251480Z",
     "start_time": "2025-05-23T02:06:52.241941Z"
    }
   },
   "cell_type": "code",
   "outputs": [],
   "execution_count": 2,
   "source": [
    "from transformers import AutoModel, AutoTokenizer\n",
    "import torch.nn as nn\n",
    "import torch\n",
    "from torch.nn import functional as F\n",
    "import numpy as np\n",
    "\n",
    "class FastTextModel(nn.Module):\n",
    "    def __init__(self, embedding_file='embedding_SougouNews.npz', class_num=10):\n",
    "        super(FastTextModel, self).__init__()\n",
    "        self.embedding_size = 300\n",
    "        embedding_pretrained = torch.tensor(\n",
    "            np.load(embedding_file)[\"embeddings\"].astype('float32'))\n",
    "        # 定义词嵌入层\n",
    "        self.embedding = nn.Embedding.from_pretrained(embedding_pretrained, freeze=False)\n",
    "        self.fc = nn.Linear(self.embedding_size, class_num)\n",
    "\n",
    "    def forward(self, x):\n",
    "        # 确保输入索引在合法范围内\n",
    "        x = torch.clamp(x, min=0, max=self.embedding.num_embeddings - 1)\n",
    "\n",
    "        # text = [batch size,sent len]\n",
    "        embedded = self.embedding(x).float()\n",
    "        # embedded = [batch size, sent len, emb dim]\n",
    "        pooled = F.avg_pool2d(embedded, (embedded.shape[1], 1)).squeeze(1)\n",
    "        logits = self.fc(pooled)\n",
    "        # pooled = [batch size, embedding_dim]\n",
    "        return logits"
   ],
   "id": "19f210d81442e287"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T02:06:55.196070Z",
     "start_time": "2025-05-23T02:06:55.179321Z"
    }
   },
   "cell_type": "code",
   "outputs": [],
   "execution_count": 3,
   "source": [
    " class TextCNNModel(nn.Module):  # 定义模型\n",
    "    def __init__(self, embedding_file='embedding_SougouNews.npz'):\n",
    "        super(TextCNNModel, self).__init__()\n",
    "        # 加载词向量文件\n",
    "        embedding_pretrained = torch.tensor(\n",
    "            np.load(embedding_file)[\"embeddings\"].astype('float32'))\n",
    "        # 定义词嵌入层\n",
    "        self.embedding = nn.Embedding.from_pretrained(embedding_pretrained, freeze=False)\n",
    "        # 定义三个卷积\n",
    "        self.convs = nn.ModuleList(\n",
    "            [nn.Conv2d(1, 256, (k, 300)) for k in [2, 3, 4]])\n",
    "        # 定义dropout层\n",
    "        self.dropout = nn.Dropout(0.3)\n",
    "        # 定义全连接层\n",
    "        self.fc = nn.Linear(256 * 3, 10)\n",
    "\n",
    "    def conv_and_pool(self, x, conv):  # 定义卷积+激活函数+池化层构成的一个操作块\n",
    "        x = conv(x)  # N,1,32,300 -> N,256,31/30/29,1\n",
    "        x = F.relu(x).squeeze(3)  # x -> N,256,31/30/29\n",
    "        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # x -> N,256,1 -> N,256\n",
    "        return x\n",
    "\n",
    "    def forward(self, x):  # 前向传播\n",
    "        out = self.embedding(x)  # N,32 -> N,32,300\n",
    "        out = out.unsqueeze(1)  # out -> N,1,32,300\n",
    "        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1) # out ->N,768\n",
    "        out = self.dropout(out)\n",
    "        out = self.fc(out)  # N,768 -> N,10\n",
    "        return out"
   ],
   "id": "6b35f010c230cce8"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T02:06:56.609653Z",
     "start_time": "2025-05-23T02:06:56.596684Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class RNNModel(nn.Module):\n",
    "    def __init__(self, embedding_file='embedding_SougouNews.npz', \\\n",
    "                 rnn_type=\"lstm\", hidden_dim=256, class_num=10, n_layers=2, bidirectional=True, dropout=0.3, batch_first=True):\n",
    "        super(RNNModel, self).__init__()\n",
    "        self.rnn_type = rnn_type.lower()\n",
    "        self.bidirectional = bidirectional\n",
    "        self.hidden_dim = hidden_dim\n",
    "        self.n_layers = n_layers\n",
    "        self.batch_first = batch_first\n",
    "        self.embedding_size = 300\n",
    "        embedding_pretrained = torch.tensor(\n",
    "            np.load(embedding_file)[\"embeddings\"].astype('float32'))\n",
    "        # 定义词嵌入层\n",
    "        self.embedding = nn.Embedding.from_pretrained(embedding_pretrained, freeze=False)\n",
    "        \"\"\"\n",
    "            输出序列和最后一个时间步的隐藏状态\n",
    "        if batch_first:\n",
    "            output, hidden = [bitch_size, max_seq, hidden_size * bidirectional] \\ \n",
    "            [num_layers * bidirectional, batch_size, hidden_size]\n",
    "        else:\n",
    "            output, hidden = [bitch_size, max_seq, hidden_size * bidirectional] \\ \n",
    "            [num_layers * bidirectional, max_seq, hidden_size]\n",
    "        \"\"\"\n",
    "        if rnn_type == 'lstm':\n",
    "            self.rnn = nn.LSTM(self.embedding_size,\n",
    "                               hidden_dim,\n",
    "                               num_layers=n_layers,\n",
    "                               bidirectional=bidirectional,\n",
    "                               batch_first=batch_first,\n",
    "                               dropout=dropout)\n",
    "        elif rnn_type == 'gru':\n",
    "            self.rnn = nn.GRU(self.embedding_size,\n",
    "                              hidden_size=hidden_dim,\n",
    "                              num_layers=n_layers,\n",
    "                              bidirectional=bidirectional,\n",
    "                              batch_first=batch_first,\n",
    "                              dropout=dropout)\n",
    "        else:\n",
    "            self.rnn = nn.RNN(self.embedding_size,\n",
    "                              hidden_size=hidden_dim,\n",
    "                              num_layers=n_layers,\n",
    "                              bidirectional=bidirectional,\n",
    "                              batch_first=batch_first,\n",
    "                              dropout=dropout)\n",
    "            \n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "        if self.bidirectional:\n",
    "            self.fc = nn.Linear(self.hidden_dim * 2, class_num)\n",
    "        else:\n",
    "            self.fc = nn.Linear(self.hidden_dim, class_num)\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.embedding(x)\n",
    "        self.rnn.flatten_parameters() # 扁平化\n",
    "        if self.rnn_type in ['rnn', 'gru']:\n",
    "            output, hidden = self.rnn(x)\n",
    "        else:\n",
    "            output, (hidden, cell) = self.rnn(x)\n",
    "        x = output[:, -1, :]\n",
    "        x = self.dropout(x)\n",
    "        logits = self.fc(x)\n",
    "\n",
    "        return logits\n",
    "\n",
    "\"\"\"\n",
    "PLM models\n",
    "\"\"\""
   ],
   "id": "4197c13de9a648dc",
   "outputs": [
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "<>:15: SyntaxWarning: invalid escape sequence '\\ '\n",
      "<>:15: SyntaxWarning: invalid escape sequence '\\ '\n",
      "C:\\Users\\X\\AppData\\Local\\Temp\\ipykernel_15464\\993907795.py:15: SyntaxWarning: invalid escape sequence '\\ '\n",
      "  \"\"\"\n"
     ]
    },
    {
     "data": {
      "text/plain": [
       "'\\nPLM models\\n'"
      ]
     },
     "execution_count": 4,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T02:06:57.716383Z",
     "start_time": "2025-05-23T02:06:57.707224Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ModelForSequenceClassification(nn.Module):\n",
    "    def __init__(self, model_Name, num_classes):\n",
    "        super(ModelForSequenceClassification, self).__init__()\n",
    "        self.model_Name = model_Name\n",
    "        self.model = AutoModel.from_pretrained(model_Name)\n",
    "        if model_Name != \"hfl/chinese-xlnet-base\":\n",
    "            self.dropout = nn.Dropout(self.model.config.hidden_dropout_prob)\n",
    "            self.classifier = nn.Linear(self.model.config.hidden_size, num_classes)\n",
    "        else:\n",
    "            self.dropout = nn.Dropout(self.model.config.summary_last_dropout)\n",
    "            self.classifier = nn.Linear(self.model.config.d_model, num_classes)\n",
    "    \n",
    "    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,\n",
    "                position_ids=None, head_mask=None, inputs_embeds=None):\n",
    "\n",
    "        outputs = self.model(input_ids,\n",
    "                            attention_mask=attention_mask,\n",
    "                            token_type_ids=token_type_ids,\n",
    "                            position_ids=position_ids,\n",
    "                            head_mask=head_mask,\n",
    "                            inputs_embeds=inputs_embeds)\n",
    "        \n",
    "        if self.model_Name == \"hfl/chinese-xlnet-base\":\n",
    "            pooled_output = torch.sum(outputs[0], dim=1)\n",
    "        else:\n",
    "            pooled_output = outputs[1]\n",
    "\n",
    "        pooled_output = self.dropout(pooled_output)\n",
    "        logits = self.classifier(pooled_output)\n",
    "\n",
    "        return logits"
   ],
   "id": "5bf6029846e2fb7e",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T02:06:58.648453Z",
     "start_time": "2025-05-23T02:06:58.641827Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ModelTextCNNForSequenceClassification(nn.Module):\n",
    "    def __init__(self, model_Name, num_classes):\n",
    "        super(ModelTextCNNForSequenceClassification, self).__init__()\n",
    "        self.model_Name = model_Name\n",
    "        self.num_classes = num_classes\n",
    "        self.model = AutoModel.from_pretrained(model_Name)\n",
    "\n",
    "        if model_Name != \"hfl/chinese-xlnet-base\":\n",
    "            self.convs = nn.ModuleList(\n",
    "                [nn.Conv2d(1, 256, (k, self.model.config.hidden_size)) for k in [2, 3, 4]])\n",
    "        else:\n",
    "            self.convs = nn.ModuleList(\n",
    "                [nn.Conv2d(1, 256, (k, self.model.config.d_model)) for k in [2, 3, 4]])\n",
    "        # 定义dropout层\n",
    "        self.dropout_cnn = nn.Dropout(0.3)\n",
    "        # 定义全连接层\n",
    "        self.classifier = nn.Linear(256 * 3, self.num_classes)\n",
    "\n",
    "    def conv_and_pool(self, x, conv):  # 定义卷积+激活函数+池化层构成的一个操作块\n",
    "        x = conv(x)  # N,1,32,300 -> N,256,31/30/29,1\n",
    "        x = F.relu(x).squeeze(3)  # x -> N,256,31/30/29\n",
    "        x = F.max_pool1d(x, x.size(2)).squeeze(2)  # x -> N,256,1 -> N,256\n",
    "        return x\n",
    "    \n",
    "    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,\n",
    "                position_ids=None, head_mask=None, inputs_embeds=None):\n",
    "\n",
    "        outputs = self.model(input_ids,\n",
    "                            attention_mask=attention_mask,\n",
    "                            token_type_ids=token_type_ids,\n",
    "                            position_ids=position_ids,\n",
    "                            head_mask=head_mask,\n",
    "                            inputs_embeds=inputs_embeds)\n",
    "        \n",
    "        seq_output = outputs[0] # [batch_size, seq_len, hidden_size]\n",
    "        out = seq_output.unsqueeze(1) # [batch_size, 1, seq_len, hidden_size]\n",
    "        out = torch.cat([self.conv_and_pool(out, conv) for conv in self.convs], 1)\n",
    "        out = self.dropout_cnn(out)\n",
    "        logits = self.classifier(out)    \n",
    "        return logits"
   ],
   "id": "bb2cc1e22da9fc57",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-23T02:06:59.510701Z",
     "start_time": "2025-05-23T02:06:59.490985Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class ModelRNNForSequenceClassification(nn.Module):\n",
    "    def __init__(self, model_Name, num_classes):\n",
    "        super(ModelRNNForSequenceClassification, self).__init__()\n",
    "        self.model_Name = model_Name\n",
    "        self.num_classes = num_classes\n",
    "        self.rnn_type = \"lstm\"\n",
    "        self.hidden_dim = 256\n",
    "        self.n_layers = 2\n",
    "        self.droprate = 0.3\n",
    "        self.bidirectional = True\n",
    "        self.batch_first = True\n",
    "\n",
    "        self.model = AutoModel.from_pretrained(model_Name)\n",
    "        if model_Name != \"hfl/chinese-xlnet-base\":\n",
    "            self.embedding_size = self.model.config.hidden_size\n",
    "        else:\n",
    "            self.embedding_size = self.model.config.d_model\n",
    "\n",
    "        if self.rnn_type == 'lstm':\n",
    "            self.rnn = nn.LSTM(self.embedding_size,\n",
    "                               self.hidden_dim,\n",
    "                               num_layers=self.n_layers,\n",
    "                               bidirectional=self.bidirectional,\n",
    "                               batch_first=self.batch_first,\n",
    "                               dropout=self.droprate)\n",
    "        elif self.rnn_type == 'gru':\n",
    "            self.rnn = nn.GRU(self.embedding_size,\n",
    "                              hidden_size=self.hidden_dim,\n",
    "                              num_layers=self.n_layers,\n",
    "                              bidirectional=self.bidirectional,\n",
    "                              batch_first=self.batch_first,\n",
    "                              dropout=self.droprate)\n",
    "        else:\n",
    "            self.rnn = nn.RNN(self.embedding_size,\n",
    "                              hidden_size=self.hidden_dim,\n",
    "                              num_layers=self.n_layers,\n",
    "                              bidirectional=self.bidirectional,\n",
    "                              batch_first=self.batch_first,\n",
    "                              dropout=self.droprate)\n",
    "            \n",
    "        self.dropout= nn.Dropout(p=0.3)\n",
    "\n",
    "        if self.bidirectional:\n",
    "            self.classifier = nn.Linear(self.hidden_dim * 2, num_classes)\n",
    "        else:\n",
    "            self.classifier = nn.Linear(self.hidden_dim, num_classes)\n",
    "    \n",
    "    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,\n",
    "                position_ids=None, head_mask=None, inputs_embeds=None):\n",
    "\n",
    "        outputs = self.model(input_ids,\n",
    "                            attention_mask=attention_mask,\n",
    "                            token_type_ids=token_type_ids,\n",
    "                            position_ids=position_ids,\n",
    "                            head_mask=head_mask,\n",
    "                            inputs_embeds=inputs_embeds)\n",
    "        seq_output = outputs[0]\n",
    "        self.rnn.flatten_parameters() # 扁平化\n",
    "        if self.rnn_type in ['rnn', 'gru']:\n",
    "            output, hidden = self.rnn(seq_output)\n",
    "        else:\n",
    "            output, (hidden, cell) = self.rnn(seq_output)\n",
    "        x = output[:, -1, :]\n",
    "        x = self.dropout(x)\n",
    "        logits = self.classifier(x)\n",
    "\n",
    "        return logits\n",
    "    \n",
    "\n",
    "class ModelRCNNForSequenceClassification(nn.Module):\n",
    "    def __init__(self, model_Name, num_classes):\n",
    "        super(ModelRCNNForSequenceClassification, self).__init__()\n",
    "        self.model_Name = model_Name\n",
    "        self.num_classes = num_classes\n",
    "        self.rnn_type = \"lstm\"\n",
    "        self.hidden_dim = 256\n",
    "        self.n_layers = 2\n",
    "        self.droprate = 0.3\n",
    "        self.bidirectional = True\n",
    "        self.batch_first = True\n",
    "\n",
    "        self.model = AutoModel.from_pretrained(model_Name)\n",
    "        if model_Name != \"hfl/chinese-xlnet-base\":\n",
    "            self.embedding_size = self.model.config.hidden_size\n",
    "        else:\n",
    "            self.embedding_size = self.model.config.d_model\n",
    "\n",
    "        if self.rnn_type == 'lstm':\n",
    "            self.rnn = nn.LSTM(self.embedding_size,\n",
    "                               self.hidden_dim,\n",
    "                               num_layers=self.n_layers,\n",
    "                               bidirectional=self.bidirectional,\n",
    "                               batch_first=self.batch_first,\n",
    "                               dropout=self.droprate)\n",
    "        elif self.rnn_type == 'gru':\n",
    "            self.rnn = nn.GRU(self.embedding_size,\n",
    "                              hidden_size=self.hidden_dim,\n",
    "                              num_layers=self.n_layers,\n",
    "                              bidirectional=self.bidirectional,\n",
    "                              batch_first=self.batch_first,\n",
    "                              dropout=self.droprate)\n",
    "        else:\n",
    "            self.rnn = nn.RNN(self.embedding_size,\n",
    "                              hidden_size=self.hidden_dim,\n",
    "                              num_layers=self.n_layers,\n",
    "                              bidirectional=self.bidirectional,\n",
    "                              batch_first=self.batch_first,\n",
    "                              dropout=self.droprate)\n",
    "            \n",
    "        self.dropout= nn.Dropout(p=0.3)\n",
    "        self.maxpool = nn.MaxPool1d(32)\n",
    "        self.ReLU = nn.ReLU()\n",
    "\n",
    "        if self.bidirectional:\n",
    "            self.classifier = nn.Linear(self.hidden_dim * 2 + self.embedding_size, num_classes)\n",
    "        else:\n",
    "            self.classifier = nn.Linear(self.hidden_dim + self.embedding_size, num_classes)\n",
    "\n",
    "    \n",
    "    def forward(self, input_ids=None, attention_mask=None, token_type_ids=None,\n",
    "                position_ids=None, head_mask=None, inputs_embeds=None):\n",
    "\n",
    "        outputs = self.model(input_ids,\n",
    "                            attention_mask=attention_mask,\n",
    "                            token_type_ids=token_type_ids,\n",
    "                            position_ids=position_ids,\n",
    "                            head_mask=head_mask,\n",
    "                            inputs_embeds=inputs_embeds)\n",
    "        seq_output = outputs[0]\n",
    "        self.rnn.flatten_parameters() # 扁平化\n",
    "        if self.rnn_type in ['rnn', 'gru']:\n",
    "            output, hidden = self.rnn(seq_output)\n",
    "        else:\n",
    "            output, (hidden, cell) = self.rnn(seq_output)\n",
    "        x = torch.cat([seq_output, output], dim=2) # 连接Bertmodel 和 rnnmodel 的输出\n",
    "        x = self.ReLU(x) # 非线性变化\n",
    "        x = x.permute(0, 2, 1) # [batch_size, embedding_dim, max_seq]\n",
    "        x = self.maxpool(x).squeeze(2) # [batch_size, embedding_dim]\n",
    "        x = self.dropout(x)\n",
    "        logits = self.classifier(x)\n",
    "\n",
    "        return logits\n"
   ],
   "id": "23597403634c634b",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-25T11:41:44.661381Z",
     "start_time": "2025-05-25T11:41:39.732837Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import numpy as np\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.discriminant_analysis import LinearDiscriminantAnalysis\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.metrics import classification_report\n",
    "import tensorflow as tf\n",
    "from tensorflow.keras.models import Sequential\n",
    "from tensorflow.keras.layers import Embedding, Conv1D, GlobalMaxPooling1D, Dense, Dropout\n",
    "from tensorflow.keras.preprocessing.text import Tokenizer\n",
    "from tensorflow.keras.preprocessing.sequence import pad_sequences\n",
    "import os\n",
    "\n",
    "# ----------------------\n",
    "# 1. 数据读取与预处理\n",
    "# ----------------------\n",
    "def load_thucnews_data(data_path):\n",
    "    \"\"\"加载THUCNews数据集\"\"\"\n",
    "    texts = []\n",
    "    labels = []\n",
    "    \n",
    "    # 假设数据按类别分文件夹存储，每个文件夹名是类别名，文件夹内是文本文件\n",
    "    for label_idx, category in enumerate(os.listdir(data_path)):\n",
    "        category_path = os.path.join(data_path, category)\n",
    "        if not os.path.isdir(category_path):\n",
    "            continue\n",
    "            \n",
    "        for filename in os.listdir(category_path):\n",
    "            file_path = os.path.join(category_path, filename)\n",
    "            try:\n",
    "                with open(file_path, 'r', encoding='utf-8') as f:\n",
    "                    text = f.read()\n",
    "                    texts.append(text)\n",
    "                    labels.append(label_idx)\n",
    "            except Exception as e:\n",
    "                print(f\"Error reading file {file_path}: {e}\")\n",
    "    \n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_text(texts):\n",
    "    \"\"\"文本预处理：分词并去除停用词\"\"\"\n",
    "    # 加载停用词表\n",
    "    stopwords = set()\n",
    "    try:\n",
    "        with open('stopwords.txt', 'r', encoding='utf-8') as f:\n",
    "            for line in f:\n",
    "                stopwords.add(line.strip())\n",
    "    except FileNotFoundError:\n",
    "        print(\"Warning: 未找到停用词表，继续处理但可能影响效果...\")\n",
    "    \n",
    "    # 分词并过滤停用词\n",
    "    processed_texts = []\n",
    "    for text in texts:\n",
    "        words = jieba.cut(text)\n",
    "        filtered_words = [word for word in words if word not in stopwords and word.strip()]\n",
    "        processed_texts.append(' '.join(filtered_words))\n",
    "    \n",
    "    return processed_texts\n",
    "\n",
    "# ----------------------\n",
    "# 2. 特征工程\n",
    "# ----------------------\n",
    "def create_tfidf_features(texts, max_features=10000):\n",
    "    \"\"\"使用TF-IDF提取文本特征\"\"\"\n",
    "    vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))\n",
    "    X = vectorizer.fit_transform(texts)\n",
    "    return X, vectorizer\n",
    "\n",
    "def create_embedding_features(texts, vocab_size=10000, max_length=200, embedding_dim=300):\n",
    "    \"\"\"使用预训练词向量创建特征\"\"\"\n",
    "    # 分词\n",
    "    tokenizer = Tokenizer(num_words=vocab_size)\n",
    "    tokenizer.fit_on_texts(texts)\n",
    "    sequences = tokenizer.texts_to_sequences(texts)\n",
    "    \n",
    "    # 填充序列\n",
    "    X = pad_sequences(sequences, maxlen=max_length)\n",
    "    \n",
    "    # 加载预训练词向量\n",
    "    try:\n",
    "        embedding_data = np.load('embedding_SougouNews.npz')\n",
    "        embedding_matrix = embedding_data['embeddings']\n",
    "        \n",
    "        # 确保词向量维度匹配\n",
    "        if embedding_matrix.shape[1] != embedding_dim:\n",
    "            print(f\"警告：预训练词向量维度为{embedding_matrix.shape[1]}，但指定维度为{embedding_dim}，将调整...\")\n",
    "            embedding_dim = embedding_matrix.shape[1]\n",
    "    except Exception as e:\n",
    "        print(f\"无法加载预训练词向量: {e}，将使用随机初始化\")\n",
    "        embedding_matrix = np.random.rand(vocab_size, embedding_dim)\n",
    "    \n",
    "    return X, embedding_matrix, tokenizer.word_index\n",
    "\n",
    "# ----------------------\n",
    "# 3. 模型训练与评估\n",
    "# ----------------------\n",
    "def train_linear_model(X_train, y_train, X_test, y_test):\n",
    "    \"\"\"训练逻辑回归模型\"\"\"\n",
    "    model = LogisticRegression(C=1.0, penalty='l2', solver='liblinear', multi_class='ovr')\n",
    "    model.fit(X_train, y_train)\n",
    "    \n",
    "    # 评估\n",
    "    y_pred = model.predict(X_test)\n",
    "    report = classification_report(y_test, y_pred, digits=4)\n",
    "    return model, report\n",
    "\n",
    "def train_lda_model(X_train, y_train, X_test, y_test):\n",
    "    \"\"\"训练LDA模型\"\"\"\n",
    "    model = LinearDiscriminantAnalysis(solver='svd')\n",
    "    model.fit(X_train, y_train)\n",
    "    \n",
    "    # 评估\n",
    "    y_pred = model.predict(X_test)\n",
    "    report = classification_report(y_test, y_pred, digits=4)\n",
    "    return model, report\n",
    "\n",
    "def train_svm_model(X_train, y_train, X_test, y_test):\n",
    "    \"\"\"训练SVM模型\"\"\"\n",
    "    model = SVC(kernel='rbf', C=1.0, gamma='scale', probability=True)\n",
    "    model.fit(X_train, y_train)\n",
    "    \n",
    "    # 评估\n",
    "    y_pred = model.predict(X_test)\n",
    "    report = classification_report(y_test, y_pred, digits=4)\n",
    "    return model, report\n",
    "\n",
    "\n",
    "# ----------------------\n",
    "# 4. 主流程\n",
    "# ----------------------\n",
    "def main():\n",
    "    # 1. 加载数据\n",
    "    print(\"加载数据...\")\n",
    "    texts, labels = load_thucnews_data(\"THUCNews\")  # 替换为实际数据路径\n",
    "    \n",
    "    # 2. 预处理\n",
    "    print(\"预处理文本...\")\n",
    "    processed_texts = preprocess_text(texts)\n",
    "    \n",
    "    # 3. 划分训练集和测试集\n",
    "    print(\"划分数据集...\")\n",
    "    X_train_text, X_test_text, y_train, y_test = train_test_split(\n",
    "        processed_texts, labels, test_size=0.2, random_state=42)\n",
    "    \n",
    "    # 4. 创建特征 (TF-IDF用于传统模型，词向量用于CNN)\n",
    "    print(\"创建TF-IDF特征...\")\n",
    "    X_train_tfidf, vectorizer = create_tfidf_features(X_train_text)\n",
    "    X_test_tfidf = vectorizer.transform(X_test_text)\n",
    "    \n",
    "    print(\"创建词向量特征...\")\n",
    "    X_train_emb, embedding_matrix, word_index = create_embedding_features(X_train_text + X_test_text)\n",
    "    X_test_emb = X_train_emb[len(X_train_text):]\n",
    "    X_train_emb = X_train_emb[:len(X_train_text)]\n",
    "    \n",
    "    # 5. 训练和评估模型\n",
    "    print(\"\\n==== 训练逻辑回归模型 ====\")\n",
    "    linear_model, linear_report = train_linear_model(X_train_tfidf, y_train, X_test_tfidf, y_test)\n",
    "    print(linear_report)\n",
    "    \n",
    "    print(\"\\n==== 训练LDA模型 ====\")\n",
    "    lda_model, lda_report = train_lda_model(X_train_tfidf.toarray(), y_train, X_test_tfidf.toarray(), y_test)\n",
    "    print(lda_report)\n",
    "    \n",
    "    print(\"\\n==== 训练SVM模型 ====\")\n",
    "    svm_model, svm_report = train_svm_model(X_train_tfidf, y_train, X_test_tfidf, y_test)\n",
    "    print(svm_report)\n",
    "    \n",
    "    \n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "95832d2f7436b6f1",
   "outputs": [
    {
     "ename": "ImportError",
     "evalue": "Traceback (most recent call last):\n  File \"D:\\python\\anaconda1\\Lib\\site-packages\\tensorflow\\python\\pywrap_tensorflow.py\", line 73, in <module>\n    from tensorflow.python._pywrap_tensorflow_internal import *\nImportError: DLL load failed while importing _pywrap_tensorflow_internal: 动态链接库(DLL)初始化例程失败。\n\n\nFailed to load the native TensorFlow runtime.\nSee https://www.tensorflow.org/install/errors for some common causes and solutions.\nIf you need help, create an issue at https://github.com/tensorflow/tensorflow/issues and include the entire stack trace above this error message.",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mImportError\u001B[0m                               Traceback (most recent call last)",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\tensorflow\\python\\pywrap_tensorflow.py:73\u001B[0m\n\u001B[0;32m     72\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m---> 73\u001B[0m   \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mpython\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01m_pywrap_tensorflow_internal\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m \u001B[38;5;241m*\u001B[39m\n\u001B[0;32m     74\u001B[0m \u001B[38;5;66;03m# This try catch logic is because there is no bazel equivalent for py_extension.\u001B[39;00m\n\u001B[0;32m     75\u001B[0m \u001B[38;5;66;03m# Externally in opensource we must enable exceptions to load the shared object\u001B[39;00m\n\u001B[0;32m     76\u001B[0m \u001B[38;5;66;03m# by exposing the PyInit symbols with pybind. This error will only be\u001B[39;00m\n\u001B[0;32m     77\u001B[0m \u001B[38;5;66;03m# caught internally or if someone changes the name of the target _pywrap_tensorflow_internal.\u001B[39;00m\n\u001B[0;32m     78\u001B[0m \n\u001B[0;32m     79\u001B[0m \u001B[38;5;66;03m# This logic is used in other internal projects using py_extension.\u001B[39;00m\n",
      "\u001B[1;31mImportError\u001B[0m: DLL load failed while importing _pywrap_tensorflow_internal: 动态链接库(DLL)初始化例程失败。",
      "\nDuring handling of the above exception, another exception occurred:\n",
      "\u001B[1;31mImportError\u001B[0m                               Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[10], line 10\u001B[0m\n\u001B[0;32m      8\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01msklearn\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01msvm\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m SVC\n\u001B[0;32m      9\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01msklearn\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mmetrics\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m classification_report\n\u001B[1;32m---> 10\u001B[0m \u001B[38;5;28;01mimport\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m \u001B[38;5;28;01mas\u001B[39;00m \u001B[38;5;21;01mtf\u001B[39;00m\n\u001B[0;32m     11\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mkeras\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mmodels\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m Sequential\n\u001B[0;32m     12\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mkeras\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mlayers\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m Embedding, Conv1D, GlobalMaxPooling1D, Dense, Dropout\n",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\tensorflow\\__init__.py:40\u001B[0m\n\u001B[0;32m     37\u001B[0m _os\u001B[38;5;241m.\u001B[39menviron\u001B[38;5;241m.\u001B[39msetdefault(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mENABLE_RUNTIME_UPTIME_TELEMETRY\u001B[39m\u001B[38;5;124m\"\u001B[39m, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m1\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m     39\u001B[0m \u001B[38;5;66;03m# Do not remove this line; See https://github.com/tensorflow/tensorflow/issues/42596\u001B[39;00m\n\u001B[1;32m---> 40\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mpython\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m pywrap_tensorflow \u001B[38;5;28;01mas\u001B[39;00m _pywrap_tensorflow  \u001B[38;5;66;03m# pylint: disable=unused-import\u001B[39;00m\n\u001B[0;32m     41\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mpython\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mtools\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m module_util \u001B[38;5;28;01mas\u001B[39;00m _module_util\n\u001B[0;32m     42\u001B[0m \u001B[38;5;28;01mfrom\u001B[39;00m \u001B[38;5;21;01mtensorflow\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mpython\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mutil\u001B[39;00m\u001B[38;5;21;01m.\u001B[39;00m\u001B[38;5;21;01mlazy_loader\u001B[39;00m \u001B[38;5;28;01mimport\u001B[39;00m KerasLazyLoader \u001B[38;5;28;01mas\u001B[39;00m _KerasLazyLoader\n",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\tensorflow\\python\\pywrap_tensorflow.py:88\u001B[0m\n\u001B[0;32m     86\u001B[0m     sys\u001B[38;5;241m.\u001B[39msetdlopenflags(_default_dlopen_flags)\n\u001B[0;32m     87\u001B[0m \u001B[38;5;28;01mexcept\u001B[39;00m \u001B[38;5;167;01mImportError\u001B[39;00m:\n\u001B[1;32m---> 88\u001B[0m   \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mImportError\u001B[39;00m(\n\u001B[0;32m     89\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;132;01m{\u001B[39;00mtraceback\u001B[38;5;241m.\u001B[39mformat_exc()\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m'\u001B[39m\n\u001B[0;32m     90\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124mFailed to load the native TensorFlow runtime.\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m'\u001B[39m\n\u001B[0;32m     91\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mSee https://www.tensorflow.org/install/errors \u001B[39m\u001B[38;5;124m'\u001B[39m\n\u001B[0;32m     92\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mfor some common causes and solutions.\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m'\u001B[39m\n\u001B[0;32m     93\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mIf you need help, create an issue \u001B[39m\u001B[38;5;124m'\u001B[39m\n\u001B[0;32m     94\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mat https://github.com/tensorflow/tensorflow/issues \u001B[39m\u001B[38;5;124m'\u001B[39m\n\u001B[0;32m     95\u001B[0m       \u001B[38;5;124mf\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mand include the entire stack trace above this error message.\u001B[39m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m     97\u001B[0m \u001B[38;5;66;03m# pylint: enable=wildcard-import,g-import-not-at-top,unused-import,line-too-long\u001B[39;00m\n",
      "\u001B[1;31mImportError\u001B[0m: Traceback (most recent call last):\n  File \"D:\\python\\anaconda1\\Lib\\site-packages\\tensorflow\\python\\pywrap_tensorflow.py\", line 73, in <module>\n    from tensorflow.python._pywrap_tensorflow_internal import *\nImportError: DLL load failed while importing _pywrap_tensorflow_internal: 动态链接库(DLL)初始化例程失败。\n\n\nFailed to load the native TensorFlow runtime.\nSee https://www.tensorflow.org/install/errors for some common causes and solutions.\nIf you need help, create an issue at https://github.com/tensorflow/tensorflow/issues and include the entire stack trace above this error message."
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-25T12:03:06.894734Z",
     "start_time": "2025-05-25T12:03:06.840480Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "import time\n",
    "import joblib\n",
    "from tqdm import tqdm\n",
    "\n",
    "# ----------------------\n",
    "# 1. 数据加载与预处理\n",
    "# ----------------------\n",
    "def load_class_labels(class_path):\n",
    "    \"\"\"加载类别标签映射（如果存在class.txt）\"\"\"\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for line in f:\n",
    "                idx, name = line.strip().split('\\t')\n",
    "                label_map[int(idx)] = name\n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"加载文本数据集（train/dev/test.txt）\"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for line in tqdm(f, desc=f\"加载 {file_path}\"):\n",
    "            parts = line.strip().split('\\t', 1)  # 按第一个制表符分割\n",
    "            if len(parts) != 2:\n",
    "                continue  # 跳过格式错误的行\n",
    "            label, text = parts\n",
    "            labels.append(int(label))\n",
    "            texts.append(text)\n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"分词并过滤停用词\"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        words = jieba.cut(text)\n",
    "        filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "        processed.append(' '.join(filtered))\n",
    "    return processed\n",
    "\n",
    "# ----------------------\n",
    "# 2. 特征工程\n",
    "# ----------------------\n",
    "def extract_tfidf_features(train_texts, val_texts, test_texts, max_features=10000):\n",
    "    \"\"\"使用TF-IDF提取文本特征\"\"\"\n",
    "    vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_texts)\n",
    "    X_val = vectorizer.transform(val_texts)\n",
    "    X_test = vectorizer.transform(test_texts)\n",
    "    return X_train, X_val, X_test, vectorizer\n",
    "\n",
    "# ----------------------\n",
    "# 3. 模型训练与评估\n",
    "# ----------------------\n",
    "def train_and_evaluate(models, X_train, y_train, X_val, y_val, X_test, y_test, label_map):\n",
    "    \"\"\"训练并评估所有模型\"\"\"\n",
    "    results = {}\n",
    "    for model_name, model in models.items():\n",
    "        # 训练模型\n",
    "        start_time = time.time()\n",
    "        model.fit(X_train, y_train)\n",
    "        train_time = time.time() - start_time\n",
    "        \n",
    "        # 评估测试集\n",
    "        y_pred = model.predict(X_test)\n",
    "        accuracy = accuracy_score(y_test, y_pred)\n",
    "        report = classification_report(y_test, y_pred, target_names=label_map.values())\n",
    "        \n",
    "        print(f\"\\n{model_name} 准确率: {accuracy:.4f}\")\n",
    "        print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "        print(f\"分类报告:\\n{report}\")\n",
    "        \n",
    "        results[model_name] = {\n",
    "            'model': model,\n",
    "            'accuracy': accuracy,\n",
    "            'train_time': train_time,\n",
    "            'report': report\n",
    "        }\n",
    "    return results\n",
    "\n",
    "# ----------------------\n",
    "# 4. 主流程\n",
    "# ----------------------\n",
    "def main():\n",
    "    # 配置参数\n",
    "    DATA_DIR = r\"D://jiqixuexi//mytest//wangyuzhen//shangjier//THUCNews-txt\"  # 数据集目录\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"  # 停用词表路径\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")  # 类别标签路径\n",
    "    FEATURE_METHOD = \"tfidf\"  # 特征提取方法：tfidf 或 embedding（需额外实现）\n",
    "    MAX_FEATURES = 10000  # TF-IDF最大特征数\n",
    "    \n",
    "    # 1. 加载数据集\n",
    "    print(\"加载数据集...\")\n",
    "    train_path = os.path.join(DATA_DIR, \"train.txt\")\n",
    "    val_path = os.path.join(DATA_DIR, \"dev.txt\")\n",
    "    test_path = os.path.join(DATA_DIR, \"test.txt\")\n",
    "    \n",
    "    train_texts, train_labels = load_text_dataset(train_path)\n",
    "    val_texts, val_labels = load_text_dataset(val_path)\n",
    "    test_texts, test_labels = load_text_dataset(test_path)\n",
    "    \n",
    "    print(f\"训练集样本数: {len(train_texts)}\")\n",
    "    print(f\"验证集样本数: {len(val_texts)}\")\n",
    "    print(f\"测试集样本数: {len(test_texts)}\")\n",
    "    \n",
    "    # 2. 加载停用词表\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "    else:\n",
    "        print(\"警告: 未找到停用词表，跳过过滤...\")\n",
    "    \n",
    "    # 3. 预处理文本\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 4. 提取TF-IDF特征\n",
    "    X_train, X_val, X_test, vectorizer = extract_tfidf_features(\n",
    "        train_processed, val_processed, test_processed, MAX_FEATURES\n",
    "    )\n",
    "    \n",
    "    # 5. 定义六种模型\n",
    "    models = {\n",
    "        \"逻辑回归\": LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1, max_iter=1000),\n",
    "        \"支持向量机\": SVC(kernel='rbf', probability=True, gamma='scale'),\n",
    "        \"决策树\": DecisionTreeClassifier(max_depth=5),\n",
    "        \"随机森林\": RandomForestClassifier(n_estimators=100, n_jobs=-1),\n",
    "        \"K近邻\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1),\n",
    "        \"朴素贝叶斯\": MultinomialNB()\n",
    "    }\n",
    "    \n",
    "    # 6. 加载类别标签（可选）\n",
    "    label_map = load_class_labels(CLASS_PATH) or {idx: f\"类别{idx}\" for idx in set(train_labels)}\n",
    "    \n",
    "    # 7. 训练与评估\n",
    "    results = train_and_evaluate(models, X_train, train_labels, X_val, val_labels, X_test, test_labels, label_map)\n",
    "    \n",
    "    # 8. 保存最佳模型\n",
    "    best_model_name = max(results, key=lambda k: results[k]['accuracy'])\n",
    "    joblib.dump(results[best_model_name]['model'], f\"thucnews_{best_model_name.lower()}_model.pkl\")\n",
    "    print(f\"\\n最佳模型: {best_model_name}, 准确率: {results[best_model_name]['accuracy']:.4f}\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "cbbc91c7f3cc6fed",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "加载数据集...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D://jiqixuexi//mytest//wangyuzhen//shangjier//THUCNews-txt\\train.txt: 0it [00:00, ?it/s]\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "invalid literal for int() with base 10: '中华女子学院：本科层次仅1专业招男生'",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mValueError\u001B[0m                                Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[16], line 159\u001B[0m\n\u001B[0;32m    156\u001B[0m     \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m最佳模型: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mbest_model_name\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m, 准确率: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mresults[best_model_name][\u001B[38;5;124m'\u001B[39m\u001B[38;5;124maccuracy\u001B[39m\u001B[38;5;124m'\u001B[39m]\u001B[38;5;132;01m:\u001B[39;00m\u001B[38;5;124m.4f\u001B[39m\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m    158\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;18m__name__\u001B[39m \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m__main__\u001B[39m\u001B[38;5;124m\"\u001B[39m:\n\u001B[1;32m--> 159\u001B[0m     \u001B[43mmain\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[16], line 111\u001B[0m, in \u001B[0;36mmain\u001B[1;34m()\u001B[0m\n\u001B[0;32m    108\u001B[0m val_path \u001B[38;5;241m=\u001B[39m os\u001B[38;5;241m.\u001B[39mpath\u001B[38;5;241m.\u001B[39mjoin(DATA_DIR, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mdev.txt\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m    109\u001B[0m test_path \u001B[38;5;241m=\u001B[39m os\u001B[38;5;241m.\u001B[39mpath\u001B[38;5;241m.\u001B[39mjoin(DATA_DIR, \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mtest.txt\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[1;32m--> 111\u001B[0m train_texts, train_labels \u001B[38;5;241m=\u001B[39m \u001B[43mload_text_dataset\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtrain_path\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    112\u001B[0m val_texts, val_labels \u001B[38;5;241m=\u001B[39m load_text_dataset(val_path)\n\u001B[0;32m    113\u001B[0m test_texts, test_labels \u001B[38;5;241m=\u001B[39m load_text_dataset(test_path)\n",
      "Cell \u001B[1;32mIn[16], line 41\u001B[0m, in \u001B[0;36mload_text_dataset\u001B[1;34m(file_path)\u001B[0m\n\u001B[0;32m     39\u001B[0m             \u001B[38;5;28;01mcontinue\u001B[39;00m  \u001B[38;5;66;03m# 跳过格式错误的行\u001B[39;00m\n\u001B[0;32m     40\u001B[0m         label, text \u001B[38;5;241m=\u001B[39m parts\n\u001B[1;32m---> 41\u001B[0m         labels\u001B[38;5;241m.\u001B[39mappend(\u001B[38;5;28;43mint\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43mlabel\u001B[49m\u001B[43m)\u001B[49m)\n\u001B[0;32m     42\u001B[0m         texts\u001B[38;5;241m.\u001B[39mappend(text)\n\u001B[0;32m     43\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m texts, labels\n",
      "\u001B[1;31mValueError\u001B[0m: invalid literal for int() with base 10: '中华女子学院：本科层次仅1专业招男生'"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-25T12:12:28.322033Z",
     "start_time": "2025-05-25T12:12:27.844201Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "import time\n",
    "import joblib\n",
    "from tqdm import tqdm\n",
    "\n",
    "# ----------------------\n",
    "# 1. 数据加载与预处理\n",
    "# ----------------------\n",
    "def load_class_labels(class_path):\n",
    "    \"\"\"加载类别标签映射（如果存在class.txt）\"\"\"\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for line in f:\n",
    "                idx, name = line.strip().split('\\t')\n",
    "                label_map[int(idx)] = name\n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"加载文本数据集（train/dev/test.txt），处理格式为'标题：标题 标签数字'的行\"\"\"\n",
    "    texts, labels = [], []\n",
    "    error_lines = []\n",
    "    \n",
    "    print(f\"加载文件: {file_path}\")\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for i, line in enumerate(tqdm(f, desc=f\"解析 {file_path}\")):\n",
    "            line = line.strip()\n",
    "            if not line:\n",
    "                continue\n",
    "                \n",
    "            # 从行末提取最后一个空格后的数字作为标签\n",
    "            last_space_pos = line.rfind(' ')\n",
    "            if last_space_pos == -1:\n",
    "                error_lines.append(f\"行 {i+1}: 未找到空格分隔符 - {line[:50]}...\")\n",
    "                continue\n",
    "                \n",
    "            label_str = line[last_space_pos+1:].strip()\n",
    "            text = line[:last_space_pos].strip()\n",
    "            \n",
    "            # 检查标签是否为有效数字\n",
    "            if not label_str.isdigit():\n",
    "                error_lines.append(f\"行 {i+1}: 标签不是数字 - {label_str}\")\n",
    "                continue\n",
    "                \n",
    "            labels.append(int(label_str))\n",
    "            texts.append(text)\n",
    "    \n",
    "    # 打印错误摘要\n",
    "    if error_lines:\n",
    "        print(f\"发现 {len(error_lines)} 条格式错误数据，占比: {len(error_lines)/(i+1):.2%}\")\n",
    "    \n",
    "    print(f\"成功加载 {len(texts)} 条数据\")\n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"分词并过滤停用词\"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        words = jieba.cut(text)\n",
    "        filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "        processed.append(' '.join(filtered))\n",
    "    return processed\n",
    "\n",
    "# ----------------------\n",
    "# 2. 特征工程\n",
    "# ----------------------\n",
    "def extract_tfidf_features(train_texts, val_texts, test_texts, max_features=10000):\n",
    "    \"\"\"使用TF-IDF提取文本特征\"\"\"\n",
    "    vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_texts)\n",
    "    X_val = vectorizer.transform(val_texts)\n",
    "    X_test = vectorizer.transform(test_texts)\n",
    "    return X_train, X_val, X_test, vectorizer\n",
    "\n",
    "# ----------------------\n",
    "# 3. 模型训练与评估\n",
    "# ----------------------\n",
    "def train_and_evaluate(models, X_train, y_train, X_val, y_val, X_test, y_test, label_map):\n",
    "    \"\"\"训练并评估所有模型\"\"\"\n",
    "    results = {}\n",
    "    for model_name, model in models.items():\n",
    "        # 训练模型\n",
    "        start_time = time.time()\n",
    "        model.fit(X_train, y_train)\n",
    "        train_time = time.time() - start_time\n",
    "        \n",
    "        # 评估测试集\n",
    "        y_pred = model.predict(X_test)\n",
    "        accuracy = accuracy_score(y_test, y_pred)\n",
    "        report = classification_report(y_test, y_pred, target_names=label_map.values())\n",
    "        \n",
    "        print(f\"\\n{model_name} 准确率: {accuracy:.4f}\")\n",
    "        print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "        print(f\"分类报告:\\n{report}\")\n",
    "        \n",
    "        results[model_name] = {\n",
    "            'model': model,\n",
    "            'accuracy': accuracy,\n",
    "            'train_time': train_time,\n",
    "            'report': report\n",
    "        }\n",
    "    return results\n",
    "\n",
    "# ----------------------\n",
    "# 4. 主流程\n",
    "# ----------------------\n",
    "def main():\n",
    "    # 配置参数\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"  # 修正路径格式\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"  # 停用词表路径\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")  # 类别标签路径\n",
    "    FEATURE_METHOD = \"tfidf\"  # 特征提取方法：tfidf 或 embedding（需额外实现）\n",
    "    MAX_FEATURES = 10000  # TF-IDF最大特征数\n",
    "    \n",
    "    # 检查数据集目录是否存在\n",
    "    if not os.path.exists(DATA_DIR):\n",
    "        print(f\"错误：数据集目录 {DATA_DIR} 不存在！\")\n",
    "        exit(1)\n",
    "    \n",
    "    # 列出目录内容（调试用）\n",
    "    print(f\"\\n{DATA_DIR} 目录内容:\")\n",
    "    for item in os.listdir(DATA_DIR):\n",
    "        print(f\"- {item}\")\n",
    "    \n",
    "    # 1. 加载数据集\n",
    "    print(\"\\n加载数据集...\")\n",
    "    train_path = os.path.join(DATA_DIR, \"train.txt\")\n",
    "    val_path = os.path.join(DATA_DIR, \"dev.txt\")\n",
    "    test_path = os.path.join(DATA_DIR, \"test.txt\")\n",
    "    \n",
    "    # 验证数据格式\n",
    "    print(\"\\n=== 验证数据格式 ===\")\n",
    "    with open(train_path, 'r', encoding='utf-8') as f:\n",
    "        sample = f.readline().strip()\n",
    "        print(f\"样本行: {sample}\")\n",
    "        \n",
    "        # 尝试解析样本\n",
    "        last_space_pos = sample.rfind(' ')\n",
    "        if last_space_pos == -1:\n",
    "            print(\"格式错误：未找到空格分隔符\")\n",
    "        else:\n",
    "            label_str = sample[last_space_pos+1:].strip()\n",
    "            text = sample[:last_space_pos].strip()\n",
    "            print(f\"解析结果 - 标签: {label_str}, 文本前30字: {text[:30]}...\")\n",
    "    \n",
    "    train_texts, train_labels = load_text_dataset(train_path)\n",
    "    val_texts, val_labels = load_text_dataset(val_path)\n",
    "    test_texts, test_labels = load_text_dataset(test_path)\n",
    "    \n",
    "    print(f\"训练集样本数: {len(train_texts)}\")\n",
    "    print(f\"验证集样本数: {len(val_texts)}\")\n",
    "    print(f\"测试集样本数: {len(test_texts)}\")\n",
    "    \n",
    "    # 2. 加载停用词表\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "    else:\n",
    "        print(\"警告: 未找到停用词表，跳过过滤...\")\n",
    "    \n",
    "    # 3. 预处理文本\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 4. 提取TF-IDF特征\n",
    "    X_train, X_val, X_test, vectorizer = extract_tfidf_features(\n",
    "        train_processed, val_processed, test_processed, MAX_FEATURES\n",
    "    )\n",
    "    \n",
    "    # 5. 定义六种模型\n",
    "    models = {\n",
    "        \"逻辑回归\": LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1, max_iter=1000),\n",
    "        \"支持向量机\": SVC(kernel='rbf', probability=True, gamma='scale'),\n",
    "        \"决策树\": DecisionTreeClassifier(max_depth=5),\n",
    "        \"随机森林\": RandomForestClassifier(n_estimators=100, n_jobs=-1),\n",
    "        \"K近邻\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1),\n",
    "        \"朴素贝叶斯\": MultinomialNB()\n",
    "    }\n",
    "    \n",
    "    # 6. 加载类别标签（可选）\n",
    "    label_map = load_class_labels(CLASS_PATH) or {idx: f\"类别{idx}\" for idx in set(train_labels)}\n",
    "    \n",
    "    # 7. 训练与评估\n",
    "    print(\"\\n开始模型训练与评估...\")\n",
    "    results = train_and_evaluate(models, X_train, train_labels, X_val, val_labels, X_test, test_labels, label_map)\n",
    "    \n",
    "    # 8. 保存最佳模型\n",
    "    best_model_name = max(results, key=lambda k: results[k]['accuracy'])\n",
    "    joblib.dump(results[best_model_name]['model'], f\"thucnews_{best_model_name.lower()}_model.pkl\")\n",
    "    print(f\"\\n最佳模型: {best_model_name}, 准确率: {results[best_model_name]['accuracy']:.4f}\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "dc451193f074ae1c",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt 目录内容:\n",
      "- class.txt\n",
      "- dev.txt\n",
      "- embedding_SougouNews.npz\n",
      "- embedding_Tencent.npz\n",
      "- test.txt\n",
      "- tianchiyi.py\n",
      "- train.txt\n",
      "- vocab.pkl\n",
      "\n",
      "加载数据集...\n",
      "\n",
      "=== 验证数据格式 ===\n",
      "样本行: 中华女子学院：本科层次仅1专业招男生\t3\n",
      "格式错误：未找到空格分隔符\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 848511.20it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "发现 180000 条格式错误数据，占比: 100.00%\n",
      "成功加载 0 条数据\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, 1085510.49it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "发现 10000 条格式错误数据，占比: 100.00%\n",
      "成功加载 0 条数据\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 877120.81it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "发现 10000 条格式错误数据，占比: 100.00%\n",
      "成功加载 0 条数据\n",
      "训练集样本数: 0\n",
      "验证集样本数: 0\n",
      "测试集样本数: 0\n",
      "警告: 未找到停用词表，跳过过滤...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 0it [00:00, ?it/s]\n",
      "文本预处理: 0it [00:00, ?it/s]\n",
      "文本预处理: 0it [00:00, ?it/s]\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "empty vocabulary; perhaps the documents only contain stop words",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mValueError\u001B[0m                                Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[17], line 208\u001B[0m\n\u001B[0;32m    205\u001B[0m     \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m最佳模型: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mbest_model_name\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m, 准确率: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00mresults[best_model_name][\u001B[38;5;124m'\u001B[39m\u001B[38;5;124maccuracy\u001B[39m\u001B[38;5;124m'\u001B[39m]\u001B[38;5;132;01m:\u001B[39;00m\u001B[38;5;124m.4f\u001B[39m\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m    207\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;18m__name__\u001B[39m \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m__main__\u001B[39m\u001B[38;5;124m\"\u001B[39m:\n\u001B[1;32m--> 208\u001B[0m     \u001B[43mmain\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[17], line 181\u001B[0m, in \u001B[0;36mmain\u001B[1;34m()\u001B[0m\n\u001B[0;32m    178\u001B[0m test_processed \u001B[38;5;241m=\u001B[39m preprocess_texts(test_texts, stopwords)\n\u001B[0;32m    180\u001B[0m \u001B[38;5;66;03m# 4. 提取TF-IDF特征\u001B[39;00m\n\u001B[1;32m--> 181\u001B[0m X_train, X_val, X_test, vectorizer \u001B[38;5;241m=\u001B[39m \u001B[43mextract_tfidf_features\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    182\u001B[0m \u001B[43m    \u001B[49m\u001B[43mtrain_processed\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mval_processed\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mtest_processed\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mMAX_FEATURES\u001B[49m\n\u001B[0;32m    183\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    185\u001B[0m \u001B[38;5;66;03m# 5. 定义六种模型\u001B[39;00m\n\u001B[0;32m    186\u001B[0m models \u001B[38;5;241m=\u001B[39m {\n\u001B[0;32m    187\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m逻辑回归\u001B[39m\u001B[38;5;124m\"\u001B[39m: LogisticRegression(solver\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mlbfgs\u001B[39m\u001B[38;5;124m'\u001B[39m, multi_class\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mauto\u001B[39m\u001B[38;5;124m'\u001B[39m, n_jobs\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m-\u001B[39m\u001B[38;5;241m1\u001B[39m, max_iter\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1000\u001B[39m),\n\u001B[0;32m    188\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m支持向量机\u001B[39m\u001B[38;5;124m\"\u001B[39m: SVC(kernel\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mrbf\u001B[39m\u001B[38;5;124m'\u001B[39m, probability\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mTrue\u001B[39;00m, gamma\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mscale\u001B[39m\u001B[38;5;124m'\u001B[39m),\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    192\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m朴素贝叶斯\u001B[39m\u001B[38;5;124m\"\u001B[39m: MultinomialNB()\n\u001B[0;32m    193\u001B[0m }\n",
      "Cell \u001B[1;32mIn[17], line 83\u001B[0m, in \u001B[0;36mextract_tfidf_features\u001B[1;34m(train_texts, val_texts, test_texts, max_features)\u001B[0m\n\u001B[0;32m     81\u001B[0m \u001B[38;5;250m\u001B[39m\u001B[38;5;124;03m\"\"\"使用TF-IDF提取文本特征\"\"\"\u001B[39;00m\n\u001B[0;32m     82\u001B[0m vectorizer \u001B[38;5;241m=\u001B[39m TfidfVectorizer(max_features\u001B[38;5;241m=\u001B[39mmax_features, ngram_range\u001B[38;5;241m=\u001B[39m(\u001B[38;5;241m1\u001B[39m, \u001B[38;5;241m2\u001B[39m))\n\u001B[1;32m---> 83\u001B[0m X_train \u001B[38;5;241m=\u001B[39m \u001B[43mvectorizer\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfit_transform\u001B[49m\u001B[43m(\u001B[49m\u001B[43mtrain_texts\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     84\u001B[0m X_val \u001B[38;5;241m=\u001B[39m vectorizer\u001B[38;5;241m.\u001B[39mtransform(val_texts)\n\u001B[0;32m     85\u001B[0m X_test \u001B[38;5;241m=\u001B[39m vectorizer\u001B[38;5;241m.\u001B[39mtransform(test_texts)\n",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:2138\u001B[0m, in \u001B[0;36mTfidfVectorizer.fit_transform\u001B[1;34m(self, raw_documents, y)\u001B[0m\n\u001B[0;32m   2131\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_check_params()\n\u001B[0;32m   2132\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_tfidf \u001B[38;5;241m=\u001B[39m TfidfTransformer(\n\u001B[0;32m   2133\u001B[0m     norm\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mnorm,\n\u001B[0;32m   2134\u001B[0m     use_idf\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39muse_idf,\n\u001B[0;32m   2135\u001B[0m     smooth_idf\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msmooth_idf,\n\u001B[0;32m   2136\u001B[0m     sublinear_tf\u001B[38;5;241m=\u001B[39m\u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39msublinear_tf,\n\u001B[0;32m   2137\u001B[0m )\n\u001B[1;32m-> 2138\u001B[0m X \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43msuper\u001B[39;49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfit_transform\u001B[49m\u001B[43m(\u001B[49m\u001B[43mraw_documents\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   2139\u001B[0m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39m_tfidf\u001B[38;5;241m.\u001B[39mfit(X)\n\u001B[0;32m   2140\u001B[0m \u001B[38;5;66;03m# X is already a transformed view of raw_documents so\u001B[39;00m\n\u001B[0;32m   2141\u001B[0m \u001B[38;5;66;03m# we set copy to False\u001B[39;00m\n",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\base.py:1474\u001B[0m, in \u001B[0;36m_fit_context.<locals>.decorator.<locals>.wrapper\u001B[1;34m(estimator, *args, **kwargs)\u001B[0m\n\u001B[0;32m   1467\u001B[0m     estimator\u001B[38;5;241m.\u001B[39m_validate_params()\n\u001B[0;32m   1469\u001B[0m \u001B[38;5;28;01mwith\u001B[39;00m config_context(\n\u001B[0;32m   1470\u001B[0m     skip_parameter_validation\u001B[38;5;241m=\u001B[39m(\n\u001B[0;32m   1471\u001B[0m         prefer_skip_nested_validation \u001B[38;5;129;01mor\u001B[39;00m global_skip_validation\n\u001B[0;32m   1472\u001B[0m     )\n\u001B[0;32m   1473\u001B[0m ):\n\u001B[1;32m-> 1474\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mfit_method\u001B[49m\u001B[43m(\u001B[49m\u001B[43mestimator\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:1389\u001B[0m, in \u001B[0;36mCountVectorizer.fit_transform\u001B[1;34m(self, raw_documents, y)\u001B[0m\n\u001B[0;32m   1381\u001B[0m             warnings\u001B[38;5;241m.\u001B[39mwarn(\n\u001B[0;32m   1382\u001B[0m                 \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mUpper case characters found in\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m   1383\u001B[0m                 \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m vocabulary while \u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mlowercase\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m   1384\u001B[0m                 \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m is True. These entries will not\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m   1385\u001B[0m                 \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m be matched with any documents\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m   1386\u001B[0m             )\n\u001B[0;32m   1387\u001B[0m             \u001B[38;5;28;01mbreak\u001B[39;00m\n\u001B[1;32m-> 1389\u001B[0m vocabulary, X \u001B[38;5;241m=\u001B[39m \u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_count_vocab\u001B[49m\u001B[43m(\u001B[49m\u001B[43mraw_documents\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mfixed_vocabulary_\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m   1391\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;28mself\u001B[39m\u001B[38;5;241m.\u001B[39mbinary:\n\u001B[0;32m   1392\u001B[0m     X\u001B[38;5;241m.\u001B[39mdata\u001B[38;5;241m.\u001B[39mfill(\u001B[38;5;241m1\u001B[39m)\n",
      "File \u001B[1;32mD:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\feature_extraction\\text.py:1295\u001B[0m, in \u001B[0;36mCountVectorizer._count_vocab\u001B[1;34m(self, raw_documents, fixed_vocab)\u001B[0m\n\u001B[0;32m   1293\u001B[0m     vocabulary \u001B[38;5;241m=\u001B[39m \u001B[38;5;28mdict\u001B[39m(vocabulary)\n\u001B[0;32m   1294\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;129;01mnot\u001B[39;00m vocabulary:\n\u001B[1;32m-> 1295\u001B[0m         \u001B[38;5;28;01mraise\u001B[39;00m \u001B[38;5;167;01mValueError\u001B[39;00m(\n\u001B[0;32m   1296\u001B[0m             \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124mempty vocabulary; perhaps the documents only contain stop words\u001B[39m\u001B[38;5;124m\"\u001B[39m\n\u001B[0;32m   1297\u001B[0m         )\n\u001B[0;32m   1299\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m indptr[\u001B[38;5;241m-\u001B[39m\u001B[38;5;241m1\u001B[39m] \u001B[38;5;241m>\u001B[39m np\u001B[38;5;241m.\u001B[39miinfo(np\u001B[38;5;241m.\u001B[39mint32)\u001B[38;5;241m.\u001B[39mmax:  \u001B[38;5;66;03m# = 2**31 - 1\u001B[39;00m\n\u001B[0;32m   1300\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m _IS_32BIT:\n",
      "\u001B[1;31mValueError\u001B[0m: empty vocabulary; perhaps the documents only contain stop words"
     ]
    }
   ],
   "execution_count": 17
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-25T12:18:17.106207Z",
     "start_time": "2025-05-25T12:18:16.793099Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "import time\n",
    "import joblib\n",
    "from tqdm import tqdm\n",
    "\n",
    "# ----------------------\n",
    "# 1. 数据加载与预处理\n",
    "# ----------------------\n",
    "def load_class_labels(class_path):\n",
    "    \"\"\"加载类别标签映射（如果存在class.txt）\"\"\"\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for line in f:\n",
    "                idx, name = line.strip().split('\\t')\n",
    "                label_map[int(idx)] = name\n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"加载文本数据集，处理格式为'标题：标题 标签数字'的行\"\"\"\n",
    "    texts, labels = [], []\n",
    "    error_lines = []\n",
    "    empty_lines = 0\n",
    "    \n",
    "    print(f\"加载文件: {file_path}\")\n",
    "    \n",
    "    # 检查文件是否存在\n",
    "    if not os.path.exists(file_path):\n",
    "        print(f\"错误：文件 {file_path} 不存在！\")\n",
    "        return [], []\n",
    "    \n",
    "    # 尝试不同编码读取\n",
    "    encodings = ['utf-8', 'gbk', 'gb2312']\n",
    "    content = None\n",
    "    \n",
    "    for encoding in encodings:\n",
    "        try:\n",
    "            with open(file_path, 'r', encoding=encoding) as f:\n",
    "                content = f.read()\n",
    "                print(f\"成功使用 {encoding} 编码读取文件\")\n",
    "                break\n",
    "        except UnicodeDecodeError:\n",
    "            continue\n",
    "    \n",
    "    if content is None:\n",
    "        print(f\"无法使用任何编码读取文件: {file_path}\")\n",
    "        return [], []\n",
    "    \n",
    "    # 检查文件是否为空\n",
    "    if not content.strip():\n",
    "        print(f\"错误：文件为空！\")\n",
    "        return [], []\n",
    "    \n",
    "    # 按行分割并处理\n",
    "    lines = content.split('\\n')\n",
    "    total_lines = len(lines)\n",
    "    \n",
    "    print(f\"文件总行数: {total_lines}\")\n",
    "    \n",
    "    for i, line in enumerate(tqdm(lines, desc=f\"解析 {file_path}\")):\n",
    "        line = line.strip()\n",
    "        if not line:\n",
    "            empty_lines += 1\n",
    "            continue\n",
    "            \n",
    "        # 从行末提取最后一个空格后的数字作为标签\n",
    "        last_space_pos = line.rfind(' ')\n",
    "        if last_space_pos == -1:\n",
    "            error_lines.append(f\"行 {i+1}: 未找到空格分隔符 - {line[:50]}...\")\n",
    "            continue\n",
    "            \n",
    "        label_str = line[last_space_pos+1:].strip()\n",
    "        text = line[:last_space_pos].strip()\n",
    "        \n",
    "        # 检查标签是否为有效数字\n",
    "        if not label_str.isdigit():\n",
    "            error_lines.append(f\"行 {i+1}: 标签不是数字 - {label_str}\")\n",
    "            continue\n",
    "            \n",
    "        labels.append(int(label_str))\n",
    "        texts.append(text)\n",
    "    \n",
    "    # 打印详细统计信息\n",
    "    valid_count = len(texts)\n",
    "    print(f\"成功加载 {valid_count} 条数据\")\n",
    "    print(f\"格式错误的行: {len(error_lines)} ({len(error_lines)/total_lines:.2%})\")\n",
    "    print(f\"空行: {empty_lines} ({empty_lines/total_lines:.2%})\")\n",
    "    \n",
    "    if error_lines:\n",
    "        print(f\"显示前5条错误行:\")\n",
    "        for line in error_lines[:5]:\n",
    "            print(f\"  - {line}\")\n",
    "        if len(error_lines) > 5:\n",
    "            print(f\"  ...等 {len(error_lines)-5} 行\")\n",
    "    \n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"分词并过滤停用词，添加异常处理\"\"\"\n",
    "    processed = []\n",
    "    empty_count = 0\n",
    "    \n",
    "    for i, text in enumerate(tqdm(texts, desc=\"文本预处理\")):\n",
    "        # 简单文本清洗\n",
    "        cleaned_text = text.strip()\n",
    "        if not cleaned_text:\n",
    "            empty_count += 1\n",
    "            processed.append(\"空文本\")  # 防止空文本\n",
    "            continue\n",
    "            \n",
    "        # 使用jieba分词，添加HMM模型提高未登录词识别率\n",
    "        jieba.enable_hmm()\n",
    "        words = jieba.cut(cleaned_text)\n",
    "        \n",
    "        # 过滤停用词和空词\n",
    "        filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "        \n",
    "        # 确保至少有一个词，避免空词汇表\n",
    "        if not filtered:\n",
    "            filtered = [\"无关键词\"]\n",
    "            empty_count += 1\n",
    "            \n",
    "        processed.append(' '.join(filtered))\n",
    "    \n",
    "    if empty_count > 0:\n",
    "        print(f\"警告：{empty_count} 条文本预处理后为空或仅含停用词\")\n",
    "    \n",
    "    return processed\n",
    "\n",
    "# ----------------------\n",
    "# 2. 特征工程\n",
    "# ----------------------\n",
    "def extract_tfidf_features(train_texts, val_texts, test_texts, max_features=10000):\n",
    "    \"\"\"使用TF-IDF提取文本特征，添加空词汇表检查\"\"\"\n",
    "    print(\"\\n=== 提取TF-IDF特征 ===\")\n",
    "    \n",
    "    # 检查是否有文本为空\n",
    "    empty_train = sum(1 for t in train_texts if not t.strip())\n",
    "    empty_val = sum(1 for t in val_texts if not t.strip())\n",
    "    empty_test = sum(1 for t in test_texts if not t.strip())\n",
    "    \n",
    "    if empty_train > 0:\n",
    "        print(f\"警告：训练集包含 {empty_train} 个空文本\")\n",
    "    if empty_val > 0:\n",
    "        print(f\"警告：验证集包含 {empty_val} 个空文本\")\n",
    "    if empty_test > 0:\n",
    "        print(f\"警告：测试集包含 {empty_test} 个空文本\")\n",
    "    \n",
    "    # 提取特征\n",
    "    vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))\n",
    "    \n",
    "    try:\n",
    "        X_train = vectorizer.fit_transform(train_texts)\n",
    "        X_val = vectorizer.transform(val_texts)\n",
    "        X_test = vectorizer.transform(test_texts)\n",
    "        \n",
    "        # 检查词汇表是否为空\n",
    "        vocabulary = vectorizer.vocabulary_\n",
    "        if not vocabulary:\n",
    "            raise ValueError(\"生成的词汇表为空！请检查文本预处理步骤。\")\n",
    "        \n",
    "        print(f\"词汇表大小: {len(vocabulary)}\")\n",
    "        print(f\"训练集特征矩阵形状: {X_train.shape}\")\n",
    "        \n",
    "        return X_train, X_val, X_test, vectorizer\n",
    "    \n",
    "    except Exception as e:\n",
    "        print(f\"特征提取失败: {e}\")\n",
    "        # 返回空矩阵和None\n",
    "        return None, None, None, None\n",
    "\n",
    "# ----------------------\n",
    "# 3. 模型训练与评估\n",
    "# ----------------------\n",
    "def train_and_evaluate(models, X_train, y_train, X_val, y_val, X_test, y_test, label_map):\n",
    "    \"\"\"训练并评估所有模型\"\"\"\n",
    "    results = {}\n",
    "    \n",
    "    if X_train is None or X_train.shape[0] == 0:\n",
    "        print(\"错误：训练数据为空或特征提取失败，无法进行模型训练！\")\n",
    "        return results\n",
    "    \n",
    "    for model_name, model in models.items():\n",
    "        # 训练模型\n",
    "        print(f\"\\n训练 {model_name} 模型...\")\n",
    "        start_time = time.time()\n",
    "        \n",
    "        try:\n",
    "            model.fit(X_train, y_train)\n",
    "            train_time = time.time() - start_time\n",
    "            \n",
    "            # 评估测试集\n",
    "            y_pred = model.predict(X_test)\n",
    "            accuracy = accuracy_score(y_test, y_pred)\n",
    "            report = classification_report(y_test, y_pred, target_names=label_map.values())\n",
    "            \n",
    "            print(f\"{model_name} 准确率: {accuracy:.4f}\")\n",
    "            print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "            print(f\"分类报告:\\n{report}\")\n",
    "            \n",
    "            results[model_name] = {\n",
    "                'model': model,\n",
    "                'accuracy': accuracy,\n",
    "                'train_time': train_time,\n",
    "                'report': report\n",
    "            }\n",
    "        except Exception as e:\n",
    "            print(f\"{model_name} 训练失败: {e}\")\n",
    "    \n",
    "    return results\n",
    "\n",
    "# ----------------------\n",
    "# 4. 主流程\n",
    "# ----------------------\n",
    "def main():\n",
    "    # 配置参数\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"  # 数据集目录\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"  # 停用词表路径\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")  # 类别标签路径\n",
    "    FEATURE_METHOD = \"tfidf\"  # 特征提取方法：tfidf 或 embedding（需额外实现）\n",
    "    MAX_FEATURES = 10000  # TF-IDF最大特征数\n",
    "    \n",
    "    print(\"=== 新闻文本分类系统 ===\")\n",
    "    \n",
    "    # 检查数据集目录是否存在\n",
    "    if not os.path.exists(DATA_DIR):\n",
    "        print(f\"错误：数据集目录 {DATA_DIR} 不存在！\")\n",
    "        exit(1)\n",
    "    \n",
    "    # 列出目录内容（调试用）\n",
    "    print(f\"\\n{DATA_DIR} 目录内容:\")\n",
    "    for item in os.listdir(DATA_DIR):\n",
    "        print(f\"- {item}\")\n",
    "    \n",
    "    # 1. 加载数据集\n",
    "    print(\"\\n加载数据集...\")\n",
    "    train_path = os.path.join(DATA_DIR, \"train.txt\")\n",
    "    val_path = os.path.join(DATA_DIR, \"dev.txt\")\n",
    "    test_path = os.path.join(DATA_DIR, \"test.txt\")\n",
    "    \n",
    "    # 验证数据格式\n",
    "    print(\"\\n=== 验证数据格式 ===\")\n",
    "    with open(train_path, 'r', encoding='utf-8') as f:\n",
    "        sample = f.readline().strip()\n",
    "        print(f\"样本行: {sample}\")\n",
    "        \n",
    "        # 尝试解析样本\n",
    "        last_space_pos = sample.rfind(' ')\n",
    "        if last_space_pos == -1:\n",
    "            print(\"格式错误：未找到空格分隔符\")\n",
    "        else:\n",
    "            label_str = sample[last_space_pos+1:].strip()\n",
    "            text = sample[:last_space_pos].strip()\n",
    "            print(f\"解析结果 - 标签: {label_str}, 文本前30字: {text[:30]}...\")\n",
    "    \n",
    "    train_texts, train_labels = load_text_dataset(train_path)\n",
    "    val_texts, val_labels = load_text_dataset(val_path)\n",
    "    test_texts, test_labels = load_text_dataset(test_path)\n",
    "    \n",
    "    print(f\"\\n数据集统计:\")\n",
    "    print(f\"训练集样本数: {len(train_texts)}\")\n",
    "    print(f\"验证集样本数: {len(val_texts)}\")\n",
    "    print(f\"测试集样本数: {len(test_texts)}\")\n",
    "    \n",
    "    # 检查标签分布\n",
    "    print(\"\\n=== 标签分布 ===\")\n",
    "    for split_name, labels in [(\"训练集\", train_labels), (\"验证集\", val_labels), (\"测试集\", test_labels)]:\n",
    "        if labels:\n",
    "            label_counts = pd.Series(labels).value_counts().sort_index()\n",
    "            print(f\"\\n{split_name} 标签分布:\")\n",
    "            for label, count in label_counts.items():\n",
    "                print(f\"  标签 {label}: {count} 条数据 ({count/len(labels):.2%})\")\n",
    "        else:\n",
    "            print(f\"\\n{split_name} 标签数据为空！\")\n",
    "    \n",
    "    # 2. 加载停用词表\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        print(f\"\\n加载停用词表: {STOPWORDS_PATH}\")\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "        print(f\"停用词数量: {len(stopwords)}\")\n",
    "        \n",
    "        # 检查常用词是否被错误包含\n",
    "        common_words = [\"中国\", \"北京\", \"大学\", \"人民\", \"发展\", \"社会\"]\n",
    "        for word in common_words:\n",
    "            if word in stopwords:\n",
    "                print(f\"警告：停用词表包含常用词 '{word}'，可能影响分词效果\")\n",
    "    else:\n",
    "        print(\"警告: 未找到停用词表，跳过过滤...\")\n",
    "    \n",
    "    # 3. 预处理文本\n",
    "    print(\"\\n开始文本预处理...\")\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 显示预处理后的样本\n",
    "    print(\"\\n=== 预处理后样本 ===\")\n",
    "    for i in range(min(3, len(train_processed))):\n",
    "        print(f\"训练样本 {i+1}:\")\n",
    "        print(f\"  原始: {train_texts[i][:50]}...\")\n",
    "        print(f\"  处理: {train_processed[i][:50]}...\")\n",
    "        print()\n",
    "    \n",
    "    # 4. 提取TF-IDF特征\n",
    "    print(\"\\n提取特征...\")\n",
    "    X_train, X_val, X_test, vectorizer = extract_tfidf_features(\n",
    "        train_processed, val_processed, test_processed, MAX_FEATURES\n",
    "    )\n",
    "    \n",
    "    if X_train is None:\n",
    "        print(\"特征提取失败，程序终止！\")\n",
    "        exit(1)\n",
    "    \n",
    "    # 5. 定义六种模型\n",
    "    print(\"\\n初始化模型...\")\n",
    "    models = {\n",
    "        \"逻辑回归\": LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1, max_iter=1000),\n",
    "        \"支持向量机\": SVC(kernel='rbf', probability=True, gamma='scale'),\n",
    "        \"决策树\": DecisionTreeClassifier(max_depth=5),\n",
    "        \"随机森林\": RandomForestClassifier(n_estimators=100, n_jobs=-1),\n",
    "        \"K近邻\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1),\n",
    "        \"朴素贝叶斯\": MultinomialNB()\n",
    "    }\n",
    "    \n",
    "    # 6. 加载类别标签（可选）\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    if label_map:\n",
    "        print(f\"\\n加载类别映射: {len(label_map)} 个类别\")\n",
    "        for idx, name in label_map.items():\n",
    "            print(f\"  {idx}: {name}\")\n",
    "    else:\n",
    "        print(\"\\n未找到类别映射文件，使用数字标签\")\n",
    "        label_map = {idx: f\"类别{idx}\" for idx in set(train_labels)}\n",
    "    \n",
    "    # 7. 训练与评估\n",
    "    print(\"\\n开始模型训练与评估...\")\n",
    "    results = train_and_evaluate(models, X_train, train_labels, X_val, val_labels, X_test, test_labels, label_map)\n",
    "    \n",
    "    # 8. 保存最佳模型\n",
    "    if results:\n",
    "        best_model_name = max(results, key=lambda k: results[k]['accuracy'])\n",
    "        joblib.dump(results[best_model_name]['model'], f\"thucnews_{best_model_name.lower()}_model.pkl\")\n",
    "        print(f\"\\n最佳模型: {best_model_name}, 准确率: {results[best_model_name]['accuracy']:.4f}\")\n",
    "    else:\n",
    "        print(\"\\n没有模型训练成功！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "8d4796cf481fabf",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=== 新闻文本分类系统 ===\n",
      "\n",
      "D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt 目录内容:\n",
      "- class.txt\n",
      "- dev.txt\n",
      "- embedding_SougouNews.npz\n",
      "- embedding_Tencent.npz\n",
      "- test.txt\n",
      "- tianchiyi.py\n",
      "- train.txt\n",
      "- vocab.pkl\n",
      "\n",
      "加载数据集...\n",
      "\n",
      "=== 验证数据格式 ===\n",
      "样本行: 中华女子学院：本科层次仅1专业招男生\t3\n",
      "格式错误：未找到空格分隔符\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt\n",
      "成功使用 utf-8 编码读取文件\n",
      "文件总行数: 180001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 100%|██████████| 180001/180001 [00:00<00:00, 1488094.74it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功加载 0 条数据\n",
      "格式错误的行: 180000 (100.00%)\n",
      "空行: 1 (0.00%)\n",
      "显示前5条错误行:\n",
      "  - 行 1: 未找到空格分隔符 - 中华女子学院：本科层次仅1专业招男生\t3...\n",
      "  - 行 2: 未找到空格分隔符 - 两天价网站背后重重迷雾：做个网站究竟要多少钱\t4...\n",
      "  - 行 3: 未找到空格分隔符 - 东5环海棠公社230-290平2居准现房98折优惠\t1...\n",
      "  - 行 4: 标签不是数字 - 不希望英德战踢点球\t7\n",
      "  - 行 5: 未找到空格分隔符 - 82岁老太为学生做饭扫地44年获授港大荣誉院士\t5...\n",
      "  ...等 179995 行\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt\n",
      "成功使用 utf-8 编码读取文件\n",
      "文件总行数: 10001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 100%|██████████| 10001/10001 [00:00<00:00, 1554464.86it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功加载 0 条数据\n",
      "格式错误的行: 10000 (99.99%)\n",
      "空行: 1 (0.01%)\n",
      "显示前5条错误行:\n",
      "  - 行 1: 标签不是数字 - 倚天屠龙记十大创新概览\t8\n",
      "  - 行 2: 未找到空格分隔符 - 60年铁树开花形状似玉米芯(组图)\t5...\n",
      "  - 行 3: 未找到空格分隔符 - 同步A股首秀：港股缩量回调\t2...\n",
      "  - 行 4: 标签不是数字 - 兔子舞热辣表演\t8\n",
      "  - 行 5: 未找到空格分隔符 - 锌价难续去年辉煌\t0...\n",
      "  ...等 9995 行\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt\n",
      "成功使用 utf-8 编码读取文件\n",
      "文件总行数: 10001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 100%|██████████| 10001/10001 [00:00<00:00, 1345842.99it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功加载 0 条数据\n",
      "格式错误的行: 10000 (99.99%)\n",
      "空行: 1 (0.01%)\n",
      "显示前5条错误行:\n",
      "  - 行 1: 标签不是数字 - 08年考研暑期英语复习全指南\t3\n",
      "  - 行 2: 未找到空格分隔符 - 中国人民公安大学2012年硕士研究生目录及书目\t3...\n",
      "  - 行 3: 未找到空格分隔符 - 日本地震：金吉列关注在日学子系列报道\t3...\n",
      "  - 行 4: 未找到空格分隔符 - 名师辅导：2012考研英语虚拟语气三种用法\t3...\n",
      "  - 行 5: 未找到空格分隔符 - 自考经验谈：自考生毕业论文选题技巧\t3...\n",
      "  ...等 9995 行\n",
      "\n",
      "数据集统计:\n",
      "训练集样本数: 0\n",
      "验证集样本数: 0\n",
      "测试集样本数: 0\n",
      "\n",
      "=== 标签分布 ===\n",
      "\n",
      "训练集 标签数据为空！\n",
      "\n",
      "验证集 标签数据为空！\n",
      "\n",
      "测试集 标签数据为空！\n",
      "警告: 未找到停用词表，跳过过滤...\n",
      "\n",
      "开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 0it [00:00, ?it/s]\n",
      "文本预处理: 0it [00:00, ?it/s]\n",
      "文本预处理: 0it [00:00, ?it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== 预处理后样本 ===\n",
      "\n",
      "提取特征...\n",
      "\n",
      "=== 提取TF-IDF特征 ===\n",
      "特征提取失败: empty vocabulary; perhaps the documents only contain stop words\n",
      "特征提取失败，程序终止！\n",
      "\n",
      "初始化模型...\n"
     ]
    },
    {
     "ename": "ValueError",
     "evalue": "not enough values to unpack (expected 2, got 1)",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mValueError\u001B[0m                                Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[18], line 363\u001B[0m\n\u001B[0;32m    360\u001B[0m         \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124m\"\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m没有模型训练成功！\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n\u001B[0;32m    362\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m \u001B[38;5;18m__name__\u001B[39m \u001B[38;5;241m==\u001B[39m \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m__main__\u001B[39m\u001B[38;5;124m\"\u001B[39m:\n\u001B[1;32m--> 363\u001B[0m     \u001B[43mmain\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n",
      "Cell \u001B[1;32mIn[18], line 341\u001B[0m, in \u001B[0;36mmain\u001B[1;34m()\u001B[0m\n\u001B[0;32m    331\u001B[0m models \u001B[38;5;241m=\u001B[39m {\n\u001B[0;32m    332\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m逻辑回归\u001B[39m\u001B[38;5;124m\"\u001B[39m: LogisticRegression(solver\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mlbfgs\u001B[39m\u001B[38;5;124m'\u001B[39m, multi_class\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mauto\u001B[39m\u001B[38;5;124m'\u001B[39m, n_jobs\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m-\u001B[39m\u001B[38;5;241m1\u001B[39m, max_iter\u001B[38;5;241m=\u001B[39m\u001B[38;5;241m1000\u001B[39m),\n\u001B[0;32m    333\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m支持向量机\u001B[39m\u001B[38;5;124m\"\u001B[39m: SVC(kernel\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mrbf\u001B[39m\u001B[38;5;124m'\u001B[39m, probability\u001B[38;5;241m=\u001B[39m\u001B[38;5;28;01mTrue\u001B[39;00m, gamma\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mscale\u001B[39m\u001B[38;5;124m'\u001B[39m),\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    337\u001B[0m     \u001B[38;5;124m\"\u001B[39m\u001B[38;5;124m朴素贝叶斯\u001B[39m\u001B[38;5;124m\"\u001B[39m: MultinomialNB()\n\u001B[0;32m    338\u001B[0m }\n\u001B[0;32m    340\u001B[0m \u001B[38;5;66;03m# 6. 加载类别标签（可选）\u001B[39;00m\n\u001B[1;32m--> 341\u001B[0m label_map \u001B[38;5;241m=\u001B[39m \u001B[43mload_class_labels\u001B[49m\u001B[43m(\u001B[49m\u001B[43mCLASS_PATH\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m    342\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m label_map:\n\u001B[0;32m    343\u001B[0m     \u001B[38;5;28mprint\u001B[39m(\u001B[38;5;124mf\u001B[39m\u001B[38;5;124m\"\u001B[39m\u001B[38;5;130;01m\\n\u001B[39;00m\u001B[38;5;124m加载类别映射: \u001B[39m\u001B[38;5;132;01m{\u001B[39;00m\u001B[38;5;28mlen\u001B[39m(label_map)\u001B[38;5;132;01m}\u001B[39;00m\u001B[38;5;124m 个类别\u001B[39m\u001B[38;5;124m\"\u001B[39m)\n",
      "Cell \u001B[1;32mIn[18], line 28\u001B[0m, in \u001B[0;36mload_class_labels\u001B[1;34m(class_path)\u001B[0m\n\u001B[0;32m     26\u001B[0m     \u001B[38;5;28;01mwith\u001B[39;00m \u001B[38;5;28mopen\u001B[39m(class_path, \u001B[38;5;124m'\u001B[39m\u001B[38;5;124mr\u001B[39m\u001B[38;5;124m'\u001B[39m, encoding\u001B[38;5;241m=\u001B[39m\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mutf-8\u001B[39m\u001B[38;5;124m'\u001B[39m) \u001B[38;5;28;01mas\u001B[39;00m f:\n\u001B[0;32m     27\u001B[0m         \u001B[38;5;28;01mfor\u001B[39;00m line \u001B[38;5;129;01min\u001B[39;00m f:\n\u001B[1;32m---> 28\u001B[0m             idx, name \u001B[38;5;241m=\u001B[39m line\u001B[38;5;241m.\u001B[39mstrip()\u001B[38;5;241m.\u001B[39msplit(\u001B[38;5;124m'\u001B[39m\u001B[38;5;130;01m\\t\u001B[39;00m\u001B[38;5;124m'\u001B[39m)\n\u001B[0;32m     29\u001B[0m             label_map[\u001B[38;5;28mint\u001B[39m(idx)] \u001B[38;5;241m=\u001B[39m name\n\u001B[0;32m     30\u001B[0m \u001B[38;5;28;01mreturn\u001B[39;00m label_map\n",
      "\u001B[1;31mValueError\u001B[0m: not enough values to unpack (expected 2, got 1)"
     ]
    }
   ],
   "execution_count": 18
  },
  {
   "metadata": {
    "jupyter": {
     "is_executing": true
    },
    "ExecuteTime": {
     "start_time": "2025-05-25T13:13:53.044239Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.svm import SVC\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.naive_bayes import MultinomialNB\n",
    "from sklearn.metrics import accuracy_score, classification_report, confusion_matrix\n",
    "from sklearn.preprocessing import StandardScaler\n",
    "import time\n",
    "import joblib\n",
    "from tqdm import tqdm\n",
    "\n",
    "# ----------------------\n",
    "# 1. 数据加载与预处理\n",
    "# ----------------------\n",
    "def load_class_labels(class_path):\n",
    "    \"\"\"加载类别标签映射，支持纯类别名称格式\"\"\"\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            # 按行读取类别名称，自动分配标签ID（0, 1, 2...）\n",
    "            for idx, line in enumerate(f):\n",
    "                category = line.strip()\n",
    "                if category:\n",
    "                    label_map[idx] = category\n",
    "                else:\n",
    "                    print(f\"警告：class.txt 第 {idx+1} 行为空，跳过\")\n",
    "    \n",
    "    if label_map:\n",
    "        print(f\"成功加载 {len(label_map)} 个类别，标签ID范围：0 ~ {len(label_map)-1}\")\n",
    "    else:\n",
    "        print(\"警告：未从class.txt中加载到有效类别，将使用数字标签\")\n",
    "    \n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"加载文本数据集，使用制表符 \\t 分隔标签和文本\"\"\"\n",
    "    texts, labels = [], []\n",
    "    error_lines = []\n",
    "    \n",
    "    print(f\"加载文件: {file_path}\")\n",
    "    \n",
    "    # 检查文件是否存在\n",
    "    if not os.path.exists(file_path):\n",
    "        print(f\"错误：文件 {file_path} 不存在！\")\n",
    "        return [], []\n",
    "    \n",
    "    # 尝试不同编码读取\n",
    "    encodings = ['utf-8', 'gbk', 'gb2312']\n",
    "    content = None\n",
    "    \n",
    "    for encoding in encodings:\n",
    "        try:\n",
    "            with open(file_path, 'r', encoding=encoding) as f:\n",
    "                content = f.read()\n",
    "                print(f\"成功使用 {encoding} 编码读取文件\")\n",
    "                break\n",
    "        except UnicodeDecodeError:\n",
    "            continue\n",
    "    \n",
    "    if content is None:\n",
    "        print(f\"无法使用任何编码读取文件: {file_path}\")\n",
    "        return [], []\n",
    "    \n",
    "    # 检查文件是否为空\n",
    "    if not content.strip():\n",
    "        print(f\"错误：文件为空！\")\n",
    "        return [], []\n",
    "    \n",
    "    # 按行分割并处理\n",
    "    lines = content.split('\\n')\n",
    "    total_lines = len(lines)\n",
    "    \n",
    "    print(f\"文件总行数: {total_lines}\")\n",
    "    \n",
    "    for i, line in enumerate(tqdm(lines, desc=f\"解析 {file_path}\")):\n",
    "        line = line.strip()\n",
    "        if not line:\n",
    "            continue\n",
    "            \n",
    "        # 使用制表符从右侧分割一次\n",
    "        parts = line.rsplit('\\t', 1)\n",
    "        if len(parts) != 2:\n",
    "            error_lines.append(f\"行 {i+1}: 未找到制表符分隔符 - {line[:50]}...\")\n",
    "            continue\n",
    "            \n",
    "        text, label_str = parts\n",
    "        text = text.strip()\n",
    "        label_str = label_str.strip()\n",
    "        \n",
    "        if not label_str.isdigit():\n",
    "            error_lines.append(f\"行 {i+1}: 标签不是数字 - {label_str}\")\n",
    "            continue\n",
    "            \n",
    "        labels.append(int(label_str))\n",
    "        texts.append(text)\n",
    "    \n",
    "    # 打印错误摘要\n",
    "    if error_lines:\n",
    "        print(f\"发现 {len(error_lines)} 条格式错误数据，占比: {len(error_lines)/(i+1):.2%}\")\n",
    "    print(f\"成功加载 {len(texts)} 条数据\")\n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"分词并过滤停用词，兼容jieba版本问题\"\"\"\n",
    "    processed = []\n",
    "    empty_count = 0\n",
    "    \n",
    "    # 尝试使用HMM参数替代enable_hmm()\n",
    "    try:\n",
    "        for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "            cleaned_text = text.strip()\n",
    "            if not cleaned_text:\n",
    "                empty_count += 1\n",
    "                processed.append(\"空文本\")\n",
    "                continue\n",
    "                \n",
    "            words = jieba.cut(cleaned_text, HMM=True)  # 使用HMM参数\n",
    "            filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "            \n",
    "            if not filtered:\n",
    "                filtered = [\"无关键词\"]\n",
    "                empty_count += 1\n",
    "                \n",
    "            processed.append(' '.join(filtered))\n",
    "    \n",
    "    except Exception as e:\n",
    "        print(f\"警告：使用HMM分词失败，原因: {e}，回退到默认模式\")\n",
    "        \n",
    "        # 回退到默认分词模式\n",
    "        for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "            cleaned_text = text.strip()\n",
    "            if not cleaned_text:\n",
    "                empty_count += 1\n",
    "                processed.append(\"空文本\")\n",
    "                continue\n",
    "                \n",
    "            words = jieba.cut(cleaned_text)  # 默认模式\n",
    "            filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "            \n",
    "            if not filtered:\n",
    "                filtered = [\"无关键词\"]\n",
    "                empty_count += 1\n",
    "                \n",
    "            processed.append(' '.join(filtered))\n",
    "    \n",
    "    if empty_count > 0:\n",
    "        print(f\"警告：{empty_count} 条文本预处理后为空或仅含停用词\")\n",
    "    \n",
    "    return processed\n",
    "\n",
    "# ----------------------\n",
    "# 2. 特征工程\n",
    "# ----------------------\n",
    "def extract_tfidf_features(train_texts, val_texts, test_texts, max_features=10000):\n",
    "    \"\"\"使用TF-IDF提取文本特征，添加空词汇表检查\"\"\"\n",
    "    print(\"\\n=== 提取TF-IDF特征 ===\")\n",
    "    \n",
    "    # 检查是否有文本为空\n",
    "    empty_train = sum(1 for t in train_texts if not t.strip())\n",
    "    empty_val = sum(1 for t in val_texts if not t.strip())\n",
    "    empty_test = sum(1 for t in test_texts if not t.strip())\n",
    "    \n",
    "    if empty_train > 0:\n",
    "        print(f\"警告：训练集包含 {empty_train} 个空文本\")\n",
    "    if empty_val > 0:\n",
    "        print(f\"警告：验证集包含 {empty_val} 个空文本\")\n",
    "    if empty_test > 0:\n",
    "        print(f\"警告：测试集包含 {empty_test} 个空文本\")\n",
    "    \n",
    "    # 提取特征\n",
    "    vectorizer = TfidfVectorizer(max_features=max_features, ngram_range=(1, 2))\n",
    "    \n",
    "    try:\n",
    "        X_train = vectorizer.fit_transform(train_texts)\n",
    "        X_val = vectorizer.transform(val_texts)\n",
    "        X_test = vectorizer.transform(test_texts)\n",
    "        \n",
    "        # 检查词汇表是否为空\n",
    "        vocabulary = vectorizer.vocabulary_\n",
    "        if not vocabulary:\n",
    "            raise ValueError(\"生成的词汇表为空！请检查文本预处理步骤。\")\n",
    "        \n",
    "        print(f\"词汇表大小: {len(vocabulary)}\")\n",
    "        print(f\"训练集特征矩阵形状: {X_train.shape}\")\n",
    "        \n",
    "        return X_train, X_val, X_test, vectorizer\n",
    "    \n",
    "    except Exception as e:\n",
    "        print(f\"特征提取失败: {e}\")\n",
    "        # 返回空矩阵和None\n",
    "        return None, None, None, None\n",
    "\n",
    "# ----------------------\n",
    "# 3. 模型训练与评估\n",
    "# ----------------------\n",
    "def train_and_evaluate(models, X_train, y_train, X_val, y_val, X_test, y_test, label_map):\n",
    "    \"\"\"训练并评估所有模型\"\"\"\n",
    "    results = {}\n",
    "    \n",
    "    if X_train is None or X_train.shape[0] == 0:\n",
    "        print(\"错误：训练数据为空或特征提取失败，无法进行模型训练！\")\n",
    "        return results\n",
    "    \n",
    "    for model_name, model in models.items():\n",
    "        # 训练模型\n",
    "        print(f\"\\n训练 {model_name} 模型...\")\n",
    "        start_time = time.time()\n",
    "        \n",
    "        try:\n",
    "            model.fit(X_train, y_train)\n",
    "            train_time = time.time() - start_time\n",
    "            \n",
    "            # 评估测试集\n",
    "            y_pred = model.predict(X_test)\n",
    "            accuracy = accuracy_score(y_test, y_pred)\n",
    "            report = classification_report(y_test, y_pred, target_names=label_map.values())\n",
    "            \n",
    "            print(f\"{model_name} 准确率: {accuracy:.4f}\")\n",
    "            print(f\"训练时间: {train_time:.2f} 秒\")\n",
    "            print(f\"分类报告:\\n{report}\")\n",
    "            \n",
    "            results[model_name] = {\n",
    "                'model': model,\n",
    "                'accuracy': accuracy,\n",
    "                'train_time': train_time,\n",
    "                'report': report\n",
    "            }\n",
    "        except Exception as e:\n",
    "            print(f\"{model_name} 训练失败: {e}\")\n",
    "    \n",
    "    return results\n",
    "\n",
    "# ----------------------\n",
    "# 4. 主流程\n",
    "# ----------------------\n",
    "def main():\n",
    "    # 配置参数\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"  # 数据集目录\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"  # 停用词表路径\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")  # 类别标签路径\n",
    "    FEATURE_METHOD = \"tfidf\"  # 特征提取方法：tfidf 或 embedding（需额外实现）\n",
    "    MAX_FEATURES = 10000  # TF-IDF最大特征数\n",
    "    \n",
    "    print(\"=== 新闻文本分类系统 ===\")\n",
    "    \n",
    "    # 检查数据集目录是否存在\n",
    "    if not os.path.exists(DATA_DIR):\n",
    "        print(f\"错误：数据集目录 {DATA_DIR} 不存在！\")\n",
    "        exit(1)\n",
    "    \n",
    "    # 列出目录内容（调试用）\n",
    "    print(f\"\\n{DATA_DIR} 目录内容:\")\n",
    "    for item in os.listdir(DATA_DIR):\n",
    "        print(f\"- {item}\")\n",
    "    \n",
    "    # 1. 加载数据集\n",
    "    print(\"\\n加载数据集...\")\n",
    "    train_path = os.path.join(DATA_DIR, \"train.txt\")\n",
    "    val_path = os.path.join(DATA_DIR, \"dev.txt\")\n",
    "    test_path = os.path.join(DATA_DIR, \"test.txt\")\n",
    "    \n",
    "    # 验证数据格式\n",
    "    print(\"\\n=== 验证数据格式 ===\")\n",
    "    with open(train_path, 'r', encoding='utf-8') as f:\n",
    "        sample = f.readline().strip()\n",
    "        print(f\"样本行: {sample}\")\n",
    "        \n",
    "        # 尝试解析样本\n",
    "        parts = sample.rsplit('\\t', 1)\n",
    "        if len(parts) != 2:\n",
    "            print(\"格式错误：未找到制表符分隔符\")\n",
    "        else:\n",
    "            text, label_str = parts\n",
    "            print(f\"解析结果 - 标签: {label_str}, 文本前30字: {text[:30]}...\")\n",
    "    \n",
    "    train_texts, train_labels = load_text_dataset(train_path)\n",
    "    val_texts, val_labels = load_text_dataset(val_path)\n",
    "    test_texts, test_labels = load_text_dataset(test_path)\n",
    "    \n",
    "    print(f\"\\n数据集统计:\")\n",
    "    print(f\"训练集样本数: {len(train_texts)}\")\n",
    "    print(f\"验证集样本数: {len(val_texts)}\")\n",
    "    print(f\"测试集样本数: {len(test_texts)}\")\n",
    "    \n",
    "    # 检查标签分布\n",
    "    print(\"\\n=== 标签分布 ===\")\n",
    "    for split_name, labels in [(\"训练集\", train_labels), (\"验证集\", val_labels), (\"测试集\", test_labels)]:\n",
    "        if labels:\n",
    "            label_counts = pd.Series(labels).value_counts().sort_index()\n",
    "            print(f\"\\n{split_name} 标签分布:\")\n",
    "            for label, count in label_counts.items():\n",
    "                print(f\"  标签 {label}: {count} 条数据 ({count/len(labels):.2%})\")\n",
    "        else:\n",
    "            print(f\"\\n{split_name} 标签数据为空！\")\n",
    "    \n",
    "    # 2. 加载停用词表\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        print(f\"\\n加载停用词表: {STOPWORDS_PATH}\")\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "        print(f\"停用词数量: {len(stopwords)}\")\n",
    "        \n",
    "        # 检查常用词是否被错误包含\n",
    "        common_words = [\"中国\", \"北京\", \"大学\", \"人民\", \"发展\", \"社会\"]\n",
    "        for word in common_words:\n",
    "            if word in stopwords:\n",
    "                print(f\"警告：停用词表包含常用词 '{word}'，可能影响分词效果\")\n",
    "    else:\n",
    "        print(\"警告: 未找到停用词表，跳过过滤...\")\n",
    "    \n",
    "    # 3. 预处理文本\n",
    "    print(\"\\n开始文本预处理...\")\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 显示预处理后的样本\n",
    "    print(\"\\n=== 预处理后样本 ===\")\n",
    "    for i in range(min(3, len(train_processed))):\n",
    "        print(f\"训练样本 {i+1}:\")\n",
    "        print(f\"  原始: {train_texts[i][:50]}...\")\n",
    "        print(f\"  处理: {train_processed[i][:50]}...\")\n",
    "        print()\n",
    "    \n",
    "    # 4. 提取TF-IDF特征\n",
    "    print(\"\\n提取特征...\")\n",
    "    X_train, X_val, X_test, vectorizer = extract_tfidf_features(\n",
    "        train_processed, val_processed, test_processed, MAX_FEATURES\n",
    "    )\n",
    "    \n",
    "    if X_train is None:\n",
    "        print(\"特征提取失败，程序终止！\")\n",
    "        exit(1)\n",
    "    \n",
    "    # 5. 定义六种模型\n",
    "    print(\"\\n初始化模型...\")\n",
    "    models = {\n",
    "        \"逻辑回归\": LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1, max_iter=1000),\n",
    "        \"支持向量机\": SVC(kernel='rbf', probability=True, gamma='scale'),\n",
    "        \"决策树\": DecisionTreeClassifier(max_depth=5),\n",
    "        \"随机森林\": RandomForestClassifier(n_estimators=100, n_jobs=-1),\n",
    "        \"K近邻\": KNeighborsClassifier(n_neighbors=5, n_jobs=-1),\n",
    "        \"朴素贝叶斯\": MultinomialNB()\n",
    "    }\n",
    "    \n",
    "    # 6. 加载类别标签（支持纯名称格式）\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    \n",
    "    # 如果class.txt为空或格式错误，使用数据中的唯一标签作为fallback\n",
    "    if not label_map:\n",
    "        unique_labels = sorted(set(train_labels))\n",
    "        label_map = {idx: f\"类别{idx}\" for idx in unique_labels}\n",
    "        print(f\"警告：使用自动生成的标签映射：{label_map}\")\n",
    "    \n",
    "    # 显示类别映射\n",
    "    if label_map:\n",
    "        print(\"\\n=== 类别映射 ===\")\n",
    "        for label_id, category in label_map.items():\n",
    "            print(f\"  标签 {label_id}: {category}\")\n",
    "    \n",
    "    # 7. 训练与评估\n",
    "    print(\"\\n开始模型训练与评估...\")\n",
    "    results = train_and_evaluate(models, X_train, train_labels, X_val, val_labels, X_test, test_labels, label_map)\n",
    "    \n",
    "    # 8. 保存最佳模型\n",
    "    if results:\n",
    "        best_model_name = max(results, key=lambda k: results[k]['accuracy'])\n",
    "        joblib.dump(results[best_model_name]['model'], f\"thucnews_{best_model_name.lower()}_model.pkl\")\n",
    "        print(f\"\\n最佳模型: {best_model_name}, 准确率: {results[best_model_name]['accuracy']:.4f}\")\n",
    "    else:\n",
    "        print(\"\\n没有模型训练成功！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "d96206ff4104c1fb",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "=== 新闻文本分类系统 ===\n",
      "\n",
      "D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt 目录内容:\n",
      "- class.txt\n",
      "- dev.txt\n",
      "- embedding_SougouNews.npz\n",
      "- embedding_Tencent.npz\n",
      "- test.txt\n",
      "- tianchiyi.py\n",
      "- train.txt\n",
      "- vocab.pkl\n",
      "\n",
      "加载数据集...\n",
      "\n",
      "=== 验证数据格式 ===\n",
      "样本行: 中华女子学院：本科层次仅1专业招男生\t3\n",
      "解析结果 - 标签: 3, 文本前30字: 中华女子学院：本科层次仅1专业招男生...\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt\n",
      "成功使用 utf-8 编码读取文件\n",
      "文件总行数: 180001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 100%|██████████| 180001/180001 [00:00<00:00, 1534678.43it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功加载 180000 条数据\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt\n",
      "成功使用 utf-8 编码读取文件\n",
      "文件总行数: 10001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 100%|██████████| 10001/10001 [00:00<00:00, 1901851.39it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功加载 10000 条数据\n",
      "加载文件: D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt\n",
      "成功使用 utf-8 编码读取文件\n",
      "文件总行数: 10001\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "解析 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 100%|██████████| 10001/10001 [00:00<?, ?it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "成功加载 10000 条数据\n",
      "\n",
      "数据集统计:\n",
      "训练集样本数: 180000\n",
      "验证集样本数: 10000\n",
      "测试集样本数: 10000\n",
      "\n",
      "=== 标签分布 ===\n",
      "\n",
      "训练集 标签分布:\n",
      "  标签 0: 18000 条数据 (10.00%)\n",
      "  标签 1: 18000 条数据 (10.00%)\n",
      "  标签 2: 18000 条数据 (10.00%)\n",
      "  标签 3: 18000 条数据 (10.00%)\n",
      "  标签 4: 18000 条数据 (10.00%)\n",
      "  标签 5: 18000 条数据 (10.00%)\n",
      "  标签 6: 18000 条数据 (10.00%)\n",
      "  标签 7: 18000 条数据 (10.00%)\n",
      "  标签 8: 18000 条数据 (10.00%)\n",
      "  标签 9: 18000 条数据 (10.00%)\n",
      "\n",
      "验证集 标签分布:\n",
      "  标签 0: 1000 条数据 (10.00%)\n",
      "  标签 1: 1000 条数据 (10.00%)\n",
      "  标签 2: 1000 条数据 (10.00%)\n",
      "  标签 3: 1000 条数据 (10.00%)\n",
      "  标签 4: 1000 条数据 (10.00%)\n",
      "  标签 5: 1000 条数据 (10.00%)\n",
      "  标签 6: 1000 条数据 (10.00%)\n",
      "  标签 7: 1000 条数据 (10.00%)\n",
      "  标签 8: 1000 条数据 (10.00%)\n",
      "  标签 9: 1000 条数据 (10.00%)\n",
      "\n",
      "测试集 标签分布:\n",
      "  标签 0: 1000 条数据 (10.00%)\n",
      "  标签 1: 1000 条数据 (10.00%)\n",
      "  标签 2: 1000 条数据 (10.00%)\n",
      "  标签 3: 1000 条数据 (10.00%)\n",
      "  标签 4: 1000 条数据 (10.00%)\n",
      "  标签 5: 1000 条数据 (10.00%)\n",
      "  标签 6: 1000 条数据 (10.00%)\n",
      "  标签 7: 1000 条数据 (10.00%)\n",
      "  标签 8: 1000 条数据 (10.00%)\n",
      "  标签 9: 1000 条数据 (10.00%)\n",
      "警告: 未找到停用词表，跳过过滤...\n",
      "\n",
      "开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理:   0%|          | 0/180000 [00:00<?, ?it/s]Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\X\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.539 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "文本预处理: 100%|██████████| 180000/180000 [00:10<00:00, 17854.37it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 20088.78it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 20221.11it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "=== 预处理后样本 ===\n",
      "训练样本 1:\n",
      "  原始: 中华女子学院：本科层次仅1专业招男生...\n",
      "  处理: 中华 女子 学院 ： 本科 层次 仅 1 专业 招 男生...\n",
      "\n",
      "训练样本 2:\n",
      "  原始: 两天价网站背后重重迷雾：做个网站究竟要多少钱...\n",
      "  处理: 两天 价 网站 背后 重重 迷雾 ： 做个 网站 究竟 要 多少 钱...\n",
      "\n",
      "训练样本 3:\n",
      "  原始: 东5环海棠公社230-290平2居准现房98折优惠...\n",
      "  处理: 东 5 环 海棠 公社 230 - 290 平 2 居 准现房 98 折 优惠...\n",
      "\n",
      "\n",
      "提取特征...\n",
      "\n",
      "=== 提取TF-IDF特征 ===\n",
      "词汇表大小: 10000\n",
      "训练集特征矩阵形状: (180000, 10000)\n",
      "\n",
      "初始化模型...\n",
      "成功加载 10 个类别，标签ID范围：0 ~ 9\n",
      "\n",
      "=== 类别映射 ===\n",
      "  标签 0: finance\n",
      "  标签 1: realty\n",
      "  标签 2: stocks\n",
      "  标签 3: education\n",
      "  标签 4: science\n",
      "  标签 5: society\n",
      "  标签 6: politics\n",
      "  标签 7: sports\n",
      "  标签 8: game\n",
      "  标签 9: entertainment\n",
      "\n",
      "开始模型训练与评估...\n",
      "\n",
      "训练 逻辑回归 模型...\n",
      "逻辑回归 准确率: 0.8767\n",
      "训练时间: 8.76 秒\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.89      0.87      0.88      1000\n",
      "       realty       0.92      0.90      0.91      1000\n",
      "       stocks       0.81      0.83      0.82      1000\n",
      "    education       0.95      0.93      0.94      1000\n",
      "      science       0.84      0.84      0.84      1000\n",
      "      society       0.84      0.86      0.85      1000\n",
      "     politics       0.85      0.85      0.85      1000\n",
      "       sports       0.94      0.93      0.93      1000\n",
      "         game       0.91      0.88      0.89      1000\n",
      "entertainment       0.82      0.88      0.85      1000\n",
      "\n",
      "     accuracy                           0.88     10000\n",
      "    macro avg       0.88      0.88      0.88     10000\n",
      " weighted avg       0.88      0.88      0.88     10000\n",
      "\n",
      "\n",
      "训练 支持向量机 模型...\n"
     ]
    }
   ],
   "execution_count": null
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "918846aad4486e8"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
