{
 "cells": [
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:01:51.611203Z",
     "start_time": "2025-05-26T07:01:03.913238Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.linear_model import LogisticRegression\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from tqdm import tqdm\n",
    "from datetime import datetime\n",
    "\n",
    "def load_class_labels(class_path):\n",
    "    \"\"\"加载类别标签映射，支持纯类别名称格式\"\"\"\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                category = line.strip()\n",
    "                if category:\n",
    "                    label_map[idx] = category\n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"加载文本数据集，使用制表符 \\t 分隔标签和文本\"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for line in tqdm(f, desc=f\"加载 {file_path}\"):\n",
    "            line = line.strip()\n",
    "            if not line:\n",
    "                continue\n",
    "            parts = line.rsplit('\\t', 1)\n",
    "            if len(parts) != 2:\n",
    "                continue\n",
    "            text, label_str = parts\n",
    "            if label_str.isdigit():\n",
    "                texts.append(text)\n",
    "                labels.append(int(label_str))\n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"分词并过滤停用词\"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        cleaned_text = text.strip()\n",
    "        if not cleaned_text:\n",
    "            processed.append(\"空文本\")\n",
    "            continue\n",
    "        words = jieba.cut(cleaned_text, HMM=True)  # 使用HMM分词\n",
    "        filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "        if not filtered:\n",
    "            filtered = [\"无关键词\"]\n",
    "        processed.append(' '.join(filtered))\n",
    "    return processed\n",
    "\n",
    "def main():\n",
    "    print(f\"[{datetime.now()}] 逻辑回归模型开始运行...\")\n",
    "    \n",
    "    # 配置参数\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")\n",
    "    MAX_FEATURES = 10000\n",
    "    \n",
    "    # 加载停用词\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "        print(f\"加载 {len(stopwords)} 个停用词\")\n",
    "    \n",
    "    # 加载数据集\n",
    "    print(f\"[{datetime.now()}] 开始加载数据...\")\n",
    "    train_texts, train_labels = load_text_dataset(os.path.join(DATA_DIR, \"train.txt\"))\n",
    "    val_texts, val_labels = load_text_dataset(os.path.join(DATA_DIR, \"dev.txt\"))\n",
    "    test_texts, test_labels = load_text_dataset(os.path.join(DATA_DIR, \"test.txt\"))\n",
    "    \n",
    "    # 文本预处理\n",
    "    print(f\"[{datetime.now()}] 开始文本预处理...\")\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 特征提取\n",
    "    print(f\"[{datetime.now()}] 开始特征提取...\")\n",
    "    vectorizer = TfidfVectorizer(max_features=MAX_FEATURES, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_processed)\n",
    "    X_val = vectorizer.transform(val_processed)\n",
    "    X_test = vectorizer.transform(test_processed)\n",
    "    \n",
    "    # 加载类别标签\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    if not label_map:\n",
    "        unique_labels = sorted(set(train_labels))\n",
    "        label_map = {idx: f\"类别{idx}\" for idx in unique_labels}\n",
    "    \n",
    "    # 训练模型\n",
    "    print(f\"[{datetime.now()}] 开始训练逻辑回归模型...\")\n",
    "    model = LogisticRegression(solver='lbfgs', multi_class='auto', n_jobs=-1, max_iter=1000)\n",
    "    model.fit(X_train, train_labels)\n",
    "    \n",
    "    # 评估模型\n",
    "    print(f\"[{datetime.now()}] 开始评估模型...\")\n",
    "    train_pred = model.predict(X_train)\n",
    "    test_pred = model.predict(X_test)\n",
    "    \n",
    "    print(\"\\n训练集准确率:\", accuracy_score(train_labels, train_pred))\n",
    "    print(\"\\n测试集准确率:\", accuracy_score(test_labels, test_pred))\n",
    "    print(\"\\n分类报告:\")\n",
    "    print(classification_report(test_labels, test_pred, target_names=list(label_map.values())))\n",
    "    \n",
    "    print(f\"[{datetime.now()}] 逻辑回归模型训练完成！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "eb2802f811bc1afe",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:01:11.495784] 逻辑回归模型开始运行...\n",
      "[2025-05-26 15:01:11.497793] 开始加载数据...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 1221304.01it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, 1010188.82it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 968885.19it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:01:11.714794] 开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理:   0%|          | 0/180000 [00:00<?, ?it/s]Building prefix dict from the default dictionary ...\n",
      "Loading model from cache C:\\Users\\X\\AppData\\Local\\Temp\\jieba.cache\n",
      "Loading model cost 0.651 seconds.\n",
      "Prefix dict has been built successfully.\n",
      "文本预处理: 100%|██████████| 180000/180000 [00:20<00:00, 8954.51it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:01<00:00, 7147.27it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:01<00:00, 7361.62it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:01:34.595588] 开始特征提取...\n",
      "[2025-05-26 15:01:42.937995] 开始训练逻辑回归模型...\n",
      "[2025-05-26 15:01:51.488844] 开始评估模型...\n",
      "\n",
      "训练集准确率: 0.9005833333333333\n",
      "\n",
      "测试集准确率: 0.8767\n",
      "\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.89      0.87      0.88      1000\n",
      "       realty       0.92      0.90      0.91      1000\n",
      "       stocks       0.81      0.83      0.82      1000\n",
      "    education       0.95      0.93      0.94      1000\n",
      "      science       0.84      0.84      0.84      1000\n",
      "      society       0.84      0.86      0.85      1000\n",
      "     politics       0.85      0.85      0.85      1000\n",
      "       sports       0.94      0.93      0.93      1000\n",
      "         game       0.91      0.88      0.89      1000\n",
      "entertainment       0.82      0.88      0.85      1000\n",
      "\n",
      "     accuracy                           0.88     10000\n",
      "    macro avg       0.88      0.88      0.88     10000\n",
      " weighted avg       0.88      0.88      0.88     10000\n",
      "\n",
      "[2025-05-26 15:01:51.548164] 逻辑回归模型训练完成！\n"
     ]
    }
   ],
   "execution_count": 1
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:03:37.009152Z",
     "start_time": "2025-05-26T07:03:34.733298Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from sklearn.svm import SVC, LinearSVC  # 添加LinearSVC导入\n",
    "def main():\n",
    "    print(f\"[{datetime.now()}] 优化版支持向量机模型开始运行...\")\n",
    "    \n",
    "    # 配置参数 - 优化性能\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")\n",
    "    MAX_FEATURES = 5000   # 减少特征数量\n",
    "    SAMPLE_SIZE = 10000  # 限制训练样本数量\n",
    "    USE_SGD = False     # 是否使用SGDClassifier\n",
    "    \n",
    "    # 加载停用词\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "        print(f\"加载 {len(stopwords)} 个停用词\")\n",
    "    \n",
    "    # 加载数据集\n",
    "    print(f\"[{datetime.now()}] 开始加载数据...\")\n",
    "    train_texts, train_labels = load_text_dataset(os.path.join(DATA_DIR, \"train.txt\"))\n",
    "    val_texts, val_labels = load_text_dataset(os.path.join(DATA_DIR, \"dev.txt\"))\n",
    "    test_texts, test_labels = load_text_dataset(os.path.join(DATA_DIR, \"test.txt\"))\n",
    "    \n",
    "    # 数据采样 - 优化性能\n",
    "    if len(train_texts) > SAMPLE_SIZE:\n",
    "        print(f\"数据采样: 从 {len(train_texts)} 条样本中选择 {SAMPLE_SIZE} 条\")\n",
    "        indices = np.random.choice(len(train_texts), SAMPLE_SIZE, replace=False)\n",
    "        train_texts = [train_texts[i] for i in indices]\n",
    "        train_labels = [train_labels[i] for i in indices]\n",
    "    \n",
    "    # 文本预处理\n",
    "    print(f\"[{datetime.now()}] 开始文本预处理...\")\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 特征提取 - 优化性能\n",
    "    print(f\"[{datetime.now()}] 开始特征提取...\")\n",
    "    vectorizer = TfidfVectorizer(max_features=MAX_FEATURES, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_processed)\n",
    "    X_val = vectorizer.transform(val_processed)\n",
    "    X_test = vectorizer.transform(test_processed)\n",
    "    \n",
    "    print(f\"训练数据形状: {X_train.shape}\")\n",
    "    \n",
    "    # 加载类别标签\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    if not label_map:\n",
    "        unique_labels = sorted(set(train_labels))\n",
    "        label_map = {idx: f\"类别{idx}\" for idx in unique_labels}\n",
    "    \n",
    "    # 训练模型 - 优化性能\n",
    "    print(f\"[{datetime.now()}] 开始训练支持向量机模型...\")\n",
    "    \n",
    "    if USE_SGD:\n",
    "        # 使用SGDClassifier模拟线性SVM，支持增量学习\n",
    "        from sklearn.linear_model import SGDClassifier\n",
    "        model = SGDClassifier(\n",
    "            loss='hinge',\n",
    "            alpha=0.0001,\n",
    "            max_iter=1000,\n",
    "            tol=1e-3,\n",
    "            n_jobs=-1,  # 使用所有CPU核心\n",
    "            verbose=1\n",
    "        )\n",
    "    else:\n",
    "        # 使用优化的线性SVC\n",
    "        model = LinearSVC(\n",
    "            C=1.0,\n",
    "            dual=False,  # 当样本数>特征数时使用False\n",
    "            max_iter=1000,\n",
    "            verbose=1\n",
    "        )\n",
    "    \n",
    "    model.fit(X_train, train_labels)\n",
    "    \n",
    "    # 评估模型\n",
    "    print(f\"[{datetime.now()}] 开始评估模型...\")\n",
    "    train_pred = model.predict(X_train)\n",
    "    test_pred = model.predict(X_test)\n",
    "    \n",
    "    print(\"\\n训练集准确率:\", accuracy_score(train_labels, train_pred))\n",
    "    print(\"\\n测试集准确率:\", accuracy_score(test_labels, test_pred))\n",
    "    print(\"\\n分类报告:\")\n",
    "    print(classification_report(test_labels, test_pred, target_names=list(label_map.values())))\n",
    "    \n",
    "    print(f\"[{datetime.now()}] 支持向量机模型训练完成！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "1fb87552d318448d",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:03:34.738895] 优化版支持向量机模型开始运行...\n",
      "[2025-05-26 15:03:34.739919] 开始加载数据...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 1351833.58it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, 1300922.43it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 1107436.24it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据采样: 从 180000 条样本中选择 10000 条\n",
      "[2025-05-26 15:03:34.910101] 开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 17421.54it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 19732.20it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 19914.56it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:03:36.500423] 开始特征提取...\n",
      "训练数据形状: (10000, 5000)\n",
      "[2025-05-26 15:03:36.873517] 开始训练支持向量机模型...\n",
      "[LibLinear][2025-05-26 15:03:36.981863] 开始评估模型...\n",
      "\n",
      "训练集准确率: 0.9761\n",
      "\n",
      "测试集准确率: 0.7801\n",
      "\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.81      0.76      0.78      1000\n",
      "       realty       0.85      0.83      0.84      1000\n",
      "       stocks       0.69      0.71      0.70      1000\n",
      "    education       0.88      0.89      0.88      1000\n",
      "      science       0.73      0.72      0.73      1000\n",
      "      society       0.77      0.78      0.78      1000\n",
      "     politics       0.75      0.75      0.75      1000\n",
      "       sports       0.84      0.84      0.84      1000\n",
      "         game       0.80      0.78      0.79      1000\n",
      "entertainment       0.69      0.75      0.72      1000\n",
      "\n",
      "     accuracy                           0.78     10000\n",
      "    macro avg       0.78      0.78      0.78     10000\n",
      " weighted avg       0.78      0.78      0.78     10000\n",
      "\n",
      "[2025-05-26 15:03:37.001734] 支持向量机模型训练完成！\n"
     ]
    }
   ],
   "execution_count": 2
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:51:03.620307Z",
     "start_time": "2025-05-26T07:50:46.980516Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.tree import DecisionTreeClassifier\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from sklearn.model_selection import StratifiedShuffleSplit\n",
    "from tqdm import tqdm\n",
    "from datetime import datetime\n",
    "\n",
    "def load_class_labels(class_path):\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                category = line.strip()\n",
    "                if category:\n",
    "                    label_map[idx] = category\n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for line in tqdm(f, desc=f\"加载 {file_path}\"):\n",
    "            line = line.strip()\n",
    "            if not line:\n",
    "                continue\n",
    "            parts = line.rsplit('\\t', 1)\n",
    "            if len(parts) != 2:\n",
    "                continue\n",
    "            text, label_str = parts\n",
    "            if label_str.isdigit():\n",
    "                texts.append(text)\n",
    "                labels.append(int(label_str))\n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        cleaned_text = text.strip()\n",
    "        if not cleaned_text:\n",
    "            processed.append(\"\")  # 空文本留空，避免误判为有效特征\n",
    "            continue\n",
    "        words = jieba.cut(cleaned_text, HMM=True)\n",
    "        filtered = [word for word in words if word not in stopwords and len(word.strip()) >= 2]  # 过滤单字符\n",
    "        processed.append(' '.join(filtered))  # 允许空字符串（由后续TF-IDF处理）\n",
    "    return processed\n",
    "\n",
    "def main():\n",
    "    print(f\"[{datetime.now()}] 决策树模型开始运行...\")\n",
    "    \n",
    "    # 配置参数\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")\n",
    "    MAX_FEATURES = 20000  # 增大特征数量\n",
    "    SAMPLE_SIZE = None  # 不采样，使用全量数据（若内存不足可调整）\n",
    "    TEST_SIZE = 0.2      # 划分验证集\n",
    "    \n",
    "    # 加载停用词（增加单字符过滤）\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "        # 额外过滤单字符和常见标点\n",
    "        stopwords.update({chr(i) for i in range(33, 48)} | {chr(i) for i in range(58, 65)} | {chr(i) for i in range(91, 97)} | {chr(i) for i in range(123, 127)})\n",
    "        print(f\"加载 {len(stopwords)} 个停用词\")\n",
    "    \n",
    "    # 加载数据集（使用分层采样保持类别平衡）\n",
    "    print(f\"[{datetime.now()}] 开始加载数据...\")\n",
    "    all_texts, all_labels = load_text_dataset(os.path.join(DATA_DIR, \"train.txt\"))\n",
    "    if SAMPLE_SIZE and len(all_texts) > SAMPLE_SIZE:\n",
    "        # 分层采样保持类别比例\n",
    "        sss = StratifiedShuffleSplit(n_splits=1, test_size=SAMPLE_SIZE, random_state=42)\n",
    "        for train_idx, _ in sss.split(all_texts, all_labels):\n",
    "            train_texts = [all_texts[i] for i in train_idx]\n",
    "            train_labels = [all_labels[i] for i in train_idx]\n",
    "    else:\n",
    "        train_texts, train_labels = all_texts, all_labels\n",
    "    \n",
    "    # 加载验证集和测试集（假设dev.txt和test.txt已正确划分）\n",
    "    val_texts, val_labels = load_text_dataset(os.path.join(DATA_DIR, \"dev.txt\"))\n",
    "    test_texts, test_labels = load_text_dataset(os.path.join(DATA_DIR, \"test.txt\"))\n",
    "    \n",
    "    # 文本预处理（增强过滤逻辑）\n",
    "    print(f\"[{datetime.now()}] 开始文本预处理...\")\n",
    "    stopwords.add('')  # 防止空字符串进入特征\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 特征提取（修复stop_words参数）\n",
    "    print(f\"[{datetime.now()}] 开始特征提取...\")\n",
    "    # 确保停用词列表不含空字符串，并转换为列表格式\n",
    "    stopwords_list = list(stopwords - {''})  # 移除空字符串\n",
    "    vectorizer = TfidfVectorizer(\n",
    "        max_features=MAX_FEATURES,\n",
    "        ngram_range=(1, 2),\n",
    "        min_df=2,  # 过滤低频词\n",
    "        max_df=0.95,  # 过滤高频通用词\n",
    "        stop_words=stopwords_list  # 传递列表类型的停用词\n",
    "    )\n",
    "    X_train = vectorizer.fit_transform(train_processed)\n",
    "    X_val = vectorizer.transform(val_processed)\n",
    "    X_test = vectorizer.transform(test_processed)\n",
    "    \n",
    "    # 加载类别标签（处理空标签情况）\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    if not label_map:\n",
    "        unique_labels = sorted(set(train_labels))\n",
    "        label_map = {label: f\"类别{label}\" for label in unique_labels}\n",
    "    target_names = [label_map[label] for label in sorted(label_map.keys())]\n",
    "    \n",
    "    # 训练模型（调优决策树参数）\n",
    "    print(f\"[{datetime.now()}] 开始训练决策树模型...\")\n",
    "    model = DecisionTreeClassifier(\n",
    "        max_depth=8,       # 增加树的深度\n",
    "        min_samples_split=50,  # 节点分裂最小样本数\n",
    "        min_samples_leaf=20,   # 叶子节点最小样本数\n",
    "        random_state=42\n",
    "    )\n",
    "    model.fit(X_train, train_labels)\n",
    "    \n",
    "    # 评估模型\n",
    "    print(f\"[{datetime.now()}] 开始评估模型...\")\n",
    "    train_pred = model.predict(X_train)\n",
    "    test_pred = model.predict(X_test)\n",
    "    \n",
    "    print(\"\\n训练集准确率:\", accuracy_score(train_labels, train_pred))\n",
    "    print(\"\\n测试集准确率:\", accuracy_score(test_labels, test_pred))\n",
    "    print(\"\\n分类报告:\")\n",
    "    print(classification_report(test_labels, test_pred, target_names=target_names))\n",
    "    \n",
    "    print(f\"[{datetime.now()}] 决策树模型训练完成！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "c65908fde0eaaf79",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:50:47.008372] 决策树模型开始运行...\n",
      "[2025-05-26 15:50:47.008372] 开始加载数据...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 1334809.13it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, 2538310.34it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 621746.81it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:50:47.163246] 开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 180000/180000 [00:09<00:00, 19717.01it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 19785.23it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 19887.78it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:50:57.314253] 开始特征提取...\n",
      "[2025-05-26 15:51:02.501052] 开始训练决策树模型...\n",
      "[2025-05-26 15:51:03.489521] 开始评估模型...\n",
      "\n",
      "训练集准确率: 0.21351111111111112\n",
      "\n",
      "测试集准确率: 0.2164\n",
      "\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.95      0.31      0.46      1000\n",
      "       realty       0.96      0.24      0.38      1000\n",
      "       stocks       0.00      0.00      0.00      1000\n",
      "    education       0.99      0.25      0.40      1000\n",
      "      science       0.00      0.00      0.00      1000\n",
      "      society       0.95      0.16      0.28      1000\n",
      "     politics       0.00      0.00      0.00      1000\n",
      "       sports       0.11      0.99      0.20      1000\n",
      "         game       0.93      0.21      0.34      1000\n",
      "entertainment       0.00      0.00      0.00      1000\n",
      "\n",
      "     accuracy                           0.22     10000\n",
      "    macro avg       0.49      0.22      0.21     10000\n",
      " weighted avg       0.49      0.22      0.21     10000\n",
      "\n",
      "[2025-05-26 15:51:03.558568] 决策树模型训练完成！\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\metrics\\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n",
      "  _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n",
      "D:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\metrics\\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n",
      "  _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n",
      "D:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\metrics\\_classification.py:1509: UndefinedMetricWarning: Precision is ill-defined and being set to 0.0 in labels with no predicted samples. Use `zero_division` parameter to control this behavior.\n",
      "  _warn_prf(average, modifier, f\"{metric.capitalize()} is\", len(result))\n"
     ]
    }
   ],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:07:39.036216Z",
     "start_time": "2025-05-26T07:07:35.028978Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from sklearn.feature_selection import SelectKBest, chi2  # 用于特征选择\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "def main():\n",
    "    print(f\"[{datetime.now()}] 优化版随机森林模型开始运行...\")\n",
    "    \n",
    "    # 配置参数 - 优化性能\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")\n",
    "    MAX_FEATURES = 5000   # 减少特征数量\n",
    "    SAMPLE_SIZE = 20000  # 限制训练样本数量\n",
    "    USE_SELECTKBEST = True  # 是否使用特征选择\n",
    "    \n",
    "    # 加载停用词\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = set([line.strip() for line in f])\n",
    "        print(f\"加载 {len(stopwords)} 个停用词\")\n",
    "    \n",
    "    # 加载数据集\n",
    "    print(f\"[{datetime.now()}] 开始加载数据...\")\n",
    "    train_texts, train_labels = load_text_dataset(os.path.join(DATA_DIR, \"train.txt\"))\n",
    "    val_texts, val_labels = load_text_dataset(os.path.join(DATA_DIR, \"dev.txt\"))\n",
    "    test_texts, test_labels = load_text_dataset(os.path.join(DATA_DIR, \"test.txt\"))\n",
    "    \n",
    "    # 数据采样 - 优化性能\n",
    "    if len(train_texts) > SAMPLE_SIZE:\n",
    "        print(f\"数据采样: 从 {len(train_texts)} 条样本中选择 {SAMPLE_SIZE} 条\")\n",
    "        indices = np.random.choice(len(train_texts), SAMPLE_SIZE, replace=False)\n",
    "        train_texts = [train_texts[i] for i in indices]\n",
    "        train_labels = [train_labels[i] for i in indices]\n",
    "    \n",
    "    # 文本预处理\n",
    "    print(f\"[{datetime.now()}] 开始文本预处理...\")\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    val_processed = preprocess_texts(val_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "    \n",
    "    # 特征提取 - 优化性能\n",
    "    print(f\"[{datetime.now()}] 开始特征提取...\")\n",
    "    vectorizer = TfidfVectorizer(max_features=MAX_FEATURES, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_processed)\n",
    "    X_val = vectorizer.transform(val_processed)  # 先转换验证集\n",
    "    X_test = vectorizer.transform(test_processed)  # 先转换测试集\n",
    "\n",
    "    print(f\"原始训练数据形状: {X_train.shape}\")\n",
    "\n",
    "    # 特征选择 - 优化性能\n",
    "    if USE_SELECTKBEST:\n",
    "        print(f\"[{datetime.now()}] 开始特征选择...\")\n",
    "        selector = SelectKBest(chi2, k=MAX_FEATURES)\n",
    "        X_train = selector.fit_transform(X_train, train_labels)\n",
    "        X_val = selector.transform(X_val)  # 再进行特征选择\n",
    "        X_test = selector.transform(X_test)  # 再进行特征选择\n",
    "        print(f\"选择后的训练数据形状: {X_train.shape}\")\n",
    "    \n",
    "    # 加载类别标签\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    if not label_map:\n",
    "        unique_labels = sorted(set(train_labels))\n",
    "        label_map = {idx: f\"类别{idx}\" for idx in unique_labels}\n",
    "    \n",
    "    # 训练模型 - 优化性能\n",
    "    print(f\"[{datetime.now()}] 开始训练随机森林模型...\")\n",
    "    model = RandomForestClassifier(\n",
    "        n_estimators=50,           # 减少树的数量\n",
    "        max_depth=10,              # 限制树的深度\n",
    "        min_samples_split=5,       # 增加分裂所需的最小样本数\n",
    "        min_samples_leaf=2,        # 增加叶子节点所需的最小样本数\n",
    "        max_features='sqrt',       # 每个节点考虑的最大特征数\n",
    "        n_jobs=-1,                 # 使用所有CPU核心\n",
    "        random_state=42,           # 随机种子\n",
    "        verbose=1                  # 显示训练进度\n",
    "    )\n",
    "    \n",
    "    model.fit(X_train, train_labels)\n",
    "    \n",
    "    # 评估模型\n",
    "    print(f\"[{datetime.now()}] 开始评估模型...\")\n",
    "    train_pred = model.predict(X_train)\n",
    "    test_pred = model.predict(X_test)\n",
    "    \n",
    "    print(\"\\n训练集准确率:\", accuracy_score(train_labels, train_pred))\n",
    "    print(\"\\n测试集准确率:\", accuracy_score(test_labels, test_pred))\n",
    "    print(\"\\n分类报告:\")\n",
    "    print(classification_report(test_labels, test_pred, target_names=list(label_map.values())))\n",
    "    \n",
    "    print(f\"[{datetime.now()}] 随机森林模型训练完成！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "5a8e460c4620a9fb",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:07:35.288167] 优化版随机森林模型开始运行...\n",
      "[2025-05-26 15:07:35.288167] 开始加载数据...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 1098220.13it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, 879216.85it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 894994.88it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "数据采样: 从 180000 条样本中选择 20000 条\n",
      "[2025-05-26 15:07:35.487961] 开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 20000/20000 [00:01<00:00, 15783.15it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 17788.45it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 20118.90it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:07:37.827705] 开始特征提取...\n",
      "原始训练数据形状: (20000, 5000)\n",
      "[2025-05-26 15:07:38.806951] 开始特征选择...\n",
      "选择后的训练数据形状: (20000, 5000)\n",
      "[2025-05-26 15:07:38.824741] 开始训练随机森林模型...\n",
      "[2025-05-26 15:07:38.965181] 开始评估模型...\n",
      "\n",
      "训练集准确率: 0.5004\n",
      "\n",
      "测试集准确率: 0.4912\n",
      "\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.78      0.49      0.60      1000\n",
      "       realty       0.89      0.63      0.74      1000\n",
      "       stocks       0.84      0.32      0.46      1000\n",
      "    education       0.86      0.78      0.82      1000\n",
      "      science       0.86      0.37      0.52      1000\n",
      "      society       0.84      0.34      0.49      1000\n",
      "     politics       0.75      0.34      0.47      1000\n",
      "       sports       0.91      0.33      0.48      1000\n",
      "         game       0.86      0.38      0.52      1000\n",
      "entertainment       0.18      0.93      0.30      1000\n",
      "\n",
      "     accuracy                           0.49     10000\n",
      "    macro avg       0.78      0.49      0.54     10000\n",
      " weighted avg       0.78      0.49      0.54     10000\n",
      "\n",
      "[2025-05-26 15:07:39.019673] 随机森林模型训练完成！\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "[Parallel(n_jobs=-1)]: Using backend ThreadingBackend with 12 concurrent workers.\n",
      "[Parallel(n_jobs=-1)]: Done  26 tasks      | elapsed:    0.0s\n",
      "[Parallel(n_jobs=-1)]: Done  50 out of  50 | elapsed:    0.0s finished\n",
      "[Parallel(n_jobs=12)]: Using backend ThreadingBackend with 12 concurrent workers.\n",
      "[Parallel(n_jobs=12)]: Done  26 tasks      | elapsed:    0.0s\n",
      "[Parallel(n_jobs=12)]: Done  50 out of  50 | elapsed:    0.0s finished\n",
      "[Parallel(n_jobs=12)]: Using backend ThreadingBackend with 12 concurrent workers.\n",
      "[Parallel(n_jobs=12)]: Done  26 tasks      | elapsed:    0.0s\n",
      "[Parallel(n_jobs=12)]: Done  50 out of  50 | elapsed:    0.0s finished\n"
     ]
    }
   ],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:21:56.492094Z",
     "start_time": "2025-05-26T07:21:50.330064Z"
    }
   },
   "cell_type": "code",
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import jieba\n",
    "from sklearn.feature_extraction.text import TfidfVectorizer\n",
    "from sklearn.neighbors import KNeighborsClassifier\n",
    "from sklearn.metrics import accuracy_score, classification_report\n",
    "from tqdm import tqdm\n",
    "from datetime import datetime\n",
    "\n",
    "def load_class_labels(class_path):\n",
    "    \"\"\"Load an index -> category-name mapping from a plain-text file.\n",
    "\n",
    "    Each non-empty line is one category name; the label id is the\n",
    "    zero-based line index. Returns an empty dict when the file is absent.\n",
    "    \"\"\"\n",
    "    label_map = {}\n",
    "    if os.path.exists(class_path):\n",
    "        with open(class_path, 'r', encoding='utf-8') as f:\n",
    "            for idx, line in enumerate(f):\n",
    "                category = line.strip()\n",
    "                if category:  # blank lines are skipped but still consume an index\n",
    "                    label_map[idx] = category\n",
    "    return label_map\n",
    "\n",
    "def load_text_dataset(file_path):\n",
    "    \"\"\"Load a tab-separated dataset file into parallel (texts, labels) lists.\n",
    "\n",
    "    Each line is expected to be '<text>\\t<label>'. The split uses rsplit on\n",
    "    the LAST tab so the text itself may contain tabs. Lines that are empty,\n",
    "    contain no tab, or carry a non-numeric label are silently skipped.\n",
    "    \"\"\"\n",
    "    texts, labels = [], []\n",
    "    with open(file_path, 'r', encoding='utf-8') as f:\n",
    "        for line in tqdm(f, desc=f\"加载 {file_path}\"):\n",
    "            line = line.strip()\n",
    "            if not line:\n",
    "                continue\n",
    "            parts = line.rsplit('\\t', 1)\n",
    "            if len(parts) != 2:\n",
    "                continue\n",
    "            text, label_str = parts\n",
    "            if label_str.isdigit():\n",
    "                texts.append(text)\n",
    "                labels.append(int(label_str))\n",
    "    return texts, labels\n",
    "\n",
    "def preprocess_texts(texts, stopwords):\n",
    "    \"\"\"Segment each text with jieba and drop stopwords.\n",
    "\n",
    "    Returns one space-joined token string per input text. Empty inputs map\n",
    "    to the placeholder \"空文本\" and texts whose tokens are all filtered out\n",
    "    map to \"无关键词\", so the output always has the same length as the input\n",
    "    and TfidfVectorizer never sees an empty document.\n",
    "    \"\"\"\n",
    "    processed = []\n",
    "    for text in tqdm(texts, desc=\"文本预处理\"):\n",
    "        cleaned_text = text.strip()\n",
    "        if not cleaned_text:\n",
    "            processed.append(\"空文本\")\n",
    "            continue\n",
    "        words = jieba.cut(cleaned_text, HMM=True)  # HMM=True enables OOV word discovery\n",
    "        filtered = [word for word in words if word not in stopwords and word.strip()]\n",
    "        processed.append(' '.join(filtered) if filtered else \"无关键词\")\n",
    "    return processed\n",
    "\n",
    "def main():\n",
    "    \"\"\"Train and evaluate a K-nearest-neighbours text classifier on THUCNews\n",
    "    TF-IDF features; prints train/test accuracy and a per-class report.\n",
    "    \"\"\"\n",
    "    print(f\"[{datetime.now()}] 优化版 K 近邻模型开始运行...\")\n",
    "\n",
    "    # Configuration\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")\n",
    "    MAX_FEATURES = 5000\n",
    "    SAMPLE_SIZE = 10000\n",
    "    SEED = 42  # fixed seed so the training subsample is reproducible\n",
    "    np.random.seed(SEED)\n",
    "\n",
    "    # Load stopwords; skip blank lines so '' is not counted as a stopword\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = {line.strip() for line in f if line.strip()}\n",
    "        print(f\"加载 {len(stopwords)} 个停用词\")\n",
    "\n",
    "    # Load and subsample the training data (KNN prediction cost grows\n",
    "    # linearly with the number of stored training samples)\n",
    "    train_texts, train_labels = load_text_dataset(os.path.join(DATA_DIR, \"train.txt\"))\n",
    "    if len(train_texts) > SAMPLE_SIZE:\n",
    "        indices = np.random.choice(len(train_texts), SAMPLE_SIZE, replace=False)\n",
    "        train_texts = [train_texts[i] for i in indices]\n",
    "        train_labels = [train_labels[i] for i in indices]\n",
    "\n",
    "    # Preprocess train and test. The dev split was previously loaded and\n",
    "    # preprocessed but never used anywhere — dropped to save a full pass.\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    test_texts, test_labels = load_text_dataset(os.path.join(DATA_DIR, \"test.txt\"))\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "\n",
    "    # TF-IDF features. TfidfVectorizer already L2-normalises every row by\n",
    "    # default (norm='l2'), so the former extra normalize() pass was a no-op\n",
    "    # and has been removed.\n",
    "    vectorizer = TfidfVectorizer(max_features=MAX_FEATURES, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_processed)\n",
    "    X_test = vectorizer.transform(test_processed)\n",
    "\n",
    "    # Class labels (fall back to generic names when class.txt is missing)\n",
    "    label_map = load_class_labels(CLASS_PATH) or {idx: f\"类别{idx}\" for idx in sorted(set(train_labels))}\n",
    "\n",
    "    # Train. ball_tree cannot handle sparse input (sklearn warned and fell\n",
    "    # back to brute force), so request brute force explicitly and use cosine\n",
    "    # distance directly — on L2-normalised rows it ranks neighbours\n",
    "    # identically to euclidean distance.\n",
    "    print(f\"[{datetime.now()}] 开始训练 K 近邻模型...\")\n",
    "    model = KNeighborsClassifier(\n",
    "        n_neighbors=5,\n",
    "        n_jobs=-1,\n",
    "        algorithm='brute',\n",
    "        metric='cosine'\n",
    "    )\n",
    "    model.fit(X_train, train_labels)\n",
    "\n",
    "    # Evaluate on the (sampled) training set and the held-out test set\n",
    "    print(f\"[{datetime.now()}] 开始评估模型...\")\n",
    "    train_pred = model.predict(X_train)\n",
    "    test_pred = model.predict(X_test)\n",
    "\n",
    "    print(\"\\n训练集准确率:\", accuracy_score(train_labels, train_pred))\n",
    "    print(\"\\n测试集准确率:\", accuracy_score(test_labels, test_pred))\n",
    "    print(\"\\n分类报告:\")\n",
    "    print(classification_report(test_labels, test_pred, target_names=list(label_map.values())))\n",
    "\n",
    "    print(f\"[{datetime.now()}] K 近邻模型训练完成！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "c6e25681723ef031",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:21:50.352556] 优化版 K 近邻模型开始运行...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 1309780.87it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 17598.34it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, 777875.37it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 19108.99it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 551548.27it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 18369.67it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:21:52.586249] 开始训练 K 近邻模型...\n",
      "[2025-05-26 15:21:52.588484] 开始评估模型...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "D:\\python\\anaconda1\\Lib\\site-packages\\sklearn\\neighbors\\_base.py:584: UserWarning: cannot use tree with sparse input: using brute force\n",
      "  warnings.warn(\"cannot use tree with sparse input: using brute force\")\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "\n",
      "训练集准确率: 0.4799\n",
      "\n",
      "测试集准确率: 0.2542\n",
      "\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.22      0.36      0.27      1000\n",
      "       realty       0.22      0.29      0.25      1000\n",
      "       stocks       0.29      0.23      0.25      1000\n",
      "    education       0.51      0.22      0.31      1000\n",
      "      science       0.23      0.25      0.24      1000\n",
      "      society       0.55      0.23      0.32      1000\n",
      "     politics       0.52      0.15      0.24      1000\n",
      "       sports       0.67      0.07      0.13      1000\n",
      "         game       0.66      0.24      0.36      1000\n",
      "entertainment       0.14      0.49      0.22      1000\n",
      "\n",
      "     accuracy                           0.25     10000\n",
      "    macro avg       0.40      0.25      0.26     10000\n",
      " weighted avg       0.40      0.25      0.26     10000\n",
      "\n",
      "[2025-05-26 15:21:56.480687] K 近邻模型训练完成！\n"
     ]
    }
   ],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-05-26T07:08:15.499092Z",
     "start_time": "2025-05-26T07:07:56.448061Z"
    }
   },
   "cell_type": "code",
   "source": [
    "from sklearn.naive_bayes import MultinomialNB\n",
    "\n",
    "def main():\n",
    "    \"\"\"Train and evaluate a Multinomial Naive Bayes classifier on the full\n",
    "    THUCNews training set using TF-IDF features.\n",
    "\n",
    "    Reuses load_class_labels / load_text_dataset / preprocess_texts defined\n",
    "    in the earlier cell, so that cell must be executed first.\n",
    "    \"\"\"\n",
    "    print(f\"[{datetime.now()}] 朴素贝叶斯模型开始运行...\")\n",
    "\n",
    "    # Configuration\n",
    "    DATA_DIR = r\"D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\"\n",
    "    STOPWORDS_PATH = \"stopwords.txt\"\n",
    "    CLASS_PATH = os.path.join(DATA_DIR, \"class.txt\")\n",
    "    MAX_FEATURES = 10000\n",
    "\n",
    "    # Load stopwords; skip blank lines so '' is not counted as a stopword\n",
    "    stopwords = set()\n",
    "    if os.path.exists(STOPWORDS_PATH):\n",
    "        with open(STOPWORDS_PATH, 'r', encoding='utf-8') as f:\n",
    "            stopwords = {line.strip() for line in f if line.strip()}\n",
    "        print(f\"加载 {len(stopwords)} 个停用词\")\n",
    "\n",
    "    # Load datasets. The dev split was previously loaded, preprocessed and\n",
    "    # vectorised (X_val) but never used anywhere — dropped to save ~10 s per run.\n",
    "    print(f\"[{datetime.now()}] 开始加载数据...\")\n",
    "    train_texts, train_labels = load_text_dataset(os.path.join(DATA_DIR, \"train.txt\"))\n",
    "    test_texts, test_labels = load_text_dataset(os.path.join(DATA_DIR, \"test.txt\"))\n",
    "\n",
    "    # Text preprocessing (jieba segmentation + stopword removal)\n",
    "    print(f\"[{datetime.now()}] 开始文本预处理...\")\n",
    "    train_processed = preprocess_texts(train_texts, stopwords)\n",
    "    test_processed = preprocess_texts(test_texts, stopwords)\n",
    "\n",
    "    # Feature extraction: fit the vocabulary on train only, then transform\n",
    "    # test with the same vocabulary to avoid leakage\n",
    "    print(f\"[{datetime.now()}] 开始特征提取...\")\n",
    "    vectorizer = TfidfVectorizer(max_features=MAX_FEATURES, ngram_range=(1, 2))\n",
    "    X_train = vectorizer.fit_transform(train_processed)\n",
    "    X_test = vectorizer.transform(test_processed)\n",
    "\n",
    "    # Class labels (fall back to generic names when class.txt is missing)\n",
    "    label_map = load_class_labels(CLASS_PATH)\n",
    "    if not label_map:\n",
    "        label_map = {idx: f\"类别{idx}\" for idx in sorted(set(train_labels))}\n",
    "\n",
    "    # Train\n",
    "    print(f\"[{datetime.now()}] 开始训练朴素贝叶斯模型...\")\n",
    "    model = MultinomialNB()\n",
    "    model.fit(X_train, train_labels)\n",
    "\n",
    "    # Evaluate\n",
    "    print(f\"[{datetime.now()}] 开始评估模型...\")\n",
    "    train_pred = model.predict(X_train)\n",
    "    test_pred = model.predict(X_test)\n",
    "\n",
    "    print(\"\\n训练集准确率:\", accuracy_score(train_labels, train_pred))\n",
    "    print(\"\\n测试集准确率:\", accuracy_score(test_labels, test_pred))\n",
    "    print(\"\\n分类报告:\")\n",
    "    print(classification_report(test_labels, test_pred, target_names=list(label_map.values())))\n",
    "\n",
    "    print(f\"[{datetime.now()}] 朴素贝叶斯模型训练完成！\")\n",
    "\n",
    "if __name__ == \"__main__\":\n",
    "    main()"
   ],
   "id": "852ae15c715886f9",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:07:56.458671] 朴素贝叶斯模型开始运行...\n",
      "[2025-05-26 15:07:56.458671] 开始加载数据...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\train.txt: 180000it [00:00, 1385684.06it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\dev.txt: 10000it [00:00, ?it/s]\n",
      "加载 D:\\jiqixuexi\\mytest\\wangyuzhen\\shangjier\\THUCNews-txt\\test.txt: 10000it [00:00, 499268.41it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:07:56.608600] 开始文本预处理...\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "文本预处理: 100%|██████████| 180000/180000 [00:09<00:00, 18080.48it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 17127.84it/s]\n",
      "文本预处理: 100%|██████████| 10000/10000 [00:00<00:00, 17374.11it/s]\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "[2025-05-26 15:08:07.730026] 开始特征提取...\n",
      "[2025-05-26 15:08:15.190288] 开始训练朴素贝叶斯模型...\n",
      "[2025-05-26 15:08:15.277969] 开始评估模型...\n",
      "\n",
      "训练集准确率: 0.8717111111111111\n",
      "\n",
      "测试集准确率: 0.8633\n",
      "\n",
      "分类报告:\n",
      "               precision    recall  f1-score   support\n",
      "\n",
      "      finance       0.83      0.87      0.85      1000\n",
      "       realty       0.93      0.84      0.88      1000\n",
      "       stocks       0.82      0.81      0.81      1000\n",
      "    education       0.91      0.93      0.92      1000\n",
      "      science       0.84      0.81      0.82      1000\n",
      "      society       0.83      0.88      0.86      1000\n",
      "     politics       0.82      0.85      0.84      1000\n",
      "       sports       0.91      0.93      0.92      1000\n",
      "         game       0.91      0.86      0.88      1000\n",
      "entertainment       0.84      0.86      0.85      1000\n",
      "\n",
      "     accuracy                           0.86     10000\n",
      "    macro avg       0.86      0.86      0.86     10000\n",
      " weighted avg       0.86      0.86      0.86     10000\n",
      "\n",
      "[2025-05-26 15:08:15.374710] 朴素贝叶斯模型训练完成！\n"
     ]
    }
   ],
   "execution_count": 8
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": "",
   "id": "7ed868adf3633709"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
