{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "46f55a3c",
   "metadata": {},
   "source": [
    "# DAY13"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "db000f3b",
   "metadata": {
    "vscode": {
     "languageId": "plaintext"
    }
   },
   "source": [
    "# 不平衡数据集的处理"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "7cae516b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 先运行之前预处理好的代码\n",
    "import pandas as pd    #用于数据处理和分析，可处理表格数据。\n",
    "import numpy as np     #用于数值计算，提供了高效的数组操作。\n",
    "import matplotlib.pyplot as plt    #用于绘制各种类型的图表\n",
    "import seaborn as sns   #基于matplotlib的高级绘图库，能绘制更美观的统计图形。\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    " \n",
    " # 设置中文字体（解决中文显示问题）\n",
    "plt.rcParams['font.sans-serif'] = ['SimHei']  # Windows系统常用黑体字体\n",
    "plt.rcParams['axes.unicode_minus'] = False    # 正常显示负号\n",
    "data = pd.read_csv('data.csv')    #读取数据\n",
    "\n",
    "\n",
    "# 先筛选字符串变量 \n",
    "discrete_features = data.select_dtypes(include=['object']).columns.tolist()\n",
    "# Home Ownership 标签编码\n",
    "home_ownership_mapping = {\n",
    "    'Own Home': 1,\n",
    "    'Rent': 2,\n",
    "    'Have Mortgage': 3,\n",
    "    'Home Mortgage': 4\n",
    "}\n",
    "data['Home Ownership'] = data['Home Ownership'].map(home_ownership_mapping)\n",
    "\n",
    "# Years in current job 标签编码\n",
    "years_in_job_mapping = {\n",
    "    '< 1 year': 1,\n",
    "    '1 year': 2,\n",
    "    '2 years': 3,\n",
    "    '3 years': 4,\n",
    "    '4 years': 5,\n",
    "    '5 years': 6,\n",
    "    '6 years': 7,\n",
    "    '7 years': 8,\n",
    "    '8 years': 9,\n",
    "    '9 years': 10,\n",
    "    '10+ years': 11\n",
    "}\n",
    "data['Years in current job'] = data['Years in current job'].map(years_in_job_mapping)\n",
    "\n",
    "# Purpose 独热编码，记得需要将bool类型转换为数值\n",
    "data = pd.get_dummies(data, columns=['Purpose'])\n",
    "data2 = pd.read_csv(\"data.csv\") # 重新读取数据，用来做列名对比\n",
    "list_final = [] # 新建一个空列表，用于存放独热编码后新增的特征名\n",
    "for i in data.columns:\n",
    "    if i not in data2.columns:\n",
    "       list_final.append(i) # 这里打印出来的就是独热编码后的特征名\n",
    "for i in list_final:\n",
    "    data[i] = data[i].astype(int) # 这里的i就是独热编码后的特征名\n",
    "\n",
    "\n",
    "\n",
    "# Term 0 - 1 映射\n",
    "term_mapping = {\n",
    "    'Short Term': 0,\n",
    "    'Long Term': 1\n",
    "}\n",
    "data['Term'] = data['Term'].map(term_mapping)\n",
    "data.rename(columns={'Term': 'Long Term'}, inplace=True) # 重命名列\n",
    "continuous_features = data.select_dtypes(include=['int64', 'float64']).columns.tolist()  #把筛选出来的列名转换成列表\n",
    " \n",
    " # 连续特征用众数补全（代码实际使用 mode()，并非中位数）\n",
    "for feature in continuous_features:     \n",
    "    mode_value = data[feature].mode()[0]            #获取该列的众数。\n",
    "    data[feature].fillna(mode_value, inplace=True)          #用众数填充该列的缺失值，inplace=True表示直接在原数据上修改。\n",
    "\n",
    "# 最开始也说了 很多调参函数自带交叉验证，甚至是必选的参数，你如果想要不交叉反而实现起来会麻烦很多\n",
    "# 所以这里我们还是只划分一次数据集\n",
    "from sklearn.model_selection import train_test_split\n",
    "X = data.drop(['Credit Default'], axis=1)  # 特征，axis=1表示按列删除\n",
    "y = data['Credit Default'] # 标签\n",
    "# 按照8:2划分训练集和测试集\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)  # 80%训练集，20%测试集\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "42f54225",
   "metadata": {},
   "source": [
    "## 基准模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "8dc94d13",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 1. 默认参数随机森林 (训练集 -> 测试集) ---\n",
      "训练与预测耗时: 0.9966 秒\n",
      "\n",
      "默认随机森林 在测试集上的分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.77      0.97      0.86      1059\n",
      "           1       0.79      0.30      0.43       441\n",
      "\n",
      "    accuracy                           0.77      1500\n",
      "   macro avg       0.78      0.63      0.64      1500\n",
      "weighted avg       0.77      0.77      0.73      1500\n",
      "\n",
      "默认随机森林 在测试集上的混淆矩阵：\n",
      "[[1023   36]\n",
      " [ 309  132]]\n"
     ]
    }
   ],
   "source": [
    "\n",
    "from sklearn.ensemble import RandomForestClassifier #随机森林分类器\n",
    "\n",
    "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score # 用于评估分类器性能的指标\n",
    "from sklearn.metrics import classification_report, confusion_matrix #用于生成分类报告和混淆矩阵\n",
    "import warnings #用于忽略警告信息\n",
    "warnings.filterwarnings(\"ignore\") # 忽略所有警告信息\n",
    "# --- 1. 默认参数的随机森林 ---\n",
    "# 评估基准模型，这里确实不需要验证集\n",
    "print(\"--- 1. 默认参数随机森林 (训练集 -> 测试集) ---\")\n",
    "import time # 这里介绍一个新的库，time库，主要用于时间相关的操作，因为调参需要很长时间，记录下会帮助后人知道大概的时长\n",
    "start_time = time.time() # 记录开始时间\n",
    "rf_model = RandomForestClassifier(random_state=42)\n",
    "rf_model.fit(X_train, y_train) # 在训练集上训练\n",
    "rf_pred = rf_model.predict(X_test) # 在测试集上预测\n",
    "end_time = time.time() # 记录结束时间\n",
    "\n",
    "print(f\"训练与预测耗时: {end_time - start_time:.4f} 秒\")\n",
    "print(\"\\n默认随机森林 在测试集上的分类报告：\")\n",
    "print(classification_report(y_test, rf_pred))\n",
    "print(\"默认随机森林 在测试集上的混淆矩阵：\")\n",
    "print(confusion_matrix(y_test, rf_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a5e14f35",
   "metadata": {},
    "outputs": [],
    "source": []
  },
  {
   "cell_type": "markdown",
   "id": "eba5866e",
   "metadata": {},
   "source": [
    "## 过采样\n",
    "- 过采样一般包含2种做法：随机采样和SMOTE\n",
    "- 过采样是把少的类别补充和多的类别一样多，欠采样是把多的类别减少和少的类别一样\n",
    "- 一般都是缺数据，所以很少用欠采样"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "541d4ae2",
   "metadata": {},
   "source": [
    "### 随机过采样ROS\n",
    "\n",
    "随机过采样是从少数类中随机选择样本，并将其复制后添加到训练集。\n",
    "随机过采样的步骤如下：\n",
    "\n",
    "1. 确定少数类的样本数。\n",
    "2. 从少数类中随机选择样本，并将其复制。\n",
    "3. 将复制的样本添加到训练集。\n",
    "\n",
    "随机过采样的优点是，它可以增加少数类的样本数，从而提高模型的泛化能力。\n",
    "\n",
    "随机过采样的缺点是，它可能会增加训练集的大小，从而增加训练时间。此外，它可能会增加噪声，并且可能会增加模型的偏差。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "14ef95ac",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 需要安装imbalanced-learn库\n",
    "# 这个库是专门用于处理不平衡数据集的，提供了多种重采样方法\n",
    "# !pip install -U imbalanced-learn "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a38b839e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "随机过采样后训练集的形状： (8656, 31) (8656,)\n",
      "随机过采样后训练与预测耗时: 1.2756 秒\n",
      "\n",
      "随机过采样后随机森林 在测试集上的分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.77      0.93      0.84      1059\n",
      "           1       0.67      0.34      0.46       441\n",
      "\n",
      "    accuracy                           0.76      1500\n",
      "   macro avg       0.72      0.64      0.65      1500\n",
      "weighted avg       0.74      0.76      0.73      1500\n",
      "\n",
      "随机过采样后随机森林 在测试集上的混淆矩阵：\n",
      "[[985  74]\n",
      " [289 152]]\n"
     ]
    }
   ],
   "source": [
    "# 以下是添加的过采样代码\n",
    "\n",
    "# 1. 随机过采样\n",
    "from imblearn.over_sampling import RandomOverSampler\n",
    "ros = RandomOverSampler(random_state=42) # 创建随机过采样对象\n",
    "X_train_ros, y_train_ros = ros.fit_resample(X_train, y_train) # 对训练集进行随机过采样\n",
    "\n",
    "print(\"随机过采样后训练集的形状：\", X_train_ros.shape, y_train_ros.shape) \n",
    "\n",
    "# 训练随机森林模型（使用随机过采样后的训练集）\n",
    "rf_model_ros = RandomForestClassifier(random_state=42)\n",
    "start_time_ros = time.time()\n",
    "rf_model_ros.fit(X_train_ros, y_train_ros)\n",
    "end_time_ros = time.time()\n",
    "\n",
    "print(f\"随机过采样后训练耗时: {end_time_ros - start_time_ros:.4f} 秒\")  # 计时只覆盖fit，预测在计时之后\n",
    "\n",
    "# 在测试集上预测\n",
    "rf_pred_ros = rf_model_ros.predict(X_test)\n",
    "\n",
    "print(\"\\n随机过采样后随机森林 在测试集上的分类报告：\")\n",
    "print(classification_report(y_test, rf_pred_ros))\n",
    "print(\"随机过采样后随机森林 在测试集上的混淆矩阵：\")\n",
    "print(confusion_matrix(y_test, rf_pred_ros))\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "5169ea6d",
   "metadata": {},
   "source": [
    "### smote过采样\n",
    "\n",
    "smote过采样是合成样本的方法。\n",
    "\n",
    "\n",
    "1. 对于少数类中的每个样本，计算它与少数类中其他样本的距离，得到其$k$近邻（一般$k$取5或其他合适的值）。\n",
    "2. 从$k$近邻中随机选择一个样本。\n",
    "3. 计算选定的近邻样本与原始样本之间的差值。\n",
    "4. 生成一个在0到1之间的随机数。\n",
    "5. 将差值乘以随机数，然后加到原始样本上，得到一个新的合成样本。\n",
    "6. 重复上述步骤，直到合成出足够数量的少数类样本，使得少数类和多数类样本数量达到某种平衡。\n",
    "7. 使用过采样后的数据集训练模型并评估模型性能。\n",
    "\n",
    "SMOTE的核心思想是通过在少数类样本的特征空间中进行插值来合成新的样本。\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cbd1b8b8",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "SMOTE过采样后训练集的形状： (8656, 31) (8656,)\n",
      "SMOTE过采样后训练与预测耗时: 1.3051 秒\n",
      "\n",
      "SMOTE过采样后随机森林 在测试集上的分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.77      0.92      0.84      1059\n",
      "           1       0.64      0.35      0.45       441\n",
      "\n",
      "    accuracy                           0.75      1500\n",
      "   macro avg       0.70      0.63      0.64      1500\n",
      "weighted avg       0.73      0.75      0.72      1500\n",
      "\n",
      "SMOTE过采样后随机森林 在测试集上的混淆矩阵：\n",
      "[[972  87]\n",
      " [288 153]]\n"
     ]
    }
   ],
   "source": [
    "# 2. SMOTE过采样\n",
    "from imblearn.over_sampling import SMOTE \n",
    "smote = SMOTE(random_state=42)\n",
    "X_train_smote, y_train_smote = smote.fit_resample(X_train, y_train)\n",
    "\n",
    "print(\"SMOTE过采样后训练集的形状：\", X_train_smote.shape, y_train_smote.shape)\n",
    "\n",
    "# 训练随机森林模型（使用SMOTE过采样后的训练集）\n",
    "rf_model_smote = RandomForestClassifier(random_state=42)\n",
    "start_time_smote = time.time()\n",
    "rf_model_smote.fit(X_train_smote, y_train_smote)\n",
    "end_time_smote = time.time()\n",
    "\n",
    "print(f\"SMOTE过采样后训练耗时: {end_time_smote - start_time_smote:.4f} 秒\")  # 计时只覆盖fit，预测在计时之后\n",
    "\n",
    "# 在测试集上预测\n",
    "rf_pred_smote = rf_model_smote.predict(X_test)\n",
    "\n",
    "print(\"\\nSMOTE过采样后随机森林 在测试集上的分类报告：\")\n",
    "print(classification_report(y_test, rf_pred_smote))\n",
    "print(\"SMOTE过采样后随机森林 在测试集上的混淆矩阵：\")\n",
    "print(confusion_matrix(y_test, rf_pred_smote))\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "1c1c0154",
   "metadata": {},
   "source": [
    "## 修改权重\n",
    "\n",
    "\n",
    "在处理类别不平衡的数据集时，标准机器学习算法（如默认的随机森林）可能会过度偏向多数类，导致对少数类的预测性能很差。为了解决这个问题，常用的策略包括在数据层面（采样）和算法层面进行调整。本文重点讨论两种算法层面的方法：**修改类别权重**和**修改分类阈值**。\n",
    "\n",
    "*   **挑战：** 标准算法的优化目标（如最小化整体误差）会使其优先拟合多数类，因为这样做能更快地降低总误差。\n",
    "*   **后果：** 对少数类样本的识别能力不足（低召回率），即使整体准确率看起来很高。\n",
    "*   **目标：** 提高模型对少数类的预测性能，通常关注召回率（Recall）、F1分数（F1-Score）、AUC-PR等指标。\n",
    "\n",
    "## 方法一：修改类别权重 (Cost-Sensitive Learning)\n",
    "\n",
    "这种方法在模型**训练阶段**介入，通过调整不同类别样本对损失函数的贡献来影响模型的学习过程。\n",
    "\n",
    "*   **核心思想：** 为不同类别的错误分类分配不同的“代价”或“权重”。通常，将少数类样本错分为多数类的代价设置得远高于反过来的情况。\n",
    "*   **作用机制：** 修改模型的**损失函数**。当模型错误分类一个具有高权重的少数类样本时，会受到更大的惩罚（更高的损失值）。\n",
    "*   **目的：** 迫使学习算法在优化参数时更加**关注少数类**，努力学习到一个能够更好地区分少数类的**决策边界**。它试图从根本上让模型“学会”识别少数类。\n",
    "*   **影响：** 直接改变模型的**参数学习过程**和最终学到的**模型本身**。\n",
    "\n",
    "### 在 `RandomForestClassifier` 中应用 (`class_weight` 参数)\n",
    "\n",
    "Scikit-learn 中的 `RandomForestClassifier` 提供了 `class_weight` 参数来实现代价敏感学习：\n",
    "\n",
    "1.  **`class_weight=None` (默认值):**\n",
    "    *   所有类别被赋予**相同的权重 (1)**。\n",
    "    *   算法在构建树和计算分裂标准（如基尼不纯度）时，**不区分**多数类和少数类。\n",
    "    *   在不平衡数据上，这自然导致模型**偏向多数类**。\n",
    "\n",
    "2.  **`class_weight='balanced'`:**\n",
    "    *   算法**自动**根据训练数据 `y` 中各类别的频率来调整权重。\n",
    "    *   权重计算方式与类别频率成**反比**：`weight = n_samples / (n_classes * np.bincount(y))`。\n",
    "    *   这意味着**少数类样本获得更高的权重**，多数类样本获得较低的权重。\n",
    "    *   目的是在训练中“放大”少数类的重要性，促使模型提升对少数类的识别能力。\n",
    "\n",
    "3.  **`class_weight={dict}` (手动设置):**\n",
    "    *   可以提供一个字典，手动为每个类别标签指定权重，例如 `class_weight={0: 1, 1: 10}` 表示类别 1 的权重是类别 0 的 10 倍。\n",
    "\n",
    "*   **优点：**\n",
    "    *   从模型学习的根本上解决问题。\n",
    "    *   可能得到泛化能力更强的模型。\n",
    "    *   许多常用算法内置支持，实现方便。\n",
    "*   **注意：** 使用 `class_weight` 时，推荐结合**交叉验证**（特别是 `StratifiedKFold`）来可靠地评估其效果和模型的稳定性。\n",
    "\n",
    "## 方法二：修改分类阈值\n",
    "\n",
    "这种方法在模型**训练完成之后**介入，通过调整最终分类的决策规则来平衡不同类型的错误。\n",
    "\n",
    "*   **核心思想：** 改变将模型输出的概率（或得分）映射到最终类别标签的门槛。\n",
    "*   **作用机制：** 模型通常输出一个样本属于正类（通常设为少数类）的概率 `p`。默认情况下，如果 `p > 0.5`，则预测为正类。修改阈值意味着改变这个 `0.5`，例如，如果要求更高的召回率，可以将阈值降低（如 `p > 0.3` 就预测为正类）。\n",
    "*   **目的：** 在**不改变已训练好的模型**的情况下，根据业务需求调整精确率（Precision）和召回率（Recall）之间的权衡。通常用于**提高少数类的召回率**（但可能会牺牲精确率）。\n",
    "*   **影响：** **不改变**模型学到的参数或决策边界本身，只改变如何**解释**模型的输出。\n",
    "*   **优点：**\n",
    "    *   实现简单，无需重新训练模型。\n",
    "    *   非常直观，可以直接在 PR 曲线或 ROC 曲线上选择操作点。\n",
    "    *   适用于任何输出概率或分数的模型。\n",
    "*   **缺点：**\n",
    "    *   治标不治本。如果模型本身就没学好如何区分少数类（概率输出普遍很低），单纯降低阈值可能效果有限或导致大量误报（低精确率）。\n",
    "\n",
    "## 核心差异总结\n",
    "\n",
    "| 特性         | 修改类别权重 (`class_weight`)                  | 修改分类阈值                         |\n",
    "| :----------- | :--------------------------------------------- | :----------------------------------- |\n",
    "| **作用阶段** | 模型**训练**时                                 | 模型**预测**（或评估）时             |\n",
    "| **作用对象** | 模型的**损失函数**、**参数学习**过程             | 模型输出概率/分数到最终预测的**决策规则** |\n",
    "| **对模型影响** | **改变**学习到的模型本身和决策边界               | **不改变**已学习到的模型             |\n",
    "| **性质**     | **根本性**调整，代价敏感学习                   | **后处理**性质的调整                 |\n",
    "| **目标侧重** | 学习一个内在区分能力更强的模型                 | 在现有模型上调整性能指标的权衡       |\n",
    "| **实现方式** | 设置算法的参数（如 `class_weight='balanced'`） | 在预测后应用不同的概率门槛           |\n",
    "\n",
    "## 实践建议\n",
    "\n",
    "1.  **评估指标先行：** 明确你的目标，使用适合不平衡数据的指标（Recall, F1-Score, AUC-PR, Balanced Accuracy, MCC）来评估模型。\n",
    "2.  **优先尝试根本方法：** 通常建议首先尝试**修改权重 (`class_weight='balanced'`)** 或 **数据采样方法 (如 SMOTE)**，因为它们试图从源头改善模型学习。\n",
    "3.  **交叉验证评估：** 在使用 `class_weight` 或采样方法时，务必使用**分层交叉验证 (Stratified K-Fold)** 来获得对模型性能的可靠估计。\n",
    "4.  **阈值调整作为补充：** 修改阈值可以作为一种**补充手段**或**最后的微调**。即使使用了权重调整，有时仍需根据具体的业务需求（如必须达到某个召回率水平）来调整阈值，找到最佳的操作点。\n",
    "5.  **组合策略：** 有时结合多种方法（如 SMOTE + `class_weight`）可能会产生更好的结果。\n",
    "\n",
    "总之，修改权重旨在训练一个“更好”的模型，而修改阈值是在一个“已有”模型上调整其表现。理解它们的差异有助于你选择更合适的策略来应对不平衡数据集的挑战。\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "58a0db23",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "--- 1. 默认参数随机森林 (训练集 -> 测试集) ---\n",
      "默认模型训练与预测耗时: 0.9860 秒\n",
      "\n",
      "默认随机森林 在测试集上的分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.77      0.97      0.86      1059\n",
      "           1       0.79      0.30      0.43       441\n",
      "\n",
      "    accuracy                           0.77      1500\n",
      "   macro avg       0.78      0.63      0.64      1500\n",
      "weighted avg       0.77      0.77      0.73      1500\n",
      "\n",
      "默认随机森林 在测试集上的混淆矩阵：\n",
      "[[1023   36]\n",
      " [ 309  132]]\n",
      "--------------------------------------------------\n",
      "--- 2. 带权重随机森林 + 交叉验证 (在训练集上进行) ---\n",
      "训练集中各类别数量: [4328 1672]\n",
      "少数类标签: 1, 多数类标签: 0\n",
      "开始进行 5 折交叉验证...\n",
      "交叉验证耗时: 2.4092 秒\n",
      "\n",
      "带权重随机森林 交叉验证平均性能 (基于训练集划分)：\n",
      "  平均 accuracy: 0.7798 (+/- 0.0085)\n",
      "  平均 precision_minority: 0.8291 (+/- 0.0182)\n",
      "  平均 recall_minority: 0.2650 (+/- 0.0400)\n",
      "  平均 f1_minority: 0.3998 (+/- 0.0455)\n",
      "--------------------------------------------------\n",
      "--- 3. 训练最终的带权重模型 (整个训练集) 并在测试集上评估 ---\n",
      "最终带权重模型训练与预测耗时: 0.9214 秒\n",
      "\n",
      "带权重随机森林 在测试集上的分类报告：\n",
      "              precision    recall  f1-score   support\n",
      "\n",
      "           0       0.76      0.97      0.86      1059\n",
      "           1       0.81      0.27      0.41       441\n",
      "\n",
      "    accuracy                           0.77      1500\n",
      "   macro avg       0.78      0.62      0.63      1500\n",
      "weighted avg       0.78      0.77      0.72      1500\n",
      "\n",
      "带权重随机森林 在测试集上的混淆矩阵：\n",
      "[[1030   29]\n",
      " [ 320  121]]\n",
      "--------------------------------------------------\n",
      "性能对比 (测试集上的少数类召回率 Recall):\n",
      "  默认模型: 0.2993\n",
      "  带权重模型: 0.2744\n"
     ]
    }
   ],
   "source": [
    "import numpy as np # 引入 numpy 用于计算平均值等\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.model_selection import StratifiedKFold, cross_validate # 引入分层 K 折和交叉验证工具\n",
    "from sklearn.metrics import make_scorer, accuracy_score, precision_score, recall_score, f1_score, confusion_matrix, classification_report\n",
    "import time\n",
    "import warnings\n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "# 假设 X_train, y_train, X_test, y_test 已经准备好\n",
    "# X_train, y_train 用于交叉验证和最终模型训练\n",
    "# X_test, y_test 用于最终评估\n",
    "\n",
    "# --- 1. 默认参数的随机森林 (原始代码，作为对比基准) ---\n",
    "print(\"--- 1. 默认参数随机森林 (训练集 -> 测试集) ---\")\n",
    "start_time = time.time()\n",
    "rf_model_default = RandomForestClassifier(random_state=42)\n",
    "rf_model_default.fit(X_train, y_train)\n",
    "rf_pred_default = rf_model_default.predict(X_test)\n",
    "end_time = time.time()\n",
    "print(f\"默认模型训练与预测耗时: {end_time - start_time:.4f} 秒\")\n",
    "print(\"\\n默认随机森林 在测试集上的分类报告：\")\n",
    "print(classification_report(y_test, rf_pred_default))\n",
    "print(\"默认随机森林 在测试集上的混淆矩阵：\")\n",
    "print(confusion_matrix(y_test, rf_pred_default))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "\n",
    "# --- 2. 带权重的随机森林 + 交叉验证 (在训练集上进行CV) ---\n",
    "print(\"--- 2. 带权重随机森林 + 交叉验证 (在训练集上进行) ---\")\n",
    "\n",
    "# 确定少数类标签 (非常重要！)\n",
    "# 假设是二分类问题，我们需要知道哪个是少数类标签才能正确解读 recall, precision, f1\n",
    "# 例如，如果标签是 0 和 1，可以这样查看：\n",
    "counts = np.bincount(y_train)\n",
    "minority_label = np.argmin(counts) # 找到计数最少的类别的标签\n",
    "majority_label = np.argmax(counts)\n",
    "print(f\"训练集中各类别数量: {counts}\")\n",
    "print(f\"少数类标签: {minority_label}, 多数类标签: {majority_label}\")\n",
    "# !!下面的 scorer 将使用这个 minority_label !!\n",
    "\n",
    "# 定义带权重的模型\n",
    "rf_model_weighted = RandomForestClassifier(\n",
    "    random_state=42,\n",
    "    class_weight='balanced'  # 关键：自动根据类别频率调整权重\n",
    "    # class_weight={minority_label: 10, majority_label: 1} # 或者可以手动设置权重字典\n",
    ")\n",
    "\n",
    "# 设置交叉验证策略 (使用 StratifiedKFold 保证每折类别比例相似)\n",
    "cv_strategy = StratifiedKFold(n_splits=5, shuffle=True, random_state=42) # 5折交叉验证\n",
    "\n",
    "# 定义用于交叉验证的评估指标\n",
    "# 特别关注少数类的指标，使用 make_scorer 指定 pos_label\n",
    "# 注意：如果你的少数类标签不是 1，需要修改 pos_label\n",
    "scoring = {\n",
    "    'accuracy': 'accuracy',\n",
    "    'precision_minority': make_scorer(precision_score, pos_label=minority_label, zero_division=0),\n",
    "    'recall_minority': make_scorer(recall_score, pos_label=minority_label),\n",
    "    'f1_minority': make_scorer(f1_score, pos_label=minority_label)\n",
    "}\n",
    "\n",
    "print(f\"开始进行 {cv_strategy.get_n_splits()} 折交叉验证...\")\n",
    "start_time_cv = time.time()\n",
    "\n",
    "# 执行交叉验证 (在 X_train, y_train 上进行)\n",
    "# cross_validate 会自动完成训练和评估过程\n",
    "cv_results = cross_validate(\n",
    "    estimator=rf_model_weighted,\n",
    "    X=X_train,\n",
    "    y=y_train,\n",
    "    cv=cv_strategy,\n",
    "    scoring=scoring,\n",
    "    n_jobs=-1, # 使用所有可用的 CPU 核心\n",
    "    return_train_score=False # 通常我们更关心测试折的得分\n",
    ")\n",
    "\n",
    "end_time_cv = time.time()\n",
    "print(f\"交叉验证耗时: {end_time_cv - start_time_cv:.4f} 秒\")\n",
    "\n",
    "# 打印交叉验证结果的平均值\n",
    "print(\"\\n带权重随机森林 交叉验证平均性能 (基于训练集划分)：\")\n",
    "for metric_name, scores in cv_results.items():\n",
    "    if metric_name.startswith('test_'): # 我们关心的是在验证折上的表现\n",
    "         # 提取指标名称（去掉 'test_' 前缀）\n",
    "        clean_metric_name = metric_name.split('test_')[1]\n",
    "        print(f\"  平均 {clean_metric_name}: {np.mean(scores):.4f} (+/- {np.std(scores):.4f})\")\n",
    "\n",
    "print(\"-\" * 50)\n",
    "\n",
    "\n",
    "# --- 3. 使用权重训练最终模型，并在测试集上评估 ---\n",
    "print(\"--- 3. 训练最终的带权重模型 (整个训练集) 并在测试集上评估 ---\")\n",
    "start_time_final = time.time()\n",
    "# 使用与交叉验证中相同的设置来训练最终模型\n",
    "rf_model_weighted_final = RandomForestClassifier(\n",
    "    random_state=42,\n",
    "    class_weight='balanced'\n",
    ")\n",
    "rf_model_weighted_final.fit(X_train, y_train) # 在整个训练集上训练\n",
    "rf_pred_weighted = rf_model_weighted_final.predict(X_test) # 在测试集上预测\n",
    "end_time_final = time.time()\n",
    "\n",
    "print(f\"最终带权重模型训练与预测耗时: {end_time_final - start_time_final:.4f} 秒\")\n",
    "print(\"\\n带权重随机森林 在测试集上的分类报告：\")\n",
    "# 确保 classification_report 也关注少数类 (可以通过 target_names 参数指定标签名称)\n",
    "# 或者直接查看报告中少数类标签对应的行\n",
    "print(classification_report(y_test, rf_pred_weighted)) # , target_names=[f'Class {majority_label}', f'Class {minority_label}'] 如果需要指定名称\n",
    "print(\"带权重随机森林 在测试集上的混淆矩阵：\")\n",
    "print(confusion_matrix(y_test, rf_pred_weighted))\n",
    "print(\"-\" * 50)\n",
    "\n",
    "# 对比总结 (简单示例)\n",
    "print(\"性能对比 (测试集上的少数类召回率 Recall):\")\n",
    "recall_default = recall_score(y_test, rf_pred_default, pos_label=minority_label)\n",
    "recall_weighted = recall_score(y_test, rf_pred_weighted, pos_label=minority_label)\n",
    "print(f\"  默认模型: {recall_default:.4f}\")\n",
    "print(f\"  带权重模型: {recall_weighted:.4f}\")\n",
    "\n"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "DL",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.16"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
