{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "colab_type": "text",
    "id": "view-in-github"
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/CoreTheGreat/HBPU-Machine-Learning-Course/blob/main/ML_Chapter3_Classification.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "lPboLx_o0UxI"
   },
   "source": [
    "# 第三章：分类\n",
    "湖北理工学院《机器学习》课程资料\n",
    "\n",
    "作者：李辉楚吴\n",
    "\n",
    "笔记内容概述:\n",
    "* 3.1 逻辑回归与二分类问题\n",
    "* 3.2 常用的二分类模型——支持向量机\n",
    "* 3.3 常用的二分类模型——决策树\n",
    "* 3.4 二分类模型的度量\n",
    "* 3.5 由二分类到多分类\n",
    "* 3.6 实验3：基于机器学习方法的手写字母识别\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Ifpm7Sql4U09"
   },
   "source": [
    "## 3.1 逻辑回归与二分类问题\n",
    "\n",
     "### 3.1.1 利用torchvision载入训练数据MNIST\n",
    "\n",
     "MNIST是一个小型的基于灰度图像(图像大小1x28x28)的手写数字识别数据集，包含60000个训练数据，10000个测试数据。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "sM-ziKb94S9_",
    "outputId": "9a8bd59b-1ed4-47e3-e118-cee1849a610a"
   },
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "from sklearn import svm\n",
    "from sklearn import tree\n",
    "from sklearn.linear_model import LogisticRegression, LinearRegression\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, mean_absolute_error\n",
    "from sklearn.metrics import classification_report, confusion_matrix\n",
    "import seaborn as sns\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.multiclass import OneVsRestClassifier\n",
    "from sklearn.multiclass import OneVsOneClassifier\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "import time\n",
    "\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "color_list = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "label_size = 18 # Label size\n",
    "ticklabel_size = 14 # Tick label size\n",
    "\n",
    "# Load the MNIST dataset to display\n",
    "imgDisp = torchvision.datasets.MNIST(root='./data', train=False, download=True)\n",
    "img, label = imgDisp[0]\n",
    "\n",
    "print(f'Image size is {img.size}')\n",
    "\n",
    "fig, ax = plt.subplots(figsize=(7,7))\n",
    "ax.imshow(img, cmap='gray') # Display image\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "ax.set_title(f\"Label: {label}\", fontsize=label_size)\n",
    "# plt.savefig(f'exp_character{label}.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "构建ftrExtract类，在读取数据的时候提取样本的特征"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "BWPSDyOq5qOO",
    "outputId": "9aed06f2-67a9-4a6f-f00c-d91554c3a260"
   },
   "outputs": [],
   "source": [
     "class ftrExtract(object):\n",
     "    '''\n",
     "    Transform that converts an image tensor into a 1-D feature vector:\n",
     "    the column-wise and row-wise means and standard deviations of the\n",
     "    image, concatenated. Applied after ToTensor in the transform pipeline.\n",
     "    '''\n",
     "    def __call__(self, tensor):\n",
     "        tensor = tensor.squeeze() # Compress redundant dimensions (e.g. 1x28x28 -> 28x28)\n",
     "\n",
     "        # Column-wise (axis=0) and row-wise (axis=1) pixel means\n",
     "        mean_width = tensor.mean(axis=0)\n",
     "        mean_height = tensor.mean(axis=1)\n",
     "\n",
     "        # Column-wise and row-wise pixel standard deviations\n",
     "        std_width = tensor.std(axis=0)\n",
     "        std_height = tensor.std(axis=1)\n",
     "\n",
     "        # Concatenate the four per-axis statistics into one feature vector\n",
     "        ftrs = torch.cat([mean_width, mean_height, std_width, std_height])\n",
     "\n",
     "        return ftrs\n",
    "\n",
    "# Define a transform to normalize the data\n",
    "transform = transforms.Compose([transforms.ToTensor(), ftrExtract()])\n",
    "\n",
    "# Load the MNIST dataset\n",
    "trainset = torchvision.datasets.MNIST(root='./Data', train=True, download=True, transform=transform)\n",
    "testset = torchvision.datasets.MNIST(root='./Data', train=False, download=True, transform=transform)\n",
    "\n",
    "# Count number of each class in trainset\n",
    "train_class_counts = {}\n",
    "for _, label in trainset:\n",
    "    if label not in train_class_counts:\n",
    "        train_class_counts[label] = 0\n",
    "    train_class_counts[label] += 1\n",
    "\n",
    "# Count number of each class in testset\n",
    "test_class_counts = {}\n",
    "for _, label in testset:\n",
    "    if label not in test_class_counts:\n",
    "        test_class_counts[label] = 0\n",
    "    test_class_counts[label] += 1\n",
    "\n",
    "# Print results\n",
    "for i in range(10):\n",
    "    cls_counts_train = train_class_counts.get(i, 0)\n",
    "    cls_ratio_train = cls_counts_train / len(trainset)\n",
    "    cls_counts_test = test_class_counts.get(i, 0)\n",
    "    cls_ratio_test = cls_counts_test / len(testset)\n",
    "\n",
    "    print(f\"Class {i}: Trainset - {cls_counts_train} ({cls_ratio_train:.2%}), Testset - {cls_counts_test} ({cls_ratio_test:.2%})\")\n",
    "\n",
    "batch_size = 42\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)\n",
    "testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)\n",
    "\n",
    "# Get a batch of training data\n",
    "dataiter = iter(trainloader)\n",
    "data, labels = next(dataiter)\n",
    "\n",
    "input_size = data[0].numpy().shape[0]\n",
    "print(f'Input_size is {input_size}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "BtGNbqlSKnC-"
   },
   "source": [
    "### 3.1.2 使用线性回归识别手写字母"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Convert data to numpy arrays\n",
    "X_train = []\n",
    "y_train = []\n",
    "for batch_image, batch_label in trainloader:\n",
    "    X_train.append(batch_image.view(-1, input_size).numpy())\n",
    "    y_train.append(batch_label.numpy())\n",
    "\n",
    "X_train = np.vstack(X_train)\n",
    "y_train = np.concatenate(y_train)\n",
    "\n",
    "print(f'Shapes of X_train and Y_train: {X_train.shape} and {y_train.shape}')\n",
    "\n",
    "X_test = []\n",
    "y_test = []\n",
    "for batch_image, batch_label in testloader:\n",
    "    X_test.append(batch_image.view(-1, input_size).numpy())\n",
    "    y_test.append(batch_label.numpy())\n",
    "\n",
    "X_test = np.vstack(X_test)\n",
    "y_test = np.concatenate(y_test)\n",
    "\n",
    "print(f'Shapes of X_test and y_test: {X_test.shape} and {y_test.shape}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用回归的方法做分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Initialize the linear regression model\n",
    "lr_model = LinearRegression()\n",
    "\n",
    "# Train the model\n",
    "lr_model.fit(X_train, y_train)\n",
    "\n",
    "# Make predictions on the test set\n",
    "y_pred = lr_model.predict(X_test)\n",
    "\n",
    "# Round predictions to nearest integer for classification\n",
    "y_pred_constrained = np.clip(y_pred, 0, np.max(y_test))\n",
    "y_pred_rounded = np.round(y_pred_constrained).astype(int)\n",
    "print(f\"Predicted classes: {np.unique(y_pred_rounded)}\")\n",
    "\n",
    "# Calculate accuracy\n",
    "accuracy = np.mean(y_pred_rounded == y_test)\n",
    "print(f\"Real classes: {np.unique(y_test)}\")\n",
    "print(f\"Accuracy of linear regression model: {accuracy:.4f}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "用混淆矩阵展示线性回归分类的结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def manual_confusion_matrix(y_true, y_pred, num_classes):\n",
    "    cm = np.zeros((num_classes, num_classes), dtype=int)\n",
    "    for t, p in zip(y_true, y_pred):\n",
    "        cm[t][p] += 1\n",
    "    return cm\n",
    "\n",
    "# Get number of character types\n",
    "num_classes = len(np.unique(y_test)) \n",
    "\n",
    "# Calculate confusion matrix\n",
    "cm = manual_confusion_matrix(y_test, y_pred_rounded, num_classes)\n",
    "\n",
    "# Print the results in the specified format\n",
    "for i in range(num_classes):\n",
    "    output = f\"Real label {i}, \"\n",
    "    for j in range(num_classes):\n",
    "        output += f\"Predict label {j} ({cm[i, j]}), \"\n",
    "    print(output[:-2])  # Remove the last comma and space"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "画混淆矩阵图"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Now let's create the confusion matrix plot\n",
    "fig, ax = plt.subplots(figsize=(10, 8))\n",
    "\n",
    "# Plot the confusion matrix as a heatmap\n",
    "im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n",
    "\n",
    "# Set up the axes\n",
    "ax.set(xticks=np.arange(cm.shape[1]),\n",
    "       yticks=np.arange(cm.shape[0]),\n",
    "       xticklabels=np.arange(10), yticklabels=np.arange(10),\n",
    "       xlabel='Predicted label', ylabel='True label')\n",
    "\n",
    "# Loop over data dimensions and create text annotations\n",
    "for i in range(cm.shape[0]):\n",
    "    for j in range(cm.shape[1]):\n",
    "        ax.text(j, i, format(cm[i, j], 'd'),\n",
    "                ha=\"center\", va=\"center\", fontsize=ticklabel_size,\n",
    "                color=\"white\" if cm[i, j] > cm.max() / 2. else \"black\")\n",
    "\n",
    "# Adjust font sizes\n",
    "ax.set_xlabel('Predicted label', fontsize=label_size)\n",
    "ax.set_ylabel('True label', fontsize=label_size)\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "# Tight layout to ensure everything fits\n",
    "fig.tight_layout()\n",
    "\n",
    "# Save the figure if needed\n",
    "# plt.savefig('Regression_for_classification.png', dpi=300, bbox_inches='tight')\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "使用逻辑回归做分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
     "# Initialize the logistic regression model\n",
     "lr_model = LogisticRegression(max_iter=1000)\n",
     "\n",
     "# Train the model\n",
     "lr_model.fit(X_train, y_train)\n",
     "\n",
     "# Make predictions on the test set\n",
     "y_pred = lr_model.predict(X_test)\n",
     "y_pred_proba = lr_model.predict_proba(X_test)\n",
     "\n",
     "# NOTE: LogisticRegression.predict already returns integer class labels,\n",
     "# so rounding is a no-op here; kept only to mirror the linear-regression cell\n",
     "y_pred_rounded = np.round(y_pred).astype(int)\n",
     "print(f\"Predicted classes: {np.unique(y_pred_rounded)}\")\n",
     "\n",
     "# Calculate accuracy\n",
     "accuracy = accuracy_score(y_test, y_pred)\n",
     "print(f\"Accuracy of logistic regression model: {accuracy:.4f}\")\n",
     "\n",
     "# Calculate and print classification report\n",
     "print(\"\\nClassification Report:\")\n",
     "print(classification_report(y_test, y_pred))\n",
     "\n",
     "# Confusion matrix used by the heatmap in the next cell\n",
     "cm = confusion_matrix(y_test, y_pred)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "用混淆矩阵展示逻辑回归的分类结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Now let's create the confusion matrix plot\n",
    "fig, ax = plt.subplots(figsize=(10, 8))\n",
    "\n",
    "# Plot the confusion matrix as a heatmap\n",
    "im = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n",
    "\n",
    "# Set up the axes\n",
    "ax.set(xticks=np.arange(cm.shape[1]),\n",
    "       yticks=np.arange(cm.shape[0]),\n",
    "       xticklabels=np.arange(10), yticklabels=np.arange(10),\n",
    "       xlabel='Predicted label', ylabel='True label')\n",
    "\n",
    "# Loop over data dimensions and create text annotations\n",
    "for i in range(cm.shape[0]):\n",
    "    for j in range(cm.shape[1]):\n",
    "        ax.text(j, i, format(cm[i, j], 'd'),\n",
    "                ha=\"center\", va=\"center\", fontsize=ticklabel_size,\n",
    "                color=\"white\" if cm[i, j] > cm.max() / 2. else \"black\")\n",
    "\n",
    "# Adjust font sizes\n",
    "ax.set_xlabel('Predicted label', fontsize=label_size)\n",
    "ax.set_ylabel('True label', fontsize=label_size)\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "# Tight layout to ensure everything fits\n",
    "fig.tight_layout()\n",
    "\n",
    "# Save the figure if needed\n",
    "# plt.savefig('Logicregression_for_classification.png', dpi=300, bbox_inches='tight')\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "选择三个随机样本来展示输出的形式，尤其是Softmax前后的概率分布"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Select an example of character 1 randomly\n",
    "# Select an example of 1 randomly\n",
    "char_1_indices = np.where(y_test == 1)[0]\n",
    "random_char_1_index = np.random.choice(char_1_indices)\n",
    "random_char_1 = X_test[random_char_1_index]\n",
    "\n",
    "# Select an example of 2 randomly\n",
    "char_2_indices = np.where(y_test == 2)[0]\n",
    "random_char_2_index = np.random.choice(char_2_indices)\n",
    "random_char_2 = X_test[random_char_2_index]\n",
    "\n",
    "# Select an example of 6 randomly\n",
    "char_6_indices = np.where(y_test == 6)[0]\n",
    "random_char_6_index = np.random.choice(char_6_indices)\n",
    "random_char_6 = X_test[random_char_6_index]\n",
    "\n",
    "# Get predictions and probabilities for these examples\n",
    "examples = [random_char_1, random_char_2, random_char_6]\n",
    "example_predictions = lr_model.predict(examples)\n",
    "example_probabilities = lr_model.predict_proba(examples)\n",
    "\n",
    "print(\"Predicted classes: \", example_predictions)\n",
    "\n",
    "# Display probabilities in percentage style, one line per example\n",
    "print(\"Probabilities:\")\n",
    "for i, probs in enumerate(example_probabilities):\n",
    "    prob_strings = [f\"{prob:.2%}\" for _, prob in enumerate(probs)]\n",
    "    print(f\"Example {i+1}: {', '.join(prob_strings)}\")"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.1.3 生成虚拟样本解释为什么不建议使用线性回归做分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.linear_model import LinearRegression, LogisticRegression\n",
    "\n",
    "x_min, x_max = (-3, 6)\n",
    "y_min, y_max = (-2, 8)\n",
    "    \n",
    "# Function to plot decision boundary\n",
    "def plot_decision_boundary(ax, model, X, y, only_scatter=False):\n",
    "    global ticklabel_size, label_size\n",
    "    global x_min, x_max\n",
    "    global y_min, y_max\n",
    "    \n",
    "    # Display samples\n",
    "    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdYlBu, edgecolor='black', s=8**2)\n",
    "    \n",
    "    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),\n",
    "                         np.linspace(y_min, y_max, 100))\n",
    "    X_plot = np.c_[xx.ravel(), yy.ravel()]\n",
    "    \n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "    \n",
    "    ax.set_xlim(x_min, x_max)\n",
    "    ax.set_ylim(y_min, y_max)\n",
    "    \n",
    "    ax.set_xlabel('Feature 1', fontsize=label_size)\n",
    "    ax.set_ylabel('Feature 2', fontsize=label_size)\n",
    "    \n",
    "    if not only_scatter:\n",
    "        # Display the decision lines\n",
    "        if isinstance(model, LinearRegression):\n",
    "            Z = model.predict(X_plot)\n",
    "        else:  # LogisticRegression\n",
    "            Z = model.decision_function(X_plot)\n",
    "            \n",
    "        Z = Z.reshape(xx.shape)    \n",
    "        ax.contour(xx, yy, Z, levels=[0.5], colors='k', linestyles=['-'], linewidths=3)   \n",
    "        \n",
    "# Set random seed for reproducibility\n",
    "np.random.seed(42)\n",
    "\n",
    "# Generate two dense sets\n",
    "n_samples = 100\n",
    "set1 = np.random.randn(n_samples, 2) + np.array([1, 1])\n",
    "set2 = np.random.randn(n_samples, 2) + np.array([1, 2])\n",
    "\n",
    "X = np.vstack([set1, set2])\n",
    "y = np.hstack([np.zeros(n_samples), np.ones(n_samples)])\n",
    "\n",
    "# Fit linear regression and logistic regression\n",
    "lr_model_init = LinearRegression()\n",
    "lr_model_init.fit(X, y)\n",
    "\n",
    "fig, ax0 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_boundary(ax0, lr_model_init, X, y, only_scatter=True)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('biclassification_scatter_init.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Plot initial decision boundary line of linear regression\n",
    "fig, ax1 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_boundary(ax1, lr_model_init, X, y)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('linear_biclassification_init.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "log_model_init = LogisticRegression(max_iter=1000)\n",
    "log_model_init.fit(X, y) \n",
    "\n",
    "# Plot initial decision boundary line of logistic regression\n",
    "fig, ax2 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_boundary(ax2, log_model_init, X, y)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('logistic_biclassification_init.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Print decision boundaries\n",
    "print(\"Init Linear Regression Decision Boundary:\")\n",
    "print(f\"y = {lr_model_init.intercept_:.3f} + {lr_model_init.coef_[0]:.3f}*x1 + {lr_model_init.coef_[1]:.3f}*x2\")\n",
    "\n",
    "print(\"\\nInit Logistic Regression Decision Boundary:\")\n",
    "print(f\"y = {log_model_init.intercept_[0]:.3f} + {log_model_init.coef_[0][0]:.3f}*x1 + {log_model_init.coef_[0][1]:.3f}*x2\")\n",
    "\n",
    "# Add a third set far from the first two\n",
    "n_samples = 20 * n_samples\n",
    "set3 = np.random.randn(n_samples, 2) + np.array([2, 4])\n",
    "X_new = np.vstack([X, set3])\n",
    "y_new = np.hstack([y, np.ones(n_samples)])\n",
    "\n",
    "# Refit the models\n",
    "lr_model_update = LinearRegression()\n",
    "lr_model_update.fit(X_new, y_new)\n",
    "\n",
    "fig, ax3 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_boundary(ax3, lr_model_update, X_new, y_new, only_scatter=True)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('biclassification_scatter_update.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "log_model_update = LogisticRegression(max_iter=1000)\n",
    "log_model_update.fit(X_new, y_new)\n",
    "\n",
    "# Plot initial decision boundary line of linear regression\n",
    "fig, ax4 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_boundary(ax4, lr_model_update, X_new, y_new)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('linear_biclassification_update.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Plot initial decision boundary line of logistic regression\n",
    "fig, ax5 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_boundary(ax5, log_model_update, X_new, y_new)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('logistic_biclassification_update.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Print decision boundaries\n",
    "print(\"Linear Regression Decision Boundary:\")\n",
    "print(f\"y = {lr_model_update.intercept_:.3f} + {lr_model_update.coef_[0]:.3f}*x1 + {lr_model_update.coef_[1]:.3f}*x2\")\n",
    "\n",
    "print(\"\\nLogistic Regression Decision Boundary:\")\n",
    "print(f\"y = {log_model_update.intercept_[0]:.3f} + {log_model_update.coef_[0][0]:.3f}*x1 + {log_model_update.coef_[0][1]:.3f}*x2\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "画出分类边界，进一步解释不用线性回归做分类的原因"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_decision_contourlines(ax, model, X, y):\n",
    "    global ticklabel_size, label_size\n",
    "    global x_min, x_max\n",
    "    global y_min, y_max\n",
    "    \n",
    "    ax.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.RdYlBu, edgecolor='black')\n",
    "    \n",
    "    xx, yy = np.meshgrid(np.linspace(x_min, x_max, 100),\n",
    "                         np.linspace(y_min, y_max, 100))\n",
    "    \n",
    "    if isinstance(model, LinearRegression):\n",
    "        Z = model.predict(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)\n",
    "        \n",
    "    else:  # LogisticRegression\n",
    "        Z = model.decision_function(np.c_[xx.ravel(), yy.ravel()]).reshape(xx.shape)\n",
    "        \n",
    "    ax.contour(xx, yy, Z, levels=[0, 0.5, 1], colors=['r', 'k', 'b'], \n",
    "                   linestyles=['--', '-', '--'], linewidths=3)\n",
    "        \n",
    "    # Shade the region where 0 < y < 1\n",
    "    ax.contourf(xx, yy, Z, levels=[0, 1], colors=['lightgray'], alpha=0.4)\n",
    "    \n",
    "    ax.set_xlabel('Feature 1', fontsize=label_size)\n",
    "    ax.set_ylabel('Feature 2', fontsize=label_size)\n",
    "    \n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "    \n",
    "    ax.set_xlim(x_min, x_max)\n",
    "    ax.set_ylim(y_min, y_max)\n",
    "    \n",
    "# Plot initial decision boundaries\n",
    "fig, ax6 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_contourlines(ax6, lr_model_init, X, y)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('linear_contour_init.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "fig, ax7 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_contourlines(ax7, log_model_init, X, y)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('logistic_contour_init.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Plot updated decision boundaries\n",
    "fig, ax8 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_contourlines(ax8, lr_model_update, X_new, y_new)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('linear_contour_update.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "fig, ax9 = plt.subplots(figsize=(10, 8))\n",
    "plot_decision_contourlines(ax9, log_model_update, X_new, y_new)\n",
    "plt.tight_layout()\n",
    "# plt.savefig('logistic_contour_update.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.1.4 逻辑回归\n",
    "\n",
    "构建Sigmoid函数，并画出Sigmoid函数的曲线\n",
    "\n",
    "直观的展示如何建立线性回归函数和概率之间的联系"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def sigmoid(x):\n",
    "    return 1 / (1 + np.exp(-x))\n",
    "\n",
    "# Generate x values\n",
    "x = np.linspace(-10, 10, 100)\n",
    "\n",
    "# Calculate sigmoid values\n",
    "y = sigmoid(x)\n",
    "\n",
    "# Plot the sigmoid curve\n",
    "fig, ax = plt.subplots(figsize=(10, 6))\n",
    "ax.plot(x, y, 'b-', linewidth=2)\n",
    "ax.set_xlabel('z', fontsize=label_size)\n",
    "ax.set_ylabel('y', fontsize=label_size)\n",
    "\n",
    "# Set x-ticks\n",
    "xticks = np.arange(-10.0, 10.1, 2.5)\n",
    "ax.set_xticks(xticks)\n",
    "\n",
    "# Modify tick labels\n",
    "xticklabels = ['-∞' if x == -10 else ('+∞' if x == 10 else str(x)) for x in xticks]\n",
    "ax.set_xticklabels(xticklabels)\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "ax.set_xlim(-10, 10)\n",
    "ax.set_ylim(-0.05, 1.05)\n",
    "\n",
    "# Add vertical line at x=0\n",
    "ax.axvline(x=0, color='r', linestyle='--')\n",
    "\n",
    "# Add horizontal lines at y=0.5 and y=1\n",
    "ax.axhline(y=0.5, color='g', linestyle='--')\n",
    "ax.axhline(y=1, color='g', linestyle='--')\n",
    "\n",
    "plt.tight_layout()\n",
    "# plt.savefig('sigmoid_function.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "j2CdFQFnL_jA"
   },
   "source": [
    "### 3.1.5 使用逻辑回归识别手写字母\"1\"\n",
    "\n",
    "构建二分类数据集"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "qdaSQw3bAouJ"
   },
   "outputs": [],
   "source": [
    "# Extract features and labels from trainset\n",
    "x_train = []\n",
    "y_train = []\n",
    "for image, label in trainset:\n",
    "    x_train.append(image.numpy())\n",
    "    y_train.append(1 if label == 1 else 0)  # Set label to 1 for character 1, 0 otherwise\n",
    "\n",
    "x_train = np.array(x_train)\n",
    "y_train = np.array(y_train)\n",
    "\n",
    "# Extract features and labels from trainset\n",
    "x_test = []\n",
    "y_test = []\n",
    "for image, label in testset:\n",
    "    x_test.append(image.numpy())\n",
    "    y_test.append(1 if label == 1 else 0)  # Set label to 1 for character 1, 0 otherwise\n",
    "\n",
    "x_test = np.array(x_test)\n",
    "y_test = np.array(y_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "D92HNTRc6MCC"
   },
   "source": [
    "定义Sigmoid分类函数和损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define linear function\n",
    "def linear(X, w, b):\n",
    "    '''        \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    '''\n",
    "    return np.dot(X, w) + b\n",
    "\n",
    "# Define sigmoid function\n",
    "def sigmoid(z):\n",
    "    return 1 / (1 + np.exp(-z))\n",
    "\n",
    "# Define forward function\n",
    "def forward(X, w, b):\n",
    "    '''        \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    '''\n",
    "    return sigmoid(linear(X, w, b))\n",
    "\n",
    "# Predict probability function\n",
    "def predict(X, w, b):\n",
    "    y_proba = forward(X, w, b)\n",
    "    y_pred = (y_proba >= 0.5).astype(int)\n",
    "    return y_pred, y_proba\n",
    "\n",
    "# Binary cross-entropy\n",
    "def binary_cross_entropy(y, y_pred, eps=1e-15):\n",
    "    return -(y * np.log(y_pred + eps) + (1 - y) * np.log(1 - y_pred + eps))\n",
    "    \n",
    "# Define compute_loss function\n",
    "def compute_loss(X, y, w, b):\n",
    "    \"\"\"\n",
    "    Compute the binary cross-entropy loss for logistic regression.\n",
    "    \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    y (numpy array): True labels, shape (n_samples,)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    \n",
    "    Returns:\n",
    "    float: Average binary cross-entropy loss\n",
    "    \"\"\"    \n",
    "    n = X.shape[0] # number of samples\n",
    "    \n",
    "    # Compute model predictions\n",
    "    y_pred = forward(X, w, b)\n",
    "    \n",
    "    # Compute loss\n",
    "    loss = 1/n * np.sum(binary_cross_entropy(y, y_pred))\n",
    "    \n",
    "    return loss\n",
    "\n",
    "# Compute gradients\n",
    "def compute_gradients(X, y, w, b):\n",
    "    \"\"\"\n",
    "    Compute the gradients for logistic regression.\n",
    "    \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    y (numpy array): True labels, shape (n_samples,)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    \n",
    "    Returns:\n",
    "    dw: gradients of weights\n",
    "    db: gradients of bias\n",
    "    \"\"\"\n",
    "    n = X.shape[0] # number of samples\n",
    "    \n",
    "    # Compute model predictions\n",
    "    y_pred = forward(X, w, b)\n",
    "    \n",
    "    dw = 1/n * np.dot(X.T, (y_pred - y))\n",
    "    db = 1/n * np.sum(y_pred - y)\n",
    "    \n",
    "    return dw, db\n",
    "\n",
    "# Train logistic regression model\n",
    "def train_logistic_regression(X, y, learning_rate=0.01, num_iterations=1000):\n",
    "    \"\"\"\n",
    "    Compute the gradients for logistic regression.\n",
    "    \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    y (numpy array): True labels, shape (n_samples,)\n",
    "    \n",
    "    Returns:\n",
    "    w: weights of logistic regression model\n",
    "    b: bias of logistic regression model\n",
    "    \"\"\"\n",
    "    eps = 1e-15\n",
    "    _, ftr_num = X.shape\n",
    "    \n",
    "    w = np.zeros(ftr_num)\n",
    "    b = 0.0\n",
    "    \n",
    "    # Initialize Adagrad accumulators\n",
    "    lr_w = np.zeros(ftr_num)\n",
    "    lr_b = 0.0\n",
    "    \n",
    "    for i in range(num_iterations):\n",
    "        # Compute loss and gradients\n",
    "        loss = compute_loss(X, y, w, b)\n",
    "        dw, db = compute_gradients(X, y, w, b)\n",
    "        \n",
    "        # Update accumulators\n",
    "        lr_w = dw ** 2\n",
    "        lr_b = db ** 2\n",
    "        \n",
    "        # Update parameters\n",
    "        w -= learning_rate / (np.sqrt(lr_w) + eps) * dw\n",
    "        b -= learning_rate / (np.sqrt(lr_b) + eps) * db\n",
    "        \n",
    "        # Print loss every 100 iterations\n",
    "        if i % 100 == 0:\n",
    "            print(f\"Iteration {i}, Loss: {loss}\")\n",
    "    \n",
    "    return w, b\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "用曲线图直观的展现损失函数的意义"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate prediction values\n",
    "y_pred = np.linspace(0.001, 0.999, 1000)\n",
    "\n",
    "# Compute loss for y_true = 1 and y_true = 0\n",
    "loss_y1 = binary_cross_entropy(1, y_pred)\n",
    "loss_y0 = binary_cross_entropy(0, y_pred)\n",
    "\n",
    "# Plotting\n",
    "fig, ax_y1 = plt.subplots(figsize=(10, 6))\n",
    "ax_y1.plot(y_pred, loss_y1, label='y_true = 1', color='blue')\n",
    "ax_y1.set_xlabel('Predicted y', fontsize=label_size)\n",
    "ax_y1.set_ylabel('Loss', fontsize=label_size)\n",
    "ax_y1.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "plt.tight_layout()\n",
    "plt.savefig('binary_cross_entropy_loss1.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Plotting\n",
    "fig, ax_y0 = plt.subplots(figsize=(10, 6))\n",
    "plt.plot(y_pred, loss_y0, label='y_true = 0', color='red')\n",
    "ax_y0.set_xlabel('Predicted y', fontsize=label_size)\n",
    "ax_y0.set_ylabel('Loss', fontsize=label_size)\n",
    "ax_y0.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "plt.tight_layout()\n",
    "plt.savefig('binary_cross_entropy_loss0.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate x values\n",
    "x = np.linspace(0.01, 5, 1000)\n",
    "\n",
    "# Compute natural logarithm\n",
    "y = -np.log(x)\n",
    "\n",
    "# Create the plot\n",
    "fig, ax_log = plt.subplots(figsize=(6, 6))\n",
    "\n",
    "ax_log.plot(x, y, label='ln(x)', color='k', linewidth=2)\n",
    "\n",
    "ax_log.set_xlabel('x', fontsize=label_size)\n",
    "ax_log.set_ylabel('log$_e$(x)', fontsize=label_size)\n",
    "ax_log.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "ax_log.set_xlim(-0.05, 5)\n",
    "ax_log.set_ylim(-2, 4)\n",
    "# Add vertical line at x=1\n",
    "ax_log.axvline(x=1, color='gray', linestyle='--')\n",
    "\n",
    "# Add horizontal line at y=0\n",
    "ax_log.axhline(y=0, color='gray', linestyle='--')\n",
    "\n",
    "plt.tight_layout()\n",
    "# plt.savefig('natural_logarithm_function.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "训练逻辑回归模型"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train the model\n",
    "w, b = train_logistic_regression(x_train, y_train)\n",
    "\n",
    "y_pred, y_proba = predict(x_test, w, b)\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "precision = precision_score(y_test, y_pred)\n",
    "recall = recall_score(y_test, y_pred)\n",
    "f1 = f1_score(y_test, y_pred)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "gg4PU499y9qj"
   },
   "source": [
    "案例演示：随机选取图片，输出判断结果"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "JWaTWosQyt-o",
    "outputId": "1dac9a8c-4a1d-46d6-a5e3-35ec7fb4d605"
   },
   "outputs": [],
   "source": [
    "# Random select 3 examples from imgDisp and testset\n",
    "np.random.seed(42)\n",
    "idx = np.random.choice(len(imgDisp), 3)\n",
    "\n",
    "# Select instances\n",
    "imgDisp_select = [imgDisp[i] for i in idx]\n",
    "x_select = x_test[idx]\n",
    "y_select = y_test[idx]\n",
    "\n",
    "y_select_pred, y_select_proba = predict(x_select, w, b)\n",
    "\n",
    "# Check the selected instances' labels are the same\n",
    "for i in range(len(idx)):\n",
    "    print(f'Sample {i+1}: imgDisp label is {imgDisp_select[i][1]}, x label is {y_select[i]}')\n",
    "\n",
    "    # Display image from imgDisp\n",
    "    fig, ax = plt.subplots(figsize=(7,7))\n",
    "    ax.imshow(imgDisp_select[i][0], cmap='gray')\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "    ax.set_title(f\"Label: {imgDisp_select[i][1]}, Prediction: {y_select_proba[i]:.4f}\", fontsize=label_size)\n",
    "\n",
    "    # plt.savefig(f'binary_prediction_{i+1}.png', dpi=300) # Make figure clearer\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "vK7Nd0ATTjpC"
   },
   "source": [
    "## 3.2 常用的二分类模型——支持向量机"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### 3.2.1 支持向量机的基本逻辑"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "Pyu18U8POAqC",
    "outputId": "4675f0dc-2e39-4941-fa3f-4d39e2c4b912"
   },
   "outputs": [],
   "source": [
    "# Define SVM classifier\n",
    "mdl_svm = svm.SVC(kernel='linear', probability=True)\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_svm.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "NhRcX38R7GpP",
    "outputId": "db20fdca-acd4-4111-fb45-334f0c491b54"
   },
   "outputs": [],
   "source": [
    "# Make predictions and evaluate the model\n",
    "y_pred_svm = mdl_svm.predict(x_test)\n",
    "y_proba_svm = mdl_svm.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_svm)\n",
    "precision = precision_score(y_test, y_pred_svm)\n",
    "recall = recall_score(y_test, y_pred_svm)\n",
    "f1 = f1_score(y_test, y_pred_svm)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "m2njQ-yxpqju"
   },
   "source": [
    "## 3.3 常用的二分类模型——决策树和随机森林"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "vEwfIl7uazAA",
    "outputId": "4abe6249-5379-41a1-8ee8-be53eb0e2cea"
   },
   "outputs": [],
   "source": [
    "# Define DecisionTree classifier\n",
    "mdl_dt = tree.DecisionTreeClassifier()\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_dt.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Define Random Forest classifier\n",
    "mdl_rf = RandomForestClassifier(n_estimators=100)\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_rf.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "DiBlPKs9fNaM",
    "outputId": "70bf3572-3b2c-4888-f5cb-bee4d3aa2127"
   },
   "outputs": [],
   "source": [
    "y_pred_dt = mdl_dt.predict(x_test)\n",
    "y_proba_dt = mdl_dt.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_dt)\n",
    "precision = precision_score(y_test, y_pred_dt)\n",
    "recall = recall_score(y_test, y_pred_dt)\n",
    "f1 = f1_score(y_test, y_pred_dt)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "y_pred_rf = mdl_rf.predict(x_test)\n",
    "y_proba_rf = mdl_rf.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_rf)\n",
    "precision = precision_score(y_test, y_pred_rf)\n",
    "recall = recall_score(y_test, y_pred_rf)\n",
    "f1 = f1_score(y_test, y_pred_rf)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "Ok1YZCFqNAuH"
   },
   "source": [
    "## 3.4 二分类模型的度量"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "A9QJr7SMOCBM"
   },
   "source": [
    "准确率、召回率、敏感性、特异性、精确度、F1-Score"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "R1-5Gyl5SWOp",
    "outputId": "5db21c58-1745-4fee-817f-2e80615e28b9"
   },
   "outputs": [],
   "source": [
    "def cls_counts(y_test, y_proba, th=0.5):\n",
    "    y_pred = (y_proba > th).astype(int)\n",
    "\n",
    "    tp_idx = (y_test == 1) & (y_pred == 1)\n",
    "    fp_idx = (y_test == 0) & (y_pred == 1)\n",
    "    tn_idx = (y_test == 0) & (y_pred == 0)\n",
    "    fn_idx = (y_test == 1) & (y_pred == 0)\n",
    "\n",
    "    tp = np.sum(tp_idx)\n",
    "    fp = np.sum(fp_idx)\n",
    "    tn = np.sum(tn_idx)\n",
    "    fn = np.sum(fn_idx)\n",
    "\n",
    "    return th, (tp, fp, tn, fn)\n",
    "\n",
    "th, (tp, fp, tn, fn) = cls_counts(y_test, y_proba)\n",
    "print(f'Threshold {th}, TP: {tp}, FP: {fp}, TN: {tn}, FN: {fn}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 522
    },
    "id": "90MR_dKrArN_",
    "outputId": "202f045a-f9ff-4e2c-c4a3-41bc015ca26b"
   },
   "outputs": [],
   "source": [
    "def plot_confusion_matrix(th, tp, fp, tn, fn):\n",
    "    \"\"\"Plots a confusion matrix given the number of true positives, false positives,\n",
    "    true negatives, and false negatives.\"\"\"\n",
    "    global label_size, ticklabel_size # Set global variables of font size\n",
    "\n",
    "    cm = np.array([[tn, fp], [fn, tp]])\n",
    "\n",
    "    # Display the confusion matrix as a heatmap\n",
    "    fig, ax = plt.subplots(figsize=(5,5))\n",
    "    img = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n",
    "\n",
    "    # Add labels to the axes\n",
    "    tick_marks = np.arange(2)\n",
    "    ax.set_xticks(tick_marks, ['N', 'P'], fontsize=ticklabel_size)\n",
    "    ax.set_yticks(tick_marks, ['T', 'F'], fontsize=ticklabel_size)\n",
    "\n",
    "    # Add the count of each category to the plot\n",
    "    thresh = cm.max() / 2.\n",
    "    for i in range(cm.shape[0]):\n",
    "        for j in range(cm.shape[1]):\n",
    "            plt.text(j, i, format(cm[i, j], 'd'),\n",
    "                     fontsize=ticklabel_size,\n",
    "                     horizontalalignment=\"center\",\n",
    "                     color=\"white\" if cm[i, j] > thresh else \"black\")\n",
    "\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "\n",
    "    ax.set_ylabel('Real Label', fontsize=label_size)\n",
    "    ax.set_xlabel('Predicted Label', fontsize=label_size)\n",
    "    ax.set_title(f'Threshold: {th}', fontsize=label_size)\n",
    "\n",
    "    return fig, ax\n",
    "\n",
    "def get_scores(tp, fp, tn, fn):\n",
    "    precision = tp / (tp + fp)\n",
    "    recall = tp / (tp + fn) # Also called sensitivity\n",
    "    accuracy = (tp + tn) / (tp + fp + tn + fn)\n",
    "    f1 = 2 * precision * recall / (precision + recall)\n",
    "\n",
    "    specificity = tn / (tn + fp)\n",
    "\n",
    "    return precision, recall, specificity, accuracy, f1\n",
    "\n",
    "precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "print(f'Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "# Example usage (replace with your actual values)\n",
    "fig, ax = plot_confusion_matrix(th, tp, fp, tn, fn)\n",
    "\n",
    "# plt.savefig(f'binary_confusion_matrix.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 522
    },
    "id": "w-rkKSZfRhdL",
    "outputId": "6a5d29d3-4469-43bd-da89-7d7e670bf61d"
   },
   "outputs": [],
   "source": [
    "th = 0.1\n",
    "th, (tp, fp, tn, fn) = cls_counts(y_test, y_proba, th)\n",
    "\n",
    "precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "print(f'Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "fig, ax = plot_confusion_matrix(th, tp, fp, tn, fn)\n",
    "# plt.savefig(f'binary_confusion_matrix_0D1.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 522
    },
    "id": "GN6gBEqlSR7v",
    "outputId": "1526103a-97e4-4a41-f333-e2c06b8600fc"
   },
   "outputs": [],
   "source": [
    "th = 0.9\n",
    "th, (tp, fp, tn, fn) = cls_counts(y_test, y_proba, th)\n",
    "\n",
    "precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "print(f'Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "fig, ax = plot_confusion_matrix(th, tp, fp, tn, fn)\n",
    "# plt.savefig(f'binary_confusion_matrix_0D9.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "XNGhtZrLWSAY"
   },
   "source": [
    "ROC（Receiver operating characteristic curve）接收者操作特征曲线"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 559
    },
    "id": "qCfPvjQBYJJN",
    "outputId": "5bb82ba4-6844-4a32-82e4-6ef1398d2a57"
   },
   "outputs": [],
   "source": [
    "def plot_roc_curve_base():\n",
    "    \"\"\"Plots the ROC curve and computes AUC.\"\"\"\n",
    "    global label_size, ticklabel_size # Set global variables of font size\n",
    "\n",
    "    fig, ax = plt.subplots(figsize=(8,6))\n",
    "\n",
    "    ax.plot([0, 1], [0, 1], color='grey', lw=2, linestyle='--')\n",
    "    ax.set_xlim([0.0, 1.0])\n",
    "    ax.set_ylim([0.0, 1.0])\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "\n",
    "    ax.set_xlabel('False Positive Rate (FPR)', fontsize=label_size)\n",
    "    ax.set_ylabel('True Positive Rate (TPR)', fontsize=label_size)\n",
    "\n",
    "    return fig, ax\n",
    "\n",
    "def add_roc_curve(ax, y_true, y_proba, curve_color, curve_label):\n",
    "    \"\"\"Plots the ROC curve and computes AUC.\"\"\"\n",
    "\n",
    "    fpr, tpr, thresholds = roc_curve(y_true, y_proba)\n",
    "    roc_auc = auc(fpr, tpr)\n",
    "\n",
    "    roc = ax.plot(fpr, tpr, color=curve_color, lw=2, label=f'{curve_label} (AUC = {roc_auc:.4f})')\n",
    "\n",
    "    return roc_auc, fpr, tpr, thresholds\n",
    "\n",
    "fig, ax = plot_roc_curve_base()\n",
    "\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba, color_list[0], 'Logic Regression')\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba_svm[:,1], color_list[1], 'SVM')\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba_dt[:,1], color_list[2], 'Decision Tree')\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba_rf[:,1], color_list[3], 'Random Forests')\n",
    "\n",
    "plt.legend(loc=\"lower right\", fontsize=ticklabel_size)\n",
    "# plt.savefig(f'binary_roc_curve.png', dpi=300) # Make figure clearer\n",
    "plt.show()\n"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "gapReol-qyYW"
   },
   "source": [
    "## 3.5 由二分类到多分类"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "IcqIRYK3m_8e"
   },
   "outputs": [],
   "source": [
    "# Extract features and labels from trainset\n",
    "x_train = []\n",
    "y_train = []\n",
    "for image, label in trainset:\n",
    "    x_train.append(image.numpy())\n",
    "    y_train.append(label)\n",
    "\n",
    "x_train = np.array(x_train)\n",
    "y_train = np.array(y_train)\n",
    "\n",
    "# Extract features and labels from trainset\n",
    "x_test = []\n",
    "y_test = []\n",
    "for image, label in testset:\n",
    "    x_test.append(image.numpy())\n",
    "    y_test.append(label)\n",
    "\n",
    "x_test = np.array(x_test)\n",
    "y_test = np.array(y_test)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "5Sjl2lRIOT3W"
   },
   "source": [
    "3.5.1 一对多（One-vs-Rest）方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "ZCLadsuz1cyN",
    "outputId": "9d7fca6c-d1bc-4ce6-9c7d-954f16ca54b9"
   },
   "outputs": [],
   "source": [
    "# Define logic multi-classifier\n",
    "mdl_logic_ovr = OneVsRestClassifier(LogisticRegression(max_iter=1000))\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_logic_ovr.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Make predictions and evaluate the model\n",
    "y_pred_logic_ovr = mdl_logic_ovr.predict(x_test)\n",
    "y_proba_logic_ovr = mdl_logic_ovr.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_logic_ovr)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "JKwTr3HTO_ZG",
    "outputId": "d0ec57ad-eee1-411b-c422-225040901c63"
   },
   "outputs": [],
   "source": [
    "# Get class list: 0, 1, ..., 9\n",
    "class_list = np.sort(np.unique(y_train))\n",
    "\n",
    "# Create model list\n",
    "mdl_logic_list = []\n",
    "for c in class_list:\n",
    "    mdl_logic_list.append(LogisticRegression(max_iter=1000))\n",
    "\n",
    "# Train models seperately\n",
    "for i in range(len(class_list)):\n",
    "    start_time = time.time()\n",
    "    mdl_logic_list[i].fit(x_train, (y_train == class_list[i]).astype(int))\n",
    "    end_time = time.time()\n",
    "    print(f'Training class {class_list[i]}, Training time: {end_time - start_time:.2f} seconds')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 559
    },
    "id": "ypZ5d4NydeM1",
    "outputId": "b84e4420-fa31-4922-d51b-e13e61a571f3"
   },
   "outputs": [],
   "source": [
    "# Plot ROC curve\n",
    "fig, ax = plot_roc_curve_base()\n",
    "\n",
    "# Draw ROC of individual classifier\n",
    "for i in range(len(class_list)):\n",
    "    # Make predictions and evaluate the model\n",
    "    y_test_trans = (y_test == class_list[i]).astype(int)\n",
    "    y_proba = mdl_logic_list[i].predict_proba(x_test) # Output ratio\n",
    "\n",
    "    roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test_trans, y_proba[:,1], color_list[i], f'{class_list[i]}')\n",
    "\n",
    "plt.legend(loc=\"lower right\", fontsize=ticklabel_size)\n",
    "# plt.savefig(f'binary_roc_curve_ovr.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 1000
    },
    "id": "vsSNVXr4-Ir-",
    "outputId": "e209bb61-0e22-4ced-b74a-db1fbec50ac6"
   },
   "outputs": [],
   "source": [
    "sample_num = 10\n",
    "\n",
    "# Random select 3 examples from imgDisp and testset\n",
    "np.random.seed(1)\n",
    "idx = np.random.choice(len(imgDisp), sample_num)\n",
    "\n",
    "# Select instances\n",
    "imgDisp_select = [imgDisp[i] for i in idx]\n",
    "testset_select = [testset[i] for i in idx]\n",
    "\n",
    "# Check the selected instances' labels are the same\n",
    "for i in range(sample_num):\n",
    "    x = testset_select[i][0].view(-1, input_size)\n",
    "\n",
    "    # Using model to predict character\n",
    "    y_pred_list = []\n",
    "    for j in range(len(mdl_logic_list)):\n",
    "        y_pred_list.append(mdl_logic_list[j].predict(x))\n",
    "\n",
    "    y_pred = np.argmax(np.array(y_pred_list), axis=0)[0]\n",
    "\n",
    "    # Display image from imgDisp\n",
    "    fig, ax = plt.subplots(figsize=(7,7))\n",
    "    ax.imshow(imgDisp_select[i][0], cmap='gray')\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "    ax.set_title(f\"Label: {imgDisp_select[i][1]}, Prediction Label: {y_pred}\", fontsize=label_size)\n",
    "\n",
    "    print(f'Sample {i+1}: imgDisp label is {imgDisp_select[i][1]}, testset label is {testset_select[i][1]}, predict label is {y_pred}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "BZjng2_U0v1d",
    "outputId": "99c4f34a-fadc-49b1-d1ff-93c3be06014c"
   },
   "outputs": [],
   "source": [
    "# Prediction\n",
    "y_pred_list = []\n",
    "for i in range(len(mdl_logic_list)):\n",
    "    y_pred_list.append(mdl_logic_list[i].predict(x_test))\n",
    "\n",
    "y_pred = np.argmax(np.array(y_pred_list), axis=0)\n",
    "\n",
    "# Accuracy\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "wxvDfOE9HEsD"
   },
   "source": [
    "混淆矩阵"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 782
    },
    "id": "PXC8Yi62HHVb",
    "outputId": "c68a32c7-3a54-492e-fb6f-83ae02ca851c"
   },
   "outputs": [],
   "source": [
    "# Create confusion matrix\n",
    "cm_test = np.zeros((10, 10))\n",
    "for i in range(len(y_test)):\n",
    "    cm_test[y_test[i], y_pred[i]] += 1\n",
    "\n",
    "# Display confusion matrix\n",
    "fig, ax = plt.subplots(figsize=(9,9))\n",
    "im = ax.imshow(cm_test, cmap=plt.cm.Blues, interpolation='nearest')\n",
    "\n",
    "# Loop over data dimensions and create text annotations.\n",
    "for i in range(cm_test.shape[0]):\n",
    "    for j in range(cm_test.shape[1]):\n",
    "        ax.text(j, i, cm_test[i, j], fontsize=ticklabel_size, ha=\"center\", va=\"center\",\n",
    "                color=\"white\" if cm_test[i, j] > cm_test.max() / 2. else \"black\")\n",
    "\n",
    "ax.set_xlabel('Predicted label', fontsize=label_size)\n",
    "ax.set_ylabel('True label', fontsize=label_size)\n",
    "\n",
    "ax.set_xticks(np.arange(10))\n",
    "ax.set_xticklabels(np.arange(10))\n",
    "\n",
    "ax.set_yticks(np.arange(10))\n",
    "ax.set_yticklabels(np.arange(10))\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "# plt.savefig(f'confusion_matrix_numel.png', dpi=300) # Make figure clearer\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/",
     "height": 782
    },
    "id": "8y4cmMSMLNW0",
    "outputId": "b6cd9070-ada0-4fac-eb06-c535d8f358d2"
   },
   "outputs": [],
   "source": [
    "# Create confusion matrix\n",
    "cm_test = np.zeros((10, 10))\n",
    "for i in range(len(y_test)):\n",
    "    cm_test[y_test[i], y_pred[i]] += 1\n",
    "\n",
    "# Change value to ratio\n",
    "cm_test = cm_test / np.sum(cm_test, axis=1, keepdims=True)\n",
    "\n",
    "# Display confusion matrix\n",
    "fig, ax = plt.subplots(figsize=(9,9))\n",
    "im = ax.imshow(cm_test, cmap=plt.cm.Blues, interpolation='nearest')\n",
    "\n",
    "# Loop over data dimensions and create text annotations.\n",
    "for i in range(cm_test.shape[0]):\n",
    "    for j in range(cm_test.shape[1]):\n",
    "        ax.text(j, i, format(cm_test[i, j], '.2f'), fontsize=ticklabel_size, ha=\"center\", va=\"center\",\n",
    "                color=\"white\" if cm_test[i, j] > cm_test.max() / 2. else \"black\")\n",
    "\n",
    "ax.set_xlabel('Predicted label', fontsize=label_size)\n",
    "ax.set_ylabel('True label', fontsize=label_size)\n",
    "\n",
    "ax.set_xticks(np.arange(10))\n",
    "ax.set_xticklabels(np.arange(10))\n",
    "\n",
    "ax.set_yticks(np.arange(10))\n",
    "ax.set_yticklabels(np.arange(10))\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "# plt.savefig(f'confusion_matrix_ratio.png', dpi=300) # Make figure clearer\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "oRXvzocEbGWj"
   },
   "source": [
    "3.5.2 一对一（One-vs-One）方法"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "XP8-K-A7pwq_",
    "outputId": "912700a3-b4b8-40a1-f3c3-05424001cded"
   },
   "outputs": [],
   "source": [
    "# Define logic regression classifier\n",
    "mdl_logic_ovo = OneVsOneClassifier(LogisticRegression(max_iter=1000))\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_logic_ovo.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Make predictions and evaluate the model\n",
    "y_pred = mdl_logic_ovo.predict(x_test)\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "e69VMFy-lkg-",
    "outputId": "bd33372e-0617-4da7-e00d-ea7de45d3ad4"
   },
   "outputs": [],
   "source": [
    "# Get class list: 0, 1, ..., 9\n",
    "class_list = np.sort(np.unique(y_train))\n",
    "\n",
    "# Create model matrix to save models\n",
    "mdl_logic_matrix = {}\n",
    "for cls_p in class_list:\n",
    "    mdl_logic_matrix[cls_p] = {}\n",
    "    for cls_n in class_list:\n",
    "        if cls_p == cls_n:\n",
    "            continue\n",
    "        mdl_logic_matrix[cls_p][cls_n] = LogisticRegression(max_iter=1000)\n",
    "\n",
    "for cls_p in class_list:\n",
    "    # Training data of positive class\n",
    "    x_train_ovo_p = x_train[(y_train == cls_p), :]\n",
    "    y_train_ovo_p = np.ones(x_train_ovo_p.shape[0])\n",
    "\n",
    "    # Testing data of positive class\n",
    "    x_test_ovo_p = x_test[(y_test == cls_p), :]\n",
    "    y_test_ovo_p = np.ones(x_test_ovo_p.shape[0])\n",
    "\n",
    "    for cls_n in class_list:\n",
    "        if cls_p == cls_n:\n",
    "            continue\n",
    "\n",
    "        # Training data of negative class\n",
    "        x_train_ovo_n = x_train[(y_train == cls_n), :]\n",
    "        y_train_ovo_n = np.zeros(x_train_ovo_n.shape[0])\n",
    "\n",
    "        # Testing data of negative class\n",
    "        x_test_ovo_n = x_test[(y_test == cls_n), :]\n",
    "        y_test_ovo_n = np.zeros(x_test_ovo_n.shape[0])\n",
    "\n",
    "        # Concatenate data for training\n",
    "        x_train_ovo = np.concatenate((x_train_ovo_p, x_train_ovo_n), axis=0)\n",
    "        y_train_ovo = np.concatenate((y_train_ovo_p, y_train_ovo_n), axis=0)\n",
    "\n",
    "        # Model training\n",
    "        start_time = time.time()\n",
    "        mdl_logic_matrix[cls_p][cls_n].fit(x_train_ovo, y_train_ovo)\n",
    "        end_time = time.time()\n",
    "\n",
    "        # Concatenate data for testing\n",
    "        x_test_ovo = np.concatenate((x_test_ovo_p, x_test_ovo_n), axis=0)\n",
    "        y_test_ovo = np.concatenate((y_test_ovo_p, y_test_ovo_n), axis=0)\n",
    "\n",
    "        # Test model on sub-task\n",
    "        y_proba_ovo = mdl_logic_matrix[cls_p][cls_n].predict_proba(x_test_ovo) # Output ratio\n",
    "\n",
    "        # Display results\n",
    "        _, (tp, fp, tn, fn) = cls_counts(y_test_ovo, y_proba_ovo[:, 1])\n",
    "        precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "        print(f'Training class {cls_p} ({x_train_ovo_p.shape[0]}) vs class {cls_n} ({x_train_ovo_n.shape[0]}), Training time: {end_time - start_time:.2f} seconds, Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "sat6M2RLNBxB",
    "outputId": "7948d284-554e-46fe-98f4-e3bf90687bb5"
   },
   "outputs": [],
   "source": [
    "# Select class 1\n",
    "x_test_select = x_test[:, :]\n",
    "\n",
    "# Prediction\n",
    "y_pred_counts = np.zeros((x_test_select.shape[0], len(class_list)))\n",
    "\n",
    "for cls_p in class_list:\n",
    "    for cls_n in class_list:\n",
    "        if cls_p == cls_n:\n",
    "            continue\n",
    "\n",
    "        y_pred_counts[:, cls_p] = y_pred_counts[:, cls_p] + mdl_logic_matrix[cls_p][cls_n].predict(x_test_select)\n",
    "\n",
    "y_pred = np.argmax(y_pred_counts, axis=1)\n",
    "\n",
    "# Accuracy\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "RH0TMNsAoMjT"
   },
   "source": [
    "3.5.3 Softmax回归"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "-GlO3qSgtxgg",
    "outputId": "0776a013-a04f-4e88-c7e3-de0133f0d452"
   },
   "outputs": [],
   "source": [
    "mdl_softmax = LogisticRegression(max_iter=1000, solver='lbfgs')\n",
    "\n",
    "start_time = time.time()\n",
    "mdl_softmax.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Evaluate accuracy (or other metrics)\n",
    "y_pred = mdl_softmax.predict(x_test)\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(\"Accuracy:\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "LMvKrVWqpqJL",
    "outputId": "cdcb18c0-be0a-4d5d-c94a-490954f1f67d"
   },
   "outputs": [],
   "source": [
    "# One-hot encoding\n",
    "def one_hot_encode(y, num_classes):\n",
    "    \"\"\"Converts integer labels to one-hot encoding.\"\"\"\n",
    "    one_hot = np.zeros((y.shape[0], num_classes))\n",
    "    one_hot[np.arange(y.shape[0]), y] = 1\n",
    "    return one_hot\n",
    "\n",
    "# Example usage:\n",
    "num_classes = len(class_list)\n",
    "y_train_onehot = one_hot_encode(y_train, num_classes)\n",
    "\n",
    "# Display one-hot encoding results of ten random sample\n",
    "for _ in range(10):\n",
    "    idx = np.random.randint(0, y_train_onehot.shape[0])\n",
    "\n",
    "    print(f'Sample {idx+1},\\t Class {y_train[idx]}: {y_train_onehot[idx,:]}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "id": "5brE3L-UsCYa"
   },
   "outputs": [],
   "source": [
    "# Softmax function\n",
    "def softmax(x):\n",
    "    \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n",
    "    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n",
    "    return e_x / e_x.sum(axis=1, keepdims=True)\n",
    "\n",
    "# Cross-entropy loss\n",
    "def cross_entropy_loss(y, y_pred):\n",
    "    \"\"\"Compute cross-entropy loss.\"\"\"\n",
    "    epsilon = 1e-15  # Small value to avoid log(0)\n",
    "    loss = -np.sum(y * np.log(y_pred + epsilon)) / y.shape[0]\n",
    "    return loss\n",
    "\n",
    "def gradient_descent(x, y, learning_rate, num_iterations):\n",
    "    \"\"\"Performs gradient descent optimization.\"\"\"\n",
    "    num_samples, num_features = x.shape\n",
    "    num_classes = y.shape[1]\n",
    "\n",
    "    # Initialize weights and bias\n",
    "    w = np.random.randn(num_features, num_classes)\n",
    "    b = np.zeros(num_classes)\n",
    "    \n",
    "    # Initialize Adagrad accumulators\n",
    "    lr_w = np.zeros(w.shape)\n",
    "    lr_b = 0.0\n",
    "\n",
    "    for i in range(num_iterations):\n",
    "        # Forward pass\n",
    "        scores = np.dot(x, w) + b\n",
    "        y_pred = softmax(scores)\n",
    "\n",
    "        # Compute loss\n",
    "        loss = cross_entropy_loss(y, y_pred)\n",
    "\n",
    "        # Backward pass (compute gradients)\n",
    "        dw = (1 / num_samples) * np.dot(x.T, (y_pred - y))\n",
    "        db = (1 / num_samples) * np.sum(y_pred - y, axis=0)\n",
    "\n",
    "        # Accumulate gradients\n",
    "        lr_w += dw ** 2\n",
    "        lr_b += db ** 2\n",
    "        \n",
    "        # Update parameters\n",
    "        w -= learning_rate / np.sqrt(lr_w) * dw\n",
    "        b -= learning_rate / np.sqrt(lr_b) * db\n",
    "\n",
    "        if i % 100 == 0:\n",
    "            print(f'Iteration {i}, Loss: {loss}')\n",
    "\n",
    "    return w, b\n",
    "\n",
    "def predict(x, w, b):\n",
    "    \"\"\"Predicts class labels for input data.\"\"\"\n",
    "    scores = np.dot(x, w) + b\n",
    "    y_pred = softmax(scores)\n",
    "    return np.argmax(y_pred, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "y_exp = np.array([0, 0, 1])\n",
    "y_exp_pred = np.array([0.22, 0.28, 0.50])\n",
    "print(cross_entropy_loss(y_exp, y_exp_pred))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {
    "colab": {
     "base_uri": "https://localhost:8080/"
    },
    "id": "QiU6_YBPscjL",
    "outputId": "aafc1478-c6a8-4b31-f340-d0c4264c65d4"
   },
   "outputs": [],
   "source": [
    "# Perform gradient descent\n",
    "start_time = time.time()\n",
    "w, b = gradient_descent(x_train, y_train_onehot, learning_rate=1, num_iterations=1000)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Make predictions\n",
    "y_pred = predict(x_test, w, b)\n",
    "\n",
    "# Evaluate accuracy (or other metrics)\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(\"Accuracy:\", accuracy)"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "awWBO3bKYE9A"
   },
   "source": [
    "## 3.6 实验3：基于机器学习方法的手写字母识别\n",
    "\n",
    "此部分需要同学自行完成各个任务要求：\n",
    "* 数据读取、特征提取及分析\n",
    "* 分别使用逻辑回归、SVM、决策树、随机森林将手写字母分为大数（5-9）和小数（0-4）\n",
    "* 使用ROC展示并分析二分类模型的结果\n",
    "* 分别以One-vs-Rest, One-vs-One和softmax的方式识别手写字母\n",
    "* 画出手写字母识别精度的分布以及混淆矩阵，并进行必要的描述与分析"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "-------------------------------------------------------------------------------------------------\n",
    "## 实验3 回归模型的泛化及过拟合应对方法\n",
    "一、\t实验目的\n",
    "1.\t了解泛化能力和过拟合的概念\n",
    "2.\t掌握评估模型泛化能力的方法\n",
    "3.\t了解应对过拟合问题的常用方法，包括数据增强、正则化、交叉验证、早停法\n",
    "\n",
    "\n",
    "\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入必要库\n",
    "import numpy as np\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "from sklearn.model_selection import train_test_split, cross_val_score, KFold\n",
    "from sklearn.linear_model import Ridge\n",
    "from sklearn.metrics import mean_squared_error\n",
    "from sklearn.ensemble import RandomForestRegressor\n",
    "\n",
    "# 1. 数据准备与划分\n",
    "# 生成示例数据\n",
    "np.random.seed(42)\n",
    "X = np.random.rand(500, 1) * 10  # 特征\n",
    "y = 2.5 * X.squeeze() + np.random.randn(500) * 2  # 目标变量\n",
    "\n",
    "# 分割数据集\n",
    "X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2, random_state=42)\n",
    "\n",
    "# 2. 不同体量数据训练模型\n",
    "subset_ratios = [0.2, 0.4, 0.6, 0.8]\n",
    "results = []\n",
    "\n",
    "for ratio in subset_ratios:\n",
    "    subset_size = int(len(X_train) * ratio)\n",
    "    X_subset, y_subset = X_train[:subset_size], y_train[:subset_size]\n",
    "\n",
    "    # 使用简单线性模型\n",
    "    model = Ridge(alpha=0.0)  # 不使用正则化\n",
    "    model.fit(X_subset, y_subset)\n",
    "\n",
    "    # 在测试集上评估\n",
    "    y_pred = model.predict(X_test)\n",
    "    mse = mean_squared_error(y_test, y_pred)\n",
    "\n",
    "    results.append((ratio, mse))\n",
    "\n",
    "    print(f\"训练集比例: {ratio:.1f}, 测试集 MSE: {mse:.4f}\")\n",
    "\n",
    "# 绘制结果\n",
    "ratios, mses = zip(*results)\n",
    "plt.plot(ratios, mses, marker='o')\n",
    "plt.title(\"训练集比例对模型性能的影响\")\n",
    "plt.xlabel(\"训练集比例\")\n",
    "plt.ylabel(\"测试集 MSE\")\n",
    "plt.grid()\n",
    "plt.show()\n",
    "\n",
    "# 3. 正则化训练\n",
    "alphas = [0.01, 0.1, 1, 10, 100]\n",
    "ridge_results = []\n",
    "\n",
    "for alpha in alphas:\n",
    "    model = Ridge(alpha=alpha)\n",
    "    model.fit(X_train, y_train)\n",
    "    y_pred = model.predict(X_test)\n",
    "    mse = mean_squared_error(y_test, y_pred)\n",
    "    ridge_results.append((alpha, mse))\n",
    "\n",
    "    print(f\"L2 正则化强度 (alpha): {alpha}, 测试集 MSE: {mse:.4f}\")\n",
    "\n",
    "# 绘制正则化结果\n",
    "alphas, ridge_mses = zip(*ridge_results)\n",
    "plt.semilogx(alphas, ridge_mses, marker='o')\n",
    "plt.title(\"L2 正则化强度对模型性能的影响\")\n",
    "plt.xlabel(\"正则化强度 (alpha)\")\n",
    "plt.ylabel(\"测试集 MSE\")\n",
    "plt.grid()\n",
    "plt.show()\n",
    "\n",
    "# 4. 交叉验证\n",
    "kf = KFold(n_splits=5, shuffle=True, random_state=42)\n",
    "model = Ridge(alpha=1.0)\n",
    "\n",
    "cv_scores = cross_val_score(model, X_train, y_train, cv=kf, scoring='neg_mean_squared_error')\n",
    "cv_mse = -cv_scores.mean()\n",
    "print(f\"5折交叉验证的平均 MSE: {cv_mse:.4f}\")\n",
    "\n",
    "# 绘制学习曲线\n",
    "train_sizes = np.linspace(0.1, 1.0, 10)\n",
    "train_errors = []\n",
    "test_errors = []\n",
    "\n",
    "for train_size in train_sizes:\n",
    "    size = int(len(X_train) * train_size)\n",
    "    X_train_subset, y_train_subset = X_train[:size], y_train[:size]\n",
    "    model.fit(X_train_subset, y_train_subset)\n",
    "    train_errors.append(mean_squared_error(y_train_subset, model.predict(X_train_subset)))\n",
    "    test_errors.append(mean_squared_error(y_test, model.predict(X_test)))\n",
    "\n",
    "plt.plot(train_sizes, train_errors, label=\"训练误差\")\n",
    "plt.plot(train_sizes, test_errors, label=\"测试误差\")\n",
    "plt.title(\"学习曲线\")\n",
    "plt.xlabel(\"训练集大小比例\")\n",
    "plt.ylabel(\"MSE\")\n",
    "plt.legend()\n",
    "plt.grid()\n",
    "plt.show()\n",
    "\n",
    "# 5. 早停法\n",
    "class EarlyStopping:\n",
    "    def __init__(self, patience=5):\n",
    "        self.patience = patience\n",
    "        self.best_score = None\n",
    "        self.counter = 0\n",
    "        self.early_stop = False\n",
    "\n",
    "    def __call__(self, val_loss):\n",
    "        if self.best_score is None or val_loss < self.best_score:\n",
    "            self.best_score = val_loss\n",
    "            self.counter = 0\n",
    "        else:\n",
    "            self.counter += 1\n",
    "            if self.counter >= self.patience:\n",
    "                self.early_stop = True\n",
    "\n",
    "# 模拟训练\n",
    "patience = 10\n",
    "early_stopping = EarlyStopping(patience=patience)\n",
    "losses = []\n",
    "\n",
    "for epoch in range(100):\n",
    "    model.fit(X_train, y_train)\n",
    "    val_loss = mean_squared_error(y_test, model.predict(X_test))\n",
    "    losses.append(val_loss)\n",
    "    early_stopping(val_loss)\n",
    "\n",
    "    print(f\"Epoch {epoch+1}, 验证集损失: {val_loss:.4f}\")\n",
    "\n",
    "    if early_stopping.early_stop:\n",
    "        print(\"早停触发，停止训练\")\n",
    "        break\n",
    "\n",
    "# 绘制验证集损失曲线\n",
    "plt.plot(range(1, len(losses)+1), losses)\n",
    "plt.title(\"验证集损失曲线\")\n",
    "plt.xlabel(\"迭代次数\")\n",
    "plt.ylabel(\"验证集损失\")\n",
    "plt.grid()\n",
    "plt.show()\n"
   ]
  }
 ],
 "metadata": {
  "colab": {
   "authorship_tag": "ABX9TyO5gS9/MePw+FDiXJA07L6y",
   "include_colab_link": true,
   "provenance": []
  },
  "kernelspec": {
   "display_name": "machinelearning",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.14"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
