{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7e9b620-3490-4efc-9df9-035452f50f2b",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "\n",
    "import numpy as np\n",
    "\n",
    "from sklearn import svm\n",
    "from sklearn import tree\n",
    "from sklearn.linear_model import LogisticRegression, LinearRegression\n",
    "from sklearn.model_selection import train_test_split\n",
    "from sklearn.metrics import accuracy_score, precision_score, recall_score, f1_score, mean_absolute_error\n",
    "from sklearn.metrics import classification_report, confusion_matrix\n",
    "import seaborn as sns\n",
    "from sklearn.ensemble import RandomForestClassifier\n",
    "from sklearn.multiclass import OneVsRestClassifier\n",
    "from sklearn.multiclass import OneVsOneClassifier\n",
    "from sklearn.metrics import roc_curve, auc\n",
    "\n",
    "import time\n",
    "\n",
    "import matplotlib\n",
    "import matplotlib.pyplot as plt\n",
    "\n",
    "color_list = ['tab:blue', 'tab:orange', 'tab:green', 'tab:red', 'tab:purple', 'tab:brown', 'tab:pink', 'tab:gray', 'tab:olive', 'tab:cyan']\n",
    "\n",
    "device = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "label_size = 18 # Label size\n",
    "ticklabel_size = 14 # Tick label size\n",
    "\n",
    "# Load the MNIST dataset to display\n",
    "imgDisp = torchvision.datasets.MNIST(root='./data', train=False, download=True)\n",
    "img, label = imgDisp[0]\n",
    "\n",
    "print(f'Image size is {img.size}')\n",
    "\n",
    "fig, ax = plt.subplots(figsize=(7,7))\n",
    "ax.imshow(img, cmap='gray') # Display image\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "ax.set_title(f\"Label: {label}\", fontsize=label_size)\n",
    "# plt.savefig(f'exp_character{label}.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ae0092c4-2f27-4c11-b7bc-b7065a0623be",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ftrExtract(object):\n",
    "    '''\n",
    "    This class is used to extract features of images\n",
    "    '''\n",
    "    def __call__(self, tensor):\n",
    "        tensor = tensor.squeeze() # Compress redundant demensions\n",
    "\n",
    "        mean_width = tensor.mean(axis=0)\n",
    "        mean_height = tensor.mean(axis=1)\n",
    "\n",
    "        std_width = tensor.std(axis=0)\n",
    "        std_height = tensor.std(axis=1)\n",
    "\n",
    "        ftrs = torch.cat([mean_width, mean_height, std_width, std_height])\n",
    "\n",
    "        return ftrs\n",
    "\n",
    "# Define a transform to normalize the data\n",
    "transform = transforms.Compose([transforms.ToTensor(), ftrExtract()])\n",
    "\n",
    "# Load the MNIST dataset\n",
    "trainset = torchvision.datasets.MNIST(root='D:\\BaiduNetdiskDownload', train=True, download=True, transform=transform)\n",
    "testset = torchvision.datasets.MNIST(root='D:\\BaiduNetdiskDownload', train=False, download=True, transform=transform)\n",
    "\n",
    "# Count number of each class in trainset\n",
    "train_class_counts = {}\n",
    "for _, label in trainset:\n",
    "    if label not in train_class_counts:\n",
    "        train_class_counts[label] = 0\n",
    "    train_class_counts[label] += 1\n",
    "\n",
    "# Count number of each class in testset\n",
    "test_class_counts = {}\n",
    "for _, label in testset:\n",
    "    if label not in test_class_counts:\n",
    "        test_class_counts[label] = 0\n",
    "    test_class_counts[label] += 1\n",
    "\n",
    "# Print results\n",
    "for i in range(10):\n",
    "    cls_counts_train = train_class_counts.get(i, 0)\n",
    "    cls_ratio_train = cls_counts_train / len(trainset)\n",
    "    cls_counts_test = test_class_counts.get(i, 0)\n",
    "    cls_ratio_test = cls_counts_test / len(testset)\n",
    "\n",
    "    print(f\"Class {i}: Trainset - {cls_counts_train} ({cls_ratio_train:.2%}), Testset - {cls_counts_test} ({cls_ratio_test:.2%})\")\n",
    "\n",
    "batch_size = 42\n",
    "trainloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True, num_workers=0)\n",
    "testloader = torch.utils.data.DataLoader(testset, batch_size=batch_size, shuffle=False, num_workers=0)\n",
    "\n",
    "# Get a batch of training data\n",
    "dataiter = iter(trainloader)\n",
    "data, labels = next(dataiter)\n",
    "\n",
    "input_size = data[0].numpy().shape[0]\n",
    "print(f'Input_size is {input_size}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d2b2621a-a633-4b61-bcfd-06da4a2b96c9",
   "metadata": {},
   "outputs": [],
   "source": [
    "def sigmoid(x):\n",
    "    return 1 / (1 + np.exp(-x))\n",
    "\n",
    "# Generate x values\n",
    "x = np.linspace(-10, 10, 100)\n",
    "\n",
    "# Calculate sigmoid values\n",
    "y = sigmoid(x)\n",
    "\n",
    "# Plot the sigmoid curve\n",
    "fig, ax = plt.subplots(figsize=(10, 6))\n",
    "ax.plot(x, y, 'b-', linewidth=2)\n",
    "ax.set_xlabel('z', fontsize=label_size)\n",
    "ax.set_ylabel('y', fontsize=label_size)\n",
    "\n",
    "# Set x-ticks\n",
    "xticks = np.arange(-10.0, 10.1, 2.5)\n",
    "ax.set_xticks(xticks)\n",
    "\n",
    "# Modify tick labels\n",
    "xticklabels = ['-∞' if x == -10 else ('+∞' if x == 10 else str(x)) for x in xticks]\n",
    "ax.set_xticklabels(xticklabels)\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "ax.set_xlim(-10, 10)\n",
    "ax.set_ylim(-0.05, 1.05)\n",
    "\n",
    "# Add vertical line at x=0\n",
    "ax.axvline(x=0, color='r', linestyle='--')\n",
    "\n",
    "# Add horizontal lines at y=0.5 and y=1\n",
    "ax.axhline(y=0.5, color='g', linestyle='--')\n",
    "ax.axhline(y=1, color='g', linestyle='--')\n",
    "\n",
    "plt.tight_layout()\n",
    "# plt.savefig('sigmoid_function.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "1df7cff5-42ad-4b1d-88cb-578ba2603520",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract features and labels from trainset\n",
    "x_train = []\n",
    "y_train = []\n",
    "for image, label in trainset:\n",
    "    x_train.append(image.numpy())\n",
    "    y_train.append(1 if label == 1 else 0)  # Set label to 1 for character 1, 0 otherwise\n",
    "\n",
    "x_train = np.array(x_train)\n",
    "y_train = np.array(y_train)\n",
    "\n",
    "# Extract features and labels from trainset\n",
    "x_test = []\n",
    "y_test = []\n",
    "for image, label in testset:\n",
    "    x_test.append(image.numpy())\n",
    "    y_test.append(1 if label == 1 else 0)  # Set label to 1 for character 1, 0 otherwise\n",
    "\n",
    "x_test = np.array(x_test)\n",
    "y_test = np.array(y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b6af9c50-200d-47d7-81eb-b70fed3a2a5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define linear function\n",
    "def linear(X, w, b):\n",
    "    '''        \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    '''\n",
    "    return np.dot(X, w) + b\n",
    "\n",
    "# Define sigmoid function\n",
    "def sigmoid(z):\n",
    "    return 1 / (1 + np.exp(-z))\n",
    "\n",
    "# Define forward function\n",
    "def forward(X, w, b):\n",
    "    '''        \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    '''\n",
    "    return sigmoid(linear(X, w, b))\n",
    "\n",
    "# Predict probability function\n",
    "def predict(X, w, b):\n",
    "    y_proba = forward(X, w, b)\n",
    "    y_pred = (y_proba >= 0.5).astype(int)\n",
    "    return y_pred, y_proba\n",
    "\n",
    "# Binary cross-entropy\n",
    "def binary_cross_entropy(y, y_pred, eps=1e-15):\n",
    "    return -(y * np.log(y_pred + eps) + (1 - y) * np.log(1 - y_pred + eps))\n",
    "    \n",
    "# Define compute_loss function\n",
    "def compute_loss(X, y, w, b):\n",
    "    \"\"\"\n",
    "    Compute the binary cross-entropy loss for logistic regression.\n",
    "    \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    y (numpy array): True labels, shape (n_samples,)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    \n",
    "    Returns:\n",
    "    float: Average binary cross-entropy loss\n",
    "    \"\"\"    \n",
    "    n = X.shape[0] # number of samples\n",
    "    \n",
    "    # Compute model predictions\n",
    "    y_pred = forward(X, w, b)\n",
    "    \n",
    "    # Compute loss\n",
    "    loss = 1/n * np.sum(binary_cross_entropy(y, y_pred))\n",
    "    \n",
    "    return loss\n",
    "\n",
    "# Compute gradients\n",
    "def compute_gradients(X, y, w, b):\n",
    "    \"\"\"\n",
    "    Compute the gradients for logistic regression.\n",
    "    \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    y (numpy array): True labels, shape (n_samples,)\n",
    "    w (numpy array): Weight vector, shape (n_features,)\n",
    "    b (float): Bias term\n",
    "    \n",
    "    Returns:\n",
    "    dw: gradients of weights\n",
    "    db: gradients of bias\n",
    "    \"\"\"\n",
    "    n = X.shape[0] # number of samples\n",
    "    \n",
    "    # Compute model predictions\n",
    "    y_pred = forward(X, w, b)\n",
    "    \n",
    "    dw = 1/n * np.dot(X.T, (y_pred - y))\n",
    "    db = 1/n * np.sum(y_pred - y)\n",
    "    \n",
    "    return dw, db\n",
    "\n",
    "# Train logistic regression model\n",
    "def train_logistic_regression(X, y, learning_rate=0.01, num_iterations=1000):\n",
    "    \"\"\"\n",
    "    Compute the gradients for logistic regression.\n",
    "    \n",
    "    Parameters:\n",
    "    X (numpy array): Input features, shape (n_samples, n_features)\n",
    "    y (numpy array): True labels, shape (n_samples,)\n",
    "    \n",
    "    Returns:\n",
    "    w: weights of logistic regression model\n",
    "    b: bias of logistic regression model\n",
    "    \"\"\"\n",
    "    eps = 1e-15\n",
    "    _, ftr_num = X.shape\n",
    "    \n",
    "    w = np.zeros(ftr_num)\n",
    "    b = 0.0\n",
    "    \n",
    "    # Initialize Adagrad accumulators\n",
    "    lr_w = np.zeros(ftr_num)\n",
    "    lr_b = 0.0\n",
    "    \n",
    "    for i in range(num_iterations):\n",
    "        # Compute loss and gradients\n",
    "        loss = compute_loss(X, y, w, b)\n",
    "        dw, db = compute_gradients(X, y, w, b)\n",
    "        \n",
    "        # Update accumulators\n",
    "        lr_w = dw ** 2\n",
    "        lr_b = db ** 2\n",
    "        \n",
    "        # Update parameters\n",
    "        w -= learning_rate / (np.sqrt(lr_w) + eps) * dw\n",
    "        b -= learning_rate / (np.sqrt(lr_b) + eps) * db\n",
    "        \n",
    "        # Print loss every 100 iterations\n",
    "        if i % 100 == 0:\n",
    "            print(f\"Iteration {i}, Loss: {loss}\")\n",
    "    \n",
    "    return w, b"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "408449f8-bfd1-472c-b1e6-e9dbfaaa9a1b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate prediction values\n",
    "y_pred = np.linspace(0.001, 1, 500)\n",
    "\n",
    "# Compute loss for y_true = 1 and y_true = 0\n",
    "loss_y1 = binary_cross_entropy(1, y_pred)\n",
    "loss_y0 = binary_cross_entropy(0, y_pred)\n",
    "\n",
    "# Plotting\n",
    "fig, ax_y1 = plt.subplots(figsize=(8, 6))\n",
    "ax_y1.plot(y_pred, loss_y1, label='y_true = 1', color='blue')\n",
    "ax_y1.set_xlabel('Predicted y', fontsize=label_size)\n",
    "ax_y1.set_ylabel('Loss', fontsize=label_size)\n",
    "ax_y1.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "plt.tight_layout()\n",
    "plt.savefig('binary_cross_entropy_loss1.png', dpi=300)\n",
    "plt.show()\n",
    "\n",
    "# Plotting\n",
    "fig, ax_y0 = plt.subplots(figsize=(8, 6))\n",
    "plt.plot(y_pred, loss_y0, label='y_true = 0', color='red')\n",
    "ax_y0.set_xlabel('Predicted y', fontsize=label_size)\n",
    "ax_y0.set_ylabel('Loss', fontsize=label_size)\n",
    "ax_y0.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "plt.tight_layout()\n",
    "plt.savefig('binary_cross_entropy_loss0.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c644101b-b378-4c81-be99-852633e2e568",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Generate x values\n",
    "x = np.linspace(0.01, 5, 500)\n",
    "\n",
    "# Compute natural logarithm\n",
    "y = -np.log(x)\n",
    "\n",
    "# Create the plot\n",
    "fig, ax_log = plt.subplots(figsize=(6, 6))\n",
    "\n",
    "ax_log.plot(x, y, label='ln(x)', color='k', linewidth=2)\n",
    "\n",
    "ax_log.set_xlabel('x', fontsize=label_size)\n",
    "ax_log.set_ylabel('log$_e$(x)', fontsize=label_size)\n",
    "ax_log.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "ax_log.set_xlim(-0.05, 5)\n",
    "ax_log.set_ylim(-2, 4)\n",
    "# Add vertical line at x=1\n",
    "ax_log.axvline(x=1, color='gray', linestyle='--')\n",
    "\n",
    "# Add horizontal line at y=0\n",
    "ax_log.axhline(y=0, color='gray', linestyle='--')\n",
    "\n",
    "plt.tight_layout()\n",
    "# plt.savefig('natural_logarithm_function.png', dpi=300)\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8e2433f6-0d74-43f6-9b19-26a09f4e270b",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Train the model\n",
    "w, b = train_logistic_regression(x_train, y_train)\n",
    "\n",
    "y_pred, y_proba = predict(x_test, w, b)\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "precision = precision_score(y_test, y_pred)\n",
    "recall = recall_score(y_test, y_pred)\n",
    "f1 = f1_score(y_test, y_pred)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7a14db57-2bc1-4942-b670-69c0b40b8dfa",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Random select 3 examples from imgDisp and testset\n",
    "np.random.seed(42)\n",
    "idx = np.random.choice(len(imgDisp), 3)\n",
    "\n",
    "# Select instances\n",
    "imgDisp_select = [imgDisp[i] for i in idx]\n",
    "x_select = x_test[idx]\n",
    "y_select = y_test[idx]\n",
    "\n",
    "y_select_pred, y_select_proba = predict(x_select, w, b)\n",
    "\n",
    "# Check the selected instances' labels are the same\n",
    "for i in range(len(idx)):\n",
    "    print(f'Sample {i+1}: imgDisp label is {imgDisp_select[i][1]}, x label is {y_select[i]}')\n",
    "\n",
    "    # Display image from imgDisp\n",
    "    fig, ax = plt.subplots(figsize=(7,7))\n",
    "    ax.imshow(imgDisp_select[i][0], cmap='gray')\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "    ax.set_title(f\"Label: {imgDisp_select[i][1]}, Prediction: {y_select_proba[i]:.4f}\", fontsize=label_size)\n",
    "\n",
    "    # plt.savefig(f'binary_prediction_{i+1}.png', dpi=300) # Make figure clearer\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "58533c2e-20c2-49d4-af79-dfd0f1a38063",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define SVM classifier\n",
    "mdl_svm = svm.SVC(kernel='linear', probability=True)\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_svm.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e0826a56-21fe-4b46-a45d-107dd7db3e96",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Make predictions and evaluate the model\n",
    "y_pred_svm = mdl_svm.predict(x_test)\n",
    "y_proba_svm = mdl_svm.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_svm)\n",
    "precision = precision_score(y_test, y_pred_svm)\n",
    "recall = recall_score(y_test, y_pred_svm)\n",
    "f1 = f1_score(y_test, y_pred_svm)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d1ac5503-80e6-41f3-b3ca-cad4916a241e",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define DecisionTree classifier\n",
    "mdl_dt = tree.DecisionTreeClassifier()\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_dt.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Define Random Forest classifier\n",
    "mdl_rf = RandomForestClassifier(n_estimators=100)\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_rf.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9aaf482-142d-4ea7-b8fd-c3c893668ad4",
   "metadata": {},
   "outputs": [],
   "source": [
    "y_pred_dt = mdl_dt.predict(x_test)\n",
    "y_proba_dt = mdl_dt.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_dt)\n",
    "precision = precision_score(y_test, y_pred_dt)\n",
    "recall = recall_score(y_test, y_pred_dt)\n",
    "f1 = f1_score(y_test, y_pred_dt)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "y_pred_rf = mdl_rf.predict(x_test)\n",
    "y_proba_rf = mdl_rf.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_rf)\n",
    "precision = precision_score(y_test, y_pred_rf)\n",
    "recall = recall_score(y_test, y_pred_rf)\n",
    "f1 = f1_score(y_test, y_pred_rf)\n",
    "\n",
    "print(f'Precision: {precision:.4f}, Recall: {recall:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "684486fa-41f8-4440-a084-80d17b9826f7",
   "metadata": {},
   "outputs": [],
   "source": [
    "def cls_counts(y_test, y_proba, th=0.5):\n",
    "    y_pred = (y_proba > th).astype(int)\n",
    "\n",
    "    tp_idx = (y_test == 1) & (y_pred == 1)\n",
    "    fp_idx = (y_test == 0) & (y_pred == 1)\n",
    "    tn_idx = (y_test == 0) & (y_pred == 0)\n",
    "    fn_idx = (y_test == 1) & (y_pred == 0)\n",
    "\n",
    "    tp = np.sum(tp_idx)\n",
    "    fp = np.sum(fp_idx)\n",
    "    tn = np.sum(tn_idx)\n",
    "    fn = np.sum(fn_idx)\n",
    "\n",
    "    return th, (tp, fp, tn, fn)\n",
    "\n",
    "th, (tp, fp, tn, fn) = cls_counts(y_test, y_proba)\n",
    "print(f'Threshold {th}, TP: {tp}, FP: {fp}, TN: {tn}, FN: {fn}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0da1ed32-20f2-4434-a235-e89da9cf9509",
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_confusion_matrix(th, tp, fp, tn, fn):\n",
    "    \"\"\"Plots a confusion matrix given the number of true positives, false positives,\n",
    "    true negatives, and false negatives.\"\"\"\n",
    "    global label_size, ticklabel_size # Set global variables of font size\n",
    "\n",
    "    cm = np.array([[tn, fp], [fn, tp]])\n",
    "\n",
    "    # Display the confusion matrix as a heatmap\n",
    "    fig, ax = plt.subplots(figsize=(5,5))\n",
    "    img = ax.imshow(cm, interpolation='nearest', cmap=plt.cm.Blues)\n",
    "\n",
    "    # Add labels to the axes\n",
    "    tick_marks = np.arange(2)\n",
    "    ax.set_xticks(tick_marks, ['N', 'P'], fontsize=ticklabel_size)\n",
    "    ax.set_yticks(tick_marks, ['T', 'F'], fontsize=ticklabel_size)\n",
    "\n",
    "    # Add the count of each category to the plot\n",
    "    thresh = cm.max() / 2.\n",
    "    for i in range(cm.shape[0]):\n",
    "        for j in range(cm.shape[1]):\n",
    "            plt.text(j, i, format(cm[i, j], 'd'),\n",
    "                     fontsize=ticklabel_size,\n",
    "                     horizontalalignment=\"center\",\n",
    "                     color=\"white\" if cm[i, j] > thresh else \"black\")\n",
    "\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "\n",
    "    ax.set_ylabel('Real Label', fontsize=label_size)\n",
    "    ax.set_xlabel('Predicted Label', fontsize=label_size)\n",
    "    ax.set_title(f'Threshold: {th}', fontsize=label_size)\n",
    "\n",
    "    return fig, ax\n",
    "\n",
    "def get_scores(tp, fp, tn, fn):\n",
    "    precision = tp / (tp + fp)\n",
    "    recall = tp / (tp + fn) # Also called sensitivity\n",
    "    accuracy = (tp + tn) / (tp + fp + tn + fn)\n",
    "    f1 = 2 * precision * recall / (precision + recall)\n",
    "\n",
    "    specificity = tn / (tn + fp)\n",
    "\n",
    "    return precision, recall, specificity, accuracy, f1\n",
    "\n",
    "precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "print(f'Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "# Example usage (replace with your actual values)\n",
    "fig, ax = plot_confusion_matrix(th, tp, fp, tn, fn)\n",
    "\n",
    "# plt.savefig(f'binary_confusion_matrix.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0de453f3-7802-4872-a67d-5d3767d273ab",
   "metadata": {},
   "outputs": [],
   "source": [
    "th = 0.1\n",
    "th, (tp, fp, tn, fn) = cls_counts(y_test, y_proba, th)\n",
    "\n",
    "precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "print(f'Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "fig, ax = plot_confusion_matrix(th, tp, fp, tn, fn)\n",
    "# plt.savefig(f'binary_confusion_matrix_0D1.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2fbec7b2-503d-44c9-82aa-7b3d5f5b0058",
   "metadata": {},
   "outputs": [],
   "source": [
    "th = 0.9\n",
    "th, (tp, fp, tn, fn) = cls_counts(y_test, y_proba, th)\n",
    "\n",
    "precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "print(f'Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')\n",
    "\n",
    "fig, ax = plot_confusion_matrix(th, tp, fp, tn, fn)\n",
    "# plt.savefig(f'binary_confusion_matrix_0D9.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "6eeb2032-3d4a-4a3f-9e34-8410b928f154",
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_roc_curve_base():\n",
    "    \"\"\"Plots the ROC curve and computes AUC.\"\"\"\n",
    "    global label_size, ticklabel_size # Set global variables of font size\n",
    "\n",
    "    fig, ax = plt.subplots(figsize=(8,6))\n",
    "\n",
    "    ax.plot([0, 1], [0, 1], color='grey', lw=2, linestyle='--')\n",
    "    ax.set_xlim([0.0, 1.0])\n",
    "    ax.set_ylim([0.0, 1.0])\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "\n",
    "    ax.set_xlabel('False Positive Rate (FPR)', fontsize=label_size)\n",
    "    ax.set_ylabel('True Positive Rate (TPR)', fontsize=label_size)\n",
    "\n",
    "    return fig, ax\n",
    "\n",
    "def add_roc_curve(ax, y_true, y_proba, curve_color, curve_label):\n",
    "    \"\"\"Plots the ROC curve and computes AUC.\"\"\"\n",
    "\n",
    "    fpr, tpr, thresholds = roc_curve(y_true, y_proba)\n",
    "    roc_auc = auc(fpr, tpr)\n",
    "\n",
    "    roc = ax.plot(fpr, tpr, color=curve_color, lw=2, label=f'{curve_label} (AUC = {roc_auc:.4f})')\n",
    "\n",
    "    return roc_auc, fpr, tpr, thresholds\n",
    "\n",
    "fig, ax = plot_roc_curve_base()\n",
    "\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba, color_list[0], 'Logic Regression')\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba_svm[:,1], color_list[1], 'SVM')\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba_dt[:,1], color_list[2], 'Decision Tree')\n",
    "roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test, y_proba_rf[:,1], color_list[3], 'Random Forests')\n",
    "\n",
    "plt.legend(loc=\"lower right\", fontsize=ticklabel_size)\n",
    "# plt.savefig(f'binary_roc_curve.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fc07613f-f923-444d-90de-630ccced34fc",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Extract features and labels from trainset\n",
    "x_train = []\n",
    "y_train = []\n",
    "for image, label in trainset:\n",
    "    x_train.append(image.numpy())\n",
    "    y_train.append(label)\n",
    "\n",
    "x_train = np.array(x_train)\n",
    "y_train = np.array(y_train)\n",
    "\n",
    "# Extract features and labels from trainset\n",
    "x_test = []\n",
    "y_test = []\n",
    "for image, label in testset:\n",
    "    x_test.append(image.numpy())\n",
    "    y_test.append(label)\n",
    "\n",
    "x_test = np.array(x_test)\n",
    "y_test = np.array(y_test)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "819a25f6-eb5a-41a2-90b8-19c56051e0af",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Define logic multi-classifier\n",
    "mdl_logic_ovr = OneVsRestClassifier(LogisticRegression(max_iter=500))\n",
    "\n",
    "# Train model\n",
    "start_time = time.time()\n",
    "mdl_logic_ovr.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Make predictions and evaluate the model\n",
    "y_pred_logic_ovr = mdl_logic_ovr.predict(x_test)\n",
    "y_proba_logic_ovr = mdl_logic_ovr.predict_proba(x_test) # Output ratio\n",
    "\n",
    "accuracy = accuracy_score(y_test, y_pred_logic_ovr)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "8c826681-4dbb-4c8d-acc6-e793964935e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Get class list: 0, 1, ..., 9\n",
    "class_list = np.sort(np.unique(y_train))\n",
    "\n",
    "# Create model list\n",
    "mdl_logic_list = []\n",
    "for c in class_list:\n",
    "    mdl_logic_list.append(LogisticRegression(max_iter=500))\n",
    "\n",
    "# Train models seperately\n",
    "for i in range(len(class_list)):\n",
    "    start_time = time.time()\n",
    "    mdl_logic_list[i].fit(x_train, (y_train == class_list[i]).astype(int))\n",
    "    end_time = time.time()\n",
    "    print(f'Training class {class_list[i]}, Training time: {end_time - start_time:.2f} seconds')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9186e726-189c-4e24-b294-491632101447",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Plot ROC curve\n",
    "fig, ax = plot_roc_curve_base()\n",
    "\n",
    "# Draw ROC of individual classifier\n",
    "for i in range(len(class_list)):\n",
    "    # Make predictions and evaluate the model\n",
    "    y_test_trans = (y_test == class_list[i]).astype(int)\n",
    "    y_proba = mdl_logic_list[i].predict_proba(x_test) # Output ratio\n",
    "\n",
    "    roc_auc_logic, fpr_logic, tpr_logic, thresholds_logic = add_roc_curve(ax, y_test_trans, y_proba[:,1], color_list[i], f'{class_list[i]}')\n",
    "\n",
    "plt.legend(loc=\"lower right\", fontsize=ticklabel_size)\n",
    "# plt.savefig(f'binary_roc_curve_ovr.png', dpi=300) # Make figure clearer\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "b35c66f3-67c7-4fb1-9d83-824b051536e6",
   "metadata": {},
   "outputs": [],
   "source": [
    "sample_num = 10\n",
    "\n",
    "# Random select 3 examples from imgDisp and testset\n",
    "np.random.seed(1)\n",
    "idx = np.random.choice(len(imgDisp), sample_num)\n",
    "\n",
    "# Select instances\n",
    "imgDisp_select = [imgDisp[i] for i in idx]\n",
    "testset_select = [testset[i] for i in idx]\n",
    "\n",
    "# Check the selected instances' labels are the same\n",
    "for i in range(sample_num):\n",
    "    x = testset_select[i][0].view(-1, input_size)\n",
    "\n",
    "    # Using model to predict character\n",
    "    y_pred_list = []\n",
    "    for j in range(len(mdl_logic_list)):\n",
    "        y_pred_list.append(mdl_logic_list[j].predict(x))\n",
    "\n",
    "    y_pred = np.argmax(np.array(y_pred_list), axis=0)[0]\n",
    "\n",
    "    # Display image from imgDisp\n",
    "    fig, ax = plt.subplots(figsize=(7,7))\n",
    "    ax.imshow(imgDisp_select[i][0], cmap='gray')\n",
    "    ax.tick_params(axis='both', which='major', labelsize=ticklabel_size) # Set tick label size\n",
    "    ax.set_title(f\"Label: {imgDisp_select[i][1]}, Prediction Label: {y_pred}\", fontsize=label_size)\n",
    "\n",
    "    print(f'Sample {i+1}: imgDisp label is {imgDisp_select[i][1]}, testset label is {testset_select[i][1]}, predict label is {y_pred}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "47e080ad-5588-4e94-ba98-5e70bffd4b93",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Prediction\n",
    "y_pred_list = []\n",
    "for i in range(len(mdl_logic_list)):\n",
    "    y_pred_list.append(mdl_logic_list[i].predict(x_test))\n",
    "\n",
    "y_pred = np.argmax(np.array(y_pred_list), axis=0)\n",
    "\n",
    "# Accuracy\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "698b0373-2b54-42d8-a0a5-efb6c59c9bf9",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Build the confusion matrix by direct counting: entry [t, p] is the\n",
    "# number of test samples with true label t that were predicted as p.\n",
    "# Integer dtype so the cell annotations render as counts (e.g. '970'),\n",
    "# not floats (e.g. '970.0').\n",
    "cm_test = np.zeros((10, 10), dtype=int)\n",
    "for i in range(len(y_test)):\n",
    "    cm_test[y_test[i], y_pred[i]] += 1\n",
    "\n",
    "# Display confusion matrix\n",
    "fig, ax = plt.subplots(figsize=(9,9))\n",
    "im = ax.imshow(cm_test, cmap=plt.cm.Blues, interpolation='nearest')\n",
    "\n",
    "# Annotate every cell; use white text on dark (high-count) cells so the\n",
    "# numbers stay readable against the Blues colormap.\n",
    "for i in range(cm_test.shape[0]):\n",
    "    for j in range(cm_test.shape[1]):\n",
    "        ax.text(j, i, cm_test[i, j], fontsize=ticklabel_size, ha=\"center\", va=\"center\",\n",
    "                color=\"white\" if cm_test[i, j] > cm_test.max() / 2. else \"black\")\n",
    "\n",
    "ax.set_xlabel('Predicted label', fontsize=label_size)\n",
    "ax.set_ylabel('True label', fontsize=label_size)\n",
    "\n",
    "# One tick per digit class on both axes\n",
    "ax.set_xticks(np.arange(10))\n",
    "ax.set_xticklabels(np.arange(10))\n",
    "\n",
    "ax.set_yticks(np.arange(10))\n",
    "ax.set_yticklabels(np.arange(10))\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "# plt.savefig(f'confusion_matrix_numel.png', dpi=300) # Make figure clearer\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a65db05d-da1b-497e-a2c7-62c833956326",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Rebuild the confusion matrix from scratch (same counting as above)\n",
    "cm_test = np.zeros((10, 10))\n",
    "for i in range(len(y_test)):\n",
    "    cm_test[y_test[i], y_pred[i]] += 1\n",
    "\n",
    "# Normalize each row by its total so entry [t, p] becomes the fraction of\n",
    "# true-class-t samples predicted as p (each row sums to 1).\n",
    "# NOTE(review): a class absent from y_test would produce a 0/0 row of NaNs —\n",
    "# fine for the full MNIST test split, but verify if subsampling.\n",
    "cm_test = cm_test / np.sum(cm_test, axis=1, keepdims=True)\n",
    "\n",
    "# Display confusion matrix\n",
    "fig, ax = plt.subplots(figsize=(9,9))\n",
    "im = ax.imshow(cm_test, cmap=plt.cm.Blues, interpolation='nearest')\n",
    "\n",
    "# Annotate each cell with the ratio (2 decimals); white text on dark cells.\n",
    "for i in range(cm_test.shape[0]):\n",
    "    for j in range(cm_test.shape[1]):\n",
    "        ax.text(j, i, format(cm_test[i, j], '.2f'), fontsize=ticklabel_size, ha=\"center\", va=\"center\",\n",
    "                color=\"white\" if cm_test[i, j] > cm_test.max() / 2. else \"black\")\n",
    "\n",
    "ax.set_xlabel('Predicted label', fontsize=label_size)\n",
    "ax.set_ylabel('True label', fontsize=label_size)\n",
    "\n",
    "ax.set_xticks(np.arange(10))\n",
    "ax.set_xticklabels(np.arange(10))\n",
    "\n",
    "ax.set_yticks(np.arange(10))\n",
    "ax.set_yticklabels(np.arange(10))\n",
    "\n",
    "ax.tick_params(axis='both', which='major', labelsize=ticklabel_size)\n",
    "\n",
    "# plt.savefig(f'confusion_matrix_ratio.png', dpi=300) # Make figure clearer\n",
    "\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "25a5056b-71b5-4316-ae17-1cb7b33b08ba",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-vs-one multiclass wrapper around binary logistic regression:\n",
    "# one classifier is fitted for every unordered pair of digit classes.\n",
    "base_estimator = LogisticRegression(max_iter=1000)\n",
    "mdl_logic_ovo = OneVsOneClassifier(base_estimator)\n",
    "\n",
    "# Fit on the training split, timing the whole fit\n",
    "start_time = time.time()\n",
    "mdl_logic_ovo.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Evaluate on the held-out test split\n",
    "y_pred = mdl_logic_ovo.predict(x_test)\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "21447772-7063-459f-8fad-8c7c43877c60",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Manual one-vs-one: fit one binary logistic regression per ORDERED pair\n",
    "# (cls_p is the positive class, labeled 1; cls_n the negative class,\n",
    "# labeled 0), so each unordered pair is trained twice with roles swapped.\n",
    "# Get class list: 0, 1, ..., 9\n",
    "class_list = np.sort(np.unique(y_train))\n",
    "\n",
    "# Nested dict of models keyed as mdl_logic_matrix[cls_p][cls_n].\n",
    "# NOTE(review): max_iter=500 here vs 1000 in the sklearn OvO cell above —\n",
    "# confirm the difference is intentional.\n",
    "mdl_logic_matrix = {}\n",
    "for cls_p in class_list:\n",
    "    mdl_logic_matrix[cls_p] = {}\n",
    "    for cls_n in class_list:\n",
    "        if cls_p == cls_n:\n",
    "            continue\n",
    "        mdl_logic_matrix[cls_p][cls_n] = LogisticRegression(max_iter=500)\n",
    "\n",
    "for cls_p in class_list:\n",
    "    # Training data of positive class (binary label 1)\n",
    "    x_train_ovo_p = x_train[(y_train == cls_p), :]\n",
    "    y_train_ovo_p = np.ones(x_train_ovo_p.shape[0])\n",
    "\n",
    "    # Testing data of positive class\n",
    "    x_test_ovo_p = x_test[(y_test == cls_p), :]\n",
    "    y_test_ovo_p = np.ones(x_test_ovo_p.shape[0])\n",
    "\n",
    "    for cls_n in class_list:\n",
    "        if cls_p == cls_n:\n",
    "            continue\n",
    "\n",
    "        # Training data of negative class (binary label 0)\n",
    "        x_train_ovo_n = x_train[(y_train == cls_n), :]\n",
    "        y_train_ovo_n = np.zeros(x_train_ovo_n.shape[0])\n",
    "\n",
    "        # Testing data of negative class\n",
    "        x_test_ovo_n = x_test[(y_test == cls_n), :]\n",
    "        y_test_ovo_n = np.zeros(x_test_ovo_n.shape[0])\n",
    "\n",
    "        # Concatenate positive and negative samples into this pair's training set\n",
    "        x_train_ovo = np.concatenate((x_train_ovo_p, x_train_ovo_n), axis=0)\n",
    "        y_train_ovo = np.concatenate((y_train_ovo_p, y_train_ovo_n), axis=0)\n",
    "\n",
    "        # Model training (timed per pair)\n",
    "        start_time = time.time()\n",
    "        mdl_logic_matrix[cls_p][cls_n].fit(x_train_ovo, y_train_ovo)\n",
    "        end_time = time.time()\n",
    "\n",
    "        # Concatenate data for testing\n",
    "        x_test_ovo = np.concatenate((x_test_ovo_p, x_test_ovo_n), axis=0)\n",
    "        y_test_ovo = np.concatenate((y_test_ovo_p, y_test_ovo_n), axis=0)\n",
    "\n",
    "        # Test model on sub-task\n",
    "        y_proba_ovo = mdl_logic_matrix[cls_p][cls_n].predict_proba(x_test_ovo) # per-class probabilities; column 1 = P(positive)\n",
    "\n",
    "        # Per-pair metrics via helpers defined earlier in the notebook\n",
    "        # (cls_counts / get_scores) — thresholding behavior lives there.\n",
    "        _, (tp, fp, tn, fn) = cls_counts(y_test_ovo, y_proba_ovo[:, 1])\n",
    "        precision, recall, specificity, accuracy, f1 = get_scores(tp, fp, tn, fn)\n",
    "        print(f'Training class {cls_p} ({x_train_ovo_p.shape[0]}) vs class {cls_n} ({x_train_ovo_n.shape[0]}), Training time: {end_time - start_time:.2f} seconds, Precision: {precision:.4f}, Recall (Sensitivity): {recall:.4f}, Specificity: {specificity:.4f}, Accuracy: {accuracy:.4f}, F1-Score: {f1:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d5f070e0-ca52-4dec-86e6-6e987f749eac",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Use the full test set (the slice keeps every row and column; the stale\n",
    "# 'select class 1' wording was misleading)\n",
    "x_test_select = x_test[:, :]\n",
    "\n",
    "# Vote counting: y_pred_counts[:, c] accumulates how many pairwise duels\n",
    "# class c wins for each sample.\n",
    "# NOTE(review): indexing columns directly by cls_p assumes class labels are\n",
    "# exactly 0..9 — true for MNIST, but worth confirming for other datasets.\n",
    "y_pred_counts = np.zeros((x_test_select.shape[0], len(class_list)))\n",
    "\n",
    "for cls_p in class_list:\n",
    "    for cls_n in class_list:\n",
    "        if cls_p == cls_n:\n",
    "            continue\n",
    "\n",
    "        # predict() yields 1 where cls_p beats cls_n; since the model matrix\n",
    "        # holds both orderings, every duel is credited to both sides' columns.\n",
    "        y_pred_counts[:, cls_p] = y_pred_counts[:, cls_p] + mdl_logic_matrix[cls_p][cls_n].predict(x_test_select)\n",
    "\n",
    "# Class with the most pairwise wins (ties resolve to the lowest class index)\n",
    "y_pred = np.argmax(y_pred_counts, axis=1)\n",
    "\n",
    "# Accuracy\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(f'Accuracy: {accuracy:.4f}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "d6a53bc9-9da3-4e69-80a1-e4931bf235b6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single logistic-regression model over all 10 classes, L-BFGS solver.\n",
    "# NOTE(review): the name suggests softmax (multinomial) regression; whether\n",
    "# sklearn actually fits multinomial here depends on the installed version's\n",
    "# multi_class default — confirm if the distinction matters.\n",
    "mdl_softmax = LogisticRegression(max_iter=500, solver='lbfgs')\n",
    "\n",
    "# Time the fit on the training split\n",
    "start_time = time.time()\n",
    "mdl_softmax.fit(x_train, y_train)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Evaluate accuracy (or other metrics)\n",
    "y_pred = mdl_softmax.predict(x_test)\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(\"Accuracy:\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "441fd932-017e-4827-b755-b74658d923a0",
   "metadata": {},
   "outputs": [],
   "source": [
    "# One-hot encoding\n",
    "def one_hot_encode(y, num_classes):\n",
    "    \"\"\"Converts integer labels to one-hot encoding.\"\"\"\n",
    "    one_hot = np.zeros((y.shape[0], num_classes))\n",
    "    one_hot[np.arange(y.shape[0]), y] = 1\n",
    "    return one_hot\n",
    "\n",
    "# Example usage:\n",
    "num_classes = len(class_list)\n",
    "y_train_onehot = one_hot_encode(y_train, num_classes)\n",
    "\n",
    "# Spot-check: print the one-hot rows of ten random samples.\n",
    "# NOTE(review): np.random is unseeded here, so the printed rows differ per\n",
    "# run — fine for a spot-check, but seed it if reproducible output matters.\n",
    "for _ in range(10):\n",
    "    idx = np.random.randint(0, y_train_onehot.shape[0])\n",
    "\n",
    "    print(f'Sample {idx+1},\\t Class {y_train[idx]}: {y_train_onehot[idx,:]}')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f099b1d-17ad-4ed0-9171-0b4729ee6b28",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Softmax function\n",
    "def softmax(x):\n",
    "    \"\"\"Compute softmax values for each sets of scores in x.\"\"\"\n",
    "    e_x = np.exp(x - np.max(x, axis=1, keepdims=True))\n",
    "    return e_x / e_x.sum(axis=1, keepdims=True)\n",
    "\n",
    "# Cross-entropy loss\n",
    "def cross_entropy_loss(y, y_pred):\n",
    "    \"\"\"Compute cross-entropy loss.\"\"\"\n",
    "    epsilon = 1e-15  # Small value to avoid log(0)\n",
    "    loss = -np.sum(y * np.log(y_pred + epsilon)) / y.shape[0]\n",
    "    return loss\n",
    "\n",
    "def gradient_descent(x, y, learning_rate, num_iterations):\n",
    "    \"\"\"Performs gradient descent optimization.\"\"\"\n",
    "    num_samples, num_features = x.shape\n",
    "    num_classes = y.shape[1]\n",
    "\n",
    "    # Initialize weights and bias\n",
    "    w = np.random.randn(num_features, num_classes)\n",
    "    b = np.zeros(num_classes)\n",
    "    \n",
    "    # Initialize Adagrad accumulators\n",
    "    lr_w = np.zeros(w.shape)\n",
    "    lr_b = 0.0\n",
    "\n",
    "    for i in range(num_iterations):\n",
    "        # Forward pass\n",
    "        scores = np.dot(x, w) + b\n",
    "        y_pred = softmax(scores)\n",
    "\n",
    "        # Compute loss\n",
    "        loss = cross_entropy_loss(y, y_pred)\n",
    "\n",
    "        # Backward pass (compute gradients)\n",
    "        dw = (1 / num_samples) * np.dot(x.T, (y_pred - y))\n",
    "        db = (1 / num_samples) * np.sum(y_pred - y, axis=0)\n",
    "\n",
    "        # Accumulate gradients\n",
    "        lr_w += dw ** 2\n",
    "        lr_b += db ** 2\n",
    "        \n",
    "        # Update parameters\n",
    "        w -= learning_rate / np.sqrt(lr_w) * dw\n",
    "        b -= learning_rate / np.sqrt(lr_b) * db\n",
    "\n",
    "        if i % 100 == 0:\n",
    "            print(f'Iteration {i}, Loss: {loss}')\n",
    "\n",
    "    return w, b\n",
    "\n",
    "def predict(x, w, b):\n",
    "    \"\"\"Predicts class labels for input data.\"\"\"\n",
    "    scores = np.dot(x, w) + b\n",
    "    y_pred = softmax(scores)\n",
    "    return np.argmax(y_pred, axis=1)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "42259d5f-792a-438c-8965-bc5ced5e78c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Single-sample sanity check of cross_entropy_loss.\n",
    "# The function averages over axis-0 samples, so one example must be shaped\n",
    "# (1, n_classes); 1-D arrays would wrongly divide by n_classes and print\n",
    "# -log(0.5)/3 ~= 0.231 instead of the true cross-entropy.\n",
    "y_exp = np.array([[0, 0, 1]])\n",
    "y_exp_pred = np.array([[0.22, 0.28, 0.50]])\n",
    "print(cross_entropy_loss(y_exp, y_exp_pred))  # ~= -log(0.5) = 0.693"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "59b65456-03bc-474c-bb1b-7fc1d4da1779",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Fit the from-scratch softmax regression (previous cell) on the training set.\n",
    "# NOTE(review): weights come from an unseeded np.random.randn, so the final\n",
    "# accuracy varies slightly between runs — seed for reproducibility.\n",
    "start_time = time.time()\n",
    "# learning_rate=1 is large for plain gradient descent, but the Adagrad\n",
    "# accumulator scales each parameter's effective step down over time.\n",
    "w, b = gradient_descent(x_train, y_train_onehot, learning_rate=1, num_iterations=1000)\n",
    "end_time = time.time()\n",
    "\n",
    "print(f'Training time: {end_time - start_time:.2f} seconds')\n",
    "\n",
    "# Make predictions\n",
    "y_pred = predict(x_test, w, b)\n",
    "\n",
    "# Evaluate accuracy (or other metrics)\n",
    "accuracy = accuracy_score(y_test, y_pred)\n",
    "print(\"Accuracy:\", accuracy)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "374386af-3ba7-4ee4-8e94-50fb8ab9fe3a",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
