{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "3abd79a5",
   "metadata": {},
   "source": [
    "# 二分类模型，使用sigmoid计算损失函数"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "a8a1fbca",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "使用设备: cpu\n",
      "已设置中文字体支持（Windows系统）\n"
     ]
    }
   ],
   "source": [
    "# --- Imports & runtime setup ---\n",
    "import os\n",
    "\n",
    "import pandas as pd\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import DataLoader\n",
    "from torchvision import transforms\n",
    "from torchvision.datasets import ImageFolder\n",
    "\n",
    "from utils import *  # project helpers; provides set_chinese_font (among others)\n",
    "\n",
    "# Pick the compute device: prefer CUDA when available, otherwise CPU.\n",
    "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
    "print(f\"使用设备: {device}\")\n",
    "\n",
    "# Enable Chinese font support for plots (helper from utils).\n",
    "set_chinese_font()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a117a3ca",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Dataset preparation ---\n",
    "data_path = r\"D:\\desktop\\class\\DataSet\\cat_vs_dog(grey)\"\n",
    "\n",
    "# Train and test use the identical preprocessing pipeline:\n",
    "# grayscale -> 64x64 resize -> tensor -> single-channel normalisation,\n",
    "# so a single shared Compose object is sufficient.\n",
    "common_transform = transforms.Compose(\n",
    "    [\n",
    "        transforms.Grayscale(num_output_channels=1),\n",
    "        transforms.Resize((64, 64)),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean=[0.5], std=[0.5]),\n",
    "    ]\n",
    ")\n",
    "\n",
    "train_dataset = ImageFolder(\n",
    "    os.path.join(data_path, \"train\"), transform=common_transform\n",
    ")\n",
    "test_dataset = ImageFolder(\n",
    "    os.path.join(data_path, \"test\"), transform=common_transform\n",
    ")\n",
    "\n",
    "# batch_size=600 puts the entire training set into one batch per epoch.\n",
    "train_loader = DataLoader(train_dataset, batch_size=600, shuffle=True)\n",
    "test_loader = DataLoader(test_dataset, batch_size=50, shuffle=False)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "16a9300f",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "模型结构:\n",
      "SimpleCatDogCNN(\n",
      "  (network): Sequential(\n",
      "    (0): Conv2d(1, 2, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1))\n",
      "    (1): ReLU(inplace=True)\n",
      "    (2): Flatten(start_dim=1, end_dim=-1)\n",
      "    (3): Linear(in_features=8192, out_features=1, bias=True)\n",
      "  )\n",
      ")\n",
      "全连接层参数数量: 8193\n"
     ]
    }
   ],
   "source": [
    "# --- Model definition ---\n",
    "class SimpleCatDogCNN(nn.Module):\n",
    "    \"\"\"Minimal CNN for binary cat-vs-dog classification.\n",
    "\n",
    "    One 3x3 convolution (1 -> 2 channels, padding keeps the 64x64 spatial\n",
    "    size), ReLU, then a single-logit linear head. The sigmoid is applied\n",
    "    outside the model, in the training loop.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.network = nn.Sequential(\n",
    "            nn.Conv2d(1, 2, kernel_size=3, padding=1),\n",
    "            # Alternatives worth comparing here: LeakyReLU, PReLU, Mish, Tanh.\n",
    "            nn.ReLU(inplace=True),\n",
    "            nn.Flatten(),\n",
    "            # 2 feature maps x 64 x 64 positions -> one output logit.\n",
    "            nn.Linear(2 * 64 * 64, 1),\n",
    "        )\n",
    "\n",
    "    def forward(self, x):\n",
    "        return self.network(x)\n",
    "\n",
    "\n",
    "model = SimpleCatDogCNN().to(device)\n",
    "print(\"模型结构:\")\n",
    "print(model)\n",
    "print(f\"全连接层参数数量: {sum(p.numel() for p in model.network[-1].parameters())}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "33a90b04",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Gradient recording ---\n",
    "class GradientRecorder:\n",
    "    \"\"\"Collects the final linear layer's parameters and gradients per step.\n",
    "\n",
    "    Rows are buffered in a plain list and only turned into a DataFrame when\n",
    "    exporting; this avoids the quadratic, FutureWarning-triggering pattern\n",
    "    of calling pd.concat inside the training loop.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        # Buffered row dicts; ``df`` is materialised in save_to_excel.\n",
    "        self._records = []\n",
    "        self.df = pd.DataFrame()  # kept as an attribute for compatibility\n",
    "\n",
    "    def save_gradients(self, epoch, batch_idx, model, optimizer):\n",
    "        \"\"\"Record the FC layer's weights/bias and their gradients.\n",
    "\n",
    "        Must be called after ``loss.backward()`` (so gradients exist) and\n",
    "        before ``optimizer.step()`` (so pre-update parameters are logged).\n",
    "        \"\"\"\n",
    "        fc_layer = model.network[-1]  # the final nn.Linear\n",
    "\n",
    "        fc_weight = fc_layer.weight.data\n",
    "        fc_bias = fc_layer.bias.data\n",
    "        fc_weight_grad = fc_layer.weight.grad\n",
    "        fc_bias_grad = fc_layer.bias.grad\n",
    "\n",
    "        # Gradient magnitudes: L2 norm for the weight tensor; the bias is a\n",
    "        # single scalar, so its absolute value is used instead.\n",
    "        fc_weight_grad_norm = torch.norm(fc_weight_grad).item()\n",
    "        fc_bias_grad_norm = torch.abs(fc_bias_grad).item()\n",
    "\n",
    "        lr = optimizer.param_groups[0]['lr']\n",
    "\n",
    "        self._records.append({\n",
    "            'epoch': epoch + 1,\n",
    "            'batch': batch_idx + 1,\n",
    "            'fc_weight': fc_weight.cpu().numpy().tolist(),\n",
    "            'fc_weight_grad': fc_weight_grad.cpu().numpy().tolist(),\n",
    "            'fc_weight_grad_norm': fc_weight_grad_norm,\n",
    "            'fc_bias': fc_bias.item(),\n",
    "            'fc_bias_grad': fc_bias_grad.item(),\n",
    "            'fc_bias_grad_norm': fc_bias_grad_norm,\n",
    "            'learning_rate': lr,  # kept so the update rule can be verified by hand\n",
    "        })\n",
    "\n",
    "    def save_to_excel(self, filename):\n",
    "        \"\"\"Materialise the buffered records as a DataFrame and write to Excel.\"\"\"\n",
    "        self.df = pd.DataFrame(self._records)\n",
    "        self.df.to_excel(filename, index=False)\n",
    "        # BUGFIX: the message previously printed a literal placeholder\n",
    "        # instead of interpolating ``filename``.\n",
    "        print(f\"梯度信息已保存到 '{filename}'\")\n",
    "        print(f\"共记录了 {len(self.df)} 次梯度更新\")\n",
    "\n",
    "\n",
    "# Module-level recorder used by the training loop below.\n",
    "gradient_recorder = GradientRecorder()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "425add0f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# --- Training loop ---\n",
    "def train_model(model, train_loader, test_loader, criterion, optimizer, num_epochs=50):\n",
    "    \"\"\"Train the binary classifier and evaluate it after every epoch.\n",
    "\n",
    "    Args:\n",
    "        model: network producing one logit per sample, shape (N, 1).\n",
    "        train_loader / test_loader: DataLoaders yielding (image, label) batches.\n",
    "        criterion: probability-based loss (nn.BCELoss here); sigmoid is applied\n",
    "            in this function before the loss is computed.\n",
    "        optimizer: optimizer over ``model``'s parameters.\n",
    "        num_epochs: number of full passes over the training data.\n",
    "\n",
    "    Returns:\n",
    "        (train_losses, test_accuracies): per-epoch average loss / accuracy.\n",
    "\n",
    "    Side effects: logs gradients via the module-level ``gradient_recorder``\n",
    "    and checkpoints the best model to ``best_model.pth``.\n",
    "    \"\"\"\n",
    "    train_losses = []\n",
    "    test_accuracies = []\n",
    "    best_test_acc = 0.0\n",
    "\n",
    "    for epoch in range(num_epochs):\n",
    "        # ---- training phase ----\n",
    "        model.train()\n",
    "        total_loss = 0.0\n",
    "        total_correct = 0\n",
    "        total_samples = 0\n",
    "\n",
    "        for batch_idx, (inputs, labels) in enumerate(train_loader):\n",
    "            inputs, labels = inputs.to(device), labels.to(device)\n",
    "\n",
    "            optimizer.zero_grad()\n",
    "            outputs = model(inputs)\n",
    "\n",
    "            # Sigmoid turns the (N, 1) logits into probabilities.\n",
    "            # BUGFIX: squeeze(1) instead of a bare squeeze() -- a bare squeeze\n",
    "            # collapses a size-1 *batch* to a 0-d tensor, breaking BCELoss's\n",
    "            # target-shape match when the last batch holds a single sample.\n",
    "            probabilities = torch.sigmoid(outputs).squeeze(1)\n",
    "            loss = criterion(probabilities, labels.float())\n",
    "\n",
    "            loss.backward()  # compute parameter gradients\n",
    "\n",
    "            # Log gradients before the update so pre-step values are recorded.\n",
    "            gradient_recorder.save_gradients(epoch, batch_idx, model, optimizer)\n",
    "\n",
    "            optimizer.step()  # gradient-descent parameter update\n",
    "\n",
    "            # Running statistics (loss weighted by batch size).\n",
    "            batch_size = inputs.size(0)\n",
    "            total_loss += loss.item() * batch_size\n",
    "            preds = (probabilities > 0.5).long()\n",
    "            total_correct += (preds == labels).sum().item()\n",
    "            total_samples += batch_size\n",
    "\n",
    "        avg_loss = total_loss / total_samples\n",
    "        train_acc = total_correct / total_samples\n",
    "        train_losses.append(avg_loss)\n",
    "\n",
    "        # ---- evaluation phase ----\n",
    "        model.eval()\n",
    "        test_correct = 0\n",
    "        test_total = 0\n",
    "\n",
    "        with torch.no_grad():\n",
    "            for inputs, labels in test_loader:\n",
    "                inputs, labels = inputs.to(device), labels.to(device)\n",
    "                outputs = model(inputs)\n",
    "                probabilities = torch.sigmoid(outputs).squeeze(1)\n",
    "                preds = (probabilities > 0.5).long()\n",
    "                test_correct += (preds == labels).sum().item()\n",
    "                test_total += labels.size(0)\n",
    "\n",
    "        test_acc = test_correct / test_total\n",
    "        test_accuracies.append(test_acc)\n",
    "\n",
    "        print(f'Epoch {epoch+1:2d}/{num_epochs} | '\n",
    "              f'Loss: {avg_loss:.4f} | '\n",
    "              f'Train Acc: {train_acc:.4f} | '\n",
    "              f'Test Acc: {test_acc:.4f}')\n",
    "\n",
    "        # Checkpoint whenever test accuracy improves.\n",
    "        if test_acc > best_test_acc:\n",
    "            best_test_acc = test_acc\n",
    "            torch.save(model.state_dict(), \"best_model.pth\")\n",
    "            print(f\"↑ 保存最佳模型，准确率: {test_acc:.4f}\")\n",
    "\n",
    "    return train_losses, test_accuracies"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "1e801657",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始训练模型...\n",
      "============================================================\n"
     ]
    },
    {
     "name": "stderr",
     "output_type": "stream",
     "text": [
      "C:\\Users\\汤有为\\AppData\\Local\\Temp\\ipykernel_5180\\2960787274.py:37: FutureWarning: The behavior of DataFrame concatenation with empty or all-NA entries is deprecated. In a future version, this will no longer exclude empty or all-NA columns when determining the result dtypes. To retain the old behavior, exclude the relevant entries before the concat operation.\n",
      "  self.df = pd.concat([self.df, pd.DataFrame([new_row])], ignore_index=True)\n"
     ]
    },
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "Epoch  1/10 | Loss: 0.7019 | Train Acc: 0.4583 | Test Acc: 0.4100\n",
      "↑ 保存最佳模型，准确率: 0.4100\n",
      "Epoch  2/10 | Loss: 0.6986 | Train Acc: 0.4767 | Test Acc: 0.4200\n",
      "↑ 保存最佳模型，准确率: 0.4200\n",
      "Epoch  3/10 | Loss: 0.6959 | Train Acc: 0.4867 | Test Acc: 0.4600\n",
      "↑ 保存最佳模型，准确率: 0.4600\n",
      "Epoch  4/10 | Loss: 0.6935 | Train Acc: 0.4967 | Test Acc: 0.5000\n",
      "↑ 保存最佳模型，准确率: 0.5000\n",
      "Epoch  5/10 | Loss: 0.6914 | Train Acc: 0.5150 | Test Acc: 0.5200\n",
      "↑ 保存最佳模型，准确率: 0.5200\n",
      "Epoch  6/10 | Loss: 0.6895 | Train Acc: 0.5183 | Test Acc: 0.5400\n",
      "↑ 保存最佳模型，准确率: 0.5400\n",
      "Epoch  7/10 | Loss: 0.6878 | Train Acc: 0.5417 | Test Acc: 0.5500\n",
      "↑ 保存最佳模型，准确率: 0.5500\n",
      "Epoch  8/10 | Loss: 0.6861 | Train Acc: 0.5383 | Test Acc: 0.5600\n",
      "↑ 保存最佳模型，准确率: 0.5600\n",
      "Epoch  9/10 | Loss: 0.6845 | Train Acc: 0.5417 | Test Acc: 0.5900\n",
      "↑ 保存最佳模型，准确率: 0.5900\n",
      "Epoch 10/10 | Loss: 0.6830 | Train Acc: 0.5483 | Test Acc: 0.5900\n",
      "梯度信息已保存到 'gradient_info.xlsx'\n",
      "共记录了 10 次梯度更新\n"
     ]
    }
   ],
   "source": [
    "# --- Run training ---\n",
    "NUM_EPOCHS = 10\n",
    "\n",
    "# Plain SGD without momentum; the sigmoid probabilities are scored with BCELoss.\n",
    "criterion = nn.BCELoss()\n",
    "optimizer = optim.SGD(model.parameters(), lr=0.001, momentum=0)\n",
    "# Alternative: optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "print(\"开始训练模型...\")\n",
    "print(\"=\" * 60)\n",
    "\n",
    "train_losses, test_accuracies = train_model(\n",
    "    model, train_loader, test_loader, criterion, optimizer, num_epochs=NUM_EPOCHS\n",
    ")\n",
    "\n",
    "# Dump the recorded gradient history to Excel.\n",
    "gradient_recorder.save_to_excel('gradient_info.xlsx')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "362a2f3e",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "0.006049453593790529\n"
     ]
    }
   ],
   "source": [
    "# Hand-check of one recorded parameter update: value + lr * gradient term\n",
    "# (numbers taken from gradient_info.xlsx). NOTE(review): plain SGD updates\n",
    "# subtract lr*grad, so the '+' here presumably reflects a negative raw\n",
    "# gradient -- confirm against the recorded fc_bias_grad sign.\n",
    "expected_param = 0.0060133496299386 + 0.001 * 0.0361039638519287\n",
    "print(expected_param)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e06932a8",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.7"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
