{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "23d4a4c6",
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import numpy as np\n",
    "import torch\n",
    "import torch.nn as nn\n",
    "import torch.nn.functional as F\n",
    "import torch.optim as optim\n",
    "from torch.optim.lr_scheduler import CosineAnnealingLR, LambdaLR\n",
    "from torch.utils.data import DataLoader\n",
    "import torchvision\n",
    "import torchvision.transforms as transforms\n",
    "from timm.layers import trunc_normal_, DropPath\n",
    "from torchvision.datasets import CIFAR100\n",
    "import matplotlib.pyplot as plt\n",
    "from tqdm.notebook import tqdm\n",
    "import math\n",
    "import random\n",
    "\n",
    "def set_seed(seed=42):\n",
    "    np.random.seed(seed)\n",
    "    torch.manual_seed(seed)\n",
    "    torch.cuda.manual_seed(seed)\n",
    "    torch.backends.cudnn.deterministic = True\n",
    "    torch.backends.cudnn.benchmark = True\n",
    "\n",
    "set_seed(42)\n",
    "\n",
    "\n",
    "save_dir = './newenhanced_scratch_checkpoints'\n",
    "os.makedirs(save_dir, exist_ok=True)\n",
    "\n",
    "\n",
    "curve_path = os.path.join(save_dir, 'training_curves.png')\n",
    "final_curve_path = os.path.join(save_dir, 'final_training_curves.png')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "0e0c82a7",
   "metadata": {},
   "outputs": [],
   "source": [
    "class MixUp:\n",
    "    def __init__(self, alpha=1.0):\n",
    "        self.alpha = alpha\n",
    "    \n",
    "    def __call__(self, x, y):\n",
    "        if self.alpha > 0:\n",
    "            lam = np.random.beta(self.alpha, self.alpha)\n",
    "        else:\n",
    "            lam = 1\n",
    "        \n",
    "        batch_size = x.size(0)\n",
    "        index = torch.randperm(batch_size).to(x.device)\n",
    "        \n",
    "        mixed_x = lam * x + (1 - lam) * x[index, :]\n",
    "        y_a, y_b = y, y[index]\n",
    "        return mixed_x, y_a, y_b, lam\n",
    "\n",
    "class CutMix:\n",
    "    def __init__(self, alpha=1.0):\n",
    "        self.alpha = alpha\n",
    "    \n",
    "    def __call__(self, x, y):\n",
    "        if self.alpha > 0:\n",
    "            lam = np.random.beta(self.alpha, self.alpha)\n",
    "        else:\n",
    "            lam = 1\n",
    "        \n",
    "        batch_size = x.size(0)\n",
    "        index = torch.randperm(batch_size).to(x.device)\n",
    "        \n",
    "        bbx1, bby1, bbx2, bby2 = self.rand_bbox(x.size(), lam)\n",
    "        x[:, :, bbx1:bbx2, bby1:bby2] = x[index, :, bbx1:bbx2, bby1:bby2]\n",
    "        \n",
    "      \n",
    "        lam = 1 - ((bbx2 - bbx1) * (bby2 - bby1) / (x.size()[-1] * x.size()[-2]))\n",
    "        y_a, y_b = y, y[index]\n",
    "        return x, y_a, y_b, lam\n",
    "    \n",
    "    def rand_bbox(self, size, lam):\n",
    "        W = size[2]\n",
    "        H = size[3]\n",
    "        cut_rat = np.sqrt(1. - lam)\n",
    "        cut_w = np.int32(W * cut_rat)\n",
    "        cut_h = np.int32(H * cut_rat)\n",
    "        \n",
    "        cx = np.random.randint(W)\n",
    "        cy = np.random.randint(H)\n",
    "        \n",
    "        bbx1 = np.clip(cx - cut_w // 2, 0, W)\n",
    "        bby1 = np.clip(cy - cut_h // 2, 0, H)\n",
    "        bbx2 = np.clip(cx + cut_w // 2, 0, W)\n",
    "        bby2 = np.clip(cy + cut_h // 2, 0, H)\n",
    "        \n",
    "        return bbx1, bby1, bbx2, bby2\n",
    "\n",
    "\n",
    "def mixup_criterion(criterion, pred, y_a, y_b, lam):\n",
    "    return lam * criterion(pred, y_a) + (1 - lam) * criterion(pred, y_b)\n",
    "\n",
    "class LabelSmoothingCrossEntropy(nn.Module):\n",
    "    def __init__(self, smoothing=0.1):\n",
    "        super(LabelSmoothingCrossEntropy, self).__init__()\n",
    "        self.smoothing = smoothing\n",
    "    \n",
    "    def forward(self, x, target):\n",
    "        confidence = 1. - self.smoothing\n",
    "        logprobs = F.log_softmax(x, dim=-1)\n",
    "        nll_loss = -logprobs.gather(dim=-1, index=target.unsqueeze(1)).squeeze(1)\n",
    "        smooth_loss = -logprobs.mean(dim=-1)\n",
    "        loss = confidence * nll_loss + self.smoothing * smooth_loss\n",
    "        return loss.mean()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "71fea5a2",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Warmup学习率调度器\n",
    "class WarmupCosineScheduler:\n",
    "    def __init__(self, optimizer, warmup_epochs, total_epochs, base_lr, min_lr=0):\n",
    "        self.optimizer = optimizer\n",
    "        self.warmup_epochs = warmup_epochs\n",
    "        self.total_epochs = total_epochs\n",
    "        self.base_lr = base_lr\n",
    "        self.min_lr = min_lr\n",
    "        \n",
    "    def step(self, epoch):\n",
    "        if epoch < self.warmup_epochs:\n",
    "   \n",
    "            lr = self.base_lr * (epoch + 1) / self.warmup_epochs\n",
    "        else:\n",
    "   \n",
    "            progress = (epoch - self.warmup_epochs) / (self.total_epochs - self.warmup_epochs)\n",
    "            lr = self.min_lr + (self.base_lr - self.min_lr) * 0.5 * (1 + math.cos(math.pi * progress))\n",
    "        \n",
    "        for param_group in self.optimizer.param_groups:\n",
    "            param_group['lr'] = lr\n",
    "        \n",
    "        return lr"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bb92cf6b",
   "metadata": {},
   "outputs": [],
   "source": [
    "class LayerNorm(nn.Module):\n",
    "\n",
    "    def __init__(self, normalized_shape, eps=1e-6, data_format=\"channels_last\"):\n",
    "        super().__init__()\n",
    "        self.weight = nn.Parameter(torch.ones(normalized_shape))\n",
    "        self.bias = nn.Parameter(torch.zeros(normalized_shape))\n",
    "        self.eps = eps\n",
    "        self.data_format = data_format\n",
    "        if self.data_format not in [\"channels_last\", \"channels_first\"]:\n",
    "            raise NotImplementedError \n",
    "        self.normalized_shape = (normalized_shape, )\n",
    "    \n",
    "    def forward(self, x):\n",
    "        if self.data_format == \"channels_last\":\n",
    "            return F.layer_norm(x, self.normalized_shape, self.weight, self.bias, self.eps)\n",
    "        elif self.data_format == \"channels_first\":\n",
    "            u = x.mean(1, keepdim=True)\n",
    "            s = (x - u).pow(2).mean(1, keepdim=True)\n",
    "            x = (x - u) / torch.sqrt(s + self.eps)\n",
    "            x = self.weight[:, None, None] * x + self.bias[:, None, None]\n",
    "            return x\n",
    "\n",
    "class Block(nn.Module):\n",
    "\n",
    "    def __init__(self, dim, drop_path=0., layer_scale_init_value=1e-6):\n",
    "        super().__init__()\n",
    "        self.dwconv = nn.Conv2d(dim, dim, kernel_size=7, padding=3, groups=dim)\n",
    "        self.norm = LayerNorm(dim, eps=1e-6)\n",
    "        self.pwconv1 = nn.Linear(dim, 4 * dim)\n",
    "        self.act = nn.GELU()\n",
    "        self.pwconv2 = nn.Linear(4 * dim, dim)\n",
    "        self.gamma = nn.Parameter(layer_scale_init_value * torch.ones((dim)), \n",
    "                                    requires_grad=True) if layer_scale_init_value > 0 else None\n",
    "        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()\n",
    "\n",
    "    def forward(self, x):\n",
    "        input = x\n",
    "        x = self.dwconv(x)\n",
    "        x = x.permute(0, 2, 3, 1) \n",
    "        x = self.norm(x)\n",
    "        x = self.pwconv1(x)\n",
    "        x = self.act(x)\n",
    "        x = self.pwconv2(x)\n",
    "        if self.gamma is not None:\n",
    "            x = self.gamma * x\n",
    "        x = x.permute(0, 3, 1, 2)  \n",
    "\n",
    "        x = input + self.drop_path(x)  \n",
    "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "63073176",
   "metadata": {},
   "outputs": [],
   "source": [
    "class ConvNeXt(nn.Module):\n",
    "    \"\"\"ConvNeXt\"\"\"\n",
    "    def __init__(self, in_chans=3, num_classes=100, \n",
    "                 depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], drop_path_rate=0.2,\n",
    "                 layer_scale_init_value=1e-6, head_init_scale=1.):\n",
    "        super().__init__()\n",
    "\n",
    "        # 下采样层\n",
    "        self.downsample_layers = nn.ModuleList()\n",
    "        stem = nn.Sequential(\n",
    "            nn.Conv2d(in_chans, dims[0], kernel_size=4, stride=4),\n",
    "            LayerNorm(dims[0], eps=1e-6, data_format=\"channels_first\")\n",
    "        )\n",
    "        self.downsample_layers.append(stem)\n",
    "        for i in range(3):\n",
    "            downsample_layer = nn.Sequential(\n",
    "                LayerNorm(dims[i], eps=1e-6, data_format=\"channels_first\"),\n",
    "                nn.Conv2d(dims[i], dims[i+1], kernel_size=2, stride=2),\n",
    "            )\n",
    "            self.downsample_layers.append(downsample_layer)\n",
    "\n",
    "        # 主干网络阶段 - 使用线性递增的drop_path_rate\n",
    "        self.stages = nn.ModuleList()\n",
    "        dp_rates=[x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))] \n",
    "        cur = 0\n",
    "        for i in range(4):\n",
    "            stage = nn.Sequential(\n",
    "                *[Block(dim=dims[i], drop_path=dp_rates[cur + j], \n",
    "                layer_scale_init_value=layer_scale_init_value) for j in range(depths[i])]\n",
    "            )\n",
    "            self.stages.append(stage)\n",
    "            cur += depths[i]\n",
    "\n",
    "        # 分类头\n",
    "        self.norm = nn.LayerNorm(dims[-1], eps=1e-6)\n",
    "        self.head = nn.Linear(dims[-1], num_classes)\n",
    "\n",
    "        # 权重初始化\n",
    "        self.apply(self._init_weights)\n",
    "        self.head.weight.data.mul_(head_init_scale)\n",
    "        self.head.bias.data.mul_(head_init_scale)\n",
    "\n",
    "    def _init_weights(self, m):\n",
    "        if isinstance(m, (nn.Conv2d, nn.Linear)):\n",
    "            trunc_normal_(m.weight, std=.02)\n",
    "            nn.init.constant_(m.bias, 0)\n",
    "\n",
    "    def forward_features(self, x):\n",
    "        for i in range(4):\n",
    "            x = self.downsample_layers[i](x)\n",
    "            x = self.stages[i](x)\n",
    "        return self.norm(x.mean([-2, -1]))  \n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.forward_features(x)\n",
    "        x = self.head(x)\n",
    "        return x\n",
    "\n",
    "def convnext_tiny_cifar100(pretrained=False):\n",
    "    \"\"\"ConvNeXt Tiny\"\"\"\n",
    "    model = ConvNeXt(depths=[3, 3, 9, 3], dims=[96, 192, 384, 768], \n",
    "                     num_classes=100, drop_path_rate=0.2)\n",
    "    return model"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9c81b4a",
   "metadata": {},
   "outputs": [],
   "source": [
    "def get_cifar100_loaders(batch_size=256, num_workers=2):\n",
    "    \"\"\"数据加载和预处理\"\"\"\n",
    "    \n",
    "    # 训练集数据增强\n",
    "    train_transform = transforms.Compose([\n",
    "        transforms.Resize(224),  # ConvNeXt期望224x224输入\n",
    "        transforms.RandomCrop(224, padding=28),  # 随机裁剪，带padding\n",
    "        transforms.RandomHorizontalFlip(),  # 随机水平翻转\n",
    "        transforms.RandAugment(num_ops=2, magnitude=9),\n",
    "        transforms.RandomRotation(degrees=15),   # 随机旋转 ±15度\n",
    "        transforms.ColorJitter(\n",
    "            brightness=0.2,   # 亮度变化\n",
    "            contrast=0.2,     # 对比度变化\n",
    "            saturation=0.2,   # 饱和度变化\n",
    "            hue=0.1          # 色调变化\n",
    "        ),\n",
    "        transforms.RandomApply([\n",
    "            transforms.GaussianBlur(kernel_size=3, sigma=(0.1, 2.0))\n",
    "        ], p=0.2),  # 20%概率应用高斯模糊\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761]),\n",
    "        transforms.RandomErasing(p=0.25, scale=(0.02, 0.33), ratio=(0.3, 3.3))  # 随机擦除\n",
    "    ])\n",
    "    \n",
    "    # 验证集只做基本预处理，不使用数据增强\n",
    "    valid_transform = transforms.Compose([\n",
    "        transforms.Resize(224),\n",
    "        transforms.ToTensor(),\n",
    "        transforms.Normalize(mean=[0.5071, 0.4867, 0.4408], std=[0.2675, 0.2565, 0.2761])\n",
    "    ])\n",
    "    \n",
    "    # 加载CIFAR100数据集\n",
    "    train_dataset = CIFAR100(root='./data', train=True, download=True, transform=train_transform)\n",
    "    valid_dataset = CIFAR100(root='./data', train=False, download=True, transform=valid_transform)\n",
    "    \n",
    "    # 创建数据加载器\n",
    "    train_loader = DataLoader(\n",
    "        train_dataset, batch_size=batch_size, shuffle=True,\n",
    "        num_workers=num_workers, pin_memory=True, drop_last=True\n",
    "    )\n",
    "    \n",
    "    valid_loader = DataLoader(\n",
    "        valid_dataset, batch_size=batch_size, shuffle=False,\n",
    "        num_workers=num_workers, pin_memory=True\n",
    "    )\n",
    "    \n",
    "    return train_loader, valid_loader\n",
    "\n",
     "# Build the data loaders (downloads CIFAR-100 on first run).\n",
     "train_loader, valid_loader = get_cifar100_loaders(batch_size=64)\n",
     "print(f\"数据集大小 - 训练集: {len(train_loader.dataset)}, 验证集: {len(valid_loader.dataset)}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c39cc2a4",
   "metadata": {},
   "outputs": [],
   "source": [
     "def train_one_epoch(model, train_loader, criterion, optimizer, device, mixup=None, cutmix=None, use_mixup_prob=0.8):\n",
     "    \"\"\"Train `model` for one epoch; returns (mean_loss, accuracy_percent).\n",
     "\n",
     "    With probability `use_mixup_prob` a batch is mixed; a second coin flip\n",
     "    then prefers CutMix (if given) over MixUp. Accuracy under mixing is the\n",
     "    lam-weighted match against both label sets, so it only approximates\n",
     "    clean training accuracy.\n",
     "    \"\"\"\n",
     "    model.train()\n",
     "    train_loss = 0\n",
     "    correct = 0\n",
     "    total = 0\n",
     "    \n",
     "    pbar = tqdm(train_loader, desc='训练中')\n",
     "    for batch_idx, (inputs, targets) in enumerate(pbar):\n",
     "        inputs, targets = inputs.to(device), targets.to(device)\n",
     "        \n",
     "        # Decide per batch whether to mix, and which mixing op to apply.\n",
     "        use_mixup = random.random() < use_mixup_prob\n",
     "        use_cutmix = random.random() < 0.5\n",
     "        \n",
     "        if use_mixup:\n",
     "            if use_cutmix and cutmix is not None:\n",
     "                inputs, targets_a, targets_b, lam = cutmix(inputs, targets)\n",
     "            elif mixup is not None:\n",
     "                inputs, targets_a, targets_b, lam = mixup(inputs, targets)\n",
     "            else:\n",
     "                targets_a, targets_b, lam = targets, targets, 1.0\n",
     "        else:\n",
     "            targets_a, targets_b, lam = targets, targets, 1.0\n",
     "        \n",
     "        optimizer.zero_grad()\n",
     "        outputs = model(inputs)\n",
     "        \n",
     "        if use_mixup and (mixup is not None or cutmix is not None):\n",
     "            loss = mixup_criterion(criterion, outputs, targets_a, targets_b, lam)\n",
     "        else:\n",
     "            loss = criterion(outputs, targets)\n",
     "        \n",
     "        loss.backward()\n",
     "        optimizer.step()\n",
     "        \n",
     "        train_loss += loss.item()\n",
     "        _, predicted = outputs.max(1)\n",
     "        total += targets.size(0)\n",
     "        if use_mixup:\n",
     "            # Give lam-weighted credit against both labels of the mixed pair.\n",
     "            correct += (lam * predicted.eq(targets_a).sum().item() + \n",
     "                       (1 - lam) * predicted.eq(targets_b).sum().item())\n",
     "        else:\n",
     "            correct += predicted.eq(targets).sum().item()\n",
     "        \n",
     "        pbar.set_postfix({\n",
     "            'loss': train_loss/(batch_idx+1),\n",
     "            'acc': 100.*correct/total\n",
     "        })\n",
     "    \n",
     "    return train_loss/len(train_loader), 100.*correct/total\n",
    "\n",
    "def validate(model, valid_loader, criterion, device):\n",
    "    \"\"\"验证函数\"\"\"\n",
    "    model.eval()\n",
    "    valid_loss = 0\n",
    "    correct = 0\n",
    "    total = 0\n",
    "    \n",
    "    with torch.no_grad():\n",
    "        pbar = tqdm(valid_loader, desc='验证中')\n",
    "        for batch_idx, (inputs, targets) in enumerate(pbar):\n",
    "            inputs, targets = inputs.to(device), targets.to(device)\n",
    "            \n",
    "            outputs = model(inputs)\n",
    "            loss = criterion(outputs, targets)\n",
    "            \n",
    "            valid_loss += loss.item()\n",
    "            _, predicted = outputs.max(1)\n",
    "            total += targets.size(0)\n",
    "            correct += predicted.eq(targets).sum().item()\n",
    "            \n",
    "            pbar.set_postfix({\n",
    "                'loss': valid_loss/(batch_idx+1),\n",
    "                'acc': 100.*correct/total\n",
    "            })\n",
    "    \n",
    "    return valid_loss/len(valid_loader), 100.*correct/total"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "c273723f",
   "metadata": {},
   "outputs": [],
   "source": [
    "def plot_enhanced_training_curves(train_losses, valid_losses, train_accs, valid_accs, learning_rates, save_path=curve_path):\n",
    "    \n",
    "    plt.figure(figsize=(18, 6))\n",
    "    \n",
    "    plt.subplot(1, 3, 1)\n",
    "    plt.plot(train_losses, label='train_loss')\n",
    "    plt.plot(valid_losses, label='valid_loss')\n",
    "    plt.xlabel('Epoch')\n",
    "    plt.ylabel('Loss')\n",
    "    plt.legend()\n",
    "    plt.title('Loss Curve')\n",
    "\n",
    "    \n",
    "    plt.subplot(1, 3, 2)\n",
    "    plt.plot(train_accs, label='train_accuracy')\n",
    "    plt.plot(valid_accs, label='valid_accuracy')\n",
    "    plt.xlabel('Epoch')\n",
    "    plt.ylabel('Accuracy(%)')\n",
    "    plt.legend()\n",
    "    plt.title('Accuracy Curve')\n",
    "\n",
    "    \n",
    "    \n",
    "    plt.tight_layout()\n",
    "    plt.savefig(save_path, dpi=300, bbox_inches='tight')\n",
    "    plt.show()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5c09234f",
   "metadata": {},
   "outputs": [],
   "source": [
    "# 设置训练参数\n",
    "lr = 5e-4                 # 基础学习率\n",
    "min_lr = 1e-6            # 最小学习率\n",
    "batch_size = 256          # 批次大小  \n",
    "num_epochs = 200         # 训练轮次\n",
    "warmup_epochs = 10       # 预热轮次\n",
    "weight_decay = 0.05      # 权重衰减\n",
    "label_smoothing = 0.1    # 标签平滑\n",
    "device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')\n",
    "\n",
    "print(f\"使用设备: {device}\")\n",
    "\n",
    "# 创建模型\n",
    "model = convnext_tiny_cifar100(pretrained=False)\n",
    "model = model.to(device)\n",
    "total_params = sum(p.numel() for p in model.parameters())\n",
    "trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
    "print(f\"总参数量: {total_params:,}\")\n",
    "print(f\"可训练参数量: {trainable_params:,}\")\n",
    "\n",
    "\n",
    "criterion = LabelSmoothingCrossEntropy(smoothing=label_smoothing)\n",
    "optimizer = optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)\n",
    "lr_scheduler = WarmupCosineScheduler(optimizer, warmup_epochs, num_epochs, lr, min_lr)\n",
    "\n",
    "mixup = MixUp(alpha=0.2)\n",
    "cutmix = CutMix(alpha=1.0)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "baa4cd94",
   "metadata": {},
   "outputs": [],
   "source": [
     "# Main training loop.\n",
     "train_losses, valid_losses = [], []\n",
     "train_accs, valid_accs = [], []\n",
     "learning_rates = []  # LR recorded once per epoch for plotting\n",
     "best_acc = 0\n",
     "\n",
     "print(f\"\\n开始训练模型，总共{num_epochs}轮...\")\n",
     "\n",
     "for epoch in range(num_epochs):\n",
     "    print(f\"\\n=== 轮次: {epoch+1}/{num_epochs} ===\")\n",
     "    \n",
     "    # Update LR for this epoch (linear warmup + cosine decay).\n",
     "    current_lr = lr_scheduler.step(epoch)\n",
     "    learning_rates.append(current_lr)\n",
     "    print(f\"当前学习率: {current_lr:.6f}\")\n",
     "    \n",
     "    # Train one epoch with MixUp/CutMix augmentation.\n",
     "    train_loss, train_acc = train_one_epoch(\n",
     "        model, train_loader, criterion, optimizer, device, \n",
     "        mixup=mixup, cutmix=cutmix, use_mixup_prob=0.8 \n",
     "    )\n",
     "    train_losses.append(train_loss)\n",
     "    train_accs.append(train_acc)\n",
     "    \n",
     "    # Evaluate on the held-out validation set.\n",
     "    valid_loss, valid_acc = validate(model, valid_loader, criterion, device)\n",
     "    valid_losses.append(valid_loss)\n",
     "    valid_accs.append(valid_acc)\n",
     "    \n",
     "    print(f\"训练 - Loss: {train_loss:.4f}, Acc: {train_acc:.2f}%\")\n",
     "    print(f\"验证 - Loss: {valid_loss:.4f}, Acc: {valid_acc:.2f}%\")\n",
     "    \n",
     "    # Checkpoint whenever validation accuracy improves.\n",
     "    if valid_acc > best_acc:\n",
     "        best_acc = valid_acc\n",
     "        torch.save({\n",
     "            'epoch': epoch,\n",
     "            'model_state_dict': model.state_dict(),\n",
     "            'optimizer_state_dict': optimizer.state_dict(),\n",
     "            'accuracy': valid_acc,\n",
     "            'config': {\n",
     "                'lr': lr,\n",
     "                'min_lr': min_lr,\n",
     "                'weight_decay': weight_decay,\n",
     "                'label_smoothing': label_smoothing,\n",
     "                'warmup_epochs': warmup_epochs,\n",
     "                'drop_path_rate': 0.2,\n",
     "                'mixup_alpha': 0.2,\n",
     "                'cutmix_alpha': 1.0\n",
     "            }\n",
     "        }, os.path.join(save_dir, 'best_enhanced_model.pth'))\n",
     "        print(f\"保存最佳增强模型，准确率: {valid_acc:.2f}%\")\n",
     "    \n",
     "    # Re-plot training curves every 10 epochs (and after the first epoch).\n",
     "    if (epoch + 1) % 10 == 0 or epoch == 0:\n",
     "        plot_enhanced_training_curves(train_losses, valid_losses, train_accs, valid_accs, learning_rates, curve_path)\n",
     "\n",
     "# Final full training curves after the last epoch.\n",
     "plot_enhanced_training_curves(train_losses, valid_losses, train_accs, valid_accs, learning_rates, final_curve_path)\n",
     "print(f\"\\n 增强训练完成! 最佳验证准确率: {best_acc:.2f}%\")"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
