{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "metadata": {},
   "outputs": [],
   "source": [
    "import os\n",
    "import math\n",
    "import argparse\n",
    "\n",
    "import torch\n",
    "import torch.optim as optim\n",
    "import torch.optim.lr_scheduler as lr_scheduler\n",
    "from torch.utils.tensorboard import SummaryWriter\n",
    "from torchvision import transforms\n",
    "\n",
    "from my_dataset import MyDataSet\n",
    "from vit_model import vit_base_patch16_224_in21k as create_model\n",
    "from utils import read_split_data, train_one_epoch, evaluate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "def main(args):\n",
    "    \"\"\"Fine-tune a Vision Transformer and checkpoint it after every epoch.\n",
    "\n",
    "    Expects ``args`` to provide: num_classes, epochs, batch_size, lr, lrf,\n",
    "    data_path, weights, freeze_layers and device.  Writes one checkpoint per\n",
    "    epoch under ./weights_15 and logs loss/accuracy/lr to TensorBoard.\n",
    "    \"\"\"\n",
    "    # Fall back to CPU when CUDA is unavailable.\n",
    "    device = torch.device(args.device if torch.cuda.is_available() else \"cpu\")\n",
    "\n",
    "    # Checkpoint directory; exist_ok replaces the racy exists()/makedirs() pair.\n",
    "    os.makedirs(\"./weights_15\", exist_ok=True)\n",
    "\n",
    "    tb_writer = SummaryWriter()\n",
    "\n",
    "    train_images_path, train_images_label, val_images_path, val_images_label = read_split_data(args.data_path)\n",
    "\n",
    "    data_transform = {\n",
    "        \"train\": transforms.Compose([transforms.RandomResizedCrop(224),\n",
    "                                     transforms.RandomHorizontalFlip(),\n",
    "                                     transforms.ToTensor(),\n",
    "                                     transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])]),\n",
    "        \"val\": transforms.Compose([transforms.Resize(256),\n",
    "                                   transforms.CenterCrop(224),\n",
    "                                   transforms.ToTensor(),\n",
    "                                   transforms.Normalize([0.5, 0.5, 0.5], [0.5, 0.5, 0.5])])}\n",
    "\n",
    "    # Instantiate the training dataset\n",
    "    train_dataset = MyDataSet(images_path=train_images_path,\n",
    "                              images_class=train_images_label,\n",
    "                              transform=data_transform[\"train\"])\n",
    "\n",
    "    # Instantiate the validation dataset\n",
    "    val_dataset = MyDataSet(images_path=val_images_path,\n",
    "                            images_class=val_images_label,\n",
    "                            transform=data_transform[\"val\"])\n",
    "\n",
    "    batch_size = args.batch_size\n",
    "    # Cap workers at CPU count, batch size and 8; batch_size == 1 loads in-process.\n",
    "    nw = min([os.cpu_count(), batch_size if batch_size > 1 else 0, 8])  # number of workers\n",
    "    print('Using {} dataloader workers every process'.format(nw))\n",
    "    train_loader = torch.utils.data.DataLoader(train_dataset,\n",
    "                                               batch_size=batch_size,\n",
    "                                               shuffle=True,\n",
    "                                               pin_memory=True,\n",
    "                                               num_workers=nw,\n",
    "                                               collate_fn=train_dataset.collate_fn)\n",
    "\n",
    "    val_loader = torch.utils.data.DataLoader(val_dataset,\n",
    "                                             batch_size=batch_size,\n",
    "                                             shuffle=False,\n",
    "                                             pin_memory=True,\n",
    "                                             num_workers=nw,\n",
    "                                             collate_fn=val_dataset.collate_fn)\n",
    "\n",
    "    model = create_model(num_classes=args.num_classes, has_logits=False).to(device)\n",
    "\n",
    "    if args.weights != \"\":\n",
    "        assert os.path.exists(args.weights), \"weights file: '{}' not exist.\".format(args.weights)\n",
    "        # NOTE(review): torch.load on an untrusted checkpoint can execute code;\n",
    "        # accepted here since the file is a locally provided pretrained checkpoint.\n",
    "        weights_dict = torch.load(args.weights, map_location=device)\n",
    "        # Drop the pretrained classifier (and, when logits are disabled, the\n",
    "        # pre_logits) weights, whose shapes do not match the new head.\n",
    "        del_keys = (['head.weight', 'head.bias'] if model.has_logits\n",
    "                    else ['pre_logits.fc.weight', 'pre_logits.fc.bias', 'head.weight', 'head.bias'])\n",
    "        for k in del_keys:\n",
    "            del weights_dict[k]\n",
    "        print(model.load_state_dict(weights_dict, strict=False))\n",
    "\n",
    "    if args.freeze_layers:\n",
    "        for name, para in model.named_parameters():\n",
    "            # Freeze every weight except head and pre_logits.\n",
    "            if \"head\" not in name and \"pre_logits\" not in name:\n",
    "                para.requires_grad_(False)\n",
    "            else:\n",
    "                print(\"training {}\".format(name))\n",
    "\n",
    "    # Optimize only the parameters left trainable after (optional) freezing.\n",
    "    pg = [p for p in model.parameters() if p.requires_grad]\n",
    "    optimizer = optim.SGD(pg, lr=args.lr, momentum=0.9, weight_decay=5E-5)\n",
    "    # Cosine decay from lr down to lr * lrf; Scheduler https://arxiv.org/pdf/1812.01187.pdf\n",
    "    lf = lambda x: ((1 + math.cos(x * math.pi / args.epochs)) / 2) * (1 - args.lrf) + args.lrf  # cosine\n",
    "    scheduler = lr_scheduler.LambdaLR(optimizer, lr_lambda=lf)\n",
    "\n",
    "    for epoch in range(args.epochs):\n",
    "        # train\n",
    "        train_loss, train_acc = train_one_epoch(model=model,\n",
    "                                                optimizer=optimizer,\n",
    "                                                data_loader=train_loader,\n",
    "                                                device=device,\n",
    "                                                epoch=epoch)\n",
    "\n",
    "        scheduler.step()\n",
    "\n",
    "        # validate\n",
    "        val_loss, val_acc = evaluate(model=model,\n",
    "                                     data_loader=val_loader,\n",
    "                                     device=device,\n",
    "                                     epoch=epoch)\n",
    "\n",
    "        tags = [\"train_loss\", \"train_acc\", \"val_loss\", \"val_acc\", \"learning_rate\"]\n",
    "        tb_writer.add_scalar(tags[0], train_loss, epoch)\n",
    "        tb_writer.add_scalar(tags[1], train_acc, epoch)\n",
    "        tb_writer.add_scalar(tags[2], val_loss, epoch)\n",
    "        tb_writer.add_scalar(tags[3], val_acc, epoch)\n",
    "        tb_writer.add_scalar(tags[4], optimizer.param_groups[0][\"lr\"], epoch)\n",
    "\n",
    "        # Persist a checkpoint for this epoch.\n",
    "        torch.save(model.state_dict(), \"./weights_15/model-{}.pth\".format(epoch))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "50000 images were found in the dataset.\n",
      "40000 images for training.\n",
      "10000 images for validation.\n",
      "Using 8 dataloader workers every process\n",
      "_IncompatibleKeys(missing_keys=['head.weight', 'head.bias'], unexpected_keys=[])\n",
      "training head.weight\n",
      "training head.bias\n",
      "[train epoch 0] loss: 3.592, acc: 0.572: 100%|██████████| 5000/5000 [01:02<00:00, 79.46it/s]\n",
      "[valid epoch 0] loss: 2.543, acc: 0.771: 100%|██████████| 1250/1250 [00:15<00:00, 82.68it/s]\n",
      "[train epoch 1] loss: 2.240, acc: 0.653: 100%|██████████| 5000/5000 [01:18<00:00, 64.05it/s]\n",
      "[valid epoch 1] loss: 1.599, acc: 0.788: 100%|██████████| 1250/1250 [00:15<00:00, 82.55it/s]\n",
      "[train epoch 2] loss: 1.750, acc: 0.661: 100%|██████████| 5000/5000 [01:02<00:00, 79.48it/s]\n",
      "[valid epoch 2] loss: 1.257, acc: 0.795: 100%|██████████| 1250/1250 [00:15<00:00, 82.73it/s]\n",
      "[train epoch 3] loss: 1.546, acc: 0.671: 100%|██████████| 5000/5000 [01:02<00:00, 79.72it/s]\n",
      "[valid epoch 3] loss: 1.101, acc: 0.799: 100%|██████████| 1250/1250 [00:15<00:00, 82.80it/s]\n",
      "[train epoch 4] loss: 1.445, acc: 0.678: 100%|██████████| 5000/5000 [01:02<00:00, 79.66it/s]\n",
      "[valid epoch 4] loss: 1.015, acc: 0.801: 100%|██████████| 1250/1250 [00:15<00:00, 83.16it/s]\n",
      "[train epoch 5] loss: 1.383, acc: 0.681: 100%|██████████| 5000/5000 [01:02<00:00, 79.58it/s]\n",
      "[valid epoch 5] loss: 0.962, acc: 0.803: 100%|██████████| 1250/1250 [00:15<00:00, 82.81it/s]\n",
      "[train epoch 6] loss: 1.364, acc: 0.679: 100%|██████████| 5000/5000 [01:03<00:00, 79.13it/s]\n",
      "[valid epoch 6] loss: 0.928, acc: 0.805: 100%|██████████| 1250/1250 [00:15<00:00, 82.63it/s]\n",
      "[train epoch 7] loss: 1.319, acc: 0.686: 100%|██████████| 5000/5000 [01:03<00:00, 78.88it/s]\n",
      "[valid epoch 7] loss: 0.904, acc: 0.806: 100%|██████████| 1250/1250 [00:15<00:00, 82.21it/s]\n",
      "[train epoch 8] loss: 1.301, acc: 0.687: 100%|██████████| 5000/5000 [01:18<00:00, 63.95it/s]\n",
      "[valid epoch 8] loss: 0.887, acc: 0.807: 100%|██████████| 1250/1250 [00:15<00:00, 82.52it/s]\n",
      "[train epoch 9] loss: 1.294, acc: 0.689: 100%|██████████| 5000/5000 [01:02<00:00, 79.37it/s]\n",
      "[valid epoch 9] loss: 0.875, acc: 0.809: 100%|██████████| 1250/1250 [00:15<00:00, 82.41it/s]\n",
      "[train epoch 10] loss: 1.286, acc: 0.690: 100%|██████████| 5000/5000 [01:03<00:00, 79.02it/s]\n",
      "[valid epoch 10] loss: 0.867, acc: 0.810: 100%|██████████| 1250/1250 [00:15<00:00, 81.85it/s]\n",
      "[train epoch 11] loss: 1.279, acc: 0.691: 100%|██████████| 5000/5000 [01:03<00:00, 79.23it/s]\n",
      "[valid epoch 11] loss: 0.863, acc: 0.810: 100%|██████████| 1250/1250 [00:15<00:00, 82.88it/s]\n",
      "[train epoch 12] loss: 1.269, acc: 0.694: 100%|██████████| 5000/5000 [01:02<00:00, 79.79it/s]\n",
      "[valid epoch 12] loss: 0.860, acc: 0.810: 100%|██████████| 1250/1250 [00:15<00:00, 83.11it/s]\n",
      "[train epoch 13] loss: 1.278, acc: 0.690: 100%|██████████| 5000/5000 [01:02<00:00, 79.73it/s]\n",
      "[valid epoch 13] loss: 0.858, acc: 0.811: 100%|██████████| 1250/1250 [00:15<00:00, 83.18it/s]\n",
      "[train epoch 14] loss: 1.266, acc: 0.693: 100%|██████████| 5000/5000 [01:02<00:00, 79.58it/s]\n",
      "[valid epoch 14] loss: 0.858, acc: 0.811: 100%|██████████| 1250/1250 [00:15<00:00, 82.76it/s]\n"
     ]
    }
   ],
   "source": [
    "if __name__ == '__main__':\n",
    "    # Training configuration, gathered in a plain namespace; main() only\n",
    "    # reads attributes, so this behaves exactly like an ad-hoc config class.\n",
    "    opt = argparse.Namespace(\n",
    "        num_classes=100,  # number of CIFAR-100 classes\n",
    "        epochs=15,\n",
    "        batch_size=8,\n",
    "        lr=0.001,\n",
    "        lrf=0.01,  # final lr fraction for the cosine schedule\n",
    "        data_path=\"CIFAR-100/train\",  # dataset root directory\n",
    "        model_name=\"\",\n",
    "        weights='vit_base_patch16_224_in21k.pth',  # pretrained checkpoint\n",
    "        freeze_layers=True,\n",
    "        device=\"cuda:0\",\n",
    "    )\n",
    "\n",
    "    main(opt)"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.12.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 4
}
