{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 1,
   "id": "3bc50f2b-ab7b-453d-a2a7-78c404d4acba",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torchvision.models import resnet50, ResNet50_Weights\n",
    "from torch import nn\n",
    "from torch import optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "\n",
    "import torch\n",
    "\n",
    "from PIL import Image, ImageDraw\n",
    "from torchvision import transforms\n",
    "import os"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "d818d456-f4eb-4666-bd95-a5633d218733",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Run on the GPU when available, otherwise fall back to CPU.\n",
    "device=torch.device('cuda' if torch.cuda.is_available() else 'cpu')"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "68b012d8-c12c-4d15-9a7f-d4051d44db54",
   "metadata": {},
   "outputs": [],
   "source": [
    "from torchvision.datasets import VOCDetection, VOCSegmentation\n",
    "from torchvision import transforms\n",
    "\n",
    "# Object-detection dataset (PASCAL VOC 2012, train split)\n",
    "voc_detection = VOCDetection(\n",
    "    root='~/code/AI/data-set/VOCDetection', \n",
    "    year='2012',\n",
    "    image_set='train',\n",
    "    download=True,\n",
    "    transform=transforms.ToTensor()\n",
    ")\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "a348cad2-7a53-492a-9a38-e3e7340d08f0",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "segmentation\n"
     ]
    }
   ],
   "source": [
    "print(\"segmentation\")\n",
    "# Semantic-segmentation dataset (PASCAL VOC 2012, train split)\n",
    "voc_segmentation = VOCSegmentation(\n",
    "    root='~/code/AI/data-set/VOCSegmentation',\n",
    "    year='2012',\n",
    "    image_set='train',\n",
    "    download=True,\n",
    "    transform=transforms.ToTensor(),\n",
    "    target_transform=transforms.ToTensor()\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "065435d5-140a-4bcb-a89b-a9afe03803e2",
   "metadata": {},
   "outputs": [],
   "source": [
    "IMAGE_SIZE = 224    # network input resolution (square, pixels)\n",
    "GRID_NUM = 7        # YOLOv1 grid: image is split into GRID_NUM x GRID_NUM cells\n",
    "OBJ_KIND = 20       # number of object classes (PASCAL VOC has 20)\n",
    "LAMDA_NOOBJ = 0.5   # down-weight for confidence loss of cells with no object\n",
    "LAMDA_COORD = 0.25  # weight of the coordinate loss; NOTE(review): the YOLOv1 paper uses 5 here — confirm this value is intentional"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "0ea38bca-0cd9-4408-b14f-af73382d139d",
   "metadata": {},
   "outputs": [],
   "source": [
    "class VOCDataset4YOLO(Dataset):\n",
    "    \"\"\"PASCAL VOC detection data re-encoded as YOLOv1 training targets.\n",
    "\n",
    "    Each item is (x, y): x is the image resized to IMAGE_SIZE as a\n",
    "    (3, 224, 224) tensor; y is a (GRID_NUM, GRID_NUM, 30) target grid with\n",
    "    channels 0-4  = box 1 (x_offset, y_offset, w, h, confidence),\n",
    "    channels 5-9  = box 2 (duplicate of box 1 in the ground truth),\n",
    "    channels 10-29 = one-hot class vector.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.voc_ds = VOCDetection(\n",
    "            root='~/code/AI/data-set/VOCDetection', \n",
    "            year='2012',\n",
    "            image_set='train',\n",
    "            download=True\n",
    "        )\n",
    "        # Build stable class-id mappings from the names seen in the data.\n",
    "        # NOTE: this pass decodes every image once, so construction is slow.\n",
    "        classdict = set()\n",
    "        for _, label in self.voc_ds:\n",
    "            for obj in label[\"annotation\"][\"object\"]:\n",
    "                classdict.add(obj[\"name\"])\n",
    "        names = sorted(classdict)\n",
    "        self.id2name = dict(enumerate(names))\n",
    "        self.name2id = {n: i for i, n in enumerate(names)}\n",
    "\n",
    "    def __getitem__(self, index):\n",
    "        img, label = self.voc_ds[index]\n",
    "        # Scale factors from the original resolution to IMAGE_SIZE.\n",
    "        x_scale = IMAGE_SIZE / img.width\n",
    "        y_scale = IMAGE_SIZE / img.height\n",
    "        img = img.resize((IMAGE_SIZE, IMAGE_SIZE))\n",
    "        x = transforms.ToTensor()(img)  # (3, 224, 224)\n",
    "        y = torch.zeros(GRID_NUM, GRID_NUM, 5 + 5 + OBJ_KIND)  # (7, 7, 30)\n",
    "        grid_size = IMAGE_SIZE / GRID_NUM\n",
    "        boxes = []\n",
    "        for obj in label[\"annotation\"][\"object\"]:\n",
    "            # Bounding box in resized-image pixel coordinates.\n",
    "            box = (float(obj[\"bndbox\"][\"xmin\"]) * x_scale,\n",
    "                float(obj[\"bndbox\"][\"ymin\"]) * y_scale,\n",
    "                float(obj[\"bndbox\"][\"xmax\"]) * x_scale,\n",
    "                float(obj[\"bndbox\"][\"ymax\"]) * y_scale)\n",
    "            center_x = (box[0] + box[2]) / 2\n",
    "            center_y = (box[1] + box[3]) / 2\n",
    "            # BUG FIX: clamp so a center lying exactly on the right/bottom\n",
    "            # image edge (center == IMAGE_SIZE) does not index out of range.\n",
    "            index_x = min(int(center_x / grid_size), GRID_NUM - 1)\n",
    "            index_y = min(int(center_y / grid_size), GRID_NUM - 1)\n",
    "            boxes.append(box)\n",
    "            # Offset of the center inside its (clamped) grid cell, in [0, 1].\n",
    "            center_x_ingrid = center_x / grid_size - index_x\n",
    "            center_y_ingrid = center_y / grid_size - index_y\n",
    "            # Width/height as fractions of the full image.\n",
    "            box_x_wid = (box[2] - box[0]) / IMAGE_SIZE\n",
    "            box_y_wid = (box[3] - box[1]) / IMAGE_SIZE\n",
    "            # First tensor dim is the x (column) grid index, second is y.\n",
    "            y[index_x][index_y][0:5] = torch.tensor([center_x_ingrid, center_y_ingrid, box_x_wid, box_y_wid, 1])\n",
    "            y[index_x][index_y][5:10] = y[index_x][index_y][0:5]\n",
    "            y[index_x][index_y][10:] = torch.zeros(OBJ_KIND)\n",
    "            y[index_x][index_y][10 + self.name2id[obj[\"name\"]]] = 1.0\n",
    "\n",
    "        return x, y\n",
    "        # return x, y, img, boxes  # for visual debugging\n",
    "\n",
    "    def __len__(self):\n",
    "        return len(self.voc_ds)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "6182c66b-e91c-427e-87a2-45c9ca7f32b9",
   "metadata": {},
   "outputs": [],
   "source": [
    "data_set = VOCDataset4YOLO()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "faaf75c1-1267-44b0-b289-c22a77885e6b",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "5717"
      ]
     },
     "execution_count": 8,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "len(data_set)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "5b470774-1714-4f60-9036-51dfa99f7197",
   "metadata": {},
   "outputs": [],
   "source": [
    "def drawYOLO(img, boxes):\n",
    "    \"\"\"Draw detection boxes (red) and the YOLO grid (green) on a PIL image in place.\"\"\"\n",
    "    draw = ImageDraw.Draw(img)\n",
    "    for box in boxes:\n",
    "        print(box)\n",
    "        draw.rectangle(box, outline=\"red\", width=3)\n",
    "    grid_size = IMAGE_SIZE / GRID_NUM\n",
    "    # Horizontal grid lines.\n",
    "    for row in range(1, GRID_NUM):\n",
    "        draw.line([(0, row * grid_size), (IMAGE_SIZE, row * grid_size)], fill=\"green\", width=2)\n",
    "    # Vertical grid lines.\n",
    "    for col in range(1, GRID_NUM):\n",
    "        draw.line([(col * grid_size, 0), (col * grid_size, IMAGE_SIZE)], fill=\"green\", width=2)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "5e37c459-6966-42bc-972a-c4155e52bd0e",
   "metadata": {},
   "outputs": [],
   "source": [
    "class YOLOV1(nn.Module):\n",
    "    \"\"\"YOLOv1-style detector: frozen ResNet-50 backbone plus a conv/FC head.\n",
    "\n",
    "    forward() returns (batch, GRID_NUM, GRID_NUM, 10 + OBJ_KIND): two boxes\n",
    "    (x, y, w, h, confidence) and OBJ_KIND class scores per grid cell, all\n",
    "    squashed into (0, 1) by the final Sigmoid.\n",
    "    \"\"\"\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        # Use ResNet-50 as the backbone. Its conv stack has total stride 32\n",
    "        # (see the printed architecture below), so a 224x224 input yields a\n",
    "        # (batch, 2048, 7, 7) feature map — not 14x14 as previously noted.\n",
    "        resnet = resnet50(weights=ResNet50_Weights.DEFAULT) # (batch, 2048, 7, 7)\n",
    "        # Drop avgpool and fc: keep only the convolutional feature extractor.\n",
    "        self.backbone = nn.Sequential(\n",
    "            *list(resnet.children())[:-2]\n",
    "        )\n",
    "        # Freeze the backbone; only the head below is trained.\n",
    "        for param in self.backbone.parameters():\n",
    "            param.requires_grad = False\n",
    "        self.head = nn.Sequential(\n",
    "            nn.Conv2d(in_channels=2048, out_channels=1024, kernel_size=3, stride=1, padding=1), # output:(batch, 1024, 7, 7)\n",
    "            nn.LeakyReLU(0.1),\n",
    "            nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding=1), # output:(batch, 1024, 7, 7)\n",
    "            nn.LeakyReLU(0.1),\n",
    "            nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding=1), # output:(batch, 1024, 7, 7)\n",
    "            nn.LeakyReLU(0.1),\n",
    "            \n",
    "            nn.Flatten(),\n",
    "            nn.Linear(in_features=GRID_NUM*GRID_NUM*1024, out_features=4096),\n",
    "            nn.Dropout(),\n",
    "            nn.LeakyReLU(0.1),\n",
    "            nn.Linear(in_features=4096, out_features=GRID_NUM*GRID_NUM*(10+OBJ_KIND)),\n",
    "            nn.Sigmoid(),\n",
    "            )\n",
    "    def forward(self, x): # x: (batch, 3, 224, 224)\n",
    "        y = self.backbone(x)  # (batch, 2048, 7, 7)\n",
    "        # print(y.size())\n",
    "        y = self.head(y)      # (batch, GRID_NUM*GRID_NUM*(10+OBJ_KIND))\n",
    "        return y.view(-1, GRID_NUM, GRID_NUM, 10+OBJ_KIND)\n",
    "        "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 11,
   "id": "7aea629a-2cbd-4c7d-91f7-1a574d5acb2c",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Batches of 64 samples, reshuffled every epoch.\n",
    "data_loader = DataLoader(data_set, batch_size = 64, shuffle=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 12,
   "id": "66d96c9a-5873-42e6-b970-06d126ae5377",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Standalone ResNet-50 instance, only used to inspect the architecture below.\n",
    "resnet = resnet50(weights=ResNet50_Weights.DEFAULT) # conv features for a 224x224 input: (batch, 2048, 7, 7)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 13,
   "id": "03a9406c-6ea7-47c4-aa5f-10b9ed381634",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "ResNet(\n",
       "  (conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)\n",
       "  (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "  (relu): ReLU(inplace=True)\n",
       "  (maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
       "  (layer1): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(64, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "        (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(256, 64, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(64, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (layer2): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(256, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (3): Bottleneck(\n",
       "      (conv1): Conv2d(512, 128, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(128, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (layer3): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(512, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(512, 1024, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (3): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (4): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (5): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 256, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(256, 1024, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(1024, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (layer4): Sequential(\n",
       "    (0): Bottleneck(\n",
       "      (conv1): Conv2d(1024, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "      (downsample): Sequential(\n",
       "        (0): Conv2d(1024, 2048, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
       "        (1): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      )\n",
       "    )\n",
       "    (1): Bottleneck(\n",
       "      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "    (2): Bottleneck(\n",
       "      (conv1): Conv2d(2048, 512, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
       "      (bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (conv3): Conv2d(512, 2048, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
       "      (bn3): BatchNorm2d(2048, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
       "      (relu): ReLU(inplace=True)\n",
       "    )\n",
       "  )\n",
       "  (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))\n",
       "  (fc): Linear(in_features=2048, out_features=1000, bias=True)\n",
       ")"
      ]
     },
     "execution_count": 13,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "resnet"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "77833cf3-70fc-4ec2-8df6-9ee98d16e210",
   "metadata": {},
   "outputs": [],
   "source": []
  },
  {
   "cell_type": "code",
   "execution_count": 14,
   "id": "b8931d37-b4df-47c8-b02d-62f0bd6915ad",
   "metadata": {},
   "outputs": [],
   "source": [
    "# Scratch copy of the detection head (cf. YOLOV1.head).\n",
    "# BUG FIX: the backbone emits a (batch, 2048, 7, 7) feature map, so the first\n",
    "# conv must use stride=1. With the previous stride=2, Flatten would produce\n",
    "# 4*4*1024 features and the Linear expecting GRID_NUM*GRID_NUM*1024 would\n",
    "# fail at runtime.\n",
    "head = nn.Sequential(\n",
    "    nn.Conv2d(in_channels=2048, out_channels=1024, kernel_size=3, stride=1, padding=1), # output:(batch, 1024, 7, 7)\n",
    "    nn.LeakyReLU(0.1),\n",
    "    nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding=1), # output:(batch, 1024, 7, 7)\n",
    "    nn.LeakyReLU(0.1),\n",
    "    nn.Conv2d(in_channels=1024, out_channels=1024, kernel_size=3, stride=1, padding=1), # output:(batch, 1024, 7, 7)\n",
    "    nn.LeakyReLU(0.1),\n",
    "\n",
    "    nn.Flatten(),\n",
    "    nn.Linear(in_features=GRID_NUM*GRID_NUM*1024, out_features=4096),\n",
    "    nn.Dropout(),\n",
    "    nn.LeakyReLU(0.1),\n",
    "    nn.Linear(in_features=4096, out_features=GRID_NUM*GRID_NUM*(10+OBJ_KIND)),\n",
    "    nn.Sigmoid(),\n",
    "    )"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 15,
   "id": "b727ee0f-aa82-41bd-a78f-a5594c9eb559",
   "metadata": {},
   "outputs": [
    {
     "data": {
      "text/plain": [
       "0.4799999999999999"
      ]
     },
     "execution_count": 15,
     "metadata": {},
     "output_type": "execute_result"
    }
   ],
   "source": [
    "def computeIOU(grid_x, grid_y, box1, box2):\n",
    "    \"\"\"Intersection-over-union of two YOLO-encoded boxes in one grid cell.\n",
    "\n",
    "    Each box is (x_offset, y_offset, w, h): the offsets are fractions of a\n",
    "    grid cell, w and h are fractions of IMAGE_SIZE. Returns 0 for disjoint\n",
    "    boxes.\n",
    "    \"\"\"\n",
    "    grid_size = IMAGE_SIZE // GRID_NUM\n",
    "\n",
    "    def corners(box):\n",
    "        # Decode a YOLO box into absolute (left, top, right, bottom) pixels.\n",
    "        off_x, off_y, w, h = box\n",
    "        left = grid_x * grid_size + off_x * grid_size - w * IMAGE_SIZE / 2\n",
    "        top = grid_y * grid_size + off_y * grid_size - h * IMAGE_SIZE / 2\n",
    "        return left, top, left + w * IMAGE_SIZE, top + h * IMAGE_SIZE\n",
    "\n",
    "    l1, t1, r1, b1 = corners(box1)\n",
    "    l2, t2, r2, b2 = corners(box2)\n",
    "\n",
    "    inter_left, inter_top = max(l1, l2), max(t1, t2)\n",
    "    inter_right, inter_bottom = min(r1, r2), min(b1, b2)\n",
    "    if inter_left >= inter_right or inter_top >= inter_bottom:\n",
    "        return 0\n",
    "    inter_area = (inter_right - inter_left) * (inter_bottom - inter_top)\n",
    "    area1 = box1[2] * box1[3] * IMAGE_SIZE * IMAGE_SIZE\n",
    "    area2 = box2[2] * box2[3] * IMAGE_SIZE * IMAGE_SIZE\n",
    "    return inter_area / (area1 + area2 - inter_area)\n",
    "computeIOU(2,3, (0.2, 0.3, 0.5,0.5), (0.4, 0.3, 0.4, 0.3))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 16,
   "id": "e00e0f6c-f054-4cc2-9579-d987b78e534b",
   "metadata": {},
   "outputs": [],
   "source": [
    "def train(force_train=False, model_path='checkpoint.pth'):\n",
    "    \"\"\"Train the YOLOv1 model, checkpointing whenever the epoch loss improves.\n",
    "\n",
    "    Skips training when `model_path` already exists unless force_train is\n",
    "    True. Stops early once the loss has not improved for STOP_PATIENCE\n",
    "    consecutive epochs.\n",
    "    \"\"\"\n",
    "    # Skip training when a checkpoint already exists and it is not forced.\n",
    "    if os.path.exists(model_path) and not force_train:\n",
    "        print(\"model trained, not start trainning\")\n",
    "        return\n",
    "    model = YOLOV1().to(device)\n",
    "    # Only head parameters require grad (the backbone is frozen).\n",
    "    optimizer = optim.Adam([param for param in model.parameters() if param.requires_grad], lr=3e-5)\n",
    "    model.train()  # enable dropout / batch-norm training behaviour\n",
    "    losses = []\n",
    "    STOP_PATIENCE = 5\n",
    "    for epoch in range(500):\n",
    "        epoch_loss = 0\n",
    "        for batch, (x_batch, y_batch) in enumerate(data_loader):\n",
    "            x_batch = x_batch.to(device)\n",
    "            y_batch = y_batch.to(device)\n",
    "\n",
    "            y_model = model(x_batch)\n",
    "            # Keep the accumulator on the same device as the model outputs.\n",
    "            loss = torch.tensor(0.0, device=device)\n",
    "            for i in range(len(x_batch)):\n",
    "                y = y_batch[i]\n",
    "                y_m = y_model[i]\n",
    "                # Accumulate the YOLOv1 loss cell by cell.\n",
    "                for grid_x_i in range(GRID_NUM):\n",
    "                    for grid_y_i in range(GRID_NUM):\n",
    "                        data_grid_true = y[grid_x_i][grid_y_i]\n",
    "                        data_grid_model = y_m[grid_x_i][grid_y_i]\n",
    "                        # Cell without an object: only penalize confidences.\n",
    "                        if data_grid_true[4] <= 0:\n",
    "                            loss_c_noobj = data_grid_model[4]**2 + data_grid_model[9]**2\n",
    "                            loss = loss + LAMDA_NOOBJ * loss_c_noobj\n",
    "                            continue\n",
    "                        # Cell with an object: the predicted box with the higher\n",
    "                        # IOU is responsible. float() detaches the IOU so no\n",
    "                        # gradient flows through the confidence target.\n",
    "                        iou_box1 = float(computeIOU(grid_x_i, grid_y_i, data_grid_model[:4], data_grid_true[:4]))\n",
    "                        iou_box2 = float(computeIOU(grid_x_i, grid_y_i, data_grid_model[5:9], data_grid_true[5:9]))\n",
    "                        if iou_box1 > iou_box2:\n",
    "                            xywh = data_grid_model[:4]\n",
    "                            c_obj, c_noobj = data_grid_model[4], data_grid_model[9]\n",
    "                            iou_obj = iou_box1\n",
    "                        else:\n",
    "                            xywh = data_grid_model[5:9]\n",
    "                            c_obj, c_noobj = data_grid_model[9], data_grid_model[4]\n",
    "                            iou_obj = iou_box2\n",
    "                        # BUG FIX: the sqrt terms used to be stand-alone (dead)\n",
    "                        # statements, so w/h never contributed to the loss. The\n",
    "                        # small eps keeps sqrt's gradient finite near 0.\n",
    "                        loss_xywh = ((xywh[0] - data_grid_true[0])**2\n",
    "                                     + (xywh[1] - data_grid_true[1])**2\n",
    "                                     + (torch.sqrt(xywh[2] + 1e-8) - torch.sqrt(data_grid_true[2]))**2\n",
    "                                     + (torch.sqrt(xywh[3] + 1e-8) - torch.sqrt(data_grid_true[3]))**2)\n",
    "                        loss_c_obj = (c_obj - iou_obj)**2\n",
    "                        loss_c_noobj = (c_noobj)**2\n",
    "                        loss_class = ((data_grid_model[10:] - data_grid_true[10:])**2).sum()\n",
    "                        loss = loss + loss_xywh * LAMDA_COORD + loss_c_obj + loss_c_noobj * LAMDA_NOOBJ + loss_class\n",
    "            loss = loss / len(x_batch)\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "            epoch_loss += loss.item()\n",
    "        epoch_loss = epoch_loss / len(data_loader)\n",
    "        losses.append(epoch_loss)\n",
    "        if len(losses) == 1 or losses[-1] < losses[-2]:\n",
    "            # Save atomically (temp file + rename). BUG FIX: save to\n",
    "            # model_path instead of a hard-coded 'checkpoint.pth'.\n",
    "            tmp_path = model_path + '.tmp'\n",
    "            torch.save({'model': model.state_dict(),\n",
    "                        'optimizer': optimizer.state_dict()}, tmp_path)\n",
    "            os.replace(tmp_path, model_path)\n",
    "        # BUG FIX: epoch/loss were passed as extra print() arguments instead\n",
    "        # of being formatted into the message.\n",
    "        print(f\"epoch {epoch} loss: {epoch_loss}\")\n",
    "        if len(losses) > STOP_PATIENCE:\n",
    "            # BUG FIX: the old loop started at i=0, where losses[-0] is\n",
    "            # losses[0]; compare the last STOP_PATIENCE consecutive pairs.\n",
    "            early_stop = all(losses[-i] >= losses[-i-1] for i in range(1, STOP_PATIENCE + 1))\n",
    "            if early_stop:\n",
    "                print(\"end before 500 epoch, epoch:\", epoch)\n",
    "                return"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 17,
   "id": "85734f98-0834-40f1-8b8f-c6760219ab43",
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch {} loss: {} 0 4.475739611519708\n",
      "epoch {} loss: {} 1 2.7115182081858316\n",
      "epoch {} loss: {} 2 2.2532238245010374\n",
      "epoch {} loss: {} 3 2.0978705591625637\n",
      "epoch {} loss: {} 4 1.9357559230592516\n",
      "epoch {} loss: {} 5 1.7515709320704143\n",
      "epoch {} loss: {} 6 1.557441606786516\n",
      "epoch {} loss: {} 7 1.3986847665574815\n",
      "epoch {} loss: {} 8 1.2624957190619575\n",
      "epoch {} loss: {} 9 1.119078321589364\n",
      "epoch {} loss: {} 10 1.0076611816883088\n",
      "epoch {} loss: {} 11 0.8828142503897349\n",
      "epoch {} loss: {} 12 0.7890591886308458\n",
      "epoch {} loss: {} 13 0.7014763447973463\n",
      "epoch {} loss: {} 14 0.6203861557775073\n",
      "epoch {} loss: {} 15 0.5471046308676402\n",
      "epoch {} loss: {} 16 0.49216561052534313\n",
      "epoch {} loss: {} 17 0.44082991066906185\n",
      "epoch {} loss: {} 18 0.3958037371436755\n",
      "epoch {} loss: {} 19 0.3595969998174243\n",
      "epoch {} loss: {} 20 0.3276558346218533\n",
      "epoch {} loss: {} 21 0.297458378970623\n",
      "epoch {} loss: {} 22 0.27894485692183174\n",
      "epoch {} loss: {} 23 0.255474481648869\n",
      "epoch {} loss: {} 24 0.23233680493301814\n",
      "epoch {} loss: {} 25 0.21511036389403873\n",
      "epoch {} loss: {} 26 0.2024344259666072\n",
      "epoch {} loss: {} 27 0.18875432287653288\n",
      "epoch {} loss: {} 28 0.1825632952981525\n",
      "epoch {} loss: {} 29 0.1715904970963796\n",
      "epoch {} loss: {} 30 0.16806608173582288\n",
      "epoch {} loss: {} 31 0.15721547934744093\n",
      "epoch {} loss: {} 32 0.15092652589082717\n",
      "epoch {} loss: {} 33 0.1466769225895405\n",
      "epoch {} loss: {} 34 0.140024988932742\n",
      "epoch {} loss: {} 35 0.1340801580912537\n",
      "epoch {} loss: {} 36 0.13050701055261824\n",
      "epoch {} loss: {} 37 0.12702500530415112\n",
      "epoch {} loss: {} 38 0.11985808428790834\n",
      "epoch {} loss: {} 39 0.11386901210579607\n",
      "epoch {} loss: {} 40 0.11233294854561487\n",
      "epoch {} loss: {} 41 0.10683409248789151\n",
      "epoch {} loss: {} 42 0.10189610189861721\n",
      "epoch {} loss: {} 43 0.1011971985300382\n",
      "epoch {} loss: {} 44 0.09627563709186183\n",
      "epoch {} loss: {} 45 0.09296908018489679\n",
      "epoch {} loss: {} 46 0.08923768078287443\n",
      "epoch {} loss: {} 47 0.08632322760919729\n",
      "epoch {} loss: {} 48 0.08455131500959397\n",
      "epoch {} loss: {} 49 0.08134643414782153\n",
      "epoch {} loss: {} 50 0.07956874519586563\n",
      "epoch {} loss: {} 51 0.07853400520980358\n",
      "epoch {} loss: {} 52 0.07450536526739597\n",
      "epoch {} loss: {} 53 0.07415725948909918\n",
      "epoch {} loss: {} 54 0.0696909983538919\n",
      "epoch {} loss: {} 55 0.068961383195387\n",
      "epoch {} loss: {} 56 0.06829824389682876\n",
      "epoch {} loss: {} 57 0.06532430739866363\n",
      "epoch {} loss: {} 58 0.0654543569104539\n",
      "epoch {} loss: {} 59 0.06631785275207626\n",
      "epoch {} loss: {} 60 0.06367197736269897\n",
      "epoch {} loss: {} 61 0.060897114293442835\n",
      "epoch {} loss: {} 62 0.061062281164858075\n",
      "epoch {} loss: {} 63 0.05965625022848447\n",
      "epoch {} loss: {} 64 0.05771289573361476\n",
      "epoch {} loss: {} 65 0.05649633132335213\n",
      "epoch {} loss: {} 66 0.05437536287224955\n",
      "epoch {} loss: {} 67 0.0559482107973761\n",
      "epoch {} loss: {} 68 0.05356195881548855\n",
      "epoch {} loss: {} 69 0.05146622266620397\n",
      "epoch {} loss: {} 70 0.052438986260030004\n",
      "epoch {} loss: {} 71 0.052154640211827225\n",
      "epoch {} loss: {} 72 0.05004947359363238\n",
      "epoch {} loss: {} 73 0.04780497029423714\n",
      "epoch {} loss: {} 74 0.04753285023487276\n",
      "epoch {} loss: {} 75 0.047523603815999294\n",
      "epoch {} loss: {} 76 0.04631628841161728\n",
      "epoch {} loss: {} 77 0.04581132791936397\n",
      "epoch {} loss: {} 78 0.04534772251629167\n",
      "epoch {} loss: {} 79 0.04553593099117279\n",
      "epoch {} loss: {} 80 0.0436293200072315\n",
      "epoch {} loss: {} 81 0.04170967561917172\n",
      "epoch {} loss: {} 82 0.04254235910872618\n",
      "epoch {} loss: {} 83 0.04249406158924103\n",
      "epoch {} loss: {} 84 0.04125313725736406\n",
      "epoch {} loss: {} 85 0.039922492144008476\n",
      "epoch {} loss: {} 86 0.03903641094350153\n",
      "epoch {} loss: {} 87 0.03808541012307008\n",
      "epoch {} loss: {} 88 0.03681274466216564\n",
      "epoch {} loss: {} 89 0.03616207701464494\n",
      "epoch {} loss: {} 90 0.03654176017476453\n",
      "epoch {} loss: {} 91 0.03921647214641174\n",
      "epoch {} loss: {} 92 0.03684853600958983\n",
      "epoch {} loss: {} 93 0.036595725868311196\n",
      "epoch {} loss: {} 94 0.037793664758404094\n",
      "epoch {} loss: {} 95 0.03739517097257906\n",
      "epoch {} loss: {} 96 0.034907924756407735\n",
      "epoch {} loss: {} 97 0.03333374286691348\n",
      "epoch {} loss: {} 98 0.03351033584525188\n",
      "epoch {} loss: {} 99 0.03343261515514718\n",
      "epoch {} loss: {} 100 0.03236017270634572\n",
      "epoch {} loss: {} 101 0.031120951742761666\n",
      "epoch {} loss: {} 102 0.030795480849014387\n",
      "epoch {} loss: {} 103 0.030597977805882692\n",
      "epoch {} loss: {} 104 0.029652924256192315\n",
      "epoch {} loss: {} 105 0.029487846998704804\n",
      "epoch {} loss: {} 106 0.02886639474373725\n",
      "epoch {} loss: {} 107 0.029288087350626785\n",
      "epoch {} loss: {} 108 0.02810936299049192\n",
      "epoch {} loss: {} 109 0.0276928730826411\n",
      "epoch {} loss: {} 110 0.028338481630716058\n",
      "epoch {} loss: {} 111 0.0279106429260638\n",
      "epoch {} loss: {} 112 0.02724914661505156\n",
      "epoch {} loss: {} 113 0.026570784466134176\n",
      "epoch {} loss: {} 114 0.026621904876083135\n",
      "epoch {} loss: {} 115 0.026027586404234172\n",
      "epoch {} loss: {} 116 0.02618635646584961\n",
      "epoch {} loss: {} 117 0.025224574469029905\n",
      "epoch {} loss: {} 118 0.024084832146763803\n",
      "epoch {} loss: {} 119 0.023711988195363017\n",
      "epoch {} loss: {} 120 0.023154870462086467\n",
      "epoch {} loss: {} 121 0.02218243560443322\n",
      "epoch {} loss: {} 122 0.02226262012910512\n",
      "epoch {} loss: {} 123 0.02178189755520887\n",
      "epoch {} loss: {} 124 0.021177272550347778\n",
      "epoch {} loss: {} 125 0.02069678727744354\n",
      "epoch {} loss: {} 126 0.020353039923227494\n",
      "epoch {} loss: {} 127 0.01972865860702263\n",
      "epoch {} loss: {} 128 0.01995363434155782\n",
      "epoch {} loss: {} 129 0.020192130386001535\n",
      "epoch {} loss: {} 130 0.022500802628282045\n",
      "epoch {} loss: {} 131 0.022554707071847386\n",
      "end before 500 epoch, epoch: 131\n"
     ]
    }
   ],
   "source": [
    "train()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e4e8c56f-d5d9-43b7-b2f7-a02c0daa6c7d",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.13.3"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
