{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [],
   "source": [
    "import numpy as np\n",
    "import torch\n",
    "from torchvision.models import resnet18\n",
    "from torch import nn\n",
    "import torch.optim as optim\n",
    "from torch.utils.data import Dataset, DataLoader\n",
    "from DamageDetectionModel import RegistrationModel\n",
    "from STN import SpatialTransformer\n",
    "from loss import compute_loss"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Evaluation 冒烟测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 2,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "merged shape: torch.Size([2, 4, 256, 256])\n",
      "bbox   shape: torch.Size([2, 4])\n"
     ]
    }
   ],
   "source": [
    "# ---------------- Evaluation smoke test ----------------\n",
    "# NOTE(review): the original `if __name__ == \"__main__\":` guard was\n",
    "# removed — a notebook cell always executes as \"__main__\", so the guard\n",
    "# only added indentation without effect.\n",
    "model = RegistrationModel().eval()   # inference only; no training needed\n",
    "B = 2                                # arbitrary batch size\n",
    "img_vis = torch.randn(B, 3, 256, 256)  # visible-light image\n",
    "img_ir  = torch.randn(B, 1, 256, 256)  # infrared image\n",
    "\n",
    "with torch.no_grad():\n",
    "    merged, bbox = model(img_vis, img_ir)\n",
    "\n",
    "# Assert the expected shapes instead of relying on visual inspection,\n",
    "# so Restart-&-Run-All fails loudly if the model contract changes.\n",
    "assert merged.shape == (B, 4, 256, 256), merged.shape\n",
    "assert bbox.shape == (B, 4), bbox.shape\n",
    "print(\"merged shape:\", merged.shape)\n",
    "print(\"bbox   shape:\", bbox.shape)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Show a small slice plus summary stats instead of dumping the full\n",
    "# merged tensor — 2*4*256*256 values would bloat the notebook output\n",
    "# and hide the narrative.\n",
    "print(\"merged[0, 0, :4, :4]:\\n\", merged[0, 0, :4, :4])\n",
    "print(f\"merged stats: min={merged.min():.4f} max={merged.max():.4f} mean={merged.mean():.4f}\")\n",
    "print(\"bbox:\\n\", bbox)  # bbox is only [B, 4]; printing it in full is fine"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "Training 冒烟测试"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Mock dataset that generates random tensors shaped like the real data.\n",
    "class MockDataset(Dataset):\n",
    "    \"\"\"Synthetic dataset for smoke-testing the training pipeline.\n",
    "\n",
    "    Each item mimics one real sample: a visible-light image, an infrared\n",
    "    image, two bounding-box labels and a damage-type label.\n",
    "    \"\"\"\n",
    "\n",
    "    def __init__(self, num_samples=100):\n",
    "        self.num_samples = num_samples\n",
    "\n",
    "    def __len__(self):\n",
    "        return self.num_samples\n",
    "\n",
    "    def __getitem__(self, idx):\n",
    "        # Random images — content is irrelevant for a smoke test.\n",
    "        img_vis = torch.randn(3, 256, 256)  # visible-light image\n",
    "        img_ir = torch.randn(1, 256, 256)   # infrared image\n",
    "\n",
    "        # Random bounding-box labels (x1, y1, x2, y2) in pixel coordinates.\n",
    "        # NOTE(review): coordinates are sampled independently, so x1 < x2\n",
    "        # is not guaranteed — acceptable for a smoke test only.\n",
    "        bbox_label1 = torch.rand(4) * 256\n",
    "        bbox_label2 = torch.rand(4) * 256\n",
    "\n",
    "        # Random damage-type label, an integer in [0, 5). Uses torch's RNG\n",
    "        # directly instead of round-tripping through numpy.\n",
    "        # NOTE(review): kept as float32 to match the original behavior; if\n",
    "        # compute_loss uses CrossEntropyLoss this should be int64 — verify.\n",
    "        damage_type = torch.randint(0, 5, ()).to(torch.float32)\n",
    "\n",
    "        return img_vis, img_ir, bbox_label1, bbox_label2, damage_type"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Training loop\n",
    "def train(model, train_loader, optimizer, num_epochs):\n",
    "    \"\"\"Run a simple training loop over `train_loader` for `num_epochs`.\n",
    "\n",
    "    Prints the average loss per epoch. Assumes each batch unpacks into\n",
    "    (visible image, infrared image, bbox label 1, bbox label 2,\n",
    "    damage type), as produced by MockDataset.\n",
    "    \"\"\"\n",
    "    model.train()  # training mode (dropout, batch-norm running stats)\n",
    "    for epoch in range(num_epochs):\n",
    "        total_loss = 0.0\n",
    "        for batch in train_loader:\n",
    "            image_1, image_2, boundary_box_label_1, boundary_box_label_2, damage_type = batch\n",
    "\n",
    "            # Forward pass\n",
    "            image_merged, outputs = model(image_1, image_2)\n",
    "\n",
    "            # FIXME: the recorded traceback shows SpatialTransformer has no\n",
    "            # 'transformed_boxes' attribute, so the original direct lookup\n",
    "            # raised AttributeError deep inside nn.Module.__getattr__.\n",
    "            # Guard it so the failure carries an actionable message.\n",
    "            transformed_boxes = getattr(model.stn, \"transformed_boxes\", None)\n",
    "            if transformed_boxes is None:\n",
    "                raise AttributeError(\n",
    "                    \"model.stn has no 'transformed_boxes'; cache it on \"\n",
    "                    \"SpatialTransformer during forward() before training\"\n",
    "                )\n",
    "\n",
    "            # Compute the combined loss\n",
    "            loss = compute_loss(\n",
    "                outputs,\n",
    "                boundary_box_label_1,\n",
    "                damage_type,\n",
    "                transformed_boxes,\n",
    "                boundary_box_label_2\n",
    "            )\n",
    "\n",
    "            # Backward pass and optimizer step\n",
    "            optimizer.zero_grad()\n",
    "            loss.backward()\n",
    "            optimizer.step()\n",
    "\n",
    "            total_loss += loss.item()\n",
    "\n",
    "        # Report the mean loss across batches for this epoch\n",
    "        avg_loss = total_loss / len(train_loader)\n",
    "        print(f\"Epoch [{epoch+1}/{num_epochs}], Average Loss: {avg_loss:.4f}\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "metadata": {},
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "开始训练...\n"
     ]
    },
    {
     "ename": "AttributeError",
     "evalue": "'SpatialTransformer' object has no attribute 'transformed_boxes'",
     "output_type": "error",
     "traceback": [
      "AttributeError: 'SpatialTransformer' object has no attribute 'transformed_boxes'"
     ]
    }
   ],
   "source": [
    "# ---------------- Training smoke test ----------------\n",
    "# NOTE(review): the `if __name__ == \"__main__\":` guard was removed —\n",
    "# notebook cells always execute as \"__main__\".\n",
    "# Initialize the model, data loader and optimizer\n",
    "model = RegistrationModel()\n",
    "dataset = MockDataset(num_samples=100)  # 100 synthetic samples\n",
    "train_loader = DataLoader(dataset, batch_size=8, shuffle=True)\n",
    "optimizer = optim.Adam(model.parameters(), lr=0.001)\n",
    "\n",
    "# Run training. Currently fails: SpatialTransformer does not expose\n",
    "# 'transformed_boxes' (see the recorded error output below) — the model\n",
    "# code must cache it in forward() before this cell can succeed.\n",
    "print(\"开始训练...\")\n",
    "train(model, train_loader, optimizer, num_epochs=10)\n",
    "print(\"训练完成!\")\n",
    "\n",
    "# Quick sanity check after training\n",
    "print(\"\\n训练后测试:\")\n",
    "model.eval()\n",
    "with torch.no_grad():\n",
    "    img_vis = torch.randn(2, 3, 256, 256)\n",
    "    img_ir = torch.randn(2, 1, 256, 256)\n",
    "    merged, bbox = model(img_vis, img_ir)\n",
    "\n",
    "print(\"merged shape:\", merged.shape)  # expect torch.Size([2, 4, 256, 256])\n",
    "print(\"bbox   shape:\", bbox.shape)    # expect torch.Size([2, 4])"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "science39",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.18"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
