{
 "cells": [
  {
   "cell_type": "code",
   "id": "initial_id",
   "metadata": {
    "collapsed": true,
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:52.584006Z",
     "start_time": "2025-03-23T08:30:35.014188Z"
    }
   },
   "source": [
    "import torch\n",
    "import torchvision\n",
    "import numpy as np\n",
    "import torch.nn as nn\n",
    "import torch.optim as optim\n",
    "import h5py\n",
    "import os\n",
    "from torch.utils.data import Dataset\n",
    "import torch.nn.functional as F"
   ],
   "outputs": [],
   "execution_count": 1
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "数据集处理",
   "id": "3f9d9f22077d80ad"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:54.887209Z",
     "start_time": "2025-03-23T08:30:54.880035Z"
    }
   },
   "cell_type": "code",
   "source": [
     "class MultimodalDataset(Dataset):\n",
     "    \"\"\"Dataset over the top-level groups of a single HDF5 file.\n",
     "\n",
     "    Each group is expected to hold datasets 'image1', 'image2', 'text'\n",
     "    and a 'folder' attribute.\n",
     "    \"\"\"\n",
     "    def __init__(self, h5_path: str):\n",
     "        # NOTE(review): the file handle is kept open for the dataset's\n",
     "        # lifetime and never closed; an open handle also may not survive\n",
     "        # DataLoader worker processes (num_workers > 0) -- confirm usage.\n",
     "        self.h5 = h5py.File(h5_path, 'r')\n",
     "        self.keys = list(self.h5.keys())\n",
     "        \n",
     "    def __len__(self):\n",
     "        # One sample per top-level HDF5 group.\n",
     "        return len(self.keys)\n",
     "    \n",
     "    def __getitem__(self, idx):\n",
     "        group = self.h5[self.keys[idx]]\n",
     "        # [:] reads each dataset fully into a NumPy array before wrapping.\n",
     "        return {\n",
     "            \"images\": (\n",
     "                torch.from_numpy(group[\"image1\"][:]),\n",
     "                torch.from_numpy(group[\"image2\"][:])\n",
     "            ),\n",
     "            \"text\": torch.from_numpy(group[\"text\"][:]),\n",
     "            \"meta\": {\"folder\": group.attrs[\"folder\"]}\n",
     "        }"
   ],
   "id": "86be931b225b43f8",
   "outputs": [],
   "execution_count": 2
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "模型单元",
   "id": "936ebafb546c6818"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:55.744485Z",
     "start_time": "2025-03-23T08:30:55.724485Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class MultiHeadCrossAttn(nn.Module):\n",
    "    def __init__(self, img_channels=1024, text_dim=768, num_heads=8):\n",
    "        super().__init__()\n",
    "        self.num_heads = num_heads\n",
    "        self.head_dim = img_channels // num_heads\n",
    "        \n",
    "        # 图像特征投影\n",
    "        self.img_proj_q = nn.Conv2d(img_channels, img_channels, 1)\n",
    "        self.img_proj_kv = nn.Conv2d(img_channels, img_channels*2, 1)\n",
    "        \n",
    "        # 文本特征投影\n",
    "        self.text_proj = nn.Sequential(\n",
    "            nn.Linear(text_dim, img_channels*2),\n",
    "            nn.GELU(),\n",
    "            nn.LayerNorm(img_channels*2)\n",
    "        )\n",
    "        \n",
    "        # 动态门控融合\n",
    "        self.gate_conv = nn.Sequential(\n",
    "            nn.Conv2d(img_channels*2, img_channels, 3, padding=1),\n",
    "            nn.Sigmoid()\n",
    "        )\n",
    "        \n",
    "        # 输出变换\n",
    "        self.output_conv = nn.Conv2d(img_channels, img_channels, 3, padding=1)\n",
    "        \n",
    "    def forward(self, img, text):\n",
    "    \n",
    "        n, _, h, w = img.shape\n",
    "        \n",
    "        q_img = self.img_proj_q(img)  # (n,1024,32,32)\n",
    "        kv_img = self.img_proj_kv(img)  # (n,2048,32,32)\n",
    "        k_img, v_img = torch.chunk(kv_img, 2, dim=1)  # (n,1024,32,32)\n",
    "        \n",
    "        text_proj = self.text_proj(text)  # (n,10,2048)\n",
    "        k_text, v_text = torch.chunk(text_proj, 2, dim=-1)  # (n,10,1024)\n",
    "        \n",
    "        def reshape_multi_head(x, dim):\n",
    "            return x.view(n, self.num_heads, self.head_dim, -1).permute(0,1,3,2)\n",
    "            \n",
    "        q = reshape_multi_head(q_img.flatten(2), 3)  # (n, h, 32 * 32, d)\n",
    "        k_img = reshape_multi_head(k_img.flatten(2), 3)  # (n, h, 32 * 32, d)\n",
    "        v_img = reshape_multi_head(v_img.flatten(2), 3)  # (n, h, 32 * 32, d)\n",
    "         \n",
    "        k_text = k_text.permute(0,2,1)  # (n,1024,10)\n",
    "        k_text = k_text.view(k_text.shape[0], self.num_heads, self.head_dim, 10)\n",
    "        k_text = k_text.permute(0,1,3,2)  # (n, h, 10, d)\n",
    "        v_text = v_text.permute(0,2,1)\n",
    "        v_text = v_text.view(v_text.shape[0], self.num_heads, self.head_dim, 10)\n",
    "        v_text = v_text.permute(0,1,3,2)  # (n, h, 10, d)\n",
    "        \n",
    "        attn_scores = torch.matmul(q, k_text.transpose(-2,-1)) / (self.head_dim**0.5)\n",
    "        attn_weights = F.softmax(attn_scores, dim=-1)\n",
    "        attended_text = torch.matmul(attn_weights, v_text)  # (n, h, 32 * 32, d)\n",
    "        \n",
    "        fused = torch.cat([\n",
    "            v_img.reshape(n, -1, h*w),\n",
    "            attended_text.reshape(n, -1, h*w)\n",
    "        ], dim=1).view(n, -1, h, w)\n",
    "        \n",
    "        gate = self.gate_conv(fused)\n",
    "        output = gate * v_img.reshape(n, -1, h, w) + (1-gate) * attended_text.reshape(n, -1, h, w)\n",
    "        \n",
    "        return self.output_conv(output)"
   ],
   "id": "18237e98c43efc5a",
   "outputs": [],
   "execution_count": 3
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
   "source": [
    "class MLA(nn.Module):\n",
    "    def __init__(self, img_channels, text_dim=768, num_heads=8, dropout=0.1):\n",
    "        super().__init__()\n",
    "        # 确保维度对齐\n",
    "        assert text_dim % num_heads == 0, \"text_dim must be divisible by num_heads\"\n",
    "        \n",
    "        # 图像通道投影层（将图像维度与文本对齐）\n",
    "        self.img_proj = nn.Conv2d(img_channels, text_dim, kernel_size=1)\n",
    "        \n",
    "        # 多头注意力层\n",
    "        self.multihead_attn = nn.MultiheadAttention(\n",
    "            embed_dim=text_dim,\n",
    "            num_heads=num_heads,\n",
    "            dropout=dropout,\n",
    "            batch_first=True\n",
    "        )\n",
    "        \n",
    "        # 输出投影层（恢复图像原始通道）\n",
    "        self.output_proj = nn.Conv2d(text_dim, img_channels, kernel_size=1)\n",
    "        \n",
    "        # 层归一化\n",
    "        self.norm = nn.LayerNorm(text_dim)\n",
    "        self.dropout = nn.Dropout(dropout)\n",
    "\n",
    "    def forward(self, img, text):\n",
    "        \n",
    "        batch, channels, h, w = img.shape\n",
    "\n",
    "        img_projected = self.img_proj(img)  # (b, text_dim, h, w)\n",
    "\n",
    "        img_sequence = img_projected.flatten(2).permute(0, 2, 1)  # (b, h*w, text_dim)\n",
    "        \n",
    "        attn_output, _ = self.multihead_attn(\n",
    "            query=img_sequence,\n",
    "            key=text,\n",
    "            value=text\n",
    "        )\n",
    "\n",
    "        attn_output = self.norm(img_sequence + self.dropout(attn_output))\n",
    "\n",
    "        output_sequence = attn_output.permute(0, 2, 1)  # (b, text_dim, h*w)\n",
    "        output_projected = output_sequence.view(batch, 768, h, w)\n",
    "\n",
    "        output_img = self.output_proj(output_projected)  # b, c, h, w\n",
    "        \n",
    "        return output_img"
   ],
   "id": "fe749968393e0caf"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:56.138988Z",
     "start_time": "2025-03-23T08:30:56.131118Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_block_D(nn.Module):\n",
    "    def __init__(self, in_channels, out_channels):\n",
    "        super(CNN_block_D, self).__init__()\n",
    "        self.conv1 = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)\n",
    "        self.batch = nn.BatchNorm2d(out_channels)\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        self.con2 = nn.Conv2d(out_channels, out_channels, kernel_size=3, padding=1)\n",
    "        self.attn = MLA(img_channels=out_channels, num_heads=4)\n",
    "        self.pool = nn.MaxPool2d(2, 2)\n",
    "    \n",
    "    def forward(self, x, text):\n",
    "        x = self.conv1(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.con2(x)\n",
    "        x = self.batch(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.attn(x, text)\n",
    "        x = self.batch(x)\n",
    "        x = self.pool(x)\n",
    "        return x"
   ],
   "id": "80804df5b56102c3",
   "outputs": [],
   "execution_count": 4
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:56.462157Z",
     "start_time": "2025-03-23T08:30:56.454416Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_block_U(nn.Module):\n",
    "    def __init__(self, In_channels, Out_channels):\n",
    "        super().__init__()\n",
    "        # 通道数减半并尺寸倍增\n",
    "        self.conv_trans = nn.ConvTranspose2d(in_channels=In_channels,out_channels=Out_channels,kernel_size=3,stride=2,padding=1,output_padding=1)\n",
    "        self.bn = nn.BatchNorm2d(Out_channels)\n",
    "        self.relu = nn.SiLU()\n",
    "\n",
    "    def forward(self, x):\n",
    "        x = self.conv_trans(x)\n",
    "        x = self.relu(x)\n",
    "        x = self.bn(x)\n",
    "        return x"
   ],
   "id": "4767627695175fcc",
   "outputs": [],
   "execution_count": 5
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:56.763910Z",
     "start_time": "2025-03-23T08:30:56.755270Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_block_33(nn.Module):\n",
    "    def __init__(self, in_channels, out_channels):\n",
    "        super(CNN_block_33, self).__init__()\n",
    "        self.conv = nn.Sequential(\n",
    "            nn.Conv2d(in_channels, out_channels, kernel_size=33, padding=16),\n",
    "            nn.BatchNorm2d(out_channels)\n",
    "        )\n",
    "        \n",
    "    def forward(self, x):\n",
    "        return self.conv(x)"
   ],
   "id": "af5026519f5fe696",
   "outputs": [],
   "execution_count": 6
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:57.114024Z",
     "start_time": "2025-03-23T08:30:57.107471Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_block_3(nn.Module):\n",
    "    def __init__(self, in_channels, out_channels):\n",
    "        super(CNN_block_3, self).__init__()\n",
    "        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=3, padding=1)\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        self.norm = nn.BatchNorm2d(out_channels)\n",
    "    \n",
    "    def forward(self, x):\n",
    "        return self.norm(self.relu(self.conv(x)))"
   ],
   "id": "1d18d4c3cd5ea3f6",
   "outputs": [],
   "execution_count": 7
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:57.497396Z",
     "start_time": "2025-03-23T08:30:57.489604Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class CNN_block_1(nn.Module):\n",
    "    def __init__(self, in_channels, out_channels):\n",
    "        super(CNN_block_1, self).__init__()\n",
    "        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        \n",
    "    def forward(self, x):\n",
    "        return self.relu(self.conv(x))"
   ],
   "id": "5194dff42e29ea54",
   "outputs": [],
   "execution_count": 8
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:57.856663Z",
     "start_time": "2025-03-23T08:30:57.849902Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class atten_Gate(nn.Module):\n",
    "    def __init__(self,F_int):\n",
    "        super().__init__()\n",
    "        self.W_g = nn.Conv2d(F_int, F_int, 1)\n",
    "        self.W_x = nn.Conv2d(F_int, F_int, 1)\n",
    "        self.psi = nn.Conv2d(F_int, 1, 1)    # 注意力系数生成\n",
    "        self.relu = nn.LeakyReLU()\n",
    "        self.sigmoid = nn.Sigmoid()\n",
    "        self.norm = nn.BatchNorm2d(F_int)\n",
    "\n",
    "    def forward(self, g, x):\n",
    "        #print(g.shape)\n",
    "        g_trans = self.W_g(g)\n",
    "        x_trans = self.W_x(x)\n",
    "        \n",
    "        fused = self.relu(g_trans + x_trans)\n",
    "        alpha = self.sigmoid(self.psi(fused))\n",
    "\n",
    "        return self.norm(x * alpha)"
   ],
   "id": "890b230932b10589",
   "outputs": [],
   "execution_count": 9
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:58.802176Z",
     "start_time": "2025-03-23T08:30:58.224236Z"
    }
   },
   "cell_type": "code",
   "source": [
    "class EdgeEnhancer(nn.Module):\n",
    "    def __init__(self, alpha=0.7, Device='cuda'):\n",
    "        super().__init__()\n",
    "        self.alpha = alpha  # 边缘增强强度系数\n",
    "        self.sobel_kernel = self._init_sobel_kernel().to(Device)\n",
    "\n",
    "    def _init_sobel_kernel(self):\n",
    "        # Sobel算子卷积核 [3,8]\n",
    "        kernel_x = torch.tensor([[1, 0, -1], \n",
    "                                [2, 0, -2],\n",
    "                                [1, 0, -1]], dtype=torch.float32).view(1,1,3,3)\n",
    "        \n",
    "        kernel_y = torch.tensor([[1, 2, 1],\n",
    "                                [0, 0, 0],\n",
    "                                [-1, -2, -1]], dtype=torch.float32).view(1,1,3,3)\n",
    "        return torch.cat([kernel_x, kernel_y], dim=0)  # shape(2,1,3,3)\n",
    "\n",
    "    def forward(self, x):\n",
    "        edges = F.conv2d(x, self.sobel_kernel, padding=1, groups=1)  # (n,2,512,512)\n",
    "        edges = torch.sqrt(edges[:,0]**2 + edges[:,1]**2)  \n",
    "        edges = edges.unsqueeze(1)\n",
    "        \n",
    "\n",
    "        enhanced = (1 - self.alpha) * x + self.alpha * edges\n",
    "        #print(enhanced.shape)\n",
    "        \n",
    "        return torch.clamp(enhanced, 0.0, 1.0)\n",
    "\n",
    "Edge = EdgeEnhancer()"
   ],
   "id": "8d2a76baf392c3d8",
   "outputs": [],
   "execution_count": 10
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:58.814923Z",
     "start_time": "2025-03-23T08:30:58.802176Z"
    }
   },
   "cell_type": "code",
   "source": [
     "class UNet(nn.Module):\n",
     "    \"\"\"Text-conditioned UNet.\n",
     "\n",
     "    Encoder: 33x33 stem -> 3x3 conv -> four downsampling blocks, each\n",
     "    cross-attending to the text embedding; one more cross-attention at the\n",
     "    bottleneck.  Decoder: four transposed-conv upsampling blocks, each\n",
     "    followed by an attention gate driven by the matching encoder feature.\n",
     "    NOTE(review): the gates replace x by the gated skip (no channel\n",
     "    concatenation as in the classic UNet) -- confirm this is intended.\n",
     "    \"\"\"\n",
     "    def __init__(self):\n",
     "        super(UNet, self).__init__()\n",
     "        self.convD0 = CNN_block_33(1,16)\n",
     "        self.convF = CNN_block_3(16,64)\n",
     "        self.convD1 = CNN_block_D(64,128)\n",
     "        self.convD2 = CNN_block_D(128,256)\n",
     "        self.convD3 = CNN_block_D(256,512)\n",
     "        self.convD4 = CNN_block_D(512,1024)\n",
     "        \n",
     "        self.convU1 = CNN_block_U(1024,512)\n",
     "        self.convU2 = CNN_block_U(512,256)\n",
     "        self.convU3 = CNN_block_U(256,128)\n",
     "        self.convU4 = CNN_block_U(128,64)\n",
     "        \n",
     "        # Bottleneck cross-attention over the text embedding.\n",
     "        self.mixU = MLA(img_channels = 1024, num_heads = 8)\n",
     "        \n",
     "        self.jump1 = atten_Gate(64)\n",
     "        self.jump2 = atten_Gate(128)\n",
     "        self.jump3 = atten_Gate(256)\n",
     "        self.jump4 = atten_Gate(512)\n",
     "        \n",
     "        # Decoder head: 64 -> 16 -> 4 -> 1 channels.\n",
     "        self.dec1 = CNN_block_3(64,16)\n",
     "        self.dec2 = CNN_block_3(16,4)\n",
     "        self.dec3 = CNN_block_1(4,1)\n",
     "        \n",
     "    def forward(self, img, text_emb):\n",
     "        x = self.convD0(img)\n",
     "        x = self.convF(x)   #c = 64\n",
     "        \n",
     "        # Encoder path; mem* keep pre-downsampling features for the gates.\n",
     "        mem1 = x\n",
     "        x = self.convD1(x,text_emb)\n",
     "        mem2 = x\n",
     "        x = self.convD2(x,text_emb)\n",
     "        mem3 = x\n",
     "        x = self.convD3(x,text_emb)\n",
     "        mem4 = x\n",
     "        x = self.convD4(x,text_emb)\n",
     "        x = self.mixU(x, text_emb)\n",
     "        \n",
     "        # Decoder path: upsample, then gate with the matching encoder map.\n",
     "        x = self.convU1(x)\n",
     "        x = self.jump4(mem4, x)\n",
     "        x = self.convU2(x)\n",
     "        x = self.jump3(mem3, x)\n",
     "        x = self.convU3(x)\n",
     "        x = self.jump2(mem2, x)\n",
     "        x = self.convU4(x)\n",
     "        x = self.jump1(mem1, x)\n",
     "        \n",
     "        x = self.dec1(x)\n",
     "        x = self.dec2(x)\n",
     "        return self.dec3(x)"
   ],
   "id": "6b9b3c8739968410",
   "outputs": [],
   "execution_count": 11
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:30:59.567910Z",
     "start_time": "2025-03-23T08:30:59.559945Z"
    }
   },
   "cell_type": "code",
    "source": "# Pick the GPU when available, CPU otherwise; used by all cells below.\ndevice = torch.device('cuda' if torch.cuda.is_available() else 'cpu')",
   "id": "ba077aae2386db83",
   "outputs": [],
   "execution_count": 12
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:31:02.114929Z",
     "start_time": "2025-03-23T08:31:00.361361Z"
    }
   },
   "cell_type": "code",
    "source": "# Instantiate the UNet and move its parameters to the selected device.\nmodel = UNet().to(device)",
   "id": "a78c55d096f0fab",
   "outputs": [],
   "execution_count": 13
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:31:03.861845Z",
     "start_time": "2025-03-23T08:31:03.849922Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Adam on all model parameters; plain MSE between the predicted and\n",
     "# target (edge-enhanced) images.\n",
     "optimizer = torch.optim.Adam(model.parameters(), lr=0.001)\n",
     "criterion = nn.MSELoss()"
   ],
   "id": "19ab5c52ac283fe1",
   "outputs": [],
   "execution_count": 14
  },
  {
   "metadata": {},
   "cell_type": "markdown",
   "source": "训练单元",
   "id": "7342ccbbf243ab3"
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:31:06.657145Z",
     "start_time": "2025-03-23T08:31:06.649145Z"
    }
   },
   "cell_type": "code",
   "source": [
    "def train():\n",
    "    model.train()\n",
    "    directory = 'C:/Users/Lenovo/Desktop/DC/ds'\n",
    "    norm = nn.BatchNorm2d(1)\n",
    "    \n",
    "    for filename in os.listdir(directory):\n",
    "        file_path = os.path.join(directory, filename)\n",
    "        #print(file_path)\n",
    "        dataset = MultimodalDataset(file_path)\n",
    "        for i in range(5):\n",
    "            dataloader = torch.utils.data.DataLoader(dataset, batch_size=1, shuffle=True)\n",
    "            for item in dataloader:\n",
    "                Input = norm(item['images'][0].to(device))\n",
    "                tar = norm(item['images'][1].to(device))\n",
    "                text = norm(item['text'].to(device).unsqueeze(1)).squeeze(1)\n",
    "                pre = model.forward(Edge(Input),text)\n",
    "                loss = criterion(pre, Edge(tar))\n",
    "                print(loss.item())\n",
    "                optimizer.zero_grad()\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                    "
   ],
   "id": "a1d990d86c307d9c",
   "outputs": [],
   "execution_count": 15
  },
  {
   "metadata": {
    "ExecuteTime": {
     "end_time": "2025-03-23T08:31:21.355380Z",
     "start_time": "2025-03-23T08:31:07.404380Z"
    }
   },
   "cell_type": "code",
   "source": [
     "# Train, then pickle the entire model object.\n",
     "# NOTE(review): torch.save(model) pickles class definitions by reference;\n",
     "# saving model.state_dict() is the more portable convention.\n",
     "train()\n",
     "torch.save(model,'model.pth')"
   ],
   "id": "8960dda7280349f2",
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "112.77003479003906\n"
     ]
    },
    {
     "ename": "RuntimeError",
     "evalue": "cuDNN error: CUDNN_STATUS_INTERNAL_ERROR",
     "output_type": "error",
     "traceback": [
      "\u001B[1;31m---------------------------------------------------------------------------\u001B[0m",
      "\u001B[1;31mRuntimeError\u001B[0m                              Traceback (most recent call last)",
      "Cell \u001B[1;32mIn[16], line 1\u001B[0m\n\u001B[1;32m----> 1\u001B[0m \u001B[43mtrain\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m      2\u001B[0m torch\u001B[38;5;241m.\u001B[39msave(model,\u001B[38;5;124m'\u001B[39m\u001B[38;5;124mmodel.pth\u001B[39m\u001B[38;5;124m'\u001B[39m)\n",
      "Cell \u001B[1;32mIn[15], line 19\u001B[0m, in \u001B[0;36mtrain\u001B[1;34m()\u001B[0m\n\u001B[0;32m     17\u001B[0m \u001B[38;5;28mprint\u001B[39m(loss\u001B[38;5;241m.\u001B[39mitem())\n\u001B[0;32m     18\u001B[0m optimizer\u001B[38;5;241m.\u001B[39mzero_grad()\n\u001B[1;32m---> 19\u001B[0m \u001B[43mloss\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbackward\u001B[49m\u001B[43m(\u001B[49m\u001B[43m)\u001B[49m\n\u001B[0;32m     20\u001B[0m optimizer\u001B[38;5;241m.\u001B[39mstep()\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\_tensor.py:525\u001B[0m, in \u001B[0;36mTensor.backward\u001B[1;34m(self, gradient, retain_graph, create_graph, inputs)\u001B[0m\n\u001B[0;32m    515\u001B[0m \u001B[38;5;28;01mif\u001B[39;00m has_torch_function_unary(\u001B[38;5;28mself\u001B[39m):\n\u001B[0;32m    516\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m handle_torch_function(\n\u001B[0;32m    517\u001B[0m         Tensor\u001B[38;5;241m.\u001B[39mbackward,\n\u001B[0;32m    518\u001B[0m         (\u001B[38;5;28mself\u001B[39m,),\n\u001B[1;32m   (...)\u001B[0m\n\u001B[0;32m    523\u001B[0m         inputs\u001B[38;5;241m=\u001B[39minputs,\n\u001B[0;32m    524\u001B[0m     )\n\u001B[1;32m--> 525\u001B[0m \u001B[43mtorch\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mautograd\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mbackward\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    526\u001B[0m \u001B[43m    \u001B[49m\u001B[38;5;28;43mself\u001B[39;49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mgradient\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mretain_graph\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43mcreate_graph\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[43minputs\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[43minputs\u001B[49m\n\u001B[0;32m    527\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\autograd\\__init__.py:267\u001B[0m, in \u001B[0;36mbackward\u001B[1;34m(tensors, grad_tensors, retain_graph, create_graph, grad_variables, inputs)\u001B[0m\n\u001B[0;32m    262\u001B[0m     retain_graph \u001B[38;5;241m=\u001B[39m create_graph\n\u001B[0;32m    264\u001B[0m \u001B[38;5;66;03m# The reason we repeat the same comment below is that\u001B[39;00m\n\u001B[0;32m    265\u001B[0m \u001B[38;5;66;03m# some Python versions print out the first line of a multi-line function\u001B[39;00m\n\u001B[0;32m    266\u001B[0m \u001B[38;5;66;03m# calls in the traceback and some print out the last line\u001B[39;00m\n\u001B[1;32m--> 267\u001B[0m \u001B[43m_engine_run_backward\u001B[49m\u001B[43m(\u001B[49m\n\u001B[0;32m    268\u001B[0m \u001B[43m    \u001B[49m\u001B[43mtensors\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    269\u001B[0m \u001B[43m    \u001B[49m\u001B[43mgrad_tensors_\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    270\u001B[0m \u001B[43m    \u001B[49m\u001B[43mretain_graph\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    271\u001B[0m \u001B[43m    \u001B[49m\u001B[43mcreate_graph\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    272\u001B[0m \u001B[43m    \u001B[49m\u001B[43minputs\u001B[49m\u001B[43m,\u001B[49m\n\u001B[0;32m    273\u001B[0m \u001B[43m    \u001B[49m\u001B[43mallow_unreachable\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mTrue\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[0;32m    274\u001B[0m \u001B[43m    \u001B[49m\u001B[43maccumulate_grad\u001B[49m\u001B[38;5;241;43m=\u001B[39;49m\u001B[38;5;28;43;01mTrue\u001B[39;49;00m\u001B[43m,\u001B[49m\n\u001B[0;32m    275\u001B[0m \u001B[43m\u001B[49m\u001B[43m)\u001B[49m\n",
      "File \u001B[1;32m~\\anaconda3\\Lib\\site-packages\\torch\\autograd\\graph.py:744\u001B[0m, in \u001B[0;36m_engine_run_backward\u001B[1;34m(t_outputs, *args, **kwargs)\u001B[0m\n\u001B[0;32m    742\u001B[0m     unregister_hooks \u001B[38;5;241m=\u001B[39m _register_logging_hooks_on_whole_graph(t_outputs)\n\u001B[0;32m    743\u001B[0m \u001B[38;5;28;01mtry\u001B[39;00m:\n\u001B[1;32m--> 744\u001B[0m     \u001B[38;5;28;01mreturn\u001B[39;00m \u001B[43mVariable\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43m_execution_engine\u001B[49m\u001B[38;5;241;43m.\u001B[39;49m\u001B[43mrun_backward\u001B[49m\u001B[43m(\u001B[49m\u001B[43m  \u001B[49m\u001B[38;5;66;43;03m# Calls into the C++ engine to run the backward pass\u001B[39;49;00m\n\u001B[0;32m    745\u001B[0m \u001B[43m        \u001B[49m\u001B[43mt_outputs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43margs\u001B[49m\u001B[43m,\u001B[49m\u001B[43m \u001B[49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[38;5;241;43m*\u001B[39;49m\u001B[43mkwargs\u001B[49m\n\u001B[0;32m    746\u001B[0m \u001B[43m    \u001B[49m\u001B[43m)\u001B[49m  \u001B[38;5;66;03m# Calls into the C++ engine to run the backward pass\u001B[39;00m\n\u001B[0;32m    747\u001B[0m \u001B[38;5;28;01mfinally\u001B[39;00m:\n\u001B[0;32m    748\u001B[0m     \u001B[38;5;28;01mif\u001B[39;00m attach_logging_hooks:\n",
      "\u001B[1;31mRuntimeError\u001B[0m: cuDNN error: CUDNN_STATUS_INTERNAL_ERROR"
     ]
    }
   ],
   "execution_count": 16
  },
  {
   "metadata": {},
   "cell_type": "code",
   "outputs": [],
   "execution_count": null,
    "source": "# Persist the current model to disk (manual re-run cell).\ntorch.save(model,'model.pth')",
   "id": "f9615eff7555f4ad"
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 2
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython2",
   "version": "2.7.6"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
