{
  "nbformat": 4,
  "nbformat_minor": 0,
  "metadata": {
    "colab": {
      "provenance": []
    },
    "kernelspec": {
      "name": "python3",
      "display_name": "Python 3"
    },
    "language_info": {
      "name": "python"
    }
  },
  "cells": [
    {
      "cell_type": "code",
      "execution_count": 3,
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "YW5D_Znf8Ndq",
        "outputId": "32c1254f-1fca-4672-84ad-d460a0464ce5"
      },
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "/usr/local/lib/python3.11/dist-packages/torch/functional.py:539: UserWarning: torch.meshgrid: in an upcoming release, it will be required to pass the indexing argument. (Triggered internally at /pytorch/aten/src/ATen/native/TensorShape.cpp:3637.)\n",
            "  return _VF.meshgrid(tensors, **kwargs)  # type: ignore[attr-defined]\n"
          ]
        },
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "torch.Size([1, 1000]) 17789624\n",
            "torch.Size([1, 1000]) 33170624\n",
            "torch.Size([1, 1000]) 55767564\n",
            "torch.Size([1, 1000]) 117724480\n",
            "torch.Size([1, 1000]) 203960368\n"
          ]
        }
      ],
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "\n",
        "from einops import rearrange\n",
        "from einops.layers.torch import Rearrange\n",
        "\n",
        "\n",
        "def conv_3x3_bn(inp, oup, image_size, downsample=False):\n",
        "    stride = 1 if downsample == False else 2\n",
        "    return nn.Sequential(\n",
        "        nn.Conv2d(inp, oup, 3, stride, 1, bias=False),\n",
        "        nn.BatchNorm2d(oup),\n",
        "        nn.GELU()\n",
        "    )\n",
        "\n",
        "\n",
        "class PreNorm(nn.Module):\n",
        "    def __init__(self, dim, fn, norm):\n",
        "        super().__init__()\n",
        "        self.norm = norm(dim)\n",
        "        self.fn = fn\n",
        "\n",
        "    def forward(self, x, **kwargs):\n",
        "        return self.fn(self.norm(x), **kwargs)\n",
        "\n",
        "\n",
        "class SE(nn.Module):\n",
        "    def __init__(self, inp, oup, expansion=0.25):\n",
        "        super().__init__()\n",
        "        self.avg_pool = nn.AdaptiveAvgPool2d(1)\n",
        "        self.fc = nn.Sequential(\n",
        "            nn.Linear(oup, int(inp * expansion), bias=False),\n",
        "            nn.GELU(),\n",
        "            nn.Linear(int(inp * expansion), oup, bias=False),\n",
        "            nn.Sigmoid()\n",
        "        )\n",
        "\n",
        "    def forward(self, x):\n",
        "        b, c, _, _ = x.size()\n",
        "        y = self.avg_pool(x).view(b, c)\n",
        "        y = self.fc(y).view(b, c, 1, 1)\n",
        "        return x * y\n",
        "\n",
        "\n",
        "class FeedForward(nn.Module):\n",
        "    def __init__(self, dim, hidden_dim, dropout=0.):\n",
        "        super().__init__()\n",
        "        self.net = nn.Sequential(\n",
        "            nn.Linear(dim, hidden_dim),\n",
        "            nn.GELU(),\n",
        "            nn.Dropout(dropout),\n",
        "            nn.Linear(hidden_dim, dim),\n",
        "            nn.Dropout(dropout)\n",
        "        )\n",
        "\n",
        "    def forward(self, x):\n",
        "        return self.net(x)\n",
        "\n",
        "\n",
        "class MBConv(nn.Module):\n",
        "    \"\"\"Inverted-residual (MBConv) block with SE, wrapped in PreNorm.\n",
        "\n",
        "    When `downsample` is True the shortcut is max-pool + 1x1 projection\n",
        "    and the main branch strides by 2; otherwise a plain residual add.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, inp, oup, image_size, downsample=False, expansion=4):\n",
        "        super().__init__()\n",
        "        self.downsample = downsample\n",
        "        stride = 2 if self.downsample else 1\n",
        "        hidden_dim = int(inp * expansion)\n",
        "\n",
        "        if self.downsample:\n",
        "            # Shortcut branch: pool to the target resolution, then project.\n",
        "            self.pool = nn.MaxPool2d(3, 2, 1)\n",
        "            self.proj = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)\n",
        "\n",
        "        if expansion == 1:\n",
        "            stack = [\n",
        "                # depthwise\n",
        "                nn.Conv2d(hidden_dim, hidden_dim, 3, stride,\n",
        "                          1, groups=hidden_dim, bias=False),\n",
        "                nn.BatchNorm2d(hidden_dim),\n",
        "                nn.GELU(),\n",
        "                # pointwise-linear\n",
        "                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n",
        "                nn.BatchNorm2d(oup),\n",
        "            ]\n",
        "        else:\n",
        "            stack = [\n",
        "                # pointwise expansion; any downsampling happens in this conv\n",
        "                nn.Conv2d(inp, hidden_dim, 1, stride, 0, bias=False),\n",
        "                nn.BatchNorm2d(hidden_dim),\n",
        "                nn.GELU(),\n",
        "                # depthwise\n",
        "                nn.Conv2d(hidden_dim, hidden_dim, 3, 1, 1,\n",
        "                          groups=hidden_dim, bias=False),\n",
        "                nn.BatchNorm2d(hidden_dim),\n",
        "                nn.GELU(),\n",
        "                # channel gating; bottleneck width derived from `inp`\n",
        "                SE(inp, hidden_dim),\n",
        "                # pointwise-linear\n",
        "                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),\n",
        "                nn.BatchNorm2d(oup),\n",
        "            ]\n",
        "\n",
        "        # Pre-normalize the block input with BatchNorm2d over `inp` channels.\n",
        "        self.conv = PreNorm(inp, nn.Sequential(*stack), nn.BatchNorm2d)\n",
        "\n",
        "    def forward(self, x):\n",
        "        if self.downsample:\n",
        "            return self.proj(self.pool(x)) + self.conv(x)\n",
        "        return x + self.conv(x)\n",
        "\n",
        "\n",
        "class Attention(nn.Module):\n",
        "    \"\"\"Multi-head self-attention with a learned relative position bias.\n",
        "\n",
        "    Operates on token sequences of length ih*iw (one token per spatial\n",
        "    location of an `image_size` feature map). `relative_bias_table` holds\n",
        "    one value per head for every possible 2-D offset; `relative_index`\n",
        "    maps each (query, key) pair to its row in that table.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, inp, oup, image_size, heads=8, dim_head=32, dropout=0.):\n",
        "        super().__init__()\n",
        "        inner_dim = dim_head * heads\n",
        "        project_out = not (heads == 1 and dim_head == inp)\n",
        "\n",
        "        self.ih, self.iw = image_size\n",
        "\n",
        "        self.heads = heads\n",
        "        self.scale = dim_head ** -0.5\n",
        "\n",
        "        # parameter table of relative position bias:\n",
        "        # one entry per head for each of the (2*ih-1)*(2*iw-1) 2-D offsets\n",
        "        self.relative_bias_table = nn.Parameter(\n",
        "            torch.zeros((2 * self.ih - 1) * (2 * self.iw - 1), heads))\n",
        "\n",
        "        # indexing='ij' pins the historical meshgrid behavior explicitly and\n",
        "        # silences the UserWarning newer torch emits (seen in this cell's\n",
        "        # stderr output); results are identical.\n",
        "        coords = torch.meshgrid(\n",
        "            torch.arange(self.ih), torch.arange(self.iw), indexing='ij')\n",
        "        coords = torch.flatten(torch.stack(coords), 1)\n",
        "        relative_coords = coords[:, :, None] - coords[:, None, :]\n",
        "\n",
        "        # shift offsets to be non-negative, then linearize (row * width + col)\n",
        "        relative_coords[0] += self.ih - 1\n",
        "        relative_coords[1] += self.iw - 1\n",
        "        relative_coords[0] *= 2 * self.iw - 1\n",
        "        relative_coords = rearrange(relative_coords, 'c h w -> h w c')\n",
        "        relative_index = relative_coords.sum(-1).flatten().unsqueeze(1)\n",
        "        # buffer, not a parameter: moves with .to(device), no gradient\n",
        "        self.register_buffer(\"relative_index\", relative_index)\n",
        "\n",
        "        self.attend = nn.Softmax(dim=-1)\n",
        "        self.to_qkv = nn.Linear(inp, inner_dim * 3, bias=False)\n",
        "\n",
        "        self.to_out = nn.Sequential(\n",
        "            nn.Linear(inner_dim, oup),\n",
        "            nn.Dropout(dropout)\n",
        "        ) if project_out else nn.Identity()\n",
        "\n",
        "    def forward(self, x):\n",
        "        # x: (batch, ih*iw, inp) token sequence\n",
        "        qkv = self.to_qkv(x).chunk(3, dim=-1)\n",
        "        q, k, v = map(lambda t: rearrange(\n",
        "            t, 'b n (h d) -> b h n d', h=self.heads), qkv)\n",
        "\n",
        "        dots = torch.matmul(q, k.transpose(-1, -2)) * self.scale\n",
        "\n",
        "        # Use \"gather\" for more efficiency on GPUs\n",
        "        relative_bias = self.relative_bias_table.gather(\n",
        "            0, self.relative_index.repeat(1, self.heads))\n",
        "        relative_bias = rearrange(\n",
        "            relative_bias, '(h w) c -> 1 c h w', h=self.ih*self.iw, w=self.ih*self.iw)\n",
        "        dots = dots + relative_bias\n",
        "\n",
        "        attn = self.attend(dots)\n",
        "        out = torch.matmul(attn, v)\n",
        "        out = rearrange(out, 'b h n d -> b n (h d)')\n",
        "        out = self.to_out(out)\n",
        "        return out\n",
        "\n",
        "\n",
        "class Transformer(nn.Module):\n",
        "    \"\"\"Pre-norm transformer block operating on (B, C, H, W) feature maps.\n",
        "\n",
        "    Rearrange adapters convert to token layout (B, H*W, C) around the\n",
        "    attention and feed-forward sub-blocks and back. `image_size` is the\n",
        "    spatial size at this block's OUTPUT (i.e. after pooling when\n",
        "    downsample=True), since the trailing Rearrange uses it to rebuild\n",
        "    the feature map.\n",
        "    \"\"\"\n",
        "    def __init__(self, inp, oup, image_size, heads=8, dim_head=32, downsample=False, dropout=0.):\n",
        "        super().__init__()\n",
        "        # FFN hidden width: 4x the input channel count\n",
        "        hidden_dim = int(inp * 4)\n",
        "\n",
        "        self.ih, self.iw = image_size\n",
        "        self.downsample = downsample\n",
        "\n",
        "        if self.downsample:\n",
        "            # Two identical pools: pool1 feeds the projection shortcut,\n",
        "            # pool2 feeds the attention branch (see forward()).\n",
        "            self.pool1 = nn.MaxPool2d(3, 2, 1)\n",
        "            self.pool2 = nn.MaxPool2d(3, 2, 1)\n",
        "            self.proj = nn.Conv2d(inp, oup, 1, 1, 0, bias=False)\n",
        "\n",
        "        self.attn = Attention(inp, oup, image_size, heads, dim_head, dropout)\n",
        "        self.ff = FeedForward(oup, hidden_dim, dropout)\n",
        "\n",
        "        # Rebind attn/ff, wrapping each in: flatten to tokens ->\n",
        "        # pre-LayerNorm + sub-block -> restore the (B, C, H, W) map.\n",
        "        self.attn = nn.Sequential(\n",
        "            Rearrange('b c ih iw -> b (ih iw) c'),\n",
        "            PreNorm(inp, self.attn, nn.LayerNorm),\n",
        "            Rearrange('b (ih iw) c -> b c ih iw', ih=self.ih, iw=self.iw)\n",
        "        )\n",
        "\n",
        "        self.ff = nn.Sequential(\n",
        "            Rearrange('b c ih iw -> b (ih iw) c'),\n",
        "            PreNorm(oup, self.ff, nn.LayerNorm),\n",
        "            Rearrange('b (ih iw) c -> b c ih iw', ih=self.ih, iw=self.iw)\n",
        "        )\n",
        "\n",
        "    def forward(self, x):\n",
        "        if self.downsample:\n",
        "            # Downsample both branches, then add: projected shortcut +\n",
        "            # attention over the pooled map.\n",
        "            x = self.proj(self.pool1(x)) + self.attn(self.pool2(x))\n",
        "        else:\n",
        "            x = x + self.attn(x)\n",
        "        x = x + self.ff(x)\n",
        "        return x\n",
        "\n",
        "\n",
        "class CoAtNet(nn.Module):\n",
        "    \"\"\"CoAtNet backbone: conv stem + MBConv stages + Transformer stages.\n",
        "\n",
        "    Stages s0..s4 each halve the spatial resolution; a global average\n",
        "    pool and a bias-free linear head produce the class logits.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, image_size, in_channels, num_blocks, channels, num_classes=1000, block_types=['C', 'C', 'T', 'T']):\n",
        "        super().__init__()\n",
        "        ih, iw = image_size\n",
        "        block_cls = {'C': MBConv, 'T': Transformer}\n",
        "\n",
        "        # Stage i maps dims[i] -> dims[i+1] channels at 1/2**(i+1) of the\n",
        "        # input resolution. Only the first four block_types entries are\n",
        "        # consumed (s0 is always the conv stem).\n",
        "        builders = [conv_3x3_bn] + [block_cls[t] for t in block_types[:4]]\n",
        "        dims = [in_channels] + list(channels)\n",
        "        for i, builder in enumerate(builders):\n",
        "            scale = 2 ** (i + 1)\n",
        "            setattr(self, 's%d' % i, self._make_layer(\n",
        "                builder, dims[i], dims[i + 1], num_blocks[i],\n",
        "                (ih // scale, iw // scale)))\n",
        "\n",
        "        self.pool = nn.AvgPool2d(ih // 32, 1)\n",
        "        self.fc = nn.Linear(channels[-1], num_classes, bias=False)\n",
        "\n",
        "    def forward(self, x):\n",
        "        for i in range(5):\n",
        "            x = getattr(self, 's%d' % i)(x)\n",
        "\n",
        "        x = self.pool(x).view(-1, x.shape[1])\n",
        "        return self.fc(x)\n",
        "\n",
        "    def _make_layer(self, block, inp, oup, depth, image_size):\n",
        "        # First block of a stage downsamples and changes channel count;\n",
        "        # the rest keep oup -> oup at constant resolution.\n",
        "        stage = [\n",
        "            block(inp, oup, image_size, downsample=True) if i == 0\n",
        "            else block(oup, oup, image_size)\n",
        "            for i in range(depth)\n",
        "        ]\n",
        "        return nn.Sequential(*stage)\n",
        "\n",
        "\n",
        "def coatnet_0():\n",
        "    \"\"\"CoAtNet-0: depths L = [2, 2, 3, 5, 2], widths D = [64, 96, 192, 384, 768].\"\"\"\n",
        "    return CoAtNet((224, 224), 3,\n",
        "                   num_blocks=[2, 2, 3, 5, 2],\n",
        "                   channels=[64, 96, 192, 384, 768],\n",
        "                   num_classes=1000)\n",
        "\n",
        "\n",
        "def coatnet_1():\n",
        "    \"\"\"CoAtNet-1: depths L = [2, 2, 6, 14, 2], widths D = [64, 96, 192, 384, 768].\"\"\"\n",
        "    return CoAtNet((224, 224), 3,\n",
        "                   num_blocks=[2, 2, 6, 14, 2],\n",
        "                   channels=[64, 96, 192, 384, 768],\n",
        "                   num_classes=1000)\n",
        "\n",
        "\n",
        "def coatnet_2():\n",
        "    \"\"\"CoAtNet-2: depths L = [2, 2, 6, 14, 2], widths D = [128, 128, 256, 512, 1024].\n",
        "\n",
        "    Fixes the final width: the CoAtNet paper specifies D = 1024 for\n",
        "    CoAtNet-2; the previous value (1026) was a typo. Note the printed\n",
        "    parameter count in this cell's output predates this fix.\n",
        "    \"\"\"\n",
        "    num_blocks = [2, 2, 6, 14, 2]            # L\n",
        "    channels = [128, 128, 256, 512, 1024]    # D\n",
        "    return CoAtNet((224, 224), 3, num_blocks, channels, num_classes=1000)\n",
        "\n",
        "\n",
        "def coatnet_3():\n",
        "    \"\"\"CoAtNet-3: depths L = [2, 2, 6, 14, 2], widths D = [192, 192, 384, 768, 1536].\"\"\"\n",
        "    return CoAtNet((224, 224), 3,\n",
        "                   num_blocks=[2, 2, 6, 14, 2],\n",
        "                   channels=[192, 192, 384, 768, 1536],\n",
        "                   num_classes=1000)\n",
        "\n",
        "\n",
        "def coatnet_4():\n",
        "    \"\"\"CoAtNet-4: depths L = [2, 2, 12, 28, 2], widths D = [192, 192, 384, 768, 1536].\"\"\"\n",
        "    return CoAtNet((224, 224), 3,\n",
        "                   num_blocks=[2, 2, 12, 28, 2],\n",
        "                   channels=[192, 192, 384, 768, 1536],\n",
        "                   num_classes=1000)\n",
        "\n",
        "\n",
        "def count_parameters(model):\n",
        "    return sum(p.numel() for p in model.parameters() if p.requires_grad)\n",
        "\n",
        "\n",
        "if __name__ == '__main__':\n",
        "    img = torch.randn(1, 3, 224, 224)\n",
        "\n",
        "    # One smoke test per variant; the five copy-pasted blocks are\n",
        "    # collapsed into a loop (printed lines are identical).\n",
        "    for build in (coatnet_0, coatnet_1, coatnet_2, coatnet_3, coatnet_4):\n",
        "        net = build()\n",
        "        out = net(img)\n",
        "        print(out.shape, count_parameters(net))"
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.optim as optim\n",
        "from torch.utils.data import DataLoader\n",
        "from torchvision import datasets, transforms\n",
        "from tqdm import tqdm\n",
        "# Upsample CIFAR-100's native 32x32 images to the 224x224 input size\n",
        "# the CoAtNet models above are configured for; flip only at train time.\n",
        "# NOTE(review): the Normalize constants look like per-channel CIFAR-100\n",
        "# training-set mean/std -- confirm before reusing elsewhere.\n",
        "transform_train = transforms.Compose([\n",
        "    transforms.Resize((224, 224)),\n",
        "    transforms.RandomHorizontalFlip(),\n",
        "    transforms.ToTensor(),\n",
        "    transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2761))\n",
        "])\n",
        "\n",
        "# Evaluation pipeline: same resize/normalize, no augmentation.\n",
        "transform_test = transforms.Compose([\n",
        "    transforms.Resize((224, 224)),\n",
        "    transforms.ToTensor(),\n",
        "    transforms.Normalize((0.5071, 0.4865, 0.4409), (0.2673, 0.2564, 0.2761))\n",
        "])\n",
        "\n",
        "# Downloads CIFAR-100 into ./data on first run.\n",
        "train_dataset = datasets.CIFAR100(root='./data', train=True, download=True, transform=transform_train)\n",
        "test_dataset = datasets.CIFAR100(root='./data', train=False, download=True, transform=transform_test)\n",
        "\n",
        "# Shuffle only the training split; two background worker processes each.\n",
        "train_loader = DataLoader(train_dataset, batch_size=64, shuffle=True, num_workers=2)\n",
        "test_loader = DataLoader(test_dataset, batch_size=64, shuffle=False, num_workers=2)\n"
      ],
      "metadata": {
        "id": "iki2xlfQRBkQ"
      },
      "execution_count": 4,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "def coatnet_cifar100():\n",
        "    \"\"\"CoAtNet-0-sized backbone with a 100-way head for CIFAR-100.\n",
        "\n",
        "    CoAtNet only reads the first four block_types entries, so the\n",
        "    fifth 'T' below is ignored.\n",
        "    \"\"\"\n",
        "    return CoAtNet(\n",
        "        (224, 224), 3,\n",
        "        num_blocks=[2, 2, 3, 5, 2],\n",
        "        channels=[64, 96, 192, 384, 768],\n",
        "        num_classes=100,\n",
        "        block_types=['C', 'C', 'T', 'T', 'T'],\n",
        "    )\n",
        "\n",
        "\n",
        "device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n",
        "model = coatnet_cifar100().to(device)\n",
        "print(model)"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "bJS94rfJRFU1",
        "outputId": "10a92b59-4dad-4307-bcde-e45837439bbf"
      },
      "execution_count": 5,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stdout",
          "text": [
            "CoAtNet(\n",
            "  (s0): Sequential(\n",
            "    (0): Sequential(\n",
            "      (0): Conv2d(3, 64, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)\n",
            "      (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "      (2): GELU(approximate='none')\n",
            "    )\n",
            "    (1): Sequential(\n",
            "      (0): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)\n",
            "      (1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "      (2): GELU(approximate='none')\n",
            "    )\n",
            "  )\n",
            "  (s1): Sequential(\n",
            "    (0): MBConv(\n",
            "      (pool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
            "      (proj): Conv2d(64, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "      (conv): PreNorm(\n",
            "        (norm): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        (fn): Sequential(\n",
            "          (0): Conv2d(64, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
            "          (1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (2): GELU(approximate='none')\n",
            "          (3): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=256, bias=False)\n",
            "          (4): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (5): GELU(approximate='none')\n",
            "          (6): SE(\n",
            "            (avg_pool): AdaptiveAvgPool2d(output_size=1)\n",
            "            (fc): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=16, bias=False)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Linear(in_features=16, out_features=256, bias=False)\n",
            "              (3): Sigmoid()\n",
            "            )\n",
            "          )\n",
            "          (7): Conv2d(256, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (8): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "    (1): MBConv(\n",
            "      (conv): PreNorm(\n",
            "        (norm): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        (fn): Sequential(\n",
            "          (0): Conv2d(96, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (2): GELU(approximate='none')\n",
            "          (3): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)\n",
            "          (4): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (5): GELU(approximate='none')\n",
            "          (6): SE(\n",
            "            (avg_pool): AdaptiveAvgPool2d(output_size=1)\n",
            "            (fc): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=24, bias=False)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Linear(in_features=24, out_features=384, bias=False)\n",
            "              (3): Sigmoid()\n",
            "            )\n",
            "          )\n",
            "          (7): Conv2d(384, 96, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (8): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "  )\n",
            "  (s2): Sequential(\n",
            "    (0): MBConv(\n",
            "      (pool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
            "      (proj): Conv2d(96, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "      (conv): PreNorm(\n",
            "        (norm): BatchNorm2d(96, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        (fn): Sequential(\n",
            "          (0): Conv2d(96, 384, kernel_size=(1, 1), stride=(2, 2), bias=False)\n",
            "          (1): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (2): GELU(approximate='none')\n",
            "          (3): Conv2d(384, 384, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=384, bias=False)\n",
            "          (4): BatchNorm2d(384, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (5): GELU(approximate='none')\n",
            "          (6): SE(\n",
            "            (avg_pool): AdaptiveAvgPool2d(output_size=1)\n",
            "            (fc): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=24, bias=False)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Linear(in_features=24, out_features=384, bias=False)\n",
            "              (3): Sigmoid()\n",
            "            )\n",
            "          )\n",
            "          (7): Conv2d(384, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (8): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "    (1): MBConv(\n",
            "      (conv): PreNorm(\n",
            "        (norm): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        (fn): Sequential(\n",
            "          (0): Conv2d(192, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (1): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (2): GELU(approximate='none')\n",
            "          (3): Conv2d(768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)\n",
            "          (4): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (5): GELU(approximate='none')\n",
            "          (6): SE(\n",
            "            (avg_pool): AdaptiveAvgPool2d(output_size=1)\n",
            "            (fc): Sequential(\n",
            "              (0): Linear(in_features=768, out_features=48, bias=False)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Linear(in_features=48, out_features=768, bias=False)\n",
            "              (3): Sigmoid()\n",
            "            )\n",
            "          )\n",
            "          (7): Conv2d(768, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (8): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "    (2): MBConv(\n",
            "      (conv): PreNorm(\n",
            "        (norm): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        (fn): Sequential(\n",
            "          (0): Conv2d(192, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (1): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (2): GELU(approximate='none')\n",
            "          (3): Conv2d(768, 768, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), groups=768, bias=False)\n",
            "          (4): BatchNorm2d(768, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "          (5): GELU(approximate='none')\n",
            "          (6): SE(\n",
            "            (avg_pool): AdaptiveAvgPool2d(output_size=1)\n",
            "            (fc): Sequential(\n",
            "              (0): Linear(in_features=768, out_features=48, bias=False)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Linear(in_features=48, out_features=768, bias=False)\n",
            "              (3): Sigmoid()\n",
            "            )\n",
            "          )\n",
            "          (7): Conv2d(768, 192, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "          (8): BatchNorm2d(192, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)\n",
            "        )\n",
            "      )\n",
            "    )\n",
            "  )\n",
            "  (s3): Sequential(\n",
            "    (0): Transformer(\n",
            "      (pool1): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
            "      (pool2): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
            "      (proj): Conv2d(192, 384, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((192,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=192, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=384, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=768, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=768, out_features=384, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "    )\n",
            "    (1): Transformer(\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=384, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=384, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=1536, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=1536, out_features=384, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "    )\n",
            "    (2): Transformer(\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=384, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=384, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=1536, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=1536, out_features=384, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "    )\n",
            "    (3): Transformer(\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=384, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=384, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=1536, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=1536, out_features=384, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "    )\n",
            "    (4): Transformer(\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=384, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=384, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=384, out_features=1536, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=1536, out_features=384, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=14, iw=14)\n",
            "      )\n",
            "    )\n",
            "  )\n",
            "  (s4): Sequential(\n",
            "    (0): Transformer(\n",
            "      (pool1): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
            "      (pool2): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)\n",
            "      (proj): Conv2d(384, 768, kernel_size=(1, 1), stride=(1, 1), bias=False)\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((384,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=384, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=768, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=7, iw=7)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=768, out_features=1536, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=1536, out_features=768, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=7, iw=7)\n",
            "      )\n",
            "    )\n",
            "    (1): Transformer(\n",
            "      (attn): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): Attention(\n",
            "            (attend): Softmax(dim=-1)\n",
            "            (to_qkv): Linear(in_features=768, out_features=768, bias=False)\n",
            "            (to_out): Sequential(\n",
            "              (0): Linear(in_features=256, out_features=768, bias=True)\n",
            "              (1): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=7, iw=7)\n",
            "      )\n",
            "      (ff): Sequential(\n",
            "        (0): Rearrange('b c ih iw -> b (ih iw) c')\n",
            "        (1): PreNorm(\n",
            "          (norm): LayerNorm((768,), eps=1e-05, elementwise_affine=True)\n",
            "          (fn): FeedForward(\n",
            "            (net): Sequential(\n",
            "              (0): Linear(in_features=768, out_features=3072, bias=True)\n",
            "              (1): GELU(approximate='none')\n",
            "              (2): Dropout(p=0.0, inplace=False)\n",
            "              (3): Linear(in_features=3072, out_features=768, bias=True)\n",
            "              (4): Dropout(p=0.0, inplace=False)\n",
            "            )\n",
            "          )\n",
            "        )\n",
            "        (2): Rearrange('b (ih iw) c -> b c ih iw', ih=7, iw=7)\n",
            "      )\n",
            "    )\n",
            "  )\n",
            "  (pool): AvgPool2d(kernel_size=7, stride=1, padding=0)\n",
            "  (fc): Linear(in_features=768, out_features=100, bias=False)\n",
            ")\n"
          ]
        }
      ]
    },
    {
      "cell_type": "code",
      "source": [
        "# Multi-class classification objective (expects raw logits + integer labels)\n",
        "criterion = nn.CrossEntropyLoss()\n",
        "# AdamW = Adam with decoupled weight decay.\n",
        "# NOTE(review): `optim` and `model` are assumed to be defined in an\n",
        "# earlier cell not shown here -- confirm `import torch.optim as optim`.\n",
        "optimizer = optim.AdamW(model.parameters(), lr=1e-4, weight_decay=1e-4)\n",
        "def train(model, loader, optimizer, criterion):\n",
        "    \"\"\"Run one training epoch.\n",
        "\n",
        "    Returns (mean per-batch loss, top-1 accuracy in percent).\n",
        "    \"\"\"\n",
        "    model.train()\n",
        "    running_loss = 0\n",
        "    n_correct = 0\n",
        "    n_seen = 0\n",
        "    for images, labels in tqdm(loader):\n",
        "        images = images.to(device)\n",
        "        labels = labels.to(device)\n",
        "\n",
        "        # Forward pass\n",
        "        outputs = model(images)\n",
        "        loss = criterion(outputs, labels)\n",
        "\n",
        "        # Backward pass + parameter update\n",
        "        optimizer.zero_grad()\n",
        "        loss.backward()\n",
        "        optimizer.step()\n",
        "\n",
        "        # Accumulate running metrics\n",
        "        running_loss += loss.item()\n",
        "        _, predicted = outputs.max(1)\n",
        "        n_seen += labels.size(0)\n",
        "        n_correct += predicted.eq(labels).sum().item()\n",
        "\n",
        "    accuracy = 100. * n_correct / n_seen\n",
        "    return running_loss / len(loader), accuracy\n",
        "def evaluate(model, loader, criterion):\n",
        "    \"\"\"Score `model` on `loader` without gradient tracking.\n",
        "\n",
        "    Returns (mean per-batch loss, top-1 accuracy in percent).\n",
        "    \"\"\"\n",
        "    model.eval()\n",
        "    running_loss = 0\n",
        "    n_correct = 0\n",
        "    n_seen = 0\n",
        "    # Disable autograd bookkeeping for inference\n",
        "    with torch.no_grad():\n",
        "        for images, labels in loader:\n",
        "            images = images.to(device)\n",
        "            labels = labels.to(device)\n",
        "\n",
        "            outputs = model(images)\n",
        "            loss = criterion(outputs, labels)\n",
        "\n",
        "            running_loss += loss.item()\n",
        "            _, predicted = outputs.max(1)\n",
        "            n_seen += labels.size(0)\n",
        "            n_correct += predicted.eq(labels).sum().item()\n",
        "\n",
        "    accuracy = 100. * n_correct / n_seen\n",
        "    return running_loss / len(loader), accuracy\n"
      ],
      "metadata": {
        "id": "NJJ-BFoJR-7X"
      },
      "execution_count": 6,
      "outputs": []
    },
    {
      "cell_type": "code",
      "source": [
        "# Driver: alternate a training epoch with a held-out evaluation and\n",
        "# print both sets of metrics each epoch.\n",
        "num_epochs = 3\n",
        "for epoch in range(num_epochs):\n",
        "    # NOTE(review): `train_loader`/`test_loader` are assumed to be defined\n",
        "    # in an earlier cell not shown here.\n",
        "    train_loss, train_acc = train(model, train_loader, optimizer, criterion)\n",
        "    test_loss, test_acc = evaluate(model, test_loader, criterion)\n",
        "\n",
        "    print(f\"Epoch {epoch+1}:\")\n",
        "    print(f\"  Train Loss: {train_loss:.4f}, Accuracy: {train_acc:.2f}%\")\n",
        "    print(f\"  Test Loss : {test_loss:.4f}, Accuracy: {test_acc:.2f}%\")\n"
      ],
      "metadata": {
        "colab": {
          "base_uri": "https://localhost:8080/"
        },
        "id": "BwiRiASfSQ7v",
        "outputId": "16de1f69-c0c0-42b7-bf77-aa1934ca3706"
      },
      "execution_count": null,
      "outputs": [
        {
          "output_type": "stream",
          "name": "stderr",
          "text": [
            "\r  0%|          | 0/782 [00:00<?, ?it/s]"
          ]
        }
      ]
    }
  ]
}