{
  "cells": [
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "sKGTHDaOReQy"
      },
      "outputs": [],
      "source": [
        "# Use %pip (not !pip) so packages are installed into the active kernel's environment\n",
        "%pip install -q datasets matplotlib torchvision"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "4vRnpFLY-N-0"
      },
      "outputs": [],
      "source": [
        "import torch\n",
        "import torch.nn as nn\n",
        "import torch.nn.functional as F\n",
        "import torch.optim as opt\n",
        "import matplotlib.pyplot as plt\n",
        "import numpy as np\n",
        "from tqdm import tqdm\n",
        "from datasets import load_dataset\n",
        "from torch.utils.data import DataLoader, TensorDataset, Dataset, Subset\n",
        "import torchvision\n",
        "from torchvision import transforms, datasets\n",
        "import copy\n",
        "import math\n",
        "import random\n",
        "from torch.cuda import amp\n",
        "torch.manual_seed(0)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "qCJ4HVwSPoJF"
      },
      "outputs": [],
      "source": [
        "# Hyperparameters\n",
        "beta_start = 1e-4  # linear variance schedule: beta at t=0\n",
        "beta_end = 0.02  # linear variance schedule: beta at t=T-1\n",
        "steps = 1000  # number of diffusion timesteps T\n",
        "device = \"cuda\" if torch.cuda.is_available() else \"cpu\"\n",
        "image_size = 64  # side length images are cropped/resized to\n",
        "image_channel = 3  # RGB\n",
        "epochs = 300\n",
        "lr = 3e-4\n",
        "weight_decay = 0\n",
        "batch_size = 16\n",
        "num_class = 10  # CIFAR-10 classes\n",
        "pos_dim = 1024  # dimensionality of the sinusoidal timestep embedding"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "1WrR7xeEYnT8"
      },
      "outputs": [],
      "source": [
        "# This is the utils file\n",
        "def zero_out(layer):\n",
        "    for p in layer.parameters():\n",
        "        p.detach().zero_()\n",
        "    return layer\n",
        "\n",
        "def positional_embedding_creator(num_step: int, pos_dim: int):\n",
        "    matrix = torch.zeros(num_step, pos_dim)\n",
        "    for i in range(num_step):\n",
        "        for j in range(0, pos_dim, 2):\n",
        "            matrix[i, j] = np.sin(i/(10000**(j/pos_dim)))\n",
        "            if(j+1<pos_dim):\n",
        "                matrix[i, j+1] = np.cos(i/(10000**(j/pos_dim)))\n",
        "\n",
        "    return matrix"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "wSnd1_xIQZ0X"
      },
      "outputs": [],
      "source": [
        "# Diffusion model\n",
        "\n",
        "# AdaGN according to paper \"Diffusion Models Beat GANs on Image Synthesis\"\n",
        "class AdaNorm(nn.Module):\n",
        "    def __init__(self, num_channel: int):\n",
        "        super().__init__()\n",
        "        num_group = int(num_channel/16) # According to group norm paper, 16 channels per group produces the best result\n",
        "        self.gnorm = nn.GroupNorm(num_group, num_channel, affine=False)\n",
        "\n",
        "    def forward(self, tensor: torch.Tensor, emb: torch.Tensor):\n",
        "        scale, shift = torch.chunk(emb, 2, dim=1)\n",
        "\n",
        "        tensor = self.gnorm(tensor)\n",
        "        tensor = tensor * (1 + scale) + shift\n",
        "        return tensor\n",
        "\n",
        "\n",
        "class MyGroupNorm(nn.Module):\n",
        "    def __init__(self, num_channel: int):\n",
        "        super().__init__()\n",
        "        num_group = int(num_channel/16) # According to group norm paper, 16 channels per group produces the best result\n",
        "        self.gnorm = nn.GroupNorm(num_group, num_channel, affine=False)\n",
        "\n",
        "    def forward(self, tensor: torch.Tensor):\n",
        "        return self.gnorm(tensor)\n",
        "\n",
        "\n",
        "class ResBlock(nn.Module):\n",
        "    \"\"\"Residual block conditioned on a timestep/class embedding.\n",
        "\n",
        "    Optionally up- or down-samples both the main and skip paths.\n",
        "    The second conv is zero-initialised, so at init the block reduces\n",
        "    to its skip connection.\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, in_channel: int, out_channel: int, emb_dim: int = 1024, up: bool = False, down: bool = False):\n",
        "        super().__init__()\n",
        "        # Spatial resampling shared by the main and skip paths.\n",
        "        if up:\n",
        "            self.change_size = nn.Upsample(scale_factor=2, mode='nearest')\n",
        "        elif down:\n",
        "            self.change_size = nn.AvgPool2d(kernel_size=2, stride=2)\n",
        "        else:\n",
        "            self.change_size = nn.Identity()\n",
        "\n",
        "        # Normalization\n",
        "        self.gnorm1 = MyGroupNorm(in_channel)\n",
        "        self.gnorm2 = AdaNorm(out_channel)  # modulated by the embedding\n",
        "\n",
        "        # Convolution\n",
        "        self.conv1 = nn.Conv2d(in_channel, out_channel, kernel_size = 3, padding = 1)\n",
        "        self.conv2 = nn.Conv2d(out_channel, out_channel, kernel_size = 3, padding = 1)\n",
        "        # 1x1 conv on the skip path only when the channel count changes.\n",
        "        if in_channel != out_channel:\n",
        "            self.conv3 = nn.Conv2d(in_channel, out_channel, kernel_size = 1)\n",
        "        else:\n",
        "            self.conv3 = nn.Identity()\n",
        "\n",
        "        # Combine input stage\n",
        "        self.input = nn.Sequential(\n",
        "            self.gnorm1,\n",
        "            nn.SiLU(),\n",
        "            self.change_size,\n",
        "            self.conv1\n",
        "        )\n",
        "\n",
        "        # Combine output stage\n",
        "        self.output = nn.Sequential(\n",
        "            nn.SiLU(),\n",
        "            zero_out(self.conv2)\n",
        "        )\n",
        "\n",
        "        # Skip connection\n",
        "        self.skip_connection = nn.Sequential(\n",
        "            self.change_size,\n",
        "            self.conv3\n",
        "        )\n",
        "\n",
        "        # Embedding -> (scale, shift) for AdaNorm, hence 2 * out_channel.\n",
        "        # The original also created an unused `self.emb` head whose\n",
        "        # parameters never received gradients; it has been removed.\n",
        "        self.embed = nn.Sequential(\n",
        "            nn.Linear(emb_dim, emb_dim),\n",
        "            nn.SiLU(),\n",
        "            nn.Linear(emb_dim, 2 * out_channel)\n",
        "        )\n",
        "\n",
        "    def forward(self, tensor: torch.Tensor, emb: torch.Tensor):\n",
        "        \"\"\"tensor: (B, in_channel, H, W); emb: (B, emb_dim).\"\"\"\n",
        "        emb = self.embed(emb).view(tensor.shape[0], -1, 1, 1)\n",
        "\n",
        "        h = self.input(tensor)\n",
        "        h = self.gnorm2(h, emb)\n",
        "        h = self.output(h)\n",
        "        x = self.skip_connection(tensor)\n",
        "\n",
        "        return x + h\n",
        "\n",
        "\n",
        "class SelfAttention(nn.Module):\n",
        "    \"\"\"Multi-head dot-product attention over flattened spatial positions.\n",
        "\n",
        "    Expects input already holding concatenated q, k, v along channels\n",
        "    (3*C channels, as produced by Attention.qkv below).\n",
        "    \"\"\"\n",
        "    def __init__(self, channel: int):\n",
        "        super().__init__()\n",
        "        # 32 channels per attention head.\n",
        "        self.num_head = int(channel/32)\n",
        "\n",
        "    def forward(self, tensor: torch.Tensor):\n",
        "        # tensor: (B, 3*C, T) -> output (B, C, T)\n",
        "        batch, channel, length = tensor.shape\n",
        "        ch = channel // 3 // self.num_head  # channels per head of each of q/k/v\n",
        "        q, k, v = tensor.chunk(3, dim = 1)\n",
        "        # Split the 1/sqrt(ch) scaling evenly between q and k.\n",
        "        scale = 1 / math.sqrt(math.sqrt(ch))\n",
        "        # The code below is from Diffusion Model Beat GANs on Image Synthesis paper code\n",
        "        weight = torch.einsum(\n",
        "            \"bct,bcs->bts\",\n",
        "            (q * scale).view(batch * self.num_head, ch, length),\n",
        "            (k * scale).view(batch * self.num_head, ch, length),\n",
        "        )  # More stable with f16 than dividing afterwards\n",
        "        # Softmax in fp32 for numerical stability, then cast back.\n",
        "        weight = torch.softmax(weight.float(), dim=-1).type(weight.dtype)\n",
        "        a = torch.einsum(\"bts,bcs->bct\", weight, v.reshape(batch * self.num_head, ch, length))\n",
        "        return a.reshape(batch, -1, length)\n",
        "\n",
        "\n",
        "class Attention(nn.Module):\n",
        "    \"\"\"Residual self-attention block: norm -> qkv conv -> attention -> proj.\n",
        "\n",
        "    The output projection is zero-initialised, so at init the block acts\n",
        "    as the identity (the skip connection dominates).\n",
        "    \"\"\"\n",
        "    def __init__(self, channel: int):\n",
        "        super().__init__()\n",
        "        self.gnorm = MyGroupNorm(channel)\n",
        "        self.qkv = nn.Conv1d(channel, channel * 3, 1)\n",
        "        self.attention = SelfAttention(channel)\n",
        "        self.output = zero_out(nn.Conv1d(channel, channel, 1))\n",
        "\n",
        "    def forward(self, tensor: torch.Tensor):\n",
        "        # tensor: (B, C, H, W) -> (B, C, H, W)\n",
        "        # Perform self attention\n",
        "        batch, channel, width, height = tensor.shape\n",
        "        # Flatten spatial dims so attention runs over H*W positions.\n",
        "        tensor = tensor.reshape(batch, channel, -1)\n",
        "        # Skip connection\n",
        "        tensor_skip = tensor\n",
        "        tensor = self.gnorm(tensor)\n",
        "        tensor = self.qkv(tensor)\n",
        "        tensor = self.attention(tensor)\n",
        "        tensor = self.output(tensor)\n",
        "\n",
        "        # Adding the skip connection tensor back to the current tensor\n",
        "        tensor = tensor + tensor_skip\n",
        "\n",
        "        tensor = tensor.reshape(batch, channel, width, height)\n",
        "        return tensor\n",
        "\n",
        "\n",
        "class UNet(nn.Module):\n",
        "    \"\"\"Class- and timestep-conditional U-Net noise predictor (ADM-style).\n",
        "\n",
        "    Channel plan 160/320/640/1280 with attention at 320/640/1280.\n",
        "    `forward` takes the noisy image, a precomputed sinusoidal timestep\n",
        "    embedding, and an optional class label (None = unconditional pass,\n",
        "    used for classifier-free guidance).\n",
        "    \"\"\"\n",
        "\n",
        "    def __init__(self, image_channel: int = 3, depth: int = 2, emb_dim: int = 1024, num_step = 1000, num_classes = 10):\n",
        "        super().__init__()\n",
        "\n",
        "        # Create model architecture\n",
        "        channels = [160, 320, 640, 1280]\n",
        "        attention_channel = [320, 640, 1280]\n",
        "        self.encoder = nn.ModuleList([nn.ModuleList([nn.Conv2d(image_channel, channels[0], 3, padding=1)])])\n",
        "        self.decoder = nn.ModuleList()\n",
        "\n",
        "        # Channel count of each tensor pushed onto the skip stack.\n",
        "        skip_channel = [channels[0]]\n",
        "\n",
        "        # Encoder\n",
        "        for i in range(len(channels)):\n",
        "            for _ in range(depth):\n",
        "                layer = nn.ModuleList()\n",
        "                layer.append(ResBlock(channels[i], channels[i], emb_dim = emb_dim))\n",
        "                if channels[i] in attention_channel:\n",
        "                    layer.append(Attention(channels[i]))\n",
        "                self.encoder.append(layer)\n",
        "                skip_channel.append(channels[i])\n",
        "\n",
        "            # Downsample between resolutions (skipped after the last stage).\n",
        "            if i != len(channels)-1:\n",
        "                layer = nn.ModuleList()\n",
        "                layer.append(ResBlock(channels[i], channels[i + 1], down=True, emb_dim = emb_dim))\n",
        "                self.encoder.append(layer)\n",
        "                skip_channel.append(channels[i+1])\n",
        "\n",
        "        # Bottleneck. emb_dim must be forwarded here too; the original\n",
        "        # relied on the ResBlock default and broke for emb_dim != 1024.\n",
        "        self.bottle_neck = nn.ModuleList([\n",
        "            ResBlock(channels[-1], channels[-1], emb_dim = emb_dim),\n",
        "            Attention(channels[-1]),\n",
        "            ResBlock(channels[-1], channels[-1], emb_dim = emb_dim),\n",
        "        ])\n",
        "\n",
        "        # Decoder: depth+1 blocks per stage (one extra to consume the skip\n",
        "        # tensor produced by the corresponding downsampling block).\n",
        "        for i in range(len(channels)-1, -1, -1):\n",
        "            for block in range(depth+1):\n",
        "                layer = nn.ModuleList()\n",
        "                layer.append(ResBlock(channels[i] + skip_channel.pop(), channels[i], emb_dim = emb_dim))\n",
        "                if channels[i] in attention_channel:\n",
        "                    layer.append(Attention(channels[i]))\n",
        "\n",
        "                if i != 0 and block == depth:\n",
        "                    layer.append(ResBlock(channels[i], channels[i - 1], up=True, emb_dim = emb_dim))\n",
        "\n",
        "                self.decoder.append(layer)\n",
        "\n",
        "        # Create time embedding\n",
        "        self.time_embedding = nn.Sequential(\n",
        "            nn.Linear(emb_dim, emb_dim),\n",
        "            nn.SiLU(),\n",
        "            nn.Linear(emb_dim, emb_dim)\n",
        "        )\n",
        "\n",
        "        # Create class embedding\n",
        "        self.class_embedding = nn.Embedding(num_classes, emb_dim)\n",
        "\n",
        "        # Output kernels to change back to image channel (zero-initialised\n",
        "        # so the untrained network predicts zero noise).\n",
        "        self.out = nn.Sequential(\n",
        "            MyGroupNorm(channels[0]),\n",
        "            nn.SiLU(),\n",
        "            zero_out(nn.Conv2d(channels[0], image_channel, 3, padding=1)),\n",
        "        )\n",
        "\n",
        "    def forward(self, tensor: torch.Tensor, time_embedding: torch.Tensor, label: torch.Tensor | None):\n",
        "        # Creating embedding\n",
        "        embedding = self.time_embedding(time_embedding)\n",
        "        if label is not None:\n",
        "            class_embedding = self.class_embedding(label)\n",
        "            embedding = embedding + class_embedding\n",
        "\n",
        "        skip_connection = []\n",
        "\n",
        "        # Encoder\n",
        "        for layer in self.encoder:\n",
        "            for module in layer:\n",
        "                if(isinstance(module, ResBlock)):\n",
        "                    tensor = module(tensor, embedding)\n",
        "                else:\n",
        "                    tensor = module(tensor)\n",
        "\n",
        "            skip_connection.append(tensor)\n",
        "\n",
        "        # Bottleneck\n",
        "        for module in self.bottle_neck:\n",
        "            if(isinstance(module, ResBlock)):\n",
        "                tensor = module(tensor, embedding)\n",
        "            else:\n",
        "                tensor = module(tensor)\n",
        "\n",
        "        # Decoder\n",
        "        for layer in self.decoder:\n",
        "            tensor = torch.concatenate((tensor, skip_connection.pop()), dim = 1)\n",
        "            for module in layer:\n",
        "                if(isinstance(module, ResBlock)):\n",
        "                    tensor = module(tensor, embedding)\n",
        "                else:\n",
        "                    tensor = module(tensor)\n",
        "\n",
        "        tensor = self.out(tensor)\n",
        "\n",
        "        return tensor\n",
        "\n",
        "class EMA:\n",
        "    def __init__(self, beta):\n",
        "        super().__init__()\n",
        "        self.beta = beta\n",
        "        self.step = 0\n",
        "\n",
        "    def update_model_average(self, ma_model, current_model):\n",
        "        for current_params, ma_params in zip(current_model.parameters(), ma_model.parameters()):\n",
        "            old_weight, up_weight = ma_params.data, current_params.data\n",
        "            ma_params.data = self.update_average(old_weight, up_weight)\n",
        "\n",
        "    def update_average(self, old, new):\n",
        "        if old is None:\n",
        "            return new\n",
        "        return old * self.beta + (1 - self.beta) * new\n",
        "\n",
        "    def step_ema(self, ema_model, model, step_start_ema=2000):\n",
        "        if self.step < step_start_ema:\n",
        "            self.reset_parameters(ema_model, model)\n",
        "            self.step += 1\n",
        "            return\n",
        "        self.update_model_average(ema_model, model)\n",
        "        self.step += 1\n",
        "\n",
        "    def reset_parameters(self, ema_model, model):\n",
        "        ema_model.load_state_dict(model.state_dict())"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "JQkulHC6dXOC"
      },
      "outputs": [],
      "source": [
        "# Constants used for diffusion model: the DDPM linear schedule, precomputed\n",
        "# and reshaped to (T, 1, 1, 1) so indexing by t broadcasts over image batches\n",
        "beta = torch.linspace(beta_start, beta_end, steps).to(device)\n",
        "sqrt_beta = torch.sqrt(beta).view(-1, 1, 1, 1)\n",
        "alpha = 1 - beta\n",
        "alphas_cumprod = torch.cumprod(alpha, axis=0)  # alpha-bar_t\n",
        "sqrt_alphas_cumprod = torch.sqrt(alphas_cumprod).view(-1, 1, 1, 1)\n",
        "one_minus_alphas_cumprod = 1 - alphas_cumprod\n",
        "sqrt_one_minus_alphas_cumprod = torch.sqrt(one_minus_alphas_cumprod).view(-1, 1, 1, 1)\n",
        "one_over_sqrt_alpha = 1/torch.sqrt(alpha).view(-1, 1, 1, 1)\n",
        "one_minus_alpha = (1 - alpha).view(-1, 1, 1, 1)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "FxSXiWEtO0CK"
      },
      "outputs": [],
      "source": [
        "# Forward pass\n",
        "def forward_pass(images, t):\n",
        "    \"\"\"Diffuse clean images to timestep(s) t, i.e. sample q(x_t | x_0).\n",
        "\n",
        "    Returns (x_t, noise) with x_t = sqrt(abar_t)*x_0 + sqrt(1-abar_t)*eps.\n",
        "    Relies on the precomputed schedule tensors defined above; `t` is a\n",
        "    batch of integer timestep indices.\n",
        "    \"\"\"\n",
        "    batch_sqrt_alphas_cumprod = sqrt_alphas_cumprod[t]\n",
        "    batch_sqrt_one_minus_alphas_cumprod = sqrt_one_minus_alphas_cumprod[t]\n",
        "    noise = torch.randn_like(images).to(device)\n",
        "\n",
        "    return batch_sqrt_alphas_cumprod * images + batch_sqrt_one_minus_alphas_cumprod * noise, noise"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "FR1H2Rw5QZ0a"
      },
      "outputs": [],
      "source": [
        "# Precompute the (steps, pos_dim) sinusoidal timestep-embedding lookup table\n",
        "pos_emb_matrix = positional_embedding_creator(steps, pos_dim)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "vctUMYq7ZLYQ"
      },
      "outputs": [],
      "source": [
        "# Sampling(inference)\n",
        "\n",
        "def sampling(model, labels, cfg_scale: float = 3):\n",
        "    \"\"\"Generate and plot one image per entry of `labels` via DDPM ancestral sampling.\n",
        "\n",
        "    Uses classifier-free guidance: the model is evaluated with and without\n",
        "    labels and the two predictions are extrapolated by `cfg_scale`.\n",
        "    \"\"\"\n",
        "    model.eval()\n",
        "    with torch.no_grad():\n",
        "        x = torch.randn(labels.shape[0], image_channel, image_size, image_size).to(device)\n",
        "\n",
        "        # Reverse process: t = T-1 ... 0\n",
        "        for i in range(steps-1, -1, -1):\n",
        "            t = torch.tensor([i]*labels.shape[0])\n",
        "            pos_emb = pos_emb_matrix[t].to(device)\n",
        "\n",
        "            # Classifier free guidance\n",
        "            predicted_noise_no_label = model(x, pos_emb, None)\n",
        "            predicted_noise_with_label = model(x, pos_emb, labels)\n",
        "            # lerp with weight > 1 extrapolates: uncond + cfg_scale * (cond - uncond)\n",
        "            predicted_noise = torch.lerp(predicted_noise_no_label, predicted_noise_with_label, cfg_scale)\n",
        "\n",
        "            # No fresh noise is added on the final step.\n",
        "            if(i == 0):\n",
        "                noise = torch.zeros_like(x).to(device)\n",
        "            else:\n",
        "                noise = torch.randn_like(x).to(device)\n",
        "\n",
        "            x = one_over_sqrt_alpha[t] * (x - ((one_minus_alpha[t])/(sqrt_one_minus_alphas_cumprod[t]))*predicted_noise) + sqrt_beta[t] * noise\n",
        "\n",
        "    model.train()\n",
        "\n",
        "    # Map from [-1, 1] to displayable uint8 [0, 255].\n",
        "    x = (x.clamp(-1, 1) + 1) / 2\n",
        "    x = (x * 255).type(torch.uint8)\n",
        "\n",
        "    for i in range(x.shape[0]):\n",
        "        tensor = x[i].permute(1, 2, 0).to(\"cpu\")\n",
        "        plt.imshow(tensor)\n",
        "        plt.show()"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "OLB05htRAyPG"
      },
      "outputs": [],
      "source": [
        "# Define the transformation\n",
        "transform = transforms.Compose([\n",
        "    transforms.Resize(125),  # NOTE(review): upsamples 32x32 CIFAR images well above image_size; confirm intended\n",
        "    transforms.RandomResizedCrop(image_size, scale=(0.8, 1.0)),\n",
        "    transforms.RandomHorizontalFlip(),\n",
        "    transforms.ToTensor(),\n",
        "    transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))  # map pixels into [-1, 1]\n",
        "])\n",
        "\n",
        "# Load the training set\n",
        "trainset = datasets.CIFAR10(root='./data', train=True, download=True, transform=transform)\n",
        "\n",
        "# Load the test set\n",
        "testset = datasets.CIFAR10(root='./data', train=False, download=True, transform=transform)\n",
        "\n",
        "# Create a DataLoader for the combined dataset\n",
        "train_dataloader = torch.utils.data.DataLoader(trainset, batch_size=batch_size, shuffle=True)\n",
        "valid_dataloader = torch.utils.data.DataLoader(testset, batch_size=1, shuffle=True)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "2jZa9DSQaF41"
      },
      "outputs": [],
      "source": [
        "# Instantiate the model\n",
        "unet = UNet().to(device)\n",
        "print(\"This model has\", sum(p.numel() for p in unet.parameters()), \"parameters.\")\n",
        "scaler = amp.GradScaler()  # gradient scaler for mixed-precision training\n",
        "loss_train = []  # per-epoch mean training loss\n",
        "loss_valid = []  # mean validation loss (recorded every 10th epoch)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "_N7bgENXZYl4"
      },
      "outputs": [],
      "source": [
        "# Set up optimizer and loss\n",
        "optimizer = opt.Adam(unet.parameters(), lr = lr, weight_decay = weight_decay)\n",
        "criterion = nn.MSELoss()  # noise-prediction (epsilon) objective\n",
        "scheduler = torch.optim.lr_scheduler.ExponentialLR(optimizer, gamma=0.991)  # multiplicative LR decay per epoch"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "nb-yMwkoPoYB",
        "scrolled": true
      },
      "outputs": [],
      "source": [
        "# Training\n",
        "ema = EMA(0.9999) # 0.9999 according to the diffusion model beat GANs paper.\n",
        "ema_model = copy.deepcopy(unet).eval().requires_grad_(False)\n",
        "for epoch in range(epochs):\n",
        "    train_loss_list = []\n",
        "    valid_loss_list = []\n",
        "    for images, label in tqdm(train_dataloader):\n",
        "        # Zero out grad\n",
        "        optimizer.zero_grad()\n",
        "\n",
        "        # Preparing for forward pass\n",
        "        images = images.to(device)\n",
        "        label = label.to(device)\n",
        "        # Sample a timestep per image from the full range [0, steps) so that\n",
        "        # t = 0 (queried at the final sampling step) is also trained on.\n",
        "        t = torch.randint(0, steps, size = (images.shape[0], ))\n",
        "        pos_emb = pos_emb_matrix[t].to(device)\n",
        "        x_t, noise = forward_pass(images, t)\n",
        "\n",
        "        # Classifier free guidance.\n",
        "        if random.random() < 0.1:\n",
        "            label = None\n",
        "\n",
        "        # Forward pass\n",
        "        with amp.autocast():\n",
        "            predicted_noise = unet(x_t, pos_emb, label)\n",
        "            loss = criterion(predicted_noise, noise)\n",
        "\n",
        "        # Back propagation. Gradients must be unscaled before clipping,\n",
        "        # otherwise the norm is computed on scaled gradients and the clip\n",
        "        # threshold is effectively wrong (see PyTorch AMP recipe).\n",
        "        scaler.scale(loss).backward()\n",
        "        scaler.unscale_(optimizer)\n",
        "        torch.nn.utils.clip_grad_norm_(unet.parameters(), max_norm=1.0)\n",
        "        scaler.step(optimizer)\n",
        "        scaler.update()\n",
        "\n",
        "        # EMA and loss\n",
        "        ema.step_ema(ema_model, unet)\n",
        "        train_loss_list.append(loss.item())\n",
        "\n",
        "    if(epoch % 10 == 0):\n",
        "        # Validation: no gradients needed.\n",
        "        with torch.no_grad():\n",
        "            for images, label in tqdm(valid_dataloader):\n",
        "                # Preparing for forward pass\n",
        "                images = images.to(device)\n",
        "                label = label.to(device)\n",
        "                t = torch.randint(0, steps, size = (images.shape[0], ))\n",
        "                pos_emb = pos_emb_matrix[t].to(device)\n",
        "                x_t, noise = forward_pass(images, t)\n",
        "\n",
        "                # Forward pass\n",
        "                with amp.autocast():\n",
        "                    predicted_noise = unet(x_t, pos_emb, label)\n",
        "                    loss = criterion(predicted_noise, noise)\n",
        "                valid_loss_list.append(loss.item())\n",
        "\n",
        "    # Step the learning rate\n",
        "    scheduler.step()\n",
        "\n",
        "    print(f\"Epoch #{epoch}\")\n",
        "    print(f\"Current learning rate is {optimizer.param_groups[0]['lr']}\")\n",
        "    print(\"Train Loss is:\", sum(train_loss_list)/len(train_loss_list))\n",
        "    loss_train.append(sum(train_loss_list)/len(train_loss_list))\n",
        "    if(epoch % 10 == 0):\n",
        "        print(\"Valid Loss is:\", sum(valid_loss_list)/len(valid_loss_list))\n",
        "        loss_valid.append(sum(valid_loss_list)/len(valid_loss_list))\n",
        "    if(epoch % 10 == 0):\n",
        "        label = torch.tensor([0, 1, 2, 3, 4, 5, 6, 7, 8, 9]).to(device)\n",
        "        sampling(ema_model, label)"
      ]
    },
    {
      "cell_type": "markdown",
      "source": [
        "# Testing"
      ],
      "metadata": {
        "id": "OWq4Vc4YFsSV"
      }
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "wCwr_7gGQZ0d"
      },
      "outputs": [],
      "source": [
        "# Generate five samples of class 0 with guidance scale 4\n",
        "label = torch.tensor([0, 0, 0, 0, 0]).to(device)\n",
        "sampling(ema_model, label, 4)"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "KmJIkz76Fi7_"
      },
      "outputs": [],
      "source": [
        "# NOTE(review): saving the whole module pickles class definitions by reference;\n",
        "# torch.save(ema_model.state_dict(), ...) is more robust to code changes.\n",
        "torch.save(ema_model, \"diffusion_CIFAR10.pth\")"
      ]
    },
    {
      "cell_type": "code",
      "execution_count": null,
      "metadata": {
        "id": "PS1_M96eFi7_"
      },
      "outputs": [],
      "source": [
        "# NOTE(review): prefer torch.save(unet.state_dict(), ...) for portability;\n",
        "# loading a whole pickled module requires the identical class code.\n",
        "torch.save(unet, \"diffusion_CIFAR10_student.pth\")"
      ]
    }
  ],
  "metadata": {
    "accelerator": "GPU",
    "colab": {
      "gpuType": "T4",
      "provenance": []
    },
    "kernelspec": {
      "display_name": "Python 3 (ipykernel)",
      "language": "python",
      "name": "python3"
    },
    "language_info": {
      "codemirror_mode": {
        "name": "ipython",
        "version": 3
      },
      "file_extension": ".py",
      "mimetype": "text/x-python",
      "name": "python",
      "nbconvert_exporter": "python",
      "pygments_lexer": "ipython3",
      "version": "3.10.13"
    }
  },
  "nbformat": 4,
  "nbformat_minor": 0
}