{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "from math import log2\n",
    "\n",
    "from functools import partial\n",
    "import torch\n",
    "from torch import nn\n",
    "from torch.nn import functional as F\n",
    "from torch import nn, einsum\n",
    "from einops import rearrange, repeat\n",
    "from kornia.filters import filter2d\n",
    "import math"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "def exists(val):\n",
    "    return val is not None\n",
    "\n",
    "def noise(n, latent_dim, device):\n",
    "    return torch.randn(n, latent_dim).to(device)\n",
    "\n",
    "def noise_list(n, layers, latent_dim, device):\n",
    "    return [(noise(n, latent_dim, device), layers)]\n",
    "\n",
    "def mixed_list(n, layers, latent_dim, device): \n",
    "    tt = int(torch.rand(()).numpy() * layers)\n",
    "    return noise_list(n, tt, latent_dim, device) + noise_list(n, layers - tt, latent_dim, device)\n",
    "\n",
    "def latent_to_w(style_vectorizer, latent_descr):\n",
    "    return [(style_vectorizer(z), num_layers) for z, num_layers in latent_descr]\n",
    "\n",
    "def styles_def_to_tensor(styles_def):\n",
    "    return torch.cat([t[:, None, :].expand(-1, n, -1) for t, n in styles_def], dim=1)\n",
    "\n",
    "def image_noise(n, im_size, device):\n",
    "    return torch.FloatTensor(n, im_size, im_size, 1).uniform_(0., 1.).to(device)\n",
    "\n",
    "def leaky_relu(p=0.2):\n",
    "    return nn.LeakyReLU(p, inplace=True)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Flatten(nn.Module):\n",
    "    def forward(self, x):\n",
    "        return x.reshape(x.shape[0], -1)\n",
    "\n",
    "class ScaledLeakyReLU(nn.Module):\n",
    "    def __init__(self, negative_slope=0.2):\n",
    "        super().__init__()\n",
    "\n",
    "        self.negative_slope = negative_slope\n",
    "\n",
    "    def forward(self, input):\n",
    "        out = F.leaky_relu(input, negative_slope=self.negative_slope)\n",
    "\n",
    "        return out * math.sqrt(2)\n",
    "    \n",
    "class FusedLeakyReLU(nn.Module):\n",
    "    def __init__(self, channel, bias=True, negative_slope=0.2, scale=2 ** 0.5):\n",
    "        super().__init__()\n",
    "\n",
    "        if bias:\n",
    "            self.bias = nn.Parameter(torch.zeros(channel))\n",
    "\n",
    "        else:\n",
    "            self.bias = None\n",
    "\n",
    "        self.negative_slope = negative_slope\n",
    "        self.scale = scale\n",
    "\n",
    "    def forward(self, inputs):\n",
    "        return fused_leaky_relu(inputs, self.bias, self.negative_slope, self.scale)\n",
    "\n",
    "def fused_leaky_relu(inputs, bias=None, negative_slope=0.2, scale=2 ** 0.5):\n",
    "    if bias is not None:\n",
    "        rest_dim = [1] * (inputs.ndim - bias.ndim - 1)\n",
    "        return (\n",
    "            F.leaky_relu(\n",
    "                inputs + bias.view(1, bias.shape[0], *rest_dim), negative_slope=negative_slope\n",
    "            )\n",
    "            * scale\n",
    "        )\n",
    "\n",
    "    else:\n",
    "        return F.leaky_relu(inputs, negative_slope=negative_slope) * scale"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "class Blur(nn.Module):\n",
    "    \"\"\"Fixed 3x3 binomial blur ([1, 2, 1] outer product), applied normalized.\"\"\"\n",
    "\n",
    "    def __init__(self):\n",
    "        super().__init__()\n",
    "        self.register_buffer('f', torch.Tensor([1, 2, 1]))\n",
    "\n",
    "    def forward(self, x):\n",
    "        taps = self.f\n",
    "        kernel = taps[None, None, :] * taps[None, :, None]\n",
    "        return filter2d(x, kernel, normalized=True)\n",
    "    \n",
    "    \n",
    "class ConstantInput(nn.Module):\n",
    "    def __init__(self, channel, size=4):\n",
    "        super().__init__()\n",
    "\n",
    "        self.input = nn.Parameter(torch.randn(1, channel, size, size))\n",
    "\n",
    "    def forward(self, input):\n",
    "        batch = input.shape[0]\n",
    "        out = self.input.repeat(batch, 1, 1, 1)\n",
    "\n",
    "        return out\n",
    "\n",
    "# stylegan2 classes\n",
    "class EqualLinear(nn.Module):\n",
    "    def __init__(self, in_dim, out_dim, lr_mul = 1, bias = True):\n",
    "        super().__init__()\n",
    "        self.weight = nn.Parameter(torch.randn(out_dim, in_dim))\n",
    "        if bias:\n",
    "            self.bias = nn.Parameter(torch.zeros(out_dim))\n",
    "\n",
    "        self.lr_mul = lr_mul\n",
    "\n",
    "    def forward(self, input):\n",
    "        return F.linear(input, self.weight * self.lr_mul, bias=self.bias * self.lr_mul)\n",
    "\n",
    "\n",
    "class StyleVectorizer(nn.Module):\n",
    "    \"\"\"z -> w mapping network: L2-normalize, then `depth` EqualLinear + LeakyReLU stages.\"\"\"\n",
    "\n",
    "    def __init__(self, emb, depth, lr_mul = 0.1):\n",
    "        super().__init__()\n",
    "        stages = []\n",
    "        for _ in range(depth):\n",
    "            stages.append(EqualLinear(emb, emb, lr_mul))\n",
    "            stages.append(leaky_relu())\n",
    "        self.net = nn.Sequential(*stages)\n",
    "\n",
    "    def forward(self, x):\n",
    "        normalized = F.normalize(x, dim=1)\n",
    "        return self.net(normalized)\n",
    "\n",
    "\n",
    "class MRIBlock(nn.Module):\n",
    "    \"\"\"Style-modulated 1x1 projection to a 1-channel output image.\n",
    "\n",
    "    Adds the previous resolution's output when given, then optionally\n",
    "    upsamples 2x (bilinear + blur) for the next stage.\n",
    "    \"\"\"\n",
    "    def __init__(self, latent_dim, input_channel, upsample):\n",
    "        super().__init__()\n",
    "        self.input_channel = input_channel\n",
    "        self.to_style = nn.Linear(latent_dim, input_channel)\n",
    "        self.conv = Conv2DMod(input_channel, 1, 1, demod=False)\n",
    "\n",
    "        if upsample:\n",
    "            self.upsample = nn.Sequential(\n",
    "                nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False),\n",
    "                Blur(),\n",
    "            )\n",
    "        else:\n",
    "            self.upsample = None\n",
    "\n",
    "    def forward(self, x, prev_mri, istyle):\n",
    "        out = self.conv(x, self.to_style(istyle))\n",
    "        if prev_mri is not None:\n",
    "            out = out + prev_mri\n",
    "        if self.upsample is not None:\n",
    "            out = self.upsample(out)\n",
    "        return out\n",
    "\n",
    "\n",
    "class Conv2DMod(nn.Module):\n",
    "    def __init__(self, in_chan, out_chan, kernel, demod=True, stride=1, dilation=1, eps = 1e-8, **kwargs):\n",
    "        super().__init__()\n",
    "        self.filters = out_chan\n",
    "        self.demod = demod\n",
    "        self.kernel = kernel\n",
    "        self.stride = stride\n",
    "        self.dilation = dilation\n",
    "        self.weight = nn.Parameter(torch.randn((out_chan, in_chan, kernel, kernel)))\n",
    "        self.eps = eps\n",
    "        nn.init.kaiming_normal_(self.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')\n",
    "\n",
    "    def _get_same_padding(self, size, kernel, dilation, stride):\n",
    "        return ((size - 1) * (stride - 1) + dilation * (kernel - 1)) // 2\n",
    "\n",
    "    def forward(self, x, y):\n",
    "        b, c, h, w = x.shape\n",
    "\n",
    "        w1 = y[:, None, :, None, None]\n",
    "        w2 = self.weight[None, :, :, :, :]\n",
    "        weights = w2 * (w1 + 1)\n",
    "\n",
    "        if self.demod:\n",
    "            d = torch.rsqrt((weights ** 2).sum(dim=(2, 3, 4), keepdim=True) + self.eps)\n",
    "            weights = weights * d\n",
    "\n",
    "        x = x.reshape(1, -1, h, w)\n",
    "\n",
    "        _, _, *ws = weights.shape\n",
    "        weights = weights.reshape(b * self.filters, *ws)\n",
    "\n",
    "        padding = self._get_same_padding(h, self.kernel, self.dilation, self.stride)\n",
    "        x = F.conv2d(x, weights, padding=padding, groups=b)\n",
    "\n",
    "        x = x.reshape(-1, self.filters, h, w)\n",
    "        return x\n",
    "\n",
    "\n",
    "class GeneratorBlock(nn.Module):\n",
    "    \"\"\"One generator stage: optional 2x upsample, two style-modulated convs\n",
    "    with per-pixel noise injection, plus an MRI (image) output branch.\"\"\"\n",
    "\n",
    "    def __init__(self, latent_dim, input_channels, filters, upsample=True, upsample_mri=True):\n",
    "        super().__init__()\n",
    "        self.upsample = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False) if upsample else None\n",
    "\n",
    "        self.to_style1 = nn.Linear(latent_dim, input_channels)\n",
    "        self.to_noise1 = nn.Linear(1, filters)\n",
    "        self.conv1 = Conv2DMod(input_channels, filters, 3)\n",
    "\n",
    "        self.to_style2 = nn.Linear(latent_dim, filters)\n",
    "        self.to_noise2 = nn.Linear(1, filters)\n",
    "        self.conv2 = Conv2DMod(filters, filters, 3)\n",
    "\n",
    "        self.activation = leaky_relu()\n",
    "        self.to_mri = MRIBlock(latent_dim, filters, upsample_mri)\n",
    "\n",
    "    def forward(self, x, prev_mri, istyle, inoise):\n",
    "        if self.upsample is not None:\n",
    "            x = self.upsample(x)\n",
    "\n",
    "        # Crop the shared noise map to this resolution and project it to\n",
    "        # `filters` channels. NOTE(review): permute((0, 3, 2, 1)) swaps the h/w\n",
    "        # axes; harmless for square noise maps but worth confirming it is intended.\n",
    "        inoise = inoise[:, :x.shape[2], :x.shape[3], :]\n",
    "        noise1 = self.to_noise1(inoise).permute((0, 3, 2, 1))\n",
    "        noise2 = self.to_noise2(inoise).permute((0, 3, 2, 1))\n",
    "\n",
    "        x = self.activation(self.conv1(x, self.to_style1(istyle)) + noise1)\n",
    "        x = self.activation(self.conv2(x, self.to_style2(istyle)) + noise2)\n",
    "\n",
    "        mri = self.to_mri(x, prev_mri, istyle)\n",
    "        return x, mri\n",
    "\n",
    "class Generator(nn.Module):\n",
    "    \"\"\"StyleGAN2-style generator assembled from upsampling GeneratorBlocks.\n",
    "\n",
    "    forward() consumes per-layer w-space style vectors plus a shared noise map\n",
    "    and returns the accumulated single-channel image from the MRI branches.\n",
    "\n",
    "    Args:\n",
    "        image_size: target output resolution (power of two).\n",
    "        style_dim: dimensionality of latent / style vectors.\n",
    "        network_capacity: base multiplier for the channel schedule.\n",
    "        fmap_max: upper bound on any channel count.\n",
    "        style_depth: number of layers in the mapping network `S`.\n",
    "        no_const: if True, derive the initial 4x4 tensor from the mean style\n",
    "            via a transposed conv; otherwise use a learned constant.\n",
    "    \"\"\"\n",
    "    def __init__(\n",
    "            self,\n",
    "            image_size,\n",
    "            style_dim,\n",
    "            network_capacity=16,\n",
    "            fmap_max = 512,\n",
    "            style_depth=8,\n",
    "            no_const=True,\n",
    "    ):\n",
    "        super().__init__()\n",
    "        self.style_dim = style_dim\n",
    "        # One generator block per resolution doubling from 4x4 up to image_size.\n",
    "        self.num_layers = int(log2(image_size) - 1)\n",
    "        self.no_const = no_const\n",
    "        # Coarse-to-fine channel counts, capped at fmap_max.\n",
    "        filters = [network_capacity * (2 ** (i + 1)) for i in range(self.num_layers)][::-1]\n",
    "        set_fmap_max = partial(min, fmap_max)\n",
    "        filters = list(map(set_fmap_max, filters))\n",
    "        init_channels = filters[0]\n",
    "        filters = [init_channels, *filters]\n",
    "\n",
    "        in_out_pairs = zip(filters[:-1], filters[1:])\n",
    "\n",
    "        if no_const:\n",
    "            self.to_initial_block = nn.ConvTranspose2d(style_dim, init_channels, 4, 1, 0, bias=False)\n",
    "        else:\n",
    "            self.initial_block = nn.Parameter(torch.randn((1, init_channels, 4, 4)))\n",
    "\n",
    "        # NOTE(review): initial_conv and pool are constructed but never used in\n",
    "        # forward() below; they look vestigial — confirm before removing.\n",
    "        self.initial_conv = nn.Conv2d(filters[0], filters[0], 3, padding=1)\n",
    "        \n",
    "        self.conv1 = nn.Conv2d(filters[0], filters[0], 3, padding=1)\n",
    "        self.blocks = nn.ModuleList([])\n",
    "        \n",
    "        for ind, (in_chan, out_chan) in enumerate(in_out_pairs):\n",
    "            not_first = ind != 0\n",
    "            not_last = ind != (self.num_layers - 1)\n",
    "            \n",
    "            block = GeneratorBlock(\n",
    "                style_dim,\n",
    "                in_chan,\n",
    "                out_chan,\n",
    "                upsample = not_first,\n",
    "                upsample_mri = not_last\n",
    "            )\n",
    "            self.blocks.append(block)\n",
    "            \n",
    "        self.S = StyleVectorizer(style_dim, depth=style_depth, lr_mul=0.1)\n",
    "        self.pool = nn.AdaptiveAvgPool2d(1)\n",
    "\n",
    "        \n",
    "    def _init_weights(self):\n",
    "        \"\"\"Kaiming-init all conv/linear weights; zero the noise projections.\"\"\"\n",
    "        for m in self.modules():\n",
    "            if type(m) in {nn.Conv2d, nn.Linear}:\n",
    "                nn.init.kaiming_normal_(m.weight, a=0, mode='fan_in', nonlinearity='leaky_relu')\n",
    "\n",
    "        # Start noise injection at zero so it has no effect at initialization.\n",
    "        for block in self.blocks:\n",
    "            nn.init.zeros_(block.to_noise1.weight)\n",
    "            nn.init.zeros_(block.to_noise2.weight)\n",
    "            nn.init.zeros_(block.to_noise1.bias)\n",
    "            nn.init.zeros_(block.to_noise2.bias)\n",
    "\n",
    "    def make_image_noise(self, x):\n",
    "        \"\"\"Uniform noise map matching x's batch size, spatial size and device.\"\"\"\n",
    "        batch_size, _, image_size, _ = x.size()\n",
    "        return image_noise(batch_size, image_size, device=x.device)\n",
    "\n",
    "    def get_w(self, x):   \n",
    "        \"\"\"Sample mixed latents for x's batch and map them through S to w-space.\n",
    "\n",
    "        Returns a (batch, num_layers, style_dim) style tensor.\n",
    "        \"\"\"\n",
    "        batch_size, _, _, _ = x.size()   \n",
    "\n",
    "        style = mixed_list(batch_size, self.num_layers, self.style_dim, device=x.device)\n",
    "        \n",
    "        w = latent_to_w(self.S, style)\n",
    "        w = styles_def_to_tensor(w)\n",
    "\n",
    "        return w\n",
    "    \n",
    "    def forward(self, styles, input_noise):\n",
    "        \"\"\"Generate images from w-space styles.\n",
    "\n",
    "        Args:\n",
    "            styles: (batch, num_layers, style_dim) w-space style tensor.\n",
    "            input_noise: (batch, H, W, 1) noise map shared by all blocks.\n",
    "\n",
    "        Returns:\n",
    "            The accumulated single-channel image from the last MRI branch.\n",
    "        \"\"\"\n",
    "        batch_size = styles.size()[0]\n",
    "        \n",
    "        if self.no_const:\n",
    "            # Seed the 4x4 tensor from the mean style across layers.\n",
    "            avg_style = styles.mean(dim=1)[:, :, None, None]\n",
    "            x = self.to_initial_block(avg_style)\n",
    "        else:\n",
    "            x = self.initial_block.expand(batch_size, -1, -1, -1)\n",
    "        \n",
    "        mri = None\n",
    "        \n",
    "        # (batch, layers, dim) -> (layers, batch, dim) to iterate per layer.\n",
    "        styles = styles.transpose(0, 1)\n",
    "        x = self.conv1(x)\n",
    "\n",
    "        for style, block in zip(styles, self.blocks):\n",
    "            x, mri = block(x, mri, style, input_noise)\n",
    "\n",
    "        return mri\n",
    "\n",
    "\n",
    "class DiscriminatorBlock(nn.Module):\n",
    "    \"\"\"Residual discriminator stage: 1x1 skip conv plus a two-conv main path,\n",
    "    optionally blur + stride-2 downsampled, combined with 1/sqrt(2) scaling.\"\"\"\n",
    "\n",
    "    def __init__(self, input_channels, filters, downsample=True):\n",
    "        super().__init__()\n",
    "        stride = 2 if downsample else 1\n",
    "        self.conv_res = nn.Conv2d(input_channels, filters, 1, stride=stride)\n",
    "\n",
    "        self.net = nn.Sequential(\n",
    "            nn.Conv2d(input_channels, filters, 3, padding=1),\n",
    "            leaky_relu(),\n",
    "            nn.Conv2d(filters, filters, 3, padding=1),\n",
    "            leaky_relu(),\n",
    "        )\n",
    "\n",
    "        if downsample:\n",
    "            self.downsample = nn.Sequential(\n",
    "                Blur(),\n",
    "                nn.Conv2d(filters, filters, 3, padding=1, stride=2),\n",
    "            )\n",
    "        else:\n",
    "            self.downsample = None\n",
    "\n",
    "    def forward(self, x):\n",
    "        shortcut = self.conv_res(x)\n",
    "        out = self.net(x)\n",
    "        if self.downsample is not None:\n",
    "            out = self.downsample(out)\n",
    "        return (out + shortcut) * (1 / math.sqrt(2))\n",
    "    \n",
    "    \n",
    "class Discriminator(nn.Module):\n",
    "    \"\"\"Residual downsampling discriminator ending in a single logit.\n",
    "\n",
    "    Cleanup: removed the duplicate `blocks = []` assignment, the unused\n",
    "    `num_layer` local, and the unused batch-size unpack in forward.\n",
    "\n",
    "    Args:\n",
    "        image_size: input resolution (power of two).\n",
    "        network_capacity: base channel multiplier.\n",
    "        fmap_max: cap on channel counts.\n",
    "        cls: number of input channels.\n",
    "    \"\"\"\n",
    "    def __init__(self,\n",
    "                 image_size,\n",
    "                 network_capacity = 16,\n",
    "                 fmap_max = 512,\n",
    "                 cls=1):\n",
    "        super().__init__()\n",
    "        num_layers = int(log2(image_size) - 1)\n",
    "\n",
    "        # Channel schedule: cls input channels, then doubling, capped at fmap_max.\n",
    "        filters = [cls] + [(network_capacity * 4) * (2 ** i) for i in range(num_layers + 1)]\n",
    "        filters = list(map(partial(min, fmap_max), filters))\n",
    "        chan_in_out = list(zip(filters[:-1], filters[1:]))\n",
    "\n",
    "        # Every block but the last downsamples by 2, leaving a 2x2 feature map.\n",
    "        blocks = []\n",
    "        for ind, (in_chan, out_chan) in enumerate(chan_in_out):\n",
    "            is_not_last = ind != (len(chan_in_out) - 1)\n",
    "            blocks.append(DiscriminatorBlock(in_chan, out_chan, downsample=is_not_last))\n",
    "\n",
    "        self.blocks = nn.ModuleList(blocks)\n",
    "        # Kept empty for backward compatibility with earlier checkpoints.\n",
    "        self.quantize_blocks = nn.ModuleList([])\n",
    "\n",
    "        chan_last = filters[-1]\n",
    "        latent_dim = 2 * 2 * chan_last  # flattened 2x2 x chan_last features\n",
    "\n",
    "        self.final_conv = nn.Conv2d(chan_last, chan_last, 3, padding=1)\n",
    "        self.flatten = Flatten()\n",
    "        self.to_logit = nn.Linear(latent_dim, 1)\n",
    "\n",
    "    def forward(self, x):\n",
    "        for block in self.blocks:\n",
    "            x = block(x)\n",
    "\n",
    "        x = self.final_conv(x)\n",
    "        x = self.flatten(x)\n",
    "        x = self.to_logit(x)\n",
    "        return x.squeeze()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# Smoke test: run a batch through the generator and discriminator.\n",
    "gen = Generator(256, 512)\n",
    "gen._init_weights()\n",
    "x = torch.randn((4, 3, 256, 256))\n",
    "\n",
    "# get_w takes only the input batch (it reads batch size and device); the\n",
    "# extra arguments passed previously raised a TypeError on execution.\n",
    "w = gen.get_w(x)\n",
    "\n",
    "noise = gen.make_image_noise(x)\n",
    "\n",
    "out = gen(w, noise)\n",
    "\n",
    "print(out.size())\n",
    "\n",
    "\n",
    "dis = Discriminator(256)\n",
    "\n",
    "out = dis(out)\n",
    "\n",
    "print(out)"
   ]
  }
 ],
 "metadata": {
  "language_info": {
   "name": "python"
  },
  "orig_nbformat": 4
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
