{
 "cells": [
  {
   "cell_type": "code",
   "execution_count": 2,
   "id": "a4d5d1bc-df65-47ae-8e2a-d1fe6218fb3e",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T13:48:13.440448Z",
     "iopub.status.busy": "2022-06-12T13:48:13.439795Z",
     "iopub.status.idle": "2022-06-12T13:48:16.866181Z",
     "shell.execute_reply": "2022-06-12T13:48:16.865544Z",
     "shell.execute_reply.started": "2022-06-12T13:48:13.440418Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "CPU times: user 1.58 s, sys: 127 ms, total: 1.71 s\n",
      "Wall time: 3.42 s\n"
     ]
    }
   ],
   "source": [
     "%%time\n",
     "# Standard library\n",
     "import os\n",
     "from collections import OrderedDict\n",
     "\n",
     "# Third-party\n",
     "import matplotlib.pyplot as plt\n",
     "import numpy as np\n",
     "import pandas as pd\n",
     "\n",
     "import torch\n",
     "import torch.nn as nn\n",
     "import torch.nn.functional as F  # used by MyLinear / MyConv2d / BlurLayer below\n",
     "import torch.utils.data as Data\n",
     "import torchvision.transforms as transforms\n",
     "from torch.utils.data import DataLoader, Dataset\n",
     "from torchvision.utils import save_image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 3,
   "id": "029bc639-dace-4661-875b-ec0605f5f8e2",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T13:48:16.867805Z",
     "iopub.status.busy": "2022-06-12T13:48:16.867368Z",
     "iopub.status.idle": "2022-06-12T13:48:16.873247Z",
     "shell.execute_reply": "2022-06-12T13:48:16.872741Z",
     "shell.execute_reply.started": "2022-06-12T13:48:16.867781Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "GPU:False\n"
     ]
    }
   ],
   "source": [
     "class Parse:\n",
     "    \"\"\"Hyper-parameter namespace for the GAN training run.\n",
     "\n",
     "    Instantiated once below as the module-level ``opt``; prints whether\n",
     "    CUDA is available as a side effect of construction.\n",
     "    \"\"\"\n",
     "    def __init__(self):  # define the hyperparameters\n",
     "        # --- values to tune per experiment ---\n",
     "        self.channels = 1      # image channels (1 = grayscale)\n",
     "        self.img_size = 28     # square image side length\n",
     "        self.latent_dim = 512  # size of the latent z vector\n",
     "        \n",
     "        self.epochs = 200\n",
     "        self.batch_size = 64\n",
     "        self.lr = 0.0002       # Adam learning rate\n",
     "        self.b = (0.5, 0.999)  # Adam betas\n",
     "        self.cpu_nums = 8      # DataLoader worker count\n",
     "        self.want_cuda = \"cuda:0\"  # preferred CUDA device string\n",
     "        \n",
     "        # --- derived values ---\n",
     "        self.img_shape = (self.channels, self.img_size, self.img_size)\n",
     "        self.cuda = torch.cuda.is_available()\n",
     "        self.device = self.want_cuda if self.cuda else 'cpu'\n",
     "        self.big_cuda = True  # whether the whole dataset can be kept on the GPU\n",
     "        print(f\"GPU:{self.cuda}\")\n",
     "opt = Parse()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 4,
   "id": "9a9197d7-5606-4b26-804b-c4e64a5f1354",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T13:48:16.874255Z",
     "iopub.status.busy": "2022-06-12T13:48:16.874084Z",
     "iopub.status.idle": "2022-06-12T13:48:16.881467Z",
     "shell.execute_reply": "2022-06-12T13:48:16.880929Z",
     "shell.execute_reply.started": "2022-06-12T13:48:16.874237Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "class Tools:\n",
    "    # 装一些杂七杂八的工具\n",
    "    def block(in_feat, out_feat, normalize=True):\n",
    "        layers = [nn.Linear(in_feat, out_feat),nn.LeakyReLU(0.2, inplace=True)]\n",
    "        if normalize:\n",
    "            layers.insert(1, nn.BatchNorm1d(out_feat, 0.8))\n",
    "        return layers\n",
    "    \n",
    "    \n",
    "    def sample_image(n_row, batches_done, generator):\n",
    "\n",
    "        z = torch.randn(n_row ** 2, opt.latent_dim, device=opt.device)\n",
    "        labels = torch.tensor([num for _ in range(n_row) for num in range(n_row)], device=opt.device)\n",
    "        gen_imgs = generator(z, labels)\n",
    "        save_image(gen_imgs.data, \"images1/%d.png\" % batches_done, nrow=n_row, normalize=True)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 5,
   "id": "5ddd672f-b799-48a2-a7be-d7bcfc8ab5a9",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T13:48:16.882359Z",
     "iopub.status.busy": "2022-06-12T13:48:16.882210Z",
     "iopub.status.idle": "2022-06-12T13:48:16.888286Z",
     "shell.execute_reply": "2022-06-12T13:48:16.887809Z",
     "shell.execute_reply.started": "2022-06-12T13:48:16.882342Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "class MyLinear(nn.Module):\n",
     "    \"\"\"Linear layer with equalized learning rate and custom learning rate multiplier.\n",
     "\n",
     "    With ``use_wscale=True`` the weights are stored near unit scale and the\n",
     "    He-init constant is applied at runtime (StyleGAN's 'equalized LR');\n",
     "    otherwise the He scaling is baked into the initialization instead.\n",
     "    \"\"\"\n",
     "    def __init__(self, input_size, output_size, gain=2**(0.5), use_wscale=False, lrmul=1, bias=True):\n",
     "        super().__init__()\n",
     "        he_std = gain / input_size**(0.5) # He init std\n",
     "        # Equalized learning rate and custom learning rate multiplier.\n",
     "        if use_wscale:\n",
     "            init_std = 1.0 / lrmul       # store weights near unit scale\n",
     "            self.w_mul = he_std * lrmul  # apply He scaling at runtime\n",
     "        else:\n",
     "            init_std = he_std / lrmul    # bake He scaling into the init\n",
     "            self.w_mul = lrmul\n",
     "        self.weight = torch.nn.Parameter(torch.randn(output_size, input_size) * init_std)\n",
     "        if bias:\n",
     "            self.bias = torch.nn.Parameter(torch.zeros(output_size))\n",
     "            self.b_mul = lrmul\n",
     "        else:\n",
     "            self.bias = None\n",
     "\n",
     "    def forward(self, x):\n",
     "        bias = self.bias\n",
     "        if bias is not None:\n",
     "            bias = bias * self.b_mul  # scale bias by its LR multiplier\n",
     "        # NOTE(review): F is torch.nn.functional, which the import cell does\n",
     "        # not import — confirm `import torch.nn.functional as F` is added.\n",
     "        return F.linear(x, self.weight * self.w_mul, bias)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 6,
   "id": "865e9ba4-9d03-4d85-b180-ef7935cc8245",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T13:49:11.049414Z",
     "iopub.status.busy": "2022-06-12T13:49:11.048807Z",
     "iopub.status.idle": "2022-06-12T13:49:11.066101Z",
     "shell.execute_reply": "2022-06-12T13:49:11.065430Z",
     "shell.execute_reply.started": "2022-06-12T13:49:11.049377Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "class MyConv2d(nn.Module):\n",
     "    \"\"\"Conv layer with equalized learning rate and custom learning rate multiplier.\n",
     "\n",
     "    Optionally fuses a 2x upscale into a strided transposed convolution\n",
     "    (taken only when the upscaled map would be >= 128 px on its short side),\n",
     "    and can run an ``intermediate`` module (e.g. a blur) between the\n",
     "    convolution and the bias addition.\n",
     "    \"\"\"\n",
     "    def __init__(self, input_channels, output_channels, kernel_size, gain=2**(0.5), use_wscale=False, lrmul=1, bias=True,\n",
     "                intermediate=None, upscale=False):\n",
     "        super().__init__()\n",
     "        if upscale:\n",
     "            self.upscale = Upscale2d()\n",
     "        else:\n",
     "            self.upscale = None\n",
     "        he_std = gain / (input_channels * kernel_size ** 2) ** (0.5) # He init std\n",
     "        self.kernel_size = kernel_size\n",
     "        if use_wscale:\n",
     "            init_std = 1.0 / lrmul       # store weights near unit scale\n",
     "            self.w_mul = he_std * lrmul  # apply He scaling at runtime (equalized LR)\n",
     "        else:\n",
     "            init_std = he_std / lrmul    # bake He scaling into the init\n",
     "            self.w_mul = lrmul\n",
     "        self.weight = torch.nn.Parameter(torch.randn(output_channels, input_channels, kernel_size, kernel_size) * init_std)\n",
     "        if bias:\n",
     "            self.bias = torch.nn.Parameter(torch.zeros(output_channels))\n",
     "            self.b_mul = lrmul\n",
     "        else:\n",
     "            self.bias = None\n",
     "        self.intermediate = intermediate  # optional op between conv and bias (e.g. BlurLayer)\n",
     "\n",
     "    def forward(self, x):\n",
     "        bias = self.bias\n",
     "        if bias is not None:\n",
     "            bias = bias * self.b_mul  # scale bias by its LR multiplier\n",
     "        \n",
     "        have_convolution = False\n",
     "        if self.upscale is not None and min(x.shape[2:]) * 2 >= 128:\n",
     "            # this is the fused upscale + conv from StyleGAN, sadly this seems incompatible with the non-fused way\n",
     "            # this really needs to be cleaned up and go into the conv...\n",
     "            w = self.weight * self.w_mul\n",
     "            w = w.permute(1, 0, 2, 3)  # conv_transpose2d expects (in, out, kh, kw)\n",
     "            # probably applying a conv on w would be more efficient. also this quadruples the weight (average)?!\n",
     "            w = F.pad(w, (1,1,1,1))\n",
     "            # Sum of four shifted copies: the smoothing trick from the fused StyleGAN op.\n",
     "            w = w[:, :, 1:, 1:]+ w[:, :, :-1, 1:] + w[:, :, 1:, :-1] + w[:, :, :-1, :-1]\n",
     "            x = F.conv_transpose2d(x, w, stride=2, padding=(w.size(-1)-1)//2)\n",
     "            have_convolution = True\n",
     "        elif self.upscale is not None:\n",
     "            # Small feature maps: plain nearest-neighbour upscale, conv follows below.\n",
     "            x = self.upscale(x)\n",
     "    \n",
     "        if not have_convolution and self.intermediate is None:\n",
     "            # Common path: a single 'same'-padded conv with the bias fused in.\n",
     "            return F.conv2d(x, self.weight * self.w_mul, bias, padding=self.kernel_size//2)\n",
     "        elif not have_convolution:\n",
     "            # Defer the bias so `intermediate` sees the raw conv output.\n",
     "            x = F.conv2d(x, self.weight * self.w_mul, None, padding=self.kernel_size//2)\n",
     "        \n",
     "        if self.intermediate is not None:\n",
     "            x = self.intermediate(x)\n",
     "        if bias is not None:\n",
     "            x = x + bias.view(1, -1, 1, 1)  # add the deferred per-channel bias\n",
     "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a3d922ef-7201-4e3a-9bbf-9b27ef077228",
   "metadata": {
    "tags": []
   },
   "outputs": [],
   "source": [
     "class NoiseLayer(nn.Module):\n",
     "    \"\"\"Adds noise. Noise is per pixel (constant over channels) with per-channel weight.\n",
     "\n",
     "    The learned weight starts at zero, so the layer is initially a no-op.\n",
     "    \"\"\"\n",
     "    def __init__(self, channels):\n",
     "        super().__init__()\n",
     "        self.weight = nn.Parameter(torch.zeros(channels))  # per-channel noise strength\n",
     "        self.noise = None  # optional fixed noise, settable from outside (see forward)\n",
     "    \n",
     "    def forward(self, x, noise=None):\n",
     "        if noise is None and self.noise is None:\n",
     "            # Fresh noise each call: one value per pixel, shared across channels.\n",
     "            noise = torch.randn(x.size(0), 1, x.size(2), x.size(3), device=x.device, dtype=x.dtype)\n",
     "        elif noise is None:\n",
     "            # here is a little trick: if you get all the noiselayers and set each\n",
     "            # modules .noise attribute, you can have pre-defined noise.\n",
     "            # Very useful for analysis\n",
     "            noise = self.noise\n",
     "        x = x + self.weight.view(1, -1, 1, 1) * noise\n",
     "        return x  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "ba13d176-9d9c-4f15-a25d-9cf7f15abfed",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T13:55:18.508300Z",
     "iopub.status.busy": "2022-06-12T13:55:18.508022Z",
     "iopub.status.idle": "2022-06-12T13:55:18.516267Z",
     "shell.execute_reply": "2022-06-12T13:55:18.515349Z",
     "shell.execute_reply.started": "2022-06-12T13:55:18.508269Z"
    }
   },
   "outputs": [],
   "source": [
     "class StyleMod(nn.Module):\n",
     "    \"\"\"AdaIN-style modulation: maps a latent vector to per-channel scale and bias.\"\"\"\n",
     "    def __init__(self, latent_size, channels, use_wscale):\n",
     "        super(StyleMod, self).__init__()\n",
     "        # One linear layer producing two values (scale, bias) per channel.\n",
     "        self.lin = MyLinear(latent_size,\n",
     "                            channels * 2,\n",
     "                            gain=1.0, use_wscale=use_wscale)\n",
     "        \n",
     "    def forward(self, x, latent):\n",
     "        style = self.lin(latent) # style => [batch_size, n_channels*2]\n",
     "        # shape = [-1, 2, x.size(1)] + (x.dim() - 2) * [1]\n",
     "        # Images are e.g. 64x3x128x128, so the per-channel factors must\n",
     "        # broadcast over the spatial dims via trailing 1x1 axes.\n",
     "        shape = [-1, 2, x.size(1), 1, 1]\n",
     "        style = style.view(shape)  # [batch_size, 2, n_channels, 1, 1]\n",
     "        # Scale (centered at 1 so zero style = identity), then shift.\n",
     "        x = x * (style[:, 0] + 1.) + style[:, 1]\n",
     "        return x"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "c8d46030-6bfa-40f4-a98e-9208c741d7b9",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T14:02:38.404971Z",
     "iopub.status.busy": "2022-06-12T14:02:38.403900Z",
     "iopub.status.idle": "2022-06-12T14:02:38.409476Z",
     "shell.execute_reply": "2022-06-12T14:02:38.408902Z",
     "shell.execute_reply.started": "2022-06-12T14:02:38.404945Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "class PixelNormLayer(nn.Module):\n",
    "    def __init__(self, epsilon=1e-8):\n",
    "        super().__init__()\n",
    "        self.epsilon = epsilon\n",
    "    def forward(self, x):\n",
    "        return x * torch.rsqrt(torch.mean(x**2, dim=1, keepdim=True) + self.epsilon)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 9,
   "id": "9724604b-1ac2-4ceb-a049-bb826b26aec3",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T14:03:48.421311Z",
     "iopub.status.busy": "2022-06-12T14:03:48.421078Z",
     "iopub.status.idle": "2022-06-12T14:03:48.430980Z",
     "shell.execute_reply": "2022-06-12T14:03:48.430452Z",
     "shell.execute_reply.started": "2022-06-12T14:03:48.421289Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "class BlurLayer(nn.Module):\n",
    "    def __init__(self, kernel=[1, 2, 1], normalize=True, flip=False, stride=1):\n",
    "        super(BlurLayer, self).__init__()\n",
    "        kernel = [1, 2, 1]\n",
    "        kernel = torch.tensor(kernel, dtype=torch.float32)\n",
    "        kernel = kernel[:, None] * kernel[None, :]\n",
    "        kernel = kernel[None, None]\n",
    "        if normalize:\n",
    "            kernel = kernel / kernel.sum()\n",
    "        if flip:\n",
    "            kernel = kernel[:, :, ::-1, ::-1]\n",
    "        self.register_buffer('kernel', kernel)\n",
    "        self.stride = stride\n",
    "    \n",
    "    def forward(self, x):\n",
    "        # expand kernel channels\n",
    "        kernel = self.kernel.expand(x.size(1), -1, -1, -1)\n",
    "        x = F.conv2d(\n",
    "            x,\n",
    "            kernel,\n",
    "            stride=self.stride,\n",
    "            padding=int((self.kernel.size(2)-1)/2),\n",
    "            groups=x.size(1)\n",
    "        )\n",
    "        return x\n",
    "\n",
    "def upscale2d(x, factor=2, gain=1):\n",
    "    x *= gain\n",
    "    if factor != 1:\n",
    "        shape = x.shape\n",
    "        x = x.view(shape[0], shape[1], shape[2], 1, shape[3], 1).expand(-1, -1, -1, factor, -1, factor)\n",
    "        x = x.contiguous().view(shape[0], shape[1], factor * shape[2], factor * shape[3])\n",
    "    return x\n",
    "\n",
    "class Upscale2d(nn.Module):\n",
    "    def __init__(self, factor=2, gain=1):\n",
    "        super().__init__()\n",
    "        assert isinstance(factor, int) and factor >= 1\n",
    "        self.gain = gain\n",
    "        self.factor = factor\n",
    "    def forward(self, x):\n",
    "        return upscale2d(x, factor=self.factor, gain=self.gain)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 10,
   "id": "d3f43013-2776-4506-8ada-ea7b2674dc64",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-06-12T14:04:22.737863Z",
     "iopub.status.busy": "2022-06-12T14:04:22.737140Z",
     "iopub.status.idle": "2022-06-12T14:04:22.748670Z",
     "shell.execute_reply": "2022-06-12T14:04:22.748004Z",
     "shell.execute_reply.started": "2022-06-12T14:04:22.737827Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
    "class G_mapping(nn.Sequential):\n",
    "    def __init__(self, nonlinearity='lrelu', use_wscale=True):\n",
    "        act, gain = {'relu': (torch.relu, np.sqrt(2)),\n",
    "                     'lrelu': (nn.LeakyReLU(negative_slope=0.2), np.sqrt(2))}[nonlinearity]\n",
    "        layers = [\n",
    "            ('pixel_norm', PixelNormLayer()),\n",
    "            ('dense0', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense0_act', act),\n",
    "            ('dense1', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense1_act', act),\n",
    "            ('dense2', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense2_act', act),\n",
    "            ('dense3', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense3_act', act),\n",
    "            ('dense4', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense4_act', act),\n",
    "            ('dense5', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense5_act', act),\n",
    "            ('dense6', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense6_act', act),\n",
    "            ('dense7', MyLinear(512, 512, gain=gain, lrmul=0.01, use_wscale=use_wscale)),\n",
    "            ('dense7_act', act)\n",
    "        ]\n",
    "        super().__init__(OrderedDict(layers))\n",
    "        \n",
    "    def forward(self, x):\n",
    "        x = super().forward(x)\n",
    "        # Broadcast\n",
    "        x = x.unsqueeze(1).expand(-1, 18, -1)\n",
    "        #print(x.shape) 10x18x512\n",
    "        return x\n",
    "\n",
    "class Truncation(nn.Module):\n",
    "    def __init__(self, avg_latent, max_layer=8, threshold=0.7):\n",
    "        super().__init__()\n",
    "        self.max_layer = max_layer\n",
    "        self.threshold = threshold\n",
    "        self.register_buffer('avg_latent', avg_latent)\n",
    "    def forward(self, x):\n",
    "        assert x.dim() == 3\n",
    "        interp = torch.lerp(self.avg_latent, x, self.threshold)\n",
    "        do_trunc = (torch.arange(x.size(1)) < self.max_layer).view(1, -1, 1)\n",
    "        return torch.where(do_trunc, interp, x)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 7,
   "id": "550e8797-867c-4db5-8628-9ca63d19d811",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-05-31T02:33:00.985914Z",
     "iopub.status.busy": "2022-05-31T02:33:00.985629Z",
     "iopub.status.idle": "2022-05-31T02:33:04.125280Z",
     "shell.execute_reply": "2022-05-31T02:33:04.124630Z",
     "shell.execute_reply.started": "2022-05-31T02:33:00.985889Z"
    },
    "tags": []
   },
   "outputs": [],
   "source": [
     "# NOTE(review): `Dev` is not defined anywhere in this notebook — this cell\n",
     "# fails on Restart-&-Run-All unless the defining cell is restored. Confirm\n",
     "# it wasn't deleted during editing.\n",
     "dev = Dev()\n",
     "dev.load_data()\n",
     "# dev.to_cuda()  # presumably moves the dataset/model to the GPU — verify in Dev\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": 8,
   "id": "c6a63a06-bc71-4791-99b6-d888511378f3",
   "metadata": {
    "execution": {
     "iopub.execute_input": "2022-05-31T02:33:09.023937Z",
     "iopub.status.busy": "2022-05-31T02:33:09.023375Z",
     "iopub.status.idle": "2022-05-31T02:36:54.746894Z",
     "shell.execute_reply": "2022-05-31T02:36:54.746170Z",
     "shell.execute_reply.started": "2022-05-31T02:33:09.023908Z"
    },
    "tags": []
   },
   "outputs": [
    {
     "name": "stdout",
     "output_type": "stream",
     "text": [
      "epoch 1 0.5961221016179292 0.13719373324121803 耗时11.170854091644287s\n",
      "epoch 2 0.5359262083935686 0.14323080359743193 耗时11.237585306167603s\n",
      "epoch 3 0.6133088479694139 0.11821600167542441 耗时11.079909324645996s\n",
      "epoch 4 0.6480698742322687 0.10453037231460087 耗时11.330132722854614s\n",
      "epoch 5 0.6597657604381825 0.10610322939216071 耗时11.376296520233154s\n",
      "epoch 6 0.6517164810982525 0.1076386373840336 耗时11.434344053268433s\n",
      "epoch 7 0.6378891641258174 0.11128970802339733 耗时11.264440059661865s\n",
      "epoch 8 0.645541713692439 0.1123869847983886 耗时11.376632452011108s\n",
      "epoch 9 0.6614856984880235 0.10835004317709523 耗时11.162298917770386s\n",
      "epoch 10 0.6784296927289066 0.09997127294285685 耗时11.222132921218872s\n",
      "epoch 11 0.6709121485423838 0.10483129679137825 耗时11.012072563171387s\n",
      "epoch 12 0.6897916725048652 0.1008377105761797 耗时11.169281959533691s\n",
      "epoch 13 0.652095741727668 0.1113354414383061 耗时11.425838708877563s\n",
      "epoch 14 0.6558075335481738 0.11250857356139737 耗时11.496849775314331s\n",
      "epoch 15 0.6438840158984193 0.11445540756496608 耗时11.37925934791565s\n",
      "epoch 16 0.637530365656329 0.12008023704601149 耗时11.386048078536987s\n",
      "epoch 17 0.6167932097983156 0.1231515149339142 耗时11.09522032737732s\n",
      "epoch 18 0.6146744389691924 0.1253534499078225 耗时11.316214323043823s\n",
      "epoch 19 0.6302509679116755 0.12019476775302847 耗时11.326010704040527s\n",
      "epoch 20 0.6087428892397473 0.12931499521956485 耗时11.431272983551025s\n"
     ]
    }
   ],
   "source": [
     "import time\n",
     "# Train for 20 epochs, timing each one and saving a 10x10 sample grid after\n",
     "# every epoch.\n",
     "time0 = time.time()\n",
     "for epoch in range(20):\n",
     "    print(f'epoch {epoch+1}', end=' ')\n",
     "    dev.train()  # presumably prints the two loss values seen in the output — confirm in Dev.train\n",
     "    # Rotate the timer: time_spend is the duration of the epoch just finished.\n",
     "    time0, time_spend = time.time(), time.time()-time0\n",
     "    print(f'耗时{time_spend}s')\n",
     "    # NOTE(review): `epoch % 1` is always 0, so this condition is always true\n",
     "    # and sampling happens every epoch; use `epoch % k` for an interval of k.\n",
     "    if not epoch % 1:\n",
     "        Tools.sample_image(10, epoch+1, dev.g)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "ef07734d-ef95-45d9-b84b-ea6bd0d6e9ba",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.12"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
