{
 "nbformat": 4,
 "nbformat_minor": 0,
 "metadata": {
  "colab": {
   "name": "VQGAN+CLIP",
   "private_outputs": true,
   "provenance": [],
   "collapsed_sections": [],
   "machine_shape": "hm",
   "include_colab_link": true
  },
  "kernelspec": {
   "name": "python3",
   "display_name": "Python 3"
  },
  "language_info": {
   "name": "python"
  },
  "accelerator": "GPU"
 },
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {
    "id": "view-in-github",
    "colab_type": "text",
    "pycharm": {
     "name": "#%% md\n"
    }
   },
   "source": [
    "<a href=\"https://colab.research.google.com/github/NJUSTgzy/pythonProject3/blob/master/VQGAN%2BCLIP.ipynb\" target=\"_parent\"><img src=\"https://colab.research.google.com/assets/colab-badge.svg\" alt=\"Open In Colab\"/></a>"
   ]
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "VA1PHoJrRiK9",
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "source": [
    "!git clone https://github.com/openai/CLIP\n",
    "!git clone https://github.com/alibaba/AliceMind.git\n",
    "!git clone https://github.com/CompVis/taming-transformers.git\n",
    "!pip install ftfy regex tqdm omegaconf pytorch-lightning\n",
    "!pip install kornia\n",
    "!pip install imageio-ffmpeg   \n",
    "!pip install einops          \n",
    "!mkdir steps\n",
    "\n",
    "\n",
    "!wget --load-cookies /tmp/cookies.txt \"https://docs.google.com/uc?export=download&confirm=$(wget --quiet --save-cookies /tmp/cookies.txt --keep-session-cookies --no-check-certificate 'https://docs.google.com/uc?export=download&id=1mSp-4KfBwGKUAdWiW-ctOR9Qgi0a-w9B' -O- | sed -rn 's/.*confirm=([0-9A-Za-z_]+).*/\\1\\n/p')&id=1mSp-4KfBwGKUAdWiW-ctOR9Qgi0a-w9B\" -O palm_model_and_data.tar.gz && rm -rf /tmp/cookies.txt)\n",
    "!tar -zxvf palm_model_and_data.tar.gz"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "FhhdWrSxQhwg",
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "source": [
     "# Download the VQGAN model config and checkpoint\n",
    "!curl -L -o vqgan_imagenet_f16_16384.yaml -C - 'https://heibox.uni-heidelberg.de/d/a7530b09fed84f80a887/files/?p=%2Fconfigs%2Fmodel.yaml&dl=1' #ImageNet 16384\n",
    "!curl -L -o vqgan_imagenet_f16_16384.ckpt -C - 'https://heibox.uni-heidelberg.de/d/a7530b09fed84f80a887/files/?p=%2Fckpts%2Flast.ckpt&dl=1' #ImageNet 16384\n"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "EXMSuW2EQWsd",
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "source": [
    "import argparse\n",
    "import math\n",
    "from pathlib import Path\n",
    "import sys\n",
    "\n",
    "\n",
    "sys.path.insert(1, '/content/taming-transformers')\n",
    "sys.path.insert(1, '/content/AliceMind')\n",
    "from IPython import display\n",
    "from base64 import b64encode\n",
    "from omegaconf import OmegaConf\n",
    "from PIL import Image\n",
    "from taming.models import cond_transformer, vqgan\n",
    "import taming.modules \n",
     "# Import the PALM encoder model module\n",
    "from AliceMind.PALM.models import encoder\n",
    "import torch\n",
    "from torch import nn, optim\n",
    "from torch.nn import functional as F\n",
    "from torchvision import transforms\n",
    "from torchvision.transforms import functional as TF\n",
    "from tqdm.notebook import tqdm\n",
    "\n",
    "from CLIP import clip\n",
    "import kornia.augmentation as K\n",
    "import numpy as np\n",
    "import imageio\n",
    "from PIL import ImageFile, Image\n",
    "ImageFile.LOAD_TRUNCATED_IMAGES = True\n",
    "\n",
    "def sinc(x):\n",
    "    return torch.where(x != 0, torch.sin(math.pi * x) / (math.pi * x), x.new_ones([]))\n",
    "\n",
    "\n",
    "def lanczos(x, a):\n",
    "    cond = torch.logical_and(-a < x, x < a)\n",
    "    out = torch.where(cond, sinc(x) * sinc(x/a), x.new_zeros([]))\n",
    "    return out / out.sum()\n",
    "\n",
    "\n",
    "def ramp(ratio, width):\n",
    "    n = math.ceil(width / ratio + 1)\n",
    "    out = torch.empty([n])\n",
    "    cur = 0\n",
    "    for i in range(out.shape[0]):\n",
    "        out[i] = cur\n",
    "        cur += ratio\n",
    "    return torch.cat([-out[1:].flip([0]), out])[1:-1]\n",
    "\n",
    "\n",
    "def resample(input, size, align_corners=True):\n",
    "    n, c, h, w = input.shape\n",
    "    dh, dw = size\n",
    "\n",
    "    input = input.view([n * c, 1, h, w])\n",
    "\n",
    "    if dh < h:\n",
    "        kernel_h = lanczos(ramp(dh / h, 2), 2).to(input.device, input.dtype)\n",
    "        pad_h = (kernel_h.shape[0] - 1) // 2\n",
    "        input = F.pad(input, (0, 0, pad_h, pad_h), 'reflect')\n",
    "        input = F.conv2d(input, kernel_h[None, None, :, None])\n",
    "\n",
    "    if dw < w:\n",
    "        kernel_w = lanczos(ramp(dw / w, 2), 2).to(input.device, input.dtype)\n",
    "        pad_w = (kernel_w.shape[0] - 1) // 2\n",
    "        input = F.pad(input, (pad_w, pad_w, 0, 0), 'reflect')\n",
    "        input = F.conv2d(input, kernel_w[None, None, None, :])\n",
    "\n",
    "    input = input.view([n, c, h, w])\n",
    "    return F.interpolate(input, size, mode='bicubic', align_corners=align_corners)\n",
    "\n",
    "\n",
    "class ReplaceGrad(torch.autograd.Function):\n",
    "    @staticmethod\n",
    "    def forward(ctx, x_forward, x_backward):\n",
    "        ctx.shape = x_backward.shape\n",
    "        return x_forward\n",
    "\n",
    "    @staticmethod\n",
    "    def backward(ctx, grad_in):\n",
    "        return None, grad_in.sum_to_size(ctx.shape)\n",
    "\n",
    "\n",
    "replace_grad = ReplaceGrad.apply\n",
    "\n",
    "\n",
    "class ClampWithGrad(torch.autograd.Function):\n",
    "    @staticmethod\n",
    "    def forward(ctx, input, min, max):\n",
    "        ctx.min = min\n",
    "        ctx.max = max\n",
    "        ctx.save_for_backward(input)\n",
    "        return input.clamp(min, max)\n",
    "\n",
    "    @staticmethod\n",
    "    def backward(ctx, grad_in):\n",
    "        input, = ctx.saved_tensors\n",
    "        return grad_in * (grad_in * (input - input.clamp(ctx.min, ctx.max)) >= 0), None, None\n",
    "\n",
    "\n",
    "clamp_with_grad = ClampWithGrad.apply\n",
    "\n",
    "\n",
    "def vector_quantize(x, codebook):\n",
    "    d = x.pow(2).sum(dim=-1, keepdim=True) + codebook.pow(2).sum(dim=1) - 2 * x @ codebook.T\n",
    "    indices = d.argmin(-1)\n",
    "    x_q = F.one_hot(indices, codebook.shape[0]).to(d.dtype) @ codebook\n",
    "    return replace_grad(x_q, x)\n",
    "\n",
    "\n",
    "class Prompt(nn.Module):\n",
    "    def __init__(self, embed, weight=1., stop=float('-inf')):\n",
    "        super().__init__()\n",
    "        self.register_buffer('embed', embed)\n",
    "        self.register_buffer('weight', torch.as_tensor(weight))\n",
    "        self.register_buffer('stop', torch.as_tensor(stop))\n",
    "\n",
    "    def forward(self, input):\n",
    "        input_normed = F.normalize(input.unsqueeze(1), dim=2)\n",
    "        embed_normed = F.normalize(self.embed.unsqueeze(0), dim=2)\n",
    "        dists = input_normed.sub(embed_normed).norm(dim=2).div(2).arcsin().pow(2).mul(2)\n",
    "        dists = dists * self.weight.sign()\n",
    "        return self.weight.abs() * replace_grad(dists, torch.maximum(dists, self.stop)).mean()\n",
    "\n",
    "\n",
    "def parse_prompt(prompt):\n",
    "    vals = prompt.rsplit(':', 2)\n",
    "    vals = vals + ['', '1', '-inf'][len(vals):]\n",
    "    return vals[0], float(vals[1]), float(vals[2])\n",
    "\n",
    "\n",
    "class MakeCutouts(nn.Module):\n",
    "    def __init__(self, cut_size, cutn, cut_pow=1.):\n",
    "        super().__init__()\n",
    "        self.cut_size = cut_size\n",
    "        self.cutn = cutn\n",
    "        self.cut_pow = cut_pow\n",
    "\n",
    "        self.augs = nn.Sequential(\n",
    "\n",
    "            K.RandomAffine(degrees=15, translate=0.1, p=0.7, padding_mode='border'),\n",
    "            K.RandomPerspective(0.7,p=0.7),\n",
    "            K.ColorJitter(hue=0.1, saturation=0.1, p=0.7),\n",
    "            K.RandomErasing((.1, .4), (.3, 1/.3), same_on_batch=True, p=0.7),\n",
    "            \n",
    ")\n",
    "        self.noise_fac = 0.1\n",
    "        self.av_pool = nn.AdaptiveAvgPool2d((self.cut_size, self.cut_size))\n",
    "        self.max_pool = nn.AdaptiveMaxPool2d((self.cut_size, self.cut_size))\n",
    "\n",
    "    def forward(self, input):\n",
    "        sideY, sideX = input.shape[2:4]\n",
    "        max_size = min(sideX, sideY)\n",
    "        min_size = min(sideX, sideY, self.cut_size)\n",
    "        cutouts = []\n",
    "        \n",
    "        for _ in range(self.cutn):\n",
    "            \n",
    "            cutout = (self.av_pool(input) + self.max_pool(input))/2\n",
    "            cutouts.append(cutout)\n",
    "        batch = self.augs(torch.cat(cutouts, dim=0))\n",
    "        if self.noise_fac:\n",
    "            facs = batch.new_empty([self.cutn, 1, 1, 1]).uniform_(0, self.noise_fac)\n",
    "            batch = batch + facs * torch.randn_like(batch)\n",
    "        return batch\n",
    "\n",
    "#加载palm_encoder\n",
    "def load_alice_model():\n",
    "  device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n",
    "  path=\"/content/palm_model_and_data/model_palm_en_base.pt\"\n",
    "  model = encoder.ExtTransformerEncoder\n",
    "  checkpoint = torch.load(path, map_location=device)\n",
    "  model.load_state_dict(checkpoint)\n",
    "  return model\n",
    "\n",
    "\n",
    "def load_vqgan_model(config_path, checkpoint_path):\n",
    "    config = OmegaConf.load(config_path)\n",
    "    if config.model.target == 'taming.models.vqgan.VQModel':\n",
    "        model = vqgan.VQModel(**config.model.params)\n",
    "        model.eval().requires_grad_(False)\n",
    "        model.init_from_ckpt(checkpoint_path)\n",
    "    elif config.model.target == 'taming.models.vqgan.GumbelVQ':\n",
    "        model = vqgan.GumbelVQ(**config.model.params)\n",
    "        model.eval().requires_grad_(False)\n",
    "        model.init_from_ckpt(checkpoint_path)\n",
    "    elif config.model.target == 'taming.models.cond_transformer.Net2NetTransformer':\n",
    "        parent_model = cond_transformer.Net2NetTransformer(**config.model.params)\n",
    "        parent_model.eval().requires_grad_(False)\n",
    "        parent_model.init_from_ckpt(checkpoint_path)\n",
    "        model = parent_model.first_stage_model\n",
    "    else:\n",
    "        raise ValueError(f'unknown model type: {config.model.target}')\n",
    "    del model.loss\n",
    "    return model\n",
    "\n",
    "\n",
    "def resize_image(image, out_size):\n",
    "    ratio = image.size[0] / image.size[1]\n",
    "    area = min(image.size[0] * image.size[1], out_size[0] * out_size[1])\n",
    "    size = round((area * ratio)**0.5), round((area / ratio)**0.5)\n",
    "    return image.resize(size, Image.LANCZOS)"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "ZdlpRFL8UAlW",
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "source": [
     "# Text prompt and generation settings\n",
    "texts = \"a fish\"\n",
    "width =  512\n",
    "height = 512\n",
    "model = \"vqgan_imagenet_f16_16384\" \n",
    "images_interval =  50\n",
    "init_image = \"\"\n",
    "target_images = \"\"\n",
    "seed = 42\n",
    "max_iterations = 200\n",
    "\n",
    "model_names={\"vqgan_imagenet_f16_16384\": 'ImageNet 16384',\"vqgan_imagenet_f16_1024\":\"ImageNet 1024\", 'vqgan_openimages_f16_8192':'OpenImages 8912',\n",
    "                 \"wikiart_1024\":\"WikiArt 1024\", \"wikiart_16384\":\"WikiArt 16384\", \"coco\":\"COCO-Stuff\", \"faceshq\":\"FacesHQ\", \"sflckr\":\"S-FLCKR\"}\n",
    "name_model = model_names[model]     \n",
    "\n",
    "if seed == -1:\n",
    "    seed = None\n",
    "if init_image == \"None\":\n",
    "    init_image = None\n",
    "if target_images == \"None\" or not target_images:\n",
    "    target_images = []\n",
    "else:\n",
    "    target_images = target_images.split(\"|\")\n",
    "    target_images = [image.strip() for image in target_images]\n",
    "\n",
    "texts = [phrase.strip() for phrase in texts.split(\"|\")]\n",
    "if texts == ['']:\n",
    "    texts = []\n",
    "\n",
    "\n",
    "args = argparse.Namespace(\n",
    "    prompts=texts,\n",
    "    image_prompts=target_images,\n",
    "    noise_prompt_seeds=[],\n",
    "    noise_prompt_weights=[],\n",
    "    size=[width, height],\n",
    "    init_image=init_image,\n",
    "    init_weight=0.,\n",
    "    clip_model='ViT-B/32',\n",
    "    vqgan_config=f'{model}.yaml',\n",
    "    vqgan_checkpoint=f'{model}.ckpt',\n",
    "    step_size=0.1,\n",
    "    cutn=32,\n",
    "    cut_pow=1.,\n",
    "    display_freq=images_interval,\n",
    "    seed=seed,\n",
    ")"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "JX56bq4rEKIp",
    "cellView": "code",
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "source": [
    "#@title Actually do the run...\n",
    "from urllib.request import urlopen\n",
    "import numpy as np\n",
    "\n",
    "\n",
    "device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n",
    "print('Using device:', device)\n",
    "if texts:\n",
    "    print('Using texts:', texts)\n",
    "if target_images:\n",
    "    print('Using image prompts:', target_images)\n",
    "if args.seed is None:\n",
    "    seed = torch.seed()\n",
    "else:\n",
    "    seed = args.seed\n",
    "torch.manual_seed(seed)\n",
    "print('Using seed:', seed)\n",
    "\n",
    "\n",
    "#palm_encoder=load_alice_model()\n",
    "palm_encoder=encoder\n",
    "\n",
    "model = load_vqgan_model(args.vqgan_config, args.vqgan_checkpoint).to(device)\n",
    "perceptor = clip.load(args.clip_model, jit=False)[0].eval().requires_grad_(False).to(device)\n",
    "\n",
    "\n",
    "cut_size = perceptor.visual.input_resolution\n",
    "\n",
    "f = 2**(model.decoder.num_resolutions - 1)\n",
    "make_cutouts = MakeCutouts(cut_size, args.cutn, cut_pow=args.cut_pow)\n",
    "\n",
    "toksX, toksY = args.size[0] // f, args.size[1] // f\n",
    "sideX, sideY = toksX * f, toksY * f\n",
    "\n",
    "\n",
    "e_dim = model.quantize.e_dim\n",
    "n_toks = model.quantize.n_e\n",
    "z_min = model.quantize.embedding.weight.min(dim=0).values[None, :, None, None]\n",
    "z_max = model.quantize.embedding.weight.max(dim=0).values[None, :, None, None]\n",
    "\n",
    "\n",
    "\n",
    "#z,即图片的token\n",
    "one_hot = F.one_hot(torch.randint(n_toks, [toksY * toksX], device=device), n_toks).float()\n",
    "if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':\n",
    "    z = one_hot @ model.quantize.embedding.weight\n",
    "else:\n",
    "    z = one_hot @ model.quantize.embedding.weight\n",
    "  \n",
    "#reshape成固定大小,而后赋随机值\n",
    "z = z.view([-1, toksY, toksX, e_dim]).permute(0, 3, 1, 2) \n",
    "z = torch.rand_like(z)*2\n",
    "\n",
    "#z = palm_encoder(texts)\n",
    "z_orig = z.clone()\n",
    "z.requires_grad_(True)\n",
    "opt = optim.Adam([z], lr=args.step_size)\n",
    "\n",
    "normalize = transforms.Normalize(mean=[0.48145466, 0.4578275, 0.40821073],\n",
    "                                  std=[0.26862954, 0.26130258, 0.27577711])\n",
    "\n",
    "\n",
    "\n",
    "pMs = []\n",
    "\n",
    "for prompt in args.prompts:\n",
    "    txt, weight, stop = parse_prompt(prompt)\n",
    "    embed = perceptor.encode_text(clip.tokenize(txt).to(device)).float()\n",
    "    pMs.append(Prompt(embed, weight, stop).to(device))\n",
    "\n",
    "for prompt in args.image_prompts:\n",
    "    path, weight, stop = parse_prompt(prompt)\n",
    "    img = Image.open(path)\n",
    "    pil_image = img.convert('RGB')\n",
    "    img = resize_image(pil_image, (sideX, sideY))\n",
    "    batch = make_cutouts(TF.to_tensor(img).unsqueeze(0).to(device))\n",
    "    embed = perceptor.encode_image(normalize(batch)).float()\n",
    "    pMs.append(Prompt(embed, weight, stop).to(device))\n",
    "\n",
    "for seed, weight in zip(args.noise_prompt_seeds, args.noise_prompt_weights):\n",
    "    gen = torch.Generator().manual_seed(seed)\n",
    "    embed = torch.empty([1, perceptor.visual.output_dim]).normal_(generator=gen)\n",
    "    pMs.append(Prompt(embed, weight).to(device))\n",
    "\n",
     "# Decode the latent z into an image\n",
    "def synth(z):\n",
    "    if args.vqgan_checkpoint == 'vqgan_openimages_f16_8192.ckpt':\n",
    "        z_q = vector_quantize(z.movedim(1, 3), model.quantize.embed.weight).movedim(3, 1)\n",
    "    else:\n",
    "        z_q = vector_quantize(z.movedim(1, 3), model.quantize.embedding.weight).movedim(3, 1)\n",
    "    return clamp_with_grad(model.decode(z_q).add(1).div(2), 0, 1)\n",
    "\n",
    "@torch.no_grad()\n",
    "def checkin(i, losses):\n",
    "    losses_str = ', '.join(f'{loss.item():g}' for loss in losses)\n",
    "    tqdm.write(f'i: {i}, loss: {sum(losses).item():g}, losses: {losses_str}')\n",
    "    out = synth(z)\n",
    "    TF.to_pil_image(out[0].cpu()).save('progress.png')\n",
    "    display.display(display.Image('progress.png'))\n",
    "\n",
    "def ascend_txt():\n",
    "    global i\n",
    "    out = synth(z)\n",
    "    iii = perceptor.encode_image(normalize(make_cutouts(out))).float()\n",
    "    \n",
    "    result = []\n",
    "\n",
    "    if args.init_weight:\n",
    "        result.append(F.mse_loss(z, torch.zeros_like(z_orig)) * ((1/torch.tensor(i*2 + 1))*args.init_weight) / 2)\n",
    "    for prompt in pMs:\n",
    "        result.append(prompt(iii))\n",
    "    img = np.array(out.mul(255).clamp(0, 255)[0].cpu().detach().numpy().astype(np.uint8))[:,:,:]\n",
    "    img = np.transpose(img, (1, 2, 0))\n",
    "    imageio.imwrite('./steps/' + str(i) + '.png', np.array(img))\n",
    "\n",
    "    return result\n",
    "\n",
    "def train(i):\n",
    "    opt.zero_grad()\n",
    "    lossAll = ascend_txt()\n",
    "    if i % args.display_freq == 0:\n",
    "        checkin(i, lossAll)\n",
    "       \n",
    "    loss = sum(lossAll)\n",
    "    loss.backward()\n",
    "    opt.step()\n",
    "    with torch.no_grad():\n",
    "        z.copy_(z.maximum(z_min).minimum(z_max))\n",
    "\n",
    "i = 0\n",
    "try:\n",
    "    with tqdm() as pbar:\n",
    "        while True:\n",
    "            train(i)\n",
    "            if i == max_iterations:\n",
    "                break\n",
    "            i += 1\n",
    "            pbar.update()\n",
    "except KeyboardInterrupt:\n",
    "    pass"
   ],
   "execution_count": null,
   "outputs": []
  },
  {
   "cell_type": "code",
   "metadata": {
    "id": "gmK0k5zQeT5u",
    "cellView": "form",
    "pycharm": {
     "name": "#%%\n"
    }
   },
   "source": [
    "#@title Generate a video with the result\n",
    "\n",
    "init_frame = 1 #This is the frame where the video will start\n",
    "last_frame = i #You can change i to the number of the last frame you want to generate. It will raise an error if that number of frames does not exist.\n",
    "\n",
    "min_fps = 10\n",
    "max_fps = 60\n",
    "\n",
    "total_frames = last_frame-init_frame\n",
    "\n",
    "length = 15 #Desired time of the video in seconds\n",
    "\n",
    "frames = []\n",
    "tqdm.write('Generating video...')\n",
    "for i in range(init_frame,last_frame): #\n",
    "    frames.append(Image.open(\"./steps/\"+ str(i) +'.png'))\n",
    "\n",
    "#fps = last_frame/10\n",
    "fps = np.clip(total_frames/length,min_fps,max_fps)\n",
    "\n",
    "from subprocess import Popen, PIPE\n",
    "p = Popen(['ffmpeg', '-y', '-f', 'image2pipe', '-vcodec', 'png', '-r', str(fps), '-i', '-', '-vcodec', 'libx264', '-r', str(fps), '-pix_fmt', 'yuv420p', '-crf', '17', '-preset', 'veryslow', 'video.mp4'], stdin=PIPE)\n",
    "for im in tqdm(frames):\n",
    "    im.save(p.stdin, 'PNG')\n",
    "p.stdin.close()\n",
    "p.wait()\n",
    "mp4 = open('video.mp4','rb').read()\n",
    "data_url = \"data:video/mp4;base64,\" + b64encode(mp4).decode()\n",
    "display.HTML(\"\"\"\n",
    "<video width=400 controls>\n",
    "      <source src=\"%s\" type=\"video/mp4\">\n",
    "</video>\n",
    "\"\"\" % data_url)"
   ],
   "execution_count": null,
   "outputs": []
  }
 ]
}