{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "af235b87",
   "metadata": {},
   "source": [
     "#### This demo might not work with a T4 in Colab (please use a GPU with higher VRAM). How? `Runtime` -> `Change runtime type`"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "0138621c",
   "metadata": {},
   "source": [
    "[![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/rohitgandikota/sliders/blob/main/demo_image_editing.ipynb)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "31baacbf",
   "metadata": {},
   "outputs": [],
   "source": [
     "!git clone https://github.com/rohitgandikota/sliders.git\n",
     "!cd sliders  # note: runs in a throwaway subshell (no effect); the working directory is actually changed by os.chdir below\n",
    "!pip install -r sliders/requirements.txt\n",
    "!pip install accelerate\n",
    "import os \n",
    "os.chdir('sliders')\n",
    "!wget https://sliders.baulab.info/weights/sd14_sliders/chubby_sd14.pt\n",
    "!wget https://sliders.baulab.info/sample_images/stock_photo_girl.jpg"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "9b52a31e",
   "metadata": {},
   "outputs": [],
   "source": [
    "import torch\n",
    "from PIL import Image\n",
    "import argparse\n",
    "import os, json, random\n",
    "import pandas as pd\n",
    "import matplotlib.pyplot as plt\n",
    "import glob, re\n",
    "import random\n",
    "\n",
    "\n",
    "from safetensors.torch import load_file\n",
    "import matplotlib.image as mpimg\n",
    "import copy\n",
    "import gc\n",
    "from transformers import CLIPTextModel, CLIPTokenizer\n",
    "\n",
    "import diffusers\n",
    "from diffusers import DiffusionPipeline\n",
    "from diffusers import AutoencoderKL, DDPMScheduler, DiffusionPipeline, UNet2DConditionModel, LMSDiscreteScheduler\n",
    "from diffusers.loaders import AttnProcsLayers\n",
    "from diffusers.models.attention_processor import LoRAAttnProcessor, AttentionProcessor\n",
    "\n",
    "\n",
    "from typing import Optional, Union, Tuple, List, Callable, Dict\n",
    "from tqdm.notebook import tqdm\n",
    "from diffusers import StableDiffusionPipeline, DDIMScheduler\n",
    "import torch.nn.functional as nnf\n",
    "import numpy as np\n",
    "import abc\n",
    "import shutil\n",
    "from torch.optim.adam import Adam\n",
    "\n",
    "try:\n",
    "    os.chdir('sliders')\n",
    "except:\n",
    "    pass\n",
    "\n",
    "import trainscripts.textsliders.ptp_utils as ptp_utils\n",
    "from trainscripts.textsliders.lora import LoRANetwork, DEFAULT_TARGET_REPLACE, UNET_TARGET_REPLACE_MODULE_CONV"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "11b0b7e7",
   "metadata": {},
   "outputs": [],
   "source": [
    "scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\", clip_sample=False, set_alpha_to_one=False)\n",
    "LOW_RESOURCE = False \n",
    "NUM_DDIM_STEPS = 50\n",
    "GUIDANCE_SCALE = 7.5\n",
    "MAX_NUM_WORDS = 77\n",
    "weight_dtype = torch.float32 # if you are using GPU >T4 in colab you can use bfloat16\n",
    "device = 'cuda'\n",
    "device = torch.device(device)\n",
    "ldm_stable = StableDiffusionPipeline.from_pretrained(\"CompVis/stable-diffusion-v1-4\", scheduler=scheduler, torch_dtype=weight_dtype).to(device)\n",
    "try:\n",
    "    ldm_stable.disable_xformers_memory_efficient_attention()\n",
    "except AttributeError:\n",
    "    print(\"Attribute disable_xformers_memory_efficient_attention() is missing\")\n",
    "tokenizer = ldm_stable.tokenizer"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "4ecae92b",
   "metadata": {},
   "source": [
     "# Null Inversion of the captured image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fa8b8645",
   "metadata": {},
   "outputs": [],
   "source": [
    "def load_512(image_path, left=0, right=0, top=0, bottom=0):\n",
    "    if type(image_path) is str:\n",
    "        image = np.array(Image.open(image_path))[:, :, :3]\n",
    "    else:\n",
    "        image = image_path\n",
    "    h, w, c = image.shape\n",
    "    left = min(left, w-1)\n",
    "    right = min(right, w - left - 1)\n",
    "    top = min(top, h - left - 1)\n",
    "    bottom = min(bottom, h - top - 1)\n",
    "    image = image[top:h-bottom, left:w-right]\n",
    "    h, w, c = image.shape\n",
    "    if h < w:\n",
    "        offset = (w - h) // 2\n",
    "        image = image[:, offset:offset + h]\n",
    "    elif w < h:\n",
    "        offset = (h - w) // 2\n",
    "        image = image[offset:offset + w]\n",
    "    image = np.array(Image.fromarray(image).resize((512, 512)))\n",
    "    return image\n",
    "\n",
    "\n",
    "class NullInversion:\n",
    "    \n",
    "    def prev_step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: int, sample: Union[torch.FloatTensor, np.ndarray]):\n",
    "        prev_timestep = timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps\n",
    "        alpha_prod_t = self.scheduler.alphas_cumprod[timestep]\n",
    "        alpha_prod_t_prev = self.scheduler.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.scheduler.final_alpha_cumprod\n",
    "        beta_prod_t = 1 - alpha_prod_t\n",
    "        pred_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5\n",
    "        pred_sample_direction = (1 - alpha_prod_t_prev) ** 0.5 * model_output\n",
    "        prev_sample = alpha_prod_t_prev ** 0.5 * pred_original_sample + pred_sample_direction\n",
    "        return prev_sample\n",
    "    \n",
    "    def next_step(self, model_output: Union[torch.FloatTensor, np.ndarray], timestep: int, sample: Union[torch.FloatTensor, np.ndarray]):\n",
    "        timestep, next_timestep = min(timestep - self.scheduler.config.num_train_timesteps // self.scheduler.num_inference_steps, 999), timestep\n",
    "        alpha_prod_t = self.scheduler.alphas_cumprod[timestep] if timestep >= 0 else self.scheduler.final_alpha_cumprod\n",
    "        alpha_prod_t_next = self.scheduler.alphas_cumprod[next_timestep]\n",
    "        beta_prod_t = 1 - alpha_prod_t\n",
    "        next_original_sample = (sample - beta_prod_t ** 0.5 * model_output) / alpha_prod_t ** 0.5\n",
    "        next_sample_direction = (1 - alpha_prod_t_next) ** 0.5 * model_output\n",
    "        next_sample = alpha_prod_t_next ** 0.5 * next_original_sample + next_sample_direction\n",
    "        return next_sample\n",
    "    \n",
    "    def get_noise_pred_single(self, latents, t, context):\n",
    "        noise_pred = self.model.unet(latents, t, encoder_hidden_states=context)[\"sample\"]\n",
    "        return noise_pred\n",
    "\n",
    "    def get_noise_pred(self, latents, t, is_forward=True, context=None):\n",
    "        latents_input = torch.cat([latents] * 2)\n",
    "        latents_input = latents_input.to(self.model.unet.dtype)\n",
    "        if context is None:\n",
    "            context = self.context\n",
    "        guidance_scale = 1 if is_forward else GUIDANCE_SCALE\n",
    "        noise_pred = self.model.unet(latents_input, t, encoder_hidden_states=context)[\"sample\"]\n",
    "        noise_pred_uncond, noise_prediction_text = noise_pred.chunk(2)\n",
    "        noise_pred = noise_pred_uncond + guidance_scale * (noise_prediction_text - noise_pred_uncond)\n",
    "        if is_forward:\n",
    "            latents = self.next_step(noise_pred, t, latents)\n",
    "        else:\n",
    "            latents = self.prev_step(noise_pred, t, latents)\n",
    "        return latents\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def latent2image(self, latents, return_type='np'):\n",
    "        latents = 1 / 0.18215 * latents.detach()\n",
    "        latents = latents.to(self.model.vae.dtype)\n",
    "        image = self.model.vae.decode(latents)['sample']\n",
    "        if return_type == 'np':\n",
    "            image = (image / 2 + 0.5).clamp(0, 1)\n",
    "            image = image.cpu().permute(0, 2, 3, 1).to(torch.float16).numpy()[0]\n",
    "            image = (image * 255).astype(np.uint8)\n",
    "        return image\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def image2latent(self, image):\n",
    "        with torch.no_grad():\n",
    "            if type(image) is Image:\n",
    "                image = np.array(image)\n",
    "            if type(image) is torch.Tensor and image.dim() == 4:\n",
    "                latents = image\n",
    "            else:\n",
    "                image = torch.from_numpy(image).float() / 127.5 - 1\n",
    "                image = image.permute(2, 0, 1).unsqueeze(0).to(device).to(self.model.vae.dtype)\n",
    "                latents = self.model.vae.encode(image)['latent_dist'].mean\n",
    "                latents = latents * 0.18215\n",
    "        return latents\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def init_prompt(self, prompt: str):\n",
    "        uncond_input = self.model.tokenizer(\n",
    "            [\"\"], padding=\"max_length\", max_length=self.model.tokenizer.model_max_length,\n",
    "            return_tensors=\"pt\"\n",
    "        )\n",
    "        uncond_embeddings = self.model.text_encoder(uncond_input.input_ids.to(self.model.device))[0]\n",
    "        text_input = self.model.tokenizer(\n",
    "            [prompt],\n",
    "            padding=\"max_length\",\n",
    "            max_length=self.model.tokenizer.model_max_length,\n",
    "            truncation=True,\n",
    "            return_tensors=\"pt\",\n",
    "        )\n",
    "        text_embeddings = self.model.text_encoder(text_input.input_ids.to(self.model.device))[0]\n",
    "        self.context = torch.cat([uncond_embeddings, text_embeddings])\n",
    "        self.prompt = prompt\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def ddim_loop(self, latent):\n",
    "        uncond_embeddings, cond_embeddings = self.context.chunk(2)\n",
    "        all_latent = [latent]\n",
    "        latent = latent.clone().detach()\n",
    "        latent = latent.to(self.model.unet.dtype)\n",
    "        for i in range(NUM_DDIM_STEPS):\n",
    "            t = self.model.scheduler.timesteps[len(self.model.scheduler.timesteps) - i - 1]\n",
    "            noise_pred = self.get_noise_pred_single(latent, t, cond_embeddings)\n",
    "            latent = self.next_step(noise_pred, t, latent)\n",
    "            all_latent.append(latent)\n",
    "        return all_latent\n",
    "\n",
    "    @property\n",
    "    def scheduler(self):\n",
    "        return self.model.scheduler\n",
    "\n",
    "    @torch.no_grad()\n",
    "    def ddim_inversion(self, image):\n",
    "        latent = self.image2latent(image)\n",
    "        image_rec = self.latent2image(latent)\n",
    "        ddim_latents = self.ddim_loop(latent)\n",
    "        \n",
    "        return image_rec, ddim_latents\n",
    "\n",
    "    def null_optimization(self, latents, num_inner_steps, epsilon):\n",
    "        uncond_embeddings, cond_embeddings = self.context.chunk(2)\n",
    "        uncond_embeddings_list = []\n",
    "        latent_cur = latents[-1]\n",
    "        bar = tqdm(total=num_inner_steps * NUM_DDIM_STEPS)\n",
    "        for i in range(NUM_DDIM_STEPS):\n",
    "            uncond_embeddings = uncond_embeddings.clone().detach()\n",
    "            uncond_embeddings.requires_grad = True\n",
    "            optimizer = Adam([uncond_embeddings], lr=1e-2 * (1. - i / 100.))\n",
    "            latent_prev = latents[len(latents) - i - 2]\n",
    "            t = self.model.scheduler.timesteps[i]\n",
    "            with torch.no_grad():\n",
    "                noise_pred_cond = self.get_noise_pred_single(latent_cur, t, cond_embeddings)\n",
    "            for j in range(num_inner_steps):\n",
    "                noise_pred_uncond = self.get_noise_pred_single(latent_cur, t, uncond_embeddings)\n",
    "                noise_pred = noise_pred_uncond + GUIDANCE_SCALE * (noise_pred_cond - noise_pred_uncond)\n",
    "                latents_prev_rec = self.prev_step(noise_pred, t, latent_cur)\n",
    "                loss = nnf.mse_loss(latents_prev_rec, latent_prev)\n",
    "                optimizer.zero_grad()\n",
    "                loss.backward()\n",
    "                optimizer.step()\n",
    "                loss_item = loss.item()\n",
    "                bar.update()\n",
    "                if loss_item < epsilon + i * 2e-5:\n",
    "                    break\n",
    "            for j in range(j + 1, num_inner_steps):\n",
    "                bar.update()\n",
    "            uncond_embeddings_list.append(uncond_embeddings[:1].detach())\n",
    "            with torch.no_grad():\n",
    "                context = torch.cat([uncond_embeddings, cond_embeddings])\n",
    "                latent_cur = self.get_noise_pred(latent_cur, t, False, context)\n",
    "        bar.close()\n",
    "        return uncond_embeddings_list\n",
    "    \n",
    "    def invert(self, image_path: str, prompt: str, offsets=(0,0,0,0), num_inner_steps=10, early_stop_epsilon=1e-5, verbose=False):\n",
    "        self.init_prompt(prompt)\n",
    "        ptp_utils.register_attention_control(self.model, None)\n",
    "        \n",
    "        image_gt = load_512(image_path, *offsets)\n",
    "        display(Image.fromarray(image_gt))\n",
    "        \n",
    "        if verbose:\n",
    "            print(\"DDIM inversion...\")\n",
    "        image_rec, ddim_latents = self.ddim_inversion(image_gt)\n",
    "        if verbose:\n",
    "            print(\"Null-text optimization...\")\n",
    "        uncond_embeddings = self.null_optimization(ddim_latents, num_inner_steps, early_stop_epsilon)\n",
    "        return (image_gt, image_rec), ddim_latents[-1], uncond_embeddings\n",
    "        \n",
    "    \n",
    "    def __init__(self, model):\n",
    "        scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\", clip_sample=False,\n",
    "                                  set_alpha_to_one=False)\n",
    "        self.model = model\n",
    "        self.tokenizer = self.model.tokenizer\n",
    "        self.model.scheduler.set_timesteps(NUM_DDIM_STEPS)\n",
    "        self.prompt = None\n",
    "        self.context = None\n",
    "\n",
    "null_inversion = NullInversion(ldm_stable)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "75a55386",
   "metadata": {},
   "outputs": [],
   "source": [
    "image_path = \"stock_photo_girl.jpg\"\n",
    "prompt = \"photo of a person\"\n",
    "(image_gt, image_enc), x_t, uncond_embeddings = null_inversion.invert(image_path, prompt, offsets=(0,0,0,0), verbose=True)\n",
    "\n",
    "print(\"Modify or remove offsets according to your image!\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a7956d5d",
   "metadata": {},
   "outputs": [],
   "source": [
    "Image.fromarray(image_enc)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "536e24dd",
   "metadata": {},
   "outputs": [],
   "source": [
    "import copy\n",
    "uncond_embeddings_copy = copy.deepcopy(uncond_embeddings)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "879f6150",
   "metadata": {},
   "outputs": [],
   "source": [
    "def flush():\n",
    "    torch.cuda.empty_cache()\n",
    "    gc.collect()\n",
    "flush()\n",
    "width = 512\n",
    "height = 512 \n",
    "steps = 50  \n",
    "cfg_scale = 7.5 \n",
    "\n",
    "del ldm_stable\n",
    "flush()"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "afaffa58",
   "metadata": {},
   "outputs": [],
   "source": [
    "pretrained_model_name_or_path = \"CompVis/stable-diffusion-v1-4\"\n",
    "\n",
    "revision = None\n",
    "device = 'cuda'\n",
    "rank = 4\n",
    "\n",
    "\n",
    "# Load scheduler, tokenizer and models.\n",
    "noise_scheduler = DDIMScheduler(beta_start=0.00085, beta_end=0.012, beta_schedule=\"scaled_linear\", clip_sample=False, set_alpha_to_one=False)\n",
    "tokenizer = CLIPTokenizer.from_pretrained(\n",
    "    pretrained_model_name_or_path, subfolder=\"tokenizer\", revision=revision\n",
    ")\n",
    "text_encoder = CLIPTextModel.from_pretrained(\n",
    "    pretrained_model_name_or_path, subfolder=\"text_encoder\", revision=revision\n",
    ")\n",
    "vae = AutoencoderKL.from_pretrained(pretrained_model_name_or_path, subfolder=\"vae\", revision=revision)\n",
    "unet = UNet2DConditionModel.from_pretrained(\n",
    "    pretrained_model_name_or_path, subfolder=\"unet\", revision=revision\n",
    ")\n",
    "# freeze parameters of models to save more memory\n",
    "unet.requires_grad_(False)\n",
    "unet.to(device, dtype=weight_dtype)\n",
    "vae.requires_grad_(False)\n",
    "\n",
    "text_encoder.requires_grad_(False)\n",
    "\n",
    "# For mixed precision training we cast all non-trainable weigths (vae, non-lora text_encoder and non-lora unet) to half-precision\n",
    "# as these weights are only used for inference, keeping weights in full precision is not required.\n",
    "\n",
    "\n",
    "# Move unet, vae and text_encoder to device and cast to weight_dtype\n",
    "vae.requires_grad_(False)\n",
    "vae.to(device, dtype=weight_dtype)\n",
    "text_encoder.to(device, dtype=weight_dtype)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "fe6a25cf",
   "metadata": {},
   "outputs": [],
   "source": [
    "slider1 = 'chubby_sd14.pt'\n",
    "\n",
    "train_method = 'noxattn'\n",
    "network_type = \"c3lier\"\n",
    "if train_method == 'xattn':\n",
    "    network_type = 'lierla'\n",
    "\n",
    "modules = DEFAULT_TARGET_REPLACE\n",
    "if network_type == \"c3lier\":\n",
    "    modules += UNET_TARGET_REPLACE_MODULE_CONV\n",
    "import os\n",
    "model_name = slider1\n",
    "\n",
    "name = os.path.basename(model_name)\n",
    "alpha = 1.0\n",
    "# freeze parameters of models to save more memory\n",
    "unet.requires_grad_(False)\n",
    "unet.to(device, dtype=weight_dtype)\n",
    "network1 = LoRANetwork(\n",
    "        unet,\n",
    "        rank=4,\n",
    "        multiplier=1.0,\n",
    "        alpha=alpha,\n",
    "        train_method=train_method,\n",
    "    ).to(device, dtype=weight_dtype)\n",
    "network1.load_state_dict(torch.load(slider1))\n",
    "\n",
    "torch_device = device\n",
    "negative_prompt = None\n",
    "batch_size = 1\n",
    "height = 512\n",
    "width = 512\n",
    "ddim_steps = 50\n",
    "guidance_scale = 7.5"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a1cadcd6",
   "metadata": {},
   "outputs": [],
   "source": [
    "scales1 = [0, 2, 4]\n",
    "start_noise = 500 # use smaller values for real image editing so that the identity does not change\n",
    "\n",
    "images_list = []\n",
    "for scale1 in scales1:\n",
    "    \n",
    "   \n",
    "    text_input = tokenizer(prompt, padding=\"max_length\", max_length=tokenizer.model_max_length, truncation=True, return_tensors=\"pt\")\n",
    "\n",
    "    text_embeddings_ = text_encoder(text_input.input_ids.to(torch_device))[0]\n",
    "\n",
    "    max_length = text_input.input_ids.shape[-1]\n",
    "\n",
    "    noise_scheduler.set_timesteps(ddim_steps)\n",
    "\n",
    "\n",
    "    latents = x_t* noise_scheduler.init_noise_sigma\n",
    "    latents = latents.to(unet.dtype)\n",
    "    cnt = -1\n",
    "    for t in tqdm(noise_scheduler.timesteps):\n",
    "        cnt+=1\n",
    "        if t>start_noise:\n",
    "            network1.set_lora_slider(scale=0)\n",
    "        else:\n",
    "            network1.set_lora_slider(scale=scale1)\n",
    "        \n",
    "        text_embeddings = torch.cat([uncond_embeddings_copy[cnt].expand(*text_embeddings_.shape), text_embeddings_])\n",
    "        # expand the latents if we are doing classifier-free guidance to avoid doing two forward passes.\n",
    "        latent_model_input = torch.cat([latents] * 2)\n",
    "        text_embeddings = text_embeddings.to(weight_dtype)\n",
    "        latent_model_input = noise_scheduler.scale_model_input(latent_model_input, timestep=t)\n",
    "        # predict the noise residual\n",
    "        \n",
    "        with torch.no_grad():\n",
    "            with network1:\n",
    "                noise_pred = unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample\n",
    "        # perform guidance\n",
    "        noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)\n",
    "        noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)\n",
    "\n",
    "        # compute the previous noisy sample x_t -> x_t-1\n",
    "        latents = noise_scheduler.step(noise_pred, t, latents).prev_sample\n",
    "\n",
    "    # scale and decode the image latents with vae\n",
    "    latents = 1 / 0.18215 * latents\n",
    "    with torch.no_grad():\n",
    "        image = vae.decode(latents).sample\n",
    "    image = (image / 2 + 0.5).clamp(0, 1)\n",
    "    image = image.detach().cpu().permute(0, 2, 3, 1).to(torch.float16).numpy()\n",
    "    images = (image * 255).round().astype(\"uint8\")\n",
    "    pil_images = [Image.fromarray(image) for image in images]\n",
    "    images_list.append(pil_images[0])\n",
    "\n",
    "fig, ax = plt.subplots(1, len(images_list), figsize=(20,4))\n",
    "for i, a in enumerate(ax):\n",
    "    a.imshow(images_list[i])\n",
    "    a.axis('off')\n",
    "\n",
    "\n",
    "plt.show()\n",
    "\n",
    "for im in images_list:\n",
    "    display(im)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "986b1180",
   "metadata": {},
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3 (ipykernel)",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.9.15"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
