{
 "cells": [
  {
   "cell_type": "markdown",
   "id": "2283b9bb-1eba-4c04-b87c-3c34468c4add",
   "metadata": {
    "tags": []
   },
   "source": [
    "<h1><font style=\"background: linear-gradient(to right,gold, red,pink, green);-webkit-background-clip: text;-webkit-text-fill-color: transparent;\">SDXL模型的使用</font></h1>\n",
    "\n",
    "**优化了模型加载，避免每次调用都要初始化模型。**\n",
    "\n",
    "\n",
    "包含文生图中 `lora`（只能开启和关闭权重无法调节）、`unet`、`vae`以及`controlnet`的使用方法。\n",
    "需要`32G` gpu环境。\n",
    "![](https://ai-studio-static-online.cdn.bcebos.com/18040721087745f4bbae0720f754917000d387bd1ad14c3c9ba3e5721585c49a)\n",
    "\n",
     "*注意：每个模块都比较占用内存，运行完记得重启内核*"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "6409de4f-0a14-4d2a-a8bc-2b5852a0de72",
   "metadata": {},
   "source": [
    "## 环境准备\n",
    "\n",
    "运行完后需要`重启内核`，以加载新安装依赖。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "7b154148-19bf-4c22-96b9-9a15651128bd",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "import os\n",
    "\n",
    "evn='''paddlenlp>=3.0.0b2\n",
    "safetensors>=0.3.1\n",
    "ftfy\n",
    "regex\n",
    "Pillow\n",
    "opencv-python\n",
    "av\n",
    "# for test\n",
    "parameterized\n",
    "requests_mock\n",
    "omegaconf\n",
    "note_seq\n",
    "urllib3<=2.0.0\n",
    "einops>=0.6.1\n",
    "paddlesde\n",
    "ligo-segments\n",
    "huggingface_hub==0.25.0\n",
    "hf_transfer\n",
    "ppdiffusers \n",
    "paddlenlp \n",
    "einops\n",
    "insightface \n",
    "onnxruntime \n",
    "gradio==3.40\n",
    "wandb\n",
    "munch\n",
    "scikit-image\n",
    "natsort\n",
    "'''\n",
    "with open(\"./requirements.txt\", \"w\", encoding=\"utf-8\") as fp:\n",
    "    fp.writelines(evn)\n",
    "# os.makedirs(\"/home/aistudio/external-libraries\", exist_ok=True)\n",
    "# os.system(\"pip install -r requirements.txt -t /home/aistudio/external-libraries\")\n",
    "os.system(\"pip install -r requirements.txt --user\")\n"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "3ae2651f-2358-41bf-b1c2-a366e9264359",
   "metadata": {},
   "source": [
    "## API测试\n",
    "用于测试api是否可以运行"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e9b8d838-b9b0-42ab-9db7-0403697d0a57",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "from PIL import Image\n",
    "\n",
    "# 生成结果批量保存\n",
    "def multi_dtype_images_save(image, fn, suffix='png'):  \n",
    "    try:\n",
    "        image = image if not isinstance(image, (np.ndarray, paddle.Tensor)) \\\n",
    "            else Image.fromarray(image.numpy()) if isinstance(image, paddle.Tensor) \\\n",
    "            else Image.fromarray(image) \n",
    "        img = load_image(image)\n",
    "        # 保存图像  \n",
    "        img.save(fn + '.' + suffix)\n",
    "    except Exception as e:\n",
    "        print(e)\n",
    "    \n",
    "def batch_save(images, base_fn, suffix='png'): \n",
    "    \"\"\"\n",
    "    批量保存pil、numpy、pd或path数据格式的图片。\n",
    "    Args：\n",
    "        images：\n",
    "            图片，支持pil、numpy、path的单个或者嵌套list对象；\n",
    "        base_fn：\n",
    "            保存文件的名称。\n",
    "    Exmaple：\n",
    "        from PIL import Image  \n",
    "        import numpy as np  \n",
    "        import paddle\n",
    "\n",
    "        arr = np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8)\n",
    "\n",
    "        pd = paddle.to_tensor(np.random.randint(0, 256, (100, 100, 3), dtype=np.uint8))\n",
    "        multi_dtype_images_save(image=arr, fn='test_pd', suffix='png')\n",
    "\n",
    "        pil = Image.open('test_pd.png')\n",
    "\n",
    "        images=arr\n",
    "        batch_save(images, base_fn=\"test_\")\n",
    "        images=[[arr], pil, pd]\n",
    "        batch_save(images, base_fn=\"test_\")\n",
    "    \"\"\"\n",
    "    \n",
    "    if not isinstance(images, list):\n",
    "        multi_dtype_images_save(images, base_fn, suffix)\n",
    "        return \n",
    "    for i, image in enumerate(images):  \n",
    "        base_fn = f\"{base_fn.split('_')[0]}_{i}\" \n",
    "        batch_save(image, base_fn, suffix=suffix)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "f9b75411-975a-49bf-b950-9e7af3230129",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# ppdiffusers>=0.24.0\"\n",
    "import numpy as np\n",
    "import cv2\n",
    "import re\n",
    "from PIL import Image\n",
    "import paddle\n",
    "from ppdiffusers import (\n",
    "    ControlNetModel, \n",
    "    StableDiffusionXLControlNetPipeline,\n",
    "    AutoencoderKL, \n",
    "\t# UNet2DConditionModel, \n",
    "\tEulerAncestralDiscreteScheduler, \n",
    "    KDPM2AncestralDiscreteScheduler, \n",
    "    KDPM2DiscreteScheduler\n",
    ")\n",
    "from ppdiffusers.utils import load_image, make_image_grid\n",
    "\n",
    "# load controlnet\n",
    "controlnet = ControlNetModel.from_pretrained(\"diffusers/controlnet-canny-sdxl-1.0\")\n",
    "\n",
    "pipe = StableDiffusionXLControlNetPipeline.from_pretrained(\n",
    "    \"SG161222/RealVisXL_V3.0\", \n",
    "    paddle_dtype=paddle.float16, \n",
    "    safety_checker=None, \n",
    "    variant=\"fp16\",\n",
    "    controlnet=controlnet, \n",
    "    low_cpu_mem_usage=True\n",
    ")\n",
    "\n",
    "# denoise策略\n",
    "pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n",
    "\n",
    "# 开启 xformers\n",
    "pipe.enable_xformers_memory_efficient_attention()\n",
    "\n",
    "# 定义prompt\n",
    "prompt = [\n",
    "    \"full body shot of a pretty Korean woman, red sweater\",\n",
    "    \"<lora:2>Beautiful girl, delicate, peerless face, delicate eyes, off-the-shoulder dress, pink, cherry blossoms, detailed light effect rendering, high-definition picture, beautiful two-dimensional.\", \n",
    "][-1]\n",
    "negative_prompt = [\n",
    "    'low quality, bad quality, sketches, blurry',\n",
    "    \"nsfw\",\n",
    "][-1]\n",
    "\n",
    "# 随机种子\n",
    "generator = paddle.Generator().manual_seed(42)\n",
    "\n",
    "# controlnet权重系数\n",
    "controlnet_conditioning_scale=0.5\n",
    "\n",
    "# controlnet参考图\n",
    "image = load_image(\n",
    "    \"./assets/bc.jpg\"\n",
    ")\n",
    "# controlnet图生成线稿\n",
    "image = np.array(image)\n",
    "image = cv2.Canny(image, 100, 200)\n",
    "image = image[:, :, None]\n",
    "image = np.concatenate([image, image, image], axis=2)\n",
    "image = Image.fromarray(image)\n",
    "\n",
    "image = pipe(\n",
    "    prompt,  \n",
    "    negative_prompt=negative_prompt, \n",
    "    width=1024, \n",
    "    height=768, \n",
    "    image=image,\n",
    "    num_inference_steps=30,\n",
    "    guidance_scale=4,\n",
    "    num_images_per_prompt=1,\n",
    "    generator=generator, \n",
    "    controlnet_conditioning_scale=controlnet_conditioning_scale, \n",
    ").images\n",
    "\n",
    "# 保存图片\n",
    "batch_save(image, base_fn='bcsz', suffix='png')\n",
    "# 释放内存，GPU有效\n",
    "paddle.device.cuda.empty_cache()\n",
    "# 生成网格化展示内容\n",
    "img = make_image_grid(image, rows=1, cols=1)\n",
    "# 网格化展示\n",
    "img"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "afa8cb08-ee28-4e27-b063-9ea7aeb93943",
   "metadata": {},
   "source": [
    "## 超分\n",
    "\n",
    "切块处理大图，可以放大任意尺寸。\n",
    "\n",
    "**StableDiffusion sr**块的大小 `size=(256,256)` ，适合256左右小而模糊的图片`method=\"sd\"`。\n",
    "\n",
    "**paddle srgan** 块的大小 `size=(1560,1560)`，效率远高于sdsr，适合生成图的放大，默认为`method=\"df2k\"`,\n",
    "\n",
    "`method=\"drn\"`质量低，但色彩明亮。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "128436dd-ec7f-4ddf-aa4e-b10bee12a07d",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# # WebUI中的超分函数测试\n",
    "from PIL import Image\n",
    "from utils.upscale import upscale_x4\n",
    "\n",
    "img_path = upscale_x4(\"./assets/bcsz_0.png\", method=\"df2k\", size=(1024, 1024), suffix='jpg', output_dir=\"./out_puts\")\n",
    "# img_path = upscale_x4(\"./assets/bcsz_0.png\", method=\"drn\", size=(1024, 1024), suffix='jpg', output_dir=\"./out_puts\")\n",
    "# img_path = upscale_x4(\"assets/bcsz_0.png\", method=\"esrgan\", size=(1024, 1024), suffix='jpg', output_dir=\"./out_puts\")\n",
    "# img_path = upscale_x4(\"assets/bcsz_0.png\", method=\"lesr\", size=(512, 512), suffix='jpg', output_dir=\"./out_puts\")\n",
    "Image.open(img_path[0])"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "10ba3efb-655c-49dd-ae5c-ceb6e7369461",
   "metadata": {},
   "source": [
    "## 二次元模型\n",
    "由torch权重转换而来，这里转换模型后可以在WebUI中使用。重启内核！！！"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "853e2e5c-baa3-46eb-8e32-ae511b77987b",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 解压文件\n",
    "!tar -xzvf /home/aistudio/data/data264747/animagine-xl-2.0.tar.gz .\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "90e11e0e-c74e-4767-a07f-a818abbcef07",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 待转换pytorch模型路径在此修改,多组件分离模型\n",
    "model_torch_name = [\n",
    "    \"animagine-xl-2.0\", \n",
    "][-1]\n",
    "!python convert_pt_2_pd/convert_pt_2_pd.py --model_name=$model_torch_name\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "a637b8c6-7e5b-4e1a-80fe-25a4ca1d893f",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 待转换pytorch模型路径在此修改,单文件模型权重\n",
    "# !python convert_single_weights_2_pd/convert_key.py \\\n",
    "#     --model_name=../data/data268105/animagine-xl-3.1.safetensors"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "779bbbca-5e5e-4b50-a160-a45bc2e0431f",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 模型测测试\n",
    "import warnings  \n",
    "warnings.filterwarnings(\"ignore\")\n",
    "\n",
    "import warnings  \n",
    "warnings.filterwarnings(\"ignore\")\n",
    "import paddle, os, sys\n",
    "from ppdiffusers import (\n",
    "    StableDiffusionXLPipeline,\n",
    "    StableDiffusionXLImg2ImgPipeline, \n",
    "    EulerAncestralDiscreteScheduler, \n",
    "    StableDiffusionXLInpaintPipeline,\n",
    "    UNet2DConditionModel\n",
    ")\n",
    "\n",
    "base_model_path = [\n",
    "    \"./Animagine-Xl-2.0\", \n",
    "][-1]\n",
    "\n",
    "text2img = StableDiffusionXLPipeline.from_pretrained(\n",
    "    pretrained_model_name_or_path=base_model_path,\n",
    "    paddle_dtype=paddle.float16,\n",
    "    low_cpu_mem_usage=True, \n",
    ")\n",
    "\n",
    "text2img.scheduler = EulerAncestralDiscreteScheduler.from_config(text2img.scheduler.config)\n",
    "# 使用预训练的模型初始化文本到图像的生成管道   \n",
    "  \n",
    "# 使用与text2img相同的组件初始化图像到图像的转换管道  \n",
    "img2img = StableDiffusionXLImg2ImgPipeline(**text2img.components)  \n",
    "\n",
    "# 使用与text2img相同的组件初始化图像修复管道  \n",
    "inpaint = StableDiffusionXLInpaintPipeline(**text2img.components)\n",
    "\n",
    "prompt = \"A pretty young woman with silky golden hair that shines with a captivating glow in the sunlight. Her eyes are deep like the sea, revealing boundless wisdom and tenderness. Her face is exquisite, with skin as white as jade, and a confident smile always plays at the corners of her mouth. She wears a simple yet elegant long dress, its hem fluttering gently, as if carrying a touch of ethereal charm. Her posture is upright, her gait composed, and wherever she goes, she becomes the focal point of attention. There's a unique charm about her that involuntarily draws people in.\"\n",
    "negative_prompt = \"lowres, bad anatomy, bad hands, text, error, missing fingers, extra digit, fewer digits, cropped, worst quality, low quality, normal quality, jpeg artifacts, signature, watermark, username, blurry\"\n",
    "\n",
    "text2img(\n",
    "    prompt, \n",
    "    negative_prompt=negative_prompt, \n",
    "    num_inference_steps=30,  \n",
    "    guidance_scale=5, \n",
    "    # guidance_rescale=10, \n",
    "    # generator= paddle.Generator().manual_seed(100) \n",
    ")[0][0]"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "380669de-7b5a-475c-8ec9-c0b51d1e57a2",
   "metadata": {},
   "source": [
    "<h2><font style=\"background: linear-gradient(to right,gold, red,pink, green);-webkit-background-clip: text;-webkit-text-fill-color: transparent;\">IPAdapterPlusXL</font></h2>\n",
    "\n",
     "与 Photomaker 和 InstantX/InstantID 功能相似，\n",
     "模糊的低分辨率图片也可变成清晰图片。\n",
    "模型效果(左图输出，右图输入)\n",
    "\n",
    "![](https://ai-studio-static-online.cdn.bcebos.com/59f82054242f4d04b433c515ab58e527a2d2901eca94434a97565cf8a5f395ac)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "74006e96-a735-4a09-9800-cc32d795cdff",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "import warnings  \n",
    "# 忽略特定警告，调试勿开启此项\n",
    "warnings.filterwarnings(\"ignore\") \n",
    "\n",
    "from PIL import Image\n",
    "import paddle\n",
    "import os\n",
    "from ppdiffusers import (\n",
    "        StableDiffusionXLPipeline, \n",
    "        DDIMScheduler, \n",
    "        EulerAncestralDiscreteScheduler,\n",
    "        StableDiffusionXLImg2ImgPipeline,\n",
    "    )\n",
    "from ppdiffusers.utils import load_image, make_image_grid, smart_load\n",
    "from safetensors.paddle import load_file\n",
    "# 导入自定义类\n",
    "from utils.ip_adapter import IPAdapterPlusXL, StableDiffusionXLCustomPipeline\n",
    "\n",
    "HOME_DIR = os.path.expanduser('~')\n",
    "\n",
    "# 图片展示\n",
    "def image_grid(imgs, rows, cols):\n",
    "    assert len(imgs) == rows*cols\n",
    "\n",
    "    w, h = imgs[0].size\n",
    "    grid = Image.new('RGB', size=(cols*w, rows*h))\n",
    "    grid_w, grid_h = grid.size\n",
    "    \n",
    "    for i, img in enumerate(imgs):\n",
    "        grid.paste(img, box=(i%cols*w, i//cols*h))\n",
    "    return grid\n",
    "\n",
    "# 加载模型\n",
    "base_model_path = [\"SG161222/RealVisXL_V3.0\", \"./Animagine-Xl-2.0\"][-2]\n",
    "image_encoder_path = [\n",
    "    \"laion/CLIP-ViT-H-14-laion2B-s32B-b79K\", \n",
    "][-1]\n",
    "ip_ckpt = [\n",
    "    os.path.join(HOME_DIR, \"data/data264879/ip-adapter-plus-face_sdxl_vit-h.bin\"), \n",
    "    os.path.join(HOME_DIR, \"data/data264879/ip-adapter-plus-face_sdxl_vit-h.safetensors\"),\n",
    "][-1] \n",
    "\n",
    "noise_scheduler = DDIMScheduler(\n",
    "    num_train_timesteps=1000,\n",
    "    beta_start=0.00085,\n",
    "    beta_end=0.012,\n",
    "    beta_schedule=\"scaled_linear\",\n",
    "    clip_sample=False,\n",
    "    set_alpha_to_one=False,\n",
    "    steps_offset=1,\n",
    ")\n",
    "\n",
    "pipe = StableDiffusionXLCustomPipeline.from_pretrained( #plus\n",
    "    base_model_path, \n",
    "    paddle_dtype=paddle.float16, \n",
    "    variant=\"fp16\", \n",
    "    # scheduler=noise_scheduler,\n",
    "    low_cpu_mem_usage=True,\n",
    ")\n",
    "pipe.enable_xformers_memory_efficient_attention()\n",
    "\n",
    "ip_model = IPAdapterPlusXL(\n",
    "    pipe, \n",
    "    ip_ckpt=ip_ckpt, \n",
    "    image_encoder_path=image_encoder_path,\n",
    "    device=paddle.get_device(), \n",
    "    num_tokens=16\n",
    ")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "96f4468f-1cef-49b4-aa6d-c8c4a4d4fa06",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "ref_image = Image.open(\"./assets/img_ipadapter.png\")\n",
    "# 只要脸部特征并调整大小(224, 224)\n",
    "ref_image1 = ref_image.crop((\n",
    "    ref_image.size[0]*0.1,  # x left\n",
    "    0,  # y top\n",
    "    ref_image.size[0]*0.1+ref_image.size[1]*0.45,  # x right\n",
    "    ref_image.size[1]*0.45  # y bottom\n",
    ")).resize((224, 224))\n",
    "\n",
    "imgs = [ref_image, ref_image1]\n",
    "rows = 1\n",
    "image_grid(imgs, rows=rows, cols=len(imgs)//rows + (len(imgs)%rows!=0))\n",
    "\n",
    "paddle.device.cuda.empty_cache()\n",
    "prompt = [\n",
    "    \"A beautiful girl wearing Sun Dress are loking at viewer in garden\",\n",
    "][-1]\n",
    "negative_prompt = \"low quality, ugly, blurry, opened mouth\"\n",
    "\n",
    "image = ip_model.generate(\n",
    "    pil_image=ref_image1,  # crop and resize image\n",
    "    control_guidance_start=0., # 控制姿势，值越小越接近原图\n",
    "    control_guidance_end=0.8,\n",
    "    scale=1,  # 值越小与原图差异越大\n",
    "    prompt=prompt, \n",
    "    negative_prompt=negative_prompt,\n",
    "    num_samples=1, \n",
    "    num_inference_steps=30, #30\n",
    "    guidance_scale=5, \n",
    "    seed=42,\n",
    "    eta=0.3, # 只有使用DDIMScheduler才有效\n",
    ").images\n",
    "paddle.device.cuda.empty_cache()\n",
    "\n",
    "# 展示图片\n",
    "imgs = [image[0], ref_image]\n",
    "rows = 1\n",
    "image_grid(imgs, rows=rows, cols=len(imgs)//rows + (len(imgs)%rows!=0))\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "5beac279-c9d7-4d30-b5c3-e1034ff1722d",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 保存图片\n",
    "import time\n",
    "os.makedirs(\"./outputs\", exist_ok=True)\n",
    "image[0].save(\"./outputs/\" + str(time.ctime()) + \".jpg\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "7608cf38-102a-49bb-980c-81926bfbf7bb",
   "metadata": {},
   "source": [
     "### Adapter换脸\n",
    "\n",
    "\n",
    "![src+face=result](https://ai-studio-static-online.cdn.bcebos.com/cccacfc3be334672b046f6107516cbf497ad66468594490aa53bc8937d3e873e)\n"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "514e806c-0f72-4905-b04b-c3baf7e4e9d1",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "import warnings  \n",
    "# 忽略特定警告  \n",
    "warnings.filterwarnings(\"ignore\") \n",
    "\n",
    "import paddle\n",
    "import os\n",
    "from ppdiffusers import (\n",
    "        StableDiffusionXLPipeline, \n",
    "        DDIMScheduler, \n",
    "        EulerAncestralDiscreteScheduler, \n",
    "        StableDiffusionXLImg2ImgPipeline,\n",
    "        StableDiffusionXLInpaintPipeline\n",
    "    )\n",
    "from ppdiffusers.utils import load_image, make_image_grid, smart_load\n",
    "from safetensors.paddle import load_file\n",
    "# 导入自定义类\n",
    "from utils.ip_adapter import IPAdapterPlusXL\n",
    "\n",
    "HOME_DIR = os.path.expanduser('~')\n",
    "\n",
    "# 加载模型\n",
    "base_model_path = [\"SG161222/RealVisXL_V3.0\", \"./Animagine-Xl-2.0\"][-2]\n",
    "image_encoder_path = [\n",
    "    \"laion/CLIP-ViT-H-14-laion2B-s32B-b79K\", \n",
    "][-1]\n",
    "ip_ckpt = [\n",
    "    os.path.join(HOME_DIR, \"data/data264879/ip-adapter-plus-face_sdxl_vit-h.bin\"), \n",
    "    os.path.join(HOME_DIR, \"data/data264879/ip-adapter-plus-face_sdxl_vit-h.safetensors\"),\n",
    "][-1] \n",
    "\n",
    "noise_scheduler = DDIMScheduler(\n",
    "    num_train_timesteps=1000,\n",
    "    beta_start=0.00085,\n",
    "    beta_end=0.012,\n",
    "    beta_schedule=\"scaled_linear\",\n",
    "    clip_sample=False,\n",
    "    set_alpha_to_one=False,\n",
    "    steps_offset=1,\n",
    ")\n",
    "pipe = StableDiffusionXLImg2ImgPipeline.from_pretrained( #plus\n",
    "    base_model_path, \n",
    "    paddle_dtype=paddle.float16, \n",
    "    variant=\"fp16\", \n",
    "    # scheduler=noise_scheduler,\n",
    "    low_cpu_mem_usage=True,\n",
    ")\n",
    "\n",
    "pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)\n",
    "\n",
    "pipe.enable_xformers_memory_efficient_attention()\n",
    "\n",
    "ip_model = IPAdapterPlusXL(\n",
    "    pipe, \n",
    "    ip_ckpt=ip_ckpt, \n",
    "    image_encoder_path=image_encoder_path,\n",
    "    device=paddle.get_device(), \n",
    "    num_tokens=16\n",
    ").generate"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "44ef38ba-4033-4d42-86ac-7e4e026e997c",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "paddle.device.cuda.empty_cache()\n",
    "src_image = load_image(\"./assets/text_691436129_ddpm_30_4_0_1712919483.194813.png\")\n",
    "face_image = load_image(\"./assets/img_ipadapter.png\")\n",
    "# 裁剪面部特征\n",
    "face_image1 = face_image.crop((face_image.size[0]*0.1, 0, face_image.size[0]*0.1+face_image.size[1]*0.42, face_image.size[1]*0.42)).resize((224, 224))\n",
    "\n",
    "prompt = [\n",
    "    \"A beautiful Chinese girl wearing Sun Dress are loking at viewer\",\n",
    "][-1]\n",
    "negative_prompt = \"low quality, ugly, blurry\"\n",
    "\n",
    "image = ip_model(\n",
    "    pil_image=face_image1, \n",
    "    image=src_image,\n",
    "    prompt=prompt, \n",
    "    negative_prompt=negative_prompt,\n",
    "    scale=0.75, # 脸部参考图比重\n",
    "    strength= 0.2, # 重绘步数 = num_inference_steps * strength\n",
    "    num_samples=1, \n",
    "    num_inference_steps=30, \n",
    "    guidance_scale=4, \n",
    "    seed=42,\n",
    ").images\n",
    "\n",
    "# # 保存图片\n",
    "# image[0].save(\"out_puts/img_ipadapter_out.png\")\n",
    "paddle.device.cuda.empty_cache()\n",
    "# 展示图片\n",
    "from PIL import Image\n",
    "def image_grid(imgs, rows, cols, figure_size=368):\n",
    "    assert len(imgs) == rows*cols\n",
    "\n",
    "    grid = Image.new('RGB', size=(cols*figure_size, rows*figure_size))\n",
    "    \n",
    "    for i, img in enumerate(imgs):\n",
    "        w, h = img.size\n",
    "        scale = figure_size/max(w, h)\n",
    "        img = img.resize((int(w*scale), int(h*scale)))\n",
    "        grid.paste(img, box=(i%cols*figure_size, i//cols*figure_size))\n",
    "    return grid\n",
    "imgs = [src_image, face_image, image[0]]\n",
    "image_grid(imgs, rows=1, cols=len(imgs))"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "adefb145-1031-4f68-8068-f9692e86e10b",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 保存图片\n",
    "import time\n",
    "os.makedirs(\"./outputs\", exist_ok=True)\n",
    "image[0].save(\"./outputs/\" + str(time.ctime()) + \".jpg\")"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "66517dca-03cf-4fd0-af1b-e5ae3077fc99",
   "metadata": {},
   "source": [
    "<h2><font style=\"background: linear-gradient(to right,gold, red,pink, green);-webkit-background-clip: text;-webkit-text-fill-color: transparent;\">WebUI</font></h2>\n",
    "\n",
    "1. *先运行测试！*\n",
    "\n",
    "2. 运行webui前***重启内核***！\n",
    "双击运行`webui.gradio.py`（controlnet cany使用） 或 `webui_adapter.gradio.py` （Gradio UI的Face Image输入模块点击编辑裁剪出脸部特征）即可,二者只可选一运行。"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "77d3dfde-406c-4c70-a030-094b54161157",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 测试webui 1\n",
    "!python webui_adapter.gradio.py"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "bbf7b28b-96ec-40c6-b8c5-4ed9dc9f999d",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# 重启内核，运行测试webui 2\n",
    "!python webui.gradio.py"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "45637cb6-fae9-4c90-b953-ea40bc70ef92",
   "metadata": {},
   "source": [
    "## Lora 训练\n",
    "\n",
    "重启内核！"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "cd48c95e-7e34-43ad-a2b0-384d7f58e662",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "!git clone https://gitee.com/Covirtue/PaddleMIX_latest.git --depth=1"
   ]
  },
  {
   "cell_type": "markdown",
   "id": "ad9b98ba-1ea9-4f67-bb52-9d8438062fd7",
   "metadata": {},
   "source": [
    "lora图片自行准备，放在`beauty/dlrb`目录下。\n",
    "cmd中执行如下命令：\n",
    "\n",
    "```cmd\n",
    "\n",
    "cd PPdiffusersWebUI\n",
    "\n",
    "rm -r ./beauty/dlrb/.ipynb_checkpoints\n",
    "\n",
    "python PaddleMIX_latest/ppdiffusers/examples/dreambooth/train_dreambooth_lora_sdxl.py \\\n",
    "  --pretrained_model_name_or_path=stabilityai/stable-diffusion-xl-base-1.0 \\\n",
    "  --instance_data_dir=beauty/dlrb \\\n",
    "  --output_dir=lora_trained_xl \\\n",
    "  --report_to=\"wandb\" \\\n",
    "  --mixed_precision=\"fp16\" \\\n",
    "  --instance_prompt=\"shot_of_dlrb_v1\" \\\n",
    "  --resolution=1024 \\\n",
    "  --train_batch_size=1 \\\n",
    "  --gradient_accumulation_steps=4 \\\n",
    "  --learning_rate=1e-4 \\\n",
    "  --lr_scheduler=\"constant\" \\\n",
    "  --lr_warmup_steps=0 \\\n",
    "  --max_train_steps=1000 \\\n",
    "  --validation_prompt=\"A photo of shot_of_dlrb_v1 in garden.\" \\\n",
    "  --validation_epochs=2000 \\\n",
    "  --seed=\"42\" \\\n",
    "  --checkpointing_steps=100 \\\n",
    "  --variant=\"fp16\" \\\n",
    "  --rank=64\n",
    "```"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "2cb1c290-6613-4fb6-9c4d-5d0f48b052e5",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "import os\n",
    "os.makedirs(\"lora_trained_xl\", exist_ok=True)\n",
    "!rm -r ./beauty/dlrb/.ipynb_checkpoints\n",
    "!python PaddleMIX_latest/ppdiffusers/examples/dreambooth/train_dreambooth_lora_sdxl.py \\\n",
    "  --pretrained_model_name_or_path=SG161222/RealVisXL_V3.0  \\\n",
    "  --instance_data_dir=beauty/dlrb \\\n",
    "  --output_dir=lora_trained_xl \\\n",
    "  --mixed_precision=\"fp16\" \\\n",
    "  --instance_prompt=\"a photo of dlrb\" \\\n",
    "  --resolution=1024 \\\n",
    "  --train_batch_size=1 \\\n",
    "  --gradient_accumulation_steps=4 \\\n",
    "  --learning_rate=1e-4 \\\n",
    "  --lr_scheduler=\"constant\" \\\n",
    "  --lr_warmup_steps=0 \\\n",
    "  --max_train_steps=100 \\\n",
    "  --validation_prompt=\"A photo of dlrb in garden.\" \\\n",
    "  --validation_epochs=200 \\\n",
    "  --seed=\"42\" \\\n",
    "  --checkpointing_steps=100 \\\n",
    "  --variant=\"fp16\" \\\n",
    "  --rank=64 # 默认为4(22MB)，32(177MB)或者64(354MB)\n",
    "  \n",
    "  "
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "4f5ee8a1-7d74-416f-b16c-bef7e9dfdaf7",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "# lora测试\n",
    "import paddle\n",
    "from ppdiffusers import (\n",
    "    AutoencoderKL, \n",
    "    DDPMScheduler, \n",
    "    EulerAncestralDiscreteScheduler, \n",
    "    EulerDiscreteScheduler, \n",
    "    KDPM2AncestralDiscreteScheduler, \n",
    "    StableDiffusionXLPipeline, \n",
    "    UNet2DConditionModel, \n",
    ")\n",
    "from ppdiffusers.loaders.lora import USE_PEFT_BACKEND\n",
    "\n",
    "# Enable PEFT backend\n",
    "USE_PEFT_BACKEND = True\n",
    "\n",
    "model_name = [\n",
    "    \"stabilityai/stable-diffusion-xl-base-1.0\",\n",
    "    \"SG161222/RealVisXL_V3.0\",\n",
    "]\n",
    "\n",
    "pipe = StableDiffusionXLPipeline.from_pretrained(\n",
    "    model_name[1], \n",
    "    paddle_dtype=paddle.float16, \n",
    "    variant=\"fp16\",\n",
    "    use_peft_backend=True\n",
    ")\n",
    "\n",
    "pipe.scheduler = EulerAncestralDiscreteScheduler.from_config(pipe.scheduler.config)"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "52a9d4bb-9a83-496f-a95a-89ad9c7f5cdb",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": [
    "# 加载lora\n",
    "pipe.load_lora_weights(\"./lora_trained_xl/checkpoint-1000/paddle_lora_weights.safetensors\")"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "84269e64-a35f-4169-b427-df404385e908",
   "metadata": {
    "scrolled": true,
    "tags": []
   },
   "outputs": [],
   "source": [
    "image = pipe( # shot_of_dlrb\n",
    "    \"shot_of_dlrb, Photorealistic:1.1, full body shot of a beautiful woman with delicate features, glowing porcelain skin, and hazel eyes, wearing a shimmering emerald green gown with intricate beading, standing in a misty, green-lit environment with soft foliage in the background. The lighting highlights her silhouette, creating an elegant, cinematic atmosphere.\", \n",
    "    negative_prompt = \"bad hands:1.2, shortten fingers, strech fingers, bad fingers:1.2, bad nails:1.3, low quality, blurry, ugly, bad face, bad eyes, bad toes, bad feet\", \n",
    "    num_inference_steps=30, \n",
    "    width=768, \n",
    "    height=1024, \n",
    "    guidance_scale=15,\n",
    "    generator=paddle.Generator().manual_seed(42)\n",
    ").images[0]\n",
    "paddle.device.cuda.empty_cache()\n",
    "image"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "id": "e57d454c-f897-49dc-b755-d1878b7f3b5a",
   "metadata": {
    "scrolled": true
   },
   "outputs": [],
   "source": []
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "Python 3",
   "language": "python",
   "name": "py35-paddle1.2.0"
  },
  "language_info": {
   "codemirror_mode": {
    "name": "ipython",
    "version": 3
   },
   "file_extension": ".py",
   "mimetype": "text/x-python",
   "name": "python",
   "nbconvert_exporter": "python",
   "pygments_lexer": "ipython3",
   "version": "3.10.10"
  },
  "toc-autonumbering": false,
  "vscode": {
   "interpreter": {
    "hash": "09f0dbf7b1569c1ab842ae2f41770fe6aa1b54326d081112fa5944b99abb5899"
   }
  }
 },
 "nbformat": 4,
 "nbformat_minor": 5
}
