{
 "cells": [
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### ControlNet控制整体布局，Stable Diffusion调整全局风格"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入所需的模块，用于图像处理、模型加载和生成图像\n",
    "import torch  # PyTorch库，用于张量操作和模型推理\n",
    "import numpy as np  # NumPy库，用于数组操作\n",
    "from PIL import Image  # PIL库，用于图像加载和处理\n",
    "import requests  # Requests库，用于发送HTTP请求\n",
    "import torchvision.transforms as transforms  # Torchvision的变换模块，用于图像预处理\n",
    "from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation  # Transformers库，用于加载Mask2Former模型\n",
    "from controlnet_aux import MLSDdetector  # ControlNet辅助工具，用于生成控制图像\n",
    "from diffusers import ControlNetModel, StableDiffusionControlNetPipeline, UniPCMultistepScheduler  # Diffusers库，用于加载ControlNet和Stable Diffusion\n",
    "from diffusers.utils import load_image  # Diffusers工具函数，用于加载图像\n",
    "import matplotlib.pyplot as plt  # Matplotlib库，用于图像可视化\n",
    "import urllib.request  # urllib.request模块，用于从URL下载文件\n",
    "\n",
    "# URL of the sample input image used throughout this notebook\n",
    "raw_url = \"https://raw.githubusercontent.com/naderAsadi/DesignGenie/main/examples/images/sample_input.png\"\n",
    "try:\n",
    "    # Download the image and store it locally as \"sample_input.png\"\n",
    "    urllib.request.urlretrieve(raw_url, \"sample_input.png\")\n",
    "    print(\"图片下载成功！\")  # success message\n",
    "except Exception as e:\n",
    "    print(f\"图片下载失败: {e}\")  # failure message with the underlying error\n",
    "\n",
    "# Global settings\n",
    "img_src = \"sample_input.png\"  # local path of the input image\n",
    "model_name = \"runwayml/stable-diffusion-v1-5\"  # Stable Diffusion checkpoint\n",
    "mask_ID = 5  # index into the generated mask list used later\n",
    "\n",
    "# Fetch the ADE20K id->label mapping used to name segmentation classes.\n",
    "# A timeout plus raise_for_status() prevents hanging forever or silently\n",
    "# JSON-decoding an HTTP error page.\n",
    "resp = requests.get(\n",
    "    \"https://huggingface.co/datasets/huggingface/label-files/raw/main/ade20k-id2label.json\",\n",
    "    timeout=30,\n",
    ")\n",
    "resp.raise_for_status()\n",
    "LABELS = resp.json()\n",
    "\n",
    "# 定义函数，从语义分割图生成掩码\n",
    "def get_mask_from_segmentation_map(seg_map: torch.Tensor):\n",
    "    \"\"\"从语义分割图生成掩码，每个掩码对应一个类别\"\"\"\n",
    "    masks = []  # 存储生成的掩码列表\n",
    "    labels = []  # 存储对应的类别标签列表\n",
    "    for label in range(150):  # 遍历ADE20K的150个类别\n",
    "        mask = np.ones((seg_map.shape[0], seg_map.shape[1]), dtype=np.uint8)  # 创建与分割图同尺寸的全1掩码\n",
    "        indices = (seg_map == label)  # 找到分割图中等于当前类别ID的像素位置\n",
    "        mask[indices] = 0  # 将对应类别的像素设为0，其他保持为1\n",
    "        if indices.sum() > 0:  # 如果当前类别在图像中存在\n",
    "            masks.append(mask)  # 将掩码添加到列表\n",
    "            labels.append(label)  # 将类别ID添加到列表\n",
    "    print(f\"创建了 {len(masks)} 个掩码\")  # 打印生成的掩码数量\n",
    "    for idx, label in enumerate(labels):  # 遍历生成的掩码和标签\n",
    "        print(f\"索引: {idx}\\t类别ID: {label}\\t标签: {LABELS[str(label)]}\")  # 打印每个掩码的索引、类别ID和标签名\n",
    "    return masks, labels  # 返回掩码和标签列表\n",
    "\n",
    "# Load the input image and resize it to the working resolution\n",
    "image = load_image(img_src).resize((768, 512))\n",
    "\n",
    "# Semantic segmentation with Mask2Former\n",
    "seg_processor = AutoImageProcessor.from_pretrained(\"facebook/mask2former-swin-large-ade-semantic\")\n",
    "inputs = seg_processor(images=[image], return_tensors=\"pt\")\n",
    "model = Mask2FormerForUniversalSegmentation.from_pretrained(\"facebook/mask2former-swin-large-ade-semantic\")\n",
    "with torch.no_grad():  # inference only: skip autograd bookkeeping to save memory\n",
    "    outputs = model(**inputs)\n",
    "predicted_semantic_map = seg_processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]  # (H, W) class-id map\n",
    "\n",
    "# Build per-class masks and their labels\n",
    "masks, labels = get_mask_from_segmentation_map(predicted_semantic_map)\n",
    "\n",
    "# Cache directory so model weights are not re-downloaded on every run\n",
    "cache_dir = \"./model_cache\"\n",
    "\n",
    "# MLSD line detection produces the structural conditioning image for ControlNet\n",
    "mlsd_processor = MLSDdetector.from_pretrained(\"lllyasviel/Annotators\")  # distinct name: avoid shadowing the segmentation processor\n",
    "control_image = mlsd_processor(image)\n",
    "checkpoint_name = \"lllyasviel/control_v11p_sd15_mlsd\"  # ControlNet checkpoint\n",
    "# model_name (\"runwayml/stable-diffusion-v1-5\") is already defined above\n",
    "\n",
    "# Assemble the ControlNet + Stable Diffusion pipeline\n",
    "controlnet = ControlNetModel.from_pretrained(\n",
    "    checkpoint_name,\n",
    "    torch_dtype=torch.float16,  # half precision to reduce memory use\n",
    "    cache_dir=cache_dir,\n",
    ")\n",
    "pipe = StableDiffusionControlNetPipeline.from_pretrained(\n",
    "    model_name,\n",
    "    controlnet=controlnet,  # plug ControlNet into the pipeline\n",
    "    torch_dtype=torch.float16,\n",
    "    cache_dir=cache_dir,\n",
    ")\n",
    "pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)\n",
    "pipe.enable_model_cpu_offload()  # offload idle submodules to CPU to save VRAM\n",
    "\n",
    "# Prepare the mask and the control image\n",
    "mask = torch.Tensor(masks[mask_ID]).repeat(3, 1, 1)  # selected mask repeated to 3 channels to match the control image\n",
    "object_mask = torch.Tensor(masks[mask_ID])  # single-channel copy of the selected mask\n",
    "control = transforms.ToTensor()(control_image)  # control image as a float tensor\n",
    "masked_control_image = transforms.ToPILImage()(mask * control)  # zero out the object region of the control image\n",
    "object_mask = 1 - object_mask  # invert: object region becomes 1, background 0\n",
    "object_mask = transforms.ToPILImage()(object_mask.unsqueeze(0))  # back to a PIL image for display\n",
    "\n",
    "# Generate images with the pipeline\n",
    "prompt = [\"A warm and cozy bedroom, furnished with blue and gray colors, and a wooden armchair. simplistic style\"] * 4  # same prompt repeated for 4 samples\n",
    "generator = [torch.Generator(device=\"cuda\").manual_seed(int(i)) for i in np.random.randint(50, size=len(prompt))]  # one seed per sample (np.random is unseeded, so runs differ)\n",
    "output = pipe(\n",
    "    prompt,  # text prompts\n",
    "    image=masked_control_image,  # masked control image as conditioning\n",
    "    negative_prompt=[\"monochrome, lowres, bad anatomy, worst quality, low quality\"] * len(prompt),  # discourage low-quality outputs\n",
    "    num_inference_steps=30,  # number of denoising steps\n",
    "    generator=generator,  # per-sample seeds\n",
    ")\n",
    "\n",
    "# Display results in a 3x3 grid\n",
    "fig, axes = plt.subplots(3, 3, figsize=(15, 15))\n",
    "\n",
    "# Top row: original image, masked control image, object mask\n",
    "axes[0, 0].imshow(image)\n",
    "axes[0, 0].set_title(\"Original Image\")\n",
    "axes[0, 0].axis('off')\n",
    "\n",
    "axes[0, 1].imshow(masked_control_image)\n",
    "axes[0, 1].set_title(\"Masked Control Image\")\n",
    "axes[0, 1].axis('off')\n",
    "\n",
    "axes[0, 2].imshow(object_mask, cmap='gray')  # grayscale colormap for the binary mask\n",
    "axes[0, 2].set_title(\"Object Mask\")\n",
    "axes[0, 2].axis('off')\n",
    "\n",
    "# Remaining cells: the four generated images, filled row-major from row 1\n",
    "for i in range(4):\n",
    "    row = (i // 3) + 1\n",
    "    col = i % 3\n",
    "    axes[row, col].imshow(output.images[i])\n",
    "    axes[row, col].set_title(f\"Generated Image {i+1}\")\n",
    "    axes[row, col].axis('off')\n",
    "\n",
    "# Hide the unused subplots\n",
    "axes[2, 1].axis('off')\n",
    "axes[2, 2].axis('off')\n",
    "\n",
    "plt.tight_layout()  # avoid overlapping subplots\n",
    "plt.show()"
   ]
  },
  {
   "cell_type": "markdown",
   "metadata": {},
   "source": [
    "### ControlNet控制整体布局，Stable Diffusion调整局部(inpaint)风格"
   ]
  },
  {
   "cell_type": "code",
   "execution_count": null,
   "metadata": {},
   "outputs": [],
   "source": [
    "# 导入必要的库\n",
    "import torch  # PyTorch用于深度学习操作\n",
    "import numpy as np  # 用于数值计算\n",
    "from PIL import Image  # 图像处理\n",
    "import requests  # 网络请求\n",
    "import torchvision.transforms as transforms  # 图像转换工具\n",
    "from transformers import AutoImageProcessor, Mask2FormerForUniversalSegmentation  # 用于语义分割\n",
    "from controlnet_aux import MLSDdetector  # 线条检测器\n",
    "from diffusers import ControlNetModel, StableDiffusionControlNetInpaintPipeline, UniPCMultistepScheduler  # 图像生成和修复\n",
    "from diffusers.utils import load_image  # 图像加载工具\n",
    "import matplotlib.pyplot as plt  # 可视化工具\n",
    "\n",
    "# Global settings\n",
    "img_src = \"sample_input.png\"  # input image path (downloaded in the first cell)\n",
    "mask_ID = 4  # index into the mask list selecting the region to redesign\n",
    "cache_dir = \"./model_cache\"  # local cache for model weights\n",
    "\n",
    "# ADE20K id->label mapping (kept for interactive inspection of `labels`).\n",
    "# timeout + raise_for_status() avoid hanging and JSON-decoding an error page.\n",
    "resp = requests.get(\n",
    "    \"https://huggingface.co/datasets/huggingface/label-files/raw/main/ade20k-id2label.json\",\n",
    "    timeout=30,\n",
    ")\n",
    "resp.raise_for_status()\n",
    "LABELS = resp.json()\n",
    "\n",
    "def get_mask_from_segmentation_map(seg_map: torch.Tensor):\n",
    "    \"\"\"从分割图生成掩码，每个类别对应一个掩码\"\"\"\n",
    "    masks, labels = [], []\n",
    "    for label in range(150):  # ADE20K数据集有150个类别\n",
    "        mask = np.ones((seg_map.shape[0], seg_map.shape[1]), dtype=np.uint8)\n",
    "        indices = (seg_map == label)\n",
    "        mask[indices] = 0  # 将目标区域设为0，背景为1\n",
    "        if indices.sum() > 0:  # 如果存在该类别\n",
    "            masks.append(mask)\n",
    "            labels.append(label)\n",
    "    return masks, labels\n",
    "\n",
    "# Load and resize the input image\n",
    "image = load_image(img_src).resize((768, 512))\n",
    "\n",
    "# Semantic segmentation with Mask2Former\n",
    "processor = AutoImageProcessor.from_pretrained(\"facebook/mask2former-swin-large-ade-semantic\")\n",
    "inputs = processor(images=[image], return_tensors=\"pt\")\n",
    "model = Mask2FormerForUniversalSegmentation.from_pretrained(\"facebook/mask2former-swin-large-ade-semantic\")\n",
    "with torch.no_grad():  # inference only: avoid building the autograd graph\n",
    "    outputs = model(**inputs)\n",
    "predicted_semantic_map = processor.post_process_semantic_segmentation(outputs, target_sizes=[image.size[::-1]])[0]\n",
    "\n",
    "# Build per-class segmentation masks\n",
    "masks, labels = get_mask_from_segmentation_map(predicted_semantic_map)\n",
    "\n",
    "# Generate the MLSD control image and blend it with the original image\n",
    "mlsd_processor = MLSDdetector.from_pretrained(\"lllyasviel/Annotators\")\n",
    "control_image = mlsd_processor(image)  # line-detection map\n",
    "# Blend control and original image 50/50 for a softer structural guide\n",
    "control_tensor = transforms.ToTensor()(control_image)\n",
    "image_tensor = transforms.ToTensor()(image)\n",
    "mixed_control_tensor = control_tensor * 0.5 + image_tensor * 0.5\n",
    "mixed_control_image = transforms.ToPILImage()(mixed_control_tensor)\n",
    "\n",
    "# Build the inpainting mask from the selected segmentation mask\n",
    "mask = torch.Tensor(masks[mask_ID])\n",
    "# Invert so the selected region becomes 1 (white = area the pipeline repaints)\n",
    "object_mask = 1 - mask\n",
    "mask_image = transforms.ToPILImage()(object_mask.unsqueeze(0))\n",
    "\n",
    "# Load ControlNet and the Stable Diffusion inpainting pipeline.\n",
    "# use_safetensors=False explicitly accepts pickle-format checkpoint files.\n",
    "controlnet = ControlNetModel.from_pretrained(\n",
    "    \"lllyasviel/control_v11p_sd15_mlsd\",\n",
    "    torch_dtype=torch.float16,\n",
    "    cache_dir=cache_dir,\n",
    "    use_safetensors=False,\n",
    ")\n",
    "# Inpainting pipeline with ControlNet guidance\n",
    "pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(\n",
    "    \"runwayml/stable-diffusion-inpainting\",\n",
    "    controlnet=controlnet,\n",
    "    torch_dtype=torch.float16,\n",
    "    cache_dir=cache_dir,\n",
    "    use_safetensors=False,\n",
    ")\n",
    "# Scheduler and memory optimizations\n",
    "pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config)\n",
    "pipe.enable_model_cpu_offload()  # offload idle submodules to CPU to save VRAM\n",
    "try:\n",
    "    pipe.enable_xformers_memory_efficient_attention()  # optional: faster, lower-memory attention\n",
    "except Exception as e:\n",
    "    # xformers is an optional dependency; fall back to default attention\n",
    "    print(f\"xformers unavailable, using default attention: {e}\")\n",
    "\n",
    "# Generation parameters\n",
    "prompt = [\"A luxurious Scandinavian style living room, minimalist furniture, natural wood elements, large windows with sunlight, cream colored walls, tasteful art pieces\"] * 4\n",
    "negative_prompt = [\"cluttered, dark, oversaturated, poor quality, blurry, unrealistic, artificial lighting, overdecorated\"] * 4\n",
    "# One random seed per sample (note: np.random is unseeded here, so runs differ)\n",
    "generator = [torch.Generator(device=\"cuda\").manual_seed(int(i)) for i in np.random.randint(50, size=4)]\n",
    "\n",
    "# Run the inpainting pipeline\n",
    "output = pipe(\n",
    "    prompt,\n",
    "    image=image,  # original image\n",
    "    mask_image=mask_image,  # white region is repainted\n",
    "    control_image=mixed_control_image,  # structural guidance for ControlNet\n",
    "    negative_prompt=negative_prompt,\n",
    "    num_inference_steps=30,  # denoising steps\n",
    "    generator=generator,\n",
    "    controlnet_conditioning_scale=0.7,  # strength of ControlNet guidance\n",
    "    guidance_scale=7.5,  # classifier-free guidance strength\n",
    ")\n",
    "\n",
    "# Visualize inputs and generated variations in a 3x3 grid\n",
    "fig, axes = plt.subplots(3, 3, figsize=(15, 15))\n",
    "\n",
    "# First row: input scene, structure guidance, inpainting mask\n",
    "header_panels = [\n",
    "    (image, \"Input Scene\", {}),\n",
    "    (mixed_control_image, \"Structure Guidance\", {}),\n",
    "    (mask_image, \"Region to Redesign\", {\"cmap\": 'gray'}),\n",
    "]\n",
    "for ax, (panel, title, extra) in zip(axes[0], header_panels):\n",
    "    ax.imshow(panel, **extra)\n",
    "    ax.set_title(title)\n",
    "    ax.axis('off')\n",
    "\n",
    "# Rows 2-3: the four generated design variations, filled row-major\n",
    "for i in range(4):\n",
    "    ax = axes[(i // 3) + 1][i % 3]\n",
    "    ax.imshow(output.images[i])\n",
    "    ax.set_title(f\"Design Variation {i+1}\")\n",
    "    ax.axis('off')\n",
    "\n",
    "# Hide the unused subplots\n",
    "axes[2, 1].axis('off')\n",
    "axes[2, 2].axis('off')\n",
    "\n",
    "plt.tight_layout()\n",
    "plt.show()"
   ]
  }
 ],
 "metadata": {
  "kernelspec": {
   "display_name": "designgenie",
   "language": "python",
   "name": "python3"
  },
  "language_info": {
   "name": "python",
   "version": "3.10.11"
  }
 },
 "nbformat": 4,
 "nbformat_minor": 2
}
