import inspect
from typing import List, Optional, Union

import numpy as np
import torch

import PIL
from tqdm.auto import tqdm
from transformers import CLIPFeatureExtractor, CLIPTextModel, CLIPTokenizer

from ...models import AutoencoderKL, UNet2DConditionModel
from ...pipeline_utils import DiffusionPipeline
from ...schedulers import DDIMScheduler, PNDMScheduler
from ...utils import logging
from . import StableDiffusionPipelineOutput
from .safety_checker import StableDiffusionSafetyChecker


logger = logging.get_logger(__name__)


def preprocess_image(image):
    # Resize to a multiple of 32, scale pixel values from [0, 255] to [-1, 1],
    # and convert the PIL image to an NCHW torch tensor.
    w, h = image.size
    w, h = map(lambda x: x - x % 32, (w, h))
    image = image.resize((w, h), resample=PIL.Image.LANCZOS)
    image = np.array(image).astype(np.float32) / 255.0
    image = image[None].transpose(0, 3, 1, 2)
    image = torch.from_numpy(image)
    return 2.0 * image - 1.0


def preprocess_mask(mask):
    # Convert the mask to a single channel, downsample it to the latent resolution (1/8 of the
    # image size), broadcast it over the 4 latent channels, and invert it so that 1.0 marks
    # regions to keep (black pixels) and 0.0 marks regions to repaint (white pixels).
    mask = mask.convert("L")
    w, h = mask.size
    w, h = map(lambda x: x - x % 32, (w, h))
    mask = mask.resize((w // 8, h // 8), resample=PIL.Image.NEAREST)
    mask = np.array(mask).astype(np.float32) / 255.0
    mask = np.tile(mask, (4, 1, 1))
    mask = mask[None]  # add a batch dimension: (1, 4, h // 8, w // 8)
    mask = 1 - mask
    mask = torch.from_numpy(mask)
    return mask


class StableDiffusionInpaintPipeline(DiffusionPipeline):
    r"""
    Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`CLIPTextModel`]):
            Frozen text-encoder. Stable Diffusion uses the text portion of
            [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically
            the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant.
        tokenizer (`CLIPTokenizer`):
            Tokenizer of class
            [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/CompVis/stable-diffusion-v1-4) for details.
        feature_extractor ([`CLIPFeatureExtractor`]):
            Model that extracts features from generated images to be used as inputs for the `safety_checker`.
    """
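
    # A minimal usage sketch (illustrative only; the checkpoint id, the `init_image`/`mask_image`
    # PIL images, and the prompt below are placeholder assumptions, not part of this module):
    #
    #     pipe = StableDiffusionInpaintPipeline.from_pretrained("CompVis/stable-diffusion-v1-4").to("cuda")
    #     pipe.enable_attention_slicing()
    #     output = pipe(prompt="a red bench", init_image=init_image, mask_image=mask_image, strength=0.75)
    #     output.images[0].save("inpainted.png")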

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        scheduler: Union[DDIMScheduler, PNDMScheduler],
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPFeatureExtractor,
    ):
        super().__init__()
        scheduler = scheduler.set_format("pt")
        logger.info("`StableDiffusionInpaintPipeline` is experimental and will very likely change in the future.")
        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )

    def enable_attention_slicing(self, slice_size: Optional[Union[str, int]] = "auto"):
        r"""
        Enable sliced attention computation.

        When this option is enabled, the attention module will split the input tensor in slices, to compute attention
        in several steps. This is useful to save some memory in exchange for a small speed decrease.

        Args:
            slice_size (`str` or `int`, *optional*, defaults to `"auto"`):
                When `"auto"`, halves the input to the attention heads, so attention will be computed in two steps. If
                a number is provided, uses as many slices as `attention_head_dim // slice_size`. In this case,
                `attention_head_dim` must be a multiple of `slice_size`.
        """
        if slice_size == "auto":
            # half the attention head size is usually a good trade-off between speed and memory
            slice_size = self.unet.config.attention_head_dim // 2
        self.unet.set_attention_slice(slice_size)

    def disable_attention_slicing(self):
        r"""
        Disable sliced attention computation. If `enable_attention_slicing` was previously invoked, this method will
        go back to computing attention in one step.
        """
        # setting `slice_size` to `None` disables attention slicing
        self.enable_attention_slicing(None)

    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]],
        init_image: Union[torch.FloatTensor, PIL.Image.Image],
        mask_image: Union[torch.FloatTensor, PIL.Image.Image],
        strength: float = 0.8,
        num_inference_steps: Optional[int] = 50,
        guidance_scale: Optional[float] = 7.5,
        eta: Optional[float] = 0.0,
        generator: Optional[torch.Generator] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
    ):
        r"""
        Function invoked when calling the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`):
                The prompt or prompts to guide the image generation.
            init_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, that will be used as the starting point for the
                process. This is the image whose masked region will be inpainted.
            mask_image (`torch.FloatTensor` or `PIL.Image.Image`):
                `Image`, or tensor representing an image batch, to mask `init_image`. White pixels in the mask will be
                replaced by noise and therefore repainted, while black pixels will be preserved. The mask image will
                be converted to a single channel (luminance) before use.
            strength (`float`, *optional*, defaults to 0.8):
                Conceptually, indicates how much to inpaint the masked area. Must be between 0 and 1. When `strength`
                is 1, the denoising process will be run on the masked area for the full number of iterations specified
                in `num_inference_steps`. `init_image` will be used as a reference for the masked area, with more
                noise added to that region the larger the `strength`. If `strength` is 0, no inpainting will occur.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The reference number of denoising steps. More denoising steps usually lead to a higher quality image
                at the expense of slower inference. This parameter will be modulated by `strength`, as explained
                above.
            guidance_scale (`float`, *optional*, defaults to 7.5):
                Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
                `guidance_scale` is defined as `w` of equation 2. of [Imagen
                Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
                1`. A higher guidance scale encourages generating images that are closely linked to the text `prompt`,
                usually at the expense of lower image quality.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
                [`schedulers.DDIMScheduler`], and is ignored for other schedulers.
            generator (`torch.Generator`, *optional*):
                A [torch generator](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make generation
                deterministic.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between
                [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
                plain tuple.

        Returns:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
            [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a
            `tuple`. When returning a tuple, the first element is a list with the generated images, and the second
            element is a list of `bool`s denoting whether the corresponding generated image likely represents
            "not-safe-for-work" (nsfw) content, according to the `safety_checker`.
        """
        if isinstance(prompt, str):
            batch_size = 1
        elif isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        # set timesteps; some schedulers accept an `offset` argument to shift the timestep grid
        accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
        extra_set_kwargs = {}
        offset = 0
        if accepts_offset:
            offset = 1
            extra_set_kwargs["offset"] = 1

        self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)

        # preprocess the init image
        init_image = preprocess_image(init_image).to(self.device)

        # encode the init image into latents and scale the latents
        init_latent_dist = self.vae.encode(init_image.to(self.device)).latent_dist
        init_latents = init_latent_dist.sample(generator=generator)
        init_latents = 0.18215 * init_latents

        # expand init_latents for batch_size
        init_latents = torch.cat([init_latents] * batch_size)
        init_latents_orig = init_latents

        # preprocess the mask and expand it for batch_size
        mask = preprocess_mask(mask_image).to(self.device)
        mask = torch.cat([mask] * batch_size)

        # check that the mask and the latents have the same shape
        if mask.shape != init_latents.shape:
            raise ValueError("The mask and init_image should be the same size!")

        # determine the starting timestep from `strength`
        init_timestep = int(num_inference_steps * strength) + offset
        init_timestep = min(init_timestep, num_inference_steps)
        timesteps = self.scheduler.timesteps[-init_timestep]
        timesteps = torch.tensor([timesteps] * batch_size, dtype=torch.long, device=self.device)
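        # Worked example (illustrative numbers only): with num_inference_steps=50, strength=0.8 and
        # offset=1, init_timestep = int(50 * 0.8) + 1 = 41, so noise is added at the 41st-to-last
        # timestep and, below, denoising starts at t_start = 50 - 41 + 1 = 10, i.e. roughly the
        # last 40 of the 50 scheduler steps are run.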

        # add noise to the latents using the timesteps
        noise = torch.randn(init_latents.shape, generator=generator, device=self.device)
        init_latents = self.scheduler.add_noise(init_latents, noise, timesteps)

        # get prompt text embeddings
        text_input = self.tokenizer(
            prompt,
            padding="max_length",
            max_length=self.tokenizer.model_max_length,
            truncation=True,
            return_tensors="pt",
        )
        text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) of the
        # Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` corresponds to
        # doing no classifier-free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0
        # get unconditional embeddings for classifier-free guidance
        if do_classifier_free_guidance:
            max_length = text_input.input_ids.shape[-1]
            uncond_input = self.tokenizer(
                [""] * batch_size, padding="max_length", max_length=max_length, return_tensors="pt"
            )
            uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]

            # for classifier-free guidance we need two forward passes; here the unconditional and
            # text embeddings are concatenated into a single batch to avoid doing two passes
            text_embeddings = torch.cat([uncond_embeddings, text_embeddings])

        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature.
        # eta (η) is only used with the DDIMScheduler and is ignored by other schedulers.
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        latents = init_latents
        t_start = max(num_inference_steps - init_timestep + offset, 0)
        for i, t in tqdm(enumerate(self.scheduler.timesteps[t_start:])):
            # expand the latents if we are doing classifier-free guidance
            latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents

            # predict the noise residual
            noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample

            # perform guidance
            if do_classifier_free_guidance:
                noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)

            # compute the previous noisy sample x_t -> x_t-1
            latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample

            # masking: keep the original (re-noised) latents where mask == 1, the denoised latents elsewhere
            init_latents_proper = self.scheduler.add_noise(init_latents_orig, noise, t)
            latents = (init_latents_proper * mask) + (latents * (1 - mask))

        # scale and decode the image latents with the vae
        latents = 1 / 0.18215 * latents
        image = self.vae.decode(latents).sample

        image = (image / 2 + 0.5).clamp(0, 1)
        image = image.cpu().permute(0, 2, 3, 1).numpy()

        # run the safety checker on the generated images
        safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(self.device)
        image, has_nsfw_concept = self.safety_checker(images=image, clip_input=safety_checker_input.pixel_values)

        if output_type == "pil":
            image = self.numpy_to_pil(image)

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)