import inspect
from typing import Callable, List, Optional, Tuple, Union

import numpy as np
import torch

from diffusers.image_processor import PixArtImageProcessor
from diffusers.models import AutoencoderKL
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, ImagePipelineOutput
from diffusers.schedulers import DPMSolverMultistepScheduler
from diffusers.utils import logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor

from .pixcell_controlnet import PixCellControlNet
from .pixcell_controlnet_transformer_2d import PixCellTransformer2DModelControlNet

logger = logging.get_logger(__name__)

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import PixArtSigmaPipeline

        >>> # You can replace the checkpoint id with "PixArt-alpha/PixArt-Sigma-XL-2-512-MS" too.
        >>> pipe = PixArtSigmaPipeline.from_pretrained(
        ...     "PixArt-alpha/PixArt-Sigma-XL-2-1024-MS", torch_dtype=torch.float16
        ... )
        >>> # Enable memory optimizations.
        >>> # pipe.enable_model_cpu_offload()

        >>> prompt = "A small cactus with a happy face in the Sahara desert."
        >>> image = pipe(prompt).images[0]
        ```
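
        A sketch of calling this ControlNet pipeline directly. The checkpoint path, the random
        `uni_embeds`, and the all-zero conditioning image are illustrative placeholders, not
        verified values:

        ```py
        >>> import numpy as np

        >>> pipe = PixCellControlNetPipeline.from_pretrained(
        ...     "path/to/pixcell-controlnet", torch_dtype=torch.float16
        ... ).to("cuda")

        >>> # uni_embeds: (B, N, D) SSL embeddings matching the transformer's caption config;
        >>> # random here purely for illustration.
        >>> num_tokens = pipe.transformer.config.caption_num_tokens
        >>> dim = pipe.transformer.config.caption_channels
        >>> uni_embeds = torch.randn(1, num_tokens, dim, dtype=torch.float16, device="cuda")
        >>> negative_uni_embeds = pipe.get_unconditional_embedding(batch_size=1)

        >>> # controlnet_input: an H x W x 3 uint8 array; `None` disables the ControlNet.
        >>> control = np.zeros((1024, 1024, 3), dtype=np.uint8)
        >>> image = pipe(
        ...     uni_embeds=uni_embeds,
        ...     negative_uni_embeds=negative_uni_embeds,
        ...     controlnet_input=control,
        ...     guidance_scale=1.5,
        ... ).images[0]
        ```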
| | """ |
| |
|
| |
|
| | |


def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    r"""
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
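
    Examples:
        A minimal illustration; pass exactly one of `num_inference_steps`, `timesteps`, or `sigmas`,
        and only a schedule type that the scheduler's `set_timesteps` actually supports:

        ```py
        >>> timesteps, num_inference_steps = retrieve_timesteps(scheduler, num_inference_steps=20)
        >>> timesteps, num_inference_steps = retrieve_timesteps(scheduler, sigmas=[1.0, 0.7, 0.4, 0.0])
        ```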
| | """ |
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigma schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps


class PixCellControlNetPipeline(DiffusionPipeline):
    r"""
    Pipeline for SSL-to-image generation using PixCell with ControlNet conditioning.
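
    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        transformer ([`PixCellTransformer2DModelControlNet`]):
            The PixCell transformer that denoises the image latents, extended with ControlNet hooks.
        controlnet ([`PixCellControlNet`]):
            The ControlNet that maps the conditioning latent to features consumed by the transformer.
        scheduler ([`DPMSolverMultistepScheduler`]):
            A scheduler to be used in combination with `transformer` to denoise the encoded image latents.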
| | """ |
| |
|
| | model_cpu_offload_seq = "transformer->vae" |
| |
|

    def __init__(
        self,
        vae: AutoencoderKL,
        transformer: PixCellTransformer2DModelControlNet,
        controlnet: PixCellControlNet,
        scheduler: DPMSolverMultistepScheduler,
    ):
        super().__init__()

        self.register_modules(
            vae=vae, transformer=transformer, controlnet=controlnet, scheduler=scheduler
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = PixArtImageProcessor(vae_scale_factor=self.vae_scale_factor)

    def prepare_extra_step_kwargs(self, generator, eta):
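        """Build the extra kwargs (`eta`, `generator`) that this pipeline's scheduler `step` method accepts."""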
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in the DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]
        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def get_unconditional_embedding(self, batch_size=1):
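        """Return the transformer's learned unconditional UNI embedding, tiled to `(batch_size, N, D)`, e.g. for use as `negative_uni_embeds`."""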
        uncond = self.transformer.caption_projection.uncond_embedding.clone().tile(batch_size, 1, 1)
        return uncond

    def check_inputs(
        self,
        height,
        width,
        callback_steps,
        uni_embeds=None,
        negative_uni_embeds=None,
        guidance_scale=None,
    ):
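        """Validate the requested resolution, callback settings, and UNI embedding shapes, raising `ValueError` on any mismatch."""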
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if (callback_steps is None) or (
            callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
        ):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )

        if uni_embeds is None:
            raise ValueError("Provide a UNI embedding `uni_embeds`.")
        elif len(uni_embeds.shape) != 3:
            raise ValueError("The UNI embedding must have shape (B, N, D).")
        elif uni_embeds.shape[1] != self.transformer.config.caption_num_tokens:
            raise ValueError(
                f"The number of UNI embedding tokens must match the one used in training ({self.transformer.config.caption_num_tokens})."
            )
        elif uni_embeds.shape[2] != self.transformer.config.caption_channels:
            raise ValueError("The UNI embedding has incorrect dimensions.")

        if guidance_scale is not None and guidance_scale > 1.0:
            if negative_uni_embeds is None:
                raise ValueError("Provide a negative UNI embedding `negative_uni_embeds`.")
            elif len(negative_uni_embeds.shape) != 3:
                raise ValueError("The negative UNI embedding must have shape (B, N, D).")
            elif negative_uni_embeds.shape[1] != self.transformer.config.caption_num_tokens:
                raise ValueError(
                    f"The number of negative UNI embedding tokens must match the one used in training ({self.transformer.config.caption_num_tokens})."
                )
            elif negative_uni_embeds.shape[2] != self.transformer.config.caption_channels:
                raise ValueError("The negative UNI embedding has incorrect dimensions.")

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
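        """Sample initial noise latents of shape `(batch, channels, height // vae_scale_factor, width // vae_scale_factor)`, or move user-provided `latents` to `device`."""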
        shape = (
            batch_size,
            num_channels_latents,
            int(height) // self.vae_scale_factor,
            int(width) // self.vae_scale_factor,
        )
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        num_inference_steps: int = 20,
        timesteps: Optional[List[int]] = None,
        sigmas: Optional[List[float]] = None,
        guidance_scale: float = 1.5,
        controlnet_input: Optional[np.ndarray] = None,
        num_images_per_prompt: Optional[int] = 1,
        height: Optional[int] = None,
        width: Optional[int] = None,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        uni_embeds: Optional[torch.Tensor] = None,
        negative_uni_embeds: Optional[torch.Tensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
        callback_steps: int = 1,
        **kwargs,
    ) -> Union[ImagePipelineOutput, Tuple]:
| | """ |
| | Function invoked when calling the pipeline for generation. |
| | |
| | Args: |
| | num_inference_steps (`int`, *optional*, defaults to 100): |
| | The number of denoising steps. More denoising steps usually lead to a higher quality image at the |
| | expense of slower inference. |
| | timesteps (`List[int]`, *optional*): |
| | Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument |
| | in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is |
| | passed will be used. Must be in descending order. |
| | sigmas (`List[float]`, *optional*): |
| | Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in |
| | their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed |
| | will be used. |
| | guidance_scale (`float`, *optional*, defaults to 4.5): |
| | Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). |
| | `guidance_scale` is defined as `w` of equation 2. of [Imagen |
| | Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > |
| | 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, |
| | usually at the expense of lower image quality. |
| | controlnet_input (`torch.Tensor`, *optional*, defaults to None): |
| | The conditioning input to the ControlNet. If none is provided then the ControlNet is not used. |
| | num_images_per_prompt (`int`, *optional*, defaults to 1): |
| | The number of images to generate per prompt. |
| | height (`int`, *optional*, defaults to self.unet.config.sample_size): |
| | The height in pixels of the generated image. |
| | width (`int`, *optional*, defaults to self.unet.config.sample_size): |
| | The width in pixels of the generated image. |
| | eta (`float`, *optional*, defaults to 0.0): |
| | Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to |
| | [`schedulers.DDIMScheduler`], will be ignored for others. |
| | generator (`torch.Generator` or `List[torch.Generator]`, *optional*): |
| | One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) |
| | to make generation deterministic. |
| | latents (`torch.Tensor`, *optional*): |
| | Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image |
| | generation. Can be used to tweak the same generation with different prompts. If not provided, a latents |
| | tensor will ge generated by sampling using the supplied random `generator`. |
| | uni_embeds (`torch.Tensor`, *optional*): |
| | Pre-generated UNI embeddings. |
| | negative_uni_embeds (`torch.Tensor`, *optional*): |
| | Pre-generated negative UNI embeddings. |
| | output_type (`str`, *optional*, defaults to `"pil"`): |
| | The output format of the generate image. Choose between |
| | [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. |
| | return_dict (`bool`, *optional*, defaults to `True`): |
| | Whether or not to return a [`~pipelines.stable_diffusion.IFPipelineOutput`] instead of a plain tuple. |
| | callback (`Callable`, *optional*): |
| | A function that will be called every `callback_steps` steps during inference. The function will be |
| | called with the following arguments: `callback(step: int, timestep: int, latents: torch.Tensor)`. |
| | callback_steps (`int`, *optional*, defaults to 1): |
| | The frequency at which the `callback` function will be called. If not specified, the callback will be |
| | called at every step. |
| | |
| | Examples: |
| | |
| | Returns: |
| | [`~pipelines.ImagePipelineOutput`] or `tuple`: |
| | If `return_dict` is `True`, [`~pipelines.ImagePipelineOutput`] is returned, otherwise a `tuple` is |
| | returned where the first element is a list with the generated images |
| | """ |
        # 1. Default height and width to the transformer's native resolution, then validate inputs
        height = height or self.transformer.config.sample_size * self.vae_scale_factor
        width = width or self.transformer.config.sample_size * self.vae_scale_factor

        self.check_inputs(
            height,
            width,
            callback_steps,
            uni_embeds,
            negative_uni_embeds,
            guidance_scale,
        )

        # 2. Define the batch size from the UNI embedding
        batch_size = uni_embeds.shape[0]

        device = self._execution_device

        # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
        # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
        # corresponds to doing no classifier free guidance.
        do_classifier_free_guidance = guidance_scale > 1.0

        # 3. Duplicate the UNI embeddings for each requested image
        uni_embeds = uni_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        if do_classifier_free_guidance:
            negative_uni_embeds = negative_uni_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        # 4. Encode the ControlNet conditioning image into the VAE latent space
        if controlnet_input is not None:
            # HWC uint8 array in [0, 255] -> NCHW tensor in [-1, 1], matching the VAE's dtype
            controlnet_input_torch = (
                torch.from_numpy(controlnet_input.copy() / 255.0)
                .to(device=device, dtype=self.vae.dtype)
                .permute([2, 0, 1])
                .unsqueeze(0)
            )
            controlnet_input_torch = 2 * (controlnet_input_torch - 0.5)

            vae_scale = self.vae.config.scaling_factor
            vae_shift = getattr(self.vae.config, "shift_factor", 0)
            controlnet_input_latent = self.vae.encode(controlnet_input_torch).latent_dist.mean
            controlnet_input_latent = (controlnet_input_latent - vae_shift) * vae_scale

        # 5. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # 6. Prepare latents
        latent_channels = self.transformer.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt,
            latent_channels,
            height,
            width,
            uni_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 7. Prepare extra step kwargs
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        added_cond_kwargs = {}

        # 8. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                latent_model_input = self.scheduler.scale_model_input(latents, t)

                current_timestep = t
                if not torch.is_tensor(current_timestep):
                    # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
                    # This would be a good case for the `match` statement (Python 3.10+)
                    is_mps = latent_model_input.device.type == "mps"
                    if isinstance(current_timestep, float):
                        dtype = torch.float32 if is_mps else torch.float64
                    else:
                        dtype = torch.int32 if is_mps else torch.int64
                    current_timestep = torch.tensor([current_timestep], dtype=dtype, device=latent_model_input.device)
                elif len(current_timestep.shape) == 0:
                    current_timestep = current_timestep[None].to(latent_model_input.device)
                # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
                current_timestep = current_timestep.expand(latent_model_input.shape[0])

                # get the ControlNet features for the conditioning latent
                if controlnet_input is not None:
                    controlnet_outputs = self.controlnet(
                        hidden_states=latent_model_input,
                        conditioning=controlnet_input_latent,
                        encoder_hidden_states=uni_embeds,
                        timestep=current_timestep,
                        return_dict=False,
                    )[0]
                else:
                    controlnet_outputs = None

                # predict the noise residual with the conditional UNI embedding
                noise_pred_cond = self.transformer(
                    latent_model_input,
                    encoder_hidden_states=uni_embeds,
                    controlnet_outputs=controlnet_outputs,
                    timestep=current_timestep,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform classifier-free guidance with a second, unconditional forward pass
                if do_classifier_free_guidance:
                    # the unconditional branch uses the negative UNI embedding and no ControlNet features
                    noise_pred_uncond = self.transformer(
                        latent_model_input,
                        encoder_hidden_states=negative_uni_embeds,
                        controlnet_outputs=None,
                        timestep=current_timestep,
                        added_cond_kwargs=added_cond_kwargs,
                        return_dict=False,
                    )[0]
                    noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_cond - noise_pred_uncond)
                else:
                    noise_pred = noise_pred_cond

                # learned sigma: keep only the noise prediction half of the channels
                if self.transformer.config.out_channels // 2 == latent_channels:
                    noise_pred = noise_pred.chunk(2, dim=1)[0]

                # compute previous image: x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        # 9. Decode the latents into images (unless latents were requested)
        if output_type != "latent":
            vae_scale = self.vae.config.scaling_factor
            vae_shift = getattr(self.vae.config, "shift_factor", 0)

            image = self.vae.decode((latents / vae_scale) + vae_shift, return_dict=False)[0]
            image = self.image_processor.postprocess(image, output_type=output_type)
        else:
            image = latents

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return ImagePipelineOutput(images=image)