# Copyright 2024 Stability AI, Kwai-Kolors Team and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import inspect
from typing import Any, Callable, Dict, List, Optional, Tuple, Union

import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPVisionModelWithProjection

from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import IPAdapterMixin, StableDiffusionXLLoraLoaderMixin
from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
from diffusers.models.attention_processor import AttnProcessor2_0, FusedAttnProcessor2_0, XFormersAttnProcessor
from diffusers.pipelines.kolors.pipeline_output import KolorsPipelineOutput
from diffusers.pipelines.kolors.text_encoder import ChatGLMModel
from diffusers.pipelines.kolors.tokenizer import ChatGLMTokenizer
from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import is_torch_xla_available, logging, replace_example_docstring
from diffusers.utils.torch_utils import randn_tensor


if is_torch_xla_available():
    import torch_xla.core.xla_model as xm

    XLA_AVAILABLE = True
else:
    XLA_AVAILABLE = False

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import KolorsDifferentialImg2ImgPipeline
        >>> from diffusers.utils import load_image

        >>> pipe = KolorsDifferentialImg2ImgPipeline.from_pretrained(
        ...     "Kwai-Kolors/Kolors-diffusers", variant="fp16", torch_dtype=torch.float16
        ... )
        >>> pipe = pipe.to("cuda")

        >>> url = (
        ...     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/kolors/bunny_source.png"
        ... )
        >>> init_image = load_image(url)
        >>> prompt = "high quality image of a capybara wearing sunglasses. In the background of the image there are trees, poles, grass and other objects. At the bottom of the object there is the road., 8k, highly detailed."
        >>> image = pipe(prompt, image=init_image).images[0]
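
        >>> # Differential img2img: pass a grayscale change map via `map`. This is an illustrative
        >>> # sketch that builds a horizontal gradient map programmatically; any grayscale image of
        >>> # the same aspect ratio works. As implemented by the per-step thresholding in this
        >>> # pipeline, darker map regions are released from the input earlier and change more,
        >>> # while brighter regions track the input longer and are preserved.
        >>> import numpy as np
        >>> from PIL import Image

        >>> gradient = np.tile(np.linspace(0, 255, init_image.width, dtype=np.uint8), (init_image.height, 1))
        >>> change_map = Image.fromarray(gradient, mode="L")
        >>> image = pipe(prompt, image=init_image, map=change_map, strength=1.0).images[0]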
        ```
"""


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
def retrieve_latents(
    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
):
    if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
        return encoder_output.latent_dist.sample(generator)
    elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
        return encoder_output.latent_dist.mode()
    elif hasattr(encoder_output, "latents"):
        return encoder_output.latents
    else:
        raise AttributeError("Could not access latents of provided encoder_output")


# Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    sigmas: Optional[List[float]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
            must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
            `num_inference_steps` and `sigmas` must be `None`.
        sigmas (`List[float]`, *optional*):
            Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
            `num_inference_steps` and `timesteps` must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
        second element is the number of inference steps.
    """
    if timesteps is not None and sigmas is not None:
        raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    elif sigmas is not None:
        accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accept_sigmas:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" sigma schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps
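

# Illustrative sketch (not part of the pipeline): `retrieve_timesteps` can be driven either by a
# step count or, for schedulers whose `set_timesteps` accepts them, by a custom `sigmas` list:
#
#     scheduler = EulerDiscreteScheduler.from_pretrained("Kwai-Kolors/Kolors-diffusers", subfolder="scheduler")
#     timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=30, device="cpu")
#     timesteps, n = retrieve_timesteps(scheduler, sigmas=[14.6, 7.0, 3.0, 1.0, 0.0], device="cpu")
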

class KolorsDifferentialImg2ImgPipeline(
    DiffusionPipeline, StableDiffusionMixin, StableDiffusionXLLoraLoaderMixin, IPAdapterMixin
):
    r"""
    Pipeline for differential image-to-image generation using Kolors.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the
    library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.)

    The pipeline also inherits the following loading methods:
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.StableDiffusionXLLoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`ChatGLMModel`]):
            Frozen text-encoder. Kolors uses [ChatGLM3-6B](https://huggingface.co/THUDM/chatglm3-6b).
        tokenizer (`ChatGLMTokenizer`):
            Tokenizer of class
            [ChatGLMTokenizer](https://huggingface.co/THUDM/chatglm3-6b/blob/main/tokenization_chatglm.py).
        unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        force_zeros_for_empty_prompt (`bool`, *optional*, defaults to `False`):
            Whether the negative prompt embeddings shall be forced to always be set to 0. Also see the config of
            `Kwai-Kolors/Kolors-diffusers`.
    """
    model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
    _optional_components = [
        "image_encoder",
        "feature_extractor",
    ]
    _callback_tensor_inputs = [
        "latents",
        "prompt_embeds",
        "negative_prompt_embeds",
        "add_text_embeds",
        "add_time_ids",
        "negative_pooled_prompt_embeds",
        "negative_add_time_ids",
    ]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: ChatGLMModel,
        tokenizer: ChatGLMTokenizer,
        unet: UNet2DConditionModel,
        scheduler: KarrasDiffusionSchedulers,
        image_encoder: CLIPVisionModelWithProjection = None,
        feature_extractor: CLIPImageProcessor = None,
        force_zeros_for_empty_prompt: bool = False,
    ):
        super().__init__()

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            scheduler=scheduler,
            image_encoder=image_encoder,
            feature_extractor=feature_extractor,
        )
        self.register_to_config(force_zeros_for_empty_prompt=force_zeros_for_empty_prompt)
        self.vae_scale_factor = (
            2 ** (len(self.vae.config.block_out_channels) - 1) if hasattr(self, "vae") and self.vae is not None else 8
        )
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.mask_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_convert_grayscale=True
        )

        self.default_sample_size = self.unet.config.sample_size

    # Copied from diffusers.pipelines.kolors.pipeline_kolors.KolorsPipeline.encode_prompt
    def encode_prompt(
        self,
        prompt,
        device: Optional[torch.device] = None,
        num_images_per_prompt: int = 1,
        do_classifier_free_guidance: bool = True,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        max_sequence_length: int = 256,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
                less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from `prompt` input argument.
            pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
                If not provided, pooled text embeddings will be generated from `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
                argument.
            negative_pooled_prompt_embeds (`torch.Tensor`, *optional*):
                Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
                input argument.
            max_sequence_length (`int`, defaults to 256): Maximum sequence length to use with the `prompt`.
        """
        device = device or self._execution_device

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        # Define tokenizers and text encoders
        tokenizers = [self.tokenizer]
        text_encoders = [self.text_encoder]

        if prompt_embeds is None:
            prompt_embeds_list = []
            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                text_inputs = tokenizer(
                    prompt,
                    padding="max_length",
                    max_length=max_sequence_length,
                    truncation=True,
                    return_tensors="pt",
                ).to(device)
                output = text_encoder(
                    input_ids=text_inputs["input_ids"],
                    attention_mask=text_inputs["attention_mask"],
                    position_ids=text_inputs["position_ids"],
                    output_hidden_states=True,
                )

                # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size]
                # clone to have a contiguous tensor
                prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
                # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size]
                pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone()
                bs_embed, seq_len, _ = prompt_embeds.shape
                prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
                prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

                prompt_embeds_list.append(prompt_embeds)

            prompt_embeds = prompt_embeds_list[0]

        # get unconditional embeddings for classifier free guidance
        zero_out_negative_prompt = negative_prompt is None and self.config.force_zeros_for_empty_prompt

        if do_classifier_free_guidance and negative_prompt_embeds is None and zero_out_negative_prompt:
            negative_prompt_embeds = torch.zeros_like(prompt_embeds)
        elif do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            negative_prompt_embeds_list = []

            for tokenizer, text_encoder in zip(tokenizers, text_encoders):
                uncond_input = tokenizer(
                    uncond_tokens,
                    padding="max_length",
                    max_length=max_sequence_length,
                    truncation=True,
                    return_tensors="pt",
                ).to(device)
                output = text_encoder(
                    input_ids=uncond_input["input_ids"],
                    attention_mask=uncond_input["attention_mask"],
                    position_ids=uncond_input["position_ids"],
                    output_hidden_states=True,
                )

                # [max_sequence_length, batch, hidden_size] -> [batch, max_sequence_length, hidden_size]
                # clone to have a contiguous tensor
                negative_prompt_embeds = output.hidden_states[-2].permute(1, 0, 2).clone()
                # [max_sequence_length, batch, hidden_size] -> [batch, hidden_size]
                negative_pooled_prompt_embeds = output.hidden_states[-1][-1, :, :].clone()

                if do_classifier_free_guidance:
                    # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
                    seq_len = negative_prompt_embeds.shape[1]

                    negative_prompt_embeds = negative_prompt_embeds.to(dtype=text_encoder.dtype, device=device)
                    negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
                    negative_prompt_embeds = negative_prompt_embeds.view(
                        batch_size * num_images_per_prompt, seq_len, -1
                    )

                negative_prompt_embeds_list.append(negative_prompt_embeds)

            negative_prompt_embeds = negative_prompt_embeds_list[0]

        bs_embed = pooled_prompt_embeds.shape[0]
        pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
            bs_embed * num_images_per_prompt, -1
        )

        if do_classifier_free_guidance:
            negative_pooled_prompt_embeds = negative_pooled_prompt_embeds.repeat(1, num_images_per_prompt).view(
                bs_embed * num_images_per_prompt, -1
            )

        return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
    def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        if output_hidden_states:
            image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
            image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
            uncond_image_enc_hidden_states = self.image_encoder(
                torch.zeros_like(image), output_hidden_states=True
            ).hidden_states[-2]
            uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
                num_images_per_prompt, dim=0
            )
            return image_enc_hidden_states, uncond_image_enc_hidden_states
        else:
            image_embeds = self.image_encoder(image).image_embeds
            image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

            uncond_image_embeds = torch.zeros_like(image_embeds)
            return image_embeds, uncond_image_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
    def prepare_ip_adapter_image_embeds(
        self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
    ):
        image_embeds = []
        if do_classifier_free_guidance:
            negative_image_embeds = []
        if ip_adapter_image_embeds is None:
            if not isinstance(ip_adapter_image, list):
                ip_adapter_image = [ip_adapter_image]

            if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
                raise ValueError(
                    f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
                )

            for single_ip_adapter_image, image_proj_layer in zip(
                ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
            ):
                output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
                single_image_embeds, single_negative_image_embeds = self.encode_image(
                    single_ip_adapter_image, device, 1, output_hidden_state
                )

                image_embeds.append(single_image_embeds[None, :])
                if do_classifier_free_guidance:
                    negative_image_embeds.append(single_negative_image_embeds[None, :])
        else:
            for single_image_embeds in ip_adapter_image_embeds:
                if do_classifier_free_guidance:
                    single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
                    negative_image_embeds.append(single_negative_image_embeds)
                image_embeds.append(single_image_embeds)

        ip_adapter_image_embeds = []
        for i, single_image_embeds in enumerate(image_embeds):
            single_image_embeds = torch.cat([single_image_embeds] * num_images_per_prompt, dim=0)
            if do_classifier_free_guidance:
                single_negative_image_embeds = torch.cat([negative_image_embeds[i]] * num_images_per_prompt, dim=0)
                single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds], dim=0)

            single_image_embeds = single_image_embeds.to(device=device)
            ip_adapter_image_embeds.append(single_image_embeds)

        return ip_adapter_image_embeds

    # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        strength,
        num_inference_steps,
        height,
        width,
        negative_prompt=None,
        prompt_embeds=None,
        pooled_prompt_embeds=None,
        negative_prompt_embeds=None,
        negative_pooled_prompt_embeds=None,
        ip_adapter_image=None,
        ip_adapter_image_embeds=None,
        callback_on_step_end_tensor_inputs=None,
        max_sequence_length=None,
    ):
        if strength < 0 or strength > 1:
            raise ValueError(f"The value of strength should be in [0.0, 1.0] but is {strength}")

        if not isinstance(num_inference_steps, int) or num_inference_steps <= 0:
            raise ValueError(
                f"`num_inference_steps` has to be a positive integer but is {num_inference_steps} of type"
                f" {type(num_inference_steps)}."
            )

        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if prompt_embeds is not None and pooled_prompt_embeds is None:
            raise ValueError(
                "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
            )

        if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
            raise ValueError(
                "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
            )

        if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
            raise ValueError(
                "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
            )

        if ip_adapter_image_embeds is not None:
            if not isinstance(ip_adapter_image_embeds, list):
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
                )
            elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
                raise ValueError(
                    f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
                )

        if max_sequence_length is not None and max_sequence_length > 256:
            raise ValueError(f"`max_sequence_length` cannot be greater than 256 but is {max_sequence_length}")

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.get_timesteps
    def get_timesteps(self, num_inference_steps, strength, device, denoising_start=None):
        # get the original timestep using init_timestep
        if denoising_start is None:
            init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
            t_start = max(num_inference_steps - init_timestep, 0)
        else:
            t_start = 0

        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        # Strength is irrelevant if we directly request a timestep to start at;
        # that is, strength is determined by the denoising_start instead.
        if denoising_start is not None:
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (denoising_start * self.scheduler.config.num_train_timesteps)
                )
            )

            num_inference_steps = (timesteps < discrete_timestep_cutoff).sum().item()
            if self.scheduler.order == 2 and num_inference_steps % 2 == 0:
                # if the scheduler is a 2nd order scheduler we might have to do +1
                # because `num_inference_steps` might be even given that every timestep
                # (except the highest one) is duplicated. If `num_inference_steps` is even it would
                # mean that we cut the timesteps in the middle of the denoising step
                # (between 1st and 2nd derivative) which leads to incorrect results. By adding 1
                # we ensure that the denoising process always ends after the 2nd derivative step of the scheduler
                num_inference_steps = num_inference_steps + 1

            # because t_n+1 >= t_n, we slice the timesteps starting from the end
            timesteps = timesteps[-num_inference_steps:]
            return timesteps, num_inference_steps

        return timesteps, num_inference_steps - t_start
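
    # Worked example (illustrative): with `num_inference_steps=50` and `strength=0.3`,
    # `init_timestep = min(int(50 * 0.3), 50) = 15` and `t_start = 50 - 15 = 35`, so only the
    # last 15 scheduler timesteps are run and the input image is noised to the level of step 35.
    # With `strength=1.0` the full 50-step schedule runs and the input is fully re-noised.
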
    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl_img2img.StableDiffusionXLImg2ImgPipeline.prepare_latents
    def prepare_latents(
        self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None, add_noise=True
    ):
        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        latents_mean = latents_std = None
        if hasattr(self.vae.config, "latents_mean") and self.vae.config.latents_mean is not None:
            latents_mean = torch.tensor(self.vae.config.latents_mean).view(1, 4, 1, 1)
        if hasattr(self.vae.config, "latents_std") and self.vae.config.latents_std is not None:
            latents_std = torch.tensor(self.vae.config.latents_std).view(1, 4, 1, 1)

        # Offload text encoder if `enable_model_cpu_offload` was enabled
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.text_encoder.to("cpu")  # Kolors registers a single ChatGLM text encoder (no `text_encoder_2`)
            torch.cuda.empty_cache()

        image = image.to(device=device, dtype=dtype)

        batch_size = batch_size * num_images_per_prompt

        if image.shape[1] == 4:
            init_latents = image
        else:
            # make sure the VAE is in float32 mode, as it overflows in float16
            if self.vae.config.force_upcast:
                image = image.float()
                self.vae.to(dtype=torch.float32)

            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                if image.shape[0] < batch_size and batch_size % image.shape[0] == 0:
                    image = torch.cat([image] * (batch_size // image.shape[0]), dim=0)
                elif image.shape[0] < batch_size and batch_size % image.shape[0] != 0:
                    raise ValueError(
                        f"Cannot duplicate `image` of batch size {image.shape[0]} to effective batch_size {batch_size} "
                    )

                init_latents = [
                    retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i])
                    for i in range(batch_size)
                ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                init_latents = retrieve_latents(self.vae.encode(image), generator=generator)

            if self.vae.config.force_upcast:
                self.vae.to(dtype)

            init_latents = init_latents.to(dtype)
            if latents_mean is not None and latents_std is not None:
                latents_mean = latents_mean.to(device=device, dtype=dtype)
                latents_std = latents_std.to(device=device, dtype=dtype)
                init_latents = (init_latents - latents_mean) * self.vae.config.scaling_factor / latents_std
            else:
                init_latents = self.vae.config.scaling_factor * init_latents

        if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
            # expand init_latents for batch_size
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
        elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        if add_noise:
            shape = init_latents.shape
            noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
            # get latents
            init_latents = self.scheduler.add_noise(init_latents, noise, timestep)

        latents = init_latents

        return latents

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline._get_add_time_ids
    def _get_add_time_ids(
        self, original_size, crops_coords_top_left, target_size, dtype, text_encoder_projection_dim=None
    ):
        add_time_ids = list(original_size + crops_coords_top_left + target_size)

        passed_add_embed_dim = (
            self.unet.config.addition_time_embed_dim * len(add_time_ids) + text_encoder_projection_dim
        )
        expected_add_embed_dim = self.unet.add_embedding.linear_1.in_features

        if expected_add_embed_dim != passed_add_embed_dim:
            raise ValueError(
                f"Model expects an added time embedding vector of length {expected_add_embed_dim}, but a vector of {passed_add_embed_dim} was created. The model has an incorrect config. Please check `unet.config.time_embedding_type` and `text_encoder_2.config.projection_dim`."
            )

        add_time_ids = torch.tensor([add_time_ids], dtype=dtype)
        return add_time_ids
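
    # Illustrative note: `add_time_ids` is the SDXL-style micro-conditioning sextuple
    # `(original_height, original_width, crop_top, crop_left, target_height, target_width)`,
    # e.g. inputs `(1024, 1024), (0, 0), (1024, 1024)` become `[[1024, 1024, 0, 0, 1024, 1024]]`.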

    # Copied from diffusers.pipelines.stable_diffusion_xl.pipeline_stable_diffusion_xl.StableDiffusionXLPipeline.upcast_vae
    def upcast_vae(self):
        dtype = self.vae.dtype
        self.vae.to(dtype=torch.float32)
        use_torch_2_0_or_xformers = isinstance(
            self.vae.decoder.mid_block.attentions[0].processor,
            (
                AttnProcessor2_0,
                XFormersAttnProcessor,
                FusedAttnProcessor2_0,
            ),
        )
        # if xformers or torch_2_0 is used attention block does not need
        # to be in float32 which can save lots of memory
        if use_torch_2_0_or_xformers:
            self.vae.post_quant_conv.to(dtype)
            self.vae.decoder.conv_in.to(dtype)
            self.vae.decoder.mid_block.to(dtype)

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(
        self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
    ) -> torch.Tensor:
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
            embedding_dim (`int`, *optional*, defaults to 512):
                Dimension of the embeddings to generate.
            dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
                Data type of the generated embeddings.

        Returns:
            `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb
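
    # Illustrative: this is a standard sinusoidal embedding of the (scaled) guidance weight, e.g.
    # `get_guidance_scale_embedding(torch.tensor([7.5]), embedding_dim=256)` returns a `(1, 256)`
    # tensor whose first half is sines and second half cosines at geometrically spaced frequencies.
    # It is only used when `unet.config.time_cond_proj_dim` is set.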

    @property
    def guidance_scale(self):
        return self._guidance_scale

    # here `guidance_scale` is defined analogous to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def denoising_start(self):
        return self._denoising_start

    @property
    def denoising_end(self):
        return self._denoising_end

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @property
    def interrupt(self):
        return self._interrupt

    @torch.no_grad()
    @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        strength: float = 0.3,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        sigmas: List[float] = None,
        denoising_start: Optional[float] = None,
        denoising_end: Optional[float] = None,
        guidance_scale: float = 5.0,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.Tensor] = None,
        prompt_embeds: Optional[torch.Tensor] = None,
        pooled_prompt_embeds: Optional[torch.Tensor] = None,
        negative_prompt_embeds: Optional[torch.Tensor] = None,
        negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
        ip_adapter_image: Optional[PipelineImageInput] = None,
        ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        original_size: Optional[Tuple[int, int]] = None,
        crops_coords_top_left: Tuple[int, int] = (0, 0),
        target_size: Optional[Tuple[int, int]] = None,
        negative_original_size: Optional[Tuple[int, int]] = None,
        negative_crops_coords_top_left: Tuple[int, int] = (0, 0),
        negative_target_size: Optional[Tuple[int, int]] = None,
        callback_on_step_end: Optional[
            Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
        ] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        max_sequence_length: int = 256,
        map: PipelineImageInput = None,
    ):
| r""" | |
| Function invoked when calling the pipeline for generation. | |
| Args: | |
| prompt (`str` or `List[str]`, *optional*): | |
| The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`. | |
| instead. | |
| image (`torch.Tensor` or `PIL.Image.Image` or `np.ndarray` or `List[torch.Tensor]` or `List[PIL.Image.Image]` or `List[np.ndarray]`): | |
| The image(s) to modify with the pipeline. | |
| strength (`float`, *optional*, defaults to 0.3): | |
| Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image` | |
| will be used as a starting point, adding more noise to it the larger the `strength`. The number of | |
| denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will | |
| be maximum and the denoising process will run for the full number of iterations specified in | |
| `num_inference_steps`. A value of 1, therefore, essentially ignores `image`. Note that in the case of | |
| `denoising_start` being declared as an integer, the value of `strength` will be ignored. | |
| height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): | |
| The height in pixels of the generated image. This is set to 1024 by default for the best results. | |
| Anything below 512 pixels won't work well for | |
| [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints | |
| that are not specifically fine-tuned on low resolutions. | |
| width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): | |
| The width in pixels of the generated image. This is set to 1024 by default for the best results. | |
| Anything below 512 pixels won't work well for | |
| [Kwai-Kolors/Kolors-diffusers](https://huggingface.co/Kwai-Kolors/Kolors-diffusers) and checkpoints | |
| that are not specifically fine-tuned on low resolutions. | |
| num_inference_steps (`int`, *optional*, defaults to 50): | |
| The number of denoising steps. More denoising steps usually lead to a higher quality image at the | |
| expense of slower inference. | |
| timesteps (`List[int]`, *optional*): | |
| Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument | |
| in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is | |
| passed will be used. Must be in descending order. | |
| sigmas (`List[float]`, *optional*): | |
| Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in | |
| their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed | |
| will be used. | |
| denoising_start (`float`, *optional*): | |
| When specified, indicates the fraction (between 0.0 and 1.0) of the total denoising process to be | |
| bypassed before it is initiated. Consequently, the initial part of the denoising process is skipped and | |
| it is assumed that the passed `image` is a partly denoised image. Note that when this is specified, | |
| strength will be ignored. The `denoising_start` parameter is particularly beneficial when this pipeline | |
| is integrated into a "Mixture of Denoisers" multi-pipeline setup, as detailed in [**Refine Image | |
| Quality**](https://huggingface.co/docs/diffusers/using-diffusers/sdxl#refine-image-quality). | |
| denoising_end (`float`, *optional*): | |
| When specified, determines the fraction (between 0.0 and 1.0) of the total denoising process to be | |
| completed before it is intentionally prematurely terminated. As a result, the returned sample will | |
| still retain a substantial amount of noise as determined by the discrete timesteps selected by the | |
| scheduler. The denoising_end parameter should ideally be utilized when this pipeline forms a part of a | |
| "Mixture of Denoisers" multi-pipeline setup, as elaborated in [**Refining the Image | |
| Output**](https://huggingface.co/docs/diffusers/api/pipelines/stable_diffusion/stable_diffusion_xl#refining-the-image-output) | |
| guidance_scale (`float`, *optional*, defaults to 5.0): | |
| Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). | |
| `guidance_scale` is defined as `w` of equation 2. of [Imagen | |
| Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > | |
| 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, | |
| usually at the expense of lower image quality. | |
| negative_prompt (`str` or `List[str]`, *optional*): | |
| The prompt or prompts not to guide the image generation. If not defined, one has to pass | |
| `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is | |
| less than `1`). | |
| num_images_per_prompt (`int`, *optional*, defaults to 1): | |
| The number of images to generate per prompt. | |
| eta (`float`, *optional*, defaults to 0.0): | |
| Corresponds to parameter eta (Ξ·) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to | |
| [`schedulers.DDIMScheduler`], will be ignored for others. | |
| generator (`torch.Generator` or `List[torch.Generator]`, *optional*): | |
| One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) | |
| to make generation deterministic. | |
| latents (`torch.Tensor`, *optional*): | |
| Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image | |
| generation. Can be used to tweak the same generation with different prompts. If not provided, a latents | |
| tensor will ge generated by sampling using the supplied random `generator`. | |
| prompt_embeds (`torch.Tensor`, *optional*): | |
| Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not | |
| provided, text embeddings will be generated from `prompt` input argument. | |
| pooled_prompt_embeds (`torch.Tensor`, *optional*): | |
| Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. | |
| If not provided, pooled text embeddings will be generated from `prompt` input argument. | |
| negative_prompt_embeds (`torch.Tensor`, *optional*): | |
| Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | |
| weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input | |
| argument. | |
| negative_pooled_prompt_embeds (`torch.Tensor`, *optional*): | |
| Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt | |
| weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt` | |
| input argument. | |
| ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters. | |
| ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*): | |
| Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of | |
| IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should | |
| contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not | |
| provided, embeddings are computed from the `ip_adapter_image` input argument. | |
| output_type (`str`, *optional*, defaults to `"pil"`): | |
| The output format of the generate image. Choose between | |
| [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. | |
| return_dict (`bool`, *optional*, defaults to `True`): | |
| Whether or not to return a [`~pipelines.kolors.KolorsPipelineOutput`] instead of a plain tuple. | |
| cross_attention_kwargs (`dict`, *optional*): | |
| A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under | |
| `self.processor` in | |
| [diffusers.models.attention_processor](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py). | |
| original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): | |
| If `original_size` is not the same as `target_size` the image will appear to be down- or upsampled. | |
| `original_size` defaults to `(height, width)` if not specified. Part of SDXL's micro-conditioning as | |
| explained in section 2.2 of | |
| [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). | |
| crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): | |
| `crops_coords_top_left` can be used to generate an image that appears to be "cropped" from the position | |
| `crops_coords_top_left` downwards. Favorable, well-centered images are usually achieved by setting | |
| `crops_coords_top_left` to (0, 0). Part of SDXL's micro-conditioning as explained in section 2.2 of | |
| [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). | |
| target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): | |
| For most cases, `target_size` should be set to the desired height and width of the generated image. If | |
| not specified it will default to `(height, width)`. Part of SDXL's micro-conditioning as explained in | |
| section 2.2 of [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). | |
| negative_original_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): | |
| To negatively condition the generation process based on a specific image resolution. Part of SDXL's | |
| micro-conditioning as explained in section 2.2 of | |
| [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more | |
| information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. | |
| negative_crops_coords_top_left (`Tuple[int]`, *optional*, defaults to (0, 0)): | |
| To negatively condition the generation process based on a specific crop coordinates. Part of SDXL's | |
| micro-conditioning as explained in section 2.2 of | |
| [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more | |
| information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. | |
| negative_target_size (`Tuple[int]`, *optional*, defaults to (1024, 1024)): | |
| To negatively condition the generation process based on a target image resolution. It should be as same | |
| as the `target_size` for most cases. Part of SDXL's micro-conditioning as explained in section 2.2 of | |
| [https://huggingface.co/papers/2307.01952](https://huggingface.co/papers/2307.01952). For more | |
| information, refer to this issue thread: https://github.com/huggingface/diffusers/issues/4208. | |
| callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*): | |
| A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of | |
| each denoising step during the inference. with the following arguments: `callback_on_step_end(self: | |
| DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a | |
| list of all tensors as specified by `callback_on_step_end_tensor_inputs`. | |
| callback_on_step_end_tensor_inputs (`List`, *optional*): | |
| The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list | |
| will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the | |
| `._callback_tensor_inputs` attribute of your pipeline class. | |
| max_sequence_length (`int` defaults to 256): Maximum sequence length to use with the `prompt`. | |
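            map (`PipelineImageInput`, *optional*):
                A grayscale change map for differential image-to-image, resized to the latent resolution. As
                implemented here, a latent pixel whose map value still exceeds the per-step threshold keeps being
                replaced with the re-noised input at that step, so regions with higher map values stay closer to
                the input `image`, while regions with lower values are released earlier and change more.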

        Examples:

        Returns:
            [`~pipelines.kolors.KolorsPipelineOutput`] or `tuple`: [`~pipelines.kolors.KolorsPipelineOutput`] if
            `return_dict` is True, otherwise a `tuple`. When returning a tuple, the first element is a list with the
            generated images.
        """
        if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
            callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs

        # 0. Default height and width to unet
        height = height or self.default_sample_size * self.vae_scale_factor
        width = width or self.default_sample_size * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            strength,
            num_inference_steps,
            height,
            width,
            negative_prompt,
            prompt_embeds,
            pooled_prompt_embeds,
            negative_prompt_embeds,
            negative_pooled_prompt_embeds,
            ip_adapter_image,
            ip_adapter_image_embeds,
            callback_on_step_end_tensor_inputs,
            max_sequence_length=max_sequence_length,
        )

        self._guidance_scale = guidance_scale
        self._cross_attention_kwargs = cross_attention_kwargs
        self._denoising_end = denoising_end
        self._denoising_start = denoising_start
        self._interrupt = False

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        (
            prompt_embeds,
            negative_prompt_embeds,
            pooled_prompt_embeds,
            negative_pooled_prompt_embeds,
        ) = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=self.do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
        )

        # 4. Preprocess image
        init_image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
        map = self.mask_processor.preprocess(
            map, height=height // self.vae_scale_factor, width=width // self.vae_scale_factor
        ).to(device)
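        # Note: the change map is preprocessed directly at latent resolution (height // vae_scale_factor,
        # width // vae_scale_factor), since it is compared against latent pixels rather than image pixels.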

        # 5. Prepare timesteps
        def denoising_value_valid(dnv):
            return isinstance(dnv, float) and 0 < dnv < 1

        timesteps, num_inference_steps = retrieve_timesteps(
            self.scheduler, num_inference_steps, device, timesteps, sigmas
        )

        # begin diff diff change
        total_time_steps = num_inference_steps
        # end diff diff change

        timesteps, num_inference_steps = self.get_timesteps(
            num_inference_steps,
            strength,
            device,
            denoising_start=self.denoising_start if denoising_value_valid(self.denoising_start) else None,
        )
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        add_noise = True if self.denoising_start is None else False

        # 6. Prepare latent variables
        if latents is None:
            latents = self.prepare_latents(
                init_image,
                latent_timestep,
                batch_size,
                num_images_per_prompt,
                prompt_embeds.dtype,
                device,
                generator,
                add_noise,
            )

        # 7. Prepare extra step kwargs.
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        height, width = latents.shape[-2:]
        height = height * self.vae_scale_factor
        width = width * self.vae_scale_factor

        original_size = original_size or (height, width)
        target_size = target_size or (height, width)

        # 8. Prepare added time ids & embeddings
        add_text_embeds = pooled_prompt_embeds
        text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])

        add_time_ids = self._get_add_time_ids(
            original_size,
            crops_coords_top_left,
            target_size,
            dtype=prompt_embeds.dtype,
            text_encoder_projection_dim=text_encoder_projection_dim,
        )

        if negative_original_size is not None and negative_target_size is not None:
            negative_add_time_ids = self._get_add_time_ids(
                negative_original_size,
                negative_crops_coords_top_left,
                negative_target_size,
                dtype=prompt_embeds.dtype,
                text_encoder_projection_dim=text_encoder_projection_dim,
            )
        else:
            negative_add_time_ids = add_time_ids

        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
            add_text_embeds = torch.cat([negative_pooled_prompt_embeds, add_text_embeds], dim=0)
            add_time_ids = torch.cat([negative_add_time_ids, add_time_ids], dim=0)

        prompt_embeds = prompt_embeds.to(device)
        add_text_embeds = add_text_embeds.to(device)
        add_time_ids = add_time_ids.to(device).repeat(batch_size * num_images_per_prompt, 1)

        if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
            image_embeds = self.prepare_ip_adapter_image_embeds(
                ip_adapter_image,
                ip_adapter_image_embeds,
                device,
                batch_size * num_images_per_prompt,
                self.do_classifier_free_guidance,
            )

        # 9. Denoising loop
        num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)

        # preparations for diff diff
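        # `original_with_noise[i]` holds the input latents noised to the level of `timesteps[i]`, and
        # `masks[i]` marks the latent pixels whose map value still exceeds the step's threshold
        # (`i / total_time_steps`); those pixels are re-injected from the noised input at step `i`.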
        original_with_noise = self.prepare_latents(
            init_image, timesteps, batch_size, num_images_per_prompt, prompt_embeds.dtype, device, generator
        )
        thresholds = torch.arange(total_time_steps, dtype=map.dtype) / total_time_steps
        thresholds = thresholds.unsqueeze(1).unsqueeze(1).to(device)
        masks = map.squeeze() > thresholds
        # end diff diff preparations

        # 9.1 Apply denoising_end
        if (
            self.denoising_end is not None
            and self.denoising_start is not None
            and denoising_value_valid(self.denoising_end)
            and denoising_value_valid(self.denoising_start)
            and self.denoising_start >= self.denoising_end
        ):
            raise ValueError(
                f"`denoising_start`: {self.denoising_start} cannot be larger than or equal to `denoising_end`: "
                + f" {self.denoising_end} when using type float."
            )
        elif self.denoising_end is not None and denoising_value_valid(self.denoising_end):
            discrete_timestep_cutoff = int(
                round(
                    self.scheduler.config.num_train_timesteps
                    - (self.denoising_end * self.scheduler.config.num_train_timesteps)
                )
            )
            num_inference_steps = len(list(filter(lambda ts: ts >= discrete_timestep_cutoff, timesteps)))
            timesteps = timesteps[:num_inference_steps]

        # 9.2 Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)

        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                if self.interrupt:
                    continue

                # diff diff
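                # At step 0 everything starts from the fully re-noised input; afterwards, pixels whose
                # map value exceeds the current threshold are overwritten with the input noised to the
                # current level, while the remaining pixels keep the freely denoised latents.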
                if i == 0:
                    latents = original_with_noise[:1]
                else:
                    mask = masks[i].unsqueeze(0).to(latents.dtype)
                    mask = mask.unsqueeze(1)  # fit shape
                    latents = original_with_noise[i] * mask + latents * (1 - mask)
                # end diff diff

                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents

                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                added_cond_kwargs = {"text_embeds": add_text_embeds, "time_ids": add_time_ids}
                if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
                    added_cond_kwargs["image_embeds"] = image_embeds

                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    timestep_cond=timestep_cond,
                    cross_attention_kwargs=self.cross_attention_kwargs,
                    added_cond_kwargs=added_cond_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                # compute the previous noisy sample x_t -> x_t-1
                latents_dtype = latents.dtype
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
                if latents.dtype != latents_dtype:
                    if torch.backends.mps.is_available():
                        # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                        latents = latents.to(latents_dtype)

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
                    add_text_embeds = callback_outputs.pop("add_text_embeds", add_text_embeds)
                    negative_pooled_prompt_embeds = callback_outputs.pop(
                        "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
                    )
                    add_time_ids = callback_outputs.pop("add_time_ids", add_time_ids)
                    negative_add_time_ids = callback_outputs.pop("negative_add_time_ids", negative_add_time_ids)

                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()

                if XLA_AVAILABLE:
                    xm.mark_step()

        if not output_type == "latent":
            # make sure the VAE is in float32 mode, as it overflows in float16
            needs_upcasting = self.vae.dtype == torch.float16 and self.vae.config.force_upcast

            if needs_upcasting:
                self.upcast_vae()
                latents = latents.to(next(iter(self.vae.post_quant_conv.parameters())).dtype)
            elif latents.dtype != self.vae.dtype:
                if torch.backends.mps.is_available():
                    # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
                    self.vae = self.vae.to(latents.dtype)

            # unscale/denormalize the latents
            latents = latents / self.vae.config.scaling_factor

            image = self.vae.decode(latents, return_dict=False)[0]

            # cast back to fp16 if needed
            if needs_upcasting:
                self.vae.to(dtype=torch.float16)
        else:
            image = latents

        if not output_type == "latent":
            image = self.image_processor.postprocess(image, output_type=output_type)

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return (image,)

        return KolorsPipelineOutput(images=image)