import math
from dataclasses import dataclass
from typing import Any, Dict, List, Optional, Tuple, Union

import numpy as np
import PIL.Image
import torch
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer

from diffusers import (
    AutoencoderKL,
    AutoencoderTiny,
    ConfigMixin,
    ControlNetModel,
    DiffusionPipeline,
    SchedulerMixin,
    UNet2DConditionModel,
    logging,
)
from diffusers.configuration_utils import register_to_config
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
from diffusers.pipelines.stable_diffusion.safety_checker import (
    StableDiffusionSafetyChecker,
)
from diffusers.utils import BaseOutput, deprecate
from diffusers.utils.torch_utils import is_compiled_module, randn_tensor

logger = logging.get_logger(__name__)  # pylint: disable=invalid-name

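# A minimal usage sketch for the pipeline defined below. Assumptions: the
# checkpoint names are illustrative, `init_image`/`canny_image` are placeholder
# PIL images, and this file is saved locally as
# `latent_consistency_controlnet.py` so it can be loaded via `custom_pipeline`.
EXAMPLE_DOC_STRING = """
    Examples:
        ```py
        >>> import torch
        >>> from diffusers import ControlNetModel, DiffusionPipeline

        >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny")
        >>> pipe = DiffusionPipeline.from_pretrained(
        ...     "SimianLuo/LCM_Dreamshaper_v7",
        ...     custom_pipeline="./latent_consistency_controlnet.py",
        ...     controlnet=controlnet,
        ... ).to("cuda")

        >>> # `image` seeds the img2img latents, `control_image` conditions the ControlNet.
        >>> result = pipe(
        ...     prompt="a photo of a cat",
        ...     image=init_image,
        ...     control_image=canny_image,
        ...     strength=0.8,
        ...     num_inference_steps=4,
        ...     guidance_scale=8.0,
        ... ).images[0]
        ```
"""
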
class LatentConsistencyModelPipeline_controlnet(DiffusionPipeline):
    _optional_components = ["scheduler"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        controlnet: Union[
            ControlNetModel,
            List[ControlNetModel],
            Tuple[ControlNetModel],
            MultiControlNetModel,
        ],
        unet: UNet2DConditionModel,
        scheduler: "LCMScheduler_X",
        safety_checker: StableDiffusionSafetyChecker,
        feature_extractor: CLIPImageProcessor,
        requires_safety_checker: bool = True,
    ):
        super().__init__()

        # Fall back to a default LCM scheduler when none is supplied
        # (`scheduler` is listed in `_optional_components`).
        scheduler = (
            scheduler
            if scheduler is not None
            else LCMScheduler_X(
                beta_start=0.00085,
                beta_end=0.0120,
                beta_schedule="scaled_linear",
                prediction_type="epsilon",
            )
        )

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            controlnet=controlnet,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
        )
        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.control_image_processor = VaeImageProcessor(
            vae_scale_factor=self.vae_scale_factor,
            do_convert_rgb=True,
            do_normalize=False,
        )

    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        prompt_embeds: Optional[torch.FloatTensor] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
                provided, text embeddings will be generated from the `prompt` input argument.
        """
        if prompt_embeds is None:
            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(
                prompt, padding="longest", return_tensors="pt"
            ).input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if (
                hasattr(self.text_encoder.config, "use_attention_mask")
                and self.text_encoder.config.use_attention_mask
            ):
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            prompt_embeds = self.text_encoder(
                text_input_ids.to(device),
                attention_mask=attention_mask,
            )
            prompt_embeds = prompt_embeds[0]

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # No unconditional embeddings are needed: LCM guided distillation bakes
        # classifier-free guidance into the model via the `w` embedding.
        return prompt_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(
                    image, output_type="pil"
                )
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(
                feature_extractor_input, return_tensors="pt"
            ).to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def prepare_control_image(
        self,
        image,
        width,
        height,
        batch_size,
        num_images_per_prompt,
        device,
        dtype,
        do_classifier_free_guidance=False,
        guess_mode=False,
    ):
        image = self.control_image_processor.preprocess(
            image, height=height, width=width
        ).to(dtype=dtype)
        image_batch_size = image.shape[0]

        if image_batch_size == 1:
            repeat_by = batch_size
        else:
            # image batch size is the same as prompt batch size
            repeat_by = num_images_per_prompt

        image = image.repeat_interleave(repeat_by, dim=0)

        image = image.to(device=device, dtype=dtype)

        if do_classifier_free_guidance and not guess_mode:
            image = torch.cat([image] * 2)

        return image

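    # Batching note for `prepare_control_image` (illustrative): the caller
    # passes `batch_size` already multiplied by `num_images_per_prompt`, so a
    # single (1, 3, H, W) control image with an effective batch of 4 becomes
    # (4, 3, H, W) via `repeat_interleave(4, dim=0)`.
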
    def prepare_latents(
        self,
        image,
        timestep,
        batch_size,
        num_channels_latents,
        height,
        width,
        dtype,
        device,
        latents=None,
        generator=None,
    ):
        shape = (
            batch_size,
            num_channels_latents,
            height // self.vae_scale_factor,
            width // self.vae_scale_factor,
        )

        if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
            raise ValueError(
                f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
            )

        image = image.to(device=device, dtype=dtype)

        # If `image` is already a latent tensor (4 channels), use it directly;
        # otherwise encode it with the VAE.
        if image.shape[1] == 4:
            init_latents = image
        else:
            if isinstance(generator, list) and len(generator) != batch_size:
                raise ValueError(
                    f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                    f" size of {batch_size}. Make sure the batch size matches the length of the generators."
                )
            elif isinstance(generator, list):
                if isinstance(self.vae, AutoencoderTiny):
                    init_latents = [
                        self.vae.encode(image[i : i + 1]).latents
                        for i in range(batch_size)
                    ]
                else:
                    init_latents = [
                        self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i])
                        for i in range(batch_size)
                    ]
                init_latents = torch.cat(init_latents, dim=0)
            else:
                if isinstance(self.vae, AutoencoderTiny):
                    init_latents = self.vae.encode(image).latents
                else:
                    init_latents = self.vae.encode(image).latent_dist.sample(generator)

            init_latents = self.vae.config.scaling_factor * init_latents

        if (
            batch_size > init_latents.shape[0]
            and batch_size % init_latents.shape[0] == 0
        ):
            # expand init_latents to match the requested batch size
            deprecation_message = (
                f"You have passed {batch_size} text prompts (`prompt`), but only {init_latents.shape[0]} initial"
                " images (`image`). Initial images are now duplicated to match the number of text prompts. Note"
                " that this behavior is deprecated and will be removed in version 1.0.0. Please make sure to update"
                " your script to pass as many initial images as text prompts to suppress this warning."
            )
            deprecate("len(prompt) != len(image)", "1.0.0", deprecation_message, standard_warn=False)
            additional_image_per_prompt = batch_size // init_latents.shape[0]
            init_latents = torch.cat(
                [init_latents] * additional_image_per_prompt, dim=0
            )
        elif (
            batch_size > init_latents.shape[0]
            and batch_size % init_latents.shape[0] != 0
        ):
            raise ValueError(
                f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
            )
        else:
            init_latents = torch.cat([init_latents], dim=0)

        shape = init_latents.shape
        noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)

        # Noise the encoded image to the given timestep (img2img).
        init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
        latents = init_latents

        return latents

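    # `prepare_latents` implements the standard img2img recipe: VAE-encode the
    # init image, scale by `vae.config.scaling_factor`, then noise the latents
    # to the first retained timestep with `scheduler.add_noise`, so only the
    # tail of the schedule (controlled by `strength`) is denoised.
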
    def get_w_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
        w: torch.Tensor: guidance scales to generate embedding vectors for
        embedding_dim: int: dimension of the embeddings to generate
        dtype: data type of the generated embeddings
        Returns:
        embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

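    # Shape sanity check for `get_w_embedding` (a sketch; `pipe` is an assumed
    # pipeline instance):
    #
    #     w = torch.tensor([8.0, 8.0])  # one guidance scale per sample
    #     emb = pipe.get_w_embedding(w, embedding_dim=256)
    #     assert emb.shape == (2, 256)  # sin/cos halves concatenated
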
    def get_timesteps(self, num_inference_steps, strength, device):
        # get the original timestep using init_timestep
        init_timestep = min(int(num_inference_steps * strength), num_inference_steps)

        t_start = max(num_inference_steps - init_timestep, 0)
        timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]

        return timesteps, num_inference_steps - t_start

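    # Worked example for `get_timesteps`: with num_inference_steps=4,
    # strength=0.5 and a first-order scheduler, init_timestep = int(4 * 0.5) = 2
    # and t_start = 4 - 2 = 2, so only the last two entries of
    # `scheduler.timesteps` are kept and `(timesteps, 2)` is returned.
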
    @torch.no_grad()
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        image: PipelineImageInput = None,
        control_image: PipelineImageInput = None,
        strength: float = 0.8,
        height: Optional[int] = 768,
        width: Optional[int] = 768,
        guidance_scale: float = 7.5,
        num_images_per_prompt: Optional[int] = 1,
        latents: Optional[torch.FloatTensor] = None,
        generator: Optional[torch.Generator] = None,
        num_inference_steps: int = 4,
        lcm_origin_steps: int = 50,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
        guess_mode: bool = True,
        control_guidance_start: Union[float, List[float]] = 0.0,
        control_guidance_end: Union[float, List[float]] = 1.0,
    ):
        controlnet = (
            self.controlnet._orig_mod
            if is_compiled_module(self.controlnet)
            else self.controlnet
        )

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # Align `control_guidance_start`/`control_guidance_end` so both end up
        # as lists of the same length (one entry per ControlNet).
        if not isinstance(control_guidance_start, list) and isinstance(
            control_guidance_end, list
        ):
            control_guidance_start = len(control_guidance_end) * [
                control_guidance_start
            ]
        elif not isinstance(control_guidance_end, list) and isinstance(
            control_guidance_start, list
        ):
            control_guidance_end = len(control_guidance_start) * [control_guidance_end]
        elif not isinstance(control_guidance_start, list) and not isinstance(
            control_guidance_end, list
        ):
            mult = (
                len(controlnet.nets)
                if isinstance(controlnet, MultiControlNetModel)
                else 1
            )
            control_guidance_start, control_guidance_end = (
                mult * [control_guidance_start],
                mult * [control_guidance_end],
            )

        # 1. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        global_pool_conditions = (
            controlnet.config.global_pool_conditions
            if isinstance(controlnet, ControlNetModel)
            else controlnet.nets[0].config.global_pool_conditions
        )
        guess_mode = guess_mode or global_pool_conditions

        # 2. Encode input prompt. LCM distillation bakes guidance into the
        # model, so no negative/unconditional embeddings are produced here.
        prompt_embeds = self._encode_prompt(
            prompt,
            device,
            num_images_per_prompt,
            prompt_embeds=prompt_embeds,
        )

        # 3. Preprocess the init image for img2img.
        image = self.image_processor.preprocess(image)

        # 4. Prepare the ControlNet conditioning image(s). LCM sampling does
        # not use classifier-free guidance, so the control batch is never
        # doubled.
        if isinstance(controlnet, ControlNetModel):
            control_image = self.prepare_control_image(
                image=control_image,
                width=width,
                height=height,
                batch_size=batch_size * num_images_per_prompt,
                num_images_per_prompt=num_images_per_prompt,
                device=device,
                dtype=controlnet.dtype,
                do_classifier_free_guidance=False,
                guess_mode=guess_mode,
            )
        elif isinstance(controlnet, MultiControlNetModel):
            control_images = []

            for control_image_ in control_image:
                control_image_ = self.prepare_control_image(
                    image=control_image_,
                    width=width,
                    height=height,
                    batch_size=batch_size * num_images_per_prompt,
                    num_images_per_prompt=num_images_per_prompt,
                    device=device,
                    dtype=controlnet.dtype,
                    do_classifier_free_guidance=False,
                    guess_mode=guess_mode,
                )

                control_images.append(control_image_)

            control_image = control_images
        else:
            raise ValueError(
                f"`controlnet` must be a `ControlNetModel` or `MultiControlNetModel`, got {type(controlnet)}."
            )

        # 5. Prepare timesteps. This custom scheduler takes `strength` so the
        # img2img schedule can be truncated in a single call.
        self.scheduler.set_timesteps(strength, num_inference_steps, lcm_origin_steps)
        timesteps = self.scheduler.timesteps
        latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)

        logger.debug(f"timesteps: {timesteps}")

        # 6. Prepare latent variables by encoding the init image and noising it
        # to the first retained timestep.
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            image,
            latent_timestep,
            batch_size * num_images_per_prompt,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            latents,
            generator,
        )
        bs = batch_size * num_images_per_prompt

        # 7. Get the guidance-scale embedding (LCM conditions on `w` instead of
        # running classifier-free guidance).
        w = torch.tensor(guidance_scale).repeat(bs)
        w_embedding = self.get_w_embedding(w, embedding_dim=256).to(
            device=device, dtype=latents.dtype
        )

        # 8. Decide, per step and per ControlNet, whether conditioning is
        # active given `control_guidance_start`/`control_guidance_end`.
        controlnet_keep = []
        for i in range(len(timesteps)):
            keeps = [
                1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
                for s, e in zip(control_guidance_start, control_guidance_end)
            ]
            controlnet_keep.append(
                keeps[0] if isinstance(controlnet, ControlNetModel) else keeps
            )

        # 9. LCM multistep sampling loop.
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                ts = torch.full((bs,), t, device=device, dtype=torch.long)
                latents = latents.to(prompt_embeds.dtype)
                if guess_mode:
                    # In guess mode only the conditional batch is fed to the
                    # ControlNet; with LCM there is no unconditional batch, so
                    # only the model-input scaling differs between branches.
                    control_model_input = latents
                    control_model_input = self.scheduler.scale_model_input(
                        control_model_input, ts
                    )
                    controlnet_prompt_embeds = prompt_embeds
                else:
                    control_model_input = latents
                    controlnet_prompt_embeds = prompt_embeds
                if isinstance(controlnet_keep[i], list):
                    cond_scale = [
                        c * s
                        for c, s in zip(
                            controlnet_conditioning_scale, controlnet_keep[i]
                        )
                    ]
                else:
                    controlnet_cond_scale = controlnet_conditioning_scale
                    if isinstance(controlnet_cond_scale, list):
                        controlnet_cond_scale = controlnet_cond_scale[0]
                    cond_scale = controlnet_cond_scale * controlnet_keep[i]

                down_block_res_samples, mid_block_res_sample = self.controlnet(
                    control_model_input,
                    ts,
                    encoder_hidden_states=controlnet_prompt_embeds,
                    controlnet_cond=control_image,
                    conditioning_scale=cond_scale,
                    guess_mode=guess_mode,
                    return_dict=False,
                )

                # Model prediction with the ControlNet residuals and the
                # guidance-scale embedding passed as `timestep_cond`.
                model_pred = self.unet(
                    latents,
                    ts,
                    timestep_cond=w_embedding,
                    encoder_hidden_states=prompt_embeds,
                    cross_attention_kwargs=cross_attention_kwargs,
                    down_block_additional_residuals=down_block_res_samples,
                    mid_block_additional_residual=mid_block_res_sample,
                    return_dict=False,
                )[0]

                # Compute the previous noisy sample x_t -> x_{t-1} and the
                # denoised estimate of x_0.
                latents, denoised = self.scheduler.step(
                    model_pred, i, t, latents, return_dict=False
                )

                progress_bar.update()

        denoised = denoised.to(prompt_embeds.dtype)
        if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
            self.unet.to("cpu")
            self.controlnet.to("cpu")
            torch.cuda.empty_cache()
        if output_type != "latent":
            image = self.vae.decode(
                denoised / self.vae.config.scaling_factor, return_dict=False
            )[0]
            image, has_nsfw_concept = self.run_safety_checker(
                image, device, prompt_embeds.dtype
            )
        else:
            image = denoised
            has_nsfw_concept = None

        if has_nsfw_concept is None:
            do_denormalize = [True] * image.shape[0]
        else:
            do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]

        image = self.image_processor.postprocess(
            image, output_type=output_type, do_denormalize=do_denormalize
        )

        if not return_dict:
            return (image, has_nsfw_concept)

        return StableDiffusionPipelineOutput(
            images=image, nsfw_content_detected=has_nsfw_concept
        )

@dataclass
class LCMSchedulerOutput(BaseOutput):
    """
    Output class for the scheduler's `step` function output.

    Args:
        prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            Computed sample `(x_{t-1})` of previous timestep. `prev_sample` should be used as next model input in the
            denoising loop.
        denoised (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images):
            The predicted denoised sample `(x_{0})` based on the model output from the current timestep.
            `denoised` can be used to preview progress or for guidance.
    """

    prev_sample: torch.FloatTensor
    denoised: Optional[torch.FloatTensor] = None

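# Note: with `return_dict=False`, `LCMScheduler_X.step` is expected to return
# this pair as a plain tuple, which the sampling loop above unpacks as
# `latents, denoised = self.scheduler.step(model_pred, i, t, latents, return_dict=False)`.
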
def betas_for_alpha_bar(
    num_diffusion_timesteps,
    max_beta=0.999,
    alpha_transform_type="cosine",
):
    """
    Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of
    (1-beta) over time from t = [0,1].

    Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up
    to that part of the diffusion process.

    Args:
        num_diffusion_timesteps (`int`): the number of betas to produce.
        max_beta (`float`): the maximum beta to use; use values lower than 1 to
                     prevent singularities.
        alpha_transform_type (`str`, *optional*, defaults to `cosine`): the type of noise schedule for alpha_bar.
                     Choose from `cosine` or `exp`.

    Returns:
        betas (`torch.Tensor`): the betas used by the scheduler to step the model outputs
    """
    if alpha_transform_type == "cosine":

        def alpha_bar_fn(t):
            return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2

    elif alpha_transform_type == "exp":

        def alpha_bar_fn(t):
            return math.exp(t * -12.0)

    else:
        raise ValueError(f"Unsupported alpha_transform_type: {alpha_transform_type}")

    betas = []
    for i in range(num_diffusion_timesteps):
        t1 = i / num_diffusion_timesteps
        t2 = (i + 1) / num_diffusion_timesteps
        betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta))
    return torch.tensor(betas, dtype=torch.float32)

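# Illustrative behavior of `betas_for_alpha_bar` (a sketch): with the default
# "cosine" transform, alpha_bar(t) = cos((t + 0.008) / 1.008 * pi / 2) ** 2 and
# beta_i = min(1 - alpha_bar(t_{i+1}) / alpha_bar(t_i), max_beta), e.g.
#
#     betas = betas_for_alpha_bar(1000)  # torch.Size([1000]), increasing, capped at 0.999
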


def rescale_zero_terminal_snr(betas):
    """
    Rescales betas to have zero terminal SNR. Based on https://arxiv.org/pdf/2305.08891.pdf (Algorithm 1)

    Args:
        betas (`torch.FloatTensor`):
            the betas that the scheduler is being initialized with.

    Returns:
        `torch.FloatTensor`: rescaled betas with zero terminal SNR
    """
    # Convert betas to alphas_bar_sqrt
    alphas = 1.0 - betas
    alphas_cumprod = torch.cumprod(alphas, dim=0)
    alphas_bar_sqrt = alphas_cumprod.sqrt()

    # Store old values.
    alphas_bar_sqrt_0 = alphas_bar_sqrt[0].clone()
    alphas_bar_sqrt_T = alphas_bar_sqrt[-1].clone()

    # Shift so the last timestep is zero.
    alphas_bar_sqrt -= alphas_bar_sqrt_T

    # Scale so the first timestep is back to the old value.
    alphas_bar_sqrt *= alphas_bar_sqrt_0 / (alphas_bar_sqrt_0 - alphas_bar_sqrt_T)

    # Convert alphas_bar_sqrt back to betas
    alphas_bar = alphas_bar_sqrt**2  # Revert sqrt
    alphas = alphas_bar[1:] / alphas_bar[:-1]  # Revert cumprod
    alphas = torch.cat([alphas_bar[0:1], alphas])
    betas = 1 - alphas

    return betas
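
# Quick check (editor's illustration): after rescaling, the terminal cumulative
# product of (1 - beta) is exactly zero, i.e. the last timestep carries pure
# noise (zero SNR):
#
#     betas = torch.linspace(0.0001, 0.02, 1000)
#     assert torch.cumprod(1 - rescale_zero_terminal_snr(betas), dim=0)[-1] == 0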


class LCMScheduler_X(SchedulerMixin, ConfigMixin):
    """
    `LCMScheduler` extends the denoising procedure introduced in denoising diffusion probabilistic models (DDPMs) with
    non-Markovian guidance.

    This model inherits from [`SchedulerMixin`] and [`ConfigMixin`]. Check the superclass documentation for the generic
    methods the library implements for all schedulers such as loading and saving.

    Args:
        num_train_timesteps (`int`, defaults to 1000):
            The number of diffusion steps to train the model.
        beta_start (`float`, defaults to 0.0001):
            The starting `beta` value of inference.
        beta_end (`float`, defaults to 0.02):
            The final `beta` value.
        beta_schedule (`str`, defaults to `"linear"`):
            The beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from
            `linear`, `scaled_linear`, or `squaredcos_cap_v2`.
        trained_betas (`np.ndarray`, *optional*):
            Pass an array of betas directly to the constructor to bypass `beta_start` and `beta_end`.
        clip_sample (`bool`, defaults to `True`):
            Clip the predicted sample for numerical stability.
        clip_sample_range (`float`, defaults to 1.0):
            The maximum magnitude for sample clipping. Valid only when `clip_sample=True`.
        set_alpha_to_one (`bool`, defaults to `True`):
            Each diffusion step uses the alphas product value at that step and at the previous one. For the final step
            there is no previous alpha. When this option is `True` the previous alpha product is fixed to `1`,
            otherwise it uses the alpha value at step 0.
        steps_offset (`int`, defaults to 0):
            An offset added to the inference steps. You can use a combination of `offset=1` and
            `set_alpha_to_one=False` to make the last step use step 0 for the previous alpha product, as in Stable
            Diffusion.
        prediction_type (`str`, defaults to `epsilon`, *optional*):
            Prediction type of the scheduler function; can be `epsilon` (predicts the noise of the diffusion process),
            `sample` (directly predicts the noisy sample) or `v_prediction` (see section 2.4 of [Imagen
            Video](https://imagen.research.google/video/paper.pdf) paper).
        thresholding (`bool`, defaults to `False`):
            Whether to use the "dynamic thresholding" method. This is unsuitable for latent-space diffusion models such
            as Stable Diffusion.
        dynamic_thresholding_ratio (`float`, defaults to 0.995):
            The ratio for the dynamic thresholding method. Valid only when `thresholding=True`.
        sample_max_value (`float`, defaults to 1.0):
            The threshold value for dynamic thresholding. Valid only when `thresholding=True`.
        timestep_spacing (`str`, defaults to `"leading"`):
            The way the timesteps should be scaled. Refer to Table 2 of [Common Diffusion Noise Schedules and Sample
            Steps are Flawed](https://huggingface.co/papers/2305.08891) for more information.
        rescale_betas_zero_snr (`bool`, defaults to `False`):
            Whether to rescale the betas to have zero terminal SNR. This enables the model to generate very bright and
            dark samples instead of limiting it to samples with medium brightness. Loosely related to
            [`--offset_noise`](https://github.com/huggingface/diffusers/blob/74fd735eb073eb1d774b1ab4154a0876eb82f055/examples/dreambooth/train_dreambooth.py#L506).
    """

    order = 1

    @register_to_config
    def __init__(
        self,
        num_train_timesteps: int = 1000,
        beta_start: float = 0.0001,
        beta_end: float = 0.02,
        beta_schedule: str = "linear",
        trained_betas: Optional[Union[np.ndarray, List[float]]] = None,
        clip_sample: bool = True,
        set_alpha_to_one: bool = True,
        steps_offset: int = 0,
        prediction_type: str = "epsilon",
        thresholding: bool = False,
        dynamic_thresholding_ratio: float = 0.995,
        clip_sample_range: float = 1.0,
        sample_max_value: float = 1.0,
        timestep_spacing: str = "leading",
        rescale_betas_zero_snr: bool = False,
    ):
        if trained_betas is not None:
            self.betas = torch.tensor(trained_betas, dtype=torch.float32)
        elif beta_schedule == "linear":
            self.betas = torch.linspace(
                beta_start, beta_end, num_train_timesteps, dtype=torch.float32
            )
        elif beta_schedule == "scaled_linear":
            # This schedule is very specific to the latent diffusion model.
            self.betas = (
                torch.linspace(
                    beta_start**0.5,
                    beta_end**0.5,
                    num_train_timesteps,
                    dtype=torch.float32,
                )
                ** 2
            )
        elif beta_schedule == "squaredcos_cap_v2":
            # Glide cosine schedule
            self.betas = betas_for_alpha_bar(num_train_timesteps)
        else:
            raise NotImplementedError(
                f"{beta_schedule} is not implemented for {self.__class__}"
            )

        # Rescale for zero terminal SNR
        if rescale_betas_zero_snr:
            self.betas = rescale_zero_terminal_snr(self.betas)

        self.alphas = 1.0 - self.betas
        self.alphas_cumprod = torch.cumprod(self.alphas, dim=0)

        # At every step we look into the previous alphas_cumprod. For the final
        # step there is no previous alphas_cumprod because we are already at 0.
        # `set_alpha_to_one` decides whether that value is fixed to 1 or taken
        # from the first alpha of the schedule.
        self.final_alpha_cumprod = (
            torch.tensor(1.0) if set_alpha_to_one else self.alphas_cumprod[0]
        )

        # standard deviation of the initial noise distribution
        self.init_noise_sigma = 1.0

        # setable values
        self.num_inference_steps = None
        self.timesteps = torch.from_numpy(
            np.arange(0, num_train_timesteps)[::-1].copy().astype(np.int64)
        )
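
    # Configuration sketch (editor's illustration): the Stable Diffusion v1.x
    # noise schedule this scheduler is typically paired with. The values are
    # the standard SD settings, not something mandated by this class.
    #
    #     scheduler = LCMScheduler_X(
    #         beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
    #     )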

    def scale_model_input(
        self, sample: torch.FloatTensor, timestep: Optional[int] = None
    ) -> torch.FloatTensor:
        """
        Ensures interchangeability with schedulers that need to scale the denoising model input depending on the
        current timestep.

        Args:
            sample (`torch.FloatTensor`):
                The input sample.
            timestep (`int`, *optional*):
                The current timestep in the diffusion chain.

        Returns:
            `torch.FloatTensor`:
                A scaled input sample.
        """
        return sample

    def _get_variance(self, timestep, prev_timestep):
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep >= 0
            else self.final_alpha_cumprod
        )
        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # DDIM variance:
        # sigma_t^2 = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * (1 - alpha_bar_t / alpha_bar_{t-1})
        variance = (beta_prod_t_prev / beta_prod_t) * (
            1 - alpha_prod_t / alpha_prod_t_prev
        )

        return variance

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler._threshold_sample
    def _threshold_sample(self, sample: torch.FloatTensor) -> torch.FloatTensor:
        """
        "Dynamic thresholding: At each sampling step we set s to a certain percentile absolute pixel value in xt0 (the
        prediction of x_0 at timestep t), and if s > 1, then we threshold xt0 to the range [-s, s] and then divide by
        s. Dynamic thresholding pushes saturated pixels (those near -1 and 1) inwards, thereby actively preventing
        pixels from saturation at each step. We find that dynamic thresholding results in significantly better
        photorealism as well as better image-text alignment, especially when using very large guidance weights."

        https://arxiv.org/abs/2205.11487
        """
        dtype = sample.dtype
        batch_size, channels, height, width = sample.shape

        if dtype not in (torch.float32, torch.float64):
            # upcast for quantile calculation; clamp is not implemented for cpu half
            sample = sample.float()

        # Flatten the sample to compute the quantile along each image
        sample = sample.reshape(batch_size, channels * height * width)

        abs_sample = sample.abs()  # "a certain percentile absolute pixel value"

        s = torch.quantile(abs_sample, self.config.dynamic_thresholding_ratio, dim=1)
        s = torch.clamp(
            s, min=1, max=self.config.sample_max_value
        )  # when clamped to min=1, equivalent to standard clipping to [-1, 1]

        s = s.unsqueeze(1)  # (batch_size, 1) so clamp broadcasts along dim=0
        sample = (
            torch.clamp(sample, -s, s) / s
        )  # "we threshold xt0 to the range [-s, s] and then divide by s"

        sample = sample.reshape(batch_size, channels, height, width)
        sample = sample.to(dtype)

        return sample
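
    # Worked example (editor's note, assuming `sample_max_value=1.5`): if the
    # 99.5th-percentile |value| of a predicted x_0 is 2.0, s is clamped to 1.5,
    # so the sample is clamped to [-1.5, 1.5] and divided by 1.5, pulling
    # saturated pixels back toward [-1, 1]. A sample whose percentile is <= 1
    # gets s = 1, i.e. plain clipping to [-1, 1].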

    def set_timesteps(
        self,
        strength,
        num_inference_steps: int,
        lcm_origin_steps: int,
        device: Union[str, torch.device] = None,
    ):
        """
        Sets the discrete timesteps used for the diffusion chain (to be run before inference).

        Args:
            strength (`float`):
                Denoising strength; only the first `int(lcm_origin_steps * strength)` original LCM timesteps are
                kept, enabling img2img-style partial denoising.
            num_inference_steps (`int`):
                The number of diffusion steps used when generating samples with a pre-trained model.
            lcm_origin_steps (`int`):
                The number of timesteps in the original LCM training schedule.
        """

        if num_inference_steps > self.config.num_train_timesteps:
            raise ValueError(
                f"`num_inference_steps`: {num_inference_steps} cannot be larger than `self.config.num_train_timesteps`:"
                f" {self.config.num_train_timesteps} as the unet model trained with this scheduler can only handle"
                f" maximal {self.config.num_train_timesteps} timesteps."
            )

        self.num_inference_steps = num_inference_steps

        # LCM timesteps setting: linear spacing over the original LCM schedule
        c = self.config.num_train_timesteps // lcm_origin_steps
        lcm_origin_timesteps = (
            np.asarray(list(range(1, int(lcm_origin_steps * strength) + 1))) * c - 1
        )  # LCM training steps schedule
        skipping_step = max(len(lcm_origin_timesteps) // num_inference_steps, 1)
        timesteps = lcm_origin_timesteps[::-skipping_step][
            :num_inference_steps
        ]  # LCM inference steps schedule

        self.timesteps = torch.from_numpy(timesteps.copy()).to(device)
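
    # Worked example (editor's note): with num_train_timesteps=1000,
    # lcm_origin_steps=50, strength=1.0 and num_inference_steps=4:
    # c = 20, the origin schedule is [19, 39, ..., 999], skipping_step = 12,
    # and the resulting inference schedule is [999, 759, 519, 279].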

    def get_scalings_for_boundary_condition_discrete(self, t):
        self.sigma_data = 0.5  # Default: 0.5

        # Dividing t by 0.1 rescales the discrete timestep before applying the
        # consistency-model boundary-condition scalings.
        c_skip = self.sigma_data**2 / ((t / 0.1) ** 2 + self.sigma_data**2)
        c_out = (t / 0.1) / ((t / 0.1) ** 2 + self.sigma_data**2) ** 0.5
        return c_skip, c_out
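
    # Boundary-condition check (editor's note): at t = 0 these reduce to
    # c_skip = 1 and c_out = 0, so the consistency function is the identity at
    # t = 0, as required; for large t, c_skip -> 0 and c_out -> 1.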

    def step(
        self,
        model_output: torch.FloatTensor,
        timeindex: int,
        timestep: int,
        sample: torch.FloatTensor,
        eta: float = 0.0,
        use_clipped_model_output: bool = False,
        generator=None,
        variance_noise: Optional[torch.FloatTensor] = None,
        return_dict: bool = True,
    ) -> Union[LCMSchedulerOutput, Tuple]:
        """
        Predict the sample from the previous timestep by reversing the SDE. This function propagates the diffusion
        process from the learned model outputs (most often the predicted noise).

        Args:
            model_output (`torch.FloatTensor`):
                The direct output from the learned diffusion model.
            timeindex (`int`):
                The index of `timestep` in `self.timesteps`.
            timestep (`int`):
                The current discrete timestep in the diffusion chain.
            sample (`torch.FloatTensor`):
                A current instance of a sample created by the diffusion process.
            eta (`float`):
                The weight of noise for added noise in a diffusion step (accepted for API compatibility; not used by
                this scheduler).
            use_clipped_model_output (`bool`, defaults to `False`):
                If `True`, computes "corrected" `model_output` from the clipped predicted original sample. Necessary
                because the predicted original sample is clipped to [-1, 1] when `self.config.clip_sample` is `True`.
                If no clipping has happened, "corrected" `model_output` would coincide with the one provided as input
                and `use_clipped_model_output` has no effect (accepted for API compatibility; not used by this
                scheduler).
            generator (`torch.Generator`, *optional*):
                A random number generator.
            variance_noise (`torch.FloatTensor`):
                Alternative to generating noise with `generator` by directly providing the noise for the variance
                itself. Useful for methods such as [`CycleDiffusion`] (accepted for API compatibility; not used by
                this scheduler).
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`.

        Returns:
            [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] or `tuple`:
                If `return_dict` is `True`, [`~schedulers.scheduling_lcm.LCMSchedulerOutput`] is returned, otherwise a
                tuple is returned where the first element is the sample tensor.
        """
        if self.num_inference_steps is None:
            raise ValueError(
                "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
            )

        # 1. get previous step value
        prev_timeindex = timeindex + 1
        if prev_timeindex < len(self.timesteps):
            prev_timestep = self.timesteps[prev_timeindex]
        else:
            prev_timestep = timestep

        # 2. compute alphas, betas
        alpha_prod_t = self.alphas_cumprod[timestep]
        alpha_prod_t_prev = (
            self.alphas_cumprod[prev_timestep]
            if prev_timestep >= 0
            else self.final_alpha_cumprod
        )

        beta_prod_t = 1 - alpha_prod_t
        beta_prod_t_prev = 1 - alpha_prod_t_prev

        # 3. get scalings for the consistency-model boundary conditions
        c_skip, c_out = self.get_scalings_for_boundary_condition_discrete(timestep)

        # 4. compute the predicted original sample x_0 under the configured parameterization
        parameterization = self.config.prediction_type

        if parameterization == "epsilon":  # noise-prediction
            pred_x0 = (sample - beta_prod_t.sqrt() * model_output) / alpha_prod_t.sqrt()
        elif parameterization == "sample":  # x-prediction
            pred_x0 = model_output
        elif parameterization == "v_prediction":  # v-prediction
            pred_x0 = alpha_prod_t.sqrt() * sample - beta_prod_t.sqrt() * model_output
        else:
            raise ValueError(
                f"prediction_type given as {parameterization} must be one of `epsilon`, `sample` or `v_prediction`"
            )

        # 5. denoise model output using the boundary conditions
        denoised = c_out * pred_x0 + c_skip * sample

        # 6. sample z ~ N(0, I) for multi-step inference;
        # noise is not used for one-step sampling.
        if len(self.timesteps) > 1:
            noise = randn_tensor(
                model_output.shape,
                generator=generator,
                device=model_output.device,
                dtype=model_output.dtype,
            )
            prev_sample = (
                alpha_prod_t_prev.sqrt() * denoised + beta_prod_t_prev.sqrt() * noise
            )
        else:
            prev_sample = denoised

        if not return_dict:
            return (prev_sample, denoised)

        return LCMSchedulerOutput(prev_sample=prev_sample, denoised=denoised)
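
    # Sampling-loop sketch (editor's illustration; `unet`, `latents` and
    # `prompt_embeds` are hypothetical stand-ins for the pipeline's objects):
    #
    #     scheduler.set_timesteps(1.0, num_inference_steps=4, lcm_origin_steps=50)
    #     for i, t in enumerate(scheduler.timesteps):
    #         model_output = unet(latents, t, encoder_hidden_states=prompt_embeds).sample
    #         latents, denoised = scheduler.step(model_output, i, t, latents, return_dict=False)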

    # Copied from diffusers.schedulers.scheduling_ddpm.DDPMScheduler.add_noise
    def add_noise(
        self,
        original_samples: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as original_samples
        alphas_cumprod = self.alphas_cumprod.to(
            device=original_samples.device, dtype=original_samples.dtype
        )
        timesteps = timesteps.to(original_samples.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(original_samples.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(original_samples.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        # Forward process in closed form: x_t = sqrt(alpha_bar_t) * x_0 + sqrt(1 - alpha_bar_t) * eps
        noisy_samples = (
            sqrt_alpha_prod * original_samples + sqrt_one_minus_alpha_prod * noise
        )
        return noisy_samples

    # Copied from diffusers.schedulers.scheduling_ddim.DDIMScheduler.get_velocity
    def get_velocity(
        self,
        sample: torch.FloatTensor,
        noise: torch.FloatTensor,
        timesteps: torch.IntTensor,
    ) -> torch.FloatTensor:
        # Make sure alphas_cumprod and timesteps have the same device and dtype as sample
        alphas_cumprod = self.alphas_cumprod.to(
            device=sample.device, dtype=sample.dtype
        )
        timesteps = timesteps.to(sample.device)

        sqrt_alpha_prod = alphas_cumprod[timesteps] ** 0.5
        sqrt_alpha_prod = sqrt_alpha_prod.flatten()
        while len(sqrt_alpha_prod.shape) < len(sample.shape):
            sqrt_alpha_prod = sqrt_alpha_prod.unsqueeze(-1)

        sqrt_one_minus_alpha_prod = (1 - alphas_cumprod[timesteps]) ** 0.5
        sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.flatten()
        while len(sqrt_one_minus_alpha_prod.shape) < len(sample.shape):
            sqrt_one_minus_alpha_prod = sqrt_one_minus_alpha_prod.unsqueeze(-1)

        # v-prediction target: v = sqrt(alpha_bar_t) * eps - sqrt(1 - alpha_bar_t) * x_0
        velocity = sqrt_alpha_prod * noise - sqrt_one_minus_alpha_prod * sample
        return velocity

    def __len__(self):
        return self.config.num_train_timesteps
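

# Minimal smoke test (editor's sketch, not part of the original pipeline):
# exercises LCMScheduler_X end to end with random tensors standing in for a
# UNet, using the standard Stable Diffusion noise-schedule settings.
if __name__ == "__main__":
    scheduler = LCMScheduler_X(
        beta_start=0.00085, beta_end=0.012, beta_schedule="scaled_linear"
    )
    scheduler.set_timesteps(1.0, num_inference_steps=4, lcm_origin_steps=50)
    sample = torch.randn(1, 4, 64, 64)
    for i, t in enumerate(scheduler.timesteps):
        model_output = torch.randn_like(sample)  # stands in for unet(...)
        sample, denoised = scheduler.step(model_output, i, t, sample, return_dict=False)
    print("final sample:", tuple(sample.shape), float(sample.std()))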