diffusers-bot committed
Commit
c86cbfb
1 Parent(s): cb94276

Upload folder using huggingface_hub

main/pipeline_stable_diffusion_3_differential_img2img.py ADDED
@@ -0,0 +1,981 @@
1
+ # Copyright 2024 Stability AI and The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Callable, Dict, List, Optional, Union
17
+
18
+ import torch
19
+ from transformers import (
20
+ CLIPTextModelWithProjection,
21
+ CLIPTokenizer,
22
+ T5EncoderModel,
23
+ T5TokenizerFast,
24
+ )
25
+
26
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
27
+ from diffusers.models.autoencoders import AutoencoderKL
28
+ from diffusers.models.transformers import SD3Transformer2DModel
29
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
30
+ from diffusers.pipelines.stable_diffusion_3.pipeline_output import StableDiffusion3PipelineOutput
31
+ from diffusers.schedulers import FlowMatchEulerDiscreteScheduler
32
+ from diffusers.utils import (
33
+ is_torch_xla_available,
34
+ logging,
35
+ replace_example_docstring,
36
+ )
37
+ from diffusers.utils.torch_utils import randn_tensor
38
+
39
+
40
+ if is_torch_xla_available():
41
+ import torch_xla.core.xla_model as xm
42
+
43
+ XLA_AVAILABLE = True
44
+ else:
45
+ XLA_AVAILABLE = False
46
+
47
+
48
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
49
+
50
+ EXAMPLE_DOC_STRING = """
51
+ Examples:
52
+ ```py
53
+ >>> import torch
54
+
55
+ >>> from diffusers import AutoPipelineForImage2Image
56
+ >>> from diffusers.utils import load_image
57
+
58
+ >>> device = "cuda"
59
+ >>> model_id_or_path = "stabilityai/stable-diffusion-3-medium-diffusers"
60
+ >>> pipe = AutoPipelineForImage2Image.from_pretrained(model_id_or_path, torch_dtype=torch.float16)
61
+ >>> pipe = pipe.to(device)
62
+
63
+ >>> url = "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
64
+ >>> init_image = load_image(url).resize((512, 512))
65
+
66
+ >>> prompt = "cat wizard, gandalf, lord of the rings, detailed, fantasy, cute, adorable, Pixar, Disney, 8k"
67
+
68
+ >>> images = pipe(prompt=prompt, image=init_image, strength=0.95, guidance_scale=7.5).images[0]
69
+ ```
70
+ """
71
+
72
+
73
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
74
+ def retrieve_latents(
75
+ encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
76
+ ):
77
+ if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
78
+ return encoder_output.latent_dist.sample(generator)
79
+ elif hasattr(encoder_output, "latent_dist") and sample_mode == "argmax":
80
+ return encoder_output.latent_dist.mode()
81
+ elif hasattr(encoder_output, "latents"):
82
+ return encoder_output.latents
83
+ else:
84
+ raise AttributeError("Could not access latents of provided encoder_output")
85
+
86
+
87
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.retrieve_timesteps
88
+ def retrieve_timesteps(
89
+ scheduler,
90
+ num_inference_steps: Optional[int] = None,
91
+ device: Optional[Union[str, torch.device]] = None,
92
+ timesteps: Optional[List[int]] = None,
93
+ sigmas: Optional[List[float]] = None,
94
+ **kwargs,
95
+ ):
96
+ """
97
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
98
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
99
+
100
+ Args:
101
+ scheduler (`SchedulerMixin`):
102
+ The scheduler to get timesteps from.
103
+ num_inference_steps (`int`):
104
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
105
+ must be `None`.
106
+ device (`str` or `torch.device`, *optional*):
107
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
108
+ timesteps (`List[int]`, *optional*):
109
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
110
+ `num_inference_steps` and `sigmas` must be `None`.
111
+ sigmas (`List[float]`, *optional*):
112
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
113
+ `num_inference_steps` and `timesteps` must be `None`.
114
+
115
+ Returns:
116
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
117
+ second element is the number of inference steps.
118
+ """
119
+ if timesteps is not None and sigmas is not None:
120
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
121
+ if timesteps is not None:
122
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
123
+ if not accepts_timesteps:
124
+ raise ValueError(
125
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
126
+ f" timestep schedules. Please check whether you are using the correct scheduler."
127
+ )
128
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
129
+ timesteps = scheduler.timesteps
130
+ num_inference_steps = len(timesteps)
131
+ elif sigmas is not None:
132
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
133
+ if not accept_sigmas:
134
+ raise ValueError(
135
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
136
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
137
+ )
138
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
139
+ timesteps = scheduler.timesteps
140
+ num_inference_steps = len(timesteps)
141
+ else:
142
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
143
+ timesteps = scheduler.timesteps
144
+ return timesteps, num_inference_steps
145
+
146
+
147
+ class StableDiffusion3DifferentialImg2ImgPipeline(DiffusionPipeline):
148
+ r"""
149
+ Args:
150
+ transformer ([`SD3Transformer2DModel`]):
151
+ Conditional Transformer (MMDiT) architecture to denoise the encoded image latents.
152
+ scheduler ([`FlowMatchEulerDiscreteScheduler`]):
153
+ A scheduler to be used in combination with `transformer` to denoise the encoded image latents.
154
+ vae ([`AutoencoderKL`]):
155
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
156
+ text_encoder ([`CLIPTextModelWithProjection`]):
157
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
158
+ specifically the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant,
159
+ with an additional added projection layer that is initialized with a diagonal matrix with the `hidden_size`
160
+ as its dimension.
161
+ text_encoder_2 ([`CLIPTextModelWithProjection`]):
162
+ [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModelWithProjection),
163
+ specifically the
164
+ [laion/CLIP-ViT-bigG-14-laion2B-39B-b160k](https://huggingface.co/laion/CLIP-ViT-bigG-14-laion2B-39B-b160k)
165
+ variant.
166
+ text_encoder_3 ([`T5EncoderModel`]):
167
+ Frozen text-encoder. Stable Diffusion 3 uses
168
+ [T5](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5EncoderModel), specifically the
169
+ [t5-v1_1-xxl](https://huggingface.co/google/t5-v1_1-xxl) variant.
170
+ tokenizer (`CLIPTokenizer`):
171
+ Tokenizer of class
172
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
173
+ tokenizer_2 (`CLIPTokenizer`):
174
+ Second Tokenizer of class
175
+ [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer).
176
+ tokenizer_3 (`T5TokenizerFast`):
177
+ Tokenizer of class
178
+ [T5Tokenizer](https://huggingface.co/docs/transformers/model_doc/t5#transformers.T5Tokenizer).
179
+ """
180
+
181
+ model_cpu_offload_seq = "text_encoder->text_encoder_2->text_encoder_3->transformer->vae"
182
+ _optional_components = []
183
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds", "negative_pooled_prompt_embeds"]
184
+
185
+ def __init__(
186
+ self,
187
+ transformer: SD3Transformer2DModel,
188
+ scheduler: FlowMatchEulerDiscreteScheduler,
189
+ vae: AutoencoderKL,
190
+ text_encoder: CLIPTextModelWithProjection,
191
+ tokenizer: CLIPTokenizer,
192
+ text_encoder_2: CLIPTextModelWithProjection,
193
+ tokenizer_2: CLIPTokenizer,
194
+ text_encoder_3: T5EncoderModel,
195
+ tokenizer_3: T5TokenizerFast,
196
+ ):
197
+ super().__init__()
198
+
199
+ self.register_modules(
200
+ vae=vae,
201
+ text_encoder=text_encoder,
202
+ text_encoder_2=text_encoder_2,
203
+ text_encoder_3=text_encoder_3,
204
+ tokenizer=tokenizer,
205
+ tokenizer_2=tokenizer_2,
206
+ tokenizer_3=tokenizer_3,
207
+ transformer=transformer,
208
+ scheduler=scheduler,
209
+ )
210
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
211
+ self.image_processor = VaeImageProcessor(
212
+ vae_scale_factor=self.vae_scale_factor, vae_latent_channels=self.vae.config.latent_channels
213
+ )
214
+ self.mask_processor = VaeImageProcessor(
215
+ vae_scale_factor=self.vae_scale_factor, do_normalize=False, do_convert_grayscale=True
216
+ )
217
+
218
+ self.tokenizer_max_length = self.tokenizer.model_max_length
219
+ self.default_sample_size = self.transformer.config.sample_size
220
+
221
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_t5_prompt_embeds
222
+ def _get_t5_prompt_embeds(
223
+ self,
224
+ prompt: Union[str, List[str]] = None,
225
+ num_images_per_prompt: int = 1,
226
+ max_sequence_length: int = 256,
227
+ device: Optional[torch.device] = None,
228
+ dtype: Optional[torch.dtype] = None,
229
+ ):
230
+ device = device or self._execution_device
231
+ dtype = dtype or self.text_encoder.dtype
232
+
233
+ prompt = [prompt] if isinstance(prompt, str) else prompt
234
+ batch_size = len(prompt)
235
+
236
+ if self.text_encoder_3 is None:
237
+ return torch.zeros(
238
+ (
239
+ batch_size * num_images_per_prompt,
240
+ self.tokenizer_max_length,
241
+ self.transformer.config.joint_attention_dim,
242
+ ),
243
+ device=device,
244
+ dtype=dtype,
245
+ )
246
+
247
+ text_inputs = self.tokenizer_3(
248
+ prompt,
249
+ padding="max_length",
250
+ max_length=max_sequence_length,
251
+ truncation=True,
252
+ add_special_tokens=True,
253
+ return_tensors="pt",
254
+ )
255
+ text_input_ids = text_inputs.input_ids
256
+ untruncated_ids = self.tokenizer_3(prompt, padding="longest", return_tensors="pt").input_ids
257
+
258
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
259
+ removed_text = self.tokenizer_3.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
260
+ logger.warning(
261
+ "The following part of your input was truncated because `max_sequence_length` is set to "
262
+ f" {max_sequence_length} tokens: {removed_text}"
263
+ )
264
+
265
+ prompt_embeds = self.text_encoder_3(text_input_ids.to(device))[0]
266
+
267
+ dtype = self.text_encoder_3.dtype
268
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
269
+
270
+ _, seq_len, _ = prompt_embeds.shape
271
+
272
+ # duplicate text embeddings and attention mask for each generation per prompt, using mps friendly method
273
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
274
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
275
+
276
+ return prompt_embeds
277
+
278
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline._get_clip_prompt_embeds
279
+ def _get_clip_prompt_embeds(
280
+ self,
281
+ prompt: Union[str, List[str]],
282
+ num_images_per_prompt: int = 1,
283
+ device: Optional[torch.device] = None,
284
+ clip_skip: Optional[int] = None,
285
+ clip_model_index: int = 0,
286
+ ):
287
+ device = device or self._execution_device
288
+
289
+ clip_tokenizers = [self.tokenizer, self.tokenizer_2]
290
+ clip_text_encoders = [self.text_encoder, self.text_encoder_2]
291
+
292
+ tokenizer = clip_tokenizers[clip_model_index]
293
+ text_encoder = clip_text_encoders[clip_model_index]
294
+
295
+ prompt = [prompt] if isinstance(prompt, str) else prompt
296
+ batch_size = len(prompt)
297
+
298
+ text_inputs = tokenizer(
299
+ prompt,
300
+ padding="max_length",
301
+ max_length=self.tokenizer_max_length,
302
+ truncation=True,
303
+ return_tensors="pt",
304
+ )
305
+
306
+ text_input_ids = text_inputs.input_ids
307
+ untruncated_ids = tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
308
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(text_input_ids, untruncated_ids):
309
+ removed_text = tokenizer.batch_decode(untruncated_ids[:, self.tokenizer_max_length - 1 : -1])
310
+ logger.warning(
311
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
312
+ f" {self.tokenizer_max_length} tokens: {removed_text}"
313
+ )
314
+ prompt_embeds = text_encoder(text_input_ids.to(device), output_hidden_states=True)
315
+ pooled_prompt_embeds = prompt_embeds[0]
316
+
317
+ if clip_skip is None:
318
+ prompt_embeds = prompt_embeds.hidden_states[-2]
319
+ else:
320
+ prompt_embeds = prompt_embeds.hidden_states[-(clip_skip + 2)]
321
+
322
+ prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
323
+
324
+ _, seq_len, _ = prompt_embeds.shape
325
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
326
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
327
+ prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
328
+
329
+ pooled_prompt_embeds = pooled_prompt_embeds.repeat(1, num_images_per_prompt, 1)
330
+ pooled_prompt_embeds = pooled_prompt_embeds.view(batch_size * num_images_per_prompt, -1)
331
+
332
+ return prompt_embeds, pooled_prompt_embeds
333
+
334
+ # Copied from diffusers.pipelines.stable_diffusion_3.pipeline_stable_diffusion_3.StableDiffusion3Pipeline.encode_prompt
335
+ def encode_prompt(
336
+ self,
337
+ prompt: Union[str, List[str]],
338
+ prompt_2: Union[str, List[str]],
339
+ prompt_3: Union[str, List[str]],
340
+ device: Optional[torch.device] = None,
341
+ num_images_per_prompt: int = 1,
342
+ do_classifier_free_guidance: bool = True,
343
+ negative_prompt: Optional[Union[str, List[str]]] = None,
344
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
345
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
346
+ prompt_embeds: Optional[torch.FloatTensor] = None,
347
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
348
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
349
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
350
+ clip_skip: Optional[int] = None,
351
+ max_sequence_length: int = 256,
352
+ ):
353
+ r"""
354
+
355
+ Args:
356
+ prompt (`str` or `List[str]`, *optional*):
357
+ prompt to be encoded
358
+ prompt_2 (`str` or `List[str]`, *optional*):
359
+ The prompt or prompts to be sent to the `tokenizer_2` and `text_encoder_2`. If not defined, `prompt` is
360
+ used in all text-encoders
361
+ prompt_3 (`str` or `List[str]`, *optional*):
362
+ The prompt or prompts to be sent to the `tokenizer_3` and `text_encoder_3`. If not defined, `prompt` is
363
+ used in all text-encoders
364
+ device: (`torch.device`):
365
+ torch device
366
+ num_images_per_prompt (`int`):
367
+ number of images that should be generated per prompt
368
+ do_classifier_free_guidance (`bool`):
369
+ whether to use classifier free guidance or not
370
+ negative_prompt (`str` or `List[str]`, *optional*):
371
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
372
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
373
+ less than `1`).
374
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
375
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
376
+ `text_encoder_2`. If not defined, `negative_prompt` is used in all the text-encoders.
377
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
378
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
379
+ `text_encoder_3`. If not defined, `negative_prompt` is used in all the text-encoders.
380
+ prompt_embeds (`torch.FloatTensor`, *optional*):
381
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
382
+ provided, text embeddings will be generated from `prompt` input argument.
383
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
384
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
385
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
386
+ argument.
387
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
388
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
389
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
390
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
391
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
392
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
393
+ input argument.
394
+ clip_skip (`int`, *optional*):
395
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
396
+ the output of the pre-final layer will be used for computing the prompt embeddings.
397
+ """
398
+ device = device or self._execution_device
399
+
400
+ prompt = [prompt] if isinstance(prompt, str) else prompt
401
+ if prompt is not None:
402
+ batch_size = len(prompt)
403
+ else:
404
+ batch_size = prompt_embeds.shape[0]
405
+
406
+ if prompt_embeds is None:
407
+ prompt_2 = prompt_2 or prompt
408
+ prompt_2 = [prompt_2] if isinstance(prompt_2, str) else prompt_2
409
+
410
+ prompt_3 = prompt_3 or prompt
411
+ prompt_3 = [prompt_3] if isinstance(prompt_3, str) else prompt_3
412
+
413
+ prompt_embed, pooled_prompt_embed = self._get_clip_prompt_embeds(
414
+ prompt=prompt,
415
+ device=device,
416
+ num_images_per_prompt=num_images_per_prompt,
417
+ clip_skip=clip_skip,
418
+ clip_model_index=0,
419
+ )
420
+ prompt_2_embed, pooled_prompt_2_embed = self._get_clip_prompt_embeds(
421
+ prompt=prompt_2,
422
+ device=device,
423
+ num_images_per_prompt=num_images_per_prompt,
424
+ clip_skip=clip_skip,
425
+ clip_model_index=1,
426
+ )
427
+ clip_prompt_embeds = torch.cat([prompt_embed, prompt_2_embed], dim=-1)
428
+
429
+ t5_prompt_embed = self._get_t5_prompt_embeds(
430
+ prompt=prompt_3,
431
+ num_images_per_prompt=num_images_per_prompt,
432
+ max_sequence_length=max_sequence_length,
433
+ device=device,
434
+ )
435
+
436
+ clip_prompt_embeds = torch.nn.functional.pad(
437
+ clip_prompt_embeds, (0, t5_prompt_embed.shape[-1] - clip_prompt_embeds.shape[-1])
438
+ )
439
+
440
+ prompt_embeds = torch.cat([clip_prompt_embeds, t5_prompt_embed], dim=-2)
441
+ pooled_prompt_embeds = torch.cat([pooled_prompt_embed, pooled_prompt_2_embed], dim=-1)
442
+
443
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
444
+ negative_prompt = negative_prompt or ""
445
+ negative_prompt_2 = negative_prompt_2 or negative_prompt
446
+ negative_prompt_3 = negative_prompt_3 or negative_prompt
447
+
448
+ # normalize str to list
449
+ negative_prompt = batch_size * [negative_prompt] if isinstance(negative_prompt, str) else negative_prompt
450
+ negative_prompt_2 = (
451
+ batch_size * [negative_prompt_2] if isinstance(negative_prompt_2, str) else negative_prompt_2
452
+ )
453
+ negative_prompt_3 = (
454
+ batch_size * [negative_prompt_3] if isinstance(negative_prompt_3, str) else negative_prompt_3
455
+ )
456
+
457
+ if prompt is not None and type(prompt) is not type(negative_prompt):
458
+ raise TypeError(
459
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
460
+ f" {type(prompt)}."
461
+ )
462
+ elif batch_size != len(negative_prompt):
463
+ raise ValueError(
464
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
465
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
466
+ " the batch size of `prompt`."
467
+ )
468
+
469
+ negative_prompt_embed, negative_pooled_prompt_embed = self._get_clip_prompt_embeds(
470
+ negative_prompt,
471
+ device=device,
472
+ num_images_per_prompt=num_images_per_prompt,
473
+ clip_skip=None,
474
+ clip_model_index=0,
475
+ )
476
+ negative_prompt_2_embed, negative_pooled_prompt_2_embed = self._get_clip_prompt_embeds(
477
+ negative_prompt_2,
478
+ device=device,
479
+ num_images_per_prompt=num_images_per_prompt,
480
+ clip_skip=None,
481
+ clip_model_index=1,
482
+ )
483
+ negative_clip_prompt_embeds = torch.cat([negative_prompt_embed, negative_prompt_2_embed], dim=-1)
484
+
485
+ t5_negative_prompt_embed = self._get_t5_prompt_embeds(
486
+ prompt=negative_prompt_3,
487
+ num_images_per_prompt=num_images_per_prompt,
488
+ max_sequence_length=max_sequence_length,
489
+ device=device,
490
+ )
491
+
492
+ negative_clip_prompt_embeds = torch.nn.functional.pad(
493
+ negative_clip_prompt_embeds,
494
+ (0, t5_negative_prompt_embed.shape[-1] - negative_clip_prompt_embeds.shape[-1]),
495
+ )
496
+
497
+ negative_prompt_embeds = torch.cat([negative_clip_prompt_embeds, t5_negative_prompt_embed], dim=-2)
498
+ negative_pooled_prompt_embeds = torch.cat(
499
+ [negative_pooled_prompt_embed, negative_pooled_prompt_2_embed], dim=-1
500
+ )
501
+
502
+ return prompt_embeds, negative_prompt_embeds, pooled_prompt_embeds, negative_pooled_prompt_embeds
503
+
504
+ def check_inputs(
505
+ self,
506
+ prompt,
507
+ prompt_2,
508
+ prompt_3,
509
+ strength,
510
+ negative_prompt=None,
511
+ negative_prompt_2=None,
512
+ negative_prompt_3=None,
513
+ prompt_embeds=None,
514
+ negative_prompt_embeds=None,
515
+ pooled_prompt_embeds=None,
516
+ negative_pooled_prompt_embeds=None,
517
+ callback_on_step_end_tensor_inputs=None,
518
+ max_sequence_length=None,
519
+ ):
520
+ if strength < 0 or strength > 1:
521
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
522
+
523
+ if callback_on_step_end_tensor_inputs is not None and not all(
524
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
525
+ ):
526
+ raise ValueError(
527
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
528
+ )
529
+
530
+ if prompt is not None and prompt_embeds is not None:
531
+ raise ValueError(
532
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
533
+ " only forward one of the two."
534
+ )
535
+ elif prompt_2 is not None and prompt_embeds is not None:
536
+ raise ValueError(
537
+ f"Cannot forward both `prompt_2`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
538
+ " only forward one of the two."
539
+ )
540
+ elif prompt_3 is not None and prompt_embeds is not None:
541
+ raise ValueError(
542
+ f"Cannot forward both `prompt_3`: {prompt_2} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
543
+ " only forward one of the two."
544
+ )
545
+ elif prompt is None and prompt_embeds is None:
546
+ raise ValueError(
547
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
548
+ )
549
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
550
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
551
+ elif prompt_2 is not None and (not isinstance(prompt_2, str) and not isinstance(prompt_2, list)):
552
+ raise ValueError(f"`prompt_2` has to be of type `str` or `list` but is {type(prompt_2)}")
553
+ elif prompt_3 is not None and (not isinstance(prompt_3, str) and not isinstance(prompt_3, list)):
554
+ raise ValueError(f"`prompt_3` has to be of type `str` or `list` but is {type(prompt_3)}")
555
+
556
+ if negative_prompt is not None and negative_prompt_embeds is not None:
557
+ raise ValueError(
558
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
559
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
560
+ )
561
+ elif negative_prompt_2 is not None and negative_prompt_embeds is not None:
562
+ raise ValueError(
563
+ f"Cannot forward both `negative_prompt_2`: {negative_prompt_2} and `negative_prompt_embeds`:"
564
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
565
+ )
566
+ elif negative_prompt_3 is not None and negative_prompt_embeds is not None:
567
+ raise ValueError(
568
+ f"Cannot forward both `negative_prompt_3`: {negative_prompt_3} and `negative_prompt_embeds`:"
569
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
570
+ )
571
+
572
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
573
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
574
+ raise ValueError(
575
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
576
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
577
+ f" {negative_prompt_embeds.shape}."
578
+ )
579
+
580
+ if prompt_embeds is not None and pooled_prompt_embeds is None:
581
+ raise ValueError(
582
+ "If `prompt_embeds` are provided, `pooled_prompt_embeds` also have to be passed. Make sure to generate `pooled_prompt_embeds` from the same text encoder that was used to generate `prompt_embeds`."
583
+ )
584
+
585
+ if negative_prompt_embeds is not None and negative_pooled_prompt_embeds is None:
586
+ raise ValueError(
587
+ "If `negative_prompt_embeds` are provided, `negative_pooled_prompt_embeds` also have to be passed. Make sure to generate `negative_pooled_prompt_embeds` from the same text encoder that was used to generate `negative_prompt_embeds`."
588
+ )
589
+
590
+ if max_sequence_length is not None and max_sequence_length > 512:
591
+ raise ValueError(f"`max_sequence_length` cannot be greater than 512 but is {max_sequence_length}")
592
+
593
+ def get_timesteps(self, num_inference_steps, strength, device):
594
+ # get the original timestep using init_timestep
595
+ init_timestep = min(num_inference_steps * strength, num_inference_steps)
596
+
597
+ t_start = int(max(num_inference_steps - init_timestep, 0))
598
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
599
+
600
+ return timesteps, num_inference_steps - t_start
601
+
602
+ def prepare_latents(
603
+ self, batch_size, num_channels_latents, height, width, image, timestep, dtype, device, generator=None
604
+ ):
605
+ shape = (
606
+ batch_size,
607
+ num_channels_latents,
608
+ int(height) // self.vae_scale_factor,
609
+ int(width) // self.vae_scale_factor,
610
+ )
611
+
612
+ image = image.to(device=device, dtype=dtype)
613
+
614
+ if isinstance(generator, list) and len(generator) != batch_size:
615
+ raise ValueError(
616
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
617
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
618
+ )
619
+ elif isinstance(generator, list):
620
+ init_latents = [
621
+ retrieve_latents(self.vae.encode(image[i : i + 1]), generator=generator[i]) for i in range(batch_size)
622
+ ]
623
+ init_latents = torch.cat(init_latents, dim=0)
624
+ else:
625
+ init_latents = retrieve_latents(self.vae.encode(image), generator=generator)
626
+
627
+ init_latents = (init_latents - self.vae.config.shift_factor) * self.vae.config.scaling_factor
628
+
629
+ if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
630
+ # expand init_latents for batch_size
631
+ additional_image_per_prompt = batch_size // init_latents.shape[0]
632
+ init_latents = torch.cat([init_latents] * additional_image_per_prompt, dim=0)
633
+ elif batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] != 0:
634
+ raise ValueError(
635
+ f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
636
+ )
637
+ else:
638
+ init_latents = torch.cat([init_latents], dim=0)
639
+
640
+ shape = init_latents.shape
641
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
642
+
643
+ init_latents = self.scheduler.scale_noise(init_latents, timestep, noise)
644
+ latents = init_latents.to(device=device, dtype=dtype)
645
+
646
+ return latents
647
+
648
+ @property
649
+ def guidance_scale(self):
650
+ return self._guidance_scale
651
+
652
+ @property
653
+ def clip_skip(self):
654
+ return self._clip_skip
655
+
656
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
657
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
658
+ # corresponds to doing no classifier free guidance.
659
+ @property
660
+ def do_classifier_free_guidance(self):
661
+ return self._guidance_scale > 1
662
+
663
+ @property
664
+ def num_timesteps(self):
665
+ return self._num_timesteps
666
+
667
+ @property
668
+ def interrupt(self):
669
+ return self._interrupt
670
+
671
+ @torch.no_grad()
672
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
673
+ def __call__(
674
+ self,
675
+ prompt: Union[str, List[str]] = None,
676
+ prompt_2: Optional[Union[str, List[str]]] = None,
677
+ prompt_3: Optional[Union[str, List[str]]] = None,
678
+ height: Optional[int] = None,
679
+ width: Optional[int] = None,
680
+ image: PipelineImageInput = None,
681
+ strength: float = 0.6,
682
+ num_inference_steps: int = 50,
683
+ timesteps: List[int] = None,
684
+ guidance_scale: float = 7.0,
685
+ negative_prompt: Optional[Union[str, List[str]]] = None,
686
+ negative_prompt_2: Optional[Union[str, List[str]]] = None,
687
+ negative_prompt_3: Optional[Union[str, List[str]]] = None,
688
+ num_images_per_prompt: Optional[int] = 1,
689
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
690
+ latents: Optional[torch.FloatTensor] = None,
691
+ prompt_embeds: Optional[torch.FloatTensor] = None,
692
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
693
+ pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
694
+ negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
695
+ output_type: Optional[str] = "pil",
696
+ return_dict: bool = True,
697
+ clip_skip: Optional[int] = None,
698
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
699
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
700
+ max_sequence_length: int = 256,
701
+ map: PipelineImageInput = None,
702
+ ):
703
+ r"""
704
+ Function invoked when calling the pipeline for generation.
705
+
706
+ Args:
707
+ prompt (`str` or `List[str]`, *optional*):
708
+ The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
709
+ instead.
710
+ prompt_2 (`str` or `List[str]`, *optional*):
711
+ The prompt or prompts to be sent to `tokenizer_2` and `text_encoder_2`. If not defined, `prompt`
712
+ will be used instead.
713
+ prompt_3 (`str` or `List[str]`, *optional*):
714
+ The prompt or prompts to be sent to `tokenizer_3` and `text_encoder_3`. If not defined, `prompt`
715
+ will be used instead.
716
+ height (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
717
+ The height in pixels of the generated image. This is set to 1024 by default for the best results.
718
+ width (`int`, *optional*, defaults to self.transformer.config.sample_size * self.vae_scale_factor):
719
+ The width in pixels of the generated image. This is set to 1024 by default for the best results.
720
+ num_inference_steps (`int`, *optional*, defaults to 50):
721
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
722
+ expense of slower inference.
723
+ timesteps (`List[int]`, *optional*):
724
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
725
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
726
+ passed will be used. Must be in descending order.
727
+ guidance_scale (`float`, *optional*, defaults to 7.0):
728
+ Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
729
+ `guidance_scale` is defined as `w` of equation 2. of [Imagen
730
+ Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
731
+ 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`,
732
+ usually at the expense of lower image quality.
733
+ negative_prompt (`str` or `List[str]`, *optional*):
734
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
735
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
736
+ less than `1`).
737
+ negative_prompt_2 (`str` or `List[str]`, *optional*):
738
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_2` and
739
+ `text_encoder_2`. If not defined, `negative_prompt` is used instead
740
+ negative_prompt_3 (`str` or `List[str]`, *optional*):
741
+ The prompt or prompts not to guide the image generation to be sent to `tokenizer_3` and
742
+ `text_encoder_3`. If not defined, `negative_prompt` is used instead
743
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
744
+ The number of images to generate per prompt.
745
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
746
+ One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
747
+ to make generation deterministic.
748
+ latents (`torch.FloatTensor`, *optional*):
749
+ Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
750
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
751
+ tensor will be generated by sampling using the supplied random `generator`.
752
+ prompt_embeds (`torch.FloatTensor`, *optional*):
753
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
754
+ provided, text embeddings will be generated from `prompt` input argument.
755
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
756
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
757
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
758
+ argument.
759
+ pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
760
+ Pre-generated pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting.
761
+ If not provided, pooled text embeddings will be generated from `prompt` input argument.
762
+ negative_pooled_prompt_embeds (`torch.FloatTensor`, *optional*):
763
+ Pre-generated negative pooled text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
764
+ weighting. If not provided, pooled negative_prompt_embeds will be generated from `negative_prompt`
765
+ input argument.
766
+ output_type (`str`, *optional*, defaults to `"pil"`):
767
+ The output format of the generated image. Choose between
768
+ [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
769
+ return_dict (`bool`, *optional*, defaults to `True`):
770
+ Whether or not to return a [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] instead
771
+ of a plain tuple.
772
+ callback_on_step_end (`Callable`, *optional*):
773
+ A function that is called at the end of each denoising step during inference. The function is called
774
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
775
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
776
+ `callback_on_step_end_tensor_inputs`.
777
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
778
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
779
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
780
+ `._callback_tensor_inputs` attribute of your pipeline class.
781
+ max_sequence_length (`int`, *optional*, defaults to 256): Maximum sequence length to use with the `prompt`.
782
+
783
+ Examples:
784
+
785
+ Returns:
786
+ [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] or `tuple`:
787
+ [`~pipelines.stable_diffusion_3.StableDiffusion3PipelineOutput`] if `return_dict` is True, otherwise a
788
+ `tuple`. When returning a tuple, the first element is a list with the generated images.
789
+ """
790
+
791
+ # 0. Default height and width
792
+ height = height or self.default_sample_size * self.vae_scale_factor
793
+ width = width or self.default_sample_size * self.vae_scale_factor
794
+
795
+ # 1. Check inputs. Raise error if not correct
796
+ self.check_inputs(
797
+ prompt,
798
+ prompt_2,
799
+ prompt_3,
800
+ strength,
801
+ negative_prompt=negative_prompt,
802
+ negative_prompt_2=negative_prompt_2,
803
+ negative_prompt_3=negative_prompt_3,
804
+ prompt_embeds=prompt_embeds,
805
+ negative_prompt_embeds=negative_prompt_embeds,
806
+ pooled_prompt_embeds=pooled_prompt_embeds,
807
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
808
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
809
+ max_sequence_length=max_sequence_length,
810
+ )
811
+
812
+ self._guidance_scale = guidance_scale
813
+ self._clip_skip = clip_skip
814
+ self._interrupt = False
815
+
816
+ # 2. Define call parameters
817
+ if prompt is not None and isinstance(prompt, str):
818
+ batch_size = 1
819
+ elif prompt is not None and isinstance(prompt, list):
820
+ batch_size = len(prompt)
821
+ else:
822
+ batch_size = prompt_embeds.shape[0]
823
+
824
+ device = self._execution_device
825
+
826
+ (
827
+ prompt_embeds,
828
+ negative_prompt_embeds,
829
+ pooled_prompt_embeds,
830
+ negative_pooled_prompt_embeds,
831
+ ) = self.encode_prompt(
832
+ prompt=prompt,
833
+ prompt_2=prompt_2,
834
+ prompt_3=prompt_3,
835
+ negative_prompt=negative_prompt,
836
+ negative_prompt_2=negative_prompt_2,
837
+ negative_prompt_3=negative_prompt_3,
838
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
839
+ prompt_embeds=prompt_embeds,
840
+ negative_prompt_embeds=negative_prompt_embeds,
841
+ pooled_prompt_embeds=pooled_prompt_embeds,
842
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds,
843
+ device=device,
844
+ clip_skip=self.clip_skip,
845
+ num_images_per_prompt=num_images_per_prompt,
846
+ max_sequence_length=max_sequence_length,
847
+ )
848
+
849
+ if self.do_classifier_free_guidance:
850
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds], dim=0)
851
+ pooled_prompt_embeds = torch.cat([negative_pooled_prompt_embeds, pooled_prompt_embeds], dim=0)
852
+
853
+ # 3. Preprocess image
854
+ init_image = self.image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
855
+
856
+ map = self.mask_processor.preprocess(
857
+ map, height=height // self.vae_scale_factor, width=width // self.vae_scale_factor
858
+ ).to(device)
859
+
860
+ # 4. Prepare timesteps
861
+ timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)
862
+
863
+ # begin diff diff change
864
+ total_time_steps = num_inference_steps
865
+ # end diff diff change
866
+
867
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
868
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
869
+
870
+ # 5. Prepare latent variables
871
+ num_channels_latents = self.transformer.config.in_channels
872
+ if latents is None:
873
+ latents = self.prepare_latents(
874
+ batch_size * num_images_per_prompt,
875
+ num_channels_latents,
876
+ height,
877
+ width,
878
+ init_image,
879
+ latent_timestep,
880
+ prompt_embeds.dtype,
881
+ device,
882
+ generator,
883
+ )
884
+
885
+ # 6. Denoising loop
886
+ num_warmup_steps = max(len(timesteps) - num_inference_steps * self.scheduler.order, 0)
887
+ self._num_timesteps = len(timesteps)
888
+
889
+ # preparations for diff diff
890
+ original_with_noise = self.prepare_latents(
891
+ batch_size * num_images_per_prompt,
892
+ num_channels_latents,
893
+ height,
894
+ width,
895
+ init_image,
896
+ timesteps,
897
+ prompt_embeds.dtype,
898
+ device,
899
+ generator,
900
+ )
901
+ thresholds = torch.arange(total_time_steps, dtype=map.dtype) / total_time_steps
902
+ thresholds = thresholds.unsqueeze(1).unsqueeze(1).to(device)
903
+ masks = map.squeeze() > thresholds
904
+ # end diff diff preparations
905
+
906
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
907
+ for i, t in enumerate(timesteps):
908
+ if self.interrupt:
909
+ continue
910
+
911
+ # diff diff
912
+ if i == 0:
913
+ latents = original_with_noise[:1]
914
+ else:
915
+ mask = masks[i].unsqueeze(0).to(latents.dtype)
916
+ mask = mask.unsqueeze(1) # fit shape
917
+ latents = original_with_noise[i] * mask + latents * (1 - mask)
918
+ # end diff diff
919
+
920
+ # expand the latents if we are doing classifier free guidance
921
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
922
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
923
+ timestep = t.expand(latent_model_input.shape[0])
924
+
925
+ noise_pred = self.transformer(
926
+ hidden_states=latent_model_input,
927
+ timestep=timestep,
928
+ encoder_hidden_states=prompt_embeds,
929
+ pooled_projections=pooled_prompt_embeds,
930
+ return_dict=False,
931
+ )[0]
932
+
933
+ # perform guidance
934
+ if self.do_classifier_free_guidance:
935
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
936
+ noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)
937
+
938
+ # compute the previous noisy sample x_t -> x_t-1
939
+ latents_dtype = latents.dtype
940
+ latents = self.scheduler.step(noise_pred, t, latents, return_dict=False)[0]
941
+
942
+ if latents.dtype != latents_dtype:
943
+ if torch.backends.mps.is_available():
944
+ # some platforms (eg. apple mps) misbehave due to a pytorch bug: https://github.com/pytorch/pytorch/pull/99272
945
+ latents = latents.to(latents_dtype)
946
+
947
+ if callback_on_step_end is not None:
948
+ callback_kwargs = {}
949
+ for k in callback_on_step_end_tensor_inputs:
950
+ callback_kwargs[k] = locals()[k]
951
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
952
+
953
+ latents = callback_outputs.pop("latents", latents)
954
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
955
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
956
+ negative_pooled_prompt_embeds = callback_outputs.pop(
957
+ "negative_pooled_prompt_embeds", negative_pooled_prompt_embeds
958
+ )
959
+
960
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
961
+ progress_bar.update()
962
+
963
+ if XLA_AVAILABLE:
964
+ xm.mark_step()
965
+
966
+ if output_type == "latent":
967
+ image = latents
968
+
969
+ else:
970
+ latents = (latents / self.vae.config.scaling_factor) + self.vae.config.shift_factor
971
+
972
+ image = self.vae.decode(latents, return_dict=False)[0]
973
+ image = self.image_processor.postprocess(image, output_type=output_type)
974
+
975
+ # Offload all models
976
+ self.maybe_free_model_hooks()
977
+
978
+ if not return_dict:
979
+ return (image,)
980
+
981
+ return StableDiffusion3PipelineOutput(images=image)
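
The commit ships only the pipeline class itself, so the following is a minimal, hedged usage sketch rather than part of the uploaded file. It assumes `custom_pipeline` resolves to this file (either by the community pipeline name used here or a local path to the `.py` file), that the SD3 medium checkpoint is used, and that near-full `strength` is desired; the change map is a simple synthetic gradient built for illustration.

```py
import numpy as np
import torch
from PIL import Image

from diffusers import DiffusionPipeline
from diffusers.utils import load_image

# Load the SD3 weights and attach the custom pipeline class defined above.
# `custom_pipeline` is assumed to resolve either to the community pipeline name
# or to a local copy of this file (e.g. "./pipeline_stable_diffusion_3_differential_img2img.py").
pipe = DiffusionPipeline.from_pretrained(
    "stabilityai/stable-diffusion-3-medium-diffusers",
    custom_pipeline="pipeline_stable_diffusion_3_differential_img2img",
    torch_dtype=torch.float16,
).to("cuda")

init_image = load_image(
    "https://raw.githubusercontent.com/CompVis/stable-diffusion/main/assets/stable-samples/img2img/sketch-mountains-input.jpg"
).resize((1024, 1024))

# Grayscale change map (here a simple left-to-right gradient). Given the masking logic
# in the denoising loop above, higher map values keep a region pinned to the noised
# input for longer (more preserved), while lower values release it earlier (more change).
gradient = np.tile(np.linspace(0.0, 1.0, 1024, dtype=np.float32), (1024, 1))
change_map = Image.fromarray((gradient * 255).astype(np.uint8))

image = pipe(
    prompt="a painting of a mountain landscape at sunset, detailed, 8k",
    image=init_image,
    map=change_map,
    strength=1.0,  # differential diffusion is usually run near full strength; tune as needed
    guidance_scale=7.0,
    num_inference_steps=50,
).images[0]
image.save("sd3_differential_img2img.png")
```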