Commit 2f798b6 (verified) · Parent: ee7e9f5
diffusers-benchmarking-bot committed: Upload folder using huggingface_hub
This view is limited to 50 files because the commit contains too many changes.

Files changed (50):
  1. main/README_community_scripts.md +2 -2
  2. main/adaptive_mask_inpainting.py +1 -1
  3. main/bit_diffusion.py +2 -2
  4. main/clip_guided_images_mixing_stable_diffusion.py +4 -4
  5. main/clip_guided_stable_diffusion.py +2 -2
  6. main/clip_guided_stable_diffusion_img2img.py +2 -2
  7. main/composable_stable_diffusion.py +3 -3
  8. main/ddim_noise_comparative_analysis.py +1 -1
  9. main/dps_pipeline.py +1 -1
  10. main/edict_pipeline.py +5 -7
  11. main/fresco_v2v.py +2 -2
  12. main/gluegen.py +1 -1
  13. main/hd_painter.py +2 -2
  14. main/iadb.py +1 -1
  15. main/imagic_stable_diffusion.py +3 -3
  16. main/img2img_inpainting.py +2 -2
  17. main/instaflow_one_step.py +1 -1
  18. main/interpolate_stable_diffusion.py +4 -4
  19. main/ip_adapter_face_id.py +1 -1
  20. main/latent_consistency_img2img.py +1 -1
  21. main/latent_consistency_interpolate.py +1 -1
  22. main/latent_consistency_txt2img.py +1 -1
  23. main/llm_grounded_diffusion.py +1 -1
  24. main/lpw_stable_diffusion.py +4 -4
  25. main/lpw_stable_diffusion_onnx.py +8 -8
  26. main/lpw_stable_diffusion_xl.py +20 -20
  27. main/masked_stable_diffusion_img2img.py +1 -1
  28. main/masked_stable_diffusion_xl_img2img.py +1 -1
  29. main/matryoshka.py +19 -19
  30. main/mixture_tiling_sdxl.py +4 -4
  31. main/mod_controlnet_tile_sr_sdxl.py +5 -5
  32. main/multilingual_stable_diffusion.py +2 -2
  33. main/pipeline_animatediff_controlnet.py +1 -1
  34. main/pipeline_animatediff_img2video.py +2 -2
  35. main/pipeline_animatediff_ipex.py +2 -2
  36. main/pipeline_controlnet_xl_kolors.py +2 -2
  37. main/pipeline_controlnet_xl_kolors_img2img.py +2 -2
  38. main/pipeline_controlnet_xl_kolors_inpaint.py +2 -2
  39. main/pipeline_demofusion_sdxl.py +4 -4
  40. main/pipeline_fabric.py +1 -1
  41. main/pipeline_faithdiff_stable_diffusion_xl.py +14 -14
  42. main/pipeline_flux_differential_img2img.py +2 -2
  43. main/pipeline_flux_kontext_multiple_images.py +2 -2
  44. main/pipeline_flux_rf_inversion.py +1 -1
  45. main/pipeline_flux_semantic_guidance.py +1 -1
  46. main/pipeline_flux_with_cfg.py +1 -1
  47. main/pipeline_hunyuandit_differential_img2img.py +3 -3
  48. main/pipeline_kolors_differential_img2img.py +2 -2
  49. main/pipeline_kolors_inpainting.py +2 -2
  50. main/pipeline_prompt2prompt.py +1 -1
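
Every hunk below makes the same mechanical type-hint change: `typing.Optional[X]` is rewritten as the PEP 604 union `X | None`, and `typing.Dict[...]` annotations become the built-in generic `dict[...]` (PEP 585); edict_pipeline.py additionally drops its now-unused `Optional` import. A minimal sketch of the equivalence, assuming Python 3.10+ (the functions `call_old` and `call_new` are illustrative, not taken from any of these files):

from typing import Any, Dict, Optional, get_origin

# Old spelling, as on the left-hand side of the hunks below:
def call_old(output_type: Optional[str] = "pil") -> Dict[str, Any]:
    return {"output_type": output_type}

# New spelling, as on the right-hand side (PEP 604 unions require
# Python 3.10+; built-in generics such as dict[str, Any] require 3.9+):
def call_new(output_type: str | None = "pil") -> dict[str, Any]:
    return {"output_type": output_type}

# On CPython 3.10+ the two union spellings compare equal at runtime,
# and both dict spellings share the same origin type:
assert Optional[str] == (str | None)
assert get_origin(Dict[str, Any]) is get_origin(dict[str, Any]) is dict

The annotations are purely cosmetic at runtime, so none of these diffs changes pipeline behavior.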
main/README_community_scripts.md CHANGED
@@ -260,7 +260,7 @@ class SDPromptSchedulingCallback(PipelineCallback):
 
     def callback_fn(
         self, pipeline, step_index, timestep, callback_kwargs
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         cutoff_step_ratio = self.config.cutoff_step_ratio
         cutoff_step_index = self.config.cutoff_step_index
         if isinstance(self.config.encoded_prompt, tuple):
@@ -343,7 +343,7 @@ class SDXLPromptSchedulingCallback(PipelineCallback):
 
     def callback_fn(
         self, pipeline, step_index, timestep, callback_kwargs
-    ) -> Dict[str, Any]:
+    ) -> dict[str, Any]:
         cutoff_step_ratio = self.config.cutoff_step_ratio
         cutoff_step_index = self.config.cutoff_step_index
         if isinstance(self.config.encoded_prompt, tuple):
main/adaptive_mask_inpainting.py CHANGED
@@ -871,7 +871,7 @@ class AdaptiveMaskInpaintPipeline(
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
         callback_steps: int = 1,
main/bit_diffusion.py CHANGED
@@ -231,9 +231,9 @@ class BitDiffusion(DiffusionPipeline):
         height: Optional[int] = 256,
         width: Optional[int] = 256,
         num_inference_steps: Optional[int] = 50,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         batch_size: Optional[int] = 1,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         **kwargs,
     ) -> Union[Tuple, ImagePipelineOutput]:
main/clip_guided_images_mixing_stable_diffusion.py CHANGED
@@ -235,8 +235,8 @@ class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMi
         self,
         style_image: Union[torch.Tensor, PIL.Image.Image],
         content_image: Union[torch.Tensor, PIL.Image.Image],
-        style_prompt: Optional[str] = None,
-        content_prompt: Optional[str] = None,
+        style_prompt: str | None = None,
+        content_prompt: str | None = None,
         height: Optional[int] = 512,
         width: Optional[int] = 512,
         noise_strength: float = 0.6,
@@ -245,8 +245,8 @@ class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMi
         batch_size: Optional[int] = 1,
         eta: float = 0.0,
         clip_guidance_scale: Optional[float] = 100,
-        generator: Optional[torch.Generator] = None,
-        output_type: Optional[str] = "pil",
+        generator: torch.Generator | None = None,
+        output_type: str | None = "pil",
         return_dict: bool = True,
         slerp_latent_style_strength: float = 0.8,
         slerp_prompt_style_strength: float = 0.1,
main/clip_guided_stable_diffusion.py CHANGED
@@ -179,9 +179,9 @@ class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
         clip_prompt: Optional[Union[str, List[str]]] = None,
         num_cutouts: Optional[int] = 4,
         use_cutouts: Optional[bool] = True,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
     ):
         if isinstance(prompt, str):
main/clip_guided_stable_diffusion_img2img.py CHANGED
@@ -316,9 +316,9 @@ class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
         clip_prompt: Optional[Union[str, List[str]]] = None,
         num_cutouts: Optional[int] = 4,
         use_cutouts: Optional[bool] = True,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
     ):
         if isinstance(prompt, str):
main/composable_stable_diffusion.py CHANGED
@@ -357,13 +357,13 @@ class ComposableStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin)
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
-        weights: Optional[str] = "",
+        weights: str | None = "",
     ):
         r"""
         Function invoked when calling the pipeline for generation.
main/ddim_noise_comparative_analysis.py CHANGED
@@ -110,7 +110,7 @@ class DDIMNoiseComparativeAnalysisPipeline(DiffusionPipeline):
         eta: float = 0.0,
         num_inference_steps: int = 50,
         use_clipped_model_output: Optional[bool] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
     ) -> Union[ImagePipelineOutput, Tuple]:
         r"""
main/dps_pipeline.py CHANGED
@@ -54,7 +54,7 @@ class DPSPipeline(DiffusionPipeline):
         batch_size: int = 1,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         num_inference_steps: int = 1000,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         zeta: float = 0.3,
     ) -> Union[ImagePipelineOutput, Tuple]:
main/edict_pipeline.py CHANGED
@@ -1,5 +1,3 @@
-from typing import Optional
-
 import torch
 from PIL import Image
 from tqdm.auto import tqdm
@@ -39,7 +37,7 @@ class EDICTPipeline(DiffusionPipeline):
         self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
 
     def _encode_prompt(
-        self, prompt: str, negative_prompt: Optional[str] = None, do_classifier_free_guidance: bool = False
+        self, prompt: str, negative_prompt: str | None = None, do_classifier_free_guidance: bool = False
     ):
         text_inputs = self.tokenizer(
             prompt,
@@ -141,7 +139,7 @@ class EDICTPipeline(DiffusionPipeline):
         text_embeds: torch.Tensor,
         timesteps: torch.Tensor,
         guidance_scale: float,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
     ):
         do_classifier_free_guidance = guidance_scale > 1.0
 
@@ -194,9 +192,9 @@ class EDICTPipeline(DiffusionPipeline):
         guidance_scale: float = 3.0,
         num_inference_steps: int = 50,
         strength: float = 0.8,
-        negative_prompt: Optional[str] = None,
-        generator: Optional[torch.Generator] = None,
-        output_type: Optional[str] = "pil",
+        negative_prompt: str | None = None,
+        generator: torch.Generator | None = None,
+        output_type: str | None = "pil",
     ):
         do_classifier_free_guidance = guidance_scale > 1.0
 
main/fresco_v2v.py CHANGED
@@ -1208,7 +1208,7 @@ def apply_FRESCO_attn(pipe):
 
 
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -2064,7 +2064,7 @@ class FrescoV2VPipeline(StableDiffusionControlNetImg2ImgPipeline):
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
main/gluegen.py CHANGED
@@ -597,7 +597,7 @@ class GlueGenStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin, St
         latents: Optional[torch.Tensor] = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/hd_painter.py CHANGED
@@ -462,7 +462,7 @@ class StableDiffusionHDPainterPipeline(StableDiffusionInpaintPipeline):
         num_inference_steps: int = 50,
         timesteps: List[int] = None,
         guidance_scale: float = 7.5,
-        positive_prompt: Optional[str] = "",
+        positive_prompt: str | None = "",
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.01,
@@ -471,7 +471,7 @@ class StableDiffusionHDPainterPipeline(StableDiffusionInpaintPipeline):
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         clip_skip: int = None,
main/iadb.py CHANGED
@@ -86,7 +86,7 @@ class IADBPipeline(DiffusionPipeline):
         batch_size: int = 1,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         num_inference_steps: int = 50,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
     ) -> Union[ImagePipelineOutput, Tuple]:
         r"""
main/imagic_stable_diffusion.py CHANGED
@@ -113,7 +113,7 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
         image: Union[torch.Tensor, PIL.Image.Image],
         height: Optional[int] = 512,
         width: Optional[int] = 512,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         embedding_learning_rate: float = 0.001,
         diffusion_model_learning_rate: float = 2e-6,
         text_embedding_optimization_steps: int = 500,
@@ -314,8 +314,8 @@ class ImagicStableDiffusionPipeline(DiffusionPipeline, StableDiffusionMixin):
         height: Optional[int] = 512,
         width: Optional[int] = 512,
         num_inference_steps: Optional[int] = 50,
-        generator: Optional[torch.Generator] = None,
-        output_type: Optional[str] = "pil",
+        generator: torch.Generator | None = None,
+        output_type: str | None = "pil",
         return_dict: bool = True,
         guidance_scale: float = 7.5,
         eta: float = 0.0,
main/img2img_inpainting.py CHANGED
@@ -143,9 +143,9 @@ class ImageToImageInpaintingPipeline(DiffusionPipeline):
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
main/instaflow_one_step.py CHANGED
@@ -512,7 +512,7 @@ class InstaFlowPipeline(
         latents: Optional[torch.Tensor] = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
main/interpolate_stable_diffusion.py CHANGED
@@ -131,9 +131,9 @@ class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
@@ -401,8 +401,8 @@ class StableDiffusionWalkPipeline(DiffusionPipeline, StableDiffusionMixin):
         prompts: List[str],
         seeds: List[int],
         num_interpolation_steps: Optional[int] = 6,
-        output_dir: Optional[str] = "./dreams",
-        name: Optional[str] = None,
+        output_dir: str | None = "./dreams",
+        name: str | None = None,
         batch_size: Optional[int] = 1,
         height: Optional[int] = 512,
         width: Optional[int] = 512,
main/ip_adapter_face_id.py CHANGED
@@ -855,7 +855,7 @@ class IPAdapterFaceIDStableDiffusionPipeline(
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         image_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/latent_consistency_img2img.py CHANGED
@@ -286,7 +286,7 @@ class LatentConsistencyModelImg2ImgPipeline(DiffusionPipeline):
         num_inference_steps: int = 4,
         lcm_origin_steps: int = 50,
         prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
     ):
main/latent_consistency_interpolate.py CHANGED
@@ -669,7 +669,7 @@ class LatentConsistencyModelWalkPipeline(
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         latents: Optional[torch.Tensor] = None,
         prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         clip_skip: Optional[int] = None,
main/latent_consistency_txt2img.py CHANGED
@@ -212,7 +212,7 @@ class LatentConsistencyModelPipeline(DiffusionPipeline):
         num_inference_steps: int = 4,
         lcm_origin_steps: int = 50,
         prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
     ):
main/llm_grounded_diffusion.py CHANGED
@@ -769,7 +769,7 @@ class LLMGroundedDiffusionPipeline(
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
main/lpw_stable_diffusion.py CHANGED
@@ -830,7 +830,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
@@ -1091,7 +1091,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
@@ -1209,7 +1209,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
@@ -1323,7 +1323,7 @@ class StableDiffusionLongPromptWeightingPipeline(
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
main/lpw_stable_diffusion_onnx.py CHANGED
@@ -664,10 +664,10 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
         strength: float = 0.8,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[np.ndarray] = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
         is_cancelled_callback: Optional[Callable[[], bool]] = None,
@@ -877,10 +877,10 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
         guidance_scale: float = 7.5,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[np.ndarray] = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
         callback_steps: int = 1,
@@ -969,9 +969,9 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
         guidance_scale: Optional[float] = 7.5,
         num_images_per_prompt: Optional[int] = 1,
         eta: Optional[float] = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
         callback_steps: int = 1,
@@ -1061,9 +1061,9 @@ class OnnxStableDiffusionLongPromptWeightingPipeline(OnnxStableDiffusionPipeline
         guidance_scale: Optional[float] = 7.5,
         num_images_per_prompt: Optional[int] = 1,
         eta: Optional[float] = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         max_embeddings_multiples: Optional[int] = 3,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, np.ndarray], None]] = None,
         callback_steps: int = 1,
main/lpw_stable_diffusion_xl.py CHANGED
@@ -519,7 +519,7 @@ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
 
 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -724,12 +724,12 @@ class SDXLLongPromptWeightingPipeline(
     def encode_prompt(
         self,
         prompt: str,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         device: Optional[torch.device] = None,
         num_images_per_prompt: int = 1,
         do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
@@ -1399,7 +1399,7 @@ class SDXLLongPromptWeightingPipeline(
     def __call__(
         self,
         prompt: str = None,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         image: Optional[PipelineImageInput] = None,
         mask_image: Optional[PipelineImageInput] = None,
         masked_image_latents: Optional[torch.Tensor] = None,
@@ -1411,8 +1411,8 @@ class SDXLLongPromptWeightingPipeline(
         denoising_start: Optional[float] = None,
         denoising_end: Optional[float] = None,
         guidance_scale: float = 5.0,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
@@ -1422,7 +1422,7 @@ class SDXLLongPromptWeightingPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
@@ -1955,7 +1955,7 @@ class SDXLLongPromptWeightingPipeline(
     def text2img(
         self,
         prompt: str = None,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         height: Optional[int] = None,
         width: Optional[int] = None,
         num_inference_steps: int = 50,
@@ -1963,8 +1963,8 @@ class SDXLLongPromptWeightingPipeline(
         denoising_start: Optional[float] = None,
         denoising_end: Optional[float] = None,
         guidance_scale: float = 5.0,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
@@ -1974,7 +1974,7 @@ class SDXLLongPromptWeightingPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
@@ -2028,7 +2028,7 @@ class SDXLLongPromptWeightingPipeline(
     def img2img(
         self,
         prompt: str = None,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         image: Optional[PipelineImageInput] = None,
         height: Optional[int] = None,
         width: Optional[int] = None,
@@ -2038,8 +2038,8 @@ class SDXLLongPromptWeightingPipeline(
         denoising_start: Optional[float] = None,
         denoising_end: Optional[float] = None,
         guidance_scale: float = 5.0,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
@@ -2049,7 +2049,7 @@ class SDXLLongPromptWeightingPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
@@ -2105,7 +2105,7 @@ class SDXLLongPromptWeightingPipeline(
     def inpaint(
         self,
         prompt: str = None,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         image: Optional[PipelineImageInput] = None,
         mask_image: Optional[PipelineImageInput] = None,
         masked_image_latents: Optional[torch.Tensor] = None,
@@ -2117,8 +2117,8 @@ class SDXLLongPromptWeightingPipeline(
         denoising_start: Optional[float] = None,
         denoising_end: Optional[float] = None,
         guidance_scale: float = 5.0,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
@@ -2128,7 +2128,7 @@ class SDXLLongPromptWeightingPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/masked_stable_diffusion_img2img.py CHANGED
@@ -32,7 +32,7 @@ class MaskedStableDiffusionImg2ImgPipeline(StableDiffusionImg2ImgPipeline):
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
main/masked_stable_diffusion_xl_img2img.py CHANGED
@@ -59,7 +59,7 @@ class MaskedStableDiffusionXLImg2ImgPipeline(StableDiffusionXLImg2ImgPipeline):
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/matryoshka.py CHANGED
@@ -783,7 +783,7 @@ class CrossAttnDownBlock2D(nn.Module):
         norm_type: str = "layer_norm",
         num_attention_heads: int = 1,
         cross_attention_dim: int = 1280,
-        cross_attention_norm: Optional[str] = None,
+        cross_attention_norm: str | None = None,
         output_scale_factor: float = 1.0,
         downsample_padding: int = 1,
         add_downsample: bool = True,
@@ -922,7 +922,7 @@ class UNetMidBlock2DCrossAttn(nn.Module):
         num_attention_heads: int = 1,
         output_scale_factor: float = 1.0,
         cross_attention_dim: int = 1280,
-        cross_attention_norm: Optional[str] = None,
+        cross_attention_norm: str | None = None,
         dual_cross_attention: bool = False,
         use_linear_projection: bool = False,
         upcast_attention: bool = False,
@@ -1055,7 +1055,7 @@ class CrossAttnUpBlock2D(nn.Module):
         norm_type: str = "layer_norm",
         num_attention_heads: int = 1,
         cross_attention_dim: int = 1280,
-        cross_attention_norm: Optional[str] = None,
+        cross_attention_norm: str | None = None,
         output_scale_factor: float = 1.0,
         add_upsample: bool = True,
         dual_cross_attention: bool = False,
@@ -1617,10 +1617,10 @@ def get_down_block(
     attention_pre_only: bool = False,
    resnet_skip_time_act: bool = False,
     resnet_out_scale_factor: float = 1.0,
-    cross_attention_norm: Optional[str] = None,
+    cross_attention_norm: str | None = None,
     attention_head_dim: Optional[int] = None,
     use_attention_ffn: bool = True,
-    downsample_type: Optional[str] = None,
+    downsample_type: str | None = None,
     dropout: float = 0.0,
 ):
     # If attn head dim is not defined, we default it to the number of heads
@@ -1695,7 +1695,7 @@ def get_mid_block(
     attention_type: str = "default",
     attention_pre_only: bool = False,
     resnet_skip_time_act: bool = False,
-    cross_attention_norm: Optional[str] = None,
+    cross_attention_norm: str | None = None,
     attention_head_dim: Optional[int] = 1,
     dropout: float = 0.0,
 ):
@@ -1747,10 +1747,10 @@ def get_up_block(
     attention_pre_only: bool = False,
     resnet_skip_time_act: bool = False,
     resnet_out_scale_factor: float = 1.0,
-    cross_attention_norm: Optional[str] = None,
+    cross_attention_norm: str | None = None,
     attention_head_dim: Optional[int] = None,
     use_attention_ffn: bool = True,
-    upsample_type: Optional[str] = None,
+    upsample_type: str | None = None,
     dropout: float = 0.0,
 ) -> nn.Module:
     # If attn head dim is not defined, we default it to the number of heads
@@ -1972,7 +1972,7 @@ class MatryoshkaUNet2DConditionModel(
             "CrossAttnDownBlock2D",
             "DownBlock2D",
         ),
-        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
+        mid_block_type: str | None = "UNetMidBlock2DCrossAttn",
         up_block_types: Tuple[str, ...] = (
             "UpBlock2D",
             "CrossAttnUpBlock2D",
@@ -1993,14 +1993,14 @@ class MatryoshkaUNet2DConditionModel(
         transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
         reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
         encoder_hid_dim: Optional[int] = None,
-        encoder_hid_dim_type: Optional[str] = None,
+        encoder_hid_dim_type: str | None = None,
         attention_head_dim: Union[int, Tuple[int]] = 8,
         num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
         dual_cross_attention: bool = False,
         use_attention_ffn: bool = True,
         use_linear_projection: bool = False,
-        class_embed_type: Optional[str] = None,
-        addition_embed_type: Optional[str] = None,
+        class_embed_type: str | None = None,
+        addition_embed_type: str | None = None,
         addition_time_embed_dim: Optional[int] = None,
         num_class_embeds: Optional[int] = None,
         upcast_attention: bool = False,
@@ -2009,8 +2009,8 @@ class MatryoshkaUNet2DConditionModel(
         resnet_out_scale_factor: float = 1.0,
         time_embedding_type: str = "positional",
         time_embedding_dim: Optional[int] = None,
-        time_embedding_act_fn: Optional[str] = None,
-        timestep_post_act: Optional[str] = None,
+        time_embedding_act_fn: str | None = None,
+        timestep_post_act: str | None = None,
         time_cond_proj_dim: Optional[int] = None,
         conv_in_kernel: int = 3,
         conv_out_kernel: int = 3,
@@ -2021,7 +2021,7 @@ class MatryoshkaUNet2DConditionModel(
         micro_conditioning_scale: int = None,
         class_embeddings_concat: bool = False,
         mid_block_only_cross_attention: Optional[bool] = None,
-        cross_attention_norm: Optional[str] = None,
+        cross_attention_norm: str | None = None,
         addition_embed_type_num_heads: int = 64,
         temporal_mode: bool = False,
         temporal_spatial_ds: bool = False,
@@ -2384,7 +2384,7 @@ class MatryoshkaUNet2DConditionModel(
 
     def _set_encoder_hid_proj(
         self,
-        encoder_hid_dim_type: Optional[str],
+        encoder_hid_dim_type: str | None,
         cross_attention_dim: Union[int, Tuple[int]],
         encoder_hid_dim: Optional[int],
     ):
@@ -2424,7 +2424,7 @@ class MatryoshkaUNet2DConditionModel(
 
     def _set_class_embedding(
         self,
-        class_embed_type: Optional[str],
+        class_embed_type: str | None,
         act_fn: str,
         num_class_embeds: Optional[int],
         projection_class_embeddings_input_dim: Optional[int],
@@ -2524,7 +2524,7 @@ class MatryoshkaUNet2DConditionModel(
         )
 
     @property
-    def attn_processors(self) -> Dict[str, AttentionProcessor]:
+    def attn_processors(self) -> dict[str, AttentionProcessor]:
         r"""
         Returns:
             `dict` of attention processors: A dictionary containing all attention processors used in the model with
@@ -4264,7 +4264,7 @@ class MatryoshkaPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/mixture_tiling_sdxl.py CHANGED
@@ -388,12 +388,12 @@ class StableDiffusionXLTilingPipeline(
     def encode_prompt(
         self,
         prompt: str,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         device: Optional[torch.device] = None,
         num_images_per_prompt: int = 1,
         do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
@@ -780,7 +780,7 @@ class StableDiffusionXLTilingPipeline(
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         original_size: Optional[Tuple[int, int]] = None,
main/mod_controlnet_tile_sr_sdxl.py CHANGED
@@ -243,7 +243,7 @@ def _tile2latent_indices(

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -395,12 +395,12 @@ class StableDiffusionXLControlNetTileSRPipeline(
     def encode_prompt(
         self,
         prompt: str,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         device: Optional[torch.device] = None,
         num_images_per_prompt: int = 1,
         do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
@@ -1265,7 +1265,7 @@ class StableDiffusionXLControlNetTileSRPipeline(
         eta: float = 0.0,
         generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
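Many of the files below carry the same module-level `retrieve_latents` helper, copied from the stable-diffusion img2img pipeline; the hunks only show its first branch, which samples from the VAE posterior. A hedged usage sketch of how the pipelines invoke it (the VAE checkpoint and the random input tensor are stand-ins, not part of this diff; `retrieve_latents` is the helper defined above):

```python
import torch
from diffusers import AutoencoderKL

vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse")
image = torch.randn(1, 3, 512, 512)  # stand-in for a preprocessed image in [-1, 1]
generator = torch.Generator().manual_seed(0)

# sample_mode="sample" draws from the posterior via the branch shown above;
# the upstream helper also supports sample_mode="argmax" (distribution mode).
latents = retrieve_latents(vae.encode(image), generator=generator)
latents = latents * vae.config.scaling_factor  # standard SD latent scaling
```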
main/multilingual_stable_diffusion.py CHANGED
@@ -146,9 +146,9 @@ class MultilingualStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
         negative_prompt: Optional[Union[str, List[str]]] = None,
         num_images_per_prompt: Optional[int] = 1,
         eta: float = 0.0,
-        generator: Optional[torch.Generator] = None,
+        generator: torch.Generator | None = None,
         latents: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
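The other recurring retype is `generator: torch.Generator | None = None`. The generator is what makes pipeline runs reproducible; a short self-contained sketch of the behavior the parameter relies on:

```python
import torch

gen_a = torch.Generator().manual_seed(42)
gen_b = torch.Generator().manual_seed(42)

# Identically seeded generators yield identical initial latents, which is
# why pipelines accept an optional generator rather than a bare seed.
noise_a = torch.randn(1, 4, 64, 64, generator=gen_a)
noise_b = torch.randn(1, 4, 64, 64, generator=gen_b)
assert torch.equal(noise_a, noise_b)
```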
main/pipeline_animatediff_controlnet.py CHANGED
@@ -762,7 +762,7 @@ class AnimateDiffControlNetPipeline(
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
         conditioning_frames: Optional[List[PipelineImageInput]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
main/pipeline_animatediff_img2video.py CHANGED
@@ -182,7 +182,7 @@ def tensor2vid(video: torch.Tensor, processor, output_type="np"):

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -755,7 +755,7 @@ class AnimateDiffImgToVideoPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[PipelineImageInput] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: Optional[int] = 1,
main/pipeline_animatediff_ipex.py CHANGED
@@ -588,7 +588,7 @@ class AnimateDiffPipelineIpex(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         clip_skip: Optional[int] = None,
@@ -844,7 +844,7 @@ class AnimateDiffPipelineIpex(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         clip_skip: Optional[int] = None,
main/pipeline_controlnet_xl_kolors.py CHANGED
@@ -111,7 +111,7 @@ EXAMPLE_DOC_STRING = """

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -800,7 +800,7 @@ class KolorsControlNetPipeline(
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
main/pipeline_controlnet_xl_kolors_img2img.py CHANGED
@@ -131,7 +131,7 @@ EXAMPLE_DOC_STRING = """

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -972,7 +972,7 @@ class KolorsControlNetImg2ImgPipeline(
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
main/pipeline_controlnet_xl_kolors_inpaint.py CHANGED
@@ -118,7 +118,7 @@ EXAMPLE_DOC_STRING = """

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -1139,7 +1139,7 @@ class KolorsControlNetInpaintPipeline(
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         controlnet_conditioning_scale: Union[float, List[float]] = 0.8,
main/pipeline_demofusion_sdxl.py CHANGED
@@ -184,12 +184,12 @@ class DemoFusionSDXLPipeline(
     def encode_prompt(
         self,
         prompt: str,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         device: Optional[torch.device] = None,
         num_images_per_prompt: int = 1,
         do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
@@ -637,7 +637,7 @@ class DemoFusionSDXLPipeline(
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         pooled_prompt_embeds: Optional[torch.Tensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = False,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: int = 1,
main/pipeline_fabric.py CHANGED
@@ -508,7 +508,7 @@ class FabricPipeline(DiffusionPipeline):
         num_images: int = 4,
         guidance_scale: float = 7.0,
         num_inference_steps: int = 20,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         feedback_start_ratio: float = 0.33,
         feedback_end_ratio: float = 0.66,
         min_weight: float = 0.05,
main/pipeline_faithdiff_stable_diffusion_xl.py CHANGED
@@ -437,7 +437,7 @@ class UNet2DConditionModel(OriginalUNet2DConditionModel, ConfigMixin, UNet2DCond
             "CrossAttnDownBlock2D",
             "DownBlock2D",
         ),
-        mid_block_type: Optional[str] = "UNetMidBlock2DCrossAttn",
+        mid_block_type: str | None = "UNetMidBlock2DCrossAttn",
         up_block_types: Tuple[str, ...] = (
             "UpBlock2D",
             "CrossAttnUpBlock2D",
@@ -457,13 +457,13 @@ class UNet2DConditionModel(OriginalUNet2DConditionModel, ConfigMixin, UNet2DCond
         transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
         reverse_transformer_layers_per_block: Optional[Tuple[Tuple[int]]] = None,
         encoder_hid_dim: Optional[int] = None,
-        encoder_hid_dim_type: Optional[str] = None,
+        encoder_hid_dim_type: str | None = None,
         attention_head_dim: Union[int, Tuple[int]] = 8,
         num_attention_heads: Optional[Union[int, Tuple[int]]] = None,
         dual_cross_attention: bool = False,
         use_linear_projection: bool = False,
-        class_embed_type: Optional[str] = None,
-        addition_embed_type: Optional[str] = None,
+        class_embed_type: str | None = None,
+        addition_embed_type: str | None = None,
         addition_time_embed_dim: Optional[int] = None,
         num_class_embeds: Optional[int] = None,
         upcast_attention: bool = False,
@@ -472,8 +472,8 @@ class UNet2DConditionModel(OriginalUNet2DConditionModel, ConfigMixin, UNet2DCond
         resnet_out_scale_factor: float = 1.0,
         time_embedding_type: str = "positional",
         time_embedding_dim: Optional[int] = None,
-        time_embedding_act_fn: Optional[str] = None,
-        timestep_post_act: Optional[str] = None,
+        time_embedding_act_fn: str | None = None,
+        timestep_post_act: str | None = None,
         time_cond_proj_dim: Optional[int] = None,
         conv_in_kernel: int = 3,
         conv_out_kernel: int = 3,
@@ -481,7 +481,7 @@ class UNet2DConditionModel(OriginalUNet2DConditionModel, ConfigMixin, UNet2DCond
         attention_type: str = "default",
         class_embeddings_concat: bool = False,
         mid_block_only_cross_attention: Optional[bool] = None,
-        cross_attention_norm: Optional[str] = None,
+        cross_attention_norm: str | None = None,
         addition_embed_type_num_heads: int = 64,
     ):
         """Initialize the UnifiedUNet2DConditionModel."""
@@ -565,7 +565,7 @@ class UNet2DConditionModel(OriginalUNet2DConditionModel, ConfigMixin, UNet2DCond
         self.agg_net = nn.ModuleList()

     def load_additional_layers(
-        self, dtype: Optional[torch.dtype] = torch.float16, channel: int = 512, weight_path: Optional[str] = None
+        self, dtype: Optional[torch.dtype] = torch.float16, channel: int = 512, weight_path: str | None = None
     ):
         """Load additional layers and weights from a file.

@@ -1096,7 +1096,7 @@ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     """Retrieve latents from an encoder output.

@@ -1267,12 +1267,12 @@ class FaithDiffStableDiffusionXLPipeline(
     def encode_prompt(
         self,
         prompt: str,
-        prompt_2: Optional[str] = None,
+        prompt_2: str | None = None,
         device: Optional[torch.device] = None,
         num_images_per_prompt: int = 1,
         do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
-        negative_prompt_2: Optional[str] = None,
+        negative_prompt: str | None = None,
+        negative_prompt_2: str | None = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
@@ -1808,7 +1808,7 @@ class FaithDiffStableDiffusionXLPipeline(
         height: Optional[int] = None,
         width: Optional[int] = None,
         num_inference_steps: int = 50,
-        start_point: Optional[str] = "noise",
+        start_point: str | None = "noise",
         timesteps: List[int] = None,
         denoising_end: Optional[float] = None,
         overlap: float = 0.5,
@@ -1823,7 +1823,7 @@ class FaithDiffStableDiffusionXLPipeline(
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/pipeline_flux_differential_img2img.py CHANGED
@@ -97,7 +97,7 @@ def calculate_shift(

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -698,7 +698,7 @@ class FluxDifferentialImg2ImgPipeline(DiffusionPipeline, FluxLoraLoaderMixin):
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
main/pipeline_flux_kontext_multiple_images.py CHANGED
@@ -188,7 +188,7 @@ def retrieve_timesteps(

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -838,7 +838,7 @@ class FluxKontextPipeline(
         negative_ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
main/pipeline_flux_rf_inversion.py CHANGED
@@ -685,7 +685,7 @@ class RFInversionFluxPipeline(
         latents: Optional[torch.FloatTensor] = None,
         prompt_embeds: Optional[torch.FloatTensor] = None,
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
main/pipeline_flux_semantic_guidance.py CHANGED
@@ -802,7 +802,7 @@ class FluxSemanticGuidancePipeline(
         negative_ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
main/pipeline_flux_with_cfg.py CHANGED
@@ -622,7 +622,7 @@ class FluxCFGPipeline(DiffusionPipeline, FluxLoraLoaderMixin, FromSingleFileMixi
         pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_prompt_embeds: Optional[torch.FloatTensor] = None,
         negative_pooled_prompt_embeds: Optional[torch.FloatTensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         joint_attention_kwargs: Optional[Dict[str, Any]] = None,
         callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
main/pipeline_hunyuandit_differential_img2img.py CHANGED
@@ -164,7 +164,7 @@ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
     encoder_output: torch.Tensor,
-    generator: Optional[torch.Generator] = None,
+    generator: torch.Generator | None = None,
     sample_mode: str = "sample",
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
@@ -349,7 +349,7 @@ class HunyuanDiTDifferentialImg2ImgPipeline(DiffusionPipeline):
         dtype: torch.dtype = None,
         num_images_per_prompt: int = 1,
         do_classifier_free_guidance: bool = True,
-        negative_prompt: Optional[str] = None,
+        negative_prompt: str | None = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
         prompt_attention_mask: Optional[torch.Tensor] = None,
@@ -749,7 +749,7 @@ class HunyuanDiTDifferentialImg2ImgPipeline(DiffusionPipeline):
         prompt_attention_mask_2: Optional[torch.Tensor] = None,
         negative_prompt_attention_mask: Optional[torch.Tensor] = None,
         negative_prompt_attention_mask_2: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback_on_step_end: Optional[
             Union[
main/pipeline_kolors_differential_img2img.py CHANGED
@@ -67,7 +67,7 @@ EXAMPLE_DOC_STRING = """

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -800,7 +800,7 @@ class KolorsDifferentialImg2ImgPipeline(
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         original_size: Optional[Tuple[int, int]] = None,
main/pipeline_kolors_inpainting.py CHANGED
@@ -239,7 +239,7 @@ def prepare_mask_and_masked_image(image, mask, height, width, return_image: bool

 # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.retrieve_latents
 def retrieve_latents(
-    encoder_output: torch.Tensor, generator: Optional[torch.Generator] = None, sample_mode: str = "sample"
+    encoder_output: torch.Tensor, generator: torch.Generator | None = None, sample_mode: str = "sample"
 ):
     if hasattr(encoder_output, "latent_dist") and sample_mode == "sample":
         return encoder_output.latent_dist.sample(generator)
@@ -1100,7 +1100,7 @@ class KolorsInpaintPipeline(
         negative_pooled_prompt_embeds: Optional[torch.Tensor] = None,
         ip_adapter_image: Optional[PipelineImageInput] = None,
         ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         cross_attention_kwargs: Optional[Dict[str, Any]] = None,
         guidance_rescale: float = 0.0,
main/pipeline_prompt2prompt.py CHANGED
@@ -571,7 +571,7 @@ class Prompt2PromptPipeline(
         latents: Optional[torch.Tensor] = None,
         prompt_embeds: Optional[torch.Tensor] = None,
         negative_prompt_embeds: Optional[torch.Tensor] = None,
-        output_type: Optional[str] = "pil",
+        output_type: str | None = "pil",
         return_dict: bool = True,
         callback: Optional[Callable[[int, int, torch.Tensor], None]] = None,
         callback_steps: Optional[int] = 1,
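`output_type: str | None = "pil"`, retyped in nearly every `__call__` signature above, selects the postprocessing applied after VAE decoding. A hedged sketch using diffusers' `VaeImageProcessor` directly; the random tensor stands in for a decoded image:

```python
import torch
from diffusers.image_processor import VaeImageProcessor

processor = VaeImageProcessor(vae_scale_factor=8)
decoded = torch.rand(1, 3, 64, 64) * 2 - 1  # stand-in for vae.decode(...).sample

pil_images = processor.postprocess(decoded, output_type="pil")  # list of PIL images
np_images = processor.postprocess(decoded, output_type="np")    # numpy array in [0, 1]
pt_images = processor.postprocess(decoded, output_type="pt")    # torch tensor in [0, 1]
```

Many of these pipelines also short-circuit before decoding when `output_type == "latent"` and return the raw latents instead.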