luluxxx committed
Commit 051027c
1 Parent(s): dc29d6e
Files changed (1)
  1. pipeline.py +1131 -0
pipeline.py ADDED
@@ -0,0 +1,1131 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ import inspect
16
+ from typing import Any, Callable, Dict, List, Optional, Tuple, Union
17
+
18
+ import numpy as np
19
+ import torch
20
+ import torch.nn.functional as F
21
+ from PIL import Image
22
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection
23
+
24
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
25
+ from diffusers.loaders import IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
26
+ from diffusers.models import AutoencoderKL, ControlNetModel, ImageProjection, UNet2DConditionModel, UNetMotionModel
27
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
28
+ from diffusers.models.unets.unet_motion_model import MotionAdapter
29
+ from diffusers.pipelines.animatediff.pipeline_output import AnimateDiffPipelineOutput
30
+ from diffusers.pipelines.controlnet.multicontrolnet import MultiControlNetModel
31
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
32
+ from diffusers.schedulers import (
33
+ DDIMScheduler,
34
+ DPMSolverMultistepScheduler,
35
+ EulerAncestralDiscreteScheduler,
36
+ EulerDiscreteScheduler,
37
+ LMSDiscreteScheduler,
38
+ PNDMScheduler,
39
+ )
40
+ from diffusers.utils import USE_PEFT_BACKEND, deprecate, logging, scale_lora_layers, unscale_lora_layers
41
+ from diffusers.utils.torch_utils import is_compiled_module, randn_tensor
42
+
43
+
44
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
45
+
46
+ EXAMPLE_DOC_STRING = """
47
+ Examples:
48
+ ```py
49
+ >>> import torch
50
+ >>> from diffusers import AutoencoderKL, ControlNetModel, MotionAdapter
51
+ >>> from diffusers.pipelines import DiffusionPipeline
52
+ >>> from diffusers.schedulers import DPMSolverMultistepScheduler
53
+ >>> from PIL import Image
54
+
55
+ >>> motion_id = "guoyww/animatediff-motion-adapter-v1-5-2"
56
+ >>> adapter = MotionAdapter.from_pretrained(motion_id)
57
+ >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/control_v11p_sd15_openpose", torch_dtype=torch.float16)
58
+ >>> vae = AutoencoderKL.from_pretrained("stabilityai/sd-vae-ft-mse", torch_dtype=torch.float16)
59
+
60
+ >>> model_id = "SG161222/Realistic_Vision_V5.1_noVAE"
61
+ >>> pipe = DiffusionPipeline.from_pretrained(
62
+ ... model_id,
63
+ ... motion_adapter=adapter,
64
+ ... controlnet=controlnet,
65
+ ... vae=vae,
66
+ ... custom_pipeline="pipeline_animatediff_controlnet",
67
+ ... ).to(device="cuda", dtype=torch.float16)
68
+ >>> pipe.scheduler = DPMSolverMultistepScheduler.from_pretrained(
69
+ ... model_id, subfolder="scheduler", clip_sample=False, timestep_spacing="linspace", steps_offset=1, beta_schedule="linear",
70
+ ... )
71
+ >>> pipe.enable_vae_slicing()
72
+
73
+ >>> conditioning_frames = []
74
+ >>> for i in range(1, 16 + 1):
75
+ ... conditioning_frames.append(Image.open(f"frame_{i}.png"))
76
+
77
+ >>> prompt = "astronaut in space, dancing"
78
+ >>> negative_prompt = "bad quality, worst quality, jpeg artifacts, ugly"
79
+ >>> result = pipe(
80
+ ... prompt=prompt,
81
+ ... negative_prompt=negative_prompt,
82
+ ... width=512,
83
+ ... height=768,
84
+ ... conditioning_frames=conditioning_frames,
85
+ ... num_inference_steps=12,
86
+ ... )
87
+
88
+ >>> from diffusers.utils import export_to_gif
89
+ >>> export_to_gif(result.frames[0], "result.gif")
90
+ ```
91
+ """
92
+
93
+
94
+ # Copied from diffusers.pipelines.animatediff.pipeline_animatediff.tensor2vid
95
+ def tensor2vid(video: torch.Tensor, processor, output_type="np"):
96
+ batch_size, channels, num_frames, height, width = video.shape
97
+ outputs = []
98
+ for batch_idx in range(batch_size):
99
+ batch_vid = video[batch_idx].permute(1, 0, 2, 3)
100
+ batch_output = processor.postprocess(batch_vid, output_type)
101
+
102
+ outputs.append(batch_output)
103
+
104
+ if output_type == "np":
105
+ outputs = np.stack(outputs)
106
+
107
+ elif output_type == "pt":
108
+ outputs = torch.stack(outputs)
109
+
110
+ elif not output_type == "pil":
111
+ raise ValueError(f"{output_type} does not exist. Please choose one of ['np', 'pt', 'pil']")
112
+
113
+ return outputs
114
+
115
+
116
+ class AnimateDiffControlNetPipeline(
117
+ DiffusionPipeline, StableDiffusionMixin, TextualInversionLoaderMixin, IPAdapterMixin, LoraLoaderMixin
118
+ ):
119
+ r"""
120
+ Pipeline for text-to-video generation with ControlNet guidance.
121
+
122
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
123
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
124
+
125
+ The pipeline also inherits the following loading methods:
126
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
127
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
128
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
129
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
130
+
131
+ Args:
132
+ vae ([`AutoencoderKL`]):
133
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
134
+ text_encoder ([`CLIPTextModel`]):
135
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
136
+ tokenizer (`CLIPTokenizer`):
137
+ A [`~transformers.CLIPTokenizer`] to tokenize text.
138
+ unet ([`UNet2DConditionModel`]):
139
+ A [`UNet2DConditionModel`] used to create a UNetMotionModel to denoise the encoded video latents.
140
+ motion_adapter ([`MotionAdapter`]):
141
+ A [`MotionAdapter`] to be used in combination with `unet` to denoise the encoded video latents.
142
+ scheduler ([`SchedulerMixin`]):
143
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
144
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
145
+ """
146
+
147
+ model_cpu_offload_seq = "text_encoder->unet->vae"
148
+ _optional_components = ["feature_extractor", "image_encoder"]
149
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
150
+
151
+ def __init__(
152
+ self,
153
+ vae: AutoencoderKL,
154
+ text_encoder: CLIPTextModel,
155
+ tokenizer: CLIPTokenizer,
156
+ unet: UNet2DConditionModel,
157
+ motion_adapter: MotionAdapter,
158
+ controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
159
+ scheduler: Union[
160
+ DDIMScheduler,
161
+ PNDMScheduler,
162
+ LMSDiscreteScheduler,
163
+ EulerDiscreteScheduler,
164
+ EulerAncestralDiscreteScheduler,
165
+ DPMSolverMultistepScheduler,
166
+ ],
167
+ feature_extractor: Optional[CLIPImageProcessor] = None,
168
+ image_encoder: Optional[CLIPVisionModelWithProjection] = None,
169
+ ):
170
+ super().__init__()
171
+ unet = UNetMotionModel.from_unet2d(unet, motion_adapter)
172
+
173
+ if isinstance(controlnet, (list, tuple)):
174
+ controlnet = MultiControlNetModel(controlnet)
175
+
176
+ self.register_modules(
177
+ vae=vae,
178
+ text_encoder=text_encoder,
179
+ tokenizer=tokenizer,
180
+ unet=unet,
181
+ motion_adapter=motion_adapter,
182
+ controlnet=controlnet,
183
+ scheduler=scheduler,
184
+ feature_extractor=feature_extractor,
185
+ image_encoder=image_encoder,
186
+ )
187
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
188
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
189
+ self.control_image_processor = VaeImageProcessor(
190
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
191
+ )
192
+
193
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_prompt with num_images_per_prompt -> num_videos_per_prompt
194
+ def encode_prompt(
195
+ self,
196
+ prompt,
197
+ device,
198
+ num_images_per_prompt,
199
+ do_classifier_free_guidance,
200
+ negative_prompt=None,
201
+ prompt_embeds: Optional[torch.FloatTensor] = None,
202
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
203
+ lora_scale: Optional[float] = None,
204
+ clip_skip: Optional[int] = None,
205
+ ):
206
+ r"""
207
+ Encodes the prompt into text encoder hidden states.
208
+
209
+ Args:
210
+ prompt (`str` or `List[str]`, *optional*):
211
+ prompt to be encoded
212
+ device: (`torch.device`):
213
+ torch device
214
+ num_images_per_prompt (`int`):
215
+ number of images that should be generated per prompt
216
+ do_classifier_free_guidance (`bool`):
217
+ whether to use classifier free guidance or not
218
+ negative_prompt (`str` or `List[str]`, *optional*):
219
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
220
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
221
+ less than `1`).
222
+ prompt_embeds (`torch.FloatTensor`, *optional*):
223
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
224
+ provided, text embeddings will be generated from `prompt` input argument.
225
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
226
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
227
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
228
+ argument.
229
+ lora_scale (`float`, *optional*):
230
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
231
+ clip_skip (`int`, *optional*):
232
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
233
+ the output of the pre-final layer will be used for computing the prompt embeddings.
234
+ """
235
+ # set lora scale so that monkey patched LoRA
236
+ # function of text encoder can correctly access it
237
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
238
+ self._lora_scale = lora_scale
239
+
240
+ # dynamically adjust the LoRA scale
241
+ if not USE_PEFT_BACKEND:
242
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
243
+ else:
244
+ scale_lora_layers(self.text_encoder, lora_scale)
245
+
246
+ if prompt is not None and isinstance(prompt, str):
247
+ batch_size = 1
248
+ elif prompt is not None and isinstance(prompt, list):
249
+ batch_size = len(prompt)
250
+ else:
251
+ batch_size = prompt_embeds.shape[0]
252
+
253
+ if prompt_embeds is None:
254
+ # textual inversion: process multi-vector tokens if necessary
255
+ if isinstance(self, TextualInversionLoaderMixin):
256
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
257
+
258
+ text_inputs = self.tokenizer(
259
+ prompt,
260
+ padding="max_length",
261
+ max_length=self.tokenizer.model_max_length,
262
+ truncation=True,
263
+ return_tensors="pt",
264
+ )
265
+ text_input_ids = text_inputs.input_ids
266
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
267
+
268
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
269
+ text_input_ids, untruncated_ids
270
+ ):
271
+ removed_text = self.tokenizer.batch_decode(
272
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
273
+ )
274
+ logger.warning(
275
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
276
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
277
+ )
278
+
279
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
280
+ attention_mask = text_inputs.attention_mask.to(device)
281
+ else:
282
+ attention_mask = None
283
+
284
+ if clip_skip is None:
285
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
286
+ prompt_embeds = prompt_embeds[0]
287
+ else:
288
+ prompt_embeds = self.text_encoder(
289
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
290
+ )
291
+ # Access the `hidden_states` first, that contains a tuple of
292
+ # all the hidden states from the encoder layers. Then index into
293
+ # the tuple to access the hidden states from the desired layer.
294
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
295
+ # We also need to apply the final LayerNorm here to not mess with the
296
+ # representations. The `last_hidden_states` that we typically use for
297
+ # obtaining the final prompt representations passes through the LayerNorm
298
+ # layer.
299
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
300
+
301
+ if self.text_encoder is not None:
302
+ prompt_embeds_dtype = self.text_encoder.dtype
303
+ elif self.unet is not None:
304
+ prompt_embeds_dtype = self.unet.dtype
305
+ else:
306
+ prompt_embeds_dtype = prompt_embeds.dtype
307
+
308
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
309
+
310
+ bs_embed, seq_len, _ = prompt_embeds.shape
311
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
312
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
313
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
314
+
315
+ # get unconditional embeddings for classifier free guidance
316
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
317
+ uncond_tokens: List[str]
318
+ if negative_prompt is None:
319
+ uncond_tokens = [""] * batch_size
320
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
321
+ raise TypeError(
322
+ f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
323
+ f" {type(prompt)}."
324
+ )
325
+ elif isinstance(negative_prompt, str):
326
+ uncond_tokens = [negative_prompt]
327
+ elif batch_size != len(negative_prompt):
328
+ raise ValueError(
329
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
330
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
331
+ " the batch size of `prompt`."
332
+ )
333
+ else:
334
+ uncond_tokens = negative_prompt
335
+
336
+ # textual inversion: process multi-vector tokens if necessary
337
+ if isinstance(self, TextualInversionLoaderMixin):
338
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
339
+
340
+ max_length = prompt_embeds.shape[1]
341
+ uncond_input = self.tokenizer(
342
+ uncond_tokens,
343
+ padding="max_length",
344
+ max_length=max_length,
345
+ truncation=True,
346
+ return_tensors="pt",
347
+ )
348
+
349
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
350
+ attention_mask = uncond_input.attention_mask.to(device)
351
+ else:
352
+ attention_mask = None
353
+
354
+ negative_prompt_embeds = self.text_encoder(
355
+ uncond_input.input_ids.to(device),
356
+ attention_mask=attention_mask,
357
+ )
358
+ negative_prompt_embeds = negative_prompt_embeds[0]
359
+
360
+ if do_classifier_free_guidance:
361
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
362
+ seq_len = negative_prompt_embeds.shape[1]
363
+
364
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
365
+
366
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
367
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
368
+
369
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
370
+ # Retrieve the original scale by scaling back the LoRA layers
371
+ unscale_lora_layers(self.text_encoder, lora_scale)
372
+
373
+ return prompt_embeds, negative_prompt_embeds
374
+
375
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.encode_image
376
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
+ dtype = next(self.image_encoder.parameters()).dtype
+
+ if not isinstance(image, torch.Tensor):
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
+
+ image = image.to(device=device, dtype=dtype)
+
+ # IP-Adapter projection layers other than `ImageProjection` expect the penultimate
+ # hidden states of the image encoder rather than the pooled image embeddings.
+ if output_hidden_states:
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+ uncond_image_enc_hidden_states = self.image_encoder(
+ torch.zeros_like(image), output_hidden_states=True
+ ).hidden_states[-2]
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
+
+ image_embeds = self.image_encoder(image).image_embeds
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
+ uncond_image_embeds = torch.zeros_like(image_embeds)
+ return image_embeds, uncond_image_embeds
388
+
389
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_ip_adapter_image_embeds
390
+ def prepare_ip_adapter_image_embeds(
391
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
392
+ ):
393
+ if ip_adapter_image_embeds is None:
394
+ if not isinstance(ip_adapter_image, list):
395
+ ip_adapter_image = [ip_adapter_image]
396
+
397
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
398
+ raise ValueError(
399
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
400
+ )
401
+
402
+ image_embeds = []
403
+ for single_ip_adapter_image, image_proj_layer in zip(
404
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
405
+ ):
406
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
407
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
408
+ single_ip_adapter_image, device, 1, output_hidden_state
409
+ )
410
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
411
+ single_negative_image_embeds = torch.stack(
412
+ [single_negative_image_embeds] * num_images_per_prompt, dim=0
413
+ )
414
+
415
+ if do_classifier_free_guidance:
416
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
417
+ single_image_embeds = single_image_embeds.to(device)
418
+
419
+ image_embeds.append(single_image_embeds)
420
+ else:
421
+ repeat_dims = [1]
422
+ image_embeds = []
423
+ for single_image_embeds in ip_adapter_image_embeds:
424
+ if do_classifier_free_guidance:
425
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
426
+ single_image_embeds = single_image_embeds.repeat(
427
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
428
+ )
429
+ single_negative_image_embeds = single_negative_image_embeds.repeat(
430
+ num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
431
+ )
432
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
433
+ else:
434
+ single_image_embeds = single_image_embeds.repeat(
435
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
436
+ )
437
+ image_embeds.append(single_image_embeds)
438
+
439
+ return image_embeds
440
+
441
+ # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.decode_latents
442
+ def decode_latents(self, latents):
443
+ latents = 1 / self.vae.config.scaling_factor * latents
444
+
445
+ batch_size, channels, num_frames, height, width = latents.shape
446
+ latents = latents.permute(0, 2, 1, 3, 4).reshape(batch_size * num_frames, channels, height, width)
447
+
448
+ image = self.vae.decode(latents).sample
449
+ video = (
450
+ image[None, :]
451
+ .reshape(
452
+ (
453
+ batch_size,
454
+ num_frames,
455
+ -1,
456
+ )
457
+ + image.shape[2:]
458
+ )
459
+ .permute(0, 2, 1, 3, 4)
460
+ )
461
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
462
+ video = video.float()
463
+ return video
464
+
465
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
466
+ def prepare_extra_step_kwargs(self, generator, eta):
467
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
468
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
469
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
470
+ # and should be between [0, 1]
471
+
472
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
473
+ extra_step_kwargs = {}
474
+ if accepts_eta:
475
+ extra_step_kwargs["eta"] = eta
476
+
477
+ # check if the scheduler accepts generator
478
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
479
+ if accepts_generator:
480
+ extra_step_kwargs["generator"] = generator
481
+ return extra_step_kwargs
482
+
483
+ # Adapted from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_inputs
484
+ def check_inputs(
485
+ self,
486
+ prompt,
487
+ height,
488
+ width,
489
+ num_frames,
490
+ callback_steps,
491
+ negative_prompt=None,
492
+ prompt_embeds=None,
493
+ negative_prompt_embeds=None,
494
+ callback_on_step_end_tensor_inputs=None,
495
+ image=None,
496
+ controlnet_conditioning_scale=1.0,
497
+ control_guidance_start=0.0,
498
+ control_guidance_end=1.0,
499
+ ):
500
+ if height % 8 != 0 or width % 8 != 0:
501
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
502
+
503
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
504
+ raise ValueError(
505
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
506
+ f" {type(callback_steps)}."
507
+ )
508
+ if callback_on_step_end_tensor_inputs is not None and not all(
509
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
510
+ ):
511
+ raise ValueError(
512
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
513
+ )
514
+
515
+ if prompt is not None and prompt_embeds is not None:
516
+ raise ValueError(
517
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
518
+ " only forward one of the two."
519
+ )
520
+ elif prompt is None and prompt_embeds is None:
521
+ raise ValueError(
522
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
523
+ )
524
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
525
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
526
+
527
+ if negative_prompt is not None and negative_prompt_embeds is not None:
528
+ raise ValueError(
529
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
530
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
531
+ )
532
+
533
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
534
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
535
+ raise ValueError(
536
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
537
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
538
+ f" {negative_prompt_embeds.shape}."
539
+ )
540
+
541
+ # `prompt` needs more sophisticated handling when there are multiple
542
+ # conditionings.
543
+ if isinstance(self.controlnet, MultiControlNetModel):
544
+ if isinstance(prompt, list):
545
+ logger.warning(
546
+ f"You have {len(self.controlnet.nets)} ControlNets and you have passed {len(prompt)}"
547
+ " prompts. The conditionings will be fixed across the prompts."
548
+ )
549
+
550
+ # Check `image`
551
+ is_compiled = hasattr(F, "scaled_dot_product_attention") and isinstance(
552
+ self.controlnet, torch._dynamo.eval_frame.OptimizedModule
553
+ )
554
+ if (
555
+ isinstance(self.controlnet, ControlNetModel)
556
+ or is_compiled
557
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
558
+ ):
559
+ if not isinstance(image, list):
560
+ raise TypeError(f"For single controlnet, `image` must be of type `list` but got {type(image)}")
561
+ if len(image) != num_frames:
562
+ raise ValueError(f"Expected `image` to have length {num_frames} but got {len(image)=}")
563
+ elif (
564
+ isinstance(self.controlnet, MultiControlNetModel)
565
+ or is_compiled
566
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
567
+ ):
568
+ if not isinstance(image, list) or not isinstance(image[0], list):
569
+ raise TypeError(f"For multiple controlnets, `image` must be a list of lists but got {type(image)=}")
570
+ if len(image[0]) != num_frames:
571
+ raise ValueError(f"Expected each sublist of images to have length {num_frames} but got {len(image[0])=}")
572
+ if any(len(img) != len(image[0]) for img in image):
573
+ raise ValueError("All conditioning frame batches for multicontrolnet must be the same size")
574
+ else:
575
+ assert False
576
+
577
+ # Check `controlnet_conditioning_scale`
578
+ if (
579
+ isinstance(self.controlnet, ControlNetModel)
580
+ or is_compiled
581
+ and isinstance(self.controlnet._orig_mod, ControlNetModel)
582
+ ):
583
+ if not isinstance(controlnet_conditioning_scale, float):
584
+ raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
585
+ elif (
586
+ isinstance(self.controlnet, MultiControlNetModel)
587
+ or is_compiled
588
+ and isinstance(self.controlnet._orig_mod, MultiControlNetModel)
589
+ ):
590
+ if isinstance(controlnet_conditioning_scale, list):
591
+ if any(isinstance(i, list) for i in controlnet_conditioning_scale):
592
+ raise ValueError("A single batch of multiple conditionings is supported at the moment.")
593
+ elif isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
594
+ self.controlnet.nets
595
+ ):
596
+ raise ValueError(
597
+ "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
598
+ " the same length as the number of controlnets"
599
+ )
600
+ else:
601
+ assert False
602
+
603
+ if not isinstance(control_guidance_start, (tuple, list)):
604
+ control_guidance_start = [control_guidance_start]
605
+
606
+ if not isinstance(control_guidance_end, (tuple, list)):
607
+ control_guidance_end = [control_guidance_end]
608
+
609
+ if len(control_guidance_start) != len(control_guidance_end):
610
+ raise ValueError(
611
+ f"`control_guidance_start` has {len(control_guidance_start)} elements, but `control_guidance_end` has {len(control_guidance_end)} elements. Make sure to provide the same number of elements to each list."
612
+ )
613
+
614
+ if isinstance(self.controlnet, MultiControlNetModel):
615
+ if len(control_guidance_start) != len(self.controlnet.nets):
616
+ raise ValueError(
617
+ f"`control_guidance_start`: {control_guidance_start} has {len(control_guidance_start)} elements but there are {len(self.controlnet.nets)} controlnets available. Make sure to provide {len(self.controlnet.nets)}."
618
+ )
619
+
620
+ for start, end in zip(control_guidance_start, control_guidance_end):
621
+ if start >= end:
622
+ raise ValueError(
623
+ f"control guidance start: {start} cannot be larger or equal to control guidance end: {end}."
624
+ )
625
+ if start < 0.0:
626
+ raise ValueError(f"control guidance start: {start} can't be smaller than 0.")
627
+ if end > 1.0:
628
+ raise ValueError(f"control guidance end: {end} can't be larger than 1.0.")
629
+
630
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.check_image
631
+ def check_image(self, image, prompt, prompt_embeds):
632
+ image_is_pil = isinstance(image, Image.Image)
633
+ image_is_tensor = isinstance(image, torch.Tensor)
634
+ image_is_np = isinstance(image, np.ndarray)
635
+ image_is_pil_list = isinstance(image, list) and isinstance(image[0], Image.Image)
636
+ image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
637
+ image_is_np_list = isinstance(image, list) and isinstance(image[0], np.ndarray)
638
+
639
+ if (
640
+ not image_is_pil
641
+ and not image_is_tensor
642
+ and not image_is_np
643
+ and not image_is_pil_list
644
+ and not image_is_tensor_list
645
+ and not image_is_np_list
646
+ ):
647
+ raise TypeError(
648
+ f"image must be passed and be one of PIL image, numpy array, torch tensor, list of PIL images, list of numpy arrays or list of torch tensors, but is {type(image)}"
649
+ )
650
+
651
+ if image_is_pil:
652
+ image_batch_size = 1
653
+ else:
654
+ image_batch_size = len(image)
655
+
656
+ if prompt is not None and isinstance(prompt, str):
657
+ prompt_batch_size = 1
658
+ elif prompt is not None and isinstance(prompt, list):
659
+ prompt_batch_size = len(prompt)
660
+ elif prompt_embeds is not None:
661
+ prompt_batch_size = prompt_embeds.shape[0]
662
+
663
+ if image_batch_size != 1 and image_batch_size != prompt_batch_size:
664
+ raise ValueError(
665
+ f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
666
+ )
667
+
668
+ # Copied from diffusers.pipelines.text_to_video_synthesis.pipeline_text_to_video_synth.TextToVideoSDPipeline.prepare_latents
669
+ def prepare_latents(
670
+ self, batch_size, num_channels_latents, num_frames, height, width, dtype, device, generator, latents=None
671
+ ):
672
+ shape = (
673
+ batch_size,
674
+ num_channels_latents,
675
+ num_frames,
676
+ height // self.vae_scale_factor,
677
+ width // self.vae_scale_factor,
678
+ )
679
+ if isinstance(generator, list) and len(generator) != batch_size:
680
+ raise ValueError(
681
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
682
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
683
+ )
684
+
685
+ if latents is None:
686
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
687
+ else:
688
+ latents = latents.to(device)
689
+
690
+ # scale the initial noise by the standard deviation required by the scheduler
691
+ latents = latents * self.scheduler.init_noise_sigma
692
+ return latents
693
+
694
+ # Copied from diffusers.pipelines.controlnet.pipeline_controlnet.StableDiffusionControlNetPipeline.prepare_image
695
+ def prepare_image(
696
+ self,
697
+ image,
698
+ width,
699
+ height,
700
+ batch_size,
701
+ num_images_per_prompt,
702
+ device,
703
+ dtype,
704
+ do_classifier_free_guidance=False,
705
+ guess_mode=False,
706
+ ):
707
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
708
+ image_batch_size = image.shape[0]
709
+
710
+ if image_batch_size == 1:
711
+ repeat_by = batch_size
712
+ else:
713
+ # image batch size is the same as prompt batch size
714
+ repeat_by = num_images_per_prompt
715
+
716
+ image = image.repeat_interleave(repeat_by, dim=0)
717
+
718
+ image = image.to(device=device, dtype=dtype)
719
+
720
+ if do_classifier_free_guidance and not guess_mode:
721
+ image = torch.cat([image] * 2)
722
+
723
+ return image
724
+
725
+ @property
726
+ def guidance_scale(self):
727
+ return self._guidance_scale
728
+
729
+ @property
730
+ def clip_skip(self):
731
+ return self._clip_skip
732
+
733
+ # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
734
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
735
+ # corresponds to doing no classifier free guidance.
736
+ @property
737
+ def do_classifier_free_guidance(self):
738
+ return self._guidance_scale > 1
739
+
740
+ @property
741
+ def cross_attention_kwargs(self):
742
+ return self._cross_attention_kwargs
743
+
744
+ @property
745
+ def num_timesteps(self):
746
+ return self._num_timesteps
747
+
748
+ @torch.no_grad()
749
+ def __call__(
750
+ self,
751
+ prompt: Union[str, List[str]] = None,
752
+ num_frames: Optional[int] = 16,
753
+ height: Optional[int] = None,
754
+ width: Optional[int] = None,
755
+ num_inference_steps: int = 50,
756
+ guidance_scale: float = 7.5,
757
+ negative_prompt: Optional[Union[str, List[str]]] = None,
758
+ num_videos_per_prompt: Optional[int] = 1,
759
+ eta: float = 0.0,
760
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
761
+ latents: Optional[torch.FloatTensor] = None,
762
+ prompt_embeds: Optional[torch.FloatTensor] = None,
763
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
764
+ ip_adapter_image: Optional[PipelineImageInput] = None,
765
+ ip_adapter_image_embeds: Optional[List[torch.FloatTensor]] = None,
766
+ conditioning_frames: Optional[List[PipelineImageInput]] = None,
767
+ output_type: Optional[str] = "pil",
768
+ return_dict: bool = True,
769
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
770
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
771
+ guess_mode: bool = False,
772
+ control_guidance_start: Union[float, List[float]] = 0.0,
773
+ control_guidance_end: Union[float, List[float]] = 1.0,
774
+ clip_skip: Optional[int] = None,
775
+ callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
776
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
777
+ **kwargs,
778
+ ):
779
+ r"""
780
+ The call function to the pipeline for generation.
781
+
782
+ Args:
783
+ prompt (`str` or `List[str]`, *optional*):
784
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
785
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
786
+ The height in pixels of the generated video.
787
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
788
+ The width in pixels of the generated video.
789
+ num_frames (`int`, *optional*, defaults to 16):
790
+ The number of video frames that are generated. Defaults to 16 frames which at 8 frames per second
791
+ amounts to 2 seconds of video.
792
+ num_inference_steps (`int`, *optional*, defaults to 50):
793
+ The number of denoising steps. More denoising steps usually lead to a higher quality videos at the
794
+ expense of slower inference.
795
+ guidance_scale (`float`, *optional*, defaults to 7.5):
796
+ A higher guidance scale value encourages the model to generate images closely linked to the text
797
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
798
+ negative_prompt (`str` or `List[str]`, *optional*):
799
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
800
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
801
+ eta (`float`, *optional*, defaults to 0.0):
802
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
803
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
804
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
805
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
806
+ generation deterministic.
807
+ latents (`torch.FloatTensor`, *optional*):
808
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for video
809
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
810
+ tensor is generated by sampling using the supplied random `generator`. Latents should be of shape
811
+ `(batch_size, num_channel, num_frames, height, width)`.
812
+ prompt_embeds (`torch.FloatTensor`, *optional*):
813
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
814
+ provided, text embeddings are generated from the `prompt` input argument.
815
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
816
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
817
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
818
+ ip_adapter_image (`PipelineImageInput`, *optional*):
819
+ Optional image input to work with IP Adapters.
820
+ ip_adapter_image_embeds (`List[torch.FloatTensor]`, *optional*):
821
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of IP-adapters.
822
+ Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should contain the negative image embedding
823
+ if `do_classifier_free_guidance` is set to `True`.
824
+ If not provided, embeddings are computed from the `ip_adapter_image` input argument.
825
+ conditioning_frames (`List[PipelineImageInput]`, *optional*):
826
+ The ControlNet input condition to provide guidance to the `unet` for generation. If multiple ControlNets
827
+ are specified, images must be passed as a list such that each element of the list can be correctly
828
+ batched for input to a single ControlNet.
829
+ output_type (`str`, *optional*, defaults to `"pil"`):
830
+ The output format of the generated video. Choose between `torch.FloatTensor`, `PIL.Image` or
831
+ `np.array`.
832
+ return_dict (`bool`, *optional*, defaults to `True`):
833
+ Whether or not to return a [`~pipelines.text_to_video_synthesis.TextToVideoSDPipelineOutput`] instead
834
+ of a plain tuple.
835
+ cross_attention_kwargs (`dict`, *optional*):
836
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
837
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
838
+ controlnet_conditioning_scale (`float` or `List[float]`, *optional*, defaults to 1.0):
839
+ The outputs of the ControlNet are multiplied by `controlnet_conditioning_scale` before they are added
840
+ to the residual in the original `unet`. If multiple ControlNets are specified in `init`, you can set
841
+ the corresponding scale as a list.
842
+ guess_mode (`bool`, *optional*, defaults to `False`):
843
+ The ControlNet encoder tries to recognize the content of the input image even if you remove all
844
+ prompts. A `guidance_scale` value between 3.0 and 5.0 is recommended.
845
+ control_guidance_start (`float` or `List[float]`, *optional*, defaults to 0.0):
846
+ The percentage of total steps at which the ControlNet starts applying.
847
+ control_guidance_end (`float` or `List[float]`, *optional*, defaults to 1.0):
848
+ The percentage of total steps at which the ControlNet stops applying.
849
+ clip_skip (`int`, *optional*):
850
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
851
+ the output of the pre-final layer will be used for computing the prompt embeddings.
852
+ callback_on_step_end (`Callable`, *optional*):
853
+ A function that is called at the end of each denoising step during inference. The function is called
854
+ with the following arguments: `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int,
855
+ callback_kwargs: Dict)`. `callback_kwargs` will include a list of all tensors as specified by
856
+ `callback_on_step_end_tensor_inputs`.
857
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
858
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
859
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
860
+ `._callback_tensor_inputs` attribute of your pipeline class.
861
+
862
+ Examples:
863
+
864
+ Returns:
865
+ [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] or `tuple`:
866
+ If `return_dict` is `True`, [`~pipelines.animatediff.pipeline_output.AnimateDiffPipelineOutput`] is
867
+ returned, otherwise a `tuple` is returned where the first element is a list with the generated frames.
868
+ """
869
+
870
+ callback = kwargs.pop("callback", None)
871
+ callback_steps = kwargs.pop("callback_steps", None)
872
+
873
+ if callback is not None:
874
+ deprecate(
875
+ "callback",
876
+ "1.0.0",
877
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
878
+ )
879
+ if callback_steps is not None:
880
+ deprecate(
881
+ "callback_steps",
882
+ "1.0.0",
883
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
884
+ )
885
+
886
+ controlnet = self.controlnet._orig_mod if is_compiled_module(self.controlnet) else self.controlnet
887
+
888
+ # align format for control guidance
889
+ if not isinstance(control_guidance_start, list) and isinstance(control_guidance_end, list):
890
+ control_guidance_start = len(control_guidance_end) * [control_guidance_start]
891
+ elif not isinstance(control_guidance_end, list) and isinstance(control_guidance_start, list):
892
+ control_guidance_end = len(control_guidance_start) * [control_guidance_end]
893
+ elif not isinstance(control_guidance_start, list) and not isinstance(control_guidance_end, list):
894
+ mult = len(controlnet.nets) if isinstance(controlnet, MultiControlNetModel) else 1
895
+ control_guidance_start, control_guidance_end = (
896
+ mult * [control_guidance_start],
897
+ mult * [control_guidance_end],
898
+ )
899
+
900
+ # 0. Default height and width to unet
901
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
902
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
903
+
904
+ num_videos_per_prompt = 1
905
+
906
+ # 1. Check inputs. Raise error if not correct
907
+ self.check_inputs(
908
+ prompt=prompt,
909
+ height=height,
910
+ width=width,
911
+ num_frames=num_frames,
912
+ callback_steps=callback_steps,
913
+ negative_prompt=negative_prompt,
914
+ callback_on_step_end_tensor_inputs=callback_on_step_end_tensor_inputs,
915
+ prompt_embeds=prompt_embeds,
916
+ negative_prompt_embeds=negative_prompt_embeds,
917
+ image=conditioning_frames,
918
+ controlnet_conditioning_scale=controlnet_conditioning_scale,
919
+ control_guidance_start=control_guidance_start,
920
+ control_guidance_end=control_guidance_end,
921
+ )
922
+
923
+ self._guidance_scale = guidance_scale
924
+ self._clip_skip = clip_skip
925
+ self._cross_attention_kwargs = cross_attention_kwargs
926
+
927
+ # 2. Define call parameters
928
+ if prompt is not None and isinstance(prompt, str):
929
+ batch_size = 1
930
+ elif prompt is not None and isinstance(prompt, list):
931
+ batch_size = len(prompt)
932
+ else:
933
+ batch_size = prompt_embeds.shape[0]
934
+
935
+ device = self._execution_device
936
+
937
+ if isinstance(controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
938
+ controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(controlnet.nets)
939
+
940
+ global_pool_conditions = (
941
+ controlnet.config.global_pool_conditions
942
+ if isinstance(controlnet, ControlNetModel)
943
+ else controlnet.nets[0].config.global_pool_conditions
944
+ )
945
+ guess_mode = guess_mode or global_pool_conditions
946
+
947
+ # 3. Encode input prompt
948
+ text_encoder_lora_scale = (
949
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
950
+ )
951
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
952
+ prompt,
953
+ device,
954
+ num_videos_per_prompt,
955
+ self.do_classifier_free_guidance,
956
+ negative_prompt,
957
+ prompt_embeds=prompt_embeds,
958
+ negative_prompt_embeds=negative_prompt_embeds,
959
+ lora_scale=text_encoder_lora_scale,
960
+ clip_skip=self.clip_skip,
961
+ )
962
+ # For classifier free guidance, we need to do two forward passes.
963
+ # Here we concatenate the unconditional and text embeddings into a single batch
964
+ # to avoid doing two forward passes
965
+ if self.do_classifier_free_guidance:
966
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
967
+
968
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
969
+ image_embeds = self.prepare_ip_adapter_image_embeds(
970
+ ip_adapter_image, ip_adapter_image_embeds, device, batch_size * num_videos_per_prompt, self.do_classifier_free_guidance
971
+ )
972
+
973
+ if isinstance(controlnet, ControlNetModel):
974
+ conditioning_frames = self.prepare_image(
975
+ image=conditioning_frames,
976
+ width=width,
977
+ height=height,
978
+ batch_size=batch_size * num_videos_per_prompt * num_frames,
979
+ num_images_per_prompt=num_videos_per_prompt,
980
+ device=device,
981
+ dtype=controlnet.dtype,
982
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
983
+ guess_mode=guess_mode,
984
+ )
985
+ elif isinstance(controlnet, MultiControlNetModel):
986
+ cond_prepared_frames = []
987
+ for frame_ in conditioning_frames:
988
+ prepared_frame = self.prepare_image(
989
+ image=frame_,
990
+ width=width,
991
+ height=height,
992
+ batch_size=batch_size * num_videos_per_prompt * num_frames,
993
+ num_images_per_prompt=num_videos_per_prompt,
994
+ device=device,
995
+ dtype=controlnet.dtype,
996
+ do_classifier_free_guidance=self.do_classifier_free_guidance,
997
+ guess_mode=guess_mode,
998
+ )
999
+ cond_prepared_frames.append(prepared_frame)
1000
+ conditioning_frames = cond_prepared_frames
1001
+ else:
1002
+ assert False
1003
+
1004
+ # 4. Prepare timesteps
1005
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1006
+ timesteps = self.scheduler.timesteps
1007
+ self._num_timesteps = len(timesteps)
1008
+
1009
+ # 5. Prepare latent variables
1010
+ num_channels_latents = self.unet.config.in_channels
1011
+ latents = self.prepare_latents(
1012
+ batch_size * num_videos_per_prompt,
1013
+ num_channels_latents,
1014
+ num_frames,
1015
+ height,
1016
+ width,
1017
+ prompt_embeds.dtype,
1018
+ device,
1019
+ generator,
1020
+ latents,
1021
+ )
1022
+
1023
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1024
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1025
+
1026
+ # 7. Add image embeds for IP-Adapter
1027
+ added_cond_kwargs = (
1028
+ {"image_embeds": image_embeds}
1029
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None
1030
+ else None
1031
+ )
1032
+
1033
+ # 7.1 Create tensor stating which controlnets to keep
1034
+ controlnet_keep = []
1035
+ for i in range(len(timesteps)):
1036
+ keeps = [
1037
+ 1.0 - float(i / len(timesteps) < s or (i + 1) / len(timesteps) > e)
1038
+ for s, e in zip(control_guidance_start, control_guidance_end)
1039
+ ]
1040
+ controlnet_keep.append(keeps[0] if isinstance(controlnet, ControlNetModel) else keeps)
1041
+
1042
+ # 8. Denoising loop
1043
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1044
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1045
+ for i, t in enumerate(timesteps):
1046
+ # expand the latents if we are doing classifier free guidance
1047
+ latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
1048
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1049
+
1050
+ if guess_mode and self.do_classifier_free_guidance:
1051
+ # Infer ControlNet only for the conditional batch.
1052
+ control_model_input = latents
1053
+ control_model_input = self.scheduler.scale_model_input(control_model_input, t)
1054
+ controlnet_prompt_embeds = prompt_embeds.chunk(2)[1]
1055
+ else:
1056
+ control_model_input = latent_model_input
1057
+ controlnet_prompt_embeds = prompt_embeds
1058
+ controlnet_prompt_embeds = controlnet_prompt_embeds.repeat_interleave(num_frames, dim=0)
1059
+
1060
+ if isinstance(controlnet_keep[i], list):
1061
+ cond_scale = [c * s for c, s in zip(controlnet_conditioning_scale, controlnet_keep[i])]
1062
+ else:
1063
+ controlnet_cond_scale = controlnet_conditioning_scale
1064
+ if isinstance(controlnet_cond_scale, list):
1065
+ controlnet_cond_scale = controlnet_cond_scale[0]
1066
+ cond_scale = controlnet_cond_scale * controlnet_keep[i]
1067
+
1068
+ control_model_input = torch.transpose(control_model_input, 1, 2)
1069
+ control_model_input = control_model_input.reshape(
1070
+ (-1, control_model_input.shape[2], control_model_input.shape[3], control_model_input.shape[4])
1071
+ )
1072
+
1073
+ down_block_res_samples, mid_block_res_sample = self.controlnet(
1074
+ control_model_input,
1075
+ t,
1076
+ encoder_hidden_states=controlnet_prompt_embeds,
1077
+ controlnet_cond=conditioning_frames,
1078
+ conditioning_scale=cond_scale,
1079
+ guess_mode=guess_mode,
1080
+ return_dict=False,
1081
+ )
1082
+
1083
+ # predict the noise residual
1084
+ noise_pred = self.unet(
1085
+ latent_model_input,
1086
+ t,
1087
+ encoder_hidden_states=prompt_embeds,
1088
+ cross_attention_kwargs=self.cross_attention_kwargs,
1089
+ added_cond_kwargs=added_cond_kwargs,
1090
+ down_block_additional_residuals=down_block_res_samples,
1091
+ mid_block_additional_residual=mid_block_res_sample,
1092
+ ).sample
1093
+
1094
+ # perform guidance
1095
+ if self.do_classifier_free_guidance:
1096
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1097
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1098
+
1099
+ # compute the previous noisy sample x_t -> x_t-1
1100
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
1101
+
1102
+ if callback_on_step_end is not None:
1103
+ callback_kwargs = {}
1104
+ for k in callback_on_step_end_tensor_inputs:
1105
+ callback_kwargs[k] = locals()[k]
1106
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1107
+
1108
+ latents = callback_outputs.pop("latents", latents)
1109
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1110
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1111
+
1112
+ # call the callback, if provided
1113
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1114
+ progress_bar.update()
1115
+ if callback is not None and i % callback_steps == 0:
1116
+ callback(i, t, latents)
1117
+
1118
+ # 9. Post processing
1119
+ if output_type == "latent":
1120
+ video = latents
1121
+ else:
1122
+ video_tensor = self.decode_latents(latents)
1123
+ video = tensor2vid(video_tensor, self.image_processor, output_type=output_type)
1124
+
1125
+ # 10. Offload all models
1126
+ self.maybe_free_model_hooks()
1127
+
1128
+ if not return_dict:
1129
+ return (video,)
1130
+
1131
+ return AnimateDiffPipelineOutput(frames=video)