lzq49 committed on
Commit
0c0f76e
1 Parent(s): 16586b3

Upload 2 files

Files changed (2)
  1. attention_processor.py +118 -0
  2. pipeline_mvdream.py +1048 -0
attention_processor.py ADDED
@@ -0,0 +1,118 @@
from typing import Callable, Optional

import torch
from einops import rearrange

from diffusers.models.attention_processor import Attention
from diffusers.utils.import_utils import is_xformers_available

# `is_xformers_available` is a function and must be called; testing the function
# object itself would always be truthy.
if is_xformers_available():
    import xformers
    import xformers.ops
else:
    xformers = None


class CrossViewAttnProcessor:
    """Self-attention processor that folds all views of one object into a single
    token sequence, so every view attends to every other view."""

    def __init__(self, num_views: int = 1):
        self.num_views = num_views

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
    ):
        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
        query = attn.to_q(hidden_states)

        is_cross_attention = encoder_hidden_states is not None
        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        # fold the view axis into the sequence axis: (b n) l d -> b (n l) d
        if not is_cross_attention and self.num_views > 1:
            query = rearrange(query, "(b n) l d -> b (n l) d", n=self.num_views)
            key = rearrange(key, "(b n) l d -> b (n l) d", n=self.num_views)
            value = rearrange(value, "(b n) l d -> b (n l) d", n=self.num_views)

        query = attn.head_to_batch_dim(query)
        key = attn.head_to_batch_dim(key)
        value = attn.head_to_batch_dim(value)

        attention_probs = attn.get_attention_scores(query, key, attention_mask)
        hidden_states = torch.bmm(attention_probs, value)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        # restore the per-view layout: b (n l) d -> (b n) l d
        if not is_cross_attention and self.num_views > 1:
            hidden_states = rearrange(hidden_states, "b (n l) d -> (b n) l d", n=self.num_views)

        return hidden_states


class XFormersCrossViewAttnProcessor:
    """Memory-efficient (xFormers) variant of `CrossViewAttnProcessor`."""

    def __init__(
        self,
        num_views: int = 1,
        attention_op: Optional[Callable] = None,
    ):
        self.num_views = num_views
        self.attention_op = attention_op

    def __call__(
        self,
        attn: Attention,
        hidden_states,
        encoder_hidden_states=None,
        attention_mask=None,
    ):
        batch_size, sequence_length, _ = (
            hidden_states.shape if encoder_hidden_states is None else encoder_hidden_states.shape
        )
        attention_mask = attn.prepare_attention_mask(attention_mask, sequence_length, batch_size)
        query = attn.to_q(hidden_states)

        is_cross_attention = encoder_hidden_states is not None
        if encoder_hidden_states is None:
            encoder_hidden_states = hidden_states
        elif attn.norm_cross:
            encoder_hidden_states = attn.norm_encoder_hidden_states(encoder_hidden_states)

        key = attn.to_k(encoder_hidden_states)
        value = attn.to_v(encoder_hidden_states)

        if not is_cross_attention and self.num_views > 1:
            query = rearrange(query, "(b n) l d -> b (n l) d", n=self.num_views)
            key = rearrange(key, "(b n) l d -> b (n l) d", n=self.num_views)
            value = rearrange(value, "(b n) l d -> b (n l) d", n=self.num_views)

        # xFormers expects contiguous inputs
        query = attn.head_to_batch_dim(query).contiguous()
        key = attn.head_to_batch_dim(key).contiguous()
        value = attn.head_to_batch_dim(value).contiguous()

        hidden_states = xformers.ops.memory_efficient_attention(
            query, key, value, attn_bias=attention_mask, op=self.attention_op, scale=attn.scale
        )
        hidden_states = hidden_states.to(query.dtype)
        hidden_states = attn.batch_to_head_dim(hidden_states)

        # linear proj
        hidden_states = attn.to_out[0](hidden_states)
        # dropout
        hidden_states = attn.to_out[1](hidden_states)

        if not is_cross_attention and self.num_views > 1:
            hidden_states = rearrange(hidden_states, "b (n l) d -> (b n) l d", n=self.num_views)

        return hidden_states
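A quick shape check of the cross-view rearrange (a minimal sketch with illustrative sizes, not part of the commit). Folding the view axis into the sequence axis lets a single attention call span all views, and the inverse rearrange restores the per-view layout exactly:

import torch
from einops import rearrange

num_views, batch, seq_len, dim = 4, 2, 16, 8
tokens = torch.randn(batch * num_views, seq_len, dim)            # (b n) l d

joint = rearrange(tokens, "(b n) l d -> b (n l) d", n=num_views)
assert joint.shape == (batch, num_views * seq_len, dim)          # one sequence per object

restored = rearrange(joint, "b (n l) d -> (b n) l d", n=num_views)
assert torch.equal(restored, tokens)                             # lossless round trip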
pipeline_mvdream.py ADDED
@@ -0,0 +1,1048 @@
# pipeline of MVDream sampling
# Modified from ..stable_diffusion.pipeline_stable_diffusion.py but with safety_checker deprecated

import inspect
from typing import Any, Callable, Dict, List, Optional, Union
from dataclasses import dataclass

import PIL.Image
import numpy as np
import torch
from packaging import version
from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection

from diffusers.configuration_utils import FrozenDict
from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
from diffusers.models import AutoencoderKL
from diffusers.models.lora import adjust_lora_scale_text_encoder
from diffusers.models.attention_processor import (
    AttnProcessor,
    XFormersAttnProcessor,
)
from diffusers.schedulers import KarrasDiffusionSchedulers
from diffusers.utils import (
    BaseOutput,
    USE_PEFT_BACKEND,
    deprecate,
    logging,
    replace_example_docstring,
    scale_lora_layers,
    unscale_lora_layers,
    is_xformers_available,
)
from diffusers.utils.torch_utils import randn_tensor
from diffusers.pipelines.pipeline_utils import DiffusionPipeline

# local modules: the custom multi-view UNet (which also consumes camera embeddings)
# replaces the stock `diffusers` UNet2DConditionModel
from unet import UNet2DConditionModel
from camera_proj import CameraMatrixEmbedding
from attention_processor import (
    CrossViewAttnProcessor,
    XFormersCrossViewAttnProcessor,
)


logger = logging.get_logger(__name__)  # pylint: disable=invalid-name
@dataclass
class MVDreamPipelineOutput(BaseOutput):
    """
    Output class for the MVDream pipeline.

    Args:
        images (`List[PIL.Image.Image]` or `np.ndarray`)
            List of denoised PIL images of length `batch_size`, or a numpy array of shape `(batch_size, height,
            width, num_channels)`, representing the denoised images of the diffusion pipeline.
    """

    images: Union[List[PIL.Image.Image], np.ndarray, torch.Tensor]


def set_self_attn_processor(model, processor):
    r"""
    Set the attention processor on the **self-attention** layers only (modules whose name contains `attn1`);
    cross-attention layers are left untouched.

    Parameters:
        processor (`dict` of `AttentionProcessor` or `AttentionProcessor`):
            The instantiated processor class, or a dictionary of processor classes that will be set as the
            processor of **all** self-attention layers. If `processor` is a dict, the key needs to define the
            path to the corresponding attention processor. This is strongly recommended when setting trainable
            attention processors.
    """

    def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor):
        if hasattr(module, "set_processor") and "attn1" in name:
            if not isinstance(processor, dict):
                module.set_processor(processor)
            else:
                module.set_processor(processor.pop(f"{name}.processor"))

        for sub_name, child in module.named_children():
            fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor)

    for name, module in model.named_children():
        fn_recursive_attn_processor(name, module, processor)

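For reference, in `diffusers` UNets the self-attention of each transformer block is registered under a name containing `attn1` and the text cross-attention under `attn2`, which is what the name filter above relies on. A minimal sketch listing the keys that would be matched (illustrative tiny config, not part of the commit):

from diffusers import UNet2DConditionModel as StockUNet

unet = StockUNet(
    sample_size=8,
    in_channels=4,
    out_channels=4,
    layers_per_block=1,
    block_out_channels=(8, 16),
    norm_num_groups=8,
    cross_attention_dim=16,
    attention_head_dim=2,
    down_block_types=("CrossAttnDownBlock2D", "DownBlock2D"),
    up_block_types=("UpBlock2D", "CrossAttnUpBlock2D"),
)
self_attn_keys = [k for k in unet.attn_processors if "attn1" in k]
cross_attn_keys = [k for k in unet.attn_processors if "attn2" in k]
assert self_attn_keys and cross_attn_keys  # both kinds exist; only attn1 is replaced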

def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
    """
    Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
    Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
    """
    std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
    std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
    # rescale the results from guidance (fixes overexposure)
    noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
    # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
    noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
    return noise_cfg

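A small numeric check of what the rescale does, using the function above (a sketch, not part of the commit): when the CFG combination merely amplifies the text prediction, `guidance_rescale=1.0` maps it back to the text branch's scale exactly, and intermediate values interpolate linearly:

import torch

noise_pred_text = torch.randn(2, 4, 8, 8)
noise_cfg = 1.5 * noise_pred_text  # CFG output over-amplified relative to the text branch

full = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=1.0)
assert torch.allclose(full, noise_pred_text)  # scale fully restored

partial = rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.7)
assert torch.allclose(partial, 0.7 * noise_pred_text + 0.3 * noise_cfg)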

def retrieve_timesteps(
    scheduler,
    num_inference_steps: Optional[int] = None,
    device: Optional[Union[str, torch.device]] = None,
    timesteps: Optional[List[int]] = None,
    **kwargs,
):
    """
    Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
    custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.

    Args:
        scheduler (`SchedulerMixin`):
            The scheduler to get timesteps from.
        num_inference_steps (`int`):
            The number of diffusion steps used when generating samples with a pre-trained model. If used,
            `timesteps` must be `None`.
        device (`str` or `torch.device`, *optional*):
            The device to which the timesteps should be moved. If `None`, the timesteps are not moved.
        timesteps (`List[int]`, *optional*):
            Custom timesteps used to support arbitrary spacing between timesteps. If `None`, then the default
            timestep spacing strategy of the scheduler is used. If `timesteps` is passed, `num_inference_steps`
            must be `None`.

    Returns:
        `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and
        the second element is the number of inference steps.
    """
    if timesteps is not None:
        accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
        if not accepts_timesteps:
            raise ValueError(
                f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
                f" timestep schedules. Please check whether you are using the correct scheduler."
            )
        scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
        timesteps = scheduler.timesteps
        num_inference_steps = len(timesteps)
    else:
        scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
        timesteps = scheduler.timesteps
    return timesteps, num_inference_steps

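Usage sketch (illustrative, not part of the commit): with a stock scheduler and no custom `timesteps`, the helper simply defers to `set_timesteps` and reports the schedule length:

from diffusers import DDIMScheduler

scheduler = DDIMScheduler(num_train_timesteps=1000)
timesteps, n = retrieve_timesteps(scheduler, num_inference_steps=30)
assert n == 30 and len(timesteps) == 30
assert timesteps[0] > timesteps[-1]  # schedules run from noisy to clean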

class MVDreamPipeline(
    DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, IPAdapterMixin, FromSingleFileMixin
):
    r"""
    Pipeline for multi-view text-to-image generation with MVDream.

    This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
    implemented for all pipelines (downloading, saving, running on a particular device, etc.).

    The pipeline also inherits the following loading methods:
        - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
        - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
        - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
        - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
        - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters

    Args:
        vae ([`AutoencoderKL`]):
            Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
        text_encoder ([`~transformers.CLIPTextModel`]):
            Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
        tokenizer ([`~transformers.CLIPTokenizer`]):
            A `CLIPTokenizer` to tokenize text.
        unet ([`UNet2DConditionModel`]):
            A `UNet2DConditionModel` to denoise the encoded image latents.
        camera_proj ([`CameraMatrixEmbedding`]):
            A `CameraMatrixEmbedding` to project the camera extrinsic matrices to embeddings.
        scheduler ([`SchedulerMixin`]):
            A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
            [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
        safety_checker ([`StableDiffusionSafetyChecker`]):
            Classification module that estimates whether generated images could be considered offensive or harmful.
            Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
            about a model's potential harms.
        feature_extractor ([`~transformers.CLIPImageProcessor`]):
            A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
    """

    model_cpu_offload_seq = "text_encoder->unet->vae"
    _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
    _exclude_from_cpu_offload = ["safety_checker"]
    _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]

    def __init__(
        self,
        vae: AutoencoderKL,
        text_encoder: CLIPTextModel,
        tokenizer: CLIPTokenizer,
        unet: UNet2DConditionModel,
        camera_proj: CameraMatrixEmbedding,
        scheduler: KarrasDiffusionSchedulers,
        feature_extractor: CLIPImageProcessor,
        safety_checker=None,
        image_encoder: CLIPVisionModelWithProjection = None,
        requires_safety_checker: bool = False,
    ):
        super().__init__()

        if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
                f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
                "to update the config accordingly, as leaving `steps_offset` might lead to incorrect results"
                " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
                " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
                " file"
            )
            deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["steps_offset"] = 1
            scheduler._internal_dict = FrozenDict(new_config)

        if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
            deprecation_message = (
                f"The configuration file of this scheduler: {scheduler} has `clip_sample` enabled."
                " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
                " config accordingly, as leaving `clip_sample` enabled might lead to incorrect results in"
                " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
                " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
            )
            deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(scheduler.config)
            new_config["clip_sample"] = False
            scheduler._internal_dict = FrozenDict(new_config)

        if safety_checker is None and requires_safety_checker:
            logger.warning(
                f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
                " that you abide by the conditions of the Stable Diffusion license and do not expose unfiltered"
                " results in services or applications open to the public. Both the diffusers team and Hugging Face"
                " strongly recommend keeping the safety filter enabled in all public-facing circumstances, disabling"
                " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
                " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
            )

        if safety_checker is not None and feature_extractor is None:
            raise ValueError(
                f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
                " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
            )

        is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
            version.parse(unet.config._diffusers_version).base_version
        ) < version.parse("0.9.0.dev0")
        is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
        if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
            deprecation_message = (
                "The configuration file of the unet has set the default `sample_size` to smaller than"
                " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
                " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
                " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
                " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
                " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
                " in the config might lead to incorrect results in future versions. If you have downloaded this"
                " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
                " the `unet/config.json` file"
            )
            deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
            new_config = dict(unet.config)
            new_config["sample_size"] = 64
            unet._internal_dict = FrozenDict(new_config)

        self.register_modules(
            vae=vae,
            text_encoder=text_encoder,
            tokenizer=tokenizer,
            unet=unet,
            camera_proj=camera_proj,
            scheduler=scheduler,
            safety_checker=safety_checker,
            feature_extractor=feature_extractor,
            image_encoder=image_encoder,
        )

        self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
        self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
        self.register_to_config(requires_safety_checker=requires_safety_checker)

    def enable_vae_slicing(self):
        r"""
        Enable sliced VAE decoding. When this option is enabled, the VAE will split the input tensor in slices to
        compute decoding in several steps. This is useful to save some memory and allow larger batch sizes.
        """
        self.vae.enable_slicing()

    def disable_vae_slicing(self):
        r"""
        Disable sliced VAE decoding. If `enable_vae_slicing` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_slicing()

    def enable_vae_tiling(self):
        r"""
        Enable tiled VAE decoding. When this option is enabled, the VAE will split the input tensor into tiles to
        compute decoding and encoding in several steps. This is useful for saving a large amount of memory and to
        allow processing larger images.
        """
        self.vae.enable_tiling()

    def disable_vae_tiling(self):
        r"""
        Disable tiled VAE decoding. If `enable_vae_tiling` was previously enabled, this method will go back to
        computing decoding in one step.
        """
        self.vae.disable_tiling()

    def _set_unet_self_attn_cross_view_processor(self, num_views=4):
        attn_procs_cls = XFormersCrossViewAttnProcessor if is_xformers_available() else CrossViewAttnProcessor
        set_self_attn_processor(self.unet, attn_procs_cls(num_views=num_views))

    def _set_unet_self_attn_vanilla_processor(self):
        attn_procs_cls = XFormersAttnProcessor if is_xformers_available() else AttnProcessor
        set_self_attn_processor(self.unet, attn_procs_cls())

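The two setters above switch the UNet between joint multi-view self-attention and ordinary per-image self-attention. A hedged sketch of the intended toggling pattern (`pipe` stands for an instantiated MVDreamPipeline):

pipe._set_unet_self_attn_cross_view_processor(num_views=4)  # views attend to each other
# ... run camera-conditioned multi-view sampling ...
pipe._set_unet_self_attn_vanilla_processor()                # back to per-image attention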
    def _encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        **kwargs,
    ):
        deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
        deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)

        prompt_embeds_tuple = self.encode_prompt(
            prompt=prompt,
            device=device,
            num_images_per_prompt=num_images_per_prompt,
            do_classifier_free_guidance=do_classifier_free_guidance,
            negative_prompt=negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            **kwargs,
        )

        # concatenate for backwards comp
        prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])

        return prompt_embeds

    def encode_prompt(
        self,
        prompt,
        device,
        num_images_per_prompt,
        do_classifier_free_guidance,
        negative_prompt=None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        lora_scale: Optional[float] = None,
        clip_skip: Optional[int] = None,
    ):
        r"""
        Encodes the prompt into text encoder hidden states.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                prompt to be encoded
            device: (`torch.device`):
                torch device
            num_images_per_prompt (`int`):
                number of images that should be generated per prompt
            do_classifier_free_guidance (`bool`):
                whether to use classifier free guidance or not
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts not to guide the image generation. If not defined, one has to pass
                `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale`
                is less than `1`).
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If
                not provided, text embeddings will be generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
                weighting. If not provided, negative_prompt_embeds will be generated from the `negative_prompt` input
                argument.
            lora_scale (`float`, *optional*):
                A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means
                that the output of the pre-final layer will be used for computing the prompt embeddings.
        """
        # set lora scale so that monkey patched LoRA
        # function of text encoder can correctly access it
        if lora_scale is not None and isinstance(self, LoraLoaderMixin):
            self._lora_scale = lora_scale

            # dynamically adjust the LoRA scale
            if not USE_PEFT_BACKEND:
                adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
            else:
                scale_lora_layers(self.text_encoder, lora_scale)

        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        if prompt_embeds is None:
            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                prompt = self.maybe_convert_prompt(prompt, self.tokenizer)

            text_inputs = self.tokenizer(
                prompt,
                padding="max_length",
                max_length=self.tokenizer.model_max_length,
                truncation=True,
                return_tensors="pt",
            )
            text_input_ids = text_inputs.input_ids
            untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids

            if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
                text_input_ids, untruncated_ids
            ):
                removed_text = self.tokenizer.batch_decode(
                    untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
                )
                logger.warning(
                    "The following part of your input was truncated because CLIP can only handle sequences up to"
                    f" {self.tokenizer.model_max_length} tokens: {removed_text}"
                )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = text_inputs.attention_mask.to(device)
            else:
                attention_mask = None

            if clip_skip is None:
                prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
                prompt_embeds = prompt_embeds[0]
            else:
                prompt_embeds = self.text_encoder(
                    text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
                )
                # Access the `hidden_states` first, that contains a tuple of
                # all the hidden states from the encoder layers. Then index into
                # the tuple to access the hidden states from the desired layer.
                prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
                # We also need to apply the final LayerNorm here to not mess with the
                # representations. The `last_hidden_states` that we typically use for
                # obtaining the final prompt representations passes through the LayerNorm
                # layer.
                prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)

        if self.text_encoder is not None:
            prompt_embeds_dtype = self.text_encoder.dtype
        elif self.unet is not None:
            prompt_embeds_dtype = self.unet.dtype
        else:
            prompt_embeds_dtype = prompt_embeds.dtype

        prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

        bs_embed, seq_len, _ = prompt_embeds.shape
        # duplicate text embeddings for each generation per prompt, using mps friendly method
        prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
        prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)

        # get unconditional embeddings for classifier free guidance
        if do_classifier_free_guidance and negative_prompt_embeds is None:
            uncond_tokens: List[str]
            if negative_prompt is None:
                uncond_tokens = [""] * batch_size
            elif prompt is not None and type(prompt) is not type(negative_prompt):
                raise TypeError(
                    f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
                    f" {type(prompt)}."
                )
            elif isinstance(negative_prompt, str):
                uncond_tokens = [negative_prompt]
            elif batch_size != len(negative_prompt):
                raise ValueError(
                    f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
                    f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
                    " the batch size of `prompt`."
                )
            else:
                uncond_tokens = negative_prompt

            # textual inversion: process multi-vector tokens if necessary
            if isinstance(self, TextualInversionLoaderMixin):
                uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)

            max_length = prompt_embeds.shape[1]
            uncond_input = self.tokenizer(
                uncond_tokens,
                padding="max_length",
                max_length=max_length,
                truncation=True,
                return_tensors="pt",
            )

            if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
                attention_mask = uncond_input.attention_mask.to(device)
            else:
                attention_mask = None

            negative_prompt_embeds = self.text_encoder(
                uncond_input.input_ids.to(device),
                attention_mask=attention_mask,
            )
            negative_prompt_embeds = negative_prompt_embeds[0]

        if do_classifier_free_guidance:
            # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
            seq_len = negative_prompt_embeds.shape[1]

            negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)

            negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
            negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)

        if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
            # Retrieve the original scale by scaling back the LoRA layers
            unscale_lora_layers(self.text_encoder, lora_scale)

        return prompt_embeds, negative_prompt_embeds

    def encode_image(self, image, device, num_images_per_prompt):
        dtype = next(self.image_encoder.parameters()).dtype

        if not isinstance(image, torch.Tensor):
            image = self.feature_extractor(image, return_tensors="pt").pixel_values

        image = image.to(device=device, dtype=dtype)
        image_embeds = self.image_encoder(image).image_embeds
        image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)

        uncond_image_embeds = torch.zeros_like(image_embeds)
        return image_embeds, uncond_image_embeds

    def run_safety_checker(self, image, device, dtype):
        if self.safety_checker is None:
            has_nsfw_concept = None
        else:
            if torch.is_tensor(image):
                feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
            else:
                feature_extractor_input = self.image_processor.numpy_to_pil(image)
            safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
            image, has_nsfw_concept = self.safety_checker(
                images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
            )
        return image, has_nsfw_concept

    def decode_latents(self, latents):
        deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
        deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)

        latents = 1 / self.vae.config.scaling_factor * latents
        image = self.vae.decode(latents, return_dict=False)[0]
        image = (image / 2 + 0.5).clamp(0, 1)
        # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
        image = image.cpu().permute(0, 2, 3, 1).float().numpy()
        return image

    def prepare_extra_step_kwargs(self, generator, eta):
        # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
        # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
        # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
        # and should be between [0, 1]

        accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
        extra_step_kwargs = {}
        if accepts_eta:
            extra_step_kwargs["eta"] = eta

        # check if the scheduler accepts generator
        accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
        if accepts_generator:
            extra_step_kwargs["generator"] = generator
        return extra_step_kwargs

    def check_inputs(
        self,
        prompt,
        c2ws,
        num_views,
        height,
        width,
        callback_steps,
        negative_prompt=None,
        prompt_embeds=None,
        negative_prompt_embeds=None,
        callback_on_step_end_tensor_inputs=None,
    ):
        if height % 8 != 0 or width % 8 != 0:
            raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")

        if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
            raise ValueError(
                f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
                f" {type(callback_steps)}."
            )
        if callback_on_step_end_tensor_inputs is not None and not all(
            k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
        ):
            raise ValueError(
                f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
            )

        if prompt is not None and prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
                " only forward one of the two."
            )
        elif prompt is None and prompt_embeds is None:
            raise ValueError(
                "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
            )
        elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
            raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")

        if negative_prompt is not None and negative_prompt_embeds is not None:
            raise ValueError(
                f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
                f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
            )

        if prompt_embeds is not None and negative_prompt_embeds is not None:
            if prompt_embeds.shape != negative_prompt_embeds.shape:
                raise ValueError(
                    "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
                    f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
                    f" {negative_prompt_embeds.shape}."
                )

        if c2ws is not None:
            assert isinstance(c2ws, torch.Tensor)
            if c2ws.ndim == 3:
                c2ws = c2ws.unsqueeze(0)
            assert c2ws.shape[1] == num_views
            # if c2ws.shape[0] % num_views != 0:
            #     raise ValueError(
            #         f"when `c2ws` is with ndim as 3, the first dim must can be exactly divided by `num_views` which is {num_views}, "
            #         f"but the first dim of `c2ws` is {c2ws.shape[0]}."
            #     )

    def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
        shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
        if isinstance(generator, list) and len(generator) != batch_size:
            raise ValueError(
                f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
                f" size of {batch_size}. Make sure the batch size matches the length of the generators."
            )

        if latents is None:
            latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
        else:
            latents = latents.to(device)

        # scale the initial noise by the standard deviation required by the scheduler
        latents = latents * self.scheduler.init_noise_sigma
        return latents

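For multi-view sampling the views are stacked along the batch axis, so the effective latent batch is `batch_size * num_images_per_prompt * num_views`. A shape sketch with illustrative numbers (not part of the commit):

batch_size, num_images_per_prompt, num_views = 1, 1, 4
vae_scale_factor, height, width = 8, 256, 256
shape = (batch_size * num_images_per_prompt * num_views, 4, height // vae_scale_factor, width // vae_scale_factor)
assert shape == (4, 4, 32, 32)  # four 32x32 latent grids, one per view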
    def enable_freeu(self, s1: float, s2: float, b1: float, b2: float):
        r"""Enables the FreeU mechanism as in https://arxiv.org/abs/2309.11497.

        The suffixes after the scaling factors represent the stages where they are being applied.

        Please refer to the [official repository](https://github.com/ChenyangSi/FreeU) for combinations of the values
        that are known to work well for different pipelines such as Stable Diffusion v1, v2, and Stable Diffusion XL.

        Args:
            s1 (`float`):
                Scaling factor for stage 1 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            s2 (`float`):
                Scaling factor for stage 2 to attenuate the contributions of the skip features. This is done to
                mitigate the "oversmoothing effect" in the enhanced denoising process.
            b1 (`float`): Scaling factor for stage 1 to amplify the contributions of backbone features.
            b2 (`float`): Scaling factor for stage 2 to amplify the contributions of backbone features.
        """
        if not hasattr(self, "unet"):
            raise ValueError("The pipeline must have `unet` for using FreeU.")
        self.unet.enable_freeu(s1=s1, s2=s2, b1=b1, b2=b2)

    def disable_freeu(self):
        """Disables the FreeU mechanism if enabled."""
        self.unet.disable_freeu()

    # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
    def get_guidance_scale_embedding(self, w, embedding_dim=512, dtype=torch.float32):
        """
        See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298

        Args:
            w (`torch.Tensor`):
                Guidance scale values (one per batch element) at which to generate embedding vectors.
            embedding_dim (`int`, *optional*, defaults to 512):
                dimension of the embeddings to generate
            dtype:
                data type of the generated embeddings

        Returns:
            `torch.FloatTensor`: Embedding vectors with shape `(len(w), embedding_dim)`
        """
        assert len(w.shape) == 1
        w = w * 1000.0

        half_dim = embedding_dim // 2
        emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
        emb = w.to(dtype)[:, None] * emb[None, :]
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
        if embedding_dim % 2 == 1:  # zero pad
            emb = torch.nn.functional.pad(emb, (0, 1))
        assert emb.shape == (w.shape[0], embedding_dim)
        return emb

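The embedding is the standard sinusoidal timestep encoding applied to `guidance_scale - 1`; it only feeds the UNet when `time_cond_proj_dim` is configured (guidance-distilled models). A shape sketch (illustrative; `pipe` stands for an instantiated pipeline):

import torch

w = torch.tensor([7.5 - 1.0])  # guidance weight for a batch of one
emb = pipe.get_guidance_scale_embedding(w, embedding_dim=256)
assert emb.shape == (1, 256)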
    @property
    def guidance_scale(self):
        return self._guidance_scale

    @property
    def guidance_rescale(self):
        return self._guidance_rescale

    @property
    def clip_skip(self):
        return self._clip_skip

    # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
    # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
    # corresponds to doing no classifier free guidance.
    @property
    def do_classifier_free_guidance(self):
        return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None

    @property
    def cross_attention_kwargs(self):
        return self._cross_attention_kwargs

    @property
    def num_timesteps(self):
        return self._num_timesteps

    @torch.no_grad()
    # @replace_example_docstring(EXAMPLE_DOC_STRING)
    def __call__(
        self,
        prompt: Union[str, List[str]] = None,
        c2ws: Optional[torch.FloatTensor] = None,
        num_views: int = 4,
        height: Optional[int] = None,
        width: Optional[int] = None,
        num_inference_steps: int = 50,
        timesteps: List[int] = None,
        guidance_scale: float = 10,
        negative_prompt: Optional[Union[str, List[str]]] = None,
        num_images_per_prompt: Optional[int] = 1,
        eta: float = 0.0,
        generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
        latents: Optional[torch.FloatTensor] = None,
        prompt_embeds: Optional[torch.FloatTensor] = None,
        negative_prompt_embeds: Optional[torch.FloatTensor] = None,
        # ip_adapter_image: Optional[PipelineImageInput] = None,
        output_type: Optional[str] = "pil",
        return_dict: bool = True,
        cross_attention_kwargs: Optional[Dict[str, Any]] = None,
        guidance_rescale: float = 0.0,
        clip_skip: Optional[int] = None,
        callback_on_step_end: Optional[Callable[[int, int, Dict], None]] = None,
        callback_on_step_end_tensor_inputs: List[str] = ["latents"],
        **kwargs,
    ):
        r"""
        The call function to the pipeline for generation.

        Args:
            prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
            c2ws (`torch.FloatTensor`, *optional*):
                Camera-to-world (extrinsic) matrices of shape `(batch_size, num_views, 4, 4)`, or `(num_views, 4, 4)`
                for a single batch. When provided, they are projected to embeddings by `camera_proj` and the UNet is
                switched to cross-view self-attention.
            num_views (`int`, *optional*, defaults to 4):
                The number of views generated jointly for each prompt.
            height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The height in pixels of the generated image.
            width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
                The width in pixels of the generated image.
            num_inference_steps (`int`, *optional*, defaults to 50):
                The number of denoising steps. More denoising steps usually lead to a higher quality image at the
                expense of slower inference.
            timesteps (`List[int]`, *optional*):
                Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
                in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
                passed will be used. Must be in descending order.
            guidance_scale (`float`, *optional*, defaults to 10):
                A higher guidance scale value encourages the model to generate images closely linked to the text
                `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
            negative_prompt (`str` or `List[str]`, *optional*):
                The prompt or prompts to guide what to not include in image generation. If not defined, you need to
                pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
            num_images_per_prompt (`int`, *optional*, defaults to 1):
                The number of images to generate per prompt.
            eta (`float`, *optional*, defaults to 0.0):
                Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
                to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
            generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
                A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
                generation deterministic.
            latents (`torch.FloatTensor`, *optional*):
                Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
                generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
                tensor is generated by sampling using the supplied random `generator`.
            prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
                provided, text embeddings are generated from the `prompt` input argument.
            negative_prompt_embeds (`torch.FloatTensor`, *optional*):
                Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
                not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
            output_type (`str`, *optional*, defaults to `"pil"`):
                The output format of the generated image. Choose between `PIL.Image` or `np.array`.
            return_dict (`bool`, *optional*, defaults to `True`):
                Whether or not to return a [`MVDreamPipelineOutput`] instead of the plain images.
            cross_attention_kwargs (`dict`, *optional*):
                A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
                [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
            guidance_rescale (`float`, *optional*, defaults to 0.0):
                Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
                Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
                using zero terminal SNR.
            clip_skip (`int`, *optional*):
                Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
                the output of the pre-final layer will be used for computing the prompt embeddings.
            callback_on_step_end (`Callable`, *optional*):
                A function that is called at the end of each denoising step during inference, with the arguments
                `callback_on_step_end(self: DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`.
                `callback_kwargs` will include a list of all tensors as specified by
                `callback_on_step_end_tensor_inputs`.
            callback_on_step_end_tensor_inputs (`List`, *optional*):
                The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
                will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
                `._callback_tensor_inputs` attribute of your pipeline class.

        Examples:

        Returns:
            [`MVDreamPipelineOutput`] or `np.ndarray`/`List[PIL.Image.Image]`:
                If `return_dict` is `True`, a [`MVDreamPipelineOutput`] is returned; otherwise the generated images
                are returned directly. For non-PIL output types, the images are reshaped so that the `num_views`
                views of each sample are grouped along a second axis.
        """

        callback = kwargs.pop("callback", None)
        callback_steps = kwargs.pop("callback_steps", None)

        if callback is not None:
            deprecate(
                "callback",
                "1.0.0",
                "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )
        if callback_steps is not None:
            deprecate(
                "callback_steps",
                "1.0.0",
                "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
            )

        # 0. Default height and width to unet
        height = height or self.unet.config.sample_size * self.vae_scale_factor
        width = width or self.unet.config.sample_size * self.vae_scale_factor

        # 1. Check inputs. Raise error if not correct
        self.check_inputs(
            prompt,
            c2ws,
            num_views,
            height,
            width,
            callback_steps,
            negative_prompt,
            prompt_embeds,
            negative_prompt_embeds,
            callback_on_step_end_tensor_inputs,
        )

        self._guidance_scale = guidance_scale
        self._guidance_rescale = guidance_rescale
        self._clip_skip = clip_skip
        self._cross_attention_kwargs = cross_attention_kwargs
        self._num_views = num_views

        # 2. Define call parameters
        if prompt is not None and isinstance(prompt, str):
            batch_size = 1
        elif prompt is not None and isinstance(prompt, list):
            batch_size = len(prompt)
        else:
            batch_size = prompt_embeds.shape[0]

        device = self._execution_device

        # 3. Encode input prompt
        # (lora_scale is extracted here to deal with lora scaling and other possible forward hooks)
        lora_scale = (
            self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
        )

        prompt_embeds, negative_prompt_embeds = self.encode_prompt(
            prompt,
            device,
            num_images_per_prompt * num_views,
            self.do_classifier_free_guidance,
            negative_prompt,
            prompt_embeds=prompt_embeds,
            negative_prompt_embeds=negative_prompt_embeds,
            lora_scale=lora_scale,
            clip_skip=self.clip_skip,
        )

        # For classifier free guidance, we need to do two forward passes.
        # Here we concatenate the unconditional and text embeddings into a single batch
        # to avoid doing two forward passes
        if self.do_classifier_free_guidance:
            prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])

        # 4. Prepare timesteps
        timesteps, num_inference_steps = retrieve_timesteps(self.scheduler, num_inference_steps, device, timesteps)

        # 5. Prepare latent variables
        num_channels_latents = self.unet.config.in_channels
        latents = self.prepare_latents(
            batch_size * num_images_per_prompt * num_views,
            num_channels_latents,
            height,
            width,
            prompt_embeds.dtype,
            device,
            generator,
            latents,
        )

        # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
        extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)

        # 6.1 Prepare camera matrix embeddings (default to None so the UNet call below is
        # well-defined even when no cameras are passed)
        camera_matrix_embeds = None
        if c2ws is not None:
            if c2ws.ndim == 3:
                c2ws = c2ws.unsqueeze(0)
            if c2ws.shape[0] != batch_size and c2ws.shape[0] != 1:
                raise ValueError("Size mismatch between `c2ws` and batch size.")
            elif c2ws.shape[0] == 1:
                c2ws = torch.cat([c2ws] * batch_size, dim=0)
            # repeat each prompt's block of view matrices once per image; `dim=0` keeps
            # every 4x4 matrix intact (an element-wise repeat_interleave would scramble them)
            c2ws = c2ws.repeat_interleave(num_images_per_prompt, dim=0).reshape(-1, 4, 4).flatten(1, 2)
            c2ws = c2ws.to(device, dtype=self.camera_proj.dtype)
            camera_matrix_embeds = self.camera_proj(c2ws)
            if self.do_classifier_free_guidance:
                camera_matrix_embeds = torch.cat([camera_matrix_embeds] * 2)
            # UNet uses cross-view attention
            self._set_unet_self_attn_cross_view_processor(num_views)
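            # Shape walk-through (editorial note, not part of the commit): with B prompts,
            # m images per prompt, and N views, `c2ws` goes (B, N, 4, 4) -> (B*m*N, 16),
            # `camera_matrix_embeds` then has one row per latent in the (B*m*N)-sized batch,
            # and CFG doubles it to match `torch.cat([latents] * 2)` below.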

        # 6.2 Optionally get Guidance Scale Embedding
        timestep_cond = None
        if self.unet.config.time_cond_proj_dim is not None:
            guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
            timestep_cond = self.get_guidance_scale_embedding(
                guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
            ).to(device=device, dtype=latents.dtype)

        # 7. Denoising loop
        num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
        self._num_timesteps = len(timesteps)
        with self.progress_bar(total=num_inference_steps) as progress_bar:
            for i, t in enumerate(timesteps):
                # expand the latents if we are doing classifier free guidance
                latent_model_input = torch.cat([latents] * 2) if self.do_classifier_free_guidance else latents
                latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)

                # predict the noise residual
                noise_pred = self.unet(
                    latent_model_input,
                    t,
                    encoder_hidden_states=prompt_embeds,
                    camera_matrix_embeds=camera_matrix_embeds,
                    timestep_cond=timestep_cond,
                    cross_attention_kwargs=self.cross_attention_kwargs,
                    return_dict=False,
                )[0]

                # perform guidance
                if self.do_classifier_free_guidance:
                    noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
                    noise_pred = noise_pred_uncond + self.guidance_scale * (noise_pred_text - noise_pred_uncond)

                if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
                    # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
                    noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)

                # compute the previous noisy sample x_t -> x_t-1
                latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]

                if callback_on_step_end is not None:
                    callback_kwargs = {}
                    for k in callback_on_step_end_tensor_inputs:
                        callback_kwargs[k] = locals()[k]
                    callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)

                    latents = callback_outputs.pop("latents", latents)
                    prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
                    negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)

                # call the callback, if provided
                if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
                    progress_bar.update()
                    if callback is not None and i % callback_steps == 0:
                        step_idx = i // getattr(self.scheduler, "order", 1)
                        callback(step_idx, t, latents)

        if output_type != "latent":
            image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
                0
            ]
            # image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
        else:
            image = latents
            # has_nsfw_concept = None

        image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=[True] * image.shape[0])

        if output_type != "pil":
            # group the views of each sample along a second axis
            image = image.reshape(-1, num_views, *image.shape[-3:])

        # Offload all models
        self.maybe_free_model_hooks()

        if not return_dict:
            return image

        return MVDreamPipelineOutput(images=image)
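End-to-end usage sketch (editorial addition, not part of the commit). The checkpoint path and the exact camera convention are assumptions; `get_camera_matrices` is a hypothetical helper standing in for however your setup builds four camera-to-world matrices around the object:

import torch
from pipeline_mvdream import MVDreamPipeline

# hypothetical checkpoint path; it must contain the custom `unet` and `camera_proj` modules
pipe = MVDreamPipeline.from_pretrained("path/to/mvdream-checkpoint", torch_dtype=torch.float16)
pipe = pipe.to("cuda")

c2ws = get_camera_matrices()  # hypothetical helper returning a (4, 4, 4) tensor of extrinsics
result = pipe(
    prompt="an astronaut riding a horse",
    c2ws=c2ws,
    num_views=4,
    num_inference_steps=30,
    guidance_scale=10,
)
images = result.images  # four views of the same subject, one image per camera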