CiaraRowles committed
Commit
934bde2
1 Parent(s): 8984489

Upload 4 files

controlnet/attention_autoencoder.py ADDED
@@ -0,0 +1,229 @@
1
+ import math
2
+ from dataclasses import dataclass
3
+ from typing import Any, Dict, List, Optional, Tuple, Union
4
+ import datetime
5
+ import torch
6
+ import torch.utils.checkpoint
7
+ from torch import nn
8
+ from torch.nn import functional as F
9
+ from torch.nn.modules.normalization import GroupNorm
10
+ import base64
11
+ import numpy as np
12
+
13
+ class PositionalEncoding(nn.Module):
14
+ def __init__(self, d_model, max_len=5000):
15
+ super(PositionalEncoding, self).__init__()
16
+ pe = torch.zeros(max_len, d_model)
17
+ position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
18
+ div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
19
+ pe[:, 0::2] = torch.sin(position * div_term)
20
+ pe[:, 1::2] = torch.cos(position * div_term)
21
+ pe = pe.unsqueeze(0)
22
+ self.register_buffer('pe', pe)
23
+
24
+ def forward(self, x):
25
+ return x + self.pe[:, :x.size(1)]
26
+
27
+
28
+ class AttentionAutoencoder(nn.Module):
29
+ def __init__(self, input_dim=768, output_dim=1280, d_model=512, latent_dim=20, seq_len=196, num_heads=4, num_layers=3, out_intermediate=512):
30
+ super().__init__()
31
+ self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
32
+
33
+ self.input_dim = input_dim # Adjusted to 768
34
+ self.d_model = d_model
35
+ self.latent_dim = latent_dim
36
+ self.seq_len = seq_len # Adjusted to 196
37
+ self.out_intermediate = out_intermediate
38
+ self.output_dim = output_dim
39
+
40
+ # Positional Encoding
41
+ self.pos_encoder = PositionalEncoding(d_model)
42
+
43
+ # Input Projection (adjusted to project from input_dim=768 to d_model=512)
44
+ self.input_proj = nn.Linear(input_dim, d_model)
45
+
46
+ # Latent Initialization
47
+ self.latent_init = nn.Parameter(torch.randn(1, d_model))
48
+
49
+ # Cross-Attention Encoder
50
+ self.num_layers = num_layers
51
+ self.attention_layers = nn.ModuleList([
52
+ nn.MultiheadAttention(embed_dim=d_model, num_heads=num_heads, batch_first=True)
53
+ for _ in range(num_layers)
54
+ ])
55
+
56
+ # Latent Space Refinement
57
+ self.latent_proj = nn.Linear(d_model, latent_dim)
58
+ self.latent_norm = nn.LayerNorm(latent_dim)
59
+ self.latent_to_d_model = nn.Linear(latent_dim, d_model)
60
+
61
+ # Mapping latent to intermediate feature map
62
+ self.transformer_decoder = nn.TransformerDecoder(
63
+ nn.TransformerDecoderLayer(d_model=d_model, nhead=num_heads, batch_first=True),
64
+ num_layers=2
65
+ )
66
+
67
+ # Output projection
68
+ self.output_proj = nn.Linear(d_model, output_dim)
69
+ self.tgt_init = nn.Parameter(torch.randn(1, d_model))
70
+
71
+
72
+
73
+ def encode(self, src):
74
+ # src shape: [batch_size, seq_len (196), input_dim (768)]
75
+ batch_size, seq_len, input_dim = src.shape
76
+
77
+ # Project input_dim (768) to d_model (512)
78
+ src = self.input_proj(src) # Shape: [batch_size, seq_len (196), d_model (512)]
79
+ src = self.pos_encoder(src) # Add positional encoding
80
+
81
+ # Latent initialization
82
+ latent = self.latent_init.repeat(batch_size, 1).unsqueeze(1) # Shape: [batch_size, 1, d_model]
83
+
84
+ # Cross-attend latent with input sequence
85
+ for i in range(self.num_layers):
86
+ latent, _ = self.attention_layers[i](latent, src, src)
87
+
88
+ # Project to latent dimension and normalize
89
+ latent = self.latent_proj(latent.squeeze(1)) # Shape: [batch_size, latent_dim]
90
+ latent = self.latent_norm(latent)
91
+
92
+ return latent
93
+
94
+ def decode(self, latent, seq_w, seq_h):
95
+ batch_size = latent.size(0)
96
+
97
+ target_seq_len = seq_w * seq_h
98
+
99
+ # Project latent_dim back to d_model
100
+ memory = self.latent_to_d_model(latent).unsqueeze(1) # Shape: [batch_size, 1, d_model]
101
+
102
+ # Target initialization
103
+ # Repeat the learned target initialization to match the target sequence length
104
+ tgt = self.tgt_init.repeat(batch_size, target_seq_len, 1) # Shape: [batch_size, target_seq_len, d_model]
105
+
106
+ # Apply positional encoding
107
+ tgt = self.pos_encoder(tgt)
108
+
109
+ # Apply transformer decoder
110
+ output = self.transformer_decoder(tgt, memory) # Shape: [batch_size, target_seq_len, d_model]
111
+
112
+ # Project to output_dim
113
+ output = self.output_proj(output) # Shape: [batch_size, target_seq_len, output_dim]
114
+
115
+ # Reshape output to (batch_size, seq_w, seq_h, output_dim)
116
+ output = output.view(batch_size, seq_w, seq_h, self.output_dim)
117
+
118
+ # Permute dimensions to (batch_size, output_dim, seq_w, seq_h)
119
+ output = output.permute(0, 3, 1, 2) # Shape: [batch_size, output_dim, seq_w, seq_h]
120
+
121
+ return output
122
+
123
+ def forward(self, src, seq_w, seq_h):
124
+ latent = self.encode(src)
125
+ output = self.decode(latent, seq_w, seq_h)
126
+ return output
127
+
128
+ def encode_to_base64(self, latent_vector, bits_per_element):
129
+ max_int = 2 ** bits_per_element - 1
130
+ q_latent = ((latent_vector + 1) * (max_int / 2)).clip(0, max_int).astype(np.uint8)
131
+ byte_array = q_latent.tobytes()
132
+ encoded_string = base64.b64encode(byte_array).decode('utf-8')
133
+ # Remove padding characters
134
+ return encoded_string.rstrip('=')
135
+
136
+ def decode_from_base64(self, encoded_string, bits_per_element, latentdim):
137
+
138
+ # Add back padding if it's missing
139
+ missing_padding = len(encoded_string) % 4
140
+ if missing_padding:
141
+ encoded_string += '=' * (4 - missing_padding)
142
+ byte_array = base64.b64decode(encoded_string)
143
+ q_latent = np.frombuffer(byte_array, dtype=np.uint8)[:latentdim]
144
+ max_int = 2 ** bits_per_element - 1
145
+ latent_vector = q_latent.astype(np.float32) * 2 / max_int - 1
146
+ return latent_vector
147
+
148
+ def forward_encoding(self, src, seq_w, seq_h):
149
+ """
150
+ Encodes the input `src` into a latent representation, encodes it to a Base64 string,
151
+ decodes it back to the latent space, and then decodes it to the output.
152
+
153
+ Args:
154
+ src: The input data to encode.
155
+
156
+ Returns:
157
+ output: The decoded output from the latent representation.
158
+ """
159
+ # Step 1: Encode the input to latent space
160
+ latent = self.encode(src) # latent is of shape (batch_size, self.latentdim)
161
+ batch_size, latentdim = latent.shape
162
+
163
+ # Ensure bits_per_element is appropriate
164
+ bits_per_element = int(120 / latentdim) # Example: latentdim = 20, bits_per_element = 6
165
+ if bits_per_element > 8:
166
+ raise ValueError("bits_per_element cannot exceed 8 when using uint8 for encoding.")
167
+
168
+ encoded_strings = []
169
+
170
+ # Step 2: Encode each latent vector to a Base64 string
171
+ for i in range(batch_size):
172
+ latent_vector = latent[i].cpu().numpy()
173
+ encoded_string = self.encode_to_base64(latent_vector, bits_per_element)
174
+ encoded_strings.append(encoded_string)
175
+
176
+ decoded_latents = []
177
+
178
+ # Step 3: Decode each Base64 string back to the latent vector
179
+ for i, encoded_string in enumerate(encoded_strings):
180
+ print(encoded_string)
181
+ decoded_latent = self.decode_from_base64(encoded_string, bits_per_element, latentdim)
182
+ decoded_latents.append(decoded_latent)
183
+
184
+ # Step 4: Convert the list of decoded latents back to a tensor
185
+ decoded_latents = torch.tensor(decoded_latents, dtype=latent.dtype, device=latent.device)
186
+
187
+ # Step 5: Decode the latent tensor into the output
188
+ output = self.decode(decoded_latents, seq_w, seq_h)
189
+
190
+ return output, encoded_strings
191
+
192
+ def forward_from_stylecode(self, stylecode, seq_w, seq_h, dtype, device):
193
+
194
+ latentdim = 20
195
+ bits_per_element = 6
196
+ decoded_latents = []
197
+
198
+
199
+ #for i, encoded_string in enumerate(stylecode):
200
+ decoded_latent = self.decode_from_base64(stylecode, bits_per_element, latentdim)
201
+ decoded_latents.append(decoded_latent)
202
+
203
+ # Step 4: Convert the list of decoded latents back to a tensor
204
+ decoded_latents = torch.tensor(decoded_latents, dtype=dtype, device=device)
205
+
206
+ output = self.decode(decoded_latents, seq_w, seq_h)
207
+ return output
208
+
209
+ @torch.no_grad()
210
+ def make_stylecode(self, src):
211
+ src = src.to("cuda")
212
+ self = self.to("cuda")
213
+ print(src.device,self.device,self.input_proj.weight.device)
214
+ latent = self.encode(src) # latent is of shape (batch_size, self.latentdim)
215
+ batch_size, latentdim = latent.shape
216
+
217
+ # Ensure bits_per_element is appropriate
218
+ bits_per_element = int(120 / latentdim) # Example: latentdim = 20, bits_per_element = 6
219
+ if bits_per_element > 8:
220
+ raise ValueError("bits_per_element cannot exceed 8 when using uint8 for encoding.")
221
+
222
+ encoded_strings = []
223
+
224
+ # Step 2: Encode each latent vector to a Base64 string
225
+ for i in range(batch_size):
226
+ latent_vector = latent[i].cpu().numpy()
227
+ encoded_string = self.encode_to_base64(latent_vector, bits_per_element)
228
+ encoded_strings.append(encoded_string)
229
+ return encoded_strings
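
For reference, here is a minimal standalone sketch of the stylecode round trip implemented by `encode_to_base64` and `decode_from_base64` above: a latent vector in [-1, 1] is quantized to small integers, packed as bytes, and Base64-encoded with the padding stripped. The toy latent, bit width, and latent size below are illustrative assumptions, not values taken from a trained model.

import base64
import numpy as np

def encode_to_base64(latent_vector, bits_per_element=6):
    # Map floats in [-1, 1] to integers in [0, 2**bits_per_element - 1], pack as bytes, Base64-encode.
    max_int = 2 ** bits_per_element - 1
    q_latent = ((latent_vector + 1) * (max_int / 2)).clip(0, max_int).astype(np.uint8)
    return base64.b64encode(q_latent.tobytes()).decode("utf-8").rstrip("=")

def decode_from_base64(encoded_string, bits_per_element=6, latent_dim=20):
    # Restore the stripped Base64 padding, unpack the bytes, and map back to floats in [-1, 1].
    missing_padding = len(encoded_string) % 4
    if missing_padding:
        encoded_string += "=" * (4 - missing_padding)
    q_latent = np.frombuffer(base64.b64decode(encoded_string), dtype=np.uint8)[:latent_dim]
    max_int = 2 ** bits_per_element - 1
    return q_latent.astype(np.float32) * 2 / max_int - 1

rng = np.random.default_rng(0)
latent = np.tanh(rng.standard_normal(20)).astype(np.float32)  # toy 20-dim latent in (-1, 1)
code = encode_to_base64(latent)
recovered = decode_from_base64(code)
print(code)                              # 27 Base64 characters for 20 one-byte elements
print(np.abs(recovered - latent).max())  # worst-case quantization error is about 2/63

The same arithmetic explains why `bits_per_element` is capped at 8 in `forward_encoding` and `make_stylecode`: each quantized element is stored in a single uint8 byte.
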
controlnet/callable_functions.py ADDED
@@ -0,0 +1,125 @@
1
+ import argparse
2
+ import os
3
+ import torch
4
+ from PIL import Image
5
+ from diffusers import DDIMScheduler
6
+ from controlnet.pipline_controlnet_xs_v2 import StableDiffusionPipelineXSv2
7
+ from controlnet.controlnetxs_appearance import StyleCodesModel
8
+ from diffusers.models import UNet2DConditionModel
9
+ from transformers import AutoProcessor, SiglipVisionModel
10
+
11
+
12
+
13
+ def process_single_image(image_path, image=None):
14
+
15
+ # Set up model components
16
+ unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16, device="cuda")
17
+ stylecodes_model = StyleCodesModel.from_unet(unet, size_ratio=1.0).to(dtype=torch.float16, device="cuda")
18
+ stylecodes_model.requires_grad_(False)
19
+ stylecodes_model = stylecodes_model.to("cuda")
20
+
21
+
22
+ stylecodes_model.load_model("models/controlnet_model_11_80000.bin")
23
+ # Load and preprocess image
24
+ if image is None:
25
+ image = Image.open(image_path).convert("RGB")
26
+ image = image.resize((512, 512))
27
+
28
+ # Set up generator with a fixed seed for reproducibility
29
+ seed = 238
30
+ clip_image_processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
31
+ image_encoder = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224").to(dtype=torch.float16,device=stylecodes_model.device)
32
+ clip_image = clip_image_processor(images=image, return_tensors="pt").pixel_values
33
+ clip_image = clip_image.to(stylecodes_model.device, dtype=torch.float16)
34
+ clip_image = {"pixel_values": clip_image}
35
+ clip_image_embeds = image_encoder(**clip_image, output_hidden_states=True).hidden_states[-2]
36
+
37
+ # Run the image through the pipeline with the specified prompt
38
+ code = stylecodes_model.sref_autoencoder.make_stylecode(clip_image_embeds)
39
+ print("stylecode = ",code)
40
+ return code
41
+
42
+
43
+ def process_single_image_both_ways(image_path, prompt, num_inference_steps, image=None):
44
+ # Load and preprocess image
45
+ # Set up model components
46
+ unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16, device="cuda")
47
+ stylecodes_model = StyleCodesModel.from_unet(unet, size_ratio=1.0).to(dtype=torch.float16, device="cuda")
48
+
49
+ noise_scheduler = DDIMScheduler(
50
+ num_train_timesteps=1000,
51
+ beta_start=0.00085,
52
+ beta_end=0.012,
53
+ beta_schedule="scaled_linear",
54
+ clip_sample=False,
55
+ set_alpha_to_one=False,
56
+ steps_offset=1,
57
+ )
58
+
59
+ stylecodes_model.load_model("models/controlnet_model_11_80000.bin")
60
+
61
+ pipe = StableDiffusionPipelineXSv2.from_pretrained(
62
+ "runwayml/stable-diffusion-v1-5",
63
+ unet=unet,
64
+ stylecodes_model=stylecodes_model,
65
+ torch_dtype=torch.float16,
66
+ device="cuda",
67
+ scheduler=noise_scheduler,
68
+ feature_extractor=None,
69
+ safety_checker=None,
70
+ )
71
+
72
+ pipe.enable_model_cpu_offload()
73
+
74
+ if image is None:
75
+ image = Image.open(image_path).convert("RGB")
76
+ image = image.resize((512, 512))
77
+
78
+ # Set up generator with a fixed seed for reproducibility
79
+ seed = 238
80
+ generator = torch.Generator(device="cuda").manual_seed(seed)
81
+
82
+ # Run the image through the pipeline with the specified prompt
83
+ output_images = pipe(
84
+ prompt=prompt,
85
+ guidance_scale=3,
86
+ image=image,
87
+ num_inference_steps=num_inference_steps,
88
+ generator=generator,
89
+ controlnet_conditioning_scale=0.9,
90
+ width=512,
91
+ height=512,
92
+ stylecode=None,
93
+ ).images
94
+ return output_images
95
+ # Save the output image
96
+
97
+
98
+ def make_stylecode(image_path, image=None):
99
+
100
+ # Set up model components
101
+ unet = UNet2DConditionModel.from_pretrained("runwayml/stable-diffusion-v1-5", subfolder="unet", torch_dtype=torch.float16, device="cuda")
102
+ stylecodes_model = StyleCodesModel.from_unet(unet, size_ratio=1.0).to(dtype=torch.float16, device="cuda")
103
+ stylecodes_model.requires_grad_(False)
104
+ stylecodes_model = stylecodes_model.to("cuda")
105
+
106
+
107
+ stylecodes_model.load_model("models/controlnet_model_11_80000.bin")
108
+ # Load and preprocess image
109
+ if image is None:
110
+ image = Image.open(image_path).convert("RGB")
111
+ image = image.resize((512, 512))
112
+
113
+ # Set up generator with a fixed seed for reproducibility
114
+ seed = 238
115
+ clip_image_processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
116
+ image_encoder = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224").to(dtype=torch.float16,device=stylecodes_model.device)
117
+ clip_image = clip_image_processor(images=image, return_tensors="pt").pixel_values
118
+ clip_image = clip_image.to(stylecodes_model.device, dtype=torch.float16)
119
+ clip_image = {"pixel_values": clip_image}
120
+ clip_image_embeds = image_encoder(**clip_image, output_hidden_states=True).hidden_states[-2]
121
+
122
+ # Run the image through the pipeline with the specified prompt
123
+ code = stylecodes_model.sref_autoencoder.make_stylecode(clip_image_embeds)
124
+ print("stylecode = ",code)
125
+ return code
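
A minimal sketch of how these helpers might be driven from a script, assuming the checkpoint `models/controlnet_model_11_80000.bin` is present, a CUDA device is available, and `reference.png` / `output.png` are placeholder paths for your own files:

from controlnet.callable_functions import make_stylecode, process_single_image_both_ways

# Extract a compact Base64 style code from a reference image.
code = make_stylecode("reference.png")
print(code)

# Generate 512x512 images conditioned on the same reference image and a text prompt.
images = process_single_image_both_ways(
    image_path="reference.png",
    prompt="a cozy cabin in the woods, oil painting",
    num_inference_steps=30,
)
images[0].save("output.png")
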
controlnet/controlnetxs_appearance.py ADDED
@@ -0,0 +1,1603 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import math
15
+ from dataclasses import dataclass
16
+ from typing import Any, Dict, List, Optional, Tuple, Union
17
+ import datetime
18
+ import torch
19
+ import torch.utils.checkpoint
20
+ from torch import nn
21
+ from torch.nn import functional as F
22
+ from torch.nn.modules.normalization import GroupNorm
23
+ import os
24
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
25
+ from diffusers.models.attention_processor import AttentionProcessor
26
+ from diffusers.utils import USE_PEFT_BACKEND
27
+ from diffusers.models.autoencoders import AutoencoderKL
28
+ from diffusers.models.lora import LoRACompatibleConv
29
+ from diffusers.models.modeling_utils import ModelMixin
30
+ from diffusers.models.unets.unet_2d_blocks import (
31
+ CrossAttnDownBlock2D,
32
+ CrossAttnUpBlock2D,
33
+ DownBlock2D,
34
+ Downsample2D,
35
+ ResnetBlock2D,
36
+ Transformer2DModel,
37
+ UpBlock2D,
38
+ Upsample2D,
39
+ )
40
+ from diffusers.models.unets.unet_2d_condition import UNet2DConditionModel
41
+ from diffusers.utils import BaseOutput, logging
42
+ import numpy as np
43
+ from PIL import Image
44
+ from safetensors import safe_open
45
+ from .attention_autoencoder import AttentionAutoencoder, PositionalEncoding
46
+
47
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
48
+
49
+
50
+
51
+
52
+ @dataclass
53
+ class ControlNetXSOutput(BaseOutput):
54
+ """
55
+ The output of [`ControlNetXSModel`].
56
+
57
+ Args:
58
+ sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)`):
59
+ The output of the `ControlNetXSModel`. Unlike `ControlNetOutput` this is NOT to be added to the base model
60
+ output, but is already the final output.
61
+ """
62
+
63
+ sample: torch.FloatTensor = None
64
+
65
+
66
+ # copied from diffusers.models.controlnet.ControlNetConditioningEmbedding
67
+ class ControlNetConditioningEmbedding(nn.Module):
68
+ """
69
+ Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN
70
+ [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized
71
+ training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the
72
+ convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides
73
+ (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full
74
+ model) to encode image-space conditions ... into feature maps ..."
75
+ """
76
+
77
+ def __init__(
78
+ self,
79
+ conditioning_embedding_channels: int,
80
+ conditioning_channels: int = 3,
81
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
82
+ ):
83
+ super().__init__()
84
+
85
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
86
+
87
+ self.blocks = nn.ModuleList([])
88
+
89
+ for i in range(len(block_out_channels) - 1):
90
+ channel_in = block_out_channels[i]
91
+ channel_out = block_out_channels[i + 1]
92
+ self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1))
93
+ self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2))
94
+
95
+ self.conv_out = zero_module(
96
+ nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1)
97
+ )
98
+
99
+ def forward(self, conditioning):
100
+ embedding = self.conv_in(conditioning)
101
+ embedding = F.silu(embedding)
102
+
103
+ for block in self.blocks:
104
+ embedding = block(embedding)
105
+ embedding = F.silu(embedding)
106
+
107
+ embedding = self.conv_out(embedding)
108
+
109
+ return embedding
110
+
111
+
112
+
113
+
114
+ class ControlNetConditioningEmbeddingBig(nn.Module):
115
+ def __init__(
116
+ self,
117
+ conditioning_embedding_channels: int,
118
+ conditioning_channels: int = 4,
119
+ block_out_channels: Tuple[int, ...] = (16, 32, 96, 256),
120
+ text_embed_dim: int = 768,
121
+ ):
122
+ super().__init__()
123
+
124
+ self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1)
125
+ self.cross_attention = CrossAttention(block_out_channels[0], text_embed_dim)
126
+
127
+ # Encoder with increasing feature maps and more downsampling
128
+ self.encoder = nn.ModuleList([
129
+ nn.Conv2d(block_out_channels[0], 64, kernel_size=3, stride=2, padding=1),
130
+ nn.Conv2d(64, 128, kernel_size=3, stride=2, padding=1),
131
+ nn.Conv2d(128, 256, kernel_size=3, stride=2, padding=1),
132
+ nn.Conv2d(256, 320, kernel_size=3, stride=2, padding=1),
133
+ nn.Conv2d(320, 512, kernel_size=3, stride=2, padding=1),
134
+ nn.Conv2d(512, 640, kernel_size=3, stride=2, padding=1),
135
+ ])
136
+
137
+ # Global embedding processing
138
+ self.global_fc = nn.Linear(640, 640)
139
+
140
+ # Bottleneck
141
+ self.bottleneck_down = nn.Conv2d(640, 6, kernel_size=3, stride=1, padding=1)
142
+ self.bottleneck_up = nn.Conv2d(6, 320, kernel_size=3, stride=1, padding=1)
143
+
144
+ # Smaller decoder to get back to 320x64x64
145
+ self.decoder = nn.ModuleList([
146
+ nn.ConvTranspose2d(320, 320, kernel_size=4, stride=2, padding=1), # 4x4 -> 8x8
147
+ nn.ConvTranspose2d(320, 320, kernel_size=4, stride=2, padding=1), # 8x8 -> 16x16
148
+ nn.ConvTranspose2d(320, 320, kernel_size=4, stride=2, padding=1), # 16x16 -> 32x32
149
+ ])
150
+
151
+ def forward(self, x, text_embeds):
152
+ x = self.conv_in(x)
153
+ x = self.cross_attention(x, text_embeds)
154
+
155
+ # Encoder
156
+ for encoder_layer in self.encoder:
157
+ x = encoder_layer(x)
158
+ x = F.relu(x)
159
+
160
+ # Global embedding processing
161
+ b, c, h, w = x.shape
162
+ x_flat = x.view(b, c, -1).mean(dim=2) # Global average pooling
163
+ x_global = self.global_fc(x_flat).view(b, c, 1, 1)
164
+ x = x + x_global.expand_as(x) # Add global features to local features
165
+
166
+ # Bottleneck
167
+ x = self.bottleneck_down(x)
168
+ x = self.bottleneck_up(x)
169
+
170
+ # Decoder
171
+ for decoder_layer in self.decoder:
172
+ x = decoder_layer(x)
173
+ x = F.relu(x)
174
+ #print(x.shape)
175
+ return x
176
+
177
+ class CrossAttention(nn.Module):
178
+ def __init__(self, dim, context_dim):
179
+ super().__init__()
180
+ self.to_q = nn.Conv2d(dim, dim, 1)
181
+ self.to_k = nn.Linear(context_dim, dim)
182
+ self.to_v = nn.Linear(context_dim, dim)
183
+ self.scale = dim ** -0.5
184
+
185
+ def forward(self, x, context):
186
+ b, c, h, w = x.shape
187
+ q = self.to_q(x).view(b, c, -1).permute(0, 2, 1) # (B, H*W, C)
188
+ k = self.to_k(context) # (B, T, C)
189
+ v = self.to_v(context) # (B, T, C)
190
+
191
+ attn = torch.matmul(q, k.transpose(-2, -1)) * self.scale # (B, H*W, T)
192
+ attn = attn.softmax(dim=-1)
193
+ out = torch.matmul(attn, v) # (B, H*W, C)
194
+ out = out.permute(0, 2, 1).view(b, c, h, w) # (B, C, H, W)
195
+ return out + x
196
+
197
+
198
+ def zero_module(module):
199
+ for p in module.parameters():
200
+ nn.init.zeros_(p)
201
+ return module
202
+
203
+
204
+ class StyleCodesModel(ModelMixin, ConfigMixin):
205
+ r"""
206
+ Based off ControlNet-XS
207
+ """
208
+ @classmethod
209
+ def init_original(cls, base_model: UNet2DConditionModel, is_sdxl=True):
210
+ """
211
+ Create a ControlNetXS model with the same parameters as in the original paper (https://github.com/vislearn/ControlNet-XS).
212
+
213
+ Parameters:
214
+ base_model (`UNet2DConditionModel`):
215
+ Base UNet model. Needs to be either StableDiffusion or StableDiffusion-XL.
216
+ is_sdxl (`bool`, defaults to `True`):
217
+ Whether passed `base_model` is a StableDiffusion-XL model.
218
+ """
219
+
220
+ def get_dim_attn_heads(base_model: UNet2DConditionModel, size_ratio: float, num_attn_heads: int):
221
+ """
222
+ Currently, diffusers can only set the dimension of attention heads (see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why).
223
+ The original ControlNet-XS model, however, defines the number of attention heads.
224
+ That's why we compute the dimensions needed to get the correct number of attention heads.
225
+ """
226
+ block_out_channels = [int(size_ratio * c) for c in base_model.config.block_out_channels]
227
+ dim_attn_heads = [math.ceil(c / num_attn_heads) for c in block_out_channels]
228
+ return dim_attn_heads
229
+
230
+ if is_sdxl:
231
+ return StyleCodesModel.from_unet(
232
+ base_model,
233
+ time_embedding_mix=0.95,
234
+ learn_embedding=True,
235
+ size_ratio=0.1,
236
+ conditioning_embedding_out_channels=(16, 32, 96, 256),
237
+ num_attention_heads=get_dim_attn_heads(base_model, 0.1, 64),
238
+ )
239
+ else:
240
+ return StyleCodesModel.from_unet(
241
+ base_model,
242
+ time_embedding_mix=1.0,
243
+ learn_embedding=True,
244
+ size_ratio=0.0125,
245
+ conditioning_embedding_out_channels=(16, 32, 96, 256),
246
+ num_attention_heads=get_dim_attn_heads(base_model, 0.0125, 8),
247
+ )
248
+
249
+ @classmethod
250
+ def _gather_subblock_sizes(cls, unet: UNet2DConditionModel, base_or_control: str):
251
+ """To create correctly sized connections between base and control model, we need to know
252
+ the input and output channels of each subblock.
253
+
254
+ Parameters:
255
+ unet (`UNet2DConditionModel`):
256
+ Unet of which the subblock channels sizes are to be gathered.
257
+ base_or_control (`str`):
258
+ Needs to be either "base" or "control". If "base", decoder is also considered.
259
+ """
260
+ if base_or_control not in ["base", "control"]:
261
+ raise ValueError("`base_or_control` needs to be either `base` or `control`")
262
+
263
+ channel_sizes = {"down": [], "mid": [], "up": []}
264
+
265
+ # input convolution
266
+ channel_sizes["down"].append((unet.conv_in.in_channels, unet.conv_in.out_channels))
267
+
268
+ # encoder blocks
269
+ for module in unet.down_blocks:
270
+ if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)):
271
+ for r in module.resnets:
272
+ channel_sizes["down"].append((r.in_channels, r.out_channels))
273
+ if module.downsamplers:
274
+ channel_sizes["down"].append(
275
+ (module.downsamplers[0].channels, module.downsamplers[0].out_channels)
276
+ )
277
+ else:
278
+ raise ValueError(f"Encountered unknown module of type {type(module)} while creating ControlNet-XS.")
279
+
280
+ # middle block
281
+ channel_sizes["mid"].append((unet.mid_block.resnets[0].in_channels, unet.mid_block.resnets[0].out_channels))
282
+
283
+ # decoder blocks
284
+ #if base_or_control == "base":
285
+ for module in unet.up_blocks:
286
+ if isinstance(module, (CrossAttnUpBlock2D, UpBlock2D)):
287
+ for r in module.resnets:
288
+ channel_sizes["up"].append((r.in_channels, r.out_channels))
289
+ else:
290
+ raise ValueError(
291
+ f"Encountered unknown module of type {type(module)} while creating ControlNet-XS."
292
+ )
293
+
294
+ return channel_sizes
295
+ def _make_colab_linear_layer(self, in_channels, out_channels):
296
+ # Create a Linear layer where in_features = in_channels + out_channels
297
+ #in_features = in_channels + out_channels
298
+ linear_layer = nn.Linear(in_channels, out_channels)
299
+
300
+ # Initialize weights as identity
301
+ with torch.no_grad():
302
+ linear_layer.weight.copy_(torch.eye(in_channels))
303
+
304
+ return linear_layer
305
+ @register_to_config
306
+ def __init__(
307
+ self,
308
+ conditioning_channels: int = 3,
309
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
310
+ controlnet_conditioning_channel_order: str = "rgb",
311
+ time_embedding_input_dim: int = 320,
312
+ time_embedding_dim: int = 1280,
313
+ time_embedding_mix: float = 1.0,
314
+ learn_embedding: bool = False,
315
+ base_model_channel_sizes: Dict[str, List[Tuple[int]]] = {
316
+ "down": [
317
+ (4, 320),
318
+ (320, 320),
319
+ (320, 320),
320
+ (320, 320),
321
+ (320, 640),
322
+ (640, 640),
323
+ (640, 640),
324
+ (640, 1280),
325
+ (1280, 1280),
326
+ ],
327
+ "mid": [(1280, 1280)],
328
+ "up": [
329
+ (2560, 1280),
330
+ (2560, 1280),
331
+ (1920, 1280),
332
+ (1920, 640),
333
+ (1280, 640),
334
+ (960, 640),
335
+ (960, 320),
336
+ (640, 320),
337
+ (640, 320),
338
+ ],
339
+ },
340
+ sample_size: Optional[int] = None,
341
+ down_block_types: Tuple[str] = (
342
+ "CrossAttnDownBlock2D",
343
+ "CrossAttnDownBlock2D",
344
+ "CrossAttnDownBlock2D",
345
+ "DownBlock2D",
346
+ ),
347
+ up_block_types: Tuple[str] = ("UpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D", "CrossAttnUpBlock2D"),
348
+ block_out_channels: Tuple[int] = (320, 640, 1280, 1280),
349
+ norm_num_groups: Optional[int] = 32,
350
+ cross_attention_dim: Union[int, Tuple[int]] = 1280,
351
+ transformer_layers_per_block: Union[int, Tuple[int], Tuple[Tuple]] = 1,
352
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = 8,
353
+ upcast_attention: bool = False,
354
+ ):
355
+ super().__init__()
356
+
357
+ # 1 - Create control unet
358
+ self.control_model = UNet2DConditionModel(
359
+ sample_size=sample_size,
360
+ down_block_types=down_block_types,
361
+ up_block_types=up_block_types,
362
+ block_out_channels=block_out_channels,
363
+ norm_num_groups=norm_num_groups,
364
+ cross_attention_dim=cross_attention_dim,
365
+ transformer_layers_per_block=transformer_layers_per_block,
366
+ attention_head_dim=num_attention_heads,
367
+ use_linear_projection=True,
368
+ upcast_attention=upcast_attention,
369
+ time_embedding_dim=time_embedding_dim,
370
+ )
371
+
372
+ # 2 - Do model surgery on control model
373
+ # 2.1 - Allow to use the same time information as the base model
374
+ adjust_time_dims(self.control_model, time_embedding_input_dim, time_embedding_dim)
375
+
376
+ # 2.2 - Allow for information infusion from base model
377
+
378
+ # We concat the output of each base encoder subblock to the input of the next control encoder subblock
379
+ # (We ignore the 1st element, as it represents the `conv_in`.)
380
+ extra_input_channels = [input_channels for input_channels, _ in base_model_channel_sizes["down"][1:]]
381
+ it_extra_input_channels = iter(extra_input_channels)
382
+
383
+ # print(extra_input_channels)
384
+ # for b, block in enumerate(self.control_model.down_blocks):
385
+ # for r in range(len(block.resnets)):
386
+ # increase_block_input_in_encoder_resnet(
387
+ # self.control_model, block_no=b, resnet_idx=r, by=next(it_extra_input_channels)
388
+ # )
389
+ # if block.downsamplers:
390
+ # increase_block_input_in_encoder_downsampler(
391
+ # self.control_model, block_no=b, by=next(it_extra_input_channels)
392
+ # )
393
+
394
+ # increase_block_input_in_mid_resnet(self.control_model, by=extra_input_channels[-1])
395
+
396
+ def get_flat_subblock_channel_sizes_down(model):
397
+ subblock_channel_sizes = []
398
+
399
+ for block in model.down_blocks:
400
+ # Iterate through ResnetBlock2D subblocks
401
+ for resnet in block.resnets:
402
+ # Only handle the first convolution for ResnetBlock2D
403
+ if hasattr(resnet, 'conv1'):
404
+ input_channels = resnet.conv1.in_channels
405
+ output_channels = resnet.conv1.out_channels
406
+ subblock_channel_sizes.append((input_channels, output_channels))
407
+
408
+ # Check and iterate through Upsample2D subblocks only if they exist
409
+ if hasattr(block, 'upsamplers') and block.upsamplers:
410
+ for upsampler in block.upsamplers:
411
+ if hasattr(upsampler, 'conv'):
412
+ input_channels = upsampler.conv.in_channels
413
+ output_channels = upsampler.conv.out_channels
414
+ subblock_channel_sizes.append((input_channels, output_channels))
415
+ print("down" ,subblock_channel_sizes)
416
+ return subblock_channel_sizes
417
+ def get_flat_subblock_channel_sizes(model):
418
+ subblock_channel_sizes = []
419
+
420
+ for block in model.up_blocks:
421
+ # Iterate through ResnetBlock2D subblocks
422
+ for resnet in block.resnets:
423
+ # Only handle the first convolution for ResnetBlock2D
424
+ if hasattr(resnet, 'conv1'):
425
+ input_channels = resnet.conv1.in_channels
426
+ output_channels = resnet.conv1.out_channels
427
+ subblock_channel_sizes.append((input_channels, output_channels))
428
+
429
+ # Check and iterate through Upsample2D subblocks only if they exist
430
+ if hasattr(block, 'upsamplers') and block.upsamplers:
431
+ for upsampler in block.upsamplers:
432
+ if hasattr(upsampler, 'conv'):
433
+ input_channels = upsampler.conv.in_channels
434
+ output_channels = upsampler.conv.out_channels
435
+ # subblock_channel_sizes.append((input_channels, output_channels))
436
+ print("up", subblock_channel_sizes)
437
+ return subblock_channel_sizes
438
+
439
+
440
+ get_flat_subblock_channel_sizes_down(self.control_model)
441
+ # Now use this function to dynamically get the extra input channels
442
+ #extra_input_channels_up = [t[1] for t in get_flat_subblock_channel_sizes(self.control_model)]
443
+ #all_channels_up = get_flat_subblock_channel_sizes(self.control_model)
444
+ #print(extra_input_channels_up)
445
+
446
+ # it_extra_input_channels = iter(extra_input_channels_up)
447
+ # #print(self.control_model.up_blocks)
448
+ # for b, block in enumerate(self.control_model.up_blocks):
449
+
450
+ # for r in range(len(block.resnets)):
451
+ # increase_block_input_in_decoder_resnet(
452
+ # self.control_model, block_no=b, resnet_idx=r, by=next(it_extra_input_channels)
453
+ # )
454
+
455
+ # print(len(block.resnets))
456
+
457
+ # # if block.upsamplers:
458
+ # #increase_block_input_in_decoder_downsampler(
459
+ # # self.control_model, block_no=b, by=next(it_extra_input_channels)
460
+ # #)
461
+
462
+
463
+ # 2.3 - Make group norms work with modified channel sizes
464
+ adjust_group_norms(self.control_model)
465
+
466
+ # 3 - Gather Channel Sizes
467
+ self.ch_inout_ctrl = StyleCodesModel._gather_subblock_sizes(self.control_model, base_or_control="control")
468
+ self.ch_inout_base = base_model_channel_sizes
469
+
470
+ # 4 - Build connections between base and control model
471
+ self.control_model.down_zero_convs_in = nn.ModuleList([])
472
+ self.control_model.middle_block_out = nn.ModuleList([])
473
+ #self.control_model.middle_block_in = nn.ModuleList([])
474
+ self.control_model.up_zero_convs_out = nn.ModuleList([])
475
+ #self.control_model.up_zero_convs_in = nn.ModuleList([])
476
+
477
+ #for ch_io_base in self.ch_inout_base["down"]:
478
+ # for i in range(len(self.ch_inout_base["down"])):
479
+ # if i < len(self.ch_inout_ctrl["down"]) - 1:
480
+ # ch_io_base = self.ch_inout_base["down"][i]
481
+ # self.control_model.down_zero_convs_in.append(self._make_zero_conv(in_channels=ch_io_base[1], out_channels=ch_io_base[1]))
482
+ #self.control_model.down_zero_convs_in.append(self._make_zero_conv(in_channels=ch_io_base[1], out_channels=ch_io_base[1]))
483
+
484
+ linear_shape = self.ch_inout_ctrl["mid"][-1][1] + self.ch_inout_ctrl["mid"][-1][1]
485
+ self.middle_block_out = self._make_colab_linear_layer(in_channels=linear_shape, out_channels=linear_shape)
486
+
487
+
488
+ #self.up_zero_convs_out.append(
489
+ # self._make_zero_conv(self.ch_inout_ctrl["down"][-1][1], self.ch_inout_base["mid"][-1][1])
490
+ #)
491
+ #skip connections i dont care about these
492
+ #for i in range(1, len(self.ch_inout_ctrl["down"])):
493
+ # self.up_zero_convs_out.append(
494
+ # self._make_zero_conv(self.ch_inout_ctrl["down"][-(i + 1)][1], self.ch_inout_base["up"][i - 1][1])
495
+ # )
496
+
497
+
498
+
499
+ #up blocks for output
500
+ #need to check the input sizes
501
+ #need to implement the increased input size for the up blocks as done already with the down blocks
502
+ base_last_out_channels = [1280,1280, 1280, 1280, 1280, 1280, 1280, 640, 640, 640, 320, 320,320]
503
+ base_current_in_channels = [1280, 1280, 1280, 1280, 1280, 1280, 640, 640, 640, 320, 320,320]
504
+ #JANK WARNING REMEMBER TO FIX LATER BEFORE ACTUALLY PUTTING THIS CODE ANYWHERE
505
+ print(f"subblock up sizes {self.ch_inout_ctrl}")
506
+ # for i in range(len(base_current_in_channels)):
507
+ # self.control_model.up_zero_convs_in.append(
508
+ # self._make_zero_conv(base_last_out_channels[i], base_current_in_channels[i])
509
+ # )
510
+
511
+ for i in range(len(self.ch_inout_base["up"])):
512
+ #for ch_io_base in self.ch_inout_base["up"]:
513
+ ch_io_base = self.ch_inout_base["up"][i]
514
+ if i < len(self.ch_inout_ctrl["up"]):
515
+ linear_shape = ch_io_base[1] + ch_io_base[1]
516
+ self.control_model.up_zero_convs_out.append(
517
+ self._make_colab_linear_layer(in_channels=linear_shape, out_channels=linear_shape)
518
+ )
519
+ # for i in range(len(self.ch_inout_ctrl["up"])):
520
+ # self.control_model.up_zero_convs_out.append(
521
+ # self._make_zero_conv(self.ch_inout_ctrl["up"][i][1], self.ch_inout_base["up"][i][1])
522
+ # )
523
+
524
+
525
+ # 5 - Create conditioning hint embedding
526
+ # self.controlnet_cond_embedding = ControlNetConditioningEmbedding(
527
+ # conditioning_embedding_channels=block_out_channels[0],
528
+ # block_out_channels=conditioning_embedding_out_channels,
529
+ # conditioning_channels=conditioning_channels,
530
+ # )
531
+ self.sref_autoencoder = AttentionAutoencoder().to(device='cuda')
532
+ # In the minimal implementation setting, we only need the control model up to the mid block
533
+ #del self.control_model.up_blocks
534
+ del self.control_model.down_blocks
535
+ del self.control_model.conv_norm_out
536
+ del self.control_model.conv_out
537
+ del self.control_model.time_embedding
538
+ del self.control_model.conv_in
539
+
540
+
541
+ def load_model(self, path: str):
542
+ """Load the model from the given path.
543
+
544
+ Parameters:
545
+ path (`str`):
546
+ Path to the model checkpoint.
547
+ """
548
+
549
+ if os.path.splitext(path)[-1] == ".safetensors":
550
+ state_dict = {"image_proj": {}, "ip_adapter": {}, "controlnet": {}}
551
+ with safe_open(path, framework="pt", device="cpu") as f:
552
+ for key in f.keys():
553
+ if key.startswith("image_proj."):
554
+ state_dict["image_proj"][key.replace("image_proj.", "")] = f.get_tensor(key)
555
+ elif key.startswith("ip_adapter."):
556
+ state_dict["ip_adapter"][key.replace("ip_adapter.", "")] = f.get_tensor(key)
557
+ elif key.startswith("controlnet."):
558
+ state_dict["controlnet"][key.replace("controlnet.", "")] = f.get_tensor(key)
559
+ else:
560
+ state_dict = torch.load(path, map_location="cpu")
561
+
562
+ print("load controlnet", self.load_state_dict(state_dict["controlnet"],strict=False))
563
+
564
+
565
+
566
+ @classmethod
567
+ def from_unet(
568
+ cls,
569
+ unet: UNet2DConditionModel,
570
+ conditioning_channels: int = 3,
571
+ conditioning_embedding_out_channels: Tuple[int] = (16, 32, 96, 256),
572
+ controlnet_conditioning_channel_order: str = "rgb",
573
+ learn_embedding: bool = False,
574
+ time_embedding_mix: float = 1.0,
575
+ block_out_channels: Optional[Tuple[int]] = None,
576
+ size_ratio: Optional[float] = None,
577
+ num_attention_heads: Optional[Union[int, Tuple[int]]] = 8,
578
+ norm_num_groups: Optional[int] = None,
579
+ ):
580
+ r"""
581
+ Instantiate a [`ControlNetXSModel`] from [`UNet2DConditionModel`].
582
+
583
+ Parameters:
584
+ unet (`UNet2DConditionModel`):
585
+ The UNet model we want to control. The dimensions of the ControlNetXSModel will be adapted to it.
586
+ conditioning_channels (`int`, defaults to 3):
587
+ Number of channels of conditioning input (e.g. an image)
588
+ conditioning_embedding_out_channels (`tuple[int]`, defaults to `(16, 32, 96, 256)`):
589
+ The tuple of output channel for each block in the `controlnet_cond_embedding` layer.
590
+ controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`):
591
+ The channel order of conditional image. Will convert to `rgb` if it's `bgr`.
592
+ learn_embedding (`bool`, defaults to `False`):
593
+ Whether to use the time embedding of the control model. If yes, the time embedding is a linear interpolation
594
+ of the time embeddings of the control and base model with interpolation parameter
595
+ `time_embedding_mix**3`.
596
+ time_embedding_mix (`float`, defaults to 1.0):
597
+ Linear interpolation parameter used if `learn_embedding` is `True`.
598
+ block_out_channels (`Tuple[int]`, *optional*):
599
+ Down blocks output channels in control model. Either this or `size_ratio` must be given.
600
+ size_ratio (float, *optional*):
601
+ When given, block_out_channels is set to a relative fraction of the base model's block_out_channels.
602
+ Either this or `block_out_channels` must be given.
603
+ num_attention_heads (`Union[int, Tuple[int]]`, *optional*):
604
+ The dimension of the attention heads. The naming seems a bit confusing and it is, see https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 for why.
605
+ norm_num_groups (int, *optional*, defaults to `None`):
606
+ The number of groups to use for the normalization of the control unet. If `None`,
607
+ `int(unet.config.norm_num_groups * size_ratio)` is taken.
608
+ """
609
+
610
+ # Check input
611
+ fixed_size = block_out_channels is not None
612
+ relative_size = size_ratio is not None
613
+ if not (fixed_size ^ relative_size):
614
+ raise ValueError(
615
+ "Pass exactly one of `block_out_channels` (for absolute sizing) or `control_model_ratio` (for relative sizing)."
616
+ )
617
+
618
+ # Create model
619
+ if block_out_channels is None:
620
+ block_out_channels = [int(size_ratio * c) for c in unet.config.block_out_channels]
621
+
622
+ # Check that attention heads and group norms match channel sizes
623
+ # - attention heads
624
+ def attn_heads_match_channel_sizes(attn_heads, channel_sizes):
625
+ if isinstance(attn_heads, (tuple, list)):
626
+ return all(c % a == 0 for a, c in zip(attn_heads, channel_sizes))
627
+ else:
628
+ return all(c % attn_heads == 0 for c in channel_sizes)
629
+
630
+ num_attention_heads = num_attention_heads or unet.config.attention_head_dim
631
+ if not attn_heads_match_channel_sizes(num_attention_heads, block_out_channels):
632
+ raise ValueError(
633
+ f"The dimension of attention heads ({num_attention_heads}) must divide `block_out_channels` ({block_out_channels}). If you didn't set `num_attention_heads` the default settings don't match your model. Set `num_attention_heads` manually."
634
+ )
635
+
636
+ # - group norms
637
+ def group_norms_match_channel_sizes(num_groups, channel_sizes):
638
+ return all(c % num_groups == 0 for c in channel_sizes)
639
+
640
+ if norm_num_groups is None:
641
+ if group_norms_match_channel_sizes(unet.config.norm_num_groups, block_out_channels):
642
+ norm_num_groups = unet.config.norm_num_groups
643
+ else:
644
+ norm_num_groups = min(block_out_channels)
645
+
646
+ if group_norms_match_channel_sizes(norm_num_groups, block_out_channels):
647
+ print(
648
+ f"`norm_num_groups` was set to `min(block_out_channels)` (={norm_num_groups}) so it divides all block_out_channels` ({block_out_channels}). Set it explicitly to remove this information."
649
+ )
650
+ else:
651
+ raise ValueError(
652
+ f"`block_out_channels` ({block_out_channels}) don't match the base models `norm_num_groups` ({unet.config.norm_num_groups}). Setting `norm_num_groups` to `min(block_out_channels)` ({norm_num_groups}) didn't fix this. Pass `norm_num_groups` explicitly so it divides all block_out_channels."
653
+ )
654
+
655
+ def get_time_emb_input_dim(unet: UNet2DConditionModel):
656
+ return unet.time_embedding.linear_1.in_features
657
+
658
+ def get_time_emb_dim(unet: UNet2DConditionModel):
659
+ return unet.time_embedding.linear_2.out_features
660
+
661
+ # Clone params from base unet if
662
+ # (i) it's required to build SD or SDXL, and
663
+ # (ii) it's not used for the time embedding (as time embedding of control model is never used), and
664
+ # (iii) it's not set further below anyway
665
+ to_keep = [
666
+ "cross_attention_dim",
667
+ "down_block_types",
668
+ "sample_size",
669
+ "transformer_layers_per_block",
670
+ "up_block_types",
671
+ "upcast_attention",
672
+ ]
673
+ kwargs = {k: v for k, v in dict(unet.config).items() if k in to_keep}
674
+ kwargs.update(block_out_channels=block_out_channels)
675
+ kwargs.update(num_attention_heads=num_attention_heads)
676
+ kwargs.update(norm_num_groups=norm_num_groups)
677
+
678
+ # Add controlnetxs-specific params
679
+ kwargs.update(
680
+ conditioning_channels=conditioning_channels,
681
+ controlnet_conditioning_channel_order=controlnet_conditioning_channel_order,
682
+ time_embedding_input_dim=get_time_emb_input_dim(unet),
683
+ time_embedding_dim=get_time_emb_dim(unet),
684
+ time_embedding_mix=time_embedding_mix,
685
+ learn_embedding=learn_embedding,
686
+ base_model_channel_sizes=StyleCodesModel._gather_subblock_sizes(unet, base_or_control="base"),
687
+ conditioning_embedding_out_channels=conditioning_embedding_out_channels,
688
+ )
689
+
690
+ return cls(**kwargs)
691
+
692
+ @property
693
+ def attn_processors(self) -> Dict[str, AttentionProcessor]:
694
+ r"""
695
+ Returns:
696
+ `dict` of attention processors: A dictionary containing all attention processors used in the model,
697
+ indexed by their weight names.
698
+ """
699
+ return self.control_model.attn_processors
700
+
701
+ def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]):
702
+ r"""
703
+ Sets the attention processor to use to compute attention.
704
+
705
+ Parameters:
706
+ processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`):
707
+ The instantiated processor class or a dictionary of processor classes that will be set as the processor
708
+ for **all** `Attention` layers.
709
+
710
+ If `processor` is a dict, the key needs to define the path to the corresponding cross attention
711
+ processor. This is strongly recommended when setting trainable attention processors.
712
+
713
+ """
714
+ self.control_model.set_attn_processor(processor)
715
+
716
+ def set_default_attn_processor(self):
717
+ """
718
+ Disables custom attention processors and sets the default attention implementation.
719
+ """
720
+ self.control_model.set_default_attn_processor()
721
+
722
+ def set_attention_slice(self, slice_size):
723
+ r"""
724
+ Enable sliced attention computation.
725
+
726
+ When this option is enabled, the attention module splits the input tensor in slices to compute attention in
727
+ several steps. This is useful for saving some memory in exchange for a small decrease in speed.
728
+
729
+ Args:
730
+ slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`):
731
+ When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If
732
+ `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is
733
+ provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim`
734
+ must be a multiple of `slice_size`.
735
+ """
736
+ self.control_model.set_attention_slice(slice_size)
737
+
738
+ def _set_gradient_checkpointing(self, module, value=False):
739
+ if isinstance(module, (UNet2DConditionModel)):
740
+ if value:
741
+ module.enable_gradient_checkpointing()
742
+ else:
743
+ module.disable_gradient_checkpointing()
744
+
745
+
746
+ def forward(
747
+ self,
748
+ base_model: UNet2DConditionModel,
749
+ sample: torch.FloatTensor,
750
+ timestep: Union[torch.Tensor, float, int],
751
+ encoder_hidden_states: torch.Tensor,
752
+ encoder_hidden_states_controlnet: torch.Tensor,
753
+ controlnet_cond: torch.Tensor,
754
+ conditioning_scale: float = 1.0,
755
+ class_labels: Optional[torch.Tensor] = None,
756
+ timestep_cond: Optional[torch.Tensor] = None,
757
+ attention_mask: Optional[torch.Tensor] = None,
758
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
759
+ added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
760
+ return_dict: bool = True,
761
+ stylecode=None,
762
+ ) -> Union[ControlNetXSOutput, Tuple]:
763
+ """
764
+ The [`ControlNetModel`] forward method.
765
+
766
+ Args:
767
+ base_model (`UNet2DConditionModel`):
768
+ The base unet model we want to control.
769
+ sample (`torch.FloatTensor`):
770
+ The noisy input tensor.
771
+ timestep (`Union[torch.Tensor, float, int]`):
772
+ The number of timesteps to denoise an input.
773
+ encoder_hidden_states (`torch.Tensor`):
774
+ The encoder hidden states.
775
+ controlnet_cond (`torch.FloatTensor`):
776
+ The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
777
+ conditioning_scale (`float`, defaults to `1.0`):
778
+ How much the control model affects the base model outputs.
779
+ class_labels (`torch.Tensor`, *optional*, defaults to `None`):
780
+ Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
781
+ timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
782
+ Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
783
+ timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
784
+ embeddings.
785
+ attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
786
+ An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
787
+ is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
788
+ negative values to the attention scores corresponding to "discard" tokens.
789
+ added_cond_kwargs (`dict`):
790
+ Additional conditions for the Stable Diffusion XL UNet.
791
+ cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
792
+ A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
793
+ return_dict (`bool`, defaults to `True`):
794
+ Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
795
+
796
+ Returns:
797
+ [`~models.controlnetxs.ControlNetXSOutput`] **or** `tuple`:
798
+ If `return_dict` is `True`, a [`~models.controlnetxs.ControlNetXSOutput`] is returned, otherwise a
799
+ tuple is returned where the first element is the sample tensor.
800
+ """
801
+ # check channel order
802
+ channel_order = self.config.controlnet_conditioning_channel_order
803
+
804
+ if channel_order == "rgb":
805
+ # in rgb order by default
806
+ ...
807
+ elif channel_order == "bgr":
808
+ controlnet_cond = torch.flip(controlnet_cond, dims=[1])
809
+ else:
810
+ raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
811
+
812
+ # scale control strength
813
+ n_connections = 0 + 1 + len(self.control_model.up_zero_convs_out)
814
+ scale_list = torch.full((n_connections,), conditioning_scale)
815
+
816
+ # prepare attention_mask
817
+ if attention_mask is not None:
818
+ attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
819
+ attention_mask = attention_mask.unsqueeze(1)
820
+
821
+ # 1. time
822
+ timesteps = timestep
823
+ if not torch.is_tensor(timesteps):
824
+ # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
825
+ # This would be a good case for the `match` statement (Python 3.10+)
826
+ is_mps = sample.device.type == "mps"
827
+ if isinstance(timestep, float):
828
+ dtype = torch.float32 if is_mps else torch.float64
829
+ else:
830
+ dtype = torch.int32 if is_mps else torch.int64
831
+ timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
832
+ elif len(timesteps.shape) == 0:
833
+ timesteps = timesteps[None].to(sample.device)
834
+
835
+ # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
836
+ timesteps = timesteps.expand(sample.shape[0])
837
+
838
+ t_emb = base_model.time_proj(timesteps)
839
+
840
+ # timesteps does not contain any weights and will always return f32 tensors
841
+ # but time_embedding might actually be running in fp16. so we need to cast here.
842
+ # there might be better ways to encapsulate this.
843
+ t_emb = t_emb.to(dtype=sample.dtype)
844
+
845
+ if self.config.learn_embedding:
846
+ ctrl_temb = self.control_model.time_embedding(t_emb, timestep_cond)
847
+ base_temb = base_model.time_embedding(t_emb, timestep_cond)
848
+ interpolation_param = self.config.time_embedding_mix**0.3
849
+
850
+ temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param)
851
+ else:
852
+ temb = base_model.time_embedding(t_emb)
853
+
854
+ # added time & text embeddings
855
+ aug_emb = None
856
+ aug_emb_ctrl = None
857
+ if base_model.class_embedding is not None:
858
+ if class_labels is None:
859
+ raise ValueError("class_labels should be provided when num_class_embeds > 0")
860
+
861
+ if base_model.config.class_embed_type == "timestep":
862
+ class_labels = base_model.time_proj(class_labels)
863
+
864
+ class_emb = base_model.class_embedding(class_labels).to(dtype=self.dtype)
865
+ temb = temb + class_emb
866
+
867
+ if base_model.config.addition_embed_type is not None:
868
+ if base_model.config.addition_embed_type == "text":
869
+ aug_emb = base_model.add_embedding(encoder_hidden_states)
870
+ aug_emb_ctrl = base_model.add_embedding(encoder_hidden_states_controlnet)
871
+ elif base_model.config.addition_embed_type == "text_image":
872
+ raise NotImplementedError()
873
+ elif base_model.config.addition_embed_type == "text_time":
874
+ # SDXL - style
875
+ if "text_embeds" not in added_cond_kwargs:
876
+ raise ValueError(
877
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
878
+ )
879
+ text_embeds = added_cond_kwargs.get("text_embeds")
880
+ if "time_ids" not in added_cond_kwargs:
881
+ raise ValueError(
882
+ f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
883
+ )
884
+ time_ids = added_cond_kwargs.get("time_ids")
885
+ time_embeds = base_model.add_time_proj(time_ids.flatten())
886
+ time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
887
+ add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
888
+ add_embeds = add_embeds.to(temb.dtype)
889
+ aug_emb = base_model.add_embedding(add_embeds)
890
+ elif base_model.config.addition_embed_type == "image":
891
+ raise NotImplementedError()
892
+ elif base_model.config.addition_embed_type == "image_hint":
893
+ raise NotImplementedError()
894
+
895
+ temb = temb + aug_emb if aug_emb is not None else temb
896
+
897
+ #temb_ctrl = torch.zeros_like(temb)
898
+ temb_ctrl = temb + aug_emb_ctrl if aug_emb_ctrl is not None else temb
899
+ # text embeddings
900
+ # note: when I have more time, actually skip the cross attention layers here
901
+ cemb = encoder_hidden_states
902
+ #cemb_ctrl = torch.zeros_like(encoder_hidden_states)
903
+ cemb_ctrl = encoder_hidden_states
904
+
905
+ # Preparation
906
+ #print("1:cond, 2: embeddings",controlnet_cond.shape,encoder_hidden_states_controlnet.shape)
907
+
908
+ #save_debug_image(controlnet_cond[0])
909
+ #guided_hint = self.controlnet_cond_embedding(controlnet_cond)
910
+ #guided_hint=None
911
+ h_ctrl = h_base = sample
912
+ hs_base, hs_ctrl = [], []
913
+ it_up_convs_out = iter(self.control_model.up_zero_convs_out)
914
+ scales = iter(scale_list)
915
+
916
+ base_down_subblocks = self.to_sub_blocks(base_model.down_blocks)
917
+ #ctrl_down_subblocks = self.to_sub_blocks(self.control_model.down_blocks)
918
+ base_mid_subblocks = self.to_sub_blocks([base_model.mid_block])
919
+ ctrl_mid_subblocks = self.to_sub_blocks([self.control_model.mid_block])
920
+ base_up_subblocks = self.to_sub_blocks(base_model.up_blocks)
921
+ ctrl_up_subblocks = self.to_sub_blocks(self.control_model.up_blocks)
922
+
923
+ # Cross Control
924
+ # 0 - conv in
925
+ h_base = base_model.conv_in(h_base)
926
+ #h_ctrl = self.control_model.conv_in(h_ctrl)
927
+ #if guided_hint is not None:
928
+ h_ctrl = controlnet_cond
929
+ # h_base = h_base + next(it_down_convs_out)(h_ctrl) * next(scales) # D - add ctrl -> base
930
+
931
+ hs_base.append(h_base)
932
+ #hs_ctrl.append(h_ctrl)
933
+
934
+ # 1 - down
935
+ for m_base in base_down_subblocks:
936
+ #h_ctrl = torch.cat([h_ctrl, next(it_down_convs_in)(h_base)], dim=1) # A - concat base -> ctrl
937
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs) # B - apply base subblock
938
+ #h_ctrl = m_ctrl(h_ctrl, temb_ctrl, cemb_ctrl, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
939
+ #h_base = h_base + next(it_down_convs_out)(h_ctrl) * next(scales) # D - add ctrl -> base
940
+ hs_base.append(h_base)
941
+ #hs_ctrl.append(h_ctrl)
942
+
943
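+ # Style path: if no stylecode is provided, encode the control features into a style code;
+ # otherwise decode the provided stylecode back into control features at the base feature resolution.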
+ print("using stylecode", stylecode)
944
+ if stylecode is None:
945
+ h_ctrl, encoded_strings = self.sref_autoencoder.forward_encoding(h_ctrl, h_base.shape[2], h_base.shape[3])
946
+ else:
947
+ h_ctrl = self.sref_autoencoder.forward_from_stylecode(stylecode, h_base.shape[2], h_base.shape[3], h_base.dtype, h_base.device)
948
+
949
+ # 2 - mid
950
+ #h_ctrl = torch.cat([h_ctrl, next(it_down_convs_in)(h_base)], dim=1) # A - concat base -> ctrl
951
+ for m_base, m_ctrl in zip(base_mid_subblocks, ctrl_mid_subblocks):
952
+ h_base = m_base(h_base, temb, cemb, attention_mask, cross_attention_kwargs) # B - apply base subblock
953
+ h_ctrl = m_ctrl(h_ctrl, temb_ctrl, cemb_ctrl, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
954
+
955
+
956
+ #taken from https://github.com/dvlab-research/ControlNeXt/blob/main/ControlNeXt-SD1.5/models/unet.py
957
+ #mid_block_additional_residual = self.middle_block_out(h_ctrl)
958
+ # mid_block_additional_residual = mid_block_out
959
+ # mid_block_additional_residual=nn.functional.adaptive_avg_pool2d(mid_block_additional_residual, h_base.shape[-2:])
960
+ # mid_block_additional_residual = mid_block_additional_residual.to(h_base)
961
+ # mean_latents, std_latents = torch.mean(h_base, dim=(1, 2, 3), keepdim=True), torch.std(h_base, dim=(1, 2, 3), keepdim=True)
962
+ # mean_control, std_control = torch.mean(mid_block_additional_residual, dim=(1, 2, 3), keepdim=True), torch.std(mid_block_additional_residual, dim=(1, 2, 3), keepdim=True)
963
+ # mid_block_additional_residual = (mid_block_additional_residual - mean_control) * (std_latents / (std_control + 1e-12)) + mean_latents
964
+ # h_base = h_base + mid_block_additional_residual * next(scales)
965
+
966
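+ # Collaboration block: flatten the spatial dims into tokens, concatenate ctrl and base features
+ # along the channel dim, project them jointly through middle_block_out, split the result back into
+ # ctrl and base halves, and blend the base half with h_base according to conditioning_scale.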
+ batch_size, channels, height, width = h_ctrl.shape
967
+ colab_input = torch.cat([h_ctrl, h_base], dim=1).view(batch_size, channels * 2, height * width).permute(0, 2, 1)
968
+ colab_output = self.middle_block_out(colab_input)
969
+ sequence_len = height * width
970
+ colab_output = colab_output.permute(0, 2, 1).view(batch_size, channels * 2, height, width) # Reshape back
971
+ h_ctrl, h_base_output = torch.chunk(colab_output, 2, dim=1)
972
+
973
+ #mix using cond scale
974
+ h_base = h_base * (1 - conditioning_scale) + h_base_output * conditioning_scale
975
+ #h_base = h_base + mid_block_additional_residual * next(scales) # D - add ctrl -> base
976
+
977
+ # 3 - up
978
+ for m_base, m_ctrl in zip(base_up_subblocks, ctrl_up_subblocks):
979
+ hs_base_new = hs_base.pop()
980
+ h_base_with_skip = torch.cat([h_base, hs_base_new], dim=1) # concat info from base encoder+ctrl encoder
981
+
982
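+ # the control path gets zero padding in place of the encoder skip connection, so its channel
+ # count matches the input expected by the ctrl up-subblock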
+ empty = torch.zeros_like(hs_base_new)
983
+
984
+ h_ctrl = torch.cat([h_ctrl, empty], dim=1) # concat info from ctrl encoder + skip connections
985
+ h_ctrl = m_ctrl(h_ctrl, temb_ctrl, cemb_ctrl, attention_mask, cross_attention_kwargs) # C - apply ctrl subblock
986
+ h_base = m_base(h_base_with_skip, temb, cemb, attention_mask, cross_attention_kwargs)
987
+
988
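+ # same token-wise collaboration mixing as in the mid block, using the next projection from up_zero_convs_out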
+ batch_size, channels, height, width = h_ctrl.shape
989
+ colab_input = torch.cat([h_ctrl, h_base], dim=1).view(batch_size, channels * 2, height * width).permute(0, 2, 1)
990
+ colab_output = next(it_up_convs_out)(colab_input)
991
+ colab_output = colab_output.permute(0, 2, 1).view(batch_size, channels * 2, height, width)
992
+ h_ctrl, h_base_output = torch.chunk(colab_output, 2, dim=1)
993
+ h_base = h_base * (1 - conditioning_scale) + h_base_output * conditioning_scale
994
+
995
+
996
+
997
+
998
+
999
+ #hn_ctrl = next(it_up_convs_out)(h_ctrl)
1000
+ #print(hn_ctrl)
1001
+ #h_base = h_base + hn_ctrl * next(scales) # D - add ctrl -> base
1002
+
1003
+
1004
+
1005
+
1006
+
1007
+
1008
+
1009
+
1010
+ h_base = base_model.conv_norm_out(h_base)
1011
+ h_base = base_model.conv_act(h_base)
1012
+ h_base = base_model.conv_out(h_base)
1013
+
1014
+ if not return_dict:
1015
+ return h_base
1016
+
1017
+ return ControlNetXSOutput(sample=h_base)
1018
+
1019
+ #needs new stuff to work correctly
1020
+ # def pre_process(
1021
+ # self,
1022
+ # base_model: UNet2DConditionModel,
1023
+ # sample: torch.FloatTensor,
1024
+ # timestep: Union[torch.Tensor, float, int],
1025
+ # encoder_hidden_states: torch.Tensor,
1026
+ # controlnet_cond: torch.Tensor,
1027
+ # conditioning_scale: float = 1.0,
1028
+ # class_labels: Optional[torch.Tensor] = None,
1029
+ # timestep_cond: Optional[torch.Tensor] = None,
1030
+ # attention_mask: Optional[torch.Tensor] = None,
1031
+ # cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1032
+ # added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None,
1033
+ # return_dict: bool = True
1034
+ # ):
1035
+ # """
1036
+ # The [`ControlNetModel`] forward method.
1037
+
1038
+ # Args:
1039
+ # base_model (`UNet2DConditionModel`):
1040
+ # The base unet model we want to control.
1041
+ # sample (`torch.FloatTensor`):
1042
+ # The noisy input tensor.
1043
+ # timestep (`Union[torch.Tensor, float, int]`):
1044
+ # The number of timesteps to denoise an input.
1045
+ # encoder_hidden_states (`torch.Tensor`):
1046
+ # The encoder hidden states.
1047
+ # controlnet_cond (`torch.FloatTensor`):
1048
+ # The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`.
1049
+ # conditioning_scale (`float`, defaults to `1.0`):
1050
+ # How much the control model affects the base model outputs.
1051
+ # class_labels (`torch.Tensor`, *optional*, defaults to `None`):
1052
+ # Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings.
1053
+ # timestep_cond (`torch.Tensor`, *optional*, defaults to `None`):
1054
+ # Additional conditional embeddings for timestep. If provided, the embeddings will be summed with the
1055
+ # timestep_embedding passed through the `self.time_embedding` layer to obtain the final timestep
1056
+ # embeddings.
1057
+ # attention_mask (`torch.Tensor`, *optional*, defaults to `None`):
1058
+ # An attention mask of shape `(batch, key_tokens)` is applied to `encoder_hidden_states`. If `1` the mask
1059
+ # is kept, otherwise if `0` it is discarded. Mask will be converted into a bias, which adds large
1060
+ # negative values to the attention scores corresponding to "discard" tokens.
1061
+ # added_cond_kwargs (`dict`):
1062
+ # Additional conditions for the Stable Diffusion XL UNet.
1063
+ # cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`):
1064
+ # A kwargs dictionary that if specified is passed along to the `AttnProcessor`.
1065
+ # return_dict (`bool`, defaults to `True`):
1066
+ # Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple.
1067
+
1068
+ # Returns:
1069
+ # [`~models.controlnetxs.ControlNetXSOutput`] **or** `tuple`:
1070
+ # If `return_dict` is `True`, a [`~models.controlnetxs.ControlNetXSOutput`] is returned, otherwise a
1071
+ # tuple is returned where the first element is the sample tensor.
1072
+ # """
1073
+ # # check channel order
1074
+ # channel_order = self.config.controlnet_conditioning_channel_order
1075
+
1076
+ # if channel_order == "rgb":
1077
+ # # in rgb order by default
1078
+ # ...
1079
+ # elif channel_order == "bgr":
1080
+ # controlnet_cond = torch.flip(controlnet_cond, dims=[1])
1081
+ # else:
1082
+ # raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}")
1083
+
1084
+ # # scale control strength
1085
+ # n_connections = len(self.control_model.down_zero_convs_out) + 1 + len(self.control_model.up_zero_convs_out)
1086
+ # scale_list = torch.full((n_connections,), conditioning_scale)
1087
+
1088
+ # # prepare attention_mask
1089
+ # if attention_mask is not None:
1090
+ # attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0
1091
+ # attention_mask = attention_mask.unsqueeze(1)
1092
+
1093
+ # # 1. time
1094
+ # timesteps = timestep
1095
+ # if not torch.is_tensor(timesteps):
1096
+ # # TODO: this requires sync between CPU and GPU. So try to pass timesteps as tensors if you can
1097
+ # # This would be a good case for the `match` statement (Python 3.10+)
1098
+ # is_mps = sample.device.type == "mps"
1099
+ # if isinstance(timestep, float):
1100
+ # dtype = torch.float32 if is_mps else torch.float64
1101
+ # else:
1102
+ # dtype = torch.int32 if is_mps else torch.int64
1103
+ # timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device)
1104
+ # elif len(timesteps.shape) == 0:
1105
+ # timesteps = timesteps[None].to(sample.device)
1106
+
1107
+ # # broadcast to batch dimension in a way that's compatible with ONNX/Core ML
1108
+ # timesteps = timesteps.expand(sample.shape[0])
1109
+
1110
+ # t_emb = base_model.time_proj(timesteps)
1111
+
1112
+ # # timesteps does not contain any weights and will always return f32 tensors
1113
+ # # but time_embedding might actually be running in fp16. so we need to cast here.
1114
+ # # there might be better ways to encapsulate this.
1115
+ # t_emb = t_emb.to(dtype=sample.dtype)
1116
+
1117
+ # if self.config.learn_embedding:
1118
+ # ctrl_temb = self.control_model.time_embedding(t_emb, timestep_cond)
1119
+ # base_temb = base_model.time_embedding(t_emb, timestep_cond)
1120
+ # interpolation_param = self.config.time_embedding_mix**0.3
1121
+
1122
+ # temb = ctrl_temb * interpolation_param + base_temb * (1 - interpolation_param)
1123
+ # else:
1124
+ # temb = base_model.time_embedding(t_emb)
1125
+
1126
+ # # added time & text embeddings
1127
+ # aug_emb = None
1128
+
1129
+ # if base_model.class_embedding is not None:
1130
+ # if class_labels is None:
1131
+ # raise ValueError("class_labels should be provided when num_class_embeds > 0")
1132
+
1133
+ # if base_model.config.class_embed_type == "timestep":
1134
+ # class_labels = base_model.time_proj(class_labels)
1135
+
1136
+ # class_emb = base_model.class_embedding(class_labels).to(dtype=self.dtype)
1137
+ # temb = temb + class_emb
1138
+
1139
+ # if base_model.config.addition_embed_type is not None:
1140
+ # if base_model.config.addition_embed_type == "text":
1141
+ # aug_emb = base_model.add_embedding(encoder_hidden_states)
1142
+ # elif base_model.config.addition_embed_type == "text_image":
1143
+ # raise NotImplementedError()
1144
+ # elif base_model.config.addition_embed_type == "text_time":
1145
+ # # SDXL - style
1146
+ # if "text_embeds" not in added_cond_kwargs:
1147
+ # raise ValueError(
1148
+ # f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`"
1149
+ # )
1150
+ # text_embeds = added_cond_kwargs.get("text_embeds")
1151
+ # if "time_ids" not in added_cond_kwargs:
1152
+ # raise ValueError(
1153
+ # f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`"
1154
+ # )
1155
+ # time_ids = added_cond_kwargs.get("time_ids")
1156
+ # time_embeds = base_model.add_time_proj(time_ids.flatten())
1157
+ # time_embeds = time_embeds.reshape((text_embeds.shape[0], -1))
1158
+ # add_embeds = torch.concat([text_embeds, time_embeds], dim=-1)
1159
+ # add_embeds = add_embeds.to(temb.dtype)
1160
+ # aug_emb = base_model.add_embedding(add_embeds)
1161
+ # elif base_model.config.addition_embed_type == "image":
1162
+ # raise NotImplementedError()
1163
+ # elif base_model.config.addition_embed_type == "image_hint":
1164
+ # raise NotImplementedError()
1165
+
1166
+ # temb = temb + aug_emb if aug_emb is not None else temb
1167
+
1168
+ # # text embeddings
1169
+ # cemb = encoder_hidden_states
1170
+
1171
+ # # Preparation
1172
+ # guided_hint = self.controlnet_cond_embedding(controlnet_cond)
1173
+ # #guided_hint=None
1174
+ # # h_ctrl = h_base = sample
1175
+ # # hs_base, hs_ctrl = [], []
1176
+ # # it_down_convs_in, it_down_convs_out, it_up_convs_in, it_up_convs_out = map(
1177
+ # # iter, (self.control_model.down_zero_convs_in, self.control_model.down_zero_convs_out, self.control_model.up_zero_convs_in, self.control_model.up_zero_convs_out)
1178
+ # # )
1179
+ # scales = iter(scale_list)
1180
+
1181
+ # return temb,cemb,scales,guided_hint
1182
+
1183
+ def _make_zero_conv(self, in_channels, out_channels=None):
1184
+ # keep running track of channels sizes
1185
+ #self.in_channels = in_channels
1186
+ #self.out_channels = out_channels or in_channels
1187
+ #
1188
+ out_channels = out_channels or in_channels  # default to in_channels when not given
+ return zero_module(nn.Conv2d(in_channels, out_channels, 1, padding=0))
1189
+ def _make_identity_conv(self, in_channels, out_channels=None):
1190
+ out_channels = out_channels or in_channels  # default to in_channels when not given
1191
+ return nn.Conv2d(in_channels, out_channels, kernel_size=1, padding=0, bias=False)
1192
+
1193
+ @torch.no_grad()
1194
+ def _check_if_vae_compatible(self, vae: AutoencoderKL):
1195
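+ """Check whether the conditioning embedding downscales by the same factor as the VAE."""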
+ condition_downscale_factor = 2 ** (len(self.config.conditioning_embedding_out_channels) - 1)
1196
+ vae_downscale_factor = 2 ** (len(vae.config.block_out_channels) - 1)
1197
+ compatible = condition_downscale_factor == vae_downscale_factor
1198
+ return compatible, condition_downscale_factor, vae_downscale_factor
1199
+
1200
+ def to_sub_blocks(self, blocks):
1201
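+ """Split blocks into SubBlocks: (resnet, attention) pairs or lone resnets, with upsamplers appended to the preceding subblock and downsamplers as their own subblock."""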
+ if not is_iterable(blocks):
1202
+ blocks = [blocks]
1203
+
1204
+ sub_blocks = []
1205
+
1206
+ for b in blocks:
1207
+ if hasattr(b, "resnets"):
1208
+ if hasattr(b, "attentions") and b.attentions is not None:
1209
+ for r, a in zip(b.resnets, b.attentions):
1210
+ sub_blocks.append([r, a])
1211
+
1212
+ num_resnets = len(b.resnets)
1213
+ num_attns = len(b.attentions)
1214
+
1215
+ if num_resnets > num_attns:
1216
+ # we can have more resnets than attentions, so add each resnet as separate subblock
1217
+ for i in range(num_attns, num_resnets):
1218
+ sub_blocks.append([b.resnets[i]])
1219
+ else:
1220
+ for r in b.resnets:
1221
+ sub_blocks.append([r])
1222
+
1223
+ # upsamplers are part of the same subblock
1224
+ if hasattr(b, "upsamplers") and b.upsamplers is not None:
1225
+ for u in b.upsamplers:
1226
+ sub_blocks[-1].extend([u])
1227
+
1228
+ # downsamplers are own subblock
1229
+ if hasattr(b, "downsamplers") and b.downsamplers is not None:
1230
+ for d in b.downsamplers:
1231
+ sub_blocks.append([d])
1232
+
1233
+ return list(map(SubBlock, sub_blocks))
1234
+
1235
+
1236
+ class SubBlock(nn.ModuleList):
1237
+ """A SubBlock is the largest piece of either the base or control model that is executed independently of the other model.
1238
+ Before each subblock, information is concatenated from base to control; after each subblock, information is added from control to base.
1239
+ """
1240
+
1241
+ def __init__(self, ms, *args, **kwargs):
1242
+ if not is_iterable(ms):
1243
+ ms = [ms]
1244
+ super().__init__(ms, *args, **kwargs)
1245
+
1246
+ def forward(
1247
+ self,
1248
+ x: torch.Tensor,
1249
+ temb: torch.Tensor,
1250
+ cemb: torch.Tensor,
1251
+ attention_mask: Optional[torch.Tensor] = None,
1252
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
1253
+ ):
1254
+ """Iterate through children and pass correct information to each."""
1255
+ for m in self:
1256
+ if isinstance(m, ResnetBlock2D):
1257
+ x = m(x, temb)
1258
+ elif isinstance(m, Transformer2DModel):
1259
+ x = m(x, cemb, attention_mask=attention_mask, cross_attention_kwargs=cross_attention_kwargs).sample
1260
+ elif isinstance(m, Downsample2D):
1261
+ x = m(x)
1262
+ elif isinstance(m, Upsample2D):
1263
+ x = m(x)
1264
+ else:
1265
+ raise ValueError(
1266
+ f"Type of m is {type(m)} but should be `ResnetBlock2D`, `Transformer2DModel`, `Downsample2D` or `Upsample2D`"
1267
+ )
1268
+
1269
+ return x
1270
+
1271
+
1272
+ def adjust_time_dims(unet: UNet2DConditionModel, in_dim: int, out_dim: int):
1273
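+ """Replace the first linear layer of the UNet's time embedding so it maps in_dim -> out_dim."""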
+ unet.time_embedding.linear_1 = nn.Linear(in_dim, out_dim)
1274
+
1275
+
1276
+ def increase_block_input_in_encoder_resnet(unet: UNet2DConditionModel, block_no, resnet_idx, by):
1277
+ """Increase channel sizes to allow for additional concatenated information from the base model."""
1278
+ r = unet.down_blocks[block_no].resnets[resnet_idx]
1279
+ old_norm1, old_conv1 = r.norm1, r.conv1
1280
+ # norm
1281
+ norm_args = "num_groups num_channels eps affine".split(" ")
1282
+ for a in norm_args:
1283
+ assert hasattr(old_norm1, a)
1284
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
1285
+ norm_kwargs["num_channels"] += by # surgery done here
1286
+ # conv1
1287
+ conv1_args = [
1288
+ "in_channels",
1289
+ "out_channels",
1290
+ "kernel_size",
1291
+ "stride",
1292
+ "padding",
1293
+ "dilation",
1294
+ "groups",
1295
+ "bias",
1296
+ "padding_mode",
1297
+ ]
1298
+ #if not USE_PEFT_BACKEND:
1299
+ # conv1_args.append("lora_layer")
1300
+
1301
+ for a in conv1_args:
1302
+ assert hasattr(old_conv1, a)
1303
+
1304
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
1305
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
1306
+ conv1_kwargs["in_channels"] += by # surgery done here
1307
+ # conv_shortcut
1308
+ # as we changed the input size of the block, the input and output sizes are likely different,
1309
+ # therefore we need a conv_shortcut (simply adding won't work)
1310
+ conv_shortcut_args_kwargs = {
1311
+ "in_channels": conv1_kwargs["in_channels"],
1312
+ "out_channels": conv1_kwargs["out_channels"],
1313
+ # default arguments from resnet.__init__
1314
+ "kernel_size": 1,
1315
+ "stride": 1,
1316
+ "padding": 0,
1317
+ "bias": True,
1318
+ }
1319
+ # swap old with new modules
1320
+ unet.down_blocks[block_no].resnets[resnet_idx].norm1 = GroupNorm(**norm_kwargs)
1321
+ unet.down_blocks[block_no].resnets[resnet_idx].conv1 = (
1322
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
1323
+ )
1324
+ unet.down_blocks[block_no].resnets[resnet_idx].conv_shortcut = (
1325
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
1326
+ )
1327
+ print(f"increasing down {unet.down_blocks[block_no].resnets[resnet_idx].in_channels} by {by}")
1328
+ unet.down_blocks[block_no].resnets[resnet_idx].in_channels += by # surgery done here
1329
+
1330
+ def increase_block_input_in_decoder_resnet(unet: UNet2DConditionModel, block_no, resnet_idx, by):
1331
+ """Increase channel sizes to allow for additional concatenated information from the base model."""
1332
+ r = unet.up_blocks[block_no].resnets[resnet_idx]
1333
+ old_norm1, old_conv1 = r.norm1, r.conv1
1334
+ # norm
1335
+ norm_args = "num_groups num_channels eps affine".split(" ")
1336
+ for a in norm_args:
1337
+ assert hasattr(old_norm1, a)
1338
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
1339
+ norm_kwargs["num_channels"] += by # surgery done here
1340
+ # conv1
1341
+ conv1_args = [
1342
+ "in_channels",
1343
+ "out_channels",
1344
+ "kernel_size",
1345
+ "stride",
1346
+ "padding",
1347
+ "dilation",
1348
+ "groups",
1349
+ "bias",
1350
+ "padding_mode",
1351
+ ]
1352
+ #if not USE_PEFT_BACKEND:
1353
+ # conv1_args.append("lora_layer")
1354
+
1355
+ for a in conv1_args:
1356
+ assert hasattr(old_conv1, a)
1357
+
1358
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
1359
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
1360
+ conv1_kwargs["in_channels"] += by # surgery done here
1361
+ # conv_shortcut
1362
+ # as we changed the input size of the block, the input and output sizes are likely different,
1363
+ # therefore we need a conv_shortcut (simply adding won't work)
1364
+ conv_shortcut_args_kwargs = {
1365
+ "in_channels": conv1_kwargs["in_channels"],
1366
+ "out_channels": conv1_kwargs["out_channels"],
1367
+ # default arguments from resnet.__init__
1368
+ "kernel_size": 1,
1369
+ "stride": 1,
1370
+ "padding": 0,
1371
+ "bias": True,
1372
+ }
1373
+ # swap old with new modules
1374
+ unet.up_blocks[block_no].resnets[resnet_idx].norm1 = GroupNorm(**norm_kwargs)
1375
+ unet.up_blocks[block_no].resnets[resnet_idx].conv1 = (
1376
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
1377
+ )
1378
+ unet.up_blocks[block_no].resnets[resnet_idx].conv_shortcut = (
1379
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
1380
+ )
1381
+
1382
+
1383
+ #by =unet.up_blocks[block_no].resnets[resnet_idx].out_channels
1384
+ print(f"increasing up {unet.up_blocks[block_no].resnets[resnet_idx].in_channels} by {by}")
1385
+ unet.up_blocks[block_no].resnets[resnet_idx].in_channels += by # surgery done here
1386
+
1387
+
1388
+ def increase_block_input_in_encoder_downsampler(unet: UNet2DConditionModel, block_no, by):
1389
+ """Increase channel sizes of the encoder downsampler to allow for additional concatenated information from the base model."""
1390
+ old_down = unet.down_blocks[block_no].downsamplers[0].conv
1391
+
1392
+ args = [
1393
+ "in_channels",
1394
+ "out_channels",
1395
+ "kernel_size",
1396
+ "stride",
1397
+ "padding",
1398
+ "dilation",
1399
+ "groups",
1400
+ "bias",
1401
+ "padding_mode",
1402
+ ]
1403
+ #if not USE_PEFT_BACKEND:
1404
+ # args.append("lora_layer")
1405
+
1406
+ for a in args:
1407
+ assert hasattr(old_down, a)
1408
+ kwargs = {a: getattr(old_down, a) for a in args}
1409
+ kwargs["bias"] = "bias" in kwargs # as param, bias is a boolean, but as attr, it's a tensor.
1410
+ kwargs["in_channels"] += by # surgery done here
1411
+ # swap old with new modules
1412
+ unet.down_blocks[block_no].downsamplers[0].conv = (
1413
+ nn.Conv2d(**kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**kwargs)
1414
+ )
1415
+ unet.down_blocks[block_no].downsamplers[0].channels += by # surgery done here
1416
+
1417
+
1418
+ def increase_block_input_in_decoder_downsampler(unet: UNet2DConditionModel, block_no, by):
1419
+ """Increase channel sizes of the decoder upsampler to allow for additional concatenated information from the base model."""
1420
+ old_down = unet.up_blocks[block_no].upsamplers[0].conv
1421
+
1422
+ args = [
1423
+ "in_channels",
1424
+ "out_channels",
1425
+ "kernel_size",
1426
+ "stride",
1427
+ "padding",
1428
+ "dilation",
1429
+ "groups",
1430
+ "bias",
1431
+ "padding_mode",
1432
+ ]
1433
+ if not USE_PEFT_BACKEND:
1434
+ args.append("lora_layer")
1435
+
1436
+ for a in args:
1437
+ assert hasattr(old_down, a)
1438
+ kwargs = {a: getattr(old_down, a) for a in args}
1439
+ kwargs["bias"] = "bias" in kwargs # as param, bias is a boolean, but as attr, it's a tensor.
1440
+ kwargs["in_channels"] += by # surgery done here
1441
+ # swap old with new modules
1442
+ unet.up_blocks[block_no].upsamplers[0].conv = (
1443
+ nn.Conv2d(**kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**kwargs)
1444
+ )
1445
+ unet.up_blocks[block_no].upsamplers[0].channels += by # surgery done here
1446
+
1447
+ def increase_block_input_in_mid_resnet(unet: UNet2DConditionModel, by):
1448
+ """Increase channel sizes to allow for additional concatenated information from the base model."""
1449
+ m = unet.mid_block.resnets[0]
1450
+ old_norm1, old_conv1 = m.norm1, m.conv1
1451
+ # norm
1452
+ norm_args = "num_groups num_channels eps affine".split(" ")
1453
+ for a in norm_args:
1454
+ assert hasattr(old_norm1, a)
1455
+ norm_kwargs = {a: getattr(old_norm1, a) for a in norm_args}
1456
+ norm_kwargs["num_channels"] += by # surgery done here
1457
+ conv1_args = [
1458
+ "in_channels",
1459
+ "out_channels",
1460
+ "kernel_size",
1461
+ "stride",
1462
+ "padding",
1463
+ "dilation",
1464
+ "groups",
1465
+ "bias",
1466
+ "padding_mode",
1467
+ ]
1468
+ #if not USE_PEFT_BACKEND:
1469
+ # conv1_args.append("lora_layer")
1470
+
1471
+ conv1_kwargs = {a: getattr(old_conv1, a) for a in conv1_args}
1472
+ conv1_kwargs["bias"] = "bias" in conv1_kwargs # as param, bias is a boolean, but as attr, it's a tensor.
1473
+ conv1_kwargs["in_channels"] += by # surgery done here
1474
+ # conv_shortcut
1475
+ # as we changed the input size of the block, the input and output sizes are likely different,
1476
+ # therefore we need a conv_shortcut (simply adding won't work)
1477
+ conv_shortcut_args_kwargs = {
1478
+ "in_channels": conv1_kwargs["in_channels"],
1479
+ "out_channels": conv1_kwargs["out_channels"],
1480
+ # default arguments from resnet.__init__
1481
+ "kernel_size": 1,
1482
+ "stride": 1,
1483
+ "padding": 0,
1484
+ "bias": True,
1485
+ }
1486
+ # swap old with new modules
1487
+ unet.mid_block.resnets[0].norm1 = GroupNorm(**norm_kwargs)
1488
+ unet.mid_block.resnets[0].conv1 = (
1489
+ nn.Conv2d(**conv1_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv1_kwargs)
1490
+ )
1491
+ unet.mid_block.resnets[0].conv_shortcut = (
1492
+ nn.Conv2d(**conv_shortcut_args_kwargs) if USE_PEFT_BACKEND else LoRACompatibleConv(**conv_shortcut_args_kwargs)
1493
+ )
1494
+ unet.mid_block.resnets[0].in_channels += by # surgery done here
1495
+
1496
+
1497
+ def adjust_group_norms(unet: UNet2DConditionModel, max_num_group: int = 32):
1498
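+ """For GroupNorms with fewer than max_num_group groups, set num_groups to the largest divisor of the channel count that does not exceed max_num_group."""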
+ def find_denominator(number, start):
1499
+ if start >= number:
1500
+ return number
1501
+ while start != 0:
1502
+ residual = number % start
1503
+ if residual == 0:
1504
+ return start
1505
+ start -= 1
1506
+
1507
+ for block in [*unet.down_blocks, unet.mid_block]:
1508
+ # resnets
1509
+ for r in block.resnets:
1510
+ if r.norm1.num_groups < max_num_group:
1511
+ r.norm1.num_groups = find_denominator(r.norm1.num_channels, start=max_num_group)
1512
+
1513
+ if r.norm2.num_groups < max_num_group:
1514
+ r.norm2.num_groups = find_denominator(r.norm2.num_channels, start=max_num_group)
1515
+
1516
+ # transformers
1517
+ if hasattr(block, "attentions"):
1518
+ for a in block.attentions:
1519
+ if a.norm.num_groups < max_num_group:
1520
+ a.norm.num_groups = find_denominator(a.norm.num_channels, start=max_num_group)
1521
+
1522
+
1523
+ def is_iterable(o):
1524
+ if isinstance(o, str):
1525
+ return False
1526
+ try:
1527
+ iter(o)
1528
+ return True
1529
+ except TypeError:
1530
+ return False
1531
+
1532
+
1533
+
1534
+ def save_debug_image(image, folder='debug_images', noise_threshold=0.1):
1535
+ os.makedirs(folder, exist_ok=True)
1536
+ timestamp = datetime.datetime.now().strftime("%Y%m%d_%H%M%S_%f")
1537
+ filename = f"debug_image_{timestamp}.png"
1538
+ filepath = os.path.join(folder, filename)
1539
+
1540
+ print("Debugging image information:")
1541
+ print(f"Type of image: {type(image)}")
1542
+
1543
+ if isinstance(image, torch.Tensor):
1544
+ print(f"Image tensor shape: {image.shape}")
1545
+ print(f"Image tensor dtype: {image.dtype}")
1546
+ print(f"Image tensor device: {image.device}")
1547
+ print(f"Image tensor min: {image.min()}, max: {image.max()}")
1548
+ image_np = image.cpu().detach().numpy()
1549
+ elif isinstance(image, np.ndarray):
1550
+ image_np = image
1551
+ else:
1552
+ print(f"Unexpected image type: {type(image)}")
1553
+ return
1554
+
1555
+ print(f"Numpy array shape: {image_np.shape}")
1556
+ print(f"Numpy array dtype: {image_np.dtype}")
1557
+ print(f"Numpy array min: {image_np.min()}, max: {image_np.max()}")
1558
+
1559
+ # Handle different array shapes
1560
+ if image_np.ndim == 4:
1561
+ image_np = np.squeeze(image_np, axis=0)
1562
+ image_np = np.transpose(image_np, (1, 2, 0))
1563
+ elif image_np.ndim == 3:
1564
+ if image_np.shape[0] in [1, 3, 4]:
1565
+ image_np = np.transpose(image_np, (1, 2, 0))
1566
+ elif image_np.ndim == 2:
1567
+ image_np = np.expand_dims(image_np, axis=-1)
1568
+
1569
+ print(f"Processed numpy array shape: {image_np.shape}")
1570
+
1571
+ # Normalize the image, accounting for noise
1572
+ if image_np.dtype != np.uint8:
1573
+ if image_np.max() <= 1 + noise_threshold:
1574
+ # Assume the image is in [0, 1] range with some noise
1575
+ image_np = np.clip(image_np, 0, 1)
1576
+ image_np = (image_np * 255).astype(np.uint8)
1577
+ else:
1578
+ # Assume the image is in a wider range, possibly due to noise
1579
+ lower_percentile = np.percentile(image_np, 1)
1580
+ upper_percentile = np.percentile(image_np, 99)
1581
+ image_np = np.clip(image_np, lower_percentile, upper_percentile)
1582
+ image_np = ((image_np - lower_percentile) / (upper_percentile - lower_percentile) * 255).astype(np.uint8)
1583
+
1584
+ print(f"Normalized array min: {image_np.min()}, max: {image_np.max()}")
1585
+
1586
+ try:
1587
+ image_pil = Image.fromarray(image_np.squeeze() if image_np.shape[-1] == 1 else image_np)
1588
+ image_pil.save(filepath)
1589
+ print(f"Debug image saved as '{filepath}'")
1590
+ except Exception as e:
1591
+ print(f"Error saving image: {str(e)}")
1592
+ print("Attempting to save as numpy array...")
1593
+ np_filepath = filepath.replace('.png', '.npy')
1594
+ np.save(np_filepath, image_np)
1595
+ print(f"Numpy array saved as '{np_filepath}'")
1596
+
1597
+
1598
+
1599
+
1600
+ def zero_module(module):
1601
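+ """Zero-initialize all parameters of a module and return it."""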
+ for p in module.parameters():
1602
+ nn.init.zeros_(p)
1603
+ return module
controlnet/pipline_controlnet_xs_v2.py ADDED
@@ -0,0 +1,1227 @@
1
+ # Copyright 2024 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+ import inspect
15
+ from typing import Any, Callable, Dict, List, Optional, Union
16
+ import numpy as np
17
+
18
+ import torch
19
+ from packaging import version
20
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer, CLIPVisionModelWithProjection, SiglipVisionModel, AutoProcessor
21
+ from controlnet.controlnetxs_appearance import StyleCodesModel
22
+ from PIL import Image
23
+
24
+ from diffusers.callbacks import MultiPipelineCallbacks, PipelineCallback
25
+ from diffusers.configuration_utils import FrozenDict
26
+ from diffusers.image_processor import PipelineImageInput, VaeImageProcessor
27
+ from diffusers.loaders import FromSingleFileMixin, IPAdapterMixin, LoraLoaderMixin, TextualInversionLoaderMixin
28
+ from diffusers.models import AutoencoderKL, ImageProjection, UNet2DConditionModel
29
+ from diffusers.models.lora import adjust_lora_scale_text_encoder
30
+ from diffusers.schedulers import KarrasDiffusionSchedulers
31
+ from diffusers.utils import (
32
+ USE_PEFT_BACKEND,
33
+ deprecate,
34
+ logging,
35
+ replace_example_docstring,
36
+ scale_lora_layers,
37
+ unscale_lora_layers,
38
+ )
39
+ from diffusers.utils.torch_utils import randn_tensor
40
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
41
+ from diffusers.pipelines.stable_diffusion.pipeline_output import StableDiffusionPipelineOutput
42
+ from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
43
+
44
+
45
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
46
+
47
+ EXAMPLE_DOC_STRING = """
48
+ Examples:
49
+ ```py
50
+ >>> import torch
51
+ >>> from diffusers import StableDiffusionPipeline
52
+
53
+ >>> pipe = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-v1-5", torch_dtype=torch.float16)
54
+ >>> pipe = pipe.to("cuda")
55
+
56
+ >>> prompt = "a photo of an astronaut riding a horse on mars"
57
+ >>> image = pipe(prompt).images[0]
58
+ ```
59
+ """
60
+
61
+
62
+ def rescale_noise_cfg(noise_cfg, noise_pred_text, guidance_rescale=0.0):
63
+ """
64
+ Rescale `noise_cfg` according to `guidance_rescale`. Based on findings of [Common Diffusion Noise Schedules and
65
+ Sample Steps are Flawed](https://arxiv.org/pdf/2305.08891.pdf). See Section 3.4
66
+ """
67
+ std_text = noise_pred_text.std(dim=list(range(1, noise_pred_text.ndim)), keepdim=True)
68
+ std_cfg = noise_cfg.std(dim=list(range(1, noise_cfg.ndim)), keepdim=True)
69
+ # rescale the results from guidance (fixes overexposure)
70
+ noise_pred_rescaled = noise_cfg * (std_text / std_cfg)
71
+ # mix with the original results from guidance by factor guidance_rescale to avoid "plain looking" images
72
+ noise_cfg = guidance_rescale * noise_pred_rescaled + (1 - guidance_rescale) * noise_cfg
73
+ return noise_cfg
74
+
75
+
76
+ def retrieve_timesteps(
77
+ scheduler,
78
+ num_inference_steps: Optional[int] = None,
79
+ device: Optional[Union[str, torch.device]] = None,
80
+ timesteps: Optional[List[int]] = None,
81
+ sigmas: Optional[List[float]] = None,
82
+ **kwargs,
83
+ ):
84
+ """
85
+ Calls the scheduler's `set_timesteps` method and retrieves timesteps from the scheduler after the call. Handles
86
+ custom timesteps. Any kwargs will be supplied to `scheduler.set_timesteps`.
87
+
88
+ Args:
89
+ scheduler (`SchedulerMixin`):
90
+ The scheduler to get timesteps from.
91
+ num_inference_steps (`int`):
92
+ The number of diffusion steps used when generating samples with a pre-trained model. If used, `timesteps`
93
+ must be `None`.
94
+ device (`str` or `torch.device`, *optional*):
95
+ The device to which the timesteps should be moved to. If `None`, the timesteps are not moved.
96
+ timesteps (`List[int]`, *optional*):
97
+ Custom timesteps used to override the timestep spacing strategy of the scheduler. If `timesteps` is passed,
98
+ `num_inference_steps` and `sigmas` must be `None`.
99
+ sigmas (`List[float]`, *optional*):
100
+ Custom sigmas used to override the timestep spacing strategy of the scheduler. If `sigmas` is passed,
101
+ `num_inference_steps` and `timesteps` must be `None`.
102
+
103
+ Returns:
104
+ `Tuple[torch.Tensor, int]`: A tuple where the first element is the timestep schedule from the scheduler and the
105
+ second element is the number of inference steps.
106
+ """
107
+ if timesteps is not None and sigmas is not None:
108
+ raise ValueError("Only one of `timesteps` or `sigmas` can be passed. Please choose one to set custom values")
109
+ if timesteps is not None:
110
+ accepts_timesteps = "timesteps" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
111
+ if not accepts_timesteps:
112
+ raise ValueError(
113
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
114
+ f" timestep schedules. Please check whether you are using the correct scheduler."
115
+ )
116
+ scheduler.set_timesteps(timesteps=timesteps, device=device, **kwargs)
117
+ timesteps = scheduler.timesteps
118
+ num_inference_steps = len(timesteps)
119
+ elif sigmas is not None:
120
+ accept_sigmas = "sigmas" in set(inspect.signature(scheduler.set_timesteps).parameters.keys())
121
+ if not accept_sigmas:
122
+ raise ValueError(
123
+ f"The current scheduler class {scheduler.__class__}'s `set_timesteps` does not support custom"
124
+ f" sigmas schedules. Please check whether you are using the correct scheduler."
125
+ )
126
+ scheduler.set_timesteps(sigmas=sigmas, device=device, **kwargs)
127
+ timesteps = scheduler.timesteps
128
+ num_inference_steps = len(timesteps)
129
+ else:
130
+ scheduler.set_timesteps(num_inference_steps, device=device, **kwargs)
131
+ timesteps = scheduler.timesteps
132
+ return timesteps, num_inference_steps
133
+
134
+
135
+ class StableDiffusionPipelineXSv2(
136
+ DiffusionPipeline,
137
+ StableDiffusionMixin,
138
+ TextualInversionLoaderMixin,
139
+ LoraLoaderMixin,
140
+ IPAdapterMixin,
141
+ FromSingleFileMixin,
142
+ ):
143
+ r"""
144
+ Pipeline for text-to-image generation using Stable Diffusion.
145
+
146
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
147
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
148
+
149
+ The pipeline also inherits the following loading methods:
150
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
151
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
152
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
153
+ - [`~loaders.FromSingleFileMixin.from_single_file`] for loading `.ckpt` files
154
+ - [`~loaders.IPAdapterMixin.load_ip_adapter`] for loading IP Adapters
155
+
156
+ Args:
157
+ vae ([`AutoencoderKL`]):
158
+ Variational Auto-Encoder (VAE) model to encode and decode images to and from latent representations.
159
+ text_encoder ([`~transformers.CLIPTextModel`]):
160
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
161
+ tokenizer ([`~transformers.CLIPTokenizer`]):
162
+ A `CLIPTokenizer` to tokenize text.
163
+ unet ([`UNet2DConditionModel`]):
164
+ A `UNet2DConditionModel` to denoise the encoded image latents.
165
+ scheduler ([`SchedulerMixin`]):
166
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
167
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
168
+ safety_checker ([`StableDiffusionSafetyChecker`]):
169
+ Classification module that estimates whether generated images could be considered offensive or harmful.
170
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
171
+ about a model's potential harms.
172
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
173
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
174
+ """
175
+
176
+ model_cpu_offload_seq = "text_encoder->image_encoder->unet->vae"
177
+ _optional_components = ["safety_checker", "feature_extractor", "image_encoder"]
178
+ _exclude_from_cpu_offload = ["safety_checker"]
179
+ _callback_tensor_inputs = ["latents", "prompt_embeds", "negative_prompt_embeds"]
180
+
181
+ def __init__(
182
+ self,
183
+ vae: AutoencoderKL,
184
+ text_encoder: CLIPTextModel,
185
+ tokenizer: CLIPTokenizer,
186
+ unet: UNet2DConditionModel,
187
+ stylecodes_model: StyleCodesModel,
188
+
189
+ scheduler: KarrasDiffusionSchedulers,
190
+ safety_checker: StableDiffusionSafetyChecker,
191
+ feature_extractor: CLIPImageProcessor,
192
+ image_encoder: SiglipVisionModel = None,
193
+ requires_safety_checker: bool = True,
194
+ ):
195
+ super().__init__()
196
+
197
+ if hasattr(scheduler.config, "steps_offset") and scheduler.config.steps_offset != 1:
198
+ deprecation_message = (
199
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
200
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
201
+ "to update the config accordingly as leaving `steps_offset` might lead to incorrect results"
202
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
203
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
204
+ " file"
205
+ )
206
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
207
+ new_config = dict(scheduler.config)
208
+ new_config["steps_offset"] = 1
209
+ scheduler._internal_dict = FrozenDict(new_config)
210
+
211
+ if hasattr(scheduler.config, "clip_sample") and scheduler.config.clip_sample is True:
212
+ deprecation_message = (
213
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration `clip_sample`."
214
+ " `clip_sample` should be set to False in the configuration file. Please make sure to update the"
215
+ " config accordingly as not setting `clip_sample` in the config might lead to incorrect results in"
216
+ " future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it would be very"
217
+ " nice if you could open a Pull request for the `scheduler/scheduler_config.json` file"
218
+ )
219
+ deprecate("clip_sample not set", "1.0.0", deprecation_message, standard_warn=False)
220
+ new_config = dict(scheduler.config)
221
+ new_config["clip_sample"] = False
222
+ scheduler._internal_dict = FrozenDict(new_config)
223
+
224
+ if safety_checker is None and requires_safety_checker:
225
+ logger.warning(
226
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
227
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
228
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
229
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
230
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
231
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
232
+ )
233
+
234
+ if safety_checker is not None and feature_extractor is None:
235
+ raise ValueError(
236
+ f"Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
237
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
238
+ )
239
+
240
+ is_unet_version_less_0_9_0 = hasattr(unet.config, "_diffusers_version") and version.parse(
241
+ version.parse(unet.config._diffusers_version).base_version
242
+ ) < version.parse("0.9.0.dev0")
243
+ is_unet_sample_size_less_64 = hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
244
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
245
+ deprecation_message = (
246
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
247
+ " 64 which seems highly unlikely. If your checkpoint is a fine-tuned version of any of the"
248
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
249
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
250
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
251
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
252
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
253
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
254
+ " the `unet/config.json` file"
255
+ )
256
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
257
+ new_config = dict(unet.config)
258
+ new_config["sample_size"] = 64
259
+ unet._internal_dict = FrozenDict(new_config)
260
+
261
+
262
+ self.register_modules(
263
+ vae=vae,
264
+ text_encoder=text_encoder,
265
+ tokenizer=tokenizer,
266
+ unet=unet,
267
+ stylecodes_model=stylecodes_model,
268
+
269
+ scheduler=scheduler,
270
+ safety_checker=safety_checker,
271
+ feature_extractor=feature_extractor,
272
+ image_encoder=image_encoder,
273
+ )
274
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
275
+ self.clip_image_processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")
276
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
277
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
278
+ self.control_image_processor = VaeImageProcessor(
279
+ vae_scale_factor=self.vae_scale_factor, do_convert_rgb=True, do_normalize=False
280
+ )
281
+ if image_encoder is None:
282
+ self.image_encoder = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224").to(dtype=torch.float16, device="cuda")
283
+
284
+
285
+ @torch.inference_mode()
286
+ def get_image_embeds(self, pil_image=None):
287
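+ """Encode PIL image(s) with the image encoder and return the second-to-last hidden state as the image embeddings."""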
+ if isinstance(pil_image, Image.Image):
288
+ pil_image = [pil_image]
289
+ clip_image = self.clip_image_processor(images=pil_image, return_tensors="pt").pixel_values
290
+ clip_image = clip_image.to(self.device, dtype=torch.float16)
291
+ clip_image = {"pixel_values": clip_image}
292
+ clip_image_embeds = self.image_encoder(**clip_image, output_hidden_states=True).hidden_states[-2]
293
+
294
+ return clip_image_embeds
295
+
296
+
297
+
298
+ def _encode_prompt(
299
+ self,
300
+ prompt,
301
+ device,
302
+ num_images_per_prompt,
303
+ do_classifier_free_guidance,
304
+ negative_prompt=None,
305
+ prompt_embeds: Optional[torch.Tensor] = None,
306
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
307
+ lora_scale: Optional[float] = None,
308
+ **kwargs,
309
+ ):
310
+ deprecation_message = "`_encode_prompt()` is deprecated and it will be removed in a future version. Use `encode_prompt()` instead. Also, be aware that the output format changed from a concatenated tensor to a tuple."
311
+ deprecate("_encode_prompt()", "1.0.0", deprecation_message, standard_warn=False)
312
+
313
+ prompt_embeds_tuple = self.encode_prompt(
314
+ prompt=prompt,
315
+ device=device,
316
+ num_images_per_prompt=num_images_per_prompt,
317
+ do_classifier_free_guidance=do_classifier_free_guidance,
318
+ negative_prompt=negative_prompt,
319
+ prompt_embeds=prompt_embeds,
320
+ negative_prompt_embeds=negative_prompt_embeds,
321
+ lora_scale=lora_scale,
322
+ **kwargs,
323
+ )
324
+
325
+ # concatenate for backwards comp
326
+ prompt_embeds = torch.cat([prompt_embeds_tuple[1], prompt_embeds_tuple[0]])
327
+
328
+ return prompt_embeds
329
+
330
+ def encode_prompt(
331
+ self,
332
+ prompt,
333
+ device,
334
+ num_images_per_prompt,
335
+ do_classifier_free_guidance,
336
+ negative_prompt=None,
337
+ prompt_embeds: Optional[torch.Tensor] = None,
338
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
339
+ img_only_prompt_embeds: Optional[torch.Tensor] = None,
340
+ img_prompt_everything_cond: Optional[torch.Tensor] = None,
341
+ lora_scale: Optional[float] = None,
342
+ clip_skip: Optional[int] = None,
343
+ ):
344
+ r"""
345
+ Encodes the prompt into text encoder hidden states.
346
+
347
+ Args:
348
+ prompt (`str` or `List[str]`, *optional*):
349
+ prompt to be encoded
350
+ device: (`torch.device`):
351
+ torch device
352
+ num_images_per_prompt (`int`):
353
+ number of images that should be generated per prompt
354
+ do_classifier_free_guidance (`bool`):
355
+ whether to use classifier free guidance or not
356
+ negative_prompt (`str` or `List[str]`, *optional*):
357
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
358
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
359
+ less than `1`).
360
+ prompt_embeds (`torch.Tensor`, *optional*):
361
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
362
+ provided, text embeddings will be generated from `prompt` input argument.
363
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
364
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
365
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
366
+ argument.
367
+ lora_scale (`float`, *optional*):
368
+ A LoRA scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
369
+ clip_skip (`int`, *optional*):
370
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
371
+ the output of the pre-final layer will be used for computing the prompt embeddings.
372
+ """
373
+ # set lora scale so that monkey patched LoRA
374
+ # function of text encoder can correctly access it
375
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
376
+ self._lora_scale = lora_scale
377
+
378
+ # dynamically adjust the LoRA scale
379
+ if not USE_PEFT_BACKEND:
380
+ adjust_lora_scale_text_encoder(self.text_encoder, lora_scale)
381
+ else:
382
+ scale_lora_layers(self.text_encoder, lora_scale)
383
+
384
+ batch_size = 1
385
+ print("prompt ", prompt)
386
+ if prompt_embeds is None:
387
+ # textual inversion: process multi-vector tokens if necessary
388
+ #if isinstance(self, TextualInversionLoaderMixin):
389
+ # prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
390
+
391
+ text_inputs = self.tokenizer(
392
+ prompt,
393
+ padding="max_length",
394
+ max_length=self.tokenizer.model_max_length,
395
+ truncation=True,
396
+ return_tensors="pt",
397
+ )
398
+ text_input_ids = text_inputs.input_ids
399
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
400
+
401
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
402
+ text_input_ids, untruncated_ids
403
+ ):
404
+ removed_text = self.tokenizer.batch_decode(
405
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
406
+ )
407
+ logger.warning(
408
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
409
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
410
+ )
411
+
412
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
413
+ attention_mask = text_inputs.attention_mask.to(device)
414
+ else:
415
+ attention_mask = None
416
+
417
+ if clip_skip is None:
418
+ prompt_embeds = self.text_encoder(text_input_ids.to(device), attention_mask=attention_mask)
419
+ prompt_embeds = prompt_embeds[0]
420
+ else:
421
+ prompt_embeds = self.text_encoder(
422
+ text_input_ids.to(device), attention_mask=attention_mask, output_hidden_states=True
423
+ )
424
+ # Access the `hidden_states` first, that contains a tuple of
425
+ # all the hidden states from the encoder layers. Then index into
426
+ # the tuple to access the hidden states from the desired layer.
427
+ prompt_embeds = prompt_embeds[-1][-(clip_skip + 1)]
428
+ # We also need to apply the final LayerNorm here to not mess with the
429
+ # representations. The `last_hidden_states` that we typically use for
430
+ # obtaining the final prompt representations passes through the LayerNorm
431
+ # layer.
432
+ prompt_embeds = self.text_encoder.text_model.final_layer_norm(prompt_embeds)
433
+
434
+ if self.text_encoder is not None:
435
+ prompt_embeds_dtype = self.text_encoder.dtype
436
+ elif self.unet is not None:
437
+ prompt_embeds_dtype = self.unet.dtype
438
+ else:
439
+ prompt_embeds_dtype = prompt_embeds.dtype
440
+
441
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
442
+
443
+ bs_embed, seq_len, _ = prompt_embeds.shape
444
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
445
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
446
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
447
+
448
+ # get unconditional embeddings for classifier free guidance
449
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
450
+ uncond_tokens: List[str]
451
+ if negative_prompt is None:
452
+ uncond_tokens = [""] * batch_size
453
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
454
+ raise TypeError(
455
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
456
+ f" {type(prompt)}."
457
+ )
458
+ elif isinstance(negative_prompt, str):
459
+ uncond_tokens = [negative_prompt]
460
+ elif batch_size != len(negative_prompt):
461
+ raise ValueError(
462
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
463
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
464
+ " the batch size of `prompt`."
465
+ )
466
+ else:
467
+ uncond_tokens = negative_prompt
468
+
469
+ # textual inversion: process multi-vector tokens if necessary
470
+ if isinstance(self, TextualInversionLoaderMixin):
471
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
472
+
473
+ max_length = prompt_embeds.shape[1]
474
+ uncond_input = self.tokenizer(
475
+ uncond_tokens,
476
+ padding="max_length",
477
+ max_length=max_length,
478
+ truncation=True,
479
+ return_tensors="pt",
480
+ )
481
+
482
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
483
+ attention_mask = uncond_input.attention_mask.to(device)
484
+ else:
485
+ attention_mask = None
486
+
487
+ negative_prompt_embeds = self.text_encoder(
488
+ uncond_input.input_ids.to(device),
489
+ attention_mask=attention_mask,
490
+ )
491
+ negative_prompt_embeds = negative_prompt_embeds[0]
492
+
493
+ if do_classifier_free_guidance:
494
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
495
+ seq_len = negative_prompt_embeds.shape[1]
496
+
497
+ # if negative_prompt is not None:
498
+ # negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
499
+
500
+ # negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
501
+ # negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
502
+
503
+ # #prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
504
+
505
+ # #prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
506
+ # #prompt_embeds = prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
507
+
508
+ if img_only_prompt_embeds is not None:
509
+ seq_len = img_only_prompt_embeds.shape[1]
510
+ img_only_prompt_embeds = img_only_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
511
+
512
+ img_only_prompt_embeds = img_only_prompt_embeds.repeat(1, num_images_per_prompt, 1)
513
+ img_only_prompt_embeds = img_only_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
514
+
515
+ if img_prompt_everything_cond is not None:
516
+ seq_len = img_prompt_everything_cond.shape[1]
517
+ img_prompt_everything_cond = img_prompt_everything_cond.to(dtype=prompt_embeds_dtype, device=device)
518
+
519
+ img_prompt_everything_cond = img_prompt_everything_cond.repeat(1, num_images_per_prompt, 1)
520
+ img_prompt_everything_cond = img_prompt_everything_cond.view(batch_size * num_images_per_prompt, seq_len, -1)
521
+
522
+ if self.text_encoder is not None:
523
+ if isinstance(self, LoraLoaderMixin) and USE_PEFT_BACKEND:
524
+ # Retrieve the original scale by scaling back the LoRA layers
525
+ unscale_lora_layers(self.text_encoder, lora_scale)
526
+ if img_only_prompt_embeds is not None:
+ return prompt_embeds, negative_prompt_embeds, img_only_prompt_embeds, img_prompt_everything_cond
+ else:
+ return prompt_embeds, negative_prompt_embeds
+
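+ # Note on encode_prompt above: callers get a 2-tuple
+ # (prompt_embeds, negative_prompt_embeds) in the usual case, or a 4-tuple that
+ # additionally carries img_only_prompt_embeds and img_prompt_everything_cond
+ # when image-only conditioning embeddings were supplied. Illustrative,
+ # hypothetical call mirroring the one made later in __call__:
+ #   prompt_embeds, negative_prompt_embeds = pipe.encode_prompt(
+ #       "a portrait photo", device, 1, True, "blurry"
+ #   )
+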
+ def prepare_image(
531
+ self,
532
+ image,
533
+ width,
534
+ height,
535
+ batch_size,
536
+ num_images_per_prompt,
537
+ device,
538
+ dtype,
539
+ do_classifier_free_guidance=False,
540
+ ):
541
+ image = self.control_image_processor.preprocess(image, height=height, width=width).to(dtype=torch.float32)
542
+ image_batch_size = image.shape[0]
543
+
544
+ if image_batch_size == 1:
545
+ repeat_by = batch_size
546
+ else:
547
+ # image batch size is the same as prompt batch size
548
+ repeat_by = num_images_per_prompt
549
+
550
+ ctrl_noise = torch.randn_like(image)
551
+ image = image + (ctrl_noise * 0.02)
552
+
553
+
554
+ image = image.repeat_interleave(repeat_by, dim=0)
555
+
556
+ image = image.to(device=device, dtype=dtype)
557
+ uncond = torch.zeros_like(image)
558
+ #if do_classifier_free_guidance:
+ # image = torch.cat([image, uncond])
+
+ return image, uncond
+
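+ # Note on prepare_image above: a small amount of Gaussian noise (2% of unit
+ # standard deviation) is mixed into the control image before it is moved to
+ # the target device, and an all-zeros tensor of the same shape is returned as
+ # the unconditional control input. Illustrative, hypothetical call:
+ #   cond, uncond = pipe.prepare_image(pil_image, width=512, height=512,
+ #                                     batch_size=1, num_images_per_prompt=1,
+ #                                     device=device, dtype=torch.float16)
+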
+ def encode_image(self, image, device, num_images_per_prompt, output_hidden_states=None):
563
+ dtype = next(self.image_encoder.parameters()).dtype
564
+
565
+ if not isinstance(image, torch.Tensor):
566
+ image = self.feature_extractor(image, return_tensors="pt").pixel_values
567
+
568
+ image = image.to(device=device, dtype=dtype)
569
+ if output_hidden_states:
570
+ image_enc_hidden_states = self.image_encoder(image, output_hidden_states=True).hidden_states[-2]
571
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
572
+ uncond_image_enc_hidden_states = self.image_encoder(
573
+ torch.zeros_like(image), output_hidden_states=True
574
+ ).hidden_states[-2]
575
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(
576
+ num_images_per_prompt, dim=0
577
+ )
578
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
579
+ else:
580
+ image_embeds = self.image_encoder(image).image_embeds
581
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
582
+ uncond_image_embeds = torch.zeros_like(image_embeds)
583
+
584
+ return image_embeds, uncond_image_embeds
585
+
586
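+ # Note on encode_image above: with output_hidden_states=True it returns the
+ # penultimate hidden states of the image encoder (and the same for a zero
+ # image as the unconditional branch); otherwise it returns the pooled
+ # image_embeds with a zero tensor as the unconditional embedding.
+ # Illustrative sketch with hypothetical inputs, not executed here:
+ #   img_emb, uncond_emb = pipe.encode_image(pil_image, device, num_images_per_prompt=1)
+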
+ def prepare_ip_adapter_image_embeds(
587
+ self, ip_adapter_image, ip_adapter_image_embeds, device, num_images_per_prompt, do_classifier_free_guidance
588
+ ):
589
+ if ip_adapter_image_embeds is None:
590
+ if not isinstance(ip_adapter_image, list):
591
+ ip_adapter_image = [ip_adapter_image]
592
+
593
+ if len(ip_adapter_image) != len(self.unet.encoder_hid_proj.image_projection_layers):
594
+ raise ValueError(
595
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(self.unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
596
+ )
597
+
598
+ image_embeds = []
599
+ for single_ip_adapter_image, image_proj_layer in zip(
600
+ ip_adapter_image, self.unet.encoder_hid_proj.image_projection_layers
601
+ ):
602
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
603
+ single_image_embeds, single_negative_image_embeds = self.encode_image(
604
+ single_ip_adapter_image, device, 1, output_hidden_state
605
+ )
606
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
607
+ single_negative_image_embeds = torch.stack(
608
+ [single_negative_image_embeds] * num_images_per_prompt, dim=0
609
+ )
610
+
611
+ if do_classifier_free_guidance:
612
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
613
+ single_image_embeds = single_image_embeds.to(device)
614
+
615
+ image_embeds.append(single_image_embeds)
616
+ else:
617
+ repeat_dims = [1]
618
+ image_embeds = []
619
+ for single_image_embeds in ip_adapter_image_embeds:
620
+ if do_classifier_free_guidance:
621
+ single_negative_image_embeds, single_image_embeds = single_image_embeds.chunk(2)
622
+ single_image_embeds = single_image_embeds.repeat(
623
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
624
+ )
625
+ single_negative_image_embeds = single_negative_image_embeds.repeat(
626
+ num_images_per_prompt, *(repeat_dims * len(single_negative_image_embeds.shape[1:]))
627
+ )
628
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
629
+ else:
630
+ single_image_embeds = single_image_embeds.repeat(
631
+ num_images_per_prompt, *(repeat_dims * len(single_image_embeds.shape[1:]))
632
+ )
633
+ image_embeds.append(single_image_embeds)
634
+
635
+ return image_embeds
636
+
637
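+ # Note on prepare_ip_adapter_image_embeds above: when classifier-free guidance
+ # is active, each returned tensor is the concatenation
+ # [negative_image_embeds, image_embeds] along the batch dimension, i.e. the
+ # unconditional half comes first. Precomputed ip_adapter_image_embeds are
+ # expected in the same layout, which is why chunk(2) splits them back apart.
+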
+ def run_safety_checker(self, image, device, dtype):
638
+ if self.safety_checker is None:
639
+ has_nsfw_concept = None
640
+ else:
641
+ if torch.is_tensor(image):
642
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
643
+ else:
644
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
645
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
646
+ image, has_nsfw_concept = self.safety_checker(
647
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
648
+ )
649
+ return image, has_nsfw_concept
650
+
651
+ def decode_latents(self, latents):
652
+ deprecation_message = "The decode_latents method is deprecated and will be removed in 1.0.0. Please use VaeImageProcessor.postprocess(...) instead"
653
+ deprecate("decode_latents", "1.0.0", deprecation_message, standard_warn=False)
654
+
655
+ latents = 1 / self.vae.config.scaling_factor * latents
656
+ image = self.vae.decode(latents, return_dict=False)[0]
657
+ image = (image / 2 + 0.5).clamp(0, 1)
658
+ # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
659
+ image = image.cpu().permute(0, 2, 3, 1).float().numpy()
660
+ return image
661
+
662
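+ # Note on decode_latents above: it is kept only for backwards compatibility.
+ # The non-deprecated path used later in __call__ is roughly:
+ #   image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False)[0]
+ #   image = self.image_processor.postprocess(image, output_type="pil")
+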
+ def prepare_extra_step_kwargs(self, generator, eta):
663
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
664
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
665
+ # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
666
+ # and should be between [0, 1]
667
+
668
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
669
+ extra_step_kwargs = {}
670
+ if accepts_eta:
671
+ extra_step_kwargs["eta"] = eta
672
+
673
+ # check if the scheduler accepts generator
674
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
675
+ if accepts_generator:
676
+ extra_step_kwargs["generator"] = generator
677
+ return extra_step_kwargs
678
+
679
+ def check_inputs(
680
+ self,
681
+ prompt,
682
+ height,
683
+ width,
684
+ callback_steps,
685
+ negative_prompt=None,
686
+ prompt_embeds=None,
687
+ negative_prompt_embeds=None,
688
+ ip_adapter_image=None,
689
+ ip_adapter_image_embeds=None,
690
+ callback_on_step_end_tensor_inputs=None,
691
+ ):
692
+ if height % 8 != 0 or width % 8 != 0:
693
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
694
+
695
+ if callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0):
696
+ raise ValueError(
697
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
698
+ f" {type(callback_steps)}."
699
+ )
700
+ if callback_on_step_end_tensor_inputs is not None and not all(
701
+ k in self._callback_tensor_inputs for k in callback_on_step_end_tensor_inputs
702
+ ):
703
+ raise ValueError(
704
+ f"`callback_on_step_end_tensor_inputs` has to be in {self._callback_tensor_inputs}, but found {[k for k in callback_on_step_end_tensor_inputs if k not in self._callback_tensor_inputs]}"
705
+ )
706
+
707
+ #if prompt is not None and prompt_embeds is not None:
708
+ # raise ValueError(
709
+ # f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
710
+ # " only forward one of the two."
711
+ # )
712
+ if prompt is None and prompt_embeds is None:
+ raise ValueError(
714
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
715
+ )
716
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
717
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
718
+
719
+ if negative_prompt is not None and negative_prompt_embeds is not None:
720
+ raise ValueError(
721
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
722
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
723
+ )
724
+
725
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
726
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
727
+ raise ValueError(
728
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
729
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
730
+ f" {negative_prompt_embeds.shape}."
731
+ )
732
+
733
+ if ip_adapter_image is not None and ip_adapter_image_embeds is not None:
734
+ raise ValueError(
735
+ "Provide either `ip_adapter_image` or `ip_adapter_image_embeds`. Cannot leave both `ip_adapter_image` and `ip_adapter_image_embeds` defined."
736
+ )
737
+
738
+ if ip_adapter_image_embeds is not None:
739
+ if not isinstance(ip_adapter_image_embeds, list):
740
+ raise ValueError(
741
+ f"`ip_adapter_image_embeds` has to be of type `list` but is {type(ip_adapter_image_embeds)}"
742
+ )
743
+ elif ip_adapter_image_embeds[0].ndim not in [3, 4]:
744
+ raise ValueError(
745
+ f"`ip_adapter_image_embeds` has to be a list of 3D or 4D tensors but is {ip_adapter_image_embeds[0].ndim}D"
746
+ )
747
+
748
+ def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, device, generator, latents=None):
749
+ shape = (
750
+ batch_size,
751
+ num_channels_latents,
752
+ int(height) // self.vae_scale_factor,
753
+ int(width) // self.vae_scale_factor,
754
+ )
755
+ if isinstance(generator, list) and len(generator) != batch_size:
756
+ raise ValueError(
757
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
758
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
759
+ )
760
+
761
+ if latents is None:
762
+ latents = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
763
+ else:
764
+ latents = latents.to(device)
765
+
766
+ # scale the initial noise by the standard deviation required by the scheduler
767
+ latents = latents * self.scheduler.init_noise_sigma
768
+ return latents
769
+
770
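+ # Note on prepare_latents above: for a Stable Diffusion v1.x-style UNet with
+ # 4 latent channels and a vae_scale_factor of 8 (an assumption, not checked
+ # here), a 512x512 request yields latents of shape (batch, 4, 64, 64),
+ # already scaled by scheduler.init_noise_sigma.
+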
+ # Copied from diffusers.pipelines.latent_consistency_models.pipeline_latent_consistency_text2img.LatentConsistencyModelPipeline.get_guidance_scale_embedding
771
+ def get_guidance_scale_embedding(
772
+ self, w: torch.Tensor, embedding_dim: int = 512, dtype: torch.dtype = torch.float32
773
+ ) -> torch.Tensor:
774
+ """
775
+ See https://github.com/google-research/vdm/blob/dc27b98a554f65cdc654b800da5aa1846545d41b/model_vdm.py#L298
776
+
777
+ Args:
778
+ w (`torch.Tensor`):
779
+ Generate embedding vectors with a specified guidance scale to subsequently enrich timestep embeddings.
780
+ embedding_dim (`int`, *optional*, defaults to 512):
781
+ Dimension of the embeddings to generate.
782
+ dtype (`torch.dtype`, *optional*, defaults to `torch.float32`):
783
+ Data type of the generated embeddings.
784
+
785
+ Returns:
786
+ `torch.Tensor`: Embedding vectors with shape `(len(w), embedding_dim)`.
787
+ """
788
+ assert len(w.shape) == 1
789
+ w = w * 1000.0
790
+
791
+ half_dim = embedding_dim // 2
792
+ emb = torch.log(torch.tensor(10000.0)) / (half_dim - 1)
793
+ emb = torch.exp(torch.arange(half_dim, dtype=dtype) * -emb)
794
+ emb = w.to(dtype)[:, None] * emb[None, :]
795
+ emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1)
796
+ if embedding_dim % 2 == 1: # zero pad
797
+ emb = torch.nn.functional.pad(emb, (0, 1))
798
+ assert emb.shape == (w.shape[0], embedding_dim)
799
+ return emb
800
+
801
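+ # Illustrative use of get_guidance_scale_embedding above (this mirrors the
+ # call made later in __call__ when the UNet defines time_cond_proj_dim; the
+ # embedding_dim of 256 is just an example value):
+ #   w = torch.tensor([7.5 - 1.0])
+ #   emb = self.get_guidance_scale_embedding(w, embedding_dim=256)
+ #   assert emb.shape == (1, 256)
+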
+ @property
802
+ def guidance_scale(self):
803
+ return self._guidance_scale
804
+
805
+ @property
806
+ def guidance_rescale(self):
807
+ return self._guidance_rescale
808
+
809
+ @property
810
+ def clip_skip(self):
811
+ return self._clip_skip
812
+
813
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
814
+ # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
815
+ # corresponds to doing no classifier free guidance.
816
+ @property
817
+ def do_classifier_free_guidance(self):
818
+ return self._guidance_scale > 1 and self.unet.config.time_cond_proj_dim is None
819
+
820
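+ # Classifier-free guidance is considered enabled only when guidance_scale > 1
+ # and the UNet does not take a guidance embedding (time_cond_proj_dim is
+ # None); guidance-distilled UNets receive the scale through timestep_cond
+ # instead, as done in __call__ below.
+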
+ @property
821
+ def cross_attention_kwargs(self):
822
+ return self._cross_attention_kwargs
823
+
824
+ @property
825
+ def num_timesteps(self):
826
+ return self._num_timesteps
827
+
828
+ @property
829
+ def interrupt(self):
830
+ return self._interrupt
831
+
832
+ @torch.no_grad()
833
+ @replace_example_docstring(EXAMPLE_DOC_STRING)
834
+ def __call__(
835
+ self,
836
+ prompt: Union[str, List[str]] = None,
837
+ height: Optional[int] = None,
838
+ width: Optional[int] = None,
839
+ num_inference_steps: int = 50,
840
+ timesteps: List[int] = None,
841
+ sigmas: List[float] = None,
842
+ guidance_scale: float = 7.5,
843
+ negative_prompt: Optional[Union[str, List[str]]] = None,
844
+ num_images_per_prompt: Optional[int] = 1,
845
+ eta: float = 0.0,
846
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
847
+ latents: Optional[torch.Tensor] = None,
848
+ prompt_embeds: Optional[torch.Tensor] = None,
849
+ controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
850
+ negative_prompt_embeds: Optional[torch.Tensor] = None,
851
+ ip_adapter_image: Optional[PipelineImageInput] = None,
852
+ ip_adapter_image_embeds: Optional[List[torch.Tensor]] = None,
853
+ output_type: Optional[str] = "pil",
854
+ image: Optional[Union[Image.Image, List[Image.Image]]] = None,
855
+ return_dict: bool = True,
856
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
857
+ guidance_rescale: float = 0.0,
858
+ clip_skip: Optional[int] = None,
859
+ stylecode: Optional[str] = None,
860
+ callback_on_step_end: Optional[
861
+ Union[Callable[[int, int, Dict], None], PipelineCallback, MultiPipelineCallbacks]
862
+ ] = None,
863
+ callback_on_step_end_tensor_inputs: List[str] = ["latents"],
864
+ **kwargs,
865
+ ):
866
+ r"""
867
+ The call function to the pipeline for generation.
868
+
869
+ Args:
870
+ prompt (`str` or `List[str]`, *optional*):
871
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
872
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
873
+ The height in pixels of the generated image.
874
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
875
+ The width in pixels of the generated image.
876
+ num_inference_steps (`int`, *optional*, defaults to 50):
877
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
878
+ expense of slower inference.
879
+ timesteps (`List[int]`, *optional*):
880
+ Custom timesteps to use for the denoising process with schedulers which support a `timesteps` argument
881
+ in their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is
882
+ passed will be used. Must be in descending order.
883
+ sigmas (`List[float]`, *optional*):
884
+ Custom sigmas to use for the denoising process with schedulers which support a `sigmas` argument in
885
+ their `set_timesteps` method. If not defined, the default behavior when `num_inference_steps` is passed
886
+ will be used.
887
+ guidance_scale (`float`, *optional*, defaults to 7.5):
888
+ A higher guidance scale value encourages the model to generate images closely linked to the text
889
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
890
+ negative_prompt (`str` or `List[str]`, *optional*):
891
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
892
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
893
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
894
+ The number of images to generate per prompt.
895
+ eta (`float`, *optional*, defaults to 0.0):
896
+ Corresponds to parameter eta (η) from the [DDIM](https://arxiv.org/abs/2010.02502) paper. Only applies
897
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
898
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
899
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
900
+ generation deterministic.
901
+ latents (`torch.Tensor`, *optional*):
902
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
903
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
904
+ tensor is generated by sampling using the supplied random `generator`.
905
+ prompt_embeds (`torch.Tensor`, *optional*):
906
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
907
+ provided, text embeddings are generated from the `prompt` input argument.
908
+ negative_prompt_embeds (`torch.Tensor`, *optional*):
909
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
910
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
911
+ ip_adapter_image: (`PipelineImageInput`, *optional*): Optional image input to work with IP Adapters.
912
+ ip_adapter_image_embeds (`List[torch.Tensor]`, *optional*):
913
+ Pre-generated image embeddings for IP-Adapter. It should be a list of length same as number of
914
+ IP-adapters. Each element should be a tensor of shape `(batch_size, num_images, emb_dim)`. It should
915
+ contain the negative image embedding if `do_classifier_free_guidance` is set to `True`. If not
916
+ provided, embeddings are computed from the `ip_adapter_image` input argument.
917
+ output_type (`str`, *optional*, defaults to `"pil"`):
918
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
919
+ return_dict (`bool`, *optional*, defaults to `True`):
920
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
921
+ plain tuple.
922
+ cross_attention_kwargs (`dict`, *optional*):
923
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
924
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
925
+ guidance_rescale (`float`, *optional*, defaults to 0.0):
926
+ Guidance rescale factor from [Common Diffusion Noise Schedules and Sample Steps are
927
+ Flawed](https://arxiv.org/pdf/2305.08891.pdf). Guidance rescale factor should fix overexposure when
928
+ using zero terminal SNR.
929
+ clip_skip (`int`, *optional*):
930
+ Number of layers to be skipped from CLIP while computing the prompt embeddings. A value of 1 means that
931
+ the output of the pre-final layer will be used for computing the prompt embeddings.
932
+ callback_on_step_end (`Callable`, `PipelineCallback`, `MultiPipelineCallbacks`, *optional*):
933
+ A function or a subclass of `PipelineCallback` or `MultiPipelineCallbacks` that is called at the end of
934
+ each denoising step during inference, with the following arguments: `callback_on_step_end(self:
935
+ DiffusionPipeline, step: int, timestep: int, callback_kwargs: Dict)`. `callback_kwargs` will include a
936
+ list of all tensors as specified by `callback_on_step_end_tensor_inputs`.
937
+ callback_on_step_end_tensor_inputs (`List`, *optional*):
938
+ The list of tensor inputs for the `callback_on_step_end` function. The tensors specified in the list
939
+ will be passed as `callback_kwargs` argument. You will only be able to include variables listed in the
940
+ `._callback_tensor_inputs` attribute of your pipeline class.
941
+
942
+ Examples:
943
+
944
+ Returns:
945
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
946
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
947
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
948
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
949
+ "not-safe-for-work" (nsfw) content.
950
+ """
951
+
952
+ callback = kwargs.pop("callback", None)
953
+ callback_steps = kwargs.pop("callback_steps", None)
954
+
955
+ if callback is not None:
956
+ deprecate(
957
+ "callback",
958
+ "1.0.0",
959
+ "Passing `callback` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
960
+ )
961
+ if callback_steps is not None:
962
+ deprecate(
963
+ "callback_steps",
964
+ "1.0.0",
965
+ "Passing `callback_steps` as an input argument to `__call__` is deprecated, consider using `callback_on_step_end`",
966
+ )
967
+
968
+ if isinstance(callback_on_step_end, (PipelineCallback, MultiPipelineCallbacks)):
969
+ callback_on_step_end_tensor_inputs = callback_on_step_end.tensor_inputs
970
+
971
+ # 0. Default height and width to unet
972
+ height = height or self.unet.config.sample_size * self.vae_scale_factor
973
+ width = width or self.unet.config.sample_size * self.vae_scale_factor
974
+ # to deal with lora scaling and other possible forward hooks
975
+
976
+ # 1. Check inputs. Raise error if not correct
977
+ self.check_inputs(
978
+ prompt,
979
+ height,
980
+ width,
981
+ callback_steps,
982
+ negative_prompt,
983
+ prompt_embeds,
984
+ negative_prompt_embeds,
985
+ ip_adapter_image,
986
+ ip_adapter_image_embeds,
987
+ callback_on_step_end_tensor_inputs,
988
+ )
989
+
990
+ self._guidance_scale = guidance_scale
991
+ self._guidance_rescale = guidance_rescale
992
+ self._clip_skip = clip_skip
993
+ self._cross_attention_kwargs = cross_attention_kwargs
994
+ self._interrupt = False
995
+
996
+ # 2. Define call parameters
997
+ # if prompt is not None and isinstance(prompt, str):
998
+ # batch_size = 1
999
+ # elif prompt is not None and isinstance(prompt, list):
1000
+ # batch_size = len(prompt)
1001
+ # else:
1002
+ #batch_size = prompt_embeds.shape[0]
1003
+ # this broke something ages ago, you'll have to add it back in :P
+ batch_size = 1
1005
+ device = self._execution_device
1006
+
1007
+ # 3. Encode input prompt
1008
+ lora_scale = (
1009
+ self.cross_attention_kwargs.get("scale", None) if self.cross_attention_kwargs is not None else None
1010
+ )
1011
+
1012
+ prompt_embeds, negative_prompt_embeds = self.encode_prompt(
1013
+ prompt,
1014
+ device,
1015
+ num_images_per_prompt,
1016
+ self.do_classifier_free_guidance,
1017
+ negative_prompt,
1018
+ prompt_embeds=prompt_embeds,
1019
+ negative_prompt_embeds=negative_prompt_embeds,
1020
+ lora_scale=lora_scale,
1021
+ clip_skip=self.clip_skip,
1022
+ )
1023
+ if image is not None:
1024
+ controlnet_cond = self.get_image_embeds(image)
1025
+ else:
1026
+ controlnet_cond = None
+
1028
+ if self.do_classifier_free_guidance:
1029
+ prompt_embeds = torch.cat([prompt_embeds, negative_prompt_embeds])
1030
+ if controlnet_cond is not None:
1031
+ controlnet_cond = torch.cat([controlnet_cond, controlnet_cond])
+ else:
1033
+ controlnet_cond = None
1034
+ if ip_adapter_image is not None or ip_adapter_image_embeds is not None:
1035
+ image_embeds = self.prepare_ip_adapter_image_embeds(
1036
+ ip_adapter_image,
1037
+ ip_adapter_image_embeds,
1038
+ device,
1039
+ batch_size * num_images_per_prompt,
1040
+ self.do_classifier_free_guidance,
1041
+ )
1042
+
1043
+ # 4. Prepare timesteps
1044
+ timesteps, num_inference_steps = retrieve_timesteps(
1045
+ self.scheduler, num_inference_steps, device, timesteps, sigmas
1046
+ )
1047
+
1048
+ # 5. Prepare latent variables
1049
+ num_channels_latents = self.unet.config.in_channels
1050
+ latents = self.prepare_latents(
1051
+ batch_size * num_images_per_prompt,
1052
+ num_channels_latents,
1053
+ height,
1054
+ width,
1055
+ prompt_embeds.dtype,
1056
+ device,
1057
+ generator,
1058
+ latents,
1059
+ )
1060
+
1061
+ # 6. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1062
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1063
+
1064
+ # 6.1 Add image embeds for IP-Adapter
1065
+ added_cond_kwargs = (
1066
+ {"image_embeds": image_embeds}
1067
+ if (ip_adapter_image is not None or ip_adapter_image_embeds is not None)
1068
+ else None
1069
+ )
1070
+
1071
+ # 6.2 Optionally get Guidance Scale Embedding
1072
+ timestep_cond = None
1073
+ if self.unet.config.time_cond_proj_dim is not None:
1074
+ guidance_scale_tensor = torch.tensor(self.guidance_scale - 1).repeat(batch_size * num_images_per_prompt)
1075
+ timestep_cond = self.get_guidance_scale_embedding(
1076
+ guidance_scale_tensor, embedding_dim=self.unet.config.time_cond_proj_dim
1077
+ ).to(device=device, dtype=latents.dtype)
1078
+
1079
+ # 7. Denoising loop
1080
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1081
+ self._num_timesteps = len(timesteps)
1082
+ #image_pil = save_debug_image(image[0])
1083
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1084
+ for i, t in enumerate(timesteps):
1085
+ if self.interrupt:
1086
+ continue
1087
+
1088
+ latent_expand_num = 2
1089
+ latent_model_input = torch.cat([latents] * latent_expand_num) if self.do_classifier_free_guidance else latents
1090
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1091
+
1092
+ # predict the noise residual
1093
+ dont_control = False  # debug switch: if True, bypass the stylecodes model and run the plain UNet
+ if dont_control:
1095
+ noise_pred = self.unet(
1096
+ latent_model_input,
1097
+ t,
1098
+ encoder_hidden_states=prompt_embeds,
1099
+ timestep_cond=timestep_cond,
1100
+ cross_attention_kwargs=self.cross_attention_kwargs,
1101
+ added_cond_kwargs=added_cond_kwargs,
1102
+ return_dict=False,
1103
+ )[0]
1104
+ else:
1105
+ #print("shape ",prompt_embeds.shape,latent_model_input.shape)
1106
+ noise_pred = self.stylecodes_model(
1107
+ base_model=self.unet,
1108
+ sample=latent_model_input,
1109
+ timestep=t,
1110
+ encoder_hidden_states=prompt_embeds,
1111
+ encoder_hidden_states_controlnet=prompt_embeds,
1112
+ controlnet_cond=controlnet_cond,
1113
+ conditioning_scale=controlnet_conditioning_scale,
1114
+ cross_attention_kwargs=cross_attention_kwargs,
1115
+ return_dict=True,
1116
+ stylecode=stylecode,
1117
+ )[0]
1118
+
+ # perform guidance
+ if self.do_classifier_free_guidance:
1125
+
1126
+ noise_pred_full, noise_pred_fully_uncond = noise_pred.chunk(2)
1127
+ noise_pred = noise_pred_fully_uncond + self.guidance_scale * (noise_pred_full - noise_pred_fully_uncond)
1128
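+ # The chunk order above matches the earlier concatenation
+ # torch.cat([prompt_embeds, negative_prompt_embeds]): the first half is the
+ # fully conditioned prediction, the second half the unconditional one, and
+ # the combined prediction is uncond + guidance_scale * (cond - uncond).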
+
1129
+ #if self.do_classifier_free_guidance and self.guidance_rescale > 0.0:
1130
+ # Based on 3.4. in https://arxiv.org/pdf/2305.08891.pdf
1131
+ # noise_pred = rescale_noise_cfg(noise_pred, noise_pred_text, guidance_rescale=self.guidance_rescale)
1132
+
1133
+ # compute the previous noisy sample x_t -> x_t-1
1134
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=False)[0]
1135
+
1136
+ if callback_on_step_end is not None:
1137
+ callback_kwargs = {}
1138
+ for k in callback_on_step_end_tensor_inputs:
1139
+ callback_kwargs[k] = locals()[k]
1140
+ callback_outputs = callback_on_step_end(self, i, t, callback_kwargs)
1141
+
1142
+ latents = callback_outputs.pop("latents", latents)
1143
+ prompt_embeds = callback_outputs.pop("prompt_embeds", prompt_embeds)
1144
+ negative_prompt_embeds = callback_outputs.pop("negative_prompt_embeds", negative_prompt_embeds)
1145
+
1146
+ # call the callback, if provided
1147
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1148
+ progress_bar.update()
1149
+ if callback is not None and i % callback_steps == 0:
1150
+ step_idx = i // getattr(self.scheduler, "order", 1)
1151
+ callback(step_idx, t, latents)
1152
+
1153
+ if not output_type == "latent":
1154
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, generator=generator)[
1155
+ 0
1156
+ ]
1157
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1158
+ else:
1159
+ image = latents
1160
+ has_nsfw_concept = None
1161
+
1162
+ if has_nsfw_concept is None:
1163
+ do_denormalize = [True] * image.shape[0]
1164
+ else:
1165
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1166
+
1167
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1168
+
1169
+ # Offload all models
1170
+ self.maybe_free_model_hooks()
1171
+
1172
+ if not return_dict:
1173
+ return (image, has_nsfw_concept)
1174
+
1175
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1176
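+
+ # Illustrative end-to-end usage of this pipeline (hedged sketch; the exact
+ # pipeline class name and loading code live elsewhere in this repo):
+ #   out = pipe(prompt="a portrait photo", image=reference_image,
+ #              num_inference_steps=30, guidance_scale=7.5,
+ #              controlnet_conditioning_scale=1.0, stylecode=None)
+ #   out.images[0].save("result.png")
+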
+ def save_debug_image(image, filename='debug_image2.png'):
1177
+ print("Debugging image information:")
1178
+ print(f"Type of image: {type(image)}")
1179
+
1180
+ if isinstance(image, torch.Tensor):
1181
+ print(f"Image tensor shape: {image.shape}")
1182
+ print(f"Image tensor dtype: {image.dtype}")
1183
+ print(f"Image tensor device: {image.device}")
1184
+ print(f"Image tensor min: {image.min()}, max: {image.max()}")
1185
+
1186
+ # Move to CPU and convert to numpy
1187
+ image_np = image.cpu().detach().numpy()
1188
+
1189
+ elif isinstance(image, np.ndarray):
1190
+ image_np = image
1191
+ else:
1192
+ print(f"Unexpected image type: {type(image)}")
1193
+ return
1194
+
1195
+ print(f"Numpy array shape: {image_np.shape}")
1196
+ print(f"Numpy array dtype: {image_np.dtype}")
1197
+ print(f"Numpy array min: {image_np.min()}, max: {image_np.max()}")
1198
+
1199
+ # Handle different array shapes
1200
+ if image_np.ndim == 4:
1201
+ # Assume shape is (batch, channel, height, width)
1202
+ image_np = np.squeeze(image_np, axis=0) # Remove batch dimension
1203
+ image_np = np.transpose(image_np, (1, 2, 0)) # Change to (height, width, channel)
1204
+ elif image_np.ndim == 3:
1205
+ if image_np.shape[0] in [1, 3, 4]:
1206
+ image_np = np.transpose(image_np, (1, 2, 0))
1207
+ elif image_np.ndim == 2:
1208
+ image_np = np.expand_dims(image_np, axis=-1)
1209
+
1210
+ print(f"Processed numpy array shape: {image_np.shape}")
1211
+
1212
+ # Normalize to 0-255 range if not already
1213
+ if image_np.dtype != np.uint8:
1214
+ if image_np.max() <= 1:
1215
+ image_np = (image_np * 255).astype(np.uint8)
1216
+ else:
1217
+ image_np = np.clip(image_np, 0, 255).astype(np.uint8)
1218
+
1219
+ try:
1220
+ image_pil = Image.fromarray(image_np)
1221
+ image_pil.save(filename)
1222
+ print(f"Debug image saved as '{filename}'")
1223
+ except Exception as e:
1224
+ print(f"Error saving image: {str(e)}")
1225
+ print("Attempting to save as numpy array...")
1226
+ np.save(filename.replace('.png', '.npy'), image_np)
1227
+ print(f"Numpy array saved as '{filename.replace('.png', '.npy')}'")