SpicyCat committed
Commit 9055c3d
1 Parent(s): 070c036

Upload cldm.py

Files changed (1)
  1. cldm.py +456 -0
cldm.py ADDED
@@ -0,0 +1,456 @@
+ import os
+ import einops
+ from omegaconf import OmegaConf
+ import torch
+ import torch as th
+ import torch.nn as nn
+ from modules import devices, lowvram, shared
+
+ from ldm.modules.diffusionmodules.util import (
+     conv_nd,
+     linear,
+     zero_module,
+     timestep_embedding,
+ )
+
+ from ldm.modules.attention import SpatialTransformer
+ from ldm.modules.diffusionmodules.openaimodel import UNetModel, TimestepEmbedSequential, ResBlock, Downsample, AttentionBlock
+ from ldm.util import exists
+
+
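+ # Checkpoint-loading helpers: handle both .safetensors and regular torch checkpoints,
+ # unwrapping a nested 'state_dict' key when one is present.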
+ def load_state_dict(ckpt_path, location='cpu'):
+     _, extension = os.path.splitext(ckpt_path)
+     if extension.lower() == ".safetensors":
+         import safetensors.torch
+         state_dict = safetensors.torch.load_file(ckpt_path, device=location)
+     else:
+         state_dict = torch.load(ckpt_path, map_location=torch.device(location))
+     state_dict = get_state_dict(state_dict)
+     print(f'Loaded state_dict from [{ckpt_path}]')
+     return state_dict
+
+
+ def get_state_dict(d):
+     return d.get('state_dict', d)
+
+
+ def align(hint, size):
+     b, c, h1, w1 = hint.shape
+     h, w = size
+     if h != h1 or w != w1:
+         hint = torch.nn.functional.interpolate(hint, size=size, mode="nearest")
+     return hint
+
+
+ def get_node_name(name, parent_name):
+     if len(name) <= len(parent_name):
+         return False, ''
+     p = name[:len(parent_name)]
+     if p != parent_name:
+         return False, ''
+     return True, name[len(parent_name):]
+
+
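+ # PlugableControlModel loads a ControlNet from a checkpoint and plugs it into the
+ # webui UNet by swapping the UNet's forward method (see hook/restore below).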
+ class PlugableControlModel(nn.Module):
+     def __init__(self, model_path, config_path, weight=1.0, lowvram=False, base_model=None) -> None:
+         super().__init__()
+         config = OmegaConf.load(config_path)
+
+         self.control_model = ControlNet(**config.model.params.control_stage_config.params)
+         state_dict = load_state_dict(model_path)
+
+         if any(k.startswith("control_model.") for k in state_dict):
+
+             is_diff_model = 'difference' in state_dict
+             transfer_ctrl_opt = shared.opts.data.get("control_net_control_transfer", False) and \
+                 any(k.startswith("model.diffusion_model.") for k in state_dict)
+
+             if (is_diff_model or transfer_ctrl_opt) and base_model is not None:
+                 # apply transfer control - https://github.com/lllyasviel/ControlNet/blob/main/tool_transfer_control.py
+
+                 unet_state_dict = base_model.state_dict()
+                 unet_state_dict_keys = unet_state_dict.keys()
+                 final_state_dict = {}
+                 counter = 0
+                 for key in state_dict.keys():
+                     if not key.startswith("control_model."):
+                         continue
+
+                     p = state_dict[key]
+                     is_control, node_name = get_node_name(key, 'control_')
+                     key_name = node_name.replace("model.", "") if is_control else key
+
+                     if key_name in unet_state_dict_keys:
+                         if is_diff_model:
+                             # transfer control by adding the stored difference to the current UNet weights
+                             p_new = p + unet_state_dict[key_name].clone().cpu()
+                         else:
+                             # transfer control by computing offsets (delta = p + current_unet_encoder - frozen_unet_encoder)
+                             p_new = p + unet_state_dict[key_name].clone().cpu() - state_dict["model.diffusion_model." + key_name]
+                         counter += 1
+                     else:
+                         p_new = p
+                     final_state_dict[key] = p_new
+
+                 print(f'Offset cloned: {counter} values')
+                 state_dict = final_state_dict
+
+             state_dict = {k.replace("control_model.", ""): v for k, v in state_dict.items() if k.startswith("control_model.")}
+         else:
+             # assume the checkpoint was already prepared by the user
+             pass
+
+         self.control_model.load_state_dict(state_dict)
+         self.lowvram = lowvram
+         self.weight = weight
+         self.only_mid_control = False
+         self.control = None
+         self.hint_cond = None
+
+         if not self.lowvram:
+             self.control_model.to(devices.get_device_for("controlnet"))
+
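+     # hook() replaces model.forward with a wrapper that runs the ControlNet on the
+     # stored hint and adds the resulting residuals to the UNet skip connections;
+     # restore() puts the original forward back.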
+     def hook(self, model, parent_model):
+         outer = self
+
+         def forward(self, x, timesteps=None, context=None, **kwargs):
+             # here `self` is the hooked UNetModel (this function is bound to it below)
+             only_mid_control = outer.only_mid_control
+
+             # hires fix handling
+             # note that this method may not work if hr_scale < 1.1
+             if abs(x.shape[-1] - outer.hint_cond.shape[-1] // 8) > 8:
+                 only_mid_control = shared.opts.data.get("control_net_only_midctrl_hires", True)
+                 # If you want to completely disable control net, uncomment this.
+                 # return self._original_forward(x, timesteps=timesteps, context=context, **kwargs)
+
+             assert timesteps is not None, f"insufficient timestep: {timesteps}"
+             control = outer.control_model(x=x, hint=outer.hint_cond, timesteps=timesteps, context=context)
+             hs = []
+             with torch.no_grad():
+                 t_emb = timestep_embedding(
+                     timesteps, self.model_channels, repeat_only=False)
+                 emb = self.time_embed(t_emb)
+                 h = x.type(self.dtype)
+                 for module in self.input_blocks:
+                     h = module(h, emb, context)
+                     hs.append(h)
+                 h = self.middle_block(h, emb, context)
+
+             h += control.pop()
+
+             for i, module in enumerate(self.output_blocks):
+                 if only_mid_control:
+                     h = torch.cat([h, hs.pop()], dim=1)
+                 else:
+                     hs_input, control_input = hs.pop(), control.pop()
+                     h = align(h, hs_input.shape[-2:])
+                     h = torch.cat([h, hs_input + control_input * outer.weight], dim=1)
+                 h = module(h, emb, context)
+
+             h = h.type(x.dtype)
+             return self.out(h)
+
+         def forward2(*args, **kwargs):
+             # webui will handle the other components
+             try:
+                 if shared.cmd_opts.lowvram:
+                     lowvram.send_everything_to_cpu()
+                 if self.lowvram:
+                     self.control_model.to(devices.get_device_for("controlnet"))
+                 return forward(*args, **kwargs)
+             finally:
+                 if self.lowvram:
+                     self.control_model.cpu()
+
+         model._original_forward = model.forward
+         model.forward = forward2.__get__(model, UNetModel)
+
+     def notify(self, cond_like, weight):
+         self.hint_cond = cond_like
+         self.weight = weight
+         # print(self.hint_cond.shape)
+
+     def restore(self, model):
+         if not hasattr(model, "_original_forward"):
+             # no such handle, ignore
+             return
+
+         model.forward = model._original_forward
+         del model._original_forward
+
+
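+ # ControlNet mirrors the UNet encoder (input blocks + middle block), adds a small
+ # conv network that embeds the hint image, and attaches a zero-initialized 1x1
+ # convolution after each stage; those zero-conv outputs are the control residuals.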
+ class ControlNet(nn.Module):
+     def __init__(
+         self,
+         image_size,
+         in_channels,
+         model_channels,
+         hint_channels,
+         num_res_blocks,
+         attention_resolutions,
+         dropout=0,
+         channel_mult=(1, 2, 4, 8),
+         conv_resample=True,
+         dims=2,
+         use_checkpoint=False,
+         use_fp16=False,
+         num_heads=-1,
+         num_head_channels=-1,
+         num_heads_upsample=-1,
+         use_scale_shift_norm=False,
+         resblock_updown=False,
+         use_new_attention_order=False,
+         use_spatial_transformer=False,  # custom transformer support
+         transformer_depth=1,  # custom transformer support
+         context_dim=None,  # custom transformer support
+         # custom support for prediction of discrete ids into codebook of first stage vq model
+         n_embed=None,
+         legacy=True,
+         disable_self_attentions=None,
+         num_attention_blocks=None,
+         disable_middle_self_attn=False,
+         use_linear_in_transformer=False,
+     ):
+         super().__init__()
+         if use_spatial_transformer:
+             assert context_dim is not None, 'Fool!! You forgot to include the dimension of your cross-attention conditioning...'
+
+         if context_dim is not None:
+             assert use_spatial_transformer, 'Fool!! You forgot to use the spatial transformer for your cross-attention conditioning...'
+             from omegaconf.listconfig import ListConfig
+             if type(context_dim) == ListConfig:
+                 context_dim = list(context_dim)
+
+         if num_heads_upsample == -1:
+             num_heads_upsample = num_heads
+
+         if num_heads == -1:
+             assert num_head_channels != -1, 'Either num_heads or num_head_channels has to be set'
+
+         if num_head_channels == -1:
+             assert num_heads != -1, 'Either num_heads or num_head_channels has to be set'
+
+         self.dims = dims
+         self.image_size = image_size
+         self.in_channels = in_channels
+         self.model_channels = model_channels
+         if isinstance(num_res_blocks, int):
+             self.num_res_blocks = len(channel_mult) * [num_res_blocks]
+         else:
+             if len(num_res_blocks) != len(channel_mult):
+                 raise ValueError("provide num_res_blocks either as an int (globally constant) or "
+                                  "as a list/tuple (per-level) with the same length as channel_mult")
+             self.num_res_blocks = num_res_blocks
+         if disable_self_attentions is not None:
+             # should be a list of booleans, indicating whether to disable self-attention in TransformerBlocks or not
+             assert len(disable_self_attentions) == len(channel_mult)
+         if num_attention_blocks is not None:
+             assert len(num_attention_blocks) == len(self.num_res_blocks)
+             assert all(map(lambda i: self.num_res_blocks[i] >= num_attention_blocks[i], range(
+                 len(num_attention_blocks))))
+             print(f"Constructor of UNetModel received num_attention_blocks={num_attention_blocks}. "
+                   f"This option has LESS priority than attention_resolutions {attention_resolutions}, "
+                   f"i.e., in cases where num_attention_blocks[i] > 0 but 2**i not in attention_resolutions, "
+                   f"attention will still not be set.")
+
+         self.attention_resolutions = attention_resolutions
+         self.dropout = dropout
+         self.channel_mult = channel_mult
+         self.conv_resample = conv_resample
+         self.use_checkpoint = use_checkpoint
+         self.dtype = th.float16 if use_fp16 else th.float32
+         self.num_heads = num_heads
+         self.num_head_channels = num_head_channels
+         self.num_heads_upsample = num_heads_upsample
+         self.predict_codebook_ids = n_embed is not None
+
+         time_embed_dim = model_channels * 4
+         self.time_embed = nn.Sequential(
+             linear(model_channels, time_embed_dim),
+             nn.SiLU(),
+             linear(time_embed_dim, time_embed_dim),
+         )
+
+         self.input_blocks = nn.ModuleList(
+             [
+                 TimestepEmbedSequential(
+                     conv_nd(dims, in_channels, model_channels, 3, padding=1)
+                 )
+             ]
+         )
+         self.zero_convs = nn.ModuleList([self.make_zero_conv(model_channels)])
+
+         self.input_hint_block = TimestepEmbedSequential(
+             conv_nd(dims, hint_channels, 16, 3, padding=1),
+             nn.SiLU(),
+             conv_nd(dims, 16, 16, 3, padding=1),
+             nn.SiLU(),
+             conv_nd(dims, 16, 32, 3, padding=1, stride=2),
+             nn.SiLU(),
+             conv_nd(dims, 32, 32, 3, padding=1),
+             nn.SiLU(),
+             conv_nd(dims, 32, 96, 3, padding=1, stride=2),
+             nn.SiLU(),
+             conv_nd(dims, 96, 96, 3, padding=1),
+             nn.SiLU(),
+             conv_nd(dims, 96, 256, 3, padding=1, stride=2),
+             nn.SiLU(),
+             zero_module(conv_nd(dims, 256, model_channels, 3, padding=1))
+         )
+
+         self._feature_size = model_channels
+         input_block_chans = [model_channels]
+         ch = model_channels
+         ds = 1
+         for level, mult in enumerate(channel_mult):
+             for nr in range(self.num_res_blocks[level]):
+                 layers = [
+                     ResBlock(
+                         ch,
+                         time_embed_dim,
+                         dropout,
+                         out_channels=mult * model_channels,
+                         dims=dims,
+                         use_checkpoint=use_checkpoint,
+                         use_scale_shift_norm=use_scale_shift_norm,
+                     )
+                 ]
+                 ch = mult * model_channels
+                 if ds in attention_resolutions:
+                     if num_head_channels == -1:
+                         dim_head = ch // num_heads
+                     else:
+                         num_heads = ch // num_head_channels
+                         dim_head = num_head_channels
+                     if legacy:
+                         #num_heads = 1
+                         dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
+                     if exists(disable_self_attentions):
+                         disabled_sa = disable_self_attentions[level]
+                     else:
+                         disabled_sa = False
+
+                     if not exists(num_attention_blocks) or nr < num_attention_blocks[level]:
+                         layers.append(
+                             AttentionBlock(
+                                 ch,
+                                 use_checkpoint=use_checkpoint,
+                                 num_heads=num_heads,
+                                 num_head_channels=dim_head,
+                                 use_new_attention_order=use_new_attention_order,
+                             ) if not use_spatial_transformer else SpatialTransformer(
+                                 ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
+                                 disable_self_attn=disabled_sa, use_linear=use_linear_in_transformer,
+                                 use_checkpoint=use_checkpoint
+                             )
+                         )
+                 self.input_blocks.append(TimestepEmbedSequential(*layers))
+                 self.zero_convs.append(self.make_zero_conv(ch))
+                 self._feature_size += ch
+                 input_block_chans.append(ch)
+             if level != len(channel_mult) - 1:
+                 out_ch = ch
+                 self.input_blocks.append(
+                     TimestepEmbedSequential(
+                         ResBlock(
+                             ch,
+                             time_embed_dim,
+                             dropout,
+                             out_channels=out_ch,
+                             dims=dims,
+                             use_checkpoint=use_checkpoint,
+                             use_scale_shift_norm=use_scale_shift_norm,
+                             down=True,
+                         )
+                         if resblock_updown
+                         else Downsample(
+                             ch, conv_resample, dims=dims, out_channels=out_ch
+                         )
+                     )
+                 )
+                 ch = out_ch
+                 input_block_chans.append(ch)
+                 self.zero_convs.append(self.make_zero_conv(ch))
+                 ds *= 2
+                 self._feature_size += ch
+
+         if num_head_channels == -1:
+             dim_head = ch // num_heads
+         else:
+             num_heads = ch // num_head_channels
+             dim_head = num_head_channels
+         if legacy:
+             #num_heads = 1
+             dim_head = ch // num_heads if use_spatial_transformer else num_head_channels
+         self.middle_block = TimestepEmbedSequential(
+             ResBlock(
+                 ch,
+                 time_embed_dim,
+                 dropout,
+                 dims=dims,
+                 use_checkpoint=use_checkpoint,
+                 use_scale_shift_norm=use_scale_shift_norm,
+             ),
+             AttentionBlock(
+                 ch,
+                 use_checkpoint=use_checkpoint,
+                 num_heads=num_heads,
+                 num_head_channels=dim_head,
+                 use_new_attention_order=use_new_attention_order,
+                 # always uses a self-attn
+             ) if not use_spatial_transformer else SpatialTransformer(
+                 ch, num_heads, dim_head, depth=transformer_depth, context_dim=context_dim,
+                 disable_self_attn=disable_middle_self_attn, use_linear=use_linear_in_transformer,
+                 use_checkpoint=use_checkpoint
+             ),
+             ResBlock(
+                 ch,
+                 time_embed_dim,
+                 dropout,
+                 dims=dims,
+                 use_checkpoint=use_checkpoint,
+                 use_scale_shift_norm=use_scale_shift_norm,
+             ),
+         )
+         self.middle_block_out = self.make_zero_conv(ch)
+         self._feature_size += ch
+
+     def make_zero_conv(self, channels):
+         return TimestepEmbedSequential(zero_module(conv_nd(self.dims, channels, channels, 1, padding=0)))
+
+     def align(self, hint, h, w):
+         c, h1, w1 = hint.shape
+         if h != h1 or w != w1:
+             hint = align(hint.unsqueeze(0), (h, w))
+             return hint.squeeze(0)
+         return hint
+
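+     # forward() embeds the hint, adds it to the first stage, and collects one
+     # zero-conv output per input block plus the middle-block output as residuals.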
+     def forward(self, x, hint, timesteps, context, **kwargs):
+         t_emb = timestep_embedding(
+             timesteps, self.model_channels, repeat_only=False)
+         emb = self.time_embed(t_emb)
+
+         guided_hint = self.input_hint_block(hint, emb, context)
+         outs = []
+
+         h1, w1 = x.shape[-2:]
+         guided_hint = self.align(guided_hint, h1, w1)
+
+         h = x.type(self.dtype)
+         for module, zero_conv in zip(self.input_blocks, self.zero_convs):
+             if guided_hint is not None:
+                 h = module(h, emb, context)
+                 h += guided_hint
+                 guided_hint = None
+             else:
+                 h = module(h, emb, context)
+             outs.append(zero_conv(h, emb, context))
+
+         h = self.middle_block(h, emb, context)
+         outs.append(self.middle_block_out(h, emb, context))
+
+         return outs