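"""VAE video decoder for Stable Video Diffusion (SVD).

Decodes latent videos into RGB frames, interleaving 2D spatial blocks with
3D temporal blocks. The state-dict converters map Diffusers and CivitAI
checkpoint layouts onto this module.
"""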
import torch
from einops import rearrange, repeat
from .attention import Attention
from .sd_unet import ResnetBlock, UpSampler
class VAEAttentionBlock(torch.nn.Module):
def __init__(self, num_attention_heads, attention_head_dim, in_channels, num_layers=1, norm_num_groups=32, eps=1e-5):
super().__init__()
inner_dim = num_attention_heads * attention_head_dim
self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=eps, affine=True)
self.transformer_blocks = torch.nn.ModuleList([
Attention(
inner_dim,
num_attention_heads,
attention_head_dim,
bias_q=True,
bias_kv=True,
bias_out=True
)
            for _ in range(num_layers)
])
    def forward(self, hidden_states, time_emb, text_emb, res_stack):
        batch, _, height, width = hidden_states.shape
        residual = hidden_states
        hidden_states = self.norm(hidden_states)
        inner_dim = hidden_states.shape[1]
        # Flatten the spatial grid into a token sequence: (B, C, H, W) -> (B, H*W, C).
        hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim)
        for block in self.transformer_blocks:
            hidden_states = block(hidden_states)
        # Restore the spatial layout and add the residual connection.
        hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous()
        hidden_states = hidden_states + residual
        # time_emb, text_emb and res_stack are passed through untouched so every
        # block in SVDVAEDecoder.blocks shares the same call signature.
        return hidden_states, time_emb, text_emb, res_stack
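# A minimal standalone sketch (not part of the model) of the reshape dance in
# VAEAttentionBlock.forward: feature maps become token sequences for attention
# and are then restored. The helper name is illustrative only.
def _attention_reshape_sketch(x: torch.Tensor) -> torch.Tensor:
    batch, channels, height, width = x.shape
    # (B, C, H, W) -> (B, H*W, C): one token per spatial location
    tokens = x.permute(0, 2, 3, 1).reshape(batch, height * width, channels)
    # ... attention layers would transform `tokens` here ...
    # (B, H*W, C) -> (B, C, H, W): restore the spatial grid
    return tokens.reshape(batch, height, width, channels).permute(0, 3, 1, 2).contiguous()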
class TemporalResnetBlock(torch.nn.Module):
    def __init__(self, in_channels, out_channels, groups=32, eps=1e-5):
        super().__init__()
        self.norm1 = torch.nn.GroupNorm(num_groups=groups, num_channels=in_channels, eps=eps, affine=True)
        # (3, 1, 1) kernels convolve along time only, leaving the spatial grid untouched.
        self.conv1 = torch.nn.Conv3d(in_channels, out_channels, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
        self.norm2 = torch.nn.GroupNorm(num_groups=groups, num_channels=out_channels, eps=eps, affine=True)
        self.conv2 = torch.nn.Conv3d(out_channels, out_channels, kernel_size=(3, 1, 1), stride=1, padding=(1, 0, 0))
        self.nonlinearity = torch.nn.SiLU()
        # Learnable scalar controlling the spatial/temporal blend (see forward).
        self.mix_factor = torch.nn.Parameter(torch.tensor([0.5]))
    def forward(self, hidden_states, time_emb, text_emb, res_stack, **kwargs):
        x_spatial = hidden_states
        # Treat the frame axis as the temporal axis of a single 3D volume:
        # (T, C, H, W) -> (1, C, T, H, W).
        x = rearrange(hidden_states, "T C H W -> 1 C T H W")
        x = self.norm1(x)
        x = self.nonlinearity(x)
        x = self.conv1(x)
        x = self.norm2(x)
        x = self.nonlinearity(x)
        x = self.conv2(x)
        x_temporal = hidden_states + x[0].permute(1, 0, 2, 3)
        # Blend the temporal branch with the untouched spatial features using a
        # sigmoid-squashed learnable mix factor.
        alpha = torch.sigmoid(self.mix_factor)
        hidden_states = alpha * x_temporal + (1 - alpha) * x_spatial
        return hidden_states, time_emb, text_emb, res_stack
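# A minimal sketch of the learned blend used by TemporalResnetBlock: a single
# scalar parameter squashed through a sigmoid keeps the mix inside (0, 1)
# while remaining freely trainable. Argument names are illustrative.
def _mix_sketch(x_temporal: torch.Tensor, x_spatial: torch.Tensor, mix_factor: torch.Tensor) -> torch.Tensor:
    alpha = torch.sigmoid(mix_factor)
    return alpha * x_temporal + (1 - alpha) * x_spatial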
class SVDVAEDecoder(torch.nn.Module):
def __init__(self):
super().__init__()
        self.scaling_factor = 0.18215  # SD latent scale; latents are divided by this in forward()
self.conv_in = torch.nn.Conv2d(4, 512, kernel_size=3, padding=1)
self.blocks = torch.nn.ModuleList([
# UNetMidBlock
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
VAEAttentionBlock(1, 512, 512, 1, eps=1e-6),
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
# UpDecoderBlock
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
UpSampler(512),
# UpDecoderBlock
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
ResnetBlock(512, 512, eps=1e-6),
TemporalResnetBlock(512, 512, eps=1e-6),
UpSampler(512),
# UpDecoderBlock
ResnetBlock(512, 256, eps=1e-6),
TemporalResnetBlock(256, 256, eps=1e-6),
ResnetBlock(256, 256, eps=1e-6),
TemporalResnetBlock(256, 256, eps=1e-6),
ResnetBlock(256, 256, eps=1e-6),
TemporalResnetBlock(256, 256, eps=1e-6),
UpSampler(256),
# UpDecoderBlock
ResnetBlock(256, 128, eps=1e-6),
TemporalResnetBlock(128, 128, eps=1e-6),
ResnetBlock(128, 128, eps=1e-6),
TemporalResnetBlock(128, 128, eps=1e-6),
ResnetBlock(128, 128, eps=1e-6),
TemporalResnetBlock(128, 128, eps=1e-6),
])
self.conv_norm_out = torch.nn.GroupNorm(num_channels=128, num_groups=32, eps=1e-5)
self.conv_act = torch.nn.SiLU()
self.conv_out = torch.nn.Conv2d(128, 3, kernel_size=3, padding=1)
self.time_conv_out = torch.nn.Conv3d(3, 3, kernel_size=(3, 1, 1), padding=(1, 0, 0))
    def forward(self, sample):
        # 1. pre-process: sample is a latent video of shape (C, T, H, W); the
        # frame axis becomes the batch axis so 2D layers process frames independently.
        hidden_states = rearrange(sample, "C T H W -> T C H W")
        hidden_states = hidden_states / self.scaling_factor
        hidden_states = self.conv_in(hidden_states)
        time_emb, text_emb, res_stack = None, None, None
# 2. blocks
for i, block in enumerate(self.blocks):
hidden_states, time_emb, text_emb, res_stack = block(hidden_states, time_emb, text_emb, res_stack)
        # 3. output
        hidden_states = self.conv_norm_out(hidden_states)
        hidden_states = self.conv_act(hidden_states)
        hidden_states = self.conv_out(hidden_states)
        # Back to (C, T, H, W), then a final time-only convolution smooths across frames.
        hidden_states = rearrange(hidden_states, "T C H W -> C T H W")
        hidden_states = self.time_conv_out(hidden_states)
        return hidden_states
    def build_mask(self, data, is_bound):
        # Blending weights for one decoded tile: weights ramp up linearly over
        # `border_width` voxels from each interior tile face, so overlapping
        # tiles cross-fade smoothly. Faces flagged in `is_bound` (t-low, t-high,
        # h-low, h-high, w-low, w-high) lie on the volume boundary and keep full weight.
        _, T, H, W = data.shape
        t = repeat(torch.arange(T), "T -> T H W", T=T, H=H, W=W)
        h = repeat(torch.arange(H), "H -> T H W", T=T, H=H, W=W)
        w = repeat(torch.arange(W), "W -> T H W", T=T, H=H, W=W)
        border_width = (T + H + W) // 6
        pad = torch.ones_like(t) * border_width
        mask = torch.stack([
            pad if is_bound[0] else t + 1,
            pad if is_bound[1] else T - t,
            pad if is_bound[2] else h + 1,
            pad if is_bound[3] else H - h,
            pad if is_bound[4] else w + 1,
            pad if is_bound[5] else W - w
        ]).min(dim=0).values
        mask = mask.clip(1, border_width)
        mask = (mask / border_width).to(dtype=data.dtype, device=data.device)
        mask = rearrange(mask, "T H W -> 1 T H W")
        return mask
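    # Worked example of the feathering above (a sketch of build_mask's output):
    # with border_width = 43, a voxel one step inside an interior tile face gets
    # weight 1/43, a voxel 43 or more steps inside gets weight 1, and each voxel
    # takes the minimum weight over all six faces, so overlapping tiles cross-fade.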
    def decode_video(
        self, sample,
        batch_time=8, batch_height=128, batch_width=128,
        stride_time=4, stride_height=32, stride_width=32,
        progress_bar=lambda x:x
    ):
        # Tiled decoding: split the (T, C, H, W) latent into overlapping
        # time/height/width tiles, decode each tile, and blend the results with
        # feathered masks so long, high-resolution videos fit in limited memory.
        sample = sample.permute(1, 0, 2, 3)  # (T, C, H, W) -> (C, T, H, W)
        data_device = sample.device
        computation_device = self.conv_in.weight.device  # model may sit on a different device than the data
        torch_dtype = sample.dtype
        _, T, H, W = sample.shape
        # The three UpSamplers give an 8x spatial upscale.
        weight = torch.zeros((1, T, H*8, W*8), dtype=torch_dtype, device=data_device)
        values = torch.zeros((3, T, H*8, W*8), dtype=torch_dtype, device=data_device)
        # Split tasks, skipping windows already fully covered by their predecessor
        tasks = []
        for t in range(0, T, stride_time):
            for h in range(0, H, stride_height):
                for w in range(0, W, stride_width):
                    if (t-stride_time >= 0 and t-stride_time+batch_time >= T)\
                        or (h-stride_height >= 0 and h-stride_height+batch_height >= H)\
                        or (w-stride_width >= 0 and w-stride_width+batch_width >= W):
                        continue
                    tasks.append((t, t+batch_time, h, h+batch_height, w, w+batch_width))
        # Run
        for tl, tr, hl, hr, wl, wr in progress_bar(tasks):
            sample_batch = sample[:, tl:tr, hl:hr, wl:wr].to(computation_device)
            sample_batch = self.forward(sample_batch).to(data_device)
            mask = self.build_mask(sample_batch, is_bound=(tl==0, tr>=T, hl==0, hr>=H, wl==0, wr>=W))
            values[:, tl:tr, hl*8:hr*8, wl*8:wr*8] += sample_batch * mask
            weight[:, tl:tr, hl*8:hr*8, wl*8:wr*8] += mask
        # Normalize by the accumulated mask weight to complete the weighted average.
        values /= weight
        return values
@staticmethod
def state_dict_converter():
return SVDVAEDecoderStateDictConverter()
class SVDVAEDecoderStateDictConverter:
def __init__(self):
pass
def from_diffusers(self, state_dict):
static_rename_dict = {
"decoder.conv_in": "conv_in",
"decoder.mid_block.attentions.0.group_norm": "blocks.2.norm",
"decoder.mid_block.attentions.0.to_q": "blocks.2.transformer_blocks.0.to_q",
"decoder.mid_block.attentions.0.to_k": "blocks.2.transformer_blocks.0.to_k",
"decoder.mid_block.attentions.0.to_v": "blocks.2.transformer_blocks.0.to_v",
"decoder.mid_block.attentions.0.to_out.0": "blocks.2.transformer_blocks.0.to_out",
"decoder.up_blocks.0.upsamplers.0.conv": "blocks.11.conv",
"decoder.up_blocks.1.upsamplers.0.conv": "blocks.18.conv",
"decoder.up_blocks.2.upsamplers.0.conv": "blocks.25.conv",
"decoder.conv_norm_out": "conv_norm_out",
"decoder.conv_out": "conv_out",
"decoder.time_conv_out": "time_conv_out"
}
prefix_rename_dict = {
"decoder.mid_block.resnets.0.spatial_res_block": "blocks.0",
"decoder.mid_block.resnets.0.temporal_res_block": "blocks.1",
"decoder.mid_block.resnets.0.time_mixer": "blocks.1",
"decoder.mid_block.resnets.1.spatial_res_block": "blocks.3",
"decoder.mid_block.resnets.1.temporal_res_block": "blocks.4",
"decoder.mid_block.resnets.1.time_mixer": "blocks.4",
"decoder.up_blocks.0.resnets.0.spatial_res_block": "blocks.5",
"decoder.up_blocks.0.resnets.0.temporal_res_block": "blocks.6",
"decoder.up_blocks.0.resnets.0.time_mixer": "blocks.6",
"decoder.up_blocks.0.resnets.1.spatial_res_block": "blocks.7",
"decoder.up_blocks.0.resnets.1.temporal_res_block": "blocks.8",
"decoder.up_blocks.0.resnets.1.time_mixer": "blocks.8",
"decoder.up_blocks.0.resnets.2.spatial_res_block": "blocks.9",
"decoder.up_blocks.0.resnets.2.temporal_res_block": "blocks.10",
"decoder.up_blocks.0.resnets.2.time_mixer": "blocks.10",
"decoder.up_blocks.1.resnets.0.spatial_res_block": "blocks.12",
"decoder.up_blocks.1.resnets.0.temporal_res_block": "blocks.13",
"decoder.up_blocks.1.resnets.0.time_mixer": "blocks.13",
"decoder.up_blocks.1.resnets.1.spatial_res_block": "blocks.14",
"decoder.up_blocks.1.resnets.1.temporal_res_block": "blocks.15",
"decoder.up_blocks.1.resnets.1.time_mixer": "blocks.15",
"decoder.up_blocks.1.resnets.2.spatial_res_block": "blocks.16",
"decoder.up_blocks.1.resnets.2.temporal_res_block": "blocks.17",
"decoder.up_blocks.1.resnets.2.time_mixer": "blocks.17",
"decoder.up_blocks.2.resnets.0.spatial_res_block": "blocks.19",
"decoder.up_blocks.2.resnets.0.temporal_res_block": "blocks.20",
"decoder.up_blocks.2.resnets.0.time_mixer": "blocks.20",
"decoder.up_blocks.2.resnets.1.spatial_res_block": "blocks.21",
"decoder.up_blocks.2.resnets.1.temporal_res_block": "blocks.22",
"decoder.up_blocks.2.resnets.1.time_mixer": "blocks.22",
"decoder.up_blocks.2.resnets.2.spatial_res_block": "blocks.23",
"decoder.up_blocks.2.resnets.2.temporal_res_block": "blocks.24",
"decoder.up_blocks.2.resnets.2.time_mixer": "blocks.24",
"decoder.up_blocks.3.resnets.0.spatial_res_block": "blocks.26",
"decoder.up_blocks.3.resnets.0.temporal_res_block": "blocks.27",
"decoder.up_blocks.3.resnets.0.time_mixer": "blocks.27",
"decoder.up_blocks.3.resnets.1.spatial_res_block": "blocks.28",
"decoder.up_blocks.3.resnets.1.temporal_res_block": "blocks.29",
"decoder.up_blocks.3.resnets.1.time_mixer": "blocks.29",
"decoder.up_blocks.3.resnets.2.spatial_res_block": "blocks.30",
"decoder.up_blocks.3.resnets.2.temporal_res_block": "blocks.31",
"decoder.up_blocks.3.resnets.2.time_mixer": "blocks.31",
}
        # Suffix names are identical on both sides; this dict just enumerates
        # which per-block parameters to copy.
        suffix_rename_dict = {
"norm1.weight": "norm1.weight",
"conv1.weight": "conv1.weight",
"norm2.weight": "norm2.weight",
"conv2.weight": "conv2.weight",
"conv_shortcut.weight": "conv_shortcut.weight",
"norm1.bias": "norm1.bias",
"conv1.bias": "conv1.bias",
"norm2.bias": "norm2.bias",
"conv2.bias": "conv2.bias",
"conv_shortcut.bias": "conv_shortcut.bias",
"mix_factor": "mix_factor",
}
state_dict_ = {}
for name in static_rename_dict:
state_dict_[static_rename_dict[name] + ".weight"] = state_dict[name + ".weight"]
state_dict_[static_rename_dict[name] + ".bias"] = state_dict[name + ".bias"]
for prefix_name in prefix_rename_dict:
for suffix_name in suffix_rename_dict:
name = prefix_name + "." + suffix_name
name_ = prefix_rename_dict[prefix_name] + "." + suffix_rename_dict[suffix_name]
if name in state_dict:
state_dict_[name_] = state_dict[name]
return state_dict_
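    # Hypothetical usage sketch (variable names are illustrative, not a fixed API):
    #   decoder = SVDVAEDecoder()
    #   converter = SVDVAEDecoder.state_dict_converter()
    #   decoder.load_state_dict(converter.from_diffusers(diffusers_state_dict))
    # where `diffusers_state_dict` holds VAE weights keyed with the "decoder." prefix.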
def from_civitai(self, state_dict):
rename_dict = {
"first_stage_model.decoder.conv_in.bias": "conv_in.bias",
"first_stage_model.decoder.conv_in.weight": "conv_in.weight",
"first_stage_model.decoder.conv_out.bias": "conv_out.bias",
"first_stage_model.decoder.conv_out.time_mix_conv.bias": "time_conv_out.bias",
"first_stage_model.decoder.conv_out.time_mix_conv.weight": "time_conv_out.weight",
"first_stage_model.decoder.conv_out.weight": "conv_out.weight",
"first_stage_model.decoder.mid.attn_1.k.bias": "blocks.2.transformer_blocks.0.to_k.bias",
"first_stage_model.decoder.mid.attn_1.k.weight": "blocks.2.transformer_blocks.0.to_k.weight",
"first_stage_model.decoder.mid.attn_1.norm.bias": "blocks.2.norm.bias",
"first_stage_model.decoder.mid.attn_1.norm.weight": "blocks.2.norm.weight",
"first_stage_model.decoder.mid.attn_1.proj_out.bias": "blocks.2.transformer_blocks.0.to_out.bias",
"first_stage_model.decoder.mid.attn_1.proj_out.weight": "blocks.2.transformer_blocks.0.to_out.weight",
"first_stage_model.decoder.mid.attn_1.q.bias": "blocks.2.transformer_blocks.0.to_q.bias",
"first_stage_model.decoder.mid.attn_1.q.weight": "blocks.2.transformer_blocks.0.to_q.weight",
"first_stage_model.decoder.mid.attn_1.v.bias": "blocks.2.transformer_blocks.0.to_v.bias",
"first_stage_model.decoder.mid.attn_1.v.weight": "blocks.2.transformer_blocks.0.to_v.weight",
"first_stage_model.decoder.mid.block_1.conv1.bias": "blocks.0.conv1.bias",
"first_stage_model.decoder.mid.block_1.conv1.weight": "blocks.0.conv1.weight",
"first_stage_model.decoder.mid.block_1.conv2.bias": "blocks.0.conv2.bias",
"first_stage_model.decoder.mid.block_1.conv2.weight": "blocks.0.conv2.weight",
"first_stage_model.decoder.mid.block_1.mix_factor": "blocks.1.mix_factor",
"first_stage_model.decoder.mid.block_1.norm1.bias": "blocks.0.norm1.bias",
"first_stage_model.decoder.mid.block_1.norm1.weight": "blocks.0.norm1.weight",
"first_stage_model.decoder.mid.block_1.norm2.bias": "blocks.0.norm2.bias",
"first_stage_model.decoder.mid.block_1.norm2.weight": "blocks.0.norm2.weight",
"first_stage_model.decoder.mid.block_1.time_stack.in_layers.0.bias": "blocks.1.norm1.bias",
"first_stage_model.decoder.mid.block_1.time_stack.in_layers.0.weight": "blocks.1.norm1.weight",
"first_stage_model.decoder.mid.block_1.time_stack.in_layers.2.bias": "blocks.1.conv1.bias",
"first_stage_model.decoder.mid.block_1.time_stack.in_layers.2.weight": "blocks.1.conv1.weight",
"first_stage_model.decoder.mid.block_1.time_stack.out_layers.0.bias": "blocks.1.norm2.bias",
"first_stage_model.decoder.mid.block_1.time_stack.out_layers.0.weight": "blocks.1.norm2.weight",
"first_stage_model.decoder.mid.block_1.time_stack.out_layers.3.bias": "blocks.1.conv2.bias",
"first_stage_model.decoder.mid.block_1.time_stack.out_layers.3.weight": "blocks.1.conv2.weight",
"first_stage_model.decoder.mid.block_2.conv1.bias": "blocks.3.conv1.bias",
"first_stage_model.decoder.mid.block_2.conv1.weight": "blocks.3.conv1.weight",
"first_stage_model.decoder.mid.block_2.conv2.bias": "blocks.3.conv2.bias",
"first_stage_model.decoder.mid.block_2.conv2.weight": "blocks.3.conv2.weight",
"first_stage_model.decoder.mid.block_2.mix_factor": "blocks.4.mix_factor",
"first_stage_model.decoder.mid.block_2.norm1.bias": "blocks.3.norm1.bias",
"first_stage_model.decoder.mid.block_2.norm1.weight": "blocks.3.norm1.weight",
"first_stage_model.decoder.mid.block_2.norm2.bias": "blocks.3.norm2.bias",
"first_stage_model.decoder.mid.block_2.norm2.weight": "blocks.3.norm2.weight",
"first_stage_model.decoder.mid.block_2.time_stack.in_layers.0.bias": "blocks.4.norm1.bias",
"first_stage_model.decoder.mid.block_2.time_stack.in_layers.0.weight": "blocks.4.norm1.weight",
"first_stage_model.decoder.mid.block_2.time_stack.in_layers.2.bias": "blocks.4.conv1.bias",
"first_stage_model.decoder.mid.block_2.time_stack.in_layers.2.weight": "blocks.4.conv1.weight",
"first_stage_model.decoder.mid.block_2.time_stack.out_layers.0.bias": "blocks.4.norm2.bias",
"first_stage_model.decoder.mid.block_2.time_stack.out_layers.0.weight": "blocks.4.norm2.weight",
"first_stage_model.decoder.mid.block_2.time_stack.out_layers.3.bias": "blocks.4.conv2.bias",
"first_stage_model.decoder.mid.block_2.time_stack.out_layers.3.weight": "blocks.4.conv2.weight",
"first_stage_model.decoder.norm_out.bias": "conv_norm_out.bias",
"first_stage_model.decoder.norm_out.weight": "conv_norm_out.weight",
"first_stage_model.decoder.up.0.block.0.conv1.bias": "blocks.26.conv1.bias",
"first_stage_model.decoder.up.0.block.0.conv1.weight": "blocks.26.conv1.weight",
"first_stage_model.decoder.up.0.block.0.conv2.bias": "blocks.26.conv2.bias",
"first_stage_model.decoder.up.0.block.0.conv2.weight": "blocks.26.conv2.weight",
"first_stage_model.decoder.up.0.block.0.mix_factor": "blocks.27.mix_factor",
"first_stage_model.decoder.up.0.block.0.nin_shortcut.bias": "blocks.26.conv_shortcut.bias",
"first_stage_model.decoder.up.0.block.0.nin_shortcut.weight": "blocks.26.conv_shortcut.weight",
"first_stage_model.decoder.up.0.block.0.norm1.bias": "blocks.26.norm1.bias",
"first_stage_model.decoder.up.0.block.0.norm1.weight": "blocks.26.norm1.weight",
"first_stage_model.decoder.up.0.block.0.norm2.bias": "blocks.26.norm2.bias",
"first_stage_model.decoder.up.0.block.0.norm2.weight": "blocks.26.norm2.weight",
"first_stage_model.decoder.up.0.block.0.time_stack.in_layers.0.bias": "blocks.27.norm1.bias",
"first_stage_model.decoder.up.0.block.0.time_stack.in_layers.0.weight": "blocks.27.norm1.weight",
"first_stage_model.decoder.up.0.block.0.time_stack.in_layers.2.bias": "blocks.27.conv1.bias",
"first_stage_model.decoder.up.0.block.0.time_stack.in_layers.2.weight": "blocks.27.conv1.weight",
"first_stage_model.decoder.up.0.block.0.time_stack.out_layers.0.bias": "blocks.27.norm2.bias",
"first_stage_model.decoder.up.0.block.0.time_stack.out_layers.0.weight": "blocks.27.norm2.weight",
"first_stage_model.decoder.up.0.block.0.time_stack.out_layers.3.bias": "blocks.27.conv2.bias",
"first_stage_model.decoder.up.0.block.0.time_stack.out_layers.3.weight": "blocks.27.conv2.weight",
"first_stage_model.decoder.up.0.block.1.conv1.bias": "blocks.28.conv1.bias",
"first_stage_model.decoder.up.0.block.1.conv1.weight": "blocks.28.conv1.weight",
"first_stage_model.decoder.up.0.block.1.conv2.bias": "blocks.28.conv2.bias",
"first_stage_model.decoder.up.0.block.1.conv2.weight": "blocks.28.conv2.weight",
"first_stage_model.decoder.up.0.block.1.mix_factor": "blocks.29.mix_factor",
"first_stage_model.decoder.up.0.block.1.norm1.bias": "blocks.28.norm1.bias",
"first_stage_model.decoder.up.0.block.1.norm1.weight": "blocks.28.norm1.weight",
"first_stage_model.decoder.up.0.block.1.norm2.bias": "blocks.28.norm2.bias",
"first_stage_model.decoder.up.0.block.1.norm2.weight": "blocks.28.norm2.weight",
"first_stage_model.decoder.up.0.block.1.time_stack.in_layers.0.bias": "blocks.29.norm1.bias",
"first_stage_model.decoder.up.0.block.1.time_stack.in_layers.0.weight": "blocks.29.norm1.weight",
"first_stage_model.decoder.up.0.block.1.time_stack.in_layers.2.bias": "blocks.29.conv1.bias",
"first_stage_model.decoder.up.0.block.1.time_stack.in_layers.2.weight": "blocks.29.conv1.weight",
"first_stage_model.decoder.up.0.block.1.time_stack.out_layers.0.bias": "blocks.29.norm2.bias",
"first_stage_model.decoder.up.0.block.1.time_stack.out_layers.0.weight": "blocks.29.norm2.weight",
"first_stage_model.decoder.up.0.block.1.time_stack.out_layers.3.bias": "blocks.29.conv2.bias",
"first_stage_model.decoder.up.0.block.1.time_stack.out_layers.3.weight": "blocks.29.conv2.weight",
"first_stage_model.decoder.up.0.block.2.conv1.bias": "blocks.30.conv1.bias",
"first_stage_model.decoder.up.0.block.2.conv1.weight": "blocks.30.conv1.weight",
"first_stage_model.decoder.up.0.block.2.conv2.bias": "blocks.30.conv2.bias",
"first_stage_model.decoder.up.0.block.2.conv2.weight": "blocks.30.conv2.weight",
"first_stage_model.decoder.up.0.block.2.mix_factor": "blocks.31.mix_factor",
"first_stage_model.decoder.up.0.block.2.norm1.bias": "blocks.30.norm1.bias",
"first_stage_model.decoder.up.0.block.2.norm1.weight": "blocks.30.norm1.weight",
"first_stage_model.decoder.up.0.block.2.norm2.bias": "blocks.30.norm2.bias",
"first_stage_model.decoder.up.0.block.2.norm2.weight": "blocks.30.norm2.weight",
"first_stage_model.decoder.up.0.block.2.time_stack.in_layers.0.bias": "blocks.31.norm1.bias",
"first_stage_model.decoder.up.0.block.2.time_stack.in_layers.0.weight": "blocks.31.norm1.weight",
"first_stage_model.decoder.up.0.block.2.time_stack.in_layers.2.bias": "blocks.31.conv1.bias",
"first_stage_model.decoder.up.0.block.2.time_stack.in_layers.2.weight": "blocks.31.conv1.weight",
"first_stage_model.decoder.up.0.block.2.time_stack.out_layers.0.bias": "blocks.31.norm2.bias",
"first_stage_model.decoder.up.0.block.2.time_stack.out_layers.0.weight": "blocks.31.norm2.weight",
"first_stage_model.decoder.up.0.block.2.time_stack.out_layers.3.bias": "blocks.31.conv2.bias",
"first_stage_model.decoder.up.0.block.2.time_stack.out_layers.3.weight": "blocks.31.conv2.weight",
"first_stage_model.decoder.up.1.block.0.conv1.bias": "blocks.19.conv1.bias",
"first_stage_model.decoder.up.1.block.0.conv1.weight": "blocks.19.conv1.weight",
"first_stage_model.decoder.up.1.block.0.conv2.bias": "blocks.19.conv2.bias",
"first_stage_model.decoder.up.1.block.0.conv2.weight": "blocks.19.conv2.weight",
"first_stage_model.decoder.up.1.block.0.mix_factor": "blocks.20.mix_factor",
"first_stage_model.decoder.up.1.block.0.nin_shortcut.bias": "blocks.19.conv_shortcut.bias",
"first_stage_model.decoder.up.1.block.0.nin_shortcut.weight": "blocks.19.conv_shortcut.weight",
"first_stage_model.decoder.up.1.block.0.norm1.bias": "blocks.19.norm1.bias",
"first_stage_model.decoder.up.1.block.0.norm1.weight": "blocks.19.norm1.weight",
"first_stage_model.decoder.up.1.block.0.norm2.bias": "blocks.19.norm2.bias",
"first_stage_model.decoder.up.1.block.0.norm2.weight": "blocks.19.norm2.weight",
"first_stage_model.decoder.up.1.block.0.time_stack.in_layers.0.bias": "blocks.20.norm1.bias",
"first_stage_model.decoder.up.1.block.0.time_stack.in_layers.0.weight": "blocks.20.norm1.weight",
"first_stage_model.decoder.up.1.block.0.time_stack.in_layers.2.bias": "blocks.20.conv1.bias",
"first_stage_model.decoder.up.1.block.0.time_stack.in_layers.2.weight": "blocks.20.conv1.weight",
"first_stage_model.decoder.up.1.block.0.time_stack.out_layers.0.bias": "blocks.20.norm2.bias",
"first_stage_model.decoder.up.1.block.0.time_stack.out_layers.0.weight": "blocks.20.norm2.weight",
"first_stage_model.decoder.up.1.block.0.time_stack.out_layers.3.bias": "blocks.20.conv2.bias",
"first_stage_model.decoder.up.1.block.0.time_stack.out_layers.3.weight": "blocks.20.conv2.weight",
"first_stage_model.decoder.up.1.block.1.conv1.bias": "blocks.21.conv1.bias",
"first_stage_model.decoder.up.1.block.1.conv1.weight": "blocks.21.conv1.weight",
"first_stage_model.decoder.up.1.block.1.conv2.bias": "blocks.21.conv2.bias",
"first_stage_model.decoder.up.1.block.1.conv2.weight": "blocks.21.conv2.weight",
"first_stage_model.decoder.up.1.block.1.mix_factor": "blocks.22.mix_factor",
"first_stage_model.decoder.up.1.block.1.norm1.bias": "blocks.21.norm1.bias",
"first_stage_model.decoder.up.1.block.1.norm1.weight": "blocks.21.norm1.weight",
"first_stage_model.decoder.up.1.block.1.norm2.bias": "blocks.21.norm2.bias",
"first_stage_model.decoder.up.1.block.1.norm2.weight": "blocks.21.norm2.weight",
"first_stage_model.decoder.up.1.block.1.time_stack.in_layers.0.bias": "blocks.22.norm1.bias",
"first_stage_model.decoder.up.1.block.1.time_stack.in_layers.0.weight": "blocks.22.norm1.weight",
"first_stage_model.decoder.up.1.block.1.time_stack.in_layers.2.bias": "blocks.22.conv1.bias",
"first_stage_model.decoder.up.1.block.1.time_stack.in_layers.2.weight": "blocks.22.conv1.weight",
"first_stage_model.decoder.up.1.block.1.time_stack.out_layers.0.bias": "blocks.22.norm2.bias",
"first_stage_model.decoder.up.1.block.1.time_stack.out_layers.0.weight": "blocks.22.norm2.weight",
"first_stage_model.decoder.up.1.block.1.time_stack.out_layers.3.bias": "blocks.22.conv2.bias",
"first_stage_model.decoder.up.1.block.1.time_stack.out_layers.3.weight": "blocks.22.conv2.weight",
"first_stage_model.decoder.up.1.block.2.conv1.bias": "blocks.23.conv1.bias",
"first_stage_model.decoder.up.1.block.2.conv1.weight": "blocks.23.conv1.weight",
"first_stage_model.decoder.up.1.block.2.conv2.bias": "blocks.23.conv2.bias",
"first_stage_model.decoder.up.1.block.2.conv2.weight": "blocks.23.conv2.weight",
"first_stage_model.decoder.up.1.block.2.mix_factor": "blocks.24.mix_factor",
"first_stage_model.decoder.up.1.block.2.norm1.bias": "blocks.23.norm1.bias",
"first_stage_model.decoder.up.1.block.2.norm1.weight": "blocks.23.norm1.weight",
"first_stage_model.decoder.up.1.block.2.norm2.bias": "blocks.23.norm2.bias",
"first_stage_model.decoder.up.1.block.2.norm2.weight": "blocks.23.norm2.weight",
"first_stage_model.decoder.up.1.block.2.time_stack.in_layers.0.bias": "blocks.24.norm1.bias",
"first_stage_model.decoder.up.1.block.2.time_stack.in_layers.0.weight": "blocks.24.norm1.weight",
"first_stage_model.decoder.up.1.block.2.time_stack.in_layers.2.bias": "blocks.24.conv1.bias",
"first_stage_model.decoder.up.1.block.2.time_stack.in_layers.2.weight": "blocks.24.conv1.weight",
"first_stage_model.decoder.up.1.block.2.time_stack.out_layers.0.bias": "blocks.24.norm2.bias",
"first_stage_model.decoder.up.1.block.2.time_stack.out_layers.0.weight": "blocks.24.norm2.weight",
"first_stage_model.decoder.up.1.block.2.time_stack.out_layers.3.bias": "blocks.24.conv2.bias",
"first_stage_model.decoder.up.1.block.2.time_stack.out_layers.3.weight": "blocks.24.conv2.weight",
"first_stage_model.decoder.up.1.upsample.conv.bias": "blocks.25.conv.bias",
"first_stage_model.decoder.up.1.upsample.conv.weight": "blocks.25.conv.weight",
"first_stage_model.decoder.up.2.block.0.conv1.bias": "blocks.12.conv1.bias",
"first_stage_model.decoder.up.2.block.0.conv1.weight": "blocks.12.conv1.weight",
"first_stage_model.decoder.up.2.block.0.conv2.bias": "blocks.12.conv2.bias",
"first_stage_model.decoder.up.2.block.0.conv2.weight": "blocks.12.conv2.weight",
"first_stage_model.decoder.up.2.block.0.mix_factor": "blocks.13.mix_factor",
"first_stage_model.decoder.up.2.block.0.norm1.bias": "blocks.12.norm1.bias",
"first_stage_model.decoder.up.2.block.0.norm1.weight": "blocks.12.norm1.weight",
"first_stage_model.decoder.up.2.block.0.norm2.bias": "blocks.12.norm2.bias",
"first_stage_model.decoder.up.2.block.0.norm2.weight": "blocks.12.norm2.weight",
"first_stage_model.decoder.up.2.block.0.time_stack.in_layers.0.bias": "blocks.13.norm1.bias",
"first_stage_model.decoder.up.2.block.0.time_stack.in_layers.0.weight": "blocks.13.norm1.weight",
"first_stage_model.decoder.up.2.block.0.time_stack.in_layers.2.bias": "blocks.13.conv1.bias",
"first_stage_model.decoder.up.2.block.0.time_stack.in_layers.2.weight": "blocks.13.conv1.weight",
"first_stage_model.decoder.up.2.block.0.time_stack.out_layers.0.bias": "blocks.13.norm2.bias",
"first_stage_model.decoder.up.2.block.0.time_stack.out_layers.0.weight": "blocks.13.norm2.weight",
"first_stage_model.decoder.up.2.block.0.time_stack.out_layers.3.bias": "blocks.13.conv2.bias",
"first_stage_model.decoder.up.2.block.0.time_stack.out_layers.3.weight": "blocks.13.conv2.weight",
"first_stage_model.decoder.up.2.block.1.conv1.bias": "blocks.14.conv1.bias",
"first_stage_model.decoder.up.2.block.1.conv1.weight": "blocks.14.conv1.weight",
"first_stage_model.decoder.up.2.block.1.conv2.bias": "blocks.14.conv2.bias",
"first_stage_model.decoder.up.2.block.1.conv2.weight": "blocks.14.conv2.weight",
"first_stage_model.decoder.up.2.block.1.mix_factor": "blocks.15.mix_factor",
"first_stage_model.decoder.up.2.block.1.norm1.bias": "blocks.14.norm1.bias",
"first_stage_model.decoder.up.2.block.1.norm1.weight": "blocks.14.norm1.weight",
"first_stage_model.decoder.up.2.block.1.norm2.bias": "blocks.14.norm2.bias",
"first_stage_model.decoder.up.2.block.1.norm2.weight": "blocks.14.norm2.weight",
"first_stage_model.decoder.up.2.block.1.time_stack.in_layers.0.bias": "blocks.15.norm1.bias",
"first_stage_model.decoder.up.2.block.1.time_stack.in_layers.0.weight": "blocks.15.norm1.weight",
"first_stage_model.decoder.up.2.block.1.time_stack.in_layers.2.bias": "blocks.15.conv1.bias",
"first_stage_model.decoder.up.2.block.1.time_stack.in_layers.2.weight": "blocks.15.conv1.weight",
"first_stage_model.decoder.up.2.block.1.time_stack.out_layers.0.bias": "blocks.15.norm2.bias",
"first_stage_model.decoder.up.2.block.1.time_stack.out_layers.0.weight": "blocks.15.norm2.weight",
"first_stage_model.decoder.up.2.block.1.time_stack.out_layers.3.bias": "blocks.15.conv2.bias",
"first_stage_model.decoder.up.2.block.1.time_stack.out_layers.3.weight": "blocks.15.conv2.weight",
"first_stage_model.decoder.up.2.block.2.conv1.bias": "blocks.16.conv1.bias",
"first_stage_model.decoder.up.2.block.2.conv1.weight": "blocks.16.conv1.weight",
"first_stage_model.decoder.up.2.block.2.conv2.bias": "blocks.16.conv2.bias",
"first_stage_model.decoder.up.2.block.2.conv2.weight": "blocks.16.conv2.weight",
"first_stage_model.decoder.up.2.block.2.mix_factor": "blocks.17.mix_factor",
"first_stage_model.decoder.up.2.block.2.norm1.bias": "blocks.16.norm1.bias",
"first_stage_model.decoder.up.2.block.2.norm1.weight": "blocks.16.norm1.weight",
"first_stage_model.decoder.up.2.block.2.norm2.bias": "blocks.16.norm2.bias",
"first_stage_model.decoder.up.2.block.2.norm2.weight": "blocks.16.norm2.weight",
"first_stage_model.decoder.up.2.block.2.time_stack.in_layers.0.bias": "blocks.17.norm1.bias",
"first_stage_model.decoder.up.2.block.2.time_stack.in_layers.0.weight": "blocks.17.norm1.weight",
"first_stage_model.decoder.up.2.block.2.time_stack.in_layers.2.bias": "blocks.17.conv1.bias",
"first_stage_model.decoder.up.2.block.2.time_stack.in_layers.2.weight": "blocks.17.conv1.weight",
"first_stage_model.decoder.up.2.block.2.time_stack.out_layers.0.bias": "blocks.17.norm2.bias",
"first_stage_model.decoder.up.2.block.2.time_stack.out_layers.0.weight": "blocks.17.norm2.weight",
"first_stage_model.decoder.up.2.block.2.time_stack.out_layers.3.bias": "blocks.17.conv2.bias",
"first_stage_model.decoder.up.2.block.2.time_stack.out_layers.3.weight": "blocks.17.conv2.weight",
"first_stage_model.decoder.up.2.upsample.conv.bias": "blocks.18.conv.bias",
"first_stage_model.decoder.up.2.upsample.conv.weight": "blocks.18.conv.weight",
"first_stage_model.decoder.up.3.block.0.conv1.bias": "blocks.5.conv1.bias",
"first_stage_model.decoder.up.3.block.0.conv1.weight": "blocks.5.conv1.weight",
"first_stage_model.decoder.up.3.block.0.conv2.bias": "blocks.5.conv2.bias",
"first_stage_model.decoder.up.3.block.0.conv2.weight": "blocks.5.conv2.weight",
"first_stage_model.decoder.up.3.block.0.mix_factor": "blocks.6.mix_factor",
"first_stage_model.decoder.up.3.block.0.norm1.bias": "blocks.5.norm1.bias",
"first_stage_model.decoder.up.3.block.0.norm1.weight": "blocks.5.norm1.weight",
"first_stage_model.decoder.up.3.block.0.norm2.bias": "blocks.5.norm2.bias",
"first_stage_model.decoder.up.3.block.0.norm2.weight": "blocks.5.norm2.weight",
"first_stage_model.decoder.up.3.block.0.time_stack.in_layers.0.bias": "blocks.6.norm1.bias",
"first_stage_model.decoder.up.3.block.0.time_stack.in_layers.0.weight": "blocks.6.norm1.weight",
"first_stage_model.decoder.up.3.block.0.time_stack.in_layers.2.bias": "blocks.6.conv1.bias",
"first_stage_model.decoder.up.3.block.0.time_stack.in_layers.2.weight": "blocks.6.conv1.weight",
"first_stage_model.decoder.up.3.block.0.time_stack.out_layers.0.bias": "blocks.6.norm2.bias",
"first_stage_model.decoder.up.3.block.0.time_stack.out_layers.0.weight": "blocks.6.norm2.weight",
"first_stage_model.decoder.up.3.block.0.time_stack.out_layers.3.bias": "blocks.6.conv2.bias",
"first_stage_model.decoder.up.3.block.0.time_stack.out_layers.3.weight": "blocks.6.conv2.weight",
"first_stage_model.decoder.up.3.block.1.conv1.bias": "blocks.7.conv1.bias",
"first_stage_model.decoder.up.3.block.1.conv1.weight": "blocks.7.conv1.weight",
"first_stage_model.decoder.up.3.block.1.conv2.bias": "blocks.7.conv2.bias",
"first_stage_model.decoder.up.3.block.1.conv2.weight": "blocks.7.conv2.weight",
"first_stage_model.decoder.up.3.block.1.mix_factor": "blocks.8.mix_factor",
"first_stage_model.decoder.up.3.block.1.norm1.bias": "blocks.7.norm1.bias",
"first_stage_model.decoder.up.3.block.1.norm1.weight": "blocks.7.norm1.weight",
"first_stage_model.decoder.up.3.block.1.norm2.bias": "blocks.7.norm2.bias",
"first_stage_model.decoder.up.3.block.1.norm2.weight": "blocks.7.norm2.weight",
"first_stage_model.decoder.up.3.block.1.time_stack.in_layers.0.bias": "blocks.8.norm1.bias",
"first_stage_model.decoder.up.3.block.1.time_stack.in_layers.0.weight": "blocks.8.norm1.weight",
"first_stage_model.decoder.up.3.block.1.time_stack.in_layers.2.bias": "blocks.8.conv1.bias",
"first_stage_model.decoder.up.3.block.1.time_stack.in_layers.2.weight": "blocks.8.conv1.weight",
"first_stage_model.decoder.up.3.block.1.time_stack.out_layers.0.bias": "blocks.8.norm2.bias",
"first_stage_model.decoder.up.3.block.1.time_stack.out_layers.0.weight": "blocks.8.norm2.weight",
"first_stage_model.decoder.up.3.block.1.time_stack.out_layers.3.bias": "blocks.8.conv2.bias",
"first_stage_model.decoder.up.3.block.1.time_stack.out_layers.3.weight": "blocks.8.conv2.weight",
"first_stage_model.decoder.up.3.block.2.conv1.bias": "blocks.9.conv1.bias",
"first_stage_model.decoder.up.3.block.2.conv1.weight": "blocks.9.conv1.weight",
"first_stage_model.decoder.up.3.block.2.conv2.bias": "blocks.9.conv2.bias",
"first_stage_model.decoder.up.3.block.2.conv2.weight": "blocks.9.conv2.weight",
"first_stage_model.decoder.up.3.block.2.mix_factor": "blocks.10.mix_factor",
"first_stage_model.decoder.up.3.block.2.norm1.bias": "blocks.9.norm1.bias",
"first_stage_model.decoder.up.3.block.2.norm1.weight": "blocks.9.norm1.weight",
"first_stage_model.decoder.up.3.block.2.norm2.bias": "blocks.9.norm2.bias",
"first_stage_model.decoder.up.3.block.2.norm2.weight": "blocks.9.norm2.weight",
"first_stage_model.decoder.up.3.block.2.time_stack.in_layers.0.bias": "blocks.10.norm1.bias",
"first_stage_model.decoder.up.3.block.2.time_stack.in_layers.0.weight": "blocks.10.norm1.weight",
"first_stage_model.decoder.up.3.block.2.time_stack.in_layers.2.bias": "blocks.10.conv1.bias",
"first_stage_model.decoder.up.3.block.2.time_stack.in_layers.2.weight": "blocks.10.conv1.weight",
"first_stage_model.decoder.up.3.block.2.time_stack.out_layers.0.bias": "blocks.10.norm2.bias",
"first_stage_model.decoder.up.3.block.2.time_stack.out_layers.0.weight": "blocks.10.norm2.weight",
"first_stage_model.decoder.up.3.block.2.time_stack.out_layers.3.bias": "blocks.10.conv2.bias",
"first_stage_model.decoder.up.3.block.2.time_stack.out_layers.3.weight": "blocks.10.conv2.weight",
"first_stage_model.decoder.up.3.upsample.conv.bias": "blocks.11.conv.bias",
"first_stage_model.decoder.up.3.upsample.conv.weight": "blocks.11.conv.weight",
}
        state_dict_ = {}
        for name in state_dict:
            if name in rename_dict:
                param = state_dict[name]
                # These checkpoints store the attention q/k/v/out projections as
                # 1x1 convolutions with shape (C, C, 1, 1); squeeze the trailing
                # singleton dims to get the (C, C) linear weights this module expects.
                if "blocks.2.transformer_blocks.0" in rename_dict[name]:
                    param = param.squeeze()
                state_dict_[rename_dict[name]] = param
        return state_dict_
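if __name__ == "__main__":
    # Smoke test (a sketch, not part of the library): decode a tiny random
    # latent. The relative imports require running in package context, e.g.
    # `python -m <package>.svd_vae_decoder` (module path is illustrative).
    decoder = SVDVAEDecoder().eval()
    latent = torch.randn(2, 4, 16, 16)  # (T, C, H, W): 2 frames of 16x16 latents
    with torch.no_grad():
        video = decoder.decode_video(latent, batch_time=2, batch_height=16, batch_width=16)
    print(video.shape)  # expected: torch.Size([3, 2, 128, 128]) after 8x spatial upsampling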