import torch
import torch.nn as nn
import numpy as np
from einops import rearrange


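# Linear attention over the h*w spatial positions: the softmax is taken over the
# keys, so the small (dim_head x dim_head) context k @ v^T is formed first and the
# cost grows linearly with the number of pixels instead of quadratically.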
class LinearAttention(nn.Module):
    def __init__(self, dim, heads=4, dim_head=32):
        super().__init__()
        self.heads = heads
        hidden_dim = dim_head * heads
        self.to_qkv = nn.Conv2d(dim, hidden_dim * 3, 1, bias=False)
        self.to_out = nn.Conv2d(hidden_dim, dim, 1)

    def forward(self, x):
        b, c, h, w = x.shape
        qkv = self.to_qkv(x)
        q, k, v = rearrange(qkv, 'b (qkv heads c) h w -> qkv b heads c (h w)', heads=self.heads, qkv=3)
        k = k.softmax(dim=-1)
        context = torch.einsum('bhdn,bhen->bhde', k, v)
        out = torch.einsum('bhde,bhdn->bhen', context, q)
        out = rearrange(out, 'b heads c (h w) -> b (heads c) h w', heads=self.heads, h=h, w=w)
        return self.to_out(out)


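# swish / SiLU nonlinearity, used throughout the network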
def nonlinearity(x):
    return x * torch.sigmoid(x)


def Normalize(in_channels, num_groups=32):
    return torch.nn.GroupNorm(num_groups=num_groups, num_channels=in_channels, eps=1e-6, affine=True)


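# 2x nearest-neighbor upsampling, optionally followed by a 3x3 convolution.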
class Upsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        x = torch.nn.functional.interpolate(x, scale_factor=2.0, mode="nearest")
        if self.with_conv:
            x = self.conv(x)
        return x


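# 2x downsampling via a stride-2 conv (torch convs only support symmetric
# padding, so the asymmetric (0, 1, 0, 1) pad is applied manually in forward)
# or via average pooling.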
class Downsample(nn.Module):
    def __init__(self, in_channels, with_conv):
        super().__init__()
        self.with_conv = with_conv
        if self.with_conv:
            self.conv = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=3,
                                        stride=2,
                                        padding=0)

    def forward(self, x):
        if self.with_conv:
            pad = (0, 1, 0, 1)
            x = torch.nn.functional.pad(x, pad, mode="constant", value=0)
            x = self.conv(x)
        else:
            x = torch.nn.functional.avg_pool2d(x, kernel_size=2, stride=2)
        return x


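# Pre-activation residual block (GroupNorm -> swish -> conv, twice), with an
# optional timestep-embedding projection and a learned shortcut (3x3 conv or
# 1x1 "network-in-network" conv) when in/out channel counts differ.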
class ResnetBlock(nn.Module):
    def __init__(self, *, in_channels, out_channels=None, conv_shortcut=False,
                 dropout, temb_channels=512):
        super().__init__()
        self.in_channels = in_channels
        out_channels = in_channels if out_channels is None else out_channels
        self.out_channels = out_channels
        self.use_conv_shortcut = conv_shortcut

        self.norm1 = Normalize(in_channels)
        self.conv1 = torch.nn.Conv2d(in_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if temb_channels > 0:
            self.temb_proj = torch.nn.Linear(temb_channels,
                                             out_channels)
        self.norm2 = Normalize(out_channels)
        self.dropout = torch.nn.Dropout(dropout)
        self.conv2 = torch.nn.Conv2d(out_channels,
                                     out_channels,
                                     kernel_size=3,
                                     stride=1,
                                     padding=1)
        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                self.conv_shortcut = torch.nn.Conv2d(in_channels,
                                                     out_channels,
                                                     kernel_size=3,
                                                     stride=1,
                                                     padding=1)
            else:
                self.nin_shortcut = torch.nn.Conv2d(in_channels,
                                                    out_channels,
                                                    kernel_size=1,
                                                    stride=1,
                                                    padding=0)

    def forward(self, x, temb):
        h = x
        h = self.norm1(h)
        h = nonlinearity(h)
        h = self.conv1(h)

        if temb is not None:
            h = h + self.temb_proj(nonlinearity(temb))[:, :, None, None]

        h = self.norm2(h)
        h = nonlinearity(h)
        h = self.dropout(h)
        h = self.conv2(h)

        if self.in_channels != self.out_channels:
            if self.use_conv_shortcut:
                x = self.conv_shortcut(x)
            else:
                x = self.nin_shortcut(x)

        return x + h


class LinAttnBlock(LinearAttention):
    """to match AttnBlock usage"""
    def __init__(self, in_channels):
        super().__init__(dim=in_channels, heads=1, dim_head=in_channels)


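# Standard single-head self-attention over all h*w spatial positions
# (quadratic in the number of pixels), applied with a residual connection.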
class AttnBlock(nn.Module):
    def __init__(self, in_channels):
        super().__init__()
        self.in_channels = in_channels

        self.norm = Normalize(in_channels)
        self.q = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.k = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.v = torch.nn.Conv2d(in_channels,
                                 in_channels,
                                 kernel_size=1,
                                 stride=1,
                                 padding=0)
        self.proj_out = torch.nn.Conv2d(in_channels,
                                        in_channels,
                                        kernel_size=1,
                                        stride=1,
                                        padding=0)

    def forward(self, x):
        h_ = x
        h_ = self.norm(h_)
        q = self.q(h_)
        k = self.k(h_)
        v = self.v(h_)

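        # compute scaled dot-product attention weights:
        # w_[b, i, j] = softmax_j( sum_c q[b, i, c] * k[b, c, j] / sqrt(c) )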
b,c,h,w = q.shape |
|
q = q.reshape(b,c,h*w) |
|
q = q.permute(0,2,1) |
|
k = k.reshape(b,c,h*w) |
|
w_ = torch.bmm(q,k) |
|
w_ = w_ * (int(c)**(-0.5)) |
|
w_ = torch.nn.functional.softmax(w_, dim=2) |
|
|
|
|
|
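        # attend to values: h_[b, c, i] = sum_j w_[b, i, j] * v[b, c, j]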
        v = v.reshape(b, c, h * w)
        w_ = w_.permute(0, 2, 1)
        h_ = torch.bmm(v, w_)
        h_ = h_.reshape(b, c, h, w)

        h_ = self.proj_out(h_)

        return x + h_


def make_attn(in_channels, attn_type="vanilla"):
    assert attn_type in ["vanilla", "linear", "none"], f'attn_type {attn_type} unknown'
    print(f"making attention of type '{attn_type}' with {in_channels} in_channels")
    if attn_type == "vanilla":
        return AttnBlock(in_channels)
    elif attn_type == "none":
        return nn.Identity()
    else:
        return LinAttnBlock(in_channels)


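# Convolutional encoder: conv_in -> num_resolutions stages of ResNet blocks
# (with optional attention, and 2x downsampling between stages) -> middle
# (ResNet / attention / ResNet) -> GroupNorm / swish / conv_out. With
# double_z=True the output has 2 * z_channels channels (mean and logvar).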
class Encoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, double_z=True, use_linear_attn=False,
                 attn_type="vanilla", **ignore_kwargs):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels

        self.conv_in = torch.nn.Conv2d(in_channels,
                                       self.ch,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

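        # downsampling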
        curr_res = resolution
        in_ch_mult = (1,) + tuple(ch_mult)
        self.in_ch_mult = in_ch_mult
        self.down = nn.ModuleList()
        for i_level in range(self.num_resolutions):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_in = ch * in_ch_mult[i_level]
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            down = nn.Module()
            down.block = block
            down.attn = attn
            if i_level != self.num_resolutions - 1:
                down.downsample = Downsample(block_in, resamp_with_conv)
                curr_res = curr_res // 2
            self.down.append(down)

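        # middle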
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

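        # end: project to the latent channels (2 * z_channels when double_z,
        # holding mean and logvar)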
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        2 * z_channels if double_z else z_channels,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, x):
        temb = None  # the autoencoder uses no timestep embedding (temb_ch == 0)

        hs = [self.conv_in(x)]
        for i_level in range(self.num_resolutions):
            for i_block in range(self.num_res_blocks):
                h = self.down[i_level].block[i_block](hs[-1], temb)
                if len(self.down[i_level].attn) > 0:
                    h = self.down[i_level].attn[i_block](h)
                hs.append(h)
            if i_level != self.num_resolutions - 1:
                hs.append(self.down[i_level].downsample(hs[-1]))

        h = hs[-1]
        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        return h


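# Convolutional decoder, mirroring the Encoder: conv_in from z_channels at the
# lowest resolution -> middle (ResNet / attention / ResNet) -> upsampling stages
# of num_res_blocks + 1 ResNet blocks each -> GroupNorm / swish / conv_out.
# give_pre_end returns features before the output head; tanh_out squashes the
# output to [-1, 1].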
class Decoder(nn.Module):
    def __init__(self, *, ch, out_ch, ch_mult=(1, 2, 4, 8), num_res_blocks,
                 attn_resolutions, dropout=0.0, resamp_with_conv=True, in_channels,
                 resolution, z_channels, give_pre_end=False, tanh_out=False,
                 use_linear_attn=False, attn_type="vanilla", **ignorekwargs):
        super().__init__()
        if use_linear_attn:
            attn_type = "linear"
        self.ch = ch
        self.temb_ch = 0
        self.num_resolutions = len(ch_mult)
        self.num_res_blocks = num_res_blocks
        self.resolution = resolution
        self.in_channels = in_channels
        self.give_pre_end = give_pre_end
        self.tanh_out = tanh_out

        in_ch_mult = (1,) + tuple(ch_mult)
        block_in = ch * ch_mult[self.num_resolutions - 1]
        curr_res = resolution // 2 ** (self.num_resolutions - 1)
        self.z_shape = (1, z_channels, curr_res, curr_res)
        print("Working with z of shape {} = {} dimensions.".format(
            self.z_shape, np.prod(self.z_shape)))

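        # z to block_in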
        self.conv_in = torch.nn.Conv2d(z_channels,
                                       block_in,
                                       kernel_size=3,
                                       stride=1,
                                       padding=1)

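        # middle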
        self.mid = nn.Module()
        self.mid.block_1 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)
        self.mid.attn_1 = make_attn(block_in, attn_type=attn_type)
        self.mid.block_2 = ResnetBlock(in_channels=block_in,
                                       out_channels=block_in,
                                       temb_channels=self.temb_ch,
                                       dropout=dropout)

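        # upsampling: built from lowest to highest resolution; insert(0) keeps
        # self.up indexed by i_level in ascending resolution order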
        self.up = nn.ModuleList()
        for i_level in reversed(range(self.num_resolutions)):
            block = nn.ModuleList()
            attn = nn.ModuleList()
            block_out = ch * ch_mult[i_level]
            for i_block in range(self.num_res_blocks + 1):
                block.append(ResnetBlock(in_channels=block_in,
                                         out_channels=block_out,
                                         temb_channels=self.temb_ch,
                                         dropout=dropout))
                block_in = block_out
                if curr_res in attn_resolutions:
                    attn.append(make_attn(block_in, attn_type=attn_type))
            up = nn.Module()
            up.block = block
            up.attn = attn
            if i_level != 0:
                up.upsample = Upsample(block_in, resamp_with_conv)
                curr_res = curr_res * 2
            self.up.insert(0, up)

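        # end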
        self.norm_out = Normalize(block_in)
        self.conv_out = torch.nn.Conv2d(block_in,
                                        out_ch,
                                        kernel_size=3,
                                        stride=1,
                                        padding=1)

    def forward(self, z):
        self.last_z_shape = z.shape

        temb = None

        h = self.conv_in(z)

        h = self.mid.block_1(h, temb)
        h = self.mid.attn_1(h)
        h = self.mid.block_2(h, temb)

        for i_level in reversed(range(self.num_resolutions)):
            for i_block in range(self.num_res_blocks + 1):
                h = self.up[i_level].block[i_block](h, temb)
                if len(self.up[i_level].attn) > 0:
                    h = self.up[i_level].attn[i_block](h)
            if i_level != 0:
                h = self.up[i_level].upsample(h)

        if self.give_pre_end:
            return h

        h = self.norm_out(h)
        h = nonlinearity(h)
        h = self.conv_out(h)
        if self.tanh_out:
            h = torch.tanh(h)
        return h


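# Frozen KL-regularized autoencoder (the Stable Diffusion first stage), used
# only to encode images to latents and decode them back. Weights are loaded
# from `pretrained_path`, all parameters are frozen, and latents are multiplied
# by `scale_factor` (0.18215 is the Stable Diffusion convention) so they have
# roughly unit variance.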
class FrozenAutoencoderKL(nn.Module):
    def __init__(self, ddconfig, embed_dim, pretrained_path, scale_factor=0.18215):
        super().__init__()
        print(f'Create autoencoder with scale_factor={scale_factor}')
        self.encoder = Encoder(**ddconfig)
        self.decoder = Decoder(**ddconfig)
        assert ddconfig["double_z"]
        self.quant_conv = torch.nn.Conv2d(2 * ddconfig["z_channels"], 2 * embed_dim, 1)
        self.post_quant_conv = torch.nn.Conv2d(embed_dim, ddconfig["z_channels"], 1)
        self.embed_dim = embed_dim
        self.scale_factor = scale_factor
        m, u = self.load_state_dict(torch.load(pretrained_path, map_location='cpu'))
        assert len(m) == 0 and len(u) == 0
        self.eval()
        self.requires_grad_(False)

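    # Encoder output passed through quant_conv; the channels hold the
    # concatenated (mean, logvar) of the diagonal Gaussian posterior.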
    def encode_moments(self, x):
        h = self.encoder(x)
        moments = self.quant_conv(h)
        return moments

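    # Reparameterization trick: z = mean + std * eps with eps ~ N(0, I),
    # then scaled by scale_factor.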
    def sample(self, moments):
        mean, logvar = torch.chunk(moments, 2, dim=1)
        logvar = torch.clamp(logvar, -30.0, 20.0)
        std = torch.exp(0.5 * logvar)
        z = mean + std * torch.randn_like(mean)
        z = self.scale_factor * z
        return z

    def encode(self, x):
        moments = self.encode_moments(x)
        z = self.sample(moments)
        return z

    def decode(self, z):
        z = (1. / self.scale_factor) * z
        z = self.post_quant_conv(z)
        dec = self.decoder(z)
        return dec

    def forward(self, inputs, fn):
        if fn == 'encode_moments':
            return self.encode_moments(inputs)
        elif fn == 'encode':
            return self.encode(inputs)
        elif fn == 'decode':
            return self.decode(inputs)
        else:
            raise NotImplementedError


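# Builds the frozen autoencoder with the standard Stable Diffusion first-stage
# configuration: 3 downsampling stages (f=8 spatial reduction), 4 latent
# channels, 256x256 training resolution, no attention in the encoder/decoder.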
def get_model(pretrained_path, scale_factor=0.18215):
    ddconfig = dict(
        double_z=True,
        z_channels=4,
        resolution=256,
        in_channels=3,
        out_ch=3,
        ch=128,
        ch_mult=[1, 2, 4, 4],
        num_res_blocks=2,
        attn_resolutions=[],
        dropout=0.0
    )
    return FrozenAutoencoderKL(ddconfig, 4, pretrained_path, scale_factor)


def main():
    import torchvision.transforms as transforms
    from torchvision.utils import save_image
    import os
    from PIL import Image

    model = get_model('assets/stable-diffusion/autoencoder_kl.pth')
    device = torch.device("cuda") if torch.cuda.is_available() else torch.device("cpu")
    model = model.to(device)

    T = transforms.Compose([transforms.Resize(256), transforms.CenterCrop(256), transforms.ToTensor()])
    path = 'imgs'
    fnames = os.listdir(path)
    for fname in fnames:
        p = os.path.join(path, fname)
        img = Image.open(p).convert('RGB')  # ensure 3 channels even for RGBA/grayscale inputs
        img = T(img)
        img = img * 2. - 1.  # map [0, 1] -> [-1, 1], the range the autoencoder expects
        img = img[None, ...]
        img = img.to(device)

        with torch.cuda.amp.autocast():
            print('test encode & decode')
            recons = [model.decode(model.encode(img)) for _ in range(4)]

        out = torch.cat([img, *recons], dim=0)
        out = (out + 1) * 0.5  # map back to [0, 1] for saving
        save_image(out, f'recons_{fname}')


if __name__ == "__main__":
    main()