# ShapeLLM-Omni/trellis/models/sparse_structure_vqvae.py
from typing import List, Literal, Optional
import torch
import torch.nn as nn
import torch.nn.functional as F
from ..modules.norm import GroupNorm32, ChannelLayerNorm32
from ..modules.spatial import pixel_shuffle_3d
from ..modules.utils import zero_module, convert_module_to_f16, convert_module_to_f32
def norm_layer(norm_type: str, *args, **kwargs) -> nn.Module:
"""
Return a normalization layer.
"""
if norm_type == "group":
return GroupNorm32(32, *args, **kwargs)
elif norm_type == "layer":
return ChannelLayerNorm32(*args, **kwargs)
else:
raise ValueError(f"Invalid norm type {norm_type}")
class ResBlock3d(nn.Module):
    """3D residual block: two pre-activation (norm -> SiLU -> conv) stages, with
    the second conv zero-initialized and a 1x1x1 (or identity) skip connection."""

def __init__(
self,
channels: int,
out_channels: Optional[int] = None,
norm_type: Literal["group", "layer"] = "layer",
):
super().__init__()
self.channels = channels
self.out_channels = out_channels or channels
self.norm1 = norm_layer(norm_type, channels)
self.norm2 = norm_layer(norm_type, self.out_channels)
self.conv1 = nn.Conv3d(channels, self.out_channels, 3, padding=1)
self.conv2 = zero_module(nn.Conv3d(self.out_channels, self.out_channels, 3, padding=1))
self.skip_connection = nn.Conv3d(channels, self.out_channels, 1) if channels != self.out_channels else nn.Identity()
def forward(self, x: torch.Tensor) -> torch.Tensor:
h = self.norm1(x)
h = F.silu(h)
h = self.conv1(h)
h = self.norm2(h)
h = F.silu(h)
h = self.conv2(h)
h = h + self.skip_connection(x)
return h
class DownsampleBlock3d(nn.Module):
    """2x spatial downsampling via a stride-2 conv or average pooling."""

def __init__(
self,
in_channels: int,
out_channels: int,
mode: Literal["conv", "avgpool"] = "conv",
):
assert mode in ["conv", "avgpool"], f"Invalid mode {mode}"
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if mode == "conv":
self.conv = nn.Conv3d(in_channels, out_channels, 2, stride=2)
elif mode == "avgpool":
assert in_channels == out_channels, "Pooling mode requires in_channels to be equal to out_channels"
def forward(self, x: torch.Tensor) -> torch.Tensor:
if hasattr(self, "conv"):
return self.conv(x)
else:
return F.avg_pool3d(x, 2)
class UpsampleBlock3d(nn.Module):
    """2x spatial upsampling, either by expanding channels 8x (= 2^3) with a conv
    followed by a 3D pixel shuffle, or by nearest-neighbor interpolation."""

def __init__(
self,
in_channels: int,
out_channels: int,
mode: Literal["conv", "nearest"] = "conv",
):
assert mode in ["conv", "nearest"], f"Invalid mode {mode}"
super().__init__()
self.in_channels = in_channels
self.out_channels = out_channels
if mode == "conv":
self.conv = nn.Conv3d(in_channels, out_channels*8, 3, padding=1)
elif mode == "nearest":
assert in_channels == out_channels, "Nearest mode requires in_channels to be equal to out_channels"
def forward(self, x: torch.Tensor) -> torch.Tensor:
if hasattr(self, "conv"):
x = self.conv(x)
return pixel_shuffle_3d(x, 2)
else:
return F.interpolate(x, scale_factor=2, mode="nearest")
class SparseStructure_vqEncoder(nn.Module):
    """Dense 3D convolutional encoder producing the pre-quantization latent grid."""

def __init__(
self,
in_channels: int,
latent_channels: int,
num_res_blocks: int,
channels: List[int],
num_res_blocks_middle: int = 2,
norm_type: Literal["group", "layer"] = "layer",
use_fp16: bool = False,
):
super().__init__()
self.in_channels = in_channels
self.latent_channels = latent_channels
self.num_res_blocks = num_res_blocks
self.channels = channels
self.num_res_blocks_middle = num_res_blocks_middle
self.norm_type = norm_type
self.use_fp16 = use_fp16
self.dtype = torch.float16 if use_fp16 else torch.float32
self.input_layer = nn.Conv3d(in_channels, channels[0], 3, padding=1)
self.blocks = nn.ModuleList([])
for i, ch in enumerate(channels):
self.blocks.extend([
ResBlock3d(ch, ch)
for _ in range(num_res_blocks)
])
if i < len(channels) - 1:
self.blocks.append(
DownsampleBlock3d(ch, channels[i+1])
)
self.middle_block = nn.Sequential(*[
ResBlock3d(channels[-1], channels[-1])
for _ in range(num_res_blocks_middle)
])
self.out_layer = nn.Sequential(
norm_layer(norm_type, channels[-1]),
nn.SiLU(),
nn.Conv3d(channels[-1], latent_channels*2, 3, padding=1)
)
if use_fp16:
self.convert_to_fp16()
@property
def device(self) -> torch.device:
"""
Return the device of the model.
"""
return next(self.parameters()).device
def convert_to_fp16(self) -> None:
"""
Convert the torso of the model to float16.
"""
self.use_fp16 = True
self.dtype = torch.float16
self.blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self) -> None:
"""
Convert the torso of the model to float32.
"""
self.use_fp16 = False
self.dtype = torch.float32
self.blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
    def forward(self, x: torch.Tensor, sample_posterior: bool = False, return_raw: bool = False, using_out_layer: bool = True) -> torch.Tensor:
        # `sample_posterior` and `return_raw` are unused in this VQ variant; they
        # are kept for interface compatibility with the non-VQ encoder.
        h = self.input_layer(x)
        h = h.type(self.dtype)
        for block in self.blocks:
            h = block(h)
        h = self.middle_block(h)  # e.g. [bs, 512, 16, 16, 16] for a 64^3 input
        h = h.type(x.dtype)
        if not using_out_layer:
            return h
        h = self.out_layer(h)
        # The out layer predicts 2 * latent_channels (a VAE-style mean/logvar
        # pair); only the mean is used as the pre-quantization latent.
        mean, _logvar = h.chunk(2, dim=1)
        return mean
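
# Shape note (inferred from the hard-coded views in VQVAE3D below, so treat the
# concrete sizes as an assumption): with a 64^3 occupancy input and
# channels=[32, 128, 512], the two DownsampleBlock3d stages halve the resolution
# 64 -> 32 -> 16, so the encoder maps
# [bs, 1, 64, 64, 64] -> [bs, latent_channels, 16, 16, 16].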
class SparseStructure_vqDecoder(nn.Module):
    """Dense 3D convolutional decoder mirroring SparseStructure_vqEncoder."""

def __init__(
self,
out_channels: int,
latent_channels: int,
num_res_blocks: int,
channels: List[int],
num_res_blocks_middle: int = 2,
norm_type: Literal["group", "layer"] = "layer",
use_fp16: bool = False,
):
super().__init__()
self.out_channels = out_channels
self.latent_channels = latent_channels
self.num_res_blocks = num_res_blocks
self.channels = channels
self.num_res_blocks_middle = num_res_blocks_middle
self.norm_type = norm_type
self.use_fp16 = use_fp16
self.dtype = torch.float16 if use_fp16 else torch.float32
self.input_layer = nn.Conv3d(latent_channels, channels[0], 3, padding=1)
self.middle_block = nn.Sequential(*[
ResBlock3d(channels[0], channels[0])
for _ in range(num_res_blocks_middle)
])
self.blocks = nn.ModuleList([])
for i, ch in enumerate(channels):
self.blocks.extend([
ResBlock3d(ch, ch)
for _ in range(num_res_blocks)
])
if i < len(channels) - 1:
self.blocks.append(
UpsampleBlock3d(ch, channels[i+1])
)
self.out_layer = nn.Sequential(
norm_layer(norm_type, channels[-1]),
nn.SiLU(),
nn.Conv3d(channels[-1], out_channels, 3, padding=1)
)
if use_fp16:
self.convert_to_fp16()
@property
def device(self) -> torch.device:
"""
Return the device of the model.
"""
return next(self.parameters()).device
def convert_to_fp16(self) -> None:
"""
Convert the torso of the model to float16.
"""
self.use_fp16 = True
self.dtype = torch.float16
self.blocks.apply(convert_module_to_f16)
self.middle_block.apply(convert_module_to_f16)
def convert_to_fp32(self) -> None:
"""
Convert the torso of the model to float32.
"""
self.use_fp16 = False
self.dtype = torch.float32
self.blocks.apply(convert_module_to_f32)
self.middle_block.apply(convert_module_to_f32)
    def forward(self, x: torch.Tensor, using_input_layer: bool = True) -> torch.Tensor:
        # `using_input_layer=False` lets callers feed features that are already
        # projected to channels[0], skipping the latent-to-feature conv.
        if using_input_layer:
            h = self.input_layer(x)
        else:
            h = x
        h = h.type(self.dtype)
        h = self.middle_block(h)
        for block in self.blocks:
            h = block(h)
        h = h.type(x.dtype)
        h = self.out_layer(h)
        return h
class VectorQuantizer(nn.Module):
    """Nearest-neighbor vector quantizer with a learned codebook.

    Note: `beta` (the commitment weight) is stored but never applied inside
    `forward`; callers are expected to combine the returned losses themselves,
    e.g. `vq_loss + beta * commitment_loss`.
    """

    def __init__(self, num_embeddings: int = 81920, embedding_dim: int = 64, beta: float = 0.25):
        super().__init__()
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        self.beta = beta
        self.embeddings = nn.Embedding(self.num_embeddings, self.embedding_dim)
        # Standard small uniform init for the codebook.
        self.embeddings.weight.data.uniform_(-1 / self.num_embeddings, 1 / self.num_embeddings)
    def forward(self, z: torch.Tensor, only_return_indices: bool = False):
        bs, h, w, d, c = z.shape
        z_flatten = z.reshape(-1, self.embedding_dim)               # [bs*h*w*d, embedding_dim]
        distances = torch.cdist(z_flatten, self.embeddings.weight)  # [bs*h*w*d, num_embeddings]
        encoding_indices = torch.argmin(distances, dim=1)           # [bs*h*w*d]
        if only_return_indices:
            return encoding_indices.view(bs, h * w * d)             # [bs, h*w*d]
        quantized = self.embeddings(encoding_indices)               # [bs*h*w*d, embedding_dim]
        quantized = quantized.view(bs, h, w, d, c)
        encoding_indices = encoding_indices.view(bs, h, w, d)
        commitment_loss = F.mse_loss(z, quantized.detach())  # pulls encoder output toward the codes
        vq_loss = F.mse_loss(quantized, z.detach())          # pulls the codes toward encoder output
        # Straight-through estimator: the forward pass uses the quantized values,
        # while gradients flow to z as if quantization were the identity.
        quantized = z + (quantized - z).detach()
        return quantized, vq_loss, commitment_loss, encoding_indices
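
# A minimal usage sketch for the quantizer (the shapes and the `recon_loss`
# term are hypothetical; this file itself does not combine the losses):
#
#   vq = VectorQuantizer(num_embeddings=8192, embedding_dim=32)
#   z = torch.randn(2, 8, 8, 16, 32)                # [bs, h, w, d, embedding_dim]
#   quantized, vq_loss, commitment_loss, idx = vq(z)
#   loss = recon_loss + vq_loss + vq.beta * commitment_loss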
class VQVAE3D(nn.Module):
    """VQ-VAE over dense 3D occupancy grids: encoder, codebook, decoder.

    The fixed views in Encode/Decode assume a 16^3 latent grid (i.e. a 64^3
    input) and regroup it into 8 * 8 * 16 = 1024 code vectors of dimension 32.
    """

    def __init__(self, num_embeddings: int = 8192):
        super().__init__()
        self.Encoder = SparseStructure_vqEncoder(
            in_channels=1,
            latent_channels=8,
            num_res_blocks=2,
            channels=[32, 128, 512],
            num_res_blocks_middle=2,
            use_fp16=True,
        )
        self.Decoder = SparseStructure_vqDecoder(
            out_channels=1,
            latent_channels=8,
            num_res_blocks=2,
            channels=[512, 128, 32],
            num_res_blocks_middle=2,
            use_fp16=True,
        )
        self.vq = VectorQuantizer(num_embeddings=num_embeddings, embedding_dim=32, beta=0.25)

    def Encode(self, x: torch.Tensor) -> torch.Tensor:
        bs = x.shape[0]
        z = self.Encoder(x)                         # [bs, 8, 16, 16, 16]
        z = z.permute(0, 2, 3, 4, 1).contiguous()   # [bs, 16, 16, 16, 8]
        # Regroup so each code vector has embedding_dim = 32: four consecutive
        # 8-channel latents along the last spatial axis form one 32-dim vector.
        z = z.view(bs, 8, 8, 16, 32)
        encoding_indices = self.vq(z, only_return_indices=True)  # [bs, 1024]
        return encoding_indices

    def Decode(self, encoding_indices: torch.Tensor) -> torch.Tensor:
        assert encoding_indices.dim() == 2                 # [bs, 1024]
        bs = encoding_indices.shape[0]
        quantized = self.vq.embeddings(encoding_indices)   # [bs, 1024, 32]
        quantized = quantized.view(bs, 8, 8, 16, 32)
        # Invert the regrouping done in Encode.
        z_hat = quantized.view(bs, 16, 16, 16, 8)
        z_hat = z_hat.permute(0, 4, 1, 2, 3).contiguous()  # [bs, 8, 16, 16, 16]
        recon = self.Decoder(z_hat)
        return recon
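
# A minimal smoke-test sketch, assuming a 64^3 occupancy input (the size the
# hard-coded 16^3 latent views above imply); the fp16 torsos are converted
# back to fp32 so this also runs on CPU.
if __name__ == "__main__":
    model = VQVAE3D(num_embeddings=8192)
    model.Encoder.convert_to_fp32()
    model.Decoder.convert_to_fp32()
    x = torch.randn(1, 1, 64, 64, 64)  # stand-in for a binary occupancy grid
    with torch.no_grad():
        indices = model.Encode(x)      # [1, 1024] codebook indices
        recon = model.Decode(indices)  # [1, 1, 64, 64, 64]
    print(indices.shape, recon.shape)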