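"""High-quality (HQ) mask decoder built on top of SAM's MaskDecoder.

MaskDecoderHQ loads a pretrained SAM mask decoder, freezes it, and adds a
learnable HQ output token plus small convolutional branches that fuse an
early-layer ViT feature with the image embedding, producing an extra
high-quality mask alongside SAM's original outputs.
"""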
import torch
import torch.nn as nn
import torch.nn.functional as F
from segment_anything.modeling import TwoWayTransformer, MaskDecoder
from typing import Dict, List, Tuple


class LayerNorm2d(nn.Module):
    """LayerNorm applied over the channel dimension of NCHW feature maps."""

    def __init__(self, num_channels: int, eps: float = 1e-6) -> None:
        super().__init__()
        self.weight = nn.Parameter(torch.ones(num_channels))
        self.bias = nn.Parameter(torch.zeros(num_channels))
        self.eps = eps

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        u = x.mean(1, keepdim=True)
        s = (x - u).pow(2).mean(1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.eps)
        x = self.weight[:, None, None] * x + self.bias[:, None, None]
        return x


class MLP(nn.Module):
    """Simple multi-layer perceptron with ReLU activations between layers."""

    def __init__(
        self,
        input_dim: int,
        hidden_dim: int,
        output_dim: int,
        num_layers: int,
        sigmoid_output: bool = False,
    ) -> None:
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(
            nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim])
        )
        self.sigmoid_output = sigmoid_output

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        if self.sigmoid_output:
            x = torch.sigmoid(x)  # torch.sigmoid replaces the deprecated F.sigmoid
        return x


class MaskDecoderHQ(MaskDecoder):
    """SAM mask decoder extended with a high-quality (HQ) output token and an
    extra feature path that fuses early ViT features with the image embedding."""

    def __init__(self, model_type):
        super().__init__(
            transformer_dim=256,
            transformer=TwoWayTransformer(
                depth=2,
                embedding_dim=256,
                mlp_dim=2048,
                num_heads=8,
            ),
            num_multimask_outputs=3,
            activation=nn.GELU,
            iou_head_depth=3,
            iou_head_hidden_dim=256,
        )
        assert model_type in ["vit_b", "vit_l", "vit_h"]

        checkpoint_dict = {
            "vit_b": "pretrained_checkpoint/sam_vit_b_maskdecoder.pth",
            "vit_l": "pretrained_checkpoint/sam_vit_l_maskdecoder.pth",
            "vit_h": "pretrained_checkpoint/sam_vit_h_maskdecoder.pth",
        }
        checkpoint_path = checkpoint_dict[model_type]
        self.load_state_dict(torch.load(checkpoint_path))
        print("HQ Decoder init from SAM MaskDecoder")
        # Freeze the pretrained SAM decoder; only the HQ additions below are trainable.
        for n, p in self.named_parameters():
            p.requires_grad = False

        transformer_dim = 256
        vit_dim_dict = {"vit_b": 768, "vit_l": 1024, "vit_h": 1280}
        vit_dim = vit_dim_dict[model_type]

        # HQ additions: one extra output token, its hypernetwork MLP, and
        # convolutional branches that build the fused HQ feature map.
        self.hf_token = nn.Embedding(1, transformer_dim)
        self.hf_mlp = MLP(transformer_dim, transformer_dim, transformer_dim // 8, 3)
        self.num_mask_tokens = self.num_mask_tokens + 1

        self.compress_vit_feat = nn.Sequential(
            nn.ConvTranspose2d(vit_dim, transformer_dim, kernel_size=2, stride=2),
            LayerNorm2d(transformer_dim),
            nn.GELU(),
            nn.ConvTranspose2d(transformer_dim, transformer_dim // 8, kernel_size=2, stride=2),
        )
        self.embedding_encoder = nn.Sequential(
            nn.ConvTranspose2d(transformer_dim, transformer_dim // 4, kernel_size=2, stride=2),
            LayerNorm2d(transformer_dim // 4),
            nn.GELU(),
            nn.ConvTranspose2d(transformer_dim // 4, transformer_dim // 8, kernel_size=2, stride=2),
        )
        self.embedding_maskfeature = nn.Sequential(
            nn.Conv2d(transformer_dim // 8, transformer_dim // 4, 3, 1, 1),
            LayerNorm2d(transformer_dim // 4),
            nn.GELU(),
            nn.Conv2d(transformer_dim // 4, transformer_dim // 8, 3, 1, 1),
        )
    def forward(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        multimask_output: bool,
        hq_token_only: bool,
        interm_embeddings: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """
        Predict masks given image and prompt embeddings.

        Arguments:
          image_embeddings (torch.Tensor): the embeddings from the ViT image encoder
          image_pe (torch.Tensor): positional encoding with the shape of image_embeddings
          sparse_prompt_embeddings (torch.Tensor): the embeddings of the points and boxes
          dense_prompt_embeddings (torch.Tensor): the embeddings of the mask inputs
          multimask_output (bool): whether to return multiple masks or a single mask
          hq_token_only (bool): if True, return only the HQ masks
          interm_embeddings (torch.Tensor): early-layer ViT features used to build the HQ feature map

        Returns:
          torch.Tensor: batched predicted hq masks
        """
        # Fuse the early-layer ViT feature (taken after the first global attention
        # block in the ViT) with the final image embedding to get the HQ feature map.
        vit_features = interm_embeddings[0].permute(0, 3, 1, 2)
        hq_features = self.embedding_encoder(image_embeddings) + self.compress_vit_feat(vit_features)

        batch_len = len(image_embeddings)
        masks = []
        iou_preds = []
        for i_batch in range(batch_len):
            mask, iou_pred = self.predict_masks(
                image_embeddings=image_embeddings[i_batch].unsqueeze(0),
                image_pe=image_pe[i_batch],
                sparse_prompt_embeddings=sparse_prompt_embeddings[i_batch],
                dense_prompt_embeddings=dense_prompt_embeddings[i_batch],
                hq_feature=hq_features[i_batch].unsqueeze(0),
            )
            masks.append(mask)
            iou_preds.append(iou_pred)
        masks = torch.cat(masks, 0)
        iou_preds = torch.cat(iou_preds, 0)

        # Select the correct mask or masks for output
        if multimask_output:
            # Take the multimask outputs and keep the one with the highest IoU score.
            mask_slice = slice(1, self.num_mask_tokens - 1)
            iou_preds = iou_preds[:, mask_slice]
            iou_preds, max_iou_idx = torch.max(iou_preds, dim=1)
            iou_preds = iou_preds.unsqueeze(1)
            masks_multi = masks[:, mask_slice, :, :]
            masks_sam = masks_multi[torch.arange(masks_multi.size(0)), max_iou_idx].unsqueeze(1)
        else:
            # Single mask output (default).
            mask_slice = slice(0, 1)
            masks_sam = masks[:, mask_slice]

        # The HQ mask always comes from the last (HQ) token.
        masks_hq = masks[:, slice(self.num_mask_tokens - 1, self.num_mask_tokens), :, :]

        if hq_token_only:
            return masks_hq
        return masks_sam, masks_hq
    def predict_masks(
        self,
        image_embeddings: torch.Tensor,
        image_pe: torch.Tensor,
        sparse_prompt_embeddings: torch.Tensor,
        dense_prompt_embeddings: torch.Tensor,
        hq_feature: torch.Tensor,
    ) -> Tuple[torch.Tensor, torch.Tensor]:
        """Predicts masks. See 'forward' for more details."""
        # Concatenate output tokens: IoU token, SAM mask tokens, and the extra HQ token.
        output_tokens = torch.cat([self.iou_token.weight, self.mask_tokens.weight, self.hf_token.weight], dim=0)
        output_tokens = output_tokens.unsqueeze(0).expand(sparse_prompt_embeddings.size(0), -1, -1)
        tokens = torch.cat((output_tokens, sparse_prompt_embeddings), dim=1)

        # Expand per-image data in batch direction to be per-mask
        src = torch.repeat_interleave(image_embeddings, tokens.shape[0], dim=0)
        src = src + dense_prompt_embeddings
        pos_src = torch.repeat_interleave(image_pe, tokens.shape[0], dim=0)
        b, c, h, w = src.shape

        # Run the transformer
        hs, src = self.transformer(src, pos_src, tokens)
        iou_token_out = hs[:, 0, :]
        mask_tokens_out = hs[:, 1 : (1 + self.num_mask_tokens), :]

        # Upscale mask embeddings and predict masks using the mask tokens
        src = src.transpose(1, 2).view(b, c, h, w)
        upscaled_embedding_sam = self.output_upscaling(src)
        upscaled_embedding_ours = self.embedding_maskfeature(upscaled_embedding_sam) + hq_feature

        hyper_in_list: List[torch.Tensor] = []
        for i in range(self.num_mask_tokens):
            if i < 4:
                # SAM's original hypernetwork MLPs handle the first four tokens.
                hyper_in_list.append(self.output_hypernetworks_mlps[i](mask_tokens_out[:, i, :]))
            else:
                # The HQ token goes through the dedicated HQ MLP.
                hyper_in_list.append(self.hf_mlp(mask_tokens_out[:, i, :]))
        hyper_in = torch.stack(hyper_in_list, dim=1)

        b, c, h, w = upscaled_embedding_sam.shape
        masks_sam = (hyper_in[:, :4] @ upscaled_embedding_sam.view(b, c, h * w)).view(b, -1, h, w)
        masks_ours = (hyper_in[:, 4:] @ upscaled_embedding_ours.view(b, c, h * w)).view(b, -1, h, w)
        masks = torch.cat([masks_sam, masks_ours], dim=1)

        iou_pred = self.iou_prediction_head(iou_token_out)

        return masks, iou_pred
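

if __name__ == "__main__":
    # Illustrative smoke test, not part of the decoder itself: a minimal sketch
    # assuming SAM ViT-B geometry (64x64 image embeddings with 256 channels,
    # early ViT features with 768 channels) and one box prompt (two sparse
    # tokens) per image. It requires the pretrained SAM decoder checkpoint
    # referenced in __init__ to be present locally.
    decoder = MaskDecoderHQ("vit_b")

    b = 2  # number of images
    image_embeddings = torch.randn(b, 256, 64, 64)         # final ViT embedding per image
    image_pe = torch.randn(b, 1, 256, 64, 64)              # dense positional encoding, one per image
    sparse_prompt_embeddings = torch.randn(b, 1, 2, 256)   # one prompt of two tokens per image
    dense_prompt_embeddings = torch.randn(b, 1, 256, 64, 64)
    interm_embeddings = [torch.randn(b, 64, 64, 768)]      # early-layer ViT feature, channels-last

    with torch.no_grad():
        masks_sam, masks_hq = decoder(
            image_embeddings=image_embeddings,
            image_pe=image_pe,
            sparse_prompt_embeddings=sparse_prompt_embeddings,
            dense_prompt_embeddings=dense_prompt_embeddings,
            multimask_output=False,
            hq_token_only=False,
            interm_embeddings=interm_embeddings,
        )
    print(masks_sam.shape, masks_hq.shape)  # both (b, 1, 256, 256)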