Delete xdecoder
- xdecoder/BaseModel.py +0 -37
- xdecoder/__init__.py +0 -5
- xdecoder/architectures/__init__.py +0 -2
- xdecoder/architectures/build.py +0 -10
- xdecoder/architectures/registry.py +0 -13
- xdecoder/architectures/xdecoder_model.py +0 -622
- xdecoder/backbone/__init__.py +0 -7
- xdecoder/backbone/backbone.py +0 -51
- xdecoder/backbone/build.py +0 -11
- xdecoder/backbone/focal.py +0 -692
- xdecoder/backbone/focal_dw.py +0 -789
- xdecoder/backbone/registry.py +0 -14
- xdecoder/backbone/resnet.py +0 -731
- xdecoder/backbone/swin.py +0 -892
- xdecoder/body/__init__.py +0 -1
- xdecoder/body/build.py +0 -13
- xdecoder/body/decoder/__init__.py +0 -1
- xdecoder/body/decoder/build.py +0 -12
- xdecoder/body/decoder/registry.py +0 -13
- xdecoder/body/decoder/tmp.py +0 -664
- xdecoder/body/decoder/xdecoder.py +0 -700
- xdecoder/body/decoder/xdecoder2.py +0 -700
- xdecoder/body/encoder/__init__.py +0 -1
- xdecoder/body/encoder/build.py +0 -12
- xdecoder/body/encoder/registry.py +0 -13
- xdecoder/body/encoder/transformer_encoder_fpn.py +0 -324
- xdecoder/body/registry.py +0 -14
- xdecoder/body/transformer_blocks.py +0 -370
- xdecoder/body/xdecoder_head.py +0 -123
- xdecoder/language/LangEncoder/__init__.py +0 -8
- xdecoder/language/LangEncoder/build.py +0 -36
- xdecoder/language/LangEncoder/registry.py +0 -18
- xdecoder/language/LangEncoder/transformer.py +0 -222
- xdecoder/language/__init__.py +0 -3
- xdecoder/language/build.py +0 -11
- xdecoder/language/fixvlpencoder.py +0 -35
- xdecoder/language/loss.py +0 -225
- xdecoder/language/misc.py +0 -64
- xdecoder/language/registry.py +0 -13
- xdecoder/language/vlpencoder.py +0 -168
- xdecoder/modules/__init__.py +0 -3
- xdecoder/modules/attention.py +0 -489
- xdecoder/modules/position_encoding.py +0 -64
- xdecoder/modules/postprocessing.py +0 -122
- xdecoder/utils/__init__.py +0 -4
- xdecoder/utils/box_ops.py +0 -93
- xdecoder/utils/config.py +0 -140
- xdecoder/utils/it_contrastive.py +0 -59
- xdecoder/utils/misc.py +0 -157
xdecoder/BaseModel.py
DELETED
@@ -1,37 +0,0 @@
-# --------------------------------------------------------
-# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
-# Copyright (c) 2022 Microsoft
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Xueyan Zou (xueyan@cs.wisc.edu)
-# --------------------------------------------------------
-
-import os
-import logging
-
-import torch
-import torch.nn as nn
-
-from utils.model_loading import align_and_update_state_dicts
-
-logger = logging.getLogger(__name__)
-
-
-class BaseModel(nn.Module):
-    def __init__(self, opt, module: nn.Module):
-        super(BaseModel, self).__init__()
-        self.opt = opt
-        self.model = module
-
-    def forward(self, *inputs, **kwargs):
-        outputs = self.model(*inputs, **kwargs)
-        return outputs
-
-    def save_pretrained(self, save_dir):
-        save_path = os.path.join(save_dir, 'model_state_dict.pt')
-        torch.save(self.model.state_dict(), save_path)
-
-    def from_pretrained(self, load_path):
-        state_dict = torch.load(load_path, map_location=self.opt['device'])
-        state_dict = align_and_update_state_dicts(self.model.state_dict(), state_dict)
-        self.model.load_state_dict(state_dict, strict=False)
-        return self
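For reference, the deleted BaseModel is a thin checkpoint-handling wrapper around an arbitrary task module. A minimal sketch of how such a wrapper is driven, assuming a hypothetical opt dict and wrapped module (neither is taken from this repo):

import torch.nn as nn

# Hypothetical driver for the removed wrapper; 'device' is the only opt key
# the wrapper itself reads, everything else here is an illustrative stand-in.
opt = {'device': 'cpu'}
model = BaseModel(opt, nn.Linear(16, 4))

model.save_pretrained('./ckpt')  # writes ./ckpt/model_state_dict.pt
model = model.from_pretrained('./ckpt/model_state_dict.pt')  # non-strict reload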
xdecoder/__init__.py
DELETED
@@ -1,5 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-
-from .architectures import build_model
xdecoder/architectures/__init__.py
DELETED
@@ -1,2 +0,0 @@
-from .xdecoder_model import *
-from .build import build_model
xdecoder/architectures/build.py
DELETED
@@ -1,10 +0,0 @@
-from .registry import model_entrypoints
-from .registry import is_model
-
-def build_model(config, **kwargs):
-    model_name = config['MODEL']['NAME']
-
-    if not is_model(model_name):
-        raise ValueError(f'Unkown model: {model_name}')
-
-    return model_entrypoints(model_name)(config, **kwargs)
xdecoder/architectures/registry.py
DELETED
@@ -1,13 +0,0 @@
-_model_entrypoints = {}
-
-def register_model(fn):
-    module_name_split = fn.__module__.split('.')
-    model_name = module_name_split[-1]
-    _model_entrypoints[model_name] = fn
-    return fn
-
-def model_entrypoints(model_name):
-    return _model_entrypoints[model_name]
-
-def is_model(model_name):
-    return model_name in _model_entrypoints
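Taken together, the deleted build.py and registry.py form a small decorator-based factory: register_model files each builder under its defining module's name, and build_model resolves config['MODEL']['NAME'] against that table. A minimal sketch of the round trip, with an illustrative module name:

# xdecoder/architectures/my_arch.py (hypothetical) -- because register_model
# keys on fn.__module__, this builder is registered under 'my_arch'.
from .registry import register_model

@register_model
def get_my_model(config, **kwargs):
    return ...  # construct and return an nn.Module

# caller side: the config name must match the registering module's basename
model = build_model({'MODEL': {'NAME': 'my_arch'}})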
xdecoder/architectures/xdecoder_model.py
DELETED
@@ -1,622 +0,0 @@
-# --------------------------------------------------------
-# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
-# Copyright (c) 2022 Microsoft
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Xueyan Zou (xueyan@cs.wisc.edu)
-# --------------------------------------------------------
-
-import random
-from typing import Tuple
-from unicodedata import name
-
-import torch
-from torch import nn
-from torch.nn import functional as F
-import numpy as np
-
-from .registry import register_model
-from ..utils import configurable
-from ..backbone import build_backbone, Backbone
-from ..body import build_xdecoder_head
-from ..modules import sem_seg_postprocess, bbox_postprocess
-from ..language import build_language_encoder
-from ..language.loss import vl_similarity
-
-from timm.models.layers import trunc_normal_
-from nltk.stem.lancaster import LancasterStemmer
-from detectron2.structures import Boxes, ImageList, Instances, BitMasks, BoxMode
-from detectron2.utils.memory import retry_if_cuda_oom
-from detectron2.data import MetadataCatalog
-from utils.misc import prompt_engineering
-
-st = LancasterStemmer()
-
-
-class X_Decoder_Model(nn.Module):
-    @configurable
-    def __init__(
-        self,
-        *,
-        backbone: Backbone,
-        sem_seg_head: nn.Module,
-        criterion: nn.Module,
-        losses: dict,
-        num_queries: int,
-        object_mask_threshold: float,
-        overlap_threshold: float,
-        metadata,
-        task_switch: dict,
-        phrase_prob: float,
-        size_divisibility: int,
-        sem_seg_postprocess_before_inference: bool,
-        pixel_mean: Tuple[float],
-        pixel_std: Tuple[float],
-        # inference
-        semantic_on: bool,
-        panoptic_on: bool,
-        instance_on: bool,
-        test_topk_per_image: int,
-        train_dataset_name: str,
-        retrieval_emsemble: bool,
-        backbone_dim: int,
-        dim_proj: int,
-    ):
-        super().__init__()
-        self.backbone = backbone
-        self.sem_seg_head = sem_seg_head
-        self.criterion = criterion
-        self.losses = losses
-        self.num_queries = num_queries
-        self.overlap_threshold = overlap_threshold
-        self.object_mask_threshold = object_mask_threshold
-        self.metadata = metadata
-        if size_divisibility < 0:
-            # use backbone size_divisibility if not set
-            size_divisibility = self.backbone.size_divisibility
-        self.size_divisibility = size_divisibility
-        self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
-        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
-        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)
-
-        # additional args
-        self.semantic_on = semantic_on
-        self.instance_on = instance_on
-        self.panoptic_on = panoptic_on
-
-        # caption argument
-        self.task_switch = task_switch
-        self.phrase_prob = phrase_prob
-
-        self.test_topk_per_image = test_topk_per_image
-        self.train_class_names = None
-
-        self.retrieval_emsemble = retrieval_emsemble
-        # backbone itc loss
-        if task_switch['retrieval'] and retrieval_emsemble:
-            self.backbone_proj = nn.Parameter(torch.empty(backbone_dim, dim_proj))
-            trunc_normal_(self.backbone_proj, std=.02)
-
-        if not self.semantic_on:
-            assert self.sem_seg_postprocess_before_inference
-
-    @classmethod
-    def from_config(cls, cfg):
-        enc_cfg = cfg['MODEL']['ENCODER']
-        dec_cfg = cfg['MODEL']['DECODER']
-
-        task_switch = {'bbox': dec_cfg.get('DETECTION', False),
-                       'mask': dec_cfg.get('MASK', True),
-                       'caption': dec_cfg['CAPTION'].get('ENABLED', False),
-                       'captioning': dec_cfg['CAPTIONING'].get('ENABLED', False),
-                       'retrieval': dec_cfg['RETRIEVAL'].get('ENABLED', False),
-                       'grounding': dec_cfg['GROUNDING'].get('ENABLED', False)}
-
-        # build model
-        extra = {'task_switch': task_switch}
-        backbone = build_backbone(cfg)
-        lang_encoder = build_language_encoder(cfg)
-        sem_seg_head = build_xdecoder_head(cfg, backbone.output_shape(), lang_encoder, extra)
-
-        # Training Settings.
-        loss_weights = {}
-        matcher = None
-        losses = {}
-        weight_dict = {}
-        grd_weight = {}
-        top_x_layers = {}
-        criterion = None
-        train_dataset_name = None
-        phrase_prob = None
-        # Loss parameters:
-        deep_supervision = None
-        no_object_weight = None
-
-        return {
-            "backbone": backbone,
-            "sem_seg_head": sem_seg_head,
-            "criterion": criterion,
-            "losses": losses,
-            "num_queries": dec_cfg['NUM_OBJECT_QUERIES'],
-            "object_mask_threshold": dec_cfg['TEST']['OBJECT_MASK_THRESHOLD'],
-            "overlap_threshold": dec_cfg['TEST']['OVERLAP_THRESHOLD'],
-            "metadata": None,
-            "size_divisibility": dec_cfg['SIZE_DIVISIBILITY'],
-            "sem_seg_postprocess_before_inference": (
-                dec_cfg['TEST']['SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE']
-                or dec_cfg['TEST']['PANOPTIC_ON']
-                or dec_cfg['TEST']['INSTANCE_ON']
-            ),
-            "pixel_mean": cfg['INPUT']['PIXEL_MEAN'],
-            "pixel_std": cfg['INPUT']['PIXEL_STD'],
-            "task_switch": task_switch,
-            "phrase_prob": phrase_prob,
-            # inference
-            "semantic_on": dec_cfg['TEST']['SEMANTIC_ON'],
-            "instance_on": dec_cfg['TEST']['INSTANCE_ON'],
-            "panoptic_on": dec_cfg['TEST']['PANOPTIC_ON'],
-            "test_topk_per_image": cfg['MODEL']['DECODER']['TEST']['DETECTIONS_PER_IMAGE'],
-            "train_dataset_name": train_dataset_name,
-            "retrieval_emsemble": dec_cfg['RETRIEVAL']['ENSEMBLE'],
-            "backbone_dim": cfg['MODEL']['BACKBONE_DIM'],
-            "dim_proj": cfg['MODEL']['DIM_PROJ'],
-        }
-
-    @property
-    def device(self):
-        return self.pixel_mean.device
-
-    def forward(self, batched_inputs, mode=None):
-        if self.training:
-            assert False, "Not support trianing mode."
-        else:
-            if mode == 'retrieval':
-                return self.evaluate_retrieval(batched_inputs)
-            elif mode == 'captioning':
-                return self.evaluate_captioning(batched_inputs)
-            elif mode == 'classification':
-                return self.evaluate_classification(batched_inputs)
-            elif mode in ['grounding_phrasecut', 'grounding_refcoco']:
-                return self.evaluate_grounding(batched_inputs, mode)
-            else:
-                return self.evaluate(batched_inputs)
-
-    def evaluate(self, batched_inputs):
-        images = [x["image"].to(self.device) for x in batched_inputs]
-        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-
-        images = ImageList.from_tensors(images, self.size_divisibility)
-        img_bs = images.tensor.shape[0]
-
-        targets = targets_grounding = queries_grounding = None
-        features = self.backbone(images.tensor)
-        outputs = self.sem_seg_head(features, target_queries=queries_grounding)
-
-        mask_cls_results = outputs["pred_logits"]
-        mask_pred_results = outputs["pred_masks"]
-        box_pred_results = outputs["pred_boxes"] if self.task_switch['bbox'] else [None for i in range(len(mask_pred_results))]
-        caption_pred_results = outputs["pred_captions"] if self.task_switch['caption'] else [None for i in range(len(mask_pred_results))]
-
-        # upsample masks
-        mask_pred_results = F.interpolate(
-            mask_pred_results,
-            size=(images.tensor.shape[-2], images.tensor.shape[-1]),
-            mode="bilinear",
-            align_corners=False,
-        )
-
-        input_size = mask_pred_results.shape[-2:]
-        keep_sem_bgd = self.metadata.keep_sem_bgd if hasattr(self.metadata, 'keep_sem_bgd') else False
-        del outputs
-
-        processed_results = []
-        for mask_cls_result, mask_pred_result, box_pred_result, caption_pred_result, input_per_image, image_size in zip(
-            mask_cls_results, mask_pred_results, box_pred_results, caption_pred_results, batched_inputs, images.image_sizes
-        ):
-            height = input_per_image.get("height", image_size[0])
-            width = input_per_image.get("width", image_size[1])
-            processed_results.append({})
-
-            if self.sem_seg_postprocess_before_inference:
-                mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
-                    mask_pred_result, image_size, height, width
-                )
-                mask_cls_result = mask_cls_result.to(mask_pred_result)
-
-            # semantic segmentation inference
-            if self.semantic_on:
-                r = retry_if_cuda_oom(self.semantic_inference)(mask_cls_result, mask_pred_result, keep_sem_bgd)
-                if not self.sem_seg_postprocess_before_inference:
-                    r = retry_if_cuda_oom(sem_seg_postprocess)(r, image_size, height, width)
-                processed_results[-1]["sem_seg"] = r
-
-            # panoptic segmentation inference
-            if self.panoptic_on:
-                panoptic_r = retry_if_cuda_oom(self.panoptic_inference)(mask_cls_result, mask_pred_result)
-                processed_results[-1]["panoptic_seg"] = panoptic_r
-
-            # instance segmentation inference
-            if self.instance_on:
-                if self.task_switch['bbox']:
-                    box_pred_result = bbox_postprocess(box_pred_result, input_size, image_size, height, width)
-                instance_r = retry_if_cuda_oom(self.instance_inference)(mask_cls_result, mask_pred_result, box_pred_result)
-                processed_results[-1]["instances"] = instance_r
-            if self.task_switch['caption']:
-                processed_results[-1]["captions"] = caption_pred_result
-                processed_results[-1]["masks"] = mask_pred_result
-
-        return processed_results
-
-
-    def evaluate_retrieval(self, batched_inputs):
-        images = [x["image"].to(self.device) for x in batched_inputs]
-        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-        images = ImageList.from_tensors(images, self.size_divisibility)
-        img_bs = images.tensor.shape[0]
-
-        targets = targets_grounding = queries_grounding = None
-        features = self.backbone(images.tensor)
-        outputs = self.sem_seg_head(features, target_queries=queries_grounding)
-        v_emb_it = outputs['pred_captions'][:,-1]
-
-        # compute backbone score
-        if self.task_switch['retrieval'] and self.retrieval_emsemble:
-            _v_emb_it = features['res5']
-            bs,nc,_,_ = _v_emb_it.shape
-            _v_emb_it = _v_emb_it.reshape(bs,nc,-1)
-            _v_emb_it = F.adaptive_avg_pool1d(_v_emb_it, 1).reshape(bs,nc) @ self.backbone_proj
-
-        processed_results = []
-        for idx, batch_data in enumerate(batched_inputs):
-            caption_ids = []
-            t_emb_its = []
-            processed_results.append({})
-            for caption in batch_data['captions']:
-                lang_results = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(caption)
-                t_emb_it = lang_results['class_emb']
-                caption_ids.append(batch_data['image_id'])
-                t_emb_its.append(t_emb_it)
-
-            t_emb_it = torch.cat(t_emb_its, dim=0)
-
-            image_embeds = [v_emb_it[idx].unsqueeze(0)]
-            if self.task_switch['retrieval'] and self.retrieval_emsemble:
-                image_embeds += [_v_emb_it[idx].unsqueeze(0)]
-            caption_results = {
-                'image_embeds': image_embeds,
-                'text_embeds': t_emb_it,
-                'caption_ids': caption_ids,
-                'image_ids': batch_data['image_id'],
-            }
-            processed_results[-1]["caption"] = caption_results
-        return processed_results
-
-    def evaluate_captioning(self, batched_inputs, extra={}):
-        images = [x["image"].to(self.device) for x in batched_inputs]
-        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-        images = ImageList.from_tensors(images, self.size_divisibility)
-        img_bs = images.tensor.shape[0]
-
-        if not hasattr(self, 'start_token'):
-            self.start_token = torch.tensor([[49406]*77], device=self.device)
-
-        targets = targets_grounding = queries_grounding = None
-        features = self.backbone(images.tensor)
-
-        captioning_mask = None
-        if 'captioning_mask' in batched_inputs[-1]:
-            captioning_mask = torch.cat([x['captioning_mask'] for x in batched_inputs])
-
-        extra.update({'start_token': self.start_token, 'captioning_mask': captioning_mask})
-        outputs = self.sem_seg_head(features, target_queries=queries_grounding, task='captioning_infer', extra=extra)
-
-        processed_results = []
-        for idx, batch_data in enumerate(batched_inputs):
-            processed_results.append({})
-            processed_results[-1]["captioning_token"] = outputs['pred_captionings'][idx]
-            processed_results[-1]["captioning_text"] = outputs['pred_texts'][idx].split('.')[0]
-            processed_results[-1]["image_id"] = batched_inputs[idx]['image_id']
-
-        return processed_results
-
-    def evaluate_classification(self, batched_inputs):
-        images = [x["image"].to(self.device) for x in batched_inputs]
-        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-        images = ImageList.from_tensors(images, self.size_divisibility)
-        img_bs = images.tensor.shape[0]
-
-        targets = targets_grounding = queries_grounding = None
-        features = self.backbone(images.tensor)
-        outputs = self.sem_seg_head(features, target_queries=queries_grounding)
-
-        processed_results = []
-        for idx, batch_data in enumerate(batched_inputs):
-            processed_results.append({})
-            processed_results[-1]["pred_class"] = outputs['pred_logits'][idx,-1]
-        return processed_results
-
-    def evaluate_grounding_baseline(self, batched_inputs, mode):
-        images = [x["image"].to(self.device) for x in batched_inputs]
-        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-        images = ImageList.from_tensors(images, self.size_divisibility)
-        img_bs = images.tensor.shape[0]
-
-        targets = targets_grounding = queries_grounding = None
-        features = self.backbone(images.tensor)
-        outputs = self.sem_seg_head(features, target_queries=queries_grounding)
-
-        mask_pred_results = outputs["pred_masks"]
-        caption_pred_results = outputs["pred_captions"] if self.task_switch['caption'] else [None for i in range(len(mask_pred_results))]
-
-        # upsample masks
-        mask_pred_results = F.interpolate(
-            mask_pred_results,
-            size=(images.tensor.shape[-2], images.tensor.shape[-1]),
-            mode="bilinear",
-            align_corners=False,
-        )
-
-        processed_results = []
-        for mask_pred_result, caption_pred_result, input_per_image, image_size in zip(
-            mask_pred_results, caption_pred_results, batched_inputs, images.image_sizes
-        ):
-            height = input_per_image.get("height", image_size[0])
-            width = input_per_image.get("width", image_size[1])
-            processed_results.append({})
-
-            mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
-                mask_pred_result, image_size, height, width
-            )[:-1]
-
-            texts_all = input_per_image['groundings']['texts']
-            grd_masks = []
-            for texts in texts_all:
-                if mode == 'grounding_refcoco':
-                    self.sem_seg_head.predictor.lang_encoder.get_text_embeddings(texts, name='grounding', prompt=False, is_eval=True)
-                elif mode == 'grounding_phrasecut':
-                    self.sem_seg_head.predictor.lang_encoder.get_text_embeddings(texts, name='grounding', prompt=True, is_eval=False)
-                t_emb = getattr(self.sem_seg_head.predictor.lang_encoder, "{}_text_embeddings".format('grounding')).t()
-                v_emb = caption_pred_result[:-1]
-                v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
-                vt_sim = v_emb @ t_emb
-                max_id = vt_sim.max(0)[1][0]
-                grd_masks += [mask_pred_result[max_id]]
-            processed_results[-1]['grounding_mask'] = torch.stack(grd_masks)
-
-        return processed_results
-
-    def evaluate_grounding(self, batched_inputs, mode):
-        images = [x["image"].to(self.device) for x in batched_inputs]
-        images = [(x - self.pixel_mean) / self.pixel_std for x in images]
-        images = ImageList.from_tensors(images, self.size_divisibility)
-
-        extra = {}
-        # mask_pred_results = []
-        # for idx, batch_per_image in enumerate(batched_inputs):
-        #     grd_texts = batch_per_image['groundings']['texts']
-        #     grd_masks = []
-        #     for anno_text in grd_texts:
-        #         gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings([anno_text[0]], name='grounding', token=False, norm=False)
-        #         token_emb = gtext['token_emb']
-        #         tokens = gtext['tokens']
-
-        #         grd_emb = token_emb[0][tokens['attention_mask'].bool()[0]]
-        #         extra['grounding_tokens'] = grd_emb[:,None]
-
-        #         assert len(images.tensor) == 1, "grounding evaluation only support single batch size now"
-        #         features = self.backbone(images.tensor)
-        #         outputs = self.sem_seg_head(features, extra=extra, task='grounding_eval')
-
-        #         pred_gmasks = outputs['pred_masks'][idx,self.num_queries:2*self.num_queries-1]
-        #         v_emb = outputs['pred_captions'][idx,self.num_queries:2*self.num_queries-1]
-        #         t_emb = grd_emb[-1:]
-
-        #         t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
-        #         v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
-
-        #         temperature = self.sem_seg_head.predictor.lang_encoder.logit_scale
-        #         out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
-
-        #         matched_id = out_prob.max(0)[1]
-        #         grd_masks += [pred_gmasks[matched_id,:,:]]
-        #     mask_pred_results += [torch.cat(grd_masks)]
-
-        # comment for multi object inference.
-        mask_pred_results = []
-        for idx, batch_per_image in enumerate(batched_inputs):
-            grd_texts = batch_per_image['groundings']['texts']
-            grd_texts = [x[0] for x in grd_texts]
-
-            gtext = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(grd_texts, name='grounding', token=False, norm=False)
-            token_emb = gtext['token_emb']
-            tokens = gtext['tokens']
-            query_emb = token_emb[tokens['attention_mask'].bool()]
-            extra['grounding_tokens'] = query_emb[:,None]
-
-            features = self.backbone(images.tensor)
-            outputs = self.sem_seg_head(features, extra=extra, task='grounding_eval')
-
-            pred_gmasks = outputs['pred_masks'][idx,self.num_queries:2*self.num_queries-1]
-            v_emb = outputs['pred_captions'][idx,self.num_queries:2*self.num_queries-1]
-            t_emb = gtext['class_emb']
-
-            t_emb = t_emb / (t_emb.norm(dim=-1, keepdim=True) + 1e-7)
-            v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
-
-            temperature = self.sem_seg_head.predictor.lang_encoder.logit_scale
-            out_prob = vl_similarity(v_emb, t_emb, temperature=temperature)
-
-            matched_id = out_prob.max(0)[1]
-            mask_pred_results += [pred_gmasks[matched_id,:,:]]
-
-        for i in range(len(mask_pred_results)):
-            # upsample masks
-            mask_pred_results[i] = F.interpolate(
-                mask_pred_results[i][None,],
-                size=(images.tensor.shape[-2], images.tensor.shape[-1]),
-                mode="bilinear",
-                align_corners=False,
-            )[0]
-
-        processed_results = []
-        for mask_pred_result, input_per_image, image_size in zip(
-            mask_pred_results, batched_inputs, images.image_sizes
-        ):
-            height = input_per_image.get("height", image_size[0])
-            width = input_per_image.get("width", image_size[1])
-            processed_results.append({})
-
-            mask_pred_result = retry_if_cuda_oom(sem_seg_postprocess)(
-                mask_pred_result, image_size, height, width
-            )
-            processed_results[-1]['grounding_mask'] = mask_pred_result
-
-            # compute bbox
-            # bbox = BitMasks(mask_pred_result > 0).get_bounding_boxes()
-            # bbox = BoxMode.convert(bbox.tensor, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
-            # processed_results[-1]['grounding_box'] = bbox
-
-        return processed_results
-
-    def prepare_vlp_targets(self, batched_inputs, device):
-        input_ids = []
-        attention_mask = []
-        for cnt, x in enumerate(batched_inputs):
-            captions = x['captions']
-            randid = random.randint(0, len(captions)-1)
-            input_ids += x['tokens']['input_ids'][randid:randid+1]
-            attention_mask += x['tokens']['attention_mask'][randid:randid+1]
-
-        input_ids = torch.stack(input_ids)
-        attention_mask = torch.stack(attention_mask)
-        tokens = {"input_ids": input_ids, "attention_mask": attention_mask}
-        lang_results = self.sem_seg_head.predictor.lang_encoder.get_text_token_embeddings(tokens, token=True)
-
-        target_vlp = []
-        for cnt, x in enumerate(batched_inputs):
-            target_dict = {}
-            target_dict["caption_tokens"] = lang_results['token_emb'][cnt:cnt+1]
-            target_dict["caption_proj"] = lang_results['class_emb'][cnt:cnt+1]
-            target_dict["caption_tokenids"] = lang_results['tokens']['input_ids'][cnt:cnt+1]
-            target_dict["caption_mask"] = lang_results['tokens']['attention_mask'][cnt:cnt+1]
-            target_vlp.append(target_dict)
-        return target_vlp
-
-    def semantic_inference(self, mask_cls, mask_pred, keep_sem_bgd=False):
-        if keep_sem_bgd:
-            mask_cls = F.softmax(mask_cls, dim=-1)
-        else:
-            mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
-        mask_pred = mask_pred.sigmoid()
-        semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
-        return semseg
-
-    def panoptic_inference(self, mask_cls, mask_pred):
-        scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
-        mask_pred = mask_pred.sigmoid()
-
-        keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold)
-        cur_scores = scores[keep]
-        cur_classes = labels[keep]
-        cur_masks = mask_pred[keep]
-        cur_mask_cls = mask_cls[keep]
-        cur_mask_cls = cur_mask_cls[:, :-1]
-        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks
-
-        h, w = cur_masks.shape[-2:]
-        panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
-        segments_info = []
-
-        current_segment_id = 0
-
-        if cur_masks.shape[0] == 0:
-            # We didn't detect any mask :(
-            return panoptic_seg, segments_info
-        else:
-            # take argmax
-            cur_mask_ids = cur_prob_masks.argmax(0)
-            stuff_memory_list = {}
-            thing_dataset_id_to_contiguous_id = self.metadata.thing_dataset_id_to_contiguous_id if hasattr(self.metadata, 'thing_dataset_id_to_contiguous_id') else {}
-            for k in range(cur_classes.shape[0]):
-                pred_class = cur_classes[k].item()
-                isthing = pred_class in thing_dataset_id_to_contiguous_id.values()
-                mask_area = (cur_mask_ids == k).sum().item()
-                original_area = (cur_masks[k] >= 0.5).sum().item()
-                mask = (cur_mask_ids == k) & (cur_masks[k] >= 0.5)
-
-                if mask_area > 0 and original_area > 0 and mask.sum().item() > 0:
-                    if mask_area / original_area < self.overlap_threshold:
-                        continue
-
-                    # merge stuff regions
-                    if not isthing:
-                        if int(pred_class) in stuff_memory_list.keys():
-                            panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
-                            continue
-                        else:
-                            stuff_memory_list[int(pred_class)] = current_segment_id + 1
-
-                    current_segment_id += 1
-                    panoptic_seg[mask] = current_segment_id
-
-                    segments_info.append(
-                        {
-                            "id": current_segment_id,
-                            "isthing": bool(isthing),
-                            "category_id": int(pred_class),
-                        }
-                    )
-            return panoptic_seg, segments_info
-
-    def instance_inference(self, mask_cls, mask_pred, box_pred):
-        # mask_pred is already processed to have the same shape as original input
-        image_size = mask_pred.shape[-2:]
-
-        # [Q, K]
-        scores = F.softmax(mask_cls, dim=-1)[:, :-1]
-        labels = torch.arange(self.sem_seg_head.num_classes, device=self.device).unsqueeze(0).repeat(self.num_queries, 1).flatten(0, 1)
-        # scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.num_queries, sorted=False)
-        scores_per_image, topk_indices = scores.flatten(0, 1).topk(self.test_topk_per_image, sorted=False)
-
-        labels_per_image = labels[topk_indices]
-        topk_indices = (topk_indices // self.sem_seg_head.num_classes)
-        # mask_pred = mask_pred.unsqueeze(1).repeat(1, self.sem_seg_head.num_classes, 1).flatten(0, 1)
-        mask_pred = mask_pred[topk_indices]
-        if box_pred is not None:
-            box_pred = box_pred[topk_indices]
-
-        # if this is panoptic segmentation, we only keep the "thing" classes
-        if self.panoptic_on:
-            thing_dataset_id_to_contiguous_id = self.metadata.thing_dataset_id_to_contiguous_id if hasattr(self.metadata, 'thing_dataset_id_to_contiguous_id') else {}
-            keep = torch.zeros_like(scores_per_image).bool()
-            for i, lab in enumerate(labels_per_image):
-                keep[i] = lab in thing_dataset_id_to_contiguous_id.values()
-
-            scores_per_image = scores_per_image[keep]
-            labels_per_image = labels_per_image[keep]
-            mask_pred = mask_pred[keep]
-
-            if box_pred is not None:
-                box_pred = box_pred[keep]
-
-        result = Instances(image_size)
-        # mask (before sigmoid)
-        result.pred_masks = (mask_pred > 0).float()
-        # result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
-        # Uncomment the following to get boxes from masks (this is slow)
-
-        if box_pred is not None:
-            result.pred_boxes = BitMasks(mask_pred > 0).get_bounding_boxes()
-        else:
-            result.pred_boxes = Boxes(torch.zeros(mask_pred.size(0), 4))
-
-        # calculate average mask prob
-        mask_scores_per_image = (mask_pred.sigmoid().flatten(1) * result.pred_masks.flatten(1)).sum(1) / (result.pred_masks.flatten(1).sum(1) + 1e-6)
-        result.scores = scores_per_image * mask_scores_per_image
-        result.pred_classes = labels_per_image
-
-        return result
-
-
-@register_model
-def get_segmentation_model(cfg, **kwargs):
-    return X_Decoder_Model(cfg)
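As the deleted forward shows, X_Decoder_Model is evaluation-only and dispatches on a mode string over detectron2-style per-image dicts. A sketch of a caller, where the input tensor and sizes are illustrative stand-ins:

import torch

# Hypothetical inference driver; 'image' / 'height' / 'width' follow the
# detectron2 batched_inputs convention the class reads above.
batched_inputs = [{'image': torch.zeros(3, 480, 640), 'height': 480, 'width': 640}]
with torch.no_grad():
    seg = model(batched_inputs)                     # default: segmentation path
    cap = model(batched_inputs, mode='captioning')  # caption decoding path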
xdecoder/backbone/__init__.py
DELETED
@@ -1,7 +0,0 @@
-from .build import build_backbone
-
-from .resnet import *
-from .swin import *
-from .focal import *
-from .focal_dw import *
-from .backbone import *
xdecoder/backbone/backbone.py
DELETED
@@ -1,51 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-import torch.nn as nn
-
-from detectron2.modeling import ShapeSpec
-
-__all__ = ["Backbone"]
-
-
-class Backbone(nn.Module):
-    """
-    Abstract base class for network backbones.
-    """
-
-    def __init__(self):
-        """
-        The `__init__` method of any subclass can specify its own set of arguments.
-        """
-        super().__init__()
-
-    def forward(self):
-        """
-        Subclasses must override this method, but adhere to the same return type.
-
-        Returns:
-            dict[str->Tensor]: mapping from feature name (e.g., "res2") to tensor
-        """
-        pass
-
-    @property
-    def size_divisibility(self) -> int:
-        """
-        Some backbones require the input height and width to be divisible by a
-        specific integer. This is typically true for encoder / decoder type networks
-        with lateral connection (e.g., FPN) for which feature maps need to match
-        dimension in the "bottom up" and "top down" paths. Set to 0 if no specific
-        input size divisibility is required.
-        """
-        return 0
-
-    def output_shape(self):
-        """
-        Returns:
-            dict[str->ShapeSpec]
-        """
-        # this is a backward-compatible default
-        return {
-            name: ShapeSpec(
-                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
-            )
-            for name in self._out_features
-        }
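The deleted base class leaves _out_features, _out_feature_channels, and _out_feature_strides for subclasses to populate so the default output_shape() works. A minimal conforming subclass might look like this (channel count and stride are illustrative):

import torch.nn as nn

class ToyBackbone(Backbone):
    # Illustrative subclass: a single conv stage exposed as "res2".
    def __init__(self):
        super().__init__()
        self.stem = nn.Conv2d(3, 64, kernel_size=4, stride=4)
        self._out_features = ["res2"]
        self._out_feature_channels = {"res2": 64}
        self._out_feature_strides = {"res2": 4}

    def forward(self, x):
        # dict[str -> Tensor], matching the contract documented above
        return {"res2": self.stem(x)}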
xdecoder/backbone/build.py
DELETED
@@ -1,11 +0,0 @@
-from .registry import model_entrypoints
-from .registry import is_model
-
-from .backbone import *
-
-def build_backbone(config, **kwargs):
-    model_name = config['MODEL']['BACKBONE']['NAME']
-    if not is_model(model_name):
-        raise ValueError(f'Unkown model: {model_name}')
-
-    return model_entrypoints(model_name)(config, **kwargs)
xdecoder/backbone/focal.py
DELETED
@@ -1,692 +0,0 @@
-# --------------------------------------------------------
-# FocalNet for Semantic Segmentation
-# Copyright (c) 2022 Microsoft
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Jianwei Yang
-# --------------------------------------------------------
-import math
-import time
-import numpy as np
-import logging
-import torch
-import torch.nn as nn
-import torch.nn.functional as F
-import torch.utils.checkpoint as checkpoint
-from timm.models.layers import DropPath, to_2tuple, trunc_normal_
-
-from detectron2.utils.file_io import PathManager
-from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec
-
-from .registry import register_backbone
-
-logger = logging.getLogger(__name__)
-
-class Mlp(nn.Module):
-    """ Multilayer perceptron."""
-
-    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
-        super().__init__()
-        out_features = out_features or in_features
-        hidden_features = hidden_features or in_features
-        self.fc1 = nn.Linear(in_features, hidden_features)
-        self.act = act_layer()
-        self.fc2 = nn.Linear(hidden_features, out_features)
-        self.drop = nn.Dropout(drop)
-
-    def forward(self, x):
-        x = self.fc1(x)
-        x = self.act(x)
-        x = self.drop(x)
-        x = self.fc2(x)
-        x = self.drop(x)
-        return x
-
-class FocalModulation(nn.Module):
-    """ Focal Modulation
-
-    Args:
-        dim (int): Number of input channels.
-        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
-        focal_level (int): Number of focal levels
-        focal_window (int): Focal window size at focal level 1
-        focal_factor (int, default=2): Step to increase the focal window
-        use_postln (bool, default=False): Whether use post-modulation layernorm
-    """
-
-    def __init__(self, dim, proj_drop=0., focal_level=2, focal_window=7, focal_factor=2, use_postln=False, use_postln_in_modulation=False, scaling_modulator=False):
-
-        super().__init__()
-        self.dim = dim
-
-        # specific args for focalv3
-        self.focal_level = focal_level
-        self.focal_window = focal_window
-        self.focal_factor = focal_factor
-        self.use_postln_in_modulation = use_postln_in_modulation
-        self.scaling_modulator = scaling_modulator
-
-        self.f = nn.Linear(dim, 2*dim+(self.focal_level+1), bias=True)
-        self.h = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0, groups=1, bias=True)
-
-        self.act = nn.GELU()
-        self.proj = nn.Linear(dim, dim)
-        self.proj_drop = nn.Dropout(proj_drop)
-        self.focal_layers = nn.ModuleList()
-
-        if self.use_postln_in_modulation:
-            self.ln = nn.LayerNorm(dim)
-
-        for k in range(self.focal_level):
-            kernel_size = self.focal_factor*k + self.focal_window
-            self.focal_layers.append(
-                nn.Sequential(
-                    nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1, groups=dim,
-                        padding=kernel_size//2, bias=False),
-                    nn.GELU(),
-                    )
-                )
-
-    def forward(self, x):
-        """ Forward function.
-
-        Args:
-            x: input features with shape of (B, H, W, C)
-        """
-        B, nH, nW, C = x.shape
-        x = self.f(x)
-        x = x.permute(0, 3, 1, 2).contiguous()
-        q, ctx, gates = torch.split(x, (C, C, self.focal_level+1), 1)
-
-        ctx_all = 0
-        for l in range(self.focal_level):
-            ctx = self.focal_layers[l](ctx)
-            ctx_all = ctx_all + ctx*gates[:, l:l+1]
-        ctx_global = self.act(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
-        ctx_all = ctx_all + ctx_global*gates[:,self.focal_level:]
-
-        if self.scaling_modulator:
-            ctx_all = ctx_all / (self.focal_level + 1)
-
-        x_out = q * self.h(ctx_all)
-        x_out = x_out.permute(0, 2, 3, 1).contiguous()
-        if self.use_postln_in_modulation:
-            x_out = self.ln(x_out)
-        x_out = self.proj(x_out)
-        x_out = self.proj_drop(x_out)
-        return x_out
-
-class FocalModulationBlock(nn.Module):
-    """ Focal Modulation Block.
-
-    Args:
-        dim (int): Number of input channels.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
-        drop (float, optional): Dropout rate. Default: 0.0
-        drop_path (float, optional): Stochastic depth rate. Default: 0.0
-        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
-        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-        focal_level (int): number of focal levels
-        focal_window (int): focal kernel size at level 1
-    """
-
-    def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0.,
-                 act_layer=nn.GELU, norm_layer=nn.LayerNorm,
-                 focal_level=2, focal_window=9,
-                 use_postln=False, use_postln_in_modulation=False,
-                 scaling_modulator=False,
-                 use_layerscale=False,
-                 layerscale_value=1e-4):
-        super().__init__()
-        self.dim = dim
-        self.mlp_ratio = mlp_ratio
-        self.focal_window = focal_window
-        self.focal_level = focal_level
-        self.use_postln = use_postln
-        self.use_layerscale = use_layerscale
-
-        self.norm1 = norm_layer(dim)
-        self.modulation = FocalModulation(
-            dim, focal_window=self.focal_window, focal_level=self.focal_level, proj_drop=drop, use_postln_in_modulation=use_postln_in_modulation, scaling_modulator=scaling_modulator
-        )
-
-        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
-        self.norm2 = norm_layer(dim)
-        mlp_hidden_dim = int(dim * mlp_ratio)
-        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)
-
-        self.H = None
-        self.W = None
-
-        self.gamma_1 = 1.0
-        self.gamma_2 = 1.0
-        if self.use_layerscale:
-            self.gamma_1 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
-            self.gamma_2 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
-
-    def forward(self, x):
-        """ Forward function.
-
-        Args:
-            x: Input feature, tensor size (B, H*W, C).
-            H, W: Spatial resolution of the input feature.
-        """
-        B, L, C = x.shape
-        H, W = self.H, self.W
-        assert L == H * W, "input feature has wrong size"
-
-        shortcut = x
-        if not self.use_postln:
-            x = self.norm1(x)
-        x = x.view(B, H, W, C)
-
-        # FM
-        x = self.modulation(x).view(B, H * W, C)
-        if self.use_postln:
-            x = self.norm1(x)
-
-        # FFN
-        x = shortcut + self.drop_path(self.gamma_1 * x)
-
-        if self.use_postln:
-            x = x + self.drop_path(self.gamma_2 * self.norm2(self.mlp(x)))
-        else:
-            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
-
-        return x
-
-class BasicLayer(nn.Module):
-    """ A basic focal modulation layer for one stage.
-
-    Args:
-        dim (int): Number of feature channels
-        depth (int): Depths of this stage.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
-        drop (float, optional): Dropout rate. Default: 0.0
-        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
-        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
-        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
-        focal_level (int): Number of focal levels
-        focal_window (int): Focal window size at focal level 1
-        use_conv_embed (bool): Use overlapped convolution for patch embedding or now. Default: False
-        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
-    """
-
-    def __init__(self,
-                 dim,
-                 depth,
-                 mlp_ratio=4.,
-                 drop=0.,
-                 drop_path=0.,
-                 norm_layer=nn.LayerNorm,
-                 downsample=None,
-                 focal_window=9,
-                 focal_level=2,
-                 use_conv_embed=False,
-                 use_postln=False,
-                 use_postln_in_modulation=False,
-                 scaling_modulator=False,
-                 use_layerscale=False,
-                 use_checkpoint=False
-    ):
-        super().__init__()
-        self.depth = depth
-        self.use_checkpoint = use_checkpoint
-
-        # build blocks
-        self.blocks = nn.ModuleList([
-            FocalModulationBlock(
-                dim=dim,
-                mlp_ratio=mlp_ratio,
-                drop=drop,
-                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
-                focal_window=focal_window,
-                focal_level=focal_level,
-                use_postln=use_postln,
-                use_postln_in_modulation=use_postln_in_modulation,
-                scaling_modulator=scaling_modulator,
-                use_layerscale=use_layerscale,
-                norm_layer=norm_layer)
-            for i in range(depth)])
-
-        # patch merging layer
-        if downsample is not None:
-            self.downsample = downsample(
-                patch_size=2,
-                in_chans=dim, embed_dim=2*dim,
-                use_conv_embed=use_conv_embed,
-                norm_layer=norm_layer,
-                is_stem=False
-            )
-
-        else:
-            self.downsample = None
-
-    def forward(self, x, H, W):
-        """ Forward function.
-
-        Args:
-            x: Input feature, tensor size (B, H*W, C).
-            H, W: Spatial resolution of the input feature.
-        """
-        for blk in self.blocks:
-            blk.H, blk.W = H, W
-            if self.use_checkpoint:
-                x = checkpoint.checkpoint(blk, x)
-            else:
-                x = blk(x)
-        if self.downsample is not None:
-            x_reshaped = x.transpose(1, 2).view(x.shape[0], x.shape[-1], H, W)
-            x_down = self.downsample(x_reshaped)
-            x_down = x_down.flatten(2).transpose(1, 2)
-            Wh, Ww = (H + 1) // 2, (W + 1) // 2
-            return x, H, W, x_down, Wh, Ww
-        else:
-            return x, H, W, x, H, W
-
-
-class PatchEmbed(nn.Module):
-    """ Image to Patch Embedding
-
-    Args:
-        patch_size (int): Patch token size. Default: 4.
-        in_chans (int): Number of input image channels. Default: 3.
-        embed_dim (int): Number of linear projection output channels. Default: 96.
-        norm_layer (nn.Module, optional): Normalization layer. Default: None
-        use_conv_embed (bool): Whether use overlapped convolution for patch embedding. Default: False
-        is_stem (bool): Is the stem block or not.
-    """
-
-    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, use_conv_embed=False, is_stem=False):
-        super().__init__()
-        patch_size = to_2tuple(patch_size)
-        self.patch_size = patch_size
-
-        self.in_chans = in_chans
-        self.embed_dim = embed_dim
-
-        if use_conv_embed:
-            # if we choose to use conv embedding, then we treat the stem and non-stem differently
-            if is_stem:
-                kernel_size = 7; padding = 2; stride = 4
-            else:
-                kernel_size = 3; padding = 1; stride = 2
-            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
-        else:
-            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
-
-        if norm_layer is not None:
-            self.norm = norm_layer(embed_dim)
-        else:
-            self.norm = None
-
-    def forward(self, x):
-        """Forward function."""
-        _, _, H, W = x.size()
-        if W % self.patch_size[1] != 0:
-            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
-        if H % self.patch_size[0] != 0:
-            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))
-
-        x = self.proj(x)  # B C Wh Ww
-        if self.norm is not None:
-            Wh, Ww = x.size(2), x.size(3)
-            x = x.flatten(2).transpose(1, 2)
-            x = self.norm(x)
-            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)
-
-        return x
-
-
-class FocalNet(nn.Module):
-    """ FocalNet backbone.
-
-    Args:
-        pretrain_img_size (int): Input image size for training the pretrained model,
-            used in absolute postion embedding. Default 224.
-        patch_size (int | tuple(int)): Patch size. Default: 4.
-        in_chans (int): Number of input image channels. Default: 3.
-        embed_dim (int): Number of linear projection output channels. Default: 96.
-        depths (tuple[int]): Depths of each Swin Transformer stage.
-        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
-        drop_rate (float): Dropout rate.
-        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
-        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
-        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
-        out_indices (Sequence[int]): Output from which stages.
-        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
-            -1 means not freezing any parameters.
-        focal_levels (Sequence[int]): Number of focal levels at four stages
-        focal_windows (Sequence[int]): Focal window sizes at first focal level at four stages
-        use_conv_embed (bool): Whether use overlapped convolution for patch embedding
-        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
-    """
-
-    def __init__(self,
-                 pretrain_img_size=1600,
-                 patch_size=4,
-                 in_chans=3,
-                 embed_dim=96,
-                 depths=[2, 2, 6, 2],
-                 mlp_ratio=4.,
-                 drop_rate=0.,
-                 drop_path_rate=0.2,
-                 norm_layer=nn.LayerNorm,
-                 patch_norm=True,
-                 out_indices=[0, 1, 2, 3],
-                 frozen_stages=-1,
-                 focal_levels=[2,2,2,2],
-                 focal_windows=[9,9,9,9],
-                 use_conv_embed=False,
-                 use_postln=False,
-                 use_postln_in_modulation=False,
-                 scaling_modulator=False,
-                 use_layerscale=False,
-                 use_checkpoint=False,
-    ):
-        super().__init__()
-
-        self.pretrain_img_size = pretrain_img_size
-        self.num_layers = len(depths)
-        self.embed_dim = embed_dim
-        self.patch_norm = patch_norm
-        self.out_indices = out_indices
-        self.frozen_stages = frozen_stages
-
-        # split image into non-overlapping patches
-        self.patch_embed = PatchEmbed(
-            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
-            norm_layer=norm_layer if self.patch_norm else None,
-            use_conv_embed=use_conv_embed, is_stem=True)
-
-        self.pos_drop = nn.Dropout(p=drop_rate)
-
-        # stochastic depth
-        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule
-
-        # build layers
-        self.layers = nn.ModuleList()
-        for i_layer in range(self.num_layers):
-            layer = BasicLayer(
-                dim=int(embed_dim * 2 ** i_layer),
-                depth=depths[i_layer],
-                mlp_ratio=mlp_ratio,
-                drop=drop_rate,
-                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
-                norm_layer=norm_layer,
-                downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
-                focal_window=focal_windows[i_layer],
-                focal_level=focal_levels[i_layer],
-                use_conv_embed=use_conv_embed,
-                use_postln=use_postln,
-                use_postln_in_modulation=use_postln_in_modulation,
-                scaling_modulator=scaling_modulator,
-                use_layerscale=use_layerscale,
-                use_checkpoint=use_checkpoint)
-            self.layers.append(layer)
-
-        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
-        self.num_features = num_features
-
-        # add a norm layer for each output
-        for i_layer in out_indices:
-            layer = norm_layer(num_features[i_layer])
-            layer_name = f'norm{i_layer}'
-            self.add_module(layer_name, layer)
-
-        self._freeze_stages()
-
-    def _freeze_stages(self):
-        if self.frozen_stages >= 0:
-            self.patch_embed.eval()
-            for param in self.patch_embed.parameters():
-                param.requires_grad = False
-
-        if self.frozen_stages >= 2:
-            self.pos_drop.eval()
-            for i in range(0, self.frozen_stages - 1):
-                m = self.layers[i]
-                m.eval()
-                for param in m.parameters():
-                    param.requires_grad = False
-
-    def init_weights(self, pretrained=None):
-        """Initialize the weights in backbone.
-
-        Args:
-            pretrained (str, optional): Path to pre-trained weights.
-                Defaults to None.
-        """
-
-        def _init_weights(m):
-            if isinstance(m, nn.Linear):
-                trunc_normal_(m.weight, std=.02)
-                if isinstance(m, nn.Linear) and m.bias is not None:
-                    nn.init.constant_(m.bias, 0)
-            elif isinstance(m, nn.LayerNorm):
-                nn.init.constant_(m.bias, 0)
-                nn.init.constant_(m.weight, 1.0)
-
-        if isinstance(pretrained, str):
-            self.apply(_init_weights)
-            logger = get_root_logger()
-            load_checkpoint(self, pretrained, strict=False, logger=logger)
-        elif pretrained is None:
-            self.apply(_init_weights)
-        else:
-            raise TypeError('pretrained must be a str or None')
-
-    def load_weights(self, pretrained_dict=None, pretrained_layers=[], verbose=True):
-        model_dict = self.state_dict()
-
-        missed_dict = [k for k in model_dict.keys() if k not in pretrained_dict]
-        logger.info(f'=> Missed keys {missed_dict}')
-        unexpected_dict = [k for k in pretrained_dict.keys() if k not in model_dict]
-        logger.info(f'=> Unexpected keys {unexpected_dict}')
-
-        pretrained_dict = {
-            k: v for k, v in pretrained_dict.items()
-            if k in model_dict.keys()
-        }
-
-        need_init_state_dict = {}
-        for k, v in pretrained_dict.items():
-            need_init = (
-                (
-                    k.split('.')[0] in pretrained_layers
-                    or pretrained_layers[0] == '*'
-                )
-                and 'relative_position_index' not in k
-                and 'attn_mask' not in k
-            )
-
-            if need_init:
-                # if verbose:
-                #     logger.info(f'=> init {k} from {pretrained}')
-
-                if ('pool_layers' in k) or ('focal_layers' in k) and v.size() != model_dict[k].size():
-                    table_pretrained = v
-                    table_current = model_dict[k]
-                    fsize1 = table_pretrained.shape[2]
-                    fsize2 = table_current.shape[2]
-
-                    # NOTE: different from interpolation used in self-attention, we use padding or clipping for focal conv
-                    if fsize1 < fsize2:
-                        table_pretrained_resized = torch.zeros(table_current.shape)
-                        table_pretrained_resized[:, :, (fsize2-fsize1)//2:-(fsize2-fsize1)//2, (fsize2-fsize1)//2:-(fsize2-fsize1)//2] = table_pretrained
-                        v = table_pretrained_resized
-                    elif fsize1 > fsize2:
-                        table_pretrained_resized = table_pretrained[:, :, (fsize1-fsize2)//2:-(fsize1-fsize2)//2, (fsize1-fsize2)//2:-(fsize1-fsize2)//2]
-                        v = table_pretrained_resized
-
-
-                if ("modulation.f" in k or "pre_conv" in k):
-                    table_pretrained = v
-                    table_current = model_dict[k]
-                    if table_pretrained.shape != table_current.shape:
-                        if len(table_pretrained.shape) == 2:
-                            dim = table_pretrained.shape[1]
-                            assert table_current.shape[1] == dim
-                            L1 = table_pretrained.shape[0]
-                            L2 = table_current.shape[0]
-
-                            if L1 < L2:
-                                table_pretrained_resized = torch.zeros(table_current.shape)
-                                # copy for linear project
-                                table_pretrained_resized[:2*dim] = table_pretrained[:2*dim]
-                                # copy for global token gating
-                                table_pretrained_resized[-1] = table_pretrained[-1]
-                                # copy for first multiple focal levels
-                                table_pretrained_resized[2*dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
-                                # reassign pretrained weights
-                                v = table_pretrained_resized
-                            elif L1 > L2:
-                                raise NotImplementedError
-                        elif len(table_pretrained.shape) == 1:
-                            dim = table_pretrained.shape[0]
-                            L1 = table_pretrained.shape[0]
-                            L2 = table_current.shape[0]
-                            if L1 < L2:
-                                table_pretrained_resized = torch.zeros(table_current.shape)
-                                # copy for linear project
551 |
-
table_pretrained_resized[:dim] = table_pretrained[:dim]
|
552 |
-
# copy for global token gating
|
553 |
-
table_pretrained_resized[-1] = table_pretrained[-1]
|
554 |
-
# copy for first multiple focal levels
|
555 |
-
# table_pretrained_resized[dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
|
556 |
-
# reassign pretrained weights
|
557 |
-
v = table_pretrained_resized
|
558 |
-
elif L1 > L2:
|
559 |
-
raise NotImplementedError
|
560 |
-
|
561 |
-
need_init_state_dict[k] = v
|
562 |
-
|
563 |
-
self.load_state_dict(need_init_state_dict, strict=False)
|
564 |
-
|
565 |
-
|
566 |
-
def forward(self, x):
|
567 |
-
"""Forward function."""
|
568 |
-
tic = time.time()
|
569 |
-
x = self.patch_embed(x)
|
570 |
-
Wh, Ww = x.size(2), x.size(3)
|
571 |
-
|
572 |
-
x = x.flatten(2).transpose(1, 2)
|
573 |
-
x = self.pos_drop(x)
|
574 |
-
|
575 |
-
outs = {}
|
576 |
-
for i in range(self.num_layers):
|
577 |
-
layer = self.layers[i]
|
578 |
-
x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
|
579 |
-
if i in self.out_indices:
|
580 |
-
norm_layer = getattr(self, f'norm{i}')
|
581 |
-
x_out = norm_layer(x_out)
|
582 |
-
|
583 |
-
out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
|
584 |
-
outs["res{}".format(i + 2)] = out
|
585 |
-
|
586 |
-
if len(self.out_indices) == 0:
|
587 |
-
outs["res5"] = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
|
588 |
-
|
589 |
-
toc = time.time()
|
590 |
-
return outs
|
591 |
-
|
592 |
-
def train(self, mode=True):
|
593 |
-
"""Convert the model into training mode while keep layers freezed."""
|
594 |
-
super(FocalNet, self).train(mode)
|
595 |
-
self._freeze_stages()
|
596 |
-
|
597 |
-
|
598 |
-
class D2FocalNet(FocalNet, Backbone):
|
599 |
-
def __init__(self, cfg, input_shape):
|
600 |
-
|
601 |
-
pretrain_img_size = cfg['BACKBONE']['FOCAL']['PRETRAIN_IMG_SIZE']
|
602 |
-
patch_size = cfg['BACKBONE']['FOCAL']['PATCH_SIZE']
|
603 |
-
in_chans = 3
|
604 |
-
embed_dim = cfg['BACKBONE']['FOCAL']['EMBED_DIM']
|
605 |
-
depths = cfg['BACKBONE']['FOCAL']['DEPTHS']
|
606 |
-
mlp_ratio = cfg['BACKBONE']['FOCAL']['MLP_RATIO']
|
607 |
-
drop_rate = cfg['BACKBONE']['FOCAL']['DROP_RATE']
|
608 |
-
drop_path_rate = cfg['BACKBONE']['FOCAL']['DROP_PATH_RATE']
|
609 |
-
norm_layer = nn.LayerNorm
|
610 |
-
patch_norm = cfg['BACKBONE']['FOCAL']['PATCH_NORM']
|
611 |
-
use_checkpoint = cfg['BACKBONE']['FOCAL']['USE_CHECKPOINT']
|
612 |
-
out_indices = cfg['BACKBONE']['FOCAL']['OUT_INDICES']
|
613 |
-
scaling_modulator = cfg['BACKBONE']['FOCAL'].get('SCALING_MODULATOR', False)
|
614 |
-
|
615 |
-
super().__init__(
|
616 |
-
pretrain_img_size,
|
617 |
-
patch_size,
|
618 |
-
in_chans,
|
619 |
-
embed_dim,
|
620 |
-
depths,
|
621 |
-
mlp_ratio,
|
622 |
-
drop_rate,
|
623 |
-
drop_path_rate,
|
624 |
-
norm_layer,
|
625 |
-
patch_norm,
|
626 |
-
out_indices,
|
627 |
-
focal_levels=cfg['BACKBONE']['FOCAL']['FOCAL_LEVELS'],
|
628 |
-
focal_windows=cfg['BACKBONE']['FOCAL']['FOCAL_WINDOWS'],
|
629 |
-
use_conv_embed=cfg['BACKBONE']['FOCAL']['USE_CONV_EMBED'],
|
630 |
-
use_postln=cfg['BACKBONE']['FOCAL']['USE_POSTLN'],
|
631 |
-
use_postln_in_modulation=cfg['BACKBONE']['FOCAL']['USE_POSTLN_IN_MODULATION'],
|
632 |
-
scaling_modulator=scaling_modulator,
|
633 |
-
use_layerscale=cfg['BACKBONE']['FOCAL']['USE_LAYERSCALE'],
|
634 |
-
use_checkpoint=use_checkpoint,
|
635 |
-
)
|
636 |
-
|
637 |
-
self._out_features = cfg['BACKBONE']['FOCAL']['OUT_FEATURES']
|
638 |
-
|
639 |
-
self._out_feature_strides = {
|
640 |
-
"res2": 4,
|
641 |
-
"res3": 8,
|
642 |
-
"res4": 16,
|
643 |
-
"res5": 32,
|
644 |
-
}
|
645 |
-
self._out_feature_channels = {
|
646 |
-
"res2": self.num_features[0],
|
647 |
-
"res3": self.num_features[1],
|
648 |
-
"res4": self.num_features[2],
|
649 |
-
"res5": self.num_features[3],
|
650 |
-
}
|
651 |
-
|
652 |
-
def forward(self, x):
|
653 |
-
"""
|
654 |
-
Args:
|
655 |
-
x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
|
656 |
-
Returns:
|
657 |
-
dict[str->Tensor]: names and the corresponding features
|
658 |
-
"""
|
659 |
-
assert (
|
660 |
-
x.dim() == 4
|
661 |
-
), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
|
662 |
-
outputs = {}
|
663 |
-
y = super().forward(x)
|
664 |
-
for k in y.keys():
|
665 |
-
if k in self._out_features:
|
666 |
-
outputs[k] = y[k]
|
667 |
-
return outputs
|
668 |
-
|
669 |
-
def output_shape(self):
|
670 |
-
return {
|
671 |
-
name: ShapeSpec(
|
672 |
-
channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
|
673 |
-
)
|
674 |
-
for name in self._out_features
|
675 |
-
}
|
676 |
-
|
677 |
-
@property
|
678 |
-
def size_divisibility(self):
|
679 |
-
return 32
|
680 |
-
|
681 |
-
@register_backbone
|
682 |
-
def get_focal_backbone(cfg):
|
683 |
-
focal = D2FocalNet(cfg['MODEL'], 224)
|
684 |
-
|
685 |
-
if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
|
686 |
-
filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
|
687 |
-
logger.info(f'=> init from {filename}')
|
688 |
-
with PathManager.open(filename, "rb") as f:
|
689 |
-
ckpt = torch.load(f)['model']
|
690 |
-
focal.load_weights(ckpt, cfg['MODEL']['BACKBONE']['FOCAL'].get('PRETRAINED_LAYERS', ['*']), cfg['VERBOSE'])
|
691 |
-
|
692 |
-
return focal
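
For context on the deletion above: the file defined the FocalNet backbone and its detectron2 wrapper. A minimal sketch of exercising the backbone standalone, assuming the class and defaults exactly as deleted above (the input size and the print loop are illustrative, not from the repo):

import torch

model = FocalNet(patch_size=4, embed_dim=96, depths=[2, 2, 6, 2],
                 focal_levels=[2, 2, 2, 2], focal_windows=[9, 9, 9, 9])
model.eval()

x = torch.randn(1, 3, 224, 224)        # (N, C, H, W)
with torch.no_grad():
    feats = model(x)                   # dict keyed 'res2' .. 'res5'
for name, feat in feats.items():
    print(name, tuple(feat.shape))     # res2 -> (1, 96, 56, 56), ..., res5 -> (1, 768, 7, 7)

The res2..res5 strides are 4/8/16/32, matching the _out_feature_strides table that D2FocalNet hard-codes.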
xdecoder/backbone/focal_dw.py
DELETED
@@ -1,789 +0,0 @@
# --------------------------------------------------------
# FocalNet for Semantic Segmentation
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang
# --------------------------------------------------------
import math
import time
import numpy as np
import logging
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from detectron2.utils.file_io import PathManager
from detectron2.modeling import BACKBONE_REGISTRY, Backbone, ShapeSpec

from .registry import register_backbone

logger = logging.getLogger(__name__)

class Mlp(nn.Module):
    """ Multilayer perceptron."""

    def __init__(self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x

class FocalModulation(nn.Module):
    """ Focal Modulation

    Args:
        dim (int): Number of input channels.
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
        focal_level (int): Number of focal levels
        focal_window (int): Focal window size at focal level 1
        focal_factor (int, default=2): Step to increase the focal window
        use_postln (bool, default=False): Whether to use post-modulation layernorm
    """

    def __init__(self, dim, proj_drop=0., focal_level=2, focal_window=7, focal_factor=2, use_postln=False, use_postln_in_modulation=False, scaling_modulator=False):

        super().__init__()
        self.dim = dim

        # specific args for focalv3
        self.focal_level = focal_level
        self.focal_window = focal_window
        self.focal_factor = focal_factor
        self.use_postln_in_modulation = use_postln_in_modulation
        self.scaling_modulator = scaling_modulator

        self.f = nn.Linear(dim, 2*dim+(self.focal_level+1), bias=True)
        self.h = nn.Conv2d(dim, dim, kernel_size=1, stride=1, padding=0, groups=1, bias=True)

        self.act = nn.GELU()
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)
        self.focal_layers = nn.ModuleList()

        if self.use_postln_in_modulation:
            self.ln = nn.LayerNorm(dim)

        for k in range(self.focal_level):
            kernel_size = self.focal_factor*k + self.focal_window
            self.focal_layers.append(
                nn.Sequential(
                    nn.Conv2d(dim, dim, kernel_size=kernel_size, stride=1, groups=dim,
                              padding=kernel_size//2, bias=False),
                    nn.GELU(),
                )
            )

    def forward(self, x):
        """ Forward function.

        Args:
            x: input features with shape of (B, H, W, C)
        """
        B, nH, nW, C = x.shape
        x = self.f(x)
        x = x.permute(0, 3, 1, 2).contiguous()
        q, ctx, gates = torch.split(x, (C, C, self.focal_level+1), 1)

        ctx_all = 0
        for l in range(self.focal_level):
            ctx = self.focal_layers[l](ctx)
            ctx_all = ctx_all + ctx*gates[:, l:l+1]
        ctx_global = self.act(ctx.mean(2, keepdim=True).mean(3, keepdim=True))
        ctx_all = ctx_all + ctx_global*gates[:, self.focal_level:]

        if self.scaling_modulator:
            ctx_all = ctx_all / (self.focal_level + 1)

        x_out = q * self.h(ctx_all)
        x_out = x_out.permute(0, 2, 3, 1).contiguous()
        if self.use_postln_in_modulation:
            x_out = self.ln(x_out)
        x_out = self.proj(x_out)
        x_out = self.proj_drop(x_out)
        return x_out

class FocalModulationBlock(nn.Module):
    """ Focal Modulation Block.

    Args:
        dim (int): Number of input channels.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        focal_level (int): number of focal levels
        focal_window (int): focal kernel size at level 1
    """

    def __init__(self, dim, mlp_ratio=4., drop=0., drop_path=0.,
                 act_layer=nn.GELU, norm_layer=nn.LayerNorm,
                 focal_level=2, focal_window=9,
                 use_postln=False, use_postln_in_modulation=False,
                 scaling_modulator=False,
                 use_layerscale=False,
                 layerscale_value=1e-4):
        super().__init__()
        self.dim = dim
        self.mlp_ratio = mlp_ratio
        self.focal_window = focal_window
        self.focal_level = focal_level
        self.use_postln = use_postln
        self.use_layerscale = use_layerscale

        self.dw1 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, groups=dim)
        self.norm1 = norm_layer(dim)
        self.modulation = FocalModulation(
            dim, focal_window=self.focal_window, focal_level=self.focal_level, proj_drop=drop, use_postln_in_modulation=use_postln_in_modulation, scaling_modulator=scaling_modulator
        )

        self.dw2 = nn.Conv2d(dim, dim, kernel_size=3, stride=1, padding=1, groups=dim)
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop)

        self.H = None
        self.W = None

        self.gamma_1 = 1.0
        self.gamma_2 = 1.0
        if self.use_layerscale:
            self.gamma_1 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)
            self.gamma_2 = nn.Parameter(layerscale_value * torch.ones((dim)), requires_grad=True)

    def forward(self, x):
        """ Forward function.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
        """
        B, L, C = x.shape
        H, W = self.H, self.W
        assert L == H * W, "input feature has wrong size"

        x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()
        x = x + self.dw1(x)
        x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C)

        shortcut = x
        if not self.use_postln:
            x = self.norm1(x)
        x = x.view(B, H, W, C)

        # FM (focal modulation)
        x = self.modulation(x).view(B, H * W, C)
        x = shortcut + self.drop_path(self.gamma_1 * x)
        if self.use_postln:
            x = self.norm1(x)

        x = x.view(B, H, W, C).permute(0, 3, 1, 2).contiguous()
        x = x + self.dw2(x)
        x = x.permute(0, 2, 3, 1).contiguous().view(B, L, C)

        if not self.use_postln:
            x = x + self.drop_path(self.gamma_2 * self.mlp(self.norm2(x)))
        else:
            x = x + self.drop_path(self.gamma_2 * self.mlp(x))
            x = self.norm2(x)

        return x

class BasicLayer(nn.Module):
    """ A basic focal modulation layer for one stage.

    Args:
        dim (int): Number of feature channels
        depth (int): Depths of this stage.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        drop (float, optional): Dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        focal_level (int): Number of focal levels
        focal_window (int): Focal window size at focal level 1
        use_conv_embed (bool): Use overlapped convolution for patch embedding or not. Default: False
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False
    """

    def __init__(self,
                 dim,
                 depth,
                 mlp_ratio=4.,
                 drop=0.,
                 drop_path=0.,
                 norm_layer=nn.LayerNorm,
                 downsample=None,
                 focal_window=9,
                 focal_level=2,
                 use_conv_embed=False,
                 use_postln=False,
                 use_postln_in_modulation=False,
                 scaling_modulator=False,
                 use_layerscale=False,
                 use_checkpoint=False,
                 use_pre_norm=False,
                 ):
        super().__init__()
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList([
            FocalModulationBlock(
                dim=dim,
                mlp_ratio=mlp_ratio,
                drop=drop,
                drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                focal_window=focal_window,
                focal_level=focal_level,
                use_postln=use_postln,
                use_postln_in_modulation=use_postln_in_modulation,
                scaling_modulator=scaling_modulator,
                use_layerscale=use_layerscale,
                norm_layer=norm_layer)
            for i in range(depth)])

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(
                patch_size=2,
                in_chans=dim, embed_dim=2*dim,
                use_conv_embed=use_conv_embed,
                norm_layer=norm_layer,
                is_stem=False,
                use_pre_norm=use_pre_norm
            )

        else:
            self.downsample = None

    def forward(self, x, H, W):
        """ Forward function.

        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
        """
        for blk in self.blocks:
            blk.H, blk.W = H, W
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x)
            else:
                x = blk(x)
        if self.downsample is not None:
            x_reshaped = x.transpose(1, 2).view(x.shape[0], x.shape[-1], H, W)
            x_down = self.downsample(x_reshaped)
            x_down = x_down.flatten(2).transpose(1, 2)
            Wh, Ww = (H + 1) // 2, (W + 1) // 2
            return x, H, W, x_down, Wh, Ww
        else:
            return x, H, W, x, H, W


# class PatchEmbed(nn.Module):
#     r""" Image to Patch Embedding
#
#     Args:
#         img_size (int): Image size. Default: 224.
#         patch_size (int): Patch token size. Default: 4.
#         in_chans (int): Number of input image channels. Default: 3.
#         embed_dim (int): Number of linear projection output channels. Default: 96.
#         norm_layer (nn.Module, optional): Normalization layer. Default: None
#     """
#
#     def __init__(self, img_size=(224, 224), patch_size=4, in_chans=3, embed_dim=96,
#                  use_conv_embed=False, norm_layer=None, is_stem=False, use_pre_norm=False):
#         super().__init__()
#         patch_size = to_2tuple(patch_size)
#         patches_resolution = [img_size[0] // patch_size[0], img_size[1] // patch_size[1]]
#         self.img_size = img_size
#         self.patch_size = patch_size
#         self.patches_resolution = patches_resolution
#         self.num_patches = patches_resolution[0] * patches_resolution[1]
#
#         self.in_chans = in_chans
#         self.embed_dim = embed_dim
#         self.use_pre_norm = use_pre_norm
#
#         if use_conv_embed:
#             # if we choose to use conv embedding, then we treat the stem and non-stem differently
#             if is_stem:
#                 kernel_size = 7; padding = 3; stride = 4
#             else:
#                 kernel_size = 3; padding = 1; stride = 2
#             self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
#         else:
#             self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
#
#         if self.use_pre_norm:
#             if norm_layer is not None:
#                 self.norm = norm_layer(in_chans)
#             else:
#                 self.norm = None
#         else:
#             if norm_layer is not None:
#                 self.norm = norm_layer(embed_dim)
#             else:
#                 self.norm = None
#
#     def forward(self, x):
#         B, C, H, W = x.shape
#         # FIXME look at relaxing size constraints
#         assert H == self.img_size[0] and W == self.img_size[1], \
#             f"Input image size ({H}*{W}) doesn't match model ({self.img_size[0]}*{self.img_size[1]})."
#
#         if self.use_pre_norm:
#             if self.norm is not None:
#                 x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
#                 x = self.norm(x).transpose(1, 2).view(B, C, H, W)
#             x = self.proj(x).flatten(2).transpose(1, 2)
#         else:
#             x = self.proj(x).flatten(2).transpose(1, 2)  # B Ph*Pw C
#             if self.norm is not None:
#                 x = self.norm(x)
#         return x
#
#     def flops(self):
#         Ho, Wo = self.patches_resolution
#         flops = Ho * Wo * self.embed_dim * self.in_chans * (self.patch_size[0] * self.patch_size[1])
#         if self.norm is not None:
#             flops += Ho * Wo * self.embed_dim
#         return flops

class PatchEmbed(nn.Module):
    """ Image to Patch Embedding

    Args:
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
        use_conv_embed (bool): Whether to use overlapped convolution for patch embedding. Default: False
        is_stem (bool): Is the stem block or not.
    """

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None, use_conv_embed=False, is_stem=False, use_pre_norm=False):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size

        self.in_chans = in_chans
        self.embed_dim = embed_dim
        self.use_pre_norm = use_pre_norm

        if use_conv_embed:
            # if we choose to use conv embedding, then we treat the stem and non-stem differently
            if is_stem:
                kernel_size = 7; padding = 3; stride = 4
            else:
                kernel_size = 3; padding = 1; stride = 2
            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=kernel_size, stride=stride, padding=padding)
        else:
            self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)

        if self.use_pre_norm:
            if norm_layer is not None:
                self.norm = norm_layer(in_chans)
            else:
                self.norm = None
        else:
            if norm_layer is not None:
                self.norm = norm_layer(embed_dim)
            else:
                self.norm = None

    def forward(self, x):
        """Forward function."""
        B, C, H, W = x.size()
        if W % self.patch_size[1] != 0:
            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
        if H % self.patch_size[0] != 0:
            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))

        if self.use_pre_norm:
            if self.norm is not None:
                x = x.flatten(2).transpose(1, 2)  # B Ph*Pw C
                x = self.norm(x).transpose(1, 2).view(B, C, H, W)
            x = self.proj(x)
        else:
            x = self.proj(x)  # B C Wh Ww
            if self.norm is not None:
                Wh, Ww = x.size(2), x.size(3)
                x = x.flatten(2).transpose(1, 2)
                x = self.norm(x)
                x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)

        return x


class FocalNet(nn.Module):
    """ FocalNet backbone.

    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 224.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each FocalNet stage.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        drop_rate (float): Dropout rate.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        focal_levels (Sequence[int]): Number of focal levels at four stages
        focal_windows (Sequence[int]): Focal window sizes at first focal level at four stages
        use_conv_embed (bool): Whether to use overlapped convolution for patch embedding
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(self,
                 pretrain_img_size=1600,
                 patch_size=4,
                 in_chans=3,
                 embed_dim=96,
                 depths=[2, 2, 6, 2],
                 mlp_ratio=4.,
                 drop_rate=0.,
                 drop_path_rate=0.2,
                 norm_layer=nn.LayerNorm,
                 patch_norm=True,
                 out_indices=[0, 1, 2, 3],
                 frozen_stages=-1,
                 focal_levels=[2, 2, 2, 2],
                 focal_windows=[9, 9, 9, 9],
                 use_pre_norms=[False, False, False, False],
                 use_conv_embed=False,
                 use_postln=False,
                 use_postln_in_modulation=False,
                 scaling_modulator=False,
                 use_layerscale=False,
                 use_checkpoint=False,
                 ):
        super().__init__()

        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size, in_chans=in_chans, embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
            use_conv_embed=use_conv_embed, is_stem=True, use_pre_norm=False)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                depth=depths[i_layer],
                mlp_ratio=mlp_ratio,
                drop=drop_rate,
                drop_path=dpr[sum(depths[:i_layer]):sum(depths[:i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchEmbed if (i_layer < self.num_layers - 1) else None,
                focal_window=focal_windows[i_layer],
                focal_level=focal_levels[i_layer],
                use_pre_norm=use_pre_norms[i_layer],
                use_conv_embed=use_conv_embed,
                use_postln=use_postln,
                use_postln_in_modulation=use_postln_in_modulation,
                scaling_modulator=scaling_modulator,
                use_layerscale=use_layerscale,
                use_checkpoint=use_checkpoint)
            self.layers.append(layer)

        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        self.num_features = num_features
        # self.norm = norm_layer(num_features[-1])

        # add a norm layer for each output
        for i_layer in self.out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f'norm{i_layer}'
            self.add_module(layer_name, layer)

        self._freeze_stages()

    def _freeze_stages(self):
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False

        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.

        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """

        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

        if isinstance(pretrained, str):
            self.apply(_init_weights)
            # NOTE: get_root_logger and load_checkpoint come from the mmcv/mmseg
            # training tooling; they are not defined or imported in this file.
            logger = get_root_logger()
            load_checkpoint(self, pretrained, strict=False, logger=logger)
        elif pretrained is None:
            self.apply(_init_weights)
        else:
            raise TypeError('pretrained must be a str or None')

    def load_weights(self, pretrained_dict=None, pretrained_layers=[], verbose=True):
        model_dict = self.state_dict()

        missed_dict = [k for k in model_dict.keys() if k not in pretrained_dict]
        logger.info(f'=> Missed keys {missed_dict}')
        unexpected_dict = [k for k in pretrained_dict.keys() if k not in model_dict]
        logger.info(f'=> Unexpected keys {unexpected_dict}')

        pretrained_dict = {
            k: v for k, v in pretrained_dict.items()
            if k in model_dict.keys()
        }

        need_init_state_dict = {}
        for k, v in pretrained_dict.items():
            need_init = (
                (
                    k.split('.')[0] in pretrained_layers
                    or pretrained_layers[0] == '*'
                )
                and 'relative_position_index' not in k
                and 'attn_mask' not in k
            )

            if need_init:
                # if verbose:
                #     logger.info(f'=> init {k} from {pretrained}')

                if ('pool_layers' in k or 'focal_layers' in k) and v.size() != model_dict[k].size():
                    table_pretrained = v
                    table_current = model_dict[k]
                    fsize1 = table_pretrained.shape[2]
                    fsize2 = table_current.shape[2]

                    # NOTE: different from interpolation used in self-attention, we use padding or clipping for focal conv
                    if fsize1 < fsize2:
                        table_pretrained_resized = torch.zeros(table_current.shape)
                        table_pretrained_resized[:, :, (fsize2-fsize1)//2:-(fsize2-fsize1)//2, (fsize2-fsize1)//2:-(fsize2-fsize1)//2] = table_pretrained
                        v = table_pretrained_resized
                    elif fsize1 > fsize2:
                        table_pretrained_resized = table_pretrained[:, :, (fsize1-fsize2)//2:-(fsize1-fsize2)//2, (fsize1-fsize2)//2:-(fsize1-fsize2)//2]
                        v = table_pretrained_resized

                if ("modulation.f" in k or "pre_conv" in k):
                    table_pretrained = v
                    table_current = model_dict[k]
                    if table_pretrained.shape != table_current.shape:
                        if len(table_pretrained.shape) == 2:
                            dim = table_pretrained.shape[1]
                            assert table_current.shape[1] == dim
                            L1 = table_pretrained.shape[0]
                            L2 = table_current.shape[0]

                            if L1 < L2:
                                table_pretrained_resized = torch.zeros(table_current.shape)
                                # copy for linear project
                                table_pretrained_resized[:2*dim] = table_pretrained[:2*dim]
                                # copy for global token gating
                                table_pretrained_resized[-1] = table_pretrained[-1]
                                # copy for first multiple focal levels
                                table_pretrained_resized[2*dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
                                # reassign pretrained weights
                                v = table_pretrained_resized
                            elif L1 > L2:
                                raise NotImplementedError
                        elif len(table_pretrained.shape) == 1:
                            dim = table_pretrained.shape[0]
                            L1 = table_pretrained.shape[0]
                            L2 = table_current.shape[0]
                            if L1 < L2:
                                table_pretrained_resized = torch.zeros(table_current.shape)
                                # copy for linear project
                                table_pretrained_resized[:dim] = table_pretrained[:dim]
                                # copy for global token gating
                                table_pretrained_resized[-1] = table_pretrained[-1]
                                # copy for first multiple focal levels
                                # table_pretrained_resized[dim:2*dim+(L1-2*dim-1)] = table_pretrained[2*dim:-1]
                                # reassign pretrained weights
                                v = table_pretrained_resized
                            elif L1 > L2:
                                raise NotImplementedError

            need_init_state_dict[k] = v

        self.load_state_dict(need_init_state_dict, strict=False)

    def forward(self, x):
        """Forward function."""
        tic = time.time()
        x = self.patch_embed(x)
        Wh, Ww = x.size(2), x.size(3)

        x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)

        outs = {}
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)
            if i in self.out_indices:
                norm_layer = getattr(self, f'norm{i}')
                x_out = norm_layer(x_out)

                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
                outs["res{}".format(i + 2)] = out

        if len(self.out_indices) == 0:
            outs["res5"] = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()

        toc = time.time()
        return outs

    def train(self, mode=True):
        """Convert the model into training mode while keeping layers frozen."""
        super(FocalNet, self).train(mode)
        self._freeze_stages()


class D2FocalNet(FocalNet, Backbone):
    def __init__(self, cfg, input_shape):

        pretrain_img_size = cfg['BACKBONE']['FOCAL']['PRETRAIN_IMG_SIZE']
        patch_size = cfg['BACKBONE']['FOCAL']['PATCH_SIZE']
        in_chans = 3
        embed_dim = cfg['BACKBONE']['FOCAL']['EMBED_DIM']
        depths = cfg['BACKBONE']['FOCAL']['DEPTHS']
        mlp_ratio = cfg['BACKBONE']['FOCAL']['MLP_RATIO']
        drop_rate = cfg['BACKBONE']['FOCAL']['DROP_RATE']
        drop_path_rate = cfg['BACKBONE']['FOCAL']['DROP_PATH_RATE']
        norm_layer = nn.LayerNorm
        patch_norm = cfg['BACKBONE']['FOCAL']['PATCH_NORM']
        use_checkpoint = cfg['BACKBONE']['FOCAL']['USE_CHECKPOINT']
        out_indices = cfg['BACKBONE']['FOCAL']['OUT_INDICES']
        scaling_modulator = cfg['BACKBONE']['FOCAL'].get('SCALING_MODULATOR', False)

        super().__init__(
            pretrain_img_size,
            patch_size,
            in_chans,
            embed_dim,
            depths,
            mlp_ratio,
            drop_rate,
            drop_path_rate,
            norm_layer,
            patch_norm,
            out_indices,
            focal_levels=cfg['BACKBONE']['FOCAL']['FOCAL_LEVELS'],
            focal_windows=cfg['BACKBONE']['FOCAL']['FOCAL_WINDOWS'],
            use_conv_embed=cfg['BACKBONE']['FOCAL']['USE_CONV_EMBED'],
            use_postln=cfg['BACKBONE']['FOCAL']['USE_POSTLN'],
            use_postln_in_modulation=cfg['BACKBONE']['FOCAL']['USE_POSTLN_IN_MODULATION'],
            scaling_modulator=scaling_modulator,
            use_layerscale=cfg['BACKBONE']['FOCAL']['USE_LAYERSCALE'],
            use_checkpoint=use_checkpoint,
        )

        self._out_features = cfg['BACKBONE']['FOCAL']['OUT_FEATURES']

        self._out_feature_strides = {
            "res2": 4,
            "res3": 8,
            "res4": 16,
            "res5": 32,
        }
        self._out_feature_channels = {
            "res2": self.num_features[0],
            "res3": self.num_features[1],
            "res4": self.num_features[2],
            "res5": self.num_features[3],
        }

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        assert (
            x.dim() == 4
        ), f"D2FocalNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
        outputs = {}
        y = super().forward(x)
        for k in y.keys():
            if k in self._out_features:
                outputs[k] = y[k]
        return outputs

    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    @property
    def size_divisibility(self):
        return 32


@register_backbone
def get_focal_backbone(cfg):
    focal = D2FocalNet(cfg['MODEL'], 224)

    if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
        filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
        logger.info(f'=> init from {filename}')
        with PathManager.open(filename, "rb") as f:
            ckpt = torch.load(f)['model']
        focal.load_weights(ckpt, cfg['MODEL']['BACKBONE']['FOCAL'].get('PRETRAINED_LAYERS', ['*']), cfg['VERBOSE'])

    return focal
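
focal_dw.py differs from focal.py mainly by the two depthwise 3x3 convolutions (dw1/dw2) wrapped around each block and the optional per-stage pre-norm in patch embedding. A minimal sketch of the FocalModulation core in isolation, assuming the class exactly as deleted above; the shapes are illustrative:

import torch

mod = FocalModulation(dim=96, focal_level=2, focal_window=9)
x = torch.randn(2, 14, 14, 96)    # (B, H, W, C), the layout its forward expects
y = mod(x)
print(y.shape)                    # torch.Size([2, 14, 14, 96])

With focal_factor=2, the depthwise context kernels grow as kernel_size = focal_factor*k + focal_window, i.e. 9 and 11 here, plus one globally pooled context, each mixed in by its learned gate.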
xdecoder/backbone/registry.py
DELETED
@@ -1,14 +0,0 @@
_model_entrypoints = {}


def register_backbone(fn):
    module_name_split = fn.__module__.split('.')
    model_name = module_name_split[-1]
    _model_entrypoints[model_name] = fn
    return fn

def model_entrypoints(model_name):
    return _model_entrypoints[model_name]

def is_model(model_name):
    return model_name in _model_entrypoints
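
A minimal sketch of how this registry is consumed, assuming the helpers exactly as deleted above. Note the registration key is the defining module's file name (fn.__module__.split('.')[-1]), so focal.py and focal_dw.py register their factories under 'focal' and 'focal_dw'. The build helper below is hypothetical, for illustration only:

from .registry import is_model, model_entrypoints

def build_backbone_sketch(cfg, name='focal'):
    # look the factory up by backbone module name and call it with the config
    assert is_model(name), f"unknown backbone: {name}"
    return model_entrypoints(name)(cfg)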
xdecoder/backbone/resnet.py
DELETED
@@ -1,731 +0,0 @@
|
|
1 |
-
# Copyright (c) Facebook, Inc. and its affiliates.
|
2 |
-
import pickle
|
3 |
-
import numpy as np
|
4 |
-
from typing import Any, Dict
|
5 |
-
import fvcore.nn.weight_init as weight_init
|
6 |
-
import torch
|
7 |
-
import torch.nn.functional as F
|
8 |
-
from torch import nn
|
9 |
-
|
10 |
-
|
11 |
-
from .backbone import Backbone
|
12 |
-
from .registry import register_backbone
|
13 |
-
|
14 |
-
from detectron2.layers import (
|
15 |
-
CNNBlockBase,
|
16 |
-
Conv2d,
|
17 |
-
DeformConv,
|
18 |
-
ModulatedDeformConv,
|
19 |
-
ShapeSpec,
|
20 |
-
get_norm,
|
21 |
-
)
|
22 |
-
from detectron2.utils.file_io import PathManager
|
23 |
-
|
24 |
-
__all__ = [
|
25 |
-
"ResNetBlockBase",
|
26 |
-
"BasicBlock",
|
27 |
-
"BottleneckBlock",
|
28 |
-
"DeformBottleneckBlock",
|
29 |
-
"BasicStem",
|
30 |
-
"ResNet",
|
31 |
-
"make_stage",
|
32 |
-
"get_resnet_backbone",
|
33 |
-
]
|
34 |
-
|
35 |
-
|
36 |
-
class BasicBlock(CNNBlockBase):
|
37 |
-
"""
|
38 |
-
The basic residual block for ResNet-18 and ResNet-34 defined in :paper:`ResNet`,
|
39 |
-
with two 3x3 conv layers and a projection shortcut if needed.
|
40 |
-
"""
|
41 |
-
|
42 |
-
def __init__(self, in_channels, out_channels, *, stride=1, norm="BN"):
|
43 |
-
"""
|
44 |
-
Args:
|
45 |
-
in_channels (int): Number of input channels.
|
46 |
-
out_channels (int): Number of output channels.
|
47 |
-
stride (int): Stride for the first conv.
|
48 |
-
norm (str or callable): normalization for all conv layers.
|
49 |
-
See :func:`layers.get_norm` for supported format.
|
50 |
-
"""
|
51 |
-
super().__init__(in_channels, out_channels, stride)
|
52 |
-
|
53 |
-
if in_channels != out_channels:
|
54 |
-
self.shortcut = Conv2d(
|
55 |
-
in_channels,
|
56 |
-
out_channels,
|
57 |
-
kernel_size=1,
|
58 |
-
stride=stride,
|
59 |
-
bias=False,
|
60 |
-
norm=get_norm(norm, out_channels),
|
61 |
-
)
|
62 |
-
else:
|
63 |
-
self.shortcut = None
|
64 |
-
|
65 |
-
self.conv1 = Conv2d(
|
66 |
-
in_channels,
|
67 |
-
out_channels,
|
68 |
-
kernel_size=3,
|
69 |
-
stride=stride,
|
70 |
-
padding=1,
|
71 |
-
bias=False,
|
72 |
-
norm=get_norm(norm, out_channels),
|
73 |
-
)
|
74 |
-
|
75 |
-
self.conv2 = Conv2d(
|
76 |
-
out_channels,
|
77 |
-
out_channels,
|
78 |
-
kernel_size=3,
|
79 |
-
stride=1,
|
80 |
-
padding=1,
|
81 |
-
bias=False,
|
82 |
-
norm=get_norm(norm, out_channels),
|
83 |
-
)
|
84 |
-
|
85 |
-
for layer in [self.conv1, self.conv2, self.shortcut]:
|
86 |
-
if layer is not None: # shortcut can be None
|
87 |
-
weight_init.c2_msra_fill(layer)
|
88 |
-
|
89 |
-
def forward(self, x):
|
90 |
-
out = self.conv1(x)
|
91 |
-
out = F.relu_(out)
|
92 |
-
out = self.conv2(out)
|
93 |
-
|
94 |
-
if self.shortcut is not None:
|
95 |
-
shortcut = self.shortcut(x)
|
96 |
-
else:
|
97 |
-
shortcut = x
|
98 |
-
|
99 |
-
out += shortcut
|
100 |
-
out = F.relu_(out)
|
101 |
-
return out
|
102 |
-
|
103 |
-
|
104 |
-
class BottleneckBlock(CNNBlockBase):
|
105 |
-
"""
|
106 |
-
The standard bottleneck residual block used by ResNet-50, 101 and 152
|
107 |
-
defined in :paper:`ResNet`. It contains 3 conv layers with kernels
|
108 |
-
1x1, 3x3, 1x1, and a projection shortcut if needed.
|
109 |
-
"""
|
110 |
-
|
111 |
-
def __init__(
|
112 |
-
self,
|
113 |
-
in_channels,
|
114 |
-
out_channels,
|
115 |
-
*,
|
116 |
-
bottleneck_channels,
|
117 |
-
stride=1,
|
118 |
-
num_groups=1,
|
119 |
-
norm="BN",
|
120 |
-
stride_in_1x1=False,
|
121 |
-
dilation=1,
|
122 |
-
):
|
123 |
-
"""
|
124 |
-
Args:
|
125 |
-
bottleneck_channels (int): number of output channels for the 3x3
|
126 |
-
"bottleneck" conv layers.
|
127 |
-
num_groups (int): number of groups for the 3x3 conv layer.
|
128 |
-
norm (str or callable): normalization for all conv layers.
|
129 |
-
See :func:`layers.get_norm` for supported format.
|
130 |
-
stride_in_1x1 (bool): when stride>1, whether to put stride in the
|
131 |
-
first 1x1 convolution or the bottleneck 3x3 convolution.
|
132 |
-
dilation (int): the dilation rate of the 3x3 conv layer.
|
133 |
-
"""
|
134 |
-
super().__init__(in_channels, out_channels, stride)
|
135 |
-
|
136 |
-
if in_channels != out_channels:
|
137 |
-
self.shortcut = Conv2d(
|
138 |
-
in_channels,
|
139 |
-
out_channels,
|
140 |
-
kernel_size=1,
|
141 |
-
stride=stride,
|
142 |
-
bias=False,
|
143 |
-
norm=get_norm(norm, out_channels),
|
144 |
-
)
|
145 |
-
else:
|
146 |
-
self.shortcut = None
|
147 |
-
|
148 |
-
# The original MSRA ResNet models have stride in the first 1x1 conv
|
149 |
-
# The subsequent fb.torch.resnet and Caffe2 ResNe[X]t implementations have
|
150 |
-
# stride in the 3x3 conv
|
151 |
-
stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)
|
152 |
-
|
153 |
-
self.conv1 = Conv2d(
|
154 |
-
in_channels,
|
155 |
-
bottleneck_channels,
|
156 |
-
kernel_size=1,
|
157 |
-
stride=stride_1x1,
|
158 |
-
bias=False,
|
159 |
-
norm=get_norm(norm, bottleneck_channels),
|
160 |
-
)
|
161 |
-
|
162 |
-
self.conv2 = Conv2d(
|
163 |
-
bottleneck_channels,
|
164 |
-
bottleneck_channels,
|
165 |
-
kernel_size=3,
|
166 |
-
stride=stride_3x3,
|
167 |
-
padding=1 * dilation,
|
168 |
-
bias=False,
|
169 |
-
groups=num_groups,
|
170 |
-
dilation=dilation,
|
171 |
-
norm=get_norm(norm, bottleneck_channels),
|
172 |
-
)
|
173 |
-
|
174 |
-
self.conv3 = Conv2d(
|
175 |
-
bottleneck_channels,
|
176 |
-
out_channels,
|
177 |
-
kernel_size=1,
|
178 |
-
bias=False,
|
179 |
-
norm=get_norm(norm, out_channels),
|
180 |
-
)
|
181 |
-
|
182 |
-
for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
|
183 |
-
if layer is not None: # shortcut can be None
|
184 |
-
weight_init.c2_msra_fill(layer)
|
185 |
-
|
186 |
-
# Zero-initialize the last normalization in each residual branch,
|
187 |
-
# so that at the beginning, the residual branch starts with zeros,
|
188 |
-
# and each residual block behaves like an identity.
|
189 |
-
# See Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
|
190 |
-
# "For BN layers, the learnable scaling coefficient γ is initialized
|
191 |
-
# to be 1, except for each residual block's last BN
|
192 |
-
# where γ is initialized to be 0."
|
193 |
-
|
194 |
-
# nn.init.constant_(self.conv3.norm.weight, 0)
|
195 |
-
# TODO this somehow hurts performance when training GN models from scratch.
|
196 |
-
# Add it as an option when we need to use this code to train a backbone.
|
197 |
-
|
198 |
-
def forward(self, x):
|
199 |
-
out = self.conv1(x)
|
200 |
-
out = F.relu_(out)
|
201 |
-
|
        out = self.conv2(out)
        out = F.relu_(out)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out


class DeformBottleneckBlock(CNNBlockBase):
    """
    Similar to :class:`BottleneckBlock`, but with :paper:`deformable conv <deformconv>`
    in the 3x3 convolution.
    """

    def __init__(
        self,
        in_channels,
        out_channels,
        *,
        bottleneck_channels,
        stride=1,
        num_groups=1,
        norm="BN",
        stride_in_1x1=False,
        dilation=1,
        deform_modulated=False,
        deform_num_groups=1,
    ):
        super().__init__(in_channels, out_channels, stride)
        self.deform_modulated = deform_modulated

        if in_channels != out_channels:
            self.shortcut = Conv2d(
                in_channels,
                out_channels,
                kernel_size=1,
                stride=stride,
                bias=False,
                norm=get_norm(norm, out_channels),
            )
        else:
            self.shortcut = None

        stride_1x1, stride_3x3 = (stride, 1) if stride_in_1x1 else (1, stride)

        self.conv1 = Conv2d(
            in_channels,
            bottleneck_channels,
            kernel_size=1,
            stride=stride_1x1,
            bias=False,
            norm=get_norm(norm, bottleneck_channels),
        )

        if deform_modulated:
            deform_conv_op = ModulatedDeformConv
            # offset channels are 2 or 3 (if with modulated) * kernel_size * kernel_size
            offset_channels = 27
        else:
            deform_conv_op = DeformConv
            offset_channels = 18

        self.conv2_offset = Conv2d(
            bottleneck_channels,
            offset_channels * deform_num_groups,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            dilation=dilation,
        )
        self.conv2 = deform_conv_op(
            bottleneck_channels,
            bottleneck_channels,
            kernel_size=3,
            stride=stride_3x3,
            padding=1 * dilation,
            bias=False,
            groups=num_groups,
            dilation=dilation,
            deformable_groups=deform_num_groups,
            norm=get_norm(norm, bottleneck_channels),
        )

        self.conv3 = Conv2d(
            bottleneck_channels,
            out_channels,
            kernel_size=1,
            bias=False,
            norm=get_norm(norm, out_channels),
        )

        for layer in [self.conv1, self.conv2, self.conv3, self.shortcut]:
            if layer is not None:  # shortcut can be None
                weight_init.c2_msra_fill(layer)

        nn.init.constant_(self.conv2_offset.weight, 0)
        nn.init.constant_(self.conv2_offset.bias, 0)

    def forward(self, x):
        out = self.conv1(x)
        out = F.relu_(out)

        if self.deform_modulated:
            offset_mask = self.conv2_offset(out)
            offset_x, offset_y, mask = torch.chunk(offset_mask, 3, dim=1)
            offset = torch.cat((offset_x, offset_y), dim=1)
            mask = mask.sigmoid()
            out = self.conv2(out, offset, mask)
        else:
            offset = self.conv2_offset(out)
            out = self.conv2(out, offset)
        out = F.relu_(out)

        out = self.conv3(out)

        if self.shortcut is not None:
            shortcut = self.shortcut(x)
        else:
            shortcut = x

        out += shortcut
        out = F.relu_(out)
        return out
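The hard-coded offset_channels values above (18 and 27) come from the 3x3 kernel: two offset coordinates per tap, plus one modulation scalar per tap in the modulated (DCNv2) case. A minimal sketch of that arithmetic, illustrative only and not part of the deleted file:

# Sketch: how the offset-branch channel counts above are derived.
kernel_size = 3
offset_channels_plain = 2 * kernel_size * kernel_size      # (x, y) per tap -> 18
offset_channels_modulated = 3 * kernel_size * kernel_size  # (x, y, mask) per tap -> 27
assert offset_channels_plain == 18 and offset_channels_modulated == 27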


class BasicStem(CNNBlockBase):
    """
    The standard ResNet stem (layers before the first residual block),
    with a conv, relu and max_pool.
    """

    def __init__(self, in_channels=3, out_channels=64, norm="BN"):
        """
        Args:
            norm (str or callable): norm after the first conv layer.
                See :func:`layers.get_norm` for supported format.
        """
        super().__init__(in_channels, out_channels, 4)
        self.in_channels = in_channels
        self.conv1 = Conv2d(
            in_channels,
            out_channels,
            kernel_size=7,
            stride=2,
            padding=3,
            bias=False,
            norm=get_norm(norm, out_channels),
        )
        weight_init.c2_msra_fill(self.conv1)

    def forward(self, x):
        x = self.conv1(x)
        x = F.relu_(x)
        x = F.max_pool2d(x, kernel_size=3, stride=2, padding=1)
        return x
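A quick shape check for the stem, a sketch assuming torch and detectron2 are importable: the 7x7 stride-2 conv followed by the 3x3 stride-2 max-pool yields the overall stride of 4 that `super().__init__(in_channels, out_channels, 4)` declares.

import torch

stem = BasicStem(in_channels=3, out_channels=64, norm="BN")
x = torch.randn(1, 3, 224, 224)
y = stem(x)
assert y.shape == (1, 64, 56, 56)  # 224 / 4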


class ResNet(Backbone):
    """
    Implement :paper:`ResNet`.
    """

    def __init__(self, stem, stages, num_classes=None, out_features=None, freeze_at=0):
        """
        Args:
            stem (nn.Module): a stem module
            stages (list[list[CNNBlockBase]]): several (typically 4) stages,
                each contains multiple :class:`CNNBlockBase`.
            num_classes (None or int): if None, will not perform classification.
                Otherwise, will create a linear layer.
            out_features (list[str]): name of the layers whose outputs should
                be returned in forward. Can be anything in "stem", "linear", or "res2" ...
                If None, will return the output of the last layer.
            freeze_at (int): The number of stages at the beginning to freeze.
                see :meth:`freeze` for detailed explanation.
        """
        super().__init__()
        self.stem = stem
        self.num_classes = num_classes

        current_stride = self.stem.stride
        self._out_feature_strides = {"stem": current_stride}
        self._out_feature_channels = {"stem": self.stem.out_channels}

        self.stage_names, self.stages = [], []

        if out_features is not None:
            # Avoid keeping unused layers in this module. They consume extra memory
            # and may cause allreduce to fail
            num_stages = max(
                [{"res2": 1, "res3": 2, "res4": 3, "res5": 4}.get(f, 0) for f in out_features]
            )
            stages = stages[:num_stages]
        for i, blocks in enumerate(stages):
            assert len(blocks) > 0, len(blocks)
            for block in blocks:
                assert isinstance(block, CNNBlockBase), block

            name = "res" + str(i + 2)
            stage = nn.Sequential(*blocks)

            self.add_module(name, stage)
            self.stage_names.append(name)
            self.stages.append(stage)

            self._out_feature_strides[name] = current_stride = int(
                current_stride * np.prod([k.stride for k in blocks])
            )
            self._out_feature_channels[name] = curr_channels = blocks[-1].out_channels
        self.stage_names = tuple(self.stage_names)  # Make it static for scripting

        if num_classes is not None:
            self.avgpool = nn.AdaptiveAvgPool2d((1, 1))
            self.linear = nn.Linear(curr_channels, num_classes)

            # Sec 5.1 in "Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour":
            # "The 1000-way fully-connected layer is initialized by
            # drawing weights from a zero-mean Gaussian with standard deviation of 0.01."
            nn.init.normal_(self.linear.weight, std=0.01)
            name = "linear"

        if out_features is None:
            out_features = [name]
        self._out_features = out_features
        assert len(self._out_features)
        children = [x[0] for x in self.named_children()]
        for out_feature in self._out_features:
            assert out_feature in children, "Available children: {}".format(", ".join(children))
        self.freeze(freeze_at)

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.

        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        assert x.dim() == 4, f"ResNet takes an input of shape (N, C, H, W). Got {x.shape} instead!"
        outputs = {}
        x = self.stem(x)
        if "stem" in self._out_features:
            outputs["stem"] = x
        for name, stage in zip(self.stage_names, self.stages):
            x = stage(x)
            if name in self._out_features:
                outputs[name] = x
        if self.num_classes is not None:
            x = self.avgpool(x)
            x = torch.flatten(x, 1)
            x = self.linear(x)
            if "linear" in self._out_features:
                outputs["linear"] = x
        return outputs

    def output_shape(self):
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in self._out_features
        }

    def freeze(self, freeze_at=0):
        """
        Freeze the first several stages of the ResNet. Commonly used in
        fine-tuning.

        Layers that produce the same feature map spatial size are defined as one
        "stage" by :paper:`FPN`.

        Args:
            freeze_at (int): number of stages to freeze.
                `1` means freezing the stem. `2` means freezing the stem and
                one residual stage, etc.

        Returns:
            nn.Module: this ResNet itself
        """
        if freeze_at >= 1:
            self.stem.freeze()
        for idx, stage in enumerate(self.stages, start=2):
            if freeze_at >= idx:
                for block in stage.children():
                    block.freeze()
        return self

    @staticmethod
    def make_stage(block_class, num_blocks, *, in_channels, out_channels, **kwargs):
        """
        Create a list of blocks of the same type that forms one ResNet stage.

        Args:
            block_class (type): a subclass of CNNBlockBase that's used to create all blocks in this
                stage. A module of this type must not change spatial resolution of inputs unless its
                stride != 1.
            num_blocks (int): number of blocks in this stage
            in_channels (int): input channels of the entire stage.
            out_channels (int): output channels of **every block** in the stage.
            kwargs: other arguments passed to the constructor of
                `block_class`. If the argument name is "xx_per_block", the
                argument is a list of values to be passed to each block in the
                stage. Otherwise, the same argument is passed to every block
                in the stage.

        Returns:
            list[CNNBlockBase]: a list of block module.

        Examples:
        ::
            stage = ResNet.make_stage(
                BottleneckBlock, 3, in_channels=16, out_channels=64,
                bottleneck_channels=16, num_groups=1,
                stride_per_block=[2, 1, 1],
                dilations_per_block=[1, 1, 2]
            )

        Usually, layers that produce the same feature map spatial size are defined as one
        "stage" (in :paper:`FPN`). Under such definition, ``stride_per_block[1:]`` should
        all be 1.
        """
        blocks = []
        for i in range(num_blocks):
            curr_kwargs = {}
            for k, v in kwargs.items():
                if k.endswith("_per_block"):
                    assert len(v) == num_blocks, (
                        f"Argument '{k}' of make_stage should have the "
                        f"same length as num_blocks={num_blocks}."
                    )
                    newk = k[: -len("_per_block")]
                    assert newk not in kwargs, f"Cannot call make_stage with both {k} and {newk}!"
                    curr_kwargs[newk] = v[i]
                else:
                    curr_kwargs[k] = v

            blocks.append(
                block_class(in_channels=in_channels, out_channels=out_channels, **curr_kwargs)
            )
            in_channels = out_channels
        return blocks
|
551 |
-
@staticmethod
|
552 |
-
def make_default_stages(depth, block_class=None, **kwargs):
|
553 |
-
"""
|
554 |
-
Created list of ResNet stages from pre-defined depth (one of 18, 34, 50, 101, 152).
|
555 |
-
If it doesn't create the ResNet variant you need, please use :meth:`make_stage`
|
556 |
-
instead for fine-grained customization.
|
557 |
-
|
558 |
-
Args:
|
559 |
-
depth (int): depth of ResNet
|
560 |
-
block_class (type): the CNN block class. Has to accept
|
561 |
-
`bottleneck_channels` argument for depth > 50.
|
562 |
-
By default it is BasicBlock or BottleneckBlock, based on the
|
563 |
-
depth.
|
564 |
-
kwargs:
|
565 |
-
other arguments to pass to `make_stage`. Should not contain
|
566 |
-
stride and channels, as they are predefined for each depth.
|
567 |
-
|
568 |
-
Returns:
|
569 |
-
list[list[CNNBlockBase]]: modules in all stages; see arguments of
|
570 |
-
:class:`ResNet.__init__`.
|
571 |
-
"""
|
572 |
-
num_blocks_per_stage = {
|
573 |
-
18: [2, 2, 2, 2],
|
574 |
-
34: [3, 4, 6, 3],
|
575 |
-
50: [3, 4, 6, 3],
|
576 |
-
101: [3, 4, 23, 3],
|
577 |
-
152: [3, 8, 36, 3],
|
578 |
-
}[depth]
|
579 |
-
if block_class is None:
|
580 |
-
block_class = BasicBlock if depth < 50 else BottleneckBlock
|
581 |
-
if depth < 50:
|
582 |
-
in_channels = [64, 64, 128, 256]
|
583 |
-
out_channels = [64, 128, 256, 512]
|
584 |
-
else:
|
585 |
-
in_channels = [64, 256, 512, 1024]
|
586 |
-
out_channels = [256, 512, 1024, 2048]
|
587 |
-
ret = []
|
588 |
-
for (n, s, i, o) in zip(num_blocks_per_stage, [1, 2, 2, 2], in_channels, out_channels):
|
589 |
-
if depth >= 50:
|
590 |
-
kwargs["bottleneck_channels"] = o // 4
|
591 |
-
ret.append(
|
592 |
-
ResNet.make_stage(
|
593 |
-
block_class=block_class,
|
594 |
-
num_blocks=n,
|
595 |
-
stride_per_block=[s] + [1] * (n - 1),
|
596 |
-
in_channels=i,
|
597 |
-
out_channels=o,
|
598 |
-
**kwargs,
|
599 |
-
)
|
600 |
-
)
|
601 |
-
return ret
|
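A sketch of assembling a classification ResNet-50 from these defaults (assumes detectron2's layers are importable; illustrative only):

stages = ResNet.make_default_stages(depth=50, norm="BN")
model = ResNet(BasicStem(norm="BN"), stages, num_classes=1000)
# out_features defaults to ["linear"], so forward() returns the logits.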


ResNetBlockBase = CNNBlockBase
"""
Alias for backward compatibility.
"""


def make_stage(*args, **kwargs):
    """
    Deprecated alias for backward compatibility.
    """
    return ResNet.make_stage(*args, **kwargs)


def _convert_ndarray_to_tensor(state_dict: Dict[str, Any]) -> None:
    """
    In-place convert all numpy arrays in the state_dict to torch tensor.
    Args:
        state_dict (dict): a state-dict to be loaded to the model.
            Will be modified.
    """
    # model could be an OrderedDict with _metadata attribute
    # (as returned by Pytorch's state_dict()). We should preserve these
    # properties.
    for k in list(state_dict.keys()):
        v = state_dict[k]
        if not isinstance(v, np.ndarray) and not isinstance(v, torch.Tensor):
            raise ValueError(
                "Unsupported type found in checkpoint! {}: {}".format(k, type(v))
            )
        if not isinstance(v, torch.Tensor):
            state_dict[k] = torch.from_numpy(v)


@register_backbone
def get_resnet_backbone(cfg):
    """
    Create a ResNet instance from config.

    Returns:
        ResNet: a :class:`ResNet` instance.
    """
    res_cfg = cfg['MODEL']['BACKBONE']['RESNETS']

    # need registration of new blocks/stems?
    norm = res_cfg['NORM']
    stem = BasicStem(
        in_channels=res_cfg['STEM_IN_CHANNELS'],
        out_channels=res_cfg['STEM_OUT_CHANNELS'],
        norm=norm,
    )

    # fmt: off
    freeze_at           = res_cfg['FREEZE_AT']
    out_features        = res_cfg['OUT_FEATURES']
    depth               = res_cfg['DEPTH']
    num_groups          = res_cfg['NUM_GROUPS']
    width_per_group     = res_cfg['WIDTH_PER_GROUP']
    bottleneck_channels = num_groups * width_per_group
    in_channels         = res_cfg['STEM_OUT_CHANNELS']
    out_channels        = res_cfg['RES2_OUT_CHANNELS']
    stride_in_1x1       = res_cfg['STRIDE_IN_1X1']
    res5_dilation       = res_cfg['RES5_DILATION']
    deform_on_per_stage = res_cfg['DEFORM_ON_PER_STAGE']
    deform_modulated    = res_cfg['DEFORM_MODULATED']
    deform_num_groups   = res_cfg['DEFORM_NUM_GROUPS']
    # fmt: on
    assert res5_dilation in {1, 2}, "res5_dilation cannot be {}.".format(res5_dilation)

    num_blocks_per_stage = {
        18: [2, 2, 2, 2],
        34: [3, 4, 6, 3],
        50: [3, 4, 6, 3],
        101: [3, 4, 23, 3],
        152: [3, 8, 36, 3],
    }[depth]

    if depth in [18, 34]:
        assert out_channels == 64, "Must set MODEL.RESNETS.RES2_OUT_CHANNELS = 64 for R18/R34"
        assert not any(
            deform_on_per_stage
        ), "MODEL.RESNETS.DEFORM_ON_PER_STAGE unsupported for R18/R34"
        assert res5_dilation == 1, "Must set MODEL.RESNETS.RES5_DILATION = 1 for R18/R34"
        assert num_groups == 1, "Must set MODEL.RESNETS.NUM_GROUPS = 1 for R18/R34"

    stages = []

    for idx, stage_idx in enumerate(range(2, 6)):
        # res5_dilation is used this way as a convention in R-FCN & Deformable Conv paper
        dilation = res5_dilation if stage_idx == 5 else 1
        first_stride = 1 if idx == 0 or (stage_idx == 5 and dilation == 2) else 2
        stage_kargs = {
            "num_blocks": num_blocks_per_stage[idx],
            "stride_per_block": [first_stride] + [1] * (num_blocks_per_stage[idx] - 1),
            "in_channels": in_channels,
            "out_channels": out_channels,
            "norm": norm,
        }
        # Use BasicBlock for R18 and R34.
        if depth in [18, 34]:
            stage_kargs["block_class"] = BasicBlock
        else:
            stage_kargs["bottleneck_channels"] = bottleneck_channels
            stage_kargs["stride_in_1x1"] = stride_in_1x1
            stage_kargs["dilation"] = dilation
            stage_kargs["num_groups"] = num_groups
            if deform_on_per_stage[idx]:
                stage_kargs["block_class"] = DeformBottleneckBlock
                stage_kargs["deform_modulated"] = deform_modulated
                stage_kargs["deform_num_groups"] = deform_num_groups
            else:
                stage_kargs["block_class"] = BottleneckBlock
        blocks = ResNet.make_stage(**stage_kargs)
        in_channels = out_channels
        out_channels *= 2
        bottleneck_channels *= 2
        stages.append(blocks)
    backbone = ResNet(stem, stages, out_features=out_features, freeze_at=freeze_at)

    if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
        filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
        with PathManager.open(filename, "rb") as f:
            ckpt = pickle.load(f, encoding="latin1")['model']
        _convert_ndarray_to_tensor(ckpt)
        ckpt.pop('stem.fc.weight')
        ckpt.pop('stem.fc.bias')
        backbone.load_state_dict(ckpt)

    return backbone
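The config keys read above are this builder's contract. A hypothetical config sketch (the key names mirror the lookups above; the values are illustrative and describe a plain R-50 with no deformable stages, not a config shipped with the repo):

cfg = {
    'MODEL': {
        'BACKBONE': {
            'LOAD_PRETRAINED': False,
            'PRETRAINED': '',
            'RESNETS': {
                'NORM': 'FrozenBN',
                'STEM_IN_CHANNELS': 3,
                'STEM_OUT_CHANNELS': 64,
                'FREEZE_AT': 0,
                'OUT_FEATURES': ['res2', 'res3', 'res4', 'res5'],
                'DEPTH': 50,
                'NUM_GROUPS': 1,
                'WIDTH_PER_GROUP': 64,
                'RES2_OUT_CHANNELS': 256,
                'STRIDE_IN_1X1': False,
                'RES5_DILATION': 1,
                'DEFORM_ON_PER_STAGE': [False, False, False, False],
                'DEFORM_MODULATED': False,
                'DEFORM_NUM_GROUPS': 1,
            },
        },
    },
}
backbone = get_resnet_backbone(cfg)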
xdecoder/backbone/swin.py
DELETED
@@ -1,892 +0,0 @@
# --------------------------------------------------------
# Swin Transformer
# Copyright (c) 2021 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Ze Liu, Yutong Lin, Yixuan Wei
# --------------------------------------------------------

# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/SwinTransformer/Swin-Transformer-Semantic-Segmentation/blob/main/mmseg/models/backbones/swin_transformer.py
import logging
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.checkpoint as checkpoint
from timm.models.layers import DropPath, to_2tuple, trunc_normal_

from detectron2.modeling import Backbone, ShapeSpec
from detectron2.utils.file_io import PathManager

from .registry import register_backbone

logger = logging.getLogger(__name__)


class Mlp(nn.Module):
    """Multilayer perceptron."""

    def __init__(
        self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0
    ):
        super().__init__()
        out_features = out_features or in_features
        hidden_features = hidden_features or in_features
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = act_layer()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(drop)

    def forward(self, x):
        x = self.fc1(x)
        x = self.act(x)
        x = self.drop(x)
        x = self.fc2(x)
        x = self.drop(x)
        return x


def window_partition(x, window_size):
    """
    Args:
        x: (B, H, W, C)
        window_size (int): window size
    Returns:
        windows: (num_windows*B, window_size, window_size, C)
    """
    B, H, W, C = x.shape
    x = x.view(B, H // window_size, window_size, W // window_size, window_size, C)
    windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C)
    return windows


def window_reverse(windows, window_size, H, W):
    """
    Args:
        windows: (num_windows*B, window_size, window_size, C)
        window_size (int): Window size
        H (int): Height of image
        W (int): Width of image
    Returns:
        x: (B, H, W, C)
    """
    B = int(windows.shape[0] / (H * W / window_size / window_size))
    x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1)
    x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1)
    return x
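A sketch (not part of the deleted file) showing that the two helpers are exact inverses whenever H and W are multiples of window_size:

import torch

x = torch.randn(2, 14, 14, 96)                    # (B, H, W, C)
windows = window_partition(x, window_size=7)      # 2 images * 4 windows each
assert windows.shape == (8, 7, 7, 96)
assert torch.equal(window_reverse(windows, 7, 14, 14), x)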


class WindowAttention(nn.Module):
    """Window based multi-head self attention (W-MSA) module with relative position bias.
    It supports both of shifted and non-shifted window.
    Args:
        dim (int): Number of input channels.
        window_size (tuple[int]): The height and width of the window.
        num_heads (int): Number of attention heads.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set
        attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0
        proj_drop (float, optional): Dropout ratio of output. Default: 0.0
    """

    def __init__(
        self,
        dim,
        window_size,
        num_heads,
        qkv_bias=True,
        qk_scale=None,
        attn_drop=0.0,
        proj_drop=0.0,
    ):
        super().__init__()
        self.dim = dim
        self.window_size = window_size  # Wh, Ww
        self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = qk_scale or head_dim ** -0.5

        # define a parameter table of relative position bias
        self.relative_position_bias_table = nn.Parameter(
            torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads)
        )  # 2*Wh-1 * 2*Ww-1, nH

        # get pair-wise relative position index for each token inside the window
        coords_h = torch.arange(self.window_size[0])
        coords_w = torch.arange(self.window_size[1])
        coords = torch.stack(torch.meshgrid([coords_h, coords_w]))  # 2, Wh, Ww
        coords_flatten = torch.flatten(coords, 1)  # 2, Wh*Ww
        relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :]  # 2, Wh*Ww, Wh*Ww
        relative_coords = relative_coords.permute(1, 2, 0).contiguous()  # Wh*Ww, Wh*Ww, 2
        relative_coords[:, :, 0] += self.window_size[0] - 1  # shift to start from 0
        relative_coords[:, :, 1] += self.window_size[1] - 1
        relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1
        relative_position_index = relative_coords.sum(-1)  # Wh*Ww, Wh*Ww
        self.register_buffer("relative_position_index", relative_position_index)

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_drop)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_drop)

        trunc_normal_(self.relative_position_bias_table, std=0.02)
        self.softmax = nn.Softmax(dim=-1)

    def forward(self, x, mask=None):
        """Forward function.
        Args:
            x: input features with shape of (num_windows*B, N, C)
            mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None
        """
        B_, N, C = x.shape
        qkv = (
            self.qkv(x)
            .reshape(B_, N, 3, self.num_heads, C // self.num_heads)
            .permute(2, 0, 3, 1, 4)
        )
        q, k, v = qkv[0], qkv[1], qkv[2]  # make torchscript happy (cannot use tensor as tuple)

        q = q * self.scale
        attn = q @ k.transpose(-2, -1)

        relative_position_bias = self.relative_position_bias_table[
            self.relative_position_index.view(-1)
        ].view(
            self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1
        )  # Wh*Ww,Wh*Ww,nH
        relative_position_bias = relative_position_bias.permute(
            2, 0, 1
        ).contiguous()  # nH, Wh*Ww, Wh*Ww
        attn = attn + relative_position_bias.unsqueeze(0)

        if mask is not None:
            nW = mask.shape[0]
            attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0)
            attn = attn.view(-1, self.num_heads, N, N)
            attn = self.softmax(attn)
        else:
            attn = self.softmax(attn)

        attn = self.attn_drop(attn)

        x = (attn @ v).transpose(1, 2).reshape(B_, N, C)
        x = self.proj(x)
        x = self.proj_drop(x)

        return x
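A sketch of the relative-position bookkeeping (illustrative; assumes torch and timm are installed): for a 7x7 window the per-axis relative offsets span [-6, 6], i.e. 13 values, so the bias table has 13 * 13 = 169 rows and is indexed by a (49, 49) integer map.

Wh = Ww = 7
assert (2 * Wh - 1) * (2 * Ww - 1) == 169
attn = WindowAttention(dim=96, window_size=(Wh, Ww), num_heads=3)
assert attn.relative_position_bias_table.shape == (169, 3)
assert attn.relative_position_index.shape == (49, 49)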


class SwinTransformerBlock(nn.Module):
    """Swin Transformer Block.
    Args:
        dim (int): Number of input channels.
        num_heads (int): Number of attention heads.
        window_size (int): Window size.
        shift_size (int): Shift size for SW-MSA.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float, optional): Stochastic depth rate. Default: 0.0
        act_layer (nn.Module, optional): Activation layer. Default: nn.GELU
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(
        self,
        dim,
        num_heads,
        window_size=7,
        shift_size=0,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        act_layer=nn.GELU,
        norm_layer=nn.LayerNorm,
    ):
        super().__init__()
        self.dim = dim
        self.num_heads = num_heads
        self.window_size = window_size
        self.shift_size = shift_size
        self.mlp_ratio = mlp_ratio
        assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size"

        self.norm1 = norm_layer(dim)
        self.attn = WindowAttention(
            dim,
            window_size=to_2tuple(self.window_size),
            num_heads=num_heads,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            attn_drop=attn_drop,
            proj_drop=drop,
        )

        self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity()
        self.norm2 = norm_layer(dim)
        mlp_hidden_dim = int(dim * mlp_ratio)
        self.mlp = Mlp(
            in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop
        )

        self.H = None
        self.W = None

    def forward(self, x, mask_matrix):
        """Forward function.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
            mask_matrix: Attention mask for cyclic shift.
        """
        B, L, C = x.shape
        H, W = self.H, self.W
        assert L == H * W, "input feature has wrong size"

        # HACK model will not upsampling
        # if min([H, W]) <= self.window_size:
        #     if window size is larger than input resolution, we don't partition windows
        #     self.shift_size = 0
        #     self.window_size = min([H,W])

        shortcut = x
        x = self.norm1(x)
        x = x.view(B, H, W, C)

        # pad feature maps to multiples of window size
        pad_l = pad_t = 0
        pad_r = (self.window_size - W % self.window_size) % self.window_size
        pad_b = (self.window_size - H % self.window_size) % self.window_size
        x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b))
        _, Hp, Wp, _ = x.shape

        # cyclic shift
        if self.shift_size > 0:
            shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2))
            attn_mask = mask_matrix
        else:
            shifted_x = x
            attn_mask = None

        # partition windows
        x_windows = window_partition(
            shifted_x, self.window_size
        )  # nW*B, window_size, window_size, C
        x_windows = x_windows.view(
            -1, self.window_size * self.window_size, C
        )  # nW*B, window_size*window_size, C

        # W-MSA/SW-MSA
        attn_windows = self.attn(x_windows, mask=attn_mask)  # nW*B, window_size*window_size, C

        # merge windows
        attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C)
        shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp)  # B H' W' C

        # reverse cyclic shift
        if self.shift_size > 0:
            x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2))
        else:
            x = shifted_x

        if pad_r > 0 or pad_b > 0:
            x = x[:, :H, :W, :].contiguous()

        x = x.view(B, H * W, C)

        # FFN
        x = shortcut + self.drop_path(x)
        x = x + self.drop_path(self.mlp(self.norm2(x)))
        return x
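A small sanity sketch (illustrative, not file content): the cyclic shift is a plain torch.roll and is exactly undone by the reverse roll, which is what lets SW-MSA reuse the unshifted window-attention path.

import torch

x = torch.randn(1, 14, 14, 96)
shift = 3
shifted = torch.roll(x, shifts=(-shift, -shift), dims=(1, 2))
restored = torch.roll(shifted, shifts=(shift, shift), dims=(1, 2))
assert torch.equal(restored, x)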


class PatchMerging(nn.Module):
    """Patch Merging Layer
    Args:
        dim (int): Number of input channels.
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
    """

    def __init__(self, dim, norm_layer=nn.LayerNorm):
        super().__init__()
        self.dim = dim
        self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False)
        self.norm = norm_layer(4 * dim)

    def forward(self, x, H, W):
        """Forward function.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
        """
        B, L, C = x.shape
        assert L == H * W, "input feature has wrong size"

        x = x.view(B, H, W, C)

        # padding
        pad_input = (H % 2 == 1) or (W % 2 == 1)
        if pad_input:
            x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2))

        x0 = x[:, 0::2, 0::2, :]  # B H/2 W/2 C
        x1 = x[:, 1::2, 0::2, :]  # B H/2 W/2 C
        x2 = x[:, 0::2, 1::2, :]  # B H/2 W/2 C
        x3 = x[:, 1::2, 1::2, :]  # B H/2 W/2 C
        x = torch.cat([x0, x1, x2, x3], -1)  # B H/2 W/2 4*C
        x = x.view(B, -1, 4 * C)  # B H/2*W/2 4*C

        x = self.norm(x)
        x = self.reduction(x)

        return x
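A shape sketch (illustrative): merging 2x2 neighborhoods quarters the token count and, after the 4C -> 2C linear reduction, doubles the channel width.

import torch

merge = PatchMerging(dim=96)
x = torch.randn(1, 56 * 56, 96)
y = merge(x, 56, 56)
assert y.shape == (1, 28 * 28, 192)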


class BasicLayer(nn.Module):
    """A basic Swin Transformer layer for one stage.
    Args:
        dim (int): Number of feature channels
        depth (int): Depths of this stage.
        num_heads (int): Number of attention head.
        window_size (int): Local window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set.
        drop (float, optional): Dropout rate. Default: 0.0
        attn_drop (float, optional): Attention dropout rate. Default: 0.0
        drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0
        norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm
        downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(
        self,
        dim,
        depth,
        num_heads,
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop=0.0,
        attn_drop=0.0,
        drop_path=0.0,
        norm_layer=nn.LayerNorm,
        downsample=None,
        use_checkpoint=False,
    ):
        super().__init__()
        self.window_size = window_size
        self.shift_size = window_size // 2
        self.depth = depth
        self.use_checkpoint = use_checkpoint

        # build blocks
        self.blocks = nn.ModuleList(
            [
                SwinTransformerBlock(
                    dim=dim,
                    num_heads=num_heads,
                    window_size=window_size,
                    shift_size=0 if (i % 2 == 0) else window_size // 2,
                    mlp_ratio=mlp_ratio,
                    qkv_bias=qkv_bias,
                    qk_scale=qk_scale,
                    drop=drop,
                    attn_drop=attn_drop,
                    drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path,
                    norm_layer=norm_layer,
                )
                for i in range(depth)
            ]
        )

        # patch merging layer
        if downsample is not None:
            self.downsample = downsample(dim=dim, norm_layer=norm_layer)
        else:
            self.downsample = None

    def forward(self, x, H, W):
        """Forward function.
        Args:
            x: Input feature, tensor size (B, H*W, C).
            H, W: Spatial resolution of the input feature.
        """

        # calculate attention mask for SW-MSA
        Hp = int(np.ceil(H / self.window_size)) * self.window_size
        Wp = int(np.ceil(W / self.window_size)) * self.window_size
        img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device)  # 1 Hp Wp 1
        h_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        w_slices = (
            slice(0, -self.window_size),
            slice(-self.window_size, -self.shift_size),
            slice(-self.shift_size, None),
        )
        cnt = 0
        for h in h_slices:
            for w in w_slices:
                img_mask[:, h, w, :] = cnt
                cnt += 1

        mask_windows = window_partition(
            img_mask, self.window_size
        )  # nW, window_size, window_size, 1
        mask_windows = mask_windows.view(-1, self.window_size * self.window_size)
        attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
        attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill(
            attn_mask == 0, float(0.0)
        ).type(x.dtype)

        for blk in self.blocks:
            blk.H, blk.W = H, W
            if self.use_checkpoint:
                x = checkpoint.checkpoint(blk, x, attn_mask)
            else:
                x = blk(x, attn_mask)
        if self.downsample is not None:
            x_down = self.downsample(x, H, W)
            Wh, Ww = (H + 1) // 2, (W + 1) // 2
            return x, H, W, x_down, Wh, Ww
        else:
            return x, H, W, x, H, W


class PatchEmbed(nn.Module):
    """Image to Patch Embedding
    Args:
        patch_size (int): Patch token size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        norm_layer (nn.Module, optional): Normalization layer. Default: None
    """

    def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None):
        super().__init__()
        patch_size = to_2tuple(patch_size)
        self.patch_size = patch_size

        self.in_chans = in_chans
        self.embed_dim = embed_dim

        self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size)
        if norm_layer is not None:
            self.norm = norm_layer(embed_dim)
        else:
            self.norm = None

    def forward(self, x):
        """Forward function."""
        # padding
        _, _, H, W = x.size()
        if W % self.patch_size[1] != 0:
            x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1]))
        if H % self.patch_size[0] != 0:
            x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0]))

        x = self.proj(x)  # B C Wh Ww
        if self.norm is not None:
            Wh, Ww = x.size(2), x.size(3)
            x = x.flatten(2).transpose(1, 2)
            x = self.norm(x)
            x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww)

        return x
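A sketch of the SW-MSA mask built in BasicLayer.forward (illustrative values; reuses window_partition from above): the 3x3 grid of slices labels the shifted regions, and any pair of tokens with different labels inside the same window receives the -100 bias, so they cannot attend to each other.

import torch

window_size, shift_size, Hp, Wp = 7, 3, 14, 14
img_mask = torch.zeros(1, Hp, Wp, 1)
slices = (slice(0, -window_size), slice(-window_size, -shift_size), slice(-shift_size, None))
cnt = 0
for h in slices:
    for w in slices:
        img_mask[:, h, w, :] = cnt
        cnt += 1
mask_windows = window_partition(img_mask, window_size).view(-1, window_size * window_size)
attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2)
assert attn_mask.shape == (4, 49, 49)  # one (N, N) mask per window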


class SwinTransformer(nn.Module):
    """Swin Transformer backbone.
    A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` -
        https://arxiv.org/pdf/2103.14030
    Args:
        pretrain_img_size (int): Input image size for training the pretrained model,
            used in absolute position embedding. Default 224.
        patch_size (int | tuple(int)): Patch size. Default: 4.
        in_chans (int): Number of input image channels. Default: 3.
        embed_dim (int): Number of linear projection output channels. Default: 96.
        depths (tuple[int]): Depths of each Swin Transformer stage.
        num_heads (tuple[int]): Number of attention head of each stage.
        window_size (int): Window size. Default: 7.
        mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4.
        qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True
        qk_scale (float): Override default qk scale of head_dim ** -0.5 if set.
        drop_rate (float): Dropout rate.
        attn_drop_rate (float): Attention dropout rate. Default: 0.
        drop_path_rate (float): Stochastic depth rate. Default: 0.2.
        norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm.
        ape (bool): If True, add absolute position embedding to the patch embedding. Default: False.
        patch_norm (bool): If True, add normalization after patch embedding. Default: True.
        out_indices (Sequence[int]): Output from which stages.
        frozen_stages (int): Stages to be frozen (stop grad and set eval mode).
            -1 means not freezing any parameters.
        use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False.
    """

    def __init__(
        self,
        pretrain_img_size=224,
        patch_size=4,
        in_chans=3,
        embed_dim=96,
        depths=[2, 2, 6, 2],
        num_heads=[3, 6, 12, 24],
        window_size=7,
        mlp_ratio=4.0,
        qkv_bias=True,
        qk_scale=None,
        drop_rate=0.0,
        attn_drop_rate=0.0,
        drop_path_rate=0.2,
        norm_layer=nn.LayerNorm,
        ape=False,
        patch_norm=True,
        out_indices=(0, 1, 2, 3),
        frozen_stages=-1,
        use_checkpoint=False,
    ):
        super().__init__()

        self.pretrain_img_size = pretrain_img_size
        self.num_layers = len(depths)
        self.embed_dim = embed_dim
        self.ape = ape
        self.patch_norm = patch_norm
        self.out_indices = out_indices
        self.frozen_stages = frozen_stages

        # split image into non-overlapping patches
        self.patch_embed = PatchEmbed(
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            norm_layer=norm_layer if self.patch_norm else None,
        )

        # absolute position embedding
        if self.ape:
            pretrain_img_size = to_2tuple(pretrain_img_size)
            patch_size = to_2tuple(patch_size)
            patches_resolution = [
                pretrain_img_size[0] // patch_size[0],
                pretrain_img_size[1] // patch_size[1],
            ]

            self.absolute_pos_embed = nn.Parameter(
                torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1])
            )
            trunc_normal_(self.absolute_pos_embed, std=0.02)

        self.pos_drop = nn.Dropout(p=drop_rate)

        # stochastic depth
        dpr = [
            x.item() for x in torch.linspace(0, drop_path_rate, sum(depths))
        ]  # stochastic depth decay rule

        # build layers
        self.layers = nn.ModuleList()
        for i_layer in range(self.num_layers):
            layer = BasicLayer(
                dim=int(embed_dim * 2 ** i_layer),
                depth=depths[i_layer],
                num_heads=num_heads[i_layer],
                window_size=window_size,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])],
                norm_layer=norm_layer,
                downsample=PatchMerging if (i_layer < self.num_layers - 1) else None,
                use_checkpoint=use_checkpoint,
            )
            self.layers.append(layer)

        num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)]
        self.num_features = num_features

        # add a norm layer for each output
        for i_layer in out_indices:
            layer = norm_layer(num_features[i_layer])
            layer_name = f"norm{i_layer}"
            self.add_module(layer_name, layer)

        self._freeze_stages()

    def _freeze_stages(self):
        if self.frozen_stages >= 0:
            self.patch_embed.eval()
            for param in self.patch_embed.parameters():
                param.requires_grad = False

        if self.frozen_stages >= 1 and self.ape:
            self.absolute_pos_embed.requires_grad = False

        if self.frozen_stages >= 2:
            self.pos_drop.eval()
            for i in range(0, self.frozen_stages - 1):
                m = self.layers[i]
                m.eval()
                for param in m.parameters():
                    param.requires_grad = False

    def init_weights(self, pretrained=None):
        """Initialize the weights in backbone.
        Args:
            pretrained (str, optional): Path to pre-trained weights.
                Defaults to None.
        """

        # _init_weights is defined but not applied anywhere in this file.
        def _init_weights(m):
            if isinstance(m, nn.Linear):
                trunc_normal_(m.weight, std=0.02)
                if isinstance(m, nn.Linear) and m.bias is not None:
                    nn.init.constant_(m.bias, 0)
            elif isinstance(m, nn.LayerNorm):
                nn.init.constant_(m.bias, 0)
                nn.init.constant_(m.weight, 1.0)

    def load_weights(self, pretrained_dict=None, pretrained_layers=[], verbose=True):
        model_dict = self.state_dict()
        pretrained_dict = {
            k: v for k, v in pretrained_dict.items()
            if k in model_dict.keys()
        }
        need_init_state_dict = {}
        for k, v in pretrained_dict.items():
            need_init = (
                (
                    k.split('.')[0] in pretrained_layers
                    or pretrained_layers[0] == '*'
                )
                and 'relative_position_index' not in k
                and 'attn_mask' not in k
            )

            if need_init:
                # if verbose:
                #     logger.info(f'=> init {k} from {pretrained}')

                if 'relative_position_bias_table' in k and v.size() != model_dict[k].size():
                    relative_position_bias_table_pretrained = v
                    relative_position_bias_table_current = model_dict[k]
                    L1, nH1 = relative_position_bias_table_pretrained.size()
                    L2, nH2 = relative_position_bias_table_current.size()
                    if nH1 != nH2:
                        logger.info(f"Error in loading {k}, passing")
                    else:
                        if L1 != L2:
                            logger.info(
                                '=> load_pretrained: resized variant: {} to {}'
                                .format((L1, nH1), (L2, nH2))
                            )
                            S1 = int(L1 ** 0.5)
                            S2 = int(L2 ** 0.5)
                            relative_position_bias_table_pretrained_resized = torch.nn.functional.interpolate(
                                relative_position_bias_table_pretrained.permute(1, 0).view(1, nH1, S1, S1),
                                size=(S2, S2),
                                mode='bicubic')
                            v = relative_position_bias_table_pretrained_resized.view(nH2, L2).permute(1, 0)

                if 'absolute_pos_embed' in k and v.size() != model_dict[k].size():
                    absolute_pos_embed_pretrained = v
                    absolute_pos_embed_current = model_dict[k]
                    _, L1, C1 = absolute_pos_embed_pretrained.size()
                    _, L2, C2 = absolute_pos_embed_current.size()
                    if C1 != C2:  # channel mismatch check (the original compared C1 against itself)
                        logger.info(f"Error in loading {k}, passing")
                    else:
                        if L1 != L2:
                            logger.info(
                                '=> load_pretrained: resized variant: {} to {}'
                                .format((1, L1, C1), (1, L2, C2))
                            )
                            S1 = int(L1 ** 0.5)
                            S2 = int(L2 ** 0.5)
                            absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.reshape(-1, S1, S1, C1)
                            absolute_pos_embed_pretrained = absolute_pos_embed_pretrained.permute(0, 3, 1, 2)
                            absolute_pos_embed_pretrained_resized = torch.nn.functional.interpolate(
                                absolute_pos_embed_pretrained, size=(S2, S2), mode='bicubic')
                            v = absolute_pos_embed_pretrained_resized.permute(0, 2, 3, 1).flatten(1, 2)

                need_init_state_dict[k] = v
        self.load_state_dict(need_init_state_dict, strict=False)

    def forward(self, x):
        """Forward function."""
        x = self.patch_embed(x)

        Wh, Ww = x.size(2), x.size(3)
        if self.ape:
            # interpolate the position embedding to the corresponding size
            absolute_pos_embed = F.interpolate(
                self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic"
            )
            x = (x + absolute_pos_embed).flatten(2).transpose(1, 2)  # B Wh*Ww C
        else:
            x = x.flatten(2).transpose(1, 2)
        x = self.pos_drop(x)

        outs = {}
        for i in range(self.num_layers):
            layer = self.layers[i]
            x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww)

            if i in self.out_indices:
                norm_layer = getattr(self, f"norm{i}")
                x_out = norm_layer(x_out)

                out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()
                outs["res{}".format(i + 2)] = out

        if len(self.out_indices) == 0:
            outs["res5"] = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous()

        return outs

    def train(self, mode=True):
        """Convert the model into training mode while keeping frozen layers frozen."""
        super(SwinTransformer, self).train(mode)
        self._freeze_stages()


class D2SwinTransformer(SwinTransformer, Backbone):
    def __init__(self, cfg, pretrain_img_size, patch_size, in_chans, embed_dim,
                 depths, num_heads, window_size, mlp_ratio, qkv_bias, qk_scale,
                 drop_rate, attn_drop_rate, drop_path_rate, norm_layer, ape,
                 patch_norm, out_indices, use_checkpoint):
        super().__init__(
            pretrain_img_size,
            patch_size,
            in_chans,
            embed_dim,
            depths,
            num_heads,
            window_size,
            mlp_ratio,
            qkv_bias,
            qk_scale,
            drop_rate,
            attn_drop_rate,
            drop_path_rate,
            norm_layer,
            ape,
            patch_norm,
            out_indices,
            use_checkpoint=use_checkpoint,
        )

        self._out_features = cfg['OUT_FEATURES']

        self._out_feature_strides = {
            "res2": 4,
            "res3": 8,
            "res4": 16,
            "res5": 32,
        }
        self._out_feature_channels = {
            "res2": self.num_features[0],
            "res3": self.num_features[1],
            "res4": self.num_features[2],
            "res5": self.num_features[3],
        }

    def forward(self, x):
        """
        Args:
            x: Tensor of shape (N,C,H,W). H, W must be a multiple of ``self.size_divisibility``.
        Returns:
            dict[str->Tensor]: names and the corresponding features
        """
        assert (
            x.dim() == 4
        ), f"SwinTransformer takes an input of shape (N, C, H, W). Got {x.shape} instead!"
        outputs = {}
        y = super().forward(x)
        for k in y.keys():
            if k in self._out_features:
                outputs[k] = y[k]
        return outputs

    def output_shape(self):
        feature_names = list(set(self._out_feature_strides.keys()) & set(self._out_features))
        return {
            name: ShapeSpec(
                channels=self._out_feature_channels[name], stride=self._out_feature_strides[name]
            )
            for name in feature_names
        }

    @property
    def size_divisibility(self):
        return 32


@register_backbone
def get_swin_backbone(cfg):
    swin_cfg = cfg['MODEL']['BACKBONE']['SWIN']

    pretrain_img_size = swin_cfg['PRETRAIN_IMG_SIZE']
    patch_size = swin_cfg['PATCH_SIZE']
    in_chans = 3
    embed_dim = swin_cfg['EMBED_DIM']
    depths = swin_cfg['DEPTHS']
    num_heads = swin_cfg['NUM_HEADS']
    window_size = swin_cfg['WINDOW_SIZE']
    mlp_ratio = swin_cfg['MLP_RATIO']
    qkv_bias = swin_cfg['QKV_BIAS']
    qk_scale = swin_cfg['QK_SCALE']
    drop_rate = swin_cfg['DROP_RATE']
    attn_drop_rate = swin_cfg['ATTN_DROP_RATE']
    drop_path_rate = swin_cfg['DROP_PATH_RATE']
    norm_layer = nn.LayerNorm
    ape = swin_cfg['APE']
    patch_norm = swin_cfg['PATCH_NORM']
    use_checkpoint = swin_cfg['USE_CHECKPOINT']
    out_indices = swin_cfg.get('OUT_INDICES', [0, 1, 2, 3])

    swin = D2SwinTransformer(
        swin_cfg,
        pretrain_img_size,
        patch_size,
        in_chans,
        embed_dim,
        depths,
        num_heads,
        window_size,
        mlp_ratio,
        qkv_bias,
        qk_scale,
        drop_rate,
        attn_drop_rate,
        drop_path_rate,
        norm_layer,
        ape,
        patch_norm,
        out_indices,
        use_checkpoint=use_checkpoint,
    )

    if cfg['MODEL']['BACKBONE']['LOAD_PRETRAINED'] is True:
        filename = cfg['MODEL']['BACKBONE']['PRETRAINED']
        with PathManager.open(filename, "rb") as f:
            ckpt = torch.load(f, map_location=cfg['device'])['model']
        swin.load_weights(ckpt, swin_cfg.get('PRETRAINED_LAYERS', ['*']), cfg['VERBOSE'])

    return swin
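A hypothetical config sketch for this builder (the key names mirror the lookups above; the values are illustrative and describe a Swin-T without pretrained weights, not a config shipped with the repo):

cfg = {
    'device': 'cpu',
    'VERBOSE': True,
    'MODEL': {
        'BACKBONE': {
            'LOAD_PRETRAINED': False,
            'PRETRAINED': '',
            'SWIN': {
                'PRETRAIN_IMG_SIZE': 224,
                'PATCH_SIZE': 4,
                'EMBED_DIM': 96,
                'DEPTHS': [2, 2, 6, 2],
                'NUM_HEADS': [3, 6, 12, 24],
                'WINDOW_SIZE': 7,
                'MLP_RATIO': 4.0,
                'QKV_BIAS': True,
                'QK_SCALE': None,
                'DROP_RATE': 0.0,
                'ATTN_DROP_RATE': 0.0,
                'DROP_PATH_RATE': 0.2,
                'APE': False,
                'PATCH_NORM': True,
                'USE_CHECKPOINT': False,
                'OUT_FEATURES': ['res2', 'res3', 'res4', 'res5'],
            },
        },
    },
}
backbone = get_swin_backbone(cfg)
print(backbone.output_shape())  # res2..res5 with strides 4/8/16/32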
xdecoder/body/__init__.py
DELETED
@@ -1 +0,0 @@
from .build import build_xdecoder_head
xdecoder/body/build.py
DELETED
@@ -1,13 +0,0 @@
from .registry import model_entrypoints
from .registry import is_model

from .xdecoder_head import *


def build_xdecoder_head(config, *args, **kwargs):
    model_name = config['MODEL']['HEAD']
    if not is_model(model_name):
        raise ValueError(f'Unknown model: {model_name}')

    body = model_entrypoints(model_name)(config, *args, **kwargs)
    return body
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
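The builder above shows the pattern every deleted `build.py` in this tree follows: a config key names a model, a registry resolves the name to a constructor, and unknown names fail fast. A minimal self-contained sketch of that dispatch, with a stand-in head and config layout (not X-Decoder's real head class):

import torch.nn as nn

_model_entrypoints = {}

def register_head(fn):
    # this sketch keys by function name; the deleted registries key by module name
    _model_entrypoints[fn.__name__] = fn
    return fn

@register_head
def linear_head(config, in_dim, out_dim):
    return nn.Linear(in_dim, out_dim)

def build_head(config, *args, **kwargs):
    name = config['MODEL']['HEAD']
    if name not in _model_entrypoints:
        raise ValueError(f'Unknown model: {name}')
    return _model_entrypoints[name](config, *args, **kwargs)

head = build_head({'MODEL': {'HEAD': 'linear_head'}}, 512, 10)  # nn.Linear(512, 10)
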
xdecoder/body/decoder/__init__.py
DELETED
@@ -1 +0,0 @@
-from .build import build_decoder

xdecoder/body/decoder/build.py
DELETED
@@ -1,12 +0,0 @@
-from .registry import model_entrypoints
-from .registry import is_model
-
-from .xdecoder import *
-
-def build_decoder(config, *args, **kwargs):
-    model_name = config['MODEL']['DECODER']['NAME']
-
-    if not is_model(model_name):
-        raise ValueError(f'Unknown model: {model_name}')
-
-    return model_entrypoints(model_name)(config, *args, **kwargs)

xdecoder/body/decoder/registry.py
DELETED
@@ -1,13 +0,0 @@
-_model_entrypoints = {}
-
-def register_decoder(fn):
-    module_name_split = fn.__module__.split('.')
-    model_name = module_name_split[-1]
-    _model_entrypoints[model_name] = fn
-    return fn
-
-def model_entrypoints(model_name):
-    return _model_entrypoints[model_name]
-
-def is_model(model_name):
-    return model_name in _model_entrypoints

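Note that `register_decoder` above keys the registry by the module that defines the entrypoint (`fn.__module__.split('.')[-1]`), not by the function's name, so the config's `MODEL.DECODER.NAME` must match the file name, e.g. `xdecoder` for the entrypoint defined in `xdecoder.py` below. A small sketch that simulates this behavior (the fake `__module__` assignment exists only for the demo):

_model_entrypoints = {}

def register_decoder(fn):
    model_name = fn.__module__.split('.')[-1]  # the defining file's name becomes the key
    _model_entrypoints[model_name] = fn
    return fn

def get_masked_transformer_decoder(cfg):
    return 'decoder-instance'  # stand-in for the real constructor

# simulate the function living in xdecoder/body/decoder/xdecoder.py
get_masked_transformer_decoder.__module__ = 'xdecoder.body.decoder.xdecoder'
register_decoder(get_masked_transformer_decoder)

assert 'xdecoder' in _model_entrypoints  # so the config must say NAME: xdecoder
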
xdecoder/body/decoder/tmp.py
DELETED
@@ -1,664 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
-import logging
-from typing import Optional
-
-import torch
-from torch import nn, Tensor
-from torch.nn import functional as F
-
-from timm.models.layers import trunc_normal_
-from detectron2.layers import Conv2d
-import fvcore.nn.weight_init as weight_init
-
-from .registry import register_decoder
-from ...utils import configurable
-from ...modules import PositionEmbeddingSine
-
-from image2html.visualizer import VL
-
-
-class SelfAttentionLayer(nn.Module):
-
-    def __init__(self, d_model, nhead, dropout=0.0,
-                 activation="relu", normalize_before=False):
-        super().__init__()
-        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-
-        self.norm = nn.LayerNorm(d_model)
-        self.dropout = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(self, tgt,
-                     tgt_mask: Optional[Tensor] = None,
-                     tgt_key_padding_mask: Optional[Tensor] = None,
-                     query_pos: Optional[Tensor] = None):
-        q = k = self.with_pos_embed(tgt, query_pos)
-        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
-                              key_padding_mask=tgt_key_padding_mask)[0]
-        tgt = tgt + self.dropout(tgt2)
-        tgt = self.norm(tgt)
-
-        return tgt
-
-    def forward_pre(self, tgt,
-                    tgt_mask: Optional[Tensor] = None,
-                    tgt_key_padding_mask: Optional[Tensor] = None,
-                    query_pos: Optional[Tensor] = None):
-        tgt2 = self.norm(tgt)
-        q = k = self.with_pos_embed(tgt2, query_pos)
-        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
-                              key_padding_mask=tgt_key_padding_mask)[0]
-        tgt = tgt + self.dropout(tgt2)
-
-        return tgt
-
-    def forward(self, tgt,
-                tgt_mask: Optional[Tensor] = None,
-                tgt_key_padding_mask: Optional[Tensor] = None,
-                query_pos: Optional[Tensor] = None):
-        if self.normalize_before:
-            return self.forward_pre(tgt, tgt_mask,
-                                    tgt_key_padding_mask, query_pos)
-        return self.forward_post(tgt, tgt_mask,
-                                 tgt_key_padding_mask, query_pos)
-
-
-class CrossAttentionLayer(nn.Module):
-
-    def __init__(self, d_model, nhead, dropout=0.0,
-                 activation="relu", normalize_before=False):
-        super().__init__()
-        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-
-        self.norm = nn.LayerNorm(d_model)
-        self.dropout = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(self, tgt, memory,
-                     memory_mask: Optional[Tensor] = None,
-                     memory_key_padding_mask: Optional[Tensor] = None,
-                     pos: Optional[Tensor] = None,
-                     query_pos: Optional[Tensor] = None):
-        tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
-                                             key=self.with_pos_embed(memory, pos),
-                                             value=memory, attn_mask=memory_mask,
-                                             key_padding_mask=memory_key_padding_mask)
-        tgt = tgt + self.dropout(tgt2)
-        tgt = self.norm(tgt)
-        return tgt, avg_attn
-
-    def forward_pre(self, tgt, memory,
-                    memory_mask: Optional[Tensor] = None,
-                    memory_key_padding_mask: Optional[Tensor] = None,
-                    pos: Optional[Tensor] = None,
-                    query_pos: Optional[Tensor] = None):
-        tgt2 = self.norm(tgt)
-        tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
-                                             key=self.with_pos_embed(memory, pos),
-                                             value=memory, attn_mask=memory_mask,
-                                             key_padding_mask=memory_key_padding_mask)
-        tgt = tgt + self.dropout(tgt2)
-
-        return tgt, avg_attn
-
-    def forward(self, tgt, memory,
-                memory_mask: Optional[Tensor] = None,
-                memory_key_padding_mask: Optional[Tensor] = None,
-                pos: Optional[Tensor] = None,
-                query_pos: Optional[Tensor] = None):
-        if self.normalize_before:
-            return self.forward_pre(tgt, memory, memory_mask,
-                                    memory_key_padding_mask, pos, query_pos)
-        return self.forward_post(tgt, memory, memory_mask,
-                                 memory_key_padding_mask, pos, query_pos)
-
-
-class FFNLayer(nn.Module):
-
-    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
-                 activation="relu", normalize_before=False):
-        super().__init__()
-        # Implementation of Feedforward model
-        self.linear1 = nn.Linear(d_model, dim_feedforward)
-        self.dropout = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(dim_feedforward, d_model)
-
-        self.norm = nn.LayerNorm(d_model)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(self, tgt):
-        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
-        tgt = tgt + self.dropout(tgt2)
-        tgt = self.norm(tgt)
-        return tgt
-
-    def forward_pre(self, tgt):
-        tgt2 = self.norm(tgt)
-        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
-        tgt = tgt + self.dropout(tgt2)
-        return tgt
-
-    def forward(self, tgt):
-        if self.normalize_before:
-            return self.forward_pre(tgt)
-        return self.forward_post(tgt)
-
-
-def _get_activation_fn(activation):
-    """Return an activation function given a string"""
-    if activation == "relu":
-        return F.relu
-    if activation == "gelu":
-        return F.gelu
-    if activation == "glu":
-        return F.glu
-    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
-
-
-class MLP(nn.Module):
-    """Very simple multi-layer perceptron (also called FFN)"""
-
-    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
-        super().__init__()
-        self.num_layers = num_layers
-        h = [hidden_dim] * (num_layers - 1)
-        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
-
-    def forward(self, x):
-        for i, layer in enumerate(self.layers):
-            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
-        return x
-
-
-class MultiScaleMaskedTransformerDecoder(nn.Module):
-
-    _version = 2
-
-    @configurable
-    def __init__(
-        self,
-        lang_encoder: nn.Module,
-        in_channels,
-        mask_classification=True,
-        *,
-        hidden_dim: int,
-        dim_proj: int,
-        num_queries: int,
-        contxt_len: int,
-        nheads: int,
-        dim_feedforward: int,
-        dec_layers: int,
-        pre_norm: bool,
-        mask_dim: int,
-        task_switch: dict,
-        captioning_step: int,
-        enforce_input_project: bool,
-    ):
-        """
-        NOTE: this interface is experimental.
-        Args:
-            in_channels: channels of the input features
-            mask_classification: whether to add mask classifier or not
-            num_classes: number of classes
-            hidden_dim: Transformer feature dimension
-            num_queries: number of queries
-            nheads: number of heads
-            dim_feedforward: feature dimension in feedforward network
-            enc_layers: number of Transformer encoder layers
-            dec_layers: number of Transformer decoder layers
-            pre_norm: whether to use pre-LayerNorm or not
-            mask_dim: mask feature dimension
-            enforce_input_project: add input project 1x1 conv even if input
-                channels and hidden dim are identical
-        """
-        super().__init__()
-        assert mask_classification, "Only support mask classification model"
-        self.mask_classification = mask_classification
-
-        # positional encoding
-        N_steps = hidden_dim // 2
-        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
-
-        # define Transformer decoder here
-        self.num_heads = nheads
-        self.num_layers = dec_layers
-        self.contxt_len = contxt_len
-        self.transformer_self_attention_layers = nn.ModuleList()
-        self.transformer_cross_attention_layers = nn.ModuleList()
-        self.transformer_ffn_layers = nn.ModuleList()
-
-        for _ in range(self.num_layers):
-            self.transformer_self_attention_layers.append(
-                SelfAttentionLayer(
-                    d_model=hidden_dim,
-                    nhead=nheads,
-                    dropout=0.0,
-                    normalize_before=pre_norm,
-                )
-            )
-
-            self.transformer_cross_attention_layers.append(
-                CrossAttentionLayer(
-                    d_model=hidden_dim,
-                    nhead=nheads,
-                    dropout=0.0,
-                    normalize_before=pre_norm,
-                )
-            )
-
-            self.transformer_ffn_layers.append(
-                FFNLayer(
-                    d_model=hidden_dim,
-                    dim_feedforward=dim_feedforward,
-                    dropout=0.0,
-                    normalize_before=pre_norm,
-                )
-            )
-
-        self.decoder_norm = nn.LayerNorm(hidden_dim)
-
-        self.num_queries = num_queries
-        # learnable query features
-        self.query_feat = nn.Embedding(num_queries, hidden_dim)
-        # learnable query p.e.
-        self.query_embed = nn.Embedding(num_queries, hidden_dim)
-
-        # level embedding (we always use 3 scales)
-        self.num_feature_levels = 3
-        self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
-        self.input_proj = nn.ModuleList()
-
-        for _ in range(self.num_feature_levels):
-            if in_channels != hidden_dim or enforce_input_project:
-                self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
-                weight_init.c2_xavier_fill(self.input_proj[-1])
-            else:
-                self.input_proj.append(nn.Sequential())
-
-        self.task_switch = task_switch
-
-        # output FFNs
-        self.lang_encoder = lang_encoder
-        if self.task_switch['mask']:
-            self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
-
-        self.class_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
-        trunc_normal_(self.class_embed, std=.02)
-
-        if task_switch['bbox']:
-            self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
-
-        # Caption Project and query
-        if task_switch['captioning']:
-            self.caping_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
-            trunc_normal_(self.caping_embed, std=.02)
-            self.query_feat_caping = nn.Embedding(contxt_len, hidden_dim)
-            self.captioning_step = captioning_step
-
-        # register self_attn_mask to avoid information leakage; it covers the interaction between object queries, the class query and the captioning queries
-        self_attn_mask = torch.zeros((1, num_queries + contxt_len, num_queries + contxt_len)).bool()
-        self_attn_mask[:, :num_queries, num_queries:] = True  # object+class queries do not attend to caption queries.
-        self_attn_mask[:, num_queries:, num_queries:] = torch.triu(torch.ones((1, contxt_len, contxt_len)), diagonal=1).bool()  # caption queries only attend to previous tokens.
-        self_attn_mask[:, :num_queries-1, num_queries-1:num_queries] = True  # object queries do not attend to the class query.
-        self_attn_mask[:, num_queries-1:num_queries, :num_queries-1] = True  # the class query does not attend to object queries.
-        self.register_buffer("self_attn_mask", self_attn_mask)
-
-    @classmethod
-    def from_config(cls, cfg, in_channels, lang_encoder, mask_classification, extra):
-        ret = {}
-
-        ret["lang_encoder"] = lang_encoder
-        ret["in_channels"] = in_channels
-        ret["mask_classification"] = mask_classification
-
-        enc_cfg = cfg['MODEL']['ENCODER']
-        dec_cfg = cfg['MODEL']['DECODER']
-
-        ret["hidden_dim"] = dec_cfg['HIDDEN_DIM']
-        ret["dim_proj"] = cfg['MODEL']['DIM_PROJ']
-        ret["num_queries"] = dec_cfg['NUM_OBJECT_QUERIES']
-        ret["contxt_len"] = cfg['MODEL']['TEXT']['CONTEXT_LENGTH']
-
-        # Transformer parameters:
-        ret["nheads"] = dec_cfg['NHEADS']
-        ret["dim_feedforward"] = dec_cfg['DIM_FEEDFORWARD']
-
-        # NOTE: because we add learnable query features which require supervision,
-        # we subtract 1 from the decoder layers to be consistent with our loss
-        # implementation: that is, the number of auxiliary losses is always
-        # equal to the number of decoder layers. With learnable query features,
-        # the number of auxiliary losses equals the number of decoder layers plus 1.
-        assert dec_cfg['DEC_LAYERS'] >= 1
-        ret["dec_layers"] = dec_cfg['DEC_LAYERS'] - 1
-        ret["pre_norm"] = dec_cfg['PRE_NORM']
-        ret["enforce_input_project"] = dec_cfg['ENFORCE_INPUT_PROJ']
-        ret["mask_dim"] = enc_cfg['MASK_DIM']
-
-        ret["task_switch"] = extra['task_switch']
-        ret["captioning_step"] = dec_cfg['CAPTIONING'].get('STEP', 50)
-
-        return ret
-
-    def forward(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
-        if task == 'captioning_infer':
-            return self.forward_captioning(x, mask_features, mask=mask, target_queries=target_queries, target_vlp=target_vlp, task=task, extra=extra)
-        # x is a list of multi-scale features
-        assert len(x) == self.num_feature_levels
-        src = []
-        pos = []
-        size_list = []
-
-        # disable mask, it does not affect performance
-        del mask
-        for i in range(self.num_feature_levels):
-            size_list.append(x[i].shape[-2:])
-            pos.append(self.pe_layer(x[i], None).flatten(2))
-            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
-
-            # flatten NxCxHxW to HWxNxC
-            pos[-1] = pos[-1].permute(2, 0, 1)
-            src[-1] = src[-1].permute(2, 0, 1)
-
-        _, bs, _ = src[0].shape
-
-        # QxNxC
-        query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
-        output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
-
-        predictions_class = []
-        predictions_mask = []
-        predictions_bbox = []
-        predictions_caption = []
-        predictions_captioning = []
-
-        self_tgt_mask = None
-        if self.training and task == 'vlp' and self.task_switch['captioning']:
-            output = torch.cat((output, self.query_feat_caping.weight.unsqueeze(1).repeat(1, bs, 1)), dim=0)  # concat object queries, class token and caption tokens.
-            caping_lang_embed = torch.cat([caption['caption_tokens'] for caption in target_vlp], dim=0).transpose(0, 1)  # language output
-            query_embed = torch.cat((query_embed, caping_lang_embed), dim=0)  # may not add at the beginning.
-            self_tgt_mask = self.self_attn_mask.repeat(output.shape[1]*self.num_heads, 1, 1)
-        elif (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-            self_tgt_mask = self.self_attn_mask[:, :self.num_queries, :self.num_queries].repeat(output.shape[1]*self.num_heads, 1, 1)
-            grounding_tokens = extra['grounding_tokens']
-            _grounding_tokens = grounding_tokens.detach().clone()
-            # initialize with negative attention at the beginning.
-            pad_tgt_mask = torch.ones((1, self.num_queries + (self.num_queries-1) + len(grounding_tokens), self.num_queries + (self.num_queries-1) + len(grounding_tokens)), device=self_tgt_mask.device).bool().repeat(output.shape[1]*self.num_heads, 1, 1)
-            pad_tgt_mask[:, :self.num_queries, :self.num_queries] = self_tgt_mask
-            pad_tgt_mask[:, self.num_queries:, self.num_queries:] = False  # grounding tokens can attend to each other
-            self_tgt_mask = pad_tgt_mask
-            output = torch.cat((output, output[:-1]), dim=0)
-            query_embed = torch.cat((query_embed, query_embed[:-1]), dim=0)  # also pad language embedding to fix embedding
-        else:
-            self_tgt_mask = self.self_attn_mask[:, :self.num_queries, :self.num_queries].repeat(output.shape[1]*self.num_heads, 1, 1)
-
-        # prediction heads on learnable query features
-        results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], task=task)
-        attn_mask = results["attn_mask"]
-        predictions_class.append(results["outputs_class"])
-        predictions_mask.append(results["outputs_mask"])
-        predictions_bbox.append(results["outputs_bbox"])
-        predictions_caption.append(results["outputs_caption"])
-        predictions_captioning.append(results["outputs_captionting"])
-
-        for i in range(self.num_layers):
-            level_index = i % self.num_feature_levels
-            attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
-
-            if self.training and task == 'vlp' and self.task_switch['captioning']:
-                attn_mask = torch.cat((attn_mask, torch.zeros_like(attn_mask[:, :self.contxt_len, :])), dim=1)
-            # attention: cross-attention first
-            output, avg_attn = self.transformer_cross_attention_layers[i](
-                output, src[level_index],
-                memory_mask=attn_mask,
-                memory_key_padding_mask=None,  # here we do not apply masking on padded region
-                pos=pos[level_index], query_pos=query_embed
-            )
-
-            if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                    or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-                output = torch.cat((output, _grounding_tokens), dim=0)
-                query_embed = torch.cat((query_embed, grounding_tokens), dim=0)
-
-            output = self.transformer_self_attention_layers[i](
-                output, tgt_mask=self_tgt_mask,
-                tgt_key_padding_mask=None,
-                query_pos=query_embed
-            )
-
-            # FFN
-            output = self.transformer_ffn_layers[i](
-                output
-            )
-
-            if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                    or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-                _grounding_tokens = output[-len(_grounding_tokens):]
-                output = output[:-len(_grounding_tokens)]
-                query_embed = query_embed[:-len(_grounding_tokens)]
-
-            results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i, task=task)
-            attn_mask = results["attn_mask"]
-            predictions_class.append(results["outputs_class"])
-            predictions_mask.append(results["outputs_mask"])
-            predictions_bbox.append(results["outputs_bbox"])
-            predictions_caption.append(results["outputs_caption"])
-            predictions_captioning.append(results["outputs_captionting"])
-
-        assert len(predictions_class) == self.num_layers + 1
-        if task == 'vlp':
-            out = {'pred_captionings': predictions_captioning[-1],
-                   'pred_captions': predictions_caption[-1],
-                   'aux_outputs': [{'pred_captionings': x, 'pred_captions': y} for x, y in zip(predictions_captioning[:-1], predictions_caption[:-1])]}
-            return out
-        else:
-            out = {
-                'pred_logits': predictions_class[-1],
-                'pred_masks': predictions_mask[-1],
-                'pred_boxes': predictions_bbox[-1],
-                'pred_captions': predictions_caption[-1],
-                'aux_outputs': self._set_aux_loss(
-                    predictions_class if self.mask_classification else None, predictions_mask, predictions_bbox, predictions_caption
-                )
-            }
-            return out
-
-    def forward_captioning(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
-        # x is a list of multi-scale features
-        assert len(x) == self.num_feature_levels
-        src = []
-        pos = []
-        size_list = []
-
-        # disable mask, it does not affect performance
-        del mask
-        for i in range(self.num_feature_levels):
-            size_list.append(x[i].shape[-2:])
-            pos.append(self.pe_layer(x[i], None).flatten(2))
-            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
-
-            # flatten NxCxHxW to HWxNxC
-            pos[-1] = pos[-1].permute(2, 0, 1)
-            src[-1] = src[-1].permute(2, 0, 1)
-
-        _, bs, _ = src[0].shape
-
-        # QxNxC
-        query_embed_ = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
-        query_feat = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
-        caping_lang_token = extra['start_token'].repeat(bs, 1)
-        query_feat_caping = self.query_feat_caping.weight.unsqueeze(1).repeat(1, bs, 1)
-
-        # prepare token embedding for evaluation
-        token_embs = self.lang_encoder.lang_encoder.token_embedding.weight
-        # token_embs = (token_embs / token_embs.norm(dim=-1, keepdim=True) + 1e-7)
-
-        for cap_idx in range(0, self.captioning_step):
-            caping_lang_embed = self.lang_encoder.forward_language_token((caping_lang_token,))[0].transpose(0, 1)
-            query_embed = torch.cat((query_embed_, caping_lang_embed), dim=0)  # may not add at the beginning.
-            output = torch.cat((query_feat, query_feat_caping), dim=0)  # concat object queries, class token and caption tokens.
-
-            # prediction heads on learnable query features
-            results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], task=task)
-            attn_mask = results["attn_mask"]
-
-            for i in range(self.num_layers):
-                level_index = i % self.num_feature_levels
-                attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
-                attn_mask = torch.cat((attn_mask, torch.zeros_like(attn_mask[:, :self.contxt_len, :])), dim=1)
-                self_tgt_mask = self.self_attn_mask.repeat(output.shape[1]*self.num_heads, 1, 1)
-
-                # attention: cross-attention first
-                output, avg_attn = self.transformer_cross_attention_layers[i](
-                    output, src[level_index],
-                    memory_mask=attn_mask,
-                    memory_key_padding_mask=None,  # here we do not apply masking on padded region
-                    pos=pos[level_index], query_pos=query_embed
-                )
-
-                output = self.transformer_self_attention_layers[i](
-                    output, tgt_mask=self_tgt_mask,
-                    tgt_key_padding_mask=None,
-                    query_pos=query_embed
-                )
-
-                # FFN
-                output = self.transformer_ffn_layers[i](
-                    output
-                )
-
-                results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i, task=task)
-                attn_mask = results["attn_mask"]
-
-            pred_captions_gen = results['outputs_captionting']
-            # pred_captions_gen = (pred_captions_gen / pred_captions_gen.norm(dim=-1, keepdim=True) + 1e-7)
-            pred_captions_gen = pred_captions_gen @ token_embs.t()
-            caping_lang_token[:, cap_idx+1] = pred_captions_gen[:, cap_idx].max(-1)[1]
-
-        out = {'pred_captionings': caping_lang_token,
-               'pred_texts': self.lang_encoder.tokenizer.batch_decode(caping_lang_token, skip_special_tokens=True)}
-        return out
-
-    def forward_prediction_heads(self, output, mask_features, attn_mask_target_size, layer_id=-1, task='seg'):
-        decoder_output = self.decoder_norm(output)
-        decoder_output = decoder_output.transpose(0, 1)
-
-        # extract image captioning token from decoder output.
-        if self.task_switch['captioning'] and (task == 'vlp' or task == 'captioning_infer'):
-            outputs_captionting = decoder_output[:, self.num_queries:] @ self.caping_embed
-        else:
-            outputs_captionting = None
-
-        # recompute class token output.
-        norm_decoder_output = decoder_output / (decoder_output.norm(dim=-1, keepdim=True) + 1e-7)
-        obj_token = norm_decoder_output[:, :self.num_queries-1]
-        cls_token = norm_decoder_output[:, self.num_queries-1:self.num_queries]
-
-        sim = (cls_token @ obj_token.transpose(1, 2)).softmax(-1)[:, 0, :, None]  # TODO include class token.
-        cls_token = (sim * decoder_output[:, :self.num_queries-1]).sum(dim=1, keepdim=True)
-
-        if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-            decoder_output = torch.cat((decoder_output[:, :self.num_queries-1], cls_token, decoder_output[:, self.num_queries:2*self.num_queries-1]), dim=1)
-        else:
-            decoder_output = torch.cat((decoder_output[:, :self.num_queries-1], cls_token), dim=1)
-
-        # compute class, mask and bbox.
-        class_embed = decoder_output @ self.class_embed
-        # HACK do not compute similarity if mask is not on
-        outputs_class = self.lang_encoder.compute_similarity(class_embed, fake=(((not self.task_switch['mask']) and self.training) or (task == 'openimage')))
-
-        if self.task_switch['mask'] or self.task_switch['openimage']['mask']:
-            mask_embed = self.mask_embed(decoder_output)
-            outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
-
-            # NOTE: prediction is of higher-resolution
-            # [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
-            attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)
-
-            # must use bool type
-            # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
-            attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
-            attn_mask = attn_mask.detach()
-
-            # NOTE: fill False for cls token (JY)
-            attn_mask[:, self.num_queries:self.num_queries+1].fill_(False)
-        else:
-            outputs_mask = None
-            attn_mask = torch.zeros((list(decoder_output.shape[:2]) + [attn_mask_target_size[0]*attn_mask_target_size[1]]), device=decoder_output.device).repeat(self.num_heads, 1, 1).bool()
-
-        outputs_bbox = [None for i in range(len(decoder_output))]
-        if self.task_switch['bbox']:
-            outputs_bbox = self.bbox_embed(decoder_output)
-
-        outputs_caption = None
-        if self.task_switch['caption']:
-            outputs_caption = class_embed
-
-        results = {
-            "outputs_class": outputs_class,
-            "outputs_mask": outputs_mask,
-            "outputs_bbox": outputs_bbox,
-            "attn_mask": attn_mask,
-            "outputs_caption": outputs_caption,
-            "outputs_captionting": outputs_captionting,
-        }
-        return results
-
-    @torch.jit.unused
-    def _set_aux_loss(self, outputs_class, outputs_seg_masks, outputs_boxes, outputs_captions):
-        # this is a workaround to make torchscript happy, as torchscript
-        # doesn't support dictionary with non-homogeneous values, such
-        # as a dict having both a Tensor and a list.
-        if self.mask_classification:
-            return [
-                {"pred_logits": a, "pred_masks": b, "pred_boxes": c, "pred_captions": d}
-                for a, b, c, d in zip(outputs_class[:-1], outputs_seg_masks[:-1], outputs_boxes[:-1], outputs_captions[:-1])
-            ]
-        else:
-            return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
-
-
-@register_decoder
-def get_masked_transformer_decoder(cfg, in_channels, lang_encoder, mask_classification, extra):
-    return MultiScaleMaskedTransformerDecoder(cfg, in_channels, lang_encoder, mask_classification, extra)

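The `self_attn_mask` buffer built in `__init__` above is what lets a single decoder serve segmentation and captioning: object and class queries never attend to caption tokens, caption tokens are causally masked among themselves (but may still read the queries), and object and class queries are blocked from each other. A toy-sized sketch of that construction, where `True` means blocked:

import torch

num_queries, contxt_len = 4, 3  # toy sizes for illustration
n = num_queries + contxt_len
m = torch.zeros(1, n, n, dtype=torch.bool)
m[:, :num_queries, num_queries:] = True                 # queries cannot attend to caption tokens
m[:, num_queries:, num_queries:] = torch.triu(          # causal mask over caption tokens
    torch.ones(1, contxt_len, contxt_len), diagonal=1).bool()
m[:, :num_queries-1, num_queries-1:num_queries] = True  # object queries blocked from the class query
m[:, num_queries-1:num_queries, :num_queries-1] = True  # class query blocked from object queries
print(m[0].int())  # rows num_queries.. keep column 0..num_queries-1 False: caption tokens see the queries
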
xdecoder/body/decoder/xdecoder.py
DELETED
@@ -1,700 +0,0 @@
-# Copyright (c) Facebook, Inc. and its affiliates.
-# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py
-
-# --------------------------------------------------------
-# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
-# Copyright (c) 2022 Microsoft
-# Licensed under The MIT License [see LICENSE for details]
-# Written by Xueyan Zou (xueyan@cs.wisc.edu), Jianwei Yang (jianwyan@microsoft.com)
-# --------------------------------------------------------
-
-
-import logging
-from typing import Optional
-
-import torch
-from torch import nn, Tensor
-from torch.nn import functional as F
-
-from timm.models.layers import trunc_normal_
-from detectron2.layers import Conv2d
-import fvcore.nn.weight_init as weight_init
-
-from .registry import register_decoder
-from ...utils import configurable
-from ...modules import PositionEmbeddingSine
-
-
-class SelfAttentionLayer(nn.Module):
-
-    def __init__(self, d_model, nhead, dropout=0.0,
-                 activation="relu", normalize_before=False):
-        super().__init__()
-        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-
-        self.norm = nn.LayerNorm(d_model)
-        self.dropout = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(self, tgt,
-                     tgt_mask: Optional[Tensor] = None,
-                     tgt_key_padding_mask: Optional[Tensor] = None,
-                     query_pos: Optional[Tensor] = None):
-        q = k = self.with_pos_embed(tgt, query_pos)
-        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
-                              key_padding_mask=tgt_key_padding_mask)[0]
-        tgt = tgt + self.dropout(tgt2)
-        tgt = self.norm(tgt)
-
-        return tgt
-
-    def forward_pre(self, tgt,
-                    tgt_mask: Optional[Tensor] = None,
-                    tgt_key_padding_mask: Optional[Tensor] = None,
-                    query_pos: Optional[Tensor] = None):
-        tgt2 = self.norm(tgt)
-        q = k = self.with_pos_embed(tgt2, query_pos)
-        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
-                              key_padding_mask=tgt_key_padding_mask)[0]
-        tgt = tgt + self.dropout(tgt2)
-
-        return tgt
-
-    def forward(self, tgt,
-                tgt_mask: Optional[Tensor] = None,
-                tgt_key_padding_mask: Optional[Tensor] = None,
-                query_pos: Optional[Tensor] = None):
-        if self.normalize_before:
-            return self.forward_pre(tgt, tgt_mask,
-                                    tgt_key_padding_mask, query_pos)
-        return self.forward_post(tgt, tgt_mask,
-                                 tgt_key_padding_mask, query_pos)
-
-
-class CrossAttentionLayer(nn.Module):
-
-    def __init__(self, d_model, nhead, dropout=0.0,
-                 activation="relu", normalize_before=False):
-        super().__init__()
-        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
-
-        self.norm = nn.LayerNorm(d_model)
-        self.dropout = nn.Dropout(dropout)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(self, tgt, memory,
-                     memory_mask: Optional[Tensor] = None,
-                     memory_key_padding_mask: Optional[Tensor] = None,
-                     pos: Optional[Tensor] = None,
-                     query_pos: Optional[Tensor] = None):
-        tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
-                                             key=self.with_pos_embed(memory, pos),
-                                             value=memory, attn_mask=memory_mask,
-                                             key_padding_mask=memory_key_padding_mask)
-        tgt = tgt + self.dropout(tgt2)
-        tgt = self.norm(tgt)
-        return tgt, avg_attn
-
-    def forward_pre(self, tgt, memory,
-                    memory_mask: Optional[Tensor] = None,
-                    memory_key_padding_mask: Optional[Tensor] = None,
-                    pos: Optional[Tensor] = None,
-                    query_pos: Optional[Tensor] = None):
-        tgt2 = self.norm(tgt)
-        tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
-                                             key=self.with_pos_embed(memory, pos),
-                                             value=memory, attn_mask=memory_mask,
-                                             key_padding_mask=memory_key_padding_mask)
-        tgt = tgt + self.dropout(tgt2)
-
-        return tgt, avg_attn
-
-    def forward(self, tgt, memory,
-                memory_mask: Optional[Tensor] = None,
-                memory_key_padding_mask: Optional[Tensor] = None,
-                pos: Optional[Tensor] = None,
-                query_pos: Optional[Tensor] = None):
-        if self.normalize_before:
-            return self.forward_pre(tgt, memory, memory_mask,
-                                    memory_key_padding_mask, pos, query_pos)
-        return self.forward_post(tgt, memory, memory_mask,
-                                 memory_key_padding_mask, pos, query_pos)
-
-
-class FFNLayer(nn.Module):
-
-    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
-                 activation="relu", normalize_before=False):
-        super().__init__()
-        # Implementation of Feedforward model
-        self.linear1 = nn.Linear(d_model, dim_feedforward)
-        self.dropout = nn.Dropout(dropout)
-        self.linear2 = nn.Linear(dim_feedforward, d_model)
-
-        self.norm = nn.LayerNorm(d_model)
-
-        self.activation = _get_activation_fn(activation)
-        self.normalize_before = normalize_before
-
-        self._reset_parameters()
-
-    def _reset_parameters(self):
-        for p in self.parameters():
-            if p.dim() > 1:
-                nn.init.xavier_uniform_(p)
-
-    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
-        return tensor if pos is None else tensor + pos
-
-    def forward_post(self, tgt):
-        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
-        tgt = tgt + self.dropout(tgt2)
-        tgt = self.norm(tgt)
-        return tgt
-
-    def forward_pre(self, tgt):
-        tgt2 = self.norm(tgt)
-        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
-        tgt = tgt + self.dropout(tgt2)
-        return tgt
-
-    def forward(self, tgt):
-        if self.normalize_before:
-            return self.forward_pre(tgt)
-        return self.forward_post(tgt)
-
-
-def _get_activation_fn(activation):
-    """Return an activation function given a string"""
-    if activation == "relu":
-        return F.relu
-    if activation == "gelu":
-        return F.gelu
-    if activation == "glu":
-        return F.glu
-    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
-
-
-class MLP(nn.Module):
-    """Very simple multi-layer perceptron (also called FFN)"""
-
-    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
-        super().__init__()
-        self.num_layers = num_layers
-        h = [hidden_dim] * (num_layers - 1)
-        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))
-
-    def forward(self, x):
-        for i, layer in enumerate(self.layers):
-            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
-        return x
-
-
-class MultiScaleMaskedTransformerDecoder(nn.Module):
-
-    _version = 2
-
-    @configurable
-    def __init__(
-        self,
-        lang_encoder: nn.Module,
-        in_channels,
-        mask_classification=True,
-        *,
-        hidden_dim: int,
-        dim_proj: int,
-        num_queries: int,
-        contxt_len: int,
-        nheads: int,
-        dim_feedforward: int,
-        dec_layers: int,
-        pre_norm: bool,
-        mask_dim: int,
-        task_switch: dict,
-        captioning_step: int,
-        enforce_input_project: bool,
-    ):
-        """
-        NOTE: this interface is experimental.
-        Args:
-            in_channels: channels of the input features
-            mask_classification: whether to add mask classifier or not
-            num_classes: number of classes
-            hidden_dim: Transformer feature dimension
-            num_queries: number of queries
-            nheads: number of heads
-            dim_feedforward: feature dimension in feedforward network
-            enc_layers: number of Transformer encoder layers
-            dec_layers: number of Transformer decoder layers
-            pre_norm: whether to use pre-LayerNorm or not
-            mask_dim: mask feature dimension
-            enforce_input_project: add input project 1x1 conv even if input
-                channels and hidden dim are identical
-        """
-        super().__init__()
-        assert mask_classification, "Only support mask classification model"
-        self.mask_classification = mask_classification
-
-        # positional encoding
-        N_steps = hidden_dim // 2
-        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)
-
-        # define Transformer decoder here
-        self.num_heads = nheads
-        self.num_layers = dec_layers
-        self.contxt_len = contxt_len
-        self.transformer_self_attention_layers = nn.ModuleList()
-        self.transformer_cross_attention_layers = nn.ModuleList()
-        self.transformer_ffn_layers = nn.ModuleList()
-
-        for _ in range(self.num_layers):
-            self.transformer_self_attention_layers.append(
-                SelfAttentionLayer(
-                    d_model=hidden_dim,
-                    nhead=nheads,
-                    dropout=0.0,
-                    normalize_before=pre_norm,
-                )
-            )
-
-            self.transformer_cross_attention_layers.append(
-                CrossAttentionLayer(
-                    d_model=hidden_dim,
-                    nhead=nheads,
-                    dropout=0.0,
-                    normalize_before=pre_norm,
-                )
-            )
-
-            self.transformer_ffn_layers.append(
-                FFNLayer(
-                    d_model=hidden_dim,
-                    dim_feedforward=dim_feedforward,
-                    dropout=0.0,
-                    normalize_before=pre_norm,
-                )
-            )
-
-        self.decoder_norm = nn.LayerNorm(hidden_dim)
-
-        self.num_queries = num_queries
-        # learnable query features
-        self.query_feat = nn.Embedding(num_queries, hidden_dim)
-        # learnable query p.e.
-        self.query_embed = nn.Embedding(num_queries, hidden_dim)
-
-        # level embedding (we always use 3 scales)
-        self.num_feature_levels = 3
-        self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
-        self.input_proj = nn.ModuleList()
-
-        for _ in range(self.num_feature_levels):
-            if in_channels != hidden_dim or enforce_input_project:
-                self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
-                weight_init.c2_xavier_fill(self.input_proj[-1])
-            else:
-                self.input_proj.append(nn.Sequential())
-
-        self.task_switch = task_switch
-
-        # output FFNs
-        self.lang_encoder = lang_encoder
-        if self.task_switch['mask']:
-            self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)
-
-        self.class_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
-        trunc_normal_(self.class_embed, std=.02)
-
-        if task_switch['bbox']:
-            self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)
-
-        # Caption Project and query
-        if task_switch['captioning']:
-            self.caping_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
-            trunc_normal_(self.caping_embed, std=.02)
-            # self.query_feat_caping = nn.Embedding(contxt_len, hidden_dim)
-            self.pos_embed_caping = nn.Embedding(contxt_len, hidden_dim)
-            self.captioning_step = captioning_step
-
-        # register self_attn_mask to avoid information leakage; it covers the interaction between object queries, the class query and the captioning queries
-        self_attn_mask = torch.zeros((1, num_queries + contxt_len, num_queries + contxt_len)).bool()
-        self_attn_mask[:, :num_queries, num_queries:] = True  # object+class queries do not attend to caption queries.
-        self_attn_mask[:, num_queries:, num_queries:] = torch.triu(torch.ones((1, contxt_len, contxt_len)), diagonal=1).bool()  # caption queries only attend to previous tokens.
-        self_attn_mask[:, :num_queries-1, num_queries-1:num_queries] = True  # object queries do not attend to the class query.
-        self_attn_mask[:, num_queries-1:num_queries, :num_queries-1] = True  # the class query does not attend to object queries.
-        self.register_buffer("self_attn_mask", self_attn_mask)
-
-    @classmethod
-    def from_config(cls, cfg, in_channels, lang_encoder, mask_classification, extra):
-        ret = {}
-
-        ret["lang_encoder"] = lang_encoder
-        ret["in_channels"] = in_channels
-        ret["mask_classification"] = mask_classification
-
-        enc_cfg = cfg['MODEL']['ENCODER']
-        dec_cfg = cfg['MODEL']['DECODER']
-
-        ret["hidden_dim"] = dec_cfg['HIDDEN_DIM']
-        ret["dim_proj"] = cfg['MODEL']['DIM_PROJ']
-        ret["num_queries"] = dec_cfg['NUM_OBJECT_QUERIES']
-        ret["contxt_len"] = cfg['MODEL']['TEXT']['CONTEXT_LENGTH']
-
-        # Transformer parameters:
-        ret["nheads"] = dec_cfg['NHEADS']
-        ret["dim_feedforward"] = dec_cfg['DIM_FEEDFORWARD']
-
-        # NOTE: because we add learnable query features which require supervision,
-        # we subtract 1 from the decoder layers to be consistent with our loss
-        # implementation: that is, the number of auxiliary losses is always
-        # equal to the number of decoder layers. With learnable query features,
-        # the number of auxiliary losses equals the number of decoder layers plus 1.
-        assert dec_cfg['DEC_LAYERS'] >= 1
-        ret["dec_layers"] = dec_cfg['DEC_LAYERS'] - 1
-        ret["pre_norm"] = dec_cfg['PRE_NORM']
-        ret["enforce_input_project"] = dec_cfg['ENFORCE_INPUT_PROJ']
-        ret["mask_dim"] = enc_cfg['MASK_DIM']
-
-        ret["task_switch"] = extra['task_switch']
-        ret["captioning_step"] = dec_cfg['CAPTIONING'].get('STEP', 50)
-
-        return ret
-
-    def forward(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
-        if task == 'captioning_infer':
-            return self.forward_captioning(x, mask_features, mask=mask, target_queries=target_queries, target_vlp=target_vlp, task=task, extra=extra)
-        # x is a list of multi-scale features
-        assert len(x) == self.num_feature_levels
-        src = []
-        pos = []
-        size_list = []
-
-        # disable mask, it does not affect performance
-        del mask
-        for i in range(self.num_feature_levels):
-            size_list.append(x[i].shape[-2:])
-            pos.append(self.pe_layer(x[i], None).flatten(2))
-            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
-
-            # flatten NxCxHxW to HWxNxC
-            pos[-1] = pos[-1].permute(2, 0, 1)
-            src[-1] = src[-1].permute(2, 0, 1)
-
-        _, bs, _ = src[0].shape
-
-        # QxNxC
-        query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
-        output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
-
-        predictions_class = []
-        predictions_mask = []
-        predictions_bbox = []
-        predictions_caption = []
-        predictions_captioning = []
-
-        self_tgt_mask = None
-        if self.training and task == 'vlp' and self.task_switch['captioning']:
-            # output = torch.cat((output, self.query_feat_caping.weight.unsqueeze(1).repeat(1, bs, 1)), dim=0)  # concat object queries, class token and caption tokens.
-            caping_lang_embed = torch.cat([caption['caption_tokens'] for caption in target_vlp], dim=0).transpose(0, 1)  # language output
-            _caping_lang_embed = caping_lang_embed.detach().clone()
-            output = torch.cat((output, _caping_lang_embed), dim=0)  # concat object queries, class token and caption tokens.
-            caping_lang_embed += self.pos_embed_caping.weight.unsqueeze(1).repeat(1, bs, 1)
-            query_embed = torch.cat((query_embed, caping_lang_embed), dim=0)  # may not add at the beginning.
-            self_tgt_mask = self.self_attn_mask.repeat(output.shape[1]*self.num_heads, 1, 1)
-        elif (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-            self_tgt_mask = self.self_attn_mask[:, :self.num_queries, :self.num_queries].repeat(output.shape[1]*self.num_heads, 1, 1)
-            grounding_tokens = extra['grounding_tokens']
-            _grounding_tokens = grounding_tokens.detach().clone()
-            # initialize with negative attention at the beginning.
-            pad_tgt_mask = torch.ones((1, self.num_queries + (self.num_queries-1) + len(grounding_tokens), self.num_queries + (self.num_queries-1) + len(grounding_tokens)), device=self_tgt_mask.device).bool().repeat(output.shape[1]*self.num_heads, 1, 1)
-            pad_tgt_mask[:, :self.num_queries, :self.num_queries] = self_tgt_mask
-            pad_tgt_mask[:, self.num_queries:, self.num_queries:] = False  # grounding tokens can attend to each other
-            self_tgt_mask = pad_tgt_mask
-            output = torch.cat((output, output[:-1]), dim=0)
-            query_embed = torch.cat((query_embed, query_embed[:-1]), dim=0)  # also pad language embedding to fix embedding
-        else:
-            self_tgt_mask = self.self_attn_mask[:, :self.num_queries, :self.num_queries].repeat(output.shape[1]*self.num_heads, 1, 1)
-
-        # prediction heads on learnable query features
-        results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], task=task)
-        attn_mask = results["attn_mask"]
-        predictions_class.append(results["outputs_class"])
-        predictions_mask.append(results["outputs_mask"])
-        predictions_bbox.append(results["outputs_bbox"])
-        predictions_caption.append(results["outputs_caption"])
-        predictions_captioning.append(results["outputs_captionting"])
-
-        for i in range(self.num_layers):
-            level_index = i % self.num_feature_levels
-            attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
-
-            if self.training and task == 'vlp' and self.task_switch['captioning']:
-                attn_mask = torch.cat((attn_mask, torch.zeros_like(attn_mask[:, :self.contxt_len, :])), dim=1)
-            # attention: cross-attention first
-            output, avg_attn = self.transformer_cross_attention_layers[i](
-                output, src[level_index],
-                memory_mask=attn_mask,
-                memory_key_padding_mask=None,  # here we do not apply masking on padded region
-                pos=pos[level_index], query_pos=query_embed
-            )
-
-            if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                    or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-                output = torch.cat((output, _grounding_tokens), dim=0)
-                query_embed = torch.cat((query_embed, grounding_tokens), dim=0)
-
-            output = self.transformer_self_attention_layers[i](
-                output, tgt_mask=self_tgt_mask,
-                tgt_key_padding_mask=None,
-                query_pos=query_embed
-            )
-
-            # FFN
-            output = self.transformer_ffn_layers[i](
-                output
-            )
-
-            if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
-                    or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
-                _grounding_tokens = output[-len(_grounding_tokens):]
-                output = output[:-len(_grounding_tokens)]
-                query_embed = query_embed[:-len(_grounding_tokens)]
-
-            results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i, task=task)
-            attn_mask = results["attn_mask"]
-            predictions_class.append(results["outputs_class"])
-            predictions_mask.append(results["outputs_mask"])
-            predictions_bbox.append(results["outputs_bbox"])
-            predictions_caption.append(results["outputs_caption"])
-            predictions_captioning.append(results["outputs_captionting"])
-
-        assert len(predictions_class) == self.num_layers + 1
-        if task == 'vlp':
-            out = {'pred_captionings': predictions_captioning[-1],
-                   'pred_captions': predictions_caption[-1],
-                   'aux_outputs': [{'pred_captionings': x, 'pred_captions': y} for x, y in zip(predictions_captioning[:-1], predictions_caption[:-1])]}
-            return out
-        else:
-            out = {
-                'pred_logits': predictions_class[-1],
-                'pred_masks': predictions_mask[-1],
-                'pred_boxes': predictions_bbox[-1],
-                'pred_captions': predictions_caption[-1],
-                'aux_outputs': self._set_aux_loss(
-                    predictions_class if self.mask_classification else None, predictions_mask, predictions_bbox, predictions_caption
-                )
-            }
-            return out
-
-    def forward_captioning(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
-        # x is a list of multi-scale features
-        assert len(x) == self.num_feature_levels
-        src = []
-        pos = []
-        size_list = []
-
-        # disable mask, it does not affect performance
-        del mask
-        for i in range(self.num_feature_levels):
-            size_list.append(x[i].shape[-2:])
-            pos.append(self.pe_layer(x[i], None).flatten(2))
-            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])
-
-            # flatten NxCxHxW to HWxNxC
-            pos[-1] = pos[-1].permute(2, 0, 1)
-            src[-1] = src[-1].permute(2, 0, 1)
-
-        _, bs, _ = src[0].shape
-
-        # QxNxC
-        query_embed_ = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
-        query_feat = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
-        caping_lang_token = extra['start_token'].repeat(bs, 1)
-        start_id = 0
-        if 'token' in extra:
-            caping_lang_token[:, :len(extra['token'][0])] = extra['token']
-            start_id = len(extra['token'][0])-1
-        # query_feat_caping = self.query_feat_caping.weight.unsqueeze(1).repeat(1, bs, 1)
-        pos_embed_caping = self.pos_embed_caping.weight.unsqueeze(1).repeat(1, bs, 1)
-        # prepare token embedding for evaluation
-        token_embs = self.lang_encoder.lang_encoder.token_embedding.weight
-        # token_embs = (token_embs / token_embs.norm(dim=-1, keepdim=True) + 1e-7)
-
-        for cap_idx in range(start_id, self.captioning_step):
-            caping_lang_embed = self.lang_encoder.forward_language_token((caping_lang_token,))[0].transpose(0, 1)
-            output = torch.cat((query_feat, caping_lang_embed), dim=0)  # concat object queries, class token and caption tokens.
-            caping_lang_embed += pos_embed_caping
-            query_embed = torch.cat((query_embed_, caping_lang_embed), dim=0)  # may not add at the beginning.
-            # output = torch.cat((query_feat, query_feat_caping), dim=0)  # concat object queries, class token and caption tokens.
-
-            # prediction heads on learnable query features
-            results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], task=task)
-            attn_mask = results["attn_mask"]
-
-            for i in range(self.num_layers):
-                level_index = i % self.num_feature_levels
-                attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
-                attn_mask = torch.cat((attn_mask, torch.zeros_like(attn_mask[:, :self.contxt_len, :])), dim=1)
-                self_tgt_mask = self.self_attn_mask.repeat(output.shape[1]*self.num_heads, 1, 1)
-
-                if extra['captioning_mask'] is not None:
-                    bs, nq, wh = attn_mask.shape
-                    assert bs == self.num_heads, "Only support single image referring captioning."
-                    cap_mask = extra['captioning_mask']
-                    attn_mask = attn_mask.reshape(bs, nq, size_list[i%3][0], size_list[i%3][1])
-                    cap_mask = F.interpolate(cap_mask[None,].float(), size_list[i%3], mode='nearest').bool()[0, 0]
|
572 |
-
attn_mask[:,self.num_queries:, cap_mask] = True
|
573 |
-
attn_mask = attn_mask.reshape(bs,nq,wh)
|
574 |
-
|
575 |
-
# attention: cross-attention first
|
576 |
-
output, avg_attn = self.transformer_cross_attention_layers[i](
|
577 |
-
output, src[level_index],
|
578 |
-
memory_mask=attn_mask,
|
579 |
-
memory_key_padding_mask=None, # here we do not apply masking on padded region
|
580 |
-
pos=pos[level_index], query_pos=query_embed
|
581 |
-
)
|
582 |
-
|
583 |
-
output = self.transformer_self_attention_layers[i](
|
584 |
-
output, tgt_mask=self_tgt_mask,
|
585 |
-
tgt_key_padding_mask=None,
|
586 |
-
query_pos=query_embed
|
587 |
-
)
|
588 |
-
|
589 |
-
# FFN
|
590 |
-
output = self.transformer_ffn_layers[i](
|
591 |
-
output
|
592 |
-
)
|
593 |
-
|
594 |
-
results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i, task=task)
|
595 |
-
attn_mask = results["attn_mask"]
|
596 |
-
|
597 |
-
pred_captions_gen = results['outputs_captionting']
|
598 |
-
# pred_captions_gen = (pred_captions_gen / pred_captions_gen.norm(dim=-1, keepdim=True) + 1e-7)
|
599 |
-
pred_captions_gen = pred_captions_gen @ token_embs.t()
|
600 |
-
caping_lang_token[:,cap_idx+1] = pred_captions_gen[:,cap_idx].max(-1)[1]
|
601 |
-
|
602 |
-
texts = self.lang_encoder.tokenizer.batch_decode(caping_lang_token, skip_special_tokens=False)
|
603 |
-
texts_new = []
|
604 |
-
|
605 |
-
for x in texts:
|
606 |
-
x = x.split('<|endoftext|>')[0]
|
607 |
-
x = x.replace('<|endoftext|>','')
|
608 |
-
x = x.replace('<|startoftext|>','')
|
609 |
-
x = x.strip()
|
610 |
-
texts_new.append(x)
|
611 |
-
|
612 |
-
out = {'pred_captionings': caping_lang_token,
|
613 |
-
'pred_texts': texts_new}
|
614 |
-
return out
|
615 |
-
|
616 |
-
|
617 |
-
def forward_prediction_heads(self, output, mask_features, attn_mask_target_size, layer_id=-1, task='seg'):
|
618 |
-
decoder_output = self.decoder_norm(output)
|
619 |
-
decoder_output = decoder_output.transpose(0, 1)
|
620 |
-
|
621 |
-
# extract image captioning token from decoder output.
|
622 |
-
if self.task_switch['captioning'] and (task == 'vlp' or task == 'captioning_infer'):
|
623 |
-
outputs_captionting = decoder_output[:,self.num_queries:] @ self.caping_embed
|
624 |
-
else:
|
625 |
-
outputs_captionting = None
|
626 |
-
|
627 |
-
# recompute class token output.
|
628 |
-
norm_decoder_output = decoder_output / (decoder_output.norm(dim=-1, keepdim=True) + 1e-7)
|
629 |
-
obj_token = norm_decoder_output[:,:self.num_queries-1]
|
630 |
-
cls_token = norm_decoder_output[:,self.num_queries-1:self.num_queries]
|
631 |
-
|
632 |
-
sim = (cls_token @ obj_token.transpose(1,2)).softmax(-1)[:,0,:,None] # TODO include class token.
|
633 |
-
cls_token = (sim * decoder_output[:,:self.num_queries-1]).sum(dim=1, keepdim=True)
|
634 |
-
|
635 |
-
if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
|
636 |
-
or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
|
637 |
-
decoder_output = torch.cat((decoder_output[:,:self.num_queries-1], cls_token, decoder_output[:,self.num_queries:2*self.num_queries-1]), dim=1)
|
638 |
-
else:
|
639 |
-
decoder_output = torch.cat((decoder_output[:,:self.num_queries-1], cls_token), dim=1)
|
640 |
-
|
641 |
-
# compute class, mask and bbox.
|
642 |
-
class_embed = decoder_output @ self.class_embed
|
643 |
-
# HACK do not compute similarity if mask is not on
|
644 |
-
outputs_class = self.lang_encoder.compute_similarity(class_embed, fake=(((not self.task_switch['mask']) and self.training) or (task == 'openimage')))
|
645 |
-
|
646 |
-
if self.task_switch['mask'] or self.task_switch['openimage']['mask']:
|
647 |
-
mask_embed = self.mask_embed(decoder_output)
|
648 |
-
outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)
|
649 |
-
|
650 |
-
# NOTE: prediction is of higher-resolution
|
651 |
-
# [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
|
652 |
-
attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)
|
653 |
-
|
654 |
-
# must use bool type
|
655 |
-
# If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
|
656 |
-
attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
|
657 |
-
attn_mask = attn_mask.detach()
|
658 |
-
|
659 |
-
# NOTE: fill False for cls token (JY)
|
660 |
-
attn_mask[:, self.num_queries:self.num_queries+1].fill_(False)
|
661 |
-
else:
|
662 |
-
outputs_mask = None
|
663 |
-
attn_mask = torch.zeros((list(decoder_output.shape[:2]) + [attn_mask_target_size[0]*attn_mask_target_size[1]]), device=decoder_output.device).repeat(self.num_heads, 1, 1).bool()
|
664 |
-
|
665 |
-
outputs_bbox = [None for i in range(len(decoder_output))]
|
666 |
-
if self.task_switch['bbox']:
|
667 |
-
outputs_bbox = self.bbox_embed(decoder_output)
|
668 |
-
|
669 |
-
outputs_caption = None
|
670 |
-
if self.task_switch['caption']:
|
671 |
-
outputs_caption = class_embed
|
672 |
-
|
673 |
-
|
674 |
-
results = {
|
675 |
-
"outputs_class": outputs_class,
|
676 |
-
"outputs_mask": outputs_mask,
|
677 |
-
"outputs_bbox": outputs_bbox,
|
678 |
-
"attn_mask": attn_mask,
|
679 |
-
"outputs_caption": outputs_caption,
|
680 |
-
"outputs_captionting": outputs_captionting,
|
681 |
-
}
|
682 |
-
return results
|
683 |
-
|
684 |
-
@torch.jit.unused
|
685 |
-
def _set_aux_loss(self, outputs_class, outputs_seg_masks, outputs_boxes, outputs_captions):
|
686 |
-
# this is a workaround to make torchscript happy, as torchscript
|
687 |
-
# doesn't support dictionary with non-homogeneous values, such
|
688 |
-
# as a dict having both a Tensor and a list.
|
689 |
-
if self.mask_classification:
|
690 |
-
return [
|
691 |
-
{"pred_logits": a, "pred_masks": b, "pred_boxes": c, "pred_captions": d}
|
692 |
-
for a, b, c, d in zip(outputs_class[:-1], outputs_seg_masks[:-1], outputs_boxes[:-1], outputs_captions[:-1])
|
693 |
-
]
|
694 |
-
else:
|
695 |
-
return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]
|
696 |
-
|
697 |
-
|
698 |
-
@register_decoder
|
699 |
-
def get_masked_transformer_decoder(cfg, in_channels, lang_encoder, mask_classification, extra):
|
700 |
-
return MultiScaleMaskedTransformerDecoder(cfg, in_channels, lang_encoder, mask_classification, extra)
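
The captioning loop above is a greedy decode: after every full pass through the decoder layers, the caption-token features are projected onto the frozen token-embedding table and the per-position argmax becomes the next input token. A minimal standalone sketch of just that decode rule; decode_step is a hypothetical stand-in for one full decoder pass, not the repository's API:

# Sketch of the greedy decode rule used in forward_captioning above.
# Assumption: decode_step(tokens) runs the decoder once and returns caption
# features of shape (bs, contxt_len, dim_proj); token_embs is the frozen
# (vocab_size, dim_proj) token-embedding table.
import torch

def greedy_caption(decode_step, token_embs, start_tokens, steps):
    tokens = start_tokens.clone()        # (bs, contxt_len), begins with <|startoftext|>
    for cap_idx in range(steps):
        feats = decode_step(tokens)      # (bs, contxt_len, dim_proj)
        logits = feats @ token_embs.t()  # similarity against every vocabulary embedding
        tokens[:, cap_idx + 1] = logits[:, cap_idx].max(-1)[1]  # argmax becomes the next token
    return tokens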
xdecoder/body/decoder/xdecoder2.py
DELETED
@@ -1,700 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/detr.py

# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Xueyan Zou (xueyan@cs.wisc.edu), Jianwei Yang (jianwyan@microsoft.com)
# --------------------------------------------------------


import logging
from typing import Optional

import torch
from torch import nn, Tensor
from torch.nn import functional as F

from timm.models.layers import trunc_normal_
from detectron2.layers import Conv2d
import fvcore.nn.weight_init as weight_init

from .registry import register_decoder
from ...utils import configurable
from ...modules import PositionEmbeddingSine


class SelfAttentionLayer(nn.Module):

    def __init__(self, d_model, nhead, dropout=0.0,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

        self._reset_parameters()

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt,
                     tgt_mask: Optional[Tensor] = None,
                     tgt_key_padding_mask: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout(tgt2)
        tgt = self.norm(tgt)

        return tgt

    def forward_pre(self, tgt,
                    tgt_mask: Optional[Tensor] = None,
                    tgt_key_padding_mask: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        tgt2 = self.norm(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(q, k, value=tgt2, attn_mask=tgt_mask,
                              key_padding_mask=tgt_key_padding_mask)[0]
        tgt = tgt + self.dropout(tgt2)

        return tgt

    def forward(self, tgt,
                tgt_mask: Optional[Tensor] = None,
                tgt_key_padding_mask: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, tgt_mask,
                                    tgt_key_padding_mask, query_pos)
        return self.forward_post(tgt, tgt_mask,
                                 tgt_key_padding_mask, query_pos)


class CrossAttentionLayer(nn.Module):

    def __init__(self, d_model, nhead, dropout=0.0,
                 activation="relu", normalize_before=False):
        super().__init__()
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)

        self.norm = nn.LayerNorm(d_model)
        self.dropout = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

        self._reset_parameters()

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt, memory,
                     memory_mask: Optional[Tensor] = None,
                     memory_key_padding_mask: Optional[Tensor] = None,
                     pos: Optional[Tensor] = None,
                     query_pos: Optional[Tensor] = None):
        tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt, query_pos),
                                             key=self.with_pos_embed(memory, pos),
                                             value=memory, attn_mask=memory_mask,
                                             key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout(tgt2)
        tgt = self.norm(tgt)
        return tgt, avg_attn

    def forward_pre(self, tgt, memory,
                    memory_mask: Optional[Tensor] = None,
                    memory_key_padding_mask: Optional[Tensor] = None,
                    pos: Optional[Tensor] = None,
                    query_pos: Optional[Tensor] = None):
        tgt2 = self.norm(tgt)
        tgt2, avg_attn = self.multihead_attn(query=self.with_pos_embed(tgt2, query_pos),
                                             key=self.with_pos_embed(memory, pos),
                                             value=memory, attn_mask=memory_mask,
                                             key_padding_mask=memory_key_padding_mask)
        tgt = tgt + self.dropout(tgt2)

        return tgt, avg_attn

    def forward(self, tgt, memory,
                memory_mask: Optional[Tensor] = None,
                memory_key_padding_mask: Optional[Tensor] = None,
                pos: Optional[Tensor] = None,
                query_pos: Optional[Tensor] = None):
        if self.normalize_before:
            return self.forward_pre(tgt, memory, memory_mask,
                                    memory_key_padding_mask, pos, query_pos)
        return self.forward_post(tgt, memory, memory_mask,
                                 memory_key_padding_mask, pos, query_pos)


class FFNLayer(nn.Module):

    def __init__(self, d_model, dim_feedforward=2048, dropout=0.0,
                 activation="relu", normalize_before=False):
        super().__init__()
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm = nn.LayerNorm(d_model)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

        self._reset_parameters()

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(self, tgt):
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout(tgt2)
        tgt = self.norm(tgt)
        return tgt

    def forward_pre(self, tgt):
        tgt2 = self.norm(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout(tgt2)
        return tgt

    def forward(self, tgt):
        if self.normalize_before:
            return self.forward_pre(tgt)
        return self.forward_post(tgt)


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu, not {activation}.")


class MLP(nn.Module):
    """Very simple multi-layer perceptron (also called FFN)"""

    def __init__(self, input_dim, hidden_dim, output_dim, num_layers):
        super().__init__()
        self.num_layers = num_layers
        h = [hidden_dim] * (num_layers - 1)
        self.layers = nn.ModuleList(nn.Linear(n, k) for n, k in zip([input_dim] + h, h + [output_dim]))

    def forward(self, x):
        for i, layer in enumerate(self.layers):
            x = F.relu(layer(x)) if i < self.num_layers - 1 else layer(x)
        return x


class MultiScaleMaskedTransformerDecoder(nn.Module):

    _version = 2

    @configurable
    def __init__(
        self,
        lang_encoder: nn.Module,
        in_channels,
        mask_classification=True,
        *,
        hidden_dim: int,
        dim_proj: int,
        num_queries: int,
        contxt_len: int,
        nheads: int,
        dim_feedforward: int,
        dec_layers: int,
        pre_norm: bool,
        mask_dim: int,
        task_switch: dict,
        captioning_step: int,
        enforce_input_project: bool,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            in_channels: channels of the input features
            mask_classification: whether to add mask classifier or not
            num_classes: number of classes
            hidden_dim: Transformer feature dimension
            num_queries: number of queries
            nheads: number of heads
            dim_feedforward: feature dimension in feedforward network
            enc_layers: number of Transformer encoder layers
            dec_layers: number of Transformer decoder layers
            pre_norm: whether to use pre-LayerNorm or not
            mask_dim: mask feature dimension
            enforce_input_project: add input project 1x1 conv even if input
                channels and hidden dim is identical
        """
        super().__init__()
        assert mask_classification, "Only support mask classification model"
        self.mask_classification = mask_classification

        # positional encoding
        N_steps = hidden_dim // 2
        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)

        # define Transformer decoder here
        self.num_heads = nheads
        self.num_layers = dec_layers
        self.contxt_len = contxt_len
        self.transformer_self_attention_layers = nn.ModuleList()
        self.transformer_cross_attention_layers = nn.ModuleList()
        self.transformer_ffn_layers = nn.ModuleList()

        for _ in range(self.num_layers):
            self.transformer_self_attention_layers.append(
                SelfAttentionLayer(
                    d_model=hidden_dim,
                    nhead=nheads,
                    dropout=0.0,
                    normalize_before=pre_norm,
                )
            )

            self.transformer_cross_attention_layers.append(
                CrossAttentionLayer(
                    d_model=hidden_dim,
                    nhead=nheads,
                    dropout=0.0,
                    normalize_before=pre_norm,
                )
            )

            self.transformer_ffn_layers.append(
                FFNLayer(
                    d_model=hidden_dim,
                    dim_feedforward=dim_feedforward,
                    dropout=0.0,
                    normalize_before=pre_norm,
                )
            )

        self.decoder_norm = nn.LayerNorm(hidden_dim)

        self.num_queries = num_queries
        # learnable query features
        self.query_feat = nn.Embedding(num_queries, hidden_dim)
        # learnable query p.e.
        self.query_embed = nn.Embedding(num_queries, hidden_dim)

        # level embedding (we always use 3 scales)
        self.num_feature_levels = 3
        self.level_embed = nn.Embedding(self.num_feature_levels, hidden_dim)
        self.input_proj = nn.ModuleList()

        for _ in range(self.num_feature_levels):
            if in_channels != hidden_dim or enforce_input_project:
                self.input_proj.append(Conv2d(in_channels, hidden_dim, kernel_size=1))
                weight_init.c2_xavier_fill(self.input_proj[-1])
            else:
                self.input_proj.append(nn.Sequential())

        self.task_switch = task_switch

        # output FFNs
        self.lang_encoder = lang_encoder
        if self.task_switch['mask']:
            self.mask_embed = MLP(hidden_dim, hidden_dim, mask_dim, 3)

        self.class_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
        trunc_normal_(self.class_embed, std=.02)

        if task_switch['bbox']:
            self.bbox_embed = MLP(hidden_dim, hidden_dim, 4, 3)

        # Caption Project and query
        if task_switch['captioning']:
            self.caping_embed = nn.Parameter(torch.empty(hidden_dim, dim_proj))
            trunc_normal_(self.caping_embed, std=.02)
            self.query_feat_caping = nn.Embedding(contxt_len, hidden_dim)
            # self.pos_embed_caping = nn.Embedding(contxt_len, hidden_dim)
            self.captioning_step = captioning_step

        # register self_attn_mask to avoid information leakage, it includes interaction between object query, class query and caping query
        self_attn_mask = torch.zeros((1, num_queries + contxt_len, num_queries + contxt_len)).bool()
        self_attn_mask[:, :num_queries, num_queries:] = True  # object+class query does not attend to caption query.
        self_attn_mask[:, num_queries:, num_queries:] = torch.triu(torch.ones((1, contxt_len, contxt_len)), diagonal=1).bool()  # caption query only attends to previous tokens.
        self_attn_mask[:, :num_queries-1, num_queries-1:num_queries] = True  # object query does not attend to class query.
        self_attn_mask[:, num_queries-1:num_queries, :num_queries-1] = True  # class query does not attend to object query.
        self.register_buffer("self_attn_mask", self_attn_mask)

    @classmethod
    def from_config(cls, cfg, in_channels, lang_encoder, mask_classification, extra):
        ret = {}

        ret["lang_encoder"] = lang_encoder
        ret["in_channels"] = in_channels
        ret["mask_classification"] = mask_classification

        enc_cfg = cfg['MODEL']['ENCODER']
        dec_cfg = cfg['MODEL']['DECODER']

        ret["hidden_dim"] = dec_cfg['HIDDEN_DIM']
        ret["dim_proj"] = cfg['MODEL']['DIM_PROJ']
        ret["num_queries"] = dec_cfg['NUM_OBJECT_QUERIES']
        ret["contxt_len"] = cfg['MODEL']['TEXT']['CONTEXT_LENGTH']

        # Transformer parameters:
        ret["nheads"] = dec_cfg['NHEADS']
        ret["dim_feedforward"] = dec_cfg['DIM_FEEDFORWARD']

        # NOTE: because we add learnable query features which require supervision,
        # we subtract 1 from the decoder layers to be consistent with our loss
        # implementation: that is, the number of auxiliary losses is always
        # equal to the number of decoder layers. With learnable query features,
        # the number of auxiliary losses equals the number of decoder layers plus 1.
        assert dec_cfg['DEC_LAYERS'] >= 1
        ret["dec_layers"] = dec_cfg['DEC_LAYERS'] - 1
        ret["pre_norm"] = dec_cfg['PRE_NORM']
        ret["enforce_input_project"] = dec_cfg['ENFORCE_INPUT_PROJ']
        ret["mask_dim"] = enc_cfg['MASK_DIM']

        ret["task_switch"] = extra['task_switch']
        ret["captioning_step"] = dec_cfg['CAPTIONING'].get('STEP', 50)

        return ret

    def forward(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
        if task == 'captioning_infer':
            return self.forward_captioning(x, mask_features, mask=mask, target_queries=target_queries, target_vlp=target_vlp, task=task, extra=extra)
        # x is a list of multi-scale features
        assert len(x) == self.num_feature_levels
        src = []
        pos = []
        size_list = []

        # disable mask, it does not affect performance
        del mask
        for i in range(self.num_feature_levels):
            size_list.append(x[i].shape[-2:])
            pos.append(self.pe_layer(x[i], None).flatten(2))
            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])

            # flatten NxCxHxW to HWxNxC
            pos[-1] = pos[-1].permute(2, 0, 1)
            src[-1] = src[-1].permute(2, 0, 1)

        _, bs, _ = src[0].shape

        # QxNxC
        query_embed = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
        output = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)

        predictions_class = []
        predictions_mask = []
        predictions_bbox = []
        predictions_caption = []
        predictions_captioning = []

        self_tgt_mask = None
        if self.training and task == 'vlp' and self.task_switch['captioning']:
            output = torch.cat((output, self.query_feat_caping.weight.unsqueeze(1).repeat(1, bs, 1)), dim=0)  # concat object query, class token and caption token.
            caping_lang_embed = torch.cat([caption['caption_tokens'] for caption in target_vlp], dim=0).transpose(0, 1)  # language output
            # _caping_lang_embed = caping_lang_embed.detach().clone()
            # output = torch.cat((output, _caping_lang_embed), dim=0)  # concat object query, class token and caption token.
            # caping_lang_embed += self.pos_embed_caping.weight.unsqueeze(1).repeat(1, bs, 1)
            query_embed = torch.cat((query_embed, caping_lang_embed), dim=0)  # may not add at the beginning.
            self_tgt_mask = self.self_attn_mask.repeat(output.shape[1]*self.num_heads, 1, 1)
        elif (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
            or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
            self_tgt_mask = self.self_attn_mask[:, :self.num_queries, :self.num_queries].repeat(output.shape[1]*self.num_heads, 1, 1)
            grounding_tokens = extra['grounding_tokens']
            _grounding_tokens = grounding_tokens.detach().clone()
            # initialize with negative attention at the beginning.
            pad_tgt_mask = torch.ones((1, self.num_queries + (self.num_queries-1) + len(grounding_tokens), self.num_queries + (self.num_queries-1) + len(grounding_tokens)), device=self_tgt_mask.device).bool().repeat(output.shape[1]*self.num_heads, 1, 1)
            pad_tgt_mask[:, :self.num_queries, :self.num_queries] = self_tgt_mask
            pad_tgt_mask[:, self.num_queries:, self.num_queries:] = False  # grounding tokens can attend to each other
            self_tgt_mask = pad_tgt_mask
            output = torch.cat((output, output[:-1]), dim=0)
            query_embed = torch.cat((query_embed, query_embed[:-1]), dim=0)  # also pad language embedding to fix embedding
        else:
            self_tgt_mask = self.self_attn_mask[:, :self.num_queries, :self.num_queries].repeat(output.shape[1]*self.num_heads, 1, 1)

        # prediction heads on learnable query features
        results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], task=task)
        attn_mask = results["attn_mask"]
        predictions_class.append(results["outputs_class"])
        predictions_mask.append(results["outputs_mask"])
        predictions_bbox.append(results["outputs_bbox"])
        predictions_caption.append(results["outputs_caption"])
        predictions_captioning.append(results["outputs_captionting"])

        for i in range(self.num_layers):
            level_index = i % self.num_feature_levels
            attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False

            if self.training and task == 'vlp' and self.task_switch['captioning']:
                attn_mask = torch.cat((attn_mask, torch.zeros_like(attn_mask[:, :self.contxt_len, :])), dim=1)
            # attention: cross-attention first
            output, avg_attn = self.transformer_cross_attention_layers[i](
                output, src[level_index],
                memory_mask=attn_mask,
                memory_key_padding_mask=None,  # here we do not apply masking on the padded region
                pos=pos[level_index], query_pos=query_embed
            )

            if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
                or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
                output = torch.cat((output, _grounding_tokens), dim=0)
                query_embed = torch.cat((query_embed, grounding_tokens), dim=0)

            output = self.transformer_self_attention_layers[i](
                output, tgt_mask=self_tgt_mask,
                tgt_key_padding_mask=None,
                query_pos=query_embed
            )

            # FFN
            output = self.transformer_ffn_layers[i](
                output
            )

            if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
                or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
                _grounding_tokens = output[-len(_grounding_tokens):]
                output = output[:-len(_grounding_tokens)]
                query_embed = query_embed[:-len(_grounding_tokens)]

            results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i, task=task)
            attn_mask = results["attn_mask"]
            predictions_class.append(results["outputs_class"])
            predictions_mask.append(results["outputs_mask"])
            predictions_bbox.append(results["outputs_bbox"])
            predictions_caption.append(results["outputs_caption"])
            predictions_captioning.append(results["outputs_captionting"])

        assert len(predictions_class) == self.num_layers + 1
        if task == 'vlp':
            out = {'pred_captionings': predictions_captioning[-1],
                   'pred_captions': predictions_caption[-1],
                   'aux_outputs': [{'pred_captionings': x, 'pred_captions': y} for x, y in zip(predictions_captioning[:-1], predictions_caption[:-1])]}
            return out
        else:
            out = {
                'pred_logits': predictions_class[-1],
                'pred_masks': predictions_mask[-1],
                'pred_boxes': predictions_bbox[-1],
                'pred_captions': predictions_caption[-1],
                'aux_outputs': self._set_aux_loss(
                    predictions_class if self.mask_classification else None, predictions_mask, predictions_bbox, predictions_caption
                )
            }
            return out

    def forward_captioning(self, x, mask_features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
        # x is a list of multi-scale features
        assert len(x) == self.num_feature_levels
        src = []
        pos = []
        size_list = []

        # disable mask, it does not affect performance
        del mask
        for i in range(self.num_feature_levels):
            size_list.append(x[i].shape[-2:])
            pos.append(self.pe_layer(x[i], None).flatten(2))
            src.append(self.input_proj[i](x[i]).flatten(2) + self.level_embed.weight[i][None, :, None])

            # flatten NxCxHxW to HWxNxC
            pos[-1] = pos[-1].permute(2, 0, 1)
            src[-1] = src[-1].permute(2, 0, 1)

        _, bs, _ = src[0].shape

        # QxNxC
        query_embed_ = self.query_embed.weight.unsqueeze(1).repeat(1, bs, 1)
        query_feat = self.query_feat.weight.unsqueeze(1).repeat(1, bs, 1)
        caping_lang_token = extra['start_token'].repeat(bs, 1)
        start_id = 0
        if 'token' in extra:
            caping_lang_token[:, :len(extra['token'][0])] = extra['token']
            start_id = len(extra['token'][0]) - 1
        query_feat_caping = self.query_feat_caping.weight.unsqueeze(1).repeat(1, bs, 1)
        # pos_embed_caping = self.pos_embed_caping.weight.unsqueeze(1).repeat(1, bs, 1)
        # prepare token embedding for evaluation
        token_embs = self.lang_encoder.lang_encoder.token_embedding.weight
        # token_embs = (token_embs / token_embs.norm(dim=-1, keepdim=True) + 1e-7)

        for cap_idx in range(start_id, self.captioning_step):
            caping_lang_embed = self.lang_encoder.forward_language_token((caping_lang_token,))[0].transpose(0, 1)
            # output = torch.cat((query_feat, caping_lang_embed), dim=0)  # concat object query, class token and caption token.
            # caping_lang_embed += pos_embed_caping
            query_embed = torch.cat((query_embed_, caping_lang_embed), dim=0)  # may not add at the beginning.
            output = torch.cat((query_feat, query_feat_caping), dim=0)  # concat object query, class token and caption token.

            # prediction heads on learnable query features
            results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[0], task=task)
            attn_mask = results["attn_mask"]

            for i in range(self.num_layers):
                level_index = i % self.num_feature_levels
                attn_mask[torch.where(attn_mask.sum(-1) == attn_mask.shape[-1])] = False
                attn_mask = torch.cat((attn_mask, torch.zeros_like(attn_mask[:, :self.contxt_len, :])), dim=1)
                self_tgt_mask = self.self_attn_mask.repeat(output.shape[1]*self.num_heads, 1, 1)

                if extra['captioning_mask'] is not None:
                    bs, nq, wh = attn_mask.shape
                    assert bs == self.num_heads, "Only support single image referring captioning."
                    cap_mask = extra['captioning_mask']
                    attn_mask = attn_mask.reshape(bs, nq, size_list[i % 3][0], size_list[i % 3][1])
                    cap_mask = F.interpolate(cap_mask[None,].float(), size_list[i % 3], mode='nearest').bool()[0, 0]
                    attn_mask[:, self.num_queries:, cap_mask] = True
                    attn_mask = attn_mask.reshape(bs, nq, wh)

                # attention: cross-attention first
                output, avg_attn = self.transformer_cross_attention_layers[i](
                    output, src[level_index],
                    memory_mask=attn_mask,
                    memory_key_padding_mask=None,  # here we do not apply masking on the padded region
                    pos=pos[level_index], query_pos=query_embed
                )

                output = self.transformer_self_attention_layers[i](
                    output, tgt_mask=self_tgt_mask,
                    tgt_key_padding_mask=None,
                    query_pos=query_embed
                )

                # FFN
                output = self.transformer_ffn_layers[i](
                    output
                )

                results = self.forward_prediction_heads(output, mask_features, attn_mask_target_size=size_list[(i + 1) % self.num_feature_levels], layer_id=i, task=task)
                attn_mask = results["attn_mask"]

            pred_captions_gen = results['outputs_captionting']
            # pred_captions_gen = (pred_captions_gen / pred_captions_gen.norm(dim=-1, keepdim=True) + 1e-7)
            pred_captions_gen = pred_captions_gen @ token_embs.t()
            caping_lang_token[:, cap_idx + 1] = pred_captions_gen[:, cap_idx].max(-1)[1]

        texts = self.lang_encoder.tokenizer.batch_decode(caping_lang_token, skip_special_tokens=False)
        texts_new = []

        for x in texts:
            x = x.split('<|endoftext|>')[0]
            x = x.replace('<|endoftext|>', '')
            x = x.replace('<|startoftext|>', '')
            x = x.strip()
            texts_new.append(x)

        out = {'pred_captionings': caping_lang_token,
               'pred_texts': texts_new}
        return out

    def forward_prediction_heads(self, output, mask_features, attn_mask_target_size, layer_id=-1, task='seg'):
        decoder_output = self.decoder_norm(output)
        decoder_output = decoder_output.transpose(0, 1)

        # extract image captioning token from decoder output.
        if self.task_switch['captioning'] and (task == 'vlp' or task == 'captioning_infer'):
            outputs_captionting = decoder_output[:, self.num_queries:] @ self.caping_embed
        else:
            outputs_captionting = None

        # recompute class token output.
        norm_decoder_output = decoder_output / (decoder_output.norm(dim=-1, keepdim=True) + 1e-7)
        obj_token = norm_decoder_output[:, :self.num_queries-1]
        cls_token = norm_decoder_output[:, self.num_queries-1:self.num_queries]

        sim = (cls_token @ obj_token.transpose(1, 2)).softmax(-1)[:, 0, :, None]  # TODO include class token.
        cls_token = (sim * decoder_output[:, :self.num_queries-1]).sum(dim=1, keepdim=True)

        if (((self.training and task == 'seg') or (task == 'grounding_eval')) and self.task_switch['grounding']) \
            or ((self.training and task == 'openimage') and self.task_switch['openimage']['grounding']):
            decoder_output = torch.cat((decoder_output[:, :self.num_queries-1], cls_token, decoder_output[:, self.num_queries:2*self.num_queries-1]), dim=1)
        else:
            decoder_output = torch.cat((decoder_output[:, :self.num_queries-1], cls_token), dim=1)

        # compute class, mask and bbox.
        class_embed = decoder_output @ self.class_embed
        # HACK do not compute similarity if mask is not on
        outputs_class = self.lang_encoder.compute_similarity(class_embed, fake=(((not self.task_switch['mask']) and self.training) or (task == 'openimage')))

        if self.task_switch['mask'] or self.task_switch['openimage']['mask']:
            mask_embed = self.mask_embed(decoder_output)
            outputs_mask = torch.einsum("bqc,bchw->bqhw", mask_embed, mask_features)

            # NOTE: prediction is of higher-resolution
            # [B, Q, H, W] -> [B, Q, H*W] -> [B, h, Q, H*W] -> [B*h, Q, HW]
            attn_mask = F.interpolate(outputs_mask, size=attn_mask_target_size, mode="bilinear", align_corners=False)

            # must use bool type
            # If a BoolTensor is provided, positions with ``True`` are not allowed to attend while ``False`` values will be unchanged.
            attn_mask = (attn_mask.sigmoid().flatten(2).unsqueeze(1).repeat(1, self.num_heads, 1, 1).flatten(0, 1) < 0.5).bool()
            attn_mask = attn_mask.detach()

            # NOTE: fill False for cls token (JY)
            attn_mask[:, self.num_queries:self.num_queries+1].fill_(False)
        else:
            outputs_mask = None
            attn_mask = torch.zeros((list(decoder_output.shape[:2]) + [attn_mask_target_size[0]*attn_mask_target_size[1]]), device=decoder_output.device).repeat(self.num_heads, 1, 1).bool()

        outputs_bbox = [None for i in range(len(decoder_output))]
        if self.task_switch['bbox']:
            outputs_bbox = self.bbox_embed(decoder_output)

        outputs_caption = None
        if self.task_switch['caption']:
            outputs_caption = class_embed

        results = {
            "outputs_class": outputs_class,
            "outputs_mask": outputs_mask,
            "outputs_bbox": outputs_bbox,
            "attn_mask": attn_mask,
            "outputs_caption": outputs_caption,
            "outputs_captionting": outputs_captionting,
        }
        return results

    @torch.jit.unused
    def _set_aux_loss(self, outputs_class, outputs_seg_masks, outputs_boxes, outputs_captions):
        # this is a workaround to make torchscript happy, as torchscript
        # doesn't support dictionary with non-homogeneous values, such
        # as a dict having both a Tensor and a list.
        if self.mask_classification:
            return [
                {"pred_logits": a, "pred_masks": b, "pred_boxes": c, "pred_captions": d}
                for a, b, c, d in zip(outputs_class[:-1], outputs_seg_masks[:-1], outputs_boxes[:-1], outputs_captions[:-1])
            ]
        else:
            return [{"pred_masks": b} for b in outputs_seg_masks[:-1]]


@register_decoder
def get_masked_transformer_decoder(cfg, in_channels, lang_encoder, mask_classification, extra):
    return MultiScaleMaskedTransformerDecoder(cfg, in_channels, lang_encoder, mask_classification, extra)
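
As far as these listings show, xdecoder2.py differs from xdecoder.py only in which captioning-query path is active in forward_captioning (a learned query_feat_caping embedding here, versus language embeddings plus pos_embed_caping there). The self_attn_mask buffer registered in __init__ is what keeps the token groups from leaking information into each other; a self-contained sketch of its block structure, with toy sizes standing in for the configured num_queries and contxt_len:

# Reproduction of the self_attn_mask block structure from __init__ above,
# with toy sizes (the real values come from the model config).
import torch

num_queries, contxt_len = 4, 3
n = num_queries + contxt_len
mask = torch.zeros((1, n, n)).bool()  # True = not allowed to attend
# object+class queries never see caption tokens
mask[:, :num_queries, num_queries:] = True
# caption tokens see only earlier caption tokens (causal upper triangle)
mask[:, num_queries:, num_queries:] = torch.triu(
    torch.ones((1, contxt_len, contxt_len)), diagonal=1).bool()
# object queries and the class query (last of the num_queries slots) are isolated from each other
mask[:, :num_queries-1, num_queries-1:num_queries] = True
mask[:, num_queries-1:num_queries, :num_queries-1] = True
print(mask[0].int())  # visualize the block pattern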
xdecoder/body/encoder/__init__.py
DELETED
@@ -1 +0,0 @@
from .build import build_encoder
xdecoder/body/encoder/build.py
DELETED
@@ -1,12 +0,0 @@
from .registry import model_entrypoints
from .registry import is_model

from .transformer_encoder_fpn import *

def build_encoder(config, *args, **kwargs):
    model_name = config['MODEL']['ENCODER']['NAME']

    if not is_model(model_name):
        raise ValueError(f'Unknown model: {model_name}')

    return model_entrypoints(model_name)(config, *args, **kwargs)
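
build_encoder looks the entrypoint up by the NAME config key and forwards any extra arguments verbatim. A minimal self-contained sketch of the same dispatch pattern with a toy registry (all names here are illustrative, not the repository's):

# Toy reproduction of the registry/builder dispatch used throughout this package.
_entrypoints = {}

def register(fn):
    _entrypoints[fn.__name__] = fn
    return fn

@register
def toy_encoder(config):
    return f"encoder with conv dim {config['MODEL']['ENCODER']['CONVS_DIM']}"

def build(config, *args, **kwargs):
    name = config['MODEL']['ENCODER']['NAME']
    if name not in _entrypoints:
        raise ValueError(f'Unknown model: {name}')
    return _entrypoints[name](config, *args, **kwargs)

print(build({'MODEL': {'ENCODER': {'NAME': 'toy_encoder', 'CONVS_DIM': 256}}}))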
xdecoder/body/encoder/registry.py
DELETED
@@ -1,13 +0,0 @@
_model_entrypoints = {}

def register_encoder(fn):
    module_name_split = fn.__module__.split('.')
    model_name = module_name_split[-1]
    _model_entrypoints[model_name] = fn
    return fn

def model_entrypoints(model_name):
    return _model_entrypoints[model_name]

def is_model(model_name):
    return model_name in _model_entrypoints
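
Note that register_encoder keys the registry by the decorated function's module filename, not by the function's own name. A short sketch of that behavior (the __module__ override simulates the package layout; illustrative only):

# The registry key comes from fn.__module__, so a function decorated inside
# transformer_encoder_fpn.py registers under 'transformer_encoder_fpn'
# regardless of what the function itself is called.
def get_encoder(config, *args, **kwargs):
    return None

get_encoder.__module__ = 'xdecoder.body.encoder.transformer_encoder_fpn'  # simulate location
register_encoder(get_encoder)
assert is_model('transformer_encoder_fpn')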
xdecoder/body/encoder/transformer_encoder_fpn.py
DELETED
@@ -1,324 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import logging
import numpy as np
from typing import Callable, Dict, List, Optional, Tuple, Union

import torch
from torch import nn
from torch.nn import functional as F
from torch.nn.init import xavier_uniform_, constant_, uniform_, normal_
from torch.cuda.amp import autocast

import fvcore.nn.weight_init as weight_init
from detectron2.layers import Conv2d, DeformConv, ShapeSpec, get_norm

from .registry import register_encoder
from ..transformer_blocks import TransformerEncoder, TransformerEncoderLayer, _get_clones, _get_activation_fn
from ...modules import PositionEmbeddingSine
from ...utils import configurable

# from ..layers import Conv2d, DeformConv, ShapeSpec, get_norm

# This is a modified FPN decoder.
class BasePixelDecoder(nn.Module):
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        conv_dim: int,
        mask_dim: int,
        mask_on: bool,
        norm: Optional[Union[str, Callable]] = None,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            conv_dim: number of output channels for the intermediate conv layers.
            mask_dim: number of output channels for the final conv layer.
            norm (str or callable): normalization for all conv layers
        """
        super().__init__()

        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]  # starting from "res2" to "res5"
        feature_channels = [v.channels for k, v in input_shape]

        lateral_convs = []
        output_convs = []

        use_bias = norm == ""
        for idx, in_channels in enumerate(feature_channels):
            if idx == len(self.in_features) - 1:
                output_norm = get_norm(norm, conv_dim)
                output_conv = Conv2d(
                    in_channels,
                    conv_dim,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=use_bias,
                    norm=output_norm,
                    activation=F.relu,
                )
                weight_init.c2_xavier_fill(output_conv)
                self.add_module("layer_{}".format(idx + 1), output_conv)

                lateral_convs.append(None)
                output_convs.append(output_conv)
            else:
                lateral_norm = get_norm(norm, conv_dim)
                output_norm = get_norm(norm, conv_dim)

                lateral_conv = Conv2d(
                    in_channels, conv_dim, kernel_size=1, bias=use_bias, norm=lateral_norm
                )
                output_conv = Conv2d(
                    conv_dim,
                    conv_dim,
                    kernel_size=3,
                    stride=1,
                    padding=1,
                    bias=use_bias,
                    norm=output_norm,
                    activation=F.relu,
                )
                weight_init.c2_xavier_fill(lateral_conv)
                weight_init.c2_xavier_fill(output_conv)
                self.add_module("adapter_{}".format(idx + 1), lateral_conv)
                self.add_module("layer_{}".format(idx + 1), output_conv)

                lateral_convs.append(lateral_conv)
                output_convs.append(output_conv)
        # Place convs into top-down order (from low to high resolution)
        # to make the top-down computation in forward clearer.
        self.lateral_convs = lateral_convs[::-1]
        self.output_convs = output_convs[::-1]

        self.mask_on = mask_on
        if self.mask_on:
            self.mask_dim = mask_dim
            self.mask_features = Conv2d(
                conv_dim,
                mask_dim,
                kernel_size=3,
                stride=1,
                padding=1,
            )
            weight_init.c2_xavier_fill(self.mask_features)

        self.maskformer_num_feature_levels = 3  # always use 3 scales

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        enc_cfg = cfg['MODEL']['ENCODER']
        ret = {}
        ret["input_shape"] = {
            k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES']
        }
        ret["conv_dim"] = enc_cfg['CONVS_DIM']
        ret["mask_dim"] = enc_cfg['MASK_DIM']
        ret["norm"] = enc_cfg['NORM']
        return ret

    def forward_features(self, features):
        multi_scale_features = []
        num_cur_levels = 0
        # Reverse feature maps into top-down order (from low to high resolution)
        for idx, f in enumerate(self.in_features[::-1]):
            x = features[f]
            lateral_conv = self.lateral_convs[idx]
            output_conv = self.output_convs[idx]
            if lateral_conv is None:
                y = output_conv(x)
            else:
                cur_fpn = lateral_conv(x)
                # Following FPN implementation, we use nearest upsampling here
                y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
                y = output_conv(y)
            if num_cur_levels < self.maskformer_num_feature_levels:
                multi_scale_features.append(y)
                num_cur_levels += 1

        mask_features = self.mask_features(y) if self.mask_on else None
        return mask_features, None, multi_scale_features

    def forward(self, features, targets=None):
        logger = logging.getLogger(__name__)
        logger.warning("Calling forward() may cause unpredictable behavior of PixelDecoder module.")
        return self.forward_features(features)


class TransformerEncoderOnly(nn.Module):
    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, pos_embed):
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        if mask is not None:
            mask = mask.flatten(1)

        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        return memory.permute(1, 2, 0).view(bs, c, h, w)


# This is a modified FPN decoder with an extra Transformer encoder that processes the lowest-resolution feature map.
class TransformerEncoderPixelDecoder(BasePixelDecoder):
    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        transformer_dropout: float,
        transformer_nheads: int,
        transformer_dim_feedforward: int,
        transformer_enc_layers: int,
        transformer_pre_norm: bool,
        conv_dim: int,
        mask_dim: int,
        mask_on: bool,
        norm: Optional[Union[str, Callable]] = None,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            transformer_dropout: dropout probability in transformer
            transformer_nheads: number of heads in transformer
            transformer_dim_feedforward: dimension of feedforward network
            transformer_enc_layers: number of transformer encoder layers
            transformer_pre_norm: whether to use pre-layernorm or not
            conv_dim: number of output channels for the intermediate conv layers.
            mask_dim: number of output channels for the final conv layer.
            norm (str or callable): normalization for all conv layers
        """
        super().__init__(input_shape, conv_dim=conv_dim, mask_dim=mask_dim, norm=norm, mask_on=mask_on)

        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]  # starting from "res2" to "res5"
        feature_strides = [v.stride for k, v in input_shape]
        feature_channels = [v.channels for k, v in input_shape]

        in_channels = feature_channels[len(self.in_features) - 1]
        self.input_proj = Conv2d(in_channels, conv_dim, kernel_size=1)
        weight_init.c2_xavier_fill(self.input_proj)
        self.transformer = TransformerEncoderOnly(
            d_model=conv_dim,
            dropout=transformer_dropout,
            nhead=transformer_nheads,
            dim_feedforward=transformer_dim_feedforward,
            num_encoder_layers=transformer_enc_layers,
            normalize_before=transformer_pre_norm,
        )
        N_steps = conv_dim // 2
        self.pe_layer = PositionEmbeddingSine(N_steps, normalize=True)

        # update layer
        use_bias = norm == ""
        output_norm = get_norm(norm, conv_dim)
        output_conv = Conv2d(
            conv_dim,
            conv_dim,
            kernel_size=3,
            stride=1,
            padding=1,
            bias=use_bias,
            norm=output_norm,
            activation=F.relu,
        )
        weight_init.c2_xavier_fill(output_conv)
        delattr(self, "layer_{}".format(len(self.in_features)))
        self.add_module("layer_{}".format(len(self.in_features)), output_conv)
        self.output_convs[0] = output_conv

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
        enc_cfg = cfg['MODEL']['ENCODER']
        dec_cfg = cfg['MODEL']['DECODER']

        ret = super().from_config(cfg, input_shape)
        ret["transformer_dropout"] = dec_cfg['DROPOUT']
        ret["transformer_nheads"] = dec_cfg['NHEADS']
        ret["transformer_dim_feedforward"] = dec_cfg['DIM_FEEDFORWARD']
        ret["transformer_enc_layers"] = enc_cfg['TRANSFORMER_ENC_LAYERS']  # a separate config
        ret["transformer_pre_norm"] = dec_cfg['PRE_NORM']

        ret['mask_on'] = cfg['MODEL']['DECODER']['MASK']
        return ret

    def forward_features(self, features):
        multi_scale_features = []
        num_cur_levels = 0

        # Reverse feature maps into top-down order (from low to high resolution)
|
282 |
-
for idx, f in enumerate(self.in_features[::-1]):
|
283 |
-
x = features[f]
|
284 |
-
lateral_conv = self.lateral_convs[idx]
|
285 |
-
output_conv = self.output_convs[idx]
|
286 |
-
if lateral_conv is None:
|
287 |
-
transformer = self.input_proj(x)
|
288 |
-
pos = self.pe_layer(x)
|
289 |
-
transformer = self.transformer(transformer, None, pos)
|
290 |
-
y = output_conv(transformer)
|
291 |
-
# save intermediate feature as input to Transformer decoder
|
292 |
-
transformer_encoder_features = transformer
|
293 |
-
else:
|
294 |
-
cur_fpn = lateral_conv(x)
|
295 |
-
# Following FPN implementation, we use nearest upsampling here
|
296 |
-
y = cur_fpn + F.interpolate(y, size=cur_fpn.shape[-2:], mode="nearest")
|
297 |
-
y = output_conv(y)
|
298 |
-
if num_cur_levels < self.maskformer_num_feature_levels:
|
299 |
-
multi_scale_features.append(y)
|
300 |
-
num_cur_levels += 1
|
301 |
-
|
302 |
-
mask_features = self.mask_features(y) if self.mask_on else None
|
303 |
-
return mask_features, transformer_encoder_features, multi_scale_features
|
304 |
-
|
305 |
-
def forward(self, features, targets=None):
|
306 |
-
logger = logging.getLogger(__name__)
|
307 |
-
logger.warning("Calling forward() may cause unpredicted behavior of PixelDecoder module.")
|
308 |
-
return self.forward_features(features)
|
309 |
-
|
310 |
-
|
311 |
-
|
312 |
-
@register_encoder
|
313 |
-
def get_transformer_encoder_fpn(cfg, input_shape):
|
314 |
-
"""
|
315 |
-
Build a pixel decoder from `cfg.MODEL.MASK_FORMER.PIXEL_DECODER_NAME`.
|
316 |
-
"""
|
317 |
-
model = TransformerEncoderPixelDecoder(cfg, input_shape)
|
318 |
-
forward_features = getattr(model, "forward_features", None)
|
319 |
-
if not callable(forward_features):
|
320 |
-
raise ValueError(
|
321 |
-
"Only SEM_SEG_HEADS with forward_features method can be used as pixel decoder. "
|
322 |
-
f"Please implement forward_features for {name} to only return mask features."
|
323 |
-
)
|
324 |
-
return model
|
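
For reference, the top-down fusion that forward_features implements reduces to a few lines. The sketch below uses dummy tensors and channel counts (assumptions, not part of the deleted file) and omits the lateral/output convolutions:

import torch
import torch.nn.functional as F

features = {
    "res2": torch.randn(1, 256, 64, 64),   # highest resolution
    "res3": torch.randn(1, 256, 32, 32),
    "res4": torch.randn(1, 256, 16, 16),
    "res5": torch.randn(1, 256, 8, 8),     # lowest resolution
}
in_features = ["res2", "res3", "res4", "res5"]

y = None
multi_scale_features = []
for f in in_features[::-1]:                # low -> high resolution, as in the module
    x = features[f]
    if y is None:
        y = x                              # the coarsest map seeds the pathway
    else:
        y = x + F.interpolate(y, size=x.shape[-2:], mode="nearest")
    if len(multi_scale_features) < 3:      # maskformer_num_feature_levels
        multi_scale_features.append(y)

print([t.shape for t in multi_scale_features])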
xdecoder/body/registry.py
DELETED
@@ -1,14 +0,0 @@
_model_entrypoints = {}


def register_body(fn):
    module_name_split = fn.__module__.split('.')
    model_name = module_name_split[-1]
    _model_entrypoints[model_name] = fn
    return fn

def model_entrypoints(model_name):
    return _model_entrypoints[model_name]

def is_model(model_name):
    return model_name in _model_entrypoints
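
A note on the registry above: the entrypoint is keyed by the *module* name, not the function name, so there is one body entrypoint per file. A self-contained sketch (the dict is re-declared locally, and the `demo_head` module name and `get_demo_head` function are hypothetical):

_model_entrypoints = {}

def register_body(fn):
    _model_entrypoints[fn.__module__.split('.')[-1]] = fn
    return fn

def get_demo_head(cfg):
    return f"head built from {cfg}"

get_demo_head.__module__ = "xdecoder.body.demo_head"  # as if it lived in demo_head.py
register_body(get_demo_head)

print("demo_head" in _model_entrypoints)              # True: keyed by module name
print(_model_entrypoints["demo_head"]({"dim": 512}))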
xdecoder/body/transformer_blocks.py
DELETED
@@ -1,370 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/transformer.py
"""
Transformer class.

Copy-paste from torch.nn.Transformer with modifications:
    * positional encodings are passed in MHattention
    * extra LN at the end of encoder is removed
    * decoder returns a stack of activations from all decoding layers
"""
import copy
from typing import List, Optional

import torch
import torch.nn.functional as F
from torch import Tensor, nn


class Transformer(nn.Module):
    def __init__(
        self,
        d_model=512,
        nhead=8,
        num_encoder_layers=6,
        num_decoder_layers=6,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
        return_intermediate_dec=False,
    ):
        super().__init__()

        encoder_layer = TransformerEncoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        encoder_norm = nn.LayerNorm(d_model) if normalize_before else None
        self.encoder = TransformerEncoder(encoder_layer, num_encoder_layers, encoder_norm)

        decoder_layer = TransformerDecoderLayer(
            d_model, nhead, dim_feedforward, dropout, activation, normalize_before
        )
        decoder_norm = nn.LayerNorm(d_model)
        self.decoder = TransformerDecoder(
            decoder_layer,
            num_decoder_layers,
            decoder_norm,
            return_intermediate=return_intermediate_dec,
        )

        self._reset_parameters()

        self.d_model = d_model
        self.nhead = nhead

    def _reset_parameters(self):
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, src, mask, query_embed, pos_embed):
        # flatten NxCxHxW to HWxNxC
        bs, c, h, w = src.shape
        src = src.flatten(2).permute(2, 0, 1)
        pos_embed = pos_embed.flatten(2).permute(2, 0, 1)
        query_embed = query_embed.unsqueeze(1).repeat(1, bs, 1)
        if mask is not None:
            mask = mask.flatten(1)

        tgt = torch.zeros_like(query_embed)
        memory = self.encoder(src, src_key_padding_mask=mask, pos=pos_embed)
        hs = self.decoder(
            tgt, memory, memory_key_padding_mask=mask, pos=pos_embed, query_pos=query_embed
        )
        return hs.transpose(1, 2), memory.permute(1, 2, 0).view(bs, c, h, w)


class TransformerEncoder(nn.Module):
    def __init__(self, encoder_layer, num_layers, norm=None):
        super().__init__()
        self.layers = _get_clones(encoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm

    def forward(
        self,
        src,
        mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        output = src

        for layer in self.layers:
            output = layer(
                output, src_mask=mask, src_key_padding_mask=src_key_padding_mask, pos=pos
            )

        if self.norm is not None:
            output = self.norm(output)

        return output


class TransformerDecoder(nn.Module):
    def __init__(self, decoder_layer, num_layers, norm=None, return_intermediate=False):
        super().__init__()
        self.layers = _get_clones(decoder_layer, num_layers)
        self.num_layers = num_layers
        self.norm = norm
        self.return_intermediate = return_intermediate

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        output = tgt

        intermediate = []

        for layer in self.layers:
            output = layer(
                output,
                memory,
                tgt_mask=tgt_mask,
                memory_mask=memory_mask,
                tgt_key_padding_mask=tgt_key_padding_mask,
                memory_key_padding_mask=memory_key_padding_mask,
                pos=pos,
                query_pos=query_pos,
            )
            if self.return_intermediate:
                intermediate.append(self.norm(output))

        if self.norm is not None:
            output = self.norm(output)
            if self.return_intermediate:
                intermediate.pop()
                intermediate.append(output)

        if self.return_intermediate:
            return torch.stack(intermediate)

        return output.unsqueeze(0)


class TransformerEncoderLayer(nn.Module):
    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        q = k = self.with_pos_embed(src, pos)

        src2 = self.self_attn(
            q, k, value=src, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )[0]
        src = src + self.dropout1(src2)
        src = self.norm1(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src))))
        src = src + self.dropout2(src2)
        src = self.norm2(src)
        return src

    def forward_pre(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        src2 = self.norm1(src)
        q = k = self.with_pos_embed(src2, pos)
        src2 = self.self_attn(
            q, k, value=src2, attn_mask=src_mask, key_padding_mask=src_key_padding_mask
        )[0]
        src = src + self.dropout1(src2)
        src2 = self.norm2(src)
        src2 = self.linear2(self.dropout(self.activation(self.linear1(src2))))
        src = src + self.dropout2(src2)
        return src

    def forward(
        self,
        src,
        src_mask: Optional[Tensor] = None,
        src_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
    ):
        if self.normalize_before:
            return self.forward_pre(src, src_mask, src_key_padding_mask, pos)
        return self.forward_post(src, src_mask, src_key_padding_mask, pos)


class TransformerDecoderLayer(nn.Module):
    def __init__(
        self,
        d_model,
        nhead,
        dim_feedforward=2048,
        dropout=0.1,
        activation="relu",
        normalize_before=False,
    ):
        super().__init__()
        self.self_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        self.multihead_attn = nn.MultiheadAttention(d_model, nhead, dropout=dropout)
        # Implementation of Feedforward model
        self.linear1 = nn.Linear(d_model, dim_feedforward)
        self.dropout = nn.Dropout(dropout)
        self.linear2 = nn.Linear(dim_feedforward, d_model)

        self.norm1 = nn.LayerNorm(d_model)
        self.norm2 = nn.LayerNorm(d_model)
        self.norm3 = nn.LayerNorm(d_model)
        self.dropout1 = nn.Dropout(dropout)
        self.dropout2 = nn.Dropout(dropout)
        self.dropout3 = nn.Dropout(dropout)

        self.activation = _get_activation_fn(activation)
        self.normalize_before = normalize_before

    def with_pos_embed(self, tensor, pos: Optional[Tensor]):
        return tensor if pos is None else tensor + pos

    def forward_post(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        q = k = self.with_pos_embed(tgt, query_pos)
        tgt2 = self.self_attn(
            q, k, value=tgt, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt = self.norm1(tgt)
        tgt2 = self.multihead_attn(
            query=self.with_pos_embed(tgt, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt = self.norm2(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt))))
        tgt = tgt + self.dropout3(tgt2)
        tgt = self.norm3(tgt)
        return tgt

    def forward_pre(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        tgt2 = self.norm1(tgt)
        q = k = self.with_pos_embed(tgt2, query_pos)
        tgt2 = self.self_attn(
            q, k, value=tgt2, attn_mask=tgt_mask, key_padding_mask=tgt_key_padding_mask
        )[0]
        tgt = tgt + self.dropout1(tgt2)
        tgt2 = self.norm2(tgt)
        tgt2 = self.multihead_attn(
            query=self.with_pos_embed(tgt2, query_pos),
            key=self.with_pos_embed(memory, pos),
            value=memory,
            attn_mask=memory_mask,
            key_padding_mask=memory_key_padding_mask,
        )[0]
        tgt = tgt + self.dropout2(tgt2)
        tgt2 = self.norm3(tgt)
        tgt2 = self.linear2(self.dropout(self.activation(self.linear1(tgt2))))
        tgt = tgt + self.dropout3(tgt2)
        return tgt

    def forward(
        self,
        tgt,
        memory,
        tgt_mask: Optional[Tensor] = None,
        memory_mask: Optional[Tensor] = None,
        tgt_key_padding_mask: Optional[Tensor] = None,
        memory_key_padding_mask: Optional[Tensor] = None,
        pos: Optional[Tensor] = None,
        query_pos: Optional[Tensor] = None,
    ):
        if self.normalize_before:
            return self.forward_pre(
                tgt,
                memory,
                tgt_mask,
                memory_mask,
                tgt_key_padding_mask,
                memory_key_padding_mask,
                pos,
                query_pos,
            )
        return self.forward_post(
            tgt,
            memory,
            tgt_mask,
            memory_mask,
            tgt_key_padding_mask,
            memory_key_padding_mask,
            pos,
            query_pos,
        )


def _get_clones(module, N):
    return nn.ModuleList([copy.deepcopy(module) for i in range(N)])


def _get_activation_fn(activation):
    """Return an activation function given a string"""
    if activation == "relu":
        return F.relu
    if activation == "gelu":
        return F.gelu
    if activation == "glu":
        return F.glu
    raise RuntimeError(f"activation should be relu/gelu/glu, not {activation}.")
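
A quick shape check for the DETR-style Transformer above; batch size, resolution, and query count below are arbitrary, and the class is assumed importable from this module:

import torch
# from xdecoder.body.transformer_blocks import Transformer

model = Transformer(d_model=256, nhead=8, num_encoder_layers=2,
                    num_decoder_layers=2, return_intermediate_dec=True)
src = torch.randn(2, 256, 16, 16)                # backbone features, NxCxHxW
mask = torch.zeros(2, 16, 16, dtype=torch.bool)  # no padded pixels
query_embed = torch.randn(100, 256)              # 100 learned object queries
pos_embed = torch.randn(2, 256, 16, 16)

hs, memory = model(src, mask, query_embed, pos_embed)
print(hs.shape)      # (num_decoder_layers, batch, queries, d_model) = (2, 2, 100, 256)
print(memory.shape)  # (2, 256, 16, 16)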
xdecoder/body/xdecoder_head.py
DELETED
@@ -1,123 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.

# --------------------------------------------------------
# X-Decoder -- Generalized Decoding for Pixel, Image, and Language
# Copyright (c) 2022 Microsoft
# Licensed under The MIT License [see LICENSE for details]
# Written by Jianwei Yang (jianwyan@microsoft.com), Xueyan Zou (xueyan@cs.wisc.edu)
# --------------------------------------------------------

from typing import Dict

from torch import nn

from detectron2.layers import ShapeSpec

from .registry import register_body
from .encoder import build_encoder
from .decoder import build_decoder
from ..utils import configurable


class XDecoderHead(nn.Module):

    @configurable
    def __init__(
        self,
        input_shape: Dict[str, ShapeSpec],
        *,
        num_classes: int,
        pixel_decoder: nn.Module,
        loss_weight: float = 1.0,
        ignore_value: int = -1,
        # extra parameters
        transformer_predictor: nn.Module,
        transformer_in_feature: str,
    ):
        """
        NOTE: this interface is experimental.
        Args:
            input_shape: shapes (channels and stride) of the input features
            num_classes: number of classes to predict
            pixel_decoder: the pixel decoder module
            loss_weight: loss weight
            ignore_value: category id to be ignored during training.
            transformer_predictor: the transformer decoder that makes prediction
            transformer_in_feature: input feature name to the transformer_predictor
        """
        super().__init__()

        input_shape = sorted(input_shape.items(), key=lambda x: x[1].stride)
        self.in_features = [k for k, v in input_shape]
        feature_strides = [v.stride for k, v in input_shape]
        feature_channels = [v.channels for k, v in input_shape]

        self.ignore_value = ignore_value
        self.common_stride = 4
        self.loss_weight = loss_weight

        self.pixel_decoder = pixel_decoder
        self.predictor = transformer_predictor
        self.transformer_in_feature = transformer_in_feature

        self.num_classes = num_classes

    @classmethod
    def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec], lang_encoder: nn.Module, extra: dict):

        in_features_type = cfg['MODEL']['DECODER']['TRANSFORMER_IN_FEATURE']
        enc_cfg = cfg['MODEL']['ENCODER']
        dec_cfg = cfg['MODEL']['DECODER']

        # figure out in_channels to transformer predictor
        if in_features_type == "transformer_encoder":
            transformer_predictor_in_channels = enc_cfg['CONVS_DIM']
        elif in_features_type == "pixel_embedding":
            transformer_predictor_in_channels = enc_cfg['MASK_DIM']
        elif in_features_type == "multi_scale_pixel_decoder":  # for maskformer2
            transformer_predictor_in_channels = enc_cfg['CONVS_DIM']
        else:
            transformer_predictor_in_channels = input_shape[dec_cfg['TRANSFORMER_IN_FEATURE']].channels

        return {
            "input_shape": {
                k: v for k, v in input_shape.items() if k in enc_cfg['IN_FEATURES']
            },
            "ignore_value": enc_cfg['IGNORE_VALUE'],
            "num_classes": enc_cfg.get('NUM_CLASSES', None),
            "pixel_decoder": build_encoder(cfg, input_shape),
            "loss_weight": enc_cfg['LOSS_WEIGHT'],
            "transformer_in_feature": dec_cfg['TRANSFORMER_IN_FEATURE'],
            "transformer_predictor": build_decoder(
                cfg,
                transformer_predictor_in_channels,
                lang_encoder,
                mask_classification=True,
                extra=extra,
            ),
        }

    def forward(self, features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
        return self.layers(features, mask, target_queries, target_vlp, task, extra)

    def layers(self, features, mask=None, target_queries=None, target_vlp=None, task='seg', extra={}):
        mask_features, transformer_encoder_features, multi_scale_features = self.pixel_decoder.forward_features(features)

        if self.transformer_in_feature == "multi_scale_pixel_decoder":
            predictions = self.predictor(multi_scale_features, mask_features, mask, target_queries, target_vlp, task, extra)
        else:
            if self.transformer_in_feature == "transformer_encoder":
                assert (
                    transformer_encoder_features is not None
                ), "Please use the TransformerEncoderPixelDecoder."
                predictions = self.predictor(transformer_encoder_features, mask_features, mask)
            elif self.transformer_in_feature == "pixel_embedding":
                predictions = self.predictor(mask_features, mask_features, mask)
            else:
                predictions = self.predictor(features[self.transformer_in_feature], mask_features, mask)
        return predictions


@register_body
def get_xdecoder_head(cfg, input_shape, lang_encoder, extra):
    return XDecoderHead(cfg, input_shape, lang_encoder, extra)
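
The branching in XDecoderHead.layers reduces to a small dispatch on transformer_in_feature. A sketch of just that routing, pulled out as a standalone function (the function itself and its argument names are illustrative, not part of the deleted file):

def route_predictor_input(transformer_in_feature, features,
                          mask_features, transformer_encoder_features,
                          multi_scale_features):
    if transformer_in_feature == "multi_scale_pixel_decoder":
        return multi_scale_features            # list of 3 scales (maskformer2 path)
    if transformer_in_feature == "transformer_encoder":
        assert transformer_encoder_features is not None, \
            "Please use the TransformerEncoderPixelDecoder."
        return transformer_encoder_features
    if transformer_in_feature == "pixel_embedding":
        return mask_features
    return features[transformer_in_feature]    # a raw backbone level, e.g. "res5"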
xdecoder/language/LangEncoder/__init__.py
DELETED
@@ -1,8 +0,0 @@
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from .build import build_lang_encoder
from .build import build_tokenizer

from .transformer import *
xdecoder/language/LangEncoder/build.py
DELETED
@@ -1,36 +0,0 @@
import os

from transformers import CLIPTokenizer, CLIPTokenizerFast
from transformers import AutoTokenizer

from .registry import lang_encoders
from .registry import is_lang_encoder


def build_lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
    model_name = config_encoder['NAME']

    if not is_lang_encoder(model_name):
        raise ValueError(f'Unknown model: {model_name}')

    return lang_encoders(model_name)(config_encoder, tokenizer, verbose, **kwargs)


def build_tokenizer(config_encoder):
    tokenizer = None
    os.environ['TOKENIZERS_PARALLELISM'] = 'true'
    if config_encoder['TOKENIZER'] == 'clip':
        pretrained_tokenizer = config_encoder.get(
            'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
        )
        tokenizer = CLIPTokenizer.from_pretrained(pretrained_tokenizer)
        tokenizer.add_special_tokens({'cls_token': tokenizer.eos_token})
    elif config_encoder['TOKENIZER'] == 'clip-fast':
        pretrained_tokenizer = config_encoder.get(
            'PRETRAINED_TOKENIZER', 'openai/clip-vit-base-patch32'
        )
        tokenizer = CLIPTokenizerFast.from_pretrained(pretrained_tokenizer, from_slow=True)
    else:
        tokenizer = AutoTokenizer.from_pretrained(config_encoder['TOKENIZER'])

    return tokenizer
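
Usage sketch for build_tokenizer with a minimal config dict; the keys mirror the ones the function reads, and downloading the CLIP tokenizer requires network access:

config_encoder = {
    'TOKENIZER': 'clip',
    'PRETRAINED_TOKENIZER': 'openai/clip-vit-base-patch32',
}
tokenizer = build_tokenizer(config_encoder)
out = tokenizer("a photo of a cat", padding='max_length',
                truncation=True, max_length=77, return_tensors='pt')
print(out['input_ids'].shape)  # torch.Size([1, 77])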
xdecoder/language/LangEncoder/registry.py
DELETED
@@ -1,18 +0,0 @@
_lang_encoders = {}


def register_lang_encoder(fn):
    module_name_split = fn.__module__.split('.')
    model_name = module_name_split[-1]

    _lang_encoders[model_name] = fn

    return fn


def lang_encoders(model_name):
    return _lang_encoders[model_name]


def is_lang_encoder(model_name):
    return model_name in _lang_encoders
xdecoder/language/LangEncoder/transformer.py
DELETED
@@ -1,222 +0,0 @@
from collections import OrderedDict
from typing import Tuple, Union
import logging
import os

import numpy as np
import torch
import torch.nn.functional as F
from torch import nn

from timm.models.layers import DropPath, trunc_normal_

from .registry import register_lang_encoder
from utils.distributed import is_main_process
from utils.model import register_norm_module

logger = logging.getLogger(__name__)


@register_norm_module
class LayerNorm(nn.Module):
    def __init__(self, hidden_size, eps=1e-12):
        """Construct a layernorm module in the TF style (epsilon inside the square root)."""
        super(LayerNorm, self).__init__()
        self.weight = nn.Parameter(torch.ones(hidden_size))
        self.bias = nn.Parameter(torch.zeros(hidden_size))
        self.variance_epsilon = eps

    def forward(self, x):
        pdtype = x.dtype
        x = x.float()
        u = x.mean(-1, keepdim=True)
        s = (x - u).pow(2).mean(-1, keepdim=True)
        x = (x - u) / torch.sqrt(s + self.variance_epsilon)
        return self.weight * x.to(pdtype) + self.bias


class QuickGELU(nn.Module):
    def forward(self, x: torch.Tensor):
        return x * torch.sigmoid(1.702 * x)


class ResidualAttentionBlock(nn.Module):
    def __init__(self,
                 d_model: int,
                 n_head: int,
                 attn_mask: torch.Tensor = None,
                 drop_path: float = 0.0):
        super().__init__()

        self.attn = nn.MultiheadAttention(d_model, n_head)
        self.ln_1 = LayerNorm(d_model)
        self.mlp = nn.Sequential(OrderedDict([
            ("c_fc", nn.Linear(d_model, d_model * 4)),
            ("gelu", QuickGELU()),
            ("c_proj", nn.Linear(d_model * 4, d_model))
        ]))
        self.ln_2 = LayerNorm(d_model)
        self.attn_mask = attn_mask
        self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()

    def attention(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
        self.attn_mask = self.attn_mask.to(dtype=x.dtype, device=x.device) \
            if self.attn_mask is not None else None

        return self.attn(
            x, x, x,
            key_padding_mask=key_padding_mask,
            need_weights=False,
            attn_mask=self.attn_mask
        )[0]

    def forward(self, x: torch.Tensor, key_padding_mask: torch.Tensor = None):
        x = x + self.drop_path(self.attention(self.ln_1(x), key_padding_mask=key_padding_mask))
        x = x + self.drop_path(self.mlp(self.ln_2(x)))
        return x


class Transformer(nn.Module):
    def __init__(self,
                 context_length: int,
                 vocab_size: int,
                 width: int,
                 layers: int,
                 heads: int,
                 drop_path: float = 0.0,
                 autogressive: bool = True):
        super().__init__()

        self.token_embedding = nn.Embedding(vocab_size, width)

        self.context_length = context_length
        self.positional_embedding = nn.Parameter(
            torch.empty(self.context_length, width)
        )

        self.width = width
        self.layers = layers
        self.autogressive = autogressive
        attn_mask = self.build_attention_mask() if autogressive else None
        dpr = [x.item() for x in torch.linspace(0, drop_path, layers)]  # stochastic depth decay rule
        self.resblocks = nn.ModuleList(
            [
                ResidualAttentionBlock(width, heads, attn_mask, dpr[i])
                for i in range(layers)
            ]
        )

        self.ln_final = LayerNorm(width)

        trunc_normal_(self.positional_embedding, std=.02)
        # nn.init.normal_(self.token_embedding, std=.02)
        trunc_normal_(self.token_embedding.weight, std=.02)
        self.apply(self._init_weights)

    @property
    def dim_out(self):
        return self.width

    def build_attention_mask(self):
        # lazily create causal attention mask, with full attention between the vision tokens
        # pytorch uses additive attention mask; fill with -inf
        mask = torch.empty(self.context_length, self.context_length)
        mask.fill_(float("-inf"))
        mask.triu_(1)  # zero out the lower diagonal
        return mask

    def _init_weights(self, m):
        if isinstance(m, (nn.Linear, nn.Conv2d)):
            if is_main_process():
                logger.info('=> init weight of Linear/Conv2d from trunc norm')
            trunc_normal_(m.weight, std=0.02)
            if m.bias is not None:
                if is_main_process():
                    logger.info('=> init bias of Linear/Conv2d to zeros')
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, (nn.LayerNorm, nn.BatchNorm2d)):
            nn.init.constant_(m.bias, 0)

    def load_pretrained(self, pretrained='', pretrained_layers=[], verbose=True):
        if os.path.isfile(pretrained):
            pretrained_dict = torch.load(pretrained, map_location='cpu')
            logging.info(f'=> loading pretrained model {pretrained}')
            model_dict = self.state_dict()
            stripped_key = lambda x: x[13:] if x.startswith('lang_encoder.') else x
            pretrained_dict = {
                stripped_key(k): v for k, v in pretrained_dict.items()
                if stripped_key(k) in model_dict.keys()
            }
            need_init_state_dict = {}
            for k, v in pretrained_dict.items():
                need_init = (
                    k.split('.')[0] in pretrained_layers
                    or pretrained_layers[0] == '*'
                )
                if need_init:
                    if verbose:
                        logger.info(f'=> init {k} from {pretrained}')

                    if 'positional_embedding' in k and v.size() != model_dict[k].size():
                        positional_embedding_pretrained = v
                        positional_embedding_current = model_dict[k]
                        L1, nH1 = positional_embedding_pretrained.size()
                        L2, nH2 = positional_embedding_current.size()
                        if nH1 != nH2:
                            logger.info(f"Error in loading {k}, passing")
                        else:
                            if L1 != L2:
                                logger.info(
                                    '=> load_pretrained: resized variant: {} to {}'
                                    .format((L1, nH1), (L2, nH2))
                                )

                                posemb = positional_embedding_pretrained.float()
                                posemb_grid = posemb.unsqueeze(dim=0).permute(0, 2, 1)
                                posemb_grid = torch.nn.functional.interpolate(posemb_grid, size=L2, mode='linear')
                                posemb_grid = posemb_grid.permute(0, 2, 1).squeeze(dim=0)
                                v = posemb_grid

                    need_init_state_dict[k] = v

            self.load_state_dict(need_init_state_dict, strict=False)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {
            'positional_embedding',
            'token_embedding',
        }

    def forward(self, input_ids, attention_mask=None):
        key_padding_mask = (attention_mask == 0) if (not self.autogressive and attention_mask is not None) else None
        # key_padding_mask = (input_ids == 0) if not self.autogressive else None
        x = self.token_embedding(input_ids)  # [batch_size, n_ctx, d_model]
        x = x + self.positional_embedding
        x = x.permute(1, 0, 2)  # NLD -> LND
        for block in self.resblocks:
            x = block(x, key_padding_mask)
        x = x.permute(1, 0, 2)  # LND -> NLD

        x = self.ln_final(x)

        return {'last_hidden_state': x}


@register_lang_encoder
def lang_encoder(config_encoder, tokenizer, verbose, **kwargs):
    transformer = Transformer(
        context_length=config_encoder['CONTEXT_LENGTH'],
        vocab_size=tokenizer.vocab_size,
        width=config_encoder['WIDTH'],
        layers=config_encoder['LAYERS'],
        heads=config_encoder['HEADS'],
        autogressive=config_encoder.get('AUTOGRESSIVE', True)
    )

    if config_encoder.get('LOAD_PRETRAINED', False):
        transformer.load_pretrained(config_encoder['PRETRAINED'], config_encoder.get('PRETRAINED_LAYERS', ['*']))
    return transformer
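
What build_attention_mask returns, shown concretely for a length-4 sequence: an additive causal mask, zero on and below the diagonal and -inf above it, so position i can only attend to positions <= i:

import torch

L = 4
mask = torch.empty(L, L).fill_(float("-inf")).triu_(1)
print(mask)
# tensor([[0., -inf, -inf, -inf],
#         [0., 0., -inf, -inf],
#         [0., 0., 0., -inf],
#         [0., 0., 0., 0.]])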
xdecoder/language/__init__.py
DELETED
@@ -1,3 +0,0 @@
from .fixvlpencoder import *
from .vlpencoder import *
from .build import build_language_encoder
xdecoder/language/build.py
DELETED
@@ -1,11 +0,0 @@
from .registry import model_entrypoints
from .registry import is_model


def build_language_encoder(config, **kwargs):
    model_name = config['MODEL']['TEXT']['ARCH']

    if not is_model(model_name):
        raise ValueError(f'Unknown model: {model_name}')

    return model_entrypoints(model_name)(config, **kwargs)
xdecoder/language/fixvlpencoder.py
DELETED
@@ -1,35 +0,0 @@
from importlib.metadata import requires
import torch
import torch.nn as nn

from .registry import register_model
from .vlpencoder import LanguageEncoder

class FixLanguageEncoder(LanguageEncoder):

    def __init__(
            self,
            *args, **kwargs):
        super(FixLanguageEncoder, self).__init__(*args, **kwargs)
        self.logit_scale = nn.Parameter(torch.ones([]), requires_grad=False)

    @torch.no_grad()
    def get_text_embeddings(self, *args, **kwargs):
        return super().get_text_embeddings(*args, **kwargs)

    @torch.no_grad()
    def get_text_token_embeddings(self, *args, **kwargs):
        return super().get_text_token_embeddings(*args, **kwargs)

    @torch.no_grad()
    def forward_language(self, *args, **kwargs):
        return super().forward_language(*args, **kwargs)

    @torch.no_grad()
    def forward_language_token(self, *args, **kwargs):
        return super().forward_language_token(*args, **kwargs)


@register_model
def get_language_model(cfg, **kwargs):
    return FixLanguageEncoder(cfg)
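
The freezing pattern used by FixLanguageEncoder in miniature: @torch.no_grad() on the forward paths plus requires_grad=False on logit_scale keeps the text tower out of the optimizer's reach without detaching it from the module tree. A minimal sketch (the Frozen class is illustrative only):

import torch
import torch.nn as nn

class Frozen(nn.Module):
    def __init__(self):
        super().__init__()
        self.proj = nn.Linear(4, 4)
        self.logit_scale = nn.Parameter(torch.ones([]), requires_grad=False)

    @torch.no_grad()
    def forward(self, x):
        return self.proj(x)

m = Frozen()
y = m(torch.randn(2, 4))
print(y.requires_grad)  # False: no graph is recorded under no_grad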
xdecoder/language/loss.py
DELETED
@@ -1,225 +0,0 @@
import pickle
from distutils import log

import torch
import torch.nn.functional as F
import torch.distributed as dist

from einops import rearrange, repeat
from timm.loss import SoftTargetCrossEntropy

soft_cross_entropy = SoftTargetCrossEntropy()

def is_dist_initialized():
    return torch.distributed.is_initialized()

def get_world_size():
    if is_dist_initialized():
        return torch.distributed.get_world_size()
    return 1

def get_rank():
    if is_dist_initialized():
        return dist.get_rank()
    return 0

def all_gather_grad(x):
    if get_world_size() > 1:
        all_x = [torch.zeros_like(x) for _ in range(get_world_size())]
        torch.distributed.all_gather(all_x, x)
        all_x[torch.distributed.get_rank()] = x
        x = torch.cat(all_x, dim=0)
    return x

def vl_multilabel_contrastive_loss(image_feat, text_feat, temperature=1):
    """
    Args:
        image_feat (torch.Tensor): shape [B, L1, C]  # B: batch_size, L1: 1, C: 256
        text_feat (torch.Tensor): shape [B, L2, C]  # B: batch_size, L2: number of selected nouns, C: 256

    Returns:
    """
    # [B, L1, C], L1 = 1
    # image_feat = F.normalize(image_feat, dim=-1)
    # [B, L2, C]
    # text_feat = F.normalize(text_feat, dim=-1)
    # HACK: normalize outside

    # [B, L1, L2]
    dist_per_img = image_feat @ rearrange(text_feat, 'b l c -> b c l')
    # [B, L2, L1]
    dist_per_text = text_feat @ rearrange(image_feat, 'b l c -> b c l')

    batch = image_feat.shape[0]
    img_len = image_feat.shape[1]
    text_len = text_feat.shape[1]
    # [B, L1, L2]
    pos_labels_batch_img = rearrange(torch.ones_like(dist_per_text) / dist_per_text.size(1), 'b l2 l1 -> b l1 l2')
    # [B, L2, L1]
    pos_labels_batch_text = rearrange(torch.ones_like(dist_per_img) / dist_per_img.size(1), 'b l1 l2 -> b l2 l1')

    image_x = rearrange(image_feat, 'b l c -> (b l) c')
    text_x = rearrange(text_feat, 'b l c -> (b l) c')

    logits_per_img = image_x @ all_gather_grad(text_x).t()
    logits_per_text = text_x @ all_gather_grad(image_x).t()

    # get label globally
    # [B, L1, B, L2, W]
    labels_per_img = F.one_hot(
        torch.ones(batch, img_len, batch, text_len, dtype=torch.long, device=image_x.device) * get_rank(),
        num_classes=get_world_size()).to(image_x.dtype)
    labels_per_img *= rearrange(pos_labels_batch_img, 'b l1 l2 -> b l1 1 l2 1') * repeat(
        torch.eye(batch, dtype=image_x.dtype, device=image_x.device), 'b1 b2 -> b1 1 b2 1 1')
    # [BxL1, WxBxL2]
    labels_per_img = rearrange(labels_per_img, 'b1 l1 b2 l2 w -> (b1 l1) (w b2 l2)')
    # [B, L2, B, L1, W]
    labels_per_text = F.one_hot(
        torch.ones(batch, text_len, batch, img_len, dtype=torch.long, device=text_x.device) * get_rank(),
        num_classes=get_world_size()).to(text_x.dtype)
    labels_per_text *= rearrange(pos_labels_batch_text, 'b l2 l1 -> b l2 1 l1 1') * repeat(
        torch.eye(batch, dtype=text_x.dtype, device=image_x.device), 'b2 b1 -> b2 1 b1 1 1')
    # [BxL2, WxBxL1]
    labels_per_text = rearrange(labels_per_text, 'b2 l2 b1 l1 w -> (b2 l2) (w b1 l1)')

    logit_scale = temperature.exp().clamp(max=100)

    loss_img = soft_cross_entropy(logit_scale * logits_per_img, labels_per_img)
    loss_text = soft_cross_entropy(logit_scale * logits_per_text, labels_per_text)

    loss = 0.5 * (loss_img + loss_text)
    return loss

def vl_contrastive_loss(image_feat, text_feat, temperature=1):
    # if image_id or text_id is None, it should be None across all GPUs
    # image_feat = F.normalize(image_feat, dim=1)
    # text_feat = F.normalize(text_feat, dim=1)
    # handle normalization outside

    # add the following 4 lines
    image_feat = all_gather_grad(image_feat)
    text_feat = all_gather_grad(text_feat)

    logits = torch.matmul(image_feat, text_feat.t())
    logit_scale = temperature.exp().clamp(max=100)

    gt = torch.arange(logits.shape[0], device=logits.device)
    loss1 = F.cross_entropy(logit_scale * logits, gt)
    loss2 = F.cross_entropy(logit_scale * logits.t(), gt)
    return (loss1 + loss2) / 2  # scale it up by the number of GPUs


def all_gather_pickle(data, device):
    """
    Run all_gather on arbitrary picklable data (not necessarily tensors)
    Args:
        data: any picklable object
    Returns:
        list[data]: list of data gathered from each rank
    """
    world_size = get_world_size()
    if world_size == 1:
        return [data]

    # serialized to a Tensor
    buffer = pickle.dumps(data)
    storage = torch.ByteStorage.from_buffer(buffer)
    tensor = torch.ByteTensor(storage).to(device)

    # obtain Tensor size of each rank
    local_size = torch.LongTensor([tensor.numel()]).cuda()
    size_list = [torch.LongTensor([0]).cuda() for _ in range(world_size)]
    dist.all_gather(size_list, local_size)
    size_list = [int(size.item()) for size in size_list]
    max_size = max(size_list)

    # receiving Tensor from all ranks
    # we pad the tensor because torch all_gather does not support
    # gathering tensors of different shapes
    tensor_list = []
    for _ in size_list:
        tensor_list.append(torch.ByteTensor(size=(max_size,)).cuda())
    if local_size != max_size:
        padding = torch.ByteTensor(size=(max_size - local_size,)).cuda()
        tensor = torch.cat((tensor, padding), dim=0)
    dist.all_gather(tensor_list, tensor)

    data_list = []
    for size, tensor in zip(size_list, tensor_list):
        buffer = tensor.cpu().numpy().tobytes()[:size]
        data_list.append(pickle.loads(buffer))

    return data_list

def all_gather_arbitary_tensor(tensor):
    if get_world_size() > 1:
        device = tensor.device
        tensor_batch = all_gather_pickle(tensor.cpu(), device)
        tensor_batch = [x.to(device) for x in tensor_batch]
        tensor_batch[torch.distributed.get_rank()] = tensor
        tensor_batch = torch.cat(tensor_batch, dim=0)
    else:
        tensor_batch = tensor
    return tensor_batch

def ql_contrastive_loss(image_feat, text_feat, temperature=1):
    # add the following 4 lines
    image_feat = all_gather_arbitary_tensor(image_feat)
    text_feat = all_gather_arbitary_tensor(text_feat)

    logits = torch.matmul(image_feat, text_feat.t())
    logit_scale = temperature.exp().clamp(max=100)

    gt = torch.arange(logits.shape[0], device=logits.device)
    loss1 = F.cross_entropy(logit_scale * logits, gt)
    loss2 = F.cross_entropy(logit_scale * logits.t(), gt)
    return (loss1 + loss2) / 2  # scale it up by the number of GPUs

def vl_similarity(image_feat, text_feat, temperature=1):
    # Only support single GPU for now.
    logits = torch.matmul(image_feat, text_feat.t())
    logits = temperature.exp().clamp(max=100) * logits
    return logits

def ql_multi_contrastive_loss(image_feat, text_feat, text_hash, temperature=1):
    # add the following 4 lines
    image_feat = all_gather_arbitary_tensor(image_feat)
    text_feat = all_gather_arbitary_tensor(text_feat)

    text_hash_batch = all_gather_pickle(text_hash, text_feat.device)
    text_hash_all = torch.cat(text_hash_batch)

    text_hash_all_unique = torch.unique(text_hash_all).tolist()
    gt = torch.zeros((image_feat.shape[0], len(text_hash_all_unique)), device=text_feat.device)
    text_hash_all = text_hash_all.tolist()
    text_feat_unique = torch.stack([text_feat[text_hash_all.index(txt)] for txt in text_hash_all_unique])

    for idx, txt in enumerate(text_hash_all):
        gt[idx][text_hash_all_unique.index(txt)] = 1

    logits = torch.matmul(image_feat, text_feat_unique.t())
    logits = logits * temperature.exp().clamp(max=100)

    loss_img = soft_cross_entropy(logits, gt)
    loss_text = soft_cross_entropy(logits.t(), gt.t() / gt.t().sum(-1, keepdim=True))

    loss = 0.7 * loss_img + 0.3 * loss_text
    return loss

def image_text_contrastive_loss_queue(image_feat_inp, text_feat_inp, lang_enc, training):
    # add the following 4 lines
    image_feat = all_gather_grad(image_feat_inp.contiguous())
    text_feat = all_gather_grad(text_feat_inp.contiguous())

    image_feat = image_feat / (image_feat.norm(dim=-1, keepdim=True) + 1e-7)
    text_feat = text_feat / (text_feat.norm(dim=-1, keepdim=True) + 1e-7)

    temperature = lang_enc.logit_scale
    logits = torch.matmul(image_feat, text_feat.t())
    logit_scale = temperature.exp().clamp(max=100)

    gt = torch.arange(logits.shape[0], device=logits.device)
    loss1 = F.cross_entropy(logit_scale * logits, gt)
    loss2 = F.cross_entropy(logit_scale * logits.t(), gt)

    return (loss1 + loss2) / 2  # scale it up by the number of GPUs
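
The symmetric InfoNCE recipe shared by vl_contrastive_loss and ql_contrastive_loss, reduced to a single-GPU sketch with random features (sizes and the temperature value are arbitrary):

import torch
import torch.nn.functional as F

B, C = 8, 256
image_feat = F.normalize(torch.randn(B, C), dim=-1)
text_feat = F.normalize(torch.randn(B, C), dim=-1)
temperature = torch.nn.Parameter(torch.ones([]) * 2.6)  # learnable log-temperature

logits = temperature.exp().clamp(max=100) * image_feat @ text_feat.t()
gt = torch.arange(B)  # matched pairs sit on the diagonal
loss = 0.5 * (F.cross_entropy(logits, gt) + F.cross_entropy(logits.t(), gt))
print(loss.item())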
xdecoder/language/misc.py
DELETED
@@ -1,64 +0,0 @@
import random

import nltk
nltk.data.path.append('/mnt/data/nltk_data')
import numpy as np

from utils.constants import IMAGENET_DEFAULT_TEMPLATES


def get_tag(tokenized, tags):
    if not isinstance(tags, (list, tuple)):
        tags = [tags]
    ret = []
    for (word, pos) in nltk.pos_tag(tokenized):
        for tag in tags:
            if pos == tag:
                ret.append(word)
    return ret

def get_noun_phrase(tokenized):
    # Taken from Su Nam Kim Paper...
    grammar = r"""
        NBAR:
            {<NN.*|JJ>*<NN.*>}  # Nouns and Adjectives, terminated with Nouns

        NP:
            {<NBAR>}
            {<NBAR><IN><NBAR>}  # Above, connected with in/of/etc...
    """
    chunker = nltk.RegexpParser(grammar)

    chunked = chunker.parse(nltk.pos_tag(tokenized))
    continuous_chunk = []
    current_chunk = []

    for subtree in chunked:
        if isinstance(subtree, nltk.Tree):
            current_chunk.append(' '.join([token for token, pos in subtree.leaves()]))
        elif current_chunk:
            named_entity = ' '.join(current_chunk)
            if named_entity not in continuous_chunk:
                continuous_chunk.append(named_entity)
            current_chunk = []
        else:
            continue

    return continuous_chunk

def text_noun_with_prompt_all(text, phrase_prob=0.0, append_text=True):
    tokenized = nltk.word_tokenize(text)

    if random.random() >= phrase_prob:
        nouns = get_tag(tokenized, ['NN', 'NNS', 'NNP'])
    else:
        nouns = get_noun_phrase(tokenized)

    prompt_texts = [np.random.choice(IMAGENET_DEFAULT_TEMPLATES).format(noun) for noun in nouns]

    if append_text:
        prompt_texts += [text]
        nouns += [text]

    return prompt_texts, nouns
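
Usage sketch for text_noun_with_prompt_all on one caption; it assumes the module above is importable and the nltk 'punkt' and 'averaged_perceptron_tagger' data are present locally:

# from xdecoder.language.misc import text_noun_with_prompt_all
prompts, nouns = text_noun_with_prompt_all("a dog chasing a ball", phrase_prob=0.0)
print(nouns)   # e.g. ['dog', 'ball', 'a dog chasing a ball']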
xdecoder/language/registry.py
DELETED
@@ -1,13 +0,0 @@
_model_entrypoints = {}

def register_model(fn):
    module_name_split = fn.__module__.split('.')
    model_name = module_name_split[-1]
    _model_entrypoints[model_name] = fn
    return fn

def model_entrypoints(model_name):
    return _model_entrypoints[model_name]

def is_model(model_name):
    return model_name in _model_entrypoints
xdecoder/language/vlpencoder.py
DELETED
@@ -1,168 +0,0 @@
import torch
from torch import nn
from torch.nn import functional as F

from timm.models.layers import trunc_normal_

from .registry import register_model
from ..utils import configurable
from .LangEncoder import build_tokenizer, build_lang_encoder
from utils.misc import prompt_engineering, get_prompt_templates


class LanguageEncoder(nn.Module):

    @configurable
    def __init__(
        self,
        tokenizer,
        tokenizer_type,
        lang_encoder,
        lang_projection,
        max_token_num,
    ):
        super().__init__()
        self.tokenizer = tokenizer
        self.tokenizer_type = tokenizer_type
        self.lang_encoder = lang_encoder
        self.lang_proj = lang_projection
        self.max_token_num = max_token_num
        self.logit_scale = nn.Parameter(torch.ones([]))

    @classmethod
    def from_config(cls, cfg):
        tokenizer = build_tokenizer(cfg['MODEL']['TEXT'])
        tokenizer_type = cfg['MODEL']['TEXT']['TOKENIZER']
        lang_encoder = build_lang_encoder(cfg['MODEL']['TEXT'], tokenizer, cfg['VERBOSE'])
        max_token_num = cfg['MODEL']['TEXT']['CONTEXT_LENGTH']

        dim_lang = cfg['MODEL']['TEXT']['WIDTH']
        dim_projection = cfg['MODEL']['DIM_PROJ']
        lang_projection = nn.Parameter(torch.empty(dim_lang, dim_projection))
        trunc_normal_(lang_projection, std=.02)

        return {
            "tokenizer": tokenizer,
            "tokenizer_type": tokenizer_type,
            "lang_encoder": lang_encoder,
            "lang_projection": lang_projection,
            "max_token_num": max_token_num,
        }

    def get_text_embeddings(self, class_names, name='default', is_eval=False, add_bgd=False, prompt=True, norm=True):
        if not is_eval:
            if prompt:
                # randomly sample one template
                arbitary_concepts = [
                    prompt_engineering(class_names[label].replace('-other','').replace('-merged','').replace('-stuff',''), topk=10000, suffix='.') \
                    for label in range(len(class_names))
                ]
                if add_bgd:
                    arbitary_concepts.append("A background in coco.")
            else:
                arbitary_concepts = class_names

            input_ids = []
            attention_masks = []
            for txt in arbitary_concepts:
                tokens = self.tokenizer(
                    txt, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt'
                )
                tokens['input_ids'].squeeze_()
                tokens['attention_mask'].squeeze_()

                input_ids.append(tokens['input_ids'])
                attention_masks.append(tokens['attention_mask'])

            arbitary_tokens = torch.stack(input_ids)
            arbitary_attention_masks = torch.stack(attention_masks)

            text_emb = self.forward_language((arbitary_tokens.cuda(), arbitary_attention_masks.cuda()), norm=norm)
            setattr(self, '{}_text_embeddings'.format(name), text_emb)
        else:
            with torch.no_grad():
                def extract_mean_emb(txts):
                    tokens = self.tokenizer(
                        txts, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt'
                    )
                    clss_embedding = self.forward_language((tokens['input_ids'].cuda(), tokens['attention_mask'].cuda()), norm=norm)
                    clss_embedding = clss_embedding.mean(dim=0)
                    clss_embedding /= clss_embedding.norm()
                    return clss_embedding

                templates = get_prompt_templates()
                clss_embeddings = []
                if prompt:
                    for clss in class_names:
                        txts = [template.format(clss.replace('-other','').replace('-merged','').replace('-stuff','')) for template in templates]
                        clss_embeddings.append(extract_mean_emb(txts))
                else:
                    clss_embeddings.append(extract_mean_emb(class_names))

                if add_bgd:
                    txts = ["A background in coco."]
                    clss_embeddings.append(extract_mean_emb(txts))

                text_emb = torch.stack(clss_embeddings, dim=0)
                setattr(self, '{}_text_embeddings'.format(name), text_emb)

    def get_text_token_embeddings(self, txts, name='default', token=False, norm=False):
        if not token:
            tokens = self.tokenizer(
                txts, padding='max_length', truncation=True, max_length=self.max_token_num, return_tensors='pt'
            )
            tokens = {key: value.cuda() for key, value in tokens.items()}
        else:
            tokens = txts
        token_emb, class_emb = self.forward_language_token((tokens['input_ids'], tokens['attention_mask']), norm=norm)
        ret = {"tokens": tokens,
               "token_emb": token_emb,
               "class_emb": class_emb,}
        setattr(self, '{}_token_embeddings'.format(name), ret)
        return ret

    def forward_language(self, texts, norm=True):
        x = self.lang_encoder(*texts)
        x = x['last_hidden_state']

        if self.tokenizer_type == 'clip':
            x = x[torch.arange(x.size(0)), texts[0].argmax(dim=-1)]
        else:
            x = x[:, 0]

        x = x @ self.lang_proj
        if norm:
            x = x / (x.norm(dim=-1, keepdim=True) + 1e-7)
        return x

    def forward_language_token(self, texts, norm=False):
        x = self.lang_encoder(*texts)
        token_x = x['last_hidden_state']

        if self.tokenizer_type == 'clip':
            class_x = token_x[torch.arange(token_x.size(0)), texts[0].argmax(dim=-1)]
        else:
            class_x = token_x[:, 0]

        class_x = class_x @ self.lang_proj
        token_x = token_x @ self.lang_proj

        if norm:
            class_x = class_x / (class_x.norm(dim=-1, keepdim=True) + 1e-7)
            token_x = token_x / (token_x.norm(dim=-1, keepdim=True) + 1e-7)

        return token_x, class_x

    def compute_similarity(self, v_emb, name='default', fake=False):
        if fake:
            return None
        v_emb = v_emb / (v_emb.norm(dim=-1, keepdim=True) + 1e-7)
        t_emb = getattr(self, '{}_text_embeddings'.format(name))
        output = self.logit_scale.exp() * v_emb @ t_emb.unsqueeze(0).transpose(1, 2)
        return output


@register_model
def get_language_model(cfg, **kwargs):
    return LanguageEncoder(cfg)
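For context, a sketch of how this encoder is consumed at evaluation time, assuming a constructed LanguageEncoder instance `lang_enc` and a CUDA device (names and shapes are illustrative):

import torch

# Cache the prompt-ensembled class embeddings under the name 'demo'.
lang_enc.get_text_embeddings(['cat', 'dog'], name='demo', is_eval=True, add_bgd=True)

# Score visual query embeddings against the cached text embeddings.
dim_proj = lang_enc.lang_proj.shape[1]
v_emb = torch.randn(2, 100, dim_proj).cuda()               # (batch, queries, DIM_PROJ)
logits = lang_enc.compute_similarity(v_emb, name='demo')   # (batch, queries, 2 classes + background)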
xdecoder/modules/__init__.py
DELETED
@@ -1,3 +0,0 @@
from .position_encoding import *
from .attention import *
from .postprocessing import *
xdecoder/modules/attention.py
DELETED
@@ -1,489 +0,0 @@
# Code copy from PyTorch, modified by Xueyan Zou

import warnings
from typing import Optional, Tuple

import torch
import torch.nn as nn
from torch import Tensor
from torch.nn.init import constant_, xavier_normal_, xavier_uniform_
from torch.nn.parameter import Parameter
from torch.overrides import has_torch_function, handle_torch_function
from torch.nn.functional import pad, linear, softmax, dropout


def multi_head_attention_forward(
    query: Tensor,
    key: Tensor,
    value: Tensor,
    embed_dim_to_check: int,
    num_heads: int,
    in_proj_weight: Tensor,
    in_proj_bias: Tensor,
    bias_k: Optional[Tensor],
    bias_v: Optional[Tensor],
    add_zero_attn: bool,
    dropout_p: float,
    out_proj_weight: Tensor,
    out_proj_bias: Tensor,
    training: bool = True,
    key_padding_mask: Optional[Tensor] = None,
    need_weights: bool = True,
    attn_mask: Optional[Tensor] = None,
    use_separate_proj_weight: bool = False,
    q_proj_weight: Optional[Tensor] = None,
    k_proj_weight: Optional[Tensor] = None,
    v_proj_weight: Optional[Tensor] = None,
    static_k: Optional[Tensor] = None,
    static_v: Optional[Tensor] = None,
) -> Tuple[Tensor, Optional[Tensor]]:
    r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        embed_dim_to_check: total dimension of the model.
        num_heads: parallel attention heads.
        in_proj_weight, in_proj_bias: input projection weight and bias.
        bias_k, bias_v: bias of the key and value sequences to be added at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
                       value sequences at dim=1.
        dropout_p: probability of an element to be zeroed.
        out_proj_weight, out_proj_bias: the output projection weight and bias.
        training: apply dropout if is ``True``.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. This is a binary mask. When the value is True,
            the corresponding value on the attention layer will be filled with -inf.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
            the batches while a 3D mask allows specifying a different mask for the entries of each batch.
        use_separate_proj_weight: the function accepts the proj. weights for query, key,
            and value in different forms. If false, in_proj_weight will be used, which is
            a combination of q_proj_weight, k_proj_weight, v_proj_weight.
        q_proj_weight, k_proj_weight, v_proj_weight, in_proj_bias: input projection weight and bias.
        static_k, static_v: static key and value used for attention operators.


    Shape:
        Inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the zero positions
          will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
        - attn_mask: 2D mask :math:`(L, S)` where L is the target sequence length, S is the source sequence length.
          3D mask :math:`(N*num_heads, L, S)` where N is the batch size, L is the target sequence length,
          S is the source sequence length. attn_mask ensures that position i is allowed to attend the unmasked
          positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
          is provided, it will be added to the attention weight.
        - static_k: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.
        - static_v: :math:`(N*num_heads, S, E/num_heads)`, where S is the source sequence length,
          N is the batch size, E is the embedding dimension. E/num_heads is the head dimension.

        Outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
    """
    tens_ops = (query, key, value, in_proj_weight, in_proj_bias, bias_k, bias_v, out_proj_weight, out_proj_bias)
    if has_torch_function(tens_ops):
        return handle_torch_function(
            multi_head_attention_forward,
            tens_ops,
            query,
            key,
            value,
            embed_dim_to_check,
            num_heads,
            in_proj_weight,
            in_proj_bias,
            bias_k,
            bias_v,
            add_zero_attn,
            dropout_p,
            out_proj_weight,
            out_proj_bias,
            training=training,
            key_padding_mask=key_padding_mask,
            need_weights=need_weights,
            attn_mask=attn_mask,
            use_separate_proj_weight=use_separate_proj_weight,
            q_proj_weight=q_proj_weight,
            k_proj_weight=k_proj_weight,
            v_proj_weight=v_proj_weight,
            static_k=static_k,
            static_v=static_v,
        )
    tgt_len, bsz, embed_dim = query.size()
    assert embed_dim == embed_dim_to_check
    # allow MHA to have different sizes for the feature dimension
    assert key.size(0) == value.size(0) and key.size(1) == value.size(1)

    head_dim = embed_dim // num_heads
    assert head_dim * num_heads == embed_dim, "embed_dim must be divisible by num_heads"
    scaling = float(head_dim) ** -0.5

    if not use_separate_proj_weight:
        if (query is key or torch.equal(query, key)) and (key is value or torch.equal(key, value)):
            # self-attention
            q, k, v = linear(query, in_proj_weight, in_proj_bias).chunk(3, dim=-1)

        elif key is value or torch.equal(key, value):
            # encoder-decoder attention
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = linear(query, _w, _b)

            if key is None:
                assert value is None
                k = None
                v = None
            else:
                # This is inline in_proj function with in_proj_weight and in_proj_bias
                _b = in_proj_bias
                _start = embed_dim
                _end = None
                _w = in_proj_weight[_start:, :]
                if _b is not None:
                    _b = _b[_start:]
                k, v = linear(key, _w, _b).chunk(2, dim=-1)

        else:
            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = 0
            _end = embed_dim
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            q = linear(query, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim
            _end = embed_dim * 2
            _w = in_proj_weight[_start:_end, :]
            if _b is not None:
                _b = _b[_start:_end]
            k = linear(key, _w, _b)

            # This is inline in_proj function with in_proj_weight and in_proj_bias
            _b = in_proj_bias
            _start = embed_dim * 2
            _end = None
            _w = in_proj_weight[_start:, :]
            if _b is not None:
                _b = _b[_start:]
            v = linear(value, _w, _b)
    else:
        q_proj_weight_non_opt = torch.jit._unwrap_optional(q_proj_weight)
        len1, len2 = q_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == query.size(-1)

        k_proj_weight_non_opt = torch.jit._unwrap_optional(k_proj_weight)
        len1, len2 = k_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == key.size(-1)

        v_proj_weight_non_opt = torch.jit._unwrap_optional(v_proj_weight)
        len1, len2 = v_proj_weight_non_opt.size()
        assert len1 == embed_dim and len2 == value.size(-1)

        if in_proj_bias is not None:
            q = linear(query, q_proj_weight_non_opt, in_proj_bias[0:embed_dim])
            k = linear(key, k_proj_weight_non_opt, in_proj_bias[embed_dim : (embed_dim * 2)])
            v = linear(value, v_proj_weight_non_opt, in_proj_bias[(embed_dim * 2) :])
        else:
            q = linear(query, q_proj_weight_non_opt, in_proj_bias)
            k = linear(key, k_proj_weight_non_opt, in_proj_bias)
            v = linear(value, v_proj_weight_non_opt, in_proj_bias)
    q = q * scaling

    if attn_mask is not None:
        assert (
            attn_mask.dtype == torch.float32
            or attn_mask.dtype == torch.float64
            or attn_mask.dtype == torch.float16
            or attn_mask.dtype == torch.uint8
            or attn_mask.dtype == torch.bool
        ), "Only float, byte, and bool types are supported for attn_mask, not {}".format(attn_mask.dtype)
        if attn_mask.dtype == torch.uint8:
            warnings.warn("Byte tensor for attn_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead.")
            attn_mask = attn_mask.to(torch.bool)

        if attn_mask.dim() == 2:
            attn_mask = attn_mask.unsqueeze(0)
            if list(attn_mask.size()) != [1, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 2D attn_mask is not correct.")
        elif attn_mask.dim() == 3:
            if list(attn_mask.size()) != [bsz * num_heads, query.size(0), key.size(0)]:
                raise RuntimeError("The size of the 3D attn_mask is not correct.")
        else:
            raise RuntimeError("attn_mask's dimension {} is not supported".format(attn_mask.dim()))
        # attn_mask's dim is 3 now.

    # convert ByteTensor key_padding_mask to bool
    if key_padding_mask is not None and key_padding_mask.dtype == torch.uint8:
        warnings.warn(
            "Byte tensor for key_padding_mask in nn.MultiheadAttention is deprecated. Use bool tensor instead."
        )
        key_padding_mask = key_padding_mask.to(torch.bool)

    if bias_k is not None and bias_v is not None:
        if static_k is None and static_v is None:
            k = torch.cat([k, bias_k.repeat(1, bsz, 1)])
            v = torch.cat([v, bias_v.repeat(1, bsz, 1)])
            if attn_mask is not None:
                attn_mask = pad(attn_mask, (0, 1))
            if key_padding_mask is not None:
                key_padding_mask = pad(key_padding_mask, (0, 1))
        else:
            assert static_k is None, "bias cannot be added to static key."
            assert static_v is None, "bias cannot be added to static value."
    else:
        assert bias_k is None
        assert bias_v is None

    q = q.contiguous().view(tgt_len, bsz * num_heads, head_dim).transpose(0, 1)
    if k is not None:
        k = k.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)
    if v is not None:
        v = v.contiguous().view(-1, bsz * num_heads, head_dim).transpose(0, 1)

    if static_k is not None:
        assert static_k.size(0) == bsz * num_heads
        assert static_k.size(2) == head_dim
        k = static_k

    if static_v is not None:
        assert static_v.size(0) == bsz * num_heads
        assert static_v.size(2) == head_dim
        v = static_v

    src_len = k.size(1)

    if key_padding_mask is not None:
        # assert key_padding_mask.size(0) == bsz
        assert key_padding_mask.size(1) == src_len

    if add_zero_attn:
        src_len += 1
        k = torch.cat([k, torch.zeros((k.size(0), 1) + k.size()[2:], dtype=k.dtype, device=k.device)], dim=1)
        v = torch.cat([v, torch.zeros((v.size(0), 1) + v.size()[2:], dtype=v.dtype, device=v.device)], dim=1)
        if attn_mask is not None:
            attn_mask = pad(attn_mask, (0, 1))
        if key_padding_mask is not None:
            key_padding_mask = pad(key_padding_mask, (0, 1))

    attn_output_weights = torch.bmm(q, k.transpose(1, 2))
    assert list(attn_output_weights.size()) == [bsz * num_heads, tgt_len, src_len]

    if attn_mask is not None:
        if attn_mask.dtype == torch.bool:
            attn_output_weights.masked_fill_(attn_mask, float("-inf"))
        else:
            attn_output_weights += attn_mask

    if key_padding_mask is not None:
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        attn_output_weights = attn_output_weights.masked_fill(
            key_padding_mask.unsqueeze(1),
            float("-inf"),
        )
        attn_output_weights = attn_output_weights.view(bsz * num_heads, tgt_len, src_len)

    attn_output_weights = softmax(attn_output_weights, dim=-1)
    attn_output_weights = dropout(attn_output_weights, p=dropout_p, training=training)

    attn_output = torch.bmm(attn_output_weights, v)
    assert list(attn_output.size()) == [bsz * num_heads, tgt_len, head_dim]
    attn_output = attn_output.transpose(0, 1).contiguous().view(tgt_len, bsz, embed_dim)
    attn_output = linear(attn_output, out_proj_weight, out_proj_bias)

    if need_weights:
        # average attention weights over heads
        attn_output_weights = attn_output_weights.view(bsz, num_heads, tgt_len, src_len)
        return attn_output, attn_output_weights.sum(dim=1) / num_heads
    else:
        return attn_output, None


# This class exists solely for Transformer; it has an annotation stating
# that bias is never None, which appeases TorchScript
class _LinearWithBias(nn.Linear):
    bias: Tensor  # type: ignore

    def __init__(self, in_features: int, out_features: int) -> None:
        super().__init__(in_features, out_features, bias=True)  # type: ignore


class MultiheadAttention(nn.Module):
    r"""Allows the model to jointly attend to information
    from different representation subspaces.
    See `Attention Is All You Need <https://arxiv.org/abs/1706.03762>`_

    .. math::
        \text{MultiHead}(Q, K, V) = \text{Concat}(head_1,\dots,head_h)W^O

    where :math:`head_i = \text{Attention}(QW_i^Q, KW_i^K, VW_i^V)`.

    Args:
        embed_dim: total dimension of the model.
        num_heads: parallel attention heads.
        dropout: a Dropout layer on attn_output_weights. Default: 0.0.
        bias: add bias as module parameter. Default: True.
        add_bias_kv: add bias to the key and value sequences at dim=0.
        add_zero_attn: add a new batch of zeros to the key and
                       value sequences at dim=1.
        kdim: total number of features in key. Default: None.
        vdim: total number of features in value. Default: None.

    Note that if :attr:`kdim` and :attr:`vdim` are None, they will be set
    to :attr:`embed_dim` such that query, key, and value have the same
    number of features.

    Examples::

        >>> multihead_attn = nn.MultiheadAttention(embed_dim, num_heads)
        >>> attn_output, attn_output_weights = multihead_attn(query, key, value)
    """
    bias_k: Optional[torch.Tensor]
    bias_v: Optional[torch.Tensor]

    def __init__(self, embed_dim, num_heads, dropout=0., bias=True, add_bias_kv=False, add_zero_attn=False, kdim=None, vdim=None):
        super(MultiheadAttention, self).__init__()
        self.embed_dim = embed_dim
        self.kdim = kdim if kdim is not None else embed_dim
        self.vdim = vdim if vdim is not None else embed_dim
        self._qkv_same_embed_dim = self.kdim == embed_dim and self.vdim == embed_dim

        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        assert self.head_dim * num_heads == self.embed_dim, "embed_dim must be divisible by num_heads"

        if self._qkv_same_embed_dim is False:
            self.q_proj_weight = Parameter(torch.Tensor(embed_dim, embed_dim))
            self.k_proj_weight = Parameter(torch.Tensor(embed_dim, self.kdim))
            self.v_proj_weight = Parameter(torch.Tensor(embed_dim, self.vdim))
            self.register_parameter('in_proj_weight', None)
        else:
            self.in_proj_weight = Parameter(torch.empty(3 * embed_dim, embed_dim))
            self.register_parameter('q_proj_weight', None)
            self.register_parameter('k_proj_weight', None)
            self.register_parameter('v_proj_weight', None)

        if bias:
            self.in_proj_bias = Parameter(torch.empty(3 * embed_dim))
        else:
            self.register_parameter('in_proj_bias', None)
        self.out_proj = _LinearWithBias(embed_dim, embed_dim)

        if add_bias_kv:
            self.bias_k = Parameter(torch.empty(1, 1, embed_dim))
            self.bias_v = Parameter(torch.empty(1, 1, embed_dim))
        else:
            self.bias_k = self.bias_v = None

        self.add_zero_attn = add_zero_attn

        self._reset_parameters()

    def _reset_parameters(self):
        if self._qkv_same_embed_dim:
            xavier_uniform_(self.in_proj_weight)
        else:
            xavier_uniform_(self.q_proj_weight)
            xavier_uniform_(self.k_proj_weight)
            xavier_uniform_(self.v_proj_weight)

        if self.in_proj_bias is not None:
            constant_(self.in_proj_bias, 0.)
            constant_(self.out_proj.bias, 0.)
        if self.bias_k is not None:
            xavier_normal_(self.bias_k)
        if self.bias_v is not None:
            xavier_normal_(self.bias_v)

    def __setstate__(self, state):
        # Support loading old MultiheadAttention checkpoints generated by v1.1.0
        if '_qkv_same_embed_dim' not in state:
            state['_qkv_same_embed_dim'] = True

        super(MultiheadAttention, self).__setstate__(state)

    def forward(self, query: Tensor, key: Tensor, value: Tensor, key_padding_mask: Optional[Tensor] = None,
                need_weights: bool = True, attn_mask: Optional[Tensor] = None) -> Tuple[Tensor, Optional[Tensor]]:
        r"""
    Args:
        query, key, value: map a query and a set of key-value pairs to an output.
            See "Attention Is All You Need" for more details.
        key_padding_mask: if provided, specified padding elements in the key will
            be ignored by the attention. When given a binary mask and a value is True,
            the corresponding value on the attention layer will be ignored. When given
            a byte mask and a value is non-zero, the corresponding value on the attention
            layer will be ignored.
        need_weights: output attn_output_weights.
        attn_mask: 2D or 3D mask that prevents attention to certain positions. A 2D mask will be broadcast for all
            the batches while a 3D mask allows specifying a different mask for the entries of each batch.

    Shapes for inputs:
        - query: :math:`(L, N, E)` where L is the target sequence length, N is the batch size, E is
          the embedding dimension.
        - key: :math:`(S, N, E)`, where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - value: :math:`(S, N, E)` where S is the source sequence length, N is the batch size, E is
          the embedding dimension.
        - key_padding_mask: :math:`(N, S)` where N is the batch size, S is the source sequence length.
          If a ByteTensor is provided, the non-zero positions will be ignored while the
          zero positions will be unchanged. If a BoolTensor is provided, the positions with the
          value of ``True`` will be ignored while the positions with the value of ``False`` will be unchanged.
        - attn_mask: if a 2D mask: :math:`(L, S)` where L is the target sequence length, S is the
          source sequence length.

          If a 3D mask: :math:`(N\cdot\text{num\_heads}, L, S)` where N is the batch size, L is the target sequence
          length, S is the source sequence length. ``attn_mask`` ensures that position i is allowed to attend
          the unmasked positions. If a ByteTensor is provided, the non-zero positions are not allowed to attend
          while the zero positions will be unchanged. If a BoolTensor is provided, positions with ``True``
          are not allowed to attend while ``False`` values will be unchanged. If a FloatTensor
          is provided, it will be added to the attention weight.

    Shapes for outputs:
        - attn_output: :math:`(L, N, E)` where L is the target sequence length, N is the batch size,
          E is the embedding dimension.
        - attn_output_weights: :math:`(N, L, S)` where N is the batch size,
          L is the target sequence length, S is the source sequence length.
        """
        if not self._qkv_same_embed_dim:
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask, use_separate_proj_weight=True,
                q_proj_weight=self.q_proj_weight, k_proj_weight=self.k_proj_weight,
                v_proj_weight=self.v_proj_weight)
        else:
            return multi_head_attention_forward(
                query, key, value, self.embed_dim, self.num_heads,
                self.in_proj_weight, self.in_proj_bias,
                self.bias_k, self.bias_v, self.add_zero_attn,
                self.dropout, self.out_proj.weight, self.out_proj.bias,
                training=self.training,
                key_padding_mask=key_padding_mask, need_weights=need_weights,
                attn_mask=attn_mask)
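Behaviorally this mirrors torch.nn.MultiheadAttention with sequence-first tensors; a minimal smoke test under that assumption, with the class above in scope:

import torch

attn = MultiheadAttention(embed_dim=256, num_heads=8, dropout=0.1)

q = torch.randn(100, 2, 256)    # (target_len, batch, embed_dim)
kv = torch.randn(37, 2, 256)    # (source_len, batch, embed_dim)
out, weights = attn(q, kv, kv, need_weights=True)
assert out.shape == (100, 2, 256)
assert weights.shape == (2, 100, 37)   # attention maps averaged over heads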
xdecoder/modules/position_encoding.py
DELETED
@@ -1,64 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
## Modified by Bowen Cheng from: https://github.com/facebookresearch/detr/blob/master/models/position_encoding.py
"""
Various positional encodings for the transformer.
"""
import math

import torch
from torch import nn


class PositionEmbeddingSine(nn.Module):
    """
    This is a more standard version of the position embedding, very similar to the one
    used by the Attention is all you need paper, generalized to work on images.
    """

    def __init__(self, num_pos_feats=64, temperature=10000, normalize=False, scale=None):
        super().__init__()
        self.num_pos_feats = num_pos_feats
        self.temperature = temperature
        self.normalize = normalize
        if scale is not None and normalize is False:
            raise ValueError("normalize should be True if scale is passed")
        if scale is None:
            scale = 2 * math.pi
        self.scale = scale

    def forward(self, x, mask=None):
        if mask is None:
            mask = torch.zeros((x.size(0), x.size(2), x.size(3)), device=x.device, dtype=torch.bool)
        not_mask = ~mask
        y_embed = not_mask.cumsum(1, dtype=x.dtype)
        x_embed = not_mask.cumsum(2, dtype=x.dtype)
        if self.normalize:
            eps = 1e-6
            y_embed = y_embed / (y_embed[:, -1:, :] + eps) * self.scale
            x_embed = x_embed / (x_embed[:, :, -1:] + eps) * self.scale

        dim_t = torch.arange(self.num_pos_feats, dtype=x.dtype, device=x.device)
        dim_t = self.temperature ** (2 * (dim_t // 2) / self.num_pos_feats)

        pos_x = x_embed[:, :, :, None] / dim_t
        pos_y = y_embed[:, :, :, None] / dim_t
        pos_x = torch.stack(
            (pos_x[:, :, :, 0::2].sin(), pos_x[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos_y = torch.stack(
            (pos_y[:, :, :, 0::2].sin(), pos_y[:, :, :, 1::2].cos()), dim=4
        ).flatten(3)
        pos = torch.cat((pos_y, pos_x), dim=3).permute(0, 3, 1, 2)
        return pos

    def __repr__(self, _repr_indent=4):
        head = "Positional encoding " + self.__class__.__name__
        body = [
            "num_pos_feats: {}".format(self.num_pos_feats),
            "temperature: {}".format(self.temperature),
            "normalize: {}".format(self.normalize),
            "scale: {}".format(self.scale),
        ]
        # _repr_indent = 4
        lines = [head] + [" " * _repr_indent + line for line in body]
        return "\n".join(lines)
xdecoder/modules/postprocessing.py
DELETED
@@ -1,122 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch.nn import functional as F

from detectron2.structures import Instances, ROIMasks


# perhaps should rename to "resize_instance"
def detector_postprocess(
    results: Instances, output_height: int, output_width: int, mask_threshold: float = 0.5
):
    """
    Resize the output instances.
    The input images are often resized when entering an object detector.
    As a result, we often need the outputs of the detector in a different
    resolution from its inputs.

    This function will resize the raw outputs of an R-CNN detector
    to produce outputs according to the desired output resolution.

    Args:
        results (Instances): the raw outputs from the detector.
            `results.image_size` contains the input image resolution the detector sees.
            This object might be modified in-place.
        output_height, output_width: the desired output resolution.

    Returns:
        Instances: the resized output from the model, based on the output resolution
    """
    if isinstance(output_width, torch.Tensor):
        # This shape might (but not necessarily) be tensors during tracing.
        # Converts integer tensors to float temporaries to ensure true
        # division is performed when computing scale_x and scale_y.
        output_width_tmp = output_width.float()
        output_height_tmp = output_height.float()
        new_size = torch.stack([output_height, output_width])
    else:
        new_size = (output_height, output_width)
        output_width_tmp = output_width
        output_height_tmp = output_height

    scale_x, scale_y = (
        output_width_tmp / results.image_size[1],
        output_height_tmp / results.image_size[0],
    )
    results = Instances(new_size, **results.get_fields())

    if results.has("pred_boxes"):
        output_boxes = results.pred_boxes
    elif results.has("proposal_boxes"):
        output_boxes = results.proposal_boxes
    else:
        output_boxes = None
    assert output_boxes is not None, "Predictions must contain boxes!"

    output_boxes.scale(scale_x, scale_y)
    output_boxes.clip(results.image_size)

    results = results[output_boxes.nonempty()]

    if results.has("pred_masks"):
        if isinstance(results.pred_masks, ROIMasks):
            roi_masks = results.pred_masks
        else:
            # pred_masks is a tensor of shape (N, 1, M, M)
            roi_masks = ROIMasks(results.pred_masks[:, 0, :, :])
        results.pred_masks = roi_masks.to_bitmasks(
            results.pred_boxes, output_height, output_width, mask_threshold
        ).tensor  # TODO return ROIMasks/BitMask object in the future

    if results.has("pred_keypoints"):
        results.pred_keypoints[:, :, 0] *= scale_x
        results.pred_keypoints[:, :, 1] *= scale_y

    return results

def bbox_postprocess(result, input_size, img_size, output_height, output_width):
    """
    result: [xc,yc,w,h] in range [0,1], converted to [x1,y1,x2,y2] in range [0,w], [0,h]
    """
    if result is None:
        return None

    scale = torch.tensor([input_size[1], input_size[0], input_size[1], input_size[0]])[None,:].to(result.device)
    result = result.sigmoid() * scale
    x1,y1,x2,y2 = result[:,0] - result[:,2]/2, result[:,1] - result[:,3]/2, result[:,0] + result[:,2]/2, result[:,1] + result[:,3]/2
    h,w = img_size

    x1 = x1.clamp(min=0, max=w)
    y1 = y1.clamp(min=0, max=h)
    x2 = x2.clamp(min=0, max=w)
    y2 = y2.clamp(min=0, max=h)

    box = torch.stack([x1,y1,x2,y2]).permute(1,0)
    scale = torch.tensor([output_width/w, output_height/h, output_width/w, output_height/h])[None,:].to(result.device)
    box = box*scale
    return box

def sem_seg_postprocess(result, img_size, output_height, output_width):
    """
    Return semantic segmentation predictions in the original resolution.

    The input images are often resized when entering the semantic segmentor. Moreover, in some
    cases, they are also padded inside the segmentor to be divisible by the maximum network stride.
    As a result, we often need the predictions of the segmentor in a different
    resolution from its inputs.

    Args:
        result (Tensor): semantic segmentation prediction logits. A tensor of shape (C, H, W),
            where C is the number of classes, and H, W are the height and width of the prediction.
        img_size (tuple): image size that segmentor is taking as input.
        output_height, output_width: the desired output resolution.

    Returns:
        semantic segmentation prediction (Tensor): A tensor of the shape
        (C, output_height, output_width) that contains per-pixel soft predictions.
    """
    result = result[:, : img_size[0], : img_size[1]].expand(1, -1, -1, -1)
    result = F.interpolate(
        result, size=(output_height, output_width), mode="bilinear", align_corners=False
    )[0]
    return result
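As a shape check for the semantic branch, a sketch assuming the function above is in scope and an input that was padded from 480x640 to 512x640 (class count and sizes are illustrative):

import torch

logits = torch.randn(133, 512, 640)                       # (C, H_pad, W_pad) model output
out = sem_seg_postprocess(logits, (480, 640), 960, 1280)  # crop the padding, then resize
assert out.shape == (133, 960, 1280)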
xdecoder/utils/__init__.py
DELETED
@@ -1,4 +0,0 @@
from .config import *
from .misc import *
from .box_ops import *
from .it_contrastive import *
xdecoder/utils/box_ops.py
DELETED
@@ -1,93 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
"""
Utilities for bounding box manipulation and GIoU.
"""
import torch
from torchvision.ops.boxes import box_area


def box_cxcywh_to_xyxy(x):
    x_c, y_c, w, h = x.unbind(-1)
    b = [(x_c - 0.5 * w), (y_c - 0.5 * h),
         (x_c + 0.5 * w), (y_c + 0.5 * h)]
    return torch.stack(b, dim=-1)


def box_xyxy_to_cxcywh(x):
    x0, y0, x1, y1 = x.unbind(-1)
    b = [(x0 + x1) / 2, (y0 + y1) / 2,
         (x1 - x0), (y1 - y0)]
    return torch.stack(b, dim=-1)

def box_xywh_to_xyxy(x):
    x0, y0, x1, y1 = x.unbind(-1)
    b = [x0, y0, (x0 + x1), (y0 + y1)]
    return torch.stack(b, dim=-1)


# modified from torchvision to also return the union
def box_iou(boxes1, boxes2):
    area1 = box_area(boxes1)
    area2 = box_area(boxes2)

    lt = torch.max(boxes1[:, None, :2], boxes2[:, :2])  # [N,M,2]
    rb = torch.min(boxes1[:, None, 2:], boxes2[:, 2:])  # [N,M,2]

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    inter = wh[:, :, 0] * wh[:, :, 1]  # [N,M]

    union = area1[:, None] + area2 - inter

    iou = inter / union
    return iou, union


def generalized_box_iou(boxes1, boxes2):
    """
    Generalized IoU from https://giou.stanford.edu/

    The boxes should be in [x0, y0, x1, y1] format

    Returns a [N, M] pairwise matrix, where N = len(boxes1)
    and M = len(boxes2)
    """
    # degenerate boxes give inf / nan results
    # so do an early check
    assert (boxes1[:, 2:] >= boxes1[:, :2]).all()
    assert (boxes2[:, 2:] >= boxes2[:, :2]).all()
    iou, union = box_iou(boxes1, boxes2)

    lt = torch.min(boxes1[:, None, :2], boxes2[:, :2])
    rb = torch.max(boxes1[:, None, 2:], boxes2[:, 2:])

    wh = (rb - lt).clamp(min=0)  # [N,M,2]
    area = wh[:, :, 0] * wh[:, :, 1]

    return iou - (area - union) / area


def masks_to_boxes(masks):
    """Compute the bounding boxes around the provided masks

    The masks should be in format [N, H, W] where N is the number of masks, (H, W) are the spatial dimensions.

    Returns a [N, 4] tensor, with the boxes in xyxy format
    """
    if masks.numel() == 0:
        return torch.zeros((0, 4), device=masks.device)

    h, w = masks.shape[-2:]

    y = torch.arange(0, h, dtype=torch.float)
    x = torch.arange(0, w, dtype=torch.float)
    y, x = torch.meshgrid(y, x)

    x_mask = (masks * x.unsqueeze(0))
    x_max = x_mask.flatten(1).max(-1)[0]
    x_min = x_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    y_mask = (masks * y.unsqueeze(0))
    y_max = y_mask.flatten(1).max(-1)[0]
    y_min = y_mask.masked_fill(~(masks.bool()), 1e8).flatten(1).min(-1)[0]

    return torch.stack([x_min, y_min, x_max, y_max], 1)
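A worked example of the IoU/GIoU math, assuming the functions above are in scope:

import torch

b1 = torch.tensor([[0., 0., 2., 2.]])   # xyxy
b2 = torch.tensor([[1., 1., 3., 3.]])

iou, union = box_iou(b1, b2)         # intersection 1, union 7 -> IoU = 1/7
giou = generalized_box_iou(b1, b2)   # enclosing area 9 -> 1/7 - (9-7)/9, about -0.079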
xdecoder/utils/config.py
DELETED
@@ -1,140 +0,0 @@
# -*- coding: utf-8 -*-
# Copyright (c) Facebook, Inc. and its affiliates.

import functools
import inspect

def configurable(init_func=None, *, from_config=None):
    """
    Decorate a function or a class's __init__ method so that it can be called
    with a :class:`CfgNode` object using a :func:`from_config` function that translates
    :class:`CfgNode` to arguments.

    Examples:
    ::
        # Usage 1: Decorator on __init__:
        class A:
            @configurable
            def __init__(self, a, b=2, c=3):
                pass

            @classmethod
            def from_config(cls, cfg):   # 'cfg' must be the first argument
                # Returns kwargs to be passed to __init__
                return {"a": cfg.A, "b": cfg.B}

        a1 = A(a=1, b=2)  # regular construction
        a2 = A(cfg)       # construct with a cfg
        a3 = A(cfg, b=3, c=4)  # construct with extra overwrite

        # Usage 2: Decorator on any function. Needs an extra from_config argument:
        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
        def a_func(a, b=2, c=3):
            pass

        a1 = a_func(a=1, b=2)  # regular call
        a2 = a_func(cfg)       # call with a cfg
        a3 = a_func(cfg, b=3, c=4)  # call with extra overwrite

    Args:
        init_func (callable): a class's ``__init__`` method in usage 1. The
            class must have a ``from_config`` classmethod which takes `cfg` as
            the first argument.
        from_config (callable): the from_config function in usage 2. It must take `cfg`
            as its first argument.
    """

    if init_func is not None:
        assert (
            inspect.isfunction(init_func)
            and from_config is None
            and init_func.__name__ == "__init__"
        ), "Incorrect use of @configurable. Check API documentation for examples."

        @functools.wraps(init_func)
        def wrapped(self, *args, **kwargs):
            try:
                from_config_func = type(self).from_config
            except AttributeError as e:
                raise AttributeError(
                    "Class with @configurable must have a 'from_config' classmethod."
                ) from e
            if not inspect.ismethod(from_config_func):
                raise TypeError("Class with @configurable must have a 'from_config' classmethod.")

            if _called_with_cfg(*args, **kwargs):
                explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
                init_func(self, **explicit_args)
            else:
                init_func(self, *args, **kwargs)

        return wrapped

    else:
        if from_config is None:
            return configurable  # @configurable() is made equivalent to @configurable
        assert inspect.isfunction(
            from_config
        ), "from_config argument of configurable must be a function!"

        def wrapper(orig_func):
            @functools.wraps(orig_func)
            def wrapped(*args, **kwargs):
                if _called_with_cfg(*args, **kwargs):
                    explicit_args = _get_args_from_config(from_config, *args, **kwargs)
                    return orig_func(**explicit_args)
                else:
                    return orig_func(*args, **kwargs)

            wrapped.from_config = from_config
            return wrapped

        return wrapper

def _called_with_cfg(*args, **kwargs):
    """
    Returns:
        bool: whether the arguments contain CfgNode and should be considered
        forwarded to from_config.
    """
    from omegaconf import DictConfig

    if len(args) and isinstance(args[0], (dict)):
        return True
    if isinstance(kwargs.pop("cfg", None), (dict)):
        return True
    # `from_config`'s first argument is forced to be "cfg".
    # So the above check covers all cases.
    return False

def _get_args_from_config(from_config_func, *args, **kwargs):
    """
    Use `from_config` to obtain explicit arguments.

    Returns:
        dict: arguments to be used for cls.__init__
    """
    signature = inspect.signature(from_config_func)
    if list(signature.parameters.keys())[0] != "cfg":
        if inspect.isfunction(from_config_func):
            name = from_config_func.__name__
        else:
            name = f"{from_config_func.__self__}.from_config"
        raise TypeError(f"{name} must take 'cfg' as the first argument!")
    support_var_arg = any(
        param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
        for param in signature.parameters.values()
    )
    if support_var_arg:  # forward all arguments to from_config, if from_config accepts them
        ret = from_config_func(*args, **kwargs)
    else:
        # forward supported arguments to from_config
        supported_arg_names = set(signature.parameters.keys())
        extra_kwargs = {}
        for name in list(kwargs.keys()):
            if name not in supported_arg_names:
                extra_kwargs[name] = kwargs.pop(name)
        ret = from_config_func(*args, **kwargs)
        # forward the other arguments to __init__
        ret.update(extra_kwargs)
    return ret
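A runnable variant of the docstring's Usage 1, with a plain dict standing in for the CfgNode (this module treats any dict as a config); Head, DIM, and NUM_CLASSES are hypothetical names, and the decorator above is assumed to be in scope:

class Head:
    @configurable
    def __init__(self, dim, num_classes=80):
        self.dim, self.num_classes = dim, num_classes

    @classmethod
    def from_config(cls, cfg):   # 'cfg' must be the first argument
        return {"dim": cfg['DIM'], "num_classes": cfg['NUM_CLASSES']}

h1 = Head(dim=256)                            # regular construction
h2 = Head({'DIM': 512, 'NUM_CLASSES': 133})   # routed through from_config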
xdecoder/utils/it_contrastive.py
DELETED
@@ -1,59 +0,0 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

def is_dist_initialized():
    return torch.distributed.is_initialized()

def get_world_size():
    if is_dist_initialized():
        return torch.distributed.get_world_size()
    return 1

def all_gather_grad(x):
    if get_world_size() > 1:
        all_x = [torch.zeros_like(x) for _ in range(get_world_size())]
        torch.distributed.all_gather(all_x, x)
        all_x[torch.distributed.get_rank()] = x
        x = torch.cat(all_x, dim=0)
    return x

@torch.no_grad()
def all_gather_nograd(tensor):
    # from albef
    """
    Performs all_gather operation on the provided tensors.
    *** Warning ***: torch.distributed.all_gather has no gradient.
    """
    if get_world_size() > 1:
        tensors_gather = [torch.ones_like(tensor)
                          for _ in range(torch.distributed.get_world_size())]
        torch.distributed.all_gather(tensors_gather, tensor, async_op=False)

        tensor = torch.cat(tensors_gather, dim=0)
    return tensor

def image_text_contrastive_loss(image_feat, text_feat, temperature, image_id=None, text_id=None):
    # add the following 4 lines
    image_feat = all_gather_grad(image_feat)
    text_feat = all_gather_grad(text_feat)

    logits = torch.matmul(image_feat, text_feat.t())
    logits /= temperature

    if image_id is None and text_id is None:
        gt = torch.arange(logits.shape[0], device=logits.device)
        loss1 = F.cross_entropy(logits, gt)
        loss2 = F.cross_entropy(logits.t(), gt)
    else:
        image_id = all_gather_grad(image_id)
        text_id = all_gather_grad(text_id)

        gt_image = image_id.reshape((-1, 1)) == image_id.reshape((1, -1))
        gt_text = text_id.reshape((-1, 1)) == text_id.reshape((1, -1))
        gt = torch.logical_or(gt_image, gt_text)

        loss1 = -torch.sum(gt * F.log_softmax(logits, dim=1)) / gt.sum()
        loss2 = -torch.sum(gt.t() * F.log_softmax(logits.t(), dim=1)) / gt.sum()

    return (loss1 + loss2) / 2 * get_world_size()  # scale it up by the number of GPUs
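A single-process sketch of the loss (world size 1, so the gather calls are no-ops); assumes a PyTorch build with distributed support available but not initialized, and the function above in scope:

import torch
import torch.nn.functional as F

img = F.normalize(torch.randn(8, 512), dim=-1)   # unit-norm image features
txt = F.normalize(torch.randn(8, 512), dim=-1)   # unit-norm text features
loss = image_text_contrastive_loss(img, txt, temperature=0.07)
# diagonal pairs are the positives; the loss is symmetric over both directions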
xdecoder/utils/misc.py
DELETED
@@ -1,157 +0,0 @@
# Copyright (c) Facebook, Inc. and its affiliates.
# Modified by Bowen Cheng from https://github.com/facebookresearch/detr/blob/master/util/misc.py
# Modified by Xueyan Zou
"""
Misc functions, including distributed helpers.

Mostly copy-paste from torchvision references.
"""
from typing import List, Optional

import torch
import torch.distributed as dist
import torchvision
from torch import Tensor

def _max_by_axis(the_list):
    # type: (List[List[int]]) -> List[int]
    maxes = the_list[0]
    for sublist in the_list[1:]:
        for index, item in enumerate(sublist):
            maxes[index] = max(maxes[index], item)
    return maxes

class NestedTensor(object):
    def __init__(self, tensors, mask: Optional[Tensor]):
        self.tensors = tensors
        self.mask = mask

    def to(self, device):
        # type: (Device) -> NestedTensor # noqa
        cast_tensor = self.tensors.to(device)
        mask = self.mask
        if mask is not None:
            assert mask is not None
            cast_mask = mask.to(device)
        else:
            cast_mask = None
        return NestedTensor(cast_tensor, cast_mask)

    def decompose(self):
        return self.tensors, self.mask

    def __repr__(self):
        return str(self.tensors)

def nested_tensor_from_tensor_list(tensor_list: List[Tensor]):
    # TODO make this more general
    if tensor_list[0].ndim == 3:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(img.shape) for img in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, h, w = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, h, w), dtype=torch.bool, device=device)
        for img, pad_img, m in zip(tensor_list, tensor, mask):
            pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
            m[: img.shape[1], : img.shape[2]] = False
    elif tensor_list[0].ndim == 2:
        if torchvision._is_tracing():
            # nested_tensor_from_tensor_list() does not export well to ONNX
            # call _onnx_nested_tensor_from_tensor_list() instead
            return _onnx_nested_tensor_from_tensor_list(tensor_list)

        # TODO make it support different-sized images
        max_size = _max_by_axis([list(txt.shape) for txt in tensor_list])
        # min_size = tuple(min(s) for s in zip(*[img.shape for img in tensor_list]))
        batch_shape = [len(tensor_list)] + max_size
        b, c, l = batch_shape
        dtype = tensor_list[0].dtype
        device = tensor_list[0].device
        tensor = torch.zeros(batch_shape, dtype=dtype, device=device)
        mask = torch.ones((b, l), dtype=torch.bool, device=device)
        for txt, pad_txt, m in zip(tensor_list, tensor, mask):
            pad_txt[: txt.shape[0], : txt.shape[1]] = txt
            m[: txt.shape[1]] = False
    else:
        raise ValueError("not supported")
    return NestedTensor(tensor, mask)

def _collate_and_pad_divisibility(tensor_list: list, div=32):
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(
            torch.tensor([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    c,h,w = max_size
    pad_h = (div - h % div) if h % div != 0 else 0
    pad_w = (div - w % div) if w % div != 0 else 0
    max_size = (c,h+pad_h,w+pad_w)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    return padded_imgs

# _onnx_nested_tensor_from_tensor_list() is an implementation of
# nested_tensor_from_tensor_list() that is supported by ONNX tracing.
@torch.jit.unused
def _onnx_nested_tensor_from_tensor_list(tensor_list: List[Tensor]) -> NestedTensor:
    max_size = []
    for i in range(tensor_list[0].dim()):
        max_size_i = torch.max(
            torch.stack([img.shape[i] for img in tensor_list]).to(torch.float32)
        ).to(torch.int64)
        max_size.append(max_size_i)
    max_size = tuple(max_size)

    # work around for
    # pad_img[: img.shape[0], : img.shape[1], : img.shape[2]].copy_(img)
    # m[: img.shape[1], :img.shape[2]] = False
    # which is not yet supported in onnx
    padded_imgs = []
    padded_masks = []
    for img in tensor_list:
        padding = [(s1 - s2) for s1, s2 in zip(max_size, tuple(img.shape))]
        padded_img = torch.nn.functional.pad(img, (0, padding[2], 0, padding[1], 0, padding[0]))
        padded_imgs.append(padded_img)

        m = torch.zeros_like(img[0], dtype=torch.int, device=img.device)
        padded_mask = torch.nn.functional.pad(m, (0, padding[2], 0, padding[1]), "constant", 1)
        padded_masks.append(padded_mask.to(torch.bool))

    tensor = torch.stack(padded_imgs)
    mask = torch.stack(padded_masks)

    return NestedTensor(tensor, mask=mask)


def is_dist_avail_and_initialized():
    if not dist.is_available():
        return False
    if not dist.is_initialized():
        return False
    return True
|
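For reference, the deleted nested_tensor_from_tensor_list() zero-pads a list of differently sized C x H x W images up to the per-batch maximum and records the padded positions in a boolean mask (True marks padding). Below is a minimal usage sketch written for this note, not taken from the repository; the import path assumes the project root is on sys.path, and the tensor sizes are illustrative:

import torch

# Hypothetical import: mirrors the file location xdecoder/utils/misc.py and
# only works before the deletion above.
from xdecoder.utils.misc import nested_tensor_from_tensor_list

# Two RGB images with different spatial sizes.
images = [torch.rand(3, 480, 640), torch.rand(3, 512, 512)]

# Both images are zero-padded to the batch maximum (3, 512, 640);
# the mask is False on real pixels and True on padding.
nt = nested_tensor_from_tensor_list(images)
tensors, mask = nt.decompose()

print(tensors.shape)  # torch.Size([2, 3, 512, 640])
print(mask.shape)     # torch.Size([2, 512, 640])

In DETR-style models such as this one, the mask is typically consumed as attention key padding so the padded pixels are ignored downstream.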