# Copyright (c) Facebook, Inc. and its affiliates.
from typing import Tuple

import torch
from torch import nn
from torch.nn import functional as F
from torchvision.transforms import functional as Ftv

from detectron2.config import configurable
from detectron2.modeling import META_ARCH_REGISTRY, build_backbone, build_sem_seg_head
from detectron2.modeling.backbone import Backbone
from detectron2.structures import ImageList

from utils.log import getLogger
from .modeling.criterion import SetCriterion
from .modeling.matcher import HungarianMatcher

logger = getLogger(__name__)


def interpolate_or_crop(img,
                        size=(128, 128),
                        mode="bilinear",
                        align_corners=False,
                        tol=1.1):
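    """Resize `img` to `size`, preferring a center crop over interpolation when
    the input is only slightly larger than the target (within a factor of `tol`).

    Example (illustrative): with size=(256, 256) and tol=1.1, a 260x260 input
    is center-cropped (256 <= 260 < 281.6), while a 512x512 input is resized
    with F.interpolate.
    """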
    h, w = img.shape[-2:]
    H, W = size
    if h == H and w == W:
        return img
    # Cropping instead of resampling avoids interpolation blur when the input
    # is only marginally larger than the target resolution.
    if H <= h < tol * H and W <= w < tol * W:
        logger.info_once("Using center cropping instead of interpolation")
        return Ftv.center_crop(img, output_size=size)
    return F.interpolate(img, size=size, mode=mode, align_corners=align_corners)


@META_ARCH_REGISTRY.register()
class MaskFormer(nn.Module):
    """
    Main class for mask classification semantic segmentation architectures.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        criterion: nn.Module,
        num_queries: int,
        panoptic_on: bool,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
        crop_not_upsample: bool = True,
    ):
""" | |
Args: | |
backbone: a backbone module, must follow detectron2's backbone interface | |
sem_seg_head: a module that predicts semantic segmentation from backbone features | |
criterion: a module that defines the loss | |
num_queries: int, number of queries | |
panoptic_on: bool, whether to output panoptic segmentation prediction | |
object_mask_threshold: float, threshold to filter query based on classification score | |
for panoptic segmentation inference | |
overlap_threshold: overlap threshold used in general inference for panoptic segmentation | |
metadata: dataset meta, get `thing` and `stuff` category names for panoptic | |
segmentation inference | |
size_divisibility: Some backbones require the input height and width to be divisible by a | |
specific integer. We can use this to override such requirement. | |
sem_seg_postprocess_before_inference: whether to resize the prediction back | |
to original input size before semantic segmentation inference or after. | |
For high-resolution dataset like Mapillary, resizing predictions before | |
inference will cause OOM error. | |
pixel_mean, pixel_std: list or tuple with #channels element, representing | |
the per-channel mean and std to be used to normalize the input image | |
""" | |
        super().__init__()
        self.crop_not_upsample = crop_not_upsample
        self.backbone = backbone
        self.sem_seg_head = sem_seg_head
        self.criterion = criterion
        self.num_queries = num_queries
        self.overlap_threshold = overlap_threshold
        self.panoptic_on = panoptic_on
        self.object_mask_threshold = object_mask_threshold
        self.metadata = metadata
        if size_divisibility < 0:
            # use backbone size_divisibility if not set
            size_divisibility = self.backbone.size_divisibility
        self.size_divisibility = size_divisibility
        self.sem_seg_postprocess_before_inference = sem_seg_postprocess_before_inference
        # persistent=False: the normalization constants are not saved in checkpoints
        self.register_buffer("pixel_mean", torch.Tensor(pixel_mean).view(-1, 1, 1), False)
        self.register_buffer("pixel_std", torch.Tensor(pixel_std).view(-1, 1, 1), False)

    @classmethod
    def from_config(cls, cfg):
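        """Map a detectron2 config to the keyword arguments of `__init__`
        (consumed by the `@configurable` mechanism)."""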
        backbone = build_backbone(cfg)
        out_shape = backbone.output_shape()
        if len(cfg.GWM.SAMPLE_KEYS) > 1:
            # Features from multiple input keys are concatenated channel-wise in
            # forward_base, so widen the declared channel counts accordingly.
            for k, v in out_shape.items():
                out_shape[k] = v._replace(channels=v.channels * len(cfg.GWM.SAMPLE_KEYS))
        sem_seg_head = build_sem_seg_head(cfg, out_shape)

        # Loss parameters:
        deep_supervision = cfg.MODEL.MASK_FORMER.DEEP_SUPERVISION
        no_object_weight = cfg.MODEL.MASK_FORMER.NO_OBJECT_WEIGHT
        dice_weight = cfg.MODEL.MASK_FORMER.DICE_WEIGHT
        mask_weight = cfg.MODEL.MASK_FORMER.MASK_WEIGHT

        # building criterion
        matcher = HungarianMatcher(
            cost_class=1,
            cost_mask=mask_weight,
            cost_dice=dice_weight,
        )

        weight_dict = {"loss_ce": 1, "loss_mask": mask_weight, "loss_dice": dice_weight}
        if deep_supervision:
            dec_layers = cfg.MODEL.MASK_FORMER.DEC_LAYERS
            aux_weight_dict = {}
            for i in range(dec_layers - 1):
                aux_weight_dict.update({k + f"_{i}": v for k, v in weight_dict.items()})
            weight_dict.update(aux_weight_dict)

        losses = ["labels", "masks"]

        criterion = SetCriterion(
            sem_seg_head.num_classes,
            matcher=matcher,
            weight_dict=weight_dict,
            eos_coef=no_object_weight,
            losses=losses,
        )

        return {
            "backbone": backbone,
            "sem_seg_head": sem_seg_head,
            "criterion": criterion,
            "num_queries": cfg.MODEL.MASK_FORMER.NUM_OBJECT_QUERIES,
            "panoptic_on": cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON,
            "object_mask_threshold": cfg.MODEL.MASK_FORMER.TEST.OBJECT_MASK_THRESHOLD,
            "overlap_threshold": cfg.MODEL.MASK_FORMER.TEST.OVERLAP_THRESHOLD,
            "metadata": None,  # MetadataCatalog.get(cfg.DATASETS.TRAIN[0]),
            "size_divisibility": cfg.MODEL.MASK_FORMER.SIZE_DIVISIBILITY,
            "sem_seg_postprocess_before_inference": (
                cfg.MODEL.MASK_FORMER.TEST.SEM_SEG_POSTPROCESSING_BEFORE_INFERENCE
                or cfg.MODEL.MASK_FORMER.TEST.PANOPTIC_ON
            ),
            "pixel_mean": cfg.MODEL.PIXEL_MEAN,
            "pixel_std": cfg.MODEL.PIXEL_STD,
            "crop_not_upsample": cfg.MODEL.SEM_SEG_HEAD.PIXEL_DECODER_NAME != "BasePixelDecoder",
        }

    @property
    def device(self):
        return self.pixel_mean.device

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.
        Returns:
            list[dict]:
                each dict has the results for one image. The dict contains the following keys:

                * "sem_seg":
                    A Tensor that represents the
                    per-pixel segmentation predicted by the head.
                    The prediction has shape KxHxW that represents the logits of
                    each class for each pixel.
                * "panoptic_seg":
                    A tuple that represents the panoptic output
                    panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
                    segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                        Each dict contains keys "id", "category_id", "isthing".
        """
        return self.forward_base(batched_inputs, keys=["image"], get_train=not self.training,
                                 get_eval=not self.training)

    def forward_base(self, batched_inputs, keys, get_train=False, get_eval=False, raw_sem_seg=False):
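        """Shared forward pass. `keys` selects which entries of each input dict
        are fed to the backbone (their features are concatenated channel-wise).
        With `get_train`, losses are computed against "instances"; with
        `get_eval`, per-image results are returned; `raw_sem_seg` skips resizing
        predictions back to the output resolution."""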
        for i, key in enumerate(keys):
            images = [x[key].to(self.device) for x in batched_inputs]
            images = [(x - self.pixel_mean) / self.pixel_std for x in images]
            images = ImageList.from_tensors(images, self.size_divisibility)
            logger.debug_once(f"Maskformer input {key} shape: {images.tensor.shape}")
            out = self.backbone(images.tensor)
            if i == 0:
                features = out
            else:
                # Concatenate features from additional input keys along channels
                features = {k: torch.cat([features[k], v], 1) for k, v in out.items()}
        outputs = self.sem_seg_head(features)

        if get_train:
            # mask classification target
            if "instances" in batched_inputs[0]:
                gt_instances = [x["instances"].to(self.device) for x in batched_inputs]
                targets = self.prepare_targets(gt_instances, images)
            else:
                targets = None

            # bipartite matching-based loss
            losses = self.criterion(outputs, targets)

            for k in list(losses.keys()):
                if k in self.criterion.weight_dict:
                    losses[k] *= self.criterion.weight_dict[k]
                else:
                    # remove this loss if not specified in `weight_dict`
                    losses.pop(k)
            if not get_eval:
                return losses

        if get_eval:
            # mask_cls_results = outputs["pred_logits"]
            mask_pred_results = outputs["pred_masks"]
            # Class logits are not used in this pipeline; alias the mask
            # predictions so the zip below stays uniform.
            mask_cls_results = mask_pred_results
            logger.debug_once(f"Maskformer mask_pred_results shape: {mask_pred_results.shape}")
            # upsample masks
            # mask_pred_results = interpolate_or_crop(
            #     mask_pred_results,
            #     size=(images.tensor.shape[-2], images.tensor.shape[-1]),
            #     mode="bilinear",
            #     align_corners=False,
            # )

            processed_results = []
            for mask_cls_result, mask_pred_result, input_per_image, image_size in zip(
                    mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes
            ):
                if raw_sem_seg:
                    processed_results.append({"sem_seg": mask_pred_result})
                    continue
                height = input_per_image.get("height", image_size[0])
                width = input_per_image.get("width", image_size[1])
                logger.debug_once(f"Maskformer mask_pred_results target HW: {height, width}")
                r = interpolate_or_crop(mask_pred_result[None], size=(height, width),
                                        mode="bilinear", align_corners=False)[0]
                processed_results.append({"sem_seg": r})
                # panoptic segmentation inference
                # if self.panoptic_on:
                #     panoptic_r = self.panoptic_inference(mask_cls_result, mask_pred_result)
                #     processed_results[-1]["panoptic_seg"] = panoptic_r

            # if 'features' in outputs:
            #     features = outputs['features']
            #     features = interpolate_or_crop(
            #         features,
            #         size=(images.tensor.shape[-2], images.tensor.shape[-1]),
            #         mode="bilinear",
            #         align_corners=False,
            #     )
            #     for res, f in zip(processed_results, features):
            #         res['features'] = f

            del outputs
            if not get_train:
                return processed_results
            return losses, processed_results

    def prepare_targets(self, targets, images):
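        """Pad per-image ground-truth masks to the padded batch resolution so
        they align with the model's mask predictions."""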
        h, w = images.tensor.shape[-2:]
        new_targets = []
        for targets_per_image in targets:
            # pad gt
            gt_masks = targets_per_image.gt_masks
            padded_masks = torch.zeros((gt_masks.shape[0], h, w), dtype=gt_masks.dtype, device=gt_masks.device)
            padded_masks[:, : gt_masks.shape[1], : gt_masks.shape[2]] = gt_masks
            new_targets.append(
                {
                    "labels": targets_per_image.gt_classes,
                    "masks": padded_masks,
                }
            )
        return new_targets

    def semantic_inference(self, mask_cls, mask_pred):
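        """Combine per-query class probabilities (excluding the trailing
        "no object" class) with sigmoid mask probabilities into per-class
        semantic segmentation scores of shape (C, H, W)."""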
        mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1]
        mask_pred = mask_pred.sigmoid()
        semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
        return semseg

    def panoptic_inference(self, mask_cls, mask_pred):
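        """Convert per-query predictions into a panoptic segmentation: keep
        confident non-void queries, assign each pixel to its highest-scoring
        query, merge `stuff` segments of the same class, and drop segments
        whose visible area falls below `overlap_threshold`."""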
        scores, labels = F.softmax(mask_cls, dim=-1).max(-1)
        mask_pred = mask_pred.sigmoid()

        keep = labels.ne(self.sem_seg_head.num_classes) & (scores > self.object_mask_threshold)
        cur_scores = scores[keep]
        cur_classes = labels[keep]
        cur_masks = mask_pred[keep]
        cur_mask_cls = mask_cls[keep]
        cur_mask_cls = cur_mask_cls[:, :-1]

        cur_prob_masks = cur_scores.view(-1, 1, 1) * cur_masks

        h, w = cur_masks.shape[-2:]
        panoptic_seg = torch.zeros((h, w), dtype=torch.int32, device=cur_masks.device)
        segments_info = []

        current_segment_id = 0

        if cur_masks.shape[0] == 0:
            # We didn't detect any mask :(
            return panoptic_seg, segments_info
        else:
            # take argmax
            cur_mask_ids = cur_prob_masks.argmax(0)
            stuff_memory_list = {}
            for k in range(cur_classes.shape[0]):
                pred_class = cur_classes[k].item()
                isthing = pred_class in self.metadata.thing_dataset_id_to_contiguous_id.values()
                mask = cur_mask_ids == k
                mask_area = mask.sum().item()
                original_area = (cur_masks[k] >= 0.5).sum().item()

                if mask_area > 0 and original_area > 0:
                    if mask_area / original_area < self.overlap_threshold:
                        continue

                    # merge stuff regions
                    if not isthing:
                        if int(pred_class) in stuff_memory_list.keys():
                            panoptic_seg[mask] = stuff_memory_list[int(pred_class)]
                            continue
                        else:
                            stuff_memory_list[int(pred_class)] = current_segment_id + 1

                    current_segment_id += 1
                    panoptic_seg[mask] = current_segment_id

                    segments_info.append(
                        {
                            "id": current_segment_id,
                            "isthing": bool(isthing),
                            "category_id": int(pred_class),
                        }
                    )

            return panoptic_seg, segments_info