# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) Meta Platforms, Inc. All Rights Reserved
# Modified by Feng Liang from
# https://github.com/MendelXu/zsseg.baseline/blob/master/mask_former/zero_shot_mask_former_model.py

import logging
from typing import Tuple

import numpy as np
import torch
from torch import nn
from torch.nn import functional as F

from detectron2.config import configurable
from detectron2.data import MetadataCatalog
from detectron2.modeling import META_ARCH_REGISTRY
from detectron2.modeling.backbone import Backbone
from detectron2.modeling.postprocessing import sem_seg_postprocess
from detectron2.structures import ImageList
from detectron2.utils.logger import log_first_n

from .modeling.clip_adapter import (
    ClipAdapter,
    MaskFormerClipAdapter,
    build_text_prompt,
)
from .mask_former_model import MaskFormer
from .utils.misc import get_gt_binary_masks


@META_ARCH_REGISTRY.register()
class OVSeg(MaskFormer):
    """
    Main class for zero shot mask classification semantic segmentation architectures.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        clip_adapter: nn.Module,
        criterion: nn.Module,
        num_queries: int,
        panoptic_on: bool,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        clip_ensemble: bool,
        clip_ensemble_weight: float,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            criterion: a module that defines the loss
            clip_adapter: adapter for clip-based mask classification
            num_queries: int, number of queries
            panoptic_on: bool, whether to output panoptic segmentation prediction
            object_mask_threshold: float, threshold to filter queries based on classification score
                for panoptic segmentation inference
            overlap_threshold: overlap threshold used in general inference for panoptic segmentation
            metadata: dataset meta, used to get `thing` and `stuff` category names for
                panoptic segmentation inference
            size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such requirement.
            sem_seg_postprocess_before_inference: whether to resize the prediction back
                to the original input size before semantic segmentation inference or after.
                For high-resolution datasets like Mapillary, resizing predictions before
                inference will cause an OOM error.
            pixel_mean, pixel_std: list or tuple with #channels elements, representing
                the per-channel mean and std to be used to normalize the input image
        """
        super().__init__(
            backbone=backbone,
            sem_seg_head=sem_seg_head,
            criterion=criterion,
            num_queries=num_queries,
            panoptic_on=panoptic_on,
            object_mask_threshold=object_mask_threshold,
            overlap_threshold=overlap_threshold,
            metadata=metadata,
            size_divisibility=size_divisibility,
            sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference,
            pixel_mean=pixel_mean,
            pixel_std=pixel_std,
        )
        self.clip_adapter: ClipAdapter = clip_adapter
        self.clip_ensemble: bool = clip_ensemble
        self.clip_ensemble_weight: float = clip_ensemble_weight

    @classmethod
    def from_config(cls, cfg):
        init_kwargs = MaskFormer.from_config(cfg)
        text_templates = build_text_prompt(cfg.MODEL.CLIP_ADAPTER)
        clip_adapter = MaskFormerClipAdapter(
            cfg.MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME,
            text_templates,
            mask_fill=cfg.MODEL.CLIP_ADAPTER.MASK_FILL,
            mask_expand_ratio=cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO,
            mask_thr=cfg.MODEL.CLIP_ADAPTER.MASK_THR,
            mask_matting=cfg.MODEL.CLIP_ADAPTER.MASK_MATTING,
            region_resized=cfg.MODEL.CLIP_ADAPTER.REGION_RESIZED,
            mask_prompt_depth=cfg.MODEL.CLIP_ADAPTER.MASK_PROMPT_DEPTH,
            mask_prompt_fwd=cfg.MODEL.CLIP_ADAPTER.MASK_PROMPT_FWD,
        )
        init_kwargs["clip_adapter"] = clip_adapter
        init_kwargs["clip_ensemble"] = cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE
        init_kwargs[
            "clip_ensemble_weight"
        ] = cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE_WEIGHT

        return init_kwargs

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.
        Returns:
            list[dict]:
                each dict has the results for one image. The dict contains the following keys:

                * "sem_seg":
                    A Tensor that represents the per-pixel segmentation predicted by the head.
                    The prediction has shape KxHxW that represents the logits of
                    each class for each pixel.
                * "panoptic_seg":
                    A tuple that represents the panoptic output.
                    panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
                    segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                        Each dict contains keys "id", "category_id", "isthing".
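
        Example (illustrative only; the image tensor, output resolution and
        dataset name below are placeholders, not values from an actual run)::

            inputs = [
                {
                    "image": img,                        # (3, H, W) tensor
                    "height": 512, "width": 683,         # desired output resolution
                    "meta": {"dataset_name": "ade20k_sem_seg_val"},
                }
            ]
            results = model(inputs)
            results[0]["sem_seg"]                        # (K, 512, 683) class logits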
""" dataset_name = [x["meta"]["dataset_name"] for x in batched_inputs] assert len(set(dataset_name)) == 1 dataset_name = dataset_name[0] images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.size_divisibility) features = self.backbone(images.tensor) outputs = self.sem_seg_head(features) class_names = self.get_class_name_list(dataset_name) text_features = self.clip_adapter.get_text_features(class_names) outputs["pred_logits"] = self.clip_adapter.get_sim_logits( text_features, self.clip_adapter.normalize_feature(outputs["pred_logits"]) ) if self.training: if "aux_outputs" in outputs.keys(): for i in range(len(outputs["aux_outputs"])): outputs["aux_outputs"][i][ "pred_logits" ] = self.clip_adapter.get_sim_logits( text_features, self.clip_adapter.normalize_feature( outputs["aux_outputs"][i]["pred_logits"] ), ) # mask classification target if "instances" in batched_inputs[0]: gt_instances = [x["instances"].to(self.device) for x in batched_inputs] targets = self.prepare_targets(gt_instances, images) else: targets = None # bipartite matching-based loss losses = self.criterion(outputs, targets) for k in list(losses.keys()): if k in self.criterion.weight_dict: losses[k] *= self.criterion.weight_dict[k] else: # remove this loss if not specified in `weight_dict` losses.pop(k) return losses else: mask_cls_results = outputs["pred_logits"] mask_pred_results = outputs["pred_masks"] # upsample masks mask_pred_results = F.interpolate( mask_pred_results, size=(images.tensor.shape[-2], images.tensor.shape[-1]), mode="bilinear", align_corners=False, ) processed_results = [] for mask_cls_result, mask_pred_result, input_per_image, image_size in zip( mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes ): height = image_size[0] width = image_size[1] mask_pred_result = sem_seg_postprocess( mask_pred_result, image_size, height, width ) image = input_per_image["image"].to(self.device) r, regions = self.semantic_inference( mask_cls_result, mask_pred_result, image, class_names ) height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) r = sem_seg_postprocess(r, image_size, height, width) processed_results.append({"sem_seg": r}) # panoptic segmentation inference if self.panoptic_on: panoptic_r = self.panoptic_inference( mask_cls_result, mask_pred_result ) processed_results[-1]["panoptic_seg"] = panoptic_r return processed_results def semantic_inference(self, mask_cls, mask_pred, image, class_names): mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1] mask_pred = mask_pred.sigmoid() regions = None if self.clip_ensemble: clip_cls, regions, valid_flag = self.clip_adapter( image, class_names, mask_pred, normalize=True ) if clip_cls is None: clip_cls = torch.empty(0, mask_cls.shape[-1] + 1, device=self.device) # softmax before index or after? 
            clip_cls = F.softmax(clip_cls[:, :-1], dim=-1)
            if self.clip_ensemble_weight > 0:
                map_back_clip_cls = mask_cls.new_ones(mask_cls.shape)
                map_back_clip_cls[valid_flag] = clip_cls
                mask_cls = torch.pow(mask_cls, 1 - self.clip_ensemble_weight) * \
                    torch.pow(map_back_clip_cls, self.clip_ensemble_weight)
            else:
                # only clip model predictions are used
                mask_cls = clip_cls
                mask_pred = mask_pred[valid_flag]

        # aggregate per-query class probabilities and mask probabilities into a
        # per-pixel class score map of shape (C, H, W)
        semseg = torch.einsum("qc,qhw->chw", mask_cls, mask_pred)
        return semseg, regions

    def get_class_name_list(self, dataset_name):
        class_names = [
            c.strip() for c in MetadataCatalog.get(dataset_name).stuff_classes
        ]
        return class_names


@META_ARCH_REGISTRY.register()
class OVSegDEMO(MaskFormer):
    """
    Main class for zero shot mask classification semantic segmentation architectures.
    """

    @configurable
    def __init__(
        self,
        *,
        backbone: Backbone,
        sem_seg_head: nn.Module,
        clip_adapter: nn.Module,
        criterion: nn.Module,
        num_queries: int,
        panoptic_on: bool,
        object_mask_threshold: float,
        overlap_threshold: float,
        metadata,
        size_divisibility: int,
        sem_seg_postprocess_before_inference: bool,
        clip_ensemble: bool,
        clip_ensemble_weight: float,
        pixel_mean: Tuple[float],
        pixel_std: Tuple[float],
    ):
        """
        Args:
            backbone: a backbone module, must follow detectron2's backbone interface
            sem_seg_head: a module that predicts semantic segmentation from backbone features
            criterion: a module that defines the loss
            clip_adapter: adapter for clip-based mask classification
            num_queries: int, number of queries
            panoptic_on: bool, whether to output panoptic segmentation prediction
            object_mask_threshold: float, threshold to filter queries based on classification score
                for panoptic segmentation inference
            overlap_threshold: overlap threshold used in general inference for panoptic segmentation
            metadata: dataset meta, used to get `thing` and `stuff` category names for
                panoptic segmentation inference
            size_divisibility: Some backbones require the input height and width to be divisible by a
                specific integer. We can use this to override such requirement.
            sem_seg_postprocess_before_inference: whether to resize the prediction back
                to the original input size before semantic segmentation inference or after.
                For high-resolution datasets like Mapillary, resizing predictions before
                inference will cause an OOM error.
            pixel_mean, pixel_std: list or tuple with #channels elements, representing
                the per-channel mean and std to be used to normalize the input image
        """
        super().__init__(
            backbone=backbone,
            sem_seg_head=sem_seg_head,
            criterion=criterion,
            num_queries=num_queries,
            panoptic_on=panoptic_on,
            object_mask_threshold=object_mask_threshold,
            overlap_threshold=overlap_threshold,
            metadata=metadata,
            size_divisibility=size_divisibility,
            sem_seg_postprocess_before_inference=sem_seg_postprocess_before_inference,
            pixel_mean=pixel_mean,
            pixel_std=pixel_std,
        )
        self.clip_adapter: ClipAdapter = clip_adapter
        self.clip_ensemble: bool = clip_ensemble
        self.clip_ensemble_weight: float = clip_ensemble_weight

    @classmethod
    def from_config(cls, cfg):
        init_kwargs = MaskFormer.from_config(cfg)
        text_templates = build_text_prompt(cfg.MODEL.CLIP_ADAPTER)
        clip_adapter = MaskFormerClipAdapter(
            cfg.MODEL.CLIP_ADAPTER.CLIP_MODEL_NAME,
            text_templates,
            mask_fill=cfg.MODEL.CLIP_ADAPTER.MASK_FILL,
            mask_expand_ratio=cfg.MODEL.CLIP_ADAPTER.MASK_EXPAND_RATIO,
            mask_thr=cfg.MODEL.CLIP_ADAPTER.MASK_THR,
            mask_matting=cfg.MODEL.CLIP_ADAPTER.MASK_MATTING,
            region_resized=cfg.MODEL.CLIP_ADAPTER.REGION_RESIZED,
            mask_prompt_depth=cfg.MODEL.CLIP_ADAPTER.MASK_PROMPT_DEPTH,
            mask_prompt_fwd=cfg.MODEL.CLIP_ADAPTER.MASK_PROMPT_FWD,
        )
        init_kwargs["clip_adapter"] = clip_adapter
        init_kwargs["clip_ensemble"] = cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE
        init_kwargs[
            "clip_ensemble_weight"
        ] = cfg.MODEL.CLIP_ADAPTER.CLIP_ENSEMBLE_WEIGHT

        return init_kwargs

    def forward(self, batched_inputs):
        """
        Args:
            batched_inputs: a list, batched outputs of :class:`DatasetMapper`.
                Each item in the list contains the inputs for one image.
                For now, each item in the list is a dict that contains:
                   * "image": Tensor, image in (C, H, W) format.
                   * "instances": per-region ground truth
                   * Other information that's included in the original dicts, such as:
                     "height", "width" (int): the output resolution of the model (may be different
                     from input resolution), used in inference.
        Returns:
            list[dict]:
                each dict has the results for one image. The dict contains the following keys:

                * "sem_seg":
                    A Tensor that represents the per-pixel segmentation predicted by the head.
                    The prediction has shape KxHxW that represents the logits of
                    each class for each pixel.
                * "panoptic_seg":
                    A tuple that represents the panoptic output.
                    panoptic_seg (Tensor): of shape (height, width) where the values are ids for each segment.
                    segments_info (list[dict]): Describe each segment in `panoptic_seg`.
                        Each dict contains keys "id", "category_id", "isthing".
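
        Example (illustrative only; the image tensor, output resolution and
        class names below are placeholders)::

            inputs = [
                {
                    "image": img,                        # (3, H, W) tensor
                    "height": 512, "width": 683,         # desired output resolution
                    "class_names": ["cat", "grass"],     # open-vocabulary queries
                }
            ]
            results = model(inputs)
            results[0]["sem_seg"]                        # (len(class_names), 512, 683)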
""" images = [x["image"].to(self.device) for x in batched_inputs] images = [(x - self.pixel_mean) / self.pixel_std for x in images] images = ImageList.from_tensors(images, self.size_divisibility) features = self.backbone(images.tensor) outputs = self.sem_seg_head(features) class_names = batched_inputs[0]["class_names"] if len(class_names) == 1: # Because classification is performed in a 'contrastive' manner, adding others to represent other concepts class_names.append('others') text_features = self.clip_adapter.get_text_features(class_names) outputs["pred_logits"] = self.clip_adapter.get_sim_logits( text_features, self.clip_adapter.normalize_feature(outputs["pred_logits"]) ) mask_cls_results = outputs["pred_logits"] mask_pred_results = outputs["pred_masks"] # upsample masks mask_pred_results = F.interpolate( mask_pred_results, size=(images.tensor.shape[-2], images.tensor.shape[-1]), mode="bilinear", align_corners=False, ) processed_results = [] for mask_cls_result, mask_pred_result, input_per_image, image_size in zip( mask_cls_results, mask_pred_results, batched_inputs, images.image_sizes ): height = image_size[0] width = image_size[1] mask_pred_result = sem_seg_postprocess( mask_pred_result, image_size, height, width ) image = input_per_image["image"].to(self.device) r, regions = self.demo_inference(mask_cls_result, mask_pred_result, image, class_names) height = input_per_image.get("height", image_size[0]) width = input_per_image.get("width", image_size[1]) r = sem_seg_postprocess(r, image_size, height, width) processed_results.append({"sem_seg": r}) return processed_results def demo_inference(self, mask_cls, mask_pred, image, class_names): mask_cls = F.softmax(mask_cls, dim=-1)[..., :-1] mask_pred = mask_pred.sigmoid() regions = None if self.clip_ensemble: clip_cls, regions, valid_flag = self.clip_adapter( image, class_names, mask_pred, normalize=True ) if clip_cls is None: clip_cls = torch.empty(0, mask_cls.shape[-1] + 1, device=self.device) # softmax before index or after? clip_cls = F.softmax(clip_cls[:, :-1], dim=-1) if self.clip_ensemble_weight > 0: map_back_clip_cls = mask_cls.new_ones(mask_cls.shape) map_back_clip_cls[valid_flag] = clip_cls mask_cls = torch.pow(mask_cls, 1 - self.clip_ensemble_weight) * \ torch.pow(map_back_clip_cls, self.clip_ensemble_weight) else: # only clip model predictions are used mask_cls = clip_cls mask_pred = mask_pred[valid_flag] bin_mask = mask_pred > self.clip_adapter.mask_thr select_cls = torch.zeros(sum(valid_flag), mask_cls.shape[-1], device=self.device) select_mask = torch.argmax(mask_cls, dim=0) if len(class_names) == 2 and class_names[-1] == 'others': select_mask = select_mask[:-1] for idx in select_mask: select_cls[idx] = mask_cls[idx] semseg = torch.einsum("qc,qhw->chw", select_cls, bin_mask.float()) return semseg, regions