diff --git a/.gitattributes b/.gitattributes
index a6344aac8c09253b3b630fb776ae94478aa0275b..69df653f5776f81f8510bd79f29888da3bab4122 100644
--- a/.gitattributes
+++ b/.gitattributes
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
*.zip filter=lfs diff=lfs merge=lfs -text
*.zst filter=lfs diff=lfs merge=lfs -text
*tfevents* filter=lfs diff=lfs merge=lfs -text
+ckpt/** filter=lfs diff=lfs merge=lfs -text
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000000000000000000000000000000000000..bb30824e7ac19feb457d35f30078496f7cd07778
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,180 @@
+# Byte-compiled / optimized / DLL files
+__pycache__/
+*.py[cod]
+*$py.class
+
+# C extensions
+*.so
+
+# Distribution / packaging
+.Python
+build/
+develop-eggs/
+dist/
+downloads/
+eggs/
+.eggs/
+lib/
+lib64/
+parts/
+sdist/
+var/
+wheels/
+share/python-wheels/
+*.egg-info/
+.installed.cfg
+*.egg
+MANIFEST
+venv
+models
+.vs/
+outputs
+temp_faces
+loras
+
+# PyInstaller
+# Usually these files are written by a python script from a template
+# before PyInstaller builds the exe, so as to inject date/other infos into it.
+*.manifest
+*.spec
+
+# Installer logs
+pip-log.txt
+pip-delete-this-directory.txt
+
+# Unit test / coverage reports
+htmlcov/
+.tox/
+.nox/
+.coverage
+.coverage.*
+.cache
+nosetests.xml
+coverage.xml
+*.cover
+*.py,cover
+.hypothesis/
+.pytest_cache/
+cover/
+
+# Translations
+*.mo
+*.pot
+
+# Django stuff:
+*.log
+local_settings.py
+db.sqlite3
+db.sqlite3-journal
+
+# Flask stuff:
+instance/
+.webassets-cache
+
+# Scrapy stuff:
+.scrapy
+
+# Sphinx documentation
+docs/_build/
+
+# PyBuilder
+.pybuilder/
+target/
+
+# Jupyter Notebook
+.ipynb_checkpoints
+
+# IPython
+profile_default/
+ipython_config.py
+
+# pyenv
+# For a library or package, you might want to ignore these files since the code is
+# intended to run in multiple environments; otherwise, check them in:
+# .python-version
+
+# pipenv
+# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+# However, in case of collaboration, if having platform-specific dependencies or dependencies
+# having no cross-platform support, pipenv may install dependencies that don't work, or not
+# install all needed dependencies.
+#Pipfile.lock
+
+# poetry
+# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+# This is especially recommended for binary packages to ensure reproducibility, and is more
+# commonly ignored for libraries.
+# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+#poetry.lock
+
+# pdm
+# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+#pdm.lock
+# pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+# in version control.
+# https://pdm.fming.dev/#use-with-ide
+.pdm.toml
+
+# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+__pypackages__/
+
+# Celery stuff
+celerybeat-schedule
+celerybeat.pid
+
+# SageMath parsed files
+*.sage.py
+
+# Environments
+.env
+.venv
+env/
+venv/
+ENV/
+env.bak/
+venv.bak/
+
+# Spyder project settings
+.spyderproject
+.spyproject
+
+# Rope project settings
+.ropeproject
+
+# mkdocs documentation
+/site
+
+# mypy
+.mypy_cache/
+.dmypy.json
+dmypy.json
+
+# Pyre type checker
+.pyre/
+
+# pytype static type analyzer
+.pytype/
+
+# Cython debug symbols
+cython_debug/
+
+# PyCharm
+# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
+# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
+# and can be added to the global gitignore or merged into this file. For a more nuclear
+# option (not recommended) you can uncomment the following to ignore the entire idea folder.
+#.idea/
+
+.vscode/
+*.pyc
+
+#others
+outputs/
+__pycache__/
+densepose/__pycache__/
+detectron2/__pycache__/
+ip_adapter/__pycache__/
+preprocess/humanparsing/__pycache__/
+preprocess/openpose/__pycache__/
+util/__pycache__/
+src/__pycache__/
\ No newline at end of file
diff --git a/README.md b/README.md
index 8c04ab5b9b34d32b303b8100cb681bcfbacc0f7b..d9a60c51913a6e919edb9611ed78d2311ce2e3d5 100644
--- a/README.md
+++ b/README.md
@@ -1,12 +1,162 @@
+
+# IDM-VTON: Improving Diffusion Models for Authentic Virtual Try-on in the Wild
+
+This is the official implementation of the paper ["Improving Diffusion Models for Authentic Virtual Try-on in the Wild"](https://arxiv.org/abs/2403.05139).
+
+Star ⭐ us if you like it!
+
---
-title: Ailusion VTON DEMO V1
-emoji: 👁
-colorFrom: red
-colorTo: indigo
-sdk: gradio
-sdk_version: 4.31.2
-app_file: app.py
-pinned: false
----
-Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+![teaser2](assets/teaser2.png)
+![teaser](assets/teaser.png)
+
+
+## TODO LIST
+
+
+- [x] demo model
+- [x] inference code
+- [ ] training code
+
+
+
+## Requirements
+
+```
+git clone https://github.com/yisol/IDM-VTON.git
+cd IDM-VTON
+
+conda env create -f environment.yaml
+conda activate idm
+```
+
+## Data preparation
+
+### VITON-HD
+You can download the VITON-HD dataset from [VITON-HD](https://github.com/shadow2496/VITON-HD).
+
+After downloading the VITON-HD dataset, move vitonhd_test_tagged.json into the test folder.
+
+The structure of the dataset directory should be as follows:
+
+```
+
+train
+|-- ...
+
+test
+|-- image
+|-- image-densepose
+|-- agnostic-mask
+|-- cloth
+|-- vitonhd_test_tagged.json
+
+```
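+
+For example, assuming vitonhd_test_tagged.json was downloaded to the current directory and `DATA_DIR` is the dataset root you will later pass to `--data_dir` (an illustrative placeholder, not a fixed path), it can be placed with:
+
+```
+mv vitonhd_test_tagged.json DATA_DIR/test/
+```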
+
+### DressCode
+You can download the DressCode dataset from [DressCode](https://github.com/aimagelab/dress-code).
+
+We provide pre-computed densepose images and captions for garments [here](https://kaistackr-my.sharepoint.com/:u:/g/personal/cpis7_kaist_ac_kr/EaIPRG-aiRRIopz9i002FOwBDa-0-BHUKVZ7Ia5yAVVG3A?e=YxkAip).
+
+We used [detectron2](https://github.com/facebookresearch/detectron2) to obtain the densepose images; refer [here](https://github.com/sangyun884/HR-VITON/issues/45) for more details.
+
+After downloading the DressCode dataset, place the image-densepose directories and caption text files as follows:
+
+```
+DressCode
+|-- dresses
+ |-- images
+ |-- image-densepose
+ |-- dc_caption.txt
+ |-- ...
+|-- lower_body
+ |-- images
+ |-- image-densepose
+ |-- dc_caption.txt
+ |-- ...
+|-- upper_body
+ |-- images
+ |-- image-densepose
+ |-- dc_caption.txt
+ |-- ...
+```
+
+
+## Inference
+
+
+### VITON-HD
+
+Run inference with the Python script and its arguments:
+
+```
+accelerate launch inference.py \
+ --width 768 --height 1024 --num_inference_steps 30 \
+ --output_dir "result" \
+ --unpaired \
+ --data_dir "DATA_DIR" \
+ --seed 42 \
+ --test_batch_size 2 \
+ --guidance_scale 2.0
+```
+
+Or, you can simply run the script file:
+
+```
+sh inference.sh
+```
+
+### DressCode
+
+For the DressCode dataset, specify the garment category to generate via the `--category` argument:
+```
+accelerate launch inference_dc.py \
+ --width 768 --height 1024 --num_inference_steps 30 \
+ --output_dir "result" \
+ --unpaired \
+ --data_dir "DATA_DIR" \
+    --seed 42 \
+    --test_batch_size 2 \
+    --guidance_scale 2.0 \
+    --category "upper_body"
+```
+
+Or, you can simply run the script file:
+```
+sh inference.sh
+```
+
+
+## Acknowledgements
+
+For the [demo](https://huggingface.co/spaces/yisol/IDM-VTON), GPUs are provided by [ZeroGPU](https://huggingface.co/zero-gpu-explorers), and the mask generation code is based on [OOTDiffusion](https://github.com/levihsu/OOTDiffusion) and [DCI-VTON](https://github.com/bcmi/DCI-VTON-Virtual-Try-On).
+
+Parts of our code are based on [IP-Adapter](https://github.com/tencent-ailab/IP-Adapter).
+
+
+
+## Citation
+```
+@article{choi2024improving,
+  title={Improving Diffusion Models for Authentic Virtual Try-on in the Wild},
+ author={Choi, Yisol and Kwak, Sangkyung and Lee, Kyungmin and Choi, Hyungwon and Shin, Jinwoo},
+ journal={arXiv preprint arXiv:2403.05139},
+ year={2024}
+}
+```
+
+## License
+The code and checkpoints in this repository are released under the [CC BY-NC-SA 4.0 license](https://creativecommons.org/licenses/by-nc-sa/4.0/legalcode).
+
+
+
diff --git a/app.py b/app.py
new file mode 100644
index 0000000000000000000000000000000000000000..934a3e7dba16b745f8f22aec8e6cdc72753fe670
--- /dev/null
+++ b/app.py
@@ -0,0 +1,313 @@
+import gradio as gr
+from PIL import Image
+from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
+from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
+from src.unet_hacked_tryon import UNet2DConditionModel
+from transformers import (
+ CLIPImageProcessor,
+ CLIPVisionModelWithProjection,
+ CLIPTextModel,
+ CLIPTextModelWithProjection,
+)
+from diffusers import DDPMScheduler,AutoencoderKL
+from typing import List
+
+import torch
+import os
+from transformers import AutoTokenizer
+import spaces
+import numpy as np
+from utils_mask import get_mask_location
+from torchvision import transforms
+import apply_net
+from preprocess.humanparsing.run_parsing import Parsing
+from preprocess.openpose.run_openpose import OpenPose
+from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
+from torchvision.transforms.functional import to_pil_image
+
+
+def pil_to_binary_mask(pil_image, threshold=0):
+ np_image = np.array(pil_image)
+ grayscale_image = Image.fromarray(np_image).convert("L")
+ binary_mask = np.array(grayscale_image) > threshold
+    # Vectorized thresholding instead of a per-pixel Python loop
+    mask = binary_mask.astype(np.uint8) * 255
+ output_mask = Image.fromarray(mask)
+ return output_mask
+
+
+base_path = 'yisol/IDM-VTON'
+example_path = os.path.join(os.path.dirname(__file__), 'example')
+
+unet = UNet2DConditionModel.from_pretrained(
+ base_path,
+ subfolder="unet",
+ torch_dtype=torch.float16,
+)
+unet.requires_grad_(False)
+tokenizer_one = AutoTokenizer.from_pretrained(
+ base_path,
+ subfolder="tokenizer",
+ revision=None,
+ use_fast=False,
+)
+tokenizer_two = AutoTokenizer.from_pretrained(
+ base_path,
+ subfolder="tokenizer_2",
+ revision=None,
+ use_fast=False,
+)
+noise_scheduler = DDPMScheduler.from_pretrained(base_path, subfolder="scheduler")
+
+text_encoder_one = CLIPTextModel.from_pretrained(
+ base_path,
+ subfolder="text_encoder",
+ torch_dtype=torch.float16,
+)
+text_encoder_two = CLIPTextModelWithProjection.from_pretrained(
+ base_path,
+ subfolder="text_encoder_2",
+ torch_dtype=torch.float16,
+)
+image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ base_path,
+ subfolder="image_encoder",
+ torch_dtype=torch.float16,
+ )
+vae = AutoencoderKL.from_pretrained(base_path,
+ subfolder="vae",
+ torch_dtype=torch.float16,
+)
+
+# "stabilityai/stable-diffusion-xl-base-1.0",
+UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
+ base_path,
+ subfolder="unet_encoder",
+ torch_dtype=torch.float16,
+)
+
+parsing_model = Parsing(0)
+openpose_model = OpenPose(0)
+
+UNet_Encoder.requires_grad_(False)
+image_encoder.requires_grad_(False)
+vae.requires_grad_(False)
+unet.requires_grad_(False)
+text_encoder_one.requires_grad_(False)
+text_encoder_two.requires_grad_(False)
+tensor_transfrom = transforms.Compose(
+ [
+ transforms.ToTensor(),
+ transforms.Normalize([0.5], [0.5]),
+ ]
+ )
+
+pipe = TryonPipeline.from_pretrained(
+ base_path,
+ unet=unet,
+ vae=vae,
+ feature_extractor= CLIPImageProcessor(),
+ text_encoder = text_encoder_one,
+ text_encoder_2 = text_encoder_two,
+ tokenizer = tokenizer_one,
+ tokenizer_2 = tokenizer_two,
+ scheduler = noise_scheduler,
+ image_encoder=image_encoder,
+ torch_dtype=torch.float16,
+)
+pipe.unet_encoder = UNet_Encoder
+
+@spaces.GPU
+def start_tryon(dict,garm_img,garment_des,is_checked,is_checked_crop,denoise_steps,seed):
+ device = "cuda"
+
+ openpose_model.preprocessor.body_estimation.model.to(device)
+ pipe.to(device)
+ pipe.unet_encoder.to(device)
+
+ garm_img= garm_img.convert("RGB").resize((768,1024))
+ human_img_orig = dict["background"].convert("RGB")
+
+ if is_checked_crop:
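+        # Center-crop the person image to a 3:4 aspect ratio so it matches the 768x1024 input resolution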
+ width, height = human_img_orig.size
+ target_width = int(min(width, height * (3 / 4)))
+ target_height = int(min(height, width * (4 / 3)))
+ left = (width - target_width) / 2
+ top = (height - target_height) / 2
+ right = (width + target_width) / 2
+ bottom = (height + target_height) / 2
+ cropped_img = human_img_orig.crop((left, top, right, bottom))
+ crop_size = cropped_img.size
+ human_img = cropped_img.resize((768,1024))
+ else:
+ human_img = human_img_orig.resize((768,1024))
+
+
+ if is_checked:
+ keypoints = openpose_model(human_img.resize((384,512)))
+ model_parse, _ = parsing_model(human_img.resize((384,512)))
+ mask, mask_gray = get_mask_location('hd', "upper_body", model_parse, keypoints)
+ mask = mask.resize((768,1024))
+ else:
+ mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
+ # mask = transforms.ToTensor()(mask)
+ # mask = mask.unsqueeze(0)
+ mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
+ mask_gray = to_pil_image((mask_gray+1.0)/2.0)
+
+
+ human_img_arg = _apply_exif_orientation(human_img.resize((384,512)))
+ human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
+
+
+
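+    # Run DensePose (dp_segm visualization) on the resized person image; the result is used as the pose conditioning image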
+ args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
+ # verbosity = getattr(args, "verbosity", None)
+ pose_img = args.func(args,human_img_arg)
+ pose_img = pose_img[:,:,::-1]
+ pose_img = Image.fromarray(pose_img).resize((768,1024))
+
+ with torch.no_grad():
+ # Extract the images
+ with torch.cuda.amp.autocast():
+ with torch.no_grad():
+ prompt = "model is wearing " + garment_des
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
+ with torch.inference_mode():
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = pipe.encode_prompt(
+ prompt,
+ num_images_per_prompt=1,
+ do_classifier_free_guidance=True,
+ negative_prompt=negative_prompt,
+ )
+
+ prompt = "a photo of " + garment_des
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
+ if not isinstance(prompt, List):
+ prompt = [prompt] * 1
+ if not isinstance(negative_prompt, List):
+ negative_prompt = [negative_prompt] * 1
+ with torch.inference_mode():
+ (
+ prompt_embeds_c,
+ _,
+ _,
+ _,
+ ) = pipe.encode_prompt(
+ prompt,
+ num_images_per_prompt=1,
+ do_classifier_free_guidance=False,
+ negative_prompt=negative_prompt,
+ )
+
+
+
+ pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device,torch.float16)
+ garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device,torch.float16)
+ generator = torch.Generator(device).manual_seed(seed) if seed is not None else None
+ images = pipe(
+ prompt_embeds=prompt_embeds.to(device,torch.float16),
+ negative_prompt_embeds=negative_prompt_embeds.to(device,torch.float16),
+ pooled_prompt_embeds=pooled_prompt_embeds.to(device,torch.float16),
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device,torch.float16),
+ num_inference_steps=denoise_steps,
+ generator=generator,
+ strength = 1.0,
+ pose_img = pose_img.to(device,torch.float16),
+ text_embeds_cloth=prompt_embeds_c.to(device,torch.float16),
+ cloth = garm_tensor.to(device,torch.float16),
+ mask_image=mask,
+ image=human_img,
+ height=1024,
+ width=768,
+ ip_adapter_image = garm_img.resize((768,1024)),
+ guidance_scale=2.0,
+ )[0]
+
+ if is_checked_crop:
+ out_img = images[0].resize(crop_size)
+ human_img_orig.paste(out_img, (int(left), int(top)))
+ return human_img_orig, mask_gray
+ else:
+ return images[0], mask_gray
+ # return images[0], mask_gray
+
+garm_list = os.listdir(os.path.join(example_path,"cloth"))
+garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
+
+human_list = os.listdir(os.path.join(example_path,"human"))
+human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
+
+human_ex_list = []
+for ex_human in human_list_path:
+ ex_dict= {}
+ ex_dict['background'] = ex_human
+ ex_dict['layers'] = None
+ ex_dict['composite'] = None
+ human_ex_list.append(ex_dict)
+
+##default human
+
+
+image_blocks = gr.Blocks().queue()
+with image_blocks as demo:
+ gr.Markdown("## IDM-VTON 👕👔👚")
+ gr.Markdown("Virtual Try-on with your image and garment image. Check out the [source codes](https://github.com/yisol/IDM-VTON) and the [model](https://huggingface.co/yisol/IDM-VTON)")
+ with gr.Row():
+ with gr.Column():
+ imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
+ with gr.Row():
+ is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
+ with gr.Row():
+ is_checked_crop = gr.Checkbox(label="Yes", info="Use auto-crop & resizing",value=False)
+
+ example = gr.Examples(
+ inputs=imgs,
+ examples_per_page=10,
+ examples=human_ex_list
+ )
+
+ with gr.Column():
+ garm_img = gr.Image(label="Garment", sources='upload', type="pil")
+ with gr.Row(elem_id="prompt-container"):
+ with gr.Row():
+ prompt = gr.Textbox(placeholder="Description of garment ex) Short Sleeve Round Neck T-shirts", show_label=False, elem_id="prompt")
+ example = gr.Examples(
+ inputs=garm_img,
+ examples_per_page=8,
+ examples=garm_list_path)
+ with gr.Column():
+ # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
+ masked_img = gr.Image(label="Masked image output", elem_id="masked-img",show_share_button=False)
+ with gr.Column():
+ # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
+ image_out = gr.Image(label="Output", elem_id="output-img",show_share_button=False)
+
+
+
+
+ with gr.Column():
+ try_button = gr.Button(value="Try-on")
+ with gr.Accordion(label="Advanced Settings", open=False):
+ with gr.Row():
+ denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=40, value=30, step=1)
+ seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=42)
+
+
+
+ try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, is_checked,is_checked_crop, denoise_steps, seed], outputs=[image_out,masked_img], api_name='tryon')
+
+
+
+
+image_blocks.launch()
+
diff --git a/app_VTON.py b/app_VTON.py
new file mode 100644
index 0000000000000000000000000000000000000000..2753fc91d9671756121bd02ada9a2093f1856f5f
--- /dev/null
+++ b/app_VTON.py
@@ -0,0 +1,328 @@
+import gradio as gr
+import argparse, torch, os
+from PIL import Image
+from src.tryon_pipeline import StableDiffusionXLInpaintPipeline as TryonPipeline
+from src.unet_hacked_garmnet import UNet2DConditionModel as UNet2DConditionModel_ref
+from src.unet_hacked_tryon import UNet2DConditionModel
+from transformers import (
+ CLIPImageProcessor,
+ CLIPVisionModelWithProjection,
+)
+from diffusers import AutoencoderKL
+from typing import List
+from util.common import open_folder
+from util.image import pil_to_binary_mask, save_output_image
+from utils_mask import get_mask_location
+from torchvision import transforms
+import apply_net
+from preprocess.humanparsing.run_parsing import Parsing
+from preprocess.openpose.run_openpose import OpenPose
+from detectron2.data.detection_utils import convert_PIL_to_numpy,_apply_exif_orientation
+from torchvision.transforms.functional import to_pil_image
+from util.pipeline import quantize_4bit, restart_cpu_offload, torch_gc
+
+parser = argparse.ArgumentParser()
+parser.add_argument("--share", type=str, default=False, help="Set to True to share the app publicly.")
+parser.add_argument("--lowvram", action="store_true", help="Enable CPU offload for model operations.")
+parser.add_argument("--load_mode", default=None, type=str, choices=["4bit", "8bit"], help="Quantization mode for optimization memory consumption")
+parser.add_argument("--fixed_vae", action="store_true", default=True, help="Use fixed vae for FP16.")
+args = parser.parse_args()
+
+load_mode = args.load_mode
+fixed_vae = args.fixed_vae
+
+dtype = torch.float16
+device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
+model_id = 'yisol/IDM-VTON'
+vae_model_id = 'madebyollin/sdxl-vae-fp16-fix'
+
+dtypeQuantize = dtype
+
+if load_mode in ('4bit', '8bit'):
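+    # When 4/8-bit mode is requested, the UNet weights are loaded in FP8 (float8_e4m3fn) to reduce VRAM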
+ dtypeQuantize = torch.float8_e4m3fn
+
+ENABLE_CPU_OFFLOAD = args.lowvram
+torch.backends.cudnn.allow_tf32 = False
+torch.backends.cuda.allow_tf32 = False
+need_restart_cpu_offloading = False
+
+unet = None
+pipe = None
+UNet_Encoder = None
+example_path = os.path.join(os.path.dirname(__file__), 'example')
+
+def start_tryon(dict, garm_img, garment_des, category, is_checked, is_checked_crop, denoise_steps, is_randomize_seed, seed, number_of_images):
+ global pipe, unet, UNet_Encoder, need_restart_cpu_offloading
+
+    if pipe is None:
+ unet = UNet2DConditionModel.from_pretrained(
+ model_id,
+ subfolder="unet",
+ torch_dtype=dtypeQuantize,
+ )
+ if load_mode == '4bit':
+ quantize_4bit(unet)
+
+ unet.requires_grad_(False)
+
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
+ model_id,
+ subfolder="image_encoder",
+ torch_dtype=torch.float16,
+ )
+ if load_mode == '4bit':
+ quantize_4bit(image_encoder)
+
+ if fixed_vae:
+ vae = AutoencoderKL.from_pretrained(vae_model_id, torch_dtype=dtype)
+ else:
+ vae = AutoencoderKL.from_pretrained(model_id,
+ subfolder="vae",
+ torch_dtype=dtype,
+ )
+
+ # "stabilityai/stable-diffusion-xl-base-1.0",
+ UNet_Encoder = UNet2DConditionModel_ref.from_pretrained(
+ model_id,
+ subfolder="unet_encoder",
+ torch_dtype=dtypeQuantize,
+ )
+
+ if load_mode == '4bit':
+ quantize_4bit(UNet_Encoder)
+
+ UNet_Encoder.requires_grad_(False)
+ image_encoder.requires_grad_(False)
+ vae.requires_grad_(False)
+ unet.requires_grad_(False)
+
+ pipe_param = {
+ 'pretrained_model_name_or_path': model_id,
+ 'unet': unet,
+ 'torch_dtype': dtype,
+ 'vae': vae,
+ 'image_encoder': image_encoder,
+ 'feature_extractor': CLIPImageProcessor(),
+ }
+
+ pipe = TryonPipeline.from_pretrained(**pipe_param).to(device)
+ pipe.unet_encoder = UNet_Encoder
+ pipe.unet_encoder.to(pipe.unet.device)
+
+ if load_mode == '4bit':
+ if pipe.text_encoder is not None:
+ quantize_4bit(pipe.text_encoder)
+ if pipe.text_encoder_2 is not None:
+ quantize_4bit(pipe.text_encoder_2)
+
+ else:
+ if ENABLE_CPU_OFFLOAD:
+ need_restart_cpu_offloading =True
+
+ torch_gc()
+ parsing_model = Parsing(0)
+ openpose_model = OpenPose(0)
+ openpose_model.preprocessor.body_estimation.model.to(device)
+ tensor_transfrom = transforms.Compose(
+ [
+ transforms.ToTensor(),
+ transforms.Normalize([0.5], [0.5]),
+ ]
+ )
+
+ if need_restart_cpu_offloading:
+ restart_cpu_offload(pipe, load_mode)
+ elif ENABLE_CPU_OFFLOAD:
+ pipe.enable_model_cpu_offload()
+
+ #if load_mode != '4bit' :
+ # pipe.enable_xformers_memory_efficient_attention()
+
+ garm_img= garm_img.convert("RGB").resize((768,1024))
+ human_img_orig = dict["background"].convert("RGB")
+
+ if is_checked_crop:
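+        # Center-crop the person image to a 3:4 aspect ratio before resizing to 768x1024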
+ width, height = human_img_orig.size
+ target_width = int(min(width, height * (3 / 4)))
+ target_height = int(min(height, width * (4 / 3)))
+ left = (width - target_width) / 2
+ top = (height - target_height) / 2
+ right = (width + target_width) / 2
+ bottom = (height + target_height) / 2
+ cropped_img = human_img_orig.crop((left, top, right, bottom))
+ crop_size = cropped_img.size
+ human_img = cropped_img.resize((768,1024))
+ else:
+ human_img = human_img_orig.resize((768,1024))
+
+ if is_checked:
+ keypoints = openpose_model(human_img.resize((384,512)))
+ model_parse, _ = parsing_model(human_img.resize((384,512)))
+ mask, mask_gray = get_mask_location('hd', category, model_parse, keypoints)
+ mask = mask.resize((768,1024))
+ else:
+ mask = pil_to_binary_mask(dict['layers'][0].convert("RGB").resize((768, 1024)))
+ # mask = transforms.ToTensor()(mask)
+ # mask = mask.unsqueeze(0)
+
+ mask_gray = (1-transforms.ToTensor()(mask)) * tensor_transfrom(human_img)
+ mask_gray = to_pil_image((mask_gray+1.0)/2.0)
+
+ human_img_arg = _apply_exif_orientation(human_img.resize((384,512)))
+ human_img_arg = convert_PIL_to_numpy(human_img_arg, format="BGR")
+
+ args = apply_net.create_argument_parser().parse_args(('show', './configs/densepose_rcnn_R_50_FPN_s1x.yaml', './ckpt/densepose/model_final_162be9.pkl', 'dp_segm', '-v', '--opts', 'MODEL.DEVICE', 'cuda'))
+ # verbosity = getattr(args, "verbosity", None)
+ pose_img = args.func(args,human_img_arg)
+ pose_img = pose_img[:,:,::-1]
+ pose_img = Image.fromarray(pose_img).resize((768,1024))
+
+ if pipe.text_encoder is not None:
+ pipe.text_encoder.to(device)
+
+ if pipe.text_encoder_2 is not None:
+ pipe.text_encoder_2.to(device)
+
+ with torch.no_grad():
+ # Extract the images
+ with torch.cuda.amp.autocast(dtype=dtype):
+ with torch.no_grad():
+ prompt = "model is wearing " + garment_des
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
+ with torch.inference_mode():
+ (
+ prompt_embeds,
+ negative_prompt_embeds,
+ pooled_prompt_embeds,
+ negative_pooled_prompt_embeds,
+ ) = pipe.encode_prompt(
+ prompt,
+ num_images_per_prompt=1,
+ do_classifier_free_guidance=True,
+ negative_prompt=negative_prompt,
+ )
+
+ prompt = "a photo of " + garment_des
+ negative_prompt = "monochrome, lowres, bad anatomy, worst quality, low quality"
+ if not isinstance(prompt, List):
+ prompt = [prompt] * 1
+ if not isinstance(negative_prompt, List):
+ negative_prompt = [negative_prompt] * 1
+ with torch.inference_mode():
+ (
+ prompt_embeds_c,
+ _,
+ _,
+ _,
+ ) = pipe.encode_prompt(
+ prompt,
+ num_images_per_prompt=1,
+ do_classifier_free_guidance=False,
+ negative_prompt=negative_prompt,
+ )
+
+ pose_img = tensor_transfrom(pose_img).unsqueeze(0).to(device,dtype)
+ garm_tensor = tensor_transfrom(garm_img).unsqueeze(0).to(device,dtype)
+ results = []
+ current_seed = seed
+ for i in range(number_of_images):
+ if is_randomize_seed:
+ current_seed = torch.randint(0, 2**32, size=(1,)).item()
+ generator = torch.Generator(device).manual_seed(current_seed) if seed != -1 else None
+ current_seed = current_seed + i
+
+ images = pipe(
+ prompt_embeds=prompt_embeds.to(device,dtype),
+ negative_prompt_embeds=negative_prompt_embeds.to(device,dtype),
+ pooled_prompt_embeds=pooled_prompt_embeds.to(device,dtype),
+ negative_pooled_prompt_embeds=negative_pooled_prompt_embeds.to(device,dtype),
+ num_inference_steps=denoise_steps,
+ generator=generator,
+ strength = 1.0,
+ pose_img = pose_img.to(device,dtype),
+ text_embeds_cloth=prompt_embeds_c.to(device,dtype),
+ cloth = garm_tensor.to(device,dtype),
+ mask_image=mask,
+ image=human_img,
+ height=1024,
+ width=768,
+ ip_adapter_image = garm_img.resize((768,1024)),
+ guidance_scale=2.0,
+ dtype=dtype,
+ device=device,
+ )[0]
+ if is_checked_crop:
+ out_img = images[0].resize(crop_size)
+ human_img_orig.paste(out_img, (int(left), int(top)))
+ img_path = save_output_image(human_img_orig, base_path="outputs", base_filename='img', seed=current_seed)
+ results.append(img_path)
+ else:
+ img_path = save_output_image(images[0], base_path="outputs", base_filename='img')
+ results.append(img_path)
+ return results, mask_gray
+
+garm_list = os.listdir(os.path.join(example_path,"cloth"))
+garm_list_path = [os.path.join(example_path,"cloth",garm) for garm in garm_list]
+
+human_list = os.listdir(os.path.join(example_path,"human"))
+human_list_path = [os.path.join(example_path,"human",human) for human in human_list]
+
+human_ex_list = []
+for ex_human in human_list_path:
+ if "Jensen" in ex_human or "sam1 (1)" in ex_human:
+ ex_dict = {}
+ ex_dict['background'] = ex_human
+ ex_dict['layers'] = None
+ ex_dict['composite'] = None
+ human_ex_list.append(ex_dict)
+
+image_blocks = gr.Blocks().queue()
+with image_blocks as demo:
+ gr.Markdown("## V7 - IDM-VTON 👕👔👚 improved by SECourses and DEVAIEXP: 1-Click Installers Latest Version On : https://www.patreon.com/posts/103022942")
+ gr.Markdown("Virtual Try-on with your image and garment image. Check out the [source codes](https://github.com/yisol/IDM-VTON) and the [model](https://huggingface.co/yisol/IDM-VTON)")
+ with gr.Row():
+ with gr.Column():
+ imgs = gr.ImageEditor(sources='upload', type="pil", label='Human. Mask with pen or use auto-masking', interactive=True)
+ with gr.Row():
+ category = gr.Radio(choices=["upper_body", "lower_body", "dresses"], label="Select Garment Category", value="upper_body")
+ is_checked = gr.Checkbox(label="Yes", info="Use auto-generated mask (Takes 5 seconds)",value=True)
+ with gr.Row():
+ is_checked_crop = gr.Checkbox(label="Yes", info="Use auto-crop & resizing",value=True)
+
+ example = gr.Examples(
+ inputs=imgs,
+ examples_per_page=2,
+ examples=human_ex_list
+ )
+
+ with gr.Column():
+ garm_img = gr.Image(label="Garment", sources='upload', type="pil")
+ with gr.Row(elem_id="prompt-container"):
+ with gr.Row():
+ prompt = gr.Textbox(placeholder="Description of garment ex) Short Sleeve Round Neck T-shirts", show_label=False, elem_id="prompt")
+ example = gr.Examples(
+ inputs=garm_img,
+ examples_per_page=8,
+ examples=garm_list_path)
+ with gr.Column():
+ with gr.Row():
+ # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
+ masked_img = gr.Image(label="Masked image output", elem_id="masked-img",show_share_button=False)
+ with gr.Row():
+ btn_open_outputs = gr.Button("Open Outputs Folder")
+ btn_open_outputs.click(fn=open_folder)
+ with gr.Column():
+ with gr.Row():
+ # image_out = gr.Image(label="Output", elem_id="output-img", height=400)
+ image_gallery = gr.Gallery(label="Generated Images", show_label=True)
+ with gr.Row():
+ try_button = gr.Button(value="Try-on")
+ denoise_steps = gr.Number(label="Denoising Steps", minimum=20, maximum=120, value=30, step=1)
+ seed = gr.Number(label="Seed", minimum=-1, maximum=2147483647, step=1, value=1)
+ is_randomize_seed = gr.Checkbox(label="Randomize seed for each generated image", value=True)
+ number_of_images = gr.Number(label="Number Of Images To Generate (it will start from your input seed and increment by 1)", minimum=1, maximum=9999, value=1, step=1)
+
+
+ try_button.click(fn=start_tryon, inputs=[imgs, garm_img, prompt, category, is_checked, is_checked_crop, denoise_steps, is_randomize_seed, seed, number_of_images], outputs=[image_gallery, masked_img],api_name='tryon')
+
+image_blocks.launch(inbrowser=True,share=args.share)
diff --git a/apply_net.py b/apply_net.py
new file mode 100644
index 0000000000000000000000000000000000000000..732539d616d7dd9d039302f97b5d6438a5cb4892
--- /dev/null
+++ b/apply_net.py
@@ -0,0 +1,359 @@
+#!/usr/bin/env python3
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import argparse
+import glob
+import logging
+import os
+import sys
+from typing import Any, ClassVar, Dict, List
+import torch
+
+from detectron2.config import CfgNode, get_cfg
+from detectron2.data.detection_utils import read_image
+from detectron2.engine.defaults import DefaultPredictor
+from detectron2.structures.instances import Instances
+from detectron2.utils.logger import setup_logger
+
+from densepose import add_densepose_config
+from densepose.structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
+from densepose.utils.logger import verbosity_to_level
+from densepose.vis.base import CompoundVisualizer
+from densepose.vis.bounding_box import ScoredBoundingBoxVisualizer
+from densepose.vis.densepose_outputs_vertex import (
+ DensePoseOutputsTextureVisualizer,
+ DensePoseOutputsVertexVisualizer,
+ get_texture_atlases,
+)
+from densepose.vis.densepose_results import (
+ DensePoseResultsContourVisualizer,
+ DensePoseResultsFineSegmentationVisualizer,
+ DensePoseResultsUVisualizer,
+ DensePoseResultsVVisualizer,
+)
+from densepose.vis.densepose_results_textures import (
+ DensePoseResultsVisualizerWithTexture,
+ get_texture_atlas,
+)
+from densepose.vis.extractor import (
+ CompoundExtractor,
+ DensePoseOutputsExtractor,
+ DensePoseResultExtractor,
+ create_extractor,
+)
+
+DOC = """Apply Net - a tool to print / visualize DensePose results
+"""
+
+LOGGER_NAME = "apply_net"
+logger = logging.getLogger(LOGGER_NAME)
+
+_ACTION_REGISTRY: Dict[str, "Action"] = {}
+
+
+class Action:
+ @classmethod
+ def add_arguments(cls: type, parser: argparse.ArgumentParser):
+ parser.add_argument(
+ "-v",
+ "--verbosity",
+ action="count",
+ help="Verbose mode. Multiple -v options increase the verbosity.",
+ )
+
+
+def register_action(cls: type):
+ """
+ Decorator for action classes to automate action registration
+ """
+ global _ACTION_REGISTRY
+ _ACTION_REGISTRY[cls.COMMAND] = cls
+ return cls
+
+
+class InferenceAction(Action):
+ @classmethod
+ def add_arguments(cls: type, parser: argparse.ArgumentParser):
+ super(InferenceAction, cls).add_arguments(parser)
+ parser.add_argument("cfg", metavar="", help="Config file")
+ parser.add_argument("model", metavar="", help="Model file")
+ parser.add_argument(
+ "--opts",
+ help="Modify config options using the command-line 'KEY VALUE' pairs",
+ default=[],
+ nargs=argparse.REMAINDER,
+ )
+
+ @classmethod
+ def execute(cls: type, args: argparse.Namespace, human_img):
+ logger.info(f"Loading config from {args.cfg}")
+ opts = []
+ cfg = cls.setup_config(args.cfg, args.model, args, opts)
+ logger.info(f"Loading model from {args.model}")
+ predictor = DefaultPredictor(cfg)
+ # logger.info(f"Loading data from {args.input}")
+ # file_list = cls._get_input_file_list(args.input)
+ # if len(file_list) == 0:
+ # logger.warning(f"No input images for {args.input}")
+ # return
+ context = cls.create_context(args, cfg)
+ # for file_name in file_list:
+ # img = read_image(file_name, format="BGR") # predictor expects BGR image.
+ with torch.no_grad():
+ outputs = predictor(human_img)["instances"]
+ out_pose = cls.execute_on_outputs(context, {"image": human_img}, outputs)
+ cls.postexecute(context)
+ return out_pose
+
+ @classmethod
+ def setup_config(
+ cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
+ ):
+ cfg = get_cfg()
+ add_densepose_config(cfg)
+ cfg.merge_from_file(config_fpath)
+ cfg.merge_from_list(args.opts)
+ if opts:
+ cfg.merge_from_list(opts)
+ cfg.MODEL.WEIGHTS = model_fpath
+ cfg.freeze()
+ return cfg
+
+ @classmethod
+ def _get_input_file_list(cls: type, input_spec: str):
+ if os.path.isdir(input_spec):
+ file_list = [
+ os.path.join(input_spec, fname)
+ for fname in os.listdir(input_spec)
+ if os.path.isfile(os.path.join(input_spec, fname))
+ ]
+ elif os.path.isfile(input_spec):
+ file_list = [input_spec]
+ else:
+ file_list = glob.glob(input_spec)
+ return file_list
+
+
+@register_action
+class DumpAction(InferenceAction):
+ """
+ Dump action that outputs results to a pickle file
+ """
+
+ COMMAND: ClassVar[str] = "dump"
+
+ @classmethod
+ def add_parser(cls: type, subparsers: argparse._SubParsersAction):
+ parser = subparsers.add_parser(cls.COMMAND, help="Dump model outputs to a file.")
+ cls.add_arguments(parser)
+ parser.set_defaults(func=cls.execute)
+
+ @classmethod
+ def add_arguments(cls: type, parser: argparse.ArgumentParser):
+ super(DumpAction, cls).add_arguments(parser)
+ parser.add_argument(
+ "--output",
+ metavar="",
+ default="results.pkl",
+ help="File name to save dump to",
+ )
+
+ @classmethod
+ def execute_on_outputs(
+ cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
+ ):
+ image_fpath = entry["file_name"]
+ logger.info(f"Processing {image_fpath}")
+ result = {"file_name": image_fpath}
+ if outputs.has("scores"):
+ result["scores"] = outputs.get("scores").cpu()
+ if outputs.has("pred_boxes"):
+ result["pred_boxes_XYXY"] = outputs.get("pred_boxes").tensor.cpu()
+ if outputs.has("pred_densepose"):
+ if isinstance(outputs.pred_densepose, DensePoseChartPredictorOutput):
+ extractor = DensePoseResultExtractor()
+ elif isinstance(outputs.pred_densepose, DensePoseEmbeddingPredictorOutput):
+ extractor = DensePoseOutputsExtractor()
+ result["pred_densepose"] = extractor(outputs)[0]
+ context["results"].append(result)
+
+ @classmethod
+ def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode):
+ context = {"results": [], "out_fname": args.output}
+ return context
+
+ @classmethod
+ def postexecute(cls: type, context: Dict[str, Any]):
+ out_fname = context["out_fname"]
+ out_dir = os.path.dirname(out_fname)
+ if len(out_dir) > 0 and not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ with open(out_fname, "wb") as hFile:
+ torch.save(context["results"], hFile)
+ logger.info(f"Output saved to {out_fname}")
+
+
+@register_action
+class ShowAction(InferenceAction):
+ """
+ Show action that visualizes selected entries on an image
+ """
+
+ COMMAND: ClassVar[str] = "show"
+ VISUALIZERS: ClassVar[Dict[str, object]] = {
+ "dp_contour": DensePoseResultsContourVisualizer,
+ "dp_segm": DensePoseResultsFineSegmentationVisualizer,
+ "dp_u": DensePoseResultsUVisualizer,
+ "dp_v": DensePoseResultsVVisualizer,
+ "dp_iuv_texture": DensePoseResultsVisualizerWithTexture,
+ "dp_cse_texture": DensePoseOutputsTextureVisualizer,
+ "dp_vertex": DensePoseOutputsVertexVisualizer,
+ "bbox": ScoredBoundingBoxVisualizer,
+ }
+
+ @classmethod
+ def add_parser(cls: type, subparsers: argparse._SubParsersAction):
+ parser = subparsers.add_parser(cls.COMMAND, help="Visualize selected entries")
+ cls.add_arguments(parser)
+ parser.set_defaults(func=cls.execute)
+
+ @classmethod
+ def add_arguments(cls: type, parser: argparse.ArgumentParser):
+ super(ShowAction, cls).add_arguments(parser)
+ parser.add_argument(
+ "visualizations",
+ metavar="",
+ help="Comma separated list of visualizations, possible values: "
+ "[{}]".format(",".join(sorted(cls.VISUALIZERS.keys()))),
+ )
+ parser.add_argument(
+ "--min_score",
+ metavar="",
+ default=0.8,
+ type=float,
+ help="Minimum detection score to visualize",
+ )
+ parser.add_argument(
+ "--nms_thresh", metavar="", default=None, type=float, help="NMS threshold"
+ )
+ parser.add_argument(
+ "--texture_atlas",
+ metavar="",
+ default=None,
+ help="Texture atlas file (for IUV texture transfer)",
+ )
+ parser.add_argument(
+ "--texture_atlases_map",
+ metavar="",
+ default=None,
+ help="JSON string of a dict containing texture atlas files for each mesh",
+ )
+ parser.add_argument(
+ "--output",
+ metavar="",
+ default="outputres.png",
+ help="File name to save output to",
+ )
+
+ @classmethod
+ def setup_config(
+ cls: type, config_fpath: str, model_fpath: str, args: argparse.Namespace, opts: List[str]
+ ):
+ opts.append("MODEL.ROI_HEADS.SCORE_THRESH_TEST")
+ opts.append(str(args.min_score))
+ if args.nms_thresh is not None:
+ opts.append("MODEL.ROI_HEADS.NMS_THRESH_TEST")
+ opts.append(str(args.nms_thresh))
+ cfg = super(ShowAction, cls).setup_config(config_fpath, model_fpath, args, opts)
+ return cfg
+
+ @classmethod
+ def execute_on_outputs(
+ cls: type, context: Dict[str, Any], entry: Dict[str, Any], outputs: Instances
+ ):
+ import cv2
+ import numpy as np
+ visualizer = context["visualizer"]
+ extractor = context["extractor"]
+ # image_fpath = entry["file_name"]
+ # logger.info(f"Processing {image_fpath}")
+ image = cv2.cvtColor(entry["image"], cv2.COLOR_BGR2GRAY)
+ image = np.tile(image[:, :, np.newaxis], [1, 1, 3])
+ data = extractor(outputs)
+ image_vis = visualizer.visualize(image, data)
+
+ return image_vis
+ entry_idx = context["entry_idx"] + 1
+ out_fname = './image-densepose/' + image_fpath.split('/')[-1]
+ out_dir = './image-densepose'
+ out_dir = os.path.dirname(out_fname)
+ if len(out_dir) > 0 and not os.path.exists(out_dir):
+ os.makedirs(out_dir)
+ cv2.imwrite(out_fname, image_vis)
+ logger.info(f"Output saved to {out_fname}")
+ context["entry_idx"] += 1
+
+ @classmethod
+ def postexecute(cls: type, context: Dict[str, Any]):
+ pass
+# python ./apply_net.py show ./configs/densepose_rcnn_R_50_FPN_s1x.yaml https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl /home/alin0222/DressCode/upper_body/images dp_segm -v --opts MODEL.DEVICE cpu
+
+ @classmethod
+ def _get_out_fname(cls: type, entry_idx: int, fname_base: str):
+ base, ext = os.path.splitext(fname_base)
+ return base + ".{0:04d}".format(entry_idx) + ext
+
+ @classmethod
+ def create_context(cls: type, args: argparse.Namespace, cfg: CfgNode) -> Dict[str, Any]:
+ vis_specs = args.visualizations.split(",")
+ visualizers = []
+ extractors = []
+ for vis_spec in vis_specs:
+ texture_atlas = get_texture_atlas(args.texture_atlas)
+ texture_atlases_dict = get_texture_atlases(args.texture_atlases_map)
+ vis = cls.VISUALIZERS[vis_spec](
+ cfg=cfg,
+ texture_atlas=texture_atlas,
+ texture_atlases_dict=texture_atlases_dict,
+ )
+ visualizers.append(vis)
+ extractor = create_extractor(vis)
+ extractors.append(extractor)
+ visualizer = CompoundVisualizer(visualizers)
+ extractor = CompoundExtractor(extractors)
+ context = {
+ "extractor": extractor,
+ "visualizer": visualizer,
+ "out_fname": args.output,
+ "entry_idx": 0,
+ }
+ return context
+
+
+def create_argument_parser() -> argparse.ArgumentParser:
+ parser = argparse.ArgumentParser(
+ description=DOC,
+ formatter_class=lambda prog: argparse.HelpFormatter(prog, max_help_position=120),
+ )
+ parser.set_defaults(func=lambda _: parser.print_help(sys.stdout))
+ subparsers = parser.add_subparsers(title="Actions")
+ for _, action in _ACTION_REGISTRY.items():
+ action.add_parser(subparsers)
+ return parser
+
+
+def main():
+ parser = create_argument_parser()
+ args = parser.parse_args()
+ verbosity = getattr(args, "verbosity", None)
+ global logger
+ logger = setup_logger(name=LOGGER_NAME)
+ logger.setLevel(verbosity_to_level(verbosity))
+ args.func(args)
+
+
+if __name__ == "__main__":
+ main()
+
+
+# python ./apply_net.py show ./configs/densepose_rcnn_R_50_FPN_s1x.yaml https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl /home/alin0222/Dresscode/dresses/humanonly dp_segm -v --opts MODEL.DEVICE cuda
diff --git a/ckpt/densepose/model_final_162be9.pkl b/ckpt/densepose/model_final_162be9.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..1556c53af8286e4584b18130475c0ccdb0a61ad6
--- /dev/null
+++ b/ckpt/densepose/model_final_162be9.pkl
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b8a7382001b16e453bad95ca9dbc68ae8f2b839b304cf90eaf5c27fbdb4dae91
+size 255757821
diff --git a/ckpt/humanparsing/parsing_atr.onnx b/ckpt/humanparsing/parsing_atr.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..28883cf4b0069c96f0f00930798428017425c3fa
--- /dev/null
+++ b/ckpt/humanparsing/parsing_atr.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:04c7d1d070d0e0ae943d86b18cb5aaaea9e278d97462e9cfb270cbbe4cd977f4
+size 266859305
diff --git a/ckpt/humanparsing/parsing_lip.onnx b/ckpt/humanparsing/parsing_lip.onnx
new file mode 100644
index 0000000000000000000000000000000000000000..7d1a879fa30fc002188b0c9fec3cc05064dd1093
--- /dev/null
+++ b/ckpt/humanparsing/parsing_lip.onnx
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8436e1dae96e2601c373d1ace29c8f0978b16357d9038c17a8ba756cca376dbc
+size 266863411
diff --git a/ckpt/openpose/.DS_Store b/ckpt/openpose/.DS_Store
new file mode 100644
index 0000000000000000000000000000000000000000..dcabce6a14f89d7fd47ce0b1d490ade23aa242a2
--- /dev/null
+++ b/ckpt/openpose/.DS_Store
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e953475b1378e1d0566f8ad8de20077ce8610ae23fb2b5f8bfe57104aca8e911
+size 6148
diff --git a/ckpt/openpose/ckpts/body_pose_model.pth b/ckpt/openpose/ckpts/body_pose_model.pth
new file mode 100644
index 0000000000000000000000000000000000000000..9acb77e68f31906a8875f1daef2f3f7ef94acb1e
--- /dev/null
+++ b/ckpt/openpose/ckpts/body_pose_model.pth
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:25a948c16078b0f08e236bda51a385d855ef4c153598947c28c0d47ed94bb746
+size 209267595
diff --git a/configs/Base-DensePose-RCNN-FPN.yaml b/configs/Base-DensePose-RCNN-FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1579187a7004e716eb3a86dbbfebb092d7aca84b
--- /dev/null
+++ b/configs/Base-DensePose-RCNN-FPN.yaml
@@ -0,0 +1,48 @@
+VERSION: 2
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ BACKBONE:
+ NAME: "build_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+ FPN:
+ IN_FEATURES: ["res2", "res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+ SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map
+ ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps)
+ RPN:
+ IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+ PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level
+ PRE_NMS_TOPK_TEST: 1000 # Per FPN level
+ # Detectron1 uses 2000 proposals per-batch,
+ # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+ # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+ POST_NMS_TOPK_TRAIN: 1000
+ POST_NMS_TOPK_TEST: 1000
+
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+DATASETS:
+ TRAIN: ("densepose_coco_2014_train", "densepose_coco_2014_valminusminival")
+ TEST: ("densepose_coco_2014_minival",)
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.01
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+ WARMUP_FACTOR: 0.1
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
diff --git a/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml b/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..36eabfed984b360907f5782d4e8b0232784f8a40
--- /dev/null
+++ b/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://1drv.ms/u/s!Aus8VCZ_C_33dYBMemi9xOUFR0w"
+ BACKBONE:
+ NAME: "build_hrfpn_backbone"
+ RPN:
+ IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5']
+ ROI_HEADS:
+ IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5']
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ CLIP_GRADIENTS:
+ ENABLED: True
+ CLIP_TYPE: "norm"
+ BASE_LR: 0.03
diff --git a/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w40_s1x.yaml b/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w40_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0ca8085e154c40a5b0f42a17575d2d48328619f0
--- /dev/null
+++ b/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w40_s1x.yaml
@@ -0,0 +1,23 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://1drv.ms/u/s!Aus8VCZ_C_33ck0gvo5jfoWBOPo"
+ BACKBONE:
+ NAME: "build_hrfpn_backbone"
+ RPN:
+ IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5']
+ ROI_HEADS:
+ IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5']
+ HRNET:
+ STAGE2:
+ NUM_CHANNELS: [40, 80]
+ STAGE3:
+ NUM_CHANNELS: [40, 80, 160]
+ STAGE4:
+ NUM_CHANNELS: [40, 80, 160, 320]
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ CLIP_GRADIENTS:
+ ENABLED: True
+ CLIP_TYPE: "norm"
+ BASE_LR: 0.03
diff --git a/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w48_s1x.yaml b/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w48_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a3f437ab57ae0ff48cd4a97cbda987346f9a5a24
--- /dev/null
+++ b/configs/HRNet/densepose_rcnn_HRFPN_HRNet_w48_s1x.yaml
@@ -0,0 +1,23 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://1drv.ms/u/s!Aus8VCZ_C_33dKvqI6pBZlifgJk"
+ BACKBONE:
+ NAME: "build_hrfpn_backbone"
+ RPN:
+ IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5']
+ ROI_HEADS:
+ IN_FEATURES: ['p1', 'p2', 'p3', 'p4', 'p5']
+ HRNET:
+ STAGE2:
+ NUM_CHANNELS: [48, 96]
+ STAGE3:
+ NUM_CHANNELS: [48, 96, 192]
+ STAGE4:
+ NUM_CHANNELS: [48, 96, 192, 384]
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ CLIP_GRADIENTS:
+ ENABLED: True
+ CLIP_TYPE: "norm"
+ BASE_LR: 0.03
diff --git a/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml b/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e92340ee0cdba2abd0a35114cbf3e78b04435dfe
--- /dev/null
+++ b/configs/cse/Base-DensePose-RCNN-FPN-Human.yaml
@@ -0,0 +1,20 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ ROI_DENSEPOSE_HEAD:
+ CSE:
+ EMBEDDERS:
+ "smpl_27554":
+ TYPE: vertex_feature
+ NUM_VERTICES: 27554
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_smpl_27554_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_coco_2014_train_cse"
+ - "densepose_coco_2014_valminusminival_cse"
+ TEST:
+ - "densepose_coco_2014_minival_cse"
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "smpl_27554"
diff --git a/configs/cse/Base-DensePose-RCNN-FPN.yaml b/configs/cse/Base-DensePose-RCNN-FPN.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..de3b26009bdee95666248f99cd243fe37e7fd8bd
--- /dev/null
+++ b/configs/cse/Base-DensePose-RCNN-FPN.yaml
@@ -0,0 +1,60 @@
+VERSION: 2
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ BACKBONE:
+ NAME: "build_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+ FPN:
+ IN_FEATURES: ["res2", "res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+ SIZES: [[32], [64], [128], [256], [512]] # One size for each in feature map
+ ASPECT_RATIOS: [[0.5, 1.0, 2.0]] # Three aspect ratios (same for all in feature maps)
+ RPN:
+ IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+ PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level
+ PRE_NMS_TOPK_TEST: 1000 # Per FPN level
+ # Detectron1 uses 2000 proposals per-batch,
+ # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+ # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+ POST_NMS_TOPK_TRAIN: 1000
+ POST_NMS_TOPK_TEST: 1000
+
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ POOLER_SAMPLING_RATIO: 2
+ POOLER_TYPE: "ROIAlign"
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+ PREDICTOR_NAME: "DensePoseEmbeddingPredictor"
+ LOSS_NAME: "DensePoseCseLoss"
+ CSE:
+ # embedding loss, possible values:
+ # - "EmbeddingLoss"
+ # - "SoftEmbeddingLoss"
+ EMBED_LOSS_NAME: "EmbeddingLoss"
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.01
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+ WARMUP_FACTOR: 0.1
+ CLIP_GRADIENTS:
+ CLIP_TYPE: norm
+ CLIP_VALUE: 1.0
+ ENABLED: true
+ NORM_TYPE: 2.0
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+DENSEPOSE_EVALUATION:
+ TYPE: cse
+ STORAGE: file
diff --git a/configs/cse/densepose_rcnn_R_101_FPN_DL_s1x.yaml b/configs/cse/densepose_rcnn_R_101_FPN_DL_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..69d858902671e683b884b32c3c1448a44dc3995e
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_101_FPN_DL_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ CSE:
+ EMBED_LOSS_NAME: "EmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml b/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..141657cdab24a2f591eeef763aef29543c43108e
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_101_FPN_DL_soft_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_101_FPN_s1x.yaml b/configs/cse/densepose_rcnn_R_101_FPN_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2eea1e2c3cecc7bba1bfd6f2332227bd3d0f5ed
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_101_FPN_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ CSE:
+ EMBED_LOSS_NAME: "EmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_101_FPN_soft_s1x.yaml b/configs/cse/densepose_rcnn_R_101_FPN_soft_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1c362e1f9e93f9b9b458532f5318518396404d9f
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_101_FPN_soft_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_DL_s1x.yaml b/configs/cse/densepose_rcnn_R_50_FPN_DL_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..26684deaa9c72aab1408dbe3abb6ac3a9b6a17ac
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_DL_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ CSE:
+ EMBED_LOSS_NAME: "EmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_DL_soft_s1x.yaml b/configs/cse/densepose_rcnn_R_50_FPN_DL_soft_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b53501d29b84e9ff4088ce98bc83688e89e546ed
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_DL_soft_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml b/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..c186625a86cc76441b9edeefeabd7caf44af7755
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ CSE:
+ EMBED_LOSS_NAME: "EmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_16k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_16k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..69ab22669e2176b6ec661fc982be7412abb5e0e8
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_16k.yaml
@@ -0,0 +1,133 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl"
+ "dog_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds2_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds2_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds2_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds2_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CATEGORY_MAPS:
+ "densepose_lvis_v1_ds2_train_v1":
+ "1202": 943 # zebra -> sheep
+ "569": 943 # horse -> sheep
+ "496": 943 # giraffe -> sheep
+ "422": 943 # elephant -> sheep
+ "80": 943 # cow -> sheep
+ "76": 943 # bear -> sheep
+ "225": 943 # cat -> sheep
+ "378": 943 # dog -> sheep
+ "densepose_lvis_v1_ds2_val_v1":
+ "1202": 943 # zebra -> sheep
+ "569": 943 # horse -> sheep
+ "496": 943 # giraffe -> sheep
+ "422": 943 # elephant -> sheep
+ "80": 943 # cow -> sheep
+ "76": 943 # bear -> sheep
+ "225": 943 # cat -> sheep
+ "378": 943 # dog -> sheep
+ CLASS_TO_MESH_NAME_MAPPING:
+    # Note: different classes are mapped to a single class;
+    # the mesh is chosen based on GT data, so this value
+    # has no particular meaning
+ "0": "sheep_5004"
+SOLVER:
+ MAX_ITER: 16000
+ STEPS: (12000, 14000)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..921a9c125d9da982fb88172acc7825ba3c583370
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_CA_finetune_4k.yaml
@@ -0,0 +1,133 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_5001":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5001
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl"
+ "dog_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds1_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds1_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds1_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds1_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CATEGORY_MAPS:
+ "densepose_lvis_v1_ds1_train_v1":
+ "1202": 943 # zebra -> sheep
+ "569": 943 # horse -> sheep
+ "496": 943 # giraffe -> sheep
+ "422": 943 # elephant -> sheep
+ "80": 943 # cow -> sheep
+ "76": 943 # bear -> sheep
+ "225": 943 # cat -> sheep
+ "378": 943 # dog -> sheep
+ "densepose_lvis_v1_ds1_val_v1":
+ "1202": 943 # zebra -> sheep
+ "569": 943 # horse -> sheep
+ "496": 943 # giraffe -> sheep
+ "422": 943 # elephant -> sheep
+ "80": 943 # cow -> sheep
+ "76": 943 # bear -> sheep
+ "225": 943 # cat -> sheep
+ "378": 943 # dog -> sheep
+ CLASS_TO_MESH_NAME_MAPPING:
+    # Note: different classes are mapped to a single class;
+    # the mesh is chosen based on GT data, so this value
+    # has no particular meaning
+ "0": "sheep_5004"
+SOLVER:
+ MAX_ITER: 4000
+ STEPS: (3000, 3500)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..1b5a098d171e508fcb9dd8088ecc1799c3068efc
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_16k.yaml
@@ -0,0 +1,119 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k/270668502/model_final_21b1d2.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl"
+ "dog_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds2_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds2_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds2_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds2_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_7466"
+ "3": "dog_7466"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 16000
+ STEPS: (12000, 14000)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_i2m_16k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_i2m_16k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18d6dacf4b62e609aa85735a87daa8d2506000d7
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_i2m_16k.yaml
@@ -0,0 +1,121 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k/270668502/model_final_21b1d2.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ PIX_TO_SHAPE_CYCLE_LOSS:
+ ENABLED: True
+ EMBEDDERS:
+ "cat_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl"
+ "dog_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds2_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds2_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds2_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds2_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_7466"
+ "3": "dog_7466"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 16000
+ STEPS: (12000, 14000)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_m2m_16k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_m2m_16k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6b798ae21204b9310adae33040c870253edc68ee
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_I0_finetune_m2m_16k.yaml
@@ -0,0 +1,138 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k/267687159/model_final_354e61.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ SHAPE_TO_SHAPE_CYCLE_LOSS:
+ ENABLED: True
+ EMBEDDERS:
+ "cat_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl"
+ "dog_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+ "smpl_27554":
+ TYPE: vertex_feature
+ NUM_VERTICES: 27554
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_smpl_27554_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds2_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds2_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds2_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds2_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_7466"
+ "3": "dog_7466"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 16000
+ STEPS: (12000, 14000)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
+ MESH_ALIGNMENT_MESH_NAMES:
+ - bear_4936
+ - cow_5002
+ - cat_7466
+ - dog_7466
+ - elephant_5002
+ - giraffe_5002
+ - horse_5004
+ - sheep_5004
+ - zebra_5002
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_16k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_16k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b1462e374377fbf448e176951794face175b5002
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_16k.yaml
@@ -0,0 +1,119 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl"
+ "dog_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds2_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds2_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds2_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds2_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_7466"
+ "3": "dog_7466"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 16000
+ STEPS: (12000, 14000)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ba4b81dde2ef53749b096f137ac658563fdad857
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml
@@ -0,0 +1,119 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_5001":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5001
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl"
+ "dog_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds1_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds1_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds1_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds1_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_5001"
+ "3": "dog_5002"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 4000
+ STEPS: (3000, 3500)
+DENSEPOSE_EVALUATION:
+ EVALUATE_MESH_ALIGNMENT: True
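
The animal finetune configs above pair one embedder per mesh (the numeric suffix in each mesh name is its vertex count) with a CLASS_TO_MESH_NAME_MAPPING that routes detection classes to meshes. Below is a minimal inspection sketch, assuming the DensePose project's add_densepose_config is importable and registers the CSE and dataset keys these files rely on; it is not part of this patch.

from detectron2.config import get_cfg
from densepose import add_densepose_config  # assumed to register ROI_DENSEPOSE_HEAD.CSE and DATASETS.* keys

cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file("configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_4k.yaml")

# One embedder per mesh; the name suffix should equal NUM_VERTICES.
for mesh_name, embedder in cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.items():
    print(mesh_name, embedder.NUM_VERTICES, embedder.FEATURE_DIM)

# Nine detection classes, each routed to a mesh by name.
print(cfg.MODEL.ROI_HEADS.NUM_CLASSES)                 # 9
print(dict(cfg.DATASETS.CLASS_TO_MESH_NAME_MAPPING))   # {"0": "bear_4936", ...}
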
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..bb6136e274ca64aa2285698664d3243519d1979f
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_maskonly_24k.yaml
@@ -0,0 +1,118 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBED_LOSS_WEIGHT: 0.0
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_7466_256.pkl"
+ "dog_7466":
+ TYPE: vertex_feature
+ NUM_VERTICES: 7466
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_7466_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_ds2_train_v1"
+ TEST:
+ - "densepose_lvis_v1_ds2_val_v1"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_ds2_train_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_ds2_val_v1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_7466"
+ "3": "dog_7466"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 24000
+ STEPS: (20000, 22000)
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3bccb7837a2e4b905b4e3c7af465c3be3a44452d
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_chimps_finetune_4k.yaml
@@ -0,0 +1,29 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x/250533982/model_final_2c4512.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ GEODESIC_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "chimp_5029":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5029
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_chimp_5029_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_chimps_cse_train"
+ TEST:
+ - "densepose_chimps_cse_val"
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "chimp_5029"
+SOLVER:
+ MAX_ITER: 4000
+ STEPS: (3000, 3500)
diff --git a/configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml b/configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9662fb8f8a4e9f7b01f41ddb79a3469ecab7032b
--- /dev/null
+++ b/configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml
@@ -0,0 +1,12 @@
+_BASE_: "Base-DensePose-RCNN-FPN-Human.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
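
densepose_rcnn_R_50_FPN_soft_s1x.yaml above is the human CSE baseline; its released checkpoint (the model_final_2c4512.pkl URL used as WEIGHTS by the animal finetune configs earlier in this patch) can be loaded with the same config pattern as in the previous sketch. A DefaultPredictor-based sketch follows; the input image path is a placeholder, and pred_densepose is where the per-instance CSE outputs should appear.

import cv2
from detectron2.config import get_cfg
from detectron2.engine import DefaultPredictor
from densepose import add_densepose_config

cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file("configs/cse/densepose_rcnn_R_50_FPN_soft_s1x.yaml")
cfg.MODEL.WEIGHTS = (
    "https://dl.fbaipublicfiles.com/densepose/cse/densepose_rcnn_R_50_FPN_soft_s1x"
    "/250533982/model_final_2c4512.pkl"
)
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.5

predictor = DefaultPredictor(cfg)
instances = predictor(cv2.imread("input.jpg"))["instances"]  # "input.jpg" is a placeholder path
print(len(instances), instances.pred_boxes)  # CSE predictions should be in instances.pred_densepose
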
diff --git a/configs/densepose_rcnn_R_101_FPN_DL_WC1M_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_DL_WC1M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3c16763c532499c1a0c62fb8c81a2ab97be3a1ec
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_DL_WC1M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..15475b1ac3bb7272a7ebc0061a55119ffd2591b9
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_DL_WC1_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0cbe07f3bb0027bb7ecdc86f96d60790382b477b
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_DL_WC2M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..7546b967ab89129c9a276f19b1cf2d6b59f1a462
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_DL_WC2_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..045f7f02f1b4eb0c0ef1733c3ac65e3aa70168de
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_DL_s1x.yaml
@@ -0,0 +1,10 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_101_FPN_WC1M_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_WC1M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..9334e18655d4451457a58c6ce945e01855f95105
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_WC1M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..ace62094fbc4ce2024810333c11c7a955d8eeb22
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_WC1_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_101_FPN_WC2M_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_WC2M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..90f0be2805cd04e83c25d041d35ae66c90ce2b95
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_WC2M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..766c098f6dcdd1fb3f67957d7d1d982b37747b96
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_WC2_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_101_FPN_s1x.yaml b/configs/densepose_rcnn_R_101_FPN_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..af44fb767edf9bf093463e62f93e070d0d019c5a
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_s1x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml b/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8e79a1b9549cf19ed4a43cf9caf3dc88f6133310
--- /dev/null
+++ b/configs/densepose_rcnn_R_101_FPN_s1x_legacy.yaml
@@ -0,0 +1,17 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-101.pkl"
+ RESNETS:
+ DEPTH: 101
+ ROI_DENSEPOSE_HEAD:
+ NUM_COARSE_SEGM_CHANNELS: 15
+ POOLER_RESOLUTION: 14
+ HEATMAP_SIZE: 56
+ INDEX_WEIGHTS: 2.0
+ PART_WEIGHTS: 0.3
+ POINT_REGRESSION_WEIGHTS: 0.1
+ DECODER_ON: False
+SOLVER:
+ BASE_LR: 0.002
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_DL_WC1M_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_DL_WC1M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..18a417a9a76d388810d46d1ee738d8b19abf0db0
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_DL_WC1M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f3720eff56ce042a68da6c99f484b963cae2c7d9
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_DL_WC1_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_DL_WC2M_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_DL_WC2M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a413d2a0d1549702fb45a2e50056fe0abde941f
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_DL_WC2M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5a47cc05e6e9dc882778c6b502d93cbcec88fb88
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_DL_WC2_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..52a170b4a28289ad943314f77256e34800d23121
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_DL_s1x.yaml
@@ -0,0 +1,10 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..8a81f2a143cbfcd2dbc92f0fc5c86f951b9b7adf
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_WC1M_s1x.yaml
@@ -0,0 +1,20 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ CLIP_TYPE: norm
+ CLIP_VALUE: 100.0
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d36e54256ac22f1b01604e54430da24972f06eeb
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_WC1_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_50_FPN_WC2M_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_WC2M_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5cf29eacd57626c676ed4c960a3e97e552b6dbdf
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_WC2M_s1x.yaml
@@ -0,0 +1,18 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..e880d469564a3757ba3f4d708054074cefda49b6
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_WC2_s1x.yaml
@@ -0,0 +1,16 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/densepose_rcnn_R_50_FPN_s1x.yaml b/configs/densepose_rcnn_R_50_FPN_s1x.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d2dd14c6f92f3850b99e6f1c828c0fcee52120e1
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_s1x.yaml
@@ -0,0 +1,8 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+SOLVER:
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml b/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6c5391f3b3c3d437312a290d29b0656cb3804b25
--- /dev/null
+++ b/configs/densepose_rcnn_R_50_FPN_s1x_legacy.yaml
@@ -0,0 +1,17 @@
+_BASE_: "Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ NUM_COARSE_SEGM_CHANNELS: 15
+ POOLER_RESOLUTION: 14
+ HEATMAP_SIZE: 56
+ INDEX_WEIGHTS: 2.0
+ PART_WEIGHTS: 0.3
+ POINT_REGRESSION_WEIGHTS: 0.1
+ DECODER_ON: False
+SOLVER:
+ BASE_LR: 0.002
+ MAX_ITER: 130000
+ STEPS: (100000, 120000)
diff --git a/configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml b/configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f09d723f3cb9eef94223c5926dbb7731397304c9
--- /dev/null
+++ b/configs/evolution/Base-RCNN-FPN-Atop10P_CA.yaml
@@ -0,0 +1,91 @@
+MODEL:
+ META_ARCHITECTURE: "GeneralizedRCNN"
+ BACKBONE:
+ NAME: "build_resnet_fpn_backbone"
+ RESNETS:
+ OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+ FPN:
+ IN_FEATURES: ["res2", "res3", "res4", "res5"]
+ ANCHOR_GENERATOR:
+    SIZES: [[32], [64], [128], [256], [512]]  # One size for each input feature map
+    ASPECT_RATIOS: [[0.5, 1.0, 2.0]]  # Three aspect ratios (same for all input feature maps)
+ RPN:
+ IN_FEATURES: ["p2", "p3", "p4", "p5", "p6"]
+ PRE_NMS_TOPK_TRAIN: 2000 # Per FPN level
+ PRE_NMS_TOPK_TEST: 1000 # Per FPN level
+ # Detectron1 uses 2000 proposals per-batch,
+ # (See "modeling/rpn/rpn_outputs.py" for details of this legacy issue)
+ # which is approximately 1000 proposals per-image since the default batch size for FPN is 2.
+ POST_NMS_TOPK_TRAIN: 1000
+ POST_NMS_TOPK_TEST: 1000
+ ROI_HEADS:
+ NAME: "StandardROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_BOX_HEAD:
+ NAME: "FastRCNNConvFCHead"
+ NUM_FC: 2
+ POOLER_RESOLUTION: 7
+ ROI_MASK_HEAD:
+ NAME: "MaskRCNNConvUpsampleHead"
+ NUM_CONV: 4
+ POOLER_RESOLUTION: 14
+DATASETS:
+ TRAIN: ("base_coco_2017_train", "densepose_coco_2014_train")
+ TEST: ("densepose_chimps",)
+ CATEGORY_MAPS:
+ "base_coco_2017_train":
+ "16": 1 # bird -> person
+ "17": 1 # cat -> person
+ "18": 1 # dog -> person
+ "19": 1 # horse -> person
+ "20": 1 # sheep -> person
+ "21": 1 # cow -> person
+ "22": 1 # elephant -> person
+ "23": 1 # bear -> person
+ "24": 1 # zebra -> person
+      "25": 1 # giraffe -> person
+ "base_coco_2017_val":
+ "16": 1 # bird -> person
+ "17": 1 # cat -> person
+ "18": 1 # dog -> person
+ "19": 1 # horse -> person
+ "20": 1 # sheep -> person
+ "21": 1 # cow -> person
+ "22": 1 # elephant -> person
+ "23": 1 # bear -> person
+ "24": 1 # zebra -> person
+      "25": 1 # giraffe -> person
+ WHITELISTED_CATEGORIES:
+ "base_coco_2017_train":
+ - 1 # person
+ - 16 # bird
+ - 17 # cat
+ - 18 # dog
+ - 19 # horse
+ - 20 # sheep
+ - 21 # cow
+ - 22 # elephant
+ - 23 # bear
+ - 24 # zebra
+      - 25 # giraffe
+ "base_coco_2017_val":
+ - 1 # person
+ - 16 # bird
+ - 17 # cat
+ - 18 # dog
+ - 19 # horse
+ - 20 # sheep
+ - 21 # cow
+ - 22 # elephant
+ - 23 # bear
+ - 24 # zebra
+      - 25 # giraffe
+SOLVER:
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.02
+ STEPS: (60000, 80000)
+ MAX_ITER: 90000
+INPUT:
+ MIN_SIZE_TRAIN: (640, 672, 704, 736, 768, 800)
+VERSION: 2
diff --git a/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..6296692d5ff15da24f87adb6327a62d9f4a34892
--- /dev/null
+++ b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml
@@ -0,0 +1,28 @@
+_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+ INDEX_WEIGHTS: 1.0
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ WARMUP_FACTOR: 0.025
+ MAX_ITER: 270000
+ STEPS: (210000, 250000)
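
The config above lists only its deltas; detectron2 resolves the _BASE_ reference relative to the config file's own directory when merging, so the category maps, datasets, and input sizes from Base-RCNN-FPN-Atop10P_CA.yaml are inherited. A small sketch under the same add_densepose_config assumption as above:

from detectron2.config import get_cfg
from densepose import add_densepose_config

cfg = get_cfg()
add_densepose_config(cfg)
# _BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml" is resolved next to this file.
cfg.merge_from_file("configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA.yaml")

print(cfg.DATASETS.TRAIN)     # ('base_coco_2017_train', 'densepose_coco_2014_train'), from the base
print(cfg.SOLVER.MAX_ITER)    # 270000, overriding the base's 90000
print(cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME)  # 'DensePoseDeepLabHead'
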
diff --git a/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..033918e0daec8c225306dafac3a5fe9923189e53
--- /dev/null
+++ b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml
@@ -0,0 +1,56 @@
+_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
+MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+ RESNETS:
+ DEPTH: 50
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+BOOTSTRAP_DATASETS:
+ - DATASET: "chimpnsee"
+ RATIO: 1.0
+ IMAGE_LOADER:
+ TYPE: "video_keyframe"
+ SELECT:
+ STRATEGY: "random_k"
+ NUM_IMAGES: 4
+ TRANSFORM:
+ TYPE: "resize"
+ MIN_SIZE: 800
+ MAX_SIZE: 1333
+ BATCH_SIZE: 8
+ NUM_WORKERS: 1
+ INFERENCE:
+ INPUT_BATCH_SIZE: 1
+ OUTPUT_BATCH_SIZE: 1
+ DATA_SAMPLER:
+ # supported types:
+ # densepose_uniform
+ # densepose_UV_confidence
+ # densepose_fine_segm_confidence
+ # densepose_coarse_segm_confidence
+ TYPE: "densepose_coarse_segm_confidence"
+ COUNT_PER_CLASS: 8
+ FILTER:
+ TYPE: "detection_score"
+ MIN_VALUE: 0.8
+BOOTSTRAP_MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 270000
+ STEPS: (210000, 250000)
diff --git a/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..5814a4a01fd772674fa40c0cba34666aed87b33a
--- /dev/null
+++ b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml
@@ -0,0 +1,56 @@
+_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
+MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+ RESNETS:
+ DEPTH: 50
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+BOOTSTRAP_DATASETS:
+ - DATASET: "chimpnsee"
+ RATIO: 1.0
+ IMAGE_LOADER:
+ TYPE: "video_keyframe"
+ SELECT:
+ STRATEGY: "random_k"
+ NUM_IMAGES: 4
+ TRANSFORM:
+ TYPE: "resize"
+ MIN_SIZE: 800
+ MAX_SIZE: 1333
+ BATCH_SIZE: 8
+ NUM_WORKERS: 1
+ INFERENCE:
+ INPUT_BATCH_SIZE: 1
+ OUTPUT_BATCH_SIZE: 1
+ DATA_SAMPLER:
+ # supported types:
+ # densepose_uniform
+ # densepose_UV_confidence
+ # densepose_fine_segm_confidence
+ # densepose_coarse_segm_confidence
+ TYPE: "densepose_fine_segm_confidence"
+ COUNT_PER_CLASS: 8
+ FILTER:
+ TYPE: "detection_score"
+ MIN_VALUE: 0.8
+BOOTSTRAP_MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 270000
+ STEPS: (210000, 250000)
diff --git a/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d591ea6e22282f43fff0b44131e0913aa7261276
--- /dev/null
+++ b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml
@@ -0,0 +1,56 @@
+_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
+MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+ RESNETS:
+ DEPTH: 50
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+BOOTSTRAP_DATASETS:
+ - DATASET: "chimpnsee"
+ RATIO: 1.0
+ IMAGE_LOADER:
+ TYPE: "video_keyframe"
+ SELECT:
+ STRATEGY: "random_k"
+ NUM_IMAGES: 4
+ TRANSFORM:
+ TYPE: "resize"
+ MIN_SIZE: 800
+ MAX_SIZE: 1333
+ BATCH_SIZE: 8
+ NUM_WORKERS: 1
+ INFERENCE:
+ INPUT_BATCH_SIZE: 1
+ OUTPUT_BATCH_SIZE: 1
+ DATA_SAMPLER:
+ # supported types:
+ # densepose_uniform
+ # densepose_UV_confidence
+ # densepose_fine_segm_confidence
+ # densepose_coarse_segm_confidence
+ TYPE: "densepose_uniform"
+ COUNT_PER_CLASS: 8
+ FILTER:
+ TYPE: "detection_score"
+ MIN_VALUE: 0.8
+BOOTSTRAP_MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 270000
+ STEPS: (210000, 250000)
diff --git a/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..110acff5a54247abb7b344672038b71e24167f33
--- /dev/null
+++ b/configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml
@@ -0,0 +1,56 @@
+_BASE_: "Base-RCNN-FPN-Atop10P_CA.yaml"
+MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+ RESNETS:
+ DEPTH: 50
+ DENSEPOSE_ON: True
+ ROI_HEADS:
+ NAME: "DensePoseROIHeads"
+ IN_FEATURES: ["p2", "p3", "p4", "p5"]
+ NUM_CLASSES: 1
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ SEGM_CONFIDENCE:
+ ENABLED: True
+ POINT_REGRESSION_WEIGHTS: 0.0005
+ POOLER_TYPE: "ROIAlign"
+ NUM_COARSE_SEGM_CHANNELS: 2
+ COARSE_SEGM_TRAINED_BY_MASKS: True
+BOOTSTRAP_DATASETS:
+ - DATASET: "chimpnsee"
+ RATIO: 1.0
+ IMAGE_LOADER:
+ TYPE: "video_keyframe"
+ SELECT:
+ STRATEGY: "random_k"
+ NUM_IMAGES: 4
+ TRANSFORM:
+ TYPE: "resize"
+ MIN_SIZE: 800
+ MAX_SIZE: 1333
+ BATCH_SIZE: 8
+ NUM_WORKERS: 1
+ INFERENCE:
+ INPUT_BATCH_SIZE: 1
+ OUTPUT_BATCH_SIZE: 1
+ DATA_SAMPLER:
+ # supported types:
+ # densepose_uniform
+ # densepose_UV_confidence
+ # densepose_fine_segm_confidence
+ # densepose_coarse_segm_confidence
+ TYPE: "densepose_UV_confidence"
+ COUNT_PER_CLASS: 8
+ FILTER:
+ TYPE: "detection_score"
+ MIN_VALUE: 0.8
+BOOTSTRAP_MODEL:
+ WEIGHTS: https://dl.fbaipublicfiles.com/densepose/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA/217578784/model_final_9fe1cc.pkl
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 270000
+ STEPS: (210000, 250000)
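
The four _B_* bootstrap configs above are identical except for BOOTSTRAP_DATASETS[0].DATA_SAMPLER.TYPE (coarse-segm confidence, fine-segm confidence, uniform, UV confidence). A quick check that reads the raw YAML directly, assuming PyYAML is available and the paths are relative to the repository root:

import yaml

paths = [
    "configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_coarsesegm.yaml",
    "configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_finesegm.yaml",
    "configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uniform.yaml",
    "configs/evolution/densepose_R_50_FPN_DL_WC1M_3x_Atop10P_CA_B_uv.yaml",
]
for path in paths:
    with open(path) as f:
        raw = yaml.safe_load(f)
    # Only the data sampler type differs between these four files.
    print(path.rsplit("/", 1)[-1], raw["BOOTSTRAP_DATASETS"][0]["DATA_SAMPLER"]["TYPE"])
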
diff --git a/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_DL_instant_test.yaml b/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_DL_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..3b43f75da549a9e5148c8528b5d375317680d738
--- /dev/null
+++ b/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_DL_instant_test.yaml
@@ -0,0 +1,11 @@
+_BASE_: "../../cse/Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival_100_cse",)
+ TEST: ("densepose_coco_2014_minival_100_cse",)
+SOLVER:
+ MAX_ITER: 40
+ STEPS: (30,)
diff --git a/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml b/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..a2c49a2d14e5665af117972d126e25422e37b2b9
--- /dev/null
+++ b/configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml
@@ -0,0 +1,126 @@
+_BASE_: "../../cse/Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_HEADS:
+ NUM_CLASSES: 9
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseV1ConvXHead"
+ CSE:
+ EMBED_LOSS_NAME: "SoftEmbeddingLoss"
+ EMBEDDING_DIST_GAUSS_SIGMA: 0.1
+ EMBEDDERS:
+ "cat_5001":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5001
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cat_5001_256.pkl"
+ "dog_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_dog_5002_256.pkl"
+ "sheep_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_sheep_5004_256.pkl"
+ "horse_5004":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5004
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_horse_5004_256.pkl"
+ "zebra_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_zebra_5002_256.pkl"
+ "giraffe_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_giraffe_5002_256.pkl"
+ "elephant_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_elephant_5002_256.pkl"
+ "cow_5002":
+ TYPE: vertex_feature
+ NUM_VERTICES: 5002
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_cow_5002_256.pkl"
+ "bear_4936":
+ TYPE: vertex_feature
+ NUM_VERTICES: 4936
+ FEATURE_DIM: 256
+ FEATURES_TRAINABLE: False
+ IS_TRAINABLE: True
+ INIT_FILE: "https://dl.fbaipublicfiles.com/densepose/data/cse/lbo/phi_bear_4936_256.pkl"
+DATASETS:
+ TRAIN:
+ - "densepose_lvis_v1_train1"
+ - "densepose_lvis_v1_train2"
+ TEST:
+ - "densepose_lvis_v1_val_animals_100"
+ WHITELISTED_CATEGORIES:
+ "densepose_lvis_v1_train1":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_train2":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ "densepose_lvis_v1_val_animals_100":
+ - 943 # sheep
+ - 1202 # zebra
+ - 569 # horse
+ - 496 # giraffe
+ - 422 # elephant
+ - 80 # cow
+ - 76 # bear
+ - 225 # cat
+ - 378 # dog
+ CLASS_TO_MESH_NAME_MAPPING:
+ "0": "bear_4936"
+ "1": "cow_5002"
+ "2": "cat_5001"
+ "3": "dog_5002"
+ "4": "elephant_5002"
+ "5": "giraffe_5002"
+ "6": "horse_5004"
+ "7": "sheep_5004"
+ "8": "zebra_5002"
+SOLVER:
+ MAX_ITER: 40
+ STEPS: (30,)
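In this config the nine detector classes (ROI_HEADS.NUM_CLASSES: 9) are tied to meshes through CLASS_TO_MESH_NAME_MAPPING, and each mesh name selects one of the EMBEDDERS defined above. A hedged sketch of that resolution for a single predicted class index, assuming the referenced base configs from this diff are present on disk and add_densepose_config (added later in this diff) has been applied; the class index is just an example value:

```python
# Sketch: resolve a predicted class index to its mesh name and embedder spec
# using the mappings defined in the config above.
from detectron2.config import get_cfg

from densepose.config import add_densepose_config

cfg = get_cfg()
add_densepose_config(cfg)
cfg.merge_from_file(
    "configs/quick_schedules/cse/densepose_rcnn_R_50_FPN_soft_animals_finetune_instant_test.yaml"
)

pred_class = 6  # hypothetical per-instance class prediction
mesh_name = cfg.DATASETS.CLASS_TO_MESH_NAME_MAPPING[str(pred_class)]  # "horse_5004"
embedder_spec = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS[mesh_name]
print(mesh_name, embedder_spec.NUM_VERTICES)  # horse_5004 5004
```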
diff --git a/configs/quick_schedules/densepose_rcnn_HRFPN_HRNet_w32_instant_test.yaml b/configs/quick_schedules/densepose_rcnn_HRFPN_HRNet_w32_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..95677ce9a7ff426a9051737876e7424908b1423f
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_HRFPN_HRNet_w32_instant_test.yaml
@@ -0,0 +1,8 @@
+_BASE_: "../HRNet/densepose_rcnn_HRFPN_HRNet_w32_s1x.yaml"
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival_100",)
+ TEST: ("densepose_coco_2014_minival_100",)
+SOLVER:
+ MAX_ITER: 40
+ STEPS: (30,)
+ IMS_PER_BATCH: 2
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b90989eef81e27d23119d2cd4627e8cea211ac51
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_DL_instant_test.yaml
@@ -0,0 +1,11 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ ROI_DENSEPOSE_HEAD:
+ NAME: "DensePoseDeepLabHead"
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival_100",)
+ TEST: ("densepose_coco_2014_minival_100",)
+SOLVER:
+ MAX_ITER: 40
+ STEPS: (30,)
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..b124da19140f564258b583ec109eeeeaff8fd78a
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_TTA_inference_acc_test.yaml
@@ -0,0 +1,13 @@
+_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl"
+DATASETS:
+ TRAIN: ()
+ TEST: ("densepose_coco_2014_minival_100",)
+TEST:
+ AUG:
+ ENABLED: True
+ MIN_SIZES: (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
+ MAX_SIZE: 4000
+ FLIP: True
+ EXPECTED_RESULTS: [["bbox_TTA", "AP", 61.74, 0.03], ["densepose_gps_TTA", "AP", 60.22, 0.03], ["densepose_gpsm_TTA", "AP", 63.59, 0.03]]
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f0fe61151adf255baba717f3e65ff6fab52829a6
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC1_instant_test.yaml
@@ -0,0 +1,19 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "iid_iso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival_100",)
+ TEST: ("densepose_coco_2014_minival_100",)
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 40
+ STEPS: (30,)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..f0d9358c8846452314697a19b5e2ea9e075ddaeb
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_WC2_instant_test.yaml
@@ -0,0 +1,19 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ RESNETS:
+ DEPTH: 50
+ ROI_DENSEPOSE_HEAD:
+ UV_CONFIDENCE:
+ ENABLED: True
+ TYPE: "indep_aniso"
+ POINT_REGRESSION_WEIGHTS: 0.0005
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival_100",)
+ TEST: ("densepose_coco_2014_minival_100",)
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ MAX_ITER: 40
+ STEPS: (30,)
+ WARMUP_FACTOR: 0.025
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..d607c98813d045c1e19875bdfe45fbc1c3fdb292
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_inference_acc_test.yaml
@@ -0,0 +1,8 @@
+_BASE_: "../densepose_rcnn_R_50_FPN_s1x.yaml"
+MODEL:
+ WEIGHTS: "https://dl.fbaipublicfiles.com/densepose/densepose_rcnn_R_50_FPN_s1x/165712039/model_final_162be9.pkl"
+DATASETS:
+ TRAIN: ()
+ TEST: ("densepose_coco_2014_minival_100",)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 59.27, 0.025], ["densepose_gps", "AP", 60.11, 0.02], ["densepose_gpsm", "AP", 64.09, 0.02]]
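Each entry in TEST.EXPECTED_RESULTS is a [task, metric, expected value, tolerance] quadruple that the accuracy-test configs use to check evaluation output. A hedged sketch of the comparison they imply (the actual verification lives in detectron2's testing helpers; the result values below are illustrative stand-ins):

```python
# Sketch of the check implied by TEST.EXPECTED_RESULTS: each
# [task, metric, expected, tolerance] entry is compared against the
# evaluator output. `results` is a hand-written stand-in for the nested
# dict returned by the evaluators.
results = {"bbox": {"AP": 59.28}, "densepose_gps": {"AP": 60.10}}
expected_results = [
    ["bbox", "AP", 59.27, 0.025],
    ["densepose_gps", "AP", 60.11, 0.02],
]

for task, metric, expected, tolerance in expected_results:
    actual = results[task][metric]
    ok = abs(actual - expected) < tolerance
    print(f"{task}/{metric}: {actual} vs {expected} (tol {tolerance}) -> {'OK' if ok else 'FAIL'}")
```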
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..057c8768186e8a818228aa2f028ba3007374c571
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml
@@ -0,0 +1,9 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival_100",)
+ TEST: ("densepose_coco_2014_minival_100",)
+SOLVER:
+ MAX_ITER: 40
+ STEPS: (30,)
diff --git a/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml b/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml
new file mode 100644
index 0000000000000000000000000000000000000000..0053c9d7d41af0ee7262804838d8edcde10ed40d
--- /dev/null
+++ b/configs/quick_schedules/densepose_rcnn_R_50_FPN_training_acc_test.yaml
@@ -0,0 +1,18 @@
+_BASE_: "../Base-DensePose-RCNN-FPN.yaml"
+MODEL:
+ WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+ ROI_HEADS:
+ NUM_CLASSES: 1
+DATASETS:
+ TRAIN: ("densepose_coco_2014_minival",)
+ TEST: ("densepose_coco_2014_minival",)
+SOLVER:
+ CLIP_GRADIENTS:
+ ENABLED: True
+ CLIP_TYPE: norm
+ CLIP_VALUE: 1.0
+ MAX_ITER: 6000
+ STEPS: (5500, 5800)
+TEST:
+ EXPECTED_RESULTS: [["bbox", "AP", 76.2477, 1.0], ["densepose_gps", "AP", 79.6090, 1.5], ["densepose_gpsm", "AP", 80.0061, 1.5]]
+
diff --git a/densepose/__init__.py b/densepose/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b50a3da91dd0d2a69502af9d5d62f2f4280d973f
--- /dev/null
+++ b/densepose/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .data.datasets import builtin # just to register data
+from .converters import builtin as builtin_converters # register converters
+from .config import (
+ add_densepose_config,
+ add_densepose_head_config,
+ add_hrnet_config,
+ add_dataset_category_config,
+ add_bootstrap_config,
+ load_bootstrap_config,
+)
+from .structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
+from .evaluation import DensePoseCOCOEvaluator
+from .modeling.roi_heads import DensePoseROIHeads
+from .modeling.test_time_augmentation import (
+ DensePoseGeneralizedRCNNWithTTA,
+ DensePoseDatasetMapperTTA,
+)
+from .utils.transform import load_from_cfg
+from .modeling.hrfpn import build_hrfpn_backbone
diff --git a/densepose/config.py b/densepose/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..2a06a09c80865ab987773511b2acc71e232b26ac
--- /dev/null
+++ b/densepose/config.py
@@ -0,0 +1,277 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+# pyre-ignore-all-errors
+
+from detectron2.config import CfgNode as CN
+
+
+def add_dataset_category_config(cfg: CN) -> None:
+ """
+ Add config for additional category-related dataset options
+ - category whitelisting
+ - category mapping
+ """
+ _C = cfg
+ _C.DATASETS.CATEGORY_MAPS = CN(new_allowed=True)
+ _C.DATASETS.WHITELISTED_CATEGORIES = CN(new_allowed=True)
+ # class to mesh mapping
+ _C.DATASETS.CLASS_TO_MESH_NAME_MAPPING = CN(new_allowed=True)
+
+
+def add_evaluation_config(cfg: CN) -> None:
+ _C = cfg
+ _C.DENSEPOSE_EVALUATION = CN()
+ # evaluator type, possible values:
+ # - "iou": evaluator for models that produce iou data
+ # - "cse": evaluator for models that produce cse data
+ _C.DENSEPOSE_EVALUATION.TYPE = "iou"
+ # storage for DensePose results, possible values:
+ # - "none": no explicit storage, all the results are stored in the
+ # dictionary with predictions, memory intensive;
+ # historically the default storage type
+ # - "ram": RAM storage, uses per-process RAM storage, which is
+ # reduced to a single process storage on later stages,
+ # less memory intensive
+ # - "file": file storage, uses per-process file-based storage,
+ # the least memory intensive, but may create bottlenecks
+ # on file system accesses
+ _C.DENSEPOSE_EVALUATION.STORAGE = "none"
+ # minimum threshold for IOU values: the lower its value is,
+ # the more matches are produced (and the higher the AP score)
+ _C.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD = 0.5
+ # Non-distributed inference is slower (at inference time) but can avoid RAM OOM
+ _C.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE = True
+ # evaluate mesh alignment based on vertex embeddings, only makes sense in CSE context
+ _C.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT = False
+ # meshes to compute mesh alignment for
+ _C.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES = []
+
+
+def add_bootstrap_config(cfg: CN) -> None:
+ """ """
+ _C = cfg
+ _C.BOOTSTRAP_DATASETS = []
+ _C.BOOTSTRAP_MODEL = CN()
+ _C.BOOTSTRAP_MODEL.WEIGHTS = ""
+ _C.BOOTSTRAP_MODEL.DEVICE = "cuda"
+
+
+def get_bootstrap_dataset_config() -> CN:
+ _C = CN()
+ _C.DATASET = ""
+ # ratio used to mix data loaders
+ _C.RATIO = 0.1
+ # image loader
+ _C.IMAGE_LOADER = CN(new_allowed=True)
+ _C.IMAGE_LOADER.TYPE = ""
+ _C.IMAGE_LOADER.BATCH_SIZE = 4
+ _C.IMAGE_LOADER.NUM_WORKERS = 4
+ _C.IMAGE_LOADER.CATEGORIES = []
+ _C.IMAGE_LOADER.MAX_COUNT_PER_CATEGORY = 1_000_000
+ _C.IMAGE_LOADER.CATEGORY_TO_CLASS_MAPPING = CN(new_allowed=True)
+ # inference
+ _C.INFERENCE = CN()
+ # batch size for model inputs
+ _C.INFERENCE.INPUT_BATCH_SIZE = 4
+ # batch size to group model outputs
+ _C.INFERENCE.OUTPUT_BATCH_SIZE = 2
+ # sampled data
+ _C.DATA_SAMPLER = CN(new_allowed=True)
+ _C.DATA_SAMPLER.TYPE = ""
+ _C.DATA_SAMPLER.USE_GROUND_TRUTH_CATEGORIES = False
+ # filter
+ _C.FILTER = CN(new_allowed=True)
+ _C.FILTER.TYPE = ""
+ return _C
+
+
+def load_bootstrap_config(cfg: CN) -> None:
+ """
+ Bootstrap datasets are given as a list of `dict` that are not automatically
+ converted into CfgNode. This method processes all bootstrap dataset entries
+ and ensures that they are in CfgNode format and comply with the specification
+ """
+ if not cfg.BOOTSTRAP_DATASETS:
+ return
+
+ bootstrap_datasets_cfgnodes = []
+ for dataset_cfg in cfg.BOOTSTRAP_DATASETS:
+ _C = get_bootstrap_dataset_config().clone()
+ _C.merge_from_other_cfg(CN(dataset_cfg))
+ bootstrap_datasets_cfgnodes.append(_C)
+ cfg.BOOTSTRAP_DATASETS = bootstrap_datasets_cfgnodes
+
+
+def add_densepose_head_cse_config(cfg: CN) -> None:
+ """
+ Add configuration options for Continuous Surface Embeddings (CSE)
+ """
+ _C = cfg
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE = CN()
+ # Dimensionality D of the embedding space
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE = 16
+ # Embedder specifications for various mesh IDs
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS = CN(new_allowed=True)
+ # normalization coefficient for embedding distances
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA = 0.01
+ # normalization coefficient for geodesic distances
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA = 0.01
+ # embedding loss weight
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT = 0.6
+ # embedding loss name, currently the following options are supported:
+ # - EmbeddingLoss: cross-entropy on vertex labels
+ # - SoftEmbeddingLoss: cross-entropy on vertex label combined with
+ # Gaussian penalty on distance between vertices
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME = "EmbeddingLoss"
+ # optimizer hyperparameters
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR = 1.0
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR = 1.0
+ # Shape to shape cycle consistency loss parameters:
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False})
+ # shape to shape cycle consistency loss weight
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.025
+ # norm type used for loss computation
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P = 2
+ # normalization term for embedding similarity matrices
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE = 0.05
+ # maximum number of vertices to include into shape to shape cycle loss
+ # if negative or zero, all vertices are considered
+ # if positive, random subset of vertices of given size is considered
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES = 4936
+ # Pixel to shape cycle consistency loss parameters:
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS = CN({"ENABLED": False})
+ # pixel to shape cycle consistency loss weight
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT = 0.0001
+ # norm type used for loss computation
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P = 2
+ # map images to all meshes and back (if false, use only gt meshes from the batch)
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY = False
+ # Randomly select at most this number of pixels from every instance
+ # if negative or zero, all vertices are considered
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE = 100
+ # normalization factor for pixel to pixel distances (higher value = smoother distribution)
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA = 5.0
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX = 0.05
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL = 0.05
+
+
+def add_densepose_head_config(cfg: CN) -> None:
+ """
+ Add config for densepose head.
+ """
+ _C = cfg
+
+ _C.MODEL.DENSEPOSE_ON = True
+
+ _C.MODEL.ROI_DENSEPOSE_HEAD = CN()
+ _C.MODEL.ROI_DENSEPOSE_HEAD.NAME = ""
+ _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8
+ # Number of parts used for point labels
+ _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES = 24
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL = 4
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512
+ _C.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3
+ _C.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE = 2
+ _C.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE = 112
+ _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE = "ROIAlignV2"
+ _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION = 28
+ _C.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO = 2
+ _C.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS = 2 # 15 or 2
+ # Overlap threshold for an RoI to be considered foreground (if >= FG_IOU_THRESHOLD)
+ _C.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD = 0.7
+ # Loss weights for annotation masks (14 Parts)
+ _C.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS = 5.0
+ # Loss weights for surface parts. (24 Parts)
+ _C.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS = 1.0
+ # Loss weights for UV regression.
+ _C.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS = 0.01
+ # Coarse segmentation is trained using instance segmentation task data
+ _C.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS = False
+ # For Decoder
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON = True
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = ""
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4
+ # For DeepLab head
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB = CN()
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM = "GN"
+ _C.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON = 0
+ # Predictor class name, must be registered in DENSEPOSE_PREDICTOR_REGISTRY
+ # Some registered predictors:
+ # "DensePoseChartPredictor": predicts segmentation and UV coordinates for predefined charts
+ # "DensePoseChartWithConfidencePredictor": predicts segmentation, UV coordinates
+ # and associated confidences for predefined charts (default)
+ # "DensePoseEmbeddingWithConfidencePredictor": predicts segmentation, embeddings
+ # and associated confidences for CSE
+ _C.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME = "DensePoseChartWithConfidencePredictor"
+ # Loss class name, must be registered in DENSEPOSE_LOSS_REGISTRY
+ # Some registered losses:
+ # "DensePoseChartLoss": loss for chart-based models that estimate
+ # segmentation and UV coordinates
+ # "DensePoseChartWithConfidenceLoss": loss for chart-based models that estimate
+ # segmentation, UV coordinates and the corresponding confidences (default)
+ _C.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME = "DensePoseChartWithConfidenceLoss"
+ # Confidences
+ # Enable learning UV confidences (variances) along with the actual values
+ _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE = CN({"ENABLED": False})
+ # UV confidence lower bound
+ _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON = 0.01
+ # Enable learning segmentation confidences (variances) along with the actual values
+ _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE = CN({"ENABLED": False})
+ # Segmentation confidence lower bound
+ _C.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON = 0.01
+ # Statistical model type for confidence learning, possible values:
+ # - "iid_iso": statistically independent identically distributed residuals
+ # with isotropic covariance
+ # - "indep_aniso": statistically independent residuals with anisotropic
+ # covariances
+ _C.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE = "iid_iso"
+ # List of angles for rotation in data augmentation during training
+ _C.INPUT.ROTATION_ANGLES = [0]
+ _C.TEST.AUG.ROTATION_ANGLES = () # Rotation TTA
+
+ add_densepose_head_cse_config(cfg)
+
+
+def add_hrnet_config(cfg: CN) -> None:
+ """
+ Add config for HRNet backbone.
+ """
+ _C = cfg
+
+ # For HigherHRNet w32
+ _C.MODEL.HRNET = CN()
+ _C.MODEL.HRNET.STEM_INPLANES = 64
+ _C.MODEL.HRNET.STAGE2 = CN()
+ _C.MODEL.HRNET.STAGE2.NUM_MODULES = 1
+ _C.MODEL.HRNET.STAGE2.NUM_BRANCHES = 2
+ _C.MODEL.HRNET.STAGE2.BLOCK = "BASIC"
+ _C.MODEL.HRNET.STAGE2.NUM_BLOCKS = [4, 4]
+ _C.MODEL.HRNET.STAGE2.NUM_CHANNELS = [32, 64]
+ _C.MODEL.HRNET.STAGE2.FUSE_METHOD = "SUM"
+ _C.MODEL.HRNET.STAGE3 = CN()
+ _C.MODEL.HRNET.STAGE3.NUM_MODULES = 4
+ _C.MODEL.HRNET.STAGE3.NUM_BRANCHES = 3
+ _C.MODEL.HRNET.STAGE3.BLOCK = "BASIC"
+ _C.MODEL.HRNET.STAGE3.NUM_BLOCKS = [4, 4, 4]
+ _C.MODEL.HRNET.STAGE3.NUM_CHANNELS = [32, 64, 128]
+ _C.MODEL.HRNET.STAGE3.FUSE_METHOD = "SUM"
+ _C.MODEL.HRNET.STAGE4 = CN()
+ _C.MODEL.HRNET.STAGE4.NUM_MODULES = 3
+ _C.MODEL.HRNET.STAGE4.NUM_BRANCHES = 4
+ _C.MODEL.HRNET.STAGE4.BLOCK = "BASIC"
+ _C.MODEL.HRNET.STAGE4.NUM_BLOCKS = [4, 4, 4, 4]
+ _C.MODEL.HRNET.STAGE4.NUM_CHANNELS = [32, 64, 128, 256]
+ _C.MODEL.HRNET.STAGE4.FUSE_METHOD = "SUM"
+
+ _C.MODEL.HRNET.HRFPN = CN()
+ _C.MODEL.HRNET.HRFPN.OUT_CHANNELS = 256
+
+
+def add_densepose_config(cfg: CN) -> None:
+ add_densepose_head_config(cfg)
+ add_hrnet_config(cfg)
+ add_bootstrap_config(cfg)
+ add_dataset_category_config(cfg)
+ add_evaluation_config(cfg)
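add_densepose_config is the single entry point that layers all of the options above on top of detectron2's defaults. A minimal sketch of building a config for one of the quick-schedule files from this diff (the path and the freeze call reflect the conventional usage, not something mandated by this module):

```python
# Minimal sketch: extend detectron2's default config with the DensePose
# options defined above, then load one of the YAML files added in this diff.
from detectron2.config import get_cfg

from densepose.config import add_densepose_config

cfg = get_cfg()
add_densepose_config(cfg)  # head, HRNet, bootstrap, dataset-category and evaluation options
cfg.merge_from_file("configs/quick_schedules/densepose_rcnn_R_50_FPN_instant_test.yaml")
cfg.freeze()

print(cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES)  # 24, from the defaults above (unless a merged file overrides it)
print(cfg.DENSEPOSE_EVALUATION.TYPE)             # "iou", from add_evaluation_config
```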
diff --git a/densepose/converters/__init__.py b/densepose/converters/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..930339e13f408ad46d0504fac557ef8cf0a57a56
--- /dev/null
+++ b/densepose/converters/__init__.py
@@ -0,0 +1,15 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .hflip import HFlipConverter
+from .to_mask import ToMaskConverter
+from .to_chart_result import ToChartResultConverter, ToChartResultConverterWithConfidences
+from .segm_to_mask import (
+ predictor_output_with_fine_and_coarse_segm_to_mask,
+ predictor_output_with_coarse_segm_to_mask,
+ resample_fine_and_coarse_segm_to_bbox,
+)
+from .chart_output_to_chart_result import (
+ densepose_chart_predictor_output_to_result,
+ densepose_chart_predictor_output_to_result_with_confidences,
+)
+from .chart_output_hflip import densepose_chart_predictor_output_hflip
diff --git a/densepose/converters/base.py b/densepose/converters/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9dbe56cecff6dbbc1a1fda5a89c5f917513dcd8
--- /dev/null
+++ b/densepose/converters/base.py
@@ -0,0 +1,93 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any, Tuple, Type
+import torch
+
+
+class BaseConverter:
+ """
+ Converter base class to be reused by various converters.
+ Converter allows one to convert data from various source types to a particular
+ destination type. Each source type needs to register its converter. The
+ registration for each source type is valid for all descendants of that type.
+ """
+
+ @classmethod
+ def register(cls, from_type: Type, converter: Any = None):
+ """
+ Registers a converter for the specified type.
+ Can be used as a decorator (if converter is None), or called as a method.
+
+ Args:
+ from_type (type): type to register the converter for;
+ all instances of this type will use the same converter
+ converter (callable): converter to be registered for the given
+ type; if None, this method is assumed to be a decorator for the converter
+ """
+
+ if converter is not None:
+ cls._do_register(from_type, converter)
+
+ def wrapper(converter: Any) -> Any:
+ cls._do_register(from_type, converter)
+ return converter
+
+ return wrapper
+
+ @classmethod
+ def _do_register(cls, from_type: Type, converter: Any):
+ cls.registry[from_type] = converter # pyre-ignore[16]
+
+ @classmethod
+ def _lookup_converter(cls, from_type: Type) -> Any:
+ """
+ Perform recursive lookup for the given type
+ to find registered converter. If a converter was found for some base
+ class, it gets registered for this class to save on further lookups.
+
+ Args:
+ from_type: type for which to find a converter
+ Return:
+ callable or None - registered converter or None
+ if no suitable entry was found in the registry
+ """
+ if from_type in cls.registry: # pyre-ignore[16]
+ return cls.registry[from_type]
+ for base in from_type.__bases__:
+ converter = cls._lookup_converter(base)
+ if converter is not None:
+ cls._do_register(from_type, converter)
+ return converter
+ return None
+
+ @classmethod
+ def convert(cls, instance: Any, *args, **kwargs):
+ """
+ Convert an instance to the destination type using some registered
+ converter. Does recursive lookup for base classes, so there's no need
+ for explicit registration for derived classes.
+
+ Args:
+ instance: source instance to convert to the destination type
+ Return:
+ An instance of the destination type obtained from the source instance
+ Raises KeyError, if no suitable converter found
+ """
+ instance_type = type(instance)
+ converter = cls._lookup_converter(instance_type)
+ if converter is None:
+ if cls.dst_type is None: # pyre-ignore[16]
+ output_type_str = "itself"
+ else:
+ output_type_str = cls.dst_type
+ raise KeyError(f"Could not find converter from {instance_type} to {output_type_str}")
+ return converter(instance, *args, **kwargs)
+
+
+IntTupleBox = Tuple[int, int, int, int]
+
+
+def make_int_box(box: torch.Tensor) -> IntTupleBox:
+ # box.long().tolist() yields four plain Python ints: x0, y0, x1, y1
+ x0, y0, x1, y1 = box.long().tolist()
+ return x0, y0, x1, y1
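The registry above dispatches on the runtime type of the source object, falling back to base classes and caching the result. A self-contained sketch with hypothetical output types, showing how registration and dispatch compose:

```python
# Self-contained sketch of BaseConverter dispatch with hypothetical types.
# FancyOutput subclasses PlainOutput, so it reuses the converter registered
# for the base class on its first convert() call (and gets cached afterwards).
from densepose.converters.base import BaseConverter


class ToStringConverter(BaseConverter):
    registry = {}
    dst_type = str


class PlainOutput:
    def __init__(self, value):
        self.value = value


class FancyOutput(PlainOutput):
    pass


@ToStringConverter.register(PlainOutput)
def plain_to_str(output, *args, **kwargs):
    return f"value={output.value}"


print(ToStringConverter.convert(PlainOutput(1)))  # value=1
print(ToStringConverter.convert(FancyOutput(2)))  # value=2, found via base-class lookup
```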
diff --git a/densepose/converters/builtin.py b/densepose/converters/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bd48f8f7afc49cf38bf410f01bc673d446f37d7
--- /dev/null
+++ b/densepose/converters/builtin.py
@@ -0,0 +1,31 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from ..structures import DensePoseChartPredictorOutput, DensePoseEmbeddingPredictorOutput
+from . import (
+ HFlipConverter,
+ ToChartResultConverter,
+ ToChartResultConverterWithConfidences,
+ ToMaskConverter,
+ densepose_chart_predictor_output_hflip,
+ densepose_chart_predictor_output_to_result,
+ densepose_chart_predictor_output_to_result_with_confidences,
+ predictor_output_with_coarse_segm_to_mask,
+ predictor_output_with_fine_and_coarse_segm_to_mask,
+)
+
+ToMaskConverter.register(
+ DensePoseChartPredictorOutput, predictor_output_with_fine_and_coarse_segm_to_mask
+)
+ToMaskConverter.register(
+ DensePoseEmbeddingPredictorOutput, predictor_output_with_coarse_segm_to_mask
+)
+
+ToChartResultConverter.register(
+ DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result
+)
+
+ToChartResultConverterWithConfidences.register(
+ DensePoseChartPredictorOutput, densepose_chart_predictor_output_to_result_with_confidences
+)
+
+HFlipConverter.register(DensePoseChartPredictorOutput, densepose_chart_predictor_output_hflip)
diff --git a/densepose/converters/chart_output_hflip.py b/densepose/converters/chart_output_hflip.py
new file mode 100644
index 0000000000000000000000000000000000000000..17d294841264c248cf7fa9e3d2d2b4efdbb9a5e8
--- /dev/null
+++ b/densepose/converters/chart_output_hflip.py
@@ -0,0 +1,71 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from dataclasses import fields
+import torch
+
+from densepose.structures import DensePoseChartPredictorOutput, DensePoseTransformData
+
+
+def densepose_chart_predictor_output_hflip(
+ densepose_predictor_output: DensePoseChartPredictorOutput,
+ transform_data: DensePoseTransformData,
+) -> DensePoseChartPredictorOutput:
+ """
+ Flip the predictor output horizontally, remapping part labels and UV values via their symmetries.
+ """
+ if len(densepose_predictor_output) > 0:
+
+ PredictorOutput = type(densepose_predictor_output)
+ output_dict = {}
+
+ for field in fields(densepose_predictor_output):
+ field_value = getattr(densepose_predictor_output, field.name)
+ # flip tensors
+ if isinstance(field_value, torch.Tensor):
+ setattr(densepose_predictor_output, field.name, torch.flip(field_value, [3]))
+
+ densepose_predictor_output = _flip_iuv_semantics_tensor(
+ densepose_predictor_output, transform_data
+ )
+ densepose_predictor_output = _flip_segm_semantics_tensor(
+ densepose_predictor_output, transform_data
+ )
+
+ for field in fields(densepose_predictor_output):
+ output_dict[field.name] = getattr(densepose_predictor_output, field.name)
+
+ return PredictorOutput(**output_dict)
+ else:
+ return densepose_predictor_output
+
+
+def _flip_iuv_semantics_tensor(
+ densepose_predictor_output: DensePoseChartPredictorOutput,
+ dp_transform_data: DensePoseTransformData,
+) -> DensePoseChartPredictorOutput:
+ point_label_symmetries = dp_transform_data.point_label_symmetries
+ uv_symmetries = dp_transform_data.uv_symmetries
+
+ N, C, H, W = densepose_predictor_output.u.shape
+ u_loc = (densepose_predictor_output.u[:, 1:, :, :].clamp(0, 1) * 255).long()
+ v_loc = (densepose_predictor_output.v[:, 1:, :, :].clamp(0, 1) * 255).long()
+ Iindex = torch.arange(C - 1, device=densepose_predictor_output.u.device)[
+ None, :, None, None
+ ].expand(N, C - 1, H, W)
+ densepose_predictor_output.u[:, 1:, :, :] = uv_symmetries["U_transforms"][Iindex, v_loc, u_loc]
+ densepose_predictor_output.v[:, 1:, :, :] = uv_symmetries["V_transforms"][Iindex, v_loc, u_loc]
+
+ for el in ["fine_segm", "u", "v"]:
+ densepose_predictor_output.__dict__[el] = densepose_predictor_output.__dict__[el][
+ :, point_label_symmetries, :, :
+ ]
+ return densepose_predictor_output
+
+
+def _flip_segm_semantics_tensor(
+ densepose_predictor_output: DensePoseChartPredictorOutput, dp_transform_data
+):
+ if densepose_predictor_output.coarse_segm.shape[1] > 2:
+ densepose_predictor_output.coarse_segm = densepose_predictor_output.coarse_segm[
+ :, dp_transform_data.mask_label_symmetries, :, :
+ ]
+ return densepose_predictor_output
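This flip routine is registered for DensePoseChartPredictorOutput in builtin.py below and is normally reached through HFlipConverter during flip test-time augmentation. A hedged sketch of that call path; `output` and `cfg` are assumed to come from the surrounding inference code, and load_from_cfg (re-exported from densepose.utils.transform in densepose/__init__.py above) is assumed to return the DensePoseTransformData holding the symmetry tables:

```python
# Hedged sketch: undo a horizontal flip on a chart predictor output during
# flip TTA. The exact plumbing in the TTA wrapper may differ.
from densepose.converters import HFlipConverter
from densepose.utils.transform import load_from_cfg


def hflip_densepose_output(output, cfg):
    transform_data = load_from_cfg(cfg)  # point-label / UV symmetry tables (assumed signature)
    # dispatches to densepose_chart_predictor_output_hflip for chart outputs
    return HFlipConverter.convert(output, transform_data)
```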
diff --git a/densepose/converters/chart_output_to_chart_result.py b/densepose/converters/chart_output_to_chart_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..4248f6c91b641a4ad1d00d0316ee82d701f9152f
--- /dev/null
+++ b/densepose/converters/chart_output_to_chart_result.py
@@ -0,0 +1,188 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Dict
+import torch
+from torch.nn import functional as F
+
+from detectron2.structures.boxes import Boxes, BoxMode
+
+from ..structures import (
+ DensePoseChartPredictorOutput,
+ DensePoseChartResult,
+ DensePoseChartResultWithConfidences,
+)
+from . import resample_fine_and_coarse_segm_to_bbox
+from .base import IntTupleBox, make_int_box
+
+
+def resample_uv_tensors_to_bbox(
+ u: torch.Tensor,
+ v: torch.Tensor,
+ labels: torch.Tensor,
+ box_xywh_abs: IntTupleBox,
+) -> torch.Tensor:
+ """
+ Resamples U and V coordinate estimates for the given bounding box
+
+ Args:
+ u (tensor [1, C, H, W] of float): U coordinates
+ v (tensor [1, C, H, W] of float): V coordinates
+ labels (tensor [H, W] of long): labels obtained by resampling segmentation
+ outputs for the given bounding box
+ box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
+ Return:
+ Resampled U and V coordinates - a tensor [2, H, W] of float
+ """
+ x, y, w, h = box_xywh_abs
+ w = max(int(w), 1)
+ h = max(int(h), 1)
+ u_bbox = F.interpolate(u, (h, w), mode="bilinear", align_corners=False)
+ v_bbox = F.interpolate(v, (h, w), mode="bilinear", align_corners=False)
+ uv = torch.zeros([2, h, w], dtype=torch.float32, device=u.device)
+ for part_id in range(1, u_bbox.size(1)):
+ uv[0][labels == part_id] = u_bbox[0, part_id][labels == part_id]
+ uv[1][labels == part_id] = v_bbox[0, part_id][labels == part_id]
+ return uv
+
+
+def resample_uv_to_bbox(
+ predictor_output: DensePoseChartPredictorOutput,
+ labels: torch.Tensor,
+ box_xywh_abs: IntTupleBox,
+) -> torch.Tensor:
+ """
+ Resamples U and V coordinate estimates for the given bounding box
+
+ Args:
+ predictor_output (DensePoseChartPredictorOutput): DensePose predictor
+ output to be resampled
+ labels (tensor [H, W] of long): labels obtained by resampling segmentation
+ outputs for the given bounding box
+ box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
+ Return:
+ Resampled U and V coordinates - a tensor [2, H, W] of float
+ """
+ return resample_uv_tensors_to_bbox(
+ predictor_output.u,
+ predictor_output.v,
+ labels,
+ box_xywh_abs,
+ )
+
+
+def densepose_chart_predictor_output_to_result(
+ predictor_output: DensePoseChartPredictorOutput, boxes: Boxes
+) -> DensePoseChartResult:
+ """
+ Convert densepose chart predictor outputs to results
+
+ Args:
+ predictor_output (DensePoseChartPredictorOutput): DensePose predictor
+ output to be converted to results, must contain only 1 output
+ boxes (Boxes): bounding box that corresponds to the predictor output,
+ must contain only 1 bounding box
+ Return:
+ DensePose chart-based result (DensePoseChartResult)
+ """
+ assert len(predictor_output) == 1 and len(boxes) == 1, (
+ f"Predictor output to result conversion can operate only single outputs"
+ f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes"
+ )
+
+ boxes_xyxy_abs = boxes.tensor.clone()
+ boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ box_xywh = make_int_box(boxes_xywh_abs[0])
+
+ labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
+ uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
+ return DensePoseChartResult(labels=labels, uv=uv)
+
+
+def resample_confidences_to_bbox(
+ predictor_output: DensePoseChartPredictorOutput,
+ labels: torch.Tensor,
+ box_xywh_abs: IntTupleBox,
+) -> Dict[str, torch.Tensor]:
+ """
+ Resamples confidences for the given bounding box
+
+ Args:
+ predictor_output (DensePoseChartPredictorOutput): DensePose predictor
+ output to be resampled
+ labels (tensor [H, W] of long): labels obtained by resampling segmentation
+ outputs for the given bounding box
+ box_xywh_abs (tuple of 4 int): bounding box that corresponds to predictor outputs
+ Return:
+ Resampled confidences - a dict of [H, W] tensors of float
+ """
+
+ x, y, w, h = box_xywh_abs
+ w = max(int(w), 1)
+ h = max(int(h), 1)
+
+ confidence_names = [
+ "sigma_1",
+ "sigma_2",
+ "kappa_u",
+ "kappa_v",
+ "fine_segm_confidence",
+ "coarse_segm_confidence",
+ ]
+ confidence_results = {key: None for key in confidence_names}
+ confidence_names = [
+ key for key in confidence_names if getattr(predictor_output, key) is not None
+ ]
+ confidence_base = torch.zeros([h, w], dtype=torch.float32, device=predictor_output.u.device)
+
+ # assign data from channels that correspond to the labels
+ for key in confidence_names:
+ resampled_confidence = F.interpolate(
+ getattr(predictor_output, key),
+ (h, w),
+ mode="bilinear",
+ align_corners=False,
+ )
+ result = confidence_base.clone()
+ for part_id in range(1, predictor_output.u.size(1)):
+ if resampled_confidence.size(1) != predictor_output.u.size(1):
+ # confidence is not part-based, don't try to fill it part by part
+ continue
+ result[labels == part_id] = resampled_confidence[0, part_id][labels == part_id]
+
+ if resampled_confidence.size(1) != predictor_output.u.size(1):
+ # confidence is not part-based, fill the data with the first channel
+ # (targeted for segmentation confidences that have only 1 channel)
+ result = resampled_confidence[0, 0]
+
+ confidence_results[key] = result
+
+ return confidence_results # pyre-ignore[7]
+
+
+def densepose_chart_predictor_output_to_result_with_confidences(
+ predictor_output: DensePoseChartPredictorOutput, boxes: Boxes
+) -> DensePoseChartResultWithConfidences:
+ """
+ Convert densepose chart predictor outputs to results
+
+ Args:
+ predictor_output (DensePoseChartPredictorOutput): DensePose predictor
+ output with confidences to be converted to results, must contain only 1 output
+ boxes (Boxes): bounding box that corresponds to the predictor output,
+ must contain only 1 bounding box
+ Return:
+ DensePose chart-based result with confidences (DensePoseChartResultWithConfidences)
+ """
+ assert len(predictor_output) == 1 and len(boxes) == 1, (
+ f"Predictor output to result conversion can operate only single outputs"
+ f", got {len(predictor_output)} predictor outputs and {len(boxes)} boxes"
+ )
+
+ boxes_xyxy_abs = boxes.tensor.clone()
+ boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ box_xywh = make_int_box(boxes_xywh_abs[0])
+
+ labels = resample_fine_and_coarse_segm_to_bbox(predictor_output, box_xywh).squeeze(0)
+ uv = resample_uv_to_bbox(predictor_output, labels, box_xywh)
+ confidences = resample_confidences_to_bbox(predictor_output, labels, box_xywh)
+ return DensePoseChartResultWithConfidences(labels=labels, uv=uv, **confidences)
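These conversions turn raw per-box predictor tensors into per-instance DensePoseChartResult objects. A hedged usage sketch over detector outputs, assuming `instances` is a detectron2 Instances object carrying `pred_boxes` and `pred_densepose` as produced by the DensePose ROI heads in this diff:

```python
# Hedged sketch: convert detector outputs to per-instance chart results.
# Both the predictor output and the boxes are sliced to a single element,
# as required by the conversion functions above.
from densepose.converters import ToChartResultConverter


def extract_chart_results(instances):
    outputs = instances.pred_densepose  # DensePoseChartPredictorOutput, length N
    boxes = instances.pred_boxes        # Boxes, length N
    return [
        ToChartResultConverter.convert(outputs[i : i + 1], boxes[i : i + 1])
        for i in range(len(outputs))
    ]
```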
diff --git a/densepose/converters/hflip.py b/densepose/converters/hflip.py
new file mode 100644
index 0000000000000000000000000000000000000000..6df144280b2b84308acbb607e3313d0992faa68c
--- /dev/null
+++ b/densepose/converters/hflip.py
@@ -0,0 +1,34 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any
+
+from .base import BaseConverter
+
+
+class HFlipConverter(BaseConverter):
+ """
+ Performs horizontal flips of various DensePose predictor outputs.
+ Each DensePose predictor output type has to register its flip strategy.
+ """
+
+ registry = {}
+ dst_type = None
+
+ @classmethod
+ # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
+ # inconsistently.
+ def convert(cls, predictor_outputs: Any, transform_data: Any, *args, **kwargs):
+ """
+ Performs a horizontal flip on DensePose predictor outputs.
+ Does recursive lookup for base classes, so there's no need
+ for explicit registration for derived classes.
+
+ Args:
+ predictor_outputs: DensePose predictor output to be horizontally flipped
+ transform_data: Anything useful for the flip
+ Return:
+ An instance of the same type as predictor_outputs
+ """
+ return super(HFlipConverter, cls).convert(
+ predictor_outputs, transform_data, *args, **kwargs
+ )
diff --git a/densepose/converters/segm_to_mask.py b/densepose/converters/segm_to_mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..6433d5dec75c3d6141252af144b61d8999077bb7
--- /dev/null
+++ b/densepose/converters/segm_to_mask.py
@@ -0,0 +1,150 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any
+import torch
+from torch.nn import functional as F
+
+from detectron2.structures import BitMasks, Boxes, BoxMode
+
+from .base import IntTupleBox, make_int_box
+from .to_mask import ImageSizeType
+
+
+def resample_coarse_segm_tensor_to_bbox(coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox):
+ """
+ Resample coarse segmentation tensor to the given
+ bounding box and derive labels for each pixel of the bounding box
+
+ Args:
+ coarse_segm: float tensor of shape [1, K, Hout, Wout]
+ box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
+ corner coordinates, width (W) and height (H)
+ Return:
+ Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
+ """
+ x, y, w, h = box_xywh_abs
+ w = max(int(w), 1)
+ h = max(int(h), 1)
+ labels = F.interpolate(coarse_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
+ return labels
+
+
+def resample_fine_and_coarse_segm_tensors_to_bbox(
+ fine_segm: torch.Tensor, coarse_segm: torch.Tensor, box_xywh_abs: IntTupleBox
+):
+ """
+ Resample fine and coarse segmentation tensors to the given
+ bounding box and derive labels for each pixel of the bounding box
+
+ Args:
+ fine_segm: float tensor of shape [1, C, Hout, Wout]
+ coarse_segm: float tensor of shape [1, K, Hout, Wout]
+ box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
+ corner coordinates, width (W) and height (H)
+ Return:
+ Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
+ """
+ x, y, w, h = box_xywh_abs
+ w = max(int(w), 1)
+ h = max(int(h), 1)
+ # coarse segmentation
+ coarse_segm_bbox = F.interpolate(
+ coarse_segm,
+ (h, w),
+ mode="bilinear",
+ align_corners=False,
+ ).argmax(dim=1)
+ # combined coarse and fine segmentation
+ labels = (
+ F.interpolate(fine_segm, (h, w), mode="bilinear", align_corners=False).argmax(dim=1)
+ * (coarse_segm_bbox > 0).long()
+ )
+ return labels
+
+
+def resample_fine_and_coarse_segm_to_bbox(predictor_output: Any, box_xywh_abs: IntTupleBox):
+ """
+ Resample fine and coarse segmentation outputs from a predictor to the given
+ bounding box and derive labels for each pixel of the bounding box
+
+ Args:
+ predictor_output: DensePose predictor output that contains segmentation
+ results to be resampled
+ box_xywh_abs (tuple of 4 int): bounding box given by its upper-left
+ corner coordinates, width (W) and height (H)
+ Return:
+ Labels for each pixel of the bounding box, a long tensor of size [1, H, W]
+ """
+ return resample_fine_and_coarse_segm_tensors_to_bbox(
+ predictor_output.fine_segm,
+ predictor_output.coarse_segm,
+ box_xywh_abs,
+ )
+
+
+def predictor_output_with_coarse_segm_to_mask(
+ predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType
+) -> BitMasks:
+ """
+ Convert predictor output with coarse segmentation to a mask.
+ Assumes that predictor output has the following attributes:
+ - coarse_segm (tensor of size [N, D, H, W]): coarse segmentation
+ unnormalized scores for N instances; D is the number of coarse
+ segmentation labels, H and W is the resolution of the estimate
+
+ Args:
+ predictor_output: DensePose predictor output to be converted to mask
+ boxes (Boxes): bounding boxes that correspond to the DensePose
+ predictor outputs
+ image_size_hw (tuple [int, int]): image height Himg and width Wimg
+ Return:
+ BitMasks that contain a bool tensor of size [N, Himg, Wimg] with
+ a mask of the size of the image for each instance
+ """
+ H, W = image_size_hw
+ boxes_xyxy_abs = boxes.tensor.clone()
+ boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ N = len(boxes_xywh_abs)
+ masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)
+ for i in range(len(boxes_xywh_abs)):
+ box_xywh = make_int_box(boxes_xywh_abs[i])
+ box_mask = resample_coarse_segm_tensor_to_bbox(predictor_output[i].coarse_segm, box_xywh)
+ x, y, w, h = box_xywh
+ masks[i, y : y + h, x : x + w] = box_mask
+
+ return BitMasks(masks)
+
+
+def predictor_output_with_fine_and_coarse_segm_to_mask(
+ predictor_output: Any, boxes: Boxes, image_size_hw: ImageSizeType
+) -> BitMasks:
+ """
+ Convert predictor output with coarse and fine segmentation to a mask.
+ Assumes that predictor output has the following attributes:
+ - coarse_segm (tensor of size [N, D, H, W]): coarse segmentation
+ unnormalized scores for N instances; D is the number of coarse
+ segmentation labels, H and W is the resolution of the estimate
+ - fine_segm (tensor of size [N, C, H, W]): fine segmentation
+ unnormalized scores for N instances; C is the number of fine
+ segmentation labels, H and W is the resolution of the estimate
+
+ Args:
+ predictor_output: DensePose predictor output to be converted to mask
+ boxes (Boxes): bounding boxes that correspond to the DensePose
+ predictor outputs
+ image_size_hw (tuple [int, int]): image height Himg and width Wimg
+ Return:
+ BitMasks that contain a bool tensor of size [N, Himg, Wimg] with
+ a mask of the size of the image for each instance
+ """
+ H, W = image_size_hw
+ boxes_xyxy_abs = boxes.tensor.clone()
+ boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ N = len(boxes_xywh_abs)
+ masks = torch.zeros((N, H, W), dtype=torch.bool, device=boxes.tensor.device)
+ for i in range(len(boxes_xywh_abs)):
+ box_xywh = make_int_box(boxes_xywh_abs[i])
+ labels_i = resample_fine_and_coarse_segm_to_bbox(predictor_output[i], box_xywh)
+ x, y, w, h = box_xywh
+ masks[i, y : y + h, x : x + w] = labels_i > 0
+ return BitMasks(masks)
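These resampling helpers back the ToMaskConverter registrations in builtin.py; combined with the converter they rasterize DensePose outputs into image-sized BitMasks, e.g. for mask-style visualization or evaluation. A hedged sketch, assuming the usual `instances` fields produced by the DensePose ROI heads:

```python
# Hedged sketch: rasterize DensePose predictor outputs into image-sized masks.
# `instances` is assumed to carry `pred_boxes` and `pred_densepose`;
# `image_size_hw` is the (height, width) of the original image.
from densepose.converters import ToMaskConverter


def densepose_outputs_to_bitmasks(instances, image_size_hw):
    # chart-based outputs dispatch to
    # predictor_output_with_fine_and_coarse_segm_to_mask, CSE outputs to
    # predictor_output_with_coarse_segm_to_mask (see the builtin.py registrations)
    return ToMaskConverter.convert(
        instances.pred_densepose, instances.pred_boxes, image_size_hw
    )
```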
diff --git a/densepose/converters/to_chart_result.py b/densepose/converters/to_chart_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..3eabd2614c285e8ea39d241b73f0d4b5762e6baa
--- /dev/null
+++ b/densepose/converters/to_chart_result.py
@@ -0,0 +1,70 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any
+
+from detectron2.structures import Boxes
+
+from ..structures import DensePoseChartResult, DensePoseChartResultWithConfidences
+from .base import BaseConverter
+
+
+class ToChartResultConverter(BaseConverter):
+ """
+ Converts various DensePose predictor outputs to DensePose results.
+ Each DensePose predictor output type has to register its conversion strategy.
+ """
+
+ registry = {}
+ dst_type = DensePoseChartResult
+
+ @classmethod
+ # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
+ # inconsistently.
+ def convert(cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs) -> DensePoseChartResult:
+ """
+ Convert DensePose predictor outputs to DensePoseResult using some registered
+ converter. Does recursive lookup for base classes, so there's no need
+ for explicit registration for derived classes.
+
+ Args:
+ predictor_outputs: DensePose predictor output to be
+ converted to DensePose chart results
+ boxes (Boxes): bounding boxes that correspond to the DensePose
+ predictor outputs
+ Return:
+ An instance of DensePoseChartResult. If no suitable converter was found, raises KeyError
+ """
+ return super(ToChartResultConverter, cls).convert(predictor_outputs, boxes, *args, **kwargs)
+
+
+class ToChartResultConverterWithConfidences(BaseConverter):
+ """
+ Converts various DensePose predictor outputs to DensePose results.
+ Each DensePose predictor output type has to register its conversion strategy.
+ """
+
+ registry = {}
+ dst_type = DensePoseChartResultWithConfidences
+
+ @classmethod
+ # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
+ # inconsistently.
+ def convert(
+ cls, predictor_outputs: Any, boxes: Boxes, *args, **kwargs
+ ) -> DensePoseChartResultWithConfidences:
+ """
+ Convert DensePose predictor outputs to DensePoseResult with confidences
+ using some registered converter. Does recursive lookup for base classes,
+ so there's no need for explicit registration for derived classes.
+
+ Args:
+ predictor_outputs: DensePose predictor output with confidences
+ to be converted to DensePose chart results
+ boxes (Boxes): bounding boxes that correspond to the DensePose
+ predictor outputs
+ Return:
+ An instance of DensePoseChartResultWithConfidences. If no suitable converter was found, raises KeyError
+ """
+ return super(ToChartResultConverterWithConfidences, cls).convert(
+ predictor_outputs, boxes, *args, **kwargs
+ )
diff --git a/densepose/converters/to_mask.py b/densepose/converters/to_mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..a57fd71afc448a7d269a8a38c2014b14c8c5074f
--- /dev/null
+++ b/densepose/converters/to_mask.py
@@ -0,0 +1,49 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any, Tuple
+
+from detectron2.structures import BitMasks, Boxes
+
+from .base import BaseConverter
+
+ImageSizeType = Tuple[int, int]
+
+
+class ToMaskConverter(BaseConverter):
+ """
+ Converts various DensePose predictor outputs to masks
+ in bit mask format (see `BitMasks`). Each DensePose predictor output type
+ has to register its conversion strategy.
+ """
+
+ registry = {}
+ dst_type = BitMasks
+
+ @classmethod
+ # pyre-fixme[14]: `convert` overrides method defined in `BaseConverter`
+ # inconsistently.
+ def convert(
+ cls,
+ densepose_predictor_outputs: Any,
+ boxes: Boxes,
+ image_size_hw: ImageSizeType,
+ *args,
+ **kwargs
+ ) -> BitMasks:
+ """
+ Convert DensePose predictor outputs to BitMasks using some registered
+ converter. Does recursive lookup for base classes, so there's no need
+ for explicit registration for derived classes.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor output to be
+ converted to BitMasks
+ boxes (Boxes): bounding boxes that correspond to the DensePose
+ predictor outputs
+ image_size_hw (tuple [int, int]): image height and width
+ Return:
+ An instance of `BitMasks`. If no suitable converter was found, raises KeyError
+ """
+ return super(ToMaskConverter, cls).convert(
+ densepose_predictor_outputs, boxes, image_size_hw, *args, **kwargs
+ )
diff --git a/densepose/data/__init__.py b/densepose/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bf21ba75306970fd6a44069b49107320a84182b8
--- /dev/null
+++ b/densepose/data/__init__.py
@@ -0,0 +1,25 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .meshes import builtin
+from .build import (
+ build_detection_test_loader,
+ build_detection_train_loader,
+ build_combined_loader,
+ build_frame_selector,
+ build_inference_based_loaders,
+ has_inference_based_loaders,
+ BootstrapDatasetFactoryCatalog,
+)
+from .combined_loader import CombinedDataLoader
+from .dataset_mapper import DatasetMapper
+from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter
+from .image_list_dataset import ImageListDataset
+from .utils import is_relative_local_path, maybe_prepend_base_path
+
+# ensure the builtin datasets are registered
+from . import datasets
+
+# ensure the bootstrap datasets builders are registered
+from . import build
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
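build.py (added next) wires these exports together: the ground-truth train loader can be mixed with inference-based bootstrap loaders according to the per-dataset RATIO values from BOOTSTRAP_DATASETS. A hedged sketch of that composition; the names follow the exports above, but the exact signatures and the trainer code that performs this wiring are assumptions, so treat it as illustrative only:

```python
# Hedged sketch: compose the GT train loader with inference-based bootstrap
# loaders, weighted by their RATIO values from BOOTSTRAP_DATASETS.
from densepose.data import (
    build_combined_loader,
    build_detection_train_loader,
    build_inference_based_loaders,
    has_inference_based_loaders,
)


def build_train_loader(cfg, model):
    gt_loader = build_detection_train_loader(cfg)
    if not has_inference_based_loaders(cfg):
        return gt_loader
    bootstrap_loaders, ratios = build_inference_based_loaders(cfg, model)
    # GT data keeps a fixed weight of 1.0; each bootstrap loader contributes
    # according to its RATIO entry
    return build_combined_loader(cfg, [gt_loader] + bootstrap_loaders, [1.0] + ratios)
```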
diff --git a/densepose/data/build.py b/densepose/data/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..39edbd89d88b7f66e4952add5d23289c8e7b9348
--- /dev/null
+++ b/densepose/data/build.py
@@ -0,0 +1,736 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import itertools
+import logging
+import numpy as np
+from collections import UserDict, defaultdict
+from dataclasses import dataclass
+from typing import Any, Callable, Collection, Dict, Iterable, List, Optional, Sequence, Tuple
+import torch
+from torch.utils.data.dataset import Dataset
+
+from detectron2.config import CfgNode
+from detectron2.data.build import build_detection_test_loader as d2_build_detection_test_loader
+from detectron2.data.build import build_detection_train_loader as d2_build_detection_train_loader
+from detectron2.data.build import (
+ load_proposals_into_dataset,
+ print_instances_class_histogram,
+ trivial_batch_collator,
+ worker_init_reset_seed,
+)
+from detectron2.data.catalog import DatasetCatalog, Metadata, MetadataCatalog
+from detectron2.data.samplers import TrainingSampler
+from detectron2.utils.comm import get_world_size
+
+from densepose.config import get_bootstrap_dataset_config
+from densepose.modeling import build_densepose_embedder
+
+from .combined_loader import CombinedDataLoader, Loader
+from .dataset_mapper import DatasetMapper
+from .datasets.coco import DENSEPOSE_CSE_KEYS_WITHOUT_MASK, DENSEPOSE_IUV_KEYS_WITHOUT_MASK
+from .datasets.dataset_type import DatasetType
+from .inference_based_loader import InferenceBasedLoader, ScoreBasedFilter
+from .samplers import (
+ DensePoseConfidenceBasedSampler,
+ DensePoseCSEConfidenceBasedSampler,
+ DensePoseCSEUniformSampler,
+ DensePoseUniformSampler,
+ MaskFromDensePoseSampler,
+ PredictionToGroundTruthSampler,
+)
+from .transform import ImageResizeTransform
+from .utils import get_category_to_class_mapping, get_class_to_mesh_name_mapping
+from .video import (
+ FirstKFramesSelector,
+ FrameSelectionStrategy,
+ LastKFramesSelector,
+ RandomKFramesSelector,
+ VideoKeyframeDataset,
+ video_list_from_file,
+)
+
+__all__ = ["build_detection_train_loader", "build_detection_test_loader"]
+
+
+Instance = Dict[str, Any]
+InstancePredicate = Callable[[Instance], bool]
+
+
+def _compute_num_images_per_worker(cfg: CfgNode) -> int:
+ num_workers = get_world_size()
+ images_per_batch = cfg.SOLVER.IMS_PER_BATCH
+ assert (
+ images_per_batch % num_workers == 0
+ ), "SOLVER.IMS_PER_BATCH ({}) must be divisible by the number of workers ({}).".format(
+ images_per_batch, num_workers
+ )
+ assert (
+ images_per_batch >= num_workers
+ ), "SOLVER.IMS_PER_BATCH ({}) must be larger than the number of workers ({}).".format(
+ images_per_batch, num_workers
+ )
+ images_per_worker = images_per_batch // num_workers
+ return images_per_worker
+
+
+def _map_category_id_to_contiguous_id(dataset_name: str, dataset_dicts: Iterable[Instance]) -> None:
+ meta = MetadataCatalog.get(dataset_name)
+ for dataset_dict in dataset_dicts:
+ for ann in dataset_dict["annotations"]:
+ ann["category_id"] = meta.thing_dataset_id_to_contiguous_id[ann["category_id"]]
+
+
+@dataclass
+class _DatasetCategory:
+ """
+ Class representing category data in a dataset:
+ - id: category ID, as specified in the dataset annotations file
+ - name: category name, as specified in the dataset annotations file
+ - mapped_id: category ID after applying category maps (DATASETS.CATEGORY_MAPS config option)
+ - mapped_name: category name after applying category maps
+ - dataset_name: dataset in which the category is defined
+
+ For example, when training models in a class-agnostic manner, one could take LVIS 1.0
+ dataset and map the animal categories to the same category as human data from COCO:
+ id = 225
+ name = "cat"
+ mapped_id = 1
+ mapped_name = "person"
+ dataset_name = "lvis_v1_animals_dp_train"
+ """
+
+ id: int
+ name: str
+ mapped_id: int
+ mapped_name: str
+ dataset_name: str
+
+
+_MergedCategoriesT = Dict[int, List[_DatasetCategory]]
+
+
+def _add_category_id_to_contiguous_id_maps_to_metadata(
+ merged_categories: _MergedCategoriesT,
+) -> None:
+ merged_categories_per_dataset = {}
+ for contiguous_cat_id, cat_id in enumerate(sorted(merged_categories.keys())):
+ for cat in merged_categories[cat_id]:
+ if cat.dataset_name not in merged_categories_per_dataset:
+ merged_categories_per_dataset[cat.dataset_name] = defaultdict(list)
+ merged_categories_per_dataset[cat.dataset_name][cat_id].append(
+ (
+ contiguous_cat_id,
+ cat,
+ )
+ )
+
+ logger = logging.getLogger(__name__)
+ for dataset_name, merged_categories in merged_categories_per_dataset.items():
+ meta = MetadataCatalog.get(dataset_name)
+ if not hasattr(meta, "thing_classes"):
+ meta.thing_classes = []
+ meta.thing_dataset_id_to_contiguous_id = {}
+ meta.thing_dataset_id_to_merged_id = {}
+ else:
+ meta.thing_classes.clear()
+ meta.thing_dataset_id_to_contiguous_id.clear()
+ meta.thing_dataset_id_to_merged_id.clear()
+ logger.info(f"Dataset {dataset_name}: category ID to contiguous ID mapping:")
+ for _cat_id, categories in sorted(merged_categories.items()):
+ added_to_thing_classes = False
+ for contiguous_cat_id, cat in categories:
+ if not added_to_thing_classes:
+ meta.thing_classes.append(cat.mapped_name)
+ added_to_thing_classes = True
+ meta.thing_dataset_id_to_contiguous_id[cat.id] = contiguous_cat_id
+ meta.thing_dataset_id_to_merged_id[cat.id] = cat.mapped_id
+ logger.info(f"{cat.id} ({cat.name}) -> {contiguous_cat_id}")
+
+
+def _maybe_create_general_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
+ def has_annotations(instance: Instance) -> bool:
+ return "annotations" in instance
+
+ def has_only_crowd_annotations(instance: Instance) -> bool:
+ for ann in instance["annotations"]:
+ if ann.get("is_crowd", 0) == 0:
+ return False
+ return True
+
+ def general_keep_instance_predicate(instance: Instance) -> bool:
+ return has_annotations(instance) and not has_only_crowd_annotations(instance)
+
+ if not cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS:
+ return None
+ return general_keep_instance_predicate
+
+
+def _maybe_create_keypoints_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
+
+ min_num_keypoints = cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+
+ def has_sufficient_num_keypoints(instance: Instance) -> bool:
+ num_kpts = sum(
+ (np.array(ann["keypoints"][2::3]) > 0).sum()
+ for ann in instance["annotations"]
+ if "keypoints" in ann
+ )
+ return num_kpts >= min_num_keypoints
+
+ if cfg.MODEL.KEYPOINT_ON and (min_num_keypoints > 0):
+ return has_sufficient_num_keypoints
+ return None
+
+
+def _maybe_create_mask_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
+ if not cfg.MODEL.MASK_ON:
+ return None
+
+ def has_mask_annotations(instance: Instance) -> bool:
+ return any("segmentation" in ann for ann in instance["annotations"])
+
+ return has_mask_annotations
+
+
+def _maybe_create_densepose_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
+ if not cfg.MODEL.DENSEPOSE_ON:
+ return None
+
+ use_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
+
+ def has_densepose_annotations(instance: Instance) -> bool:
+ for ann in instance["annotations"]:
+ if all(key in ann for key in DENSEPOSE_IUV_KEYS_WITHOUT_MASK) or all(
+ key in ann for key in DENSEPOSE_CSE_KEYS_WITHOUT_MASK
+ ):
+ return True
+ if use_masks and "segmentation" in ann:
+ return True
+ return False
+
+ return has_densepose_annotations
+
+
+def _maybe_create_specific_keep_instance_predicate(cfg: CfgNode) -> Optional[InstancePredicate]:
+ specific_predicate_creators = [
+ _maybe_create_keypoints_keep_instance_predicate,
+ _maybe_create_mask_keep_instance_predicate,
+ _maybe_create_densepose_keep_instance_predicate,
+ ]
+ predicates = [creator(cfg) for creator in specific_predicate_creators]
+ predicates = [p for p in predicates if p is not None]
+ if not predicates:
+ return None
+
+ def combined_predicate(instance: Instance) -> bool:
+ return any(p(instance) for p in predicates)
+
+ return combined_predicate
+
+
+def _get_train_keep_instance_predicate(cfg: CfgNode):
+ general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg)
+ combined_specific_keep_predicate = _maybe_create_specific_keep_instance_predicate(cfg)
+
+ def combined_general_specific_keep_predicate(instance: Instance) -> bool:
+ return general_keep_predicate(instance) and combined_specific_keep_predicate(instance)
+
+ if (general_keep_predicate is None) and (combined_specific_keep_predicate is None):
+ return None
+ if general_keep_predicate is None:
+ return combined_specific_keep_predicate
+ if combined_specific_keep_predicate is None:
+ return general_keep_predicate
+ return combined_general_specific_keep_predicate
+
+
+def _get_test_keep_instance_predicate(cfg: CfgNode):
+ general_keep_predicate = _maybe_create_general_keep_instance_predicate(cfg)
+ return general_keep_predicate
+
+
+def _maybe_filter_and_map_categories(
+ dataset_name: str, dataset_dicts: List[Instance]
+) -> List[Instance]:
+ meta = MetadataCatalog.get(dataset_name)
+ category_id_map = meta.thing_dataset_id_to_contiguous_id
+ filtered_dataset_dicts = []
+ for dataset_dict in dataset_dicts:
+ anns = []
+ for ann in dataset_dict["annotations"]:
+ cat_id = ann["category_id"]
+ if cat_id not in category_id_map:
+ continue
+ ann["category_id"] = category_id_map[cat_id]
+ anns.append(ann)
+ dataset_dict["annotations"] = anns
+ filtered_dataset_dicts.append(dataset_dict)
+ return filtered_dataset_dicts
+
+
+def _add_category_whitelists_to_metadata(cfg: CfgNode) -> None:
+ for dataset_name, whitelisted_cat_ids in cfg.DATASETS.WHITELISTED_CATEGORIES.items():
+ meta = MetadataCatalog.get(dataset_name)
+ meta.whitelisted_categories = whitelisted_cat_ids
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Whitelisted categories for dataset {}: {}".format(
+ dataset_name, meta.whitelisted_categories
+ )
+ )
+
+
+def _add_category_maps_to_metadata(cfg: CfgNode) -> None:
+ for dataset_name, category_map in cfg.DATASETS.CATEGORY_MAPS.items():
+ category_map = {
+ int(cat_id_src): int(cat_id_dst) for cat_id_src, cat_id_dst in category_map.items()
+ }
+ meta = MetadataCatalog.get(dataset_name)
+ meta.category_map = category_map
+ logger = logging.getLogger(__name__)
+ logger.info("Category maps for dataset {}: {}".format(dataset_name, meta.category_map))
+
+
+def _add_category_info_to_bootstrapping_metadata(dataset_name: str, dataset_cfg: CfgNode) -> None:
+ meta = MetadataCatalog.get(dataset_name)
+ meta.category_to_class_mapping = get_category_to_class_mapping(dataset_cfg)
+ meta.categories = dataset_cfg.CATEGORIES
+ meta.max_count_per_category = dataset_cfg.MAX_COUNT_PER_CATEGORY
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Category to class mapping for dataset {}: {}".format(
+ dataset_name, meta.category_to_class_mapping
+ )
+ )
+
+
+def _maybe_add_class_to_mesh_name_map_to_metadata(dataset_names: List[str], cfg: CfgNode) -> None:
+ for dataset_name in dataset_names:
+ meta = MetadataCatalog.get(dataset_name)
+ if not hasattr(meta, "class_to_mesh_name"):
+ meta.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
+
+
+def _merge_categories(dataset_names: Collection[str]) -> _MergedCategoriesT:
+ merged_categories = defaultdict(list)
+ category_names = {}
+ for dataset_name in dataset_names:
+ meta = MetadataCatalog.get(dataset_name)
+ whitelisted_categories = meta.get("whitelisted_categories")
+ category_map = meta.get("category_map", {})
+ cat_ids = (
+ whitelisted_categories if whitelisted_categories is not None else meta.categories.keys()
+ )
+ for cat_id in cat_ids:
+ cat_name = meta.categories[cat_id]
+ cat_id_mapped = category_map.get(cat_id, cat_id)
+ if cat_id_mapped == cat_id or cat_id_mapped in cat_ids:
+ category_names[cat_id] = cat_name
+ else:
+ category_names[cat_id] = str(cat_id_mapped)
+ # assign a temporary mapped category name; it can be changed
+ # during the second pass, since the mapped ID can correspond to a category
+ # from a different dataset
+ cat_name_mapped = meta.categories[cat_id_mapped]
+ merged_categories[cat_id_mapped].append(
+ _DatasetCategory(
+ id=cat_id,
+ name=cat_name,
+ mapped_id=cat_id_mapped,
+ mapped_name=cat_name_mapped,
+ dataset_name=dataset_name,
+ )
+ )
+ # second pass to assign proper mapped category names
+ for cat_id, categories in merged_categories.items():
+ for cat in categories:
+ if cat_id in category_names and cat.mapped_name != category_names[cat_id]:
+ cat.mapped_name = category_names[cat_id]
+
+ return merged_categories
+
+
+def _warn_if_merged_different_categories(merged_categories: _MergedCategoriesT) -> None:
+ logger = logging.getLogger(__name__)
+ for cat_id in merged_categories:
+ merged_categories_i = merged_categories[cat_id]
+ first_cat_name = merged_categories_i[0].name
+ if len(merged_categories_i) > 1 and not all(
+ cat.name == first_cat_name for cat in merged_categories_i[1:]
+ ):
+ cat_summary_str = ", ".join(
+ [f"{cat.id} ({cat.name}) from {cat.dataset_name}" for cat in merged_categories_i]
+ )
+ logger.warning(
+ f"Merged category {cat_id} corresponds to the following categories: "
+ f"{cat_summary_str}"
+ )
+
+
+def combine_detection_dataset_dicts(
+ dataset_names: Collection[str],
+ keep_instance_predicate: Optional[InstancePredicate] = None,
+ proposal_files: Optional[Collection[str]] = None,
+) -> List[Instance]:
+ """
+ Load and prepare dataset dicts for training / testing
+
+ Args:
+ dataset_names (Collection[str]): a list of dataset names
+ keep_instance_predicate (Callable: Dict[str, Any] -> bool): predicate
+ applied to instance dicts which defines whether to keep the instance
+ proposal_files (Collection[str]): if given, a list of object proposal files
+ that match each dataset in `dataset_names`.
+ """
+ assert len(dataset_names)
+ if proposal_files is None:
+ proposal_files = [None] * len(dataset_names)
+ assert len(dataset_names) == len(proposal_files)
+ # load datasets and metadata
+ dataset_name_to_dicts = {}
+ for dataset_name in dataset_names:
+ dataset_name_to_dicts[dataset_name] = DatasetCatalog.get(dataset_name)
+ assert len(dataset_name_to_dicts[dataset_name]), f"Dataset '{dataset_name}' is empty!"
+ # merge categories, requires category metadata to be loaded
+ # mapped cat_id -> list of _DatasetCategory entries merged under that ID
+ merged_categories = _merge_categories(dataset_names)
+ _warn_if_merged_different_categories(merged_categories)
+ merged_category_names = [
+ merged_categories[cat_id][0].mapped_name for cat_id in sorted(merged_categories)
+ ]
+ # map to contiguous category IDs
+ _add_category_id_to_contiguous_id_maps_to_metadata(merged_categories)
+ # load annotations and dataset metadata
+ for dataset_name, proposal_file in zip(dataset_names, proposal_files):
+ dataset_dicts = dataset_name_to_dicts[dataset_name]
+ assert len(dataset_dicts), f"Dataset '{dataset_name}' is empty!"
+ if proposal_file is not None:
+ dataset_dicts = load_proposals_into_dataset(dataset_dicts, proposal_file)
+ dataset_dicts = _maybe_filter_and_map_categories(dataset_name, dataset_dicts)
+ print_instances_class_histogram(dataset_dicts, merged_category_names)
+ dataset_name_to_dicts[dataset_name] = dataset_dicts
+
+ if keep_instance_predicate is not None:
+ all_datasets_dicts_plain = [
+ d
+ for d in itertools.chain.from_iterable(dataset_name_to_dicts.values())
+ if keep_instance_predicate(d)
+ ]
+ else:
+ all_datasets_dicts_plain = list(
+ itertools.chain.from_iterable(dataset_name_to_dicts.values())
+ )
+ return all_datasets_dicts_plain
+
+
+def build_detection_train_loader(cfg: CfgNode, mapper=None):
+ """
+ The data loader is created in a way similar to that of Detectron2.
+ The main difference is:
+ - it allows combining datasets with different but compatible object category sets
+
+ The data loader is created by the following steps:
+ 1. Use the dataset names in config to query :class:`DatasetCatalog`, and obtain a list of dicts.
+ 2. Start workers to work on the dicts. Each worker will:
+ * Map each metadata dict into another format to be consumed by the model.
+ * Batch them by simply putting dicts into a list.
+ The batched ``list[mapped_dict]`` is what this dataloader will return.
+
+ Args:
+ cfg (CfgNode): the config
+ mapper (callable): a callable which takes a sample (dict) from dataset and
+ returns the format to be consumed by the model.
+ By default it will be `DatasetMapper(cfg, True)`.
+
+ Returns:
+ an infinite iterator of training data
+ """
+
+ _add_category_whitelists_to_metadata(cfg)
+ _add_category_maps_to_metadata(cfg)
+ _maybe_add_class_to_mesh_name_map_to_metadata(cfg.DATASETS.TRAIN, cfg)
+ dataset_dicts = combine_detection_dataset_dicts(
+ cfg.DATASETS.TRAIN,
+ keep_instance_predicate=_get_train_keep_instance_predicate(cfg),
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+ )
+ if mapper is None:
+ mapper = DatasetMapper(cfg, True)
+ return d2_build_detection_train_loader(cfg, dataset=dataset_dicts, mapper=mapper)
+
+
+def build_detection_test_loader(cfg, dataset_name, mapper=None):
+ """
+ Similar to `build_detection_train_loader`.
+ But this function uses the given `dataset_name` argument (instead of the names in cfg),
+ and uses batch size 1.
+
+ Args:
+ cfg: a detectron2 CfgNode
+ dataset_name (str): a name of the dataset that's available in the DatasetCatalog
+ mapper (callable): a callable which takes a sample (dict) from dataset
+ and returns the format to be consumed by the model.
+ By default it will be `DatasetMapper(cfg, False)`.
+
+ Returns:
+ DataLoader: a torch DataLoader, that loads the given detection
+ dataset, with test-time transformation and batching.
+ """
+ _add_category_whitelists_to_metadata(cfg)
+ _add_category_maps_to_metadata(cfg)
+ _maybe_add_class_to_mesh_name_map_to_metadata([dataset_name], cfg)
+ dataset_dicts = combine_detection_dataset_dicts(
+ [dataset_name],
+ keep_instance_predicate=_get_test_keep_instance_predicate(cfg),
+ proposal_files=[
+ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(dataset_name)]
+ ]
+ if cfg.MODEL.LOAD_PROPOSALS
+ else None,
+ )
+ sampler = None
+ if not cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE:
+ sampler = torch.utils.data.SequentialSampler(dataset_dicts)
+ if mapper is None:
+ mapper = DatasetMapper(cfg, False)
+ return d2_build_detection_test_loader(
+ dataset_dicts, mapper=mapper, num_workers=cfg.DATALOADER.NUM_WORKERS, sampler=sampler
+ )
+
+
+def build_frame_selector(cfg: CfgNode):
+ strategy = FrameSelectionStrategy(cfg.STRATEGY)
+ if strategy == FrameSelectionStrategy.RANDOM_K:
+ frame_selector = RandomKFramesSelector(cfg.NUM_IMAGES)
+ elif strategy == FrameSelectionStrategy.FIRST_K:
+ frame_selector = FirstKFramesSelector(cfg.NUM_IMAGES)
+ elif strategy == FrameSelectionStrategy.LAST_K:
+ frame_selector = LastKFramesSelector(cfg.NUM_IMAGES)
+ elif strategy == FrameSelectionStrategy.ALL:
+ frame_selector = None
+ # pyre-fixme[61]: `frame_selector` may not be initialized here.
+ return frame_selector
+
+
+def build_transform(cfg: CfgNode, data_type: str):
+ if cfg.TYPE == "resize":
+ if data_type == "image":
+ return ImageResizeTransform(cfg.MIN_SIZE, cfg.MAX_SIZE)
+ raise ValueError(f"Unknown transform {cfg.TYPE} for data type {data_type}")
+
+
+def build_combined_loader(cfg: CfgNode, loaders: Collection[Loader], ratios: Sequence[float]):
+ images_per_worker = _compute_num_images_per_worker(cfg)
+ return CombinedDataLoader(loaders, images_per_worker, ratios)
+
+
+def build_bootstrap_dataset(dataset_name: str, cfg: CfgNode) -> Sequence[torch.Tensor]:
+ """
+ Build dataset that provides data to bootstrap on
+
+ Args:
+ dataset_name (str): Name of the dataset, needs to have associated metadata
+ to load the data
+ cfg (CfgNode): bootstrapping config
+ Returns:
+ Sequence[Tensor] - dataset that provides image batches, Tensors of size
+ [N, C, H, W] of type float32
+ """
+ logger = logging.getLogger(__name__)
+ _add_category_info_to_bootstrapping_metadata(dataset_name, cfg)
+ meta = MetadataCatalog.get(dataset_name)
+ factory = BootstrapDatasetFactoryCatalog.get(meta.dataset_type)
+ dataset = None
+ if factory is not None:
+ dataset = factory(meta, cfg)
+ if dataset is None:
+ logger.warning(f"Failed to create dataset {dataset_name} of type {meta.dataset_type}")
+ return dataset
+
+
+def build_data_sampler(cfg: CfgNode, sampler_cfg: CfgNode, embedder: Optional[torch.nn.Module]):
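+ # Each branch below builds a PredictionToGroundTruthSampler and registers converters that
+ # turn predicted DensePose results ("pred_densepose") into pseudo ground truth
+ # ("gt_densepose") and masks ("gt_masks") used for bootstrapping.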
+ if sampler_cfg.TYPE == "densepose_uniform":
+ data_sampler = PredictionToGroundTruthSampler()
+ # transform densepose pred -> gt
+ data_sampler.register_sampler(
+ "pred_densepose",
+ "gt_densepose",
+ DensePoseUniformSampler(count_per_class=sampler_cfg.COUNT_PER_CLASS),
+ )
+ data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+ return data_sampler
+ elif sampler_cfg.TYPE == "densepose_UV_confidence":
+ data_sampler = PredictionToGroundTruthSampler()
+ # transform densepose pred -> gt
+ data_sampler.register_sampler(
+ "pred_densepose",
+ "gt_densepose",
+ DensePoseConfidenceBasedSampler(
+ confidence_channel="sigma_2",
+ count_per_class=sampler_cfg.COUNT_PER_CLASS,
+ search_proportion=0.5,
+ ),
+ )
+ data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+ return data_sampler
+ elif sampler_cfg.TYPE == "densepose_fine_segm_confidence":
+ data_sampler = PredictionToGroundTruthSampler()
+ # transform densepose pred -> gt
+ data_sampler.register_sampler(
+ "pred_densepose",
+ "gt_densepose",
+ DensePoseConfidenceBasedSampler(
+ confidence_channel="fine_segm_confidence",
+ count_per_class=sampler_cfg.COUNT_PER_CLASS,
+ search_proportion=0.5,
+ ),
+ )
+ data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+ return data_sampler
+ elif sampler_cfg.TYPE == "densepose_coarse_segm_confidence":
+ data_sampler = PredictionToGroundTruthSampler()
+ # transform densepose pred -> gt
+ data_sampler.register_sampler(
+ "pred_densepose",
+ "gt_densepose",
+ DensePoseConfidenceBasedSampler(
+ confidence_channel="coarse_segm_confidence",
+ count_per_class=sampler_cfg.COUNT_PER_CLASS,
+ search_proportion=0.5,
+ ),
+ )
+ data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+ return data_sampler
+ elif sampler_cfg.TYPE == "densepose_cse_uniform":
+ assert embedder is not None
+ data_sampler = PredictionToGroundTruthSampler()
+ # transform densepose pred -> gt
+ data_sampler.register_sampler(
+ "pred_densepose",
+ "gt_densepose",
+ DensePoseCSEUniformSampler(
+ cfg=cfg,
+ use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
+ embedder=embedder,
+ count_per_class=sampler_cfg.COUNT_PER_CLASS,
+ ),
+ )
+ data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+ return data_sampler
+ elif sampler_cfg.TYPE == "densepose_cse_coarse_segm_confidence":
+ assert embedder is not None
+ data_sampler = PredictionToGroundTruthSampler()
+ # transform densepose pred -> gt
+ data_sampler.register_sampler(
+ "pred_densepose",
+ "gt_densepose",
+ DensePoseCSEConfidenceBasedSampler(
+ cfg=cfg,
+ use_gt_categories=sampler_cfg.USE_GROUND_TRUTH_CATEGORIES,
+ embedder=embedder,
+ confidence_channel="coarse_segm_confidence",
+ count_per_class=sampler_cfg.COUNT_PER_CLASS,
+ search_proportion=0.5,
+ ),
+ )
+ data_sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+ return data_sampler
+
+ raise ValueError(f"Unknown data sampler type {sampler_cfg.TYPE}")
+
+
+def build_data_filter(cfg: CfgNode):
+ if cfg.TYPE == "detection_score":
+ min_score = cfg.MIN_VALUE
+ return ScoreBasedFilter(min_score=min_score)
+ raise ValueError(f"Unknown data filter type {cfg.TYPE}")
+
+
+def build_inference_based_loader(
+ cfg: CfgNode,
+ dataset_cfg: CfgNode,
+ model: torch.nn.Module,
+ embedder: Optional[torch.nn.Module] = None,
+) -> InferenceBasedLoader:
+ """
+ Constructs a data loader based on the inference results of a model.
+ """
+ dataset = build_bootstrap_dataset(dataset_cfg.DATASET, dataset_cfg.IMAGE_LOADER)
+ meta = MetadataCatalog.get(dataset_cfg.DATASET)
+ training_sampler = TrainingSampler(len(dataset))
+ data_loader = torch.utils.data.DataLoader(
+ dataset, # pyre-ignore[6]
+ batch_size=dataset_cfg.IMAGE_LOADER.BATCH_SIZE,
+ sampler=training_sampler,
+ num_workers=dataset_cfg.IMAGE_LOADER.NUM_WORKERS,
+ collate_fn=trivial_batch_collator,
+ worker_init_fn=worker_init_reset_seed,
+ )
+ return InferenceBasedLoader(
+ model,
+ data_loader=data_loader,
+ data_sampler=build_data_sampler(cfg, dataset_cfg.DATA_SAMPLER, embedder),
+ data_filter=build_data_filter(dataset_cfg.FILTER),
+ shuffle=True,
+ batch_size=dataset_cfg.INFERENCE.OUTPUT_BATCH_SIZE,
+ inference_batch_size=dataset_cfg.INFERENCE.INPUT_BATCH_SIZE,
+ category_to_class_mapping=meta.category_to_class_mapping,
+ )
+
+
+def has_inference_based_loaders(cfg: CfgNode) -> bool:
+ """
+ Returns True if at least one inference-based loader must
+ be instantiated for training
+ """
+ return len(cfg.BOOTSTRAP_DATASETS) > 0
+
+
+def build_inference_based_loaders(
+ cfg: CfgNode, model: torch.nn.Module
+) -> Tuple[List[InferenceBasedLoader], List[float]]:
+ loaders = []
+ ratios = []
+ embedder = build_densepose_embedder(cfg).to(device=model.device) # pyre-ignore[16]
+ for dataset_spec in cfg.BOOTSTRAP_DATASETS:
+ dataset_cfg = get_bootstrap_dataset_config().clone()
+ dataset_cfg.merge_from_other_cfg(CfgNode(dataset_spec))
+ loader = build_inference_based_loader(cfg, dataset_cfg, model, embedder)
+ loaders.append(loader)
+ ratios.append(dataset_cfg.RATIO)
+ return loaders, ratios
+
+
+def build_video_list_dataset(meta: Metadata, cfg: CfgNode):
+ video_list_fpath = meta.video_list_fpath
+ video_base_path = meta.video_base_path
+ category = meta.category
+ if cfg.TYPE == "video_keyframe":
+ frame_selector = build_frame_selector(cfg.SELECT)
+ transform = build_transform(cfg.TRANSFORM, data_type="image")
+ video_list = video_list_from_file(video_list_fpath, video_base_path)
+ keyframe_helper_fpath = getattr(cfg, "KEYFRAME_HELPER", None)
+ return VideoKeyframeDataset(
+ video_list, category, frame_selector, transform, keyframe_helper_fpath
+ )
+
+
+class _BootstrapDatasetFactoryCatalog(UserDict):
+ """
+ A global dictionary that stores factory functions which create bootstrap datasets
+ from metadata and config, keyed by DatasetType
+ """
+
+ def register(self, dataset_type: DatasetType, factory: Callable[[Metadata, CfgNode], Dataset]):
+ """
+ Args:
+ dataset_type (DatasetType): a DatasetType e.g. DatasetType.VIDEO_LIST
+ factory (Callable[[Metadata, CfgNode], Dataset]): a callable which takes Metadata and cfg
+ arguments and returns a dataset object.
+ """
+ assert dataset_type not in self, "Dataset '{}' is already registered!".format(dataset_type)
+ self[dataset_type] = factory
+
+
+BootstrapDatasetFactoryCatalog = _BootstrapDatasetFactoryCatalog()
+BootstrapDatasetFactoryCatalog.register(DatasetType.VIDEO_LIST, build_video_list_dataset)
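+# Additional bootstrap dataset types could be supported by registering further factories here,
+# e.g. a hypothetical BootstrapDatasetFactoryCatalog.register(DatasetType.IMAGE_LIST, ...)
+# for image-list data; only VIDEO_LIST is registered in this file.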
diff --git a/densepose/data/combined_loader.py b/densepose/data/combined_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..5bfbbdeaf53e184b83a6e0f951867b79d3d9f1fd
--- /dev/null
+++ b/densepose/data/combined_loader.py
@@ -0,0 +1,44 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+from collections import deque
+from typing import Any, Collection, Deque, Iterable, Iterator, List, Sequence
+
+Loader = Iterable[Any]
+
+
+def _pooled_next(iterator: Iterator[Any], pool: Deque[Any]):
+ if not pool:
+ pool.extend(next(iterator))
+ return pool.popleft()
+
+
+class CombinedDataLoader:
+ """
+ Combines data loaders using the provided sampling ratios
+ """
+
+ BATCH_COUNT = 100
+
+ def __init__(self, loaders: Collection[Loader], batch_size: int, ratios: Sequence[float]):
+ self.loaders = loaders
+ self.batch_size = batch_size
+ self.ratios = ratios
+
+ def __iter__(self) -> Iterator[List[Any]]:
+ iters = [iter(loader) for loader in self.loaders]
+ indices = []
+ pool = [deque() for _ in iters]  # a separate buffer for each loader
+ # infinite iterator, as in D2
+ while True:
+ if not indices:
+ # just a buffer of indices, its size doesn't matter
+ # as long as it's a multiple of batch_size
+ k = self.batch_size * self.BATCH_COUNT
+ indices = random.choices(range(len(self.loaders)), self.ratios, k=k)
+ try:
+ batch = [_pooled_next(iters[i], pool[i]) for i in indices[: self.batch_size]]
+ except StopIteration:
+ break
+ indices = indices[self.batch_size :]
+ yield batch
diff --git a/densepose/data/dataset_mapper.py b/densepose/data/dataset_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..3229c4d7b9eab3e8e2d4f895d5209dd655d716a5
--- /dev/null
+++ b/densepose/data/dataset_mapper.py
@@ -0,0 +1,168 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import copy
+import logging
+from typing import Any, Dict, List, Tuple
+import torch
+
+from detectron2.data import MetadataCatalog
+from detectron2.data import detection_utils as utils
+from detectron2.data import transforms as T
+from detectron2.layers import ROIAlign
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+
+from densepose.structures import DensePoseDataRelative, DensePoseList, DensePoseTransformData
+
+
+def build_augmentation(cfg, is_train):
+ logger = logging.getLogger(__name__)
+ result = utils.build_augmentation(cfg, is_train)
+ if is_train:
+ random_rotation = T.RandomRotation(
+ cfg.INPUT.ROTATION_ANGLES, expand=False, sample_style="choice"
+ )
+ result.append(random_rotation)
+ logger.info("DensePose-specific augmentation used in training: " + str(random_rotation))
+ return result
+
+
+class DatasetMapper:
+ """
+ A customized version of `detectron2.data.DatasetMapper`
+ """
+
+ def __init__(self, cfg, is_train=True):
+ self.augmentation = build_augmentation(cfg, is_train)
+
+ # fmt: off
+ self.img_format = cfg.INPUT.FORMAT
+ self.mask_on = (
+ cfg.MODEL.MASK_ON or (
+ cfg.MODEL.DENSEPOSE_ON
+ and cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS)
+ )
+ self.keypoint_on = cfg.MODEL.KEYPOINT_ON
+ self.densepose_on = cfg.MODEL.DENSEPOSE_ON
+ assert not cfg.MODEL.LOAD_PROPOSALS, "not supported yet"
+ # fmt: on
+ if self.keypoint_on and is_train:
+ # Flip only makes sense in training
+ self.keypoint_hflip_indices = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
+ else:
+ self.keypoint_hflip_indices = None
+
+ if self.densepose_on:
+ densepose_transform_srcs = [
+ MetadataCatalog.get(ds).densepose_transform_src
+ for ds in cfg.DATASETS.TRAIN + cfg.DATASETS.TEST
+ ]
+ assert len(densepose_transform_srcs) > 0
+ # TODO: check that DensePose transformation data is the same for
+ # all the datasets. Otherwise one would have to pass DB ID with
+ # each entry to select proper transformation data. For now, since
+ # all DensePose annotated data uses the same data semantics, we
+ # omit this check.
+ densepose_transform_data_fpath = PathManager.get_local_path(densepose_transform_srcs[0])
+ self.densepose_transform_data = DensePoseTransformData.load(
+ densepose_transform_data_fpath
+ )
+
+ self.is_train = is_train
+
+ def __call__(self, dataset_dict):
+ """
+ Args:
+ dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+
+ Returns:
+ dict: a format that builtin models in detectron2 accept
+ """
+ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
+ image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
+ utils.check_image_size(dataset_dict, image)
+
+ image, transforms = T.apply_transform_gens(self.augmentation, image)
+ image_shape = image.shape[:2] # h, w
+ dataset_dict["image"] = torch.as_tensor(image.transpose(2, 0, 1).astype("float32"))
+
+ if not self.is_train:
+ dataset_dict.pop("annotations", None)
+ return dataset_dict
+
+ for anno in dataset_dict["annotations"]:
+ if not self.mask_on:
+ anno.pop("segmentation", None)
+ if not self.keypoint_on:
+ anno.pop("keypoints", None)
+
+ # USER: Implement additional transformations if you have other types of data
+ # USER: Don't call transpose_densepose if you don't need
+ annos = [
+ self._transform_densepose(
+ utils.transform_instance_annotations(
+ obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+ ),
+ transforms,
+ )
+ for obj in dataset_dict.pop("annotations")
+ if obj.get("iscrowd", 0) == 0
+ ]
+
+ if self.mask_on:
+ self._add_densepose_masks_as_segmentation(annos, image_shape)
+
+ instances = utils.annotations_to_instances(annos, image_shape, mask_format="bitmask")
+ densepose_annotations = [obj.get("densepose") for obj in annos]
+ if densepose_annotations and not all(v is None for v in densepose_annotations):
+ instances.gt_densepose = DensePoseList(
+ densepose_annotations, instances.gt_boxes, image_shape
+ )
+
+ dataset_dict["instances"] = instances[instances.gt_boxes.nonempty()]
+ return dataset_dict
+
+ def _transform_densepose(self, annotation, transforms):
+ if not self.densepose_on:
+ return annotation
+
+ # Handle densepose annotations
+ is_valid, reason_not_valid = DensePoseDataRelative.validate_annotation(annotation)
+ if is_valid:
+ densepose_data = DensePoseDataRelative(annotation, cleanup=True)
+ densepose_data.apply_transform(transforms, self.densepose_transform_data)
+ annotation["densepose"] = densepose_data
+ else:
+ # logger = logging.getLogger(__name__)
+ # logger.debug("Could not load DensePose annotation: {}".format(reason_not_valid))
+ DensePoseDataRelative.cleanup_annotation(annotation)
+ # NOTE: annotations for certain instances may be unavailable.
+ # 'None' is accepted by the DensePoseList data structure.
+ annotation["densepose"] = None
+ return annotation
+
+ def _add_densepose_masks_as_segmentation(
+ self, annotations: List[Dict[str, Any]], image_shape_hw: Tuple[int, int]
+ ):
+ for obj in annotations:
+ if ("densepose" not in obj) or ("segmentation" in obj):
+ continue
+ # DP segmentation: torch.Tensor [S, S] of float32, S=256
+ segm_dp = torch.zeros_like(obj["densepose"].segm)
+ segm_dp[obj["densepose"].segm > 0] = 1
+ segm_h, segm_w = segm_dp.shape
+ bbox_segm_dp = torch.tensor((0, 0, segm_h - 1, segm_w - 1), dtype=torch.float32)
+ # image bbox
+ x0, y0, x1, y1 = (
+ v.item() for v in BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS)
+ )
+ segm_aligned = (
+ ROIAlign((y1 - y0, x1 - x0), 1.0, 0, aligned=True)
+ .forward(segm_dp.view(1, 1, *segm_dp.shape), bbox_segm_dp)
+ .squeeze()
+ )
+ image_mask = torch.zeros(*image_shape_hw, dtype=torch.float32)
+ image_mask[y0:y1, x0:x1] = segm_aligned
+ # segmentation for BitMask: np.array [H, W] of bool
+ obj["segmentation"] = image_mask >= 0.5
diff --git a/densepose/data/datasets/__init__.py b/densepose/data/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..260ccb9c43e5aa2d0f1fd28cfcbdd4f31913d16a
--- /dev/null
+++ b/densepose/data/datasets/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from . import builtin # ensure the builtin datasets are registered
+
+__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
diff --git a/densepose/data/datasets/builtin.py b/densepose/data/datasets/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..7572cd6abc550fdce9d1fd079a7af4870de303bb
--- /dev/null
+++ b/densepose/data/datasets/builtin.py
@@ -0,0 +1,16 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .chimpnsee import register_dataset as register_chimpnsee_dataset
+from .coco import BASE_DATASETS as BASE_COCO_DATASETS
+from .coco import DATASETS as COCO_DATASETS
+from .coco import register_datasets as register_coco_datasets
+from .lvis import DATASETS as LVIS_DATASETS
+from .lvis import register_datasets as register_lvis_datasets
+
+DEFAULT_DATASETS_ROOT = "datasets"
+
+
+register_coco_datasets(COCO_DATASETS, DEFAULT_DATASETS_ROOT)
+register_coco_datasets(BASE_COCO_DATASETS, DEFAULT_DATASETS_ROOT)
+register_lvis_datasets(LVIS_DATASETS, DEFAULT_DATASETS_ROOT)
+
+register_chimpnsee_dataset(DEFAULT_DATASETS_ROOT) # pyre-ignore[19]
diff --git a/densepose/data/datasets/chimpnsee.py b/densepose/data/datasets/chimpnsee.py
new file mode 100644
index 0000000000000000000000000000000000000000..61e0b506dc4ed6ad78c9c4ce4677415a27f5f6cd
--- /dev/null
+++ b/densepose/data/datasets/chimpnsee.py
@@ -0,0 +1,29 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Optional
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+
+from ..utils import maybe_prepend_base_path
+from .dataset_type import DatasetType
+
+CHIMPNSEE_DATASET_NAME = "chimpnsee"
+
+
+def register_dataset(datasets_root: Optional[str] = None) -> None:
+ def empty_load_callback():
+ pass
+
+ video_list_fpath = maybe_prepend_base_path(
+ datasets_root,
+ "chimpnsee/cdna.eva.mpg.de/video_list.txt",
+ )
+ video_base_path = maybe_prepend_base_path(datasets_root, "chimpnsee/cdna.eva.mpg.de")
+
+ DatasetCatalog.register(CHIMPNSEE_DATASET_NAME, empty_load_callback)
+ MetadataCatalog.get(CHIMPNSEE_DATASET_NAME).set(
+ dataset_type=DatasetType.VIDEO_LIST,
+ video_list_fpath=video_list_fpath,
+ video_base_path=video_base_path,
+ category="chimpanzee",
+ )
diff --git a/densepose/data/datasets/coco.py b/densepose/data/datasets/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..c19f7b034b1641c9ccd88634f12fcdc3013bce09
--- /dev/null
+++ b/densepose/data/datasets/coco.py
@@ -0,0 +1,432 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import io
+import logging
+import os
+from collections import defaultdict
+from dataclasses import dataclass
+from typing import Any, Dict, Iterable, List, Optional
+from fvcore.common.timer import Timer
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+
+from ..utils import maybe_prepend_base_path
+
+DENSEPOSE_MASK_KEY = "dp_masks"
+DENSEPOSE_IUV_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_I", "dp_U", "dp_V"]
+DENSEPOSE_CSE_KEYS_WITHOUT_MASK = ["dp_x", "dp_y", "dp_vertex", "ref_model"]
+DENSEPOSE_ALL_POSSIBLE_KEYS = set(
+ DENSEPOSE_IUV_KEYS_WITHOUT_MASK + DENSEPOSE_CSE_KEYS_WITHOUT_MASK + [DENSEPOSE_MASK_KEY]
+)
+DENSEPOSE_METADATA_URL_PREFIX = "https://dl.fbaipublicfiles.com/densepose/data/"
+
+
+@dataclass
+class CocoDatasetInfo:
+ name: str
+ images_root: str
+ annotations_fpath: str
+
+
+DATASETS = [
+ CocoDatasetInfo(
+ name="densepose_coco_2014_train",
+ images_root="coco/train2014",
+ annotations_fpath="coco/annotations/densepose_train2014.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_minival",
+ images_root="coco/val2014",
+ annotations_fpath="coco/annotations/densepose_minival2014.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_minival_100",
+ images_root="coco/val2014",
+ annotations_fpath="coco/annotations/densepose_minival2014_100.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_valminusminival",
+ images_root="coco/val2014",
+ annotations_fpath="coco/annotations/densepose_valminusminival2014.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_train_cse",
+ images_root="coco/train2014",
+ annotations_fpath="coco_cse/densepose_train2014_cse.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_minival_cse",
+ images_root="coco/val2014",
+ annotations_fpath="coco_cse/densepose_minival2014_cse.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_minival_100_cse",
+ images_root="coco/val2014",
+ annotations_fpath="coco_cse/densepose_minival2014_100_cse.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_coco_2014_valminusminival_cse",
+ images_root="coco/val2014",
+ annotations_fpath="coco_cse/densepose_valminusminival2014_cse.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_chimps",
+ images_root="densepose_chimps/images",
+ annotations_fpath="densepose_chimps/densepose_chimps_densepose.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_chimps_cse_train",
+ images_root="densepose_chimps/images",
+ annotations_fpath="densepose_chimps/densepose_chimps_cse_train.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_chimps_cse_val",
+ images_root="densepose_chimps/images",
+ annotations_fpath="densepose_chimps/densepose_chimps_cse_val.json",
+ ),
+ CocoDatasetInfo(
+ name="posetrack2017_train",
+ images_root="posetrack2017/posetrack_data_2017",
+ annotations_fpath="posetrack2017/densepose_posetrack_train2017.json",
+ ),
+ CocoDatasetInfo(
+ name="posetrack2017_val",
+ images_root="posetrack2017/posetrack_data_2017",
+ annotations_fpath="posetrack2017/densepose_posetrack_val2017.json",
+ ),
+ CocoDatasetInfo(
+ name="lvis_v05_train",
+ images_root="coco/train2017",
+ annotations_fpath="lvis/lvis_v0.5_plus_dp_train.json",
+ ),
+ CocoDatasetInfo(
+ name="lvis_v05_val",
+ images_root="coco/val2017",
+ annotations_fpath="lvis/lvis_v0.5_plus_dp_val.json",
+ ),
+]
+
+
+BASE_DATASETS = [
+ CocoDatasetInfo(
+ name="base_coco_2017_train",
+ images_root="coco/train2017",
+ annotations_fpath="coco/annotations/instances_train2017.json",
+ ),
+ CocoDatasetInfo(
+ name="base_coco_2017_val",
+ images_root="coco/val2017",
+ annotations_fpath="coco/annotations/instances_val2017.json",
+ ),
+ CocoDatasetInfo(
+ name="base_coco_2017_val_100",
+ images_root="coco/val2017",
+ annotations_fpath="coco/annotations/instances_val2017_100.json",
+ ),
+]
+
+
+def get_metadata(base_path: Optional[str]) -> Dict[str, Any]:
+ """
+ Returns metadata associated with COCO DensePose datasets
+
+ Args:
+ base_path: Optional[str]
+ Base path used to load metadata from
+
+ Returns:
+ Dict[str, Any]
+ Metadata in the form of a dictionary
+ """
+ meta = {
+ "densepose_transform_src": maybe_prepend_base_path(base_path, "UV_symmetry_transforms.mat"),
+ "densepose_smpl_subdiv": maybe_prepend_base_path(base_path, "SMPL_subdiv.mat"),
+ "densepose_smpl_subdiv_transform": maybe_prepend_base_path(
+ base_path,
+ "SMPL_SUBDIV_TRANSFORM.mat",
+ ),
+ }
+ return meta
+
+
+def _load_coco_annotations(json_file: str):
+ """
+ Load COCO annotations from a JSON file
+
+ Args:
+ json_file: str
+ Path to the file to load annotations from
+ Returns:
+ Instance of `pycocotools.coco.COCO` that provides access to annotations
+ data
+ """
+ from pycocotools.coco import COCO
+
+ logger = logging.getLogger(__name__)
+ timer = Timer()
+ with contextlib.redirect_stdout(io.StringIO()):
+ coco_api = COCO(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+ return coco_api
+
+
+def _add_categories_metadata(dataset_name: str, categories: List[Dict[str, Any]]):
+ meta = MetadataCatalog.get(dataset_name)
+ meta.categories = {c["id"]: c["name"] for c in categories}
+ logger = logging.getLogger(__name__)
+ logger.info("Dataset {} categories: {}".format(dataset_name, meta.categories))
+
+
+def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]):
+ if "minival" in json_file:
+ # Skip validation on COCO2014 valminusminival and minival annotations
+ # The ratio of buggy annotations there is tiny and does not affect accuracy
+ # Therefore we explicitly white-list them
+ return
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
+ json_file
+ )
+
+
+def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
+ if "bbox" not in ann_dict:
+ return
+ obj["bbox"] = ann_dict["bbox"]
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
+
+
+def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
+ if "segmentation" not in ann_dict:
+ return
+ segm = ann_dict["segmentation"]
+ if not isinstance(segm, dict):
+ # filter out invalid polygons (< 3 points)
+ segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
+ if len(segm) == 0:
+ return
+ obj["segmentation"] = segm
+
+
+def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
+ if "keypoints" not in ann_dict:
+ return
+ keypts = ann_dict["keypoints"] # list[int]
+ for idx, v in enumerate(keypts):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+ # Therefore we assume the coordinates are "pixel indices" and
+ # add 0.5 to convert to floating point coordinates.
+ keypts[idx] = v + 0.5
+ obj["keypoints"] = keypts
+
+
+def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]):
+ for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
+ if key in ann_dict:
+ obj[key] = ann_dict[key]
+
+
+def _combine_images_with_annotations(
+ dataset_name: str,
+ image_root: str,
+ img_datas: Iterable[Dict[str, Any]],
+ ann_datas: Iterable[Iterable[Dict[str, Any]]],
+):
+
+ ann_keys = ["iscrowd", "category_id"]
+ dataset_dicts = []
+ contains_video_frame_info = False
+
+ for img_dict, ann_dicts in zip(img_datas, ann_datas):
+ record = {}
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ record["image_id"] = img_dict["id"]
+ record["dataset"] = dataset_name
+ if "frame_id" in img_dict:
+ record["frame_id"] = img_dict["frame_id"]
+ record["video_id"] = img_dict.get("vid_id", None)
+ contains_video_frame_info = True
+ objs = []
+ for ann_dict in ann_dicts:
+ assert ann_dict["image_id"] == record["image_id"]
+ assert ann_dict.get("ignore", 0) == 0
+ obj = {key: ann_dict[key] for key in ann_keys if key in ann_dict}
+ _maybe_add_bbox(obj, ann_dict)
+ _maybe_add_segm(obj, ann_dict)
+ _maybe_add_keypoints(obj, ann_dict)
+ _maybe_add_densepose(obj, ann_dict)
+ objs.append(obj)
+ record["annotations"] = objs
+ dataset_dicts.append(record)
+ if contains_video_frame_info:
+ create_video_frame_mapping(dataset_name, dataset_dicts)
+ return dataset_dicts
+
+
+def get_contiguous_id_to_category_id_map(metadata):
+ cat_id_2_cont_id = metadata.thing_dataset_id_to_contiguous_id
+ cont_id_2_cat_id = {}
+ for cat_id, cont_id in cat_id_2_cont_id.items():
+ if cont_id in cont_id_2_cat_id:
+ continue
+ cont_id_2_cat_id[cont_id] = cat_id
+ return cont_id_2_cat_id
+
+
+def maybe_filter_categories_cocoapi(dataset_name, coco_api):
+ meta = MetadataCatalog.get(dataset_name)
+ cont_id_2_cat_id = get_contiguous_id_to_category_id_map(meta)
+ cat_id_2_cont_id = meta.thing_dataset_id_to_contiguous_id
+ # filter categories
+ cats = []
+ for cat in coco_api.dataset["categories"]:
+ cat_id = cat["id"]
+ if cat_id not in cat_id_2_cont_id:
+ continue
+ cont_id = cat_id_2_cont_id[cat_id]
+ if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
+ cats.append(cat)
+ coco_api.dataset["categories"] = cats
+ # filter annotations, if multiple categories are mapped to a single
+ # contiguous ID, use only one category ID and map all annotations to that category ID
+ anns = []
+ for ann in coco_api.dataset["annotations"]:
+ cat_id = ann["category_id"]
+ if cat_id not in cat_id_2_cont_id:
+ continue
+ cont_id = cat_id_2_cont_id[cat_id]
+ ann["category_id"] = cont_id_2_cat_id[cont_id]
+ anns.append(ann)
+ coco_api.dataset["annotations"] = anns
+ # recreate index
+ coco_api.createIndex()
+
+
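+# Unlike maybe_filter_categories_cocoapi above, which keeps original dataset category IDs,
+# this variant also remaps category IDs in the COCO API object to contiguous IDs.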
+def maybe_filter_and_map_categories_cocoapi(dataset_name, coco_api):
+ meta = MetadataCatalog.get(dataset_name)
+ category_id_map = meta.thing_dataset_id_to_contiguous_id
+ # map categories
+ cats = []
+ for cat in coco_api.dataset["categories"]:
+ cat_id = cat["id"]
+ if cat_id not in category_id_map:
+ continue
+ cat["id"] = category_id_map[cat_id]
+ cats.append(cat)
+ coco_api.dataset["categories"] = cats
+ # map annotation categories
+ anns = []
+ for ann in coco_api.dataset["annotations"]:
+ cat_id = ann["category_id"]
+ if cat_id not in category_id_map:
+ continue
+ ann["category_id"] = category_id_map[cat_id]
+ anns.append(ann)
+ coco_api.dataset["annotations"] = anns
+ # recreate index
+ coco_api.createIndex()
+
+
+def create_video_frame_mapping(dataset_name, dataset_dicts):
+ mapping = defaultdict(dict)
+ for d in dataset_dicts:
+ video_id = d.get("video_id")
+ if video_id is None:
+ continue
+ mapping[video_id].update({d["frame_id"]: d["file_name"]})
+ MetadataCatalog.get(dataset_name).set(video_frame_mapping=mapping)
+
+
+def load_coco_json(annotations_json_file: str, image_root: str, dataset_name: str):
+ """
+ Loads a JSON file with annotations in COCO instances format.
+ Replaces `detectron2.data.datasets.coco.load_coco_json` to handle metadata
+ in a more flexible way. Postpones category mapping to a later stage to be
+ able to combine several datasets with different (but coherent) sets of
+ categories.
+
+ Args:
+
+ annotations_json_file: str
+ Path to the JSON file with annotations in COCO instances format.
+ image_root: str
+ directory that contains all the images
+ dataset_name: str
+ the name that identifies a dataset, e.g. "densepose_coco_2014_train"
+ """
+ coco_api = _load_coco_annotations(PathManager.get_local_path(annotations_json_file))
+ _add_categories_metadata(dataset_name, coco_api.loadCats(coco_api.getCatIds()))
+ # sort indices for reproducible results
+ img_ids = sorted(coco_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = coco_api.loadImgs(img_ids)
+ logger = logging.getLogger(__name__)
+ logger.info("Loaded {} images in COCO format from {}".format(len(imgs), annotations_json_file))
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images.
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
+ _verify_annotations_have_unique_ids(annotations_json_file, anns)
+ dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
+ return dataset_records
+
+
+def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None):
+ """
+ Registers provided COCO DensePose dataset
+
+ Args:
+ dataset_data: CocoDatasetInfo
+ Dataset data
+ datasets_root: Optional[str]
+ Datasets root folder (default: None)
+ """
+ annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
+ images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
+
+ def load_annotations():
+ return load_coco_json(
+ annotations_json_file=annotations_fpath,
+ image_root=images_root,
+ dataset_name=dataset_data.name,
+ )
+
+ DatasetCatalog.register(dataset_data.name, load_annotations)
+ MetadataCatalog.get(dataset_data.name).set(
+ json_file=annotations_fpath,
+ image_root=images_root,
+ **get_metadata(DENSEPOSE_METADATA_URL_PREFIX)
+ )
+
+
+def register_datasets(
+ datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
+):
+ """
+ Registers provided COCO DensePose datasets
+
+ Args:
+ datasets_data: Iterable[CocoDatasetInfo]
+ An iterable of dataset descriptors (CocoDatasetInfo)
+ datasets_root: Optional[str]
+ Datasets root folder (default: None)
+ """
+ for dataset_data in datasets_data:
+ register_dataset(dataset_data, datasets_root)
diff --git a/densepose/data/datasets/dataset_type.py b/densepose/data/datasets/dataset_type.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed8f8f299af96847d9d16a77920429fe0195c526
--- /dev/null
+++ b/densepose/data/datasets/dataset_type.py
@@ -0,0 +1,11 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from enum import Enum
+
+
+class DatasetType(Enum):
+ """
+ Dataset type, mostly used for datasets that contain data to bootstrap models on
+ """
+
+ VIDEO_LIST = "video_list"
diff --git a/densepose/data/datasets/lvis.py b/densepose/data/datasets/lvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..b4af9fa292f445c81dc840ab53d07c1af313dfc7
--- /dev/null
+++ b/densepose/data/datasets/lvis.py
@@ -0,0 +1,257 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import os
+from typing import Any, Dict, Iterable, List, Optional
+from fvcore.common.timer import Timer
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.data.datasets.lvis import get_lvis_instances_meta
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+
+from ..utils import maybe_prepend_base_path
+from .coco import (
+ DENSEPOSE_ALL_POSSIBLE_KEYS,
+ DENSEPOSE_METADATA_URL_PREFIX,
+ CocoDatasetInfo,
+ get_metadata,
+)
+
+DATASETS = [
+ CocoDatasetInfo(
+ name="densepose_lvis_v1_ds1_train_v1",
+ images_root="coco_",
+ annotations_fpath="lvis/densepose_lvis_v1_ds1_train_v1.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_lvis_v1_ds1_val_v1",
+ images_root="coco_",
+ annotations_fpath="lvis/densepose_lvis_v1_ds1_val_v1.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_lvis_v1_ds2_train_v1",
+ images_root="coco_",
+ annotations_fpath="lvis/densepose_lvis_v1_ds2_train_v1.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_lvis_v1_ds2_val_v1",
+ images_root="coco_",
+ annotations_fpath="lvis/densepose_lvis_v1_ds2_val_v1.json",
+ ),
+ CocoDatasetInfo(
+ name="densepose_lvis_v1_ds1_val_animals_100",
+ images_root="coco_",
+ annotations_fpath="lvis/densepose_lvis_v1_val_animals_100_v2.json",
+ ),
+]
+
+
+def _load_lvis_annotations(json_file: str):
+ """
+ Load COCO annotations from a JSON file
+
+ Args:
+ json_file: str
+ Path to the file to load annotations from
+ Returns:
+ Instance of `pycocotools.coco.COCO` that provides access to annotations
+ data
+ """
+ from lvis import LVIS
+
+ json_file = PathManager.get_local_path(json_file)
+ logger = logging.getLogger(__name__)
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+ return lvis_api
+
+
+def _add_categories_metadata(dataset_name: str) -> None:
+ metadict = get_lvis_instances_meta(dataset_name)
+ categories = metadict["thing_classes"]
+ metadata = MetadataCatalog.get(dataset_name)
+ metadata.categories = {i + 1: categories[i] for i in range(len(categories))}
+ logger = logging.getLogger(__name__)
+ logger.info(f"Dataset {dataset_name} has {len(categories)} categories")
+
+
+def _verify_annotations_have_unique_ids(json_file: str, anns: List[List[Dict[str, Any]]]) -> None:
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
+ json_file
+ )
+
+
+def _maybe_add_bbox(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
+ if "bbox" not in ann_dict:
+ return
+ obj["bbox"] = ann_dict["bbox"]
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
+
+
+def _maybe_add_segm(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
+ if "segmentation" not in ann_dict:
+ return
+ segm = ann_dict["segmentation"]
+ if not isinstance(segm, dict):
+ # filter out invalid polygons (< 3 points)
+ segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
+ if len(segm) == 0:
+ return
+ obj["segmentation"] = segm
+
+
+def _maybe_add_keypoints(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
+ if "keypoints" not in ann_dict:
+ return
+ keypts = ann_dict["keypoints"] # list[int]
+ for idx, v in enumerate(keypts):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+ # Therefore we assume the coordinates are "pixel indices" and
+ # add 0.5 to convert to floating point coordinates.
+ keypts[idx] = v + 0.5
+ obj["keypoints"] = keypts
+
+
+def _maybe_add_densepose(obj: Dict[str, Any], ann_dict: Dict[str, Any]) -> None:
+ for key in DENSEPOSE_ALL_POSSIBLE_KEYS:
+ if key in ann_dict:
+ obj[key] = ann_dict[key]
+
+
+def _combine_images_with_annotations(
+ dataset_name: str,
+ image_root: str,
+ img_datas: Iterable[Dict[str, Any]],
+ ann_datas: Iterable[Iterable[Dict[str, Any]]],
+):
+
+ dataset_dicts = []
+
+ def get_file_name(img_root, img_dict):
+ # Determine the path including the split folder ("train2017", "val2017", "test2017") from
+ # the coco_url field. Example:
+ # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
+ split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
+ return os.path.join(img_root + split_folder, file_name)
+
+ for img_dict, ann_dicts in zip(img_datas, ann_datas):
+ record = {}
+ record["file_name"] = get_file_name(image_root, img_dict)
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
+ record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
+ record["image_id"] = img_dict["id"]
+ record["dataset"] = dataset_name
+
+ objs = []
+ for ann_dict in ann_dicts:
+ assert ann_dict["image_id"] == record["image_id"]
+ obj = {}
+ _maybe_add_bbox(obj, ann_dict)
+ obj["iscrowd"] = ann_dict.get("iscrowd", 0)
+ obj["category_id"] = ann_dict["category_id"]
+ _maybe_add_segm(obj, ann_dict)
+ _maybe_add_keypoints(obj, ann_dict)
+ _maybe_add_densepose(obj, ann_dict)
+ objs.append(obj)
+ record["annotations"] = objs
+ dataset_dicts.append(record)
+ return dataset_dicts
+
+
+def load_lvis_json(annotations_json_file: str, image_root: str, dataset_name: str):
+ """
+ Loads a JSON file with annotations in LVIS instances format.
+ Replaces `detectron2.data.datasets.lvis.load_lvis_json` to handle metadata
+ in a more flexible way. Postpones category mapping to a later stage to be
+ able to combine several datasets with different (but coherent) sets of
+ categories.
+
+ Args:
+
+ annotations_json_file: str
+ Path to the JSON file with annotations in LVIS instances format.
+ image_root: str
+ directory that contains all the images
+ dataset_name: str
+ the name that identifies a dataset, e.g. "densepose_lvis_v1_ds1_train_v1"
+ """
+ lvis_api = _load_lvis_annotations(PathManager.get_local_path(annotations_json_file))
+
+ _add_categories_metadata(dataset_name)
+
+ # sort indices for reproducible results
+ img_ids = sorted(lvis_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = lvis_api.load_imgs(img_ids)
+ logger = logging.getLogger(__name__)
+ logger.info("Loaded {} images in LVIS format from {}".format(len(imgs), annotations_json_file))
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images.
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+ _verify_annotations_have_unique_ids(annotations_json_file, anns)
+ dataset_records = _combine_images_with_annotations(dataset_name, image_root, imgs, anns)
+ return dataset_records
+
+
+def register_dataset(dataset_data: CocoDatasetInfo, datasets_root: Optional[str] = None) -> None:
+ """
+ Registers provided LVIS DensePose dataset
+
+ Args:
+ dataset_data: CocoDatasetInfo
+ Dataset data
+ datasets_root: Optional[str]
+ Datasets root folder (default: None)
+ """
+ annotations_fpath = maybe_prepend_base_path(datasets_root, dataset_data.annotations_fpath)
+ images_root = maybe_prepend_base_path(datasets_root, dataset_data.images_root)
+
+ def load_annotations():
+ return load_lvis_json(
+ annotations_json_file=annotations_fpath,
+ image_root=images_root,
+ dataset_name=dataset_data.name,
+ )
+
+ DatasetCatalog.register(dataset_data.name, load_annotations)
+ MetadataCatalog.get(dataset_data.name).set(
+ json_file=annotations_fpath,
+ image_root=images_root,
+ evaluator_type="lvis",
+ **get_metadata(DENSEPOSE_METADATA_URL_PREFIX),
+ )
+
+
+def register_datasets(
+ datasets_data: Iterable[CocoDatasetInfo], datasets_root: Optional[str] = None
+) -> None:
+ """
+ Registers provided LVIS DensePose datasets
+
+ Args:
+ datasets_data: Iterable[CocoDatasetInfo]
+ An iterable of dataset descriptors (CocoDatasetInfo)
+ datasets_root: Optional[str]
+ Datasets root folder (default: None)
+ """
+ for dataset_data in datasets_data:
+ register_dataset(dataset_data, datasets_root)
diff --git a/densepose/data/image_list_dataset.py b/densepose/data/image_list_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..92a95d3d5e7d4d7d6bf1d29d51295d32ae2104d2
--- /dev/null
+++ b/densepose/data/image_list_dataset.py
@@ -0,0 +1,72 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import logging
+import numpy as np
+from typing import Any, Callable, Dict, List, Optional, Union
+import torch
+from torch.utils.data.dataset import Dataset
+
+from detectron2.data.detection_utils import read_image
+
+ImageTransform = Callable[[torch.Tensor], torch.Tensor]
+
+
+class ImageListDataset(Dataset):
+ """
+ Dataset that provides images from a list.
+ """
+
+ _EMPTY_IMAGE = torch.empty((0, 3, 1, 1))
+
+ def __init__(
+ self,
+ image_list: List[str],
+ category_list: Union[str, List[str], None] = None,
+ transform: Optional[ImageTransform] = None,
+ ):
+ """
+ Args:
+ image_list (List[str]): list of paths to image files
+ category_list (Union[str, List[str], None]): list of animal categories for
+ each image. If it is a string, or None, this applies to all images
+ """
+ if isinstance(category_list, list):
+ self.category_list = category_list
+ else:
+ self.category_list = [category_list] * len(image_list)
+ assert len(image_list) == len(
+ self.category_list
+ ), "length of image and category lists must be equal"
+ self.image_list = image_list
+ self.transform = transform
+
+ def __getitem__(self, idx: int) -> Dict[str, Any]:
+ """
+ Gets selected images from the list
+
+ Args:
+ idx (int): image index in the image list
+ Returns:
+ A dictionary containing two keys:
+ images (torch.Tensor): tensor of size [N, 3, H, W] (N = 1, or 0 for _EMPTY_IMAGE)
+ categories (List[str]): categories of the image (at most one entry)
+ """
+ categories = [self.category_list[idx]]
+ fpath = self.image_list[idx]
+ transform = self.transform
+
+ try:
+ image = torch.from_numpy(np.ascontiguousarray(read_image(fpath, format="BGR")))
+ image = image.permute(2, 0, 1).unsqueeze(0).float() # HWC -> NCHW
+ if transform is not None:
+ image = transform(image)
+ return {"images": image, "categories": categories}
+ except (OSError, RuntimeError) as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(f"Error opening image file {fpath}: {e}")
+
+ return {"images": self._EMPTY_IMAGE, "categories": []}
+
+ def __len__(self):
+ return len(self.image_list)
diff --git a/densepose/data/inference_based_loader.py b/densepose/data/inference_based_loader.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb89544500c29c4055353060ebbc8b428bd0262a
--- /dev/null
+++ b/densepose/data/inference_based_loader.py
@@ -0,0 +1,172 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+from typing import Any, Callable, Dict, Iterable, Iterator, List, Optional, Tuple
+import torch
+from torch import nn
+
+SampledData = Any
+ModelOutput = Any
+
+
+def _grouper(iterable: Iterable[Any], n: int, fillvalue=None) -> Iterator[Tuple[Any]]:
+ """
+ Group elements of an iterable by chunks of size `n`, e.g.
+ _grouper(range(9), 4) ->
+ (0, 1, 2, 3), (4, 5, 6, 7), (8, None, None, None)
+ """
+ it = iter(iterable)
+ while True:
+ values = []
+ for _ in range(n):
+ try:
+ value = next(it)
+ except StopIteration:
+ if values:
+ values.extend([fillvalue] * (n - len(values)))
+ yield tuple(values)
+ return
+ values.append(value)
+ yield tuple(values)
+
+
+class ScoreBasedFilter:
+ """
+ Filters entries in model output based on their scores
+ Discards all entries with score less than the specified minimum
+ """
+
+ def __init__(self, min_score: float = 0.8):
+ self.min_score = min_score
+
+ def __call__(self, model_output: ModelOutput) -> ModelOutput:
+ for model_output_i in model_output:
+ instances = model_output_i["instances"]
+ if not instances.has("scores"):
+ continue
+ instances_filtered = instances[instances.scores >= self.min_score]
+ model_output_i["instances"] = instances_filtered
+ return model_output
+
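+# Illustrative usage sketch (assumes `outputs` is a list of {"instances": Instances}
+# dicts as produced by a detectron2 model):
+#
+#   score_filter = ScoreBasedFilter(min_score=0.9)
+#   outputs = score_filter(outputs)  # entries with score < 0.9 are discarded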
+
+class InferenceBasedLoader:
+ """
+ Data loader based on results inferred by a model. Consists of:
+ - a data loader that provides batches of images
+ - a model that is used to infer the results
+ - a data sampler that converts inferred results to annotations
+ """
+
+ def __init__(
+ self,
+ model: nn.Module,
+ data_loader: Iterable[List[Dict[str, Any]]],
+ data_sampler: Optional[Callable[[ModelOutput], List[SampledData]]] = None,
+ data_filter: Optional[Callable[[ModelOutput], ModelOutput]] = None,
+ shuffle: bool = True,
+ batch_size: int = 4,
+ inference_batch_size: int = 4,
+ drop_last: bool = False,
+ category_to_class_mapping: Optional[dict] = None,
+ ):
+ """
+ Constructor
+
+ Args:
+ model (torch.nn.Module): model used to produce data
+ data_loader (Iterable[List[Dict[str, Any]]]): iterable that provides
+ dictionaries with "images" and "categories" fields to perform inference on
+ data_sampler (Callable: ModelOutput -> SampledData): functor
+ that produces annotation data from inference results;
+ (optional, default: None)
+ data_filter (Callable: ModelOutput -> ModelOutput): filter
+ that selects model outputs for further processing
+ (optional, default: None)
+ shuffle (bool): if True, the input images get shuffled
+ batch_size (int): batch size for the produced annotation data
+ inference_batch_size (int): batch size for input images
+ drop_last (bool): if True, drop the last batch if it is undersized
+ category_to_class_mapping (dict): category to class mapping
+ """
+ self.model = model
+ self.model.eval()
+ self.data_loader = data_loader
+ self.data_sampler = data_sampler
+ self.data_filter = data_filter
+ self.shuffle = shuffle
+ self.batch_size = batch_size
+ self.inference_batch_size = inference_batch_size
+ self.drop_last = drop_last
+ if category_to_class_mapping is not None:
+ self.category_to_class_mapping = category_to_class_mapping
+ else:
+ self.category_to_class_mapping = {}
+
+ def __iter__(self) -> Iterator[List[SampledData]]:
+ for batch in self.data_loader:
+ # batch : List[Dict[str: Tensor[N, C, H, W], str: Optional[str]]]
+ # images_batch : Tensor[N, C, H, W]
+ # image : Tensor[C, H, W]
+ images_and_categories = [
+ {"image": image, "category": category}
+ for element in batch
+ for image, category in zip(element["images"], element["categories"])
+ ]
+ if not images_and_categories:
+ continue
+ if self.shuffle:
+ random.shuffle(images_and_categories)
+ yield from self._produce_data(images_and_categories) # pyre-ignore[6]
+
+ def _produce_data(
+ self, images_and_categories: List[Tuple[torch.Tensor, Optional[str]]]
+ ) -> Iterator[List[SampledData]]:
+ """
+ Produce batches of data from images
+
+ Args:
+ images_and_categories (List[Tuple[torch.Tensor, Optional[str]]]):
+ list of images and corresponding categories to process
+
+ Returns:
+ Iterator over batches of data sampled from model outputs
+ """
+ data_batches: List[SampledData] = []
+ category_to_class_mapping = self.category_to_class_mapping
+ batched_images_and_categories = _grouper(images_and_categories, self.inference_batch_size)
+ for batch in batched_images_and_categories:
+ batch = [
+ {
+ "image": image_and_category["image"].to(self.model.device),
+ "category": image_and_category["category"],
+ }
+ for image_and_category in batch
+ if image_and_category is not None
+ ]
+ if not batch:
+ continue
+ with torch.no_grad():
+ model_output = self.model(batch)
+ for model_output_i, batch_i in zip(model_output, batch):
+ assert len(batch_i["image"].shape) == 3
+ model_output_i["image"] = batch_i["image"]
+ instance_class = category_to_class_mapping.get(batch_i["category"], 0)
+ model_output_i["instances"].dataset_classes = torch.tensor(
+ [instance_class] * len(model_output_i["instances"])
+ )
+ model_output_filtered = (
+ model_output if self.data_filter is None else self.data_filter(model_output)
+ )
+ data = (
+ model_output_filtered
+ if self.data_sampler is None
+ else self.data_sampler(model_output_filtered)
+ )
+ for data_i in data:
+ if len(data_i["instances"]):
+ data_batches.append(data_i)
+ if len(data_batches) >= self.batch_size:
+ yield data_batches[: self.batch_size]
+ data_batches = data_batches[self.batch_size :]
+ if not self.drop_last and data_batches:
+ yield data_batches
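+
+
+# Illustrative usage sketch (not part of the module; the loader and sampler names
+# are assumptions and refer to other components added in this patch):
+#
+#   loader = InferenceBasedLoader(
+#       model,
+#       data_loader=image_or_video_loader,
+#       data_filter=ScoreBasedFilter(min_score=0.8),
+#       data_sampler=PredictionToGroundTruthSampler("my_unlabeled_dataset"),
+#       batch_size=4,
+#   )
+#   for sampled_batch in loader:
+#       ...  # batches of pseudo-ground-truth produced from model predictions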
diff --git a/densepose/data/meshes/__init__.py b/densepose/data/meshes/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e1f0d5dc439dc58914238b23572f586dd1c693e
--- /dev/null
+++ b/densepose/data/meshes/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from . import builtin
+
+__all__ = [k for k in globals().keys() if "builtin" not in k and not k.startswith("_")]
diff --git a/densepose/data/meshes/builtin.py b/densepose/data/meshes/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..c0b23760e8268b068149931b173a4285ba451993
--- /dev/null
+++ b/densepose/data/meshes/builtin.py
@@ -0,0 +1,101 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from .catalog import MeshInfo, register_meshes
+
+DENSEPOSE_MESHES_DIR = "https://dl.fbaipublicfiles.com/densepose/meshes/"
+
+MESHES = [
+ MeshInfo(
+ name="smpl_27554",
+ data="smpl_27554.pkl",
+ geodists="geodists/geodists_smpl_27554.pkl",
+ symmetry="symmetry/symmetry_smpl_27554.pkl",
+ texcoords="texcoords/texcoords_smpl_27554.pkl",
+ ),
+ MeshInfo(
+ name="chimp_5029",
+ data="chimp_5029.pkl",
+ geodists="geodists/geodists_chimp_5029.pkl",
+ symmetry="symmetry/symmetry_chimp_5029.pkl",
+ texcoords="texcoords/texcoords_chimp_5029.pkl",
+ ),
+ MeshInfo(
+ name="cat_5001",
+ data="cat_5001.pkl",
+ geodists="geodists/geodists_cat_5001.pkl",
+ symmetry="symmetry/symmetry_cat_5001.pkl",
+ texcoords="texcoords/texcoords_cat_5001.pkl",
+ ),
+ MeshInfo(
+ name="cat_7466",
+ data="cat_7466.pkl",
+ geodists="geodists/geodists_cat_7466.pkl",
+ symmetry="symmetry/symmetry_cat_7466.pkl",
+ texcoords="texcoords/texcoords_cat_7466.pkl",
+ ),
+ MeshInfo(
+ name="sheep_5004",
+ data="sheep_5004.pkl",
+ geodists="geodists/geodists_sheep_5004.pkl",
+ symmetry="symmetry/symmetry_sheep_5004.pkl",
+ texcoords="texcoords/texcoords_sheep_5004.pkl",
+ ),
+ MeshInfo(
+ name="zebra_5002",
+ data="zebra_5002.pkl",
+ geodists="geodists/geodists_zebra_5002.pkl",
+ symmetry="symmetry/symmetry_zebra_5002.pkl",
+ texcoords="texcoords/texcoords_zebra_5002.pkl",
+ ),
+ MeshInfo(
+ name="horse_5004",
+ data="horse_5004.pkl",
+ geodists="geodists/geodists_horse_5004.pkl",
+ symmetry="symmetry/symmetry_horse_5004.pkl",
+ texcoords="texcoords/texcoords_zebra_5002.pkl",
+ ),
+ MeshInfo(
+ name="giraffe_5002",
+ data="giraffe_5002.pkl",
+ geodists="geodists/geodists_giraffe_5002.pkl",
+ symmetry="symmetry/symmetry_giraffe_5002.pkl",
+ texcoords="texcoords/texcoords_giraffe_5002.pkl",
+ ),
+ MeshInfo(
+ name="elephant_5002",
+ data="elephant_5002.pkl",
+ geodists="geodists/geodists_elephant_5002.pkl",
+ symmetry="symmetry/symmetry_elephant_5002.pkl",
+ texcoords="texcoords/texcoords_elephant_5002.pkl",
+ ),
+ MeshInfo(
+ name="dog_5002",
+ data="dog_5002.pkl",
+ geodists="geodists/geodists_dog_5002.pkl",
+ symmetry="symmetry/symmetry_dog_5002.pkl",
+ texcoords="texcoords/texcoords_dog_5002.pkl",
+ ),
+ MeshInfo(
+ name="dog_7466",
+ data="dog_7466.pkl",
+ geodists="geodists/geodists_dog_7466.pkl",
+ symmetry="symmetry/symmetry_dog_7466.pkl",
+ texcoords="texcoords/texcoords_dog_7466.pkl",
+ ),
+ MeshInfo(
+ name="cow_5002",
+ data="cow_5002.pkl",
+ geodists="geodists/geodists_cow_5002.pkl",
+ symmetry="symmetry/symmetry_cow_5002.pkl",
+ texcoords="texcoords/texcoords_cow_5002.pkl",
+ ),
+ MeshInfo(
+ name="bear_4936",
+ data="bear_4936.pkl",
+ geodists="geodists/geodists_bear_4936.pkl",
+ symmetry="symmetry/symmetry_bear_4936.pkl",
+ texcoords="texcoords/texcoords_bear_4936.pkl",
+ ),
+]
+
+register_meshes(MESHES, DENSEPOSE_MESHES_DIR)
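+
+
+# Illustrative note: after registration each relative entry above is resolved
+# against DENSEPOSE_MESHES_DIR (assuming the catalog logic in catalog.py), e.g.
+#
+#   MeshCatalog["smpl_27554"].data
+#   # -> "https://dl.fbaipublicfiles.com/densepose/meshes/smpl_27554.pkl"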
diff --git a/densepose/data/meshes/catalog.py b/densepose/data/meshes/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..b258f3ce11a90666b9c764541ce299384cfddf4e
--- /dev/null
+++ b/densepose/data/meshes/catalog.py
@@ -0,0 +1,71 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import logging
+from collections import UserDict
+from dataclasses import dataclass
+from typing import Iterable, Optional
+
+from ..utils import maybe_prepend_base_path
+
+
+@dataclass
+class MeshInfo:
+ name: str
+ data: str
+ geodists: Optional[str] = None
+ symmetry: Optional[str] = None
+ texcoords: Optional[str] = None
+
+
+class _MeshCatalog(UserDict):
+ def __init__(self, *args, **kwargs):
+ super().__init__(*args, **kwargs)
+ self.mesh_ids = {}
+ self.mesh_names = {}
+ self.max_mesh_id = -1
+
+ def __setitem__(self, key, value):
+ if key in self:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Overwriting mesh catalog entry '{key}': old value {self[key]}"
+ f", new value {value}"
+ )
+ mesh_id = self.mesh_ids[key]
+ else:
+ self.max_mesh_id += 1
+ mesh_id = self.max_mesh_id
+ super().__setitem__(key, value)
+ self.mesh_ids[key] = mesh_id
+ self.mesh_names[mesh_id] = key
+
+ def get_mesh_id(self, shape_name: str) -> int:
+ return self.mesh_ids[shape_name]
+
+ def get_mesh_name(self, mesh_id: int) -> str:
+ return self.mesh_names[mesh_id]
+
+
+MeshCatalog = _MeshCatalog()
+
+
+def register_mesh(mesh_info: MeshInfo, base_path: Optional[str]) -> None:
+ geodists, symmetry, texcoords = mesh_info.geodists, mesh_info.symmetry, mesh_info.texcoords
+ if geodists:
+ geodists = maybe_prepend_base_path(base_path, geodists)
+ if symmetry:
+ symmetry = maybe_prepend_base_path(base_path, symmetry)
+ if texcoords:
+ texcoords = maybe_prepend_base_path(base_path, texcoords)
+ MeshCatalog[mesh_info.name] = MeshInfo(
+ name=mesh_info.name,
+ data=maybe_prepend_base_path(base_path, mesh_info.data),
+ geodists=geodists,
+ symmetry=symmetry,
+ texcoords=texcoords,
+ )
+
+
+def register_meshes(mesh_infos: Iterable[MeshInfo], base_path: Optional[str]) -> None:
+ for mesh_info in mesh_infos:
+ register_mesh(mesh_info, base_path)
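+
+
+# Illustrative usage sketch: once meshes are registered (see builtin.py), numeric
+# ids and names can be looked up through the catalog:
+#
+#   mesh_id = MeshCatalog.get_mesh_id("smpl_27554")  # name from builtin.py
+#   assert MeshCatalog.get_mesh_name(mesh_id) == "smpl_27554"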
diff --git a/densepose/data/samplers/__init__.py b/densepose/data/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7dba87ea1c6f37ab56071d2f5d715bd78fe8816f
--- /dev/null
+++ b/densepose/data/samplers/__init__.py
@@ -0,0 +1,8 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .densepose_uniform import DensePoseUniformSampler
+from .densepose_confidence_based import DensePoseConfidenceBasedSampler
+from .densepose_cse_uniform import DensePoseCSEUniformSampler
+from .densepose_cse_confidence_based import DensePoseCSEConfidenceBasedSampler
+from .mask_from_densepose import MaskFromDensePoseSampler
+from .prediction_to_gt import PredictionToGroundTruthSampler
diff --git a/densepose/data/samplers/densepose_base.py b/densepose/data/samplers/densepose_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..4d499d8f20d811fb8197d7bdae358540bb5b0dfc
--- /dev/null
+++ b/densepose/data/samplers/densepose_base.py
@@ -0,0 +1,203 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any, Dict, List, Tuple
+import torch
+from torch.nn import functional as F
+
+from detectron2.structures import BoxMode, Instances
+
+from densepose.converters import ToChartResultConverter
+from densepose.converters.base import IntTupleBox, make_int_box
+from densepose.structures import DensePoseDataRelative, DensePoseList
+
+
+class DensePoseBaseSampler:
+ """
+ Base DensePose sampler to produce DensePose data from DensePose predictions.
+ Samples for each class are drawn according to some distribution over all pixels estimated
+ to belong to that class.
+ """
+
+ def __init__(self, count_per_class: int = 8):
+ """
+ Constructor
+
+ Args:
+ count_per_class (int): the sampler produces at most `count_per_class`
+ samples for each category
+ """
+ self.count_per_class = count_per_class
+
+ def __call__(self, instances: Instances) -> DensePoseList:
+ """
+ Convert DensePose predictions (an instance of `DensePoseChartPredictorOutput`)
+ into DensePose annotations data (an instance of `DensePoseList`)
+ """
+ boxes_xyxy_abs = instances.pred_boxes.tensor.clone().cpu()
+ boxes_xywh_abs = BoxMode.convert(boxes_xyxy_abs, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ dp_datas = []
+ for i in range(len(boxes_xywh_abs)):
+ annotation_i = self._sample(instances[i], make_int_box(boxes_xywh_abs[i]))
+ annotation_i[DensePoseDataRelative.S_KEY] = self._resample_mask( # pyre-ignore[6]
+ instances[i].pred_densepose
+ )
+ dp_datas.append(DensePoseDataRelative(annotation_i))
+ # create densepose annotations on CPU
+ dp_list = DensePoseList(dp_datas, boxes_xyxy_abs, instances.image_size)
+ return dp_list
+
+ def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
+ """
+ Sample DensePoseDataRelative from estimation results
+ """
+ labels, dp_result = self._produce_labels_and_results(instance)
+ annotation = {
+ DensePoseDataRelative.X_KEY: [],
+ DensePoseDataRelative.Y_KEY: [],
+ DensePoseDataRelative.U_KEY: [],
+ DensePoseDataRelative.V_KEY: [],
+ DensePoseDataRelative.I_KEY: [],
+ }
+ n, h, w = dp_result.shape
+ for part_id in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
+ # indices - tuple of 3 1D tensors of size k
+ # 0: index along the first dimension N
+ # 1: index along H dimension
+ # 2: index along W dimension
+ indices = torch.nonzero(labels.expand(n, h, w) == part_id, as_tuple=True)
+ # values - an array of size [n, k]
+ # n: number of channels (U, V, confidences)
+ # k: number of points labeled with part_id
+ values = dp_result[indices].view(n, -1)
+ k = values.shape[1]
+ count = min(self.count_per_class, k)
+ if count <= 0:
+ continue
+ index_sample = self._produce_index_sample(values, count)
+ sampled_values = values[:, index_sample]
+ sampled_y = indices[1][index_sample] + 0.5
+ sampled_x = indices[2][index_sample] + 0.5
+ # prepare / normalize data
+ x = (sampled_x / w * 256.0).cpu().tolist()
+ y = (sampled_y / h * 256.0).cpu().tolist()
+ u = sampled_values[0].clamp(0, 1).cpu().tolist()
+ v = sampled_values[1].clamp(0, 1).cpu().tolist()
+ fine_segm_labels = [part_id] * count
+ # extend annotations
+ annotation[DensePoseDataRelative.X_KEY].extend(x)
+ annotation[DensePoseDataRelative.Y_KEY].extend(y)
+ annotation[DensePoseDataRelative.U_KEY].extend(u)
+ annotation[DensePoseDataRelative.V_KEY].extend(v)
+ annotation[DensePoseDataRelative.I_KEY].extend(fine_segm_labels)
+ return annotation
+
+ def _produce_index_sample(self, values: torch.Tensor, count: int):
+ """
+ Abstract method to produce a sample of indices to select data
+ To be implemented in descendants
+
+ Args:
+ values (torch.Tensor): an array of size [n, k] that contains
+ estimated values (U, V, confidences);
+ n: number of channels (U, V, confidences)
+ k: number of points labeled with part_id
+ count (int): number of samples to produce, should be positive and <= k
+
+ Return:
+ list(int): indices of values (along axis 1) selected as a sample
+ """
+ raise NotImplementedError
+
+ def _produce_labels_and_results(self, instance: Instances) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Method to get labels and DensePose results from an instance
+
+ Args:
+ instance (Instances): an instance of `DensePoseChartPredictorOutput`
+
+ Return:
+ labels (torch.Tensor): shape [H, W], DensePose segmentation labels
+ dp_result (torch.Tensor): shape [2, H, W], stacked DensePose results u and v
+ """
+ converter = ToChartResultConverter
+ chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
+ labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
+ return labels, dp_result
+
+ def _resample_mask(self, output: Any) -> torch.Tensor:
+ """
+ Convert DensePose predictor output to segmentation annotation - tensors of size
+ (256, 256) and type `int64`.
+
+ Args:
+ output: DensePose predictor output with the following attributes:
+ - coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
+ segmentation scores
+ - fine_segm: tensor of size [N, C, H, W] with unnormalized fine
+ segmentation scores
+ Return:
+ Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
+ where S = DensePoseDataRelative.MASK_SIZE
+ """
+ sz = DensePoseDataRelative.MASK_SIZE
+ S = (
+ F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
+ .argmax(dim=1)
+ .long()
+ )
+ I = (
+ (
+ F.interpolate(
+ output.fine_segm,
+ (sz, sz),
+ mode="bilinear",
+ align_corners=False,
+ ).argmax(dim=1)
+ * (S > 0).long()
+ )
+ .squeeze()
+ .cpu()
+ )
+ # Map fine segmentation results to coarse segmentation ground truth
+ # TODO: extract this into separate classes
+ # coarse segmentation: 1 = Torso, 2 = Right Hand, 3 = Left Hand,
+ # 4 = Left Foot, 5 = Right Foot, 6 = Upper Leg Right, 7 = Upper Leg Left,
+ # 8 = Lower Leg Right, 9 = Lower Leg Left, 10 = Upper Arm Left,
+ # 11 = Upper Arm Right, 12 = Lower Arm Left, 13 = Lower Arm Right,
+ # 14 = Head
+ # fine segmentation: 1, 2 = Torso, 3 = Right Hand, 4 = Left Hand,
+ # 5 = Left Foot, 6 = Right Foot, 7, 9 = Upper Leg Right,
+ # 8, 10 = Upper Leg Left, 11, 13 = Lower Leg Right,
+ # 12, 14 = Lower Leg Left, 15, 17 = Upper Arm Left,
+ # 16, 18 = Upper Arm Right, 19, 21 = Lower Arm Left,
+ # 20, 22 = Lower Arm Right, 23, 24 = Head
+ FINE_TO_COARSE_SEGMENTATION = {
+ 1: 1,
+ 2: 1,
+ 3: 2,
+ 4: 3,
+ 5: 4,
+ 6: 5,
+ 7: 6,
+ 8: 7,
+ 9: 6,
+ 10: 7,
+ 11: 8,
+ 12: 9,
+ 13: 8,
+ 14: 9,
+ 15: 10,
+ 16: 11,
+ 17: 10,
+ 18: 11,
+ 19: 12,
+ 20: 13,
+ 21: 12,
+ 22: 13,
+ 23: 14,
+ 24: 14,
+ }
+ mask = torch.zeros((sz, sz), dtype=torch.int64, device=torch.device("cpu"))
+ for i in range(DensePoseDataRelative.N_PART_LABELS):
+ mask[I == i + 1] = FINE_TO_COARSE_SEGMENTATION[i + 1]
+ return mask
diff --git a/densepose/data/samplers/densepose_confidence_based.py b/densepose/data/samplers/densepose_confidence_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..48e325b06e46817dafc0da2d984a8626d754e119
--- /dev/null
+++ b/densepose/data/samplers/densepose_confidence_based.py
@@ -0,0 +1,108 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+from typing import Optional, Tuple
+import torch
+
+from densepose.converters import ToChartResultConverterWithConfidences
+
+from .densepose_base import DensePoseBaseSampler
+
+
+class DensePoseConfidenceBasedSampler(DensePoseBaseSampler):
+ """
+ Samples DensePose data from DensePose predictions.
+ Samples for each class are drawn using confidence value estimates.
+ """
+
+ def __init__(
+ self,
+ confidence_channel: str,
+ count_per_class: int = 8,
+ search_count_multiplier: Optional[float] = None,
+ search_proportion: Optional[float] = None,
+ ):
+ """
+ Constructor
+
+ Args:
+ confidence_channel (str): confidence channel to use for sampling;
+ possible values:
+ "sigma_2": confidences for UV values
+ "fine_segm_confidence": confidences for fine segmentation
+ "coarse_segm_confidence": confidences for coarse segmentation
+ (default: "sigma_2")
+ count_per_class (int): the sampler produces at most `count_per_class`
+ samples for each category (default: 8)
+ search_count_multiplier (float or None): if not None, the total number
+ of the most confident estimates of a given class to consider is
+ defined as `min(search_count_multiplier * count_per_class, N)`,
+ where `N` is the total number of estimates of the class; cannot be
+ specified together with `search_proportion` (default: None)
+ search_proportion (float or None): if not None, the total number of the
+ most confident estimates of a given class to consider is
+ defined as `min(max(search_proportion * N, count_per_class), N)`,
+ where `N` is the total number of estimates of the class; cannot be
+ specified together with `search_count_multiplier` (default: None)
+ """
+ super().__init__(count_per_class)
+ self.confidence_channel = confidence_channel
+ self.search_count_multiplier = search_count_multiplier
+ self.search_proportion = search_proportion
+ assert (search_count_multiplier is None) or (search_proportion is None), (
+ f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
+ f"and search_proportion (={search_proportion})"
+ )
+
+ def _produce_index_sample(self, values: torch.Tensor, count: int):
+ """
+ Produce a sample of indices to select data based on confidences
+
+ Args:
+ values (torch.Tensor): an array of size [n, k] that contains
+ estimated values (U, V, confidences);
+ n: number of channels (U, V, confidences)
+ k: number of points labeled with part_id
+ count (int): number of samples to produce, should be positive and <= k
+
+ Return:
+ list(int): indices of values (along axis 1) selected as a sample
+ """
+ k = values.shape[1]
+ if k == count:
+ index_sample = list(range(k))
+ else:
+ # take the best count * search_count_multiplier pixels,
+ # sample from them uniformly
+ # (here best = smallest variance)
+ _, sorted_confidence_indices = torch.sort(values[2])
+ if self.search_count_multiplier is not None:
+ search_count = min(int(count * self.search_count_multiplier), k)
+ elif self.search_proportion is not None:
+ search_count = min(max(int(k * self.search_proportion), count), k)
+ else:
+ search_count = min(count, k)
+ sample_from_top = random.sample(range(search_count), count)
+ index_sample = sorted_confidence_indices[:search_count][sample_from_top]
+ return index_sample
+
+ def _produce_labels_and_results(self, instance) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Method to get labels and DensePose results from an instance, with confidences
+
+ Args:
+ instance (Instances): an instance of `DensePoseChartPredictorOutputWithConfidences`
+
+ Return:
+ labels (torch.Tensor): shape [H, W], DensePose segmentation labels
+ dp_result (torch.Tensor): shape [3, H, W], DensePose results u and v
+ stacked with the confidence channel
+ """
+ converter = ToChartResultConverterWithConfidences
+ chart_result = converter.convert(instance.pred_densepose, instance.pred_boxes)
+ labels, dp_result = chart_result.labels.cpu(), chart_result.uv.cpu()
+ dp_result = torch.cat(
+ (dp_result, getattr(chart_result, self.confidence_channel)[None].cpu())
+ )
+
+ return labels, dp_result
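+
+
+# Illustrative usage sketch (assumes `instances` carries `pred_densepose` and
+# `pred_boxes` from a DensePose chart model with confidence estimation enabled):
+#
+#   sampler = DensePoseConfidenceBasedSampler(
+#       "sigma_2", count_per_class=8, search_count_multiplier=4.0
+#   )
+#   dp_list = sampler(instances)  # sampling biased towards low-variance pixels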
diff --git a/densepose/data/samplers/densepose_cse_base.py b/densepose/data/samplers/densepose_cse_base.py
new file mode 100644
index 0000000000000000000000000000000000000000..845545c1438b9d2a4fbb4c6dac0642461a7e539f
--- /dev/null
+++ b/densepose/data/samplers/densepose_cse_base.py
@@ -0,0 +1,139 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any, Dict, List, Tuple
+import torch
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from densepose.converters.base import IntTupleBox
+from densepose.data.utils import get_class_to_mesh_name_mapping
+from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
+from densepose.structures import DensePoseDataRelative
+
+from .densepose_base import DensePoseBaseSampler
+
+
+class DensePoseCSEBaseSampler(DensePoseBaseSampler):
+ """
+ Base DensePose sampler to produce DensePose data from DensePose predictions.
+ Samples for each class are drawn according to some distribution over all pixels estimated
+ to belong to that class.
+ """
+
+ def __init__(
+ self,
+ cfg: CfgNode,
+ use_gt_categories: bool,
+ embedder: torch.nn.Module,
+ count_per_class: int = 8,
+ ):
+ """
+ Constructor
+
+ Args:
+ cfg (CfgNode): the config of the model
+ embedder (torch.nn.Module): necessary to compute mesh vertex embeddings
+ count_per_class (int): the sampler produces at most `count_per_class`
+ samples for each category
+ """
+ super().__init__(count_per_class)
+ self.embedder = embedder
+ self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
+ self.use_gt_categories = use_gt_categories
+
+ def _sample(self, instance: Instances, bbox_xywh: IntTupleBox) -> Dict[str, List[Any]]:
+ """
+ Sample DensePoseDataRelative from estimation results
+ """
+ if self.use_gt_categories:
+ instance_class = instance.dataset_classes.tolist()[0]
+ else:
+ instance_class = instance.pred_classes.tolist()[0]
+ mesh_name = self.class_to_mesh_name[instance_class]
+
+ annotation = {
+ DensePoseDataRelative.X_KEY: [],
+ DensePoseDataRelative.Y_KEY: [],
+ DensePoseDataRelative.VERTEX_IDS_KEY: [],
+ DensePoseDataRelative.MESH_NAME_KEY: mesh_name,
+ }
+
+ mask, embeddings, other_values = self._produce_mask_and_results(instance, bbox_xywh)
+ indices = torch.nonzero(mask, as_tuple=True)
+ selected_embeddings = embeddings.permute(1, 2, 0)[indices].cpu()
+ values = other_values[:, indices[0], indices[1]]
+ k = values.shape[1]
+
+ count = min(self.count_per_class, k)
+ if count <= 0:
+ return annotation
+
+ index_sample = self._produce_index_sample(values, count)
+ closest_vertices = squared_euclidean_distance_matrix(
+ selected_embeddings[index_sample], self.embedder(mesh_name)
+ )
+ closest_vertices = torch.argmin(closest_vertices, dim=1)
+
+ sampled_y = indices[0][index_sample] + 0.5
+ sampled_x = indices[1][index_sample] + 0.5
+ # prepare / normalize data
+ _, _, w, h = bbox_xywh
+ x = (sampled_x / w * 256.0).cpu().tolist()
+ y = (sampled_y / h * 256.0).cpu().tolist()
+ # extend annotations
+ annotation[DensePoseDataRelative.X_KEY].extend(x)
+ annotation[DensePoseDataRelative.Y_KEY].extend(y)
+ annotation[DensePoseDataRelative.VERTEX_IDS_KEY].extend(closest_vertices.cpu().tolist())
+ return annotation
+
+ def _produce_mask_and_results(
+ self, instance: Instances, bbox_xywh: IntTupleBox
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Method to get labels and DensePose results from an instance
+
+ Args:
+ instance (Instances): an instance of `DensePoseEmbeddingPredictorOutput`
+ bbox_xywh (IntTupleBox): the corresponding bounding box
+
+ Return:
+ mask (torch.Tensor): shape [H, W], DensePose segmentation mask
+ embeddings (torch.Tensor): a tensor of shape [D, H, W],
+ DensePose CSE Embeddings
+ other_values (torch.Tensor): a tensor of shape [0, H, W],
+ for potential other values
+ """
+ densepose_output = instance.pred_densepose
+ S = densepose_output.coarse_segm
+ E = densepose_output.embedding
+ _, _, w, h = bbox_xywh
+ embeddings = F.interpolate(E, size=(h, w), mode="bilinear")[0]
+ coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0]
+ mask = coarse_segm_resized.argmax(0) > 0
+ other_values = torch.empty((0, h, w), device=E.device)
+ return mask, embeddings, other_values
+
+ def _resample_mask(self, output: Any) -> torch.Tensor:
+ """
+ Convert DensePose predictor output to segmentation annotation - tensors of size
+ (256, 256) and type `int64`.
+
+ Args:
+ output: DensePose predictor output with the following attributes:
+ - coarse_segm: tensor of size [N, D, H, W] with unnormalized coarse
+ segmentation scores
+ Return:
+ Tensor of size (S, S) and type `int64` with coarse segmentation annotations,
+ where S = DensePoseDataRelative.MASK_SIZE
+ """
+ sz = DensePoseDataRelative.MASK_SIZE
+ mask = (
+ F.interpolate(output.coarse_segm, (sz, sz), mode="bilinear", align_corners=False)
+ .argmax(dim=1)
+ .long()
+ .squeeze()
+ .cpu()
+ )
+ return mask
diff --git a/densepose/data/samplers/densepose_cse_confidence_based.py b/densepose/data/samplers/densepose_cse_confidence_based.py
new file mode 100644
index 0000000000000000000000000000000000000000..964b7f4ac41d2e1bb3da1cf6861af7f644b859fc
--- /dev/null
+++ b/densepose/data/samplers/densepose_cse_confidence_based.py
@@ -0,0 +1,119 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+from typing import Optional, Tuple
+import torch
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from densepose.converters.base import IntTupleBox
+
+from .densepose_cse_base import DensePoseCSEBaseSampler
+
+
+class DensePoseCSEConfidenceBasedSampler(DensePoseCSEBaseSampler):
+ """
+ Samples DensePose data from DensePose predictions.
+ Samples for each class are drawn using confidence value estimates.
+ """
+
+ def __init__(
+ self,
+ cfg: CfgNode,
+ use_gt_categories: bool,
+ embedder: torch.nn.Module,
+ confidence_channel: str,
+ count_per_class: int = 8,
+ search_count_multiplier: Optional[float] = None,
+ search_proportion: Optional[float] = None,
+ ):
+ """
+ Constructor
+
+ Args:
+ cfg (CfgNode): the config of the model
+ embedder (torch.nn.Module): necessary to compute mesh vertex embeddings
+ confidence_channel (str): confidence channel to use for sampling;
+ possible values:
+ "coarse_segm_confidence": confidences for coarse segmentation
+ (default: "coarse_segm_confidence")
+ count_per_class (int): the sampler produces at most `count_per_class`
+ samples for each category (default: 8)
+ search_count_multiplier (float or None): if not None, the total number
+ of the most confident estimates of a given class to consider is
+ defined as `min(search_count_multiplier * count_per_class, N)`,
+ where `N` is the total number of estimates of the class; cannot be
+ specified together with `search_proportion` (default: None)
+ search_proportion (float or None): if not None, the total number of the
+ most confident estimates of a given class to consider is
+ defined as `min(max(search_proportion * N, count_per_class), N)`,
+ where `N` is the total number of estimates of the class; cannot be
+ specified together with `search_count_multiplier` (default: None)
+ """
+ super().__init__(cfg, use_gt_categories, embedder, count_per_class)
+ self.confidence_channel = confidence_channel
+ self.search_count_multiplier = search_count_multiplier
+ self.search_proportion = search_proportion
+ assert (search_count_multiplier is None) or (search_proportion is None), (
+ f"Cannot specify both search_count_multiplier (={search_count_multiplier})"
+ f"and search_proportion (={search_proportion})"
+ )
+
+ def _produce_index_sample(self, values: torch.Tensor, count: int):
+ """
+ Produce a sample of indices to select data based on confidences
+
+ Args:
+ values (torch.Tensor): a tensor of shape [1, k] that contains confidences
+ k: number of points labeled with part_id
+ count (int): number of samples to produce, should be positive and <= k
+
+ Return:
+ list(int): indices of values (along axis 1) selected as a sample
+ """
+ k = values.shape[1]
+ if k == count:
+ index_sample = list(range(k))
+ else:
+ # take the best count * search_count_multiplier pixels,
+ # sample from them uniformly
+ # (here best = smallest variance)
+ _, sorted_confidence_indices = torch.sort(values[0])
+ if self.search_count_multiplier is not None:
+ search_count = min(int(count * self.search_count_multiplier), k)
+ elif self.search_proportion is not None:
+ search_count = min(max(int(k * self.search_proportion), count), k)
+ else:
+ search_count = min(count, k)
+ sample_from_top = random.sample(range(search_count), count)
+ index_sample = sorted_confidence_indices[-search_count:][sample_from_top]
+ return index_sample
+
+ def _produce_mask_and_results(
+ self, instance: Instances, bbox_xywh: IntTupleBox
+ ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
+ """
+ Method to get labels and DensePose results from an instance
+
+ Args:
+ instance (Instances): an instance of
+ `DensePoseEmbeddingPredictorOutputWithConfidences`
+ bbox_xywh (IntTupleBox): the corresponding bounding box
+
+ Return:
+ mask (torch.Tensor): shape [H, W], DensePose segmentation mask
+ embeddings (torch.Tensor): a tensor of shape [D, H, W],
+ DensePose CSE Embeddings
+ other_values (torch.Tensor): a tensor of shape [1, H, W], DensePose CSE confidence
+ """
+ _, _, w, h = bbox_xywh
+ densepose_output = instance.pred_densepose
+ mask, embeddings, _ = super()._produce_mask_and_results(instance, bbox_xywh)
+ other_values = F.interpolate(
+ getattr(densepose_output, self.confidence_channel),
+ size=(h, w),
+ mode="bilinear",
+ )[0].cpu()
+ return mask, embeddings, other_values
diff --git a/densepose/data/samplers/densepose_cse_uniform.py b/densepose/data/samplers/densepose_cse_uniform.py
new file mode 100644
index 0000000000000000000000000000000000000000..567636cc7dfbcc9167dd7f4aa2b752c6e53d311f
--- /dev/null
+++ b/densepose/data/samplers/densepose_cse_uniform.py
@@ -0,0 +1,12 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .densepose_cse_base import DensePoseCSEBaseSampler
+from .densepose_uniform import DensePoseUniformSampler
+
+
+class DensePoseCSEUniformSampler(DensePoseCSEBaseSampler, DensePoseUniformSampler):
+ """
+ Uniform Sampler for CSE
+ """
+
+ pass
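+
+
+# Illustrative usage sketch (cfg and embedder come from a CSE model; the embedder
+# attribute path mirrors Trainer.extract_embedder_from_model in this patch):
+#
+#   sampler = DensePoseCSEUniformSampler(
+#       cfg, use_gt_categories=False, embedder=model.roi_heads.embedder,
+#       count_per_class=8,
+#   )
+#   dp_list = sampler(instances)  # annotations carry mesh name and vertex ids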
diff --git a/densepose/data/samplers/densepose_uniform.py b/densepose/data/samplers/densepose_uniform.py
new file mode 100644
index 0000000000000000000000000000000000000000..0d72cc30c9342b36efd6a7e80e55bf088b5c797c
--- /dev/null
+++ b/densepose/data/samplers/densepose_uniform.py
@@ -0,0 +1,41 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+import torch
+
+from .densepose_base import DensePoseBaseSampler
+
+
+class DensePoseUniformSampler(DensePoseBaseSampler):
+ """
+ Samples DensePose data from DensePose predictions.
+ Samples for each class are drawn uniformly over all pixels estimated
+ to belong to that class.
+ """
+
+ def __init__(self, count_per_class: int = 8):
+ """
+ Constructor
+
+ Args:
+ count_per_class (int): the sampler produces at most `count_per_class`
+ samples for each category
+ """
+ super().__init__(count_per_class)
+
+ def _produce_index_sample(self, values: torch.Tensor, count: int):
+ """
+ Produce a uniform sample of indices to select data
+
+ Args:
+ values (torch.Tensor): an array of size [n, k] that contains
+ estimated values (U, V, confidences);
+ n: number of channels (U, V, confidences)
+ k: number of points labeled with part_id
+ count (int): number of samples to produce, should be positive and <= k
+
+ Return:
+ list(int): indices of values (along axis 1) selected as a sample
+ """
+ k = values.shape[1]
+ return random.sample(range(k), count)
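+
+
+# Illustrative usage sketch (assumes `instances` carries `pred_densepose` and
+# `pred_boxes` from a DensePose chart model):
+#
+#   sampler = DensePoseUniformSampler(count_per_class=8)
+#   dp_list = sampler(instances)  # DensePoseList with uniformly sampled points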
diff --git a/densepose/data/samplers/mask_from_densepose.py b/densepose/data/samplers/mask_from_densepose.py
new file mode 100644
index 0000000000000000000000000000000000000000..0e6e812ba5af4675a81aec3ef8fd9b96d53325cc
--- /dev/null
+++ b/densepose/data/samplers/mask_from_densepose.py
@@ -0,0 +1,28 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from detectron2.structures import BitMasks, Instances
+
+from densepose.converters import ToMaskConverter
+
+
+class MaskFromDensePoseSampler:
+ """
+ Produce mask GT from DensePose predictions
+ This sampler simply converts DensePose predictions to BitMasks
+ that contain a bool tensor of the size of the input image
+ """
+
+ def __call__(self, instances: Instances) -> BitMasks:
+ """
+ Converts predicted data from `instances` into the GT mask data
+
+ Args:
+ instances (Instances): predicted results, expected to have `pred_densepose` field
+
+ Returns:
+ BitMasks with a boolean tensor of the size of the input image that has
+ non-zero values at pixels estimated to belong to the detected object
+ """
+ return ToMaskConverter.convert(
+ instances.pred_densepose, instances.pred_boxes, instances.image_size
+ )
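+
+
+# Illustrative usage sketch: this sampler is typically registered for the
+# "gt_masks" field in PredictionToGroundTruthSampler (see prediction_to_gt.py);
+# a direct call on predicted `instances` would look like
+#
+#   gt_masks = MaskFromDensePoseSampler()(instances)  # image-sized BitMasks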
diff --git a/densepose/data/samplers/prediction_to_gt.py b/densepose/data/samplers/prediction_to_gt.py
new file mode 100644
index 0000000000000000000000000000000000000000..3881fa5503c32c9e2f0602971971995f1211e054
--- /dev/null
+++ b/densepose/data/samplers/prediction_to_gt.py
@@ -0,0 +1,98 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from dataclasses import dataclass
+from typing import Any, Callable, Dict, List, Optional
+
+from detectron2.structures import Instances
+
+ModelOutput = Dict[str, Any]
+SampledData = Dict[str, Any]
+
+
+@dataclass
+class _Sampler:
+ """
+ Sampler registry entry that contains:
+ - src (str): source field to sample from (deleted after sampling)
+ - dst (Optional[str]): destination field to sample to, if not None
+ - func (Optional[Callable: Any -> Any]): function that performs sampling,
+ if None, reference copy is performed
+ """
+
+ src: str
+ dst: Optional[str]
+ func: Optional[Callable[[Any], Any]]
+
+
+class PredictionToGroundTruthSampler:
+ """
+ Sampler implementation that converts predictions to GT using registered
+ samplers for different fields of `Instances`.
+ """
+
+ def __init__(self, dataset_name: str = ""):
+ self.dataset_name = dataset_name
+ self._samplers = {}
+ self.register_sampler("pred_boxes", "gt_boxes", None)
+ self.register_sampler("pred_classes", "gt_classes", None)
+ # delete scores
+ self.register_sampler("scores")
+
+ def __call__(self, model_output: List[ModelOutput]) -> List[SampledData]:
+ """
+ Transform model output into ground truth data through sampling
+
+ Args:
+ model_output (List[Dict[str, Any]]): model output entries
+ Returns:
+ List[Dict[str, Any]]: sampled data
+ """
+ for model_output_i in model_output:
+ instances: Instances = model_output_i["instances"]
+ # transform data in each field
+ for _, sampler in self._samplers.items():
+ if not instances.has(sampler.src) or sampler.dst is None:
+ continue
+ if sampler.func is None:
+ instances.set(sampler.dst, instances.get(sampler.src))
+ else:
+ instances.set(sampler.dst, sampler.func(instances))
+ # delete model output data that was transformed
+ for _, sampler in self._samplers.items():
+ if sampler.src != sampler.dst and instances.has(sampler.src):
+ instances.remove(sampler.src)
+ model_output_i["dataset"] = self.dataset_name
+ return model_output
+
+ def register_sampler(
+ self,
+ prediction_attr: str,
+ gt_attr: Optional[str] = None,
+ func: Optional[Callable[[Any], Any]] = None,
+ ):
+ """
+ Register sampler for a field
+
+ Args:
+ prediction_attr (str): field to replace with a sampled value
+ gt_attr (Optional[str]): field to store the sampled value to, if not None
+ func (Optional[Callable: Any -> Any]): sampler function
+ """
+ self._samplers[(prediction_attr, gt_attr)] = _Sampler(
+ src=prediction_attr, dst=gt_attr, func=func
+ )
+
+ def remove_sampler(
+ self,
+ prediction_attr: str,
+ gt_attr: Optional[str] = None,
+ ):
+ """
+ Remove sampler for a field
+
+ Args:
+ prediction_attr (str): field to replace with a sampled value
+ gt_attr (Optional[str]): field to store the sampled value to, if not None
+ """
+ assert (prediction_attr, gt_attr) in self._samplers
+ del self._samplers[(prediction_attr, gt_attr)]
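+
+
+# Illustrative usage sketch (dataset name and extra sampler are assumptions):
+#
+#   sampler = PredictionToGroundTruthSampler("densepose_unlabeled_images")
+#   sampler.register_sampler("pred_densepose", "gt_masks", MaskFromDensePoseSampler())
+#   sampled = sampler(model_outputs)  # predictions rewritten as GT fields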
diff --git a/densepose/data/transform/__init__.py b/densepose/data/transform/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..369e1b278899b225d55bfc729514873b4259c7b9
--- /dev/null
+++ b/densepose/data/transform/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .image import ImageResizeTransform
diff --git a/densepose/data/transform/image.py b/densepose/data/transform/image.py
new file mode 100644
index 0000000000000000000000000000000000000000..8139b67841633841199a1aae3b25e326afaaf5e2
--- /dev/null
+++ b/densepose/data/transform/image.py
@@ -0,0 +1,39 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import torch
+
+
+class ImageResizeTransform:
+ """
+ Transform that resizes images loaded from a dataset
+ (BGR data in NCHW channel order, typically uint8) to a format ready to be
+ consumed by DensePose training (BGR float32 data in NCHW channel order)
+ """
+
+ def __init__(self, min_size: int = 800, max_size: int = 1333):
+ self.min_size = min_size
+ self.max_size = max_size
+
+ def __call__(self, images: torch.Tensor) -> torch.Tensor:
+ """
+ Args:
+ images (torch.Tensor): tensor of size [N, 3, H, W] that contains
+ BGR data (typically in uint8)
+ Returns:
+ images (torch.Tensor): tensor of size [N, 3, H1, W1] where
+ H1 and W1 are chosen to respect the specified min and max sizes
+ and preserve the original aspect ratio, the data channels
+ follow BGR order and the data type is `torch.float32`
+ """
+ # resize with min size
+ images = images.float()
+ min_size = min(images.shape[-2:])
+ max_size = max(images.shape[-2:])
+ scale = min(self.min_size / min_size, self.max_size / max_size)
+ images = torch.nn.functional.interpolate(
+ images,
+ scale_factor=scale,
+ mode="bilinear",
+ align_corners=False,
+ )
+ return images
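+
+
+# Illustrative example of the scaling rule above (input sizes are assumptions):
+# with the default min_size=800 / max_size=1333 and a 480x640 input batch,
+# scale = min(800 / 480, 1333 / 640) = 1.666..., so the output is approximately
+# 800x1067 float32, preserving the original aspect ratio.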
diff --git a/densepose/data/utils.py b/densepose/data/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..9878c31d03bd4114425f89dd1c6dda74337fe2e2
--- /dev/null
+++ b/densepose/data/utils.py
@@ -0,0 +1,38 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import os
+from typing import Dict, Optional
+
+from detectron2.config import CfgNode
+
+
+def is_relative_local_path(path: str) -> bool:
+ path_str = os.fsdecode(path)
+ return ("://" not in path_str) and not os.path.isabs(path)
+
+
+def maybe_prepend_base_path(base_path: Optional[str], path: str):
+ """
+ Prepends the provided path with a base path prefix if:
+ 1) base path is not None;
+ 2) path is a local path
+ """
+ if base_path is None:
+ return path
+ if is_relative_local_path(path):
+ return os.path.join(base_path, path)
+ return path
+
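+# Illustrative examples (paths below are made up):
+#   maybe_prepend_base_path("/data", "coco/train.json")  -> "/data/coco/train.json"
+#   maybe_prepend_base_path("/data", "/abs/train.json")  -> "/abs/train.json"
+#   maybe_prepend_base_path(None, "coco/train.json")     -> "coco/train.json"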
+
+def get_class_to_mesh_name_mapping(cfg: CfgNode) -> Dict[int, str]:
+ return {
+ int(class_id): mesh_name
+ for class_id, mesh_name in cfg.DATASETS.CLASS_TO_MESH_NAME_MAPPING.items()
+ }
+
+
+def get_category_to_class_mapping(dataset_cfg: CfgNode) -> Dict[str, int]:
+ return {
+ category: int(class_id)
+ for category, class_id in dataset_cfg.CATEGORY_TO_CLASS_MAPPING.items()
+ }
diff --git a/densepose/data/video/__init__.py b/densepose/data/video/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..72406e153b688461bfcb0ef21e35020399239309
--- /dev/null
+++ b/densepose/data/video/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .frame_selector import (
+ FrameSelectionStrategy,
+ RandomKFramesSelector,
+ FirstKFramesSelector,
+ LastKFramesSelector,
+ FrameTsList,
+ FrameSelector,
+)
+
+from .video_keyframe_dataset import (
+ VideoKeyframeDataset,
+ video_list_from_file,
+ list_keyframes,
+ read_keyframes,
+)
diff --git a/densepose/data/video/frame_selector.py b/densepose/data/video/frame_selector.py
new file mode 100644
index 0000000000000000000000000000000000000000..c28f0e96475537319ff584f73fa422f838ae7b40
--- /dev/null
+++ b/densepose/data/video/frame_selector.py
@@ -0,0 +1,87 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import random
+from collections.abc import Callable
+from enum import Enum
+from typing import Callable as TCallable
+from typing import List
+
+FrameTsList = List[int]
+FrameSelector = TCallable[[FrameTsList], FrameTsList]
+
+
+class FrameSelectionStrategy(Enum):
+ """
+ Frame selection strategy used with videos:
+ - "random_k": select k random frames
+ - "first_k": select k first frames
+ - "last_k": select k last frames
+ - "all": select all frames
+ """
+
+ # fmt: off
+ RANDOM_K = "random_k"
+ FIRST_K = "first_k"
+ LAST_K = "last_k"
+ ALL = "all"
+ # fmt: on
+
+
+class RandomKFramesSelector(Callable): # pyre-ignore[39]
+ """
+ Selector that retains at most `k` random frames
+ """
+
+ def __init__(self, k: int):
+ self.k = k
+
+ def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
+ """
+ Select `k` random frames
+
+ Args:
+ frame_tss (List[int]): timestamps of input frames
+ Returns:
+ List[int]: timestamps of selected frames
+ """
+ return random.sample(frame_tss, min(self.k, len(frame_tss)))
+
+
+class FirstKFramesSelector(Callable): # pyre-ignore[39]
+ """
+ Selector that retains at most `k` first frames
+ """
+
+ def __init__(self, k: int):
+ self.k = k
+
+ def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
+ """
+ Select `k` first frames
+
+ Args:
+ frame_tss (List[int]): timestamps of input frames
+ Returns:
+ List[int]: timestamps of selected frames
+ """
+ return frame_tss[: self.k]
+
+
+class LastKFramesSelector(Callable): # pyre-ignore[39]
+ """
+ Selector that retains at most `k` last frames from video data
+ """
+
+ def __init__(self, k: int):
+ self.k = k
+
+ def __call__(self, frame_tss: FrameTsList) -> FrameTsList:
+ """
+ Select `k` last frames
+
+ Args:
+ frame_tss (List[int]): timestamps of input frames
+ Returns:
+ List[int]: timestamps of selected frames
+ """
+ return frame_tss[-self.k :]
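+
+
+# Illustrative usage sketch (timestamps below are made up):
+#
+#   FirstKFramesSelector(2)([10, 20, 30])   # -> [10, 20]
+#   LastKFramesSelector(2)([10, 20, 30])    # -> [20, 30]
+#   RandomKFramesSelector(2)([10, 20, 30])  # -> 2 timestamps drawn at random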
diff --git a/densepose/data/video/video_keyframe_dataset.py b/densepose/data/video/video_keyframe_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..be379d12bff5b6348087ba343d3c027b52524136
--- /dev/null
+++ b/densepose/data/video/video_keyframe_dataset.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import csv
+import logging
+import numpy as np
+from typing import Any, Callable, Dict, List, Optional, Union
+import av
+import torch
+from torch.utils.data.dataset import Dataset
+
+from detectron2.utils.file_io import PathManager
+
+from ..utils import maybe_prepend_base_path
+from .frame_selector import FrameSelector, FrameTsList
+
+FrameList = List[av.frame.Frame] # pyre-ignore[16]
+FrameTransform = Callable[[torch.Tensor], torch.Tensor]
+
+
+def list_keyframes(video_fpath: str, video_stream_idx: int = 0) -> FrameTsList:
+ """
+ Traverses all keyframes of a video file. Returns a list of keyframe
+ timestamps. Timestamps are counts in timebase units.
+
+ Args:
+ video_fpath (str): Video file path
+ video_stream_idx (int): Video stream index (default: 0)
+ Returns:
+ List[int]: list of keyframe timestamps (timestamp is a count in timebase
+ units)
+ """
+ try:
+ with PathManager.open(video_fpath, "rb") as io:
+ container = av.open(io, mode="r")
+ stream = container.streams.video[video_stream_idx]
+ keyframes = []
+ pts = -1
+ # Note: even though we request forward seeks for keyframes, sometimes
+ # a keyframe in backwards direction is returned. We introduce tolerance
+ # as a max count of ignored backward seeks
+ tolerance_backward_seeks = 2
+ while True:
+ try:
+ container.seek(pts + 1, backward=False, any_frame=False, stream=stream)
+ except av.AVError as e:
+ # the exception occurs when the video length is exceeded,
+ # we then return whatever data we've already collected
+ logger = logging.getLogger(__name__)
+ logger.debug(
+ f"List keyframes: Error seeking video file {video_fpath}, "
+ f"video stream {video_stream_idx}, pts {pts + 1}, AV error: {e}"
+ )
+ return keyframes
+ except OSError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"List keyframes: Error seeking video file {video_fpath}, "
+ f"video stream {video_stream_idx}, pts {pts + 1}, OS error: {e}"
+ )
+ return []
+ packet = next(container.demux(video=video_stream_idx))
+ if packet.pts is not None and packet.pts <= pts:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Video file {video_fpath}, stream {video_stream_idx}: "
+ f"bad seek for packet {pts + 1} (got packet {packet.pts}), "
+ f"tolerance {tolerance_backward_seeks}."
+ )
+ tolerance_backward_seeks -= 1
+ if tolerance_backward_seeks == 0:
+ return []
+ pts += 1
+ continue
+ tolerance_backward_seeks = 2
+ pts = packet.pts
+ if pts is None:
+ return keyframes
+ if packet.is_keyframe:
+ keyframes.append(pts)
+ return keyframes
+ except OSError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"List keyframes: Error opening video file container {video_fpath}, " f"OS error: {e}"
+ )
+ except RuntimeError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"List keyframes: Error opening video file container {video_fpath}, "
+ f"Runtime error: {e}"
+ )
+ return []
+
+
+def read_keyframes(
+ video_fpath: str, keyframes: FrameTsList, video_stream_idx: int = 0
+) -> FrameList: # pyre-ignore[11]
+ """
+ Reads keyframe data from a video file.
+
+ Args:
+ video_fpath (str): Video file path
+ keyframes (List[int]): List of keyframe timestamps (as counts in
+ timebase units to be used in container seek operations)
+ video_stream_idx (int): Video stream index (default: 0)
+ Returns:
+ List[Frame]: list of frames that correspond to the specified timestamps
+ """
+ try:
+ with PathManager.open(video_fpath, "rb") as io:
+ container = av.open(io)
+ stream = container.streams.video[video_stream_idx]
+ frames = []
+ for pts in keyframes:
+ try:
+ container.seek(pts, any_frame=False, stream=stream)
+ frame = next(container.decode(video=0))
+ frames.append(frame)
+ except av.AVError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Read keyframes: Error seeking video file {video_fpath}, "
+ f"video stream {video_stream_idx}, pts {pts}, AV error: {e}"
+ )
+ container.close()
+ return frames
+ except OSError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Read keyframes: Error seeking video file {video_fpath}, "
+ f"video stream {video_stream_idx}, pts {pts}, OS error: {e}"
+ )
+ container.close()
+ return frames
+ except StopIteration:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Read keyframes: Error decoding frame from {video_fpath}, "
+ f"video stream {video_stream_idx}, pts {pts}"
+ )
+ container.close()
+ return frames
+
+ container.close()
+ return frames
+ except OSError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Read keyframes: Error opening video file container {video_fpath}, OS error: {e}"
+ )
+ except RuntimeError as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Read keyframes: Error opening video file container {video_fpath}, Runtime error: {e}"
+ )
+ return []
+
+
+def video_list_from_file(video_list_fpath: str, base_path: Optional[str] = None):
+ """
+ Create a list of paths to video files from a text file.
+
+ Args:
+ video_list_fpath (str): path to a plain text file with the list of videos
+ base_path (str): base path for entries from the video list (default: None)
+ """
+ video_list = []
+ with PathManager.open(video_list_fpath, "r") as io:
+ for line in io:
+ video_list.append(maybe_prepend_base_path(base_path, str(line.strip())))
+ return video_list
+
+
+def read_keyframe_helper_data(fpath: str):
+ """
+ Read keyframe data from a file in CSV format: the header should contain
+ "video_id" and "keyframes" fields. Value specifications are:
+ video_id: int
+ keyframes: list(int)
+ Example of contents:
+ video_id,keyframes
+ 2,"[1,11,21,31,41,51,61,71,81]"
+
+ Args:
+ fpath (str): File containing keyframe data
+
+ Return:
+ video_id_to_keyframes (dict: int -> list(int)): for a given video ID it
+ contains a list of keyframes for that video
+ """
+ video_id_to_keyframes = {}
+ try:
+ with PathManager.open(fpath, "r") as io:
+ csv_reader = csv.reader(io)
+ header = next(csv_reader)
+ video_id_idx = header.index("video_id")
+ keyframes_idx = header.index("keyframes")
+ for row in csv_reader:
+ video_id = int(row[video_id_idx])
+ assert (
+ video_id not in video_id_to_keyframes
+ ), f"Duplicate keyframes entry for video {fpath}"
+ video_id_to_keyframes[video_id] = (
+ [int(v) for v in row[keyframes_idx][1:-1].split(",")]
+ if len(row[keyframes_idx]) > 2
+ else []
+ )
+ except Exception as e:
+ logger = logging.getLogger(__name__)
+ logger.warning(f"Error reading keyframe helper data from {fpath}: {e}")
+ return video_id_to_keyframes
+
+
+class VideoKeyframeDataset(Dataset):
+ """
+ Dataset that provides keyframes for a set of videos.
+ """
+
+ _EMPTY_FRAMES = torch.empty((0, 3, 1, 1))
+
+ def __init__(
+ self,
+ video_list: List[str],
+ category_list: Union[str, List[str], None] = None,
+ frame_selector: Optional[FrameSelector] = None,
+ transform: Optional[FrameTransform] = None,
+ keyframe_helper_fpath: Optional[str] = None,
+ ):
+ """
+ Dataset constructor
+
+ Args:
+ video_list (List[str]): list of paths to video files
+ category_list (Union[str, List[str], None]): list of animal categories for each
+ video file. If it is a string, or None, this applies to all videos
+ frame_selector (Callable: KeyFrameList -> KeyFrameList):
+ selects keyframes to process, keyframes are given by
+ packet timestamps in timebase counts. If None, all keyframes
+ are selected (default: None)
+ transform (Callable: torch.Tensor -> torch.Tensor):
+ transforms a batch of BGR images (tensors of size [B, 3, H, W]),
+ returns a tensor of the same size. If None, no transform is
+ applied (default: None)
+
+ """
+ if isinstance(category_list, list):
+ self.category_list = category_list
+ else:
+ self.category_list = [category_list] * len(video_list)
+ assert len(video_list) == len(
+ self.category_list
+ ), "length of video and category lists must be equal"
+ self.video_list = video_list
+ self.frame_selector = frame_selector
+ self.transform = transform
+ self.keyframe_helper_data = (
+ read_keyframe_helper_data(keyframe_helper_fpath)
+ if keyframe_helper_fpath is not None
+ else None
+ )
+
+ def __getitem__(self, idx: int) -> Dict[str, Any]:
+ """
+ Gets selected keyframes from a given video
+
+ Args:
+ idx (int): video index in the video list file
+ Returns:
+ A dictionary containing two keys:
+ images (torch.Tensor): tensor of size [N, 3, H, W] or of size
+ defined by the transform that contains keyframes data
+ categories (List[str]): categories of the frames
+ """
+ categories = [self.category_list[idx]]
+ fpath = self.video_list[idx]
+ keyframes = (
+ list_keyframes(fpath)
+ if self.keyframe_helper_data is None or idx not in self.keyframe_helper_data
+ else self.keyframe_helper_data[idx]
+ )
+ transform = self.transform
+ frame_selector = self.frame_selector
+ if not keyframes:
+ return {"images": self._EMPTY_FRAMES, "categories": []}
+ if frame_selector is not None:
+ keyframes = frame_selector(keyframes)
+ frames = read_keyframes(fpath, keyframes)
+ if not frames:
+ return {"images": self._EMPTY_FRAMES, "categories": []}
+ frames = np.stack([frame.to_rgb().to_ndarray() for frame in frames])
+ frames = torch.as_tensor(frames, device=torch.device("cpu"))
+ frames = frames[..., [2, 1, 0]] # RGB -> BGR
+ frames = frames.permute(0, 3, 1, 2).float() # NHWC -> NCHW
+ if transform is not None:
+ frames = transform(frames)
+ return {"images": frames, "categories": categories}
+
+ def __len__(self):
+ return len(self.video_list)
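+
+
+# Illustrative usage sketch (file names are assumptions; the frame selector and
+# transform are components defined elsewhere in this patch):
+#
+#   dataset = VideoKeyframeDataset(
+#       video_list_from_file("video_list.txt"),
+#       category_list="chimpanzee",
+#       frame_selector=RandomKFramesSelector(10),
+#       transform=ImageResizeTransform(),
+#   )
+#   entry = dataset[0]  # {"images": Tensor[N, 3, H, W], "categories": [...]}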
diff --git a/densepose/engine/__init__.py b/densepose/engine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..539b93a7beca07d229a6b6d387f885469242ad86
--- /dev/null
+++ b/densepose/engine/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .trainer import Trainer
diff --git a/densepose/engine/trainer.py b/densepose/engine/trainer.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8ffe82c3d64d01ae36bb3c07cc6d75950937389
--- /dev/null
+++ b/densepose/engine/trainer.py
@@ -0,0 +1,258 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import logging
+import os
+from collections import OrderedDict
+from typing import List, Optional, Union
+import torch
+from torch import nn
+
+from detectron2.checkpoint import DetectionCheckpointer
+from detectron2.config import CfgNode
+from detectron2.engine import DefaultTrainer
+from detectron2.evaluation import (
+ DatasetEvaluator,
+ DatasetEvaluators,
+ inference_on_dataset,
+ print_csv_format,
+)
+from detectron2.solver.build import get_default_optimizer_params, maybe_add_gradient_clipping
+from detectron2.utils import comm
+from detectron2.utils.events import EventWriter, get_event_storage
+
+from densepose import DensePoseDatasetMapperTTA, DensePoseGeneralizedRCNNWithTTA, load_from_cfg
+from densepose.data import (
+ DatasetMapper,
+ build_combined_loader,
+ build_detection_test_loader,
+ build_detection_train_loader,
+ build_inference_based_loaders,
+ has_inference_based_loaders,
+)
+from densepose.evaluation.d2_evaluator_adapter import Detectron2COCOEvaluatorAdapter
+from densepose.evaluation.evaluator import DensePoseCOCOEvaluator, build_densepose_evaluator_storage
+from densepose.modeling.cse import Embedder
+
+
+class SampleCountingLoader:
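+    # Wraps a data loader and, for every batch, logs the number of instances per
+    # source dataset to the event storage under the "batch/<dataset_name>" key.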
+ def __init__(self, loader):
+ self.loader = loader
+
+ def __iter__(self):
+ it = iter(self.loader)
+ storage = get_event_storage()
+ while True:
+ try:
+ batch = next(it)
+ num_inst_per_dataset = {}
+ for data in batch:
+ dataset_name = data["dataset"]
+ if dataset_name not in num_inst_per_dataset:
+ num_inst_per_dataset[dataset_name] = 0
+ num_inst = len(data["instances"])
+ num_inst_per_dataset[dataset_name] += num_inst
+ for dataset_name in num_inst_per_dataset:
+ storage.put_scalar(f"batch/{dataset_name}", num_inst_per_dataset[dataset_name])
+ yield batch
+ except StopIteration:
+ break
+
+
+class SampleCountMetricPrinter(EventWriter):
+ def __init__(self):
+ self.logger = logging.getLogger(__name__)
+
+ def write(self):
+ storage = get_event_storage()
+ batch_stats_strs = []
+ for key, buf in storage.histories().items():
+ if key.startswith("batch/"):
+ batch_stats_strs.append(f"{key} {buf.avg(20)}")
+ self.logger.info(", ".join(batch_stats_strs))
+
+
+class Trainer(DefaultTrainer):
+ @classmethod
+ def extract_embedder_from_model(cls, model: nn.Module) -> Optional[Embedder]:
+ if isinstance(model, nn.parallel.DistributedDataParallel):
+ model = model.module
+ if hasattr(model, "roi_heads") and hasattr(model.roi_heads, "embedder"):
+ return model.roi_heads.embedder
+ return None
+
+ # TODO: the only reason to copy the base class code here is to pass the embedder from
+ # the model to the evaluator; that should be refactored to avoid unnecessary copy-pasting
+ @classmethod
+ def test(
+ cls,
+ cfg: CfgNode,
+ model: nn.Module,
+ evaluators: Optional[Union[DatasetEvaluator, List[DatasetEvaluator]]] = None,
+ ):
+ """
+ Args:
+ cfg (CfgNode):
+ model (nn.Module):
+ evaluators (DatasetEvaluator, list[DatasetEvaluator] or None): if None, will call
+ :meth:`build_evaluator`. Otherwise, must have the same length as
+ ``cfg.DATASETS.TEST``.
+
+ Returns:
+ dict: a dict of result metrics
+ """
+ logger = logging.getLogger(__name__)
+ if isinstance(evaluators, DatasetEvaluator):
+ evaluators = [evaluators]
+ if evaluators is not None:
+ assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
+ len(cfg.DATASETS.TEST), len(evaluators)
+ )
+
+ results = OrderedDict()
+ for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
+ data_loader = cls.build_test_loader(cfg, dataset_name)
+ # When evaluators are passed in as arguments,
+ # implicitly assume that evaluators can be created before data_loader.
+ if evaluators is not None:
+ evaluator = evaluators[idx]
+ else:
+ try:
+ embedder = cls.extract_embedder_from_model(model)
+ evaluator = cls.build_evaluator(cfg, dataset_name, embedder=embedder)
+ except NotImplementedError:
+                    logger.warning(
+ "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
+ "or implement its `build_evaluator` method."
+ )
+ results[dataset_name] = {}
+ continue
+ if cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE or comm.is_main_process():
+ results_i = inference_on_dataset(model, data_loader, evaluator)
+ else:
+ results_i = {}
+ results[dataset_name] = results_i
+ if comm.is_main_process():
+ assert isinstance(
+ results_i, dict
+ ), "Evaluator must return a dict on the main process. Got {} instead.".format(
+ results_i
+ )
+ logger.info("Evaluation results for {} in csv format:".format(dataset_name))
+ print_csv_format(results_i)
+
+ if len(results) == 1:
+ results = list(results.values())[0]
+ return results
+
+ @classmethod
+ def build_evaluator(
+ cls,
+ cfg: CfgNode,
+ dataset_name: str,
+ output_folder: Optional[str] = None,
+ embedder: Optional[Embedder] = None,
+ ) -> DatasetEvaluators:
+ if output_folder is None:
+ output_folder = os.path.join(cfg.OUTPUT_DIR, "inference")
+ evaluators = []
+ distributed = cfg.DENSEPOSE_EVALUATION.DISTRIBUTED_INFERENCE
+ # Note: we currently use COCO evaluator for both COCO and LVIS datasets
+ # to have compatible metrics. LVIS bbox evaluator could also be used
+ # with an adapter to properly handle filtered / mapped categories
+ # evaluator_type = MetadataCatalog.get(dataset_name).evaluator_type
+ # if evaluator_type == "coco":
+ # evaluators.append(COCOEvaluator(dataset_name, output_dir=output_folder))
+ # elif evaluator_type == "lvis":
+ # evaluators.append(LVISEvaluator(dataset_name, output_dir=output_folder))
+ evaluators.append(
+ Detectron2COCOEvaluatorAdapter(
+ dataset_name, output_dir=output_folder, distributed=distributed
+ )
+ )
+ if cfg.MODEL.DENSEPOSE_ON:
+ storage = build_densepose_evaluator_storage(cfg, output_folder)
+ evaluators.append(
+ DensePoseCOCOEvaluator(
+ dataset_name,
+ distributed,
+ output_folder,
+ evaluator_type=cfg.DENSEPOSE_EVALUATION.TYPE,
+ min_iou_threshold=cfg.DENSEPOSE_EVALUATION.MIN_IOU_THRESHOLD,
+ storage=storage,
+ embedder=embedder,
+ should_evaluate_mesh_alignment=cfg.DENSEPOSE_EVALUATION.EVALUATE_MESH_ALIGNMENT,
+ mesh_alignment_mesh_names=cfg.DENSEPOSE_EVALUATION.MESH_ALIGNMENT_MESH_NAMES,
+ )
+ )
+ return DatasetEvaluators(evaluators)
+
+ @classmethod
+ def build_optimizer(cls, cfg: CfgNode, model: nn.Module):
+ params = get_default_optimizer_params(
+ model,
+ base_lr=cfg.SOLVER.BASE_LR,
+ weight_decay_norm=cfg.SOLVER.WEIGHT_DECAY_NORM,
+ bias_lr_factor=cfg.SOLVER.BIAS_LR_FACTOR,
+ weight_decay_bias=cfg.SOLVER.WEIGHT_DECAY_BIAS,
+ overrides={
+ "features": {
+ "lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.FEATURES_LR_FACTOR,
+ },
+ "embeddings": {
+ "lr": cfg.SOLVER.BASE_LR * cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_LR_FACTOR,
+ },
+ },
+ )
+ optimizer = torch.optim.SGD(
+ params,
+ cfg.SOLVER.BASE_LR,
+ momentum=cfg.SOLVER.MOMENTUM,
+ nesterov=cfg.SOLVER.NESTEROV,
+ weight_decay=cfg.SOLVER.WEIGHT_DECAY,
+ )
+ # pyre-fixme[6]: For 2nd param expected `Type[Optimizer]` but got `SGD`.
+ return maybe_add_gradient_clipping(cfg, optimizer)
+
+ @classmethod
+ def build_test_loader(cls, cfg: CfgNode, dataset_name):
+ return build_detection_test_loader(cfg, dataset_name, mapper=DatasetMapper(cfg, False))
+
+ @classmethod
+ def build_train_loader(cls, cfg: CfgNode):
+ data_loader = build_detection_train_loader(cfg, mapper=DatasetMapper(cfg, True))
+ if not has_inference_based_loaders(cfg):
+ return data_loader
+ model = cls.build_model(cfg)
+ model.to(cfg.BOOTSTRAP_MODEL.DEVICE)
+ DetectionCheckpointer(model).resume_or_load(cfg.BOOTSTRAP_MODEL.WEIGHTS, resume=False)
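+        # The loaders built below draw samples whose annotations are produced by
+        # running this bootstrap model at data-loading time; they are mixed with
+        # the annotated loader according to the returned sampling ratios.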
+ inference_based_loaders, ratios = build_inference_based_loaders(cfg, model)
+ loaders = [data_loader] + inference_based_loaders
+ ratios = [1.0] + ratios
+ combined_data_loader = build_combined_loader(cfg, loaders, ratios)
+ sample_counting_loader = SampleCountingLoader(combined_data_loader)
+ return sample_counting_loader
+
+ def build_writers(self):
+ writers = super().build_writers()
+ writers.append(SampleCountMetricPrinter())
+ return writers
+
+ @classmethod
+ def test_with_TTA(cls, cfg: CfgNode, model):
+ logger = logging.getLogger("detectron2.trainer")
+        # At the end of training, run an evaluation with TTA.
+        # Only some R-CNN models are supported.
+ logger.info("Running inference with test-time augmentation ...")
+ transform_data = load_from_cfg(cfg)
+ model = DensePoseGeneralizedRCNNWithTTA(
+ cfg, model, transform_data, DensePoseDatasetMapperTTA(cfg)
+ )
+ evaluators = [
+ cls.build_evaluator(
+ cfg, name, output_folder=os.path.join(cfg.OUTPUT_DIR, "inference_TTA")
+ )
+ for name in cfg.DATASETS.TEST
+ ]
+ res = cls.test(cfg, model, evaluators) # pyre-ignore[6]
+ res = OrderedDict({k + "_TTA": v for k, v in res.items()})
+ return res
diff --git a/densepose/evaluation/__init__.py b/densepose/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5ae1f20cdc822ebf3c870f1289a0ad210c57ae7
--- /dev/null
+++ b/densepose/evaluation/__init__.py
@@ -0,0 +1,3 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .evaluator import DensePoseCOCOEvaluator
diff --git a/densepose/evaluation/d2_evaluator_adapter.py b/densepose/evaluation/d2_evaluator_adapter.py
new file mode 100644
index 0000000000000000000000000000000000000000..1fbc526059a191f9414231c1b21ed3e8b7b58580
--- /dev/null
+++ b/densepose/evaluation/d2_evaluator_adapter.py
@@ -0,0 +1,50 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from detectron2.data.catalog import Metadata
+from detectron2.evaluation import COCOEvaluator
+
+from densepose.data.datasets.coco import (
+ get_contiguous_id_to_category_id_map,
+ maybe_filter_categories_cocoapi,
+)
+
+
+def _maybe_add_iscrowd_annotations(cocoapi) -> None:
+ for ann in cocoapi.dataset["annotations"]:
+ if "iscrowd" not in ann:
+ ann["iscrowd"] = 0
+
+
+class Detectron2COCOEvaluatorAdapter(COCOEvaluator):
+ def __init__(
+ self,
+ dataset_name,
+ output_dir=None,
+ distributed=True,
+ ):
+ super().__init__(dataset_name, output_dir=output_dir, distributed=distributed)
+ maybe_filter_categories_cocoapi(dataset_name, self._coco_api)
+ _maybe_add_iscrowd_annotations(self._coco_api)
+ # substitute category metadata to account for categories
+ # that are mapped to the same contiguous id
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ self._maybe_substitute_metadata()
+
+ def _maybe_substitute_metadata(self):
+ cont_id_2_cat_id = get_contiguous_id_to_category_id_map(self._metadata)
+ cat_id_2_cont_id = self._metadata.thing_dataset_id_to_contiguous_id
+ if len(cont_id_2_cat_id) == len(cat_id_2_cont_id):
+ return
+
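+        # Keep only (category id, contiguous id) pairs for which the mapping is
+        # invertible, so that COCO evaluation does not attribute results to the
+        # wrong category when several dataset ids share one contiguous id.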
+ cat_id_2_cont_id_injective = {}
+ for cat_id, cont_id in cat_id_2_cont_id.items():
+ if (cont_id in cont_id_2_cat_id) and (cont_id_2_cat_id[cont_id] == cat_id):
+ cat_id_2_cont_id_injective[cat_id] = cont_id
+
+ metadata_new = Metadata(name=self._metadata.name)
+ for key, value in self._metadata.__dict__.items():
+ if key == "thing_dataset_id_to_contiguous_id":
+ setattr(metadata_new, key, cat_id_2_cont_id_injective)
+ else:
+ setattr(metadata_new, key, value)
+ self._metadata = metadata_new
diff --git a/densepose/evaluation/densepose_coco_evaluation.py b/densepose/evaluation/densepose_coco_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..6370ba2a1bce45493e3d4bebd05b1b449334871d
--- /dev/null
+++ b/densepose/evaluation/densepose_coco_evaluation.py
@@ -0,0 +1,1303 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# All rights reserved.
+#
+# This source code is licensed under the license found in the
+# LICENSE file in the root directory of this source tree.
+# This is a modified version of cocoeval.py where we also have the densepose evaluation.
+
+__author__ = "tsungyi"
+
+import copy
+import datetime
+import logging
+import numpy as np
+import pickle
+import time
+from collections import defaultdict
+from enum import Enum
+from typing import Any, Dict, Tuple
+import scipy.spatial.distance as ssd
+import torch
+import torch.nn.functional as F
+from pycocotools import mask as maskUtils
+from scipy.io import loadmat
+from scipy.ndimage import zoom as spzoom
+
+from detectron2.utils.file_io import PathManager
+
+from densepose.converters.chart_output_to_chart_result import resample_uv_tensors_to_bbox
+from densepose.converters.segm_to_mask import (
+ resample_coarse_segm_tensor_to_bbox,
+ resample_fine_and_coarse_segm_tensors_to_bbox,
+)
+from densepose.modeling.cse.utils import squared_euclidean_distance_matrix
+from densepose.structures import DensePoseDataRelative
+from densepose.structures.mesh import create_mesh
+
+logger = logging.getLogger(__name__)
+
+
+class DensePoseEvalMode(str, Enum):
+ # use both masks and geodesic distances (GPS * IOU) to compute scores
+ GPSM = "gpsm"
+ # use only geodesic distances (GPS) to compute scores
+ GPS = "gps"
+ # use only masks (IOU) to compute scores
+ IOU = "iou"
+
+
+class DensePoseDataMode(str, Enum):
+ # use estimated IUV data (default mode)
+ IUV_DT = "iuvdt"
+ # use ground truth IUV data
+ IUV_GT = "iuvgt"
+ # use ground truth labels I and set UV to 0
+ I_GT_UV_0 = "igtuv0"
+ # use ground truth labels I and estimated UV coordinates
+ I_GT_UV_DT = "igtuvdt"
+ # use estimated labels I and set UV to 0
+ I_DT_UV_0 = "idtuv0"
+
+
+class DensePoseCocoEval:
+ # Interface for evaluating detection on the Microsoft COCO dataset.
+ #
+ # The usage for CocoEval is as follows:
+ # cocoGt=..., cocoDt=... # load dataset and results
+ # E = CocoEval(cocoGt,cocoDt); # initialize CocoEval object
+ # E.params.recThrs = ...; # set parameters as desired
+ # E.evaluate(); # run per image evaluation
+ # E.accumulate(); # accumulate per image results
+ # E.summarize(); # display summary metrics of results
+ # For example usage see evalDemo.m and http://mscoco.org/.
+ #
+ # The evaluation parameters are as follows (defaults in brackets):
+ # imgIds - [all] N img ids to use for evaluation
+ # catIds - [all] K cat ids to use for evaluation
+ # iouThrs - [.5:.05:.95] T=10 IoU thresholds for evaluation
+ # recThrs - [0:.01:1] R=101 recall thresholds for evaluation
+ # areaRng - [...] A=4 object area ranges for evaluation
+ # maxDets - [1 10 100] M=3 thresholds on max detections per image
+ # iouType - ['segm'] set iouType to 'segm', 'bbox', 'keypoints' or 'densepose'
+ # iouType replaced the now DEPRECATED useSegm parameter.
+ # useCats - [1] if true use category labels for evaluation
+ # Note: if useCats=0 category labels are ignored as in proposal scoring.
+ # Note: multiple areaRngs [Ax2] and maxDets [Mx1] can be specified.
+ #
+ # evaluate(): evaluates detections on every image and every category and
+ # concats the results into the "evalImgs" with fields:
+ # dtIds - [1xD] id for each of the D detections (dt)
+ # gtIds - [1xG] id for each of the G ground truths (gt)
+ # dtMatches - [TxD] matching gt id at each IoU or 0
+ # gtMatches - [TxG] matching dt id at each IoU or 0
+ # dtScores - [1xD] confidence of each dt
+ # gtIgnore - [1xG] ignore flag for each gt
+ # dtIgnore - [TxD] ignore flag for each dt at each IoU
+ #
+ # accumulate(): accumulates the per-image, per-category evaluation
+ # results in "evalImgs" into the dictionary "eval" with fields:
+ # params - parameters used for evaluation
+ # date - date evaluation was performed
+ # counts - [T,R,K,A,M] parameter dimensions (see above)
+ # precision - [TxRxKxAxM] precision for every evaluation setting
+ # recall - [TxKxAxM] max recall for every evaluation setting
+ # Note: precision and recall==-1 for settings with no gt objects.
+ #
+ # See also coco, mask, pycocoDemo, pycocoEvalDemo
+ #
+ # Microsoft COCO Toolbox. version 2.0
+ # Data, paper, and tutorials available at: http://mscoco.org/
+ # Code written by Piotr Dollar and Tsung-Yi Lin, 2015.
+ # Licensed under the Simplified BSD License [see coco/license.txt]
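+    #
+    # The same flow as a Python sketch (illustrative; `coco_gt` and `coco_dt` are
+    # assumed to be pycocotools COCO objects holding ground truth and detections):
+    #   E = DensePoseCocoEval(coco_gt, coco_dt, iouType="densepose")
+    #   E.evaluate()    # per-image, per-category evaluation
+    #   E.accumulate()  # aggregate per-image results
+    #   E.summarize()   # log summary metrics and fill E.stats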
+ def __init__(
+ self,
+ cocoGt=None,
+ cocoDt=None,
+ iouType: str = "densepose",
+ multi_storage=None,
+ embedder=None,
+ dpEvalMode: DensePoseEvalMode = DensePoseEvalMode.GPS,
+ dpDataMode: DensePoseDataMode = DensePoseDataMode.IUV_DT,
+ ):
+ """
+ Initialize CocoEval using coco APIs for gt and dt
+ :param cocoGt: coco object with ground truth annotations
+ :param cocoDt: coco object with detection results
+ :return: None
+ """
+ self.cocoGt = cocoGt # ground truth COCO API
+ self.cocoDt = cocoDt # detections COCO API
+ self.multi_storage = multi_storage
+ self.embedder = embedder
+ self._dpEvalMode = dpEvalMode
+ self._dpDataMode = dpDataMode
+ self.evalImgs = defaultdict(list) # per-image per-category eval results [KxAxI]
+ self.eval = {} # accumulated evaluation results
+ self._gts = defaultdict(list) # gt for evaluation
+ self._dts = defaultdict(list) # dt for evaluation
+ self.params = Params(iouType=iouType) # parameters
+ self._paramsEval = {} # parameters for evaluation
+ self.stats = [] # result summarization
+ self.ious = {} # ious between all gts and dts
+ if cocoGt is not None:
+ self.params.imgIds = sorted(cocoGt.getImgIds())
+ self.params.catIds = sorted(cocoGt.getCatIds())
+ self.ignoreThrBB = 0.7
+ self.ignoreThrUV = 0.9
+
+ def _loadGEval(self):
+ smpl_subdiv_fpath = PathManager.get_local_path(
+ "https://dl.fbaipublicfiles.com/densepose/data/SMPL_subdiv.mat"
+ )
+ pdist_transform_fpath = PathManager.get_local_path(
+ "https://dl.fbaipublicfiles.com/densepose/data/SMPL_SUBDIV_TRANSFORM.mat"
+ )
+ pdist_matrix_fpath = PathManager.get_local_path(
+ "https://dl.fbaipublicfiles.com/densepose/data/Pdist_matrix.pkl", timeout_sec=120
+ )
+ SMPL_subdiv = loadmat(smpl_subdiv_fpath)
+ self.PDIST_transform = loadmat(pdist_transform_fpath)
+ self.PDIST_transform = self.PDIST_transform["index"].squeeze()
+ UV = np.array([SMPL_subdiv["U_subdiv"], SMPL_subdiv["V_subdiv"]]).squeeze()
+ ClosestVertInds = np.arange(UV.shape[1]) + 1
+ self.Part_UVs = []
+ self.Part_ClosestVertInds = []
+ for i in np.arange(24):
+ self.Part_UVs.append(UV[:, SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)])
+ self.Part_ClosestVertInds.append(
+ ClosestVertInds[SMPL_subdiv["Part_ID_subdiv"].squeeze() == (i + 1)]
+ )
+
+ with open(pdist_matrix_fpath, "rb") as hFile:
+ arrays = pickle.load(hFile, encoding="latin1")
+ self.Pdist_matrix = arrays["Pdist_matrix"]
+ self.Part_ids = np.array(SMPL_subdiv["Part_ID_subdiv"].squeeze())
+ # Mean geodesic distances for parts.
+ self.Mean_Distances = np.array([0, 0.351, 0.107, 0.126, 0.237, 0.173, 0.142, 0.128, 0.150])
+ # Coarse Part labels.
+ self.CoarseParts = np.array(
+ [0, 1, 1, 2, 2, 3, 3, 4, 4, 4, 4, 5, 5, 5, 5, 6, 6, 6, 6, 7, 7, 7, 7, 8, 8]
+ )
+
+ def _prepare(self):
+ """
+ Prepare ._gts and ._dts for evaluation based on params
+ :return: None
+ """
+
+ def _toMask(anns, coco):
+ # modify ann['segmentation'] by reference
+ for ann in anns:
+ # safeguard for invalid segmentation annotation;
+ # annotations containing empty lists exist in the posetrack
+ # dataset. This is not a correct segmentation annotation
+ # in terms of COCO format; we need to deal with it somehow
+ segm = ann["segmentation"]
+ if type(segm) == list and len(segm) == 0:
+ ann["segmentation"] = None
+ continue
+ rle = coco.annToRLE(ann)
+ ann["segmentation"] = rle
+
+ def _getIgnoreRegion(iid, coco):
+ img = coco.imgs[iid]
+
+ if "ignore_regions_x" not in img.keys():
+ return None
+
+ if len(img["ignore_regions_x"]) == 0:
+ return None
+
+ rgns_merged = [
+ [v for xy in zip(region_x, region_y) for v in xy]
+ for region_x, region_y in zip(img["ignore_regions_x"], img["ignore_regions_y"])
+ ]
+ rles = maskUtils.frPyObjects(rgns_merged, img["height"], img["width"])
+ rle = maskUtils.merge(rles)
+ return maskUtils.decode(rle)
+
+ def _checkIgnore(dt, iregion):
+ if iregion is None:
+ return True
+
+ bb = np.array(dt["bbox"]).astype(int)
+ x1, y1, x2, y2 = bb[0], bb[1], bb[0] + bb[2], bb[1] + bb[3]
+ x2 = min([x2, iregion.shape[1]])
+ y2 = min([y2, iregion.shape[0]])
+
+ if bb[2] * bb[3] == 0:
+ return False
+
+ crop_iregion = iregion[y1:y2, x1:x2]
+
+ if crop_iregion.sum() == 0:
+ return True
+
+ if "densepose" not in dt.keys(): # filtering boxes
+ return crop_iregion.sum() / bb[2] / bb[3] < self.ignoreThrBB
+
+ # filtering UVs
+ ignoremask = np.require(crop_iregion, requirements=["F"])
+ mask = self._extract_mask(dt)
+ uvmask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"])
+ uvmask_ = maskUtils.encode(uvmask)
+ ignoremask_ = maskUtils.encode(ignoremask)
+ uviou = maskUtils.iou([uvmask_], [ignoremask_], [1])[0]
+ return uviou < self.ignoreThrUV
+
+ p = self.params
+
+ if p.useCats:
+ gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
+ dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds, catIds=p.catIds))
+ else:
+ gts = self.cocoGt.loadAnns(self.cocoGt.getAnnIds(imgIds=p.imgIds))
+ dts = self.cocoDt.loadAnns(self.cocoDt.getAnnIds(imgIds=p.imgIds))
+
+ imns = self.cocoGt.loadImgs(p.imgIds)
+ self.size_mapping = {}
+ for im in imns:
+ self.size_mapping[im["id"]] = [im["height"], im["width"]]
+
+ # if iouType == 'uv', add point gt annotations
+ if p.iouType == "densepose":
+ self._loadGEval()
+
+ # convert ground truth to mask if iouType == 'segm'
+ if p.iouType == "segm":
+ _toMask(gts, self.cocoGt)
+ _toMask(dts, self.cocoDt)
+
+ # set ignore flag
+ for gt in gts:
+ gt["ignore"] = gt["ignore"] if "ignore" in gt else 0
+ gt["ignore"] = "iscrowd" in gt and gt["iscrowd"]
+ if p.iouType == "keypoints":
+ gt["ignore"] = (gt["num_keypoints"] == 0) or gt["ignore"]
+ if p.iouType == "densepose":
+                gt["ignore"] = "dp_x" not in gt
+ if p.iouType == "segm":
+ gt["ignore"] = gt["segmentation"] is None
+
+ self._gts = defaultdict(list) # gt for evaluation
+ self._dts = defaultdict(list) # dt for evaluation
+ self._igrgns = defaultdict(list)
+
+ for gt in gts:
+ iid = gt["image_id"]
+ if iid not in self._igrgns.keys():
+ self._igrgns[iid] = _getIgnoreRegion(iid, self.cocoGt)
+ if _checkIgnore(gt, self._igrgns[iid]):
+ self._gts[iid, gt["category_id"]].append(gt)
+ for dt in dts:
+ iid = dt["image_id"]
+ if (iid not in self._igrgns) or _checkIgnore(dt, self._igrgns[iid]):
+ self._dts[iid, dt["category_id"]].append(dt)
+
+ self.evalImgs = defaultdict(list) # per-image per-category evaluation results
+ self.eval = {} # accumulated evaluation results
+
+ def evaluate(self):
+ """
+ Run per image evaluation on given images and store results (a list of dict) in self.evalImgs
+ :return: None
+ """
+ tic = time.time()
+ logger.info("Running per image DensePose evaluation... {}".format(self.params.iouType))
+ p = self.params
+ # add backward compatibility if useSegm is specified in params
+ if p.useSegm is not None:
+ p.iouType = "segm" if p.useSegm == 1 else "bbox"
+ logger.info("useSegm (deprecated) is not None. Running DensePose evaluation")
+ p.imgIds = list(np.unique(p.imgIds))
+ if p.useCats:
+ p.catIds = list(np.unique(p.catIds))
+ p.maxDets = sorted(p.maxDets)
+ self.params = p
+
+ self._prepare()
+ # loop through images, area range, max detection number
+ catIds = p.catIds if p.useCats else [-1]
+
+ if p.iouType in ["segm", "bbox"]:
+ computeIoU = self.computeIoU
+ elif p.iouType == "keypoints":
+ computeIoU = self.computeOks
+ elif p.iouType == "densepose":
+ computeIoU = self.computeOgps
+ if self._dpEvalMode in {DensePoseEvalMode.GPSM, DensePoseEvalMode.IOU}:
+ self.real_ious = {
+ (imgId, catId): self.computeDPIoU(imgId, catId)
+ for imgId in p.imgIds
+ for catId in catIds
+ }
+
+ self.ious = {
+ (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
+ }
+
+ evaluateImg = self.evaluateImg
+ maxDet = p.maxDets[-1]
+ self.evalImgs = [
+ evaluateImg(imgId, catId, areaRng, maxDet)
+ for catId in catIds
+ for areaRng in p.areaRng
+ for imgId in p.imgIds
+ ]
+ self._paramsEval = copy.deepcopy(self.params)
+ toc = time.time()
+ logger.info("DensePose evaluation DONE (t={:0.2f}s).".format(toc - tic))
+
+ def getDensePoseMask(self, polys):
+ maskGen = np.zeros([256, 256])
+ stop = min(len(polys) + 1, 15)
+ for i in range(1, stop):
+ if polys[i - 1]:
+ currentMask = maskUtils.decode(polys[i - 1])
+ maskGen[currentMask > 0] = i
+ return maskGen
+
+ def _generate_rlemask_on_image(self, mask, imgId, data):
+ bbox_xywh = np.array(data["bbox"])
+ x, y, w, h = bbox_xywh
+ im_h, im_w = self.size_mapping[imgId]
+ im_mask = np.zeros((im_h, im_w), dtype=np.uint8)
+ if mask is not None:
+ x0 = max(int(x), 0)
+ x1 = min(int(x + w), im_w, int(x) + mask.shape[1])
+ y0 = max(int(y), 0)
+ y1 = min(int(y + h), im_h, int(y) + mask.shape[0])
+ y = int(y)
+ x = int(x)
+ im_mask[y0:y1, x0:x1] = mask[y0 - y : y1 - y, x0 - x : x1 - x]
+ im_mask = np.require(np.asarray(im_mask > 0), dtype=np.uint8, requirements=["F"])
+ rle_mask = maskUtils.encode(np.array(im_mask[:, :, np.newaxis], order="F"))[0]
+ return rle_mask
+
+ def computeDPIoU(self, imgId, catId):
+ p = self.params
+ if p.useCats:
+ gt = self._gts[imgId, catId]
+ dt = self._dts[imgId, catId]
+ else:
+ gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
+ dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
+ if len(gt) == 0 and len(dt) == 0:
+ return []
+ inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
+ dt = [dt[i] for i in inds]
+ if len(dt) > p.maxDets[-1]:
+ dt = dt[0 : p.maxDets[-1]]
+
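+        # Convert GT annotations (DensePose part masks or COCO segmentations) and
+        # detections to full-image RLE masks so mask IoU can be computed uniformly.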
+ gtmasks = []
+ for g in gt:
+ if DensePoseDataRelative.S_KEY in g:
+ # convert DensePose mask to a binary mask
+ mask = np.minimum(self.getDensePoseMask(g[DensePoseDataRelative.S_KEY]), 1.0)
+ _, _, w, h = g["bbox"]
+ scale_x = float(max(w, 1)) / mask.shape[1]
+ scale_y = float(max(h, 1)) / mask.shape[0]
+ mask = spzoom(mask, (scale_y, scale_x), order=1, prefilter=False)
+ mask = np.array(mask > 0.5, dtype=np.uint8)
+ rle_mask = self._generate_rlemask_on_image(mask, imgId, g)
+ elif "segmentation" in g:
+ segmentation = g["segmentation"]
+ if isinstance(segmentation, list) and segmentation:
+ # polygons
+ im_h, im_w = self.size_mapping[imgId]
+ rles = maskUtils.frPyObjects(segmentation, im_h, im_w)
+ rle_mask = maskUtils.merge(rles)
+ elif isinstance(segmentation, dict):
+ if isinstance(segmentation["counts"], list):
+ # uncompressed RLE
+ im_h, im_w = self.size_mapping[imgId]
+ rle_mask = maskUtils.frPyObjects(segmentation, im_h, im_w)
+ else:
+ # compressed RLE
+ rle_mask = segmentation
+ else:
+ rle_mask = self._generate_rlemask_on_image(None, imgId, g)
+ else:
+ rle_mask = self._generate_rlemask_on_image(None, imgId, g)
+ gtmasks.append(rle_mask)
+
+ dtmasks = []
+ for d in dt:
+ mask = self._extract_mask(d)
+ mask = np.require(np.asarray(mask > 0), dtype=np.uint8, requirements=["F"])
+ rle_mask = self._generate_rlemask_on_image(mask, imgId, d)
+ dtmasks.append(rle_mask)
+
+ # compute iou between each dt and gt region
+ iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
+ iousDP = maskUtils.iou(dtmasks, gtmasks, iscrowd)
+ return iousDP
+
+ def computeIoU(self, imgId, catId):
+ p = self.params
+ if p.useCats:
+ gt = self._gts[imgId, catId]
+ dt = self._dts[imgId, catId]
+ else:
+ gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
+ dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
+ if len(gt) == 0 and len(dt) == 0:
+ return []
+ inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
+ dt = [dt[i] for i in inds]
+ if len(dt) > p.maxDets[-1]:
+ dt = dt[0 : p.maxDets[-1]]
+
+ if p.iouType == "segm":
+ g = [g["segmentation"] for g in gt if g["segmentation"] is not None]
+ d = [d["segmentation"] for d in dt if d["segmentation"] is not None]
+ elif p.iouType == "bbox":
+ g = [g["bbox"] for g in gt]
+ d = [d["bbox"] for d in dt]
+ else:
+ raise Exception("unknown iouType for iou computation")
+
+ # compute iou between each dt and gt region
+ iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
+ ious = maskUtils.iou(d, g, iscrowd)
+ return ious
+
+ def computeOks(self, imgId, catId):
+ p = self.params
+ # dimension here should be Nxm
+ gts = self._gts[imgId, catId]
+ dts = self._dts[imgId, catId]
+ inds = np.argsort([-d["score"] for d in dts], kind="mergesort")
+ dts = [dts[i] for i in inds]
+ if len(dts) > p.maxDets[-1]:
+ dts = dts[0 : p.maxDets[-1]]
+ # if len(gts) == 0 and len(dts) == 0:
+ if len(gts) == 0 or len(dts) == 0:
+ return []
+ ious = np.zeros((len(dts), len(gts)))
+ sigmas = (
+ np.array(
+ [
+ 0.26,
+ 0.25,
+ 0.25,
+ 0.35,
+ 0.35,
+ 0.79,
+ 0.79,
+ 0.72,
+ 0.72,
+ 0.62,
+ 0.62,
+ 1.07,
+ 1.07,
+ 0.87,
+ 0.87,
+ 0.89,
+ 0.89,
+ ]
+ )
+ / 10.0
+ )
+ vars = (sigmas * 2) ** 2
+ k = len(sigmas)
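+        # Per-keypoint similarity is exp(-d^2 / (2 * s^2 * kappa^2)) with d the
+        # keypoint distance, s^2 the object area and kappa = 2 * sigma (hence
+        # vars = (2 * sigmas) ** 2); OKS averages this over visible keypoints.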
+ # compute oks between each detection and ground truth object
+ for j, gt in enumerate(gts):
+            # create bounds for ignore regions (double the gt bbox)
+ g = np.array(gt["keypoints"])
+ xg = g[0::3]
+ yg = g[1::3]
+ vg = g[2::3]
+ k1 = np.count_nonzero(vg > 0)
+ bb = gt["bbox"]
+ x0 = bb[0] - bb[2]
+ x1 = bb[0] + bb[2] * 2
+ y0 = bb[1] - bb[3]
+ y1 = bb[1] + bb[3] * 2
+ for i, dt in enumerate(dts):
+ d = np.array(dt["keypoints"])
+ xd = d[0::3]
+ yd = d[1::3]
+ if k1 > 0:
+ # measure the per-keypoint distance if keypoints visible
+ dx = xd - xg
+ dy = yd - yg
+ else:
+ # measure minimum distance to keypoints in (x0,y0) & (x1,y1)
+ z = np.zeros(k)
+ dx = np.max((z, x0 - xd), axis=0) + np.max((z, xd - x1), axis=0)
+ dy = np.max((z, y0 - yd), axis=0) + np.max((z, yd - y1), axis=0)
+ e = (dx**2 + dy**2) / vars / (gt["area"] + np.spacing(1)) / 2
+ if k1 > 0:
+ e = e[vg > 0]
+ ious[i, j] = np.sum(np.exp(-e)) / e.shape[0]
+ return ious
+
+ def _extract_mask(self, dt: Dict[str, Any]) -> np.ndarray:
+ if "densepose" in dt:
+ densepose_results_quantized = dt["densepose"]
+ return densepose_results_quantized.labels_uv_uint8[0].numpy()
+ elif "cse_mask" in dt:
+ return dt["cse_mask"]
+ elif "coarse_segm" in dt:
+ dy = max(int(dt["bbox"][3]), 1)
+ dx = max(int(dt["bbox"][2]), 1)
+ return (
+ F.interpolate(
+ dt["coarse_segm"].unsqueeze(0),
+ (dy, dx),
+ mode="bilinear",
+ align_corners=False,
+ )
+ .squeeze(0)
+ .argmax(0)
+ .numpy()
+ .astype(np.uint8)
+ )
+ elif "record_id" in dt:
+ assert (
+ self.multi_storage is not None
+ ), f"Storage record id encountered in a detection {dt}, but no storage provided!"
+ record = self.multi_storage.get(dt["rank"], dt["record_id"])
+ coarse_segm = record["coarse_segm"]
+ dy = max(int(dt["bbox"][3]), 1)
+ dx = max(int(dt["bbox"][2]), 1)
+ return (
+ F.interpolate(
+ coarse_segm.unsqueeze(0),
+ (dy, dx),
+ mode="bilinear",
+ align_corners=False,
+ )
+ .squeeze(0)
+ .argmax(0)
+ .numpy()
+ .astype(np.uint8)
+ )
+        else:
+            raise Exception(f"No mask data in the detection: {dt}")
+
+ def _extract_iuv(
+ self, densepose_data: np.ndarray, py: np.ndarray, px: np.ndarray, gt: Dict[str, Any]
+ ) -> Tuple[np.ndarray, np.ndarray, np.ndarray]:
+ """
+ Extract arrays of I, U and V values at given points as numpy arrays
+ given the data mode stored in self._dpDataMode
+ """
+ if self._dpDataMode == DensePoseDataMode.IUV_DT:
+ # estimated labels and UV (default)
+ ipoints = densepose_data[0, py, px]
+ upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255.
+ vpoints = densepose_data[2, py, px] / 255.0
+ elif self._dpDataMode == DensePoseDataMode.IUV_GT:
+ # ground truth
+ ipoints = np.array(gt["dp_I"])
+ upoints = np.array(gt["dp_U"])
+ vpoints = np.array(gt["dp_V"])
+        elif self._dpDataMode == DensePoseDataMode.I_GT_UV_0:
+            # ground truth labels, UV = 0
+            ipoints = np.array(gt["dp_I"])
+            upoints = np.zeros_like(ipoints, dtype=float)
+            vpoints = np.zeros_like(ipoints, dtype=float)
+ elif self._dpDataMode == DensePoseDataMode.I_GT_UV_DT:
+ # ground truth labels, estimated UV
+ ipoints = np.array(gt["dp_I"])
+ upoints = densepose_data[1, py, px] / 255.0 # convert from uint8 by /255.
+ vpoints = densepose_data[2, py, px] / 255.0
+        elif self._dpDataMode == DensePoseDataMode.I_DT_UV_0:
+            # estimated labels, UV = 0
+            ipoints = densepose_data[0, py, px]
+            upoints = np.zeros_like(ipoints, dtype=float)
+            vpoints = np.zeros_like(ipoints, dtype=float)
+ else:
+ raise ValueError(f"Unknown data mode: {self._dpDataMode}")
+ return ipoints, upoints, vpoints
+
+ def computeOgps_single_pair(self, dt, gt, py, px, pt_mask):
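+        # Dispatch on the detection format: quantized IUV results ("densepose"),
+        # raw chart outputs ("u"/"v" plus segmentations), storage-backed records
+        # ("record_id"), or CSE embeddings ("embedding").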
+ if "densepose" in dt:
+ ipoints, upoints, vpoints = self.extract_iuv_from_quantized(dt, gt, py, px, pt_mask)
+ return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints)
+ elif "u" in dt:
+ ipoints, upoints, vpoints = self.extract_iuv_from_raw(dt, gt, py, px, pt_mask)
+ return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints)
+ elif "record_id" in dt:
+ assert (
+ self.multi_storage is not None
+ ), f"Storage record id encountered in detection {dt}, but no storage provided!"
+ record = self.multi_storage.get(dt["rank"], dt["record_id"])
+ record["bbox"] = dt["bbox"]
+ if "u" in record:
+ ipoints, upoints, vpoints = self.extract_iuv_from_raw(record, gt, py, px, pt_mask)
+ return self.computeOgps_single_pair_iuv(dt, gt, ipoints, upoints, vpoints)
+ elif "embedding" in record:
+ return self.computeOgps_single_pair_cse(
+ dt,
+ gt,
+ py,
+ px,
+ pt_mask,
+ record["coarse_segm"],
+ record["embedding"],
+ record["bbox"],
+ )
+ else:
+ raise Exception(f"Unknown record format: {record}")
+ elif "embedding" in dt:
+ return self.computeOgps_single_pair_cse(
+ dt, gt, py, px, pt_mask, dt["coarse_segm"], dt["embedding"], dt["bbox"]
+ )
+ raise Exception(f"Unknown detection format: {dt}")
+
+ def extract_iuv_from_quantized(self, dt, gt, py, px, pt_mask):
+ densepose_results_quantized = dt["densepose"]
+ ipoints, upoints, vpoints = self._extract_iuv(
+ densepose_results_quantized.labels_uv_uint8.numpy(), py, px, gt
+ )
+ ipoints[pt_mask == -1] = 0
+ return ipoints, upoints, vpoints
+
+ def extract_iuv_from_raw(self, dt, gt, py, px, pt_mask):
+ labels_dt = resample_fine_and_coarse_segm_tensors_to_bbox(
+ dt["fine_segm"].unsqueeze(0),
+ dt["coarse_segm"].unsqueeze(0),
+ dt["bbox"],
+ )
+ uv = resample_uv_tensors_to_bbox(
+ dt["u"].unsqueeze(0), dt["v"].unsqueeze(0), labels_dt.squeeze(0), dt["bbox"]
+ )
+ labels_uv_uint8 = torch.cat((labels_dt.byte(), (uv * 255).clamp(0, 255).byte()))
+ ipoints, upoints, vpoints = self._extract_iuv(labels_uv_uint8.numpy(), py, px, gt)
+ ipoints[pt_mask == -1] = 0
+ return ipoints, upoints, vpoints
+
+ def computeOgps_single_pair_iuv(self, dt, gt, ipoints, upoints, vpoints):
+ cVertsGT, ClosestVertsGTTransformed = self.findAllClosestVertsGT(gt)
+ cVerts = self.findAllClosestVertsUV(upoints, vpoints, ipoints)
+ # Get pairwise geodesic distances between gt and estimated mesh points.
+ dist = self.getDistancesUV(ClosestVertsGTTransformed, cVerts)
+ # Compute the Ogps measure.
+ # Find the mean geodesic normalization distance for
+ # each GT point, based on which part it is on.
+ Current_Mean_Distances = self.Mean_Distances[
+ self.CoarseParts[self.Part_ids[cVertsGT[cVertsGT > 0].astype(int) - 1]]
+ ]
+ return dist, Current_Mean_Distances
+
+ def computeOgps_single_pair_cse(
+ self, dt, gt, py, px, pt_mask, coarse_segm, embedding, bbox_xywh_abs
+ ):
+ # 0-based mesh vertex indices
+ cVertsGT = torch.as_tensor(gt["dp_vertex"], dtype=torch.int64)
+ # label for each pixel of the bbox, [H, W] tensor of long
+ labels_dt = resample_coarse_segm_tensor_to_bbox(
+ coarse_segm.unsqueeze(0), bbox_xywh_abs
+ ).squeeze(0)
+ x, y, w, h = bbox_xywh_abs
+ # embedding for each pixel of the bbox, [D, H, W] tensor of float32
+ embedding = F.interpolate(
+ embedding.unsqueeze(0), (int(h), int(w)), mode="bilinear", align_corners=False
+ ).squeeze(0)
+ # valid locations py, px
+ py_pt = torch.from_numpy(py[pt_mask > -1])
+ px_pt = torch.from_numpy(px[pt_mask > -1])
+ cVerts = torch.ones_like(cVertsGT) * -1
+ cVerts[pt_mask > -1] = self.findClosestVertsCse(
+ embedding, py_pt, px_pt, labels_dt, gt["ref_model"]
+ )
+ # Get pairwise geodesic distances between gt and estimated mesh points.
+ dist = self.getDistancesCse(cVertsGT, cVerts, gt["ref_model"])
+ # normalize distances
+ if (gt["ref_model"] == "smpl_27554") and ("dp_I" in gt):
+ Current_Mean_Distances = self.Mean_Distances[
+ self.CoarseParts[np.array(gt["dp_I"], dtype=int)]
+ ]
+ else:
+ Current_Mean_Distances = 0.255
+ return dist, Current_Mean_Distances
+
+ def computeOgps(self, imgId, catId):
+ p = self.params
+ # dimension here should be Nxm
+ g = self._gts[imgId, catId]
+ d = self._dts[imgId, catId]
+ inds = np.argsort([-d_["score"] for d_ in d], kind="mergesort")
+ d = [d[i] for i in inds]
+ if len(d) > p.maxDets[-1]:
+ d = d[0 : p.maxDets[-1]]
+ # if len(gts) == 0 and len(dts) == 0:
+ if len(g) == 0 or len(d) == 0:
+ return []
+ ious = np.zeros((len(d), len(g)))
+ # compute opgs between each detection and ground truth object
+ # sigma = self.sigma #0.255 # dist = 0.3m corresponds to ogps = 0.5
+ # 1 # dist = 0.3m corresponds to ogps = 0.96
+ # 1.45 # dist = 1.7m (person height) corresponds to ogps = 0.5)
+ for j, gt in enumerate(g):
+ if not gt["ignore"]:
+ g_ = gt["bbox"]
+ for i, dt in enumerate(d):
+ #
+ dy = int(dt["bbox"][3])
+ dx = int(dt["bbox"][2])
+ dp_x = np.array(gt["dp_x"]) * g_[2] / 255.0
+ dp_y = np.array(gt["dp_y"]) * g_[3] / 255.0
+ py = (dp_y + g_[1] - dt["bbox"][1]).astype(int)
+ px = (dp_x + g_[0] - dt["bbox"][0]).astype(int)
+ #
+ pts = np.zeros(len(px))
+ pts[px >= dx] = -1
+ pts[py >= dy] = -1
+ pts[px < 0] = -1
+ pts[py < 0] = -1
+ if len(pts) < 1:
+ ogps = 0.0
+ elif np.max(pts) == -1:
+ ogps = 0.0
+ else:
+ px[pts == -1] = 0
+ py[pts == -1] = 0
+ dists_between_matches, dist_norm_coeffs = self.computeOgps_single_pair(
+ dt, gt, py, px, pts
+ )
+ # Compute gps
+ ogps_values = np.exp(
+ -(dists_between_matches**2) / (2 * (dist_norm_coeffs**2))
+ )
+ #
+ ogps = np.mean(ogps_values) if len(ogps_values) > 0 else 0.0
+ ious[i, j] = ogps
+
+ gbb = [gt["bbox"] for gt in g]
+ dbb = [dt["bbox"] for dt in d]
+
+ # compute iou between each dt and gt region
+ iscrowd = [int(o.get("iscrowd", 0)) for o in g]
+ ious_bb = maskUtils.iou(dbb, gbb, iscrowd)
+ return ious, ious_bb
+
+ def evaluateImg(self, imgId, catId, aRng, maxDet):
+ """
+ perform evaluation for single category and image
+ :return: dict (single image results)
+ """
+
+ p = self.params
+ if p.useCats:
+ gt = self._gts[imgId, catId]
+ dt = self._dts[imgId, catId]
+ else:
+ gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
+ dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
+ if len(gt) == 0 and len(dt) == 0:
+ return None
+
+ for g in gt:
+ # g['_ignore'] = g['ignore']
+ if g["ignore"] or (g["area"] < aRng[0] or g["area"] > aRng[1]):
+ g["_ignore"] = True
+ else:
+ g["_ignore"] = False
+
+ # sort dt highest score first, sort gt ignore last
+ gtind = np.argsort([g["_ignore"] for g in gt], kind="mergesort")
+ gt = [gt[i] for i in gtind]
+ dtind = np.argsort([-d["score"] for d in dt], kind="mergesort")
+ dt = [dt[i] for i in dtind[0:maxDet]]
+ iscrowd = [int(o.get("iscrowd", 0)) for o in gt]
+ # load computed ious
+ if p.iouType == "densepose":
+ # print('Checking the length', len(self.ious[imgId, catId]))
+ # if len(self.ious[imgId, catId]) == 0:
+ # print(self.ious[imgId, catId])
+ ious = (
+ self.ious[imgId, catId][0][:, gtind]
+ if len(self.ious[imgId, catId]) > 0
+ else self.ious[imgId, catId]
+ )
+ ioubs = (
+ self.ious[imgId, catId][1][:, gtind]
+ if len(self.ious[imgId, catId]) > 0
+ else self.ious[imgId, catId]
+ )
+ if self._dpEvalMode in {DensePoseEvalMode.GPSM, DensePoseEvalMode.IOU}:
+ iousM = (
+ self.real_ious[imgId, catId][:, gtind]
+ if len(self.real_ious[imgId, catId]) > 0
+ else self.real_ious[imgId, catId]
+ )
+ else:
+ ious = (
+ self.ious[imgId, catId][:, gtind]
+ if len(self.ious[imgId, catId]) > 0
+ else self.ious[imgId, catId]
+ )
+
+ T = len(p.iouThrs)
+ G = len(gt)
+ D = len(dt)
+ gtm = np.zeros((T, G))
+ dtm = np.zeros((T, D))
+ gtIg = np.array([g["_ignore"] for g in gt])
+ dtIg = np.zeros((T, D))
+ if np.all(gtIg) and p.iouType == "densepose":
+ dtIg = np.logical_or(dtIg, True)
+
+ if len(ious) > 0: # and not p.iouType == 'densepose':
+ for tind, t in enumerate(p.iouThrs):
+ for dind, d in enumerate(dt):
+ # information about best match so far (m=-1 -> unmatched)
+ iou = min([t, 1 - 1e-10])
+ m = -1
+ for gind, _g in enumerate(gt):
+ # if this gt already matched, and not a crowd, continue
+ if gtm[tind, gind] > 0 and not iscrowd[gind]:
+ continue
+ # if dt matched to reg gt, and on ignore gt, stop
+ if m > -1 and gtIg[m] == 0 and gtIg[gind] == 1:
+ break
+ if p.iouType == "densepose":
+ if self._dpEvalMode == DensePoseEvalMode.GPSM:
+ new_iou = np.sqrt(iousM[dind, gind] * ious[dind, gind])
+ elif self._dpEvalMode == DensePoseEvalMode.IOU:
+ new_iou = iousM[dind, gind]
+ elif self._dpEvalMode == DensePoseEvalMode.GPS:
+ new_iou = ious[dind, gind]
+ else:
+ new_iou = ious[dind, gind]
+ if new_iou < iou:
+ continue
+ if new_iou == 0.0:
+ continue
+ # if match successful and best so far, store appropriately
+ iou = new_iou
+ m = gind
+ # if match made store id of match for both dt and gt
+ if m == -1:
+ continue
+ dtIg[tind, dind] = gtIg[m]
+ dtm[tind, dind] = gt[m]["id"]
+ gtm[tind, m] = d["id"]
+
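+                # Second pass (densepose only): detections left unmatched above are
+                # matched to GT boxes by bbox IoU (threshold 0.8) and inherit the
+                # ignore flag of that GT, so detections overlapping ignored GT are
+                # not counted as false positives.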
+ if p.iouType == "densepose":
+ if not len(ioubs) == 0:
+ for dind, d in enumerate(dt):
+ # information about best match so far (m=-1 -> unmatched)
+ if dtm[tind, dind] == 0:
+ ioub = 0.8
+ m = -1
+ for gind, _g in enumerate(gt):
+ # if this gt already matched, and not a crowd, continue
+ if gtm[tind, gind] > 0 and not iscrowd[gind]:
+ continue
+ # continue to next gt unless better match made
+ if ioubs[dind, gind] < ioub:
+ continue
+ # if match successful and best so far, store appropriately
+ ioub = ioubs[dind, gind]
+ m = gind
+ # if match made store id of match for both dt and gt
+ if m > -1:
+ dtIg[:, dind] = gtIg[m]
+ if gtIg[m]:
+ dtm[tind, dind] = gt[m]["id"]
+ gtm[tind, m] = d["id"]
+ # set unmatched detections outside of area range to ignore
+ a = np.array([d["area"] < aRng[0] or d["area"] > aRng[1] for d in dt]).reshape((1, len(dt)))
+ dtIg = np.logical_or(dtIg, np.logical_and(dtm == 0, np.repeat(a, T, 0)))
+ # store results for given image and category
+ # print('Done with the function', len(self.ious[imgId, catId]))
+ return {
+ "image_id": imgId,
+ "category_id": catId,
+ "aRng": aRng,
+ "maxDet": maxDet,
+ "dtIds": [d["id"] for d in dt],
+ "gtIds": [g["id"] for g in gt],
+ "dtMatches": dtm,
+ "gtMatches": gtm,
+ "dtScores": [d["score"] for d in dt],
+ "gtIgnore": gtIg,
+ "dtIgnore": dtIg,
+ }
+
+ def accumulate(self, p=None):
+ """
+ Accumulate per image evaluation results and store the result in self.eval
+ :param p: input params for evaluation
+ :return: None
+ """
+ logger.info("Accumulating evaluation results...")
+ tic = time.time()
+ if not self.evalImgs:
+ logger.info("Please run evaluate() first")
+ # allows input customized parameters
+ if p is None:
+ p = self.params
+ p.catIds = p.catIds if p.useCats == 1 else [-1]
+ T = len(p.iouThrs)
+ R = len(p.recThrs)
+ K = len(p.catIds) if p.useCats else 1
+ A = len(p.areaRng)
+ M = len(p.maxDets)
+ precision = -(np.ones((T, R, K, A, M))) # -1 for the precision of absent categories
+ recall = -(np.ones((T, K, A, M)))
+
+ # create dictionary for future indexing
+ logger.info("Categories: {}".format(p.catIds))
+ _pe = self._paramsEval
+ catIds = _pe.catIds if _pe.useCats else [-1]
+ setK = set(catIds)
+ setA = set(map(tuple, _pe.areaRng))
+ setM = set(_pe.maxDets)
+ setI = set(_pe.imgIds)
+ # get inds to evaluate
+ k_list = [n for n, k in enumerate(p.catIds) if k in setK]
+ m_list = [m for n, m in enumerate(p.maxDets) if m in setM]
+ a_list = [n for n, a in enumerate(map(lambda x: tuple(x), p.areaRng)) if a in setA]
+ i_list = [n for n, i in enumerate(p.imgIds) if i in setI]
+ I0 = len(_pe.imgIds)
+ A0 = len(_pe.areaRng)
+ # retrieve E at each category, area range, and max number of detections
+ for k, k0 in enumerate(k_list):
+ Nk = k0 * A0 * I0
+ for a, a0 in enumerate(a_list):
+ Na = a0 * I0
+ for m, maxDet in enumerate(m_list):
+ E = [self.evalImgs[Nk + Na + i] for i in i_list]
+ E = [e for e in E if e is not None]
+ if len(E) == 0:
+ continue
+ dtScores = np.concatenate([e["dtScores"][0:maxDet] for e in E])
+
+                    # different sorting methods generate slightly different results;
+                    # mergesort is used to be consistent with the Matlab implementation.
+ inds = np.argsort(-dtScores, kind="mergesort")
+
+ dtm = np.concatenate([e["dtMatches"][:, 0:maxDet] for e in E], axis=1)[:, inds]
+ dtIg = np.concatenate([e["dtIgnore"][:, 0:maxDet] for e in E], axis=1)[:, inds]
+ gtIg = np.concatenate([e["gtIgnore"] for e in E])
+ npig = np.count_nonzero(gtIg == 0)
+ if npig == 0:
+ continue
+ tps = np.logical_and(dtm, np.logical_not(dtIg))
+ fps = np.logical_and(np.logical_not(dtm), np.logical_not(dtIg))
+ tp_sum = np.cumsum(tps, axis=1).astype(dtype=float)
+ fp_sum = np.cumsum(fps, axis=1).astype(dtype=float)
+ for t, (tp, fp) in enumerate(zip(tp_sum, fp_sum)):
+ tp = np.array(tp)
+ fp = np.array(fp)
+ nd = len(tp)
+ rc = tp / npig
+ pr = tp / (fp + tp + np.spacing(1))
+ q = np.zeros((R,))
+
+ if nd:
+ recall[t, k, a, m] = rc[-1]
+ else:
+ recall[t, k, a, m] = 0
+
+                        # numpy is slow without cython optimization when accessing elements;
+                        # using python lists gives a significant speed improvement
+ pr = pr.tolist()
+ q = q.tolist()
+
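+                        # The loop below turns precision into its monotone
+                        # (interpolated) envelope, which is then sampled at the
+                        # fixed recall thresholds p.recThrs.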
+ for i in range(nd - 1, 0, -1):
+ if pr[i] > pr[i - 1]:
+ pr[i - 1] = pr[i]
+
+ inds = np.searchsorted(rc, p.recThrs, side="left")
+ try:
+ for ri, pi in enumerate(inds):
+ q[ri] = pr[pi]
+ except Exception:
+ pass
+ precision[t, :, k, a, m] = np.array(q)
+ logger.info(
+ "Final: max precision {}, min precision {}".format(np.max(precision), np.min(precision))
+ )
+ self.eval = {
+ "params": p,
+ "counts": [T, R, K, A, M],
+ "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"),
+ "precision": precision,
+ "recall": recall,
+ }
+ toc = time.time()
+ logger.info("DONE (t={:0.2f}s).".format(toc - tic))
+
+ def summarize(self):
+ """
+ Compute and display summary metrics for evaluation results.
+ Note this function can *only* be applied on the default parameter setting
+ """
+
+ def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
+ p = self.params
+ iStr = " {:<18} {} @[ {}={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
+ titleStr = "Average Precision" if ap == 1 else "Average Recall"
+ typeStr = "(AP)" if ap == 1 else "(AR)"
+ measure = "IoU"
+ if self.params.iouType == "keypoints":
+ measure = "OKS"
+ elif self.params.iouType == "densepose":
+ measure = "OGPS"
+ iouStr = (
+ "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
+ if iouThr is None
+ else "{:0.2f}".format(iouThr)
+ )
+
+ aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
+ mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
+ if ap == 1:
+ # dimension of precision: [TxRxKxAxM]
+ s = self.eval["precision"]
+ # IoU
+ if iouThr is not None:
+ t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]
+ s = s[t]
+ s = s[:, :, :, aind, mind]
+ else:
+ # dimension of recall: [TxKxAxM]
+ s = self.eval["recall"]
+ if iouThr is not None:
+ t = np.where(np.abs(iouThr - p.iouThrs) < 0.001)[0]
+ s = s[t]
+ s = s[:, :, aind, mind]
+ if len(s[s > -1]) == 0:
+ mean_s = -1
+ else:
+ mean_s = np.mean(s[s > -1])
+ logger.info(iStr.format(titleStr, typeStr, measure, iouStr, areaRng, maxDets, mean_s))
+ return mean_s
+
+ def _summarizeDets():
+ stats = np.zeros((12,))
+ stats[0] = _summarize(1)
+ stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
+ stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
+ stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
+ stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
+ stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
+ stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
+ stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
+ stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
+ stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
+ stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
+ stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
+ return stats
+
+ def _summarizeKps():
+ stats = np.zeros((10,))
+ stats[0] = _summarize(1, maxDets=20)
+ stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
+ stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
+ stats[3] = _summarize(1, maxDets=20, areaRng="medium")
+ stats[4] = _summarize(1, maxDets=20, areaRng="large")
+ stats[5] = _summarize(0, maxDets=20)
+ stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
+ stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
+ stats[8] = _summarize(0, maxDets=20, areaRng="medium")
+ stats[9] = _summarize(0, maxDets=20, areaRng="large")
+ return stats
+
+ def _summarizeUvs():
+ stats = [_summarize(1, maxDets=self.params.maxDets[0])]
+ min_threshold = self.params.iouThrs.min()
+ if min_threshold <= 0.201:
+ stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.2)]
+ if min_threshold <= 0.301:
+ stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.3)]
+ if min_threshold <= 0.401:
+ stats += [_summarize(1, maxDets=self.params.maxDets[0], iouThr=0.4)]
+ stats += [
+ _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5),
+ _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75),
+ _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium"),
+ _summarize(1, maxDets=self.params.maxDets[0], areaRng="large"),
+ _summarize(0, maxDets=self.params.maxDets[0]),
+ _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5),
+ _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75),
+ _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium"),
+ _summarize(0, maxDets=self.params.maxDets[0], areaRng="large"),
+ ]
+ return np.array(stats)
+
+ def _summarizeUvsOld():
+ stats = np.zeros((18,))
+ stats[0] = _summarize(1, maxDets=self.params.maxDets[0])
+ stats[1] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.5)
+ stats[2] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.55)
+ stats[3] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.60)
+ stats[4] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.65)
+ stats[5] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.70)
+ stats[6] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.75)
+ stats[7] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.80)
+ stats[8] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.85)
+ stats[9] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.90)
+ stats[10] = _summarize(1, maxDets=self.params.maxDets[0], iouThr=0.95)
+ stats[11] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="medium")
+ stats[12] = _summarize(1, maxDets=self.params.maxDets[0], areaRng="large")
+ stats[13] = _summarize(0, maxDets=self.params.maxDets[0])
+ stats[14] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.5)
+ stats[15] = _summarize(0, maxDets=self.params.maxDets[0], iouThr=0.75)
+ stats[16] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="medium")
+ stats[17] = _summarize(0, maxDets=self.params.maxDets[0], areaRng="large")
+ return stats
+
+ if not self.eval:
+ raise Exception("Please run accumulate() first")
+ iouType = self.params.iouType
+ if iouType in ["segm", "bbox"]:
+ summarize = _summarizeDets
+ elif iouType in ["keypoints"]:
+ summarize = _summarizeKps
+ elif iouType in ["densepose"]:
+ summarize = _summarizeUvs
+ self.stats = summarize()
+
+    def __str__(self):
+        self.summarize()
+        return ""
+
+ # ================ functions for dense pose ==============================
+ def findAllClosestVertsUV(self, U_points, V_points, Index_points):
+ ClosestVerts = np.ones(Index_points.shape) * -1
+ for i in np.arange(24):
+ #
+ if (i + 1) in Index_points:
+ UVs = np.array(
+ [U_points[Index_points == (i + 1)], V_points[Index_points == (i + 1)]]
+ )
+ Current_Part_UVs = self.Part_UVs[i]
+ Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
+ D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
+ ClosestVerts[Index_points == (i + 1)] = Current_Part_ClosestVertInds[
+ np.argmin(D, axis=0)
+ ]
+ ClosestVertsTransformed = self.PDIST_transform[ClosestVerts.astype(int) - 1]
+ ClosestVertsTransformed[ClosestVerts < 0] = 0
+ return ClosestVertsTransformed
+
+ def findClosestVertsCse(self, embedding, py, px, mask, mesh_name):
+ mesh_vertex_embeddings = self.embedder(mesh_name)
+ pixel_embeddings = embedding[:, py, px].t().to(device="cuda")
+ mask_vals = mask[py, px]
+ edm = squared_euclidean_distance_matrix(pixel_embeddings, mesh_vertex_embeddings)
+ vertex_indices = edm.argmin(dim=1).cpu()
+ vertex_indices[mask_vals <= 0] = -1
+ return vertex_indices
+
+ def findAllClosestVertsGT(self, gt):
+ #
+ I_gt = np.array(gt["dp_I"])
+ U_gt = np.array(gt["dp_U"])
+ V_gt = np.array(gt["dp_V"])
+ #
+ # print(I_gt)
+ #
+ ClosestVertsGT = np.ones(I_gt.shape) * -1
+ for i in np.arange(24):
+ if (i + 1) in I_gt:
+ UVs = np.array([U_gt[I_gt == (i + 1)], V_gt[I_gt == (i + 1)]])
+ Current_Part_UVs = self.Part_UVs[i]
+ Current_Part_ClosestVertInds = self.Part_ClosestVertInds[i]
+ D = ssd.cdist(Current_Part_UVs.transpose(), UVs.transpose()).squeeze()
+ ClosestVertsGT[I_gt == (i + 1)] = Current_Part_ClosestVertInds[np.argmin(D, axis=0)]
+ #
+ ClosestVertsGTTransformed = self.PDIST_transform[ClosestVertsGT.astype(int) - 1]
+ ClosestVertsGTTransformed[ClosestVertsGT < 0] = 0
+ return ClosestVertsGT, ClosestVertsGTTransformed
+
+ def getDistancesCse(self, cVertsGT, cVerts, mesh_name):
+ geodists_vertices = torch.ones_like(cVertsGT) * float("inf")
+ selected = (cVertsGT >= 0) * (cVerts >= 0)
+ mesh = create_mesh(mesh_name, "cpu")
+ geodists_vertices[selected] = mesh.geodists[cVertsGT[selected], cVerts[selected]]
+ return geodists_vertices.numpy()
+
+ def getDistancesUV(self, cVertsGT, cVerts):
+ #
+ n = 27554
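+        # self.Pdist_matrix stores pairwise geodesic distances between the n SMPL
+        # subdivision vertices in condensed (upper-triangular) form; the index
+        # arithmetic below maps an ordered vertex pair (i, j) to its flat index.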
+ dists = []
+ for d in range(len(cVertsGT)):
+ if cVertsGT[d] > 0:
+ if cVerts[d] > 0:
+ i = cVertsGT[d] - 1
+ j = cVerts[d] - 1
+ if j == i:
+ dists.append(0)
+ elif j > i:
+ ccc = i
+ i = j
+ j = ccc
+ i = n - i - 1
+ j = n - j - 1
+ k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
+ k = (n * n - n) / 2 - k - 1
+ dists.append(self.Pdist_matrix[int(k)][0])
+ else:
+ i = n - i - 1
+ j = n - j - 1
+ k = (n * (n - 1) / 2) - (n - i) * ((n - i) - 1) / 2 + j - i - 1
+ k = (n * n - n) / 2 - k - 1
+ dists.append(self.Pdist_matrix[int(k)][0])
+ else:
+ dists.append(np.inf)
+ return np.atleast_1d(np.array(dists).squeeze())
+
+
+class Params:
+ """
+ Params for coco evaluation api
+ """
+
+ def setDetParams(self):
+ self.imgIds = []
+ self.catIds = []
+ # np.arange causes trouble. the data point on arange is slightly larger than the true value
+ self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
+ self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
+ self.maxDets = [1, 10, 100]
+ self.areaRng = [
+ [0**2, 1e5**2],
+ [0**2, 32**2],
+ [32**2, 96**2],
+ [96**2, 1e5**2],
+ ]
+ self.areaRngLbl = ["all", "small", "medium", "large"]
+ self.useCats = 1
+
+ def setKpParams(self):
+ self.imgIds = []
+ self.catIds = []
+ # np.arange causes trouble. the data point on arange is slightly larger than the true value
+        self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
+        self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
+ self.maxDets = [20]
+ self.areaRng = [[0**2, 1e5**2], [32**2, 96**2], [96**2, 1e5**2]]
+ self.areaRngLbl = ["all", "medium", "large"]
+ self.useCats = 1
+
+ def setUvParams(self):
+ self.imgIds = []
+ self.catIds = []
+ self.iouThrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True)
+ self.recThrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True)
+ self.maxDets = [20]
+ self.areaRng = [[0**2, 1e5**2], [32**2, 96**2], [96**2, 1e5**2]]
+ self.areaRngLbl = ["all", "medium", "large"]
+ self.useCats = 1
+
+ def __init__(self, iouType="segm"):
+ if iouType == "segm" or iouType == "bbox":
+ self.setDetParams()
+ elif iouType == "keypoints":
+ self.setKpParams()
+ elif iouType == "densepose":
+ self.setUvParams()
+ else:
+ raise Exception("iouType not supported")
+ self.iouType = iouType
+ # useSegm is deprecated
+ self.useSegm = None
diff --git a/densepose/evaluation/evaluator.py b/densepose/evaluation/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..d5d1d789bbe4b8791aa8529518ba1b964d31daca
--- /dev/null
+++ b/densepose/evaluation/evaluator.py
@@ -0,0 +1,421 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import contextlib
+import copy
+import io
+import itertools
+import logging
+import numpy as np
+import os
+from collections import OrderedDict
+from typing import Dict, Iterable, List, Optional
+import pycocotools.mask as mask_utils
+import torch
+from pycocotools.coco import COCO
+from tabulate import tabulate
+
+from detectron2.config import CfgNode
+from detectron2.data import MetadataCatalog
+from detectron2.evaluation import DatasetEvaluator
+from detectron2.structures import BoxMode
+from detectron2.utils.comm import gather, get_rank, is_main_process, synchronize
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import create_small_table
+
+from densepose.converters import ToChartResultConverter, ToMaskConverter
+from densepose.data.datasets.coco import maybe_filter_and_map_categories_cocoapi
+from densepose.structures import (
+ DensePoseChartPredictorOutput,
+ DensePoseEmbeddingPredictorOutput,
+ quantize_densepose_chart_result,
+)
+
+from .densepose_coco_evaluation import DensePoseCocoEval, DensePoseEvalMode
+from .mesh_alignment_evaluator import MeshAlignmentEvaluator
+from .tensor_storage import (
+ SingleProcessFileTensorStorage,
+ SingleProcessRamTensorStorage,
+ SingleProcessTensorStorage,
+ SizeData,
+ storage_gather,
+)
+
+
+class DensePoseCOCOEvaluator(DatasetEvaluator):
+ def __init__(
+ self,
+ dataset_name,
+ distributed,
+ output_dir=None,
+ evaluator_type: str = "iuv",
+ min_iou_threshold: float = 0.5,
+ storage: Optional[SingleProcessTensorStorage] = None,
+ embedder=None,
+ should_evaluate_mesh_alignment: bool = False,
+ mesh_alignment_mesh_names: Optional[List[str]] = None,
+ ):
+ self._embedder = embedder
+ self._distributed = distributed
+ self._output_dir = output_dir
+ self._evaluator_type = evaluator_type
+ self._storage = storage
+ self._should_evaluate_mesh_alignment = should_evaluate_mesh_alignment
+
+ assert not (
+ should_evaluate_mesh_alignment and embedder is None
+ ), "Mesh alignment evaluation is activated, but no vertex embedder provided!"
+ if should_evaluate_mesh_alignment:
+ self._mesh_alignment_evaluator = MeshAlignmentEvaluator(
+ embedder,
+ mesh_alignment_mesh_names,
+ )
+
+ self._cpu_device = torch.device("cpu")
+ self._logger = logging.getLogger(__name__)
+
+ self._metadata = MetadataCatalog.get(dataset_name)
+ self._min_threshold = min_iou_threshold
+ json_file = PathManager.get_local_path(self._metadata.json_file)
+ with contextlib.redirect_stdout(io.StringIO()):
+ self._coco_api = COCO(json_file)
+ maybe_filter_and_map_categories_cocoapi(dataset_name, self._coco_api)
+
+ def reset(self):
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a COCO model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ The :class:`Instances` object needs to have `densepose` field.
+ """
+ for input, output in zip(inputs, outputs):
+ instances = output["instances"].to(self._cpu_device)
+ if not instances.has("pred_densepose"):
+ continue
+ prediction_list = prediction_to_dict(
+ instances,
+ input["image_id"],
+ self._embedder,
+ self._metadata.class_to_mesh_name,
+ self._storage is not None,
+ )
+ if self._storage is not None:
+ for prediction_dict in prediction_list:
+ dict_to_store = {}
+ for field_name in self._storage.data_schema:
+ dict_to_store[field_name] = prediction_dict[field_name]
+ record_id = self._storage.put(dict_to_store)
+ prediction_dict["record_id"] = record_id
+ prediction_dict["rank"] = get_rank()
+ for field_name in self._storage.data_schema:
+ del prediction_dict[field_name]
+ self._predictions.extend(prediction_list)
+
+ def evaluate(self, img_ids=None):
+ if self._distributed:
+ synchronize()
+ predictions = gather(self._predictions)
+ predictions = list(itertools.chain(*predictions))
+ else:
+ predictions = self._predictions
+
+ multi_storage = storage_gather(self._storage) if self._storage is not None else None
+
+ if not is_main_process():
+ return
+ return copy.deepcopy(self._eval_predictions(predictions, multi_storage, img_ids))
+
+ def _eval_predictions(self, predictions, multi_storage=None, img_ids=None):
+ """
+ Evaluate predictions on densepose.
+ Return results with the metrics of the tasks.
+ """
+ self._logger.info("Preparing results for COCO format ...")
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "coco_densepose_predictions.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(predictions, f)
+
+ self._logger.info("Evaluating predictions ...")
+ res = OrderedDict()
+ results_gps, results_gpsm, results_segm = _evaluate_predictions_on_coco(
+ self._coco_api,
+ predictions,
+ multi_storage,
+ self._embedder,
+ class_names=self._metadata.get("thing_classes"),
+ min_threshold=self._min_threshold,
+ img_ids=img_ids,
+ )
+ res["densepose_gps"] = results_gps
+ res["densepose_gpsm"] = results_gpsm
+ res["densepose_segm"] = results_segm
+ if self._should_evaluate_mesh_alignment:
+ res["densepose_mesh_alignment"] = self._evaluate_mesh_alignment()
+ return res
+
+ def _evaluate_mesh_alignment(self):
+ self._logger.info("Mesh alignment evaluation ...")
+ mean_ge, mean_gps, per_mesh_metrics = self._mesh_alignment_evaluator.evaluate()
+ results = {
+ "GE": mean_ge * 100,
+ "GPS": mean_gps * 100,
+ }
+ mesh_names = set()
+ for metric_name in per_mesh_metrics:
+ for mesh_name, value in per_mesh_metrics[metric_name].items():
+ results[f"{metric_name}-{mesh_name}"] = value * 100
+ mesh_names.add(mesh_name)
+ self._print_mesh_alignment_results(results, mesh_names)
+ return results
+
+ def _print_mesh_alignment_results(self, results: Dict[str, float], mesh_names: Iterable[str]):
+ self._logger.info("Evaluation results for densepose, mesh alignment:")
+ self._logger.info(f'| {"Mesh":13s} | {"GErr":7s} | {"GPS":7s} |')
+ self._logger.info("| :-----------: | :-----: | :-----: |")
+ for mesh_name in mesh_names:
+ ge_key = f"GE-{mesh_name}"
+ ge_str = f"{results[ge_key]:.4f}" if ge_key in results else " "
+ gps_key = f"GPS-{mesh_name}"
+ gps_str = f"{results[gps_key]:.4f}" if gps_key in results else " "
+ self._logger.info(f"| {mesh_name:13s} | {ge_str:7s} | {gps_str:7s} |")
+ self._logger.info("| :-------------------------------: |")
+ ge_key = "GE"
+ ge_str = f"{results[ge_key]:.4f}" if ge_key in results else " "
+ gps_key = "GPS"
+ gps_str = f"{results[gps_key]:.4f}" if gps_key in results else " "
+ self._logger.info(f'| {"MEAN":13s} | {ge_str:7s} | {gps_str:7s} |')
+
+
+def prediction_to_dict(instances, img_id, embedder, class_to_mesh_name, use_storage):
+ """
+ Args:
+ instances (Instances): the output of the model
+ img_id (str): the image id in COCO
+
+ Returns:
+ list[dict]: the results in densepose evaluation format
+ """
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+ raw_boxes_xywh = BoxMode.convert(
+ instances.pred_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
+ )
+
+ if isinstance(instances.pred_densepose, DensePoseEmbeddingPredictorOutput):
+ results_densepose = densepose_cse_predictions_to_dict(
+ instances, embedder, class_to_mesh_name, use_storage
+ )
+ elif isinstance(instances.pred_densepose, DensePoseChartPredictorOutput):
+ if not use_storage:
+ results_densepose = densepose_chart_predictions_to_dict(instances)
+ else:
+ results_densepose = densepose_chart_predictions_to_storage_dict(instances)
+
+ results = []
+ for k in range(len(instances)):
+ result = {
+ "image_id": img_id,
+ "category_id": classes[k],
+ "bbox": raw_boxes_xywh[k].tolist(),
+ "score": scores[k],
+ }
+ results.append({**result, **results_densepose[k]})
+ return results
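+
+ # A single entry appended above looks roughly as follows (an illustrative
+ # sketch with made-up values; the DensePose-specific keys depend on the
+ # predictor output type and on whether tensor storage is used):
+ # {
+ #     "image_id": 42,
+ #     "category_id": 0,
+ #     "bbox": [x, y, w, h],
+ #     "score": 0.97,
+ #     "densepose": <quantized chart result>,   # or "coarse_segm"/"embedding" tensors
+ # }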
+
+
+def densepose_chart_predictions_to_dict(instances):
+ segmentations = ToMaskConverter.convert(
+ instances.pred_densepose, instances.pred_boxes, instances.image_size
+ )
+
+ results = []
+ for k in range(len(instances)):
+ densepose_results_quantized = quantize_densepose_chart_result(
+ ToChartResultConverter.convert(instances.pred_densepose[k], instances.pred_boxes[k])
+ )
+ densepose_results_quantized.labels_uv_uint8 = (
+ densepose_results_quantized.labels_uv_uint8.cpu()
+ )
+ segmentation = segmentations.tensor[k]
+ segmentation_encoded = mask_utils.encode(
+ np.require(segmentation.numpy(), dtype=np.uint8, requirements=["F"])
+ )
+ segmentation_encoded["counts"] = segmentation_encoded["counts"].decode("utf-8")
+ result = {
+ "densepose": densepose_results_quantized,
+ "segmentation": segmentation_encoded,
+ }
+ results.append(result)
+ return results
+
+
+def densepose_chart_predictions_to_storage_dict(instances):
+ results = []
+ for k in range(len(instances)):
+ densepose_predictor_output = instances.pred_densepose[k]
+ result = {
+ "coarse_segm": densepose_predictor_output.coarse_segm.squeeze(0).cpu(),
+ "fine_segm": densepose_predictor_output.fine_segm.squeeze(0).cpu(),
+ "u": densepose_predictor_output.u.squeeze(0).cpu(),
+ "v": densepose_predictor_output.v.squeeze(0).cpu(),
+ }
+ results.append(result)
+ return results
+
+
+def densepose_cse_predictions_to_dict(instances, embedder, class_to_mesh_name, use_storage):
+ results = []
+ for k in range(len(instances)):
+ cse = instances.pred_densepose[k]
+ results.append(
+ {
+ "coarse_segm": cse.coarse_segm[0].cpu(),
+ "embedding": cse.embedding[0].cpu(),
+ }
+ )
+ return results
+
+
+def _evaluate_predictions_on_coco(
+ coco_gt,
+ coco_results,
+ multi_storage=None,
+ embedder=None,
+ class_names=None,
+ min_threshold: float = 0.5,
+ img_ids=None,
+):
+ logger = logging.getLogger(__name__)
+
+ densepose_metrics = _get_densepose_metrics(min_threshold)
+ if len(coco_results) == 0: # cocoapi does not handle empty results very well
+ logger.warning("No predictions from the model! Set scores to -1")
+ results_gps = {metric: -1 for metric in densepose_metrics}
+ results_gpsm = {metric: -1 for metric in densepose_metrics}
+ results_segm = {metric: -1 for metric in densepose_metrics}
+ return results_gps, results_gpsm, results_segm
+
+ coco_dt = coco_gt.loadRes(coco_results)
+
+ results = []
+ for eval_mode_name in ["GPS", "GPSM", "IOU"]:
+ eval_mode = getattr(DensePoseEvalMode, eval_mode_name)
+ coco_eval = DensePoseCocoEval(
+ coco_gt, coco_dt, "densepose", multi_storage, embedder, dpEvalMode=eval_mode
+ )
+ result = _derive_results_from_coco_eval(
+ coco_eval, eval_mode_name, densepose_metrics, class_names, min_threshold, img_ids
+ )
+ results.append(result)
+ return results
+
+
+def _get_densepose_metrics(min_threshold: float = 0.5):
+ metrics = ["AP"]
+ if min_threshold <= 0.201:
+ metrics += ["AP20"]
+ if min_threshold <= 0.301:
+ metrics += ["AP30"]
+ if min_threshold <= 0.401:
+ metrics += ["AP40"]
+ metrics.extend(["AP50", "AP75", "APm", "APl", "AR", "AR50", "AR75", "ARm", "ARl"])
+ return metrics
+
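+ # For example, the default min_threshold of 0.5 yields
+ # ["AP", "AP50", "AP75", "APm", "APl", "AR", "AR50", "AR75", "ARm", "ARl"],
+ # while min_threshold = 0.2 additionally inserts AP20, AP30 and AP40 after "AP".
+ # The order matters: _derive_results_from_coco_eval indexes coco_eval.stats
+ # by position in this list.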
+
+def _derive_results_from_coco_eval(
+ coco_eval, eval_mode_name, metrics, class_names, min_threshold: float, img_ids
+):
+ if img_ids is not None:
+ coco_eval.params.imgIds = img_ids
+ coco_eval.params.iouThrs = np.linspace(
+ min_threshold, 0.95, int(np.round((0.95 - min_threshold) / 0.05)) + 1, endpoint=True
+ )
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+ results = {metric: float(coco_eval.stats[idx] * 100) for idx, metric in enumerate(metrics)}
+ logger = logging.getLogger(__name__)
+ logger.info(
+ f"Evaluation results for densepose, {eval_mode_name} metric: \n"
+ + create_small_table(results)
+ )
+ if class_names is None or len(class_names) <= 1:
+ return results
+
+ # Compute per-category AP, the same way as it is done in D2
+ # (see detectron2/evaluation/coco_evaluation.py):
+ precisions = coco_eval.eval["precision"]
+ # precision has dims (iou, recall, cls, area range, max dets)
+ assert len(class_names) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, name in enumerate(class_names):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ ap = np.mean(precision) if precision.size else float("nan")
+ results_per_category.append((f"{name}", float(ap * 100)))
+
+ # tabulate it
+ n_cols = min(6, len(results_per_category) * 2)
+ results_flatten = list(itertools.chain(*results_per_category))
+ results_2d = itertools.zip_longest(*[results_flatten[i::n_cols] for i in range(n_cols)])
+ table = tabulate(
+ results_2d,
+ tablefmt="pipe",
+ floatfmt=".3f",
+ headers=["category", "AP"] * (n_cols // 2),
+ numalign="left",
+ )
+ logger.info(f"Per-category {eval_mode_name} AP: \n" + table)
+
+ results.update({"AP-" + name: ap for name, ap in results_per_category})
+ return results
+
+
+def build_densepose_evaluator_storage(cfg: CfgNode, output_folder: str):
+ storage_spec = cfg.DENSEPOSE_EVALUATION.STORAGE
+ if storage_spec == "none":
+ return None
+ evaluator_type = cfg.DENSEPOSE_EVALUATION.TYPE
+ # common output tensor sizes
+ hout = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
+ wout = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
+ n_csc = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
+ # specific output tensors
+ if evaluator_type == "iuv":
+ n_fsc = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
+ schema = {
+ "coarse_segm": SizeData(dtype="float32", shape=(n_csc, hout, wout)),
+ "fine_segm": SizeData(dtype="float32", shape=(n_fsc, hout, wout)),
+ "u": SizeData(dtype="float32", shape=(n_fsc, hout, wout)),
+ "v": SizeData(dtype="float32", shape=(n_fsc, hout, wout)),
+ }
+ elif evaluator_type == "cse":
+ embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
+ schema = {
+ "coarse_segm": SizeData(dtype="float32", shape=(n_csc, hout, wout)),
+ "embedding": SizeData(dtype="float32", shape=(embed_size, hout, wout)),
+ }
+ else:
+ raise ValueError(f"Unknown evaluator type: {evaluator_type}")
+ # storage types
+ if storage_spec == "ram":
+ storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
+ elif storage_spec == "file":
+ fpath = os.path.join(output_folder, f"DensePoseEvaluatorStorage.{get_rank()}.bin")
+ PathManager.mkdirs(output_folder)
+ storage = SingleProcessFileTensorStorage(schema, fpath, "wb")
+ else:
+ raise ValueError(f"Unknown storage specification: {storage_spec}")
+ return storage
diff --git a/densepose/evaluation/mesh_alignment_evaluator.py b/densepose/evaluation/mesh_alignment_evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d67c1a88a56332fb708c4618a34e96900926083
--- /dev/null
+++ b/densepose/evaluation/mesh_alignment_evaluator.py
@@ -0,0 +1,66 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import json
+import logging
+from typing import List, Optional
+import torch
+from torch import nn
+
+from detectron2.utils.file_io import PathManager
+
+from densepose.structures.mesh import create_mesh
+
+
+class MeshAlignmentEvaluator:
+ """
+ Class for evaluation of 3D mesh alignment based on the learned vertex embeddings
+ """
+
+ def __init__(self, embedder: nn.Module, mesh_names: Optional[List[str]]):
+ self.embedder = embedder
+ # use the provided mesh names if not None and not an empty list
+ self.mesh_names = mesh_names if mesh_names else embedder.mesh_names
+ self.logger = logging.getLogger(__name__)
+ with PathManager.open(
+ "https://dl.fbaipublicfiles.com/densepose/data/cse/mesh_keyvertices_v0.json", "r"
+ ) as f:
+ self.mesh_keyvertices = json.load(f)
+
+ def evaluate(self):
+ ge_per_mesh = {}
+ gps_per_mesh = {}
+ for mesh_name_1 in self.mesh_names:
+ avg_errors = []
+ avg_gps = []
+ embeddings_1 = self.embedder(mesh_name_1)
+ keyvertices_1 = self.mesh_keyvertices[mesh_name_1]
+ keyvertex_names_1 = list(keyvertices_1.keys())
+ keyvertex_indices_1 = [keyvertices_1[name] for name in keyvertex_names_1]
+ for mesh_name_2 in self.mesh_names:
+ if mesh_name_1 == mesh_name_2:
+ continue
+ embeddings_2 = self.embedder(mesh_name_2)
+ keyvertices_2 = self.mesh_keyvertices[mesh_name_2]
+ sim_matrix_12 = embeddings_1[keyvertex_indices_1].mm(embeddings_2.T)
+ vertices_2_matching_keyvertices_1 = sim_matrix_12.argmax(axis=1)
+ mesh_2 = create_mesh(mesh_name_2, embeddings_2.device)
+ geodists = mesh_2.geodists[
+ vertices_2_matching_keyvertices_1,
+ [keyvertices_2[name] for name in keyvertex_names_1],
+ ]
+ Current_Mean_Distances = 0.255
+ gps = (-(geodists**2) / (2 * (Current_Mean_Distances**2))).exp()
+ avg_errors.append(geodists.mean().item())
+ avg_gps.append(gps.mean().item())
+
+ ge_mean = torch.as_tensor(avg_errors).mean().item()
+ gps_mean = torch.as_tensor(avg_gps).mean().item()
+ ge_per_mesh[mesh_name_1] = ge_mean
+ gps_per_mesh[mesh_name_1] = gps_mean
+ ge_mean_global = torch.as_tensor(list(ge_per_mesh.values())).mean().item()
+ gps_mean_global = torch.as_tensor(list(gps_per_mesh.values())).mean().item()
+ per_mesh_metrics = {
+ "GE": ge_per_mesh,
+ "GPS": gps_per_mesh,
+ }
+ return ge_mean_global, gps_mean_global, per_mesh_metrics
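+
+ # Note (added for clarity): the geodesic point similarity above follows the
+ # DensePose GPS definition gps = exp(-d^2 / (2 * kappa^2)) with kappa = 0.255
+ # (Current_Mean_Distances); evaluate() averages it over key vertices, then
+ # over the paired meshes, and finally reports a global mean over all meshes.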
diff --git a/densepose/evaluation/tensor_storage.py b/densepose/evaluation/tensor_storage.py
new file mode 100644
index 0000000000000000000000000000000000000000..db57c6ac73a423e39b1ed2e5a4a1f824aa233737
--- /dev/null
+++ b/densepose/evaluation/tensor_storage.py
@@ -0,0 +1,239 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import io
+import numpy as np
+import os
+from dataclasses import dataclass
+from functools import reduce
+from operator import mul
+from typing import BinaryIO, Dict, Optional, Tuple
+import torch
+
+from detectron2.utils.comm import gather, get_rank
+from detectron2.utils.file_io import PathManager
+
+
+@dataclass
+class SizeData:
+ dtype: str
+ shape: Tuple[int]
+
+
+def _calculate_record_field_size_b(data_schema: Dict[str, SizeData], field_name: str) -> int:
+ schema = data_schema[field_name]
+ element_size_b = np.dtype(schema.dtype).itemsize
+ record_field_size_b = reduce(mul, schema.shape) * element_size_b
+ return record_field_size_b
+
+
+def _calculate_record_size_b(data_schema: Dict[str, SizeData]) -> int:
+ record_size_b = 0
+ for field_name in data_schema:
+ record_field_size_b = _calculate_record_field_size_b(data_schema, field_name)
+ record_size_b += record_field_size_b
+ return record_size_b
+
+
+def _calculate_record_field_sizes_b(data_schema: Dict[str, SizeData]) -> Dict[str, int]:
+ field_sizes_b = {}
+ for field_name in data_schema:
+ field_sizes_b[field_name] = _calculate_record_field_size_b(data_schema, field_name)
+ return field_sizes_b
+
+
+class SingleProcessTensorStorage:
+ """
+ Compact tensor storage to keep tensor data of predefined size and type.
+ """
+
+ def __init__(self, data_schema: Dict[str, SizeData], storage_impl: BinaryIO):
+ """
+ Construct tensor storage based on information on data shape and size.
+ Internally uses numpy to interpret the type specification.
+ The storage must support operations `seek(offset, whence=os.SEEK_SET)` and
+ `read(size)` to be able to perform the `get` operation.
+ The storage must support operation `write(bytes)` to be able to perform
+ the `put` operation.
+
+ Args:
+ data_schema (dict: str -> SizeData): dictionary which maps tensor name
+ to its size data (shape and data type), e.g.
+ ```
+ {
+ "coarse_segm": SizeData(dtype="float32", shape=(112, 112)),
+ "embedding": SizeData(dtype="float32", shape=(16, 112, 112)),
+ }
+ ```
+ storage_impl (BinaryIO): io instance that handles file-like seek, read
+ and write operations, e.g. a file handle or a memory buffer like io.BytesIO
+ """
+ self.data_schema = data_schema
+ self.record_size_b = _calculate_record_size_b(data_schema)
+ self.record_field_sizes_b = _calculate_record_field_sizes_b(data_schema)
+ self.storage_impl = storage_impl
+ self.next_record_id = 0
+
+ def get(self, record_id: int) -> Dict[str, torch.Tensor]:
+ """
+ Load tensors from the storage by record ID
+
+ Args:
+ record_id (int): Record ID, for which to load the data
+
+ Return:
+ dict: str -> tensor: tensor name mapped to tensor data, recorded under the provided ID
+ """
+ self.storage_impl.seek(record_id * self.record_size_b, os.SEEK_SET)
+ data_bytes = self.storage_impl.read(self.record_size_b)
+ assert len(data_bytes) == self.record_size_b, (
+ f"Expected to read {self.record_size_b} B, "
+ f"got {len(data_bytes)} B"
+ )
+ record = {}
+ cur_idx = 0
+ # it's important to read and write in the same order
+ for field_name in sorted(self.data_schema):
+ schema = self.data_schema[field_name]
+ field_size_b = self.record_field_sizes_b[field_name]
+ chunk = data_bytes[cur_idx : cur_idx + field_size_b]
+ data_np = np.frombuffer(
+ chunk, dtype=schema.dtype, count=reduce(mul, schema.shape)
+ ).reshape(schema.shape)
+ record[field_name] = torch.from_numpy(data_np)
+ cur_idx += field_size_b
+ return record
+
+ def put(self, data: Dict[str, torch.Tensor]) -> int:
+ """
+ Store tensors in the storage
+
+ Args:
+ data (dict: str -> tensor): data to store, a dictionary which maps
+ tensor names into tensors; tensor shapes must match those specified
+ in data schema.
+ Return:
+ int: record ID, under which the data is stored
+ """
+ # it's important to read and write in the same order
+ for field_name in sorted(self.data_schema):
+ assert (
+ field_name in data
+ ), f"Field '{field_name}' not present in data: data keys are {data.keys()}"
+ value = data[field_name]
+ assert value.shape == self.data_schema[field_name].shape, (
+ f"Mismatched tensor shapes for field '{field_name}': "
+ f"expected {self.data_schema[field_name].shape}, got {value.shape}"
+ )
+ data_bytes = value.cpu().numpy().tobytes()
+ assert len(data_bytes) == self.record_field_sizes_b[field_name], (
+ f"Expected field {field_name} to be of size "
+ f"{self.record_field_sizes_b[field_name]} B, got {len(data_bytes)} B"
+ )
+ self.storage_impl.write(data_bytes)
+ record_id = self.next_record_id
+ self.next_record_id += 1
+ return record_id
+
+
+class SingleProcessFileTensorStorage(SingleProcessTensorStorage):
+ """
+ Implementation of a single process tensor storage which stores data in a file
+ """
+
+ def __init__(self, data_schema: Dict[str, SizeData], fpath: str, mode: str):
+ self.fpath = fpath
+ assert "b" in mode, f"Tensor storage should be opened in binary mode, got '{mode}'"
+ if "w" in mode:
+ # pyre-fixme[6]: For 2nd argument expected `Union[typing_extensions.Liter...
+ file_h = PathManager.open(fpath, mode)
+ elif "r" in mode:
+ local_fpath = PathManager.get_local_path(fpath)
+ file_h = open(local_fpath, mode)
+ else:
+ raise ValueError(f"Unsupported file mode {mode}, supported modes: rb, wb")
+ super().__init__(data_schema, file_h) # pyre-ignore[6]
+
+
+class SingleProcessRamTensorStorage(SingleProcessTensorStorage):
+ """
+ Implementation of a single process tensor storage which stores data in RAM
+ """
+
+ def __init__(self, data_schema: Dict[str, SizeData], buf: io.BytesIO):
+ super().__init__(data_schema, buf)
+
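+
+ # Minimal usage sketch (added for illustration, not part of the original
+ # code): a put/get round trip through an in-memory storage; the field name
+ # and shape below are arbitrary assumptions chosen for brevity.
+ def _storage_roundtrip_example() -> None:
+     schema = {"coarse_segm": SizeData(dtype="float32", shape=(2, 4, 4))}
+     storage = SingleProcessRamTensorStorage(schema, io.BytesIO())
+     record_id = storage.put({"coarse_segm": torch.zeros(2, 4, 4)})  # first record ID is 0
+     restored = storage.get(record_id)
+     assert restored["coarse_segm"].shape == (2, 4, 4)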
+
+class MultiProcessTensorStorage:
+ """
+ Representation of a set of tensor storages created by individual processes,
+ which allows accessing those storages from a single owner process. The storages
+ should either be shared with or broadcast to the owner process.
+ The processes are identified by their rank, data is uniquely defined by
+ the rank of the process and the record ID.
+ """
+
+ def __init__(self, rank_to_storage: Dict[int, SingleProcessTensorStorage]):
+ self.rank_to_storage = rank_to_storage
+
+ def get(self, rank: int, record_id: int) -> Dict[str, torch.Tensor]:
+ storage = self.rank_to_storage[rank]
+ return storage.get(record_id)
+
+ def put(self, rank: int, data: Dict[str, torch.Tensor]) -> int:
+ storage = self.rank_to_storage[rank]
+ return storage.put(data)
+
+
+class MultiProcessFileTensorStorage(MultiProcessTensorStorage):
+ def __init__(self, data_schema: Dict[str, SizeData], rank_to_fpath: Dict[int, str], mode: str):
+ rank_to_storage = {
+ rank: SingleProcessFileTensorStorage(data_schema, fpath, mode)
+ for rank, fpath in rank_to_fpath.items()
+ }
+ super().__init__(rank_to_storage) # pyre-ignore[6]
+
+
+class MultiProcessRamTensorStorage(MultiProcessTensorStorage):
+ def __init__(self, data_schema: Dict[str, SizeData], rank_to_buffer: Dict[int, io.BytesIO]):
+ rank_to_storage = {
+ rank: SingleProcessRamTensorStorage(data_schema, buf)
+ for rank, buf in rank_to_buffer.items()
+ }
+ super().__init__(rank_to_storage) # pyre-ignore[6]
+
+
+def _ram_storage_gather(
+ storage: SingleProcessRamTensorStorage, dst_rank: int = 0
+) -> Optional[MultiProcessRamTensorStorage]:
+ storage.storage_impl.seek(0, os.SEEK_SET)
+ # TODO: overhead, pickling a bytes object, can just pass bytes in a tensor directly
+ # see detectron2/utils.comm.py
+ data_list = gather(storage.storage_impl.read(), dst=dst_rank)
+ if get_rank() != dst_rank:
+ return None
+ rank_to_buffer = {i: io.BytesIO(data_list[i]) for i in range(len(data_list))}
+ multiprocess_storage = MultiProcessRamTensorStorage(storage.data_schema, rank_to_buffer)
+ return multiprocess_storage
+
+
+def _file_storage_gather(
+ storage: SingleProcessFileTensorStorage,
+ dst_rank: int = 0,
+ mode: str = "rb",
+) -> Optional[MultiProcessFileTensorStorage]:
+ storage.storage_impl.close()
+ fpath_list = gather(storage.fpath, dst=dst_rank)
+ if get_rank() != dst_rank:
+ return None
+ rank_to_fpath = {i: fpath_list[i] for i in range(len(fpath_list))}
+ return MultiProcessFileTensorStorage(storage.data_schema, rank_to_fpath, mode)
+
+
+def storage_gather(
+ storage: SingleProcessTensorStorage, dst_rank: int = 0
+) -> Optional[MultiProcessTensorStorage]:
+ if isinstance(storage, SingleProcessRamTensorStorage):
+ return _ram_storage_gather(storage, dst_rank)
+ elif isinstance(storage, SingleProcessFileTensorStorage):
+ return _file_storage_gather(storage, dst_rank)
+ raise Exception(f"Unsupported storage for gather operation: {storage}")
diff --git a/densepose/modeling/__init__.py b/densepose/modeling/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4c49f6da0d182cc97f5fe6b21d77c8f8330d3c3d
--- /dev/null
+++ b/densepose/modeling/__init__.py
@@ -0,0 +1,13 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
+from .filter import DensePoseDataFilter
+from .inference import densepose_inference
+from .utils import initialize_module_params
+from .build import (
+ build_densepose_data_filter,
+ build_densepose_embedder,
+ build_densepose_head,
+ build_densepose_losses,
+ build_densepose_predictor,
+)
diff --git a/densepose/modeling/build.py b/densepose/modeling/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..bb7f54b4a1044bc518d66d89432dd52c79fdf293
--- /dev/null
+++ b/densepose/modeling/build.py
@@ -0,0 +1,87 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Optional
+from torch import nn
+
+from detectron2.config import CfgNode
+
+from .cse.embedder import Embedder
+from .filter import DensePoseDataFilter
+
+
+def build_densepose_predictor(cfg: CfgNode, input_channels: int):
+ """
+ Create an instance of DensePose predictor based on configuration options.
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): input tensor size along the channel dimension
+ Return:
+ An instance of DensePose predictor
+ """
+ from .predictors import DENSEPOSE_PREDICTOR_REGISTRY
+
+ predictor_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.PREDICTOR_NAME
+ return DENSEPOSE_PREDICTOR_REGISTRY.get(predictor_name)(cfg, input_channels)
+
+
+def build_densepose_data_filter(cfg: CfgNode):
+ """
+ Build DensePose data filter which selects data for training
+
+ Args:
+ cfg (CfgNode): configuration options
+
+ Return:
+ Callable: list(Tensor), list(Instances) -> list(Tensor), list(Instances)
+ An instance of DensePose filter, which takes feature tensors and proposals
+ as an input and returns filtered features and proposals
+ """
+ dp_filter = DensePoseDataFilter(cfg)
+ return dp_filter
+
+
+def build_densepose_head(cfg: CfgNode, input_channels: int):
+ """
+ Build DensePose head based on configurations options
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): input tensor size along the channel dimension
+ Return:
+ An instance of DensePose head
+ """
+ from .roi_heads.registry import ROI_DENSEPOSE_HEAD_REGISTRY
+
+ head_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.NAME
+ return ROI_DENSEPOSE_HEAD_REGISTRY.get(head_name)(cfg, input_channels)
+
+
+def build_densepose_losses(cfg: CfgNode):
+ """
+ Build DensePose loss based on configurations options
+
+ Args:
+ cfg (CfgNode): configuration options
+ Return:
+ An instance of DensePose loss
+ """
+ from .losses import DENSEPOSE_LOSS_REGISTRY
+
+ loss_name = cfg.MODEL.ROI_DENSEPOSE_HEAD.LOSS_NAME
+ return DENSEPOSE_LOSS_REGISTRY.get(loss_name)(cfg)
+
+
+def build_densepose_embedder(cfg: CfgNode) -> Optional[nn.Module]:
+ """
+ Build embedder used to embed mesh vertices into an embedding space.
+ Embedder contains sub-embedders, one for each mesh ID.
+
+ Args:
+ cfg (CfgNode): configuration options
+ Return:
+ Embedding module
+ """
+ if cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS:
+ return Embedder(cfg)
+ return None
diff --git a/densepose/modeling/confidence.py b/densepose/modeling/confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..6f4a72efec06e055036ba70bc75b2624d20e1e0e
--- /dev/null
+++ b/densepose/modeling/confidence.py
@@ -0,0 +1,73 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from dataclasses import dataclass
+from enum import Enum
+
+from detectron2.config import CfgNode
+
+
+class DensePoseUVConfidenceType(Enum):
+ """
+ Statistical model type for confidence learning, possible values:
+ - "iid_iso": statistically independent identically distributed residuals
+ with isotropic covariance
+ - "indep_aniso": statistically independent residuals with anisotropic
+ covariances
+ For details, see:
+ N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
+ Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NeurIPS 2019
+ """
+
+ # fmt: off
+ IID_ISO = "iid_iso"
+ INDEP_ANISO = "indep_aniso"
+ # fmt: on
+
+
+@dataclass
+class DensePoseUVConfidenceConfig:
+ """
+ Configuration options for confidence on UV data
+ """
+
+ enabled: bool = False
+ # lower bound on UV confidences
+ epsilon: float = 0.01
+ type: DensePoseUVConfidenceType = DensePoseUVConfidenceType.IID_ISO
+
+
+@dataclass
+class DensePoseSegmConfidenceConfig:
+ """
+ Configuration options for confidence on segmentation
+ """
+
+ enabled: bool = False
+ # lower bound on confidence values
+ epsilon: float = 0.01
+
+
+@dataclass
+class DensePoseConfidenceModelConfig:
+ """
+ Configuration options for confidence models
+ """
+
+ # confidence for U and V values
+ uv_confidence: DensePoseUVConfidenceConfig
+ # segmentation confidence
+ segm_confidence: DensePoseSegmConfidenceConfig
+
+ @staticmethod
+ def from_cfg(cfg: CfgNode) -> "DensePoseConfidenceModelConfig":
+ return DensePoseConfidenceModelConfig(
+ uv_confidence=DensePoseUVConfidenceConfig(
+ enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.ENABLED,
+ epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.EPSILON,
+ type=DensePoseUVConfidenceType(cfg.MODEL.ROI_DENSEPOSE_HEAD.UV_CONFIDENCE.TYPE),
+ ),
+ segm_confidence=DensePoseSegmConfidenceConfig(
+ enabled=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.ENABLED,
+ epsilon=cfg.MODEL.ROI_DENSEPOSE_HEAD.SEGM_CONFIDENCE.EPSILON,
+ ),
+ )
diff --git a/densepose/modeling/cse/__init__.py b/densepose/modeling/cse/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a2273609cc54fb96d002a49dcd58788060945059
--- /dev/null
+++ b/densepose/modeling/cse/__init__.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from .vertex_direct_embedder import VertexDirectEmbedder
+from .vertex_feature_embedder import VertexFeatureEmbedder
+from .embedder import Embedder
diff --git a/densepose/modeling/cse/embedder.py b/densepose/modeling/cse/embedder.py
new file mode 100644
index 0000000000000000000000000000000000000000..56f5cb9860b13aa38b2069e6b25c3f5f71ab1ecc
--- /dev/null
+++ b/densepose/modeling/cse/embedder.py
@@ -0,0 +1,128 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import logging
+import numpy as np
+import pickle
+from enum import Enum
+from typing import Optional
+import torch
+from torch import nn
+
+from detectron2.config import CfgNode
+from detectron2.utils.file_io import PathManager
+
+from .vertex_direct_embedder import VertexDirectEmbedder
+from .vertex_feature_embedder import VertexFeatureEmbedder
+
+
+class EmbedderType(Enum):
+ """
+ Embedder type which defines how vertices are mapped into the embedding space:
+ - "vertex_direct": direct vertex embedding
+ - "vertex_feature": embedding vertex features
+ """
+
+ VERTEX_DIRECT = "vertex_direct"
+ VERTEX_FEATURE = "vertex_feature"
+
+
+def create_embedder(embedder_spec: CfgNode, embedder_dim: int) -> nn.Module:
+ """
+ Create an embedder based on the provided configuration
+
+ Args:
+ embedder_spec (CfgNode): embedder configuration
+ embedder_dim (int): embedding space dimensionality
+ Return:
+ An embedder instance for the specified configuration
+ Raises ValueError in case of an unexpected embedder type
+ """
+ embedder_type = EmbedderType(embedder_spec.TYPE)
+ if embedder_type == EmbedderType.VERTEX_DIRECT:
+ embedder = VertexDirectEmbedder(
+ num_vertices=embedder_spec.NUM_VERTICES,
+ embed_dim=embedder_dim,
+ )
+ if embedder_spec.INIT_FILE != "":
+ embedder.load(embedder_spec.INIT_FILE)
+ elif embedder_type == EmbedderType.VERTEX_FEATURE:
+ embedder = VertexFeatureEmbedder(
+ num_vertices=embedder_spec.NUM_VERTICES,
+ feature_dim=embedder_spec.FEATURE_DIM,
+ embed_dim=embedder_dim,
+ train_features=embedder_spec.FEATURES_TRAINABLE,
+ )
+ if embedder_spec.INIT_FILE != "":
+ embedder.load(embedder_spec.INIT_FILE)
+ else:
+ raise ValueError(f"Unexpected embedder type {embedder_type}")
+
+ if not embedder_spec.IS_TRAINABLE:
+ embedder.requires_grad_(False)
+
+ return embedder
+
+
+class Embedder(nn.Module):
+ """
+ Embedder module that serves as a container for embedders to use with different
+ meshes. Extends Module to automatically save / load state dict.
+ """
+
+ DEFAULT_MODEL_CHECKPOINT_PREFIX = "roi_heads.embedder."
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize mesh embedders. An embedder for mesh `i` is stored in a submodule
+ "embedder_{i}".
+
+ Args:
+ cfg (CfgNode): configuration options
+ """
+ super(Embedder, self).__init__()
+ self.mesh_names = set()
+ embedder_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
+ logger = logging.getLogger(__name__)
+ for mesh_name, embedder_spec in cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.items():
+ logger.info(f"Adding embedder embedder_{mesh_name} with spec {embedder_spec}")
+ self.add_module(f"embedder_{mesh_name}", create_embedder(embedder_spec, embedder_dim))
+ self.mesh_names.add(mesh_name)
+ if cfg.MODEL.WEIGHTS != "":
+ self.load_from_model_checkpoint(cfg.MODEL.WEIGHTS)
+
+ def load_from_model_checkpoint(self, fpath: str, prefix: Optional[str] = None):
+ if prefix is None:
+ prefix = Embedder.DEFAULT_MODEL_CHECKPOINT_PREFIX
+ state_dict = None
+ if fpath.endswith(".pkl"):
+ with PathManager.open(fpath, "rb") as hFile:
+ state_dict = pickle.load(hFile, encoding="latin1")
+ else:
+ with PathManager.open(fpath, "rb") as hFile:
+ state_dict = torch.load(hFile, map_location=torch.device("cpu"))
+ if state_dict is not None and "model" in state_dict:
+ state_dict_local = {}
+ for key in state_dict["model"]:
+ if key.startswith(prefix):
+ v_key = state_dict["model"][key]
+ if isinstance(v_key, np.ndarray):
+ v_key = torch.from_numpy(v_key)
+ state_dict_local[key[len(prefix) :]] = v_key
+ # non-strict loading to finetune on different meshes
+ self.load_state_dict(state_dict_local, strict=False)
+
+ def forward(self, mesh_name: str) -> torch.Tensor:
+ """
+ Produce vertex embeddings for the specific mesh; vertex embeddings are
+ a tensor of shape [N, D] where:
+ N = number of vertices
+ D = number of dimensions in the embedding space
+ Args:
+ mesh_name (str): name of a mesh for which to obtain vertex embeddings
+ Return:
+ Vertex embeddings, a tensor of shape [N, D]
+ """
+ return getattr(self, f"embedder_{mesh_name}")()
+
+ def has_embeddings(self, mesh_name: str) -> bool:
+ return hasattr(self, f"embedder_{mesh_name}")
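+
+ # Minimal usage sketch (an illustration, not original code), assuming the CSE
+ # config registers an embedder for a hypothetical mesh called "my_mesh":
+ #
+ #     embedder = Embedder(cfg)                  # cfg: detectron2 CfgNode
+ #     if embedder.has_embeddings("my_mesh"):
+ #         vertex_embeddings = embedder("my_mesh")   # tensor of shape [N, D]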
diff --git a/densepose/modeling/cse/utils.py b/densepose/modeling/cse/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..6e70d25df7c8e2c1c408866cf7a6f0156b64114a
--- /dev/null
+++ b/densepose/modeling/cse/utils.py
@@ -0,0 +1,81 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import torch
+from torch.nn import functional as F
+
+
+def squared_euclidean_distance_matrix(pts1: torch.Tensor, pts2: torch.Tensor) -> torch.Tensor:
+ """
+ Get squared Euclidean Distance Matrix
+ Computes pairwise squared Euclidean distances between points
+
+ Args:
+ pts1: Tensor [M x D], M is the number of points, D is feature dimensionality
+ pts2: Tensor [N x D], N is the number of points, D is feature dimensionality
+
+ Return:
+ Tensor [M, N]: matrix of squared Euclidean distances; at index (m, n)
+ it contains || pts1[m] - pts2[n] ||^2
+ """
+ edm = torch.mm(-2 * pts1, pts2.t())
+ edm += (pts1 * pts1).sum(1, keepdim=True) + (pts2 * pts2).sum(1, keepdim=True).t()
+ return edm.contiguous()
+
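+ # The implementation above relies on the expansion
+ #     || a - b ||^2 = ||a||^2 - 2 * <a, b> + ||b||^2
+ # computed for all pairs at once: torch.mm(-2 * pts1, pts2.t()) provides the
+ # cross terms, and the squared norms are broadcast across rows and columns.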
+
+def normalize_embeddings(embeddings: torch.Tensor, epsilon: float = 1e-6) -> torch.Tensor:
+ """
+ Normalize N D-dimensional embedding vectors arranged in a tensor [N, D]
+
+ Args:
+ embeddings (tensor [N, D]): N D-dimensional embedding vectors
+ epsilon (float): minimum value for a vector norm
+ Return:
+ Normalized embeddings (tensor [N, D]), such that L2 vector norms are all equal to 1.
+ """
+ return embeddings / torch.clamp(embeddings.norm(p=None, dim=1, keepdim=True), min=epsilon)
+
+
+def get_closest_vertices_mask_from_ES(
+ E: torch.Tensor,
+ S: torch.Tensor,
+ h: int,
+ w: int,
+ mesh_vertex_embeddings: torch.Tensor,
+ device: torch.device,
+):
+ """
+ Interpolate Embeddings and Segmentations to the size of a given bounding box,
+ and compute closest vertices and the segmentation mask
+
+ Args:
+ E (tensor [1, D, H, W]): D-dimensional embedding vectors for every point of the
+ default-sized box
+ S (tensor [1, 2, H, W]): 2-dimensional segmentation mask for every point of the
+ default-sized box
+ h (int): height of the target bounding box
+ w (int): width of the target bounding box
+ mesh_vertex_embeddings (tensor [N, D]): vertex embeddings for a chosen mesh
+ N is the number of vertices in the mesh, D is feature dimensionality
+ device (torch.device): device to move the tensors to
+ Return:
+ Closest Vertices (tensor [h, w]), int, for every point of the resulting box
+ Segmentation mask (tensor [h, w]), boolean, for every point of the resulting box
+ """
+ embedding_resized = F.interpolate(E, size=(h, w), mode="bilinear")[0].to(device)
+ coarse_segm_resized = F.interpolate(S, size=(h, w), mode="bilinear")[0].to(device)
+ mask = coarse_segm_resized.argmax(0) > 0
+ closest_vertices = torch.zeros(mask.shape, dtype=torch.long, device=device)
+ all_embeddings = embedding_resized[:, mask].t()
+ size_chunk = 10_000 # Chunking to avoid possible OOM
+ edm = []
+ if len(all_embeddings) == 0:
+ return closest_vertices, mask
+ for chunk in range((len(all_embeddings) - 1) // size_chunk + 1):
+ chunk_embeddings = all_embeddings[size_chunk * chunk : size_chunk * (chunk + 1)]
+ edm.append(
+ torch.argmin(
+ squared_euclidean_distance_matrix(chunk_embeddings, mesh_vertex_embeddings), dim=1
+ )
+ )
+ closest_vertices[mask] = torch.cat(edm)
+ return closest_vertices, mask
diff --git a/densepose/modeling/cse/vertex_direct_embedder.py b/densepose/modeling/cse/vertex_direct_embedder.py
new file mode 100644
index 0000000000000000000000000000000000000000..9d802adf10c18beaedb3bd56963366662ba753f7
--- /dev/null
+++ b/densepose/modeling/cse/vertex_direct_embedder.py
@@ -0,0 +1,64 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import pickle
+import torch
+from torch import nn
+
+from detectron2.utils.file_io import PathManager
+
+from .utils import normalize_embeddings
+
+
+class VertexDirectEmbedder(nn.Module):
+ """
+ Class responsible for embedding vertices. Vertex embeddings take
+ the form of a tensor of size [N, D], where
+ N = number of vertices
+ D = number of dimensions in the embedding space
+ """
+
+ def __init__(self, num_vertices: int, embed_dim: int):
+ """
+ Initialize embedder; embeddings are zero-initialized
+
+ Args:
+ num_vertices (int): number of vertices to embed
+ embed_dim (int): number of dimensions in the embedding space
+ """
+ super(VertexDirectEmbedder, self).__init__()
+ self.embeddings = nn.Parameter(torch.Tensor(num_vertices, embed_dim))
+ self.reset_parameters()
+
+ @torch.no_grad()
+ def reset_parameters(self):
+ """
+ Reset embeddings to zero
+ """
+ self.embeddings.zero_()
+
+ def forward(self) -> torch.Tensor:
+ """
+ Produce vertex embeddings, a tensor of shape [N, D] where:
+ N = number of vertices
+ D = number of dimensions in the embedding space
+
+ Return:
+ Full vertex embeddings, a tensor of shape [N, D]
+ """
+ return normalize_embeddings(self.embeddings)
+
+ @torch.no_grad()
+ def load(self, fpath: str):
+ """
+ Load data from a file
+
+ Args:
+ fpath (str): file path to load data from
+ """
+ with PathManager.open(fpath, "rb") as hFile:
+ data = pickle.load(hFile)
+ for name in ["embeddings"]:
+ if name in data:
+ getattr(self, name).copy_(
+ torch.tensor(data[name]).float().to(device=getattr(self, name).device)
+ )
diff --git a/densepose/modeling/cse/vertex_feature_embedder.py b/densepose/modeling/cse/vertex_feature_embedder.py
new file mode 100644
index 0000000000000000000000000000000000000000..e9c7709c1eaec8f3e39441aadbc2b749c67874f2
--- /dev/null
+++ b/densepose/modeling/cse/vertex_feature_embedder.py
@@ -0,0 +1,75 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import pickle
+import torch
+from torch import nn
+
+from detectron2.utils.file_io import PathManager
+
+from .utils import normalize_embeddings
+
+
+class VertexFeatureEmbedder(nn.Module):
+ """
+ Class responsible for embedding vertex features. Mapping from
+ feature space to the embedding space is a tensor of size [K, D], where
+ K = number of dimensions in the feature space
+ D = number of dimensions in the embedding space
+ Vertex features is a tensor of size [N, K], where
+ N = number of vertices
+ K = number of dimensions in the feature space
+ Vertex embeddings are computed as F * E = tensor of size [N, D]
+ """
+
+ def __init__(
+ self, num_vertices: int, feature_dim: int, embed_dim: int, train_features: bool = False
+ ):
+ """
+ Initialize embedder; features and embeddings are zero-initialized
+
+ Args:
+ num_vertices (int): number of vertices to embed
+ feature_dim (int): number of dimensions in the feature space
+ embed_dim (int): number of dimensions in the embedding space
+ train_features (bool): determines whether vertex features should
+ be trained (default: False)
+ """
+ super(VertexFeatureEmbedder, self).__init__()
+ if train_features:
+ self.features = nn.Parameter(torch.Tensor(num_vertices, feature_dim))
+ else:
+ self.register_buffer("features", torch.Tensor(num_vertices, feature_dim))
+ self.embeddings = nn.Parameter(torch.Tensor(feature_dim, embed_dim))
+ self.reset_parameters()
+
+ @torch.no_grad()
+ def reset_parameters(self):
+ self.features.zero_()
+ self.embeddings.zero_()
+
+ def forward(self) -> torch.Tensor:
+ """
+ Produce vertex embeddings, a tensor of shape [N, D] where:
+ N = number of vertices
+ D = number of dimensions in the embedding space
+
+ Return:
+ Full vertex embeddings, a tensor of shape [N, D]
+ """
+ return normalize_embeddings(torch.mm(self.features, self.embeddings))
+
+ @torch.no_grad()
+ def load(self, fpath: str):
+ """
+ Load data from a file
+
+ Args:
+ fpath (str): file path to load data from
+ """
+ with PathManager.open(fpath, "rb") as hFile:
+ data = pickle.load(hFile)
+ for name in ["features", "embeddings"]:
+ if name in data:
+ getattr(self, name).copy_(
+ torch.tensor(data[name]).float().to(device=getattr(self, name).device)
+ )
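+
+ # Note (added for clarity): vertex embeddings here are the product F @ E of
+ # the [N, K] feature matrix and the [K, D] feature-to-embedding map,
+ # L2-normalized per vertex; with train_features=False only E receives
+ # gradients while the features F remain a fixed buffer.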
diff --git a/densepose/modeling/densepose_checkpoint.py b/densepose/modeling/densepose_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..8c2b4f2e2cc9c6c798cf1bdb9c38dedc84058bd5
--- /dev/null
+++ b/densepose/modeling/densepose_checkpoint.py
@@ -0,0 +1,35 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from collections import OrderedDict
+
+from detectron2.checkpoint import DetectionCheckpointer
+
+
+def _rename_HRNet_weights(weights):
+ # We detect and rename HRNet weights for DensePose. 1956 and 1716 are values that are
+ # common to all HRNet pretrained weights, and should be enough to accurately identify them
+ if (
+ len(weights["model"].keys()) == 1956
+ and len([k for k in weights["model"].keys() if k.startswith("stage")]) == 1716
+ ):
+ hrnet_weights = OrderedDict()
+ for k in weights["model"].keys():
+ hrnet_weights["backbone.bottom_up." + str(k)] = weights["model"][k]
+ return {"model": hrnet_weights}
+ else:
+ return weights
+
+
+class DensePoseCheckpointer(DetectionCheckpointer):
+ """
+ Same as :class:`DetectionCheckpointer`, but is able to handle HRNet weights
+ """
+
+ def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
+ super().__init__(model, save_dir, save_to_disk=save_to_disk, **checkpointables)
+
+ def _load_file(self, filename: str) -> object:
+ """
+ Adding hrnet support
+ """
+ weights = super()._load_file(filename)
+ return _rename_HRNet_weights(weights)
diff --git a/densepose/modeling/filter.py b/densepose/modeling/filter.py
new file mode 100644
index 0000000000000000000000000000000000000000..4682b225dbba1ce330c8f4ed6ad14dafcc935e5c
--- /dev/null
+++ b/densepose/modeling/filter.py
@@ -0,0 +1,94 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import List
+import torch
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+from detectron2.structures.boxes import matched_pairwise_iou
+
+
+class DensePoseDataFilter:
+ def __init__(self, cfg: CfgNode):
+ self.iou_threshold = cfg.MODEL.ROI_DENSEPOSE_HEAD.FG_IOU_THRESHOLD
+ self.keep_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
+
+ @torch.no_grad()
+ def __call__(self, features: List[torch.Tensor], proposals_with_targets: List[Instances]):
+ """
+ Filters proposals with targets to keep only the ones relevant for
+ DensePose training
+
+ Args:
+ features (list[Tensor]): input data as a list of features,
+ each feature is a tensor. Axis 0 represents the number of
+ images `N` in the input data; axes 1-3 are channels,
+ height, and width, which may vary between features
+ (e.g., if a feature pyramid is used).
+ proposals_with_targets (list[Instances]): length `N` list of
+ `Instances`. The i-th `Instances` contains instances
+ (proposals, GT) for the i-th input image,
+ Returns:
+ list[Tensor]: filtered features
+ list[Instances]: filtered proposals
+ """
+ proposals_filtered = []
+ # TODO: the commented out code was supposed to correctly deal with situations
+ # where no valid DensePose GT is available for certain images. The corresponding
+ # image features were sliced and proposals were filtered. This led to performance
+ # deterioration, both in terms of runtime and in terms of evaluation results.
+ #
+ # feature_mask = torch.ones(
+ # len(proposals_with_targets),
+ # dtype=torch.bool,
+ # device=features[0].device if len(features) > 0 else torch.device("cpu"),
+ # )
+ for i, proposals_per_image in enumerate(proposals_with_targets):
+ if not proposals_per_image.has("gt_densepose") and (
+ not proposals_per_image.has("gt_masks") or not self.keep_masks
+ ):
+ # feature_mask[i] = 0
+ continue
+ gt_boxes = proposals_per_image.gt_boxes
+ est_boxes = proposals_per_image.proposal_boxes
+ # apply match threshold for densepose head
+ iou = matched_pairwise_iou(gt_boxes, est_boxes)
+ iou_select = iou > self.iou_threshold
+ proposals_per_image = proposals_per_image[iou_select] # pyre-ignore[6]
+
+ N_gt_boxes = len(proposals_per_image.gt_boxes)
+ assert N_gt_boxes == len(proposals_per_image.proposal_boxes), (
+ f"The number of GT boxes {N_gt_boxes} is different from the "
+ f"number of proposal boxes {len(proposals_per_image.proposal_boxes)}"
+ )
+ # filter out any target without suitable annotation
+ if self.keep_masks:
+ gt_masks = (
+ proposals_per_image.gt_masks
+ if hasattr(proposals_per_image, "gt_masks")
+ else [None] * N_gt_boxes
+ )
+ else:
+ gt_masks = [None] * N_gt_boxes
+ gt_densepose = (
+ proposals_per_image.gt_densepose
+ if hasattr(proposals_per_image, "gt_densepose")
+ else [None] * N_gt_boxes
+ )
+ assert len(gt_masks) == N_gt_boxes
+ assert len(gt_densepose) == N_gt_boxes
+ selected_indices = [
+ i
+ for i, (dp_target, mask_target) in enumerate(zip(gt_densepose, gt_masks))
+ if (dp_target is not None) or (mask_target is not None)
+ ]
+ # if not len(selected_indices):
+ # feature_mask[i] = 0
+ # continue
+ if len(selected_indices) != N_gt_boxes:
+ proposals_per_image = proposals_per_image[selected_indices] # pyre-ignore[6]
+ assert len(proposals_per_image.gt_boxes) == len(proposals_per_image.proposal_boxes)
+ proposals_filtered.append(proposals_per_image)
+ # features_filtered = [feature[feature_mask] for feature in features]
+ # return features_filtered, proposals_filtered
+ return features, proposals_filtered
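+
+ # Summary (added for clarity): the filter keeps only proposals whose IoU with
+ # the matched ground-truth box exceeds FG_IOU_THRESHOLD and that carry either
+ # DensePose annotations or (if COARSE_SEGM_TRAINED_BY_MASKS is set) mask
+ # annotations; the feature tensors are currently passed through unchanged.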
diff --git a/densepose/modeling/hrfpn.py b/densepose/modeling/hrfpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..08ec420fa24e1e8f5074baf2e9ae737aff2ab12e
--- /dev/null
+++ b/densepose/modeling/hrfpn.py
@@ -0,0 +1,182 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""
+MIT License
+Copyright (c) 2019 Microsoft
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
+"""
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.backbone import BACKBONE_REGISTRY
+from detectron2.modeling.backbone.backbone import Backbone
+
+from .hrnet import build_pose_hrnet_backbone
+
+
+class HRFPN(Backbone):
+ """HRFPN (High Resolution Feature Pyramids)
+ Transforms outputs of HRNet backbone so they are suitable for the ROI_heads
+ arXiv: https://arxiv.org/abs/1904.04514
+ Adapted from https://github.com/open-mmlab/mmdetection/blob/master/mmdet/models/necks/hrfpn.py
+ Args:
+ bottom_up (Backbone): HRNet backbone that produces the input feature maps
+ in_features (list): names of the input features (output of HRNet)
+ in_channels (list): number of channels for each branch
+ out_channels (int): output channels of feature pyramids
+ n_out_features (int): number of output stages
+ pooling (str): pooling for generating feature pyramids (from {MAX, AVG})
+ share_conv (bool): Have one conv per output, or share one with all the outputs
+ """
+
+ def __init__(
+ self,
+ bottom_up,
+ in_features,
+ n_out_features,
+ in_channels,
+ out_channels,
+ pooling="AVG",
+ share_conv=False,
+ ):
+ super(HRFPN, self).__init__()
+ assert isinstance(in_channels, list)
+ self.bottom_up = bottom_up
+ self.in_features = in_features
+ self.n_out_features = n_out_features
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.num_ins = len(in_channels)
+ self.share_conv = share_conv
+
+ if self.share_conv:
+ self.fpn_conv = nn.Conv2d(
+ in_channels=out_channels, out_channels=out_channels, kernel_size=3, padding=1
+ )
+ else:
+ self.fpn_conv = nn.ModuleList()
+ for _ in range(self.n_out_features):
+ self.fpn_conv.append(
+ nn.Conv2d(
+ in_channels=out_channels,
+ out_channels=out_channels,
+ kernel_size=3,
+ padding=1,
+ )
+ )
+
+ # Custom change: Replaces a simple bilinear interpolation
+ self.interp_conv = nn.ModuleList()
+ for i in range(len(self.in_features)):
+ self.interp_conv.append(
+ nn.Sequential(
+ nn.ConvTranspose2d(
+ in_channels=in_channels[i],
+ out_channels=in_channels[i],
+ kernel_size=4,
+ stride=2**i,
+ padding=0,
+ output_padding=0,
+ bias=False,
+ ),
+ nn.BatchNorm2d(in_channels[i], momentum=0.1),
+ nn.ReLU(inplace=True),
+ )
+ )
+
+ # Custom change: Replaces a couple (reduction conv + pooling) by one conv
+ self.reduction_pooling_conv = nn.ModuleList()
+ for i in range(self.n_out_features):
+ self.reduction_pooling_conv.append(
+ nn.Sequential(
+ nn.Conv2d(sum(in_channels), out_channels, kernel_size=2**i, stride=2**i),
+ nn.BatchNorm2d(out_channels, momentum=0.1),
+ nn.ReLU(inplace=True),
+ )
+ )
+
+ if pooling == "MAX":
+ self.pooling = F.max_pool2d
+ else:
+ self.pooling = F.avg_pool2d
+
+ self._out_features = []
+ self._out_feature_channels = {}
+ self._out_feature_strides = {}
+
+ for i in range(self.n_out_features):
+ self._out_features.append("p%d" % (i + 1))
+ self._out_feature_channels.update({self._out_features[-1]: self.out_channels})
+ self._out_feature_strides.update({self._out_features[-1]: 2 ** (i + 2)})
+
+ # default init_weights for conv(msra) and norm in ConvModule
+ def init_weights(self):
+ for m in self.modules():
+ if isinstance(m, nn.Conv2d):
+ nn.init.kaiming_normal_(m.weight, a=1)
+ nn.init.constant_(m.bias, 0)
+
+ def forward(self, inputs):
+ bottom_up_features = self.bottom_up(inputs)
+ assert len(bottom_up_features) == len(self.in_features)
+ inputs = [bottom_up_features[f] for f in self.in_features]
+
+ outs = []
+ for i in range(len(inputs)):
+ outs.append(self.interp_conv[i](inputs[i]))
+ shape_2 = min(o.shape[2] for o in outs)
+ shape_3 = min(o.shape[3] for o in outs)
+ out = torch.cat([o[:, :, :shape_2, :shape_3] for o in outs], dim=1)
+ outs = []
+ for i in range(self.n_out_features):
+ outs.append(self.reduction_pooling_conv[i](out))
+ for i in range(len(outs)): # Make shapes consistent
+ outs[-1 - i] = outs[-1 - i][
+ :, :, : outs[-1].shape[2] * 2**i, : outs[-1].shape[3] * 2**i
+ ]
+ outputs = []
+ for i in range(len(outs)):
+ if self.share_conv:
+ outputs.append(self.fpn_conv(outs[i]))
+ else:
+ outputs.append(self.fpn_conv[i](outs[i]))
+
+ assert len(self._out_features) == len(outputs)
+ return dict(zip(self._out_features, outputs))
+
+
+@BACKBONE_REGISTRY.register()
+def build_hrfpn_backbone(cfg, input_shape: ShapeSpec) -> HRFPN:
+
+ in_channels = cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS
+ in_features = ["p%d" % (i + 1) for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES)]
+ n_out_features = len(cfg.MODEL.ROI_HEADS.IN_FEATURES)
+ out_channels = cfg.MODEL.HRNET.HRFPN.OUT_CHANNELS
+ hrnet = build_pose_hrnet_backbone(cfg, input_shape)
+ hrfpn = HRFPN(
+ hrnet,
+ in_features,
+ n_out_features,
+ in_channels,
+ out_channels,
+ pooling="AVG",
+ share_conv=False,
+ )
+
+ return hrfpn
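+
+ # Note (added for clarity): HRFPN upsamples each HRNet branch with a
+ # transposed convolution, concatenates the branches, reduces the result to
+ # out_channels at every pyramid level with a strided convolution, and refines
+ # each level with a 3x3 conv, producing features p1..pN with strides 4, 8, ...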
diff --git a/densepose/modeling/hrnet.py b/densepose/modeling/hrnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca2467107e8e5a50167de38ef6827fac646d1245
--- /dev/null
+++ b/densepose/modeling/hrnet.py
@@ -0,0 +1,474 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# ------------------------------------------------------------------------------
+# Copyright (c) Microsoft
+# Licensed under the MIT License.
+# Written by Bin Xiao (leoxiaobin@gmail.com)
+# Modified by Bowen Cheng (bcheng9@illinois.edu)
+# Adapted from https://github.com/HRNet/Higher-HRNet-Human-Pose-Estimation/blob/master/lib/models/pose_higher_hrnet.py # noqa
+# ------------------------------------------------------------------------------
+
+from __future__ import absolute_import, division, print_function
+import logging
+import torch.nn as nn
+
+from detectron2.layers import ShapeSpec
+from detectron2.modeling.backbone import BACKBONE_REGISTRY
+from detectron2.modeling.backbone.backbone import Backbone
+
+BN_MOMENTUM = 0.1
+logger = logging.getLogger(__name__)
+
+__all__ = ["build_pose_hrnet_backbone", "PoseHigherResolutionNet"]
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+ """3x3 convolution with padding"""
+ return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False)
+
+
+class BasicBlock(nn.Module):
+ expansion = 1
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(BasicBlock, self).__init__()
+ self.conv1 = conv3x3(inplanes, planes, stride)
+ self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=True)
+ self.conv2 = conv3x3(planes, planes)
+ self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class Bottleneck(nn.Module):
+ expansion = 4
+
+ def __init__(self, inplanes, planes, stride=1, downsample=None):
+ super(Bottleneck, self).__init__()
+ self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(planes, momentum=BN_MOMENTUM)
+ self.conv3 = nn.Conv2d(planes, planes * self.expansion, kernel_size=1, bias=False)
+ self.bn3 = nn.BatchNorm2d(planes * self.expansion, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=True)
+ self.downsample = downsample
+ self.stride = stride
+
+ def forward(self, x):
+ residual = x
+
+ out = self.conv1(x)
+ out = self.bn1(out)
+ out = self.relu(out)
+
+ out = self.conv2(out)
+ out = self.bn2(out)
+ out = self.relu(out)
+
+ out = self.conv3(out)
+ out = self.bn3(out)
+
+ if self.downsample is not None:
+ residual = self.downsample(x)
+
+ out += residual
+ out = self.relu(out)
+
+ return out
+
+
+class HighResolutionModule(nn.Module):
+ """HighResolutionModule
+    Building block of the PoseHigherResolutionNet (see below)
+ arXiv: https://arxiv.org/abs/1908.10357
+ Args:
+        num_branches (int): number of branches of the module
+        blocks: block class used by the module (`BasicBlock` or `Bottleneck`)
+        num_blocks (list): number of blocks per branch
+        num_inchannels (list): number of input channels per branch
+        num_channels (list): number of channels of each branch
+ multi_scale_output (bool): only used by the last module of PoseHigherResolutionNet
+ """
+
+ def __init__(
+ self,
+ num_branches,
+ blocks,
+ num_blocks,
+ num_inchannels,
+ num_channels,
+ multi_scale_output=True,
+ ):
+ super(HighResolutionModule, self).__init__()
+ self._check_branches(num_branches, blocks, num_blocks, num_inchannels, num_channels)
+
+ self.num_inchannels = num_inchannels
+ self.num_branches = num_branches
+
+ self.multi_scale_output = multi_scale_output
+
+ self.branches = self._make_branches(num_branches, blocks, num_blocks, num_channels)
+ self.fuse_layers = self._make_fuse_layers()
+ self.relu = nn.ReLU(True)
+
+ def _check_branches(self, num_branches, blocks, num_blocks, num_inchannels, num_channels):
+ if num_branches != len(num_blocks):
+ error_msg = "NUM_BRANCHES({}) <> NUM_BLOCKS({})".format(num_branches, len(num_blocks))
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ if num_branches != len(num_channels):
+ error_msg = "NUM_BRANCHES({}) <> NUM_CHANNELS({})".format(
+ num_branches, len(num_channels)
+ )
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ if num_branches != len(num_inchannels):
+ error_msg = "NUM_BRANCHES({}) <> NUM_INCHANNELS({})".format(
+ num_branches, len(num_inchannels)
+ )
+ logger.error(error_msg)
+ raise ValueError(error_msg)
+
+ def _make_one_branch(self, branch_index, block, num_blocks, num_channels, stride=1):
+ downsample = None
+ if (
+ stride != 1
+ or self.num_inchannels[branch_index] != num_channels[branch_index] * block.expansion
+ ):
+ downsample = nn.Sequential(
+ nn.Conv2d(
+ self.num_inchannels[branch_index],
+ num_channels[branch_index] * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False,
+ ),
+ nn.BatchNorm2d(num_channels[branch_index] * block.expansion, momentum=BN_MOMENTUM),
+ )
+
+ layers = []
+ layers.append(
+ block(self.num_inchannels[branch_index], num_channels[branch_index], stride, downsample)
+ )
+ self.num_inchannels[branch_index] = num_channels[branch_index] * block.expansion
+ for _ in range(1, num_blocks[branch_index]):
+ layers.append(block(self.num_inchannels[branch_index], num_channels[branch_index]))
+
+ return nn.Sequential(*layers)
+
+ def _make_branches(self, num_branches, block, num_blocks, num_channels):
+ branches = []
+
+ for i in range(num_branches):
+ branches.append(self._make_one_branch(i, block, num_blocks, num_channels))
+
+ return nn.ModuleList(branches)
+
+ def _make_fuse_layers(self):
+ if self.num_branches == 1:
+ return None
+
+ num_branches = self.num_branches
+ num_inchannels = self.num_inchannels
+ fuse_layers = []
+ for i in range(num_branches if self.multi_scale_output else 1):
+ fuse_layer = []
+ for j in range(num_branches):
+ if j > i:
+ fuse_layer.append(
+ nn.Sequential(
+ nn.Conv2d(num_inchannels[j], num_inchannels[i], 1, 1, 0, bias=False),
+ nn.BatchNorm2d(num_inchannels[i]),
+ nn.Upsample(scale_factor=2 ** (j - i), mode="nearest"),
+ )
+ )
+ elif j == i:
+ fuse_layer.append(None)
+ else:
+ conv3x3s = []
+ for k in range(i - j):
+ if k == i - j - 1:
+ num_outchannels_conv3x3 = num_inchannels[i]
+ conv3x3s.append(
+ nn.Sequential(
+ nn.Conv2d(
+ num_inchannels[j],
+ num_outchannels_conv3x3,
+ 3,
+ 2,
+ 1,
+ bias=False,
+ ),
+ nn.BatchNorm2d(num_outchannels_conv3x3),
+ )
+ )
+ else:
+ num_outchannels_conv3x3 = num_inchannels[j]
+ conv3x3s.append(
+ nn.Sequential(
+ nn.Conv2d(
+ num_inchannels[j],
+ num_outchannels_conv3x3,
+ 3,
+ 2,
+ 1,
+ bias=False,
+ ),
+ nn.BatchNorm2d(num_outchannels_conv3x3),
+ nn.ReLU(True),
+ )
+ )
+ fuse_layer.append(nn.Sequential(*conv3x3s))
+ fuse_layers.append(nn.ModuleList(fuse_layer))
+
+ return nn.ModuleList(fuse_layers)
+
+ def get_num_inchannels(self):
+ return self.num_inchannels
+
+ def forward(self, x):
+ if self.num_branches == 1:
+ return [self.branches[0](x[0])]
+
+ for i in range(self.num_branches):
+ x[i] = self.branches[i](x[i])
+
+ x_fuse = []
+
+ for i in range(len(self.fuse_layers)):
+ y = x[0] if i == 0 else self.fuse_layers[i][0](x[0])
+ for j in range(1, self.num_branches):
+ if i == j:
+ y = y + x[j]
+ else:
+ z = self.fuse_layers[i][j](x[j])[:, :, : y.shape[2], : y.shape[3]]
+ y = y + z
+ x_fuse.append(self.relu(y))
+
+ return x_fuse
+
+
+blocks_dict = {"BASIC": BasicBlock, "BOTTLENECK": Bottleneck}
+
+
+class PoseHigherResolutionNet(Backbone):
+ """PoseHigherResolutionNet
+ Composed of several HighResolutionModule tied together with ConvNets
+ Adapted from the GitHub version to fit with HRFPN and the Detectron2 infrastructure
+ arXiv: https://arxiv.org/abs/1908.10357
+ """
+
+ def __init__(self, cfg, **kwargs):
+ self.inplanes = cfg.MODEL.HRNET.STEM_INPLANES
+ super(PoseHigherResolutionNet, self).__init__()
+
+ # stem net
+ self.conv1 = nn.Conv2d(3, 64, kernel_size=3, stride=2, padding=1, bias=False)
+ self.bn1 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
+ self.conv2 = nn.Conv2d(64, 64, kernel_size=3, stride=2, padding=1, bias=False)
+ self.bn2 = nn.BatchNorm2d(64, momentum=BN_MOMENTUM)
+ self.relu = nn.ReLU(inplace=True)
+ self.layer1 = self._make_layer(Bottleneck, 64, 4)
+
+ self.stage2_cfg = cfg.MODEL.HRNET.STAGE2
+ num_channels = self.stage2_cfg.NUM_CHANNELS
+ block = blocks_dict[self.stage2_cfg.BLOCK]
+ num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
+ self.transition1 = self._make_transition_layer([256], num_channels)
+ self.stage2, pre_stage_channels = self._make_stage(self.stage2_cfg, num_channels)
+
+ self.stage3_cfg = cfg.MODEL.HRNET.STAGE3
+ num_channels = self.stage3_cfg.NUM_CHANNELS
+ block = blocks_dict[self.stage3_cfg.BLOCK]
+ num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
+ self.transition2 = self._make_transition_layer(pre_stage_channels, num_channels)
+ self.stage3, pre_stage_channels = self._make_stage(self.stage3_cfg, num_channels)
+
+ self.stage4_cfg = cfg.MODEL.HRNET.STAGE4
+ num_channels = self.stage4_cfg.NUM_CHANNELS
+ block = blocks_dict[self.stage4_cfg.BLOCK]
+ num_channels = [num_channels[i] * block.expansion for i in range(len(num_channels))]
+ self.transition3 = self._make_transition_layer(pre_stage_channels, num_channels)
+ self.stage4, pre_stage_channels = self._make_stage(
+ self.stage4_cfg, num_channels, multi_scale_output=True
+ )
+
+ self._out_features = []
+ self._out_feature_channels = {}
+ self._out_feature_strides = {}
+
+ for i in range(cfg.MODEL.HRNET.STAGE4.NUM_BRANCHES):
+ self._out_features.append("p%d" % (i + 1))
+ self._out_feature_channels.update(
+ {self._out_features[-1]: cfg.MODEL.HRNET.STAGE4.NUM_CHANNELS[i]}
+ )
+ self._out_feature_strides.update({self._out_features[-1]: 1})
+
+ def _get_deconv_cfg(self, deconv_kernel):
+ if deconv_kernel == 4:
+ padding = 1
+ output_padding = 0
+ elif deconv_kernel == 3:
+ padding = 1
+ output_padding = 1
+ elif deconv_kernel == 2:
+ padding = 0
+            output_padding = 0
+        else:
+            raise ValueError(f"Unsupported deconv kernel size: {deconv_kernel}")
+
+ return deconv_kernel, padding, output_padding
+
+ def _make_transition_layer(self, num_channels_pre_layer, num_channels_cur_layer):
+ num_branches_cur = len(num_channels_cur_layer)
+ num_branches_pre = len(num_channels_pre_layer)
+
+ transition_layers = []
+ for i in range(num_branches_cur):
+ if i < num_branches_pre:
+ if num_channels_cur_layer[i] != num_channels_pre_layer[i]:
+ transition_layers.append(
+ nn.Sequential(
+ nn.Conv2d(
+ num_channels_pre_layer[i],
+ num_channels_cur_layer[i],
+ 3,
+ 1,
+ 1,
+ bias=False,
+ ),
+ nn.BatchNorm2d(num_channels_cur_layer[i]),
+ nn.ReLU(inplace=True),
+ )
+ )
+ else:
+ transition_layers.append(None)
+ else:
+ conv3x3s = []
+ for j in range(i + 1 - num_branches_pre):
+ inchannels = num_channels_pre_layer[-1]
+ outchannels = (
+ num_channels_cur_layer[i] if j == i - num_branches_pre else inchannels
+ )
+ conv3x3s.append(
+ nn.Sequential(
+ nn.Conv2d(inchannels, outchannels, 3, 2, 1, bias=False),
+ nn.BatchNorm2d(outchannels),
+ nn.ReLU(inplace=True),
+ )
+ )
+ transition_layers.append(nn.Sequential(*conv3x3s))
+
+ return nn.ModuleList(transition_layers)
+
+ def _make_layer(self, block, planes, blocks, stride=1):
+ downsample = None
+ if stride != 1 or self.inplanes != planes * block.expansion:
+ downsample = nn.Sequential(
+ nn.Conv2d(
+ self.inplanes,
+ planes * block.expansion,
+ kernel_size=1,
+ stride=stride,
+ bias=False,
+ ),
+ nn.BatchNorm2d(planes * block.expansion, momentum=BN_MOMENTUM),
+ )
+
+ layers = []
+ layers.append(block(self.inplanes, planes, stride, downsample))
+ self.inplanes = planes * block.expansion
+ for _ in range(1, blocks):
+ layers.append(block(self.inplanes, planes))
+
+ return nn.Sequential(*layers)
+
+ def _make_stage(self, layer_config, num_inchannels, multi_scale_output=True):
+ num_modules = layer_config["NUM_MODULES"]
+ num_branches = layer_config["NUM_BRANCHES"]
+ num_blocks = layer_config["NUM_BLOCKS"]
+ num_channels = layer_config["NUM_CHANNELS"]
+ block = blocks_dict[layer_config["BLOCK"]]
+
+ modules = []
+ for i in range(num_modules):
+            # multi_scale_output is only used by the last module
+ if not multi_scale_output and i == num_modules - 1:
+ reset_multi_scale_output = False
+ else:
+ reset_multi_scale_output = True
+
+ modules.append(
+ HighResolutionModule(
+ num_branches,
+ block,
+ num_blocks,
+ num_inchannels,
+ num_channels,
+ reset_multi_scale_output,
+ )
+ )
+ num_inchannels = modules[-1].get_num_inchannels()
+
+ return nn.Sequential(*modules), num_inchannels
+
+ def forward(self, x):
+ x = self.conv1(x)
+ x = self.bn1(x)
+ x = self.relu(x)
+ x = self.conv2(x)
+ x = self.bn2(x)
+ x = self.relu(x)
+ x = self.layer1(x)
+
+ x_list = []
+ for i in range(self.stage2_cfg.NUM_BRANCHES):
+ if self.transition1[i] is not None:
+ x_list.append(self.transition1[i](x))
+ else:
+ x_list.append(x)
+ y_list = self.stage2(x_list)
+
+ x_list = []
+ for i in range(self.stage3_cfg.NUM_BRANCHES):
+ if self.transition2[i] is not None:
+ x_list.append(self.transition2[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage3(x_list)
+
+ x_list = []
+ for i in range(self.stage4_cfg.NUM_BRANCHES):
+ if self.transition3[i] is not None:
+ x_list.append(self.transition3[i](y_list[-1]))
+ else:
+ x_list.append(y_list[i])
+ y_list = self.stage4(x_list)
+
+ assert len(self._out_features) == len(y_list)
+ return dict(zip(self._out_features, y_list)) # final_outputs
+
+
+@BACKBONE_REGISTRY.register()
+def build_pose_hrnet_backbone(cfg, input_shape: ShapeSpec):
+ model = PoseHigherResolutionNet(cfg)
+ return model
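+
+
+# --- Illustrative config sketch (added for documentation; not part of the
+# upstream module). PoseHigherResolutionNet reads the cfg.MODEL.HRNET keys
+# listed below; the concrete values are assumptions (roughly HRNet-W32), shown
+# only to document the expected structure.
+#
+#   MODEL.HRNET.STEM_INPLANES: 64
+#   MODEL.HRNET.STAGE2: NUM_MODULES: 1, NUM_BRANCHES: 2, BLOCK: BASIC,
+#                       NUM_BLOCKS: [4, 4], NUM_CHANNELS: [32, 64]
+#   MODEL.HRNET.STAGE3: NUM_MODULES: 4, NUM_BRANCHES: 3, BLOCK: BASIC,
+#                       NUM_BLOCKS: [4, 4, 4], NUM_CHANNELS: [32, 64, 128]
+#   MODEL.HRNET.STAGE4: NUM_MODULES: 3, NUM_BRANCHES: 4, BLOCK: BASIC,
+#                       NUM_BLOCKS: [4, 4, 4, 4], NUM_CHANNELS: [32, 64, 128, 256]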
diff --git a/densepose/modeling/inference.py b/densepose/modeling/inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..81049649edddb23aeebeac4085514da838f1463b
--- /dev/null
+++ b/densepose/modeling/inference.py
@@ -0,0 +1,44 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from dataclasses import fields
+from typing import Any, List
+import torch
+
+from detectron2.structures import Instances
+
+
+def densepose_inference(densepose_predictor_output: Any, detections: List[Instances]) -> None:
+ """
+ Splits DensePose predictor outputs into chunks, each chunk corresponds to
+ detections on one image. Predictor output chunks are stored in `pred_densepose`
+ attribute of the corresponding `Instances` object.
+
+ Args:
+ densepose_predictor_output: a dataclass instance (can be of different types,
+ depending on predictor used for inference). Each field can be `None`
+ (if the corresponding output was not inferred) or a tensor of size
+ [N, ...], where N = N_1 + N_2 + .. + N_k is a total number of
+ detections on all images, N_1 is the number of detections on image 1,
+ N_2 is the number of detections on image 2, etc.
+        detections: a list of objects of type `Instances`, k-th object corresponds
+ to detections on k-th image.
+ """
+ k = 0
+ for detection_i in detections:
+ if densepose_predictor_output is None:
+ # don't add `pred_densepose` attribute
+ continue
+        n_i = len(detection_i)
+
+ PredictorOutput = type(densepose_predictor_output)
+ output_i_dict = {}
+ # we assume here that `densepose_predictor_output` is a dataclass object
+ for field in fields(densepose_predictor_output):
+ field_value = getattr(densepose_predictor_output, field.name)
+ # slice tensors
+ if isinstance(field_value, torch.Tensor):
+ output_i_dict[field.name] = field_value[k : k + n_i]
+ # leave others as is
+ else:
+ output_i_dict[field.name] = field_value
+ detection_i.pred_densepose = PredictorOutput(**output_i_dict)
+ k += n_i
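+
+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): how the splitting behaves for a 2-image batch with 3 and 1
+# detections. `_ToyOutput` is a made-up stand-in for a real predictor output
+# dataclass; real outputs also define `__len__`, which detectron2's `Instances`
+# requires when a field is attached to it.
+from dataclasses import dataclass
+
+
+@dataclass
+class _ToyOutput:
+    coarse_segm: torch.Tensor  # [N, ...]; tensor fields are sliced per image
+    label: str = "toy"  # non-tensor fields are copied as-is
+
+    def __len__(self) -> int:
+        return self.coarse_segm.size(0)
+
+
+def _example_densepose_inference() -> List[Instances]:
+    detections = [Instances((480, 640)), Instances((480, 640))]
+    detections[0].scores = torch.zeros(3)  # 3 detections on image 1
+    detections[1].scores = torch.zeros(1)  # 1 detection on image 2
+    outputs = _ToyOutput(coarse_segm=torch.zeros(4, 2, 7, 7))  # 4 = 3 + 1
+    densepose_inference(outputs, detections)
+    # detections[0].pred_densepose.coarse_segm now has shape [3, 2, 7, 7]
+    # detections[1].pred_densepose.coarse_segm now has shape [1, 2, 7, 7]
+    return detections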
diff --git a/densepose/modeling/losses/__init__.py b/densepose/modeling/losses/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5c593700e7274ea9cbaf8f4a52e8a229ef4c5a1
--- /dev/null
+++ b/densepose/modeling/losses/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .chart import DensePoseChartLoss
+from .chart_with_confidences import DensePoseChartWithConfidenceLoss
+from .cse import DensePoseCseLoss
+from .registry import DENSEPOSE_LOSS_REGISTRY
+
+
+__all__ = [
+ "DensePoseChartLoss",
+ "DensePoseChartWithConfidenceLoss",
+ "DensePoseCseLoss",
+ "DENSEPOSE_LOSS_REGISTRY",
+]
diff --git a/densepose/modeling/losses/chart.py b/densepose/modeling/losses/chart.py
new file mode 100644
index 0000000000000000000000000000000000000000..02cdae8db3a41fc197be7fcc792c7119c7a21726
--- /dev/null
+++ b/densepose/modeling/losses/chart.py
@@ -0,0 +1,291 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any, List
+import torch
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from .mask_or_segm import MaskOrSegmentationLoss
+from .registry import DENSEPOSE_LOSS_REGISTRY
+from .utils import (
+ BilinearInterpolationHelper,
+ ChartBasedAnnotationsAccumulator,
+ LossDict,
+ extract_packed_annotations_from_matches,
+)
+
+
+@DENSEPOSE_LOSS_REGISTRY.register()
+class DensePoseChartLoss:
+ """
+ DensePose loss for chart-based training. A mesh is split into charts,
+ each chart is given a label (I) and parametrized by 2 coordinates referred to
+ as U and V. Ground truth consists of a number of points annotated with
+ I, U and V values and coarse segmentation S defined for all pixels of the
+ object bounding box. In some cases (see `COARSE_SEGM_TRAINED_BY_MASKS`),
+ semantic segmentation annotations can be used as ground truth inputs as well.
+
+ Estimated values are tensors:
+ * U coordinates, tensor of shape [N, C, S, S]
+ * V coordinates, tensor of shape [N, C, S, S]
+ * fine segmentation estimates, tensor of shape [N, C, S, S] with raw unnormalized
+ scores for each fine segmentation label at each location
+ * coarse segmentation estimates, tensor of shape [N, D, S, S] with raw unnormalized
+ scores for each coarse segmentation label at each location
+ where N is the number of detections, C is the number of fine segmentation
+ labels, S is the estimate size ( = width = height) and D is the number of
+ coarse segmentation channels.
+
+ The losses are:
+ * regression (smooth L1) loss for U and V coordinates
+ * cross entropy loss for fine (I) and coarse (S) segmentations
+ Each loss has an associated weight
+ """
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize chart-based loss from configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ """
+ # fmt: off
+ self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
+ self.w_points = cfg.MODEL.ROI_DENSEPOSE_HEAD.POINT_REGRESSION_WEIGHTS
+ self.w_part = cfg.MODEL.ROI_DENSEPOSE_HEAD.PART_WEIGHTS
+ self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS
+ self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
+ # fmt: on
+ self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
+ self.segm_loss = MaskOrSegmentationLoss(cfg)
+
+ def __call__(
+ self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any, **kwargs
+ ) -> LossDict:
+ """
+ Produce chart-based DensePose losses
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated ground truth data
+ densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
+ with estimated values; assumed to have the following attributes:
+ * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
+ * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
+ * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ where N is the number of detections, C is the number of fine segmentation
+ labels, S is the estimate size ( = width = height) and D is the number of
+ coarse segmentation channels.
+
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+ * `loss_densepose_U`: smooth L1 loss for U coordinate estimates
+ * `loss_densepose_V`: smooth L1 loss for V coordinate estimates
+ * `loss_densepose_I`: cross entropy for raw unnormalized scores for fine
+ segmentation estimates given ground truth labels;
+ * `loss_densepose_S`: cross entropy for raw unnormalized scores for coarse
+ segmentation estimates given ground truth labels;
+ """
+ # densepose outputs are computed for all images and all bounding boxes;
+ # i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively,
+ # the outputs will have size(0) == 3+1+2+1 == 7
+
+ if not len(proposals_with_gt):
+ return self.produce_fake_densepose_losses(densepose_predictor_outputs)
+
+ accumulator = ChartBasedAnnotationsAccumulator()
+ packed_annotations = extract_packed_annotations_from_matches(proposals_with_gt, accumulator)
+
+ # NOTE: we need to keep the same computation graph on all the GPUs to
+ # perform reduction properly. Hence even if we have no data on one
+ # of the GPUs, we still need to generate the computation graph.
+ # Add fake (zero) loss in the form Tensor.sum() * 0
+ if packed_annotations is None:
+ return self.produce_fake_densepose_losses(densepose_predictor_outputs)
+
+ h, w = densepose_predictor_outputs.u.shape[2:]
+ interpolator = BilinearInterpolationHelper.from_matches(
+ packed_annotations,
+ (h, w),
+ )
+
+ j_valid_fg = interpolator.j_valid * ( # pyre-ignore[16]
+ packed_annotations.fine_segm_labels_gt > 0
+ )
+ # pyre-fixme[6]: For 1st param expected `Tensor` but got `int`.
+ if not torch.any(j_valid_fg):
+ return self.produce_fake_densepose_losses(densepose_predictor_outputs)
+
+ losses_uv = self.produce_densepose_losses_uv(
+ proposals_with_gt,
+ densepose_predictor_outputs,
+ packed_annotations,
+ interpolator,
+ j_valid_fg, # pyre-ignore[6]
+ )
+
+ losses_segm = self.produce_densepose_losses_segm(
+ proposals_with_gt,
+ densepose_predictor_outputs,
+ packed_annotations,
+ interpolator,
+ j_valid_fg, # pyre-ignore[6]
+ )
+
+ return {**losses_uv, **losses_segm}
+
+ def produce_fake_densepose_losses(self, densepose_predictor_outputs: Any) -> LossDict:
+ """
+ Fake losses for fine segmentation and U/V coordinates. These are used when
+ no suitable ground truth data was found in a batch. The loss has a value 0
+ and is primarily used to construct the computation graph, so that
+ `DistributedDataParallel` has similar graphs on all GPUs and can perform
+ reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have the following attributes:
+ * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
+ * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+ * `loss_densepose_U`: has value 0
+ * `loss_densepose_V`: has value 0
+ * `loss_densepose_I`: has value 0
+ * `loss_densepose_S`: has value 0
+ """
+ losses_uv = self.produce_fake_densepose_losses_uv(densepose_predictor_outputs)
+ losses_segm = self.produce_fake_densepose_losses_segm(densepose_predictor_outputs)
+ return {**losses_uv, **losses_segm}
+
+ def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict:
+ """
+ Fake losses for U/V coordinates. These are used when no suitable ground
+ truth data was found in a batch. The loss has a value 0
+ and is primarily used to construct the computation graph, so that
+ `DistributedDataParallel` has similar graphs on all GPUs and can perform
+ reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have the following attributes:
+ * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+ * `loss_densepose_U`: has value 0
+ * `loss_densepose_V`: has value 0
+ """
+ return {
+ "loss_densepose_U": densepose_predictor_outputs.u.sum() * 0,
+ "loss_densepose_V": densepose_predictor_outputs.v.sum() * 0,
+ }
+
+ def produce_fake_densepose_losses_segm(self, densepose_predictor_outputs: Any) -> LossDict:
+ """
+ Fake losses for fine / coarse segmentation. These are used when
+ no suitable ground truth data was found in a batch. The loss has a value 0
+ and is primarily used to construct the computation graph, so that
+ `DistributedDataParallel` has similar graphs on all GPUs and can perform
+ reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have the following attributes:
+ * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
+ * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+ * `loss_densepose_I`: has value 0
+ * `loss_densepose_S`: has value 0, added only if `segm_trained_by_masks` is False
+ """
+ losses = {
+ "loss_densepose_I": densepose_predictor_outputs.fine_segm.sum() * 0,
+ "loss_densepose_S": self.segm_loss.fake_value(densepose_predictor_outputs),
+ }
+ return losses
+
+ def produce_densepose_losses_uv(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: Any,
+ interpolator: BilinearInterpolationHelper,
+ j_valid_fg: torch.Tensor,
+ ) -> LossDict:
+ """
+ Compute losses for U/V coordinates: smooth L1 loss between
+ estimated coordinates and the ground truth.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated ground truth data
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have the following attributes:
+ * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+ * `loss_densepose_U`: smooth L1 loss for U coordinate estimates
+ * `loss_densepose_V`: smooth L1 loss for V coordinate estimates
+ """
+ u_gt = packed_annotations.u_gt[j_valid_fg]
+ u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg]
+ v_gt = packed_annotations.v_gt[j_valid_fg]
+ v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg]
+ return {
+ "loss_densepose_U": F.smooth_l1_loss(u_est, u_gt, reduction="sum") * self.w_points,
+ "loss_densepose_V": F.smooth_l1_loss(v_est, v_gt, reduction="sum") * self.w_points,
+ }
+
+ def produce_densepose_losses_segm(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: Any,
+ interpolator: BilinearInterpolationHelper,
+ j_valid_fg: torch.Tensor,
+ ) -> LossDict:
+ """
+ Losses for fine / coarse segmentation: cross-entropy
+ for segmentation unnormalized scores given ground truth labels at
+ annotated points for fine segmentation and dense mask annotations
+ for coarse segmentation.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated ground truth data
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have the following attributes:
+ * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
+ * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+ * `loss_densepose_I`: cross entropy for raw unnormalized scores for fine
+ segmentation estimates given ground truth labels
+ * `loss_densepose_S`: cross entropy for raw unnormalized scores for coarse
+ segmentation estimates given ground truth labels;
+ may be included if coarse segmentation is only trained
+ using DensePose ground truth; if additional supervision through
+ instance segmentation data is performed (`segm_trained_by_masks` is True),
+ this loss is handled by `produce_mask_losses` instead
+ """
+ fine_segm_gt = packed_annotations.fine_segm_labels_gt[
+ interpolator.j_valid # pyre-ignore[16]
+ ]
+ fine_segm_est = interpolator.extract_at_points(
+ densepose_predictor_outputs.fine_segm,
+ slice_fine_segm=slice(None),
+ w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]
+ w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]
+ w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]
+ w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]
+ )[interpolator.j_valid, :]
+ return {
+ "loss_densepose_I": F.cross_entropy(fine_segm_est, fine_segm_gt.long()) * self.w_part,
+ "loss_densepose_S": self.segm_loss(
+ proposals_with_gt, densepose_predictor_outputs, packed_annotations
+ )
+ * self.w_segm,
+ }
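+
+
+# --- Illustrative note (added for documentation; not part of the upstream
+# module): the "fake loss" pattern used above keeps the autograd graph alive on
+# ranks that received no usable DensePose ground truth, so DistributedDataParallel
+# sees the same graph on every GPU. A minimal standalone version of the trick:
+def _example_zero_loss(prediction: torch.Tensor) -> torch.Tensor:
+    # touches `prediction` (so gradients can flow to the predictor) while
+    # contributing exactly zero to the total loss
+    return prediction.sum() * 0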
diff --git a/densepose/modeling/losses/chart_with_confidences.py b/densepose/modeling/losses/chart_with_confidences.py
new file mode 100644
index 0000000000000000000000000000000000000000..78ce7c6cb02fa01f6319d088349ff4f422001839
--- /dev/null
+++ b/densepose/modeling/losses/chart_with_confidences.py
@@ -0,0 +1,209 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import math
+from typing import Any, List
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from .. import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
+from .chart import DensePoseChartLoss
+from .registry import DENSEPOSE_LOSS_REGISTRY
+from .utils import BilinearInterpolationHelper, LossDict
+
+
+@DENSEPOSE_LOSS_REGISTRY.register()
+class DensePoseChartWithConfidenceLoss(DensePoseChartLoss):
+    """
+    Chart-based DensePose loss that additionally handles U/V confidence estimates
+    (see `DensePoseChartLoss` for the underlying chart losses).
+    """
+
+ def __init__(self, cfg: CfgNode):
+ super().__init__(cfg)
+ self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
+ if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
+ self.uv_loss_with_confidences = IIDIsotropicGaussianUVLoss(
+ self.confidence_model_cfg.uv_confidence.epsilon
+ )
+ elif self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.INDEP_ANISO:
+ self.uv_loss_with_confidences = IndepAnisotropicGaussianUVLoss(
+ self.confidence_model_cfg.uv_confidence.epsilon
+ )
+
+ def produce_fake_densepose_losses_uv(self, densepose_predictor_outputs: Any) -> LossDict:
+ """
+        Overrides fake losses for U/V coordinates to include computation
+        graphs for the additional confidence parameters.
+ These are used when no suitable ground truth data was found in a batch.
+ The loss has a value 0 and is primarily used to construct the computation graph,
+ so that `DistributedDataParallel` has similar graphs on all GPUs and can
+ perform reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have the following attributes:
+ * fine_segm - fine segmentation estimates, tensor of shape [N, C, S, S]
+ * u - U coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ * v - V coordinate estimates per fine labels, tensor of shape [N, C, S, S]
+ Return:
+ dict: str -> tensor: dict of losses with the following entries:
+                * `loss_densepose_UV`: has value 0 (if confidence estimation is enabled)
+                * `loss_densepose_U`, `loss_densepose_V`: have value 0 (otherwise)
+ """
+ conf_type = self.confidence_model_cfg.uv_confidence.type
+ if self.confidence_model_cfg.uv_confidence.enabled:
+ loss_uv = (
+ densepose_predictor_outputs.u.sum() + densepose_predictor_outputs.v.sum()
+ ) * 0
+ if conf_type == DensePoseUVConfidenceType.IID_ISO:
+ loss_uv += densepose_predictor_outputs.sigma_2.sum() * 0
+ elif conf_type == DensePoseUVConfidenceType.INDEP_ANISO:
+ loss_uv += (
+ densepose_predictor_outputs.sigma_2.sum()
+ + densepose_predictor_outputs.kappa_u.sum()
+ + densepose_predictor_outputs.kappa_v.sum()
+ ) * 0
+ return {"loss_densepose_UV": loss_uv}
+ else:
+ return super().produce_fake_densepose_losses_uv(densepose_predictor_outputs)
+
+ def produce_densepose_losses_uv(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: Any,
+ interpolator: BilinearInterpolationHelper,
+ j_valid_fg: torch.Tensor,
+ ) -> LossDict:
+ conf_type = self.confidence_model_cfg.uv_confidence.type
+ if self.confidence_model_cfg.uv_confidence.enabled:
+ u_gt = packed_annotations.u_gt[j_valid_fg]
+ u_est = interpolator.extract_at_points(densepose_predictor_outputs.u)[j_valid_fg]
+ v_gt = packed_annotations.v_gt[j_valid_fg]
+ v_est = interpolator.extract_at_points(densepose_predictor_outputs.v)[j_valid_fg]
+ sigma_2_est = interpolator.extract_at_points(densepose_predictor_outputs.sigma_2)[
+ j_valid_fg
+ ]
+ if conf_type == DensePoseUVConfidenceType.IID_ISO:
+ return {
+ "loss_densepose_UV": (
+ self.uv_loss_with_confidences(u_est, v_est, sigma_2_est, u_gt, v_gt)
+ * self.w_points
+ )
+ }
+ elif conf_type in [DensePoseUVConfidenceType.INDEP_ANISO]:
+ kappa_u_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_u)[
+ j_valid_fg
+ ]
+ kappa_v_est = interpolator.extract_at_points(densepose_predictor_outputs.kappa_v)[
+ j_valid_fg
+ ]
+ return {
+ "loss_densepose_UV": (
+ self.uv_loss_with_confidences(
+ u_est, v_est, sigma_2_est, kappa_u_est, kappa_v_est, u_gt, v_gt
+ )
+ * self.w_points
+ )
+ }
+ return super().produce_densepose_losses_uv(
+ proposals_with_gt,
+ densepose_predictor_outputs,
+ packed_annotations,
+ interpolator,
+ j_valid_fg,
+ )
+
+
+class IIDIsotropicGaussianUVLoss(nn.Module):
+ """
+ Loss for the case of iid residuals with isotropic covariance:
+ $Sigma_i = sigma_i^2 I$
+ The loss (negative log likelihood) is then:
+ $1/2 sum_{i=1}^n (log(2 pi) + 2 log sigma_i^2 + ||delta_i||^2 / sigma_i^2)$,
+ where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
+ difference between estimated and ground truth UV values
+ For details, see:
+ N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
+ Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
+ """
+
+ def __init__(self, sigma_lower_bound: float):
+ super(IIDIsotropicGaussianUVLoss, self).__init__()
+ self.sigma_lower_bound = sigma_lower_bound
+ self.log2pi = math.log(2 * math.pi)
+
+ def forward(
+ self,
+ u: torch.Tensor,
+ v: torch.Tensor,
+ sigma_u: torch.Tensor,
+ target_u: torch.Tensor,
+ target_v: torch.Tensor,
+ ):
+ # compute $\sigma_i^2$
+ # use sigma_lower_bound to avoid degenerate solution for variance
+ # (sigma -> 0)
+ sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
+ # compute \|delta_i\|^2
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
+ delta_t_delta = (u - target_u) ** 2 + (v - target_v) ** 2
+ # the total loss from the formula above:
+ loss = 0.5 * (self.log2pi + 2 * torch.log(sigma2) + delta_t_delta / sigma2)
+ return loss.sum()
+
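+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): a manual call of the isotropic NLL above on single-point tensors.
+# All numeric values are arbitrary.
+def _example_iid_iso_nll() -> torch.Tensor:
+    loss_fn = IIDIsotropicGaussianUVLoss(sigma_lower_bound=0.01)
+    u, v = torch.tensor([0.3]), torch.tensor([0.7])
+    target_u, target_v = torch.tensor([0.25]), torch.tensor([0.75])
+    sigma_u = torch.tensor([0.0])  # raw value; softplus(0) ~= 0.693
+    # internally: 0.5 * (log(2*pi) + 2*log(sigma2) + |delta|^2 / sigma2), summed
+    return loss_fn(u, v, sigma_u, target_u, target_v)
+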
+
+class IndepAnisotropicGaussianUVLoss(nn.Module):
+ """
+ Loss for the case of independent residuals with anisotropic covariances:
+ $Sigma_i = sigma_i^2 I + r_i r_i^T$
+ The loss (negative log likelihood) is then:
+ $1/2 sum_{i=1}^n (log(2 pi)
+ + log sigma_i^2 (sigma_i^2 + ||r_i||^2)
+ + ||delta_i||^2 / sigma_i^2
+      - <delta_i, r_i>^2 / (sigma_i^2 * (sigma_i^2 + ||r_i||^2)))$,
+ where $delta_i=(u - u', v - v')$ is a 2D vector containing UV coordinates
+ difference between estimated and ground truth UV values
+ For details, see:
+ N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
+ Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
+ """
+
+ def __init__(self, sigma_lower_bound: float):
+ super(IndepAnisotropicGaussianUVLoss, self).__init__()
+ self.sigma_lower_bound = sigma_lower_bound
+ self.log2pi = math.log(2 * math.pi)
+
+ def forward(
+ self,
+ u: torch.Tensor,
+ v: torch.Tensor,
+ sigma_u: torch.Tensor,
+ kappa_u_est: torch.Tensor,
+ kappa_v_est: torch.Tensor,
+ target_u: torch.Tensor,
+ target_v: torch.Tensor,
+ ):
+ # compute $\sigma_i^2$
+ sigma2 = F.softplus(sigma_u) + self.sigma_lower_bound
+ # compute \|r_i\|^2
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
+ r_sqnorm2 = kappa_u_est**2 + kappa_v_est**2
+ delta_u = u - target_u
+ delta_v = v - target_v
+ # compute \|delta_i\|^2
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
+ delta_sqnorm = delta_u**2 + delta_v**2
+ delta_u_r_u = delta_u * kappa_u_est
+ delta_v_r_v = delta_v * kappa_v_est
+        # compute the scalar product <delta, r>
+ delta_r = delta_u_r_u + delta_v_r_v
+        # compute squared scalar product <delta, r>^2
+ # pyre-fixme[58]: `**` is not supported for operand types `Tensor` and `int`.
+ delta_r_sqnorm = delta_r**2
+ denom2 = sigma2 * (sigma2 + r_sqnorm2)
+ loss = 0.5 * (
+ self.log2pi + torch.log(denom2) + delta_sqnorm / sigma2 - delta_r_sqnorm / denom2
+ )
+ return loss.sum()
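+
+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): a manual call of the anisotropic NLL above on single-point tensors;
+# kappa_u / kappa_v parametrize the direction r_i of increased uncertainty.
+# All numeric values are arbitrary.
+def _example_indep_aniso_nll() -> torch.Tensor:
+    loss_fn = IndepAnisotropicGaussianUVLoss(sigma_lower_bound=0.01)
+    u, v = torch.tensor([0.3]), torch.tensor([0.7])
+    target_u, target_v = torch.tensor([0.25]), torch.tensor([0.75])
+    sigma_u = torch.tensor([0.0])
+    kappa_u, kappa_v = torch.tensor([0.5]), torch.tensor([-0.2])
+    return loss_fn(u, v, sigma_u, kappa_u, kappa_v, target_u, target_v)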
diff --git a/densepose/modeling/losses/cse.py b/densepose/modeling/losses/cse.py
new file mode 100644
index 0000000000000000000000000000000000000000..dd561ad518f42c769fd9a5c8517409ddc33edf6f
--- /dev/null
+++ b/densepose/modeling/losses/cse.py
@@ -0,0 +1,115 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from typing import Any, List
+from torch import nn
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from .cycle_pix2shape import PixToShapeCycleLoss
+from .cycle_shape2shape import ShapeToShapeCycleLoss
+from .embed import EmbeddingLoss
+from .embed_utils import CseAnnotationsAccumulator
+from .mask_or_segm import MaskOrSegmentationLoss
+from .registry import DENSEPOSE_LOSS_REGISTRY
+from .soft_embed import SoftEmbeddingLoss
+from .utils import BilinearInterpolationHelper, LossDict, extract_packed_annotations_from_matches
+
+
+@DENSEPOSE_LOSS_REGISTRY.register()
+class DensePoseCseLoss:
+    """
+    DensePose loss for continuous surface embeddings (CSE): combines coarse
+    segmentation, embedding and optional cycle losses.
+    """
+
+ _EMBED_LOSS_REGISTRY = {
+ EmbeddingLoss.__name__: EmbeddingLoss,
+ SoftEmbeddingLoss.__name__: SoftEmbeddingLoss,
+ }
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize CSE loss from configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ """
+ self.w_segm = cfg.MODEL.ROI_DENSEPOSE_HEAD.INDEX_WEIGHTS
+ self.w_embed = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_WEIGHT
+ self.segm_loss = MaskOrSegmentationLoss(cfg)
+ self.embed_loss = DensePoseCseLoss.create_embed_loss(cfg)
+ self.do_shape2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.ENABLED
+ if self.do_shape2shape:
+ self.w_shape2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.WEIGHT
+ self.shape2shape_loss = ShapeToShapeCycleLoss(cfg)
+ self.do_pix2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.ENABLED
+ if self.do_pix2shape:
+ self.w_pix2shape = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.WEIGHT
+ self.pix2shape_loss = PixToShapeCycleLoss(cfg)
+
+ @classmethod
+ def create_embed_loss(cls, cfg: CfgNode):
+ # registry not used here, since embedding losses are currently local
+ # and are not used anywhere else
+ return cls._EMBED_LOSS_REGISTRY[cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_LOSS_NAME](cfg)
+
+ def __call__(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ embedder: nn.Module,
+ ) -> LossDict:
+ if not len(proposals_with_gt):
+ return self.produce_fake_losses(densepose_predictor_outputs, embedder)
+ accumulator = CseAnnotationsAccumulator()
+ packed_annotations = extract_packed_annotations_from_matches(proposals_with_gt, accumulator)
+ if packed_annotations is None:
+ return self.produce_fake_losses(densepose_predictor_outputs, embedder)
+ h, w = densepose_predictor_outputs.embedding.shape[2:]
+ interpolator = BilinearInterpolationHelper.from_matches(
+ packed_annotations,
+ (h, w),
+ )
+ meshid_to_embed_losses = self.embed_loss(
+ proposals_with_gt,
+ densepose_predictor_outputs,
+ packed_annotations,
+ interpolator,
+ embedder,
+ )
+ embed_loss_dict = {
+ f"loss_densepose_E{meshid}": self.w_embed * meshid_to_embed_losses[meshid]
+ for meshid in meshid_to_embed_losses
+ }
+ all_loss_dict = {
+ "loss_densepose_S": self.w_segm
+ * self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations),
+ **embed_loss_dict,
+ }
+ if self.do_shape2shape:
+ all_loss_dict["loss_shape2shape"] = self.w_shape2shape * self.shape2shape_loss(embedder)
+ if self.do_pix2shape:
+ all_loss_dict["loss_pix2shape"] = self.w_pix2shape * self.pix2shape_loss(
+ proposals_with_gt, densepose_predictor_outputs, packed_annotations, embedder
+ )
+ return all_loss_dict
+
+ def produce_fake_losses(
+ self, densepose_predictor_outputs: Any, embedder: nn.Module
+ ) -> LossDict:
+ meshname_to_embed_losses = self.embed_loss.fake_values(
+ densepose_predictor_outputs, embedder=embedder
+ )
+ embed_loss_dict = {
+ f"loss_densepose_E{mesh_name}": meshname_to_embed_losses[mesh_name]
+ for mesh_name in meshname_to_embed_losses
+ }
+ all_loss_dict = {
+ "loss_densepose_S": self.segm_loss.fake_value(densepose_predictor_outputs),
+ **embed_loss_dict,
+ }
+ if self.do_shape2shape:
+ all_loss_dict["loss_shape2shape"] = self.shape2shape_loss.fake_value(embedder)
+ if self.do_pix2shape:
+ all_loss_dict["loss_pix2shape"] = self.pix2shape_loss.fake_value(
+ densepose_predictor_outputs, embedder
+ )
+ return all_loss_dict
diff --git a/densepose/modeling/losses/cycle_pix2shape.py b/densepose/modeling/losses/cycle_pix2shape.py
new file mode 100644
index 0000000000000000000000000000000000000000..e305d29850ef04a712a0a3e7bdbffba887257777
--- /dev/null
+++ b/densepose/modeling/losses/cycle_pix2shape.py
@@ -0,0 +1,152 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from typing import Any, List
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from densepose.data.meshes.catalog import MeshCatalog
+from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix
+
+from .embed_utils import PackedCseAnnotations
+from .mask import extract_data_for_mask_loss_from_matches
+
+
+def _create_pixel_dist_matrix(grid_size: int) -> torch.Tensor:
+ rows = torch.arange(grid_size)
+ cols = torch.arange(grid_size)
+ # at index `i` contains [row, col], where
+ # row = i // grid_size
+ # col = i % grid_size
+ pix_coords = (
+ torch.stack(torch.meshgrid(rows, cols), -1).reshape((grid_size * grid_size, 2)).float()
+ )
+ return squared_euclidean_distance_matrix(pix_coords, pix_coords)
+
+
+def _sample_fg_pixels_randperm(fg_mask: torch.Tensor, sample_size: int) -> torch.Tensor:
+ fg_mask_flattened = fg_mask.reshape((-1,))
+ num_pixels = int(fg_mask_flattened.sum().item())
+ fg_pixel_indices = fg_mask_flattened.nonzero(as_tuple=True)[0]
+ if (sample_size <= 0) or (num_pixels <= sample_size):
+ return fg_pixel_indices
+ sample_indices = torch.randperm(num_pixels, device=fg_mask.device)[:sample_size]
+ return fg_pixel_indices[sample_indices]
+
+
+def _sample_fg_pixels_multinomial(fg_mask: torch.Tensor, sample_size: int) -> torch.Tensor:
+ fg_mask_flattened = fg_mask.reshape((-1,))
+ num_pixels = int(fg_mask_flattened.sum().item())
+ if (sample_size <= 0) or (num_pixels <= sample_size):
+ return fg_mask_flattened.nonzero(as_tuple=True)[0]
+ return fg_mask_flattened.float().multinomial(sample_size, replacement=False)
+
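+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): the helpers above pick at most `sample_size` foreground pixel indices
+# from a boolean mask, and the distance matrix stores squared pixel distances
+# indexed by flattened position. Toy sizes below are arbitrary.
+def _example_pixel_sampling() -> torch.Tensor:
+    dists = _create_pixel_dist_matrix(4)  # [16, 16] squared pixel distances
+    fg_mask = torch.zeros(4, 4, dtype=torch.bool)
+    fg_mask[1:3, 1:3] = True  # 4 foreground pixels
+    idx = _sample_fg_pixels_randperm(fg_mask, sample_size=2)  # 2 of the 4 indices
+    # pairwise distances between the sampled pixels, as used in the cycle loss
+    return dists[torch.meshgrid(idx, idx)]
+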
+
+class PixToShapeCycleLoss(nn.Module):
+ """
+ Cycle loss for pixel-vertex correspondence
+ """
+
+ def __init__(self, cfg: CfgNode):
+ super().__init__()
+ self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys())
+ self.embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
+ self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NORM_P
+ self.use_all_meshes_not_gt_only = (
+ cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.USE_ALL_MESHES_NOT_GT_ONLY
+ )
+ self.num_pixels_to_sample = (
+ cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.NUM_PIXELS_TO_SAMPLE
+ )
+ self.pix_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.PIXEL_SIGMA
+ self.temperature_pix_to_vertex = (
+ cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_PIXEL_TO_VERTEX
+ )
+ self.temperature_vertex_to_pix = (
+ cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.PIX_TO_SHAPE_CYCLE_LOSS.TEMPERATURE_VERTEX_TO_PIXEL
+ )
+ self.pixel_dists = _create_pixel_dist_matrix(cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE)
+
+ def forward(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: PackedCseAnnotations,
+ embedder: nn.Module,
+ ):
+ """
+ Args:
+ proposals_with_gt (list of Instances): detections with associated
+ ground truth data; each item corresponds to instances detected
+ on 1 image; the number of items corresponds to the number of
+ images in a batch
+ densepose_predictor_outputs: an object of a dataclass that contains predictor
+ outputs with estimated values; assumed to have the following attributes:
+ * embedding - embedding estimates, tensor of shape [N, D, S, S], where
+ N = number of instances (= sum N_i, where N_i is the number of
+ instances on image i)
+ D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)
+ S = output size (width and height)
+ packed_annotations (PackedCseAnnotations): contains various data useful
+                for loss computation; each field is packed into a single tensor
+ embedder (nn.Module): module that computes vertex embeddings for different meshes
+ """
+ pix_embeds = densepose_predictor_outputs.embedding
+ if self.pixel_dists.device != pix_embeds.device:
+ # should normally be done only once
+ self.pixel_dists = self.pixel_dists.to(device=pix_embeds.device)
+ with torch.no_grad():
+ mask_loss_data = extract_data_for_mask_loss_from_matches(
+ proposals_with_gt, densepose_predictor_outputs.coarse_segm
+ )
+ # GT masks - tensor of shape [N, S, S] of int64
+ masks_gt = mask_loss_data.masks_gt.long() # pyre-ignore[16]
+ assert len(pix_embeds) == len(masks_gt), (
+ f"Number of instances with embeddings {len(pix_embeds)} != "
+ f"number of instances with GT masks {len(masks_gt)}"
+ )
+ losses = []
+ mesh_names = (
+ self.shape_names
+ if self.use_all_meshes_not_gt_only
+ else [
+ MeshCatalog.get_mesh_name(mesh_id.item())
+ for mesh_id in packed_annotations.vertex_mesh_ids_gt.unique()
+ ]
+ )
+ for pixel_embeddings, mask_gt in zip(pix_embeds, masks_gt):
+ # pixel_embeddings [D, S, S]
+ # mask_gt [S, S]
+ for mesh_name in mesh_names:
+ mesh_vertex_embeddings = embedder(mesh_name)
+ # pixel indices [M]
+ pixel_indices_flattened = _sample_fg_pixels_randperm(
+ mask_gt, self.num_pixels_to_sample
+ )
+ # pixel distances [M, M]
+ pixel_dists = self.pixel_dists.to(pixel_embeddings.device)[
+ torch.meshgrid(pixel_indices_flattened, pixel_indices_flattened)
+ ]
+ # pixel embeddings [M, D]
+ pixel_embeddings_sampled = normalize_embeddings(
+ pixel_embeddings.reshape((self.embed_size, -1))[:, pixel_indices_flattened].T
+ )
+ # pixel-vertex similarity [M, K]
+ sim_matrix = pixel_embeddings_sampled.mm(mesh_vertex_embeddings.T)
+ c_pix_vertex = F.softmax(sim_matrix / self.temperature_pix_to_vertex, dim=1)
+ c_vertex_pix = F.softmax(sim_matrix.T / self.temperature_vertex_to_pix, dim=1)
+ c_cycle = c_pix_vertex.mm(c_vertex_pix)
+ loss_cycle = torch.norm(pixel_dists * c_cycle, p=self.norm_p)
+ losses.append(loss_cycle)
+
+ if len(losses) == 0:
+ return pix_embeds.sum() * 0
+ return torch.stack(losses, dim=0).mean()
+
+ def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module):
+ losses = [embedder(mesh_name).sum() * 0 for mesh_name in embedder.mesh_names]
+ losses.append(densepose_predictor_outputs.embedding.sum() * 0)
+ return torch.mean(torch.stack(losses))
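+
+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): the core pixel -> vertex -> pixel cycle term computed inside
+# forward(), isolated on toy tensors. M sampled pixels, K mesh vertices,
+# D embedding dims; the sizes and temperatures below are arbitrary.
+def _example_pix2shape_cycle() -> torch.Tensor:
+    M, K, D = 5, 11, 16
+    pixel_embeddings_sampled = normalize_embeddings(torch.randn(M, D))
+    mesh_vertex_embeddings = normalize_embeddings(torch.randn(K, D))
+    pixel_dists = torch.rand(M, M)  # squared distances between sampled pixels
+    sim = pixel_embeddings_sampled.mm(mesh_vertex_embeddings.T)  # [M, K]
+    c_pix_vertex = F.softmax(sim / 0.05, dim=1)  # soft pixel -> vertex assignment
+    c_vertex_pix = F.softmax(sim.T / 0.05, dim=1)  # soft vertex -> pixel assignment
+    c_cycle = c_pix_vertex.mm(c_vertex_pix)  # pixel -> pixel round trip, [M, M]
+    # round trips that land far from the starting pixel are penalized
+    return torch.norm(pixel_dists * c_cycle, p=2)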
diff --git a/densepose/modeling/losses/cycle_shape2shape.py b/densepose/modeling/losses/cycle_shape2shape.py
new file mode 100644
index 0000000000000000000000000000000000000000..f71dbab7c5bd7484cd9001c3c15059971ff0f0cf
--- /dev/null
+++ b/densepose/modeling/losses/cycle_shape2shape.py
@@ -0,0 +1,117 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import random
+from typing import Tuple
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+
+from densepose.structures.mesh import create_mesh
+
+from .utils import sample_random_indices
+
+
+class ShapeToShapeCycleLoss(nn.Module):
+ """
+ Cycle Loss for Shapes.
+ Inspired by:
+ "Mapping in a Cycle: Sinkhorn Regularized Unsupervised Learning for Point Cloud Shapes".
+ """
+
+ def __init__(self, cfg: CfgNode):
+ super().__init__()
+ self.shape_names = list(cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDERS.keys())
+ self.all_shape_pairs = [
+ (x, y) for i, x in enumerate(self.shape_names) for y in self.shape_names[i + 1 :]
+ ]
+ random.shuffle(self.all_shape_pairs)
+ self.cur_pos = 0
+ self.norm_p = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.NORM_P
+ self.temperature = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.TEMPERATURE
+ self.max_num_vertices = (
+ cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES
+ )
+
+ def _sample_random_pair(self) -> Tuple[str, str]:
+ """
+ Produce a random pair of different mesh names
+
+ Return:
+ tuple(str, str): a pair of different mesh names
+ """
+ if self.cur_pos >= len(self.all_shape_pairs):
+ random.shuffle(self.all_shape_pairs)
+ self.cur_pos = 0
+ shape_pair = self.all_shape_pairs[self.cur_pos]
+ self.cur_pos += 1
+ return shape_pair
+
+ def forward(self, embedder: nn.Module):
+ """
+        Do a forward pass with a random (src, dst) pair of shapes
+ Args:
+ embedder (nn.Module): module that computes vertex embeddings for different meshes
+ """
+ src_mesh_name, dst_mesh_name = self._sample_random_pair()
+ return self._forward_one_pair(embedder, src_mesh_name, dst_mesh_name)
+
+ def fake_value(self, embedder: nn.Module):
+ losses = []
+ for mesh_name in embedder.mesh_names:
+ losses.append(embedder(mesh_name).sum() * 0)
+ return torch.mean(torch.stack(losses))
+
+ def _get_embeddings_and_geodists_for_mesh(
+ self, embedder: nn.Module, mesh_name: str
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
+ """
+ Produces embeddings and geodesic distance tensors for a given mesh. May subsample
+        the mesh, if it contains too many vertices (controlled by the
+        MODEL.ROI_DENSEPOSE_HEAD.CSE.SHAPE_TO_SHAPE_CYCLE_LOSS.MAX_NUM_VERTICES option).
+ Args:
+ embedder (nn.Module): module that computes embeddings for mesh vertices
+ mesh_name (str): mesh name
+ Return:
+ embeddings (torch.Tensor of size [N, D]): embeddings for selected mesh
+ vertices (N = number of selected vertices, D = embedding space dim)
+ geodists (torch.Tensor of size [N, N]): geodesic distances for the selected
+ mesh vertices (N = number of selected vertices)
+ """
+ embeddings = embedder(mesh_name)
+ indices = sample_random_indices(
+ embeddings.shape[0], self.max_num_vertices, embeddings.device
+ )
+ mesh = create_mesh(mesh_name, embeddings.device)
+ geodists = mesh.geodists
+ if indices is not None:
+ embeddings = embeddings[indices]
+ geodists = geodists[torch.meshgrid(indices, indices)]
+ return embeddings, geodists
+
+ def _forward_one_pair(
+ self, embedder: nn.Module, mesh_name_1: str, mesh_name_2: str
+ ) -> torch.Tensor:
+ """
+ Do a forward pass with a selected pair of meshes
+ Args:
+ embedder (nn.Module): module that computes vertex embeddings for different meshes
+ mesh_name_1 (str): first mesh name
+ mesh_name_2 (str): second mesh name
+ Return:
+ Tensor containing the loss value
+ """
+ embeddings_1, geodists_1 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_1)
+ embeddings_2, geodists_2 = self._get_embeddings_and_geodists_for_mesh(embedder, mesh_name_2)
+ sim_matrix_12 = embeddings_1.mm(embeddings_2.T)
+
+ c_12 = F.softmax(sim_matrix_12 / self.temperature, dim=1)
+ c_21 = F.softmax(sim_matrix_12.T / self.temperature, dim=1)
+ c_11 = c_12.mm(c_21)
+ c_22 = c_21.mm(c_12)
+
+ loss_cycle_11 = torch.norm(geodists_1 * c_11, p=self.norm_p)
+ loss_cycle_22 = torch.norm(geodists_2 * c_22, p=self.norm_p)
+
+ return loss_cycle_11 + loss_cycle_22
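+
+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): _sample_random_pair walks through a shuffled list of all unordered
+# mesh pairs and reshuffles once the list is exhausted, so every pair is visited
+# before any pair repeats. A standalone version of the same scheduling:
+def _example_pair_schedule(names, num_draws):
+    pairs = [(a, b) for i, a in enumerate(names) for b in names[i + 1 :]]
+    random.shuffle(pairs)
+    pos, drawn = 0, []
+    for _ in range(num_draws):
+        if pos >= len(pairs):
+            random.shuffle(pairs)
+            pos = 0
+        drawn.append(pairs[pos])
+        pos += 1
+    return drawn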
diff --git a/densepose/modeling/losses/embed.py b/densepose/modeling/losses/embed.py
new file mode 100644
index 0000000000000000000000000000000000000000..1e3a069763ca6fab0acc7c455b416b9634ceaedf
--- /dev/null
+++ b/densepose/modeling/losses/embed.py
@@ -0,0 +1,119 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from typing import Any, Dict, List
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from densepose.data.meshes.catalog import MeshCatalog
+from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix
+
+from .embed_utils import PackedCseAnnotations
+from .utils import BilinearInterpolationHelper
+
+
+class EmbeddingLoss:
+ """
+ Computes losses for estimated embeddings given annotated vertices.
+ Instances in a minibatch that correspond to the same mesh are grouped
+ together. For each group, loss is computed as cross-entropy for
+ unnormalized scores given ground truth mesh vertex ids.
+ Scores are based on squared distances between estimated vertex embeddings
+ and mesh vertex embeddings.
+ """
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize embedding loss from config
+ """
+ self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA
+
+ def __call__(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: PackedCseAnnotations,
+ interpolator: BilinearInterpolationHelper,
+ embedder: nn.Module,
+ ) -> Dict[int, torch.Tensor]:
+ """
+ Produces losses for estimated embeddings given annotated vertices.
+ Embeddings for all the vertices of a mesh are computed by the embedder.
+ Embeddings for observed pixels are estimated by a predictor.
+ Losses are computed as cross-entropy for squared distances between
+ observed vertex embeddings and all mesh vertex embeddings given
+ ground truth vertex IDs.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated
+ ground truth data; each item corresponds to instances detected
+ on 1 image; the number of items corresponds to the number of
+ images in a batch
+ densepose_predictor_outputs: an object of a dataclass that contains predictor
+ outputs with estimated values; assumed to have the following attributes:
+ * embedding - embedding estimates, tensor of shape [N, D, S, S], where
+ N = number of instances (= sum N_i, where N_i is the number of
+ instances on image i)
+ D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)
+ S = output size (width and height)
+ packed_annotations (PackedCseAnnotations): contains various data useful
+                for loss computation; each field is packed into a single tensor
+ interpolator (BilinearInterpolationHelper): bilinear interpolation helper
+ embedder (nn.Module): module that computes vertex embeddings for different meshes
+ Return:
+ dict(int -> tensor): losses for different mesh IDs
+ """
+ losses = {}
+ for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique():
+ mesh_id = mesh_id_tensor.item()
+ mesh_name = MeshCatalog.get_mesh_name(mesh_id)
+ # valid points are those that fall into estimated bbox
+ # and correspond to the current mesh
+ j_valid = interpolator.j_valid * ( # pyre-ignore[16]
+ packed_annotations.vertex_mesh_ids_gt == mesh_id
+ )
+ if not torch.any(j_valid):
+ continue
+ # extract estimated embeddings for valid points
+ # -> tensor [J, D]
+ vertex_embeddings_i = normalize_embeddings(
+ interpolator.extract_at_points(
+ densepose_predictor_outputs.embedding,
+ slice_fine_segm=slice(None),
+ w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]
+ w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]
+ w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]
+ w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]
+ )[j_valid, :]
+ )
+ # extract vertex ids for valid points
+ # -> tensor [J]
+ vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid]
+ # embeddings for all mesh vertices
+ # -> tensor [K, D]
+ mesh_vertex_embeddings = embedder(mesh_name)
+ # unnormalized scores for valid points
+ # -> tensor [J, K]
+ scores = squared_euclidean_distance_matrix(
+ vertex_embeddings_i, mesh_vertex_embeddings
+ ) / (-self.embdist_gauss_sigma)
+ losses[mesh_name] = F.cross_entropy(scores, vertex_indices_i, ignore_index=-1)
+
+ for mesh_name in embedder.mesh_names:
+ if mesh_name not in losses:
+ losses[mesh_name] = self.fake_value(
+ densepose_predictor_outputs, embedder, mesh_name
+ )
+ return losses
+
+ def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module):
+ losses = {}
+ for mesh_name in embedder.mesh_names:
+ losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name)
+ return losses
+
+ def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str):
+ return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0
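+
+
+# --- Illustrative sketch (added for documentation; not part of the upstream
+# module): the unnormalized score computation used above, isolated on toy
+# tensors. J annotated points, K mesh vertices, D embedding dims; the sizes and
+# the Gaussian sigma below are arbitrary.
+def _example_embedding_scores() -> torch.Tensor:
+    J, K, D, sigma = 4, 10, 16, 0.1
+    point_embeddings = normalize_embeddings(torch.randn(J, D))
+    mesh_vertex_embeddings = torch.randn(K, D)
+    gt_vertex_ids = torch.randint(0, K, (J,))
+    # closer mesh vertices get higher (less negative) scores
+    scores = squared_euclidean_distance_matrix(point_embeddings, mesh_vertex_embeddings) / (
+        -sigma
+    )
+    return F.cross_entropy(scores, gt_vertex_ids, ignore_index=-1)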
diff --git a/densepose/modeling/losses/embed_utils.py b/densepose/modeling/losses/embed_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f2ca16fd3809b89e1c05636242a84d02d3a42d88
--- /dev/null
+++ b/densepose/modeling/losses/embed_utils.py
@@ -0,0 +1,137 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from dataclasses import dataclass
+from typing import Any, Optional
+import torch
+
+from detectron2.structures import BoxMode, Instances
+
+from .utils import AnnotationsAccumulator
+
+
+@dataclass
+class PackedCseAnnotations:
+ x_gt: torch.Tensor
+ y_gt: torch.Tensor
+ coarse_segm_gt: Optional[torch.Tensor]
+ vertex_mesh_ids_gt: torch.Tensor
+ vertex_ids_gt: torch.Tensor
+ bbox_xywh_gt: torch.Tensor
+ bbox_xywh_est: torch.Tensor
+ point_bbox_with_dp_indices: torch.Tensor
+ point_bbox_indices: torch.Tensor
+ bbox_indices: torch.Tensor
+
+
+class CseAnnotationsAccumulator(AnnotationsAccumulator):
+ """
+ Accumulates annotations by batches that correspond to objects detected on
+ individual images. Can pack them together into single tensors.
+ """
+
+ def __init__(self):
+ self.x_gt = []
+ self.y_gt = []
+ self.s_gt = []
+ self.vertex_mesh_ids_gt = []
+ self.vertex_ids_gt = []
+ self.bbox_xywh_gt = []
+ self.bbox_xywh_est = []
+ self.point_bbox_with_dp_indices = []
+ self.point_bbox_indices = []
+ self.bbox_indices = []
+ self.nxt_bbox_with_dp_index = 0
+ self.nxt_bbox_index = 0
+
+ def accumulate(self, instances_one_image: Instances):
+ """
+ Accumulate instances data for one image
+
+ Args:
+ instances_one_image (Instances): instances data to accumulate
+ """
+ boxes_xywh_est = BoxMode.convert(
+ instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
+ )
+ boxes_xywh_gt = BoxMode.convert(
+ instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
+ )
+ n_matches = len(boxes_xywh_gt)
+ assert n_matches == len(
+ boxes_xywh_est
+ ), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes"
+ if not n_matches:
+ # no detection - GT matches
+ return
+ if (
+ not hasattr(instances_one_image, "gt_densepose")
+ or instances_one_image.gt_densepose is None
+ ):
+ # no densepose GT for the detections, just increase the bbox index
+ self.nxt_bbox_index += n_matches
+ return
+ for box_xywh_est, box_xywh_gt, dp_gt in zip(
+ boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose
+ ):
+ if (dp_gt is not None) and (len(dp_gt.x) > 0):
+ # pyre-fixme[6]: For 1st argument expected `Tensor` but got `float`.
+ # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `float`.
+ self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt)
+ self.nxt_bbox_index += 1
+
+ def _do_accumulate(self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: Any):
+ """
+ Accumulate instances data for one image, given that the data is not empty
+
+ Args:
+ box_xywh_gt (tensor): GT bounding box
+ box_xywh_est (tensor): estimated bounding box
+ dp_gt: GT densepose data with the following attributes:
+ - x: normalized X coordinates
+ - y: normalized Y coordinates
+ - segm: tensor of size [S, S] with coarse segmentation
+ - vertex_ids: tensor of vertex ids
+ - mesh_id: id of the mesh the vertices belong to
+ """
+ self.x_gt.append(dp_gt.x)
+ self.y_gt.append(dp_gt.y)
+ if hasattr(dp_gt, "segm"):
+ self.s_gt.append(dp_gt.segm.unsqueeze(0))
+ self.vertex_ids_gt.append(dp_gt.vertex_ids)
+ self.vertex_mesh_ids_gt.append(torch.full_like(dp_gt.vertex_ids, dp_gt.mesh_id))
+ self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4))
+ self.bbox_xywh_est.append(box_xywh_est.view(-1, 4))
+ self.point_bbox_with_dp_indices.append(
+ torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_with_dp_index)
+ )
+ self.point_bbox_indices.append(torch.full_like(dp_gt.vertex_ids, self.nxt_bbox_index))
+ self.bbox_indices.append(self.nxt_bbox_index)
+ self.nxt_bbox_with_dp_index += 1
+
+ def pack(self) -> Optional[PackedCseAnnotations]:
+ """
+ Pack data into tensors
+ """
+ if not len(self.x_gt):
+ # TODO:
+ # returning proper empty annotations would require
+ # creating empty tensors of appropriate shape and
+ # type on an appropriate device;
+ # we return None so far to indicate empty annotations
+ return None
+ return PackedCseAnnotations(
+ x_gt=torch.cat(self.x_gt, 0),
+ y_gt=torch.cat(self.y_gt, 0),
+ vertex_mesh_ids_gt=torch.cat(self.vertex_mesh_ids_gt, 0),
+ vertex_ids_gt=torch.cat(self.vertex_ids_gt, 0),
+ # ignore segmentation annotations, if not all the instances contain those
+ coarse_segm_gt=torch.cat(self.s_gt, 0)
+ if len(self.s_gt) == len(self.bbox_xywh_gt)
+ else None,
+ bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0),
+ bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0),
+ point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0),
+ point_bbox_indices=torch.cat(self.point_bbox_indices, 0),
+ bbox_indices=torch.as_tensor(
+ self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device
+ ),
+ )
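
A minimal sketch of how the accumulator above is meant to be driven (not part of this patch; `proposals_with_targets` is a hypothetical list of detectron2 `Instances` already matched to ground truth, mirroring `extract_packed_annotations_from_matches` defined later in `losses/utils.py`):

    accumulator = CseAnnotationsAccumulator()
    for instances_per_image in proposals_with_targets:  # one Instances object per image
        accumulator.accumulate(instances_per_image)
    packed = accumulator.pack()                         # PackedCseAnnotations, or None if empty
    if packed is not None:
        num_points = packed.x_gt.shape[0]               # all annotated points across the batch
        num_boxes = packed.bbox_xywh_gt.shape[0]        # all instances with DensePose annotations
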
diff --git a/densepose/modeling/losses/mask.py b/densepose/modeling/losses/mask.py
new file mode 100644
index 0000000000000000000000000000000000000000..c16b15c53de9f02dc734148e05f2bde799046aa0
--- /dev/null
+++ b/densepose/modeling/losses/mask.py
@@ -0,0 +1,125 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from dataclasses import dataclass
+from typing import Any, Iterable, List, Optional
+import torch
+from torch.nn import functional as F
+
+from detectron2.structures import Instances
+
+
+@dataclass
+class DataForMaskLoss:
+ """
+ Contains mask GT and estimated data for proposals from multiple images:
+ """
+
+ # tensor of size (K, H, W) containing GT labels
+ masks_gt: Optional[torch.Tensor] = None
+ # tensor of size (K, C, H, W) containing estimated scores
+ masks_est: Optional[torch.Tensor] = None
+
+
+def extract_data_for_mask_loss_from_matches(
+ proposals_targets: Iterable[Instances], estimated_segm: torch.Tensor
+) -> DataForMaskLoss:
+ """
+ Extract data for mask loss from instances that contain matched GT and
+ estimated bounding boxes.
+ Args:
+ proposals_targets: Iterable[Instances]
+ matched GT and estimated results, each item in the iterable
+ corresponds to data in 1 image
+ estimated_segm: tensor(K, C, S, S) of float - raw unnormalized
+ segmentation scores, here S is the size to which GT masks are
+ to be resized
+ Return:
+ DataForMaskLoss with the following fields (left as None if no GT was found):
+ masks_est: tensor(K, C, S, S) of float - class scores
+ masks_gt: tensor(K, S, S) of int64 - labels
+ """
+ data = DataForMaskLoss()
+ masks_gt = []
+ offset = 0
+ assert estimated_segm.shape[2] == estimated_segm.shape[3], (
+ f"Expected estimated segmentation to have a square shape, "
+ f"but the actual shape is {estimated_segm.shape[2:]}"
+ )
+ mask_size = estimated_segm.shape[2]
+ num_proposals = sum(inst.proposal_boxes.tensor.size(0) for inst in proposals_targets)
+ num_estimated = estimated_segm.shape[0]
+ assert (
+ num_proposals == num_estimated
+ ), "The number of proposals {} must be equal to the number of estimates {}".format(
+ num_proposals, num_estimated
+ )
+
+ for proposals_targets_per_image in proposals_targets:
+ n_i = proposals_targets_per_image.proposal_boxes.tensor.size(0)
+ if not n_i:
+ continue
+ gt_masks_per_image = proposals_targets_per_image.gt_masks.crop_and_resize(
+ proposals_targets_per_image.proposal_boxes.tensor, mask_size
+ ).to(device=estimated_segm.device)
+ masks_gt.append(gt_masks_per_image)
+ offset += n_i
+ if masks_gt:
+ data.masks_est = estimated_segm
+ data.masks_gt = torch.cat(masks_gt, dim=0)
+ return data
+
+
+class MaskLoss:
+ """
+ Mask loss as cross-entropy for raw unnormalized scores given ground truth labels.
+ Mask ground truth labels are defined for the whole image and not only the
+ bounding box of interest. They are stored as objects that are assumed to implement
+ the `crop_and_resize` interface (e.g. BitMasks, PolygonMasks).
+ """
+
+ def __call__(
+ self, proposals_with_gt: List[Instances], densepose_predictor_outputs: Any
+ ) -> torch.Tensor:
+ """
+ Computes segmentation loss as cross-entropy for raw unnormalized
+ scores given ground truth labels.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated ground truth data
+ densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
+ with estimated values; assumed to have the following attribute:
+ * coarse_segm (tensor of shape [N, D, S, S]): coarse segmentation estimates
+ as raw unnormalized scores
+ where N is the number of detections, S is the estimate size ( = width = height)
+ and D is the number of coarse segmentation channels.
+ Return:
+ Cross entropy for raw unnormalized scores for coarse segmentation given
+ ground truth labels from masks
+ """
+ if not len(proposals_with_gt):
+ return self.fake_value(densepose_predictor_outputs)
+ # densepose outputs are computed for all images and all bounding boxes;
+ # i.e. if a batch has 4 images with (3, 1, 2, 1) proposals respectively,
+ # the outputs will have size(0) == 3+1+2+1 == 7
+ with torch.no_grad():
+ mask_loss_data = extract_data_for_mask_loss_from_matches(
+ proposals_with_gt, densepose_predictor_outputs.coarse_segm
+ )
+ if (mask_loss_data.masks_gt is None) or (mask_loss_data.masks_est is None):
+ return self.fake_value(densepose_predictor_outputs)
+ return F.cross_entropy(mask_loss_data.masks_est, mask_loss_data.masks_gt.long())
+
+ def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
+ """
+ Fake segmentation loss used when no suitable ground truth data
+ was found in a batch. The loss has a value 0 and is primarily used to
+ construct the computation graph, so that `DistributedDataParallel`
+ has similar graphs on all GPUs and can perform reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have `coarse_segm`
+ attribute
+ Return:
+ Zero value loss with proper computation graph
+ """
+ return densepose_predictor_outputs.coarse_segm.sum() * 0
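
A small check of the `fake_value` pattern used above, assuming plain PyTorch tensors: multiplying a real output by zero keeps the loss attached to the computation graph, so `DistributedDataParallel` still sees the same parameters participate on every GPU.

    import torch

    coarse_segm = torch.randn(2, 2, 112, 112, requires_grad=True)  # stand-in predictor output
    fake_loss = coarse_segm.sum() * 0                               # value 0, but graph-connected
    fake_loss.backward()
    assert coarse_segm.grad is not None                 # gradients are produced ...
    assert torch.count_nonzero(coarse_segm.grad) == 0   # ... and are all zeros
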
diff --git a/densepose/modeling/losses/mask_or_segm.py b/densepose/modeling/losses/mask_or_segm.py
new file mode 100644
index 0000000000000000000000000000000000000000..98b773d99fd29a48cbdfa94c5882c9c3d94003ee
--- /dev/null
+++ b/densepose/modeling/losses/mask_or_segm.py
@@ -0,0 +1,72 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from typing import Any, List
+import torch
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from .mask import MaskLoss
+from .segm import SegmentationLoss
+
+
+class MaskOrSegmentationLoss:
+ """
+ Mask or segmentation loss as cross-entropy for raw unnormalized scores
+ given ground truth labels. Ground truth labels are either defined by coarse
+ segmentation annotation, or by mask annotation, depending on the config
+ value MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
+ """
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize segmentation loss from configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ """
+ self.segm_trained_by_masks = cfg.MODEL.ROI_DENSEPOSE_HEAD.COARSE_SEGM_TRAINED_BY_MASKS
+ if self.segm_trained_by_masks:
+ self.mask_loss = MaskLoss()
+ self.segm_loss = SegmentationLoss(cfg)
+
+ def __call__(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: Any,
+ ) -> torch.Tensor:
+ """
+ Compute segmentation loss as cross-entropy between aligned unnormalized
+ score estimates and ground truth; with ground truth given
+ either by masks, or by coarse segmentation annotations.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated ground truth data
+ densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
+ with estimated values; assumed to have the following attributes:
+ * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
+ packed_annotations: packed annotations for efficient loss computation
+ Return:
+ tensor: loss value as cross-entropy for raw unnormalized scores
+ given ground truth labels
+ """
+ if self.segm_trained_by_masks:
+ return self.mask_loss(proposals_with_gt, densepose_predictor_outputs)
+ return self.segm_loss(proposals_with_gt, densepose_predictor_outputs, packed_annotations)
+
+ def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
+ """
+ Fake segmentation loss used when no suitable ground truth data
+ was found in a batch. The loss has a value 0 and is primarily used to
+ construct the computation graph, so that `DistributedDataParallel`
+ has similar graphs on all GPUs and can perform reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have `coarse_segm`
+ attribute
+ Return:
+ Zero value loss with proper computation graph
+ """
+ return densepose_predictor_outputs.coarse_segm.sum() * 0
diff --git a/densepose/modeling/losses/registry.py b/densepose/modeling/losses/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..d9c8817a743e42b2aec382818f0cc1bb39a66004
--- /dev/null
+++ b/densepose/modeling/losses/registry.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from detectron2.utils.registry import Registry
+
+DENSEPOSE_LOSS_REGISTRY = Registry("DENSEPOSE_LOSS")
diff --git a/densepose/modeling/losses/segm.py b/densepose/modeling/losses/segm.py
new file mode 100644
index 0000000000000000000000000000000000000000..1962b886e1946fa4896776da8a007ae0a9a4fab3
--- /dev/null
+++ b/densepose/modeling/losses/segm.py
@@ -0,0 +1,83 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from typing import Any, List
+import torch
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from .utils import resample_data
+
+
+class SegmentationLoss:
+ """
+ Segmentation loss as cross-entropy for raw unnormalized scores given ground truth
+ labels. Segmentation ground truth labels are defined for the bounding box of
+ interest at some fixed resolution [S, S], where
+ S = MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE.
+ """
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize segmentation loss from configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ """
+ self.heatmap_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.HEATMAP_SIZE
+ self.n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
+
+ def __call__(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: Any,
+ ) -> torch.Tensor:
+ """
+ Compute segmentation loss as cross-entropy on aligned segmentation
+ ground truth and estimated scores.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated ground truth data
+ densepose_predictor_outputs: an object of a dataclass that contains predictor outputs
+ with estimated values; assumed to have the following attributes:
+ * coarse_segm - coarse segmentation estimates, tensor of shape [N, D, S, S]
+ packed_annotations: packed annotations for efficient loss computation;
+ the following attributes are used:
+ - coarse_segm_gt
+ - bbox_xywh_gt
+ - bbox_xywh_est
+ """
+ if packed_annotations.coarse_segm_gt is None:
+ return self.fake_value(densepose_predictor_outputs)
+ coarse_segm_est = densepose_predictor_outputs.coarse_segm[packed_annotations.bbox_indices]
+ with torch.no_grad():
+ coarse_segm_gt = resample_data(
+ packed_annotations.coarse_segm_gt.unsqueeze(1),
+ packed_annotations.bbox_xywh_gt,
+ packed_annotations.bbox_xywh_est,
+ self.heatmap_size,
+ self.heatmap_size,
+ mode="nearest",
+ padding_mode="zeros",
+ ).squeeze(1)
+ if self.n_segm_chan == 2:
+ coarse_segm_gt = coarse_segm_gt > 0
+ return F.cross_entropy(coarse_segm_est, coarse_segm_gt.long())
+
+ def fake_value(self, densepose_predictor_outputs: Any) -> torch.Tensor:
+ """
+ Fake segmentation loss used when no suitable ground truth data
+ was found in a batch. The loss has a value 0 and is primarily used to
+ construct the computation graph, so that `DistributedDataParallel`
+ has similar graphs on all GPUs and can perform reduction properly.
+
+ Args:
+ densepose_predictor_outputs: DensePose predictor outputs, an object
+ of a dataclass that is assumed to have `coarse_segm`
+ attribute
+ Return:
+ Zero value loss with proper computation graph
+ """
+ return densepose_predictor_outputs.coarse_segm.sum() * 0
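
A toy illustration of the 2-channel branch above, assuming `HEATMAP_SIZE = 112`: resampled GT part labels are collapsed to a binary foreground mask before the cross-entropy is taken.

    import torch
    import torch.nn.functional as F

    coarse_segm_gt = torch.randint(0, 15, (3, 112, 112)).float()  # resampled GT labels, 0 = background
    coarse_segm_est = torch.randn(3, 2, 112, 112)                  # 2-channel raw scores from the head
    binary_gt = coarse_segm_gt > 0                                  # any body part counts as foreground
    loss = F.cross_entropy(coarse_segm_est, binary_gt.long())
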
diff --git a/densepose/modeling/losses/soft_embed.py b/densepose/modeling/losses/soft_embed.py
new file mode 100644
index 0000000000000000000000000000000000000000..03b69ec36a59ae0d69bb77efa77f93c6f95fad97
--- /dev/null
+++ b/densepose/modeling/losses/soft_embed.py
@@ -0,0 +1,133 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from typing import Any, Dict, List
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.structures import Instances
+
+from densepose.data.meshes.catalog import MeshCatalog
+from densepose.modeling.cse.utils import normalize_embeddings, squared_euclidean_distance_matrix
+from densepose.structures.mesh import create_mesh
+
+from .embed_utils import PackedCseAnnotations
+from .utils import BilinearInterpolationHelper
+
+
+class SoftEmbeddingLoss:
+ """
+ Computes losses for estimated embeddings given annotated vertices.
+ Instances in a minibatch that correspond to the same mesh are grouped
+ together. For each group, loss is computed as cross-entropy for
+ unnormalized scores given ground truth mesh vertex ids.
+ Scores are based on:
+ 1) squared distances between estimated vertex embeddings
+ and mesh vertex embeddings;
+ 2) geodesic distances between vertices of a mesh
+ """
+
+ def __init__(self, cfg: CfgNode):
+ """
+ Initialize embedding loss from config
+ """
+ self.embdist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBEDDING_DIST_GAUSS_SIGMA
+ self.geodist_gauss_sigma = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.GEODESIC_DIST_GAUSS_SIGMA
+
+ def __call__(
+ self,
+ proposals_with_gt: List[Instances],
+ densepose_predictor_outputs: Any,
+ packed_annotations: PackedCseAnnotations,
+ interpolator: BilinearInterpolationHelper,
+ embedder: nn.Module,
+ ) -> Dict[int, torch.Tensor]:
+ """
+ Produces losses for estimated embeddings given annotated vertices.
+ Embeddings for all the vertices of a mesh are computed by the embedder.
+ Embeddings for observed pixels are estimated by a predictor.
+ Losses are computed as soft cross-entropy between two distributions over
+ mesh vertices for each annotated point:
+ 1) the predicted distribution, derived from squared distances between
+ estimated point embeddings and mesh vertex embeddings;
+ 2) the target distribution, derived from geodesic distances between mesh
+ vertices and the ground truth vertex.
+
+ Args:
+ proposals_with_gt (list of Instances): detections with associated
+ ground truth data; each item corresponds to instances detected
+ on 1 image; the number of items corresponds to the number of
+ images in a batch
+ densepose_predictor_outputs: an object of a dataclass that contains predictor
+ outputs with estimated values; assumed to have the following attributes:
+ * embedding - embedding estimates, tensor of shape [N, D, S, S], where
+ N = number of instances (= sum N_i, where N_i is the number of
+ instances on image i)
+ D = embedding space dimensionality (MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE)
+ S = output size (width and height)
+ packed_annotations (PackedCseAnnotations): contains various data useful
+ for loss computation, each data is packed into a single tensor
+ interpolator (BilinearInterpolationHelper): bilinear interpolation helper
+ embedder (nn.Module): module that computes vertex embeddings for different meshes
+ Return:
+ dict(int -> tensor): losses for different mesh IDs
+ """
+ losses = {}
+ for mesh_id_tensor in packed_annotations.vertex_mesh_ids_gt.unique():
+ mesh_id = mesh_id_tensor.item()
+ mesh_name = MeshCatalog.get_mesh_name(mesh_id)
+ # valid points are those that fall into estimated bbox
+ # and correspond to the current mesh
+ j_valid = interpolator.j_valid * ( # pyre-ignore[16]
+ packed_annotations.vertex_mesh_ids_gt == mesh_id
+ )
+ if not torch.any(j_valid):
+ continue
+ # extract estimated embeddings for valid points
+ # -> tensor [J, D]
+ vertex_embeddings_i = normalize_embeddings(
+ interpolator.extract_at_points(
+ densepose_predictor_outputs.embedding,
+ slice_fine_segm=slice(None),
+ w_ylo_xlo=interpolator.w_ylo_xlo[:, None], # pyre-ignore[16]
+ w_ylo_xhi=interpolator.w_ylo_xhi[:, None], # pyre-ignore[16]
+ w_yhi_xlo=interpolator.w_yhi_xlo[:, None], # pyre-ignore[16]
+ w_yhi_xhi=interpolator.w_yhi_xhi[:, None], # pyre-ignore[16]
+ )[j_valid, :]
+ )
+ # extract vertex ids for valid points
+ # -> tensor [J]
+ vertex_indices_i = packed_annotations.vertex_ids_gt[j_valid]
+ # embeddings for all mesh vertices
+ # -> tensor [K, D]
+ mesh_vertex_embeddings = embedder(mesh_name)
+ # softmax values of geodesic distances for GT mesh vertices
+ # -> tensor [J, K]
+ mesh = create_mesh(mesh_name, mesh_vertex_embeddings.device)
+ geodist_softmax_values = F.softmax(
+ mesh.geodists[vertex_indices_i] / (-self.geodist_gauss_sigma), dim=1
+ )
+ # logsoftmax values for valid points
+ # -> tensor [J, K]
+ embdist_logsoftmax_values = F.log_softmax(
+ squared_euclidean_distance_matrix(vertex_embeddings_i, mesh_vertex_embeddings)
+ / (-self.embdist_gauss_sigma),
+ dim=1,
+ )
+ losses[mesh_name] = (-geodist_softmax_values * embdist_logsoftmax_values).sum(1).mean()
+
+ for mesh_name in embedder.mesh_names:
+ if mesh_name not in losses:
+ losses[mesh_name] = self.fake_value(
+ densepose_predictor_outputs, embedder, mesh_name
+ )
+ return losses
+
+ def fake_values(self, densepose_predictor_outputs: Any, embedder: nn.Module):
+ losses = {}
+ for mesh_name in embedder.mesh_names:
+ losses[mesh_name] = self.fake_value(densepose_predictor_outputs, embedder, mesh_name)
+ return losses
+
+ def fake_value(self, densepose_predictor_outputs: Any, embedder: nn.Module, mesh_name: str):
+ return densepose_predictor_outputs.embedding.sum() * 0 + embedder(mesh_name).sum() * 0
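
A toy sketch of the soft assignment computed above (not part of this patch; random distances stand in for `mesh.geodists[...]` and the squared embedding distances): the target distribution comes from a softmax over scaled negative geodesic distances, the prediction from a log-softmax over scaled negative embedding distances.

    import torch
    import torch.nn.functional as F

    J, K = 4, 10                                 # J annotated points, K mesh vertices
    geodists_to_gt = torch.rand(J, K)            # geodesic distances from each GT vertex to all vertices
    embdists = torch.rand(J, K)                  # squared embedding distances, point vs. all vertices
    geodist_sigma, embdist_sigma = 0.5, 0.1      # stand-ins for the two GAUSS_SIGMA config values

    target = F.softmax(geodists_to_gt / (-geodist_sigma), dim=1)  # soft GT over vertices, rows sum to 1
    log_pred = F.log_softmax(embdists / (-embdist_sigma), dim=1)
    loss = (-target * log_pred).sum(1).mean()    # soft cross-entropy, as in SoftEmbeddingLoss.__call__
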
diff --git a/densepose/modeling/losses/utils.py b/densepose/modeling/losses/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ceea981d11650af80cb007fe129a3ee4864fc48f
--- /dev/null
+++ b/densepose/modeling/losses/utils.py
@@ -0,0 +1,443 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from abc import ABC, abstractmethod
+from dataclasses import dataclass
+from typing import Any, Dict, List, Optional, Tuple
+import torch
+from torch.nn import functional as F
+
+from detectron2.structures import BoxMode, Instances
+
+from densepose import DensePoseDataRelative
+
+LossDict = Dict[str, torch.Tensor]
+
+
+def _linear_interpolation_utilities(v_norm, v0_src, size_src, v0_dst, size_dst, size_z):
+ """
+ Computes utility values for linear interpolation at points v.
+ The points are given as normalized offsets in the source interval
+ (v0_src, v0_src + size_src), more precisely:
+ v = v0_src + v_norm * size_src / 256.0
+ The computed utilities include lower points v_lo, upper points v_hi,
+ interpolation weights v_w and flags j_valid indicating whether the
+ points fall into the destination interval (v0_dst, v0_dst + size_dst).
+
+ Args:
+ v_norm (:obj: `torch.Tensor`): tensor of size N containing
+ normalized point offsets
+ v0_src (:obj: `torch.Tensor`): tensor of size N containing
+ left bounds of source intervals for normalized points
+ size_src (:obj: `torch.Tensor`): tensor of size N containing
+ source interval sizes for normalized points
+ v0_dst (:obj: `torch.Tensor`): tensor of size N containing
+ left bounds of destination intervals
+ size_dst (:obj: `torch.Tensor`): tensor of size N containing
+ destination interval sizes
+ size_z (int): interval size for data to be interpolated
+
+ Returns:
+ v_lo (:obj: `torch.Tensor`): int tensor of size N containing
+ indices of lower values used for interpolation, all values are
+ integers from [0, size_z - 1]
+ v_hi (:obj: `torch.Tensor`): int tensor of size N containing
+ indices of upper values used for interpolation, all values are
+ integers from [0, size_z - 1]
+ v_w (:obj: `torch.Tensor`): float tensor of size N containing
+ interpolation weights
+ j_valid (:obj: `torch.Tensor`): uint8 tensor of size N containing
+ 0 for points outside the destination interval
+ (v0_dst, v0_dst + size_dst) and 1 otherwise
+ """
+ v = v0_src + v_norm * size_src / 256.0
+ j_valid = (v - v0_dst >= 0) * (v - v0_dst < size_dst)
+ v_grid = (v - v0_dst) * size_z / size_dst
+ v_lo = v_grid.floor().long().clamp(min=0, max=size_z - 1)
+ v_hi = (v_lo + 1).clamp(max=size_z - 1)
+ v_grid = torch.min(v_hi.float(), v_grid)
+ v_w = v_grid - v_lo.float()
+ return v_lo, v_hi, v_w, j_valid
+
+
+class BilinearInterpolationHelper:
+ """
+ Args:
+ packed_annotations: object that contains packed annotations
+ j_valid (:obj: `torch.Tensor`): uint8 tensor of size M containing
+ 0 for points to be discarded and 1 for points to be selected
+ y_lo (:obj: `torch.Tensor`): int tensor of indices of upper values
+ in z_est for each point
+ y_hi (:obj: `torch.Tensor`): int tensor of indices of lower values
+ in z_est for each point
+ x_lo (:obj: `torch.Tensor`): int tensor of indices of left values
+ in z_est for each point
+ x_hi (:obj: `torch.Tensor`): int tensor of indices of right values
+ in z_est for each point
+ w_ylo_xlo (:obj: `torch.Tensor`): float tensor of size M;
+ contains upper-left value weight for each point
+ w_ylo_xhi (:obj: `torch.Tensor`): float tensor of size M;
+ contains upper-right value weight for each point
+ w_yhi_xlo (:obj: `torch.Tensor`): float tensor of size M;
+ contains lower-left value weight for each point
+ w_yhi_xhi (:obj: `torch.Tensor`): float tensor of size M;
+ contains lower-right value weight for each point
+ """
+
+ def __init__(
+ self,
+ packed_annotations: Any,
+ j_valid: torch.Tensor,
+ y_lo: torch.Tensor,
+ y_hi: torch.Tensor,
+ x_lo: torch.Tensor,
+ x_hi: torch.Tensor,
+ w_ylo_xlo: torch.Tensor,
+ w_ylo_xhi: torch.Tensor,
+ w_yhi_xlo: torch.Tensor,
+ w_yhi_xhi: torch.Tensor,
+ ):
+ for k, v in locals().items():
+ if k != "self":
+ setattr(self, k, v)
+
+ @staticmethod
+ def from_matches(
+ packed_annotations: Any, densepose_outputs_size_hw: Tuple[int, int]
+ ) -> "BilinearInterpolationHelper":
+ """
+ Args:
+ packed_annotations: annotations packed into tensors, the following
+ attributes are required:
+ - bbox_xywh_gt
+ - bbox_xywh_est
+ - x_gt
+ - y_gt
+ - point_bbox_with_dp_indices
+ - point_bbox_indices
+ densepose_outputs_size_hw (tuple [int, int]): resolution of
+ DensePose predictor outputs (H, W)
+ Return:
+ An instance of `BilinearInterpolationHelper` used to perform
+ interpolation for the given annotation points and output resolution
+ """
+
+ zh, zw = densepose_outputs_size_hw
+ x0_gt, y0_gt, w_gt, h_gt = packed_annotations.bbox_xywh_gt[
+ packed_annotations.point_bbox_with_dp_indices
+ ].unbind(dim=1)
+ x0_est, y0_est, w_est, h_est = packed_annotations.bbox_xywh_est[
+ packed_annotations.point_bbox_with_dp_indices
+ ].unbind(dim=1)
+ x_lo, x_hi, x_w, jx_valid = _linear_interpolation_utilities(
+ packed_annotations.x_gt, x0_gt, w_gt, x0_est, w_est, zw
+ )
+ y_lo, y_hi, y_w, jy_valid = _linear_interpolation_utilities(
+ packed_annotations.y_gt, y0_gt, h_gt, y0_est, h_est, zh
+ )
+ j_valid = jx_valid * jy_valid
+
+ w_ylo_xlo = (1.0 - x_w) * (1.0 - y_w)
+ w_ylo_xhi = x_w * (1.0 - y_w)
+ w_yhi_xlo = (1.0 - x_w) * y_w
+ w_yhi_xhi = x_w * y_w
+
+ return BilinearInterpolationHelper(
+ packed_annotations,
+ j_valid,
+ y_lo,
+ y_hi,
+ x_lo,
+ x_hi,
+ w_ylo_xlo, # pyre-ignore[6]
+ w_ylo_xhi,
+ # pyre-fixme[6]: Expected `Tensor` for 9th param but got `float`.
+ w_yhi_xlo,
+ w_yhi_xhi,
+ )
+
+ def extract_at_points(
+ self,
+ z_est,
+ slice_fine_segm=None,
+ w_ylo_xlo=None,
+ w_ylo_xhi=None,
+ w_yhi_xlo=None,
+ w_yhi_xhi=None,
+ ):
+ """
+ Extract ground truth values z_gt for valid point indices and estimated
+ values z_est using bilinear interpolation over top-left (y_lo, x_lo),
+ top-right (y_lo, x_hi), bottom-left (y_hi, x_lo) and bottom-right
+ (y_hi, x_hi) values in z_est with corresponding weights:
+ w_ylo_xlo, w_ylo_xhi, w_yhi_xlo and w_yhi_xhi.
+ Use slice_fine_segm to slice dim=1 in z_est
+ """
+ slice_fine_segm = (
+ self.packed_annotations.fine_segm_labels_gt
+ if slice_fine_segm is None
+ else slice_fine_segm
+ )
+ w_ylo_xlo = self.w_ylo_xlo if w_ylo_xlo is None else w_ylo_xlo
+ w_ylo_xhi = self.w_ylo_xhi if w_ylo_xhi is None else w_ylo_xhi
+ w_yhi_xlo = self.w_yhi_xlo if w_yhi_xlo is None else w_yhi_xlo
+ w_yhi_xhi = self.w_yhi_xhi if w_yhi_xhi is None else w_yhi_xhi
+
+ index_bbox = self.packed_annotations.point_bbox_indices
+ z_est_sampled = (
+ z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_lo] * w_ylo_xlo
+ + z_est[index_bbox, slice_fine_segm, self.y_lo, self.x_hi] * w_ylo_xhi
+ + z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_lo] * w_yhi_xlo
+ + z_est[index_bbox, slice_fine_segm, self.y_hi, self.x_hi] * w_yhi_xhi
+ )
+ return z_est_sampled
+
+
+def resample_data(
+ z, bbox_xywh_src, bbox_xywh_dst, wout, hout, mode: str = "nearest", padding_mode: str = "zeros"
+):
+ """
+ Args:
+ z (:obj: `torch.Tensor`): tensor of size (N,C,H,W) with data to be
+ resampled
+ bbox_xywh_src (:obj: `torch.Tensor`): tensor of size (N,4) containing
+ source bounding boxes in format XYWH
+ bbox_xywh_dst (:obj: `torch.Tensor`): tensor of size (N,4) containing
+ destination bounding boxes in format XYWH
+ wout (int): width of the output grid
+ hout (int): height of the output grid
+ Return:
+ zresampled (:obj: `torch.Tensor`): tensor of size (N, C, hout, wout)
+ with values of z resampled onto the destination grid
+ """
+ n = bbox_xywh_src.size(0)
+ assert n == bbox_xywh_dst.size(0), (
+ "The number of "
+ "source ROIs for resampling ({}) should be equal to the number "
+ "of destination ROIs ({})".format(bbox_xywh_src.size(0), bbox_xywh_dst.size(0))
+ )
+ x0src, y0src, wsrc, hsrc = bbox_xywh_src.unbind(dim=1)
+ x0dst, y0dst, wdst, hdst = bbox_xywh_dst.unbind(dim=1)
+ x0dst_norm = 2 * (x0dst - x0src) / wsrc - 1
+ y0dst_norm = 2 * (y0dst - y0src) / hsrc - 1
+ x1dst_norm = 2 * (x0dst + wdst - x0src) / wsrc - 1
+ y1dst_norm = 2 * (y0dst + hdst - y0src) / hsrc - 1
+ grid_w = torch.arange(wout, device=z.device, dtype=torch.float) / wout
+ grid_h = torch.arange(hout, device=z.device, dtype=torch.float) / hout
+ grid_w_expanded = grid_w[None, None, :].expand(n, hout, wout)
+ grid_h_expanded = grid_h[None, :, None].expand(n, hout, wout)
+ dx_expanded = (x1dst_norm - x0dst_norm)[:, None, None].expand(n, hout, wout)
+ dy_expanded = (y1dst_norm - y0dst_norm)[:, None, None].expand(n, hout, wout)
+ x0_expanded = x0dst_norm[:, None, None].expand(n, hout, wout)
+ y0_expanded = y0dst_norm[:, None, None].expand(n, hout, wout)
+ grid_x = grid_w_expanded * dx_expanded + x0_expanded
+ grid_y = grid_h_expanded * dy_expanded + y0_expanded
+ grid = torch.stack((grid_x, grid_y), dim=3)
+ # resample Z from (N, C, H, W) into (N, C, Hout, Wout)
+ zresampled = F.grid_sample(z, grid, mode=mode, padding_mode=padding_mode, align_corners=True)
+ return zresampled
+
+
+class AnnotationsAccumulator(ABC):
+ """
+ Abstract class for an accumulator for annotations that can produce
+ dense annotations packed into tensors.
+ """
+
+ @abstractmethod
+ def accumulate(self, instances_one_image: Instances):
+ """
+ Accumulate instances data for one image
+
+ Args:
+ instances_one_image (Instances): instances data to accumulate
+ """
+ pass
+
+ @abstractmethod
+ def pack(self) -> Any:
+ """
+ Pack data into tensors
+ """
+ pass
+
+
+@dataclass
+class PackedChartBasedAnnotations:
+ """
+ Packed annotations for chart-based model training. The following attributes
+ are defined:
+ - fine_segm_labels_gt (tensor [K] of `int64`): GT fine segmentation point labels
+ - x_gt (tensor [K] of `float32`): GT normalized X point coordinates
+ - y_gt (tensor [K] of `float32`): GT normalized Y point coordinates
+ - u_gt (tensor [K] of `float32`): GT point U values
+ - v_gt (tensor [K] of `float32`): GT point V values
+ - coarse_segm_gt (tensor [N, S, S] of `float32`): GT segmentation for bounding boxes
+ - bbox_xywh_gt (tensor [N, 4] of `float32`): selected GT bounding boxes in
+ XYWH format
+ - bbox_xywh_est (tensor [N, 4] of `float32`): selected matching estimated
+ bounding boxes in XYWH format
+ - point_bbox_with_dp_indices (tensor [K] of `int64`): indices of bounding boxes
+ with DensePose annotations that correspond to the point data
+ - point_bbox_indices (tensor [K] of `int64`): indices of bounding boxes
+ (not necessarily the selected ones with DensePose data) that correspond
+ to the point data
+ - bbox_indices (tensor [N] of `int64`): global indices of selected bounding
+ boxes with DensePose annotations; these indices could be used to access
+ features that are computed for all bounding boxes, not only the ones with
+ DensePose annotations.
+ Here K is the total number of points and N is the total number of instances
+ with DensePose annotations.
+ """
+
+ fine_segm_labels_gt: torch.Tensor
+ x_gt: torch.Tensor
+ y_gt: torch.Tensor
+ u_gt: torch.Tensor
+ v_gt: torch.Tensor
+ coarse_segm_gt: Optional[torch.Tensor]
+ bbox_xywh_gt: torch.Tensor
+ bbox_xywh_est: torch.Tensor
+ point_bbox_with_dp_indices: torch.Tensor
+ point_bbox_indices: torch.Tensor
+ bbox_indices: torch.Tensor
+
+
+class ChartBasedAnnotationsAccumulator(AnnotationsAccumulator):
+ """
+ Accumulates annotations by batches that correspond to objects detected on
+ individual images. Can pack them together into single tensors.
+ """
+
+ def __init__(self):
+ self.i_gt = []
+ self.x_gt = []
+ self.y_gt = []
+ self.u_gt = []
+ self.v_gt = []
+ self.s_gt = []
+ self.bbox_xywh_gt = []
+ self.bbox_xywh_est = []
+ self.point_bbox_with_dp_indices = []
+ self.point_bbox_indices = []
+ self.bbox_indices = []
+ self.nxt_bbox_with_dp_index = 0
+ self.nxt_bbox_index = 0
+
+ def accumulate(self, instances_one_image: Instances):
+ """
+ Accumulate instances data for one image
+
+ Args:
+ instances_one_image (Instances): instances data to accumulate
+ """
+ boxes_xywh_est = BoxMode.convert(
+ instances_one_image.proposal_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
+ )
+ boxes_xywh_gt = BoxMode.convert(
+ instances_one_image.gt_boxes.tensor.clone(), BoxMode.XYXY_ABS, BoxMode.XYWH_ABS
+ )
+ n_matches = len(boxes_xywh_gt)
+ assert n_matches == len(
+ boxes_xywh_est
+ ), f"Got {len(boxes_xywh_est)} proposal boxes and {len(boxes_xywh_gt)} GT boxes"
+ if not n_matches:
+ # no matches between detections and GT
+ return
+ if (
+ not hasattr(instances_one_image, "gt_densepose")
+ or instances_one_image.gt_densepose is None
+ ):
+ # no densepose GT for the detections, just increase the bbox index
+ self.nxt_bbox_index += n_matches
+ return
+ for box_xywh_est, box_xywh_gt, dp_gt in zip(
+ boxes_xywh_est, boxes_xywh_gt, instances_one_image.gt_densepose
+ ):
+ if (dp_gt is not None) and (len(dp_gt.x) > 0):
+ # pyre-fixme[6]: For 1st argument expected `Tensor` but got `float`.
+ # pyre-fixme[6]: For 2nd argument expected `Tensor` but got `float`.
+ self._do_accumulate(box_xywh_gt, box_xywh_est, dp_gt)
+ self.nxt_bbox_index += 1
+
+ def _do_accumulate(
+ self, box_xywh_gt: torch.Tensor, box_xywh_est: torch.Tensor, dp_gt: DensePoseDataRelative
+ ):
+ """
+ Accumulate instances data for one image, given that the data is not empty
+
+ Args:
+ box_xywh_gt (tensor): GT bounding box
+ box_xywh_est (tensor): estimated bounding box
+ dp_gt (DensePoseDataRelative): GT densepose data
+ """
+ self.i_gt.append(dp_gt.i)
+ self.x_gt.append(dp_gt.x)
+ self.y_gt.append(dp_gt.y)
+ self.u_gt.append(dp_gt.u)
+ self.v_gt.append(dp_gt.v)
+ if hasattr(dp_gt, "segm"):
+ self.s_gt.append(dp_gt.segm.unsqueeze(0))
+ self.bbox_xywh_gt.append(box_xywh_gt.view(-1, 4))
+ self.bbox_xywh_est.append(box_xywh_est.view(-1, 4))
+ self.point_bbox_with_dp_indices.append(
+ torch.full_like(dp_gt.i, self.nxt_bbox_with_dp_index)
+ )
+ self.point_bbox_indices.append(torch.full_like(dp_gt.i, self.nxt_bbox_index))
+ self.bbox_indices.append(self.nxt_bbox_index)
+ self.nxt_bbox_with_dp_index += 1
+
+ def pack(self) -> Optional[PackedChartBasedAnnotations]:
+ """
+ Pack data into tensors
+ """
+ if not len(self.i_gt):
+ # TODO:
+ # returning proper empty annotations would require
+ # creating empty tensors of appropriate shape and
+ # type on an appropriate device;
+ # we return None so far to indicate empty annotations
+ return None
+ return PackedChartBasedAnnotations(
+ fine_segm_labels_gt=torch.cat(self.i_gt, 0).long(),
+ x_gt=torch.cat(self.x_gt, 0),
+ y_gt=torch.cat(self.y_gt, 0),
+ u_gt=torch.cat(self.u_gt, 0),
+ v_gt=torch.cat(self.v_gt, 0),
+ # ignore segmentation annotations, if not all the instances contain those
+ coarse_segm_gt=torch.cat(self.s_gt, 0)
+ if len(self.s_gt) == len(self.bbox_xywh_gt)
+ else None,
+ bbox_xywh_gt=torch.cat(self.bbox_xywh_gt, 0),
+ bbox_xywh_est=torch.cat(self.bbox_xywh_est, 0),
+ point_bbox_with_dp_indices=torch.cat(self.point_bbox_with_dp_indices, 0).long(),
+ point_bbox_indices=torch.cat(self.point_bbox_indices, 0).long(),
+ bbox_indices=torch.as_tensor(
+ self.bbox_indices, dtype=torch.long, device=self.x_gt[0].device
+ ).long(),
+ )
+
+
+def extract_packed_annotations_from_matches(
+ proposals_with_targets: List[Instances], accumulator: AnnotationsAccumulator
+) -> Any:
+ for proposals_targets_per_image in proposals_with_targets:
+ accumulator.accumulate(proposals_targets_per_image)
+ return accumulator.pack()
+
+
+def sample_random_indices(
+ n_indices: int, n_samples: int, device: Optional[torch.device] = None
+) -> Optional[torch.Tensor]:
+ """
+ Samples `n_samples` random indices from the range `[0..n_indices - 1]`.
+ Returns `None` if `n_samples` is non-positive or `n_indices` does not exceed
+ `n_samples`, meaning that all indices are selected.
+ Args:
+ n_indices (int): total number of indices
+ n_samples (int): number of indices to sample
+ device (torch.device): the desired device of returned tensor
+ Return:
+ Tensor of selected vertex indices, or `None`, if all vertices are selected
+ """
+ if (n_samples <= 0) or (n_indices <= n_samples):
+ return None
+ indices = torch.randperm(n_indices, device=device)[:n_samples]
+ return indices
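
A minimal sketch of `resample_data` (as defined above) in isolation, with hypothetical boxes: data defined over a GT box is resampled onto the grid of the matching proposal box, which is how `SegmentationLoss` aligns ground truth with predictor outputs.

    import torch

    segm_gt = torch.randint(0, 2, (1, 1, 256, 256)).float()     # (N, C, H, W) data over the source box
    bbox_xywh_gt = torch.tensor([[10.0, 20.0, 100.0, 200.0]])   # source (GT) box, XYWH
    bbox_xywh_est = torch.tensor([[12.0, 18.0, 110.0, 190.0]])  # destination (proposal) box, XYWH
    aligned = resample_data(segm_gt, bbox_xywh_gt, bbox_xywh_est, 112, 112, mode="nearest")
    print(aligned.shape)  # torch.Size([1, 1, 112, 112])
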
diff --git a/densepose/modeling/predictors/__init__.py b/densepose/modeling/predictors/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..1ece0757acf2a4924079c884cab44a71cea22c37
--- /dev/null
+++ b/densepose/modeling/predictors/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .chart import DensePoseChartPredictor
+from .chart_confidence import DensePoseChartConfidencePredictorMixin
+from .chart_with_confidence import DensePoseChartWithConfidencePredictor
+from .cse import DensePoseEmbeddingPredictor
+from .cse_confidence import DensePoseEmbeddingConfidencePredictorMixin
+from .cse_with_confidence import DensePoseEmbeddingWithConfidencePredictor
+from .registry import DENSEPOSE_PREDICTOR_REGISTRY
diff --git a/densepose/modeling/predictors/chart.py b/densepose/modeling/predictors/chart.py
new file mode 100644
index 0000000000000000000000000000000000000000..3bcd13f7c592e37c2751556cda1f6e9cd3400b73
--- /dev/null
+++ b/densepose/modeling/predictors/chart.py
@@ -0,0 +1,94 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import torch
+from torch import nn
+
+from detectron2.config import CfgNode
+from detectron2.layers import ConvTranspose2d, interpolate
+
+from ...structures import DensePoseChartPredictorOutput
+from ..utils import initialize_module_params
+from .registry import DENSEPOSE_PREDICTOR_REGISTRY
+
+
+@DENSEPOSE_PREDICTOR_REGISTRY.register()
+class DensePoseChartPredictor(nn.Module):
+ """
+ Predictor (last layers of a DensePose model) that takes DensePose head outputs as an input
+ and produces 4 tensors which represent DensePose results for predefined body parts
+ (patches / charts):
+ * coarse segmentation, a tensor of shape [N, K, Hout, Wout]
+ * fine segmentation, a tensor of shape [N, C, Hout, Wout]
+ * U coordinates, a tensor of shape [N, C, Hout, Wout]
+ * V coordinates, a tensor of shape [N, C, Hout, Wout]
+ where
+ - N is the number of instances
+ - K is the number of coarse segmentation channels (
+ 2 = foreground / background,
+ 15 = one of 14 body parts / background)
+ - C is the number of fine segmentation channels (
+ 24 fine body parts / background)
+ - Hout and Wout are height and width of predictions
+ """
+
+ def __init__(self, cfg: CfgNode, input_channels: int):
+ """
+ Initialize predictor using configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): input tensor size along the channel dimension
+ """
+ super().__init__()
+ dim_in = input_channels
+ n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
+ dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
+ kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
+ # coarse segmentation
+ self.ann_index_lowres = ConvTranspose2d(
+ dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ # fine segmentation
+ self.index_uv_lowres = ConvTranspose2d(
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ # U
+ self.u_lowres = ConvTranspose2d(
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ # V
+ self.v_lowres = ConvTranspose2d(
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE
+ initialize_module_params(self)
+
+ def interp2d(self, tensor_nchw: torch.Tensor):
+ """
+ Bilinear interpolation method to be used for upscaling
+
+ Args:
+ tensor_nchw (tensor): tensor of shape (N, C, H, W)
+ Return:
+ tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed
+ by applying the scale factor to H and W
+ """
+ return interpolate(
+ tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False
+ )
+
+ def forward(self, head_outputs: torch.Tensor):
+ """
+ Perform forward step on DensePose head outputs
+
+ Args:
+ head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W]
+ Return:
+ An instance of DensePoseChartPredictorOutput
+ """
+ return DensePoseChartPredictorOutput(
+ coarse_segm=self.interp2d(self.ann_index_lowres(head_outputs)),
+ fine_segm=self.interp2d(self.index_uv_lowres(head_outputs)),
+ u=self.interp2d(self.u_lowres(head_outputs)),
+ v=self.interp2d(self.v_lowres(head_outputs)),
+ )
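
A quick check of the upscaling arithmetic behind the predictor above, assuming the common settings `DECONV_KERNEL = 4` and `UP_SCALE = 2` (both come from the config; plain `torch.nn.ConvTranspose2d` is used here instead of the detectron2 wrapper):

    import torch
    from torch.nn import ConvTranspose2d
    from torch.nn.functional import interpolate

    kernel_size, up_scale = 4, 2
    deconv = ConvTranspose2d(8, 3, kernel_size, stride=2, padding=int(kernel_size / 2 - 1))
    head_outputs = torch.randn(1, 8, 14, 14)
    lowres = deconv(head_outputs)    # (1, 3, 28, 28): the stride-2 deconvolution doubles H and W
    out = interpolate(lowres, scale_factor=up_scale, mode="bilinear", align_corners=False)
    print(lowres.shape, out.shape)   # torch.Size([1, 3, 28, 28]) torch.Size([1, 3, 56, 56])
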
diff --git a/densepose/modeling/predictors/chart_confidence.py b/densepose/modeling/predictors/chart_confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..0c0099952f3e675e42aa7d3b6d35065fdaf43dbb
--- /dev/null
+++ b/densepose/modeling/predictors/chart_confidence.py
@@ -0,0 +1,174 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any
+import torch
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.layers import ConvTranspose2d
+
+from ...structures import decorate_predictor_output_class_with_confidences
+from ..confidence import DensePoseConfidenceModelConfig, DensePoseUVConfidenceType
+from ..utils import initialize_module_params
+
+
+class DensePoseChartConfidencePredictorMixin:
+ """
+ Predictor contains the last layers of a DensePose model that take DensePose head
+ outputs as an input and produce model outputs. Confidence predictor mixin is used
+ to generate confidences for segmentation and UV tensors estimated by some
+ base predictor. Several assumptions need to hold for the base predictor:
+ 1) the `forward` method must return SIUV tuple as the first result (
+ S = coarse segmentation, I = fine segmentation, U and V are intrinsic
+ chart coordinates)
+ 2) `interp2d` method must be defined to perform bilinear interpolation;
+ the same method is typically used for SIUV and confidences
+ Confidence predictor mixin provides confidence estimates, as described in:
+ N. Neverova et al., Correlated Uncertainty for Learning Dense Correspondences
+ from Noisy Labels, NeurIPS 2019
+ A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
+ """
+
+ def __init__(self, cfg: CfgNode, input_channels: int):
+ """
+ Initialize confidence predictor using configuration options.
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): number of input channels
+ """
+ # we rely on base predictor to call nn.Module.__init__
+ super().__init__(cfg, input_channels) # pyre-ignore[19]
+ self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
+ self._initialize_confidence_estimation_layers(cfg, input_channels)
+ self._registry = {}
+ initialize_module_params(self) # pyre-ignore[6]
+
+ def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int):
+ """
+ Initialize confidence estimation layers based on configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ dim_in (int): number of input channels
+ """
+ dim_out_patches = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_PATCHES + 1
+ kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
+ if self.confidence_model_cfg.uv_confidence.enabled:
+ if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
+ self.sigma_2_lowres = ConvTranspose2d( # pyre-ignore[16]
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ elif (
+ self.confidence_model_cfg.uv_confidence.type
+ == DensePoseUVConfidenceType.INDEP_ANISO
+ ):
+ self.sigma_2_lowres = ConvTranspose2d(
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ self.kappa_u_lowres = ConvTranspose2d( # pyre-ignore[16]
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ self.kappa_v_lowres = ConvTranspose2d( # pyre-ignore[16]
+ dim_in, dim_out_patches, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ else:
+ raise ValueError(
+ f"Unknown confidence model type: "
+ f"{self.confidence_model_cfg.confidence_model_type}"
+ )
+ if self.confidence_model_cfg.segm_confidence.enabled:
+ self.fine_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16]
+ dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ self.coarse_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16]
+ dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+
+ def forward(self, head_outputs: torch.Tensor):
+ """
+ Perform forward operation on head outputs used as inputs for the predictor.
+ Calls forward method from the base predictor and uses its outputs to compute
+ confidences.
+
+ Args:
+ head_outputs (Tensor): head outputs used as predictor inputs
+ Return:
+ An instance of outputs with confidences,
+ see `decorate_predictor_output_class_with_confidences`
+ """
+ # assuming base class returns SIUV estimates in its first result
+ base_predictor_outputs = super().forward(head_outputs) # pyre-ignore[16]
+
+ # create output instance by extending base predictor outputs:
+ output = self._create_output_instance(base_predictor_outputs)
+
+ if self.confidence_model_cfg.uv_confidence.enabled:
+ if self.confidence_model_cfg.uv_confidence.type == DensePoseUVConfidenceType.IID_ISO:
+ # assuming base class defines interp2d method for bilinear interpolation
+ output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs)) # pyre-ignore[16]
+ elif (
+ self.confidence_model_cfg.uv_confidence.type
+ == DensePoseUVConfidenceType.INDEP_ANISO
+ ):
+ # assuming base class defines interp2d method for bilinear interpolation
+ output.sigma_2 = self.interp2d(self.sigma_2_lowres(head_outputs))
+ output.kappa_u = self.interp2d(self.kappa_u_lowres(head_outputs)) # pyre-ignore[16]
+ output.kappa_v = self.interp2d(self.kappa_v_lowres(head_outputs)) # pyre-ignore[16]
+ else:
+ raise ValueError(
+ f"Unknown confidence model type: "
+ f"{self.confidence_model_cfg.confidence_model_type}"
+ )
+ if self.confidence_model_cfg.segm_confidence.enabled:
+ # base predictor outputs are assumed to have `fine_segm` and `coarse_segm` attributes
+ # base predictor is assumed to define `interp2d` method for bilinear interpolation
+ output.fine_segm_confidence = (
+ F.softplus(
+ self.interp2d(self.fine_segm_confidence_lowres(head_outputs)) # pyre-ignore[16]
+ )
+ + self.confidence_model_cfg.segm_confidence.epsilon
+ )
+ output.fine_segm = base_predictor_outputs.fine_segm * torch.repeat_interleave(
+ output.fine_segm_confidence, base_predictor_outputs.fine_segm.shape[1], dim=1
+ )
+ output.coarse_segm_confidence = (
+ F.softplus(
+ self.interp2d(
+ self.coarse_segm_confidence_lowres(head_outputs) # pyre-ignore[16]
+ )
+ )
+ + self.confidence_model_cfg.segm_confidence.epsilon
+ )
+ output.coarse_segm = base_predictor_outputs.coarse_segm * torch.repeat_interleave(
+ output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1
+ )
+
+ return output
+
+ def _create_output_instance(self, base_predictor_outputs: Any):
+ """
+ Create an instance of predictor outputs by copying the outputs from the
+ base predictor and initializing confidence
+
+ Args:
+ base_predictor_outputs: an instance of base predictor outputs
+ (the outputs type is assumed to be a dataclass)
+ Return:
+ An instance of outputs with confidences
+ """
+ PredictorOutput = decorate_predictor_output_class_with_confidences(
+ type(base_predictor_outputs) # pyre-ignore[6]
+ )
+ # base_predictor_outputs is assumed to be a dataclass
+ # reassign all the fields from base_predictor_outputs (no deep copy!), add new fields
+ output = PredictorOutput(
+ **base_predictor_outputs.__dict__,
+ coarse_segm_confidence=None,
+ fine_segm_confidence=None,
+ sigma_1=None,
+ sigma_2=None,
+ kappa_u=None,
+ kappa_v=None,
+ )
+ return output
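
A toy sketch of the segmentation-confidence weighting applied above (hypothetical tensor sizes and `epsilon`): softplus plus epsilon keeps the confidence strictly positive, and `repeat_interleave` broadcasts the single confidence channel over all segmentation channels before the element-wise product.

    import torch
    import torch.nn.functional as F

    fine_segm = torch.randn(1, 25, 56, 56)                # raw fine segmentation scores, 25 channels
    confidence_lowres = torch.randn(1, 1, 56, 56)         # single-channel confidence logits
    epsilon = 0.01                                        # stand-in for segm_confidence.epsilon
    confidence = F.softplus(confidence_lowres) + epsilon  # strictly positive everywhere
    weighted = fine_segm * torch.repeat_interleave(confidence, fine_segm.shape[1], dim=1)
    print(weighted.shape)  # torch.Size([1, 25, 56, 56])
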
diff --git a/densepose/modeling/predictors/chart_with_confidence.py b/densepose/modeling/predictors/chart_with_confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c1cd6cc8fda56e831fbc02a8ffdd844866c0e4f
--- /dev/null
+++ b/densepose/modeling/predictors/chart_with_confidence.py
@@ -0,0 +1,15 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from . import DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor
+from .registry import DENSEPOSE_PREDICTOR_REGISTRY
+
+
+@DENSEPOSE_PREDICTOR_REGISTRY.register()
+class DensePoseChartWithConfidencePredictor(
+ DensePoseChartConfidencePredictorMixin, DensePoseChartPredictor
+):
+ """
+ Predictor that combines chart and chart confidence estimation
+ """
+
+ pass
diff --git a/densepose/modeling/predictors/cse.py b/densepose/modeling/predictors/cse.py
new file mode 100644
index 0000000000000000000000000000000000000000..466a5ecddbfa338a2b603facf06d1f4510fff6eb
--- /dev/null
+++ b/densepose/modeling/predictors/cse.py
@@ -0,0 +1,70 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import torch
+from torch import nn
+
+from detectron2.config import CfgNode
+from detectron2.layers import ConvTranspose2d, interpolate
+
+from ...structures import DensePoseEmbeddingPredictorOutput
+from ..utils import initialize_module_params
+from .registry import DENSEPOSE_PREDICTOR_REGISTRY
+
+
+@DENSEPOSE_PREDICTOR_REGISTRY.register()
+class DensePoseEmbeddingPredictor(nn.Module):
+ """
+ Last layers of a DensePose model that take DensePose head outputs as an input
+ and produce model outputs for continuous surface embeddings (CSE).
+ """
+
+ def __init__(self, cfg: CfgNode, input_channels: int):
+ """
+ Initialize predictor using configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): input tensor size along the channel dimension
+ """
+ super().__init__()
+ dim_in = input_channels
+ n_segm_chan = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
+ embed_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
+ kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
+ # coarse segmentation
+ self.coarse_segm_lowres = ConvTranspose2d(
+ dim_in, n_segm_chan, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ # embedding
+ self.embed_lowres = ConvTranspose2d(
+ dim_in, embed_size, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+ self.scale_factor = cfg.MODEL.ROI_DENSEPOSE_HEAD.UP_SCALE
+ initialize_module_params(self)
+
+ def interp2d(self, tensor_nchw: torch.Tensor):
+ """
+ Bilinear interpolation method to be used for upscaling
+
+ Args:
+ tensor_nchw (tensor): tensor of shape (N, C, H, W)
+ Return:
+ tensor of shape (N, C, Hout, Wout), where Hout and Wout are computed
+ by applying the scale factor to H and W
+ """
+ return interpolate(
+ tensor_nchw, scale_factor=self.scale_factor, mode="bilinear", align_corners=False
+ )
+
+ def forward(self, head_outputs):
+ """
+ Perform forward step on DensePose head outputs
+
+ Args:
+ head_outputs (tensor): DensePose head outputs, tensor of shape [N, D, H, W]
+ Return:
+ An instance of DensePoseEmbeddingPredictorOutput
+ """
+ embed_lowres = self.embed_lowres(head_outputs)
+ coarse_segm_lowres = self.coarse_segm_lowres(head_outputs)
+ embed = self.interp2d(embed_lowres)
+ coarse_segm = self.interp2d(coarse_segm_lowres)
+ return DensePoseEmbeddingPredictorOutput(embedding=embed, coarse_segm=coarse_segm)
diff --git a/densepose/modeling/predictors/cse_confidence.py b/densepose/modeling/predictors/cse_confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..8220337cea8eb87bbdf74378079551259dcc37e2
--- /dev/null
+++ b/densepose/modeling/predictors/cse_confidence.py
@@ -0,0 +1,115 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from typing import Any
+import torch
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.layers import ConvTranspose2d
+
+from densepose.modeling.confidence import DensePoseConfidenceModelConfig
+from densepose.modeling.utils import initialize_module_params
+from densepose.structures import decorate_cse_predictor_output_class_with_confidences
+
+
+class DensePoseEmbeddingConfidencePredictorMixin:
+ """
+ Predictor contains the last layers of a DensePose model that take DensePose head
+ outputs as an input and produce model outputs. Confidence predictor mixin is used
+ to generate confidences for coarse segmentation estimated by some
+ base predictor. Several assumptions need to hold for the base predictor:
+ 1) the `forward` method must return CSE DensePose head outputs,
+ tensor of shape [N, D, H, W]
+ 2) `interp2d` method must be defined to perform bilinear interpolation;
+ the same method is typically used for masks and confidences
+ Confidence predictor mixin provides confidence estimates, as described in:
+ N. Neverova et al., Correlated Uncertainty for Learning Dense Correspondences
+ from Noisy Labels, NeurIPS 2019
+ A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
+ """
+
+ def __init__(self, cfg: CfgNode, input_channels: int):
+ """
+ Initialize confidence predictor using configuration options.
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): number of input channels
+ """
+ # we rely on base predictor to call nn.Module.__init__
+ super().__init__(cfg, input_channels) # pyre-ignore[19]
+ self.confidence_model_cfg = DensePoseConfidenceModelConfig.from_cfg(cfg)
+ self._initialize_confidence_estimation_layers(cfg, input_channels)
+ self._registry = {}
+ initialize_module_params(self) # pyre-ignore[6]
+
+ def _initialize_confidence_estimation_layers(self, cfg: CfgNode, dim_in: int):
+ """
+ Initialize confidence estimation layers based on configuration options
+
+ Args:
+ cfg (CfgNode): configuration options
+ dim_in (int): number of input channels
+ """
+ kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECONV_KERNEL
+ if self.confidence_model_cfg.segm_confidence.enabled:
+ self.coarse_segm_confidence_lowres = ConvTranspose2d( # pyre-ignore[16]
+ dim_in, 1, kernel_size, stride=2, padding=int(kernel_size / 2 - 1)
+ )
+
+ def forward(self, head_outputs: torch.Tensor):
+ """
+ Perform forward operation on head outputs used as inputs for the predictor.
+ Calls forward method from the base predictor and uses its outputs to compute
+ confidences.
+
+ Args:
+ head_outputs (Tensor): head outputs used as predictor inputs
+ Return:
+ An instance of outputs with confidences,
+ see `decorate_cse_predictor_output_class_with_confidences`
+ """
+ # assuming base class returns CSE estimates (embedding and coarse segmentation)
+ base_predictor_outputs = super().forward(head_outputs) # pyre-ignore[16]
+
+ # create output instance by extending base predictor outputs:
+ output = self._create_output_instance(base_predictor_outputs)
+
+ if self.confidence_model_cfg.segm_confidence.enabled:
+ # base predictor outputs are assumed to have `coarse_segm` attribute
+ # base predictor is assumed to define `interp2d` method for bilinear interpolation
+ output.coarse_segm_confidence = (
+ F.softplus(
+ self.interp2d( # pyre-ignore[16]
+ self.coarse_segm_confidence_lowres(head_outputs) # pyre-ignore[16]
+ )
+ )
+ + self.confidence_model_cfg.segm_confidence.epsilon
+ )
+ output.coarse_segm = base_predictor_outputs.coarse_segm * torch.repeat_interleave(
+ output.coarse_segm_confidence, base_predictor_outputs.coarse_segm.shape[1], dim=1
+ )
+
+ return output
+
+ def _create_output_instance(self, base_predictor_outputs: Any):
+ """
+ Create an instance of predictor outputs by copying the outputs from the
+ base predictor and initializing confidence
+
+ Args:
+ base_predictor_outputs: an instance of base predictor outputs
+ (the outputs type is assumed to be a dataclass)
+ Return:
+ An instance of outputs with confidences
+ """
+ PredictorOutput = decorate_cse_predictor_output_class_with_confidences(
+ type(base_predictor_outputs) # pyre-ignore[6]
+ )
+ # base_predictor_outputs is assumed to be a dataclass
+ # reassign all the fields from base_predictor_outputs (no deep copy!), add new fields
+ output = PredictorOutput(
+ **base_predictor_outputs.__dict__,
+ coarse_segm_confidence=None,
+ )
+ return output
diff --git a/densepose/modeling/predictors/cse_with_confidence.py b/densepose/modeling/predictors/cse_with_confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..17ecef67ffb67cd0e64de73632eaede1d8f3c701
--- /dev/null
+++ b/densepose/modeling/predictors/cse_with_confidence.py
@@ -0,0 +1,15 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from . import DensePoseEmbeddingConfidencePredictorMixin, DensePoseEmbeddingPredictor
+from .registry import DENSEPOSE_PREDICTOR_REGISTRY
+
+
+@DENSEPOSE_PREDICTOR_REGISTRY.register()
+class DensePoseEmbeddingWithConfidencePredictor(
+ DensePoseEmbeddingConfidencePredictorMixin, DensePoseEmbeddingPredictor
+):
+ """
+ Predictor that combines CSE and CSE confidence estimation
+ """
+
+ pass
diff --git a/densepose/modeling/predictors/registry.py b/densepose/modeling/predictors/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..f96901d3242fa8f3d35d053ed0bdd7649a045b88
--- /dev/null
+++ b/densepose/modeling/predictors/registry.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from detectron2.utils.registry import Registry
+
+DENSEPOSE_PREDICTOR_REGISTRY = Registry("DENSEPOSE_PREDICTOR")
diff --git a/densepose/modeling/roi_heads/__init__.py b/densepose/modeling/roi_heads/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..8403589f23ec2ffa8afafcd566ca0b0b7b2671a7
--- /dev/null
+++ b/densepose/modeling/roi_heads/__init__.py
@@ -0,0 +1,6 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .v1convx import DensePoseV1ConvXHead
+from .deeplab import DensePoseDeepLabHead
+from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
+from .roi_head import Decoder, DensePoseROIHeads
diff --git a/densepose/modeling/roi_heads/deeplab.py b/densepose/modeling/roi_heads/deeplab.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e5cb483037b302ff1fb2c305275a65e4ba4e941
--- /dev/null
+++ b/densepose/modeling/roi_heads/deeplab.py
@@ -0,0 +1,263 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import fvcore.nn.weight_init as weight_init
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.layers import Conv2d
+
+from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
+
+
+@ROI_DENSEPOSE_HEAD_REGISTRY.register()
+class DensePoseDeepLabHead(nn.Module):
+ """
+ DensePose head using DeepLabV3 model from
+ "Rethinking Atrous Convolution for Semantic Image Segmentation"
+ (https://arxiv.org/abs/1706.05587).
+ """
+
+ def __init__(self, cfg: CfgNode, input_channels: int):
+ super(DensePoseDeepLabHead, self).__init__()
+ # fmt: off
+ hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
+ kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
+ norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NORM
+ self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
+ self.use_nonlocal = cfg.MODEL.ROI_DENSEPOSE_HEAD.DEEPLAB.NONLOCAL_ON
+ # fmt: on
+ pad_size = kernel_size // 2
+ n_channels = input_channels
+
+ self.ASPP = ASPP(input_channels, [6, 12, 56], n_channels) # 6, 12, 56
+ self.add_module("ASPP", self.ASPP)
+
+ if self.use_nonlocal:
+ self.NLBlock = NONLocalBlock2D(input_channels, bn_layer=True)
+ self.add_module("NLBlock", self.NLBlock)
+ # weight_init.c2_msra_fill(self.ASPP)
+
+ for i in range(self.n_stacked_convs):
+ norm_module = nn.GroupNorm(32, hidden_dim) if norm == "GN" else None
+ layer = Conv2d(
+ n_channels,
+ hidden_dim,
+ kernel_size,
+ stride=1,
+ padding=pad_size,
+ bias=not norm,
+ norm=norm_module,
+ )
+ weight_init.c2_msra_fill(layer)
+ n_channels = hidden_dim
+ layer_name = self._get_layer_name(i)
+ self.add_module(layer_name, layer)
+ self.n_out_channels = hidden_dim
+ # initialize_module_params(self)
+
+ def forward(self, features):
+ x0 = features
+ x = self.ASPP(x0)
+ if self.use_nonlocal:
+ x = self.NLBlock(x)
+ output = x
+ for i in range(self.n_stacked_convs):
+ layer_name = self._get_layer_name(i)
+ x = getattr(self, layer_name)(x)
+ x = F.relu(x)
+ output = x
+ return output
+
+ def _get_layer_name(self, i: int):
+ layer_name = "body_conv_fcn{}".format(i + 1)
+ return layer_name
+
+
+# Copied from
+# https://github.com/pytorch/vision/blob/master/torchvision/models/segmentation/deeplabv3.py
+# See https://arxiv.org/pdf/1706.05587.pdf for details
+class ASPPConv(nn.Sequential):
+ def __init__(self, in_channels, out_channels, dilation):
+ modules = [
+ nn.Conv2d(
+ in_channels, out_channels, 3, padding=dilation, dilation=dilation, bias=False
+ ),
+ nn.GroupNorm(32, out_channels),
+ nn.ReLU(),
+ ]
+ super(ASPPConv, self).__init__(*modules)
+
+
+class ASPPPooling(nn.Sequential):
+ def __init__(self, in_channels, out_channels):
+ super(ASPPPooling, self).__init__(
+ nn.AdaptiveAvgPool2d(1),
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ nn.GroupNorm(32, out_channels),
+ nn.ReLU(),
+ )
+
+ def forward(self, x):
+ size = x.shape[-2:]
+ x = super(ASPPPooling, self).forward(x)
+ return F.interpolate(x, size=size, mode="bilinear", align_corners=False)
+
+
+class ASPP(nn.Module):
+ def __init__(self, in_channels, atrous_rates, out_channels):
+ super(ASPP, self).__init__()
+ modules = []
+ modules.append(
+ nn.Sequential(
+ nn.Conv2d(in_channels, out_channels, 1, bias=False),
+ nn.GroupNorm(32, out_channels),
+ nn.ReLU(),
+ )
+ )
+
+ rate1, rate2, rate3 = tuple(atrous_rates)
+ modules.append(ASPPConv(in_channels, out_channels, rate1))
+ modules.append(ASPPConv(in_channels, out_channels, rate2))
+ modules.append(ASPPConv(in_channels, out_channels, rate3))
+ modules.append(ASPPPooling(in_channels, out_channels))
+
+ self.convs = nn.ModuleList(modules)
+
+ self.project = nn.Sequential(
+ nn.Conv2d(5 * out_channels, out_channels, 1, bias=False),
+ # nn.BatchNorm2d(out_channels),
+ nn.ReLU()
+ # nn.Dropout(0.5)
+ )
+
+ def forward(self, x):
+ res = []
+ for conv in self.convs:
+ res.append(conv(x))
+ res = torch.cat(res, dim=1)
+ return self.project(res)
+
+
+# copied from
+# https://github.com/AlexHex7/Non-local_pytorch/blob/master/lib/non_local_embedded_gaussian.py
+# See https://arxiv.org/abs/1711.07971 for details
+class _NonLocalBlockND(nn.Module):
+ def __init__(
+ self, in_channels, inter_channels=None, dimension=3, sub_sample=True, bn_layer=True
+ ):
+ super(_NonLocalBlockND, self).__init__()
+
+ assert dimension in [1, 2, 3]
+
+ self.dimension = dimension
+ self.sub_sample = sub_sample
+
+ self.in_channels = in_channels
+ self.inter_channels = inter_channels
+
+ if self.inter_channels is None:
+ self.inter_channels = in_channels // 2
+ if self.inter_channels == 0:
+ self.inter_channels = 1
+
+ if dimension == 3:
+ conv_nd = nn.Conv3d
+ max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2))
+ bn = nn.GroupNorm # used here in place of nn.BatchNorm3d
+ elif dimension == 2:
+ conv_nd = nn.Conv2d
+ max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2))
+ bn = nn.GroupNorm # used here in place of nn.BatchNorm2d
+ else:
+ conv_nd = nn.Conv1d
+ max_pool_layer = nn.MaxPool1d(kernel_size=2)
+ bn = nn.GroupNorm # used here in place of nn.BatchNorm1d
+
+ self.g = conv_nd(
+ in_channels=self.in_channels,
+ out_channels=self.inter_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ )
+
+ if bn_layer:
+ self.W = nn.Sequential(
+ conv_nd(
+ in_channels=self.inter_channels,
+ out_channels=self.in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ ),
+ bn(32, self.in_channels),
+ )
+ nn.init.constant_(self.W[1].weight, 0)
+ nn.init.constant_(self.W[1].bias, 0)
+ else:
+ self.W = conv_nd(
+ in_channels=self.inter_channels,
+ out_channels=self.in_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ )
+ nn.init.constant_(self.W.weight, 0)
+ nn.init.constant_(self.W.bias, 0)
+
+ self.theta = conv_nd(
+ in_channels=self.in_channels,
+ out_channels=self.inter_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ )
+ self.phi = conv_nd(
+ in_channels=self.in_channels,
+ out_channels=self.inter_channels,
+ kernel_size=1,
+ stride=1,
+ padding=0,
+ )
+
+ if sub_sample:
+ self.g = nn.Sequential(self.g, max_pool_layer)
+ self.phi = nn.Sequential(self.phi, max_pool_layer)
+
+ def forward(self, x):
+ """
+ :param x: input features, e.g. (b, c, t, h, w) for 3D, (b, c, h, w) for 2D, (b, c, l) for 1D
+ :return: a tensor of the same shape as the input
+ """
+
+ batch_size = x.size(0)
+
+ g_x = self.g(x).view(batch_size, self.inter_channels, -1)
+ g_x = g_x.permute(0, 2, 1)
+
+ theta_x = self.theta(x).view(batch_size, self.inter_channels, -1)
+ theta_x = theta_x.permute(0, 2, 1)
+ phi_x = self.phi(x).view(batch_size, self.inter_channels, -1)
+ f = torch.matmul(theta_x, phi_x)
+ f_div_C = F.softmax(f, dim=-1)
+
+ y = torch.matmul(f_div_C, g_x)
+ y = y.permute(0, 2, 1).contiguous()
+ y = y.view(batch_size, self.inter_channels, *x.size()[2:])
+ W_y = self.W(y)
+ z = W_y + x
+
+ return z
+
+
+class NONLocalBlock2D(_NonLocalBlockND):
+ def __init__(self, in_channels, inter_channels=None, sub_sample=True, bn_layer=True):
+ super(NONLocalBlock2D, self).__init__(
+ in_channels,
+ inter_channels=inter_channels,
+ dimension=2,
+ sub_sample=sub_sample,
+ bn_layer=bn_layer,
+ )
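A small shape-check sketch with made-up dimensions, assuming the module is importable as below: the ASPP block concatenates its five branches and projects back to `out_channels`, and the non-local block preserves the input shape:

import torch
from densepose.modeling.roi_heads.deeplab import ASPP, NONLocalBlock2D

x = torch.randn(2, 256, 28, 28)     # hypothetical pooled ROI feature map
aspp = ASPP(256, [6, 12, 56], 256)  # same atrous rates as DensePoseDeepLabHead
nl = NONLocalBlock2D(256, bn_layer=True)
y = nl(aspp(x))
print(y.shape)  # torch.Size([2, 256, 28, 28]) -- spatial size is preserved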
diff --git a/densepose/modeling/roi_heads/registry.py b/densepose/modeling/roi_heads/registry.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1cea432f1fda3861266fa636d002667b3fb46a0
--- /dev/null
+++ b/densepose/modeling/roi_heads/registry.py
@@ -0,0 +1,5 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from detectron2.utils.registry import Registry
+
+ROI_DENSEPOSE_HEAD_REGISTRY = Registry("ROI_DENSEPOSE_HEAD")
diff --git a/densepose/modeling/roi_heads/roi_head.py b/densepose/modeling/roi_heads/roi_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..aee645fde0d8321de9181a624a0c921b6dc167c4
--- /dev/null
+++ b/densepose/modeling/roi_heads/roi_head.py
@@ -0,0 +1,218 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import numpy as np
+from typing import Dict, List, Optional
+import fvcore.nn.weight_init as weight_init
+import torch
+import torch.nn as nn
+from torch.nn import functional as F
+
+from detectron2.layers import Conv2d, ShapeSpec, get_norm
+from detectron2.modeling import ROI_HEADS_REGISTRY, StandardROIHeads
+from detectron2.modeling.poolers import ROIPooler
+from detectron2.modeling.roi_heads import select_foreground_proposals
+from detectron2.structures import ImageList, Instances
+
+from .. import (
+ build_densepose_data_filter,
+ build_densepose_embedder,
+ build_densepose_head,
+ build_densepose_losses,
+ build_densepose_predictor,
+ densepose_inference,
+)
+
+
+class Decoder(nn.Module):
+ """
+ A semantic segmentation head described in detail in the Panoptic Feature Pyramid Networks paper
+ (https://arxiv.org/abs/1901.02446). It takes FPN features as input and merges information from
+ all levels of the FPN into a single output.
+ """
+
+ def __init__(self, cfg, input_shape: Dict[str, ShapeSpec], in_features):
+ super(Decoder, self).__init__()
+
+ # fmt: off
+ self.in_features = in_features
+ feature_strides = {k: v.stride for k, v in input_shape.items()}
+ feature_channels = {k: v.channels for k, v in input_shape.items()}
+ num_classes = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES
+ conv_dims = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS
+ self.common_stride = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE
+ norm = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM
+ # fmt: on
+
+ self.scale_heads = []
+ for in_feature in self.in_features:
+ head_ops = []
+ head_length = max(
+ 1, int(np.log2(feature_strides[in_feature]) - np.log2(self.common_stride))
+ )
+ for k in range(head_length):
+ conv = Conv2d(
+ feature_channels[in_feature] if k == 0 else conv_dims,
+ conv_dims,
+ kernel_size=3,
+ stride=1,
+ padding=1,
+ bias=not norm,
+ norm=get_norm(norm, conv_dims),
+ activation=F.relu,
+ )
+ weight_init.c2_msra_fill(conv)
+ head_ops.append(conv)
+ if feature_strides[in_feature] != self.common_stride:
+ head_ops.append(
+ nn.Upsample(scale_factor=2, mode="bilinear", align_corners=False)
+ )
+ self.scale_heads.append(nn.Sequential(*head_ops))
+ self.add_module(in_feature, self.scale_heads[-1])
+ self.predictor = Conv2d(conv_dims, num_classes, kernel_size=1, stride=1, padding=0)
+ weight_init.c2_msra_fill(self.predictor)
+
+ def forward(self, features: List[torch.Tensor]):
+ for i, _ in enumerate(self.in_features):
+ if i == 0:
+ x = self.scale_heads[i](features[i])
+ else:
+ x = x + self.scale_heads[i](features[i])
+ x = self.predictor(x)
+ return x
+
+
+@ROI_HEADS_REGISTRY.register()
+class DensePoseROIHeads(StandardROIHeads):
+ """
+ A StandardROIHeads extended with an additional DensePose head.
+ """
+
+ def __init__(self, cfg, input_shape):
+ super().__init__(cfg, input_shape)
+ self._init_densepose_head(cfg, input_shape)
+
+ def _init_densepose_head(self, cfg, input_shape):
+ # fmt: off
+ self.densepose_on = cfg.MODEL.DENSEPOSE_ON
+ if not self.densepose_on:
+ return
+ self.densepose_data_filter = build_densepose_data_filter(cfg)
+ dp_pooler_resolution = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_RESOLUTION
+ dp_pooler_sampling_ratio = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_SAMPLING_RATIO
+ dp_pooler_type = cfg.MODEL.ROI_DENSEPOSE_HEAD.POOLER_TYPE
+ self.use_decoder = cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_ON
+ # fmt: on
+ if self.use_decoder:
+ dp_pooler_scales = (1.0 / input_shape[self.in_features[0]].stride,)
+ else:
+ dp_pooler_scales = tuple(1.0 / input_shape[k].stride for k in self.in_features)
+ in_channels = [input_shape[f].channels for f in self.in_features][0]
+
+ if self.use_decoder:
+ self.decoder = Decoder(cfg, input_shape, self.in_features)
+
+ self.densepose_pooler = ROIPooler(
+ output_size=dp_pooler_resolution,
+ scales=dp_pooler_scales,
+ sampling_ratio=dp_pooler_sampling_ratio,
+ pooler_type=dp_pooler_type,
+ )
+ self.densepose_head = build_densepose_head(cfg, in_channels)
+ self.densepose_predictor = build_densepose_predictor(
+ cfg, self.densepose_head.n_out_channels
+ )
+ self.densepose_losses = build_densepose_losses(cfg)
+ self.embedder = build_densepose_embedder(cfg)
+
+ def _forward_densepose(self, features: Dict[str, torch.Tensor], instances: List[Instances]):
+ """
+ Forward logic of the densepose prediction branch.
+
+ Args:
+ features (dict[str, Tensor]): input data as a mapping from feature
+ map name to tensor. Axis 0 represents the number of images `N` in
+ the input data; axes 1-3 are channels, height, and width, which may
+ vary between feature maps (e.g., if a feature pyramid is used).
+ instances (list[Instances]): length `N` list of `Instances`. The i-th
+ `Instances` contains instances for the i-th input image.
+ In training, they can be the proposals.
+ In inference, they can be the predicted boxes.
+
+ Returns:
+ In training, a dict of losses.
+ In inference, update `instances` with new fields "densepose" and return it.
+ """
+ if not self.densepose_on:
+ return {} if self.training else instances
+
+ features_list = [features[f] for f in self.in_features]
+ if self.training:
+ proposals, _ = select_foreground_proposals(instances, self.num_classes)
+ features_list, proposals = self.densepose_data_filter(features_list, proposals)
+ if len(proposals) > 0:
+ proposal_boxes = [x.proposal_boxes for x in proposals]
+
+ if self.use_decoder:
+ features_list = [self.decoder(features_list)]
+
+ features_dp = self.densepose_pooler(features_list, proposal_boxes)
+ densepose_head_outputs = self.densepose_head(features_dp)
+ densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)
+ densepose_loss_dict = self.densepose_losses(
+ proposals, densepose_predictor_outputs, embedder=self.embedder
+ )
+ return densepose_loss_dict
+ else:
+ pred_boxes = [x.pred_boxes for x in instances]
+
+ if self.use_decoder:
+ features_list = [self.decoder(features_list)]
+
+ features_dp = self.densepose_pooler(features_list, pred_boxes)
+ if len(features_dp) > 0:
+ densepose_head_outputs = self.densepose_head(features_dp)
+ densepose_predictor_outputs = self.densepose_predictor(densepose_head_outputs)
+ else:
+ densepose_predictor_outputs = None
+
+ densepose_inference(densepose_predictor_outputs, instances)
+ return instances
+
+ def forward(
+ self,
+ images: ImageList,
+ features: Dict[str, torch.Tensor],
+ proposals: List[Instances],
+ targets: Optional[List[Instances]] = None,
+ ):
+ instances, losses = super().forward(images, features, proposals, targets)
+ del targets, images
+
+ if self.training:
+ losses.update(self._forward_densepose(features, instances))
+ return instances, losses
+
+ def forward_with_given_boxes(
+ self, features: Dict[str, torch.Tensor], instances: List[Instances]
+ ):
+ """
+ Use the given boxes in `instances` to produce other (non-box) per-ROI outputs.
+
+ This is useful for downstream tasks where a box is known, but other
+ attributes (outputs of other heads) need to be obtained.
+ Test-time augmentation also uses this.
+
+ Args:
+ features: same as in `forward()`
+ instances (list[Instances]): instances to predict other outputs. Expect the keys
+ "pred_boxes" and "pred_classes" to exist.
+
+ Returns:
+ instances (list[Instances]):
+ the same `Instances` objects, with extra
+ fields such as `pred_masks` or `pred_keypoints`.
+ """
+
+ instances = super().forward_with_given_boxes(features, instances)
+ instances = self._forward_densepose(features, instances)
+ return instances
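A hedged sketch of the Decoder in isolation, using illustrative config values and FPN shapes (not the project defaults): all levels are merged at the common stride before ROI pooling:

import torch
from detectron2.config import CfgNode as CN
from detectron2.layers import ShapeSpec

from densepose.modeling.roi_heads import Decoder

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.ROI_DENSEPOSE_HEAD = CN()
cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NUM_CLASSES = 256
cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_CONV_DIMS = 256
cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_COMMON_STRIDE = 4
cfg.MODEL.ROI_DENSEPOSE_HEAD.DECODER_NORM = ""

in_features = ["p2", "p3", "p4", "p5"]
input_shape = {
    "p2": ShapeSpec(channels=256, stride=4),
    "p3": ShapeSpec(channels=256, stride=8),
    "p4": ShapeSpec(channels=256, stride=16),
    "p5": ShapeSpec(channels=256, stride=32),
}
decoder = Decoder(cfg, input_shape, in_features)

# FPN-like features for a hypothetical 256x256 image
feats = [
    torch.randn(1, 256, 256 // input_shape[f].stride, 256 // input_shape[f].stride)
    for f in in_features
]
out = decoder(feats)
print(out.shape)  # torch.Size([1, 256, 64, 64]) -- all levels merged at the common stride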
diff --git a/densepose/modeling/roi_heads/v1convx.py b/densepose/modeling/roi_heads/v1convx.py
new file mode 100644
index 0000000000000000000000000000000000000000..df79f658d8f7149e44aa1a31072adc4dadd89a48
--- /dev/null
+++ b/densepose/modeling/roi_heads/v1convx.py
@@ -0,0 +1,64 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.config import CfgNode
+from detectron2.layers import Conv2d
+
+from ..utils import initialize_module_params
+from .registry import ROI_DENSEPOSE_HEAD_REGISTRY
+
+
+@ROI_DENSEPOSE_HEAD_REGISTRY.register()
+class DensePoseV1ConvXHead(nn.Module):
+ """
+ Fully convolutional DensePose head.
+ """
+
+ def __init__(self, cfg: CfgNode, input_channels: int):
+ """
+ Initialize DensePose fully convolutional head
+
+ Args:
+ cfg (CfgNode): configuration options
+ input_channels (int): number of input channels
+ """
+ super(DensePoseV1ConvXHead, self).__init__()
+ # fmt: off
+ hidden_dim = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM
+ kernel_size = cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL
+ self.n_stacked_convs = cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS
+ # fmt: on
+ pad_size = kernel_size // 2
+ n_channels = input_channels
+ for i in range(self.n_stacked_convs):
+ layer = Conv2d(n_channels, hidden_dim, kernel_size, stride=1, padding=pad_size)
+ layer_name = self._get_layer_name(i)
+ self.add_module(layer_name, layer)
+ n_channels = hidden_dim
+ self.n_out_channels = n_channels
+ initialize_module_params(self)
+
+ def forward(self, features: torch.Tensor):
+ """
+ Apply DensePose fully convolutional head to the input features
+
+ Args:
+ features (tensor): input features
+ Return:
+ A tensor of DensePose head outputs
+ """
+ x = features
+ output = x
+ for i in range(self.n_stacked_convs):
+ layer_name = self._get_layer_name(i)
+ x = getattr(self, layer_name)(x)
+ x = F.relu(x)
+ output = x
+ return output
+
+ def _get_layer_name(self, i: int):
+ layer_name = "body_conv_fcn{}".format(i + 1)
+ return layer_name
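A minimal sketch (made-up config values) showing that this fully convolutional head keeps the spatial size of its input and exposes `n_out_channels` for the predictor that follows it:

import torch
from detectron2.config import CfgNode as CN

from densepose.modeling.roi_heads import DensePoseV1ConvXHead

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.ROI_DENSEPOSE_HEAD = CN()
cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_DIM = 512
cfg.MODEL.ROI_DENSEPOSE_HEAD.CONV_HEAD_KERNEL = 3
cfg.MODEL.ROI_DENSEPOSE_HEAD.NUM_STACKED_CONVS = 8

head = DensePoseV1ConvXHead(cfg, input_channels=256)
out = head(torch.randn(2, 256, 28, 28))
print(out.shape, head.n_out_channels)  # torch.Size([2, 512, 28, 28]) 512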
diff --git a/densepose/modeling/test_time_augmentation.py b/densepose/modeling/test_time_augmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..ec2022ed16727f538993d2c7db60a60a1183b90d
--- /dev/null
+++ b/densepose/modeling/test_time_augmentation.py
@@ -0,0 +1,207 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import numpy as np
+import torch
+from fvcore.transforms import HFlipTransform, TransformList
+from torch.nn import functional as F
+
+from detectron2.data.transforms import RandomRotation, RotationTransform, apply_transform_gens
+from detectron2.modeling.postprocessing import detector_postprocess
+from detectron2.modeling.test_time_augmentation import DatasetMapperTTA, GeneralizedRCNNWithTTA
+
+from ..converters import HFlipConverter
+
+
+class DensePoseDatasetMapperTTA(DatasetMapperTTA):
+ def __init__(self, cfg):
+ super().__init__(cfg=cfg)
+ self.angles = cfg.TEST.AUG.ROTATION_ANGLES
+
+ def __call__(self, dataset_dict):
+ ret = super().__call__(dataset_dict=dataset_dict)
+ numpy_image = dataset_dict["image"].permute(1, 2, 0).numpy()
+ for angle in self.angles:
+ rotate = RandomRotation(angle=angle, expand=True)
+ new_numpy_image, tfms = apply_transform_gens([rotate], np.copy(numpy_image))
+ torch_image = torch.from_numpy(np.ascontiguousarray(new_numpy_image.transpose(2, 0, 1)))
+ dic = copy.deepcopy(dataset_dict)
+ # In DatasetMapperTTA, there is a pre_tfm transform (resize or no-op) that is
+ # added at the beginning of each TransformList. That's '.transforms[0]'.
+ dic["transforms"] = TransformList(
+ [ret[-1]["transforms"].transforms[0]] + tfms.transforms
+ )
+ dic["image"] = torch_image
+ ret.append(dic)
+ return ret
+
+
+class DensePoseGeneralizedRCNNWithTTA(GeneralizedRCNNWithTTA):
+ def __init__(self, cfg, model, transform_data, tta_mapper=None, batch_size=1):
+ """
+ Args:
+ cfg (CfgNode):
+ model (GeneralizedRCNN): a GeneralizedRCNN to apply TTA on.
+ transform_data (DensePoseTransformData): contains symmetry label
+ transforms used for horizontal flip
+ tta_mapper (callable): takes a dataset dict and returns a list of
+ augmented versions of the dataset dict. Defaults to
+ `DatasetMapperTTA(cfg)`.
+ batch_size (int): batch the augmented images into this batch size for inference.
+ """
+ self._transform_data = transform_data.to(model.device)
+ super().__init__(cfg=cfg, model=model, tta_mapper=tta_mapper, batch_size=batch_size)
+
+ # the implementation follows closely the one from detectron2/modeling
+ def _inference_one_image(self, input):
+ """
+ Args:
+ input (dict): one dataset dict with "image" field being a CHW tensor
+
+ Returns:
+ dict: one output dict
+ """
+ orig_shape = (input["height"], input["width"])
+ # For some reason, resize with uint8 slightly increases box AP but decreases densepose AP
+ input["image"] = input["image"].to(torch.uint8)
+ augmented_inputs, tfms = self._get_augmented_inputs(input)
+ # Detect boxes from all augmented versions
+ with self._turn_off_roi_heads(["mask_on", "keypoint_on", "densepose_on"]):
+ # temporarily disable roi heads
+ all_boxes, all_scores, all_classes = self._get_augmented_boxes(augmented_inputs, tfms)
+ merged_instances = self._merge_detections(all_boxes, all_scores, all_classes, orig_shape)
+
+ if self.cfg.MODEL.MASK_ON or self.cfg.MODEL.DENSEPOSE_ON:
+ # Use the detected boxes to obtain new fields
+ augmented_instances = self._rescale_detected_boxes(
+ augmented_inputs, merged_instances, tfms
+ )
+ # run forward on the detected boxes
+ outputs = self._batch_inference(augmented_inputs, augmented_instances)
+ # Delete now useless variables to avoid being out of memory
+ del augmented_inputs, augmented_instances
+ # average the predictions
+ if self.cfg.MODEL.MASK_ON:
+ merged_instances.pred_masks = self._reduce_pred_masks(outputs, tfms)
+ if self.cfg.MODEL.DENSEPOSE_ON:
+ merged_instances.pred_densepose = self._reduce_pred_densepose(outputs, tfms)
+ # postprocess
+ merged_instances = detector_postprocess(merged_instances, *orig_shape)
+ return {"instances": merged_instances}
+ else:
+ return {"instances": merged_instances}
+
+ def _get_augmented_boxes(self, augmented_inputs, tfms):
+ # Heavily based on detectron2/modeling/test_time_augmentation.py
+ # Only difference is that RotationTransform is excluded from bbox computation
+ # 1: forward with all augmented images
+ outputs = self._batch_inference(augmented_inputs)
+ # 2: union the results
+ all_boxes = []
+ all_scores = []
+ all_classes = []
+ for output, tfm in zip(outputs, tfms):
+ # Need to inverse the transforms on boxes, to obtain results on original image
+ if not any(isinstance(t, RotationTransform) for t in tfm.transforms):
+ # Some transforms can't compute bbox correctly
+ pred_boxes = output.pred_boxes.tensor
+ original_pred_boxes = tfm.inverse().apply_box(pred_boxes.cpu().numpy())
+ all_boxes.append(torch.from_numpy(original_pred_boxes).to(pred_boxes.device))
+ all_scores.extend(output.scores)
+ all_classes.extend(output.pred_classes)
+ all_boxes = torch.cat(all_boxes, dim=0)
+ return all_boxes, all_scores, all_classes
+
+ def _reduce_pred_densepose(self, outputs, tfms):
+ # Should apply inverse transforms on densepose preds.
+ # We assume only rotation, resize & flip are used. The predictions are box-relative
+ # (hence scale-invariant), so only rotation and flip need explicit inverse handling below
+ for idx, (output, tfm) in enumerate(zip(outputs, tfms)):
+ for t in tfm.transforms:
+ for attr in ["coarse_segm", "fine_segm", "u", "v"]:
+ setattr(
+ output.pred_densepose,
+ attr,
+ _inverse_rotation(
+ getattr(output.pred_densepose, attr), output.pred_boxes.tensor, t
+ ),
+ )
+ if any(isinstance(t, HFlipTransform) for t in tfm.transforms):
+ output.pred_densepose = HFlipConverter.convert(
+ output.pred_densepose, self._transform_data
+ )
+ self._incremental_avg_dp(outputs[0].pred_densepose, output.pred_densepose, idx)
+ return outputs[0].pred_densepose
+
+ # incrementally computed average: u_(n + 1) = u_n + (x_(n+1) - u_n) / (n + 1).
+ def _incremental_avg_dp(self, avg, new_el, idx):
+ for attr in ["coarse_segm", "fine_segm", "u", "v"]:
+ setattr(avg, attr, (getattr(avg, attr) * idx + getattr(new_el, attr)) / (idx + 1))
+ if idx:
+ # Deletion of the > 0 index intermediary values to prevent GPU OOM
+ setattr(new_el, attr, None)
+ return avg
+
+
+def _inverse_rotation(densepose_attrs, boxes, transform):
+ # resample outputs to image size and rotate back the densepose preds
+ # on the rotated images to the space of the original image
+ if len(boxes) == 0 or not isinstance(transform, RotationTransform):
+ return densepose_attrs
+ boxes = boxes.int().cpu().numpy()
+ wh_boxes = boxes[:, 2:] - boxes[:, :2] # bboxes in the rotated space
+ inv_boxes = rotate_box_inverse(transform, boxes).astype(int) # bboxes in original image
+ wh_diff = (inv_boxes[:, 2:] - inv_boxes[:, :2] - wh_boxes) // 2 # diff between new/old bboxes
+ rotation_matrix = torch.tensor([transform.rm_image]).to(device=densepose_attrs.device).float()
+ rotation_matrix[:, :, -1] = 0
+ # To apply grid_sample for rotation, we need to have enough space to fit the original and
+ # rotated bboxes. l_bds and r_bds are the left/right bounds that will be used to
+ # crop the difference once the rotation is done
+ l_bds = np.maximum(0, -wh_diff)
+ for i in range(len(densepose_attrs)):
+ if min(wh_boxes[i]) <= 0:
+ continue
+ densepose_attr = densepose_attrs[[i]].clone()
+ # 1. Interpolate densepose attribute to size of the rotated bbox
+ densepose_attr = F.interpolate(densepose_attr, wh_boxes[i].tolist()[::-1], mode="bilinear")
+ # 2. Pad the interpolated attribute so it has room for the original + rotated bbox
+ densepose_attr = F.pad(densepose_attr, tuple(np.repeat(np.maximum(0, wh_diff[i]), 2)))
+ # 3. Compute rotation grid and transform
+ grid = F.affine_grid(rotation_matrix, size=densepose_attr.shape)
+ densepose_attr = F.grid_sample(densepose_attr, grid)
+ # 4. Compute right bounds and crop the densepose_attr to the size of the original bbox
+ r_bds = densepose_attr.shape[2:][::-1] - l_bds[i]
+ densepose_attr = densepose_attr[:, :, l_bds[i][1] : r_bds[1], l_bds[i][0] : r_bds[0]]
+ if min(densepose_attr.shape) > 0:
+ # Interpolate back to the original size of the densepose attribute
+ densepose_attr = F.interpolate(
+ densepose_attr, densepose_attrs.shape[-2:], mode="bilinear"
+ )
+ # Adding a very small probability to the background class to fill padded zones
+ densepose_attr[:, 0] += 1e-10
+ densepose_attrs[i] = densepose_attr
+ return densepose_attrs
+
+
+def rotate_box_inverse(rot_tfm, rotated_box):
+ """
+ rotated_box is an N * 4 array of [x0, y0, x1, y1] boxes.
+ When a bbox is rotated, it gets bigger, because the new box must surround the tilted bbox.
+ So when a bbox is rotated and then inverse-rotated, it is much bigger than the original.
+ This function inverts the rotation on the box and also resizes it to its original size.
+ """
+ # 1. Compute the inverse rotation of the rotated bboxes (bigger than the originals)
+ invrot_box = rot_tfm.inverse().apply_box(rotated_box)
+ h, w = rotated_box[:, 3] - rotated_box[:, 1], rotated_box[:, 2] - rotated_box[:, 0]
+ ih, iw = invrot_box[:, 3] - invrot_box[:, 1], invrot_box[:, 2] - invrot_box[:, 0]
+ assert 2 * rot_tfm.abs_sin**2 != 1, "45 degrees angle can't be inverted"
+ # 2. Inverse the corresponding computation in the rotation transform
+ # to get the original height/width of the rotated boxes
+ orig_h = (h * rot_tfm.abs_cos - w * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin**2)
+ orig_w = (w * rot_tfm.abs_cos - h * rot_tfm.abs_sin) / (1 - 2 * rot_tfm.abs_sin**2)
+ # 3. Resize the inverse-rotated bboxes to their original size
+ invrot_box[:, 0] += (iw - orig_w) / 2
+ invrot_box[:, 1] += (ih - orig_h) / 2
+ invrot_box[:, 2] -= (iw - orig_w) / 2
+ invrot_box[:, 3] -= (ih - orig_h) / 2
+
+ return invrot_box
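The running average in `_incremental_avg_dp` implements u_(n+1) = u_n + (x_(n+1) - u_n) / (n + 1), which is algebraically the same update as the one below; a quick standalone check with illustrative tensors:

import torch

xs = [torch.tensor([1.0, 2.0]), torch.tensor([3.0, 4.0]), torch.tensor([5.0, 6.0])]
avg = xs[0]
for idx, x in enumerate(xs[1:], start=1):
    avg = (avg * idx + x) / (idx + 1)  # same update as _incremental_avg_dp
print(avg)  # tensor([3., 4.]) == torch.stack(xs).mean(dim=0)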
diff --git a/densepose/modeling/utils.py b/densepose/modeling/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e76eb9535a68dcb4ccb065556c55289294e42c8
--- /dev/null
+++ b/densepose/modeling/utils.py
@@ -0,0 +1,11 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from torch import nn
+
+
+def initialize_module_params(module: nn.Module) -> None:
+ for name, param in module.named_parameters():
+ if "bias" in name:
+ nn.init.constant_(param, 0)
+ elif "weight" in name:
+ nn.init.kaiming_normal_(param, mode="fan_out", nonlinearity="relu")
diff --git a/densepose/structures/__init__.py b/densepose/structures/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ed32c5e9d6c4c1599ba960681d9e86889e2cdbd8
--- /dev/null
+++ b/densepose/structures/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .chart import DensePoseChartPredictorOutput
+from .chart_confidence import decorate_predictor_output_class_with_confidences
+from .cse_confidence import decorate_cse_predictor_output_class_with_confidences
+from .chart_result import (
+ DensePoseChartResult,
+ DensePoseChartResultWithConfidences,
+ quantize_densepose_chart_result,
+ compress_quantized_densepose_chart_result,
+ decompress_compressed_densepose_chart_result,
+)
+from .cse import DensePoseEmbeddingPredictorOutput
+from .data_relative import DensePoseDataRelative
+from .list import DensePoseList
+from .mesh import Mesh, create_mesh
+from .transform_data import DensePoseTransformData, normalized_coords_transform
diff --git a/densepose/structures/chart.py b/densepose/structures/chart.py
new file mode 100644
index 0000000000000000000000000000000000000000..115cc084e98115c537382494af9eb0e246cd375b
--- /dev/null
+++ b/densepose/structures/chart.py
@@ -0,0 +1,70 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from dataclasses import dataclass
+from typing import Union
+import torch
+
+
+@dataclass
+class DensePoseChartPredictorOutput:
+ """
+ Predictor output that contains segmentation and inner coordinates predictions for predefined
+ body parts:
+ * coarse segmentation, a tensor of shape [N, K, Hout, Wout]
+ * fine segmentation, a tensor of shape [N, C, Hout, Wout]
+ * U coordinates, a tensor of shape [N, C, Hout, Wout]
+ * V coordinates, a tensor of shape [N, C, Hout, Wout]
+ where
+ - N is the number of instances
+ - K is the number of coarse segmentation channels (
+ 2 = foreground / background,
+ 15 = one of 14 body parts / background)
+ - C is the number of fine segmentation channels (
+ 24 fine body parts / background)
+ - Hout and Wout are height and width of predictions
+ """
+
+ coarse_segm: torch.Tensor
+ fine_segm: torch.Tensor
+ u: torch.Tensor
+ v: torch.Tensor
+
+ def __len__(self):
+ """
+ Number of instances (N) in the output
+ """
+ return self.coarse_segm.size(0)
+
+ def __getitem__(
+ self, item: Union[int, slice, torch.BoolTensor]
+ ) -> "DensePoseChartPredictorOutput":
+ """
+ Get outputs for the selected instance(s)
+
+ Args:
+ item (int or slice or tensor): selected items
+ """
+ if isinstance(item, int):
+ return DensePoseChartPredictorOutput(
+ coarse_segm=self.coarse_segm[item].unsqueeze(0),
+ fine_segm=self.fine_segm[item].unsqueeze(0),
+ u=self.u[item].unsqueeze(0),
+ v=self.v[item].unsqueeze(0),
+ )
+ else:
+ return DensePoseChartPredictorOutput(
+ coarse_segm=self.coarse_segm[item],
+ fine_segm=self.fine_segm[item],
+ u=self.u[item],
+ v=self.v[item],
+ )
+
+ def to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device
+ """
+ coarse_segm = self.coarse_segm.to(device)
+ fine_segm = self.fine_segm.to(device)
+ u = self.u.to(device)
+ v = self.v.to(device)
+ return DensePoseChartPredictorOutput(coarse_segm=coarse_segm, fine_segm=fine_segm, u=u, v=v)
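A minimal sketch (made-up shapes) of the indexing semantics above: an int index keeps a leading instance dimension of size 1, while a boolean mask selects a subset of instances:

import torch
from densepose.structures import DensePoseChartPredictorOutput

out = DensePoseChartPredictorOutput(
    coarse_segm=torch.randn(4, 2, 28, 28),
    fine_segm=torch.randn(4, 25, 28, 28),
    u=torch.randn(4, 25, 28, 28),
    v=torch.randn(4, 25, 28, 28),
)
print(len(out))        # 4
print(out[1].u.shape)  # torch.Size([1, 25, 28, 28])
keep = torch.tensor([True, False, True, False])
print(len(out[keep]))  # 2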
diff --git a/densepose/structures/chart_confidence.py b/densepose/structures/chart_confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..57c63257a7c176af1522e2f143ed594c26906c76
--- /dev/null
+++ b/densepose/structures/chart_confidence.py
@@ -0,0 +1,98 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from dataclasses import make_dataclass
+from functools import lru_cache
+from typing import Any, Optional
+import torch
+
+
+@lru_cache(maxsize=None)
+def decorate_predictor_output_class_with_confidences(BasePredictorOutput: type) -> type:
+ """
+ Create a new output class from an existing one by adding new attributes
+ related to confidence estimation:
+ - sigma_1 (tensor)
+ - sigma_2 (tensor)
+ - kappa_u (tensor)
+ - kappa_v (tensor)
+ - fine_segm_confidence (tensor)
+ - coarse_segm_confidence (tensor)
+
+ Details on confidence estimation parameters can be found in:
+ N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
+ Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
+ A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
+
+ The new class inherits the provided `BasePredictorOutput` class;
+ its name is composed of the name of the provided class and the
+ "WithConfidences" suffix.
+
+ Args:
+ BasePredictorOutput (type): output type to which confidence data
+ is to be added, assumed to be a dataclass
+ Return:
+ New dataclass derived from the provided one that has attributes
+ for confidence estimation
+ """
+
+ PredictorOutput = make_dataclass(
+ BasePredictorOutput.__name__ + "WithConfidences",
+ fields=[
+ ("sigma_1", Optional[torch.Tensor], None),
+ ("sigma_2", Optional[torch.Tensor], None),
+ ("kappa_u", Optional[torch.Tensor], None),
+ ("kappa_v", Optional[torch.Tensor], None),
+ ("fine_segm_confidence", Optional[torch.Tensor], None),
+ ("coarse_segm_confidence", Optional[torch.Tensor], None),
+ ],
+ bases=(BasePredictorOutput,),
+ )
+
+ # add possibility to index PredictorOutput
+
+ def slice_if_not_none(data, item):
+ if data is None:
+ return None
+ if isinstance(item, int):
+ return data[item].unsqueeze(0)
+ return data[item]
+
+ def PredictorOutput_getitem(self, item):
+ PredictorOutput = type(self)
+ base_predictor_output_sliced = super(PredictorOutput, self).__getitem__(item)
+ return PredictorOutput(
+ **base_predictor_output_sliced.__dict__,
+ coarse_segm_confidence=slice_if_not_none(self.coarse_segm_confidence, item),
+ fine_segm_confidence=slice_if_not_none(self.fine_segm_confidence, item),
+ sigma_1=slice_if_not_none(self.sigma_1, item),
+ sigma_2=slice_if_not_none(self.sigma_2, item),
+ kappa_u=slice_if_not_none(self.kappa_u, item),
+ kappa_v=slice_if_not_none(self.kappa_v, item),
+ )
+
+ PredictorOutput.__getitem__ = PredictorOutput_getitem
+
+ def PredictorOutput_to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device
+ """
+ PredictorOutput = type(self)
+ base_predictor_output_to = super(PredictorOutput, self).to(device) # pyre-ignore[16]
+
+ def to_device_if_tensor(var: Any):
+ if isinstance(var, torch.Tensor):
+ return var.to(device)
+ return var
+
+ return PredictorOutput(
+ **base_predictor_output_to.__dict__,
+ sigma_1=to_device_if_tensor(self.sigma_1),
+ sigma_2=to_device_if_tensor(self.sigma_2),
+ kappa_u=to_device_if_tensor(self.kappa_u),
+ kappa_v=to_device_if_tensor(self.kappa_v),
+ fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence),
+ coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence),
+ )
+
+ PredictorOutput.to = PredictorOutput_to
+ return PredictorOutput
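Because the factory above is wrapped in `lru_cache`, decorating the same base class twice returns the very same derived class, so type checks against it stay consistent; a quick sketch:

from densepose.structures import (
    DensePoseChartPredictorOutput,
    decorate_predictor_output_class_with_confidences,
)

A = decorate_predictor_output_class_with_confidences(DensePoseChartPredictorOutput)
B = decorate_predictor_output_class_with_confidences(DensePoseChartPredictorOutput)
assert A is B                                        # cached, not a new class on every call
assert issubclass(A, DensePoseChartPredictorOutput)  # inherits all base fields
print(A.__name__)  # DensePoseChartPredictorOutputWithConfidences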
diff --git a/densepose/structures/chart_result.py b/densepose/structures/chart_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..003933d03d153d045c0bf551c465bc7a224d90cb
--- /dev/null
+++ b/densepose/structures/chart_result.py
@@ -0,0 +1,183 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from dataclasses import dataclass
+from typing import Any, Optional, Tuple
+import torch
+
+
+@dataclass
+class DensePoseChartResult:
+ """
+ DensePose results for chart-based methods represented by labels and inner
+ coordinates (U, V) of individual charts. Each chart is a 2D manifold
+ that has an associated label and is parameterized by two coordinates U and V.
+ Both U and V take values in [0, 1].
+ Thus the results are represented by two tensors:
+ - labels (tensor [H, W] of long): contains estimated label for each pixel of
+ the detection bounding box of size (H, W)
+ - uv (tensor [2, H, W] of float): contains estimated U and V coordinates
+ for each pixel of the detection bounding box of size (H, W)
+ """
+
+ labels: torch.Tensor
+ uv: torch.Tensor
+
+ def to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device
+ """
+ labels = self.labels.to(device)
+ uv = self.uv.to(device)
+ return DensePoseChartResult(labels=labels, uv=uv)
+
+
+@dataclass
+class DensePoseChartResultWithConfidences:
+ """
+ We add confidence values to DensePoseChartResult.
+ Thus the results are represented by two tensors:
+ - labels (tensor [H, W] of long): contains estimated label for each pixel of
+ the detection bounding box of size (H, W)
+ - uv (tensor [2, H, W] of float): contains estimated U and V coordinates
+ for each pixel of the detection bounding box of size (H, W)
+ Plus one [H, W] tensor of float for each confidence type
+ """
+
+ labels: torch.Tensor
+ uv: torch.Tensor
+ sigma_1: Optional[torch.Tensor] = None
+ sigma_2: Optional[torch.Tensor] = None
+ kappa_u: Optional[torch.Tensor] = None
+ kappa_v: Optional[torch.Tensor] = None
+ fine_segm_confidence: Optional[torch.Tensor] = None
+ coarse_segm_confidence: Optional[torch.Tensor] = None
+
+ def to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device, except if their value is None
+ """
+
+ def to_device_if_tensor(var: Any):
+ if isinstance(var, torch.Tensor):
+ return var.to(device)
+ return var
+
+ return DensePoseChartResultWithConfidences(
+ labels=self.labels.to(device),
+ uv=self.uv.to(device),
+ sigma_1=to_device_if_tensor(self.sigma_1),
+ sigma_2=to_device_if_tensor(self.sigma_2),
+ kappa_u=to_device_if_tensor(self.kappa_u),
+ kappa_v=to_device_if_tensor(self.kappa_v),
+ fine_segm_confidence=to_device_if_tensor(self.fine_segm_confidence),
+ coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence),
+ )
+
+
+@dataclass
+class DensePoseChartResultQuantized:
+ """
+ DensePose results for chart-based methods represented by labels and quantized
+ inner coordinates (U, V) of individual charts. Each chart is a 2D manifold
+ that has an associated label and is parameterized by two coordinates U and V.
+ Both U and V take values in [0, 1].
+ Quantized coordinates Uq and Vq have uint8 values which are obtained as:
+ Uq = U * 255 (hence 0 <= Uq <= 255)
+ Vq = V * 255 (hence 0 <= Vq <= 255)
+ Thus the results are represented by one tensor:
+ - labels_uv_uint8 (tensor [3, H, W] of uint8): contains estimated label
+ and quantized coordinates Uq and Vq for each pixel of the detection
+ bounding box of size (H, W)
+ """
+
+ labels_uv_uint8: torch.Tensor
+
+ def to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device
+ """
+ labels_uv_uint8 = self.labels_uv_uint8.to(device)
+ return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8)
+
+
+@dataclass
+class DensePoseChartResultCompressed:
+ """
+ DensePose results for chart-based methods represented by a PNG-encoded string.
+ The tensor of quantized DensePose results of size [3, H, W] is considered
+ as an image with 3 color channels. PNG compression is applied and the result
+ is stored as a Base64-encoded string. The following attributes are defined:
+ - shape_chw (tuple of 3 int): contains shape of the result tensor
+ (number of channels, height, width)
+ - labels_uv_str (str): contains Base64-encoded results tensor of size
+ [3, H, W] compressed with PNG compression methods
+ """
+
+ shape_chw: Tuple[int, int, int]
+ labels_uv_str: str
+
+
+def quantize_densepose_chart_result(result: DensePoseChartResult) -> DensePoseChartResultQuantized:
+ """
+ Applies quantization to DensePose chart-based result.
+
+ Args:
+ result (DensePoseChartResult): DensePose chart-based result
+ Return:
+ Quantized DensePose chart-based result (DensePoseChartResultQuantized)
+ """
+ h, w = result.labels.shape
+ labels_uv_uint8 = torch.zeros([3, h, w], dtype=torch.uint8, device=result.labels.device)
+ labels_uv_uint8[0] = result.labels
+ labels_uv_uint8[1:] = (result.uv * 255).clamp(0, 255).byte()
+ return DensePoseChartResultQuantized(labels_uv_uint8=labels_uv_uint8)
+
+
+def compress_quantized_densepose_chart_result(
+ result: DensePoseChartResultQuantized,
+) -> DensePoseChartResultCompressed:
+ """
+ Compresses quantized DensePose chart-based result
+
+ Args:
+ result (DensePoseChartResultQuantized): quantized DensePose chart-based result
+ Return:
+ Compressed DensePose chart-based result (DensePoseChartResultCompressed)
+ """
+ import base64
+ import numpy as np
+ from io import BytesIO
+ from PIL import Image
+
+ labels_uv_uint8_np_chw = result.labels_uv_uint8.cpu().numpy()
+ labels_uv_uint8_np_hwc = np.moveaxis(labels_uv_uint8_np_chw, 0, -1)
+ im = Image.fromarray(labels_uv_uint8_np_hwc)
+ fstream = BytesIO()
+ im.save(fstream, format="png", optimize=True)
+ labels_uv_str = base64.encodebytes(fstream.getvalue()).decode()
+ shape_chw = labels_uv_uint8_np_chw.shape
+ return DensePoseChartResultCompressed(labels_uv_str=labels_uv_str, shape_chw=shape_chw)
+
+
+def decompress_compressed_densepose_chart_result(
+ result: DensePoseChartResultCompressed,
+) -> DensePoseChartResultQuantized:
+ """
+ Decompresses DensePose chart-based result encoded into a base64 string
+
+ Args:
+ result (DensePoseChartResultCompressed): compressed DensePose chart result
+ Return:
+ Quantized DensePose chart-based result (DensePoseChartResultQuantized)
+ """
+ import base64
+ import numpy as np
+ from io import BytesIO
+ from PIL import Image
+
+ fstream = BytesIO(base64.decodebytes(result.labels_uv_str.encode()))
+ im = Image.open(fstream)
+ labels_uv_uint8_np_chw = np.moveaxis(np.array(im, dtype=np.uint8), -1, 0)
+ return DensePoseChartResultQuantized(
+ labels_uv_uint8=torch.from_numpy(labels_uv_uint8_np_chw.reshape(result.shape_chw))
+ )
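A small round-trip sketch of the quantize / compress / decompress helpers above, using random data (requires Pillow); since PNG compression is lossless, the quantized tensor is recovered exactly:

import torch
from densepose.structures import (
    DensePoseChartResult,
    quantize_densepose_chart_result,
    compress_quantized_densepose_chart_result,
    decompress_compressed_densepose_chart_result,
)

result = DensePoseChartResult(
    labels=torch.randint(0, 25, (64, 48)),  # hypothetical 64x48 detection box
    uv=torch.rand(2, 64, 48),
)
quantized = quantize_densepose_chart_result(result)
compressed = compress_quantized_densepose_chart_result(quantized)
restored = decompress_compressed_densepose_chart_result(compressed)
assert torch.equal(restored.labels_uv_uint8, quantized.labels_uv_uint8)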
diff --git a/densepose/structures/cse.py b/densepose/structures/cse.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cd65da96c04613053e21494bc2dcc04f37fe1fd
--- /dev/null
+++ b/densepose/structures/cse.py
@@ -0,0 +1,52 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+from dataclasses import dataclass
+from typing import Union
+import torch
+
+
+@dataclass
+class DensePoseEmbeddingPredictorOutput:
+ """
+ Predictor output that contains embedding and coarse segmentation data:
+ * embedding: float tensor of size [N, D, H, W], contains estimated embeddings
+ * coarse_segm: float tensor of size [N, K, H, W]
+ Here D = MODEL.ROI_DENSEPOSE_HEAD.CSE.EMBED_SIZE
+ K = MODEL.ROI_DENSEPOSE_HEAD.NUM_COARSE_SEGM_CHANNELS
+ """
+
+ embedding: torch.Tensor
+ coarse_segm: torch.Tensor
+
+ def __len__(self):
+ """
+ Number of instances (N) in the output
+ """
+ return self.coarse_segm.size(0)
+
+ def __getitem__(
+ self, item: Union[int, slice, torch.BoolTensor]
+ ) -> "DensePoseEmbeddingPredictorOutput":
+ """
+ Get outputs for the selected instance(s)
+
+ Args:
+ item (int or slice or tensor): selected items
+ """
+ if isinstance(item, int):
+ return DensePoseEmbeddingPredictorOutput(
+ coarse_segm=self.coarse_segm[item].unsqueeze(0),
+ embedding=self.embedding[item].unsqueeze(0),
+ )
+ else:
+ return DensePoseEmbeddingPredictorOutput(
+ coarse_segm=self.coarse_segm[item], embedding=self.embedding[item]
+ )
+
+ def to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device
+ """
+ coarse_segm = self.coarse_segm.to(device)
+ embedding = self.embedding.to(device)
+ return DensePoseEmbeddingPredictorOutput(coarse_segm=coarse_segm, embedding=embedding)
diff --git a/densepose/structures/cse_confidence.py b/densepose/structures/cse_confidence.py
new file mode 100644
index 0000000000000000000000000000000000000000..ee5166f82d45ecb4ea829ec2ecab248161c19421
--- /dev/null
+++ b/densepose/structures/cse_confidence.py
@@ -0,0 +1,78 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from dataclasses import make_dataclass
+from functools import lru_cache
+from typing import Any, Optional
+import torch
+
+
+@lru_cache(maxsize=None)
+def decorate_cse_predictor_output_class_with_confidences(BasePredictorOutput: type) -> type:
+ """
+ Create a new output class from an existing one by adding new attributes
+ related to confidence estimation:
+ - coarse_segm_confidence (tensor)
+
+ Details on confidence estimation parameters can be found in:
+ N. Neverova, D. Novotny, A. Vedaldi "Correlated Uncertainty for Learning
+ Dense Correspondences from Noisy Labels", p. 918--926, in Proc. NIPS 2019
+ A. Sanakoyeu et al., Transferring Dense Pose to Proximal Animal Classes, CVPR 2020
+
+ The new class inherits the provided `BasePredictorOutput` class;
+ its name is composed of the name of the provided class and the
+ "WithConfidences" suffix.
+
+ Args:
+ BasePredictorOutput (type): output type to which confidence data
+ is to be added, assumed to be a dataclass
+ Return:
+ New dataclass derived from the provided one that has attributes
+ for confidence estimation
+ """
+
+ PredictorOutput = make_dataclass(
+ BasePredictorOutput.__name__ + "WithConfidences",
+ fields=[
+ ("coarse_segm_confidence", Optional[torch.Tensor], None),
+ ],
+ bases=(BasePredictorOutput,),
+ )
+
+ # add possibility to index PredictorOutput
+
+ def slice_if_not_none(data, item):
+ if data is None:
+ return None
+ if isinstance(item, int):
+ return data[item].unsqueeze(0)
+ return data[item]
+
+ def PredictorOutput_getitem(self, item):
+ PredictorOutput = type(self)
+ base_predictor_output_sliced = super(PredictorOutput, self).__getitem__(item)
+ return PredictorOutput(
+ **base_predictor_output_sliced.__dict__,
+ coarse_segm_confidence=slice_if_not_none(self.coarse_segm_confidence, item),
+ )
+
+ PredictorOutput.__getitem__ = PredictorOutput_getitem
+
+ def PredictorOutput_to(self, device: torch.device):
+ """
+ Transfers all tensors to the given device
+ """
+ PredictorOutput = type(self)
+ base_predictor_output_to = super(PredictorOutput, self).to(device) # pyre-ignore[16]
+
+ def to_device_if_tensor(var: Any):
+ if isinstance(var, torch.Tensor):
+ return var.to(device)
+ return var
+
+ return PredictorOutput(
+ **base_predictor_output_to.__dict__,
+ coarse_segm_confidence=to_device_if_tensor(self.coarse_segm_confidence),
+ )
+
+ PredictorOutput.to = PredictorOutput_to
+ return PredictorOutput
diff --git a/densepose/structures/data_relative.py b/densepose/structures/data_relative.py
new file mode 100644
index 0000000000000000000000000000000000000000..187e140495f94a740fdd91d756f2195a0c8f4f30
--- /dev/null
+++ b/densepose/structures/data_relative.py
@@ -0,0 +1,243 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+import torch
+from torch.nn import functional as F
+
+from densepose.data.meshes.catalog import MeshCatalog
+from densepose.structures.mesh import load_mesh_symmetry
+from densepose.structures.transform_data import DensePoseTransformData
+
+
+class DensePoseDataRelative:
+ """
+ Dense pose relative annotations that can be applied to any bounding box:
+ x - normalized X coordinates [0, 255] of annotated points
+ y - normalized Y coordinates [0, 255] of annotated points
+ i - body part labels 0,...,24 for annotated points
+ u - body part U coordinates [0, 1] for annotated points
+ v - body part V coordinates [0, 1] for annotated points
+ segm - 256x256 segmentation mask with values 0,...,14
+ To obtain absolute x and y data wrt some bounding box one needs to first
+ divide the data by 256, multiply by the respective bounding box size
+ and add bounding box offset:
+ x_img = x0 + x_norm * w / 256.0
+ y_img = y0 + y_norm * h / 256.0
+ Segmentation masks are typically sampled to get image-based masks.
+ """
+
+ # Key for normalized X coordinates in annotation dict
+ X_KEY = "dp_x"
+ # Key for normalized Y coordinates in annotation dict
+ Y_KEY = "dp_y"
+ # Key for U part coordinates in annotation dict (used in chart-based annotations)
+ U_KEY = "dp_U"
+ # Key for V part coordinates in annotation dict (used in chart-based annotations)
+ V_KEY = "dp_V"
+ # Key for I point labels in annotation dict (used in chart-based annotations)
+ I_KEY = "dp_I"
+ # Key for segmentation mask in annotation dict
+ S_KEY = "dp_masks"
+ # Key for vertex ids (used in continuous surface embeddings annotations)
+ VERTEX_IDS_KEY = "dp_vertex"
+ # Key for mesh id (used in continuous surface embeddings annotations)
+ MESH_NAME_KEY = "ref_model"
+ # Number of body parts in segmentation masks
+ N_BODY_PARTS = 14
+ # Number of parts in point labels
+ N_PART_LABELS = 24
+ MASK_SIZE = 256
+
+ def __init__(self, annotation, cleanup=False):
+ self.x = torch.as_tensor(annotation[DensePoseDataRelative.X_KEY])
+ self.y = torch.as_tensor(annotation[DensePoseDataRelative.Y_KEY])
+ if (
+ DensePoseDataRelative.I_KEY in annotation
+ and DensePoseDataRelative.U_KEY in annotation
+ and DensePoseDataRelative.V_KEY in annotation
+ ):
+ self.i = torch.as_tensor(annotation[DensePoseDataRelative.I_KEY])
+ self.u = torch.as_tensor(annotation[DensePoseDataRelative.U_KEY])
+ self.v = torch.as_tensor(annotation[DensePoseDataRelative.V_KEY])
+ if (
+ DensePoseDataRelative.VERTEX_IDS_KEY in annotation
+ and DensePoseDataRelative.MESH_NAME_KEY in annotation
+ ):
+ self.vertex_ids = torch.as_tensor(
+ annotation[DensePoseDataRelative.VERTEX_IDS_KEY], dtype=torch.long
+ )
+ self.mesh_id = MeshCatalog.get_mesh_id(annotation[DensePoseDataRelative.MESH_NAME_KEY])
+ if DensePoseDataRelative.S_KEY in annotation:
+ self.segm = DensePoseDataRelative.extract_segmentation_mask(annotation)
+ self.device = torch.device("cpu")
+ if cleanup:
+ DensePoseDataRelative.cleanup_annotation(annotation)
+
+ def to(self, device):
+ if self.device == device:
+ return self
+ new_data = DensePoseDataRelative.__new__(DensePoseDataRelative)
+ new_data.x = self.x.to(device)
+ new_data.y = self.y.to(device)
+ for attr in ["i", "u", "v", "vertex_ids", "segm"]:
+ if hasattr(self, attr):
+ setattr(new_data, attr, getattr(self, attr).to(device))
+ if hasattr(self, "mesh_id"):
+ new_data.mesh_id = self.mesh_id
+ new_data.device = device
+ return new_data
+
+ @staticmethod
+ def extract_segmentation_mask(annotation):
+ import pycocotools.mask as mask_utils
+
+ # TODO: annotation instance is accepted if it contains either
+ # DensePose segmentation or instance segmentation. However, here we
+ # only rely on DensePose segmentation
+ poly_specs = annotation[DensePoseDataRelative.S_KEY]
+ if isinstance(poly_specs, torch.Tensor):
+ # data is already given as mask tensors, no need to decode
+ return poly_specs
+ segm = torch.zeros((DensePoseDataRelative.MASK_SIZE,) * 2, dtype=torch.float32)
+ if isinstance(poly_specs, dict):
+ if poly_specs:
+ mask = mask_utils.decode(poly_specs)
+ segm[mask > 0] = 1
+ else:
+ for i in range(len(poly_specs)):
+ poly_i = poly_specs[i]
+ if poly_i:
+ mask_i = mask_utils.decode(poly_i)
+ segm[mask_i > 0] = i + 1
+ return segm
+
+ @staticmethod
+ def validate_annotation(annotation):
+ for key in [
+ DensePoseDataRelative.X_KEY,
+ DensePoseDataRelative.Y_KEY,
+ ]:
+ if key not in annotation:
+ return False, "no {key} data in the annotation".format(key=key)
+ valid_for_iuv_setting = all(
+ key in annotation
+ for key in [
+ DensePoseDataRelative.I_KEY,
+ DensePoseDataRelative.U_KEY,
+ DensePoseDataRelative.V_KEY,
+ ]
+ )
+ valid_for_cse_setting = all(
+ key in annotation
+ for key in [
+ DensePoseDataRelative.VERTEX_IDS_KEY,
+ DensePoseDataRelative.MESH_NAME_KEY,
+ ]
+ )
+ if not valid_for_iuv_setting and not valid_for_cse_setting:
+ return (
+ False,
+ "expected either {} (IUV setting) or {} (CSE setting) annotations".format(
+ ", ".join(
+ [
+ DensePoseDataRelative.I_KEY,
+ DensePoseDataRelative.U_KEY,
+ DensePoseDataRelative.V_KEY,
+ ]
+ ),
+ ", ".join(
+ [
+ DensePoseDataRelative.VERTEX_IDS_KEY,
+ DensePoseDataRelative.MESH_NAME_KEY,
+ ]
+ ),
+ ),
+ )
+ return True, None
+
+ @staticmethod
+ def cleanup_annotation(annotation):
+ for key in [
+ DensePoseDataRelative.X_KEY,
+ DensePoseDataRelative.Y_KEY,
+ DensePoseDataRelative.I_KEY,
+ DensePoseDataRelative.U_KEY,
+ DensePoseDataRelative.V_KEY,
+ DensePoseDataRelative.S_KEY,
+ DensePoseDataRelative.VERTEX_IDS_KEY,
+ DensePoseDataRelative.MESH_NAME_KEY,
+ ]:
+ if key in annotation:
+ del annotation[key]
+
+ def apply_transform(self, transforms, densepose_transform_data):
+ self._transform_pts(transforms, densepose_transform_data)
+ if hasattr(self, "segm"):
+ self._transform_segm(transforms, densepose_transform_data)
+
+ def _transform_pts(self, transforms, dp_transform_data):
+ import detectron2.data.transforms as T
+
+ # NOTE: This assumes that HFlipTransform is the only transform that performs a flip
+ do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
+ if do_hflip:
+ self.x = self.MASK_SIZE - self.x
+ if hasattr(self, "i"):
+ self._flip_iuv_semantics(dp_transform_data)
+ if hasattr(self, "vertex_ids"):
+ self._flip_vertices()
+
+ for t in transforms.transforms:
+ if isinstance(t, T.RotationTransform):
+ xy_scale = np.array((t.w, t.h)) / DensePoseDataRelative.MASK_SIZE
+ xy = t.apply_coords(np.stack((self.x, self.y), axis=1) * xy_scale)
+ self.x, self.y = torch.tensor(xy / xy_scale, dtype=self.x.dtype).T
+
+ def _flip_iuv_semantics(self, dp_transform_data: DensePoseTransformData) -> None:
+ i_old = self.i.clone()
+ uv_symmetries = dp_transform_data.uv_symmetries
+ pt_label_symmetries = dp_transform_data.point_label_symmetries
+ for i in range(self.N_PART_LABELS):
+ if i + 1 in i_old:
+ annot_indices_i = i_old == i + 1
+ if pt_label_symmetries[i + 1] != i + 1:
+ self.i[annot_indices_i] = pt_label_symmetries[i + 1]
+ u_loc = (self.u[annot_indices_i] * 255).long()
+ v_loc = (self.v[annot_indices_i] * 255).long()
+ self.u[annot_indices_i] = uv_symmetries["U_transforms"][i][v_loc, u_loc].to(
+ device=self.u.device
+ )
+ self.v[annot_indices_i] = uv_symmetries["V_transforms"][i][v_loc, u_loc].to(
+ device=self.v.device
+ )
+
+ def _flip_vertices(self):
+ mesh_info = MeshCatalog[MeshCatalog.get_mesh_name(self.mesh_id)]
+ mesh_symmetry = (
+ load_mesh_symmetry(mesh_info.symmetry) if mesh_info.symmetry is not None else None
+ )
+ self.vertex_ids = mesh_symmetry["vertex_transforms"][self.vertex_ids]
+
+ def _transform_segm(self, transforms, dp_transform_data):
+ import detectron2.data.transforms as T
+
+ # NOTE: This assumes that HFlipTransform is the only transform that performs a flip
+ do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
+ if do_hflip:
+ self.segm = torch.flip(self.segm, [1])
+ self._flip_segm_semantics(dp_transform_data)
+
+ for t in transforms.transforms:
+ if isinstance(t, T.RotationTransform):
+ self._transform_segm_rotation(t)
+
+ def _flip_segm_semantics(self, dp_transform_data):
+ old_segm = self.segm.clone()
+ mask_label_symmetries = dp_transform_data.mask_label_symmetries
+ for i in range(self.N_BODY_PARTS):
+ if mask_label_symmetries[i + 1] != i + 1:
+ self.segm[old_segm == i + 1] = mask_label_symmetries[i + 1]
+
+ def _transform_segm_rotation(self, rotation):
+ self.segm = F.interpolate(self.segm[None, None, :], (rotation.h, rotation.w)).numpy()
+ self.segm = torch.tensor(rotation.apply_segmentation(self.segm[0, 0]))[None, None, :]
+ self.segm = F.interpolate(self.segm, [DensePoseDataRelative.MASK_SIZE] * 2)[0, 0]
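A tiny sketch of the relative-to-absolute coordinate conversion described in the class docstring above, with hypothetical point and box values:

import torch

# hypothetical annotated points in the normalized [0, 255] range
x_norm = torch.tensor([0.0, 128.0, 255.0])
y_norm = torch.tensor([64.0, 128.0, 192.0])
x0, y0, w, h = 10.0, 20.0, 100.0, 200.0  # hypothetical box offset and size in absolute pixels

x_img = x0 + x_norm * w / 256.0
y_img = y0 + y_norm * h / 256.0
print(x_img)  # tensor([ 10.0000,  60.0000, 109.6094])
print(y_img)  # tensor([ 70., 120., 170.])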
diff --git a/densepose/structures/list.py b/densepose/structures/list.py
new file mode 100644
index 0000000000000000000000000000000000000000..7631f8f78f4e9b1a94653d4e47639c50affe58eb
--- /dev/null
+++ b/densepose/structures/list.py
@@ -0,0 +1,70 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import torch
+
+from densepose.structures.data_relative import DensePoseDataRelative
+
+
+class DensePoseList:
+
+ _TORCH_DEVICE_CPU = torch.device("cpu")
+
+ def __init__(self, densepose_datas, boxes_xyxy_abs, image_size_hw, device=_TORCH_DEVICE_CPU):
+ assert len(densepose_datas) == len(
+ boxes_xyxy_abs
+ ), "Attempt to initialize DensePoseList with {} DensePose datas " "and {} boxes".format(
+ len(densepose_datas), len(boxes_xyxy_abs)
+ )
+ self.densepose_datas = []
+ for densepose_data in densepose_datas:
+ assert isinstance(densepose_data, DensePoseDataRelative) or densepose_data is None, (
+ "Attempt to initialize DensePoseList with DensePose datas "
+ "of type {}, expected DensePoseDataRelative".format(type(densepose_data))
+ )
+ densepose_data_ondevice = (
+ densepose_data.to(device) if densepose_data is not None else None
+ )
+ self.densepose_datas.append(densepose_data_ondevice)
+ self.boxes_xyxy_abs = boxes_xyxy_abs.to(device)
+ self.image_size_hw = image_size_hw
+ self.device = device
+
+ def to(self, device):
+ if self.device == device:
+ return self
+ return DensePoseList(self.densepose_datas, self.boxes_xyxy_abs, self.image_size_hw, device)
+
+ def __iter__(self):
+ return iter(self.densepose_datas)
+
+ def __len__(self):
+ return len(self.densepose_datas)
+
+ def __repr__(self):
+ s = self.__class__.__name__ + "("
+ s += "num_instances={}, ".format(len(self.densepose_datas))
+ s += "image_width={}, ".format(self.image_size_hw[1])
+ s += "image_height={})".format(self.image_size_hw[0])
+ return s
+
+ def __getitem__(self, item):
+ if isinstance(item, int):
+ densepose_data_rel = self.densepose_datas[item]
+ return densepose_data_rel
+ elif isinstance(item, slice):
+ densepose_datas_rel = self.densepose_datas[item]
+ boxes_xyxy_abs = self.boxes_xyxy_abs[item]
+ return DensePoseList(
+ densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
+ )
+ elif isinstance(item, torch.Tensor) and (item.dtype == torch.bool):
+ densepose_datas_rel = [self.densepose_datas[i] for i, x in enumerate(item) if x > 0]
+ boxes_xyxy_abs = self.boxes_xyxy_abs[item]
+ return DensePoseList(
+ densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
+ )
+ else:
+ densepose_datas_rel = [self.densepose_datas[i] for i in item]
+ boxes_xyxy_abs = self.boxes_xyxy_abs[item]
+ return DensePoseList(
+ densepose_datas_rel, boxes_xyxy_abs, self.image_size_hw, self.device
+ )
diff --git a/densepose/structures/mesh.py b/densepose/structures/mesh.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5abd3419b35234e6b44c0577bef2818f99a5cdc
--- /dev/null
+++ b/densepose/structures/mesh.py
@@ -0,0 +1,170 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+
+import pickle
+from functools import lru_cache
+from typing import Dict, Optional
+import torch
+
+from detectron2.utils.file_io import PathManager
+
+from densepose.data.meshes.catalog import MeshCatalog, MeshInfo
+
+
+def _maybe_copy_to_device(
+ attribute: Optional[torch.Tensor], device: torch.device
+) -> Optional[torch.Tensor]:
+ if attribute is None:
+ return None
+ return attribute.to(device)
+
+
+class Mesh:
+ def __init__(
+ self,
+ vertices: Optional[torch.Tensor] = None,
+ faces: Optional[torch.Tensor] = None,
+ geodists: Optional[torch.Tensor] = None,
+ symmetry: Optional[Dict[str, torch.Tensor]] = None,
+ texcoords: Optional[torch.Tensor] = None,
+ mesh_info: Optional[MeshInfo] = None,
+ device: Optional[torch.device] = None,
+ ):
+ """
+ Args:
+ vertices (tensor [N, 3] of float32): vertex coordinates in 3D
+ faces (tensor [M, 3] of long): triangular face represented as 3
+ vertex indices
+ geodists (tensor [N, N] of float32): geodesic distances from
+ vertex `i` to vertex `j` (optional, default: None)
+ symmetry (dict: str -> tensor): various mesh symmetry data:
+ - "vertex_transforms": vertex mapping under horizontal flip,
+ tensor of size [N] of type long; vertex `i` is mapped to
+ vertex `tensor[i]` (optional, default: None)
+ texcoords (tensor [N, 2] of float32): texture coordinates, i.e. global
+ and normalized mesh UVs (optional, default: None)
+ mesh_info (MeshInfo type): necessary to load the attributes on-the-go,
+ can be used instead of passing all the variables one by one
+ device (torch.device): device of the Mesh. If not provided, will use
+ the device of the vertices
+ """
+ self._vertices = vertices
+ self._faces = faces
+ self._geodists = geodists
+ self._symmetry = symmetry
+ self._texcoords = texcoords
+ self.mesh_info = mesh_info
+ self.device = device
+
+ assert self._vertices is not None or self.mesh_info is not None
+
+ all_fields = [self._vertices, self._faces, self._geodists, self._texcoords]
+
+ if self.device is None:
+ for field in all_fields:
+ if field is not None:
+ self.device = field.device
+ break
+ if self.device is None and symmetry is not None:
+ for key in symmetry:
+ self.device = symmetry[key].device
+ break
+ self.device = torch.device("cpu") if self.device is None else self.device
+
+ assert all([var.device == self.device for var in all_fields if var is not None])
+ if symmetry:
+ assert all(symmetry[key].device == self.device for key in symmetry)
+        if texcoords is not None and vertices is not None:
+ assert len(vertices) == len(texcoords)
+
+ def to(self, device: torch.device):
+ device_symmetry = self._symmetry
+ if device_symmetry:
+ device_symmetry = {key: value.to(device) for key, value in device_symmetry.items()}
+ return Mesh(
+ _maybe_copy_to_device(self._vertices, device),
+ _maybe_copy_to_device(self._faces, device),
+ _maybe_copy_to_device(self._geodists, device),
+ device_symmetry,
+ _maybe_copy_to_device(self._texcoords, device),
+ self.mesh_info,
+ device,
+ )
+
+ @property
+ def vertices(self):
+ if self._vertices is None and self.mesh_info is not None:
+ self._vertices = load_mesh_data(self.mesh_info.data, "vertices", self.device)
+ return self._vertices
+
+ @property
+ def faces(self):
+ if self._faces is None and self.mesh_info is not None:
+ self._faces = load_mesh_data(self.mesh_info.data, "faces", self.device)
+ return self._faces
+
+ @property
+ def geodists(self):
+ if self._geodists is None and self.mesh_info is not None:
+ self._geodists = load_mesh_auxiliary_data(self.mesh_info.geodists, self.device)
+ return self._geodists
+
+ @property
+ def symmetry(self):
+ if self._symmetry is None and self.mesh_info is not None:
+ self._symmetry = load_mesh_symmetry(self.mesh_info.symmetry, self.device)
+ return self._symmetry
+
+ @property
+ def texcoords(self):
+ if self._texcoords is None and self.mesh_info is not None:
+ self._texcoords = load_mesh_auxiliary_data(self.mesh_info.texcoords, self.device)
+ return self._texcoords
+
+ def get_geodists(self):
+ if self.geodists is None:
+ self.geodists = self._compute_geodists()
+ return self.geodists
+
+ def _compute_geodists(self):
+ # TODO: compute using Laplace-Beltrami
+ geodists = None
+ return geodists
+
+
+def load_mesh_data(
+ mesh_fpath: str, field: str, device: Optional[torch.device] = None
+) -> Optional[torch.Tensor]:
+ with PathManager.open(mesh_fpath, "rb") as hFile:
+        return torch.as_tensor(pickle.load(hFile)[field], dtype=torch.float).to(device)
+
+
+def load_mesh_auxiliary_data(
+ fpath: str, device: Optional[torch.device] = None
+) -> Optional[torch.Tensor]:
+ fpath_local = PathManager.get_local_path(fpath)
+ with PathManager.open(fpath_local, "rb") as hFile:
+ return torch.as_tensor(pickle.load(hFile), dtype=torch.float).to(device)
+
+
+@lru_cache()
+def load_mesh_symmetry(
+ symmetry_fpath: str, device: Optional[torch.device] = None
+) -> Optional[Dict[str, torch.Tensor]]:
+ with PathManager.open(symmetry_fpath, "rb") as hFile:
+ symmetry_loaded = pickle.load(hFile)
+ symmetry = {
+ "vertex_transforms": torch.as_tensor(
+ symmetry_loaded["vertex_transforms"], dtype=torch.long
+ ).to(device),
+ }
+ return symmetry
+
+
+@lru_cache()
+def create_mesh(mesh_name: str, device: Optional[torch.device] = None) -> Mesh:
+ return Mesh(mesh_info=MeshCatalog[mesh_name], device=device)
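+
+
+# Example usage (illustrative; assumes a mesh named "smpl_27554" is registered in
+# MeshCatalog and that its data files are reachable through PathManager):
+#   mesh = create_mesh("smpl_27554", torch.device("cpu"))
+#   vertices = mesh.vertices   # lazily loaded [N, 3] float tensor
+#   symmetry = mesh.symmetry   # {"vertex_transforms": [N] long tensor}, if available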
diff --git a/densepose/structures/transform_data.py b/densepose/structures/transform_data.py
new file mode 100644
index 0000000000000000000000000000000000000000..a345d66945fbd709ea6644caa7a71435aa0ed569
--- /dev/null
+++ b/densepose/structures/transform_data.py
@@ -0,0 +1,71 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from typing import BinaryIO, Dict, Union
+import torch
+
+
+def normalized_coords_transform(x0, y0, w, h):
+ """
+ Coordinates transform that maps top left corner to (-1, -1) and bottom
+ right corner to (1, 1). Used for torch.grid_sample to initialize the
+ grid
+ """
+
+ def f(p):
+ return (2 * (p[0] - x0) / w - 1, 2 * (p[1] - y0) / h - 1)
+
+ return f
+
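+# Example: for a box with top-left (10, 20), width 100 and height 50,
+#   f = normalized_coords_transform(10, 20, 100, 50)
+#   f((10, 20))  == (-1.0, -1.0)   # top-left corner
+#   f((110, 70)) == (1.0, 1.0)     # bottom-right corner
+#   f((60, 45))  == (0.0, 0.0)     # center
+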
+
+class DensePoseTransformData:
+
+ # Horizontal symmetry label transforms used for horizontal flip
+ MASK_LABEL_SYMMETRIES = [0, 1, 3, 2, 5, 4, 7, 6, 9, 8, 11, 10, 13, 12, 14]
+ # fmt: off
+ POINT_LABEL_SYMMETRIES = [ 0, 1, 2, 4, 3, 6, 5, 8, 7, 10, 9, 12, 11, 14, 13, 16, 15, 18, 17, 20, 19, 22, 21, 24, 23] # noqa
+ # fmt: on
+
+ def __init__(self, uv_symmetries: Dict[str, torch.Tensor], device: torch.device):
+ self.mask_label_symmetries = DensePoseTransformData.MASK_LABEL_SYMMETRIES
+ self.point_label_symmetries = DensePoseTransformData.POINT_LABEL_SYMMETRIES
+ self.uv_symmetries = uv_symmetries
+        self.device = device
+
+ def to(self, device: torch.device, copy: bool = False) -> "DensePoseTransformData":
+ """
+ Convert transform data to the specified device
+
+ Args:
+ device (torch.device): device to convert the data to
+ copy (bool): flag that specifies whether to copy or to reference the data
+ in case the device is the same
+ Return:
+ An instance of `DensePoseTransformData` with data stored on the specified device
+ """
+ if self.device == device and not copy:
+ return self
+ uv_symmetry_map = {}
+ for key in self.uv_symmetries:
+ uv_symmetry_map[key] = self.uv_symmetries[key].to(device=device, copy=copy)
+ return DensePoseTransformData(uv_symmetry_map, device)
+
+ @staticmethod
+ def load(io: Union[str, BinaryIO]):
+ """
+ Args:
+ io: (str or binary file-like object): input file to load data from
+ Returns:
+ An instance of `DensePoseTransformData` with transforms loaded from the file
+ """
+ import scipy.io
+
+ uv_symmetry_map = scipy.io.loadmat(io)
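+        # Each key maps to a 1 x K cell array (one cell per part label); every cell is a
+        # 2-D U/V lookup map, stacked below into a single [K, H, W] float tensor.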
+ uv_symmetry_map_torch = {}
+ for key in ["U_transforms", "V_transforms"]:
+ uv_symmetry_map_torch[key] = []
+ map_src = uv_symmetry_map[key]
+ map_dst = uv_symmetry_map_torch[key]
+ for i in range(map_src.shape[1]):
+ map_dst.append(torch.from_numpy(map_src[0, i]).to(dtype=torch.float))
+ uv_symmetry_map_torch[key] = torch.stack(map_dst, dim=0)
+ transform_data = DensePoseTransformData(uv_symmetry_map_torch, device=torch.device("cpu"))
+ return transform_data
diff --git a/densepose/utils/__init__.py b/densepose/utils/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/densepose/utils/dbhelper.py b/densepose/utils/dbhelper.py
new file mode 100644
index 0000000000000000000000000000000000000000..772e31874b2f65da9ae8b4e03c7515d5af282586
--- /dev/null
+++ b/densepose/utils/dbhelper.py
@@ -0,0 +1,147 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from typing import Any, Dict, Optional, Tuple
+
+
+class EntrySelector:
+ """
+ Base class for entry selectors
+ """
+
+ @staticmethod
+ def from_string(spec: str) -> "EntrySelector":
+ if spec == "*":
+ return AllEntrySelector()
+ return FieldEntrySelector(spec)
+
+
+class AllEntrySelector(EntrySelector):
+ """
+ Selector that accepts all entries
+ """
+
+ SPECIFIER = "*"
+
+ def __call__(self, entry):
+ return True
+
+
+class FieldEntrySelector(EntrySelector):
+ """
+ Selector that accepts only entries that match provided field
+ specifier(s). Only a limited set of specifiers is supported for now:
+      <specifiers>     ::= <specifier>[<comma><specifiers>]
+      <specifier>      ::= <field_name>[<type_delim><type>]<equal><value_or_range>
+      <field_name>     is a valid identifier
+      <type>           ::= "int" | "str"
+      <equal>          ::= "="
+      <comma>          ::= ","
+      <type_delim>     ::= ":"
+      <value_or_range> ::= <value> | <range>
+      <range>          ::= <value><range_delim><value>
+      <range_delim>    ::= "-"
+      <value> is a string without spaces and special symbols
+        (e.g. <comma>, <equal>, <type_delim>, <range_delim>)
+ """
+
+ _SPEC_DELIM = ","
+ _TYPE_DELIM = ":"
+ _RANGE_DELIM = "-"
+ _EQUAL = "="
+ _ERROR_PREFIX = "Invalid field selector specifier"
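+
+    # Example specifiers (illustrative field names):
+    #   "image_id:int=25"                    entries whose "image_id" equals 25
+    #   "image_id:int=10-100"                entries whose "image_id" lies in [10, 100]
+    #   "file_name=a.jpg,image_id:int=1-5"   comma-separated predicates are ANDed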
+
+ class _FieldEntryValuePredicate:
+ """
+ Predicate that checks strict equality for the specified entry field
+ """
+
+ def __init__(self, name: str, typespec: Optional[str], value: str):
+ import builtins
+
+ self.name = name
+ self.type = getattr(builtins, typespec) if typespec is not None else str
+ self.value = value
+
+ def __call__(self, entry):
+ return entry[self.name] == self.type(self.value)
+
+ class _FieldEntryRangePredicate:
+ """
+ Predicate that checks whether an entry field falls into the specified range
+ """
+
+ def __init__(self, name: str, typespec: Optional[str], vmin: str, vmax: str):
+ import builtins
+
+ self.name = name
+ self.type = getattr(builtins, typespec) if typespec is not None else str
+ self.vmin = vmin
+ self.vmax = vmax
+
+ def __call__(self, entry):
+ return (entry[self.name] >= self.type(self.vmin)) and (
+ entry[self.name] <= self.type(self.vmax)
+ )
+
+ def __init__(self, spec: str):
+ self._predicates = self._parse_specifier_into_predicates(spec)
+
+ def __call__(self, entry: Dict[str, Any]):
+ for predicate in self._predicates:
+ if not predicate(entry):
+ return False
+ return True
+
+ def _parse_specifier_into_predicates(self, spec: str):
+ predicates = []
+ specs = spec.split(self._SPEC_DELIM)
+ for subspec in specs:
+ eq_idx = subspec.find(self._EQUAL)
+ if eq_idx > 0:
+ field_name_with_type = subspec[:eq_idx]
+ field_name, field_type = self._parse_field_name_type(field_name_with_type)
+ field_value_or_range = subspec[eq_idx + 1 :]
+ if self._is_range_spec(field_value_or_range):
+ vmin, vmax = self._get_range_spec(field_value_or_range)
+ predicate = FieldEntrySelector._FieldEntryRangePredicate(
+ field_name, field_type, vmin, vmax
+ )
+ else:
+ predicate = FieldEntrySelector._FieldEntryValuePredicate(
+ field_name, field_type, field_value_or_range
+ )
+ predicates.append(predicate)
+ elif eq_idx == 0:
+ self._parse_error(f'"{subspec}", field name is empty!')
+ else:
+                self._parse_error(
+                    f'"{subspec}", should have format "<field_name>[:<type>]=<value_or_range>"!'
+                )
+ return predicates
+
+ def _parse_field_name_type(self, field_name_with_type: str) -> Tuple[str, Optional[str]]:
+ type_delim_idx = field_name_with_type.find(self._TYPE_DELIM)
+ if type_delim_idx > 0:
+ field_name = field_name_with_type[:type_delim_idx]
+ field_type = field_name_with_type[type_delim_idx + 1 :]
+ elif type_delim_idx == 0:
+ self._parse_error(f'"{field_name_with_type}", field name is empty!')
+ else:
+ field_name = field_name_with_type
+ field_type = None
+ # pyre-fixme[61]: `field_name` may not be initialized here.
+ # pyre-fixme[61]: `field_type` may not be initialized here.
+ return field_name, field_type
+
+ def _is_range_spec(self, field_value_or_range):
+ delim_idx = field_value_or_range.find(self._RANGE_DELIM)
+ return delim_idx > 0
+
+ def _get_range_spec(self, field_value_or_range):
+ if self._is_range_spec(field_value_or_range):
+ delim_idx = field_value_or_range.find(self._RANGE_DELIM)
+ vmin = field_value_or_range[:delim_idx]
+ vmax = field_value_or_range[delim_idx + 1 :]
+ return vmin, vmax
+ else:
+            self._parse_error(f'"{field_value_or_range}", range of values expected!')
+
+ def _parse_error(self, msg):
+ raise ValueError(f"{self._ERROR_PREFIX}: {msg}")
diff --git a/densepose/utils/logger.py b/densepose/utils/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..70cd3cb0eb0fc7495b1a4b50a05725a0e5b1baba
--- /dev/null
+++ b/densepose/utils/logger.py
@@ -0,0 +1,13 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+
+
+def verbosity_to_level(verbosity) -> int:
+ if verbosity is not None:
+ if verbosity == 0:
+ return logging.WARNING
+ elif verbosity == 1:
+ return logging.INFO
+ elif verbosity >= 2:
+ return logging.DEBUG
+ return logging.WARNING
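+
+
+# Example (illustrative): with an argparse flag declared as
+#   parser.add_argument("-v", "--verbosity", action="count")
+# a script can configure logging via
+#   logging.basicConfig(level=verbosity_to_level(args.verbosity))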
diff --git a/densepose/utils/transform.py b/densepose/utils/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..8dc4ae7be878302ec39b7f235e3ae1b7a3ca29ee
--- /dev/null
+++ b/densepose/utils/transform.py
@@ -0,0 +1,15 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from detectron2.data import MetadataCatalog
+from detectron2.utils.file_io import PathManager
+
+from densepose import DensePoseTransformData
+
+
+def load_for_dataset(dataset_name):
+ path = MetadataCatalog.get(dataset_name).densepose_transform_src
+ densepose_transform_data_fpath = PathManager.get_local_path(path)
+ return DensePoseTransformData.load(densepose_transform_data_fpath)
+
+
+def load_from_cfg(cfg):
+ return load_for_dataset(cfg.DATASETS.TEST[0])
diff --git a/densepose/vis/__init__.py b/densepose/vis/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/densepose/vis/base.py b/densepose/vis/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..a5c7e4ad19b6df600d8dcecacde6a3488c83a3e1
--- /dev/null
+++ b/densepose/vis/base.py
@@ -0,0 +1,192 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+import cv2
+import torch
+
+Image = np.ndarray
+Boxes = torch.Tensor
+
+
+class MatrixVisualizer:
+ """
+ Base visualizer for matrix data
+ """
+
+ def __init__(
+ self,
+ inplace=True,
+ cmap=cv2.COLORMAP_PARULA,
+ val_scale=1.0,
+ alpha=0.7,
+ interp_method_matrix=cv2.INTER_LINEAR,
+ interp_method_mask=cv2.INTER_NEAREST,
+ ):
+ self.inplace = inplace
+ self.cmap = cmap
+ self.val_scale = val_scale
+ self.alpha = alpha
+ self.interp_method_matrix = interp_method_matrix
+ self.interp_method_mask = interp_method_mask
+
+ def visualize(self, image_bgr, mask, matrix, bbox_xywh):
+ self._check_image(image_bgr)
+ self._check_mask_matrix(mask, matrix)
+        if self.inplace:
+            image_target_bgr = image_bgr
+        else:
+            image_target_bgr = image_bgr * 0
+ x, y, w, h = [int(v) for v in bbox_xywh]
+ if w <= 0 or h <= 0:
+ return image_bgr
+ mask, matrix = self._resize(mask, matrix, w, h)
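+        # Colormap the (scaled) matrix, keep the original image where mask == 0,
+        # then alpha-blend the result into the bbox region of the target image.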
+ mask_bg = np.tile((mask == 0)[:, :, np.newaxis], [1, 1, 3])
+ matrix_scaled = matrix.astype(np.float32) * self.val_scale
+ _EPSILON = 1e-6
+ if np.any(matrix_scaled > 255 + _EPSILON):
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ f"Matrix has values > {255 + _EPSILON} after " f"scaling, clipping to [0..255]"
+ )
+ matrix_scaled_8u = matrix_scaled.clip(0, 255).astype(np.uint8)
+ matrix_vis = cv2.applyColorMap(matrix_scaled_8u, self.cmap)
+ matrix_vis[mask_bg] = image_target_bgr[y : y + h, x : x + w, :][mask_bg]
+ image_target_bgr[y : y + h, x : x + w, :] = (
+ image_target_bgr[y : y + h, x : x + w, :] * (1.0 - self.alpha) + matrix_vis * self.alpha
+ )
+ return image_target_bgr.astype(np.uint8)
+
+ def _resize(self, mask, matrix, w, h):
+ if (w != mask.shape[1]) or (h != mask.shape[0]):
+            mask = cv2.resize(mask, (w, h), interpolation=self.interp_method_mask)
+ if (w != matrix.shape[1]) or (h != matrix.shape[0]):
+            matrix = cv2.resize(matrix, (w, h), interpolation=self.interp_method_matrix)
+ return mask, matrix
+
+ def _check_image(self, image_rgb):
+ assert len(image_rgb.shape) == 3
+ assert image_rgb.shape[2] == 3
+ assert image_rgb.dtype == np.uint8
+
+ def _check_mask_matrix(self, mask, matrix):
+ assert len(matrix.shape) == 2
+ assert len(mask.shape) == 2
+ assert mask.dtype == np.uint8
+
+
+class RectangleVisualizer:
+
+ _COLOR_GREEN = (18, 127, 15)
+
+ def __init__(self, color=_COLOR_GREEN, thickness=1):
+ self.color = color
+ self.thickness = thickness
+
+ def visualize(self, image_bgr, bbox_xywh, color=None, thickness=None):
+ x, y, w, h = bbox_xywh
+ color = color or self.color
+ thickness = thickness or self.thickness
+ cv2.rectangle(image_bgr, (int(x), int(y)), (int(x + w), int(y + h)), color, thickness)
+ return image_bgr
+
+
+class PointsVisualizer:
+
+ _COLOR_GREEN = (18, 127, 15)
+
+ def __init__(self, color_bgr=_COLOR_GREEN, r=5):
+ self.color_bgr = color_bgr
+ self.r = r
+
+ def visualize(self, image_bgr, pts_xy, colors_bgr=None, rs=None):
+ for j, pt_xy in enumerate(pts_xy):
+ x, y = pt_xy
+ color_bgr = colors_bgr[j] if colors_bgr is not None else self.color_bgr
+ r = rs[j] if rs is not None else self.r
+            cv2.circle(image_bgr, (int(x), int(y)), r, color_bgr, -1)
+ return image_bgr
+
+
+class TextVisualizer:
+
+ _COLOR_GRAY = (218, 227, 218)
+ _COLOR_WHITE = (255, 255, 255)
+
+ def __init__(
+ self,
+ font_face=cv2.FONT_HERSHEY_SIMPLEX,
+ font_color_bgr=_COLOR_GRAY,
+ font_scale=0.35,
+ font_line_type=cv2.LINE_AA,
+ font_line_thickness=1,
+ fill_color_bgr=_COLOR_WHITE,
+ fill_color_transparency=1.0,
+ frame_color_bgr=_COLOR_WHITE,
+ frame_color_transparency=1.0,
+ frame_thickness=1,
+ ):
+ self.font_face = font_face
+ self.font_color_bgr = font_color_bgr
+ self.font_scale = font_scale
+ self.font_line_type = font_line_type
+ self.font_line_thickness = font_line_thickness
+ self.fill_color_bgr = fill_color_bgr
+ self.fill_color_transparency = fill_color_transparency
+ self.frame_color_bgr = frame_color_bgr
+ self.frame_color_transparency = frame_color_transparency
+ self.frame_thickness = frame_thickness
+
+ def visualize(self, image_bgr, txt, topleft_xy):
+ txt_w, txt_h = self.get_text_size_wh(txt)
+ topleft_xy = tuple(map(int, topleft_xy))
+ x, y = topleft_xy
+ if self.frame_color_transparency < 1.0:
+ t = self.frame_thickness
+ image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :] = (
+ image_bgr[y - t : y + txt_h + t, x - t : x + txt_w + t, :]
+ * self.frame_color_transparency
+ + np.array(self.frame_color_bgr) * (1.0 - self.frame_color_transparency)
+ ).astype(float)
+ if self.fill_color_transparency < 1.0:
+ image_bgr[y : y + txt_h, x : x + txt_w, :] = (
+ image_bgr[y : y + txt_h, x : x + txt_w, :] * self.fill_color_transparency
+ + np.array(self.fill_color_bgr) * (1.0 - self.fill_color_transparency)
+ ).astype(float)
+ cv2.putText(
+ image_bgr,
+ txt,
+ topleft_xy,
+ self.font_face,
+ self.font_scale,
+ self.font_color_bgr,
+ self.font_line_thickness,
+ self.font_line_type,
+ )
+ return image_bgr
+
+ def get_text_size_wh(self, txt):
+ ((txt_w, txt_h), _) = cv2.getTextSize(
+ txt, self.font_face, self.font_scale, self.font_line_thickness
+ )
+ return txt_w, txt_h
+
+
+class CompoundVisualizer:
+ def __init__(self, visualizers):
+ self.visualizers = visualizers
+
+ def visualize(self, image_bgr, data):
+        assert len(data) == len(
+            self.visualizers
+        ), "The number of data entries {} should match the number of visualizers {}".format(
+            len(data), len(self.visualizers)
+        )
+ image = image_bgr
+ for i, visualizer in enumerate(self.visualizers):
+ image = visualizer.visualize(image, data[i])
+ return image
+
+ def __str__(self):
+ visualizer_str = ", ".join([str(v) for v in self.visualizers])
+ return "Compound Visualizer [{}]".format(visualizer_str)
diff --git a/densepose/vis/bounding_box.py b/densepose/vis/bounding_box.py
new file mode 100644
index 0000000000000000000000000000000000000000..6a9d37f40b1b64832a53d8c2cc8ce3bf90b30a3b
--- /dev/null
+++ b/densepose/vis/bounding_box.py
@@ -0,0 +1,37 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .base import RectangleVisualizer, TextVisualizer
+
+
+class BoundingBoxVisualizer:
+ def __init__(self):
+ self.rectangle_visualizer = RectangleVisualizer()
+
+ def visualize(self, image_bgr, boxes_xywh):
+ for bbox_xywh in boxes_xywh:
+ image_bgr = self.rectangle_visualizer.visualize(image_bgr, bbox_xywh)
+ return image_bgr
+
+
+class ScoredBoundingBoxVisualizer:
+ def __init__(self, bbox_visualizer_params=None, score_visualizer_params=None, **kwargs):
+ if bbox_visualizer_params is None:
+ bbox_visualizer_params = {}
+ if score_visualizer_params is None:
+ score_visualizer_params = {}
+ self.visualizer_bbox = RectangleVisualizer(**bbox_visualizer_params)
+ self.visualizer_score = TextVisualizer(**score_visualizer_params)
+
+ def visualize(self, image_bgr, scored_bboxes):
+ boxes_xywh, box_scores = scored_bboxes
+ assert len(boxes_xywh) == len(
+ box_scores
+ ), "Number of bounding boxes {} should be equal to the number of scores {}".format(
+ len(boxes_xywh), len(box_scores)
+ )
+ for i, box_xywh in enumerate(boxes_xywh):
+ score_i = box_scores[i]
+ image_bgr = self.visualizer_bbox.visualize(image_bgr, box_xywh)
+ score_txt = "{0:6.4f}".format(score_i)
+ topleft_xy = box_xywh[0], box_xywh[1]
+ image_bgr = self.visualizer_score.visualize(image_bgr, score_txt, topleft_xy)
+ return image_bgr
diff --git a/densepose/vis/densepose_data_points.py b/densepose/vis/densepose_data_points.py
new file mode 100644
index 0000000000000000000000000000000000000000..17e67cbf96022e09363cf1deac21814e4544f570
--- /dev/null
+++ b/densepose/vis/densepose_data_points.py
@@ -0,0 +1,106 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+from typing import Iterable, Optional, Tuple
+import cv2
+
+from densepose.structures import DensePoseDataRelative
+
+from .base import Boxes, Image, MatrixVisualizer, PointsVisualizer
+
+
+class DensePoseDataCoarseSegmentationVisualizer:
+ """
+ Visualizer for ground truth segmentation
+ """
+
+ def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
+ self.mask_visualizer = MatrixVisualizer(
+ inplace=inplace,
+ cmap=cmap,
+ val_scale=255.0 / DensePoseDataRelative.N_BODY_PARTS,
+ alpha=alpha,
+ )
+
+ def visualize(
+ self,
+ image_bgr: Image,
+ bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
+ ) -> Image:
+ if bbox_densepose_datas is None:
+ return image_bgr
+ for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
+ matrix = densepose_data.segm.numpy()
+ mask = np.zeros(matrix.shape, dtype=np.uint8)
+ mask[matrix > 0] = 1
+ image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh.numpy())
+ return image_bgr
+
+
+class DensePoseDataPointsVisualizer:
+ def __init__(self, densepose_data_to_value_fn=None, cmap=cv2.COLORMAP_PARULA, **kwargs):
+ self.points_visualizer = PointsVisualizer()
+ self.densepose_data_to_value_fn = densepose_data_to_value_fn
+ self.cmap = cmap
+
+ def visualize(
+ self,
+ image_bgr: Image,
+ bbox_densepose_datas: Optional[Tuple[Iterable[Boxes], Iterable[DensePoseDataRelative]]],
+ ) -> Image:
+ if bbox_densepose_datas is None:
+ return image_bgr
+ for bbox_xywh, densepose_data in zip(*bbox_densepose_datas):
+ x0, y0, w, h = bbox_xywh.numpy()
+ x = densepose_data.x.numpy() * w / 255.0 + x0
+ y = densepose_data.y.numpy() * h / 255.0 + y0
+ pts_xy = zip(x, y)
+ if self.densepose_data_to_value_fn is None:
+ image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy)
+ else:
+ v = self.densepose_data_to_value_fn(densepose_data)
+ img_colors_bgr = cv2.applyColorMap(v, self.cmap)
+ colors_bgr = [
+ [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
+ ]
+ image_bgr = self.points_visualizer.visualize(image_bgr, pts_xy, colors_bgr)
+ return image_bgr
+
+
+def _densepose_data_u_for_cmap(densepose_data):
+ u = np.clip(densepose_data.u.numpy(), 0, 1) * 255.0
+ return u.astype(np.uint8)
+
+
+def _densepose_data_v_for_cmap(densepose_data):
+ v = np.clip(densepose_data.v.numpy(), 0, 1) * 255.0
+ return v.astype(np.uint8)
+
+
+def _densepose_data_i_for_cmap(densepose_data):
+ i = (
+ np.clip(densepose_data.i.numpy(), 0.0, DensePoseDataRelative.N_PART_LABELS)
+ * 255.0
+ / DensePoseDataRelative.N_PART_LABELS
+ )
+ return i.astype(np.uint8)
+
+
+class DensePoseDataPointsUVisualizer(DensePoseDataPointsVisualizer):
+ def __init__(self, **kwargs):
+ super(DensePoseDataPointsUVisualizer, self).__init__(
+ densepose_data_to_value_fn=_densepose_data_u_for_cmap, **kwargs
+ )
+
+
+class DensePoseDataPointsVVisualizer(DensePoseDataPointsVisualizer):
+ def __init__(self, **kwargs):
+ super(DensePoseDataPointsVVisualizer, self).__init__(
+ densepose_data_to_value_fn=_densepose_data_v_for_cmap, **kwargs
+ )
+
+
+class DensePoseDataPointsIVisualizer(DensePoseDataPointsVisualizer):
+ def __init__(self, **kwargs):
+ super(DensePoseDataPointsIVisualizer, self).__init__(
+ densepose_data_to_value_fn=_densepose_data_i_for_cmap, **kwargs
+ )
diff --git a/densepose/vis/densepose_outputs_iuv.py b/densepose/vis/densepose_outputs_iuv.py
new file mode 100644
index 0000000000000000000000000000000000000000..454627cbe9fd5507cae8cbb9aab7c7bc0ffcfab7
--- /dev/null
+++ b/densepose/vis/densepose_outputs_iuv.py
@@ -0,0 +1,101 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+from typing import Optional, Tuple
+import cv2
+
+from densepose.structures import DensePoseDataRelative
+
+from ..structures import DensePoseChartPredictorOutput
+from .base import Boxes, Image, MatrixVisualizer
+
+
+class DensePoseOutputsVisualizer:
+ def __init__(
+ self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, to_visualize=None, **kwargs
+ ):
+ assert to_visualize in "IUV", "can only visualize IUV"
+ self.to_visualize = to_visualize
+
+ if self.to_visualize == "I":
+ val_scale = 255.0 / DensePoseDataRelative.N_PART_LABELS
+ else:
+ val_scale = 1.0
+ self.mask_visualizer = MatrixVisualizer(
+ inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha
+ )
+
+ def visualize(
+ self,
+ image_bgr: Image,
+ dp_output_with_bboxes: Tuple[Optional[DensePoseChartPredictorOutput], Optional[Boxes]],
+ ) -> Image:
+ densepose_output, bboxes_xywh = dp_output_with_bboxes
+ if densepose_output is None or bboxes_xywh is None:
+ return image_bgr
+
+ assert isinstance(
+ densepose_output, DensePoseChartPredictorOutput
+ ), "DensePoseChartPredictorOutput expected, {} encountered".format(type(densepose_output))
+
+ S = densepose_output.coarse_segm
+ I = densepose_output.fine_segm # noqa
+ U = densepose_output.u
+ V = densepose_output.v
+ N = S.size(0)
+ assert N == I.size(
+ 0
+ ), "densepose outputs S {} and I {}" " should have equal first dim size".format(
+ S.size(), I.size()
+ )
+ assert N == U.size(
+ 0
+ ), "densepose outputs S {} and U {}" " should have equal first dim size".format(
+ S.size(), U.size()
+ )
+ assert N == V.size(
+ 0
+ ), "densepose outputs S {} and V {}" " should have equal first dim size".format(
+ S.size(), V.size()
+ )
+ assert N == len(
+ bboxes_xywh
+ ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format(
+ len(bboxes_xywh), N
+ )
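+        # Per instance: argmax over coarse_segm channels gives the foreground mask,
+        # argmax over fine_segm gives the part label (zeroed outside the foreground);
+        # the selected I/U/V channel is then drawn as a colormapped overlay in the box.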
+ for n in range(N):
+ Sn = S[n].argmax(dim=0)
+ In = I[n].argmax(dim=0) * (Sn > 0).long()
+ segmentation = In.cpu().numpy().astype(np.uint8)
+ mask = np.zeros(segmentation.shape, dtype=np.uint8)
+ mask[segmentation > 0] = 1
+ bbox_xywh = bboxes_xywh[n]
+
+ if self.to_visualize == "I":
+ vis = segmentation
+ elif self.to_visualize in "UV":
+ U_or_Vn = {"U": U, "V": V}[self.to_visualize][n].cpu().numpy().astype(np.float32)
+ vis = np.zeros(segmentation.shape, dtype=np.float32)
+ for partId in range(U_or_Vn.shape[0]):
+ vis[segmentation == partId] = (
+ U_or_Vn[partId][segmentation == partId].clip(0, 1) * 255
+ )
+
+ # pyre-fixme[61]: `vis` may not be initialized here.
+ image_bgr = self.mask_visualizer.visualize(image_bgr, mask, vis, bbox_xywh)
+
+ return image_bgr
+
+
+class DensePoseOutputsUVisualizer(DensePoseOutputsVisualizer):
+ def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
+ super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="U", **kwargs)
+
+
+class DensePoseOutputsVVisualizer(DensePoseOutputsVisualizer):
+ def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
+ super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="V", **kwargs)
+
+
+class DensePoseOutputsFineSegmentationVisualizer(DensePoseOutputsVisualizer):
+ def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
+ super().__init__(inplace=inplace, cmap=cmap, alpha=alpha, to_visualize="I", **kwargs)
diff --git a/densepose/vis/densepose_outputs_vertex.py b/densepose/vis/densepose_outputs_vertex.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e10bf82cfea92f488480d423d3ced5d61d04c49
--- /dev/null
+++ b/densepose/vis/densepose_outputs_vertex.py
@@ -0,0 +1,229 @@
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+import json
+import numpy as np
+from functools import lru_cache
+from typing import Dict, List, Optional, Tuple
+import cv2
+import torch
+
+from detectron2.utils.file_io import PathManager
+
+from densepose.modeling import build_densepose_embedder
+from densepose.modeling.cse.utils import get_closest_vertices_mask_from_ES
+
+from ..data.utils import get_class_to_mesh_name_mapping
+from ..structures import DensePoseEmbeddingPredictorOutput
+from ..structures.mesh import create_mesh
+from .base import Boxes, Image, MatrixVisualizer
+from .densepose_results_textures import get_texture_atlas
+
+
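+# Produces a per-vertex scalar in [0, 1] that is later colormapped: a precomputed 1-D
+# embedding (mds_d=256.npy) for the SMPL 27554-vertex mesh, otherwise the normalized,
+# squared sum of the XYZ vertex coordinates of the mesh.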
+@lru_cache()
+def get_xyz_vertex_embedding(mesh_name: str, device: torch.device):
+ if mesh_name == "smpl_27554":
+ embed_path = PathManager.get_local_path(
+ "https://dl.fbaipublicfiles.com/densepose/data/cse/mds_d=256.npy"
+ )
+ embed_map, _ = np.load(embed_path, allow_pickle=True)
+ embed_map = torch.tensor(embed_map).float()[:, 0]
+ embed_map -= embed_map.min()
+ embed_map /= embed_map.max()
+ else:
+ mesh = create_mesh(mesh_name, device)
+ embed_map = mesh.vertices.sum(dim=1)
+ embed_map -= embed_map.min()
+ embed_map /= embed_map.max()
+ embed_map = embed_map**2
+ return embed_map
+
+
+class DensePoseOutputsVertexVisualizer:
+ def __init__(
+ self,
+ cfg,
+ inplace=True,
+ cmap=cv2.COLORMAP_JET,
+ alpha=0.7,
+ device="cuda",
+ default_class=0,
+ **kwargs,
+ ):
+ self.mask_visualizer = MatrixVisualizer(
+ inplace=inplace, cmap=cmap, val_scale=1.0, alpha=alpha
+ )
+ self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
+ self.embedder = build_densepose_embedder(cfg)
+ self.device = torch.device(device)
+ self.default_class = default_class
+
+ self.mesh_vertex_embeddings = {
+ mesh_name: self.embedder(mesh_name).to(self.device)
+ for mesh_name in self.class_to_mesh_name.values()
+ if self.embedder.has_embeddings(mesh_name)
+ }
+
+ def visualize(
+ self,
+ image_bgr: Image,
+ outputs_boxes_xywh_classes: Tuple[
+ Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]]
+ ],
+ ) -> Image:
+ if outputs_boxes_xywh_classes[0] is None:
+ return image_bgr
+
+ S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes(
+ outputs_boxes_xywh_classes
+ )
+
+ for n in range(N):
+ x, y, w, h = bboxes_xywh[n].int().tolist()
+ mesh_name = self.class_to_mesh_name[pred_classes[n]]
+ closest_vertices, mask = get_closest_vertices_mask_from_ES(
+ E[[n]],
+ S[[n]],
+ h,
+ w,
+ self.mesh_vertex_embeddings[mesh_name],
+ self.device,
+ )
+ embed_map = get_xyz_vertex_embedding(mesh_name, self.device)
+ vis = (embed_map[closest_vertices].clip(0, 1) * 255.0).cpu().numpy()
+ mask_numpy = mask.cpu().numpy().astype(dtype=np.uint8)
+ image_bgr = self.mask_visualizer.visualize(image_bgr, mask_numpy, vis, [x, y, w, h])
+
+ return image_bgr
+
+ def extract_and_check_outputs_and_boxes(self, outputs_boxes_xywh_classes):
+
+ densepose_output, bboxes_xywh, pred_classes = outputs_boxes_xywh_classes
+
+ if pred_classes is None:
+ pred_classes = [self.default_class] * len(bboxes_xywh)
+
+ assert isinstance(
+ densepose_output, DensePoseEmbeddingPredictorOutput
+ ), "DensePoseEmbeddingPredictorOutput expected, {} encountered".format(
+ type(densepose_output)
+ )
+
+ S = densepose_output.coarse_segm
+ E = densepose_output.embedding
+ N = S.size(0)
+ assert N == E.size(
+ 0
+ ), "CSE coarse_segm {} and embeddings {}" " should have equal first dim size".format(
+ S.size(), E.size()
+ )
+ assert N == len(
+ bboxes_xywh
+ ), "number of bounding boxes {}" " should be equal to first dim size of outputs {}".format(
+ len(bboxes_xywh), N
+ )
+ assert N == len(pred_classes), (
+ "number of predicted classes {}"
+            " should be equal to first dim size of outputs {}".format(len(pred_classes), N)
+ )
+
+ return S, E, N, bboxes_xywh, pred_classes
+
+
+def get_texture_atlases(json_str: Optional[str]) -> Optional[Dict[str, Optional[np.ndarray]]]:
+ """
+ json_str is a JSON string representing a mesh_name -> texture_atlas_path dictionary
+ """
+ if json_str is None:
+ return None
+
+ paths = json.loads(json_str)
+ return {mesh_name: get_texture_atlas(path) for mesh_name, path in paths.items()}
+
+
+class DensePoseOutputsTextureVisualizer(DensePoseOutputsVertexVisualizer):
+ def __init__(
+ self,
+ cfg,
+ texture_atlases_dict,
+ device="cuda",
+ default_class=0,
+ **kwargs,
+ ):
+ self.embedder = build_densepose_embedder(cfg)
+
+ self.texture_image_dict = {}
+ self.alpha_dict = {}
+
+ for mesh_name in texture_atlases_dict.keys():
+ if texture_atlases_dict[mesh_name].shape[-1] == 4: # Image with alpha channel
+ self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, -1] / 255.0
+ self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name][:, :, :3]
+ else:
+ self.alpha_dict[mesh_name] = texture_atlases_dict[mesh_name].sum(axis=-1) > 0
+ self.texture_image_dict[mesh_name] = texture_atlases_dict[mesh_name]
+
+ self.device = torch.device(device)
+ self.class_to_mesh_name = get_class_to_mesh_name_mapping(cfg)
+ self.default_class = default_class
+
+ self.mesh_vertex_embeddings = {
+ mesh_name: self.embedder(mesh_name).to(self.device)
+ for mesh_name in self.class_to_mesh_name.values()
+ }
+
+ def visualize(
+ self,
+ image_bgr: Image,
+ outputs_boxes_xywh_classes: Tuple[
+ Optional[DensePoseEmbeddingPredictorOutput], Optional[Boxes], Optional[List[int]]
+ ],
+ ) -> Image:
+ image_target_bgr = image_bgr.copy()
+ if outputs_boxes_xywh_classes[0] is None:
+ return image_target_bgr
+
+ S, E, N, bboxes_xywh, pred_classes = self.extract_and_check_outputs_and_boxes(
+ outputs_boxes_xywh_classes
+ )
+
+ meshes = {
+ p: create_mesh(self.class_to_mesh_name[p], self.device) for p in np.unique(pred_classes)
+ }
+
+ for n in range(N):
+ x, y, w, h = bboxes_xywh[n].int().cpu().numpy()
+ mesh_name = self.class_to_mesh_name[pred_classes[n]]
+ closest_vertices, mask = get_closest_vertices_mask_from_ES(
+ E[[n]],
+ S[[n]],
+ h,
+ w,
+ self.mesh_vertex_embeddings[mesh_name],
+ self.device,
+ )
+ uv_array = meshes[pred_classes[n]].texcoords[closest_vertices].permute((2, 0, 1))
+ uv_array = uv_array.cpu().numpy().clip(0, 1)
+ textured_image = self.generate_image_with_texture(
+ image_target_bgr[y : y + h, x : x + w],
+ uv_array,
+ mask.cpu().numpy(),
+ self.class_to_mesh_name[pred_classes[n]],
+ )
+ if textured_image is None:
+ continue
+ image_target_bgr[y : y + h, x : x + w] = textured_image
+
+ return image_target_bgr
+
+ def generate_image_with_texture(self, bbox_image_bgr, uv_array, mask, mesh_name):
+ alpha = self.alpha_dict.get(mesh_name)
+ texture_image = self.texture_image_dict.get(mesh_name)
+ if alpha is None or texture_image is None:
+ return None
+ U, V = uv_array
+ x_index = (U * texture_image.shape[1]).astype(int)
+ y_index = (V * texture_image.shape[0]).astype(int)
+ local_texture = texture_image[y_index, x_index][mask]
+ local_alpha = np.expand_dims(alpha[y_index, x_index][mask], -1)
+ output_image = bbox_image_bgr.copy()
+ output_image[mask] = output_image[mask] * (1 - local_alpha) + local_texture * local_alpha
+ return output_image.astype(np.uint8)
diff --git a/densepose/vis/densepose_results.py b/densepose/vis/densepose_results.py
new file mode 100644
index 0000000000000000000000000000000000000000..a660d26eec6c873bd5cf9b35d0c8387075789afc
--- /dev/null
+++ b/densepose/vis/densepose_results.py
@@ -0,0 +1,355 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+from typing import List, Optional, Tuple
+import cv2
+import torch
+
+from densepose.structures import DensePoseDataRelative
+
+from ..structures import DensePoseChartResult
+from .base import Boxes, Image, MatrixVisualizer
+
+
+class DensePoseResultsVisualizer:
+ def visualize(
+ self,
+ image_bgr: Image,
+ results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]],
+ ) -> Image:
+ densepose_result, boxes_xywh = results_and_boxes_xywh
+ if densepose_result is None or boxes_xywh is None:
+ return image_bgr
+
+ boxes_xywh = boxes_xywh.cpu().numpy()
+ context = self.create_visualization_context(image_bgr)
+ for i, result in enumerate(densepose_result):
+ iuv_array = torch.cat(
+ (result.labels[None].type(torch.float32), result.uv * 255.0)
+ ).type(torch.uint8)
+ self.visualize_iuv_arr(context, iuv_array.cpu().numpy(), boxes_xywh[i])
+ image_bgr = self.context_to_image_bgr(context)
+ return image_bgr
+
+ def create_visualization_context(self, image_bgr: Image):
+ return image_bgr
+
+ def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
+ pass
+
+ def context_to_image_bgr(self, context):
+ return context
+
+ def get_image_bgr_from_context(self, context):
+ return context
+
+
+class DensePoseMaskedColormapResultsVisualizer(DensePoseResultsVisualizer):
+ def __init__(
+ self,
+ data_extractor,
+ segm_extractor,
+ inplace=True,
+ cmap=cv2.COLORMAP_PARULA,
+ alpha=0.7,
+ val_scale=1.0,
+ **kwargs,
+ ):
+ self.mask_visualizer = MatrixVisualizer(
+ inplace=inplace, cmap=cmap, val_scale=val_scale, alpha=alpha
+ )
+ self.data_extractor = data_extractor
+ self.segm_extractor = segm_extractor
+
+ def context_to_image_bgr(self, context):
+ return context
+
+ def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh) -> None:
+ image_bgr = self.get_image_bgr_from_context(context)
+ matrix = self.data_extractor(iuv_arr)
+ segm = self.segm_extractor(iuv_arr)
+ mask = np.zeros(matrix.shape, dtype=np.uint8)
+ mask[segm > 0] = 1
+ image_bgr = self.mask_visualizer.visualize(image_bgr, mask, matrix, bbox_xywh)
+
+
+def _extract_i_from_iuvarr(iuv_arr):
+ return iuv_arr[0, :, :]
+
+
+def _extract_u_from_iuvarr(iuv_arr):
+ return iuv_arr[1, :, :]
+
+
+def _extract_v_from_iuvarr(iuv_arr):
+ return iuv_arr[2, :, :]
+
+
+class DensePoseResultsMplContourVisualizer(DensePoseResultsVisualizer):
+ def __init__(self, levels=10, **kwargs):
+ self.levels = levels
+ self.plot_args = kwargs
+
+ def create_visualization_context(self, image_bgr: Image):
+ import matplotlib.pyplot as plt
+ from matplotlib.backends.backend_agg import FigureCanvasAgg as FigureCanvas
+
+ context = {}
+ context["image_bgr"] = image_bgr
+ dpi = 100
+ height_inches = float(image_bgr.shape[0]) / dpi
+ width_inches = float(image_bgr.shape[1]) / dpi
+ fig = plt.figure(figsize=(width_inches, height_inches), dpi=dpi)
+ plt.axes([0, 0, 1, 1])
+ plt.axis("off")
+ context["fig"] = fig
+ canvas = FigureCanvas(fig)
+ context["canvas"] = canvas
+ extent = (0, image_bgr.shape[1], image_bgr.shape[0], 0)
+ plt.imshow(image_bgr[:, :, ::-1], extent=extent)
+ return context
+
+ def context_to_image_bgr(self, context):
+ fig = context["fig"]
+ w, h = map(int, fig.get_size_inches() * fig.get_dpi())
+ canvas = context["canvas"]
+ canvas.draw()
+        image_1d = np.frombuffer(canvas.tostring_rgb(), dtype="uint8")
+ image_rgb = image_1d.reshape(h, w, 3)
+ image_bgr = image_rgb[:, :, ::-1].copy()
+ return image_bgr
+
+ def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None:
+ import matplotlib.pyplot as plt
+
+ u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
+ v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
+ extent = (
+ bbox_xywh[0],
+ bbox_xywh[0] + bbox_xywh[2],
+ bbox_xywh[1],
+ bbox_xywh[1] + bbox_xywh[3],
+ )
+ plt.contour(u, self.levels, extent=extent, **self.plot_args)
+ plt.contour(v, self.levels, extent=extent, **self.plot_args)
+
+
+class DensePoseResultsCustomContourVisualizer(DensePoseResultsVisualizer):
+ """
+ Contour visualization using marching squares
+ """
+
+ def __init__(self, levels=10, **kwargs):
+ # TODO: colormap is hardcoded
+ cmap = cv2.COLORMAP_PARULA
+ if isinstance(levels, int):
+ self.levels = np.linspace(0, 1, levels)
+ else:
+ self.levels = levels
+ if "linewidths" in kwargs:
+ self.linewidths = kwargs["linewidths"]
+ else:
+ self.linewidths = [1] * len(self.levels)
+ self.plot_args = kwargs
+ img_colors_bgr = cv2.applyColorMap((self.levels * 255).astype(np.uint8), cmap)
+ self.level_colors_bgr = [
+ [int(v) for v in img_color_bgr.ravel()] for img_color_bgr in img_colors_bgr
+ ]
+
+ def visualize_iuv_arr(self, context, iuv_arr: np.ndarray, bbox_xywh: Boxes) -> None:
+ image_bgr = self.get_image_bgr_from_context(context)
+ segm = _extract_i_from_iuvarr(iuv_arr)
+ u = _extract_u_from_iuvarr(iuv_arr).astype(float) / 255.0
+ v = _extract_v_from_iuvarr(iuv_arr).astype(float) / 255.0
+ self._contours(image_bgr, u, segm, bbox_xywh)
+ self._contours(image_bgr, v, segm, bbox_xywh)
+
+ def _contours(self, image_bgr, arr, segm, bbox_xywh):
+ for part_idx in range(1, DensePoseDataRelative.N_PART_LABELS + 1):
+ mask = segm == part_idx
+ if not np.any(mask):
+ continue
+ arr_min = np.amin(arr[mask])
+ arr_max = np.amax(arr[mask])
+ I, J = np.nonzero(mask)
+ i0 = np.amin(I)
+ i1 = np.amax(I) + 1
+ j0 = np.amin(J)
+ j1 = np.amax(J) + 1
+ if (j1 == j0 + 1) or (i1 == i0 + 1):
+ continue
+ Nw = arr.shape[1] - 1
+ Nh = arr.shape[0] - 1
+ for level_idx, level in enumerate(self.levels):
+ if (level < arr_min) or (level > arr_max):
+ continue
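+                # Marching squares: for every 2x2 cell, encode which corners are >= level
+                # as a 4-bit code (bit0 = (i, j), bit1 = (i+1, j), bit2 = (i+1, j+1),
+                # bit3 = (i, j+1)); codes 0 and 15 have no crossing, all other codes are
+                # converted to line segments by _bin_code_2_lines.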
+ vp = arr[i0:i1, j0:j1] >= level
+ bin_codes = vp[:-1, :-1] + vp[1:, :-1] * 2 + vp[1:, 1:] * 4 + vp[:-1, 1:] * 8
+ mp = mask[i0:i1, j0:j1]
+ bin_mask_codes = mp[:-1, :-1] + mp[1:, :-1] * 2 + mp[1:, 1:] * 4 + mp[:-1, 1:] * 8
+ it = np.nditer(bin_codes, flags=["multi_index"])
+ color_bgr = self.level_colors_bgr[level_idx]
+ linewidth = self.linewidths[level_idx]
+ while not it.finished:
+ if (it[0] != 0) and (it[0] != 15):
+ i, j = it.multi_index
+ if bin_mask_codes[i, j] != 0:
+ self._draw_line(
+ image_bgr,
+ arr,
+ mask,
+ level,
+ color_bgr,
+ linewidth,
+ it[0],
+ it.multi_index,
+ bbox_xywh,
+ Nw,
+ Nh,
+ (i0, j0),
+ )
+ it.iternext()
+
+ def _draw_line(
+ self,
+ image_bgr,
+ arr,
+ mask,
+ v,
+ color_bgr,
+ linewidth,
+ bin_code,
+ multi_idx,
+ bbox_xywh,
+ Nw,
+ Nh,
+ offset,
+ ):
+ lines = self._bin_code_2_lines(arr, v, bin_code, multi_idx, Nw, Nh, offset)
+ x0, y0, w, h = bbox_xywh
+ x1 = x0 + w
+ y1 = y0 + h
+ for line in lines:
+ x0r, y0r = line[0]
+ x1r, y1r = line[1]
+ pt0 = (int(x0 + x0r * (x1 - x0)), int(y0 + y0r * (y1 - y0)))
+ pt1 = (int(x0 + x1r * (x1 - x0)), int(y0 + y1r * (y1 - y0)))
+ cv2.line(image_bgr, pt0, pt1, color_bgr, linewidth)
+
+ def _bin_code_2_lines(self, arr, v, bin_code, multi_idx, Nw, Nh, offset):
+ i0, j0 = offset
+ i, j = multi_idx
+ i += i0
+ j += j0
+ v0, v1, v2, v3 = arr[i, j], arr[i + 1, j], arr[i + 1, j + 1], arr[i, j + 1]
+ x0i = float(j) / Nw
+ y0j = float(i) / Nh
+ He = 1.0 / Nh
+ We = 1.0 / Nw
+ if (bin_code == 1) or (bin_code == 14):
+ a = (v - v0) / (v1 - v0)
+ b = (v - v0) / (v3 - v0)
+ pt1 = (x0i, y0j + a * He)
+ pt2 = (x0i + b * We, y0j)
+ return [(pt1, pt2)]
+ elif (bin_code == 2) or (bin_code == 13):
+ a = (v - v0) / (v1 - v0)
+ b = (v - v1) / (v2 - v1)
+ pt1 = (x0i, y0j + a * He)
+ pt2 = (x0i + b * We, y0j + He)
+ return [(pt1, pt2)]
+ elif (bin_code == 3) or (bin_code == 12):
+ a = (v - v0) / (v3 - v0)
+ b = (v - v1) / (v2 - v1)
+ pt1 = (x0i + a * We, y0j)
+ pt2 = (x0i + b * We, y0j + He)
+ return [(pt1, pt2)]
+ elif (bin_code == 4) or (bin_code == 11):
+ a = (v - v1) / (v2 - v1)
+ b = (v - v3) / (v2 - v3)
+ pt1 = (x0i + a * We, y0j + He)
+ pt2 = (x0i + We, y0j + b * He)
+ return [(pt1, pt2)]
+ elif (bin_code == 6) or (bin_code == 9):
+ a = (v - v0) / (v1 - v0)
+ b = (v - v3) / (v2 - v3)
+ pt1 = (x0i, y0j + a * He)
+ pt2 = (x0i + We, y0j + b * He)
+ return [(pt1, pt2)]
+ elif (bin_code == 7) or (bin_code == 8):
+ a = (v - v0) / (v3 - v0)
+ b = (v - v3) / (v2 - v3)
+ pt1 = (x0i + a * We, y0j)
+ pt2 = (x0i + We, y0j + b * He)
+ return [(pt1, pt2)]
+ elif bin_code == 5:
+ a1 = (v - v0) / (v1 - v0)
+ b1 = (v - v1) / (v2 - v1)
+ pt11 = (x0i, y0j + a1 * He)
+ pt12 = (x0i + b1 * We, y0j + He)
+ a2 = (v - v0) / (v3 - v0)
+ b2 = (v - v3) / (v2 - v3)
+ pt21 = (x0i + a2 * We, y0j)
+ pt22 = (x0i + We, y0j + b2 * He)
+ return [(pt11, pt12), (pt21, pt22)]
+ elif bin_code == 10:
+ a1 = (v - v0) / (v3 - v0)
+ b1 = (v - v0) / (v1 - v0)
+ pt11 = (x0i + a1 * We, y0j)
+ pt12 = (x0i, y0j + b1 * He)
+ a2 = (v - v1) / (v2 - v1)
+ b2 = (v - v3) / (v2 - v3)
+ pt21 = (x0i + a2 * We, y0j + He)
+ pt22 = (x0i + We, y0j + b2 * He)
+ return [(pt11, pt12), (pt21, pt22)]
+ return []
+
+
+try:
+ import matplotlib
+
+ matplotlib.use("Agg")
+ DensePoseResultsContourVisualizer = DensePoseResultsMplContourVisualizer
+except ModuleNotFoundError:
+ logger = logging.getLogger(__name__)
+ logger.warning("Could not import matplotlib, using custom contour visualizer")
+ DensePoseResultsContourVisualizer = DensePoseResultsCustomContourVisualizer
+
+
+class DensePoseResultsFineSegmentationVisualizer(DensePoseMaskedColormapResultsVisualizer):
+ def __init__(self, inplace=False, cmap=cv2.COLORMAP_PARULA, alpha=1, **kwargs):
+ super(DensePoseResultsFineSegmentationVisualizer, self).__init__(
+ _extract_i_from_iuvarr,
+ _extract_i_from_iuvarr,
+ inplace,
+ cmap,
+ alpha,
+ val_scale=255.0 / DensePoseDataRelative.N_PART_LABELS,
+ **kwargs,
+ )
+
+
+class DensePoseResultsUVisualizer(DensePoseMaskedColormapResultsVisualizer):
+ def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
+ super(DensePoseResultsUVisualizer, self).__init__(
+ _extract_u_from_iuvarr,
+ _extract_i_from_iuvarr,
+ inplace,
+ cmap,
+ alpha,
+ val_scale=1.0,
+ **kwargs,
+ )
+
+
+class DensePoseResultsVVisualizer(DensePoseMaskedColormapResultsVisualizer):
+ def __init__(self, inplace=True, cmap=cv2.COLORMAP_PARULA, alpha=0.7, **kwargs):
+ super(DensePoseResultsVVisualizer, self).__init__(
+ _extract_v_from_iuvarr,
+ _extract_i_from_iuvarr,
+ inplace,
+ cmap,
+ alpha,
+ val_scale=1.0,
+ **kwargs,
+ )
diff --git a/densepose/vis/densepose_results_textures.py b/densepose/vis/densepose_results_textures.py
new file mode 100644
index 0000000000000000000000000000000000000000..8b02f2bdbaa8bb1b70bc0f690a568ac4f8f1c91a
--- /dev/null
+++ b/densepose/vis/densepose_results_textures.py
@@ -0,0 +1,91 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+from typing import List, Optional, Tuple
+import torch
+
+from detectron2.data.detection_utils import read_image
+
+from ..structures import DensePoseChartResult
+from .base import Boxes, Image
+from .densepose_results import DensePoseResultsVisualizer
+
+
+def get_texture_atlas(path: Optional[str]) -> Optional[np.ndarray]:
+ if path is None:
+ return None
+
+ # Reading images like that downsamples 16-bit images to 8-bit
+ # If 16-bit images are needed, we can replace that by cv2.imread with the
+ # cv2.IMREAD_UNCHANGED flag (with cv2 we also need it to keep alpha channels)
+ # The rest of the pipeline would need to be adapted to 16-bit images too
+ bgr_image = read_image(path)
+    rgb_image = np.copy(bgr_image)
+    rgb_image[:, :, :3] = rgb_image[:, :, 2::-1]  # Convert BGR -> RGB; alpha channel, if any, is kept
+ return rgb_image
+
+
+class DensePoseResultsVisualizerWithTexture(DensePoseResultsVisualizer):
+ """
+ texture_atlas: An image, size 6N * 4N, with N * N squares for each of the 24 body parts.
+ It must follow the grid found at https://github.com/facebookresearch/DensePose/blob/master/DensePoseData/demo_data/texture_atlas_200.png # noqa
+ For each body part, U is proportional to the x coordinate, and (1 - V) to y
+ """
+
+ def __init__(self, texture_atlas, **kwargs):
+ self.texture_atlas = texture_atlas
+ self.body_part_size = texture_atlas.shape[0] // 6
+ assert self.body_part_size == texture_atlas.shape[1] // 4
+
+ def visualize(
+ self,
+ image_bgr: Image,
+ results_and_boxes_xywh: Tuple[Optional[List[DensePoseChartResult]], Optional[Boxes]],
+ ) -> Image:
+ densepose_result, boxes_xywh = results_and_boxes_xywh
+ if densepose_result is None or boxes_xywh is None:
+ return image_bgr
+
+ boxes_xywh = boxes_xywh.int().cpu().numpy()
+ texture_image, alpha = self.get_texture()
+ for i, result in enumerate(densepose_result):
+ iuv_array = torch.cat((result.labels[None], result.uv.clamp(0, 1)))
+ x, y, w, h = boxes_xywh[i]
+ bbox_image = image_bgr[y : y + h, x : x + w]
+ image_bgr[y : y + h, x : x + w] = self.generate_image_with_texture(
+ texture_image, alpha, bbox_image, iuv_array.cpu().numpy()
+ )
+ return image_bgr
+
+ def get_texture(self):
+ N = self.body_part_size
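+        # The atlas is a 6 x 4 grid of N x N tiles; the tile at (row j, column i)
+        # becomes body part index 6 * i + j in the unpacked [24, N, N, C] array.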
+ texture_image = np.zeros([24, N, N, self.texture_atlas.shape[-1]])
+ for i in range(4):
+ for j in range(6):
+ texture_image[(6 * i + j), :, :, :] = self.texture_atlas[
+ N * j : N * (j + 1), N * i : N * (i + 1), :
+ ]
+
+ if texture_image.shape[-1] == 4: # Image with alpha channel
+ alpha = texture_image[:, :, :, -1] / 255.0
+ texture_image = texture_image[:, :, :, :3]
+ else:
+ alpha = texture_image.sum(axis=-1) > 0
+
+ return texture_image, alpha
+
+ def generate_image_with_texture(self, texture_image, alpha, bbox_image_bgr, iuv_array):
+
+ I, U, V = iuv_array
+ generated_image_bgr = bbox_image_bgr.copy()
+
+ for PartInd in range(1, 25):
+ x, y = np.where(I == PartInd)
+ x_index = (U[x, y] * (self.body_part_size - 1)).astype(int)
+ y_index = ((1 - V[x, y]) * (self.body_part_size - 1)).astype(int)
+ part_alpha = np.expand_dims(alpha[PartInd - 1, y_index, x_index], -1)
+ generated_image_bgr[I == PartInd] = (
+ generated_image_bgr[I == PartInd] * (1 - part_alpha)
+ + texture_image[PartInd - 1, y_index, x_index] * part_alpha
+ )
+
+ return generated_image_bgr.astype(np.uint8)
diff --git a/densepose/vis/extractor.py b/densepose/vis/extractor.py
new file mode 100644
index 0000000000000000000000000000000000000000..543efac9272746b8cc130534d5d09251404c8d97
--- /dev/null
+++ b/densepose/vis/extractor.py
@@ -0,0 +1,199 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+from typing import List, Optional, Sequence, Tuple
+import torch
+
+from detectron2.layers.nms import batched_nms
+from detectron2.structures.instances import Instances
+
+from densepose.converters import ToChartResultConverterWithConfidences
+from densepose.structures import (
+ DensePoseChartResultWithConfidences,
+ DensePoseEmbeddingPredictorOutput,
+)
+from densepose.vis.bounding_box import BoundingBoxVisualizer, ScoredBoundingBoxVisualizer
+from densepose.vis.densepose_outputs_vertex import DensePoseOutputsVertexVisualizer
+from densepose.vis.densepose_results import DensePoseResultsVisualizer
+
+from .base import CompoundVisualizer
+
+Scores = Sequence[float]
+DensePoseChartResultsWithConfidences = List[DensePoseChartResultWithConfidences]
+
+
+def extract_scores_from_instances(instances: Instances, select=None):
+ if instances.has("scores"):
+ return instances.scores if select is None else instances.scores[select]
+ return None
+
+
+def extract_boxes_xywh_from_instances(instances: Instances, select=None):
+ if instances.has("pred_boxes"):
+ boxes_xywh = instances.pred_boxes.tensor.clone()
+ boxes_xywh[:, 2] -= boxes_xywh[:, 0]
+ boxes_xywh[:, 3] -= boxes_xywh[:, 1]
+ return boxes_xywh if select is None else boxes_xywh[select]
+ return None
+
+
+def create_extractor(visualizer: object):
+ """
+ Create an extractor for the provided visualizer
+ """
+ if isinstance(visualizer, CompoundVisualizer):
+ extractors = [create_extractor(v) for v in visualizer.visualizers]
+ return CompoundExtractor(extractors)
+ elif isinstance(visualizer, DensePoseResultsVisualizer):
+ return DensePoseResultExtractor()
+ elif isinstance(visualizer, ScoredBoundingBoxVisualizer):
+ return CompoundExtractor([extract_boxes_xywh_from_instances, extract_scores_from_instances])
+ elif isinstance(visualizer, BoundingBoxVisualizer):
+ return extract_boxes_xywh_from_instances
+ elif isinstance(visualizer, DensePoseOutputsVertexVisualizer):
+ return DensePoseOutputsExtractor()
+ else:
+ logger = logging.getLogger(__name__)
+ logger.error(f"Could not create extractor for {visualizer}")
+ return None
+
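+# Example pairing of a visualizer with its extractor (illustrative):
+#   visualizer = DensePoseResultsFineSegmentationVisualizer()
+#   extractor = create_extractor(visualizer)
+#   data = extractor(instances)          # instances: detectron2 Instances with pred_densepose
+#   image_vis = visualizer.visualize(image_bgr, data)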
+
+class BoundingBoxExtractor:
+ """
+ Extracts bounding boxes from instances
+ """
+
+ def __call__(self, instances: Instances):
+ boxes_xywh = extract_boxes_xywh_from_instances(instances)
+ return boxes_xywh
+
+
+class ScoredBoundingBoxExtractor:
+ """
+ Extracts bounding boxes from instances
+ """
+
+ def __call__(self, instances: Instances, select=None):
+ scores = extract_scores_from_instances(instances)
+ boxes_xywh = extract_boxes_xywh_from_instances(instances)
+ if (scores is None) or (boxes_xywh is None):
+ return (boxes_xywh, scores)
+ if select is not None:
+ scores = scores[select]
+ boxes_xywh = boxes_xywh[select]
+ return (boxes_xywh, scores)
+
+
+class DensePoseResultExtractor:
+ """
+ Extracts DensePose chart result with confidences from instances
+ """
+
+ def __call__(
+ self, instances: Instances, select=None
+ ) -> Tuple[Optional[DensePoseChartResultsWithConfidences], Optional[torch.Tensor]]:
+ if instances.has("pred_densepose") and instances.has("pred_boxes"):
+ dpout = instances.pred_densepose
+ boxes_xyxy = instances.pred_boxes
+ boxes_xywh = extract_boxes_xywh_from_instances(instances)
+ if select is not None:
+ dpout = dpout[select]
+ boxes_xyxy = boxes_xyxy[select]
+ converter = ToChartResultConverterWithConfidences()
+ results = [converter.convert(dpout[i], boxes_xyxy[[i]]) for i in range(len(dpout))]
+ return results, boxes_xywh
+ else:
+ return None, None
+
+
+class DensePoseOutputsExtractor:
+ """
+ Extracts DensePose result from instances
+ """
+
+ def __call__(
+ self,
+ instances: Instances,
+ select=None,
+ ) -> Tuple[
+ Optional[DensePoseEmbeddingPredictorOutput], Optional[torch.Tensor], Optional[List[int]]
+ ]:
+ if not (instances.has("pred_densepose") and instances.has("pred_boxes")):
+ return None, None, None
+
+ dpout = instances.pred_densepose
+ boxes_xyxy = instances.pred_boxes
+ boxes_xywh = extract_boxes_xywh_from_instances(instances)
+
+ if instances.has("pred_classes"):
+ classes = instances.pred_classes.tolist()
+ else:
+ classes = None
+
+ if select is not None:
+ dpout = dpout[select]
+ boxes_xyxy = boxes_xyxy[select]
+ if classes is not None:
+ classes = classes[select]
+
+ return dpout, boxes_xywh, classes
+
+
+class CompoundExtractor:
+ """
+ Extracts data for CompoundVisualizer
+ """
+
+ def __init__(self, extractors):
+ self.extractors = extractors
+
+ def __call__(self, instances: Instances, select=None):
+ datas = []
+ for extractor in self.extractors:
+ data = extractor(instances, select)
+ datas.append(data)
+ return datas
+
+
+class NmsFilteredExtractor:
+ """
+ Extracts data in the format accepted by NmsFilteredVisualizer
+ """
+
+ def __init__(self, extractor, iou_threshold):
+ self.extractor = extractor
+ self.iou_threshold = iou_threshold
+
+ def __call__(self, instances: Instances, select=None):
+ scores = extract_scores_from_instances(instances)
+ boxes_xywh = extract_boxes_xywh_from_instances(instances)
+ if boxes_xywh is None:
+ return None
+ select_local_idx = batched_nms(
+ boxes_xywh,
+ scores,
+ torch.zeros(len(scores), dtype=torch.int32),
+ iou_threshold=self.iou_threshold,
+ ).squeeze()
+ select_local = torch.zeros(len(boxes_xywh), dtype=torch.bool, device=boxes_xywh.device)
+ select_local[select_local_idx] = True
+ select = select_local if select is None else (select & select_local)
+ return self.extractor(instances, select=select)
+
+
+class ScoreThresholdedExtractor:
+ """
+ Extracts data in the format accepted by ScoreThresholdedVisualizer
+ """
+
+ def __init__(self, extractor, min_score):
+ self.extractor = extractor
+ self.min_score = min_score
+
+ def __call__(self, instances: Instances, select=None):
+ scores = extract_scores_from_instances(instances)
+ if scores is None:
+ return None
+ select_local = scores > self.min_score
+ select = select_local if select is None else (select & select_local)
+ data = self.extractor(instances, select=select)
+ return data
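
A minimal sketch of how the helpers above fit together, assuming `instances` is a
detectron2 `Instances` object produced by a DensePose model (the variable name is
illustrative):

    scores = extract_scores_from_instances(instances)          # Tensor[N] or None
    boxes_xywh = extract_boxes_xywh_from_instances(instances)  # Tensor[N, 4] or None

    # Wrap an extractor so that only detections above a score threshold are used:
    extractor = ScoreThresholdedExtractor(DensePoseResultExtractor(), min_score=0.8)
    results, boxes = extractor(instances)  # chart results (with confidences) and XYWH boxes

For visualization, create_extractor picks the matching extractor for a given visualizer
(e.g. a CompoundExtractor for a CompoundVisualizer), so the extracted data can then be
handed to that visualizer's visualize call.
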
diff --git a/detectron2/__init__.py b/detectron2/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bdd994b49294485c27610772f97f177741f5518f
--- /dev/null
+++ b/detectron2/__init__.py
@@ -0,0 +1,10 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .utils.env import setup_environment
+
+setup_environment()
+
+
+# This line will be programmatically read/written by setup.py.
+# Leave it at the bottom of this file and don't touch it.
+__version__ = "0.6"
diff --git a/detectron2/checkpoint/__init__.py b/detectron2/checkpoint/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..99da0469ae7e169d8970e4b642fed3f870076860
--- /dev/null
+++ b/detectron2/checkpoint/__init__.py
@@ -0,0 +1,10 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+# File:
+
+
+from . import catalog as _UNUSED # register the handler
+from .detection_checkpoint import DetectionCheckpointer
+from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
+
+__all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
diff --git a/detectron2/checkpoint/c2_model_loading.py b/detectron2/checkpoint/c2_model_loading.py
new file mode 100644
index 0000000000000000000000000000000000000000..0cf8b77a93f76a3a6149f121222918acbb9e3994
--- /dev/null
+++ b/detectron2/checkpoint/c2_model_loading.py
@@ -0,0 +1,407 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import re
+from typing import Dict, List
+import torch
+from tabulate import tabulate
+
+
+def convert_basic_c2_names(original_keys):
+ """
+ Apply some basic name conversion to names in C2 weights.
+ It only deals with typical backbone models.
+
+ Args:
+ original_keys (list[str]):
+ Returns:
+ list[str]: The same number of strings matching those in original_keys.
+ """
+ layer_keys = copy.deepcopy(original_keys)
+ layer_keys = [
+ {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
+ ] # some hard-coded mappings
+
+ layer_keys = [k.replace("_", ".") for k in layer_keys]
+ layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
+ layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
+ # Uniform both bn and gn names to "norm"
+ layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
+ layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
+ layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
+ layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
+
+ # stem
+ layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
+ # to avoid mis-matching with "conv1" in other components (e.g. detection head)
+ layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
+
+    # the layer1-4 naming is used by torchvision; here we follow the C2 naming strategy (res2-5)
+ # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
+ # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
+ # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
+ # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
+
+ # blocks
+ layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
+ layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
+ layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
+ layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
+
+ # DensePose substitutions
+ layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
+ layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
+ layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
+ layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
+ layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
+ return layer_keys
+
+
+def convert_c2_detectron_names(weights):
+ """
+ Map Caffe2 Detectron weight names to Detectron2 names.
+
+ Args:
+ weights (dict): name -> tensor
+
+ Returns:
+ dict: detectron2 names -> tensor
+ dict: detectron2 names -> C2 names
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Renaming Caffe2 weights ......")
+ original_keys = sorted(weights.keys())
+ layer_keys = copy.deepcopy(original_keys)
+
+ layer_keys = convert_basic_c2_names(layer_keys)
+
+ # --------------------------------------------------------------------------
+ # RPN hidden representation conv
+ # --------------------------------------------------------------------------
+ # FPN case
+ # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
+ # shared for all other levels, hence the appearance of "fpn2"
+ layer_keys = [
+ k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
+ ]
+ # Non-FPN case
+ layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # RPN box transformation conv
+ # --------------------------------------------------------------------------
+ # FPN case (see note above about "fpn2")
+ layer_keys = [
+ k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
+ for k in layer_keys
+ ]
+ layer_keys = [
+ k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
+ for k in layer_keys
+ ]
+ # Non-FPN case
+ layer_keys = [
+ k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
+ ]
+ layer_keys = [
+ k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
+ for k in layer_keys
+ ]
+
+ # --------------------------------------------------------------------------
+ # Fast R-CNN box head
+ # --------------------------------------------------------------------------
+ layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
+ layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
+ layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
+ layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
+ # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
+ layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # FPN lateral and output convolutions
+ # --------------------------------------------------------------------------
+ def fpn_map(name):
+ """
+ Look for keys with the following patterns:
+ 1) Starts with "fpn.inner."
+ Example: "fpn.inner.res2.2.sum.lateral.weight"
+ Meaning: These are lateral pathway convolutions
+ 2) Starts with "fpn.res"
+ Example: "fpn.res2.2.sum.weight"
+ Meaning: These are FPN output convolutions
+ """
+ splits = name.split(".")
+ norm = ".norm" if "norm" in splits else ""
+ if name.startswith("fpn.inner."):
+ # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
+ stage = int(splits[2][len("res") :])
+ return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
+ elif name.startswith("fpn.res"):
+ # splits example: ['fpn', 'res2', '2', 'sum', 'weight']
+ stage = int(splits[1][len("res") :])
+ return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
+ return name
+
+ layer_keys = [fpn_map(k) for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # Mask R-CNN mask head
+ # --------------------------------------------------------------------------
+ # roi_heads.StandardROIHeads case
+ layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
+ layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
+ layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
+ # roi_heads.Res5ROIHeads case
+ layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # Keypoint R-CNN head
+ # --------------------------------------------------------------------------
+ # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
+ layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
+ layer_keys = [
+ k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
+ ]
+ layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
+
+ # --------------------------------------------------------------------------
+ # Done with replacements
+ # --------------------------------------------------------------------------
+ assert len(set(layer_keys)) == len(layer_keys)
+ assert len(original_keys) == len(layer_keys)
+
+ new_weights = {}
+ new_keys_to_original_keys = {}
+ for orig, renamed in zip(original_keys, layer_keys):
+ new_keys_to_original_keys[renamed] = orig
+ if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
+ # remove the meaningless prediction weight for background class
+ new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
+ new_weights[renamed] = weights[orig][new_start_idx:]
+ logger.info(
+ "Remove prediction weight for background class in {}. The shape changes from "
+ "{} to {}.".format(
+ renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
+ )
+ )
+ elif renamed.startswith("cls_score."):
+ # move weights of bg class from original index 0 to last index
+ logger.info(
+ "Move classification weights for background class in {} from index 0 to "
+ "index {}.".format(renamed, weights[orig].shape[0] - 1)
+ )
+ new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
+ else:
+ new_weights[renamed] = weights[orig]
+
+ return new_weights, new_keys_to_original_keys
+
+
+# Note that the current matching is not symmetric:
+# it assumes model_state_dict will have longer names.
+def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
+ """
+    Match names between the two state dicts, and return a new ckpt_state_dict with names
+ converted to match model_state_dict with heuristics. The returned dict can be later
+ loaded with fvcore checkpointer.
+ If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
+ model and will be renamed at first.
+
+ Strategy: suppose that the models that we will create will have prefixes appended
+ to each of its keys, for example due to an extra level of nesting that the original
+ pre-trained weights from ImageNet won't contain. For example, model.state_dict()
+ might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
+ res2.conv1.weight. We thus want to match both parameters together.
+ For that, we look for each model weight, look among all loaded keys if there is one
+ that is a suffix of the current weight name, and use it if that's the case.
+    If multiple matches exist, take the one with the longest corresponding name.
+    For example, for the same model as before, the pretrained
+ weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
+ we want to match backbone[0].body.conv1.weight to conv1.weight, and
+ backbone[0].body.res2.conv1.weight to res2.conv1.weight.
+ """
+ model_keys = sorted(model_state_dict.keys())
+ if c2_conversion:
+ ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
+ # original_keys: the name in the original dict (before renaming)
+ else:
+ original_keys = {x: x for x in ckpt_state_dict.keys()}
+ ckpt_keys = sorted(ckpt_state_dict.keys())
+
+ def match(a, b):
+ # Matched ckpt_key should be a complete (starts with '.') suffix.
+ # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
+ # but matches whatever_conv1 or mesh_head.whatever_conv1.
+ return a == b or a.endswith("." + b)
+
+    # get a matrix of string matches, where each (i, j) entry corresponds to the size of the
+ # ckpt_key string, if it matches
+ match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
+ match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
+ # use the matched one with longest size in case of multiple matches
+ max_match_size, idxs = match_matrix.max(1)
+ # remove indices that correspond to no-match
+ idxs[max_match_size == 0] = -1
+
+ logger = logging.getLogger(__name__)
+ # matched_pairs (matched checkpoint key --> matched model key)
+ matched_keys = {}
+ result_state_dict = {}
+ for idx_model, idx_ckpt in enumerate(idxs.tolist()):
+ if idx_ckpt == -1:
+ continue
+ key_model = model_keys[idx_model]
+ key_ckpt = ckpt_keys[idx_ckpt]
+ value_ckpt = ckpt_state_dict[key_ckpt]
+ shape_in_model = model_state_dict[key_model].shape
+
+ if shape_in_model != value_ckpt.shape:
+ logger.warning(
+ "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
+ key_ckpt, value_ckpt.shape, key_model, shape_in_model
+ )
+ )
+ logger.warning(
+ "{} will not be loaded. Please double check and see if this is desired.".format(
+ key_ckpt
+ )
+ )
+ continue
+
+ assert key_model not in result_state_dict
+ result_state_dict[key_model] = value_ckpt
+ if key_ckpt in matched_keys: # already added to matched_keys
+ logger.error(
+ "Ambiguity found for {} in checkpoint!"
+ "It matches at least two keys in the model ({} and {}).".format(
+ key_ckpt, key_model, matched_keys[key_ckpt]
+ )
+ )
+ raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
+
+ matched_keys[key_ckpt] = key_model
+
+ # logging:
+ matched_model_keys = sorted(matched_keys.values())
+ if len(matched_model_keys) == 0:
+ logger.warning("No weights in checkpoint matched with model.")
+ return ckpt_state_dict
+ common_prefix = _longest_common_prefix(matched_model_keys)
+ rev_matched_keys = {v: k for k, v in matched_keys.items()}
+ original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
+
+ model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
+ table = []
+ memo = set()
+ for key_model in matched_model_keys:
+ if key_model in memo:
+ continue
+ if key_model in model_key_groups:
+ group = model_key_groups[key_model]
+ memo |= set(group)
+ shapes = [tuple(model_state_dict[k].shape) for k in group]
+ table.append(
+ (
+ _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
+ _group_str([original_keys[k] for k in group]),
+ " ".join([str(x).replace(" ", "") for x in shapes]),
+ )
+ )
+ else:
+ key_checkpoint = original_keys[key_model]
+ shape = str(tuple(model_state_dict[key_model].shape))
+ table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
+ submodule_str = common_prefix[:-1] if common_prefix else "model"
+ logger.info(
+ f"Following weights matched with submodule {submodule_str} - Total num: {len(table)}"
+ )
+
+ unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
+ for k in unmatched_ckpt_keys:
+ result_state_dict[k] = ckpt_state_dict[k]
+ return result_state_dict
+
+
+def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
+ """
+ Params in the same submodule are grouped together.
+
+ Args:
+ keys: names of all parameters
+ original_names: mapping from parameter name to their name in the checkpoint
+
+ Returns:
+ dict[name -> all other names in the same group]
+ """
+
+ def _submodule_name(key):
+ pos = key.rfind(".")
+ if pos < 0:
+ return None
+ prefix = key[: pos + 1]
+ return prefix
+
+ all_submodules = [_submodule_name(k) for k in keys]
+ all_submodules = [x for x in all_submodules if x]
+ all_submodules = sorted(all_submodules, key=len)
+
+ ret = {}
+ for prefix in all_submodules:
+ group = [k for k in keys if k.startswith(prefix)]
+ if len(group) <= 1:
+ continue
+ original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
+ if len(original_name_lcp) == 0:
+ # don't group weights if original names don't share prefix
+ continue
+
+ for k in group:
+ if k in ret:
+ continue
+ ret[k] = group
+ return ret
+
+
+def _longest_common_prefix(names: List[str]) -> str:
+ """
+ ["abc.zfg", "abc.zef"] -> "abc."
+ """
+ names = [n.split(".") for n in names]
+ m1, m2 = min(names), max(names)
+ ret = [a for a, b in zip(m1, m2) if a == b]
+ ret = ".".join(ret) + "." if len(ret) else ""
+ return ret
+
+
+def _longest_common_prefix_str(names: List[str]) -> str:
+ m1, m2 = min(names), max(names)
+ lcp = []
+ for a, b in zip(m1, m2):
+ if a == b:
+ lcp.append(a)
+ else:
+ break
+ lcp = "".join(lcp)
+ return lcp
+
+
+def _group_str(names: List[str]) -> str:
+ """
+ Turn "common1", "common2", "common3" into "common{1,2,3}"
+ """
+ lcp = _longest_common_prefix_str(names)
+ rest = [x[len(lcp) :] for x in names]
+ rest = "{" + ",".join(rest) + "}"
+ ret = lcp + rest
+
+ # add some simplification for BN specifically
+ ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
+ ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
+ return ret
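
A toy sketch of the suffix-matching heuristic described above, assuming a plain
(non-Caffe2) checkpoint so that `c2_conversion=False`; the keys and tensors are made up
for illustration:

    import torch
    from detectron2.checkpoint.c2_model_loading import align_and_update_state_dicts

    model_sd = {
        "backbone.body.conv1.weight": torch.zeros(3),
        "backbone.body.res2.conv1.weight": torch.zeros(3),
    }
    ckpt_sd = {
        "conv1.weight": torch.ones(3),
        "res2.conv1.weight": 2 * torch.ones(3),
    }
    aligned = align_and_update_state_dicts(model_sd, ckpt_sd, c2_conversion=False)
    # "backbone.body.conv1.weight"      <- "conv1.weight"        (only matching suffix)
    # "backbone.body.res2.conv1.weight" <- "res2.conv1.weight"   (longest matching suffix wins)
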
diff --git a/detectron2/checkpoint/catalog.py b/detectron2/checkpoint/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..c954fde210ba9b8124239c989f0a97e3ffcffcfe
--- /dev/null
+++ b/detectron2/checkpoint/catalog.py
@@ -0,0 +1,115 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+
+from detectron2.utils.file_io import PathHandler, PathManager
+
+
+class ModelCatalog:
+ """
+ Store mappings from names to third-party models.
+ """
+
+ S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
+
+ # MSRA models have STRIDE_IN_1X1=True. False otherwise.
+ # NOTE: all BN models here have fused BN into an affine layer.
+ # As a result, you should only load them to a model with "FrozenBN".
+ # Loading them to a model with regular BN or SyncBN is wrong.
+ # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
+ # which should be negligible for training.
+    # NOTE: all models here use PIXEL_STD=[1,1,1]
+ # NOTE: Most of the BN models here are no longer used. We use the
+ # re-converted pre-trained models under detectron2 model zoo instead.
+ C2_IMAGENET_MODELS = {
+ "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
+ "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
+ "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
+ "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
+ "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
+ "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
+ "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
+ }
+
+ C2_DETECTRON_PATH_FORMAT = (
+ "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl" # noqa B950
+ )
+
+ C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
+ C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
+
+ # format: {model_name} -> part of the url
+ C2_DETECTRON_MODELS = {
+ "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW", # noqa B950
+ "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I", # noqa B950
+ "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7", # noqa B950
+ "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ", # noqa B950
+ "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB", # noqa B950
+ "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC", # noqa B950
+ "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT", # noqa B950
+ "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI", # noqa B950
+ "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q", # noqa B950
+ "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao", # noqa B950
+ "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L", # noqa B950
+ "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179", # noqa B950
+ "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2", # noqa B950
+ }
+
+ @staticmethod
+ def get(name):
+ if name.startswith("Caffe2Detectron/COCO"):
+ return ModelCatalog._get_c2_detectron_baseline(name)
+ if name.startswith("ImageNetPretrained/"):
+ return ModelCatalog._get_c2_imagenet_pretrained(name)
+ raise RuntimeError("model not present in the catalog: {}".format(name))
+
+ @staticmethod
+ def _get_c2_imagenet_pretrained(name):
+ prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
+ name = name[len("ImageNetPretrained/") :]
+ name = ModelCatalog.C2_IMAGENET_MODELS[name]
+ url = "/".join([prefix, name])
+ return url
+
+ @staticmethod
+ def _get_c2_detectron_baseline(name):
+ name = name[len("Caffe2Detectron/COCO/") :]
+ url = ModelCatalog.C2_DETECTRON_MODELS[name]
+ if "keypoint_rcnn" in name:
+ dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
+ else:
+ dataset = ModelCatalog.C2_DATASET_COCO
+
+ if "35998355/rpn_R-50-C4_1x" in name:
+ # this one model is somehow different from others ..
+ type = "rpn"
+ else:
+ type = "generalized_rcnn"
+
+ # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
+ url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
+ prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
+ )
+ return url
+
+
+class ModelCatalogHandler(PathHandler):
+ """
+    Resolve URLs like catalog://.
+ """
+
+ PREFIX = "catalog://"
+
+ def _get_supported_prefixes(self):
+ return [self.PREFIX]
+
+ def _get_local_path(self, path, **kwargs):
+ logger = logging.getLogger(__name__)
+ catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
+ logger.info("Catalog entry {} points to {}".format(path, catalog_path))
+ return PathManager.get_local_path(catalog_path, **kwargs)
+
+ def _open(self, path, mode="r", **kwargs):
+ return PathManager.open(self._get_local_path(path), mode, **kwargs)
+
+
+PathManager.register_handler(ModelCatalogHandler())
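
A short sketch of how a catalog:// URI is resolved, using the entries registered above
(the get_local_path call would actually download the file to a local cache):

    from detectron2.checkpoint.catalog import ModelCatalog
    from detectron2.utils.file_io import PathManager

    url = ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
    # -> "https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl"

    # The registered ModelCatalogHandler lets PathManager resolve the same entry transparently:
    local_path = PathManager.get_local_path("catalog://ImageNetPretrained/MSRA/R-50")
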
diff --git a/detectron2/checkpoint/detection_checkpoint.py b/detectron2/checkpoint/detection_checkpoint.py
new file mode 100644
index 0000000000000000000000000000000000000000..cecb1fc2cfe46283b47096bcbcb2be3181431bf2
--- /dev/null
+++ b/detectron2/checkpoint/detection_checkpoint.py
@@ -0,0 +1,143 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import os
+import pickle
+from urllib.parse import parse_qs, urlparse
+import torch
+from fvcore.common.checkpoint import Checkpointer
+from torch.nn.parallel import DistributedDataParallel
+
+import detectron2.utils.comm as comm
+from detectron2.utils.file_io import PathManager
+
+from .c2_model_loading import align_and_update_state_dicts
+
+
+class DetectionCheckpointer(Checkpointer):
+ """
+ Same as :class:`Checkpointer`, but is able to:
+ 1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
+ 2. correctly load checkpoints that are only available on the master worker
+ """
+
+ def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
+ is_main_process = comm.is_main_process()
+ super().__init__(
+ model,
+ save_dir,
+ save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
+ **checkpointables,
+ )
+ self.path_manager = PathManager
+ self._parsed_url_during_load = None
+
+ def load(self, path, *args, **kwargs):
+ assert self._parsed_url_during_load is None
+ need_sync = False
+ logger = logging.getLogger(__name__)
+ logger.info("[DetectionCheckpointer] Loading from {} ...".format(path))
+
+ if path and isinstance(self.model, DistributedDataParallel):
+ path = self.path_manager.get_local_path(path)
+ has_file = os.path.isfile(path)
+ all_has_file = comm.all_gather(has_file)
+ if not all_has_file[0]:
+ raise OSError(f"File {path} not found on main worker.")
+ if not all(all_has_file):
+ logger.warning(
+ f"Not all workers can read checkpoint {path}. "
+ "Training may fail to fully resume."
+ )
+ # TODO: broadcast the checkpoint file contents from main
+ # worker, and load from it instead.
+ need_sync = True
+ if not has_file:
+ path = None # don't load if not readable
+
+ if path:
+ parsed_url = urlparse(path)
+ self._parsed_url_during_load = parsed_url
+ path = parsed_url._replace(query="").geturl() # remove query from filename
+ path = self.path_manager.get_local_path(path)
+ ret = super().load(path, *args, **kwargs)
+
+ if need_sync:
+ logger.info("Broadcasting model states from main worker ...")
+ self.model._sync_params_and_buffers()
+ self._parsed_url_during_load = None # reset to None
+ return ret
+
+ def _load_file(self, filename):
+ if filename.endswith(".pkl"):
+ with PathManager.open(filename, "rb") as f:
+ data = pickle.load(f, encoding="latin1")
+ if "model" in data and "__author__" in data:
+ # file is in Detectron2 model zoo format
+ self.logger.info("Reading a file from '{}'".format(data["__author__"]))
+ return data
+ else:
+ # assume file is from Caffe2 / Detectron1 model zoo
+ if "blobs" in data:
+ # Detection models have "blobs", but ImageNet models don't
+ data = data["blobs"]
+ data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
+ return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
+ elif filename.endswith(".pyth"):
+ # assume file is from pycls; no one else seems to use the ".pyth" extension
+ with PathManager.open(filename, "rb") as f:
+ data = torch.load(f)
+ assert (
+ "model_state" in data
+ ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
+ model_state = {
+ k: v
+ for k, v in data["model_state"].items()
+ if not k.endswith("num_batches_tracked")
+ }
+ return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
+
+ loaded = self._torch_load(filename)
+ if "model" not in loaded:
+ loaded = {"model": loaded}
+ assert self._parsed_url_during_load is not None, "`_load_file` must be called inside `load`"
+ parsed_url = self._parsed_url_during_load
+ queries = parse_qs(parsed_url.query)
+ if queries.pop("matching_heuristics", "False") == ["True"]:
+ loaded["matching_heuristics"] = True
+ if len(queries) > 0:
+ raise ValueError(
+ f"Unsupported query remaining: f{queries}, orginal filename: {parsed_url.geturl()}"
+ )
+ return loaded
+
+ def _torch_load(self, f):
+ return super()._load_file(f)
+
+ def _load_model(self, checkpoint):
+ if checkpoint.get("matching_heuristics", False):
+ self._convert_ndarray_to_tensor(checkpoint["model"])
+ # convert weights by name-matching heuristics
+ checkpoint["model"] = align_and_update_state_dicts(
+ self.model.state_dict(),
+ checkpoint["model"],
+ c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
+ )
+ # for non-caffe2 models, use standard ways to load it
+ incompatible = super()._load_model(checkpoint)
+
+ model_buffers = dict(self.model.named_buffers(recurse=False))
+ for k in ["pixel_mean", "pixel_std"]:
+ # Ignore missing key message about pixel_mean/std.
+ # Though they may be missing in old checkpoints, they will be correctly
+ # initialized from config anyway.
+ if k in model_buffers:
+ try:
+ incompatible.missing_keys.remove(k)
+ except ValueError:
+ pass
+ for k in incompatible.unexpected_keys[:]:
+ # Ignore unexpected keys about cell anchors. They exist in old checkpoints
+ # but now they are non-persistent buffers and will not be in new checkpoints.
+ if "anchor_generator.cell_anchors" in k:
+ incompatible.unexpected_keys.remove(k)
+ return incompatible
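
A minimal sketch of the intended usage, assuming `model` is a torch.nn.Module built from
a config; the weight paths below are illustrative:

    from detectron2.checkpoint import DetectionCheckpointer

    checkpointer = DetectionCheckpointer(model, save_dir="./output")

    # .pkl files from the detectron / detectron2 model zoos get the conversions above:
    checkpointer.load("detectron2://ImageNetPretrained/MSRA/R-50.pkl")

    # For a plain torch checkpoint, the name-matching heuristics can be requested via a query:
    checkpointer.load("/path/to/weights.pth?matching_heuristics=True")
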
diff --git a/detectron2/config/__init__.py b/detectron2/config/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..4e648e632d55c70f160d49630378d202fbde4e45
--- /dev/null
+++ b/detectron2/config/__init__.py
@@ -0,0 +1,24 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .compat import downgrade_config, upgrade_config
+from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable
+from .instantiate import instantiate
+from .lazy import LazyCall, LazyConfig
+
+__all__ = [
+ "CfgNode",
+ "get_cfg",
+ "global_cfg",
+ "set_global_cfg",
+ "downgrade_config",
+ "upgrade_config",
+ "configurable",
+ "instantiate",
+ "LazyCall",
+ "LazyConfig",
+]
+
+
+from detectron2.utils.env import fixup_module_metadata
+
+fixup_module_metadata(__name__, globals(), __all__)
+del fixup_module_metadata
diff --git a/detectron2/config/compat.py b/detectron2/config/compat.py
new file mode 100644
index 0000000000000000000000000000000000000000..11a08c439bf14defd880e37a938fab8a08e68eeb
--- /dev/null
+++ b/detectron2/config/compat.py
@@ -0,0 +1,229 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""
+Backward compatibility of configs.
+
+Instructions to bump version:
++ It's not needed to bump version if new keys are added.
+ It's only needed when backward-incompatible changes happen
+ (i.e., some existing keys disappear, or the meaning of a key changes)
++ To bump version, do the following:
+ 1. Increment _C.VERSION in defaults.py
+ 2. Add a converter in this file.
+
+ Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
+ and a function "downgrade" which in-place downgrades config from X to X-1
+
+ In each function, VERSION is left unchanged.
+
+ Each converter assumes that its input has the relevant keys
+ (i.e., the input is not a partial config).
+ 3. Run the tests (test_config.py) to make sure the upgrade & downgrade
+ functions are consistent.
+"""
+
+import logging
+from typing import List, Optional, Tuple
+
+from .config import CfgNode as CN
+from .defaults import _C
+
+__all__ = ["upgrade_config", "downgrade_config"]
+
+
+def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
+ """
+ Upgrade a config from its current version to a newer version.
+
+ Args:
+ cfg (CfgNode):
+ to_version (int): defaults to the latest version.
+ """
+ cfg = cfg.clone()
+ if to_version is None:
+ to_version = _C.VERSION
+
+ assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
+ cfg.VERSION, to_version
+ )
+ for k in range(cfg.VERSION, to_version):
+ converter = globals()["ConverterV" + str(k + 1)]
+ converter.upgrade(cfg)
+ cfg.VERSION = k + 1
+ return cfg
+
+
+def downgrade_config(cfg: CN, to_version: int) -> CN:
+ """
+ Downgrade a config from its current version to an older version.
+
+ Args:
+ cfg (CfgNode):
+ to_version (int):
+
+ Note:
+ A general downgrade of arbitrary configs is not always possible due to the
+ different functionalities in different versions.
+ The purpose of downgrade is only to recover the defaults in old versions,
+ allowing it to load an old partial yaml config.
+ Therefore, the implementation only needs to fill in the default values
+ in the old version when a general downgrade is not possible.
+ """
+ cfg = cfg.clone()
+ assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
+ cfg.VERSION, to_version
+ )
+ for k in range(cfg.VERSION, to_version, -1):
+ converter = globals()["ConverterV" + str(k)]
+ converter.downgrade(cfg)
+ cfg.VERSION = k - 1
+ return cfg
+
+
+def guess_version(cfg: CN, filename: str) -> int:
+ """
+ Guess the version of a partial config where the VERSION field is not specified.
+    Returns the version, or the latest if it cannot make a guess.
+
+ This makes it easier for users to migrate.
+ """
+ logger = logging.getLogger(__name__)
+
+ def _has(name: str) -> bool:
+ cur = cfg
+ for n in name.split("."):
+ if n not in cur:
+ return False
+ cur = cur[n]
+ return True
+
+ # Most users' partial configs have "MODEL.WEIGHT", so guess on it
+ ret = None
+ if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
+ ret = 1
+
+ if ret is not None:
+ logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
+ else:
+ ret = _C.VERSION
+ logger.warning(
+ "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
+ filename, ret
+ )
+ )
+ return ret
+
+
+def _rename(cfg: CN, old: str, new: str) -> None:
+ old_keys = old.split(".")
+ new_keys = new.split(".")
+
+ def _set(key_seq: List[str], val: str) -> None:
+ cur = cfg
+ for k in key_seq[:-1]:
+ if k not in cur:
+ cur[k] = CN()
+ cur = cur[k]
+ cur[key_seq[-1]] = val
+
+ def _get(key_seq: List[str]) -> CN:
+ cur = cfg
+ for k in key_seq:
+ cur = cur[k]
+ return cur
+
+ def _del(key_seq: List[str]) -> None:
+ cur = cfg
+ for k in key_seq[:-1]:
+ cur = cur[k]
+ del cur[key_seq[-1]]
+ if len(cur) == 0 and len(key_seq) > 1:
+ _del(key_seq[:-1])
+
+ _set(new_keys, _get(old_keys))
+ _del(old_keys)
+
+
+class _RenameConverter:
+ """
+ A converter that handles simple rename.
+ """
+
+ RENAME: List[Tuple[str, str]] = [] # list of tuples of (old name, new name)
+
+ @classmethod
+ def upgrade(cls, cfg: CN) -> None:
+ for old, new in cls.RENAME:
+ _rename(cfg, old, new)
+
+ @classmethod
+ def downgrade(cls, cfg: CN) -> None:
+ for old, new in cls.RENAME[::-1]:
+ _rename(cfg, new, old)
+
+
+class ConverterV1(_RenameConverter):
+ RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
+
+
+class ConverterV2(_RenameConverter):
+ """
+ A large bulk of rename, before public release.
+ """
+
+ RENAME = [
+ ("MODEL.WEIGHT", "MODEL.WEIGHTS"),
+ ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
+ ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
+ ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
+ ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
+ (
+ "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
+ "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
+ ),
+ (
+ "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
+ "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
+ ),
+ (
+ "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
+ "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
+ ),
+ ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
+ ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
+ ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
+ ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
+ ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
+ ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
+ ("TEST.AUG_ON", "TEST.AUG.ENABLED"),
+ ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
+ ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
+ ("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
+ ]
+
+ @classmethod
+ def upgrade(cls, cfg: CN) -> None:
+ super().upgrade(cfg)
+
+ if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
+ _rename(
+ cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
+ )
+ _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
+ del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
+ del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
+ else:
+ _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
+ _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
+ del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
+
+ @classmethod
+ def downgrade(cls, cfg: CN) -> None:
+ super().downgrade(cfg)
+
+ _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
+ _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
+ cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
+ cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
+ cfg.MODEL.RETINANET.ANCHOR_STRIDES = [] # this is not used anywhere in any version
diff --git a/detectron2/config/config.py b/detectron2/config/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..49a55b1bc87509e2bb24b902ae12c21d5aaeda81
--- /dev/null
+++ b/detectron2/config/config.py
@@ -0,0 +1,265 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import inspect
+import logging
+from fvcore.common.config import CfgNode as _CfgNode
+
+from detectron2.utils.file_io import PathManager
+
+
+class CfgNode(_CfgNode):
+ """
+ The same as `fvcore.common.config.CfgNode`, but different in:
+
+ 1. Use unsafe yaml loading by default.
+ Note that this may lead to arbitrary code execution: you must not
+ load a config file from untrusted sources before manually inspecting
+ the content of the file.
+ 2. Support config versioning.
+ When attempting to merge an old config, it will convert the old config automatically.
+
+ .. automethod:: clone
+ .. automethod:: freeze
+ .. automethod:: defrost
+ .. automethod:: is_frozen
+ .. automethod:: load_yaml_with_base
+ .. automethod:: merge_from_list
+ .. automethod:: merge_from_other_cfg
+ """
+
+ @classmethod
+ def _open_cfg(cls, filename):
+ return PathManager.open(filename, "r")
+
+ # Note that the default value of allow_unsafe is changed to True
+ def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
+ """
+ Load content from the given config file and merge it into self.
+
+ Args:
+ cfg_filename: config filename
+ allow_unsafe: allow unsafe yaml syntax
+ """
+ assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
+ loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
+ loaded_cfg = type(self)(loaded_cfg)
+
+ # defaults.py needs to import CfgNode
+ from .defaults import _C
+
+ latest_ver = _C.VERSION
+ assert (
+ latest_ver == self.VERSION
+ ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
+
+ logger = logging.getLogger(__name__)
+
+ loaded_ver = loaded_cfg.get("VERSION", None)
+ if loaded_ver is None:
+ from .compat import guess_version
+
+ loaded_ver = guess_version(loaded_cfg, cfg_filename)
+ assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
+ loaded_ver, self.VERSION
+ )
+
+ if loaded_ver == self.VERSION:
+ self.merge_from_other_cfg(loaded_cfg)
+ else:
+ # compat.py needs to import CfgNode
+ from .compat import upgrade_config, downgrade_config
+
+ logger.warning(
+ "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
+ "See docs/CHANGELOG.md for instructions to update your files.".format(
+ loaded_ver, cfg_filename, self.VERSION
+ )
+ )
+ # To convert, first obtain a full config at an old version
+ old_self = downgrade_config(self, to_version=loaded_ver)
+ old_self.merge_from_other_cfg(loaded_cfg)
+ new_config = upgrade_config(old_self)
+ self.clear()
+ self.update(new_config)
+
+ def dump(self, *args, **kwargs):
+ """
+ Returns:
+ str: a yaml string representation of the config
+ """
+ # to make it show up in docs
+ return super().dump(*args, **kwargs)
+
+
+global_cfg = CfgNode()
+
+
+def get_cfg() -> CfgNode:
+ """
+ Get a copy of the default config.
+
+ Returns:
+ a detectron2 CfgNode instance.
+ """
+ from .defaults import _C
+
+ return _C.clone()
+
+
+def set_global_cfg(cfg: CfgNode) -> None:
+ """
+ Let the global config point to the given cfg.
+
+ Assume that the given "cfg" has the key "KEY", after calling
+ `set_global_cfg(cfg)`, the key can be accessed by:
+ ::
+ from detectron2.config import global_cfg
+ print(global_cfg.KEY)
+
+ By using a hacky global config, you can access these configs anywhere,
+ without having to pass the config object or the values deep into the code.
+ This is a hacky feature introduced for quick prototyping / research exploration.
+ """
+ global global_cfg
+ global_cfg.clear()
+ global_cfg.update(cfg)
+
+
+def configurable(init_func=None, *, from_config=None):
+ """
+ Decorate a function or a class's __init__ method so that it can be called
+ with a :class:`CfgNode` object using a :func:`from_config` function that translates
+ :class:`CfgNode` to arguments.
+
+ Examples:
+ ::
+ # Usage 1: Decorator on __init__:
+ class A:
+ @configurable
+ def __init__(self, a, b=2, c=3):
+ pass
+
+ @classmethod
+ def from_config(cls, cfg): # 'cfg' must be the first argument
+ # Returns kwargs to be passed to __init__
+ return {"a": cfg.A, "b": cfg.B}
+
+ a1 = A(a=1, b=2) # regular construction
+ a2 = A(cfg) # construct with a cfg
+ a3 = A(cfg, b=3, c=4) # construct with extra overwrite
+
+ # Usage 2: Decorator on any function. Needs an extra from_config argument:
+        @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
+ def a_func(a, b=2, c=3):
+ pass
+
+ a1 = a_func(a=1, b=2) # regular call
+ a2 = a_func(cfg) # call with a cfg
+ a3 = a_func(cfg, b=3, c=4) # call with extra overwrite
+
+ Args:
+ init_func (callable): a class's ``__init__`` method in usage 1. The
+ class must have a ``from_config`` classmethod which takes `cfg` as
+ the first argument.
+ from_config (callable): the from_config function in usage 2. It must take `cfg`
+ as its first argument.
+ """
+
+ if init_func is not None:
+ assert (
+ inspect.isfunction(init_func)
+ and from_config is None
+ and init_func.__name__ == "__init__"
+ ), "Incorrect use of @configurable. Check API documentation for examples."
+
+ @functools.wraps(init_func)
+ def wrapped(self, *args, **kwargs):
+ try:
+ from_config_func = type(self).from_config
+ except AttributeError as e:
+ raise AttributeError(
+ "Class with @configurable must have a 'from_config' classmethod."
+ ) from e
+ if not inspect.ismethod(from_config_func):
+ raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
+
+ if _called_with_cfg(*args, **kwargs):
+ explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
+ init_func(self, **explicit_args)
+ else:
+ init_func(self, *args, **kwargs)
+
+ return wrapped
+
+ else:
+ if from_config is None:
+ return configurable # @configurable() is made equivalent to @configurable
+ assert inspect.isfunction(
+ from_config
+ ), "from_config argument of configurable must be a function!"
+
+ def wrapper(orig_func):
+ @functools.wraps(orig_func)
+ def wrapped(*args, **kwargs):
+ if _called_with_cfg(*args, **kwargs):
+ explicit_args = _get_args_from_config(from_config, *args, **kwargs)
+ return orig_func(**explicit_args)
+ else:
+ return orig_func(*args, **kwargs)
+
+ wrapped.from_config = from_config
+ return wrapped
+
+ return wrapper
+
+
+def _get_args_from_config(from_config_func, *args, **kwargs):
+ """
+ Use `from_config` to obtain explicit arguments.
+
+ Returns:
+ dict: arguments to be used for cls.__init__
+ """
+ signature = inspect.signature(from_config_func)
+ if list(signature.parameters.keys())[0] != "cfg":
+ if inspect.isfunction(from_config_func):
+ name = from_config_func.__name__
+ else:
+ name = f"{from_config_func.__self__}.from_config"
+ raise TypeError(f"{name} must take 'cfg' as the first argument!")
+ support_var_arg = any(
+ param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
+ for param in signature.parameters.values()
+ )
+ if support_var_arg: # forward all arguments to from_config, if from_config accepts them
+ ret = from_config_func(*args, **kwargs)
+ else:
+ # forward supported arguments to from_config
+ supported_arg_names = set(signature.parameters.keys())
+ extra_kwargs = {}
+ for name in list(kwargs.keys()):
+ if name not in supported_arg_names:
+ extra_kwargs[name] = kwargs.pop(name)
+ ret = from_config_func(*args, **kwargs)
+ # forward the other arguments to __init__
+ ret.update(extra_kwargs)
+ return ret
+
+
+def _called_with_cfg(*args, **kwargs):
+ """
+ Returns:
+ bool: whether the arguments contain CfgNode and should be considered
+ forwarded to from_config.
+ """
+ from omegaconf import DictConfig
+
+ if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
+ return True
+ if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
+ return True
+ # `from_config`'s first argument is forced to be "cfg".
+ # So the above check covers all cases.
+ return False
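
A small sketch of the typical CfgNode flow these helpers support; "my_config.yaml" and
the override values are placeholders:

    from detectron2.config import get_cfg, set_global_cfg

    cfg = get_cfg()                               # copy of the defaults
    cfg.merge_from_file("my_config.yaml")         # old VERSIONs are upgraded automatically
    cfg.merge_from_list(["MODEL.DEVICE", "cpu"])  # command-line style overrides
    cfg.freeze()
    set_global_cfg(cfg)                           # optional: expose as detectron2.config.global_cfg
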
diff --git a/detectron2/config/defaults.py b/detectron2/config/defaults.py
new file mode 100644
index 0000000000000000000000000000000000000000..bd2a5f6b2de4af2caa1f65c64ab93a5e3ac21780
--- /dev/null
+++ b/detectron2/config/defaults.py
@@ -0,0 +1,650 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .config import CfgNode as CN
+
+# NOTE: given the new config system
+# (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html),
+# we will stop adding new functionalities to default CfgNode.
+
+# -----------------------------------------------------------------------------
+# Convention about Training / Test specific parameters
+# -----------------------------------------------------------------------------
+# Whenever an argument can be either used for training or for testing, the
+# corresponding name will be post-fixed by a _TRAIN for a training parameter,
+# or _TEST for a test-specific parameter.
+# For example, the number of images during training will be
+# IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
+# IMAGES_PER_BATCH_TEST
+
+# -----------------------------------------------------------------------------
+# Config definition
+# -----------------------------------------------------------------------------
+
+_C = CN()
+
+# The version number, to upgrade from old configs to new ones if any
+# changes happen. It's recommended to keep a VERSION in your config file.
+_C.VERSION = 2
+
+_C.MODEL = CN()
+_C.MODEL.LOAD_PROPOSALS = False
+_C.MODEL.MASK_ON = False
+_C.MODEL.KEYPOINT_ON = False
+_C.MODEL.DEVICE = "cuda"
+_C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
+
+# Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
+# to be loaded to the model. You can find available models in the model zoo.
+_C.MODEL.WEIGHTS = ""
+
+# Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
+# To train on images of different number of channels, just set different mean & std.
+# Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
+_C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
+# When using pre-trained models in Detectron1 or any MSRA models,
+# std has been absorbed into its conv1 weights, so the std needs to be set 1.
+# Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
+_C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
+
+
+# -----------------------------------------------------------------------------
+# INPUT
+# -----------------------------------------------------------------------------
+_C.INPUT = CN()
+# By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
+# Please refer to ResizeShortestEdge for detailed definition.
+# Size of the smallest side of the image during training
+_C.INPUT.MIN_SIZE_TRAIN = (800,)
+# Sample size of smallest side by choice or random selection from range given by
+# INPUT.MIN_SIZE_TRAIN
+_C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
+# Maximum size of the side of the image during training
+_C.INPUT.MAX_SIZE_TRAIN = 1333
+# Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
+_C.INPUT.MIN_SIZE_TEST = 800
+# Maximum size of the side of the image during testing
+_C.INPUT.MAX_SIZE_TEST = 1333
+# Mode for flipping images used in data augmentation during training
+# choose one of ["horizontal", "vertical", "none"]
+_C.INPUT.RANDOM_FLIP = "horizontal"
+
+# `True` if cropping is used for data augmentation during training
+_C.INPUT.CROP = CN({"ENABLED": False})
+# Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
+_C.INPUT.CROP.TYPE = "relative_range"
+# Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
+# pixels if CROP.TYPE is "absolute"
+_C.INPUT.CROP.SIZE = [0.9, 0.9]
+
+
+# Whether the model needs RGB, YUV, HSV etc.
+# Should be one of the modes defined here, as we use PIL to read the image:
+# https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
+# with BGR being the one exception. One can set image format to BGR, we will
+# internally use RGB for conversion and flip the channels over
+_C.INPUT.FORMAT = "BGR"
+# The ground truth mask format that the model will use.
+# Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
+_C.INPUT.MASK_FORMAT = "polygon" # alternative: "bitmask"
+
+
+# -----------------------------------------------------------------------------
+# Dataset
+# -----------------------------------------------------------------------------
+_C.DATASETS = CN()
+# List of the dataset names for training. Must be registered in DatasetCatalog
+# Samples from these datasets will be merged and used as one dataset.
+_C.DATASETS.TRAIN = ()
+# List of the pre-computed proposal files for training, which must be consistent
+# with datasets listed in DATASETS.TRAIN.
+_C.DATASETS.PROPOSAL_FILES_TRAIN = ()
+# Number of top scoring precomputed proposals to keep for training
+_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
+# List of the dataset names for testing. Must be registered in DatasetCatalog
+_C.DATASETS.TEST = ()
+# List of the pre-computed proposal files for test, which must be consistent
+# with datasets listed in DATASETS.TEST.
+_C.DATASETS.PROPOSAL_FILES_TEST = ()
+# Number of top scoring precomputed proposals to keep for test
+_C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
+
+# -----------------------------------------------------------------------------
+# DataLoader
+# -----------------------------------------------------------------------------
+_C.DATALOADER = CN()
+# Number of data loading threads
+_C.DATALOADER.NUM_WORKERS = 4
+# If True, each batch should contain only images for which the aspect ratio
+# is compatible. This groups portrait images together, and landscape images
+# are not batched with portrait images.
+_C.DATALOADER.ASPECT_RATIO_GROUPING = True
+# Options: TrainingSampler, RepeatFactorTrainingSampler
+_C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
+# Repeat threshold for RepeatFactorTrainingSampler
+_C.DATALOADER.REPEAT_THRESHOLD = 0.0
+# If True, when working on datasets that have instance annotations, the
+# training dataloader will filter out images without associated annotations
+_C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
+
+# ---------------------------------------------------------------------------- #
+# Backbone options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.BACKBONE = CN()
+
+_C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
+# Freeze the first several stages so they are not trained.
+# There are 5 stages in ResNet. The first is a convolution, and the following
+# stages are each group of residual blocks.
+_C.MODEL.BACKBONE.FREEZE_AT = 2
+
+
+# ---------------------------------------------------------------------------- #
+# FPN options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.FPN = CN()
+# Names of the input feature maps to be used by FPN
+# They must have contiguous power of 2 strides
+# e.g., ["res2", "res3", "res4", "res5"]
+_C.MODEL.FPN.IN_FEATURES = []
+_C.MODEL.FPN.OUT_CHANNELS = 256
+
+# Options: "" (no norm), "GN"
+_C.MODEL.FPN.NORM = ""
+
+# Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
+_C.MODEL.FPN.FUSE_TYPE = "sum"
+
+
+# ---------------------------------------------------------------------------- #
+# Proposal generator options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.PROPOSAL_GENERATOR = CN()
+# Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
+_C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
+# Proposal height and width both need to be greater than MIN_SIZE
+# (at the scale used during training or inference)
+_C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
+
+
+# ---------------------------------------------------------------------------- #
+# Anchor generator options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ANCHOR_GENERATOR = CN()
+# The generator can be any name in the ANCHOR_GENERATOR registry
+_C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
+# Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
+# Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
+# IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
+# When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
+_C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
+# Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
+# ratios are generated by an anchor generator.
+# Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
+# to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
+# or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used
+# for all IN_FEATURES.
+_C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
+# Anchor angles.
+# list[list[float]], the angle in degrees, for each input feature map.
+# ANGLES[i] specifies the list of angles for IN_FEATURES[i].
+_C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
+# Relative offset between the center of the first anchor and the top-left corner of the image
+# Value has to be in [0, 1). Recommend to use 0.5, which means half stride.
+# The value is not expected to affect model accuracy.
+_C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
+
+# ---------------------------------------------------------------------------- #
+# RPN options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RPN = CN()
+_C.MODEL.RPN.HEAD_NAME = "StandardRPNHead" # used by RPN_HEAD_REGISTRY
+
+# Names of the input feature maps to be used by RPN
+# e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
+_C.MODEL.RPN.IN_FEATURES = ["res4"]
+# Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
+# Set to -1 or a large value, e.g. 100000, to disable pruning anchors
+_C.MODEL.RPN.BOUNDARY_THRESH = -1
+# IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
+# Minimum overlap required between an anchor and ground-truth box for the
+# (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
+# ==> positive RPN example: 1)
+# Maximum overlap allowed between an anchor and ground-truth box for the
+# (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD
+# ==> negative RPN example: 0)
+# Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
+# are ignored (-1)
+_C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
+_C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
+# Number of regions per image used to train RPN
+_C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
+# Target fraction of foreground (positive) examples per RPN minibatch
+_C.MODEL.RPN.POSITIVE_FRACTION = 0.5
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
+_C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
+# Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
+_C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
+_C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
+_C.MODEL.RPN.LOSS_WEIGHT = 1.0
+# Number of top scoring RPN proposals to keep before applying NMS
+# When FPN is used, this is *per FPN level* (not total)
+_C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
+_C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
+# Number of top scoring RPN proposals to keep after applying NMS
+# When FPN is used, this limit is applied per level and then again to the union
+# of proposals from all levels
+# NOTE: When FPN is used, the meaning of this config is different from Detectron1.
+# It means per-batch topk in Detectron1, but per-image topk here.
+# See the "find_top_rpn_proposals" function for details.
+_C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
+_C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
+# NMS threshold used on RPN proposals
+_C.MODEL.RPN.NMS_THRESH = 0.7
+# Set this to -1 to use the same number of output channels as input channels.
+_C.MODEL.RPN.CONV_DIMS = [-1]
+
+# ---------------------------------------------------------------------------- #
+# ROI HEADS options
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_HEADS = CN()
+_C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
+# Number of foreground classes
+_C.MODEL.ROI_HEADS.NUM_CLASSES = 80
+# Names of the input feature maps to be used by ROI heads
+# Currently all heads (box, mask, ...) use the same input feature map list
+# e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
+_C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
+# IOU overlap ratios [IOU_THRESHOLD]
+# Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
+# Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
+_C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
+_C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
+# RoI minibatch size *per image* (number of regions of interest [ROIs]) during training
+# Total number of RoIs per training minibatch =
+# ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
+# E.g., a common configuration is: 512 * 16 = 8192
+_C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
+# Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
+_C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
+
+# Only used in test mode
+
+# Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
+# balance obtaining high recall with not having too many low-precision
+# detections that will slow down inference post-processing steps (like NMS)
+# A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
+# inference.
+_C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
+# Overlap threshold used for non-maximum suppression (suppress boxes with
+# IoU >= this threshold)
+_C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
+# If True, augment proposals with ground-truth boxes before sampling proposals to
+# train ROI heads.
+_C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
+
+# ---------------------------------------------------------------------------- #
+# Box Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_BOX_HEAD = CN()
+# The C4 model does not use the head name option.
+# Options for non-C4 models: FastRCNNConvFCHead
+_C.MODEL.ROI_BOX_HEAD.NAME = ""
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
+# The final scaling coefficient on the box regression loss, used to balance the magnitude of its
+# gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
+_C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
+# Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
+# These are empirically chosen to approximately lead to unit variance targets
+_C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
+# The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
+_C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
+_C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+_C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
+# Hidden layer dimension for FC layers in the RoI box head
+_C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
+_C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
+# Channel dimension for Conv layers in the RoI box head
+_C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
+# Normalization method for the convolution layers.
+# Options: "" (no norm), "GN", "SyncBN".
+_C.MODEL.ROI_BOX_HEAD.NORM = ""
+# Whether to use class-agnostic bbox regression
+_C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
+# If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
+_C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
+
+# Federated loss can be used to improve the training of LVIS
+_C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False
+# Sigmoid cross entropy is used with federated loss
+_C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
+# The power value applied to image_count when calculating the frequency weight
+_C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER = 0.5
+# Number of classes to keep in total
+_C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES = 50
+
+# ---------------------------------------------------------------------------- #
+# Cascaded Box Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
+# The number of cascade stages is implicitly defined by the length of the following two configs.
+_C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
+ (10.0, 10.0, 5.0, 5.0),
+ (20.0, 20.0, 10.0, 10.0),
+ (30.0, 30.0, 15.0, 15.0),
+)
+_C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
+
+
+# ---------------------------------------------------------------------------- #
+# Mask Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_MASK_HEAD = CN()
+_C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
+_C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
+_C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0 # The number of convs in the mask head
+_C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
+# Normalization method for the convolution layers.
+# Options: "" (no norm), "GN", "SyncBN".
+_C.MODEL.ROI_MASK_HEAD.NORM = ""
+# Whether to use class-agnostic mask prediction
+_C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+
+# ---------------------------------------------------------------------------- #
+# Keypoint Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.ROI_KEYPOINT_HEAD = CN()
+_C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
+_C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
+_C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17 # 17 is the number of keypoints in COCO.
+
+# Images with too few (or no) keypoints are excluded from training.
+_C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
+# Normalize by the total number of visible keypoints in the minibatch if True.
+# Otherwise, normalize by the total number of keypoints that could ever exist
+# in the minibatch.
+# The keypoint softmax loss is only calculated on visible keypoints.
+# Since the number of visible keypoints can vary significantly between
+# minibatches, this has the effect of up-weighting the importance of
+# minibatches with few visible keypoints. (Imagine the extreme case of
+# only one visible keypoint versus N: in the case of N, each one
+# contributes 1/N to the gradient compared to the single keypoint
+# determining the gradient direction). Instead, we can normalize the
+# loss by the total number of keypoints, if it were the case that all
+# keypoints were visible in a full minibatch. (Returning to the example,
+# this means that the one visible keypoint contributes as much as each
+# of the N keypoints.)
+_C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
+# Multi-task loss weight to use for keypoints
+# Recommended values:
+# - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
+# - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
+_C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
+# Type of pooling operation applied to the incoming feature map for each RoI
+_C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+# ---------------------------------------------------------------------------- #
+# Semantic Segmentation Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.SEM_SEG_HEAD = CN()
+_C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
+_C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
+# Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
+# the corresponding pixel.
+_C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
+# Number of classes in the semantic segmentation head
+_C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
+# Number of channels in the 3x3 convs inside semantic-FPN heads.
+_C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
+# Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
+_C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
+# Normalization method for the convolution layers. Options: "" (no norm), "GN".
+_C.MODEL.SEM_SEG_HEAD.NORM = "GN"
+_C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
+
+_C.MODEL.PANOPTIC_FPN = CN()
+# Scaling of all losses from instance detection / segmentation head.
+_C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
+
+# options when combining instance & semantic segmentation outputs
+_C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True}) # "COMBINE.ENABLED" is deprecated & not used
+_C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
+_C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
+_C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
+
+
+# ---------------------------------------------------------------------------- #
+# RetinaNet Head
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RETINANET = CN()
+
+# This is the number of foreground classes.
+_C.MODEL.RETINANET.NUM_CLASSES = 80
+
+_C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
+
+# Convolutions to use in the cls and bbox tower
+# NOTE: this doesn't include the last conv for logits
+_C.MODEL.RETINANET.NUM_CONVS = 4
+
+# IoU overlap ratio [bg, fg] for labeling anchors.
+# Anchors with < bg are labeled negative (0)
+# Anchors with >= bg and < fg are ignored (-1)
+# Anchors with >= fg are labeled positive (1)
+_C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
+_C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
+
+# Prior prob for rare case (i.e. foreground) at the beginning of training.
+# This is used to set the bias for the logits layer of the classifier subnet.
+# This improves training stability in the case of heavy class imbalance.
+_C.MODEL.RETINANET.PRIOR_PROB = 0.01
+
+# Inference cls score threshold: only anchors with score > SCORE_THRESH_TEST are
+# considered for inference (to improve speed)
+_C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
+# Select topk candidates before NMS
+_C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
+_C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
+
+# Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
+_C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+# Loss parameters
+_C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
+_C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
+_C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
+# Options are: "smooth_l1", "giou", "diou", "ciou"
+_C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
+
+# One of BN, SyncBN, FrozenBN, GN
+# Only supports GN until unshared norm is implemented
+_C.MODEL.RETINANET.NORM = ""
+
+
+# ---------------------------------------------------------------------------- #
+# ResNe[X]t options (ResNets = {ResNet, ResNeXt})
+# Note that parts of a resnet may be used for both the backbone and the head
+# These options apply to both
+# ---------------------------------------------------------------------------- #
+_C.MODEL.RESNETS = CN()
+
+_C.MODEL.RESNETS.DEPTH = 50
+_C.MODEL.RESNETS.OUT_FEATURES = ["res4"] # res4 for C4 backbone, res2..5 for FPN backbone
+
+# Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
+_C.MODEL.RESNETS.NUM_GROUPS = 1
+
+# Options: "FrozenBN", "GN", "SyncBN", "BN"
+_C.MODEL.RESNETS.NORM = "FrozenBN"
+
+# Baseline width of each group.
+# Scaling this parameter will scale the width of all bottleneck layers.
+_C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
+
+# Place the stride 2 conv on the 1x1 filter
+# Use True only for the original MSRA ResNet; use False for C2 and Torch models
+_C.MODEL.RESNETS.STRIDE_IN_1X1 = True
+
+# Apply dilation in stage "res5"
+_C.MODEL.RESNETS.RES5_DILATION = 1
+
+# Output width of res2. Scaling this parameter will scale the width of all 1x1 convs in ResNet
+# For R18 and R34, this needs to be set to 64
+_C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
+_C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
+
+# Apply Deformable Convolution in stages
+# Specify whether to apply deform_conv in Res2, Res3, Res4, Res5
+_C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
+# Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
+# Use False for DeformableV1.
+_C.MODEL.RESNETS.DEFORM_MODULATED = False
+# Number of groups in deformable conv.
+_C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
+
+
+# ---------------------------------------------------------------------------- #
+# Solver
+# ---------------------------------------------------------------------------- #
+_C.SOLVER = CN()
+
+# Options: WarmupMultiStepLR, WarmupCosineLR.
+# See detectron2/solver/build.py for definition.
+_C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
+
+_C.SOLVER.MAX_ITER = 40000
+
+_C.SOLVER.BASE_LR = 0.001
+# The end lr, only used by WarmupCosineLR
+_C.SOLVER.BASE_LR_END = 0.0
+
+_C.SOLVER.MOMENTUM = 0.9
+
+_C.SOLVER.NESTEROV = False
+
+_C.SOLVER.WEIGHT_DECAY = 0.0001
+# The weight decay that's applied to parameters of normalization layers
+# (typically the affine transformation)
+_C.SOLVER.WEIGHT_DECAY_NORM = 0.0
+
+_C.SOLVER.GAMMA = 0.1
+# The iteration numbers at which to decrease the learning rate by GAMMA.
+_C.SOLVER.STEPS = (30000,)
+# Number of decays in WarmupStepWithFixedGammaLR schedule
+_C.SOLVER.NUM_DECAYS = 3
+
+_C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
+_C.SOLVER.WARMUP_ITERS = 1000
+_C.SOLVER.WARMUP_METHOD = "linear"
+# Whether to rescale the interval for the learning schedule after warmup
+_C.SOLVER.RESCALE_INTERVAL = False
+
+# Save a checkpoint every this many iterations
+_C.SOLVER.CHECKPOINT_PERIOD = 5000
+
+# Number of images per batch across all machines. This is also the number
+# of training images per step (i.e. per iteration). If we use 16 GPUs
+# and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
+# May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
+_C.SOLVER.IMS_PER_BATCH = 16
+
+# The reference number of workers (GPUs) this config is meant to train with.
+# It has no effect when set to 0.
+# With a non-zero value, it will be used by DefaultTrainer to compute a desired
+# per-worker batch size, and then scale the other related configs (total batch size,
+# learning rate, etc) to match the per-worker batch size.
+# See documentation of `DefaultTrainer.auto_scale_workers` for details.
+_C.SOLVER.REFERENCE_WORLD_SIZE = 0
+
+# Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
+# biases. This is not useful (at least for recent models). You should avoid
+# changing these and they exist only to reproduce Detectron v1 training if
+# desired.
+_C.SOLVER.BIAS_LR_FACTOR = 1.0
+_C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY
+
+# Gradient clipping
+_C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
+# Type of gradient clipping, currently 2 values are supported:
+# - "value": the absolute values of elements of each gradients are clipped
+# - "norm": the norm of the gradient for each parameter is clipped thus
+# affecting all elements in the parameter
+_C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
+# Maximum absolute value used for clipping gradients
+_C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
+# Floating point number p for L-p norm to be used with the "norm"
+# gradient clipping type; for L-inf, please specify .inf
+_C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
+
+# Enable automatic mixed precision for training
+# Note that this does not change the model's inference behavior.
+# To use AMP in inference, run inference under autocast()
+_C.SOLVER.AMP = CN({"ENABLED": False})
+
+# ---------------------------------------------------------------------------- #
+# Specific test options
+# ---------------------------------------------------------------------------- #
+_C.TEST = CN()
+# For end-to-end tests to verify the expected accuracy.
+# Each item is [task, metric, value, tolerance]
+# e.g.: [['bbox', 'AP', 38.5, 0.2]]
+_C.TEST.EXPECTED_RESULTS = []
+# The period (in terms of steps) to evaluate the model during training.
+# Set to 0 to disable.
+_C.TEST.EVAL_PERIOD = 0
+# The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
+# When empty, it will use the defaults in COCO.
+# Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+_C.TEST.KEYPOINT_OKS_SIGMAS = []
+# Maximum number of detections to return per image during inference (100 is
+# based on the limit established for the COCO dataset).
+_C.TEST.DETECTIONS_PER_IMAGE = 100
+
+_C.TEST.AUG = CN({"ENABLED": False})
+_C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
+_C.TEST.AUG.MAX_SIZE = 4000
+_C.TEST.AUG.FLIP = True
+
+_C.TEST.PRECISE_BN = CN({"ENABLED": False})
+_C.TEST.PRECISE_BN.NUM_ITER = 200
+
+# ---------------------------------------------------------------------------- #
+# Misc options
+# ---------------------------------------------------------------------------- #
+# Directory where output files are written
+_C.OUTPUT_DIR = "./output"
+# Set seed to negative to fully randomize everything.
+# Set seed to positive to use a fixed seed. Note that a fixed seed increases
+# reproducibility but does not guarantee fully deterministic behavior.
+# Disabling all parallelism further increases reproducibility.
+_C.SEED = -1
+# Benchmark different cudnn algorithms.
+# If input images have very different sizes, this option will have large overhead
+# for about 10k iterations. It usually hurts total time, but can benefit certain models.
+# If input images have the same or similar sizes, benchmark is often helpful.
+_C.CUDNN_BENCHMARK = False
+# The period (in terms of steps) for minibatch visualization at train time.
+# Set to 0 to disable.
+_C.VIS_PERIOD = 0
+
+# The global config is for quick hack purposes.
+# You can set its values from the command line or config files,
+# and access them with:
+#
+# from detectron2.config import global_cfg
+# print(global_cfg.HACK)
+#
+# Do not commit any configs into it.
+_C.GLOBAL = CN()
+_C.GLOBAL.HACK = 1.0
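+
+
+# ---------------------------------------------------------------------------- #
+# Usage sketch (illustrative only; assumes the standard `get_cfg()` helper is
+# exposed by this package's config module, as in upstream detectron2):
+#
+#   from detectron2.config import get_cfg
+#
+#   cfg = get_cfg()                        # clone of the `_C` defaults defined above
+#   cfg.merge_from_file("my_config.yaml")  # hypothetical yaml overriding some defaults
+#   cfg.merge_from_list(["SOLVER.BASE_LR", 0.01, "SOLVER.MAX_ITER", 90000])
+#   cfg.freeze()
+# ---------------------------------------------------------------------------- #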
diff --git a/detectron2/config/instantiate.py b/detectron2/config/instantiate.py
new file mode 100644
index 0000000000000000000000000000000000000000..05ee2c7d21c9bf3e56a0a8e98447d2587b4b8fed
--- /dev/null
+++ b/detectron2/config/instantiate.py
@@ -0,0 +1,88 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import collections.abc as abc
+import dataclasses
+import logging
+from typing import Any
+
+from detectron2.utils.registry import _convert_target_to_string, locate
+
+__all__ = ["dump_dataclass", "instantiate"]
+
+
+def dump_dataclass(obj: Any):
+ """
+ Dump a dataclass recursively into a dict that can be later instantiated.
+
+ Args:
+ obj: a dataclass object
+
+ Returns:
+ dict
+ """
+ assert dataclasses.is_dataclass(obj) and not isinstance(
+ obj, type
+ ), "dump_dataclass() requires an instance of a dataclass."
+ ret = {"_target_": _convert_target_to_string(type(obj))}
+ for f in dataclasses.fields(obj):
+ v = getattr(obj, f.name)
+ if dataclasses.is_dataclass(v):
+ v = dump_dataclass(v)
+ if isinstance(v, (list, tuple)):
+ v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
+ ret[f.name] = v
+ return ret
+
+
+def instantiate(cfg):
+ """
+ Recursively instantiate objects defined in dictionaries by
+ "_target_" and arguments.
+
+ Args:
+ cfg: a dict-like object with "_target_" that defines the caller, and
+ other keys that define the arguments
+
+ Returns:
+ object instantiated by cfg
+ """
+ from omegaconf import ListConfig, DictConfig, OmegaConf
+
+ if isinstance(cfg, ListConfig):
+ lst = [instantiate(x) for x in cfg]
+ return ListConfig(lst, flags={"allow_objects": True})
+ if isinstance(cfg, list):
+ # Specialize for list, because many classes take
+ # list[objects] as arguments, such as ResNet, DatasetMapper
+ return [instantiate(x) for x in cfg]
+
+ # If input is a DictConfig backed by dataclasses (i.e. omegaconf's structured config),
+ # instantiate it to the actual dataclass.
+ if isinstance(cfg, DictConfig) and dataclasses.is_dataclass(cfg._metadata.object_type):
+ return OmegaConf.to_object(cfg)
+
+ if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
+ # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
+ # but faster: https://github.com/facebookresearch/hydra/issues/1200
+ cfg = {k: instantiate(v) for k, v in cfg.items()}
+ cls = cfg.pop("_target_")
+ cls = instantiate(cls)
+
+ if isinstance(cls, str):
+ cls_name = cls
+ cls = locate(cls_name)
+ assert cls is not None, cls_name
+ else:
+ try:
+ cls_name = cls.__module__ + "." + cls.__qualname__
+ except Exception:
+ # target could be anything, so the above could fail
+ cls_name = str(cls)
+ assert callable(cls), f"_target_ {cls} does not define a callable object"
+ try:
+ return cls(**cfg)
+ except TypeError:
+ logger = logging.getLogger(__name__)
+ logger.error(f"Error when instantiating {cls_name}!")
+ raise
+    return cfg  # return as-is if we don't know what to do
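+
+
+# Usage sketch (illustrative only; assumes `LazyCall` and `instantiate` are re-exported
+# from detectron2.config, as in upstream detectron2):
+#
+#   from detectron2.config import LazyCall, instantiate
+#
+#   cfg = LazyCall(dict)(a=1, b=LazyCall(list)())  # nested "_target_" descriptions
+#   obj = instantiate(cfg)                         # -> {"a": 1, "b": []}
+#
+# dump_dataclass() goes the other way: it turns a dataclass instance into such a
+# "_target_"-style dict that instantiate() can later rebuild.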
diff --git a/detectron2/config/lazy.py b/detectron2/config/lazy.py
new file mode 100644
index 0000000000000000000000000000000000000000..ea93e865acce31de07af476f95454d62128a9d1c
--- /dev/null
+++ b/detectron2/config/lazy.py
@@ -0,0 +1,436 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import ast
+import builtins
+import collections.abc as abc
+import importlib
+import inspect
+import logging
+import os
+import uuid
+from contextlib import contextmanager
+from copy import deepcopy
+from dataclasses import is_dataclass
+from typing import List, Tuple, Union
+import cloudpickle
+import yaml
+from omegaconf import DictConfig, ListConfig, OmegaConf, SCMode
+
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.registry import _convert_target_to_string
+
+__all__ = ["LazyCall", "LazyConfig"]
+
+
+class LazyCall:
+ """
+    Wrap a callable so that when it's called, the call is not executed;
+    instead, a dict that describes the call is returned.
+
+    A LazyCall object has to be called with only keyword arguments. Positional
+ arguments are not yet supported.
+
+ Examples:
+ ::
+ from detectron2.config import instantiate, LazyCall
+
+ layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
+ layer_cfg.out_channels = 64 # can edit it afterwards
+ layer = instantiate(layer_cfg)
+ """
+
+ def __init__(self, target):
+ if not (callable(target) or isinstance(target, (str, abc.Mapping))):
+ raise TypeError(
+ f"target of LazyCall must be a callable or defines a callable! Got {target}"
+ )
+ self._target = target
+
+ def __call__(self, **kwargs):
+ if is_dataclass(self._target):
+ # omegaconf object cannot hold dataclass type
+ # https://github.com/omry/omegaconf/issues/784
+ target = _convert_target_to_string(self._target)
+ else:
+ target = self._target
+ kwargs["_target_"] = target
+
+ return DictConfig(content=kwargs, flags={"allow_objects": True})
+
+
+def _visit_dict_config(cfg, func):
+ """
+ Apply func recursively to all DictConfig in cfg.
+ """
+ if isinstance(cfg, DictConfig):
+ func(cfg)
+ for v in cfg.values():
+ _visit_dict_config(v, func)
+ elif isinstance(cfg, ListConfig):
+ for v in cfg:
+ _visit_dict_config(v, func)
+
+
+def _validate_py_syntax(filename):
+ # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
+ with PathManager.open(filename, "r") as f:
+ content = f.read()
+ try:
+ ast.parse(content)
+ except SyntaxError as e:
+ raise SyntaxError(f"Config file {filename} has syntax error!") from e
+
+
+def _cast_to_config(obj):
+ # if given a dict, return DictConfig instead
+ if isinstance(obj, dict):
+ return DictConfig(obj, flags={"allow_objects": True})
+ return obj
+
+
+_CFG_PACKAGE_NAME = "detectron2._cfg_loader"
+"""
+A namespace to put all imported config into.
+"""
+
+
+def _random_package_name(filename):
+ # generate a random package name when loading config files
+ return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)
+
+
+@contextmanager
+def _patch_import():
+ """
+ Enhance relative import statements in config files, so that they:
+ 1. locate files purely based on relative location, regardless of packages.
+       e.g. you can import a file without having __init__
+    2. do not cache modules globally; modifications of module state have no side effect
+    3. support other storage systems through PathManager, so config files can be in the cloud
+    4. imported dicts are turned into omegaconf.DictConfig automatically
+ """
+ old_import = builtins.__import__
+
+ def find_relative_file(original_file, relative_import_path, level):
+ # NOTE: "from . import x" is not handled. Because then it's unclear
+ # if such import should produce `x` as a python module or DictConfig.
+ # This can be discussed further if needed.
+ relative_import_err = """
+Relative import of directories is not allowed within config files.
+Within a config file, relative import can only import other config files.
+""".replace(
+ "\n", " "
+ )
+ if not len(relative_import_path):
+ raise ImportError(relative_import_err)
+
+ cur_file = os.path.dirname(original_file)
+ for _ in range(level - 1):
+ cur_file = os.path.dirname(cur_file)
+ cur_name = relative_import_path.lstrip(".")
+ for part in cur_name.split("."):
+ cur_file = os.path.join(cur_file, part)
+ if not cur_file.endswith(".py"):
+ cur_file += ".py"
+ if not PathManager.isfile(cur_file):
+ cur_file_no_suffix = cur_file[: -len(".py")]
+ if PathManager.isdir(cur_file_no_suffix):
+ raise ImportError(f"Cannot import from {cur_file_no_suffix}." + relative_import_err)
+ else:
+ raise ImportError(
+ f"Cannot import name {relative_import_path} from "
+ f"{original_file}: {cur_file} does not exist."
+ )
+ return cur_file
+
+ def new_import(name, globals=None, locals=None, fromlist=(), level=0):
+ if (
+ # Only deal with relative imports inside config files
+ level != 0
+ and globals is not None
+ and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
+ ):
+ cur_file = find_relative_file(globals["__file__"], name, level)
+ _validate_py_syntax(cur_file)
+ spec = importlib.machinery.ModuleSpec(
+ _random_package_name(cur_file), None, origin=cur_file
+ )
+ module = importlib.util.module_from_spec(spec)
+ module.__file__ = cur_file
+ with PathManager.open(cur_file) as f:
+ content = f.read()
+ exec(compile(content, cur_file, "exec"), module.__dict__)
+ for name in fromlist: # turn imported dict into DictConfig automatically
+ val = _cast_to_config(module.__dict__[name])
+ module.__dict__[name] = val
+ return module
+ return old_import(name, globals, locals, fromlist=fromlist, level=level)
+
+ builtins.__import__ = new_import
+ yield new_import
+ builtins.__import__ = old_import
+
+
+class LazyConfig:
+ """
+    Provide methods to save, load, and override an omegaconf config object
+    which may contain definitions of lazily-constructed objects.
+ """
+
+ @staticmethod
+ def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+ """
+ Similar to :meth:`load()`, but load path relative to the caller's
+ source file.
+
+ This has the same functionality as a relative import, except that this method
+ accepts filename as a string, so more characters are allowed in the filename.
+ """
+ caller_frame = inspect.stack()[1]
+ caller_fname = caller_frame[0].f_code.co_filename
+ assert caller_fname != "", "load_rel Unable to find caller"
+ caller_dir = os.path.dirname(caller_fname)
+ filename = os.path.join(caller_dir, filename)
+ return LazyConfig.load(filename, keys)
+
+ @staticmethod
+ def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+ """
+ Load a config file.
+
+ Args:
+ filename: absolute path or relative path w.r.t. the current working directory
+ keys: keys to load and return. If not given, return all keys
+ (whose values are config objects) in a dict.
+ """
+ has_keys = keys is not None
+ filename = filename.replace("/./", "/") # redundant
+ if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
+ raise ValueError(f"Config file {filename} has to be a python or yaml file.")
+ if filename.endswith(".py"):
+ _validate_py_syntax(filename)
+
+ with _patch_import():
+ # Record the filename
+ module_namespace = {
+ "__file__": filename,
+ "__package__": _random_package_name(filename),
+ }
+ with PathManager.open(filename) as f:
+ content = f.read()
+ # Compile first with filename to:
+            # 1. make filename appear in the stacktrace
+ # 2. make load_rel able to find its parent's (possibly remote) location
+ exec(compile(content, filename, "exec"), module_namespace)
+
+ ret = module_namespace
+ else:
+ with PathManager.open(filename) as f:
+ obj = yaml.unsafe_load(f)
+ ret = OmegaConf.create(obj, flags={"allow_objects": True})
+
+ if has_keys:
+ if isinstance(keys, str):
+ return _cast_to_config(ret[keys])
+ else:
+ return tuple(_cast_to_config(ret[a]) for a in keys)
+ else:
+ if filename.endswith(".py"):
+ # when not specified, only load those that are config objects
+ ret = DictConfig(
+ {
+ name: _cast_to_config(value)
+ for name, value in ret.items()
+ if isinstance(value, (DictConfig, ListConfig, dict))
+ and not name.startswith("_")
+ },
+ flags={"allow_objects": True},
+ )
+ return ret
+
+ @staticmethod
+ def save(cfg, filename: str):
+ """
+ Save a config object to a yaml file.
+ Note that when the config dictionary contains complex objects (e.g. lambda),
+ it can't be saved to yaml. In that case we will print an error and
+ attempt to save to a pkl file instead.
+
+ Args:
+ cfg: an omegaconf config object
+ filename: yaml file name to save the config file
+ """
+ logger = logging.getLogger(__name__)
+ try:
+ cfg = deepcopy(cfg)
+ except Exception:
+ pass
+ else:
+ # if it's deep-copyable, then...
+ def _replace_type_by_name(x):
+ if "_target_" in x and callable(x._target_):
+ try:
+ x._target_ = _convert_target_to_string(x._target_)
+ except AttributeError:
+ pass
+
+            # not necessary, but makes the yaml look nicer
+ _visit_dict_config(cfg, _replace_type_by_name)
+
+ save_pkl = False
+ try:
+            cfg_dict = OmegaConf.to_container(
+                cfg,
+                # Do not resolve interpolation when saving, i.e. do not turn ${a} into
+                # actual values when saving.
+                resolve=False,
+                # Save structures (dataclasses) in a format that can be instantiated later.
+                # Without this option, the type information of the dataclass will be erased.
+                structured_config_mode=SCMode.INSTANTIATE,
+            )
+            dumped = yaml.dump(cfg_dict, default_flow_style=None, allow_unicode=True, width=9999)
+ with PathManager.open(filename, "w") as f:
+ f.write(dumped)
+
+ try:
+ _ = yaml.unsafe_load(dumped) # test that it is loadable
+ except Exception:
+ logger.warning(
+ "The config contains objects that cannot serialize to a valid yaml. "
+ f"{filename} is human-readable but cannot be loaded."
+ )
+ save_pkl = True
+ except Exception:
+ logger.exception("Unable to serialize the config to yaml. Error:")
+ save_pkl = True
+
+ if save_pkl:
+ new_filename = filename + ".pkl"
+ try:
+ # retry by pickle
+ with PathManager.open(new_filename, "wb") as f:
+ cloudpickle.dump(cfg, f)
+ logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
+ except Exception:
+ pass
+
+ @staticmethod
+ def apply_overrides(cfg, overrides: List[str]):
+ """
+ In-place override contents of cfg.
+
+ Args:
+ cfg: an omegaconf config object
+ overrides: list of strings in the format of "a=b" to override configs.
+ See https://hydra.cc/docs/next/advanced/override_grammar/basic/
+ for syntax.
+
+ Returns:
+ the cfg object
+ """
+
+ def safe_update(cfg, key, value):
+ parts = key.split(".")
+ for idx in range(1, len(parts)):
+ prefix = ".".join(parts[:idx])
+ v = OmegaConf.select(cfg, prefix, default=None)
+ if v is None:
+ break
+ if not OmegaConf.is_config(v):
+ raise KeyError(
+ f"Trying to update key {key}, but {prefix} "
+ f"is not a config, but has type {type(v)}."
+ )
+ OmegaConf.update(cfg, key, value, merge=True)
+
+ try:
+ from hydra.core.override_parser.overrides_parser import OverridesParser
+
+ has_hydra = True
+ except ImportError:
+ has_hydra = False
+
+ if has_hydra:
+ parser = OverridesParser.create()
+ overrides = parser.parse_overrides(overrides)
+ for o in overrides:
+ key = o.key_or_group
+ value = o.value()
+ if o.is_delete():
+ # TODO support this
+ raise NotImplementedError("deletion is not yet a supported override")
+ safe_update(cfg, key, value)
+ else:
+ # Fallback. Does not support all the features and error checking like hydra.
+ for o in overrides:
+ key, value = o.split("=")
+ try:
+ value = eval(value, {})
+ except NameError:
+ pass
+ safe_update(cfg, key, value)
+ return cfg
+
+ @staticmethod
+ def to_py(cfg, prefix: str = "cfg."):
+ """
+        Try to convert a config object into Python-like pseudo code.
+
+ Note that perfect conversion is not always possible. So the returned
+ results are mainly meant to be human-readable, and not meant to be executed.
+
+ Args:
+ cfg: an omegaconf config object
+ prefix: root name for the resulting code (default: "cfg.")
+
+ Returns:
+ str of formatted Python code
+ """
+ import black
+
+ cfg = OmegaConf.to_container(cfg, resolve=True)
+
+ def _to_str(obj, prefix=None, inside_call=False):
+ if prefix is None:
+ prefix = []
+ if isinstance(obj, abc.Mapping) and "_target_" in obj:
+ # Dict representing a function call
+ target = _convert_target_to_string(obj.pop("_target_"))
+ args = []
+ for k, v in sorted(obj.items()):
+ args.append(f"{k}={_to_str(v, inside_call=True)}")
+ args = ", ".join(args)
+ call = f"{target}({args})"
+ return "".join(prefix) + call
+ elif isinstance(obj, abc.Mapping) and not inside_call:
+            # A dict that is not inside a call holds top-level config objects, which we
+            # render as one entry per line with dot-separated prefixes
+ key_list = []
+ for k, v in sorted(obj.items()):
+ if isinstance(v, abc.Mapping) and "_target_" not in v:
+ key_list.append(_to_str(v, prefix=prefix + [k + "."]))
+ else:
+ key = "".join(prefix) + k
+ key_list.append(f"{key}={_to_str(v)}")
+ return "\n".join(key_list)
+ elif isinstance(obj, abc.Mapping):
+ # Dict that is inside a call is rendered as a regular dict
+ return (
+ "{"
+ + ",".join(
+ f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
+ for k, v in sorted(obj.items())
+ )
+ + "}"
+ )
+ elif isinstance(obj, list):
+ return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
+ else:
+ return repr(obj)
+
+ py_str = _to_str(cfg, prefix=[prefix])
+ try:
+ return black.format_str(py_str, mode=black.Mode())
+ except black.InvalidInput:
+ return py_str
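+
+
+# Usage sketch (illustrative only; the config file and keys below are hypothetical):
+#
+#   from detectron2.config import LazyConfig, instantiate
+#
+#   cfg = LazyConfig.load("configs/my_experiment.py")  # a python or yaml config file
+#   cfg = LazyConfig.apply_overrides(cfg, ["train.max_iter=90000"])
+#   LazyConfig.save(cfg, "output/config.yaml")         # falls back to .pkl when not yaml-able
+#   model = instantiate(cfg.model)                     # build whatever cfg.model lazily describes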
diff --git a/detectron2/data/__init__.py b/detectron2/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..259f669b78bd05815cb8d3351fd6c5fc9a1b85a1
--- /dev/null
+++ b/detectron2/data/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from . import transforms # isort:skip
+
+from .build import (
+ build_batch_data_loader,
+ build_detection_test_loader,
+ build_detection_train_loader,
+ get_detection_dataset_dicts,
+ load_proposals_into_dataset,
+ print_instances_class_histogram,
+)
+from .catalog import DatasetCatalog, MetadataCatalog, Metadata
+from .common import DatasetFromList, MapDataset, ToIterableDataset
+from .dataset_mapper import DatasetMapper
+
+# ensure the builtin datasets are registered
+from . import datasets, samplers # isort:skip
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/detectron2/data/benchmark.py b/detectron2/data/benchmark.py
new file mode 100644
index 0000000000000000000000000000000000000000..ac2f372a4b111ad40b8e720adea208608271bab6
--- /dev/null
+++ b/detectron2/data/benchmark.py
@@ -0,0 +1,225 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+from itertools import count
+from typing import List, Tuple
+import torch
+import tqdm
+from fvcore.common.timer import Timer
+
+from detectron2.utils import comm
+
+from .build import build_batch_data_loader
+from .common import DatasetFromList, MapDataset
+from .samplers import TrainingSampler
+
+logger = logging.getLogger(__name__)
+
+
+class _EmptyMapDataset(torch.utils.data.Dataset):
+ """
+ Map anything to emptiness.
+ """
+
+ def __init__(self, dataset):
+ self.ds = dataset
+
+ def __len__(self):
+ return len(self.ds)
+
+ def __getitem__(self, idx):
+ _ = self.ds[idx]
+ return [0]
+
+
+def iter_benchmark(
+ iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60
+) -> Tuple[float, List[float]]:
+ """
+    Benchmark an iterator/iterable for `num_iter` iterations, preceded by
+    `warmup` warmup iterations.
+ End early if `max_time_seconds` time is spent on iterations.
+
+ Returns:
+ float: average time (seconds) per iteration
+ list[float]: time spent on each iteration. Sometimes useful for further analysis.
+ """
+ num_iter, warmup = int(num_iter), int(warmup)
+
+ iterator = iter(iterator)
+ for _ in range(warmup):
+ next(iterator)
+ timer = Timer()
+ all_times = []
+ for curr_iter in tqdm.trange(num_iter):
+ start = timer.seconds()
+ if start > max_time_seconds:
+ num_iter = curr_iter
+ break
+ next(iterator)
+ all_times.append(timer.seconds() - start)
+ avg = timer.seconds() / num_iter
+ return avg, all_times
+
+
+class DataLoaderBenchmark:
+ """
+    Some common benchmarks that help understand the perf bottlenecks of a standard
+    dataloader made of a dataset, mapper and sampler.
+ """
+
+ def __init__(
+ self,
+ dataset,
+ *,
+ mapper,
+ sampler=None,
+ total_batch_size,
+ num_workers=0,
+ max_time_seconds: int = 90,
+ ):
+ """
+ Args:
+            max_time_seconds (int): maximum time to spend on each benchmark
+ other args: same as in `build.py:build_detection_train_loader`
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False, serialize=True)
+ if sampler is None:
+ sampler = TrainingSampler(len(dataset))
+
+ self.dataset = dataset
+ self.mapper = mapper
+ self.sampler = sampler
+ self.total_batch_size = total_batch_size
+ self.num_workers = num_workers
+ self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size()
+
+ self.max_time_seconds = max_time_seconds
+
+ def _benchmark(self, iterator, num_iter, warmup, msg=None):
+ avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds)
+ if msg is not None:
+ self._log_time(msg, avg, all_times)
+ return avg, all_times
+
+ def _log_time(self, msg, avg, all_times, distributed=False):
+ percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]]
+ if not distributed:
+ logger.info(
+ f"{msg}: avg={1.0/avg:.1f} it/s, "
+ f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
+ f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
+ )
+ return
+ avg_per_gpu = comm.all_gather(avg)
+ percentiles_per_gpu = comm.all_gather(percentiles)
+ if comm.get_rank() > 0:
+ return
+ for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu):
+ logger.info(
+ f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, "
+ f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
+ f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
+ )
+
+ def benchmark_dataset(self, num_iter, warmup=5):
+ """
+ Benchmark the speed of taking raw samples from the dataset.
+ """
+
+ def loader():
+ while True:
+ for k in self.sampler:
+ yield self.dataset[k]
+
+ self._benchmark(loader(), num_iter, warmup, "Dataset Alone")
+
+ def benchmark_mapper(self, num_iter, warmup=5):
+ """
+        Benchmark the speed of taking raw samples from the dataset and mapping
+        them in a single process.
+ """
+
+ def loader():
+ while True:
+ for k in self.sampler:
+ yield self.mapper(self.dataset[k])
+
+ self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)")
+
+ def benchmark_workers(self, num_iter, warmup=10):
+ """
+ Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers].
+ """
+ candidates = [0, 1]
+ if self.num_workers not in candidates:
+ candidates.append(self.num_workers)
+
+ dataset = MapDataset(self.dataset, self.mapper)
+ for n in candidates:
+ loader = build_batch_data_loader(
+ dataset,
+ self.sampler,
+ self.total_batch_size,
+ num_workers=n,
+ )
+ self._benchmark(
+ iter(loader),
+ num_iter * max(n, 1),
+ warmup * max(n, 1),
+ f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})",
+ )
+ del loader
+
+ def benchmark_IPC(self, num_iter, warmup=10):
+ """
+ Benchmark the dataloader where each worker outputs nothing. This
+ eliminates the IPC overhead compared to the regular dataloader.
+
+ PyTorch multiprocessing's IPC only optimizes for torch tensors.
+        Large numpy arrays or other data structures may incur large IPC overhead.
+ """
+ n = self.num_workers
+ dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper))
+ loader = build_batch_data_loader(
+ dataset, self.sampler, self.total_batch_size, num_workers=n
+ )
+ self._benchmark(
+ iter(loader),
+ num_iter * max(n, 1),
+ warmup * max(n, 1),
+ f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm",
+ )
+
+ def benchmark_distributed(self, num_iter, warmup=10):
+ """
+ Benchmark the dataloader in each distributed worker, and log results of
+ all workers. This helps understand the final performance as well as
+        the variance among workers.
+
+ It also prints startup time (first iter) of the dataloader.
+ """
+ gpu = comm.get_world_size()
+ dataset = MapDataset(self.dataset, self.mapper)
+ n = self.num_workers
+ loader = build_batch_data_loader(
+ dataset, self.sampler, self.total_batch_size, num_workers=n
+ )
+
+ timer = Timer()
+ loader = iter(loader)
+ next(loader)
+ startup_time = timer.seconds()
+ logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time))
+
+ comm.synchronize()
+
+ avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1))
+ del loader
+ self._log_time(
+ f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})",
+ avg,
+ all_times,
+ True,
+ )
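+
+
+# Usage sketch (illustrative only; the dataset name is hypothetical, and DatasetMapper /
+# get_cfg are assumed to behave as in upstream detectron2):
+#
+#   from detectron2.config import get_cfg
+#   from detectron2.data import DatasetCatalog, DatasetMapper
+#
+#   dicts = DatasetCatalog.get("my_dataset_train")
+#   mapper = DatasetMapper(get_cfg(), is_train=True)
+#   bm = DataLoaderBenchmark(dicts, mapper=mapper, total_batch_size=2, num_workers=2)
+#   bm.benchmark_dataset(100)
+#   bm.benchmark_mapper(100)
+#   bm.benchmark_workers(100)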
diff --git a/detectron2/data/build.py b/detectron2/data/build.py
new file mode 100644
index 0000000000000000000000000000000000000000..42867687e329c67f37cb7c5e938fd44150500d20
--- /dev/null
+++ b/detectron2/data/build.py
@@ -0,0 +1,678 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import logging
+import numpy as np
+import operator
+import pickle
+from collections import OrderedDict, defaultdict
+from typing import Any, Callable, Dict, List, Optional, Union
+import torch
+import torch.utils.data as torchdata
+from tabulate import tabulate
+from termcolor import colored
+
+from detectron2.config import configurable
+from detectron2.structures import BoxMode
+from detectron2.utils.comm import get_world_size
+from detectron2.utils.env import seed_all_rng
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import _log_api_usage, log_first_n
+
+from .catalog import DatasetCatalog, MetadataCatalog
+from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
+from .dataset_mapper import DatasetMapper
+from .detection_utils import check_metadata_consistency
+from .samplers import (
+ InferenceSampler,
+ RandomSubsetTrainingSampler,
+ RepeatFactorTrainingSampler,
+ TrainingSampler,
+)
+
+"""
+This file contains the default logic to build a dataloader for training or testing.
+"""
+
+__all__ = [
+ "build_batch_data_loader",
+ "build_detection_train_loader",
+ "build_detection_test_loader",
+ "get_detection_dataset_dicts",
+ "load_proposals_into_dataset",
+ "print_instances_class_histogram",
+]
+
+
+def filter_images_with_only_crowd_annotations(dataset_dicts):
+ """
+    Filter out images with no annotations or only crowd annotations
+ (i.e., images without non-crowd annotations).
+ A common training-time preprocessing on COCO dataset.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
+
+ Returns:
+ list[dict]: the same format, but filtered.
+ """
+ num_before = len(dataset_dicts)
+
+ def valid(anns):
+ for ann in anns:
+ if ann.get("iscrowd", 0) == 0:
+ return True
+ return False
+
+ dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
+ num_after = len(dataset_dicts)
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Removed {} images with no usable annotations. {} images left.".format(
+ num_before - num_after, num_after
+ )
+ )
+ return dataset_dicts
+
+
+def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
+ """
+    Filter out images with too few keypoints.
+
+ Args:
+        dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
+        min_keypoints_per_image (int): minimum number of visible keypoints an image
+            must contain to be kept.
+
+ Returns:
+ list[dict]: the same format as dataset_dicts, but filtered.
+ """
+ num_before = len(dataset_dicts)
+
+ def visible_keypoints_in_image(dic):
+ # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
+ annotations = dic["annotations"]
+ return sum(
+ (np.array(ann["keypoints"][2::3]) > 0).sum()
+ for ann in annotations
+ if "keypoints" in ann
+ )
+
+ dataset_dicts = [
+ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
+ ]
+ num_after = len(dataset_dicts)
+ logger = logging.getLogger(__name__)
+ logger.info(
+ "Removed {} images with fewer than {} keypoints.".format(
+ num_before - num_after, min_keypoints_per_image
+ )
+ )
+ return dataset_dicts
+
+
+def load_proposals_into_dataset(dataset_dicts, proposal_file):
+ """
+ Load precomputed object proposals into the dataset.
+
+ The proposal file should be a pickled dict with the following keys:
+
+ - "ids": list[int] or list[str], the image ids
+ - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
+ - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
+ corresponding to the boxes.
+ - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
+ proposal_file (str): file path of pre-computed proposals, in pkl format.
+
+ Returns:
+ list[dict]: the same format as dataset_dicts, but added proposal field.
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Loading proposals from: {}".format(proposal_file))
+
+ with PathManager.open(proposal_file, "rb") as f:
+ proposals = pickle.load(f, encoding="latin1")
+
+ # Rename the key names in D1 proposal files
+ rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
+ for key in rename_keys:
+ if key in proposals:
+ proposals[rename_keys[key]] = proposals.pop(key)
+
+ # Fetch the indexes of all proposals that are in the dataset
+ # Convert image_id to str since they could be int.
+    img_ids = {str(record["image_id"]) for record in dataset_dicts}
+ id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
+
+ # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
+ bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
+
+ for record in dataset_dicts:
+ # Get the index of the proposal
+ i = id_to_index[str(record["image_id"])]
+
+ boxes = proposals["boxes"][i]
+ objectness_logits = proposals["objectness_logits"][i]
+ # Sort the proposals in descending order of the scores
+ inds = objectness_logits.argsort()[::-1]
+ record["proposal_boxes"] = boxes[inds]
+ record["proposal_objectness_logits"] = objectness_logits[inds]
+ record["proposal_bbox_mode"] = bbox_mode
+
+ return dataset_dicts
+
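+# Illustrative sketch of the expected proposal-file layout (all values are made up):
+#
+#   import pickle
+#   import numpy as np
+#
+#   proposals = {
+#       "ids": [42],                                              # image ids in the dataset
+#       "boxes": [np.array([[0, 0, 50, 80]], dtype=np.float32)],  # one Nx4 array per image
+#       "objectness_logits": [np.array([0.9], dtype=np.float32)], # one N-array per image
+#       "bbox_mode": BoxMode.XYXY_ABS,                            # optional; this is the default
+#   }
+#   with open("proposals.pkl", "wb") as f:
+#       pickle.dump(proposals, f)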
+
+def print_instances_class_histogram(dataset_dicts, class_names):
+ """
+ Args:
+ dataset_dicts (list[dict]): list of dataset dicts.
+ class_names (list[str]): list of class names (zero-indexed).
+ """
+ num_classes = len(class_names)
+ hist_bins = np.arange(num_classes + 1)
+ histogram = np.zeros((num_classes,), dtype=int)
+ for entry in dataset_dicts:
+ annos = entry["annotations"]
+ classes = np.asarray(
+ [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=int
+ )
+ if len(classes):
+ assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
+ assert (
+ classes.max() < num_classes
+ ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
+ histogram += np.histogram(classes, bins=hist_bins)[0]
+
+ N_COLS = min(6, len(class_names) * 2)
+
+ def short_name(x):
+ # make long class names shorter. useful for lvis
+ if len(x) > 13:
+ return x[:11] + ".."
+ return x
+
+ data = list(
+ itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
+ )
+ total_num_instances = sum(data[1::2])
+ data.extend([None] * (N_COLS - (len(data) % N_COLS)))
+ if num_classes > 1:
+ data.extend(["total", total_num_instances])
+ data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
+ table = tabulate(
+ data,
+ headers=["category", "#instances"] * (N_COLS // 2),
+ tablefmt="pipe",
+ numalign="left",
+ stralign="center",
+ )
+ log_first_n(
+ logging.INFO,
+ "Distribution of instances among all {} categories:\n".format(num_classes)
+ + colored(table, "cyan"),
+ key="message",
+ )
+
+
+def get_detection_dataset_dicts(
+ names,
+ filter_empty=True,
+ min_keypoints=0,
+ proposal_files=None,
+ check_consistency=True,
+):
+ """
+ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
+
+ Args:
+ names (str or list[str]): a dataset name or a list of dataset names
+ filter_empty (bool): whether to filter out images without instance annotations
+ min_keypoints (int): filter out images with fewer keypoints than
+ `min_keypoints`. Set to 0 to do nothing.
+ proposal_files (list[str]): if given, a list of object proposal files
+ that match each dataset in `names`.
+ check_consistency (bool): whether to check if datasets have consistent metadata.
+
+ Returns:
+ list[dict]: a list of dicts following the standard dataset dict format.
+ """
+ if isinstance(names, str):
+ names = [names]
+ assert len(names), names
+
+ available_datasets = DatasetCatalog.keys()
+ names_set = set(names)
+ if not names_set.issubset(available_datasets):
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ "The following dataset names are not registered in the DatasetCatalog: "
+ f"{names_set - available_datasets}. "
+ f"Available datasets are {available_datasets}"
+ )
+
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
+
+ if isinstance(dataset_dicts[0], torchdata.Dataset):
+ if len(dataset_dicts) > 1:
+ # ConcatDataset does not work for iterable style dataset.
+ # We could support concat for iterable as well, but it's often
+ # not a good idea to concat iterables anyway.
+ return torchdata.ConcatDataset(dataset_dicts)
+ return dataset_dicts[0]
+
+ for dataset_name, dicts in zip(names, dataset_dicts):
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
+
+ if proposal_files is not None:
+ assert len(names) == len(proposal_files)
+ # load precomputed proposals from proposal files
+ dataset_dicts = [
+ load_proposals_into_dataset(dataset_i_dicts, proposal_file)
+ for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
+ ]
+
+ dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
+
+ has_instances = "annotations" in dataset_dicts[0]
+ if filter_empty and has_instances:
+ dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
+ if min_keypoints > 0 and has_instances:
+ dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
+
+ if check_consistency and has_instances:
+ try:
+ class_names = MetadataCatalog.get(names[0]).thing_classes
+ check_metadata_consistency("thing_classes", names)
+ print_instances_class_histogram(dataset_dicts, class_names)
+ except AttributeError: # class names are not available for this dataset
+ pass
+
+ assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
+ return dataset_dicts
+
+
+def build_batch_data_loader(
+ dataset,
+ sampler,
+ total_batch_size,
+ *,
+ aspect_ratio_grouping=False,
+ num_workers=0,
+ collate_fn=None,
+ drop_last: bool = True,
+ single_gpu_batch_size=None,
+ seed=None,
+ **kwargs,
+):
+ """
+ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
+ 1. support aspect ratio grouping options
+ 2. use no "batch collation", because this is common for detection training
+
+ Args:
+ dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
+ Must be provided iff. ``dataset`` is a map-style dataset.
+ total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
+ :func:`build_detection_train_loader`.
+ single_gpu_batch_size: You can specify either `single_gpu_batch_size` or `total_batch_size`.
+ `single_gpu_batch_size` specifies the batch size that will be used for each gpu/process.
+ `total_batch_size` allows you to specify the total aggregate batch size across gpus.
+ It is an error to supply a value for both.
+ drop_last (bool): if ``True``, the dataloader will drop incomplete batches.
+
+ Returns:
+ iterable[list]. Length of each list is the batch size of the current
+ GPU. Each element in the list comes from the dataset.
+ """
+ if single_gpu_batch_size:
+ if total_batch_size:
+ raise ValueError(
+ """total_batch_size and single_gpu_batch_size are mutually incompatible.
+ Please specify only one. """
+ )
+ batch_size = single_gpu_batch_size
+ else:
+ world_size = get_world_size()
+ assert (
+ total_batch_size > 0 and total_batch_size % world_size == 0
+ ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
+ total_batch_size, world_size
+ )
+ batch_size = total_batch_size // world_size
+ logger = logging.getLogger(__name__)
+ logger.info("Making batched data loader with batch_size=%d", batch_size)
+
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ dataset = ToIterableDataset(dataset, sampler, shard_chunk_size=batch_size)
+
+ generator = None
+ if seed is not None:
+ generator = torch.Generator()
+ generator.manual_seed(seed)
+
+ if aspect_ratio_grouping:
+ assert drop_last, "Aspect ratio grouping will drop incomplete batches."
+ data_loader = torchdata.DataLoader(
+ dataset,
+ num_workers=num_workers,
+ collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
+ worker_init_fn=worker_init_reset_seed,
+ generator=generator,
+ **kwargs
+ ) # yield individual mapped dict
+ data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
+ if collate_fn is None:
+ return data_loader
+ return MapDataset(data_loader, collate_fn)
+ else:
+ return torchdata.DataLoader(
+ dataset,
+ batch_size=batch_size,
+ drop_last=drop_last,
+ num_workers=num_workers,
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
+ worker_init_fn=worker_init_reset_seed,
+ generator=generator,
+ **kwargs
+ )
+
+
+def _get_train_datasets_repeat_factors(cfg) -> Dict[str, float]:
+ repeat_factors = cfg.DATASETS.TRAIN_REPEAT_FACTOR
+ assert all(len(tup) == 2 for tup in repeat_factors)
+ name_to_weight = defaultdict(lambda: 1, dict(repeat_factors))
+ # The sampling weights map should only contain datasets in train config
+ unrecognized = set(name_to_weight.keys()) - set(cfg.DATASETS.TRAIN)
+ assert not unrecognized, f"unrecognized datasets: {unrecognized}"
+ logger = logging.getLogger(__name__)
+ logger.info(f"Found repeat factors: {list(name_to_weight.items())}")
+
+ # pyre-fixme[7]: Expected `Dict[str, float]` but got `DefaultDict[typing.Any, int]`.
+ return name_to_weight
+
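+# Example (an illustrative sketch, not a config shipped with the code):
+# ``cfg.DATASETS.TRAIN_REPEAT_FACTOR`` is a list of (dataset_name, weight) pairs, e.g.
+# cfg.DATASETS.TRAIN_REPEAT_FACTOR = [("coco_2017_train", 1.0), ("my_rare_dataset", 3.0)]
+# Every name listed here must also appear in cfg.DATASETS.TRAIN; datasets that are not
+# listed default to a repeat weight of 1.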
+
+def _build_weighted_sampler(cfg, enable_category_balance=False):
+ dataset_repeat_factors = _get_train_datasets_repeat_factors(cfg)
+ # OrderedDict to guarantee order of values() consistent with repeat factors
+ dataset_name_to_dicts = OrderedDict(
+ {
+ name: get_detection_dataset_dicts(
+ [name],
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+ if cfg.MODEL.KEYPOINT_ON
+ else 0,
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN
+ if cfg.MODEL.LOAD_PROPOSALS
+ else None,
+ )
+ for name in cfg.DATASETS.TRAIN
+ }
+ )
+ # Repeat factor for every sample in the dataset
+ repeat_factors = [
+ [dataset_repeat_factors[dsname]] * len(dataset_name_to_dicts[dsname])
+ for dsname in cfg.DATASETS.TRAIN
+ ]
+
+ repeat_factors = list(itertools.chain.from_iterable(repeat_factors))
+
+ repeat_factors = torch.tensor(repeat_factors)
+ logger = logging.getLogger(__name__)
+ if enable_category_balance:
+ """
+ 1. Calculate repeat factors using category frequency for each dataset and then merge them.
+ 2. Multiply the dataset-frequency repeat factors element-wise with the
+ category-frequency repeat factors to obtain the final repeat factors.
+ """
+ category_repeat_factors = [
+ RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
+ dataset_dict, cfg.DATALOADER.REPEAT_THRESHOLD
+ )
+ for dataset_dict in dataset_name_to_dicts.values()
+ ]
+ # flatten the category repeat factors from all datasets
+ category_repeat_factors = list(itertools.chain.from_iterable(category_repeat_factors))
+ category_repeat_factors = torch.tensor(category_repeat_factors)
+ repeat_factors = torch.mul(category_repeat_factors, repeat_factors)
+ repeat_factors = repeat_factors / torch.min(repeat_factors)
+ logger.info(
+ "Using WeightedCategoryTrainingSampler with repeat_factors={}".format(
+ cfg.DATASETS.TRAIN_REPEAT_FACTOR
+ )
+ )
+ else:
+ logger.info(
+ "Using WeightedTrainingSampler with repeat_factors={}".format(
+ cfg.DATASETS.TRAIN_REPEAT_FACTOR
+ )
+ )
+
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
+ return sampler
+
+
+def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
+ if dataset is None:
+ dataset = get_detection_dataset_dicts(
+ cfg.DATASETS.TRAIN,
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
+ if cfg.MODEL.KEYPOINT_ON
+ else 0,
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
+ )
+ _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
+
+ if mapper is None:
+ mapper = DatasetMapper(cfg, True)
+
+ if sampler is None:
+ sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
+ logger = logging.getLogger(__name__)
+ if isinstance(dataset, torchdata.IterableDataset):
+ logger.info("Not using any sampler since the dataset is IterableDataset.")
+ sampler = None
+ else:
+ logger.info("Using training sampler {}".format(sampler_name))
+ if sampler_name == "TrainingSampler":
+ sampler = TrainingSampler(len(dataset))
+ elif sampler_name == "RepeatFactorTrainingSampler":
+ repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
+ dataset, cfg.DATALOADER.REPEAT_THRESHOLD
+ )
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
+ elif sampler_name == "RandomSubsetTrainingSampler":
+ sampler = RandomSubsetTrainingSampler(
+ len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO
+ )
+ elif sampler_name == "WeightedTrainingSampler":
+ sampler = _build_weighted_sampler(cfg)
+ elif sampler_name == "WeightedCategoryTrainingSampler":
+ sampler = _build_weighted_sampler(cfg, enable_category_balance=True)
+ else:
+ raise ValueError("Unknown training sampler: {}".format(sampler_name))
+
+ return {
+ "dataset": dataset,
+ "sampler": sampler,
+ "mapper": mapper,
+ "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
+ "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
+ }
+
+
+@configurable(from_config=_train_loader_from_config)
+def build_detection_train_loader(
+ dataset,
+ *,
+ mapper,
+ sampler=None,
+ total_batch_size,
+ aspect_ratio_grouping=True,
+ num_workers=0,
+ collate_fn=None,
+ **kwargs
+):
+ """
+ Build a dataloader for object detection with some default features.
+
+ Args:
+ dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
+ or a pytorch dataset (either map-style or iterable). It can be obtained
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
+ mapper (callable): a callable which takes a sample (dict) from dataset and
+ returns the format to be consumed by the model.
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
+ indices to be applied on ``dataset``.
+ If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
+ which coordinates an infinite random shuffle sequence across all workers.
+ Sampler must be None if ``dataset`` is iterable.
+ total_batch_size (int): total batch size across all workers.
+ aspect_ratio_grouping (bool): whether to group images with similar
+ aspect ratio for efficiency. When enabled, it requires each
+ element in the dataset to be a dict with keys "width" and "height".
+ num_workers (int): number of parallel data loading workers
+ collate_fn: a function that determines how to do batching, same as the argument of
+ `torch.utils.data.DataLoader`. Defaults to no collation, i.e. returning a list of
+ data. No collation is OK for small batch sizes and simple data structures.
+ If your batch size is large and each sample contains too many small tensors,
+ it's more efficient to collate them in the data loader.
+
+ Returns:
+ torch.utils.data.DataLoader:
+ a dataloader. Each output from it is a ``list[mapped_element]`` whose length is
+ the per-GPU batch size (``total_batch_size`` divided by the number of GPUs/processes),
+ where ``mapped_element`` is produced by the ``mapper``.
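+
+ Examples (an illustrative sketch; "my_train" is a placeholder registered dataset):
+ ::
+ data_loader = build_detection_train_loader(
+ get_detection_dataset_dicts("my_train"),
+ mapper=DatasetMapper(cfg, is_train=True),
+ total_batch_size=16)
+
+ # or, instantiate with a CfgNode:
+ data_loader = build_detection_train_loader(cfg)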
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False)
+ if mapper is not None:
+ dataset = MapDataset(dataset, mapper)
+
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ if sampler is None:
+ sampler = TrainingSampler(len(dataset))
+ assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
+ return build_batch_data_loader(
+ dataset,
+ sampler,
+ total_batch_size,
+ aspect_ratio_grouping=aspect_ratio_grouping,
+ num_workers=num_workers,
+ collate_fn=collate_fn,
+ **kwargs
+ )
+
+
+def _test_loader_from_config(cfg, dataset_name, mapper=None):
+ """
+ Uses the given `dataset_name` argument (instead of the names in cfg), because the
+ standard practice is to evaluate each test set individually (not combining them).
+ """
+ if isinstance(dataset_name, str):
+ dataset_name = [dataset_name]
+
+ dataset = get_detection_dataset_dicts(
+ dataset_name,
+ filter_empty=False,
+ proposal_files=[
+ cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
+ ]
+ if cfg.MODEL.LOAD_PROPOSALS
+ else None,
+ )
+ if mapper is None:
+ mapper = DatasetMapper(cfg, False)
+ return {
+ "dataset": dataset,
+ "mapper": mapper,
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
+ "sampler": InferenceSampler(len(dataset))
+ if not isinstance(dataset, torchdata.IterableDataset)
+ else None,
+ }
+
+
+@configurable(from_config=_test_loader_from_config)
+def build_detection_test_loader(
+ dataset: Union[List[Any], torchdata.Dataset],
+ *,
+ mapper: Callable[[Dict[str, Any]], Any],
+ sampler: Optional[torchdata.Sampler] = None,
+ batch_size: int = 1,
+ num_workers: int = 0,
+ collate_fn: Optional[Callable[[List[Any]], Any]] = None,
+) -> torchdata.DataLoader:
+ """
+ Similar to `build_detection_train_loader`, with default batch size = 1,
+ and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
+ to produce the exact set of all samples.
+
+ Args:
+ dataset: a list of dataset dicts,
+ or a pytorch dataset (either map-style or iterable). They can be obtained
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
+ mapper: a callable which takes a sample (dict) from dataset
+ and returns the format to be consumed by the model.
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
+ sampler: a sampler that produces
+ indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
+ which splits the dataset across all workers. Sampler must be None
+ if `dataset` is iterable.
+ batch_size: the batch size of the data loader to be created.
+ Defaults to 1 image per worker since this is the standard when reporting
+ inference time in papers.
+ num_workers: number of parallel data loading workers
+ collate_fn: same as the argument of `torch.utils.data.DataLoader`.
+ Defaults to no collation, i.e. returning a list of data.
+
+ Returns:
+ DataLoader: a torch DataLoader, that loads the given detection
+ dataset, with test-time transformation and batching.
+
+ Examples:
+ ::
+ data_loader = build_detection_test_loader(
+ DatasetRegistry.get("my_test"),
+ mapper=DatasetMapper(...))
+
+ # or, instantiate with a CfgNode:
+ data_loader = build_detection_test_loader(cfg, "my_test")
+ """
+ if isinstance(dataset, list):
+ dataset = DatasetFromList(dataset, copy=False)
+ if mapper is not None:
+ dataset = MapDataset(dataset, mapper)
+ if isinstance(dataset, torchdata.IterableDataset):
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
+ else:
+ if sampler is None:
+ sampler = InferenceSampler(len(dataset))
+ return torchdata.DataLoader(
+ dataset,
+ batch_size=batch_size,
+ sampler=sampler,
+ drop_last=False,
+ num_workers=num_workers,
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
+ )
+
+
+def trivial_batch_collator(batch):
+ """
+ A batch collator that does nothing.
+ """
+ return batch
+
+
+def worker_init_reset_seed(worker_id):
+ initial_seed = torch.initial_seed() % 2**31
+ seed_all_rng(initial_seed + worker_id)
diff --git a/detectron2/data/catalog.py b/detectron2/data/catalog.py
new file mode 100644
index 0000000000000000000000000000000000000000..45c110c19508f23921b9033cdaf0aa8056f0c125
--- /dev/null
+++ b/detectron2/data/catalog.py
@@ -0,0 +1,236 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import types
+from collections import UserDict
+from typing import List
+
+from detectron2.utils.logger import log_first_n
+
+__all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
+
+
+class _DatasetCatalog(UserDict):
+ """
+ A global dictionary that stores information about the datasets and how to obtain them.
+
+ It contains a mapping from strings
+ (which are names that identify a dataset, e.g. "coco_2014_train")
+ to a function which parses the dataset and returns the samples in the
+ format of `list[dict]`.
+
+ The returned dicts should be in Detectron2 Dataset format (see DATASETS.md for details)
+ if used with the data loader functionalities in `data/build.py` and `data/detection_transform.py`.
+
+ The purpose of having this catalog is to make it easy to choose
+ different datasets, by just using the strings in the config.
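+
+ Examples (an illustrative sketch; "my_dataset" and ``load_my_dataset`` are placeholders):
+ ::
+ def load_my_dataset():
+ return [{"file_name": "a.jpg", "height": 480, "width": 640, "annotations": []}]
+
+ DatasetCatalog.register("my_dataset", load_my_dataset)
+ dicts = DatasetCatalog.get("my_dataset")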
+ """
+
+ def register(self, name, func):
+ """
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+ func (callable): a callable which takes no arguments and returns a list of dicts.
+ It must return the same results if called multiple times.
+ """
+ assert callable(func), "You must register a function with `DatasetCatalog.register`!"
+ assert name not in self, "Dataset '{}' is already registered!".format(name)
+ self[name] = func
+
+ def get(self, name):
+ """
+ Call the registered function and return its results.
+
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+
+ Returns:
+ list[dict]: dataset annotations.
+ """
+ try:
+ f = self[name]
+ except KeyError as e:
+ raise KeyError(
+ "Dataset '{}' is not registered! Available datasets are: {}".format(
+ name, ", ".join(list(self.keys()))
+ )
+ ) from e
+ return f()
+
+ def list(self) -> List[str]:
+ """
+ List all registered datasets.
+
+ Returns:
+ list[str]
+ """
+ return list(self.keys())
+
+ def remove(self, name):
+ """
+ Alias of ``pop``.
+ """
+ self.pop(name)
+
+ def __str__(self):
+ return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
+
+ __repr__ = __str__
+
+
+DatasetCatalog = _DatasetCatalog()
+DatasetCatalog.__doc__ = (
+ _DatasetCatalog.__doc__
+ + """
+ .. automethod:: detectron2.data.catalog.DatasetCatalog.register
+ .. automethod:: detectron2.data.catalog.DatasetCatalog.get
+"""
+)
+
+
+class Metadata(types.SimpleNamespace):
+ """
+ A class that supports simple attribute setter/getter.
+ It is intended for storing metadata of a dataset and making it accessible globally.
+
+ Examples:
+ ::
+ # somewhere when you load the data:
+ MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
+
+ # somewhere when you print statistics or visualize:
+ classes = MetadataCatalog.get("mydataset").thing_classes
+ """
+
+ # the name of the dataset
+ # set default to N/A so that `self.name` in the errors will not trigger getattr again
+ name: str = "N/A"
+
+ _RENAMED = {
+ "class_names": "thing_classes",
+ "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
+ "stuff_class_names": "stuff_classes",
+ }
+
+ def __getattr__(self, key):
+ if key in self._RENAMED:
+ log_first_n(
+ logging.WARNING,
+ "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+ n=10,
+ )
+ return getattr(self, self._RENAMED[key])
+
+ # "name" exists in every metadata
+ if len(self.__dict__) > 1:
+ raise AttributeError(
+ "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
+ "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
+ )
+ else:
+ raise AttributeError(
+ f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
+ "metadata is empty."
+ )
+
+ def __setattr__(self, key, val):
+ if key in self._RENAMED:
+ log_first_n(
+ logging.WARNING,
+ "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+ n=10,
+ )
+ setattr(self, self._RENAMED[key], val)
+
+ # Ensure that metadata of the same name stays consistent
+ try:
+ oldval = getattr(self, key)
+ assert oldval == val, (
+ "Attribute '{}' in the metadata of '{}' cannot be set "
+ "to a different value!\n{} != {}".format(key, self.name, oldval, val)
+ )
+ except AttributeError:
+ super().__setattr__(key, val)
+
+ def as_dict(self):
+ """
+ Returns all the metadata as a dict.
+ Note that modifications to the returned dict will not be reflected in the Metadata object.
+ """
+ return copy.copy(self.__dict__)
+
+ def set(self, **kwargs):
+ """
+ Set multiple metadata with kwargs.
+ """
+ for k, v in kwargs.items():
+ setattr(self, k, v)
+ return self
+
+ def get(self, key, default=None):
+ """
+ Access an attribute and return its value if it exists.
+ Otherwise return default.
+ """
+ try:
+ return getattr(self, key)
+ except AttributeError:
+ return default
+
+
+class _MetadataCatalog(UserDict):
+ """
+ MetadataCatalog is a global dictionary that provides access to
+ :class:`Metadata` of a given dataset.
+
+ The metadata associated with a certain name is a singleton: once created, the
+ metadata will stay alive and will be returned by future calls to ``get(name)``.
+
+ It's like global variables, so don't abuse it.
+ It's meant for storing knowledge that's constant and shared across the execution
+ of the program, e.g.: the class names in COCO.
+ """
+
+ def get(self, name):
+ """
+ Args:
+ name (str): name of a dataset (e.g. coco_2014_train).
+
+ Returns:
+ Metadata: The :class:`Metadata` instance associated with this name,
+ or create an empty one if none is available.
+ """
+ assert len(name)
+ r = super().get(name, None)
+ if r is None:
+ r = self[name] = Metadata(name=name)
+ return r
+
+ def list(self):
+ """
+ List all registered metadata.
+
+ Returns:
+ list[str]: keys (names of datasets) of all registered metadata
+ """
+ return list(self.keys())
+
+ def remove(self, name):
+ """
+ Alias of ``pop``.
+ """
+ self.pop(name)
+
+ def __str__(self):
+ return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
+
+ __repr__ = __str__
+
+
+MetadataCatalog = _MetadataCatalog()
+MetadataCatalog.__doc__ = (
+ _MetadataCatalog.__doc__
+ + """
+ .. automethod:: detectron2.data.catalog.MetadataCatalog.get
+"""
+)
diff --git a/detectron2/data/common.py b/detectron2/data/common.py
new file mode 100644
index 0000000000000000000000000000000000000000..38770597093360c46fe72719a8d2ec428874aed4
--- /dev/null
+++ b/detectron2/data/common.py
@@ -0,0 +1,339 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import copy
+import itertools
+import logging
+import numpy as np
+import pickle
+import random
+from typing import Callable, Union
+import torch
+import torch.utils.data as data
+from torch.utils.data.sampler import Sampler
+
+from detectron2.utils.serialize import PicklableWrapper
+
+__all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
+
+logger = logging.getLogger(__name__)
+
+
+# copied from: https://docs.python.org/3/library/itertools.html#recipes
+def _roundrobin(*iterables):
+ "roundrobin('ABC', 'D', 'EF') --> A D E B F C"
+ # Recipe credited to George Sakkis
+ num_active = len(iterables)
+ nexts = itertools.cycle(iter(it).__next__ for it in iterables)
+ while num_active:
+ try:
+ for next in nexts:
+ yield next()
+ except StopIteration:
+ # Remove the iterator we just exhausted from the cycle.
+ num_active -= 1
+ nexts = itertools.cycle(itertools.islice(nexts, num_active))
+
+
+def _shard_iterator_dataloader_worker(iterable, chunk_size=1):
+ # Shard the iterable if we're currently inside pytorch dataloader worker.
+ worker_info = data.get_worker_info()
+ if worker_info is None or worker_info.num_workers == 1:
+ # do nothing
+ yield from iterable
+ else:
+ # worker0: 0, 1, ..., chunk_size-1, num_workers*chunk_size, num_workers*chunk_size+1, ...
+ # worker1: chunk_size, chunk_size+1, ...
+ # worker2: 2*chunk_size, 2*chunk_size+1, ...
+ # ...
+ yield from _roundrobin(
+ *[
+ itertools.islice(
+ iterable,
+ worker_info.id * chunk_size + chunk_i,
+ None,
+ worker_info.num_workers * chunk_size,
+ )
+ for chunk_i in range(chunk_size)
+ ]
+ )
+
+
+class _MapIterableDataset(data.IterableDataset):
+ """
+ Map a function over elements in an IterableDataset.
+
+ Similar to pytorch's MapIterDataPipe, but supports filtering when map_func
+ returns None.
+
+ This class is not public-facing. Will be called by `MapDataset`.
+ """
+
+ def __init__(self, dataset, map_func):
+ self._dataset = dataset
+ self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
+
+ def __len__(self):
+ return len(self._dataset)
+
+ def __iter__(self):
+ for x in map(self._map_func, self._dataset):
+ if x is not None:
+ yield x
+
+
+class MapDataset(data.Dataset):
+ """
+ Map a function over the elements in a dataset.
+ """
+
+ def __init__(self, dataset, map_func):
+ """
+ Args:
+ dataset: a dataset where map function is applied. Can be either
+ map-style or iterable dataset. When given an iterable dataset,
+ the returned object will also be an iterable dataset.
+ map_func: a callable which maps the element in dataset. map_func can
+ return None to skip the data (e.g. in case of errors).
+ How None is handled depends on the style of `dataset`.
+ If `dataset` is map-style, it randomly tries other elements.
+ If `dataset` is iterable, it skips the data and tries the next.
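+
+ Example (an illustrative sketch; ``drop_odd`` is a hypothetical map function):
+ ::
+ def drop_odd(x):
+ return x * 10 if x % 2 == 0 else None # None -> this element is skipped/retried
+
+ ds = MapDataset(DatasetFromList([0, 1, 2, 3], serialize=False), drop_odd)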
+ """
+ self._dataset = dataset
+ self._map_func = PicklableWrapper(map_func) # wrap so that a lambda will work
+
+ self._rng = random.Random(42)
+ self._fallback_candidates = set(range(len(dataset)))
+
+ def __new__(cls, dataset, map_func):
+ is_iterable = isinstance(dataset, data.IterableDataset)
+ if is_iterable:
+ return _MapIterableDataset(dataset, map_func)
+ else:
+ return super().__new__(cls)
+
+ def __getnewargs__(self):
+ return self._dataset, self._map_func
+
+ def __len__(self):
+ return len(self._dataset)
+
+ def __getitem__(self, idx):
+ retry_count = 0
+ cur_idx = int(idx)
+
+ while True:
+ data = self._map_func(self._dataset[cur_idx])
+ if data is not None:
+ self._fallback_candidates.add(cur_idx)
+ return data
+
+ # _map_func fails for this idx, use a random new index from the pool
+ retry_count += 1
+ self._fallback_candidates.discard(cur_idx)
+ cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
+
+ if retry_count >= 3:
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
+ idx, retry_count
+ )
+ )
+
+
+class _TorchSerializedList:
+ """
+ A list-like object whose items are serialized and stored in a torch tensor. When
+ launching a process that uses TorchSerializedList with "fork" start method,
+ the subprocess can read the same buffer without triggering copy-on-access. When
+ launching a process that uses TorchSerializedList with "spawn/forkserver" start
+ method, the list will be pickled by a special ForkingPickler registered by PyTorch
+ that moves data to shared memory. In both cases, this allows parent and child
+ processes to share RAM for the list data, hence avoids the issue in
+ https://github.com/pytorch/pytorch/issues/13246.
+
+ See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/
+ on how it works.
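+
+ Example (an illustrative sketch):
+ ::
+ lst = _TorchSerializedList([{"a": 1}, {"b": 2}])
+ assert len(lst) == 2 and lst[1] == {"b": 2}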
+ """
+
+ def __init__(self, lst: list):
+ self._lst = lst
+
+ def _serialize(data):
+ buffer = pickle.dumps(data, protocol=-1)
+ return np.frombuffer(buffer, dtype=np.uint8)
+
+ logger.info(
+ "Serializing {} elements to byte tensors and concatenating them all ...".format(
+ len(self._lst)
+ )
+ )
+ self._lst = [_serialize(x) for x in self._lst]
+ self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
+ self._addr = torch.from_numpy(np.cumsum(self._addr))
+ self._lst = torch.from_numpy(np.concatenate(self._lst))
+ logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2))
+
+ def __len__(self):
+ return len(self._addr)
+
+ def __getitem__(self, idx):
+ start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
+ end_addr = self._addr[idx].item()
+ bytes = memoryview(self._lst[start_addr:end_addr].numpy())
+
+ # @lint-ignore PYTHONPICKLEISBAD
+ return pickle.loads(bytes)
+
+
+_DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList
+
+
+@contextlib.contextmanager
+def set_default_dataset_from_list_serialize_method(new):
+ """
+ Context manager for using custom serialize function when creating DatasetFromList
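+
+ Example (an illustrative sketch; ``MySerializedList`` is a hypothetical class with the
+ same interface as ``_TorchSerializedList``):
+ ::
+ with set_default_dataset_from_list_serialize_method(MySerializedList):
+ dataset = DatasetFromList(dataset_dicts) # stored via MySerializedList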
+ """
+
+ global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+ orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+ _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new
+ yield
+ _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig
+
+
+class DatasetFromList(data.Dataset):
+ """
+ Wrap a list to a torch Dataset. It produces elements of the list as data.
+ """
+
+ def __init__(
+ self,
+ lst: list,
+ copy: bool = True,
+ serialize: Union[bool, Callable] = True,
+ ):
+ """
+ Args:
+ lst (list): a list which contains elements to produce.
+ copy (bool): whether to deepcopy the element when producing it,
+ so that the result can be modified in place without affecting the
+ source in the list.
+ serialize (bool or callable): whether to serialize the storage to another
+ backend. If `True`, the default serialize method will be used; if given
+ a callable, that callable will be used as the serialize method.
+ """
+ self._lst = lst
+ self._copy = copy
+ if not isinstance(serialize, (bool, Callable)):
+ raise TypeError(f"Unsupported type for argument `serailzie`: {serialize}")
+ self._serialize = serialize is not False
+
+ if self._serialize:
+ serialize_method = (
+ serialize
+ if isinstance(serialize, Callable)
+ else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+ )
+ logger.info(f"Serializing the dataset using: {serialize_method}")
+ self._lst = serialize_method(self._lst)
+
+ def __len__(self):
+ return len(self._lst)
+
+ def __getitem__(self, idx):
+ if self._copy and not self._serialize:
+ return copy.deepcopy(self._lst[idx])
+ else:
+ return self._lst[idx]
+
+
+class ToIterableDataset(data.IterableDataset):
+ """
+ Convert an old indices-based (also called map-style) dataset
+ to an iterable-style dataset.
+ """
+
+ def __init__(
+ self,
+ dataset: data.Dataset,
+ sampler: Sampler,
+ shard_sampler: bool = True,
+ shard_chunk_size: int = 1,
+ ):
+ """
+ Args:
+ dataset: an old-style dataset with ``__getitem__``
+ sampler: a cheap iterable that produces indices to be applied on ``dataset``.
+ shard_sampler: whether to shard the sampler based on the current pytorch data loader
+ worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple
+ workers, it is responsible for sharding its data based on worker id so that workers
+ don't produce identical data.
+
+ Most samplers (like our TrainingSampler) do not shard based on dataloader worker id
+ and this argument should be set to True. But certain samplers may already be
+ sharded, in which case this argument should be set to False.
+ shard_chunk_size: when sharding the sampler, each worker takes contiguous chunks of
+ this many indices at a time (see ``_shard_iterator_dataloader_worker``).
+ """
+ assert not isinstance(dataset, data.IterableDataset), dataset
+ assert isinstance(sampler, Sampler), sampler
+ self.dataset = dataset
+ self.sampler = sampler
+ self.shard_sampler = shard_sampler
+ self.shard_chunk_size = shard_chunk_size
+
+ def __iter__(self):
+ if not self.shard_sampler:
+ sampler = self.sampler
+ else:
+ # With map-style dataset, `DataLoader(dataset, sampler)` runs the
+ # sampler in main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
+ # will run the sampler in each of the N workers. So we should only keep 1/N of the ids on
+ # each worker. The assumption is that sampler is cheap to iterate so it's fine to
+ # discard ids in workers.
+ sampler = _shard_iterator_dataloader_worker(self.sampler, self.shard_chunk_size)
+ for idx in sampler:
+ yield self.dataset[idx]
+
+ def __len__(self):
+ return len(self.sampler)
+
+
+class AspectRatioGroupedDataset(data.IterableDataset):
+ """
+ Batch data that have similar aspect ratio together.
+ In this implementation, images whose aspect ratio < (or >) 1 will
+ be batched together.
+ This improves training speed because the images then need less padding
+ to form a batch.
+
+ It assumes the underlying dataset produces dicts with "width" and "height" keys.
+ It will then produce a list of original dicts with length = batch_size,
+ all with similar aspect ratios.
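+
+ Example (an illustrative sketch; ``mapped_iterable`` is a placeholder iterable of dicts):
+ ::
+ batched = AspectRatioGroupedDataset(mapped_iterable, batch_size=2)
+ for batch in batched:
+ ... # each ``batch`` is a list of 2 dicts with similar aspect ratios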
+ """
+
+ def __init__(self, dataset, batch_size):
+ """
+ Args:
+ dataset: an iterable. Each element must be a dict with keys
+ "width" and "height", which will be used to batch data.
+ batch_size (int):
+ """
+ self.dataset = dataset
+ self.batch_size = batch_size
+ self._buckets = [[] for _ in range(2)]
+ # Hard-coded two aspect ratio groups: w > h and w < h.
+ # Can add support for more aspect ratio groups, but doesn't seem useful
+
+ def __iter__(self):
+ for d in self.dataset:
+ w, h = d["width"], d["height"]
+ bucket_id = 0 if w > h else 1
+ bucket = self._buckets[bucket_id]
+ bucket.append(d)
+ if len(bucket) == self.batch_size:
+ data = bucket[:]
+ # Clear bucket first, because code after yield is not
+ # guaranteed to execute
+ del bucket[:]
+ yield data
diff --git a/detectron2/data/dataset_mapper.py b/detectron2/data/dataset_mapper.py
new file mode 100644
index 0000000000000000000000000000000000000000..a8714f7990f11e146a01e03d108518e0356b50c4
--- /dev/null
+++ b/detectron2/data/dataset_mapper.py
@@ -0,0 +1,191 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import numpy as np
+from typing import List, Optional, Union
+import torch
+
+from detectron2.config import configurable
+
+from . import detection_utils as utils
+from . import transforms as T
+
+"""
+This file contains the default mapping that's applied to "dataset dicts".
+"""
+
+__all__ = ["DatasetMapper"]
+
+
+class DatasetMapper:
+ """
+ A callable which takes a dataset dict in Detectron2 Dataset format,
+ and maps it into a format used by the model.
+
+ This is the default callable to be used to map your dataset dict into training data.
+ You may need to follow it to implement your own mapper for customized logic,
+ such as a different way to read or transform images.
+ See :doc:`/tutorials/data_loading` for details.
+
+ The callable currently does the following:
+
+ 1. Reads the image from "file_name"
+ 2. Applies cropping/geometric transforms to the image and annotations
+ 3. Prepares data and annotations into Tensor and :class:`Instances`
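+
+ Examples (an illustrative sketch of calling it directly on one dataset dict):
+ ::
+ mapper = DatasetMapper(cfg, is_train=True)
+ data = mapper(dataset_dict) # ``dataset_dict`` comes from DatasetCatalog.get(...)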
+ """
+
+ @configurable
+ def __init__(
+ self,
+ is_train: bool,
+ *,
+ augmentations: List[Union[T.Augmentation, T.Transform]],
+ image_format: str,
+ use_instance_mask: bool = False,
+ use_keypoint: bool = False,
+ instance_mask_format: str = "polygon",
+ keypoint_hflip_indices: Optional[np.ndarray] = None,
+ precomputed_proposal_topk: Optional[int] = None,
+ recompute_boxes: bool = False,
+ ):
+ """
+ NOTE: this interface is experimental.
+
+ Args:
+ is_train: whether it's used in training or inference
+ augmentations: a list of augmentations or deterministic transforms to apply
+ image_format: an image format supported by :func:`detection_utils.read_image`.
+ use_instance_mask: whether to process instance segmentation annotations, if available
+ use_keypoint: whether to process keypoint annotations if available
+ instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
+ masks into this format.
+ keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
+ precomputed_proposal_topk: if given, will load pre-computed
+ proposals from dataset_dict and keep the top k proposals for each image.
+ recompute_boxes: whether to overwrite bounding box annotations
+ by computing tight bounding boxes from instance mask annotations.
+ """
+ if recompute_boxes:
+ assert use_instance_mask, "recompute_boxes requires instance masks"
+ # fmt: off
+ self.is_train = is_train
+ self.augmentations = T.AugmentationList(augmentations)
+ self.image_format = image_format
+ self.use_instance_mask = use_instance_mask
+ self.instance_mask_format = instance_mask_format
+ self.use_keypoint = use_keypoint
+ self.keypoint_hflip_indices = keypoint_hflip_indices
+ self.proposal_topk = precomputed_proposal_topk
+ self.recompute_boxes = recompute_boxes
+ # fmt: on
+ logger = logging.getLogger(__name__)
+ mode = "training" if is_train else "inference"
+ logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
+
+ @classmethod
+ def from_config(cls, cfg, is_train: bool = True):
+ augs = utils.build_augmentation(cfg, is_train)
+ if cfg.INPUT.CROP.ENABLED and is_train:
+ augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
+ recompute_boxes = cfg.MODEL.MASK_ON
+ else:
+ recompute_boxes = False
+
+ ret = {
+ "is_train": is_train,
+ "augmentations": augs,
+ "image_format": cfg.INPUT.FORMAT,
+ "use_instance_mask": cfg.MODEL.MASK_ON,
+ "instance_mask_format": cfg.INPUT.MASK_FORMAT,
+ "use_keypoint": cfg.MODEL.KEYPOINT_ON,
+ "recompute_boxes": recompute_boxes,
+ }
+
+ if cfg.MODEL.KEYPOINT_ON:
+ ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
+
+ if cfg.MODEL.LOAD_PROPOSALS:
+ ret["precomputed_proposal_topk"] = (
+ cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
+ if is_train
+ else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
+ )
+ return ret
+
+ def _transform_annotations(self, dataset_dict, transforms, image_shape):
+ # USER: Modify this if you want to keep them for some reason.
+ for anno in dataset_dict["annotations"]:
+ if not self.use_instance_mask:
+ anno.pop("segmentation", None)
+ if not self.use_keypoint:
+ anno.pop("keypoints", None)
+
+ # USER: Implement additional transformations if you have other types of data
+ annos = [
+ utils.transform_instance_annotations(
+ obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+ )
+ for obj in dataset_dict.pop("annotations")
+ if obj.get("iscrowd", 0) == 0
+ ]
+ instances = utils.annotations_to_instances(
+ annos, image_shape, mask_format=self.instance_mask_format
+ )
+
+ # After transforms such as cropping are applied, the bounding box may no longer
+ # tightly bound the object. As an example, imagine a triangle object
+ # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
+ # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
+ # the intersection of original bounding box and the cropping box.
+ if self.recompute_boxes:
+ instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+ dataset_dict["instances"] = utils.filter_empty_instances(instances)
+
+ def __call__(self, dataset_dict):
+ """
+ Args:
+ dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+
+ Returns:
+ dict: a format that builtin models in detectron2 accept
+ """
+ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
+ # USER: Write your own image loading if it's not from a file
+ image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
+ utils.check_image_size(dataset_dict, image)
+
+ # USER: Remove if you don't do semantic/panoptic segmentation.
+ if "sem_seg_file_name" in dataset_dict:
+ sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
+ else:
+ sem_seg_gt = None
+
+ aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
+ transforms = self.augmentations(aug_input)
+ image, sem_seg_gt = aug_input.image, aug_input.sem_seg
+
+ image_shape = image.shape[:2] # h, w
+ # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
+ # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
+ # Therefore it's important to use torch.Tensor.
+ dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
+ if sem_seg_gt is not None:
+ dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
+
+ # USER: Remove if you don't use pre-computed proposals.
+ # Most users would not need this feature.
+ if self.proposal_topk is not None:
+ utils.transform_proposals(
+ dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
+ )
+
+ if not self.is_train:
+ # USER: Modify this if you want to keep them for some reason.
+ dataset_dict.pop("annotations", None)
+ dataset_dict.pop("sem_seg_file_name", None)
+ return dataset_dict
+
+ if "annotations" in dataset_dict:
+ self._transform_annotations(dataset_dict, transforms, image_shape)
+
+ return dataset_dict
diff --git a/detectron2/data/datasets/README.md b/detectron2/data/datasets/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..9fb3e4f7afec17137c95c78be6ef06d520ec8032
--- /dev/null
+++ b/detectron2/data/datasets/README.md
@@ -0,0 +1,9 @@
+
+
+### Common Datasets
+
+The datasets implemented here do not need to load the data into the final format.
+They should provide the minimal data structure needed to use the dataset, so loading can be very efficient.
+
+For example, for an image dataset, just provide the file names and labels, but don't read the images.
+Let the downstream code decide how to read them.
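+
+A minimal sketch (the name `my_dataset` and the dict fields shown are illustrative only):
+
+```python
+from detectron2.data import DatasetCatalog
+
+def load_my_dataset():
+    # return lightweight dicts: file names and labels, not the image contents
+    return [{"file_name": "images/0001.jpg", "height": 480, "width": 640, "annotations": []}]
+
+DatasetCatalog.register("my_dataset", load_my_dataset)
+```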
diff --git a/detectron2/data/datasets/__init__.py b/detectron2/data/datasets/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..a44bedc15e5f0e762fc4d77efd6f1b07c6ff77d0
--- /dev/null
+++ b/detectron2/data/datasets/__init__.py
@@ -0,0 +1,9 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json
+from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta
+from .pascal_voc import load_voc_instances, register_pascal_voc
+from . import builtin as _builtin # ensure the builtin datasets are registered
+
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/detectron2/data/datasets/builtin.py b/detectron2/data/datasets/builtin.py
new file mode 100644
index 0000000000000000000000000000000000000000..c3a68aa833f12f0fa324a269c36190f21b8a75bd
--- /dev/null
+++ b/detectron2/data/datasets/builtin.py
@@ -0,0 +1,259 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+
+"""
+This file registers pre-defined datasets at hard-coded paths, and their metadata.
+
+We hard-code metadata for common datasets. This enables:
+1. Consistency checks when loading the datasets
+2. Using models on these standard datasets directly and running demos,
+ without having to download the dataset annotations
+
+We hard-code some paths to the datasets, which are assumed to
+exist in "./datasets/".
+
+Users SHOULD NOT use this file to create new datasets / metadata for new datasets.
+To add a new dataset, refer to the tutorial "docs/DATASETS.md".
+"""
+
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+
+from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
+from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
+from .cityscapes_panoptic import register_all_cityscapes_panoptic
+from .coco import load_sem_seg, register_coco_instances
+from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+from .lvis import get_lvis_instances_meta, register_lvis_instances
+from .pascal_voc import register_pascal_voc
+
+# ==== Predefined datasets and splits for COCO ==========
+
+_PREDEFINED_SPLITS_COCO = {}
+_PREDEFINED_SPLITS_COCO["coco"] = {
+ "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
+ "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
+ "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
+ "coco_2014_valminusminival": (
+ "coco/val2014",
+ "coco/annotations/instances_valminusminival2014.json",
+ ),
+ "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
+ "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
+ "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
+ "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
+ "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
+}
+
+_PREDEFINED_SPLITS_COCO["coco_person"] = {
+ "keypoints_coco_2014_train": (
+ "coco/train2014",
+ "coco/annotations/person_keypoints_train2014.json",
+ ),
+ "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
+ "keypoints_coco_2014_minival": (
+ "coco/val2014",
+ "coco/annotations/person_keypoints_minival2014.json",
+ ),
+ "keypoints_coco_2014_valminusminival": (
+ "coco/val2014",
+ "coco/annotations/person_keypoints_valminusminival2014.json",
+ ),
+ "keypoints_coco_2017_train": (
+ "coco/train2017",
+ "coco/annotations/person_keypoints_train2017.json",
+ ),
+ "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
+ "keypoints_coco_2017_val_100": (
+ "coco/val2017",
+ "coco/annotations/person_keypoints_val2017_100.json",
+ ),
+}
+
+
+_PREDEFINED_SPLITS_COCO_PANOPTIC = {
+ "coco_2017_train_panoptic": (
+ # This is the original panoptic annotation directory
+ "coco/panoptic_train2017",
+ "coco/annotations/panoptic_train2017.json",
+ # This directory contains semantic annotations that are
+ # converted from panoptic annotations.
+ # It is used by PanopticFPN.
+ # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
+ # to create these directories.
+ "coco/panoptic_stuff_train2017",
+ ),
+ "coco_2017_val_panoptic": (
+ "coco/panoptic_val2017",
+ "coco/annotations/panoptic_val2017.json",
+ "coco/panoptic_stuff_val2017",
+ ),
+ "coco_2017_val_100_panoptic": (
+ "coco/panoptic_val2017_100",
+ "coco/annotations/panoptic_val2017_100.json",
+ "coco/panoptic_stuff_val2017_100",
+ ),
+}
+
+
+def register_all_coco(root):
+ for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
+ for key, (image_root, json_file) in splits_per_dataset.items():
+ # Assume pre-defined datasets live in `./datasets`.
+ register_coco_instances(
+ key,
+ _get_builtin_metadata(dataset_name),
+ os.path.join(root, json_file) if "://" not in json_file else json_file,
+ os.path.join(root, image_root),
+ )
+
+ for (
+ prefix,
+ (panoptic_root, panoptic_json, semantic_root),
+ ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
+ prefix_instances = prefix[: -len("_panoptic")]
+ instances_meta = MetadataCatalog.get(prefix_instances)
+ image_root, instances_json = instances_meta.image_root, instances_meta.json_file
+ # The "separated" version of COCO panoptic segmentation dataset,
+ # e.g. used by Panoptic FPN
+ register_coco_panoptic_separated(
+ prefix,
+ _get_builtin_metadata("coco_panoptic_separated"),
+ image_root,
+ os.path.join(root, panoptic_root),
+ os.path.join(root, panoptic_json),
+ os.path.join(root, semantic_root),
+ instances_json,
+ )
+ # The "standard" version of COCO panoptic segmentation dataset,
+ # e.g. used by Panoptic-DeepLab
+ register_coco_panoptic(
+ prefix,
+ _get_builtin_metadata("coco_panoptic_standard"),
+ image_root,
+ os.path.join(root, panoptic_root),
+ os.path.join(root, panoptic_json),
+ instances_json,
+ )
+
+
+# ==== Predefined datasets and splits for LVIS ==========
+
+
+_PREDEFINED_SPLITS_LVIS = {
+ "lvis_v1": {
+ "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
+ "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
+ "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
+ "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
+ },
+ "lvis_v0.5": {
+ "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
+ "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
+ "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
+ "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
+ },
+ "lvis_v0.5_cocofied": {
+ "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
+ "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
+ },
+}
+
+
+def register_all_lvis(root):
+ for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
+ for key, (image_root, json_file) in splits_per_dataset.items():
+ register_lvis_instances(
+ key,
+ get_lvis_instances_meta(dataset_name),
+ os.path.join(root, json_file) if "://" not in json_file else json_file,
+ os.path.join(root, image_root),
+ )
+
+
+# ==== Predefined splits for raw cityscapes images ===========
+_RAW_CITYSCAPES_SPLITS = {
+ "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
+ "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
+ "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
+}
+
+
+def register_all_cityscapes(root):
+ for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
+ meta = _get_builtin_metadata("cityscapes")
+ image_dir = os.path.join(root, image_dir)
+ gt_dir = os.path.join(root, gt_dir)
+
+ inst_key = key.format(task="instance_seg")
+ DatasetCatalog.register(
+ inst_key,
+ lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
+ x, y, from_json=True, to_polygons=True
+ ),
+ )
+ MetadataCatalog.get(inst_key).set(
+ image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
+ )
+
+ sem_key = key.format(task="sem_seg")
+ DatasetCatalog.register(
+ sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
+ )
+ MetadataCatalog.get(sem_key).set(
+ image_dir=image_dir,
+ gt_dir=gt_dir,
+ evaluator_type="cityscapes_sem_seg",
+ ignore_label=255,
+ **meta,
+ )
+
+
+# ==== Predefined splits for PASCAL VOC ===========
+def register_all_pascal_voc(root):
+ SPLITS = [
+ ("voc_2007_trainval", "VOC2007", "trainval"),
+ ("voc_2007_train", "VOC2007", "train"),
+ ("voc_2007_val", "VOC2007", "val"),
+ ("voc_2007_test", "VOC2007", "test"),
+ ("voc_2012_trainval", "VOC2012", "trainval"),
+ ("voc_2012_train", "VOC2012", "train"),
+ ("voc_2012_val", "VOC2012", "val"),
+ ]
+ for name, dirname, split in SPLITS:
+ year = 2007 if "2007" in name else 2012
+ register_pascal_voc(name, os.path.join(root, dirname), split, year)
+ MetadataCatalog.get(name).evaluator_type = "pascal_voc"
+
+
+def register_all_ade20k(root):
+ root = os.path.join(root, "ADEChallengeData2016")
+ for name, dirname in [("train", "training"), ("val", "validation")]:
+ image_dir = os.path.join(root, "images", dirname)
+ gt_dir = os.path.join(root, "annotations_detectron2", dirname)
+ name = f"ade20k_sem_seg_{name}"
+ DatasetCatalog.register(
+ name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
+ )
+ MetadataCatalog.get(name).set(
+ stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
+ image_root=image_dir,
+ sem_seg_root=gt_dir,
+ evaluator_type="sem_seg",
+ ignore_label=255,
+ )
+
+
+# True for open source;
+# Internally at fb, we register them elsewhere
+if __name__.endswith(".builtin"):
+ # Assume pre-defined datasets live in `./datasets`.
+ _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
+ register_all_coco(_root)
+ register_all_lvis(_root)
+ register_all_cityscapes(_root)
+ register_all_cityscapes_panoptic(_root)
+ register_all_pascal_voc(_root)
+ register_all_ade20k(_root)
diff --git a/detectron2/data/datasets/builtin_meta.py b/detectron2/data/datasets/builtin_meta.py
new file mode 100644
index 0000000000000000000000000000000000000000..63c7a1a31b31dd89b82011effee26471faccacf5
--- /dev/null
+++ b/detectron2/data/datasets/builtin_meta.py
@@ -0,0 +1,350 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+Note:
+For your custom dataset, there is no need to hard-code metadata anywhere in the code.
+For example, for COCO-format datasets, metadata will be obtained automatically
+when calling `load_coco_json`. For other datasets, metadata may also be obtained in other ways
+during loading.
+
+However, we hard-code metadata for a few common datasets here.
+The only goal is to allow users who don't have these datasets to use pre-trained models.
+Users don't have to download a COCO json (which contains metadata) in order to visualize a
+COCO model (with correct class names and colors).
+"""
+
+
+# All coco categories, together with their nice-looking visualization colors
+# It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
+COCO_CATEGORIES = [
+ {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
+ {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
+ {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
+ {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
+ {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
+ {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
+ {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
+ {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
+ {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
+ {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
+ {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
+ {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
+ {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
+ {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
+ {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
+ {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
+ {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
+ {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
+ {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
+ {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
+ {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
+ {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
+ {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
+ {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
+ {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
+ {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
+ {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
+ {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
+ {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
+ {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
+ {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
+ {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
+ {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
+ {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
+ {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
+ {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
+ {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
+ {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
+ {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
+ {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
+ {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
+ {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
+ {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
+ {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
+ {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
+ {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
+ {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
+ {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
+ {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
+ {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
+ {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
+ {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
+ {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
+ {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
+ {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
+ {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
+ {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
+ {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
+ {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
+ {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
+ {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
+ {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
+ {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
+ {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
+ {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
+ {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
+ {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
+ {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
+ {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
+ {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
+ {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
+ {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
+ {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
+ {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
+ {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
+ {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
+ {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
+ {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
+ {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
+ {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
+ {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
+ {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
+ {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
+ {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
+ {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
+ {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
+ {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
+ {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
+ {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
+ {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
+ {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
+ {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
+ {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
+ {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
+ {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
+ {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
+ {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
+ {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
+ {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
+ {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
+ {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
+ {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
+ {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
+ {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
+ {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
+ {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
+ {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
+ {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
+ {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
+ {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
+ {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
+ {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
+ {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
+ {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
+ {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
+ {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
+ {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
+ {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
+ {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
+ {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
+ {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
+ {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
+ {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
+ {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
+ {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
+ {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
+ {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
+ {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
+ {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
+ {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
+ {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
+ {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
+ {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
+]
+
+# fmt: off
+COCO_PERSON_KEYPOINT_NAMES = (
+ "nose",
+ "left_eye", "right_eye",
+ "left_ear", "right_ear",
+ "left_shoulder", "right_shoulder",
+ "left_elbow", "right_elbow",
+ "left_wrist", "right_wrist",
+ "left_hip", "right_hip",
+ "left_knee", "right_knee",
+ "left_ankle", "right_ankle",
+)
+# fmt: on
+
+# Pairs of keypoints that should be exchanged under horizontal flipping
+COCO_PERSON_KEYPOINT_FLIP_MAP = (
+ ("left_eye", "right_eye"),
+ ("left_ear", "right_ear"),
+ ("left_shoulder", "right_shoulder"),
+ ("left_elbow", "right_elbow"),
+ ("left_wrist", "right_wrist"),
+ ("left_hip", "right_hip"),
+ ("left_knee", "right_knee"),
+ ("left_ankle", "right_ankle"),
+)
+
+# rules for pairs of keypoints to draw a line between, and the line color to use.
+KEYPOINT_CONNECTION_RULES = [
+ # face
+ ("left_ear", "left_eye", (102, 204, 255)),
+ ("right_ear", "right_eye", (51, 153, 255)),
+ ("left_eye", "nose", (102, 0, 204)),
+ ("nose", "right_eye", (51, 102, 255)),
+ # upper-body
+ ("left_shoulder", "right_shoulder", (255, 128, 0)),
+ ("left_shoulder", "left_elbow", (153, 255, 204)),
+ ("right_shoulder", "right_elbow", (128, 229, 255)),
+ ("left_elbow", "left_wrist", (153, 255, 153)),
+ ("right_elbow", "right_wrist", (102, 255, 224)),
+ # lower-body
+ ("left_hip", "right_hip", (255, 102, 0)),
+ ("left_hip", "left_knee", (255, 255, 77)),
+ ("right_hip", "right_knee", (153, 255, 204)),
+ ("left_knee", "left_ankle", (191, 255, 128)),
+ ("right_knee", "right_ankle", (255, 195, 77)),
+]
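+
+# Illustrative sketch (not used in this file): the flip map above lists keypoint
+# name pairs whose columns should be swapped when an image is flipped
+# horizontally. Assuming `keypoints` is an (N, 17, 3) NumPy array ordered as in
+# COCO_PERSON_KEYPOINT_NAMES, the swap could look like:
+#
+#   name_to_idx = {name: i for i, name in enumerate(COCO_PERSON_KEYPOINT_NAMES)}
+#   for left, right in COCO_PERSON_KEYPOINT_FLIP_MAP:
+#       i, j = name_to_idx[left], name_to_idx[right]
+#       keypoints[:, [i, j]] = keypoints[:, [j, i]]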
+
+# All Cityscapes categories, together with their nice-looking visualization colors
+# It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
+CITYSCAPES_CATEGORIES = [
+ {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
+ {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
+ {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
+ {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
+ {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
+ {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
+ {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
+ {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
+ {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
+ {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
+ {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
+ {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
+ {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
+ {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
+ {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
+ {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
+ {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
+ {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
+ {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
+]
+
+# fmt: off
+ADE20K_SEM_SEG_CATEGORIES = [
+ "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
+]
+# After being processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
+# fmt: on
+
+
+def _get_coco_instances_meta():
+ thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
+ thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
+ assert len(thing_ids) == 80, len(thing_ids)
+ # Mapping from the incontiguous COCO category id to an id in [0, 79]
+ thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
+ thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
+ ret = {
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
+ "thing_classes": thing_classes,
+ "thing_colors": thing_colors,
+ }
+ return ret
+
+
+def _get_coco_panoptic_separated_meta():
+ """
+ Returns metadata for "separated" version of the panoptic segmentation dataset.
+ """
+ stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
+ assert len(stuff_ids) == 53, len(stuff_ids)
+
+    # For semantic segmentation, this maps stuff ids in the dataset (used when
+    # processing results) to contiguous ids in [0, 53] (used in models).
+    # The id 0 is mapped to an extra category "thing".
+ stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
+    # When converting COCO panoptic annotations to semantic annotations,
+    # we label the "thing" category as 0
+ stuff_dataset_id_to_contiguous_id[0] = 0
+
+ # 54 names for COCO stuff categories (including "things")
+ stuff_classes = ["things"] + [
+ k["name"].replace("-other", "").replace("-merged", "")
+ for k in COCO_CATEGORIES
+ if k["isthing"] == 0
+ ]
+
+ # NOTE: I randomly picked a color for things
+ stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
+ ret = {
+ "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
+ "stuff_classes": stuff_classes,
+ "stuff_colors": stuff_colors,
+ }
+ ret.update(_get_coco_instances_meta())
+ return ret
+
+
+def _get_builtin_metadata(dataset_name):
+ if dataset_name == "coco":
+ return _get_coco_instances_meta()
+ if dataset_name == "coco_panoptic_separated":
+ return _get_coco_panoptic_separated_meta()
+ elif dataset_name == "coco_panoptic_standard":
+ meta = {}
+        # The following metadata maps contiguous ids in [0, #thing categories +
+        # #stuff categories) to their names and colors. We duplicate the same
+        # name and color under "thing_*" and "stuff_*" because the current
+        # visualization function in D2 handles thing and stuff classes differently
+        # due to some heuristic used in Panoptic FPN. We keep the same naming to
+        # enable reusing existing visualization functions.
+ thing_classes = [k["name"] for k in COCO_CATEGORIES]
+ thing_colors = [k["color"] for k in COCO_CATEGORIES]
+ stuff_classes = [k["name"] for k in COCO_CATEGORIES]
+ stuff_colors = [k["color"] for k in COCO_CATEGORIES]
+
+ meta["thing_classes"] = thing_classes
+ meta["thing_colors"] = thing_colors
+ meta["stuff_classes"] = stuff_classes
+ meta["stuff_colors"] = stuff_colors
+
+ # Convert category id for training:
+ # category id: like semantic segmentation, it is the class id for each
+ # pixel. Since there are some classes not used in evaluation, the category
+        # id is not always contiguous and thus we have two sets of category ids:
+ # - original category id: category id in the original dataset, mainly
+ # used for evaluation.
+ # - contiguous category id: [0, #classes), in order to train the linear
+ # softmax classifier.
+ thing_dataset_id_to_contiguous_id = {}
+ stuff_dataset_id_to_contiguous_id = {}
+
+ for i, cat in enumerate(COCO_CATEGORIES):
+ if cat["isthing"]:
+ thing_dataset_id_to_contiguous_id[cat["id"]] = i
+ else:
+ stuff_dataset_id_to_contiguous_id[cat["id"]] = i
+
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
+
+ return meta
+ elif dataset_name == "coco_person":
+ return {
+ "thing_classes": ["person"],
+ "keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
+ "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
+ "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
+ }
+ elif dataset_name == "cityscapes":
+ # fmt: off
+ CITYSCAPES_THING_CLASSES = [
+ "person", "rider", "car", "truck",
+ "bus", "train", "motorcycle", "bicycle",
+ ]
+ CITYSCAPES_STUFF_CLASSES = [
+ "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
+ "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
+ "truck", "bus", "train", "motorcycle", "bicycle",
+ ]
+ # fmt: on
+ return {
+ "thing_classes": CITYSCAPES_THING_CLASSES,
+ "stuff_classes": CITYSCAPES_STUFF_CLASSES,
+ }
+ raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
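+
+
+# Usage sketch (illustrative; "my_coco_like_train" and the paths are placeholders,
+# not registered anywhere in this repo):
+#
+#   from detectron2.data import MetadataCatalog
+#   meta = _get_builtin_metadata("coco")
+#   MetadataCatalog.get("my_coco_like_train").set(
+#       json_file="path/to/annotations.json", image_root="path/to/images", **meta
+#   )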
diff --git a/detectron2/data/datasets/cityscapes.py b/detectron2/data/datasets/cityscapes.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a82256725f5f7152751e102bfe6ca6410f7f983
--- /dev/null
+++ b/detectron2/data/datasets/cityscapes.py
@@ -0,0 +1,345 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import functools
+import json
+import logging
+import multiprocessing as mp
+import os
+from itertools import chain
+
+import numpy as np
+import pycocotools.mask as mask_util
+
+from detectron2.structures import BoxMode
+from detectron2.utils.comm import get_world_size
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import setup_logger
+from PIL import Image
+
+try:
+ import cv2 # noqa
+except ImportError:
+ # OpenCV is an optional dependency at the moment
+ pass
+
+
+logger = logging.getLogger(__name__)
+
+
+def _get_cityscapes_files(image_dir, gt_dir):
+ files = []
+ # scan through the directory
+ cities = PathManager.ls(image_dir)
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
+ for city in cities:
+ city_img_dir = os.path.join(image_dir, city)
+ city_gt_dir = os.path.join(gt_dir, city)
+ for basename in PathManager.ls(city_img_dir):
+ image_file = os.path.join(city_img_dir, basename)
+
+ suffix = "leftImg8bit.png"
+ assert basename.endswith(suffix), basename
+ basename = basename[: -len(suffix)]
+
+ instance_file = os.path.join(
+ city_gt_dir, basename + "gtFine_instanceIds.png"
+ )
+ label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
+ json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
+
+ files.append((image_file, instance_file, label_file, json_file))
+ assert len(files), "No images found in {}".format(image_dir)
+ for f in files[0]:
+ assert PathManager.isfile(f), f
+ return files
+
+
+def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
+ from_json (bool): whether to read annotations from the raw json file or the png files.
+ to_polygons (bool): whether to represent the segmentation as polygons
+ (COCO's format) instead of masks (cityscapes's format).
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+        `Using Custom Datasets </tutorials/datasets.html>`_ )
+ """
+ if from_json:
+ assert to_polygons, (
+ "Cityscapes's json annotations are in polygon format. "
+ "Converting to mask format is not supported now."
+ )
+ files = _get_cityscapes_files(image_dir, gt_dir)
+
+ logger.info("Preprocessing cityscapes annotations ...")
+    # This is still not fast: all workers will execute duplicate work and will
+    # take up to 10 minutes on an 8-GPU server.
+ pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
+
+ ret = pool.map(
+ functools.partial(
+ _cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons
+ ),
+ files,
+ )
+ logger.info("Loaded {} images from {}".format(len(ret), image_dir))
+
+ # Map cityscape ids to contiguous ids
+ from cityscapesscripts.helpers.labels import labels
+
+ labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
+ dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
+ for dict_per_image in ret:
+ for anno in dict_per_image["annotations"]:
+ anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
+ return ret
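+
+# Usage sketch (illustrative; the directory layout below follows the docstring
+# above and is not shipped with this repo):
+#
+#   dicts = load_cityscapes_instances(
+#       "cityscapes/leftImg8bit/train", "cityscapes/gtFine/train",
+#       from_json=True, to_polygons=True,
+#   )
+#   # each dict has "file_name", "image_id", "height", "width", "annotations"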
+
+
+def load_cityscapes_semantic(image_dir, gt_dir):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
+
+ Returns:
+ list[dict]: a list of dict, each has "file_name" and
+ "sem_seg_file_name".
+ """
+ ret = []
+    # gt_dir is small and contains many small files; it makes sense to fetch it to local first
+ gt_dir = PathManager.get_local_path(gt_dir)
+ for image_file, _, label_file, json_file in _get_cityscapes_files(
+ image_dir, gt_dir
+ ):
+ label_file = label_file.replace("labelIds", "labelTrainIds")
+
+ with PathManager.open(json_file, "r") as f:
+ jsonobj = json.load(f)
+ ret.append(
+ {
+ "file_name": image_file,
+ "sem_seg_file_name": label_file,
+ "height": jsonobj["imgHeight"],
+ "width": jsonobj["imgWidth"],
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(
+ ret[0]["sem_seg_file_name"]
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
+ return ret
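+
+# Usage sketch (illustrative; requires the *_labelTrainIds.png files generated
+# with cityscapesscripts, as asserted above):
+#
+#   dicts = load_cityscapes_semantic(
+#       "cityscapes/leftImg8bit/val", "cityscapes/gtFine/val"
+#   )
+#   # each dict has "file_name", "sem_seg_file_name", "height", "width"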
+
+
+def _cityscapes_files_to_dict(files, from_json, to_polygons):
+ """
+    Parse cityscapes annotation files to an instance segmentation dataset dict.
+
+ Args:
+ files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
+ from_json (bool): whether to read annotations from the raw json file or the png files.
+ to_polygons (bool): whether to represent the segmentation as polygons
+ (COCO's format) instead of masks (cityscapes's format).
+
+ Returns:
+ A dict in Detectron2 Dataset format.
+ """
+ from cityscapesscripts.helpers.labels import id2label, name2label
+
+ image_file, instance_id_file, _, json_file = files
+
+ annos = []
+
+ if from_json:
+ from shapely.geometry import MultiPolygon, Polygon
+
+ with PathManager.open(json_file, "r") as f:
+ jsonobj = json.load(f)
+ ret = {
+ "file_name": image_file,
+ "image_id": os.path.basename(image_file),
+ "height": jsonobj["imgHeight"],
+ "width": jsonobj["imgWidth"],
+ }
+
+ # `polygons_union` contains the union of all valid polygons.
+ polygons_union = Polygon()
+
+        # CityscapesScripts draws the polygons in sequential order
+ # and each polygon *overwrites* existing ones. See
+ # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
+ # We use reverse order, and each polygon *avoids* early ones.
+        # This will resolve the polygon overlaps in the same way as CityscapesScripts.
+ for obj in jsonobj["objects"][::-1]:
+ if "deleted" in obj: # cityscapes data format specific
+ continue
+ label_name = obj["label"]
+
+ try:
+ label = name2label[label_name]
+ except KeyError:
+ if label_name.endswith("group"): # crowd area
+ label = name2label[label_name[: -len("group")]]
+ else:
+ raise
+ if label.id < 0: # cityscapes data format
+ continue
+
+            # Cityscapes's raw annotations use integer coordinates,
+            # therefore +0.5 here
+ poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
+ # CityscapesScript uses PIL.ImageDraw.polygon to rasterize
+ # polygons for evaluation. This function operates in integer space
+ # and draws each pixel whose center falls into the polygon.
+ # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
+ # We therefore dilate the input polygon by 0.5 as our input.
+ poly = Polygon(poly_coord).buffer(0.5, resolution=4)
+
+ if not label.hasInstances or label.ignoreInEval:
+ # even if we won't store the polygon it still contributes to overlaps resolution
+ polygons_union = polygons_union.union(poly)
+ continue
+
+ # Take non-overlapping part of the polygon
+ poly_wo_overlaps = poly.difference(polygons_union)
+ if poly_wo_overlaps.is_empty:
+ continue
+ polygons_union = polygons_union.union(poly)
+
+ anno = {}
+ anno["iscrowd"] = label_name.endswith("group")
+ anno["category_id"] = label.id
+
+ if isinstance(poly_wo_overlaps, Polygon):
+ poly_list = [poly_wo_overlaps]
+ elif isinstance(poly_wo_overlaps, MultiPolygon):
+ poly_list = poly_wo_overlaps.geoms
+ else:
+ raise NotImplementedError(
+ "Unknown geometric structure {}".format(poly_wo_overlaps)
+ )
+
+ poly_coord = []
+ for poly_el in poly_list:
+ # COCO API can work only with exterior boundaries now, hence we store only them.
+ # TODO: store both exterior and interior boundaries once other parts of the
+ # codebase support holes in polygons.
+ poly_coord.append(list(chain(*poly_el.exterior.coords)))
+ anno["segmentation"] = poly_coord
+ (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
+
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
+
+ annos.append(anno)
+ else:
+ # See also the official annotation parsing scripts at
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
+ with PathManager.open(instance_id_file, "rb") as f:
+ inst_image = np.asarray(Image.open(f), order="F")
+ # ids < 24 are stuff labels (filtering them first is about 5% faster)
+ flattened_ids = np.unique(inst_image[inst_image >= 24])
+
+ ret = {
+ "file_name": image_file,
+ "image_id": os.path.basename(image_file),
+ "height": inst_image.shape[0],
+ "width": inst_image.shape[1],
+ }
+
+ for instance_id in flattened_ids:
+ # For non-crowd annotations, instance_id // 1000 is the label_id
+ # Crowd annotations have <1000 instance ids
+ label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
+ label = id2label[label_id]
+ if not label.hasInstances or label.ignoreInEval:
+ continue
+
+ anno = {}
+ anno["iscrowd"] = instance_id < 1000
+ anno["category_id"] = label.id
+
+ mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
+
+ inds = np.nonzero(mask)
+ ymin, ymax = inds[0].min(), inds[0].max()
+ xmin, xmax = inds[1].min(), inds[1].max()
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
+ if xmax <= xmin or ymax <= ymin:
+ continue
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
+ if to_polygons:
+ # This conversion comes from D4809743 and D5171122,
+ # when Mask-RCNN was first developed.
+ contours = cv2.findContours(
+ mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE
+ )[-2]
+ polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
+                # opencv can produce invalid polygons
+ if len(polygons) == 0:
+ continue
+ anno["segmentation"] = polygons
+ else:
+ anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
+ annos.append(anno)
+ ret["annotations"] = annos
+ return ret
+
+
+def main() -> None:
+    """
+    Test the cityscapes dataset loader.
+
+    Usage:
+        python -m detectron2.data.datasets.cityscapes \
+            cityscapes/leftImg8bit/train cityscapes/gtFine/train
+    """
+    global logger, labels
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("image_dir")
+ parser.add_argument("gt_dir")
+ parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
+ args = parser.parse_args()
+ from cityscapesscripts.helpers.labels import labels
+ from detectron2.data.catalog import Metadata
+ from detectron2.utils.visualizer import Visualizer
+
+ logger = setup_logger(name=__name__)
+
+ dirname = "cityscapes-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+
+ if args.type == "instance":
+ dicts = load_cityscapes_instances(
+ args.image_dir, args.gt_dir, from_json=True, to_polygons=True
+ )
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ thing_classes = [
+ k.name for k in labels if k.hasInstances and not k.ignoreInEval
+ ]
+ meta = Metadata().set(thing_classes=thing_classes)
+
+ else:
+ dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ stuff_classes = [k.name for k in labels if k.trainId != 255]
+ stuff_colors = [k.color for k in labels if k.trainId != 255]
+ meta = Metadata().set(stuff_classes=stuff_classes, stuff_colors=stuff_colors)
+
+ for d in dicts:
+ img = np.array(Image.open(PathManager.open(d["file_name"], "rb")))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ # cv2.imshow("a", vis.get_image()[:, :, ::-1])
+ # cv2.waitKey()
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
+
+
+if __name__ == "__main__":
+ main() # pragma: no cover
diff --git a/detectron2/data/datasets/cityscapes_panoptic.py b/detectron2/data/datasets/cityscapes_panoptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..48c136f1623261b079591065fec7c7fc38165076
--- /dev/null
+++ b/detectron2/data/datasets/cityscapes_panoptic.py
@@ -0,0 +1,187 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import json
+import logging
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
+from detectron2.utils.file_io import PathManager
+
+"""
+This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
+"""
+
+
+logger = logging.getLogger(__name__)
+
+
+def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
+ files = []
+ # scan through the directory
+ cities = PathManager.ls(image_dir)
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
+ image_dict = {}
+ for city in cities:
+ city_img_dir = os.path.join(image_dir, city)
+ for basename in PathManager.ls(city_img_dir):
+ image_file = os.path.join(city_img_dir, basename)
+
+ suffix = "_leftImg8bit.png"
+ assert basename.endswith(suffix), basename
+ basename = os.path.basename(basename)[: -len(suffix)]
+
+ image_dict[basename] = image_file
+
+ for ann in json_info["annotations"]:
+ image_file = image_dict.get(ann["image_id"], None)
+ assert image_file is not None, "No image {} found for annotation {}".format(
+ ann["image_id"], ann["file_name"]
+ )
+ label_file = os.path.join(gt_dir, ann["file_name"])
+ segments_info = ann["segments_info"]
+
+ files.append((image_file, label_file, segments_info))
+
+ assert len(files), "No images found in {}".format(image_dir)
+ assert PathManager.isfile(files[0][0]), files[0][0]
+ assert PathManager.isfile(files[0][1]), files[0][1]
+ return files
+
+
+def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
+ gt_dir (str): path to the raw annotations. e.g.,
+ "~/cityscapes/gtFine/cityscapes_panoptic_train".
+ gt_json (str): path to the json file. e.g.,
+ "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
+ meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
+ and "stuff_dataset_id_to_contiguous_id" to map category ids to
+ contiguous ids for training.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+        `Using Custom Datasets </tutorials/datasets.html>`_ )
+ """
+
+ def _convert_category_id(segment_info, meta):
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ else:
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ return segment_info
+
+ assert os.path.exists(
+ gt_json
+ ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa
+ with open(gt_json) as f:
+ json_info = json.load(f)
+ files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
+ ret = []
+ for image_file, label_file, segments_info in files:
+ sem_label_file = (
+ image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
+ )
+ segments_info = [_convert_category_id(x, meta) for x in segments_info]
+ ret.append(
+ {
+ "file_name": image_file,
+ "image_id": "_".join(
+ os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
+ ),
+ "sem_seg_file_name": sem_label_file,
+ "pan_seg_file_name": label_file,
+ "segments_info": segments_info,
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(
+ ret[0]["sem_seg_file_name"]
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
+ assert PathManager.isfile(
+ ret[0]["pan_seg_file_name"]
+ ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
+ return ret
+
+
+_RAW_CITYSCAPES_PANOPTIC_SPLITS = {
+ "cityscapes_fine_panoptic_train": (
+ "cityscapes/leftImg8bit/train",
+ "cityscapes/gtFine/cityscapes_panoptic_train",
+ "cityscapes/gtFine/cityscapes_panoptic_train.json",
+ ),
+ "cityscapes_fine_panoptic_val": (
+ "cityscapes/leftImg8bit/val",
+ "cityscapes/gtFine/cityscapes_panoptic_val",
+ "cityscapes/gtFine/cityscapes_panoptic_val.json",
+ ),
+ # "cityscapes_fine_panoptic_test": not supported yet
+}
+
+
+def register_all_cityscapes_panoptic(root):
+ meta = {}
+    # The following metadata maps contiguous ids in [0, #thing categories +
+    # #stuff categories) to their names and colors. We duplicate the same
+    # name and color under "thing_*" and "stuff_*" because the current
+    # visualization function in D2 handles thing and stuff classes differently
+    # due to some heuristic used in Panoptic FPN. We keep the same naming to
+    # enable reusing existing visualization functions.
+ thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+ thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+ stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
+ stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
+
+ meta["thing_classes"] = thing_classes
+ meta["thing_colors"] = thing_colors
+ meta["stuff_classes"] = stuff_classes
+ meta["stuff_colors"] = stuff_colors
+
+ # There are three types of ids in cityscapes panoptic segmentation:
+ # (1) category id: like semantic segmentation, it is the class id for each
+ # pixel. Since there are some classes not used in evaluation, the category
+    # id is not always contiguous and thus we have two sets of category ids:
+ # - original category id: category id in the original dataset, mainly
+ # used for evaluation.
+ # - contiguous category id: [0, #classes), in order to train the classifier
+ # (2) instance id: this id is used to differentiate different instances from
+ # the same category. For "stuff" classes, the instance id is always 0; for
+ # "thing" classes, the instance id starts from 1 and 0 is reserved for
+ # ignored instances (e.g. crowd annotation).
+ # (3) panoptic id: this is the compact id that encode both category and
+ # instance id by: category_id * 1000 + instance_id.
+ thing_dataset_id_to_contiguous_id = {}
+ stuff_dataset_id_to_contiguous_id = {}
+
+ for k in CITYSCAPES_CATEGORIES:
+ if k["isthing"] == 1:
+ thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
+ else:
+ stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
+
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
+
+ for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
+ image_dir = os.path.join(root, image_dir)
+ gt_dir = os.path.join(root, gt_dir)
+ gt_json = os.path.join(root, gt_json)
+
+ DatasetCatalog.register(
+ key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
+ )
+ MetadataCatalog.get(key).set(
+ panoptic_root=gt_dir,
+ image_root=image_dir,
+ panoptic_json=gt_json,
+ gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
+ evaluator_type="cityscapes_panoptic_seg",
+ ignore_label=255,
+ label_divisor=1000,
+ **meta,
+ )
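+
+
+# Usage sketch (illustrative; assumes the `datasets/` root layout described by
+# _RAW_CITYSCAPES_PANOPTIC_SPLITS above, with labelTrainIds and panoptic pngs
+# already generated by cityscapesscripts):
+#
+#   register_all_cityscapes_panoptic("datasets")
+#   dicts = DatasetCatalog.get("cityscapes_fine_panoptic_val")
+#   meta = MetadataCatalog.get("cityscapes_fine_panoptic_val")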
diff --git a/detectron2/data/datasets/coco.py b/detectron2/data/datasets/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b88f7da36edd50bb8c5618eb49cd971447f255d
--- /dev/null
+++ b/detectron2/data/datasets/coco.py
@@ -0,0 +1,586 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import datetime
+import io
+import json
+import logging
+import os
+import shutil
+
+import numpy as np
+import pycocotools.mask as mask_util
+
+from detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
+from detectron2.utils.file_io import PathManager
+from fvcore.common.timer import Timer
+from iopath.common.file_io import file_lock
+from PIL import Image
+
+from .. import DatasetCatalog, MetadataCatalog
+
+"""
+This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
+"""
+
+
+logger = logging.getLogger(__name__)
+
+__all__ = [
+ "load_coco_json",
+ "load_sem_seg",
+ "convert_to_coco_json",
+ "register_coco_instances",
+]
+
+
+def load_coco_json(
+ json_file, image_root, dataset_name=None, extra_annotation_keys=None
+):
+ """
+ Load a json file with COCO's instances annotation format.
+ Currently supports instance detection, instance segmentation,
+ and person keypoints annotations.
+
+ Args:
+ json_file (str): full path to the json file in COCO instances annotation format.
+ image_root (str or path-like): the directory where the images in this json file exists.
+ dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
+ When provided, this function will also do the following:
+
+ * Put "thing_classes" into the metadata associated with this dataset.
+ * Map the category ids into a contiguous range (needed by standard dataset format),
+ and add "thing_dataset_id_to_contiguous_id" to the metadata associated
+ with this dataset.
+
+ This option should usually be provided, unless users need to load
+ the original json content and apply more processing manually.
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+ loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
+ "category_id", "segmentation"). The values for these keys will be returned as-is.
+ For example, the densepose annotations are loaded in this way.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
+ `Using Custom Datasets `_ ) when `dataset_name` is not None.
+        `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None.
+ incontiguous and may not conform to the Detectron2 standard format.
+
+ Notes:
+ 1. This function does not read the image files.
+ The results do not have the "image" field.
+ """
+ from pycocotools.coco import COCO
+
+ timer = Timer()
+ json_file = PathManager.get_local_path(json_file)
+ with contextlib.redirect_stdout(io.StringIO()):
+ coco_api = COCO(json_file)
+ if timer.seconds() > 1:
+ logger.info(
+ "Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
+ )
+
+ id_map = None
+ if dataset_name is not None:
+ meta = MetadataCatalog.get(dataset_name)
+ cat_ids = sorted(coco_api.getCatIds())
+ cats = coco_api.loadCats(cat_ids)
+ # The categories in a custom json file may not be sorted.
+ thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
+ meta.thing_classes = thing_classes
+
+ # In COCO, certain category ids are artificially removed,
+ # and by convention they are always ignored.
+ # We deal with COCO's id issue and translate
+ # the category ids to contiguous ids in [0, 80).
+
+ # It works by looking at the "categories" field in the json, therefore
+ # if users' own json also have incontiguous ids, we'll
+ # apply this mapping as well but print a warning.
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
+ if "coco" not in dataset_name:
+ logger.warning(
+ """
+Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
+"""
+ )
+ id_map = {v: i for i, v in enumerate(cat_ids)}
+ meta.thing_dataset_id_to_contiguous_id = id_map
+
+ # sort indices for reproducible results
+ img_ids = sorted(coco_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = coco_api.loadImgs(img_ids)
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images. Example of anns[0]:
+ # [{'segmentation': [[192.81,
+ # 247.09,
+ # ...
+ # 219.03,
+ # 249.06]],
+ # 'area': 1035.749,
+ # 'iscrowd': 0,
+ # 'image_id': 1268,
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
+ # 'category_id': 16,
+ # 'id': 42986},
+ # ...]
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
+ total_num_valid_anns = sum([len(x) for x in anns])
+ total_num_anns = len(coco_api.anns)
+ if total_num_valid_anns < total_num_anns:
+ logger.warning(
+ f"{json_file} contains {total_num_anns} annotations, but only "
+ f"{total_num_valid_anns} of them match to images in the file."
+ )
+
+ if "minival" not in json_file:
+ # The popular valminusminival & minival annotations for COCO2014 contain this bug.
+ # However the ratio of buggy annotations there is tiny and does not affect accuracy.
+ # Therefore we explicitly white-list them.
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(
+ ann_ids
+ ), "Annotation ids in '{}' are not unique!".format(json_file)
+
+ imgs_anns = list(zip(imgs, anns))
+ logger.info(
+ "Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file)
+ )
+
+ dataset_dicts = []
+
+ ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (
+ extra_annotation_keys or []
+ )
+
+ num_instances_without_valid_segmentation = 0
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ # Check that the image_id in this annotation is the same as
+ # the image_id we're looking at.
+ # This fails only when the data parsing logic or the annotation file is buggy.
+
+ # The original COCO valminusminival2014 & minival2014 annotation files
+            # actually contain bugs that, together with certain ways of using COCO API,
+ # can trigger this assertion.
+ assert anno["image_id"] == image_id
+
+ assert (
+ anno.get("ignore", 0) == 0
+ ), '"ignore" in COCO json file is not supported.'
+
+ obj = {key: anno[key] for key in ann_keys if key in anno}
+ if "bbox" in obj and len(obj["bbox"]) == 0:
+ raise ValueError(
+ f"One annotation of image {image_id} contains empty 'bbox' value! "
+ "This json does not have valid COCO format."
+ )
+
+ segm = anno.get("segmentation", None)
+ if segm: # either list[list[float]] or dict(RLE)
+ if isinstance(segm, dict):
+ if isinstance(segm["counts"], list):
+ # convert to compressed RLE
+ segm = mask_util.frPyObjects(segm, *segm["size"])
+ else:
+ # filter out invalid polygons (< 3 points)
+ segm = [
+ poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6
+ ]
+ if len(segm) == 0:
+ num_instances_without_valid_segmentation += 1
+ continue # ignore this instance
+ obj["segmentation"] = segm
+
+ keypts = anno.get("keypoints", None)
+ if keypts: # list[int]
+ for idx, v in enumerate(keypts):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+ # Therefore we assume the coordinates are "pixel indices" and
+ # add 0.5 to convert to floating point coordinates.
+ keypts[idx] = v + 0.5
+ obj["keypoints"] = keypts
+
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
+ if id_map:
+ annotation_category_id = obj["category_id"]
+ try:
+ obj["category_id"] = id_map[annotation_category_id]
+ except KeyError as e:
+ raise KeyError(
+ f"Encountered category_id={annotation_category_id} "
+ "but this id does not exist in 'categories' of the json file."
+ ) from e
+ objs.append(obj)
+ record["annotations"] = objs
+ dataset_dicts.append(record)
+
+ if num_instances_without_valid_segmentation > 0:
+ logger.warning(
+ "Filtered out {} instances without valid segmentation. ".format(
+ num_instances_without_valid_segmentation
+ )
+ + "There might be issues in your dataset generation process. Please "
+ "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
+ )
+ return dataset_dicts
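+
+# Usage sketch (illustrative; the json/image paths and the dataset name are
+# placeholders). Passing a dataset name also fills MetadataCatalog entries such
+# as "thing_classes" and "thing_dataset_id_to_contiguous_id":
+#
+#   dicts = load_coco_json(
+#       "path/to/instances_train.json", "path/to/train_images", "my_coco_train"
+#   )
+#   classes = MetadataCatalog.get("my_coco_train").thing_classes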
+
+
+def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
+ """
+ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
+ treated as ground truth annotations and all files under "image_root" with "image_ext" extension
+ as input images. Ground truth and input images are matched using file paths relative to
+ "gt_root" and "image_root" respectively without taking into account file extensions.
+ This works for COCO as well as some other datasets.
+
+ Args:
+ gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
+ annotations are stored as images with integer values in pixels that represent
+ corresponding semantic labels.
+ image_root (str): the directory where the input images are.
+ gt_ext (str): file extension for ground truth annotations.
+ image_ext (str): file extension for input images.
+
+ Returns:
+ list[dict]:
+ a list of dicts in detectron2 standard format without instance-level
+ annotation.
+
+ Notes:
+ 1. This function does not read the image and ground truth files.
+ The results do not have the "image" and "sem_seg" fields.
+ """
+
+ # We match input images with ground truth based on their relative filepaths (without file
+ # extensions) starting from 'image_root' and 'gt_root' respectively.
+ def file2id(folder_path, file_path):
+ # extract relative path starting from `folder_path`
+ image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
+ # remove file extension
+ image_id = os.path.splitext(image_id)[0]
+ return image_id
+
+ input_files = sorted(
+ (
+ os.path.join(image_root, f)
+ for f in PathManager.ls(image_root)
+ if f.endswith(image_ext)
+ ),
+ key=lambda file_path: file2id(image_root, file_path),
+ )
+ gt_files = sorted(
+ (
+ os.path.join(gt_root, f)
+ for f in PathManager.ls(gt_root)
+ if f.endswith(gt_ext)
+ ),
+ key=lambda file_path: file2id(gt_root, file_path),
+ )
+
+ assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
+
+ # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
+ if len(input_files) != len(gt_files):
+        logger.warning(
+ "Directory {} and {} has {} and {} files, respectively.".format(
+ image_root, gt_root, len(input_files), len(gt_files)
+ )
+ )
+ input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
+ gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
+ intersect = list(set(input_basenames) & set(gt_basenames))
+ # sort, otherwise each worker may obtain a list[dict] in different order
+ intersect = sorted(intersect)
+        logger.warning("Will use their intersection of {} files.".format(len(intersect)))
+ input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
+ gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
+
+ logger.info(
+ "Loaded {} images with semantic segmentation from {}".format(
+ len(input_files), image_root
+ )
+ )
+
+ dataset_dicts = []
+ for (img_path, gt_path) in zip(input_files, gt_files):
+ record = {}
+ record["file_name"] = img_path
+ record["sem_seg_file_name"] = gt_path
+ dataset_dicts.append(record)
+
+ return dataset_dicts
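+
+# Usage sketch (illustrative paths): images and per-pixel ground truth are
+# matched by their relative path without extension, as described above:
+#
+#   dicts = load_sem_seg(
+#       "path/to/sem_seg_gt/train", "path/to/images/train",
+#       gt_ext="png", image_ext="jpg",
+#   )
+#   # each dict has "file_name" and "sem_seg_file_name"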
+
+
+def convert_to_coco_dict(dataset_name):
+ """
+ Convert an instance detection/segmentation or keypoint detection dataset
+ in detectron2's standard format into COCO json format.
+
+ Generic dataset description can be found here:
+ https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
+
+ COCO data format description can be found here:
+ http://cocodataset.org/#format-data
+
+ Args:
+ dataset_name (str):
+ name of the source dataset
+            Must be registered in DatasetCatalog and in detectron2's standard format.
+ Must have corresponding metadata "thing_classes"
+ Returns:
+ coco_dict: serializable dict in COCO json format
+ """
+
+ dataset_dicts = DatasetCatalog.get(dataset_name)
+ metadata = MetadataCatalog.get(dataset_name)
+
+ # unmap the category mapping ids for COCO
+ if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {
+ v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
+ else:
+ reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
+
+ categories = [
+ {"id": reverse_id_mapper(id), "name": name}
+ for id, name in enumerate(metadata.thing_classes)
+ ]
+
+ logger.info("Converting dataset dicts into COCO format")
+ coco_images = []
+ coco_annotations = []
+
+ for image_id, image_dict in enumerate(dataset_dicts):
+ coco_image = {
+ "id": image_dict.get("image_id", image_id),
+ "width": int(image_dict["width"]),
+ "height": int(image_dict["height"]),
+ "file_name": str(image_dict["file_name"]),
+ }
+ coco_images.append(coco_image)
+
+ anns_per_image = image_dict.get("annotations", [])
+ for annotation in anns_per_image:
+ # create a new dict with only COCO fields
+ coco_annotation = {}
+
+ # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
+ bbox = annotation["bbox"]
+ if isinstance(bbox, np.ndarray):
+ if bbox.ndim != 1:
+ raise ValueError(
+ f"bbox has to be 1-dimensional. Got shape={bbox.shape}."
+ )
+ bbox = bbox.tolist()
+ if len(bbox) not in [4, 5]:
+                raise ValueError(f"bbox has to have length 4 or 5. Got {bbox}.")
+ from_bbox_mode = annotation["bbox_mode"]
+ to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
+ bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
+
+ # COCO requirement: instance area
+ if "segmentation" in annotation:
+ # Computing areas for instances by counting the pixels
+ segmentation = annotation["segmentation"]
+ # TODO: check segmentation type: RLE, BinaryMask or Polygon
+ if isinstance(segmentation, list):
+ polygons = PolygonMasks([segmentation])
+ area = polygons.area()[0].item()
+ elif isinstance(segmentation, dict): # RLE
+ area = mask_util.area(segmentation).item()
+ else:
+ raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
+ else:
+ # Computing areas using bounding boxes
+ if to_bbox_mode == BoxMode.XYWH_ABS:
+ bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
+ area = Boxes([bbox_xy]).area()[0].item()
+ else:
+ area = RotatedBoxes([bbox]).area()[0].item()
+
+ if "keypoints" in annotation:
+ keypoints = annotation["keypoints"] # list[int]
+ for idx, v in enumerate(keypoints):
+ if idx % 3 != 2:
+ # COCO's segmentation coordinates are floating points in [0, H or W],
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
+                        # For COCO format consistency we subtract 0.5
+ # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
+ keypoints[idx] = v - 0.5
+ if "num_keypoints" in annotation:
+ num_keypoints = annotation["num_keypoints"]
+ else:
+ num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
+
+ # COCO requirement:
+ # linking annotations to images
+ # "id" field must start with 1
+ coco_annotation["id"] = len(coco_annotations) + 1
+ coco_annotation["image_id"] = coco_image["id"]
+ coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
+ coco_annotation["area"] = float(area)
+ coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
+ coco_annotation["category_id"] = int(
+ reverse_id_mapper(annotation["category_id"])
+ )
+
+ # Add optional fields
+ if "keypoints" in annotation:
+ coco_annotation["keypoints"] = keypoints
+ coco_annotation["num_keypoints"] = num_keypoints
+
+ if "segmentation" in annotation:
+ seg = coco_annotation["segmentation"] = annotation["segmentation"]
+ if isinstance(seg, dict): # RLE
+ counts = seg["counts"]
+ if not isinstance(counts, str):
+ # make it json-serializable
+ seg["counts"] = counts.decode("ascii")
+
+ coco_annotations.append(coco_annotation)
+
+ logger.info(
+ "Conversion finished, "
+ f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
+ )
+
+ info = {
+ "date_created": str(datetime.datetime.now()),
+ "description": "Automatically generated COCO json file for Detectron2.",
+ }
+ coco_dict = {
+ "info": info,
+ "images": coco_images,
+ "categories": categories,
+ "licenses": None,
+ }
+ if len(coco_annotations) > 0:
+ coco_dict["annotations"] = coco_annotations
+ return coco_dict
+
+
+def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
+ """
+ Converts dataset into COCO format and saves it to a json file.
+ dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
+
+ Args:
+ dataset_name:
+ reference from the config file to the catalogs
+ must be registered in DatasetCatalog and in detectron2's standard format
+ output_file: path of json file that will be saved to
+ allow_cached: if json file is already present then skip conversion
+ """
+
+ # TODO: The dataset or the conversion script *may* change,
+ # a checksum would be useful for validating the cached data
+
+ PathManager.mkdirs(os.path.dirname(output_file))
+ with file_lock(output_file):
+ if PathManager.exists(output_file) and allow_cached:
+ logger.warning(
+ f"Using previously cached COCO format annotations at '{output_file}'. "
+ "You need to clear the cache file if your dataset has been modified."
+ )
+ else:
+ logger.info(
+                f"Converting annotations of dataset '{dataset_name}' to COCO format ..."
+ )
+ coco_dict = convert_to_coco_dict(dataset_name)
+
+ logger.info(f"Caching COCO format annotations at '{output_file}' ...")
+ tmp_file = output_file + ".tmp"
+ with PathManager.open(tmp_file, "w") as f:
+ json.dump(coco_dict, f)
+ shutil.move(tmp_file, output_file)
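+
+
+# Usage sketch (illustrative; "my_dataset_train" must already be registered in
+# DatasetCatalog with "thing_classes" metadata, and the output path is a placeholder):
+#
+#   convert_to_coco_json("my_dataset_train", "output/my_dataset_train_coco.json")
+#   # later calls reuse the cached json unless allow_cached=False or the file is removed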
+
+
+def register_coco_instances(name, metadata, json_file, image_root):
+ """
+ Register a dataset in COCO's json annotation format for
+ instance detection, instance segmentation and keypoint detection.
+ (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
+ `instances*.json` and `person_keypoints*.json` in the dataset).
+
+ This is an example of how to register a new dataset.
+ You can do something similar to this function, to register new datasets.
+
+ Args:
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+ metadata (dict): extra metadata associated with this dataset. You can
+ leave it as an empty dict.
+ json_file (str): path to the json instance annotation file.
+ image_root (str or path-like): directory which contains all the images.
+ """
+ assert isinstance(name, str), name
+ assert isinstance(json_file, (str, os.PathLike)), json_file
+ assert isinstance(image_root, (str, os.PathLike)), image_root
+ # 1. register a function which returns dicts
+ DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
+
+ # 2. Optionally, add metadata about this dataset,
+ # since they might be useful in evaluation, visualization or logging
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
+ )
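+
+# Usage sketch (illustrative; the dataset name and paths are placeholders):
+#
+#   register_coco_instances(
+#       "my_dataset_train", {}, "path/to/annotations.json", "path/to/images"
+#   )
+#   dicts = DatasetCatalog.get("my_dataset_train")  # lazily calls load_coco_json
+#   meta = MetadataCatalog.get("my_dataset_train")  # json_file, image_root, evaluator_type, ...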
+
+
+def main() -> None:
+    """
+    Test the COCO json dataset loader.
+
+    Usage:
+        python -m detectron2.data.datasets.coco \
+            path/to/json path/to/image_root dataset_name
+
+    "dataset_name" can be "coco_2014_minival_100", or other
+    pre-registered ones
+    """
+    global logger
+ import sys
+
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ from detectron2.utils.logger import setup_logger
+ from detectron2.utils.visualizer import Visualizer
+
+ logger = setup_logger(name=__name__)
+ assert sys.argv[3] in DatasetCatalog.list()
+ meta = MetadataCatalog.get(sys.argv[3])
+
+ dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "coco-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ for d in dicts:
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
+
+
+if __name__ == "__main__":
+ main() # pragma: no cover
diff --git a/detectron2/data/datasets/coco_panoptic.py b/detectron2/data/datasets/coco_panoptic.py
new file mode 100644
index 0000000000000000000000000000000000000000..b8dae44317b556610d7fed39017e082d7e855956
--- /dev/null
+++ b/detectron2/data/datasets/coco_panoptic.py
@@ -0,0 +1,228 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import json
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.utils.file_io import PathManager
+
+from .coco import load_coco_json, load_sem_seg
+
+__all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
+
+
+def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
+ """
+ Args:
+ image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
+ gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
+ json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+        `Using Custom Datasets </tutorials/datasets.html>`_ )
+ """
+
+ def _convert_category_id(segment_info, meta):
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ segment_info["isthing"] = True
+ else:
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
+ segment_info["category_id"]
+ ]
+ segment_info["isthing"] = False
+ return segment_info
+
+ with PathManager.open(json_file) as f:
+ json_info = json.load(f)
+
+ ret = []
+ for ann in json_info["annotations"]:
+ image_id = int(ann["image_id"])
+ # TODO: currently we assume image and label has the same filename but
+ # different extension, and images have extension ".jpg" for COCO. Need
+ # to make image extension a user-provided argument if we extend this
+ # function to support other COCO-like datasets.
+ image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
+ label_file = os.path.join(gt_dir, ann["file_name"])
+ segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
+ ret.append(
+ {
+ "file_name": image_file,
+ "image_id": image_id,
+ "pan_seg_file_name": label_file,
+ "segments_info": segments_info,
+ }
+ )
+ assert len(ret), f"No images found in {image_dir}!"
+ assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
+ assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
+ return ret
+
+
+def register_coco_panoptic(
+ name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
+):
+ """
+ Register a "standard" version of COCO panoptic segmentation dataset named `name`.
+ The dictionaries in this registered dataset follows detectron2's standard format.
+ Hence it's called "standard".
+
+ Args:
+ name (str): the name that identifies a dataset,
+ e.g. "coco_2017_train_panoptic"
+ metadata (dict): extra metadata associated with this dataset.
+ image_root (str): directory which contains all the images
+ panoptic_root (str): directory which contains panoptic annotation images in COCO format
+ panoptic_json (str): path to the json panoptic annotation file in COCO format
+ sem_seg_root (none): not used, to be consistent with
+ `register_coco_panoptic_separated`.
+ instances_json (str): path to the json instance annotation file
+ """
+ panoptic_name = name
+ DatasetCatalog.register(
+ panoptic_name,
+ lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
+ )
+ MetadataCatalog.get(panoptic_name).set(
+ panoptic_root=panoptic_root,
+ image_root=image_root,
+ panoptic_json=panoptic_json,
+ json_file=instances_json,
+ evaluator_type="coco_panoptic_seg",
+ ignore_label=255,
+ label_divisor=1000,
+ **metadata,
+ )
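+
+# Usage sketch (illustrative; the name and paths are placeholders, and `metadata`
+# could come from e.g. builtin_meta._get_builtin_metadata("coco_panoptic_standard")):
+#
+#   register_coco_panoptic(
+#       "my_coco_panoptic_train",
+#       metadata,
+#       "path/to/train2017",
+#       "path/to/panoptic_train2017",
+#       "path/to/annotations/panoptic_train2017.json",
+#       instances_json="path/to/annotations/instances_train2017.json",
+#   )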
+
+
+def register_coco_panoptic_separated(
+ name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
+):
+ """
+ Register a "separated" version of COCO panoptic segmentation dataset named `name`.
+ The annotations in this registered dataset will contain both instance annotations and
+ semantic annotations, each with its own contiguous ids. Hence it's called "separated".
+
+ It follows the setting used by the PanopticFPN paper:
+
+ 1. The instance annotations directly come from polygons in the COCO
+ instances annotation task, rather than from the masks in the COCO panoptic annotations.
+
+       The two formats have small differences:
+ Polygons in the instance annotations may have overlaps.
+ The mask annotations are produced by labeling the overlapped polygons
+ with depth ordering.
+
+ 2. The semantic annotations are converted from panoptic annotations, where
+ all "things" are assigned a semantic id of 0.
+ All semantic categories will therefore have ids in contiguous
+ range [1, #stuff_categories].
+
+ This function will also register a pure semantic segmentation dataset
+ named ``name + '_stuffonly'``.
+
+ Args:
+ name (str): the name that identifies a dataset,
+ e.g. "coco_2017_train_panoptic"
+ metadata (dict): extra metadata associated with this dataset.
+ image_root (str): directory which contains all the images
+ panoptic_root (str): directory which contains panoptic annotation images
+ panoptic_json (str): path to the json panoptic annotation file
+ sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
+ instances_json (str): path to the json instance annotation file
+ """
+ panoptic_name = name + "_separated"
+ DatasetCatalog.register(
+ panoptic_name,
+ lambda: merge_to_panoptic(
+ load_coco_json(instances_json, image_root, panoptic_name),
+ load_sem_seg(sem_seg_root, image_root),
+ ),
+ )
+ MetadataCatalog.get(panoptic_name).set(
+ panoptic_root=panoptic_root,
+ image_root=image_root,
+ panoptic_json=panoptic_json,
+ sem_seg_root=sem_seg_root,
+ json_file=instances_json, # TODO rename
+ evaluator_type="coco_panoptic_seg",
+ ignore_label=255,
+ **metadata,
+ )
+
+ semantic_name = name + "_stuffonly"
+ DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
+ MetadataCatalog.get(semantic_name).set(
+ sem_seg_root=sem_seg_root,
+ image_root=image_root,
+ evaluator_type="sem_seg",
+ ignore_label=255,
+ **metadata,
+ )
+
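+# Illustrative usage sketch (assumed name and paths, for demonstration only). One call
+# registers two datasets: "<name>_separated" and "<name>_stuffonly".
+#
+#   register_coco_panoptic_separated(
+#       name="my_coco_2017_train_panoptic",
+#       metadata={},
+#       image_root="datasets/coco/train2017",
+#       panoptic_root="datasets/coco/panoptic_train2017",
+#       panoptic_json="datasets/coco/annotations/panoptic_train2017.json",
+#       sem_seg_root="datasets/coco/panoptic_stuff_train2017",
+#       instances_json="datasets/coco/annotations/instances_train2017.json",
+#   )
+#   # -> "my_coco_2017_train_panoptic_separated" and "my_coco_2017_train_panoptic_stuffonly"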
+
+def merge_to_panoptic(detection_dicts, sem_seg_dicts):
+ """
+ Create dataset dicts for panoptic segmentation, by
+ merging two lists of dicts using the "file_name" field to match their entries.
+
+ Args:
+ detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
+ sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
+
+ Returns:
+ list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
+ both detection_dicts and sem_seg_dicts that correspond to the same image.
+ The function assumes that the same key in different dicts has the same value.
+ """
+ results = []
+ sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
+ assert len(sem_seg_file_to_entry) > 0
+
+ for det_dict in detection_dicts:
+ dic = copy.copy(det_dict)
+ dic.update(sem_seg_file_to_entry[dic["file_name"]])
+ results.append(dic)
+ return results
+
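+# Minimal behavior sketch for merge_to_panoptic, with hypothetical toy inputs:
+# entries are matched on "file_name" and their keys merged into one dict per image.
+#
+#   det = [{"file_name": "a.jpg", "annotations": []}]
+#   sem = [{"file_name": "a.jpg", "sem_seg_file_name": "a.png"}]
+#   merge_to_panoptic(det, sem)
+#   # -> [{"file_name": "a.jpg", "annotations": [], "sem_seg_file_name": "a.png"}]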
+
+if __name__ == "__main__":
+ """
+ Test the COCO panoptic dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.coco_panoptic \
+ path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10
+
+ "dataset_name" can be "coco_2017_train_panoptic", or other
+ pre-registered ones
+ """
+ from detectron2.utils.logger import setup_logger
+ from detectron2.utils.visualizer import Visualizer
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ import sys
+ from PIL import Image
+ import numpy as np
+
+ logger = setup_logger(name=__name__)
+ assert sys.argv[4] in DatasetCatalog.list()
+ meta = MetadataCatalog.get(sys.argv[4])
+
+ dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict())
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "coco-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ num_imgs_to_vis = int(sys.argv[5])
+ for i, d in enumerate(dicts):
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
+ if i + 1 >= num_imgs_to_vis:
+ break
diff --git a/detectron2/data/datasets/lvis.py b/detectron2/data/datasets/lvis.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a28463428a5a9a0311647bd39063f73e6abc0b4
--- /dev/null
+++ b/detectron2/data/datasets/lvis.py
@@ -0,0 +1,268 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import os
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+from fvcore.common.timer import Timer
+
+from .builtin_meta import _get_coco_instances_meta
+from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
+from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
+from .lvis_v1_category_image_count import (
+ LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT,
+)
+
+"""
+This file contains functions to parse LVIS-format annotations into dicts in the
+"Detectron2 format".
+"""
+
+logger = logging.getLogger(__name__)
+
+__all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
+
+
+def register_lvis_instances(name, metadata, json_file, image_root):
+ """
+ Register a dataset in LVIS's json annotation format for instance detection and segmentation.
+
+ Args:
+ name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
+ metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
+ json_file (str): path to the json instance annotation file.
+ image_root (str or path-like): directory which contains all the images.
+ """
+ DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
+ MetadataCatalog.get(name).set(
+ json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
+ )
+
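+# Illustrative usage sketch (assumed dataset name and paths). Note the trailing slash on
+# image_root: file paths are built as image_root + split folder (see get_file_name below).
+#
+#   register_lvis_instances(
+#       name="my_lvis_v1_val",
+#       metadata=get_lvis_instances_meta("lvis_v1"),
+#       json_file="datasets/lvis/lvis_v1_val.json",
+#       image_root="datasets/coco/",
+#   )
+#   dataset_dicts = DatasetCatalog.get("my_lvis_v1_val")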
+
+def load_lvis_json(
+ json_file, image_root, dataset_name=None, extra_annotation_keys=None
+):
+ """
+ Load a json file in LVIS's annotation format.
+
+ Args:
+ json_file (str): full path to the LVIS json annotation file.
+ image_root (str): the directory where the images in this json file exist.
+ dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
+ If provided, this function will put "thing_classes" into the metadata
+ associated with this dataset.
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+ loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id",
+ "segmentation"). The values for these keys will be returned as-is.
+
+ Returns:
+ list[dict]: a list of dicts in Detectron2 standard format. (See
+ `Using Custom Datasets <https://detectron2.readthedocs.io/tutorials/datasets.html>`_ )
+
+ Notes:
+ 1. This function does not read the image files.
+ The results do not have the "image" field.
+ """
+ from lvis import LVIS
+
+ json_file = PathManager.get_local_path(json_file)
+
+ timer = Timer()
+ lvis_api = LVIS(json_file)
+ if timer.seconds() > 1:
+ logger.info(
+ "Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds())
+ )
+
+ if dataset_name is not None:
+ meta = get_lvis_instances_meta(dataset_name)
+ MetadataCatalog.get(dataset_name).set(**meta)
+
+ # sort indices for reproducible results
+ img_ids = sorted(lvis_api.imgs.keys())
+ # imgs is a list of dicts, each looks something like:
+ # {'license': 4,
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
+ # 'height': 427,
+ # 'width': 640,
+ # 'date_captured': '2013-11-17 05:57:24',
+ # 'id': 1268}
+ imgs = lvis_api.load_imgs(img_ids)
+ # anns is a list[list[dict]], where each dict is an annotation
+ # record for an object. The inner list enumerates the objects in an image
+ # and the outer list enumerates over images. Example of anns[0]:
+ # [{'segmentation': [[192.81,
+ # 247.09,
+ # ...
+ # 219.03,
+ # 249.06]],
+ # 'area': 1035.749,
+ # 'image_id': 1268,
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
+ # 'category_id': 16,
+ # 'id': 42986},
+ # ...]
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+ # Sanity check that each annotation has a unique id
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+ assert len(set(ann_ids)) == len(
+ ann_ids
+ ), "Annotation ids in '{}' are not unique".format(json_file)
+
+ imgs_anns = list(zip(imgs, anns))
+
+ logger.info(
+ "Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file)
+ )
+
+ if extra_annotation_keys:
+ logger.info(
+ "The following extra annotation keys will be loaded: {} ".format(
+ extra_annotation_keys
+ )
+ )
+ else:
+ extra_annotation_keys = []
+
+ def get_file_name(img_root, img_dict):
+ # Determine the path including the split folder ("train2017", "val2017", "test2017") from
+ # the coco_url field. Example:
+ # 'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
+ split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
+ return os.path.join(img_root + split_folder, file_name)
+
+ dataset_dicts = []
+
+ for (img_dict, anno_dict_list) in imgs_anns:
+ record = {}
+ record["file_name"] = get_file_name(image_root, img_dict)
+ record["height"] = img_dict["height"]
+ record["width"] = img_dict["width"]
+ record["not_exhaustive_category_ids"] = img_dict.get(
+ "not_exhaustive_category_ids", []
+ )
+ record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
+ image_id = record["image_id"] = img_dict["id"]
+
+ objs = []
+ for anno in anno_dict_list:
+ # Check that the image_id in this annotation is the same as
+ # the image_id we're looking at.
+ # This fails only when the data parsing logic or the annotation file is buggy.
+ assert anno["image_id"] == image_id
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+ # The LVIS data loader can also be used to load COCO dataset categories. In this case the
+ # `meta` variable will have a field with the COCO-specific category mapping.
+ if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta:
+ obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+ anno["category_id"]
+ ]
+ else:
+ obj["category_id"] = (
+ anno["category_id"] - 1
+ ) # Convert 1-indexed to 0-indexed
+ segm = anno["segmentation"] # list[list[float]]
+ # filter out invalid polygons (< 3 points)
+ valid_segm = [
+ poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6
+ ]
+ assert len(segm) == len(
+ valid_segm
+ ), "Annotation contains an invalid polygon with < 3 points"
+ assert len(segm) > 0
+ obj["segmentation"] = segm
+ for extra_ann_key in extra_annotation_keys:
+ obj[extra_ann_key] = anno[extra_ann_key]
+ objs.append(obj)
+ record["annotations"] = objs
+ dataset_dicts.append(record)
+
+ return dataset_dicts
+
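+# Shape sketch of a single returned record (values are illustrative, not real data):
+#
+#   {
+#       "file_name": "datasets/coco/val2017/000000155379.jpg",
+#       "height": 427, "width": 640, "image_id": 155379,
+#       "not_exhaustive_category_ids": [], "neg_category_ids": [],
+#       "annotations": [
+#           {"bbox": [192.81, 224.8, 74.73, 33.43], "bbox_mode": BoxMode.XYWH_ABS,
+#            "category_id": 15, "segmentation": [[192.81, 247.09, ...]]},
+#       ],
+#   }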
+
+def get_lvis_instances_meta(dataset_name):
+ """
+ Load LVIS metadata.
+
+ Args:
+ dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
+
+ Returns:
+ dict: LVIS metadata with keys: thing_classes
+ """
+ if "cocofied" in dataset_name:
+ return _get_coco_instances_meta()
+ if "v0.5" in dataset_name:
+ return _get_lvis_instances_meta_v0_5()
+ elif "v1" in dataset_name:
+ return _get_lvis_instances_meta_v1()
+ raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
+
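+# Dispatch sketch (dataset names below are illustrative):
+#
+#   get_lvis_instances_meta("lvis_v0.5_train")         # 1230 thing_classes
+#   get_lvis_instances_meta("lvis_v1_val")             # 1203 thing_classes + class_image_count
+#   get_lvis_instances_meta("lvis_v0.5_val_cocofied")  # COCO instances metadata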
+
+def _get_lvis_instances_meta_v0_5():
+ assert len(LVIS_V0_5_CATEGORIES) == 1230
+ cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES]
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
+ cat_ids
+ ), "Category ids are not in [1, #categories], as expected"
+ # Ensure that the category list is sorted by id
+ lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"])
+ thing_classes = [k["synonyms"][0] for k in lvis_categories]
+ meta = {"thing_classes": thing_classes}
+ return meta
+
+
+def _get_lvis_instances_meta_v1():
+ assert len(LVIS_V1_CATEGORIES) == 1203
+ cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
+ cat_ids
+ ), "Category ids are not in [1, #categories], as expected"
+ # Ensure that the category list is sorted by id
+ lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
+ thing_classes = [k["synonyms"][0] for k in lvis_categories]
+ meta = {
+ "thing_classes": thing_classes,
+ "class_image_count": LVIS_V1_CATEGORY_IMAGE_COUNT,
+ }
+ return meta
+
+
+def main() -> None:
+ """
+ Test the LVIS json dataset loader.
+
+ Usage:
+ python -m detectron2.data.datasets.lvis \
+ path/to/json path/to/image_root dataset_name vis_limit
+ """
+ global logger
+ import sys
+
+ import detectron2.data.datasets # noqa # add pre-defined metadata
+ import numpy as np
+ from detectron2.utils.logger import setup_logger
+ from detectron2.utils.visualizer import Visualizer
+ from PIL import Image
+
+ logger = setup_logger(name=__name__)
+ meta = MetadataCatalog.get(sys.argv[3])
+
+ dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
+ logger.info("Done loading {} samples.".format(len(dicts)))
+
+ dirname = "lvis-data-vis"
+ os.makedirs(dirname, exist_ok=True)
+ for d in dicts[: int(sys.argv[4])]:
+ img = np.array(Image.open(d["file_name"]))
+ visualizer = Visualizer(img, metadata=meta)
+ vis = visualizer.draw_dataset_dict(d)
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+ vis.save(fpath)
+
+
+if __name__ == "__main__":
+ main() # pragma: no cover
diff --git a/detectron2/data/datasets/lvis_v0_5_categories.py b/detectron2/data/datasets/lvis_v0_5_categories.py
new file mode 100644
index 0000000000000000000000000000000000000000..d3dab6198da614937b08682f4c9edf52bdf1d236
--- /dev/null
+++ b/detectron2/data/datasets/lvis_v0_5_categories.py
@@ -0,0 +1,13 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Autogen with
+# with open("lvis_v0.5_val.json", "r") as f:
+# a = json.load(f)
+# c = a["categories"]
+# for x in c:
+# del x["image_count"]
+# del x["instance_count"]
+# LVIS_CATEGORIES = repr(c) + " # noqa"
+
+# fmt: off
+LVIS_CATEGORIES = [{'frequency': 'r', 'id': 1, 'synset': 'acorn.n.01', 'synonyms': ['acorn'], 'def': 'nut from an oak tree', 'name': 'acorn'}, {'frequency': 'c', 'id': 2, 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'id': 3, 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'id': 4, 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'c', 'id': 5, 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'id': 6, 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'r', 'id': 7, 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'id': 8, 'synset': 'almond.n.02', 'synonyms': ['almond'], 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'id': 9, 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'r', 'id': 10, 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'id': 11, 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'id': 12, 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'id': 13, 'synset': 'apple.n.01', 'synonyms': ['apple'], 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'id': 14, 'synset': 'apple_juice.n.01', 'synonyms': ['apple_juice'], 'def': 'the juice of apples', 'name': 'apple_juice'}, {'frequency': 'r', 'id': 15, 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'id': 16, 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'id': 17, 'synset': 'apron.n.01', 'synonyms': ['apron'], 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'id': 18, 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'c', 'id': 19, 'synset': 'armband.n.02', 'synonyms': ['armband'], 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'id': 20, 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'id': 21, 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'def': 'a large 
wardrobe or cabinet', 'name': 'armoire'}, {'frequency': 'r', 'id': 22, 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'id': 23, 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'id': 24, 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'id': 25, 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'id': 26, 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'id': 27, 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'c', 'id': 28, 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'id': 29, 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'id': 30, 'synset': 'awning.n.01', 'synonyms': ['awning'], 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'id': 31, 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'f', 'id': 32, 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'id': 33, 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'id': 34, 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'id': 35, 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'id': 36, 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'id': 37, 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'id': 38, 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'id': 39, 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'def': 'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'id': 40, 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'def': 'something 
used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'id': 41, 'synset': 'ball.n.06', 'synonyms': ['ball'], 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'id': 42, 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'id': 43, 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'id': 44, 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'id': 45, 'synset': 'banana.n.02', 'synonyms': ['banana'], 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'r', 'id': 46, 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'id': 47, 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'c', 'id': 48, 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'id': 49, 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'id': 50, 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'id': 51, 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'id': 52, 'synset': 'barge.n.01', 'synonyms': ['barge'], 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'id': 53, 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'id': 54, 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'id': 55, 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'id': 56, 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'id': 57, 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'id': 58, 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'id': 59, 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'id': 60, 'synset': 'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, 
{'frequency': 'f', 'id': 61, 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'id': 62, 'synset': 'basket.n.03', 'synonyms': ['basketball_hoop'], 'def': 'metal hoop supporting a net through which players try to throw the basketball', 'name': 'basketball_hoop'}, {'frequency': 'c', 'id': 63, 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'id': 64, 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'r', 'id': 65, 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'id': 66, 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'id': 67, 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'id': 68, 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'id': 69, 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'id': 70, 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'id': 71, 'synset': 'battery.n.02', 'synonyms': ['battery'], 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'id': 72, 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'id': 73, 'synset': 'bead.n.01', 'synonyms': ['bead'], 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'r', 'id': 74, 'synset': 'beaker.n.01', 'synonyms': ['beaker'], 'def': 'a flatbottomed jar made of glass or plastic; used for chemistry', 'name': 'beaker'}, {'frequency': 'c', 'id': 75, 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'id': 76, 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'id': 77, 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'id': 78, 'synset': 'bear.n.01', 'synonyms': ['bear'], 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'id': 79, 'synset': 'bed.n.01', 'synonyms': ['bed'], 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'c', 'id': 80, 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 
'f', 'id': 81, 'synset': 'beef.n.01', 'synonyms': ['cow'], 'def': 'cattle that are reared for their meat', 'name': 'cow'}, {'frequency': 'c', 'id': 82, 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'id': 83, 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'id': 84, 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'id': 85, 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'id': 86, 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'id': 87, 'synset': 'bell.n.01', 'synonyms': ['bell'], 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'id': 88, 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'id': 89, 'synset': 'belt.n.02', 'synonyms': ['belt'], 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'id': 90, 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'id': 91, 'synset': 'bench.n.01', 'synonyms': ['bench'], 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'id': 92, 'synset': 'beret.n.01', 'synonyms': ['beret'], 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'id': 93, 'synset': 'bib.n.02', 'synonyms': ['bib'], 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'id': 94, 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'id': 95, 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'id': 96, 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'c', 'id': 97, 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'id': 98, 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'id': 99, 'synset': 'bird.n.01', 'synonyms': ['bird'], 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'r', 'id': 100, 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'r', 'id': 101, 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'id': 102, 'synset': 'birdcage.n.01', 'synonyms': ['birdcage'], 'def': 'a cage in which a bird can be kept', 'name': 
'birdcage'}, {'frequency': 'c', 'id': 103, 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'id': 104, 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'id': 105, 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'id': 106, 'synset': 'biscuit.n.01', 'synonyms': ['biscuit_(bread)'], 'def': 'small round bread leavened with baking-powder or soda', 'name': 'biscuit_(bread)'}, {'frequency': 'r', 'id': 107, 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'id': 108, 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'id': 109, 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'id': 110, 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'id': 111, 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'id': 112, 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'id': 113, 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'c', 'id': 114, 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'c', 'id': 115, 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'id': 116, 'synset': 'boar.n.02', 'synonyms': ['boar'], 'def': 'an uncastrated male hog', 'name': 'boar'}, {'frequency': 'r', 'id': 117, 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'id': 118, 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'c', 'id': 119, 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'r', 'id': 120, 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'id': 121, 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'id': 122, 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 
'bolo_tie'}, {'frequency': 'c', 'id': 123, 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'id': 124, 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'id': 125, 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'id': 126, 'synset': 'book.n.01', 'synonyms': ['book'], 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'r', 'id': 127, 'synset': 'book_bag.n.01', 'synonyms': ['book_bag'], 'def': 'a bag in which students carry their books', 'name': 'book_bag'}, {'frequency': 'c', 'id': 128, 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'id': 129, 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'id': 130, 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'id': 131, 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'id': 132, 'synset': 'boot.n.01', 'synonyms': ['boot'], 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'id': 133, 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'id': 134, 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'id': 135, 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'id': 136, 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'id': 137, 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'id': 138, 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'id': 139, 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'id': 140, 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'id': 141, 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'id': 142, 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'def': 'a large ball with finger holes used in the sport of bowling', 'name': 'bowling_ball'}, {'frequency': 'r', 'id': 143, 'synset': 'bowling_pin.n.01', 'synonyms': ['bowling_pin'], 'def': 'a 
club-shaped wooden object used in bowling', 'name': 'bowling_pin'}, {'frequency': 'r', 'id': 144, 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'id': 145, 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'id': 146, 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'id': 147, 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'id': 148, 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'id': 149, 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'r', 'id': 150, 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'c', 'id': 151, 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'id': 152, 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'c', 'id': 153, 'synset': 'bristle_brush.n.01', 'synonyms': ['bristle_brush'], 'def': 'a brush that is made with the short stiff hairs of an animal or plant', 'name': 'bristle_brush'}, {'frequency': 'f', 'id': 154, 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'id': 155, 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'id': 156, 'synset': 'broom.n.01', 'synonyms': ['broom'], 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'id': 157, 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'id': 158, 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'id': 159, 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'id': 160, 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'id': 161, 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'id': 162, 'synset': 'bull.n.11', 'synonyms': ['bull'], 'def': 'mature male cow', 'name': 'bull'}, {'frequency': 'r', 'id': 163, 'synset': 'bulldog.n.01', 'synonyms': ['bulldog'], 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 
'bulldog'}, {'frequency': 'r', 'id': 164, 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'id': 165, 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'id': 166, 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'id': 167, 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'id': 168, 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'r', 'id': 169, 'synset': 'bully_beef.n.01', 'synonyms': ['corned_beef', 'corn_beef'], 'def': 'beef cured or pickled in brine', 'name': 'corned_beef'}, {'frequency': 'f', 'id': 170, 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'id': 171, 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'id': 172, 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'id': 173, 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'id': 174, 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'id': 175, 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'c', 'id': 176, 'synset': 'butcher_knife.n.01', 'synonyms': ['butcher_knife'], 'def': 'a large sharp knife for cutting or trimming meat', 'name': 'butcher_knife'}, {'frequency': 'c', 'id': 177, 'synset': 'butter.n.01', 'synonyms': ['butter'], 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'id': 178, 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'id': 179, 'synset': 'button.n.01', 'synonyms': ['button'], 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'id': 180, 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'id': 181, 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'r', 'id': 182, 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 
'cabin_car'}, {'frequency': 'f', 'id': 183, 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 'id': 184, 'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'id': 185, 'synset': 'cake.n.03', 'synonyms': ['cake'], 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'id': 186, 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'id': 187, 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'id': 188, 'synset': 'calf.n.01', 'synonyms': ['calf'], 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'id': 189, 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'id': 190, 'synset': 'camel.n.01', 'synonyms': ['camel'], 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'id': 191, 'synset': 'camera.n.01', 'synonyms': ['camera'], 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'id': 192, 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'id': 193, 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'id': 194, 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'id': 195, 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'r', 'id': 196, 'synset': 'candelabrum.n.01', 'synonyms': ['candelabrum', 'candelabra'], 'def': 'branched candlestick; ornamental; has several lights', 'name': 'candelabrum'}, {'frequency': 'f', 'id': 197, 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'id': 198, 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'id': 199, 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'id': 200, 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'id': 201, 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'id': 202, 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'r', 'id': 203, 'synset': 'cannon.n.02', 'synonyms': ['cannon'], 
'def': 'heavy gun fired from a tank', 'name': 'cannon'}, {'frequency': 'c', 'id': 204, 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'r', 'id': 205, 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'id': 206, 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'def': 'a flask for carrying water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'c', 'id': 207, 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'id': 208, 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'r', 'id': 209, 'synset': 'cape.n.02', 'synonyms': ['cape'], 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'id': 210, 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'id': 211, 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'id': 212, 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'def': 'a wheeled vehicle adapted to the rails of railroad', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'id': 213, 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'id': 214, 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'id': 215, 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'id': 216, 'synset': 'card.n.03', 'synonyms': ['card'], 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'r', 'id': 217, 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'id': 218, 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'id': 219, 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'id': 220, 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'id': 221, 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'c', 'id': 222, 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'id': 223, 'synset': 'cart.n.01', 'synonyms': ['cart'], 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'id': 224, 'synset': 'carton.n.02', 'synonyms': ['carton'], 'def': 'a box made of cardboard; opens by flaps on top', 'name': 'carton'}, {'frequency': 'c', 'id': 225, 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'id': 226, 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'id': 227, 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'id': 228, 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'id': 229, 'synset': 'cat.n.01', 'synonyms': ['cat'], 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'c', 'id': 230, 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'r', 'id': 231, 'synset': 'caviar.n.01', 'synonyms': ['caviar', 'caviare'], 'def': "salted roe of sturgeon or other large fish; usually served as an hors d'oeuvre", 'name': 'caviar'}, {'frequency': 'c', 'id': 232, 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'id': 233, 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'c', 'id': 234, 'synset': 'celery.n.01', 'synonyms': ['celery'], 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'id': 235, 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'id': 236, 'synset': 
'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 'chain_mail'}, {'frequency': 'f', 'id': 237, 'synset': 'chair.n.01', 'synonyms': ['chair'], 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'id': 238, 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'id': 239, 'synset': 'champagne.n.01', 'synonyms': ['champagne'], 'def': 'a white sparkling wine produced in Champagne or resembling that produced there', 'name': 'champagne'}, {'frequency': 'f', 'id': 240, 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'id': 241, 'synset': 'chap.n.04', 'synonyms': ['chap'], 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'id': 242, 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'id': 243, 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'id': 244, 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'id': 245, 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'r', 'id': 246, 'synset': 'chest_of_drawers.n.01', 'synonyms': ['chest_of_drawers_(furniture)', 'bureau_(furniture)', 'chest_(furniture)'], 'def': 'furniture with drawers for keeping clothes', 'name': 'chest_of_drawers_(furniture)'}, {'frequency': 'c', 'id': 247, 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'id': 248, 'synset': 'chicken_wire.n.01', 'synonyms': ['chicken_wire'], 'def': 'a galvanized wire network with a hexagonal mesh; used to build fences', 'name': 'chicken_wire'}, {'frequency': 'r', 'id': 249, 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'r', 'id': 250, 'synset': 'chihuahua.n.03', 'synonyms': ['Chihuahua'], 'def': 'an old breed of tiny short-haired dog with protruding eyes from Mexico', 'name': 'Chihuahua'}, {'frequency': 'r', 'id': 251, 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'id': 252, 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'id': 253, 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'id': 254, 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, 
{'frequency': 'r', 'id': 255, 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'id': 256, 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'id': 257, 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'id': 258, 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'id': 259, 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 'id': 260, 'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'def': 'necklace that fits tightly around the neck', 'name': 'choker'}, {'frequency': 'f', 'id': 261, 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'c', 'id': 262, 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'id': 263, 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'id': 264, 'synset': 'chute.n.02', 'synonyms': ['slide'], 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'id': 265, 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'id': 266, 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'c', 'id': 267, 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'id': 268, 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'id': 269, 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'id': 270, 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'r', 'id': 271, 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'id': 272, 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'id': 273, 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'id': 274, 'synset': 'clip.n.03', 'synonyms': ['clip'], 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'id': 275, 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'def': 'a small 
writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'f', 'id': 276, 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'id': 277, 'synset': 'clock_tower.n.01', 'synonyms': ['clock_tower'], 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'id': 278, 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'id': 279, 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'id': 280, 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'id': 281, 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'id': 282, 'synset': 'coat.n.01', 'synonyms': ['coat'], 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'id': 283, 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'r', 'id': 284, 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'id': 285, 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'c', 'id': 286, 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'r', 'id': 287, 'synset': 'coffee_filter.n.01', 'synonyms': ['coffee_filter'], 'def': 'filter (usually of paper) that passes the coffee and retains the coffee grounds', 'name': 'coffee_filter'}, {'frequency': 'f', 'id': 288, 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'id': 289, 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'id': 290, 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'id': 291, 'synset': 'coil.n.05', 'synonyms': ['coil'], 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'id': 292, 'synset': 'coin.n.01', 'synonyms': ['coin'], 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'r', 'id': 293, 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'id': 294, 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'id': 295, 'synset': 'coloring_material.n.01', 
'synonyms': ['coloring_material', 'colouring_material'], 'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'id': 296, 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'id': 297, 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'id': 298, 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'f', 'id': 299, 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'r', 'id': 300, 'synset': 'concrete_mixer.n.01', 'synonyms': ['concrete_mixer', 'cement_mixer'], 'def': 'a machine with a large revolving drum in which cement/concrete is mixed', 'name': 'concrete_mixer'}, {'frequency': 'f', 'id': 301, 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'id': 302, 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'id': 303, 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'id': 304, 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'c', 'id': 305, 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'id': 306, 'synset': 'cookie_jar.n.01', 'synonyms': ['cookie_jar', 'cooky_jar'], 'def': 'a jar in which cookies are kept (and sometimes money is hidden)', 'name': 'cookie_jar'}, {'frequency': 'r', 'id': 307, 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'id': 308, 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'c', 'id': 309, 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'id': 310, 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'r', 'id': 311, 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'c', 'id': 312, 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'def': 'ears of corn that can be prepared and served for human food', 'name': 'edible_corn'}, {'frequency': 'r', 'id': 313, 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 'id': 314, 'synset': 'cornet.n.01', 'synonyms': ['cornet', 
'horn', 'trumpet'], 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'id': 315, 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'id': 316, 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'r', 'id': 317, 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'r', 'id': 318, 'synset': 'cos.n.02', 'synonyms': ['romaine_lettuce'], 'def': 'lettuce with long dark-green leaves in a loosely packed elongated head', 'name': 'romaine_lettuce'}, {'frequency': 'c', 'id': 319, 'synset': 'costume.n.04', 'synonyms': ['costume'], 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'id': 320, 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'id': 321, 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'r', 'id': 322, 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'id': 323, 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'r', 'id': 324, 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'c', 'id': 325, 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'id': 326, 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'id': 327, 'synset': 'crate.n.01', 'synonyms': ['crate'], 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'r', 'id': 328, 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'id': 329, 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'r', 'id': 330, 'synset': 'credit_card.n.01', 'synonyms': ['credit_card', 'charge_card', 'debit_card'], 'def': 'a card, usually plastic, used to pay for goods and services', 'name': 'credit_card'}, {'frequency': 'c', 'id': 331, 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'id': 332, 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'id': 333, 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'def': 'an earthen jar (made of baked clay)', 'name': 'crock_pot'}, {'frequency': 'f', 'id': 334, 'synset': 
'crossbar.n.01', 'synonyms': ['crossbar'], 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'id': 335, 'synset': 'crouton.n.01', 'synonyms': ['crouton'], 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'r', 'id': 336, 'synset': 'crow.n.01', 'synonyms': ['crow'], 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'c', 'id': 337, 'synset': 'crown.n.04', 'synonyms': ['crown'], 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'id': 338, 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'id': 339, 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'id': 340, 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'c', 'id': 341, 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'r', 'id': 342, 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'id': 343, 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'r', 'id': 344, 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'id': 345, 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'id': 346, 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'id': 347, 'synset': 'cup.n.01', 'synonyms': ['cup'], 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'id': 348, 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'def': 'a metal vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'c', 'id': 349, 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'id': 350, 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'id': 351, 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'id': 352, 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, {'frequency': 'f', 'id': 353, 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'id': 354, 'synset': 
'custard.n.01', 'synonyms': ['custard'], 'def': 'sweetened mixture of milk and eggs baked or boiled or frozen', 'name': 'custard'}, {'frequency': 'c', 'id': 355, 'synset': 'cutter.n.06', 'synonyms': ['cutting_tool'], 'def': 'a cutting implement; a tool for cutting', 'name': 'cutting_tool'}, {'frequency': 'r', 'id': 356, 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'id': 357, 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'id': 358, 'synset': 'dachshund.n.01', 'synonyms': ['dachshund', 'dachsie', 'badger_dog'], 'def': 'small long-bodied short-legged breed of dog having a short sleek coat and long drooping ears', 'name': 'dachshund'}, {'frequency': 'r', 'id': 359, 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'id': 360, 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'id': 361, 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'id': 362, 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'id': 363, 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'id': 364, 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'id': 365, 'synset': 'desk.n.01', 'synonyms': ['desk'], 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'id': 366, 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'id': 367, 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'id': 368, 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'def': 'a daily written record of (usually personal) experiences and observations', 'name': 'diary'}, {'frequency': 'r', 'id': 369, 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'id': 370, 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'id': 371, 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'id': 372, 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'c', 'id': 373, 'synset': 'dish.n.01', 'synonyms': ['dish'], 'def': 'a piece of dishware normally used as a container for 
holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'id': 374, 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, {'frequency': 'c', 'id': 375, 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'def': 'a cloth for washing dishes', 'name': 'dishrag'}, {'frequency': 'c', 'id': 376, 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'id': 377, 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'id': 378, 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid'], 'def': 'a low-sudsing detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'r', 'id': 379, 'synset': 'diskette.n.01', 'synonyms': ['diskette', 'floppy', 'floppy_disk'], 'def': 'a small plastic magnetic disk enclosed in a stiff envelope used to store data', 'name': 'diskette'}, {'frequency': 'c', 'id': 380, 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'c', 'id': 381, 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'id': 382, 'synset': 'dog.n.01', 'synonyms': ['dog'], 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'id': 383, 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'c', 'id': 384, 'synset': 'doll.n.01', 'synonyms': ['doll'], 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'id': 385, 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'id': 386, 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'id': 387, 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'r', 'id': 388, 'synset': 'domino.n.03', 'synonyms': ['eye_mask'], 'def': 'a mask covering the upper part of the face but with holes for the eyes', 'name': 'eye_mask'}, {'frequency': 'r', 'id': 389, 'synset': 'doorbell.n.01', 'synonyms': ['doorbell', 'buzzer'], 'def': 'a button at an outer door that gives a ringing or buzzing signal when pushed', 'name': 'doorbell'}, {'frequency': 'f', 'id': 390, 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'id': 391, 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'id': 392, 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'id': 393, 'synset': 'dove.n.01', 'synonyms': ['dove'], 
'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'id': 394, 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'id': 395, 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'def': 'a boxlike container in a piece of furniture; made so as to slide in and out', 'name': 'drawer'}, {'frequency': 'c', 'id': 396, 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'id': 397, 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'id': 398, 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'c', 'id': 399, 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'c', 'id': 400, 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'id': 401, 'synset': 'drill.n.01', 'synonyms': ['drill'], 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'id': 402, 'synset': 'drinking_fountain.n.01', 'synonyms': ['drinking_fountain'], 'def': 'a public fountain to provide a jet of drinking water', 'name': 'drinking_fountain'}, {'frequency': 'r', 'id': 403, 'synset': 'drone.n.04', 'synonyms': ['drone'], 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'id': 404, 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'id': 405, 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'id': 406, 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'id': 407, 'synset': 'duck.n.01', 'synonyms': ['duck'], 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'r', 'id': 408, 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'id': 409, 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'id': 410, 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'def': 'a large cylindrical bag of heavy cloth', 'name': 'duffel_bag'}, {'frequency': 'r', 'id': 411, 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'id': 412, 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'id': 413, 'synset': 'dustpan.n.02', 
'synonyms': ['dustpan'], 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'r', 'id': 414, 'synset': 'dutch_oven.n.02', 'synonyms': ['Dutch_oven'], 'def': 'iron or earthenware cooking pot; used for stews', 'name': 'Dutch_oven'}, {'frequency': 'c', 'id': 415, 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'id': 416, 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'id': 417, 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'def': 'a soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'id': 418, 'synset': 'earring.n.01', 'synonyms': ['earring'], 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'id': 419, 'synset': 'easel.n.01', 'synonyms': ['easel'], 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'id': 420, 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'id': 421, 'synset': 'eel.n.01', 'synonyms': ['eel'], 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'id': 422, 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'id': 423, 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'id': 424, 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'id': 425, 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'id': 426, 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'id': 427, 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'id': 428, 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'id': 429, 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'r', 'id': 430, 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'id': 431, 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'id': 432, 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'id': 433, 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 
'id': 434, 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'id': 435, 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'id': 436, 'synset': 'fan.n.01', 'synonyms': ['fan'], 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'id': 437, 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'id': 438, 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'id': 439, 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'id': 440, 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'r', 'id': 441, 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'id': 442, 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'id': 443, 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'id': 444, 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'id': 445, 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'id': 446, 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'id': 447, 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'c', 'id': 448, 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'c', 'id': 449, 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'id': 450, 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'id': 451, 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'id': 452, 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 
'hydrant'], 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'c', 'id': 453, 'synset': 'fish.n.01', 'synonyms': ['fish'], 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'r', 'id': 454, 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'id': 455, 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'def': 'a transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'r', 'id': 456, 'synset': 'fishing_boat.n.01', 'synonyms': ['fishing_boat', 'fishing_vessel'], 'def': 'a vessel for fishing', 'name': 'fishing_boat'}, {'frequency': 'c', 'id': 457, 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'id': 458, 'synset': 'flag.n.01', 'synonyms': ['flag'], 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'id': 459, 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'id': 460, 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'id': 461, 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'r', 'id': 462, 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'id': 463, 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'id': 464, 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'id': 465, 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'id': 466, 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'id': 467, 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'id': 468, 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'r', 'id': 469, 'synset': 'foal.n.01', 'synonyms': ['foal'], 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'id': 470, 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'id': 471, 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'id': 472, 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 
'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'id': 473, 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'id': 474, 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'id': 475, 'synset': 'fork.n.01', 'synonyms': ['fork'], 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'r', 'id': 476, 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'r', 'id': 477, 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'r', 'id': 478, 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'id': 479, 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'def': 'anything that freshens', 'name': 'freshener'}, {'frequency': 'f', 'id': 480, 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'id': 481, 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'id': 482, 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'r', 'id': 483, 'synset': 'fruit_salad.n.01', 'synonyms': ['fruit_salad'], 'def': 'salad composed of fruits', 'name': 'fruit_salad'}, {'frequency': 'c', 'id': 484, 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'id': 485, 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'id': 486, 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'c', 'id': 487, 'synset': 'futon.n.01', 'synonyms': ['futon'], 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'id': 488, 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'id': 489, 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'id': 490, 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'id': 491, 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'id': 492, 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 
'gargle'}, {'frequency': 'r', 'id': 493, 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'id': 494, 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'id': 495, 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'r', 'id': 496, 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'def': 'small swift graceful antelope of Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'id': 497, 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'id': 498, 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'c', 'id': 499, 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'id': 500, 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'id': 501, 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'id': 502, 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'id': 503, 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'id': 504, 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'id': 505, 'synset': 'globe.n.03', 'synonyms': ['globe'], 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'id': 506, 'synset': 'glove.n.02', 'synonyms': ['glove'], 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'id': 507, 'synset': 'goat.n.01', 'synonyms': ['goat'], 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'id': 508, 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'id': 509, 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'r', 'id': 510, 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'id': 511, 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'id': 512, 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'def': 'long narrow flat-bottomed boat propelled by 
sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'id': 513, 'synset': 'goose.n.01', 'synonyms': ['goose'], 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'id': 514, 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'id': 515, 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'r', 'id': 516, 'synset': 'gown.n.04', 'synonyms': ['surgical_gown', 'scrubs_(surgical_clothing)'], 'def': 'protective garment worn by surgeons during operations', 'name': 'surgical_gown'}, {'frequency': 'f', 'id': 517, 'synset': 'grape.n.01', 'synonyms': ['grape'], 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'r', 'id': 518, 'synset': 'grasshopper.n.01', 'synonyms': ['grasshopper'], 'def': 'plant-eating insect with hind legs adapted for leaping', 'name': 'grasshopper'}, {'frequency': 'c', 'id': 519, 'synset': 'grater.n.01', 'synonyms': ['grater'], 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'id': 520, 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'id': 521, 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'c', 'id': 522, 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'c', 'id': 523, 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'id': 524, 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'r', 'id': 525, 'synset': 'grillroom.n.01', 'synonyms': ['grillroom', 'grill_(restaurant)'], 'def': 'a restaurant where food is cooked on a grill', 'name': 'grillroom'}, {'frequency': 'r', 'id': 526, 'synset': 'grinder.n.04', 'synonyms': ['grinder_(tool)'], 'def': 'a machine tool that polishes metal', 'name': 'grinder_(tool)'}, {'frequency': 'r', 'id': 527, 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'id': 528, 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'id': 529, 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'r', 'id': 530, 'synset': 'guacamole.n.01', 'synonyms': ['guacamole'], 'def': 'a dip made of mashed avocado mixed with chopped onions and other seasonings', 'name': 'guacamole'}, {'frequency': 'f', 'id': 531, 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'id': 532, 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 
'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'id': 533, 'synset': 'gun.n.01', 'synonyms': ['gun'], 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'r', 'id': 534, 'synset': 'hair_spray.n.01', 'synonyms': ['hair_spray'], 'def': 'substance sprayed on the hair to hold it in place', 'name': 'hair_spray'}, {'frequency': 'c', 'id': 535, 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'id': 536, 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'id': 537, 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'f', 'id': 538, 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 'id': 539, 'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'id': 540, 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'r', 'id': 541, 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'id': 542, 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'r', 'id': 543, 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'c', 'id': 544, 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'id': 545, 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'id': 546, 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'id': 547, 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'id': 548, 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'id': 549, 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'id': 550, 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'id': 551, 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'id': 552, 'synset': 
'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'id': 553, 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'id': 554, 'synset': 'hat.n.01', 'synonyms': ['hat'], 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'id': 555, 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'r', 'id': 556, 'synset': 'hatch.n.03', 'synonyms': ['hatch'], 'def': 'a movable barrier covering a hatchway', 'name': 'hatch'}, {'frequency': 'c', 'id': 557, 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'def': 'a garment that covers the head and face', 'name': 'veil'}, {'frequency': 'f', 'id': 558, 'synset': 'headband.n.01', 'synonyms': ['headband'], 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'id': 559, 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'def': 'a vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'id': 560, 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'id': 561, 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'id': 562, 'synset': 'headset.n.01', 'synonyms': ['headset'], 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'id': 563, 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'r', 'id': 564, 'synset': 'hearing_aid.n.02', 'synonyms': ['hearing_aid'], 'def': 'an acoustic device used to direct sound to the ear of a hearing-impaired person', 'name': 'hearing_aid'}, {'frequency': 'c', 'id': 565, 'synset': 'heart.n.02', 'synonyms': ['heart'], 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'id': 566, 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'id': 567, 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'id': 568, 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'id': 569, 'synset': 'heron.n.02', 'synonyms': ['heron'], 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'id': 570, 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'id': 571, 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'def': 'a joint that holds two parts together so that one can swing 
relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'id': 572, 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'id': 573, 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'id': 574, 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'id': 575, 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'id': 576, 'synset': 'honey.n.01', 'synonyms': ['honey'], 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'id': 577, 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'id': 578, 'synset': 'hook.n.05', 'synonyms': ['hook'], 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'f', 'id': 579, 'synset': 'horse.n.01', 'synonyms': ['horse'], 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'id': 580, 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'id': 581, 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'id': 582, 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'id': 583, 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'id': 584, 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'id': 585, 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'r', 'id': 586, 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'id': 587, 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'c', 'id': 588, 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'id': 589, 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'id': 590, 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'id': 591, 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'def': 'an appliance included in some electric refrigerators for making ice cubes', 
'name': 'ice_maker'}, {'frequency': 'r', 'id': 592, 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'id': 593, 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'r', 'id': 594, 'synset': 'ice_tea.n.01', 'synonyms': ['ice_tea', 'iced_tea'], 'def': 'strong tea served over ice', 'name': 'ice_tea'}, {'frequency': 'c', 'id': 595, 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'id': 596, 'synset': 'incense.n.01', 'synonyms': ['incense'], 'def': 'a substance that produces a fragrant odor when burned', 'name': 'incense'}, {'frequency': 'r', 'id': 597, 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'c', 'id': 598, 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'id': 599, 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 'smoothing_iron_(for_clothing)'], 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'r', 'id': 600, 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'id': 601, 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'r', 'id': 602, 'synset': 'jam.n.01', 'synonyms': ['jam'], 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'id': 603, 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'id': 604, 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'id': 605, 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'id': 606, 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'id': 607, 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'c', 'id': 608, 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'id': 609, 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'r', 'id': 610, 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'id': 611, 'synset': 'kayak.n.01', 'synonyms': 
['kayak'], 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'id': 612, 'synset': 'keg.n.02', 'synonyms': ['keg'], 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'id': 613, 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'id': 614, 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'id': 615, 'synset': 'key.n.01', 'synonyms': ['key'], 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'id': 616, 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'r', 'id': 617, 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'def': 'a knee-length pleated tartan skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'id': 618, 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'id': 619, 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'c', 'id': 620, 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'id': 621, 'synset': 'kite.n.03', 'synonyms': ['kite'], 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'id': 622, 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'id': 623, 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'id': 624, 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'id': 625, 'synset': 'knife.n.01', 'synonyms': ['knife'], 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'id': 626, 'synset': 'knight.n.02', 'synonyms': ['knight_(chess_piece)', 'horse_(chess_piece)'], 'def': 'a chess game piece shaped to resemble the head of a horse', 'name': 'knight_(chess_piece)'}, {'frequency': 'r', 'id': 627, 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'id': 628, 'synset': 'knob.n.02', 'synonyms': ['knob'], 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'id': 629, 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'id': 630, 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'id': 631, 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'def': 'a light coat worn to protect 
clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'id': 632, 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'id': 633, 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'r', 'id': 634, 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'c', 'id': 635, 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'id': 636, 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'id': 637, 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'id': 638, 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'def': 'a metal post supporting an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'id': 639, 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'id': 640, 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'id': 641, 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'id': 642, 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'id': 643, 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'c', 'id': 644, 'synset': 'latch.n.02', 'synonyms': ['latch'], 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'id': 645, 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'id': 646, 'synset': 'leather.n.01', 'synonyms': ['leather'], 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'id': 647, 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'id': 648, 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'f', 'id': 649, 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'id': 650, 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'id': 651, 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'def': 'leafy plant 
commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'id': 652, 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'id': 653, 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'id': 654, 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'id': 655, 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'def': 'glass bulb or tube shaped electric device that emits light (DO NOT MARK LAMPS AS A WHOLE)', 'name': 'lightbulb'}, {'frequency': 'r', 'id': 656, 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'def': 'a metallic conductor that is attached to a high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'c', 'id': 657, 'synset': 'lime.n.06', 'synonyms': ['lime'], 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'id': 658, 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'r', 'id': 659, 'synset': 'linen.n.02', 'synonyms': ['linen_paper'], 'def': 'a high-quality paper made of linen fibers or with a linen finish', 'name': 'linen_paper'}, {'frequency': 'c', 'id': 660, 'synset': 'lion.n.01', 'synonyms': ['lion'], 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'id': 661, 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'c', 'id': 662, 'synset': 'lipstick.n.01', 'synonyms': ['lipstick', 'lip_rouge'], 'def': 'makeup that is used to color the lips', 'name': 'lipstick'}, {'frequency': 'r', 'id': 663, 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'def': 'an alcoholic beverage that is distilled rather than fermented', 'name': 'liquor'}, {'frequency': 'r', 'id': 664, 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'r', 'id': 665, 'synset': 'loafer.n.02', 'synonyms': ['Loafer_(type_of_shoe)'], 'def': 'a low leather step-in shoe', 'name': 'Loafer_(type_of_shoe)'}, {'frequency': 'f', 'id': 666, 'synset': 'log.n.01', 'synonyms': ['log'], 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'id': 667, 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'c', 'id': 668, 'synset': 'lotion.n.01', 'synonyms': ['lotion'], 'def': 'any of various cosmetic preparations that are applied to the skin', 'name': 'lotion'}, {'frequency': 'f', 'id': 669, 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'id': 670, 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 
'r', 'id': 671, 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'id': 672, 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'id': 673, 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'r', 'id': 674, 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'c', 'id': 675, 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'id': 676, 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'id': 677, 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'c', 'id': 678, 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'id': 679, 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'def': 'a container (usually in a barn or stable) from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'id': 680, 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'c', 'id': 681, 'synset': 'map.n.01', 'synonyms': ['map'], 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'c', 'id': 682, 'synset': 'marker.n.03', 'synonyms': ['marker'], 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'id': 683, 'synset': 'martini.n.01', 'synonyms': ['martini'], 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'id': 684, 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'id': 685, 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'id': 686, 'synset': 'masher.n.02', 'synonyms': ['masher'], 'def': 'a kitchen utensil used for mashing (e.g. 
potatoes)', 'name': 'masher'}, {'frequency': 'f', 'id': 687, 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'id': 688, 'synset': 'mast.n.01', 'synonyms': ['mast'], 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'id': 689, 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'id': 690, 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'id': 691, 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'id': 692, 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'id': 693, 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'id': 694, 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'id': 695, 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'r', 'id': 696, 'synset': 'melon.n.01', 'synonyms': ['melon'], 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'id': 697, 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'id': 698, 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'id': 699, 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'def': 'kitchen appliance that cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'id': 700, 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'c', 'id': 701, 'synset': 'milk.n.01', 'synonyms': ['milk'], 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'f', 'id': 702, 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'id': 703, 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'id': 704, 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'id': 705, 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'id': 706, 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'def': 'a kitchen utensil that is used for mixing 
foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'id': 707, 'synset': 'money.n.03', 'synonyms': ['money'], 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'id': 708, 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'id': 709, 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'id': 710, 'synset': 'motor.n.01', 'synonyms': ['motor'], 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'id': 711, 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'id': 712, 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'r', 'id': 713, 'synset': 'motorboat.n.01', 'synonyms': ['motorboat', 'powerboat'], 'def': 'a boat propelled by an internal-combustion engine', 'name': 'motorboat'}, {'frequency': 'f', 'id': 714, 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'id': 715, 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'r', 'id': 716, 'synset': 'mouse.n.01', 'synonyms': ['mouse_(animal_rodent)'], 'def': 'a small rodent with pointed snouts and small ears on elongated bodies with slender usually hairless tails', 'name': 'mouse_(animal_rodent)'}, {'frequency': 'f', 'id': 717, 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'def': 'a computer input device that controls an on-screen pointer', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'id': 718, 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 'mousepad'}, {'frequency': 'c', 'id': 719, 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'id': 720, 'synset': 'mug.n.04', 'synonyms': ['mug'], 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'id': 721, 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'id': 722, 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'r', 'id': 723, 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'id': 724, 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'r', 'id': 725, 'synset': 'nameplate.n.01', 'synonyms': ['nameplate'], 'def': 'a plate bearing a name', 'name': 'nameplate'}, 
{'frequency': 'f', 'id': 726, 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'id': 727, 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'id': 728, 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'id': 729, 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'r', 'id': 730, 'synset': 'needle.n.03', 'synonyms': ['needle'], 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'id': 731, 'synset': 'nest.n.01', 'synonyms': ['nest'], 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'r', 'id': 732, 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'id': 733, 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'id': 734, 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'r', 'id': 735, 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'id': 736, 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'id': 737, 'synset': 'notepad.n.01', 'synonyms': ['notepad'], 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'c', 'id': 738, 'synset': 'nut.n.03', 'synonyms': ['nut'], 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'id': 739, 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'c', 'id': 740, 'synset': 'oar.n.01', 'synonyms': ['oar'], 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'id': 741, 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'id': 742, 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'id': 743, 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'id': 744, 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'def': 'oil from olives', 'name': 'olive_oil'}, 
{'frequency': 'r', 'id': 745, 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'id': 746, 'synset': 'onion.n.01', 'synonyms': ['onion'], 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'id': 747, 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'id': 748, 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'r', 'id': 749, 'synset': 'oregano.n.01', 'synonyms': ['oregano', 'marjoram'], 'def': 'aromatic Eurasian perennial herb used in cooking and baking', 'name': 'oregano'}, {'frequency': 'c', 'id': 750, 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'c', 'id': 751, 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'def': 'thick cushion used as a seat', 'name': 'ottoman'}, {'frequency': 'c', 'id': 752, 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'id': 753, 'synset': 'owl.n.01', 'synonyms': ['owl'], 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'id': 754, 'synset': 'packet.n.03', 'synonyms': ['packet'], 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'id': 755, 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'id': 756, 'synset': 'pad.n.04', 'synonyms': ['pad'], 'def': 'a flat mass of soft material used for protection, stuffing, or comfort', 'name': 'pad'}, {'frequency': 'c', 'id': 757, 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'def': 'a short light oar used without an oarlock to propel a canoe or small boat', 'name': 'paddle'}, {'frequency': 'c', 'id': 758, 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'r', 'id': 759, 'synset': 'paintbox.n.01', 'synonyms': ['paintbox'], 'def': "a box containing a collection of cubes or tubes of artists' paint", 'name': 'paintbox'}, {'frequency': 'c', 'id': 760, 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'id': 761, 'synset': 'painting.n.01', 'synonyms': ['painting'], 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'c', 'id': 762, 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'id': 763, 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'id': 764, 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'def': 'cooking utensil consisting of a wide 
metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'id': 765, 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'id': 766, 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'id': 767, 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'id': 768, 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'r', 'id': 769, 'synset': 'paper_clip.n.01', 'synonyms': ['paperclip'], 'def': 'a wire or plastic clip for holding sheets of paper together', 'name': 'paperclip'}, {'frequency': 'f', 'id': 770, 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'id': 771, 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'id': 772, 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'id': 773, 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'id': 774, 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'r', 'id': 775, 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'id': 776, 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'r', 'id': 777, 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'r', 'id': 778, 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'id': 779, 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'id': 780, 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'id': 781, 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'id': 782, 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'r', 'id': 783, 'synset': 'passport.n.02', 'synonyms': ['passport'], 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home 
country', 'name': 'passport'}, {'frequency': 'f', 'id': 784, 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'id': 785, 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'id': 786, 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'id': 787, 'synset': 'peach.n.03', 'synonyms': ['peach'], 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'id': 788, 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'c', 'id': 789, 'synset': 'pear.n.01', 'synonyms': ['pear'], 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'r', 'id': 790, 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'id': 791, 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'id': 792, 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'id': 793, 'synset': 'pen.n.01', 'synonyms': ['pen'], 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'c', 'id': 794, 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'id': 795, 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'id': 796, 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'id': 797, 'synset': 'pendulum.n.01', 'synonyms': ['pendulum'], 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'id': 798, 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'id': 799, 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'id': 800, 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'c', 'id': 801, 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'id': 802, 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'id': 803, 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'def': 'a toiletry that emits and diffuses a fragrant odor', 
'name': 'perfume'}, {'frequency': 'r', 'id': 804, 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'id': 805, 'synset': 'person.n.01', 'synonyms': ['baby', 'child', 'boy', 'girl', 'man', 'woman', 'person', 'human'], 'def': 'a human being', 'name': 'baby'}, {'frequency': 'r', 'id': 806, 'synset': 'pet.n.01', 'synonyms': ['pet'], 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'r', 'id': 807, 'synset': 'petfood.n.01', 'synonyms': ['petfood', 'pet-food'], 'def': 'food prepared for animal pets', 'name': 'petfood'}, {'frequency': 'r', 'id': 808, 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'id': 809, 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'id': 810, 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'c', 'id': 811, 'synset': 'piano.n.01', 'synonyms': ['piano'], 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'id': 812, 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'id': 813, 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'id': 814, 'synset': 'pie.n.01', 'synonyms': ['pie'], 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'id': 815, 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'id': 816, 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'def': "a child's coin bank (often shaped like a pig)", 'name': 'piggy_bank'}, {'frequency': 'f', 'id': 817, 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'id': 818, 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'id': 819, 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'id': 820, 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'id': 821, 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'id': 822, 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a 
stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'id': 823, 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'id': 824, 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'id': 825, 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'r', 'id': 826, 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'id': 827, 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'id': 828, 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'id': 829, 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'id': 830, 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'id': 831, 'synset': 'plate.n.04', 'synonyms': ['plate'], 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'id': 832, 'synset': 'platter.n.01', 'synonyms': ['platter'], 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'id': 833, 'synset': 'playing_card.n.01', 'synonyms': ['playing_card'], 'def': 'one of a pack of cards that are used to play card games', 'name': 'playing_card'}, {'frequency': 'r', 'id': 834, 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'id': 835, 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'id': 836, 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'id': 837, 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'id': 838, 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'id': 839, 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'id': 840, 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'r', 'id': 841, 'synset': 
'police_van.n.01', 'synonyms': ['police_van', 'police_wagon', 'paddy_wagon', 'patrol_wagon'], 'def': 'van used by police to transport prisoners', 'name': 'police_van'}, {'frequency': 'f', 'id': 842, 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'id': 843, 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'id': 844, 'synset': 'pony.n.05', 'synonyms': ['pony'], 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'id': 845, 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'id': 846, 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'r', 'id': 847, 'synset': 'portrait.n.02', 'synonyms': ['portrait', 'portrayal'], 'def': 'any likeness of a person, in any medium', 'name': 'portrait'}, {'frequency': 'c', 'id': 848, 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'id': 849, 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'id': 850, 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'id': 851, 'synset': 'pot.n.01', 'synonyms': ['pot'], 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'id': 852, 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'id': 853, 'synset': 'potato.n.01', 'synonyms': ['potato'], 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'id': 854, 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'id': 855, 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'id': 856, 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'r', 'id': 857, 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'id': 858, 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'f', 'id': 859, 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'id': 860, 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 
'projectile_(weapon)'}, {'frequency': 'c', 'id': 861, 'synset': 'projector.n.02', 'synonyms': ['projector'], 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'id': 862, 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'id': 863, 'synset': 'prune.n.01', 'synonyms': ['prune'], 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'id': 864, 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'id': 865, 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'id': 866, 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'id': 867, 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'id': 868, 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'id': 869, 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'def': 'a tool for making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'id': 870, 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'r', 'id': 871, 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'id': 872, 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'id': 873, 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'id': 874, 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'id': 875, 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'id': 876, 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'id': 877, 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'id': 878, 'synset': 'radar.n.01', 'synonyms': ['radar'], 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'c', 'id': 879, 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, 
{'frequency': 'c', 'id': 880, 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'id': 881, 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'id': 882, 'synset': 'raft.n.01', 'synonyms': ['raft'], 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'id': 883, 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'id': 884, 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'id': 885, 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'id': 886, 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'id': 887, 'synset': 'rat.n.01', 'synonyms': ['rat'], 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'id': 888, 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'def': 'a blade that has very sharp edge', 'name': 'razorblade'}, {'frequency': 'c', 'id': 889, 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'id': 890, 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'def': 'car mirror that reflects the view out of the rear window', 'name': 'rearview_mirror'}, {'frequency': 'c', 'id': 891, 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'id': 892, 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'r', 'id': 893, 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'r', 'id': 894, 'synset': 'red_cabbage.n.02', 'synonyms': ['red_cabbage'], 'def': 'compact head of purplish-red leaves', 'name': 'red_cabbage'}, {'frequency': 'f', 'id': 895, 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'id': 896, 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'id': 897, 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the 
snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'id': 898, 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'r', 'id': 899, 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'id': 900, 'synset': 'ring.n.08', 'synonyms': ['ring'], 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'id': 901, 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'id': 902, 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'id': 903, 'synset': 'robe.n.01', 'synonyms': ['robe'], 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'id': 904, 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'id': 905, 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'id': 906, 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'id': 907, 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'def': 'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'id': 908, 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'id': 909, 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'id': 910, 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'id': 911, 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'id': 912, 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'id': 913, 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'id': 914, 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'id': 915, 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'id': 916, 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'c', 'id': 917, 'synset': 'sail.n.01', 'synonyms': ['sail'], 'def': 'a large piece of fabric by means of 
which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'c', 'id': 918, 'synset': 'salad.n.01', 'synonyms': ['salad'], 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'id': 919, 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'r', 'id': 920, 'synset': 'salami.n.01', 'synonyms': ['salami'], 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'r', 'id': 921, 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'id': 922, 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'r', 'id': 923, 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'id': 924, 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'id': 925, 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, {'frequency': 'f', 'id': 926, 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'id': 927, 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'id': 928, 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'id': 929, 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'id': 930, 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'id': 931, 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'id': 932, 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'id': 933, 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'id': 934, 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'id': 935, 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'id': 936, 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, 
{'frequency': 'f', 'id': 937, 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'c', 'id': 938, 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'c', 'id': 939, 'synset': 'scrambled_eggs.n.01', 'synonyms': ['scrambled_eggs'], 'def': 'eggs beaten and cooked to a soft firm consistency while stirring', 'name': 'scrambled_eggs'}, {'frequency': 'r', 'id': 940, 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'r', 'id': 941, 'synset': 'scratcher.n.03', 'synonyms': ['scratcher'], 'def': 'a device used for scratching', 'name': 'scratcher'}, {'frequency': 'c', 'id': 942, 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'c', 'id': 943, 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'id': 944, 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'r', 'id': 945, 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'r', 'id': 946, 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'id': 947, 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'def': 'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'id': 948, 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'r', 'id': 949, 'synset': 'seedling.n.01', 'synonyms': ['seedling'], 'def': 'young plant or tree grown from a seed', 'name': 'seedling'}, {'frequency': 'c', 'id': 950, 'synset': 'serving_dish.n.01', 'synonyms': ['serving_dish'], 'def': 'a dish used for serving food', 'name': 'serving_dish'}, {'frequency': 'r', 'id': 951, 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'r', 'id': 952, 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'id': 953, 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'r', 'id': 954, 'synset': 'shark.n.01', 'synonyms': ['shark'], 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'id': 955, 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'id': 956, 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'id': 957, 'synset': 
'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'id': 958, 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'id': 959, 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'id': 960, 'synset': 'shears.n.01', 'synonyms': ['shears'], 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'id': 961, 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'id': 962, 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'id': 963, 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'r', 'id': 964, 'synset': 'shield.n.02', 'synonyms': ['shield'], 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'id': 965, 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'id': 966, 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'c', 'id': 967, 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'id': 968, 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'id': 969, 'synset': 'short_pants.n.01', 'synonyms': ['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'id': 970, 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'c', 'id': 971, 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'id': 972, 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'id': 973, 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'f', 'id': 974, 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'id': 975, 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'r', 'id': 976, 'synset': 'sieve.n.01', 'synonyms': ['sieve', 
'screen_(sieve)'], 'def': 'a strainer for separating lumps from powdered material or grading particles', 'name': 'sieve'}, {'frequency': 'f', 'id': 977, 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'id': 978, 'synset': 'silo.n.01', 'synonyms': ['silo'], 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'id': 979, 'synset': 'sink.n.01', 'synonyms': ['sink'], 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'id': 980, 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'id': 981, 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'id': 982, 'synset': 'ski.n.01', 'synonyms': ['ski'], 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'id': 983, 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'id': 984, 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'id': 985, 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'id': 986, 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'c', 'id': 987, 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'id': 988, 'synset': 'sleeping_bag.n.01', 'synonyms': ['sleeping_bag'], 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'id': 989, 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'id': 990, 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'id': 991, 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'id': 992, 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'id': 993, 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'id': 994, 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'id': 995, 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'def': 'tracked vehicle for 
travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'id': 996, 'synset': 'soap.n.01', 'synonyms': ['soap'], 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'id': 997, 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'id': 998, 'synset': 'sock.n.01', 'synonyms': ['sock'], 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'r', 'id': 999, 'synset': 'soda_fountain.n.02', 'synonyms': ['soda_fountain'], 'def': 'an apparatus for dispensing soda water', 'name': 'soda_fountain'}, {'frequency': 'r', 'id': 1000, 'synset': 'soda_water.n.01', 'synonyms': ['carbonated_water', 'club_soda', 'seltzer', 'sparkling_water'], 'def': 'effervescent beverage artificially charged with carbon dioxide', 'name': 'carbonated_water'}, {'frequency': 'f', 'id': 1001, 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'id': 1002, 'synset': 'softball.n.01', 'synonyms': ['softball'], 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'id': 1003, 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'id': 1004, 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'c', 'id': 1005, 'synset': 'soup.n.01', 'synonyms': ['soup'], 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'id': 1006, 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'id': 1007, 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'id': 1008, 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'id': 1009, 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 'soybean_milk', 'soymilk'], 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'id': 1010, 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'id': 1011, 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'id': 1012, 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'id': 1013, 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'id': 1014, 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 
'eyeglasses', 'glasses'], 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'id': 1015, 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'r', 'id': 1016, 'synset': 'spider.n.01', 'synonyms': ['spider'], 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'c', 'id': 1017, 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'id': 1018, 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'id': 1019, 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'id': 1020, 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'id': 1021, 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'c', 'id': 1022, 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'r', 'id': 1023, 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'def': 'echinoderms characterized by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'id': 1024, 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'id': 1025, 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'id': 1026, 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'r', 'id': 1027, 'synset': 'steamer.n.02', 'synonyms': ['steamer_(kitchen_appliance)'], 'def': 'a cooking utensil that can be used to cook food by steaming it', 'name': 'steamer_(kitchen_appliance)'}, {'frequency': 'f', 'id': 1028, 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'id': 1029, 'synset': 'stencil.n.01', 'synonyms': ['stencil'], 'def': 'a sheet of material (metal, plastic, etc.) 
that has been perforated with a pattern; ink or paint can pass through the perforations to create the printed pattern on the surface below', 'name': 'stencil'}, {'frequency': 'r', 'id': 1030, 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'id': 1031, 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'id': 1032, 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'id': 1033, 'synset': 'stew.n.02', 'synonyms': ['stew'], 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'id': 1034, 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'id': 1035, 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'c', 'id': 1036, 'synset': 'stocking.n.01', 'synonyms': ['stockings_(leg_wear)'], 'def': 'close-fitting hosiery to cover the foot and leg; come in matched pairs', 'name': 'stockings_(leg_wear)'}, {'frequency': 'f', 'id': 1037, 'synset': 'stool.n.01', 'synonyms': ['stool'], 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'id': 1038, 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'id': 1039, 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'id': 1040, 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'id': 1041, 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'id': 1042, 'synset': 'strap.n.01', 'synonyms': ['strap'], 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'id': 1043, 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'id': 1044, 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'id': 1045, 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'id': 1046, 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'def': 'a lamp supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'id': 1047, 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'id': 1048, 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'def': 'a pointed tool for writing or drawing or engraving', 'name': 
'stylus'}, {'frequency': 'r', 'id': 1049, 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'id': 1050, 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'id': 1051, 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'c', 'id': 1052, 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'id': 1053, 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'id': 1054, 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'id': 1055, 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'r', 'id': 1056, 'synset': 'sunscreen.n.01', 'synonyms': ['sunscreen', 'sunblock'], 'def': 'a cream spread on the skin; contains a chemical to filter out ultraviolet light and so protect from sunburn', 'name': 'sunscreen'}, {'frequency': 'f', 'id': 1057, 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'id': 1058, 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'id': 1059, 'synset': 'swab.n.02', 'synonyms': ['mop'], 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'id': 1060, 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'id': 1061, 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'id': 1062, 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'id': 1063, 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'id': 1064, 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'id': 1065, 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 'c', 'id': 1066, 'synset': 'sword.n.01', 'synonyms': ['sword'], 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'id': 1067, 
'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'id': 1068, 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'id': 1069, 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'id': 1070, 'synset': 'table.n.02', 'synonyms': ['table'], 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'id': 1071, 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'id': 1072, 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'id': 1073, 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'id': 1074, 'synset': 'taco.n.02', 'synonyms': ['taco'], 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'id': 1075, 'synset': 'tag.n.02', 'synonyms': ['tag'], 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'id': 1076, 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'id': 1077, 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'id': 1078, 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'c', 'id': 1079, 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'id': 1080, 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'c', 'id': 1081, 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'id': 1082, 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'id': 1083, 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'id': 1084, 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'id': 1085, 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'def': 'a cloth having a 
crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'id': 1086, 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'r', 'id': 1087, 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'id': 1088, 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'id': 1089, 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'c', 'id': 1090, 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'id': 1091, 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'id': 1092, 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'def': 'electronic device for communicating by voice over long distances', 'name': 'telephone'}, {'frequency': 'c', 'id': 1093, 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'id': 1094, 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'id': 1095, 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'id': 1096, 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'id': 1097, 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'id': 1098, 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'id': 1099, 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'id': 1100, 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'id': 1101, 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'id': 1102, 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'c', 'id': 1103, 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'def': 'a regulator for automatically regulating temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'id': 1104, 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'def': 'a small metal cap to protect the finger while sewing; 
can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'id': 1105, 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'id': 1106, 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'id': 1107, 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'id': 1108, 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'id': 1109, 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'id': 1110, 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'id': 1111, 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'r', 'id': 1112, 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'id': 1113, 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'id': 1114, 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'id': 1115, 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'c', 'id': 1116, 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'id': 1117, 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'id': 1118, 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'id': 1119, 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'c', 'id': 1120, 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'id': 1121, 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'id': 1122, 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'def': 'small brush; has long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'id': 1123, 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, 
{'frequency': 'c', 'id': 1124, 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'c', 'id': 1125, 'synset': 'top.n.09', 'synonyms': ['cover'], 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'id': 1126, 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'id': 1127, 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'id': 1128, 'synset': 'towel.n.01', 'synonyms': ['towel'], 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'id': 1129, 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'id': 1130, 'synset': 'toy.n.03', 'synonyms': ['toy'], 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'id': 1131, 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'id': 1132, 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'r', 'id': 1133, 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'c', 'id': 1134, 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'id': 1135, 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'id': 1136, 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'id': 1137, 'synset': 'tray.n.01', 'synonyms': ['tray'], 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'id': 1138, 'synset': 'tree_house.n.01', 'synonyms': ['tree_house'], 'def': '(NOT A TREE) a PLAYHOUSE built in the branches of a tree', 'name': 'tree_house'}, {'frequency': 'r', 'id': 1139, 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'id': 1140, 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'r', 'id': 1141, 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'def': 'a vehicle with three 
wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'c', 'id': 1142, 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'id': 1143, 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'id': 1144, 'synset': 'truck.n.01', 'synonyms': ['truck'], 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'id': 1145, 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'id': 1146, 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'id': 1147, 'synset': 'tub.n.02', 'synonyms': ['vat'], 'def': 'a large open vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'id': 1148, 'synset': 'turban.n.01', 'synonyms': ['turban'], 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'r', 'id': 1149, 'synset': 'turkey.n.01', 'synonyms': ['turkey_(bird)'], 'def': 'large gallinaceous bird with fan-shaped tail; widely domesticated for food', 'name': 'turkey_(bird)'}, {'frequency': 'c', 'id': 1150, 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'id': 1151, 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'id': 1152, 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'r', 'id': 1153, 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'r', 'id': 1154, 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'id': 1155, 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'c', 'id': 1156, 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'id': 1157, 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'c', 'id': 1158, 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'r', 'id': 1159, 'synset': 'urn.n.01', 'synonyms': ['urn'], 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'id': 1160, 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'c', 'id': 1161, 'synset': 
'valve.n.03', 'synonyms': ['valve'], 'def': 'control consisting of a mechanical device for controlling the flow of a fluid', 'name': 'valve'}, {'frequency': 'f', 'id': 1162, 'synset': 'vase.n.01', 'synonyms': ['vase'], 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'id': 1163, 'synset': 'vending_machine.n.01', 'synonyms': ['vending_machine'], 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'id': 1164, 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'c', 'id': 1165, 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'id': 1166, 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'id': 1167, 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'id': 1168, 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'r', 'id': 1169, 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'id': 1170, 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'id': 1171, 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'id': 1172, 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'id': 1173, 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'id': 1174, 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'id': 1175, 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'id': 1176, 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'id': 1177, 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'c', 'id': 1178, 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'id': 1179, 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'id': 1180, 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'def': 'a tall piece of furniture that provides 
storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'id': 1181, 'synset': 'wasabi.n.02', 'synonyms': ['wasabi'], 'def': 'the thick green root of the wasabi plant that the Japanese use in cooking and that tastes like strong horseradish', 'name': 'wasabi'}, {'frequency': 'c', 'id': 1182, 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'id': 1183, 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'id': 1184, 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'id': 1185, 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'id': 1186, 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'id': 1187, 'synset': 'water_filter.n.01', 'synonyms': ['water_filter'], 'def': 'a filter to remove impurities from the water supply', 'name': 'water_filter'}, {'frequency': 'r', 'id': 1188, 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'r', 'id': 1189, 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'id': 1190, 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'id': 1191, 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'id': 1192, 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'id': 1193, 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'id': 1194, 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'c', 'id': 1195, 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'id': 1196, 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'id': 1197, 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'id': 1198, 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'def': 'a rich cake with two or more 
tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'id': 1199, 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'def': 'a ring given to the bride and/or groom at the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'id': 1200, 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'id': 1201, 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'id': 1202, 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'id': 1203, 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'r', 'id': 1204, 'synset': 'whiskey.n.01', 'synonyms': ['whiskey'], 'def': 'a liquor made from fermented mash of grain', 'name': 'whiskey'}, {'frequency': 'r', 'id': 1205, 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'r', 'id': 1206, 'synset': 'wick.n.02', 'synonyms': ['wick'], 'def': 'a loosely woven cord in a candle or oil lamp that is lit on fire', 'name': 'wick'}, {'frequency': 'c', 'id': 1207, 'synset': 'wig.n.01', 'synonyms': ['wig'], 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'id': 1208, 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'id': 1209, 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'def': 'a mill that is powered by the wind', 'name': 'windmill'}, {'frequency': 'c', 'id': 1210, 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'id': 1211, 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'id': 1212, 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'id': 1213, 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'r', 'id': 1214, 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'id': 1215, 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'r', 'id': 1216, 'synset': 'wing_chair.n.01', 'synonyms': ['wing_chair'], 'def': 'easy chair having wings on each side of a high back', 'name': 'wing_chair'}, {'frequency': 'c', 'id': 1217, 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 
'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'id': 1218, 'synset': 'wok.n.01', 'synonyms': ['wok'], 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'id': 1219, 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'id': 1220, 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'id': 1221, 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 'name': 'wreath'}, {'frequency': 'c', 'id': 1222, 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'c', 'id': 1223, 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'id': 1224, 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'r', 'id': 1225, 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'r', 'id': 1226, 'synset': 'yak.n.02', 'synonyms': ['yak'], 'def': 'large long-haired wild ox of Tibet often domesticated', 'name': 'yak'}, {'frequency': 'c', 'id': 1227, 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'r', 'id': 1228, 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'id': 1229, 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'id': 1230, 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa
+# fmt: on
diff --git a/detectron2/data/datasets/lvis_v1_categories.py b/detectron2/data/datasets/lvis_v1_categories.py
new file mode 100644
index 0000000000000000000000000000000000000000..7374e6968bb006f5d8c49e75d9d3b31ea3d77d05
--- /dev/null
+++ b/detectron2/data/datasets/lvis_v1_categories.py
@@ -0,0 +1,16 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Autogen with
+# with open("lvis_v1_val.json", "r") as f:
+#     a = json.load(f)
+#     c = a["categories"]
+# for x in c:
+#     del x["image_count"]
+#     del x["instance_count"]
+# LVIS_CATEGORIES = repr(c) + " # noqa"
+# with open("/tmp/lvis_categories.py", "wt") as f:
+#     f.write(f"LVIS_CATEGORIES = {LVIS_CATEGORIES}")
+# Then paste the contents of that file below
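+#
+# For reference, a minimal usage sketch (hypothetical helper names, not part of
+# the upstream recipe): every entry below carries 'id', 'name', 'synonyms',
+# 'frequency' ('f'/'c'/'r'), 'def', and 'synset', so lookups are plain dict work:
+#
+#   from detectron2.data.datasets.lvis_v1_categories import LVIS_CATEGORIES
+#   id_to_name = {c["id"]: c["name"] for c in LVIS_CATEGORIES}
+#   frequent = [c["name"] for c in LVIS_CATEGORIES if c["frequency"] == "f"]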
+
+# fmt: off
+LVIS_CATEGORIES = [{'frequency': 'c', 'synset': 'aerosol.n.02', 'synonyms': ['aerosol_can', 'spray_can'], 'id': 1, 'def': 'a dispenser that holds a substance under pressure', 'name': 'aerosol_can'}, {'frequency': 'f', 'synset': 'air_conditioner.n.01', 'synonyms': ['air_conditioner'], 'id': 2, 'def': 'a machine that keeps air cool and dry', 'name': 'air_conditioner'}, {'frequency': 'f', 'synset': 'airplane.n.01', 'synonyms': ['airplane', 'aeroplane'], 'id': 3, 'def': 'an aircraft that has a fixed wing and is powered by propellers or jets', 'name': 'airplane'}, {'frequency': 'f', 'synset': 'alarm_clock.n.01', 'synonyms': ['alarm_clock'], 'id': 4, 'def': 'a clock that wakes a sleeper at some preset time', 'name': 'alarm_clock'}, {'frequency': 'c', 'synset': 'alcohol.n.01', 'synonyms': ['alcohol', 'alcoholic_beverage'], 'id': 5, 'def': 'a liquor or brew containing alcohol as the active agent', 'name': 'alcohol'}, {'frequency': 'c', 'synset': 'alligator.n.02', 'synonyms': ['alligator', 'gator'], 'id': 6, 'def': 'amphibious reptiles related to crocodiles but with shorter broader snouts', 'name': 'alligator'}, {'frequency': 'c', 'synset': 'almond.n.02', 'synonyms': ['almond'], 'id': 7, 'def': 'oval-shaped edible seed of the almond tree', 'name': 'almond'}, {'frequency': 'c', 'synset': 'ambulance.n.01', 'synonyms': ['ambulance'], 'id': 8, 'def': 'a vehicle that takes people to and from hospitals', 'name': 'ambulance'}, {'frequency': 'c', 'synset': 'amplifier.n.01', 'synonyms': ['amplifier'], 'id': 9, 'def': 'electronic equipment that increases strength of signals', 'name': 'amplifier'}, {'frequency': 'c', 'synset': 'anklet.n.03', 'synonyms': ['anklet', 'ankle_bracelet'], 'id': 10, 'def': 'an ornament worn around the ankle', 'name': 'anklet'}, {'frequency': 'f', 'synset': 'antenna.n.01', 'synonyms': ['antenna', 'aerial', 'transmitting_aerial'], 'id': 11, 'def': 'an electrical device that sends or receives radio or television signals', 'name': 'antenna'}, {'frequency': 'f', 'synset': 'apple.n.01', 'synonyms': ['apple'], 'id': 12, 'def': 'fruit with red or yellow or green skin and sweet to tart crisp whitish flesh', 'name': 'apple'}, {'frequency': 'r', 'synset': 'applesauce.n.01', 'synonyms': ['applesauce'], 'id': 13, 'def': 'puree of stewed apples usually sweetened and spiced', 'name': 'applesauce'}, {'frequency': 'r', 'synset': 'apricot.n.02', 'synonyms': ['apricot'], 'id': 14, 'def': 'downy yellow to rosy-colored fruit resembling a small peach', 'name': 'apricot'}, {'frequency': 'f', 'synset': 'apron.n.01', 'synonyms': ['apron'], 'id': 15, 'def': 'a garment of cloth that is tied about the waist and worn to protect clothing', 'name': 'apron'}, {'frequency': 'c', 'synset': 'aquarium.n.01', 'synonyms': ['aquarium', 'fish_tank'], 'id': 16, 'def': 'a tank/pool/bowl filled with water for keeping live fish and underwater animals', 'name': 'aquarium'}, {'frequency': 'r', 'synset': 'arctic.n.02', 'synonyms': ['arctic_(type_of_shoe)', 'galosh', 'golosh', 'rubber_(type_of_shoe)', 'gumshoe'], 'id': 17, 'def': 'a waterproof overshoe that protects shoes from water or snow', 'name': 'arctic_(type_of_shoe)'}, {'frequency': 'c', 'synset': 'armband.n.02', 'synonyms': ['armband'], 'id': 18, 'def': 'a band worn around the upper arm', 'name': 'armband'}, {'frequency': 'f', 'synset': 'armchair.n.01', 'synonyms': ['armchair'], 'id': 19, 'def': 'chair with a support on each side for arms', 'name': 'armchair'}, {'frequency': 'r', 'synset': 'armoire.n.01', 'synonyms': ['armoire'], 'id': 20, 'def': 'a large wardrobe or 
cabinet', 'name': 'armoire'}, {'frequency': 'r', 'synset': 'armor.n.01', 'synonyms': ['armor', 'armour'], 'id': 21, 'def': 'protective covering made of metal and used in combat', 'name': 'armor'}, {'frequency': 'c', 'synset': 'artichoke.n.02', 'synonyms': ['artichoke'], 'id': 22, 'def': 'a thistlelike flower head with edible fleshy leaves and heart', 'name': 'artichoke'}, {'frequency': 'f', 'synset': 'ashcan.n.01', 'synonyms': ['trash_can', 'garbage_can', 'wastebin', 'dustbin', 'trash_barrel', 'trash_bin'], 'id': 23, 'def': 'a bin that holds rubbish until it is collected', 'name': 'trash_can'}, {'frequency': 'c', 'synset': 'ashtray.n.01', 'synonyms': ['ashtray'], 'id': 24, 'def': "a receptacle for the ash from smokers' cigars or cigarettes", 'name': 'ashtray'}, {'frequency': 'c', 'synset': 'asparagus.n.02', 'synonyms': ['asparagus'], 'id': 25, 'def': 'edible young shoots of the asparagus plant', 'name': 'asparagus'}, {'frequency': 'c', 'synset': 'atomizer.n.01', 'synonyms': ['atomizer', 'atomiser', 'spray', 'sprayer', 'nebulizer', 'nebuliser'], 'id': 26, 'def': 'a dispenser that turns a liquid (such as perfume) into a fine mist', 'name': 'atomizer'}, {'frequency': 'f', 'synset': 'avocado.n.01', 'synonyms': ['avocado'], 'id': 27, 'def': 'a pear-shaped fruit with green or blackish skin and rich yellowish pulp enclosing a single large seed', 'name': 'avocado'}, {'frequency': 'c', 'synset': 'award.n.02', 'synonyms': ['award', 'accolade'], 'id': 28, 'def': 'a tangible symbol signifying approval or distinction', 'name': 'award'}, {'frequency': 'f', 'synset': 'awning.n.01', 'synonyms': ['awning'], 'id': 29, 'def': 'a canopy made of canvas to shelter people or things from rain or sun', 'name': 'awning'}, {'frequency': 'r', 'synset': 'ax.n.01', 'synonyms': ['ax', 'axe'], 'id': 30, 'def': 'an edge tool with a heavy bladed head mounted across a handle', 'name': 'ax'}, {'frequency': 'r', 'synset': 'baboon.n.01', 'synonyms': ['baboon'], 'id': 31, 'def': 'large terrestrial monkeys having doglike muzzles', 'name': 'baboon'}, {'frequency': 'f', 'synset': 'baby_buggy.n.01', 'synonyms': ['baby_buggy', 'baby_carriage', 'perambulator', 'pram', 'stroller'], 'id': 32, 'def': 'a small vehicle with four wheels in which a baby or child is pushed around', 'name': 'baby_buggy'}, {'frequency': 'c', 'synset': 'backboard.n.01', 'synonyms': ['basketball_backboard'], 'id': 33, 'def': 'a raised vertical board with basket attached; used to play basketball', 'name': 'basketball_backboard'}, {'frequency': 'f', 'synset': 'backpack.n.01', 'synonyms': ['backpack', 'knapsack', 'packsack', 'rucksack', 'haversack'], 'id': 34, 'def': 'a bag carried by a strap on your back or shoulder', 'name': 'backpack'}, {'frequency': 'f', 'synset': 'bag.n.04', 'synonyms': ['handbag', 'purse', 'pocketbook'], 'id': 35, 'def': 'a container used for carrying money and small personal items or accessories', 'name': 'handbag'}, {'frequency': 'f', 'synset': 'bag.n.06', 'synonyms': ['suitcase', 'baggage', 'luggage'], 'id': 36, 'def': 'cases used to carry belongings when traveling', 'name': 'suitcase'}, {'frequency': 'c', 'synset': 'bagel.n.01', 'synonyms': ['bagel', 'beigel'], 'id': 37, 'def': 'glazed yeast-raised doughnut-shaped roll with hard crust', 'name': 'bagel'}, {'frequency': 'r', 'synset': 'bagpipe.n.01', 'synonyms': ['bagpipe'], 'id': 38, 'def': 'a tubular wind instrument; the player blows air into a bag and squeezes it out', 'name': 'bagpipe'}, {'frequency': 'r', 'synset': 'baguet.n.01', 'synonyms': ['baguet', 'baguette'], 'id': 39, 'def': 
'narrow French stick loaf', 'name': 'baguet'}, {'frequency': 'r', 'synset': 'bait.n.02', 'synonyms': ['bait', 'lure'], 'id': 40, 'def': 'something used to lure fish or other animals into danger so they can be trapped or killed', 'name': 'bait'}, {'frequency': 'f', 'synset': 'ball.n.06', 'synonyms': ['ball'], 'id': 41, 'def': 'a spherical object used as a plaything', 'name': 'ball'}, {'frequency': 'r', 'synset': 'ballet_skirt.n.01', 'synonyms': ['ballet_skirt', 'tutu'], 'id': 42, 'def': 'very short skirt worn by ballerinas', 'name': 'ballet_skirt'}, {'frequency': 'f', 'synset': 'balloon.n.01', 'synonyms': ['balloon'], 'id': 43, 'def': 'large tough nonrigid bag filled with gas or heated air', 'name': 'balloon'}, {'frequency': 'c', 'synset': 'bamboo.n.02', 'synonyms': ['bamboo'], 'id': 44, 'def': 'woody tropical grass having hollow woody stems', 'name': 'bamboo'}, {'frequency': 'f', 'synset': 'banana.n.02', 'synonyms': ['banana'], 'id': 45, 'def': 'elongated crescent-shaped yellow fruit with soft sweet flesh', 'name': 'banana'}, {'frequency': 'c', 'synset': 'band_aid.n.01', 'synonyms': ['Band_Aid'], 'id': 46, 'def': 'trade name for an adhesive bandage to cover small cuts or blisters', 'name': 'Band_Aid'}, {'frequency': 'c', 'synset': 'bandage.n.01', 'synonyms': ['bandage'], 'id': 47, 'def': 'a piece of soft material that covers and protects an injured part of the body', 'name': 'bandage'}, {'frequency': 'f', 'synset': 'bandanna.n.01', 'synonyms': ['bandanna', 'bandana'], 'id': 48, 'def': 'large and brightly colored handkerchief; often used as a neckerchief', 'name': 'bandanna'}, {'frequency': 'r', 'synset': 'banjo.n.01', 'synonyms': ['banjo'], 'id': 49, 'def': 'a stringed instrument of the guitar family with a long neck and circular body', 'name': 'banjo'}, {'frequency': 'f', 'synset': 'banner.n.01', 'synonyms': ['banner', 'streamer'], 'id': 50, 'def': 'long strip of cloth or paper used for decoration or advertising', 'name': 'banner'}, {'frequency': 'r', 'synset': 'barbell.n.01', 'synonyms': ['barbell'], 'id': 51, 'def': 'a bar to which heavy discs are attached at each end; used in weightlifting', 'name': 'barbell'}, {'frequency': 'r', 'synset': 'barge.n.01', 'synonyms': ['barge'], 'id': 52, 'def': 'a flatbottom boat for carrying heavy loads (especially on canals)', 'name': 'barge'}, {'frequency': 'f', 'synset': 'barrel.n.02', 'synonyms': ['barrel', 'cask'], 'id': 53, 'def': 'a cylindrical container that holds liquids', 'name': 'barrel'}, {'frequency': 'c', 'synset': 'barrette.n.01', 'synonyms': ['barrette'], 'id': 54, 'def': "a pin for holding women's hair in place", 'name': 'barrette'}, {'frequency': 'c', 'synset': 'barrow.n.03', 'synonyms': ['barrow', 'garden_cart', 'lawn_cart', 'wheelbarrow'], 'id': 55, 'def': 'a cart for carrying small loads; has handles and one or more wheels', 'name': 'barrow'}, {'frequency': 'f', 'synset': 'base.n.03', 'synonyms': ['baseball_base'], 'id': 56, 'def': 'a place that the runner must touch before scoring', 'name': 'baseball_base'}, {'frequency': 'f', 'synset': 'baseball.n.02', 'synonyms': ['baseball'], 'id': 57, 'def': 'a ball used in playing baseball', 'name': 'baseball'}, {'frequency': 'f', 'synset': 'baseball_bat.n.01', 'synonyms': ['baseball_bat'], 'id': 58, 'def': 'an implement used in baseball by the batter', 'name': 'baseball_bat'}, {'frequency': 'f', 'synset': 'baseball_cap.n.01', 'synonyms': ['baseball_cap', 'jockey_cap', 'golf_cap'], 'id': 59, 'def': 'a cap with a bill', 'name': 'baseball_cap'}, {'frequency': 'f', 'synset': 
'baseball_glove.n.01', 'synonyms': ['baseball_glove', 'baseball_mitt'], 'id': 60, 'def': 'the handwear used by fielders in playing baseball', 'name': 'baseball_glove'}, {'frequency': 'f', 'synset': 'basket.n.01', 'synonyms': ['basket', 'handbasket'], 'id': 61, 'def': 'a container that is usually woven and has handles', 'name': 'basket'}, {'frequency': 'c', 'synset': 'basketball.n.02', 'synonyms': ['basketball'], 'id': 62, 'def': 'an inflated ball used in playing basketball', 'name': 'basketball'}, {'frequency': 'r', 'synset': 'bass_horn.n.01', 'synonyms': ['bass_horn', 'sousaphone', 'tuba'], 'id': 63, 'def': 'the lowest brass wind instrument', 'name': 'bass_horn'}, {'frequency': 'c', 'synset': 'bat.n.01', 'synonyms': ['bat_(animal)'], 'id': 64, 'def': 'nocturnal mouselike mammal with forelimbs modified to form membranous wings', 'name': 'bat_(animal)'}, {'frequency': 'f', 'synset': 'bath_mat.n.01', 'synonyms': ['bath_mat'], 'id': 65, 'def': 'a heavy towel or mat to stand on while drying yourself after a bath', 'name': 'bath_mat'}, {'frequency': 'f', 'synset': 'bath_towel.n.01', 'synonyms': ['bath_towel'], 'id': 66, 'def': 'a large towel; to dry yourself after a bath', 'name': 'bath_towel'}, {'frequency': 'c', 'synset': 'bathrobe.n.01', 'synonyms': ['bathrobe'], 'id': 67, 'def': 'a loose-fitting robe of towelling; worn after a bath or swim', 'name': 'bathrobe'}, {'frequency': 'f', 'synset': 'bathtub.n.01', 'synonyms': ['bathtub', 'bathing_tub'], 'id': 68, 'def': 'a large open container that you fill with water and use to wash the body', 'name': 'bathtub'}, {'frequency': 'r', 'synset': 'batter.n.02', 'synonyms': ['batter_(food)'], 'id': 69, 'def': 'a liquid or semiliquid mixture, as of flour, eggs, and milk, used in cooking', 'name': 'batter_(food)'}, {'frequency': 'c', 'synset': 'battery.n.02', 'synonyms': ['battery'], 'id': 70, 'def': 'a portable device that produces electricity', 'name': 'battery'}, {'frequency': 'r', 'synset': 'beach_ball.n.01', 'synonyms': ['beachball'], 'id': 71, 'def': 'large and light ball; for play at the seaside', 'name': 'beachball'}, {'frequency': 'c', 'synset': 'bead.n.01', 'synonyms': ['bead'], 'id': 72, 'def': 'a small ball with a hole through the middle used for ornamentation, jewellery, etc.', 'name': 'bead'}, {'frequency': 'c', 'synset': 'bean_curd.n.01', 'synonyms': ['bean_curd', 'tofu'], 'id': 73, 'def': 'cheeselike food made of curdled soybean milk', 'name': 'bean_curd'}, {'frequency': 'c', 'synset': 'beanbag.n.01', 'synonyms': ['beanbag'], 'id': 74, 'def': 'a bag filled with dried beans or similar items; used in games or to sit on', 'name': 'beanbag'}, {'frequency': 'f', 'synset': 'beanie.n.01', 'synonyms': ['beanie', 'beany'], 'id': 75, 'def': 'a small skullcap; formerly worn by schoolboys and college freshmen', 'name': 'beanie'}, {'frequency': 'f', 'synset': 'bear.n.01', 'synonyms': ['bear'], 'id': 76, 'def': 'large carnivorous or omnivorous mammals with shaggy coats and claws', 'name': 'bear'}, {'frequency': 'f', 'synset': 'bed.n.01', 'synonyms': ['bed'], 'id': 77, 'def': 'a piece of furniture that provides a place to sleep', 'name': 'bed'}, {'frequency': 'r', 'synset': 'bedpan.n.01', 'synonyms': ['bedpan'], 'id': 78, 'def': 'a shallow vessel used by a bedridden patient for defecation and urination', 'name': 'bedpan'}, {'frequency': 'f', 'synset': 'bedspread.n.01', 'synonyms': ['bedspread', 'bedcover', 'bed_covering', 'counterpane', 'spread'], 'id': 79, 'def': 'decorative cover for a bed', 'name': 'bedspread'}, {'frequency': 'f', 'synset': 
'beef.n.01', 'synonyms': ['cow'], 'id': 80, 'def': 'cattle/cow', 'name': 'cow'}, {'frequency': 'f', 'synset': 'beef.n.02', 'synonyms': ['beef_(food)', 'boeuf_(food)'], 'id': 81, 'def': 'meat from an adult domestic bovine', 'name': 'beef_(food)'}, {'frequency': 'r', 'synset': 'beeper.n.01', 'synonyms': ['beeper', 'pager'], 'id': 82, 'def': 'an device that beeps when the person carrying it is being paged', 'name': 'beeper'}, {'frequency': 'f', 'synset': 'beer_bottle.n.01', 'synonyms': ['beer_bottle'], 'id': 83, 'def': 'a bottle that holds beer', 'name': 'beer_bottle'}, {'frequency': 'c', 'synset': 'beer_can.n.01', 'synonyms': ['beer_can'], 'id': 84, 'def': 'a can that holds beer', 'name': 'beer_can'}, {'frequency': 'r', 'synset': 'beetle.n.01', 'synonyms': ['beetle'], 'id': 85, 'def': 'insect with hard wing covers', 'name': 'beetle'}, {'frequency': 'f', 'synset': 'bell.n.01', 'synonyms': ['bell'], 'id': 86, 'def': 'a hollow device made of metal that makes a ringing sound when struck', 'name': 'bell'}, {'frequency': 'f', 'synset': 'bell_pepper.n.02', 'synonyms': ['bell_pepper', 'capsicum'], 'id': 87, 'def': 'large bell-shaped sweet pepper in green or red or yellow or orange or black varieties', 'name': 'bell_pepper'}, {'frequency': 'f', 'synset': 'belt.n.02', 'synonyms': ['belt'], 'id': 88, 'def': 'a band to tie or buckle around the body (usually at the waist)', 'name': 'belt'}, {'frequency': 'f', 'synset': 'belt_buckle.n.01', 'synonyms': ['belt_buckle'], 'id': 89, 'def': 'the buckle used to fasten a belt', 'name': 'belt_buckle'}, {'frequency': 'f', 'synset': 'bench.n.01', 'synonyms': ['bench'], 'id': 90, 'def': 'a long seat for more than one person', 'name': 'bench'}, {'frequency': 'c', 'synset': 'beret.n.01', 'synonyms': ['beret'], 'id': 91, 'def': 'a cap with no brim or bill; made of soft cloth', 'name': 'beret'}, {'frequency': 'c', 'synset': 'bib.n.02', 'synonyms': ['bib'], 'id': 92, 'def': 'a napkin tied under the chin of a child while eating', 'name': 'bib'}, {'frequency': 'r', 'synset': 'bible.n.01', 'synonyms': ['Bible'], 'id': 93, 'def': 'the sacred writings of the Christian religions', 'name': 'Bible'}, {'frequency': 'f', 'synset': 'bicycle.n.01', 'synonyms': ['bicycle', 'bike_(bicycle)'], 'id': 94, 'def': 'a wheeled vehicle that has two wheels and is moved by foot pedals', 'name': 'bicycle'}, {'frequency': 'f', 'synset': 'bill.n.09', 'synonyms': ['visor', 'vizor'], 'id': 95, 'def': 'a brim that projects to the front to shade the eyes', 'name': 'visor'}, {'frequency': 'f', 'synset': 'billboard.n.01', 'synonyms': ['billboard'], 'id': 96, 'def': 'large outdoor signboard', 'name': 'billboard'}, {'frequency': 'c', 'synset': 'binder.n.03', 'synonyms': ['binder', 'ring-binder'], 'id': 97, 'def': 'holds loose papers or magazines', 'name': 'binder'}, {'frequency': 'c', 'synset': 'binoculars.n.01', 'synonyms': ['binoculars', 'field_glasses', 'opera_glasses'], 'id': 98, 'def': 'an optical instrument designed for simultaneous use by both eyes', 'name': 'binoculars'}, {'frequency': 'f', 'synset': 'bird.n.01', 'synonyms': ['bird'], 'id': 99, 'def': 'animal characterized by feathers and wings', 'name': 'bird'}, {'frequency': 'c', 'synset': 'bird_feeder.n.01', 'synonyms': ['birdfeeder'], 'id': 100, 'def': 'an outdoor device that supplies food for wild birds', 'name': 'birdfeeder'}, {'frequency': 'c', 'synset': 'birdbath.n.01', 'synonyms': ['birdbath'], 'id': 101, 'def': 'an ornamental basin (usually in a garden) for birds to bathe in', 'name': 'birdbath'}, {'frequency': 'c', 'synset': 
'birdcage.n.01', 'synonyms': ['birdcage'], 'id': 102, 'def': 'a cage in which a bird can be kept', 'name': 'birdcage'}, {'frequency': 'c', 'synset': 'birdhouse.n.01', 'synonyms': ['birdhouse'], 'id': 103, 'def': 'a shelter for birds', 'name': 'birdhouse'}, {'frequency': 'f', 'synset': 'birthday_cake.n.01', 'synonyms': ['birthday_cake'], 'id': 104, 'def': 'decorated cake served at a birthday party', 'name': 'birthday_cake'}, {'frequency': 'r', 'synset': 'birthday_card.n.01', 'synonyms': ['birthday_card'], 'id': 105, 'def': 'a card expressing a birthday greeting', 'name': 'birthday_card'}, {'frequency': 'r', 'synset': 'black_flag.n.01', 'synonyms': ['pirate_flag'], 'id': 106, 'def': 'a flag usually bearing a white skull and crossbones on a black background', 'name': 'pirate_flag'}, {'frequency': 'c', 'synset': 'black_sheep.n.02', 'synonyms': ['black_sheep'], 'id': 107, 'def': 'sheep with a black coat', 'name': 'black_sheep'}, {'frequency': 'c', 'synset': 'blackberry.n.01', 'synonyms': ['blackberry'], 'id': 108, 'def': 'large sweet black or very dark purple edible aggregate fruit', 'name': 'blackberry'}, {'frequency': 'f', 'synset': 'blackboard.n.01', 'synonyms': ['blackboard', 'chalkboard'], 'id': 109, 'def': 'sheet of slate; for writing with chalk', 'name': 'blackboard'}, {'frequency': 'f', 'synset': 'blanket.n.01', 'synonyms': ['blanket'], 'id': 110, 'def': 'bedding that keeps a person warm in bed', 'name': 'blanket'}, {'frequency': 'c', 'synset': 'blazer.n.01', 'synonyms': ['blazer', 'sport_jacket', 'sport_coat', 'sports_jacket', 'sports_coat'], 'id': 111, 'def': 'lightweight jacket; often striped in the colors of a club or school', 'name': 'blazer'}, {'frequency': 'f', 'synset': 'blender.n.01', 'synonyms': ['blender', 'liquidizer', 'liquidiser'], 'id': 112, 'def': 'an electrically powered mixer that mix or chop or liquefy foods', 'name': 'blender'}, {'frequency': 'r', 'synset': 'blimp.n.02', 'synonyms': ['blimp'], 'id': 113, 'def': 'a small nonrigid airship used for observation or as a barrage balloon', 'name': 'blimp'}, {'frequency': 'f', 'synset': 'blinker.n.01', 'synonyms': ['blinker', 'flasher'], 'id': 114, 'def': 'a light that flashes on and off; used as a signal or to send messages', 'name': 'blinker'}, {'frequency': 'f', 'synset': 'blouse.n.01', 'synonyms': ['blouse'], 'id': 115, 'def': 'a top worn by women', 'name': 'blouse'}, {'frequency': 'f', 'synset': 'blueberry.n.02', 'synonyms': ['blueberry'], 'id': 116, 'def': 'sweet edible dark-blue berries of blueberry plants', 'name': 'blueberry'}, {'frequency': 'r', 'synset': 'board.n.09', 'synonyms': ['gameboard'], 'id': 117, 'def': 'a flat portable surface (usually rectangular) designed for board games', 'name': 'gameboard'}, {'frequency': 'f', 'synset': 'boat.n.01', 'synonyms': ['boat', 'ship_(boat)'], 'id': 118, 'def': 'a vessel for travel on water', 'name': 'boat'}, {'frequency': 'r', 'synset': 'bob.n.05', 'synonyms': ['bob', 'bobber', 'bobfloat'], 'id': 119, 'def': 'a small float usually made of cork; attached to a fishing line', 'name': 'bob'}, {'frequency': 'c', 'synset': 'bobbin.n.01', 'synonyms': ['bobbin', 'spool', 'reel'], 'id': 120, 'def': 'a thing around which thread/tape/film or other flexible materials can be wound', 'name': 'bobbin'}, {'frequency': 'c', 'synset': 'bobby_pin.n.01', 'synonyms': ['bobby_pin', 'hairgrip'], 'id': 121, 'def': 'a flat wire hairpin used to hold bobbed hair in place', 'name': 'bobby_pin'}, {'frequency': 'c', 'synset': 'boiled_egg.n.01', 'synonyms': ['boiled_egg', 'coddled_egg'], 'id': 122, 
'def': 'egg cooked briefly in the shell in gently boiling water', 'name': 'boiled_egg'}, {'frequency': 'r', 'synset': 'bolo_tie.n.01', 'synonyms': ['bolo_tie', 'bolo', 'bola_tie', 'bola'], 'id': 123, 'def': 'a cord fastened around the neck with an ornamental clasp and worn as a necktie', 'name': 'bolo_tie'}, {'frequency': 'c', 'synset': 'bolt.n.03', 'synonyms': ['deadbolt'], 'id': 124, 'def': 'the part of a lock that is engaged or withdrawn with a key', 'name': 'deadbolt'}, {'frequency': 'f', 'synset': 'bolt.n.06', 'synonyms': ['bolt'], 'id': 125, 'def': 'a screw that screws into a nut to form a fastener', 'name': 'bolt'}, {'frequency': 'r', 'synset': 'bonnet.n.01', 'synonyms': ['bonnet'], 'id': 126, 'def': 'a hat tied under the chin', 'name': 'bonnet'}, {'frequency': 'f', 'synset': 'book.n.01', 'synonyms': ['book'], 'id': 127, 'def': 'a written work or composition that has been published', 'name': 'book'}, {'frequency': 'c', 'synset': 'bookcase.n.01', 'synonyms': ['bookcase'], 'id': 128, 'def': 'a piece of furniture with shelves for storing books', 'name': 'bookcase'}, {'frequency': 'c', 'synset': 'booklet.n.01', 'synonyms': ['booklet', 'brochure', 'leaflet', 'pamphlet'], 'id': 129, 'def': 'a small book usually having a paper cover', 'name': 'booklet'}, {'frequency': 'r', 'synset': 'bookmark.n.01', 'synonyms': ['bookmark', 'bookmarker'], 'id': 130, 'def': 'a marker (a piece of paper or ribbon) placed between the pages of a book', 'name': 'bookmark'}, {'frequency': 'r', 'synset': 'boom.n.04', 'synonyms': ['boom_microphone', 'microphone_boom'], 'id': 131, 'def': 'a pole carrying an overhead microphone projected over a film or tv set', 'name': 'boom_microphone'}, {'frequency': 'f', 'synset': 'boot.n.01', 'synonyms': ['boot'], 'id': 132, 'def': 'footwear that covers the whole foot and lower leg', 'name': 'boot'}, {'frequency': 'f', 'synset': 'bottle.n.01', 'synonyms': ['bottle'], 'id': 133, 'def': 'a glass or plastic vessel used for storing drinks or other liquids', 'name': 'bottle'}, {'frequency': 'c', 'synset': 'bottle_opener.n.01', 'synonyms': ['bottle_opener'], 'id': 134, 'def': 'an opener for removing caps or corks from bottles', 'name': 'bottle_opener'}, {'frequency': 'c', 'synset': 'bouquet.n.01', 'synonyms': ['bouquet'], 'id': 135, 'def': 'an arrangement of flowers that is usually given as a present', 'name': 'bouquet'}, {'frequency': 'r', 'synset': 'bow.n.04', 'synonyms': ['bow_(weapon)'], 'id': 136, 'def': 'a weapon for shooting arrows', 'name': 'bow_(weapon)'}, {'frequency': 'f', 'synset': 'bow.n.08', 'synonyms': ['bow_(decorative_ribbons)'], 'id': 137, 'def': 'a decorative interlacing of ribbons', 'name': 'bow_(decorative_ribbons)'}, {'frequency': 'f', 'synset': 'bow_tie.n.01', 'synonyms': ['bow-tie', 'bowtie'], 'id': 138, 'def': "a man's tie that ties in a bow", 'name': 'bow-tie'}, {'frequency': 'f', 'synset': 'bowl.n.03', 'synonyms': ['bowl'], 'id': 139, 'def': 'a dish that is round and open at the top for serving foods', 'name': 'bowl'}, {'frequency': 'r', 'synset': 'bowl.n.08', 'synonyms': ['pipe_bowl'], 'id': 140, 'def': 'a small round container that is open at the top for holding tobacco', 'name': 'pipe_bowl'}, {'frequency': 'c', 'synset': 'bowler_hat.n.01', 'synonyms': ['bowler_hat', 'bowler', 'derby_hat', 'derby', 'plug_hat'], 'id': 141, 'def': 'a felt hat that is round and hard with a narrow brim', 'name': 'bowler_hat'}, {'frequency': 'r', 'synset': 'bowling_ball.n.01', 'synonyms': ['bowling_ball'], 'id': 142, 'def': 'a large ball with finger holes used in the sport of 
bowling', 'name': 'bowling_ball'}, {'frequency': 'f', 'synset': 'box.n.01', 'synonyms': ['box'], 'id': 143, 'def': 'a (usually rectangular) container; may have a lid', 'name': 'box'}, {'frequency': 'r', 'synset': 'boxing_glove.n.01', 'synonyms': ['boxing_glove'], 'id': 144, 'def': 'large glove coverings the fists of a fighter worn for the sport of boxing', 'name': 'boxing_glove'}, {'frequency': 'c', 'synset': 'brace.n.06', 'synonyms': ['suspenders'], 'id': 145, 'def': 'elastic straps that hold trousers up (usually used in the plural)', 'name': 'suspenders'}, {'frequency': 'f', 'synset': 'bracelet.n.02', 'synonyms': ['bracelet', 'bangle'], 'id': 146, 'def': 'jewelry worn around the wrist for decoration', 'name': 'bracelet'}, {'frequency': 'r', 'synset': 'brass.n.07', 'synonyms': ['brass_plaque'], 'id': 147, 'def': 'a memorial made of brass', 'name': 'brass_plaque'}, {'frequency': 'c', 'synset': 'brassiere.n.01', 'synonyms': ['brassiere', 'bra', 'bandeau'], 'id': 148, 'def': 'an undergarment worn by women to support their breasts', 'name': 'brassiere'}, {'frequency': 'c', 'synset': 'bread-bin.n.01', 'synonyms': ['bread-bin', 'breadbox'], 'id': 149, 'def': 'a container used to keep bread or cake in', 'name': 'bread-bin'}, {'frequency': 'f', 'synset': 'bread.n.01', 'synonyms': ['bread'], 'id': 150, 'def': 'food made from dough of flour or meal and usually raised with yeast or baking powder and then baked', 'name': 'bread'}, {'frequency': 'r', 'synset': 'breechcloth.n.01', 'synonyms': ['breechcloth', 'breechclout', 'loincloth'], 'id': 151, 'def': 'a garment that provides covering for the loins', 'name': 'breechcloth'}, {'frequency': 'f', 'synset': 'bridal_gown.n.01', 'synonyms': ['bridal_gown', 'wedding_gown', 'wedding_dress'], 'id': 152, 'def': 'a gown worn by the bride at a wedding', 'name': 'bridal_gown'}, {'frequency': 'c', 'synset': 'briefcase.n.01', 'synonyms': ['briefcase'], 'id': 153, 'def': 'a case with a handle; for carrying papers or files or books', 'name': 'briefcase'}, {'frequency': 'f', 'synset': 'broccoli.n.01', 'synonyms': ['broccoli'], 'id': 154, 'def': 'plant with dense clusters of tight green flower buds', 'name': 'broccoli'}, {'frequency': 'r', 'synset': 'brooch.n.01', 'synonyms': ['broach'], 'id': 155, 'def': 'a decorative pin worn by women', 'name': 'broach'}, {'frequency': 'c', 'synset': 'broom.n.01', 'synonyms': ['broom'], 'id': 156, 'def': 'bundle of straws or twigs attached to a long handle; used for cleaning', 'name': 'broom'}, {'frequency': 'c', 'synset': 'brownie.n.03', 'synonyms': ['brownie'], 'id': 157, 'def': 'square or bar of very rich chocolate cake usually with nuts', 'name': 'brownie'}, {'frequency': 'c', 'synset': 'brussels_sprouts.n.01', 'synonyms': ['brussels_sprouts'], 'id': 158, 'def': 'the small edible cabbage-like buds growing along a stalk', 'name': 'brussels_sprouts'}, {'frequency': 'r', 'synset': 'bubble_gum.n.01', 'synonyms': ['bubble_gum'], 'id': 159, 'def': 'a kind of chewing gum that can be blown into bubbles', 'name': 'bubble_gum'}, {'frequency': 'f', 'synset': 'bucket.n.01', 'synonyms': ['bucket', 'pail'], 'id': 160, 'def': 'a roughly cylindrical vessel that is open at the top', 'name': 'bucket'}, {'frequency': 'r', 'synset': 'buggy.n.01', 'synonyms': ['horse_buggy'], 'id': 161, 'def': 'a small lightweight carriage; drawn by a single horse', 'name': 'horse_buggy'}, {'frequency': 'c', 'synset': 'bull.n.11', 'synonyms': ['horned_cow'], 'id': 162, 'def': 'a cow with horns', 'name': 'bull'}, {'frequency': 'c', 'synset': 'bulldog.n.01', 
'synonyms': ['bulldog'], 'id': 163, 'def': 'a thickset short-haired dog with a large head and strong undershot lower jaw', 'name': 'bulldog'}, {'frequency': 'r', 'synset': 'bulldozer.n.01', 'synonyms': ['bulldozer', 'dozer'], 'id': 164, 'def': 'large powerful tractor; a large blade in front flattens areas of ground', 'name': 'bulldozer'}, {'frequency': 'c', 'synset': 'bullet_train.n.01', 'synonyms': ['bullet_train'], 'id': 165, 'def': 'a high-speed passenger train', 'name': 'bullet_train'}, {'frequency': 'c', 'synset': 'bulletin_board.n.02', 'synonyms': ['bulletin_board', 'notice_board'], 'id': 166, 'def': 'a board that hangs on a wall; displays announcements', 'name': 'bulletin_board'}, {'frequency': 'r', 'synset': 'bulletproof_vest.n.01', 'synonyms': ['bulletproof_vest'], 'id': 167, 'def': 'a vest capable of resisting the impact of a bullet', 'name': 'bulletproof_vest'}, {'frequency': 'c', 'synset': 'bullhorn.n.01', 'synonyms': ['bullhorn', 'megaphone'], 'id': 168, 'def': 'a portable loudspeaker with built-in microphone and amplifier', 'name': 'bullhorn'}, {'frequency': 'f', 'synset': 'bun.n.01', 'synonyms': ['bun', 'roll'], 'id': 169, 'def': 'small rounded bread either plain or sweet', 'name': 'bun'}, {'frequency': 'c', 'synset': 'bunk_bed.n.01', 'synonyms': ['bunk_bed'], 'id': 170, 'def': 'beds built one above the other', 'name': 'bunk_bed'}, {'frequency': 'f', 'synset': 'buoy.n.01', 'synonyms': ['buoy'], 'id': 171, 'def': 'a float attached by rope to the seabed to mark channels in a harbor or underwater hazards', 'name': 'buoy'}, {'frequency': 'r', 'synset': 'burrito.n.01', 'synonyms': ['burrito'], 'id': 172, 'def': 'a flour tortilla folded around a filling', 'name': 'burrito'}, {'frequency': 'f', 'synset': 'bus.n.01', 'synonyms': ['bus_(vehicle)', 'autobus', 'charabanc', 'double-decker', 'motorbus', 'motorcoach'], 'id': 173, 'def': 'a vehicle carrying many passengers; used for public transport', 'name': 'bus_(vehicle)'}, {'frequency': 'c', 'synset': 'business_card.n.01', 'synonyms': ['business_card'], 'id': 174, 'def': "a card on which are printed the person's name and business affiliation", 'name': 'business_card'}, {'frequency': 'f', 'synset': 'butter.n.01', 'synonyms': ['butter'], 'id': 175, 'def': 'an edible emulsion of fat globules made by churning milk or cream; for cooking and table use', 'name': 'butter'}, {'frequency': 'c', 'synset': 'butterfly.n.01', 'synonyms': ['butterfly'], 'id': 176, 'def': 'insect typically having a slender body with knobbed antennae and broad colorful wings', 'name': 'butterfly'}, {'frequency': 'f', 'synset': 'button.n.01', 'synonyms': ['button'], 'id': 177, 'def': 'a round fastener sewn to shirts and coats etc to fit through buttonholes', 'name': 'button'}, {'frequency': 'f', 'synset': 'cab.n.03', 'synonyms': ['cab_(taxi)', 'taxi', 'taxicab'], 'id': 178, 'def': 'a car that takes passengers where they want to go in exchange for money', 'name': 'cab_(taxi)'}, {'frequency': 'r', 'synset': 'cabana.n.01', 'synonyms': ['cabana'], 'id': 179, 'def': 'a small tent used as a dressing room beside the sea or a swimming pool', 'name': 'cabana'}, {'frequency': 'c', 'synset': 'cabin_car.n.01', 'synonyms': ['cabin_car', 'caboose'], 'id': 180, 'def': 'a car on a freight train for use of the train crew; usually the last car on the train', 'name': 'cabin_car'}, {'frequency': 'f', 'synset': 'cabinet.n.01', 'synonyms': ['cabinet'], 'id': 181, 'def': 'a piece of furniture resembling a cupboard with doors and shelves and drawers', 'name': 'cabinet'}, {'frequency': 'r', 
'synset': 'cabinet.n.03', 'synonyms': ['locker', 'storage_locker'], 'id': 182, 'def': 'a storage compartment for clothes and valuables; usually it has a lock', 'name': 'locker'}, {'frequency': 'f', 'synset': 'cake.n.03', 'synonyms': ['cake'], 'id': 183, 'def': 'baked goods made from or based on a mixture of flour, sugar, eggs, and fat', 'name': 'cake'}, {'frequency': 'c', 'synset': 'calculator.n.02', 'synonyms': ['calculator'], 'id': 184, 'def': 'a small machine that is used for mathematical calculations', 'name': 'calculator'}, {'frequency': 'f', 'synset': 'calendar.n.02', 'synonyms': ['calendar'], 'id': 185, 'def': 'a list or register of events (appointments/social events/court cases, etc)', 'name': 'calendar'}, {'frequency': 'c', 'synset': 'calf.n.01', 'synonyms': ['calf'], 'id': 186, 'def': 'young of domestic cattle', 'name': 'calf'}, {'frequency': 'c', 'synset': 'camcorder.n.01', 'synonyms': ['camcorder'], 'id': 187, 'def': 'a portable television camera and videocassette recorder', 'name': 'camcorder'}, {'frequency': 'c', 'synset': 'camel.n.01', 'synonyms': ['camel'], 'id': 188, 'def': 'cud-chewing mammal used as a draft or saddle animal in desert regions', 'name': 'camel'}, {'frequency': 'f', 'synset': 'camera.n.01', 'synonyms': ['camera'], 'id': 189, 'def': 'equipment for taking photographs', 'name': 'camera'}, {'frequency': 'c', 'synset': 'camera_lens.n.01', 'synonyms': ['camera_lens'], 'id': 190, 'def': 'a lens that focuses the image in a camera', 'name': 'camera_lens'}, {'frequency': 'c', 'synset': 'camper.n.02', 'synonyms': ['camper_(vehicle)', 'camping_bus', 'motor_home'], 'id': 191, 'def': 'a recreational vehicle equipped for camping out while traveling', 'name': 'camper_(vehicle)'}, {'frequency': 'f', 'synset': 'can.n.01', 'synonyms': ['can', 'tin_can'], 'id': 192, 'def': 'airtight sealed metal container for food or drink or paint etc.', 'name': 'can'}, {'frequency': 'c', 'synset': 'can_opener.n.01', 'synonyms': ['can_opener', 'tin_opener'], 'id': 193, 'def': 'a device for cutting cans open', 'name': 'can_opener'}, {'frequency': 'f', 'synset': 'candle.n.01', 'synonyms': ['candle', 'candlestick'], 'id': 194, 'def': 'stick of wax with a wick in the middle', 'name': 'candle'}, {'frequency': 'f', 'synset': 'candlestick.n.01', 'synonyms': ['candle_holder'], 'id': 195, 'def': 'a holder with sockets for candles', 'name': 'candle_holder'}, {'frequency': 'r', 'synset': 'candy_bar.n.01', 'synonyms': ['candy_bar'], 'id': 196, 'def': 'a candy shaped as a bar', 'name': 'candy_bar'}, {'frequency': 'c', 'synset': 'candy_cane.n.01', 'synonyms': ['candy_cane'], 'id': 197, 'def': 'a hard candy in the shape of a rod (usually with stripes)', 'name': 'candy_cane'}, {'frequency': 'c', 'synset': 'cane.n.01', 'synonyms': ['walking_cane'], 'id': 198, 'def': 'a stick that people can lean on to help them walk', 'name': 'walking_cane'}, {'frequency': 'c', 'synset': 'canister.n.02', 'synonyms': ['canister', 'cannister'], 'id': 199, 'def': 'metal container for storing dry foods such as tea or flour', 'name': 'canister'}, {'frequency': 'c', 'synset': 'canoe.n.01', 'synonyms': ['canoe'], 'id': 200, 'def': 'small and light boat; pointed at both ends; propelled with a paddle', 'name': 'canoe'}, {'frequency': 'c', 'synset': 'cantaloup.n.02', 'synonyms': ['cantaloup', 'cantaloupe'], 'id': 201, 'def': 'the fruit of a cantaloup vine; small to medium-sized melon with yellowish flesh', 'name': 'cantaloup'}, {'frequency': 'r', 'synset': 'canteen.n.01', 'synonyms': ['canteen'], 'id': 202, 'def': 'a flask for carrying 
water; used by soldiers or travelers', 'name': 'canteen'}, {'frequency': 'f', 'synset': 'cap.n.01', 'synonyms': ['cap_(headwear)'], 'id': 203, 'def': 'a tight-fitting headwear', 'name': 'cap_(headwear)'}, {'frequency': 'f', 'synset': 'cap.n.02', 'synonyms': ['bottle_cap', 'cap_(container_lid)'], 'id': 204, 'def': 'a top (as for a bottle)', 'name': 'bottle_cap'}, {'frequency': 'c', 'synset': 'cape.n.02', 'synonyms': ['cape'], 'id': 205, 'def': 'a sleeveless garment like a cloak but shorter', 'name': 'cape'}, {'frequency': 'c', 'synset': 'cappuccino.n.01', 'synonyms': ['cappuccino', 'coffee_cappuccino'], 'id': 206, 'def': 'equal parts of espresso and steamed milk', 'name': 'cappuccino'}, {'frequency': 'f', 'synset': 'car.n.01', 'synonyms': ['car_(automobile)', 'auto_(automobile)', 'automobile'], 'id': 207, 'def': 'a motor vehicle with four wheels', 'name': 'car_(automobile)'}, {'frequency': 'f', 'synset': 'car.n.02', 'synonyms': ['railcar_(part_of_a_train)', 'railway_car_(part_of_a_train)', 'railroad_car_(part_of_a_train)'], 'id': 208, 'def': 'a wheeled vehicle adapted to the rails of railroad (mark each individual railcar separately)', 'name': 'railcar_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'car.n.04', 'synonyms': ['elevator_car'], 'id': 209, 'def': 'where passengers ride up and down', 'name': 'elevator_car'}, {'frequency': 'r', 'synset': 'car_battery.n.01', 'synonyms': ['car_battery', 'automobile_battery'], 'id': 210, 'def': 'a battery in a motor vehicle', 'name': 'car_battery'}, {'frequency': 'c', 'synset': 'card.n.02', 'synonyms': ['identity_card'], 'id': 211, 'def': 'a card certifying the identity of the bearer', 'name': 'identity_card'}, {'frequency': 'c', 'synset': 'card.n.03', 'synonyms': ['card'], 'id': 212, 'def': 'a rectangular piece of paper used to send messages (e.g. 
greetings or pictures)', 'name': 'card'}, {'frequency': 'c', 'synset': 'cardigan.n.01', 'synonyms': ['cardigan'], 'id': 213, 'def': 'knitted jacket that is fastened up the front with buttons or a zipper', 'name': 'cardigan'}, {'frequency': 'r', 'synset': 'cargo_ship.n.01', 'synonyms': ['cargo_ship', 'cargo_vessel'], 'id': 214, 'def': 'a ship designed to carry cargo', 'name': 'cargo_ship'}, {'frequency': 'r', 'synset': 'carnation.n.01', 'synonyms': ['carnation'], 'id': 215, 'def': 'plant with pink to purple-red spice-scented usually double flowers', 'name': 'carnation'}, {'frequency': 'c', 'synset': 'carriage.n.02', 'synonyms': ['horse_carriage'], 'id': 216, 'def': 'a vehicle with wheels drawn by one or more horses', 'name': 'horse_carriage'}, {'frequency': 'f', 'synset': 'carrot.n.01', 'synonyms': ['carrot'], 'id': 217, 'def': 'deep orange edible root of the cultivated carrot plant', 'name': 'carrot'}, {'frequency': 'f', 'synset': 'carryall.n.01', 'synonyms': ['tote_bag'], 'id': 218, 'def': 'a capacious bag or basket', 'name': 'tote_bag'}, {'frequency': 'c', 'synset': 'cart.n.01', 'synonyms': ['cart'], 'id': 219, 'def': 'a heavy open wagon usually having two wheels and drawn by an animal', 'name': 'cart'}, {'frequency': 'c', 'synset': 'carton.n.02', 'synonyms': ['carton'], 'id': 220, 'def': 'a container made of cardboard for holding food or drink', 'name': 'carton'}, {'frequency': 'c', 'synset': 'cash_register.n.01', 'synonyms': ['cash_register', 'register_(for_cash_transactions)'], 'id': 221, 'def': 'a cashbox with an adding machine to register transactions', 'name': 'cash_register'}, {'frequency': 'r', 'synset': 'casserole.n.01', 'synonyms': ['casserole'], 'id': 222, 'def': 'food cooked and served in a casserole', 'name': 'casserole'}, {'frequency': 'r', 'synset': 'cassette.n.01', 'synonyms': ['cassette'], 'id': 223, 'def': 'a container that holds a magnetic tape used for recording or playing sound or video', 'name': 'cassette'}, {'frequency': 'c', 'synset': 'cast.n.05', 'synonyms': ['cast', 'plaster_cast', 'plaster_bandage'], 'id': 224, 'def': 'bandage consisting of a firm covering that immobilizes broken bones while they heal', 'name': 'cast'}, {'frequency': 'f', 'synset': 'cat.n.01', 'synonyms': ['cat'], 'id': 225, 'def': 'a domestic house cat', 'name': 'cat'}, {'frequency': 'f', 'synset': 'cauliflower.n.02', 'synonyms': ['cauliflower'], 'id': 226, 'def': 'edible compact head of white undeveloped flowers', 'name': 'cauliflower'}, {'frequency': 'c', 'synset': 'cayenne.n.02', 'synonyms': ['cayenne_(spice)', 'cayenne_pepper_(spice)', 'red_pepper_(spice)'], 'id': 227, 'def': 'ground pods and seeds of pungent red peppers of the genus Capsicum', 'name': 'cayenne_(spice)'}, {'frequency': 'c', 'synset': 'cd_player.n.01', 'synonyms': ['CD_player'], 'id': 228, 'def': 'electronic equipment for playing compact discs (CDs)', 'name': 'CD_player'}, {'frequency': 'f', 'synset': 'celery.n.01', 'synonyms': ['celery'], 'id': 229, 'def': 'widely cultivated herb with aromatic leaf stalks that are eaten raw or cooked', 'name': 'celery'}, {'frequency': 'f', 'synset': 'cellular_telephone.n.01', 'synonyms': ['cellular_telephone', 'cellular_phone', 'cellphone', 'mobile_phone', 'smart_phone'], 'id': 230, 'def': 'a hand-held mobile telephone', 'name': 'cellular_telephone'}, {'frequency': 'r', 'synset': 'chain_mail.n.01', 'synonyms': ['chain_mail', 'ring_mail', 'chain_armor', 'chain_armour', 'ring_armor', 'ring_armour'], 'id': 231, 'def': '(Middle Ages) flexible armor made of interlinked metal rings', 'name': 
'chain_mail'}, {'frequency': 'f', 'synset': 'chair.n.01', 'synonyms': ['chair'], 'id': 232, 'def': 'a seat for one person, with a support for the back', 'name': 'chair'}, {'frequency': 'r', 'synset': 'chaise_longue.n.01', 'synonyms': ['chaise_longue', 'chaise', 'daybed'], 'id': 233, 'def': 'a long chair; for reclining', 'name': 'chaise_longue'}, {'frequency': 'r', 'synset': 'chalice.n.01', 'synonyms': ['chalice'], 'id': 234, 'def': 'a bowl-shaped drinking vessel; especially the Eucharistic cup', 'name': 'chalice'}, {'frequency': 'f', 'synset': 'chandelier.n.01', 'synonyms': ['chandelier'], 'id': 235, 'def': 'branched lighting fixture; often ornate; hangs from the ceiling', 'name': 'chandelier'}, {'frequency': 'r', 'synset': 'chap.n.04', 'synonyms': ['chap'], 'id': 236, 'def': 'leather leggings without a seat; worn over trousers by cowboys to protect their legs', 'name': 'chap'}, {'frequency': 'r', 'synset': 'checkbook.n.01', 'synonyms': ['checkbook', 'chequebook'], 'id': 237, 'def': 'a book issued to holders of checking accounts', 'name': 'checkbook'}, {'frequency': 'r', 'synset': 'checkerboard.n.01', 'synonyms': ['checkerboard'], 'id': 238, 'def': 'a board having 64 squares of two alternating colors', 'name': 'checkerboard'}, {'frequency': 'c', 'synset': 'cherry.n.03', 'synonyms': ['cherry'], 'id': 239, 'def': 'a red fruit with a single hard stone', 'name': 'cherry'}, {'frequency': 'r', 'synset': 'chessboard.n.01', 'synonyms': ['chessboard'], 'id': 240, 'def': 'a checkerboard used to play chess', 'name': 'chessboard'}, {'frequency': 'c', 'synset': 'chicken.n.02', 'synonyms': ['chicken_(animal)'], 'id': 241, 'def': 'a domestic fowl bred for flesh or eggs', 'name': 'chicken_(animal)'}, {'frequency': 'c', 'synset': 'chickpea.n.01', 'synonyms': ['chickpea', 'garbanzo'], 'id': 242, 'def': 'the seed of the chickpea plant; usually dried', 'name': 'chickpea'}, {'frequency': 'c', 'synset': 'chili.n.02', 'synonyms': ['chili_(vegetable)', 'chili_pepper_(vegetable)', 'chilli_(vegetable)', 'chilly_(vegetable)', 'chile_(vegetable)'], 'id': 243, 'def': 'very hot and finely tapering pepper of special pungency', 'name': 'chili_(vegetable)'}, {'frequency': 'r', 'synset': 'chime.n.01', 'synonyms': ['chime', 'gong'], 'id': 244, 'def': 'an instrument consisting of a set of bells that are struck with a hammer', 'name': 'chime'}, {'frequency': 'r', 'synset': 'chinaware.n.01', 'synonyms': ['chinaware'], 'id': 245, 'def': 'dishware made of high quality porcelain', 'name': 'chinaware'}, {'frequency': 'c', 'synset': 'chip.n.04', 'synonyms': ['crisp_(potato_chip)', 'potato_chip'], 'id': 246, 'def': 'a thin crisp slice of potato fried in deep fat', 'name': 'crisp_(potato_chip)'}, {'frequency': 'r', 'synset': 'chip.n.06', 'synonyms': ['poker_chip'], 'id': 247, 'def': 'a small disk-shaped counter used to represent money when gambling', 'name': 'poker_chip'}, {'frequency': 'c', 'synset': 'chocolate_bar.n.01', 'synonyms': ['chocolate_bar'], 'id': 248, 'def': 'a bar of chocolate candy', 'name': 'chocolate_bar'}, {'frequency': 'c', 'synset': 'chocolate_cake.n.01', 'synonyms': ['chocolate_cake'], 'id': 249, 'def': 'cake containing chocolate', 'name': 'chocolate_cake'}, {'frequency': 'r', 'synset': 'chocolate_milk.n.01', 'synonyms': ['chocolate_milk'], 'id': 250, 'def': 'milk flavored with chocolate syrup', 'name': 'chocolate_milk'}, {'frequency': 'r', 'synset': 'chocolate_mousse.n.01', 'synonyms': ['chocolate_mousse'], 'id': 251, 'def': 'dessert mousse made with chocolate', 'name': 'chocolate_mousse'}, {'frequency': 'f', 
'synset': 'choker.n.03', 'synonyms': ['choker', 'collar', 'neckband'], 'id': 252, 'def': 'shirt collar, animal collar, or tight-fitting necklace', 'name': 'choker'}, {'frequency': 'f', 'synset': 'chopping_board.n.01', 'synonyms': ['chopping_board', 'cutting_board', 'chopping_block'], 'id': 253, 'def': 'a wooden board where meats or vegetables can be cut', 'name': 'chopping_board'}, {'frequency': 'f', 'synset': 'chopstick.n.01', 'synonyms': ['chopstick'], 'id': 254, 'def': 'one of a pair of slender sticks used as oriental tableware to eat food with', 'name': 'chopstick'}, {'frequency': 'f', 'synset': 'christmas_tree.n.05', 'synonyms': ['Christmas_tree'], 'id': 255, 'def': 'an ornamented evergreen used as a Christmas decoration', 'name': 'Christmas_tree'}, {'frequency': 'c', 'synset': 'chute.n.02', 'synonyms': ['slide'], 'id': 256, 'def': 'sloping channel through which things can descend', 'name': 'slide'}, {'frequency': 'r', 'synset': 'cider.n.01', 'synonyms': ['cider', 'cyder'], 'id': 257, 'def': 'a beverage made from juice pressed from apples', 'name': 'cider'}, {'frequency': 'r', 'synset': 'cigar_box.n.01', 'synonyms': ['cigar_box'], 'id': 258, 'def': 'a box for holding cigars', 'name': 'cigar_box'}, {'frequency': 'f', 'synset': 'cigarette.n.01', 'synonyms': ['cigarette'], 'id': 259, 'def': 'finely ground tobacco wrapped in paper; for smoking', 'name': 'cigarette'}, {'frequency': 'c', 'synset': 'cigarette_case.n.01', 'synonyms': ['cigarette_case', 'cigarette_pack'], 'id': 260, 'def': 'a small flat case for holding cigarettes', 'name': 'cigarette_case'}, {'frequency': 'f', 'synset': 'cistern.n.02', 'synonyms': ['cistern', 'water_tank'], 'id': 261, 'def': 'a tank that holds the water used to flush a toilet', 'name': 'cistern'}, {'frequency': 'r', 'synset': 'clarinet.n.01', 'synonyms': ['clarinet'], 'id': 262, 'def': 'a single-reed instrument with a straight tube', 'name': 'clarinet'}, {'frequency': 'c', 'synset': 'clasp.n.01', 'synonyms': ['clasp'], 'id': 263, 'def': 'a fastener (as a buckle or hook) that is used to hold two things together', 'name': 'clasp'}, {'frequency': 'c', 'synset': 'cleansing_agent.n.01', 'synonyms': ['cleansing_agent', 'cleanser', 'cleaner'], 'id': 264, 'def': 'a preparation used in cleaning something', 'name': 'cleansing_agent'}, {'frequency': 'r', 'synset': 'cleat.n.02', 'synonyms': ['cleat_(for_securing_rope)'], 'id': 265, 'def': 'a fastener (usually with two projecting horns) around which a rope can be secured', 'name': 'cleat_(for_securing_rope)'}, {'frequency': 'r', 'synset': 'clementine.n.01', 'synonyms': ['clementine'], 'id': 266, 'def': 'a variety of mandarin orange', 'name': 'clementine'}, {'frequency': 'c', 'synset': 'clip.n.03', 'synonyms': ['clip'], 'id': 267, 'def': 'any of various small fasteners used to hold loose articles together', 'name': 'clip'}, {'frequency': 'c', 'synset': 'clipboard.n.01', 'synonyms': ['clipboard'], 'id': 268, 'def': 'a small writing board with a clip at the top for holding papers', 'name': 'clipboard'}, {'frequency': 'r', 'synset': 'clipper.n.03', 'synonyms': ['clippers_(for_plants)'], 'id': 269, 'def': 'shears for cutting grass or shrubbery (often used in the plural)', 'name': 'clippers_(for_plants)'}, {'frequency': 'r', 'synset': 'cloak.n.02', 'synonyms': ['cloak'], 'id': 270, 'def': 'a loose outer garment', 'name': 'cloak'}, {'frequency': 'f', 'synset': 'clock.n.01', 'synonyms': ['clock', 'timepiece', 'timekeeper'], 'id': 271, 'def': 'a timepiece that shows the time of day', 'name': 'clock'}, {'frequency': 'f', 'synset': 
'clock_tower.n.01', 'synonyms': ['clock_tower'], 'id': 272, 'def': 'a tower with a large clock visible high up on an outside face', 'name': 'clock_tower'}, {'frequency': 'c', 'synset': 'clothes_hamper.n.01', 'synonyms': ['clothes_hamper', 'laundry_basket', 'clothes_basket'], 'id': 273, 'def': 'a hamper that holds dirty clothes to be washed or wet clothes to be dried', 'name': 'clothes_hamper'}, {'frequency': 'c', 'synset': 'clothespin.n.01', 'synonyms': ['clothespin', 'clothes_peg'], 'id': 274, 'def': 'wood or plastic fastener; for holding clothes on a clothesline', 'name': 'clothespin'}, {'frequency': 'r', 'synset': 'clutch_bag.n.01', 'synonyms': ['clutch_bag'], 'id': 275, 'def': "a woman's strapless purse that is carried in the hand", 'name': 'clutch_bag'}, {'frequency': 'f', 'synset': 'coaster.n.03', 'synonyms': ['coaster'], 'id': 276, 'def': 'a covering (plate or mat) that protects the surface of a table', 'name': 'coaster'}, {'frequency': 'f', 'synset': 'coat.n.01', 'synonyms': ['coat'], 'id': 277, 'def': 'an outer garment that has sleeves and covers the body from shoulder down', 'name': 'coat'}, {'frequency': 'c', 'synset': 'coat_hanger.n.01', 'synonyms': ['coat_hanger', 'clothes_hanger', 'dress_hanger'], 'id': 278, 'def': "a hanger that is shaped like a person's shoulders", 'name': 'coat_hanger'}, {'frequency': 'c', 'synset': 'coatrack.n.01', 'synonyms': ['coatrack', 'hatrack'], 'id': 279, 'def': 'a rack with hooks for temporarily holding coats and hats', 'name': 'coatrack'}, {'frequency': 'c', 'synset': 'cock.n.04', 'synonyms': ['cock', 'rooster'], 'id': 280, 'def': 'adult male chicken', 'name': 'cock'}, {'frequency': 'r', 'synset': 'cockroach.n.01', 'synonyms': ['cockroach'], 'id': 281, 'def': 'any of numerous chiefly nocturnal insects; some are domestic pests', 'name': 'cockroach'}, {'frequency': 'r', 'synset': 'cocoa.n.01', 'synonyms': ['cocoa_(beverage)', 'hot_chocolate_(beverage)', 'drinking_chocolate'], 'id': 282, 'def': 'a beverage made from cocoa powder and milk and sugar; usually drunk hot', 'name': 'cocoa_(beverage)'}, {'frequency': 'c', 'synset': 'coconut.n.02', 'synonyms': ['coconut', 'cocoanut'], 'id': 283, 'def': 'large hard-shelled brown oval nut with a fibrous husk', 'name': 'coconut'}, {'frequency': 'f', 'synset': 'coffee_maker.n.01', 'synonyms': ['coffee_maker', 'coffee_machine'], 'id': 284, 'def': 'a kitchen appliance for brewing coffee automatically', 'name': 'coffee_maker'}, {'frequency': 'f', 'synset': 'coffee_table.n.01', 'synonyms': ['coffee_table', 'cocktail_table'], 'id': 285, 'def': 'low table where magazines can be placed and coffee or cocktails are served', 'name': 'coffee_table'}, {'frequency': 'c', 'synset': 'coffeepot.n.01', 'synonyms': ['coffeepot'], 'id': 286, 'def': 'tall pot in which coffee is brewed', 'name': 'coffeepot'}, {'frequency': 'r', 'synset': 'coil.n.05', 'synonyms': ['coil'], 'id': 287, 'def': 'tubing that is wound in a spiral', 'name': 'coil'}, {'frequency': 'c', 'synset': 'coin.n.01', 'synonyms': ['coin'], 'id': 288, 'def': 'a flat metal piece (usually a disc) used as money', 'name': 'coin'}, {'frequency': 'c', 'synset': 'colander.n.01', 'synonyms': ['colander', 'cullender'], 'id': 289, 'def': 'bowl-shaped strainer; used to wash or drain foods', 'name': 'colander'}, {'frequency': 'c', 'synset': 'coleslaw.n.01', 'synonyms': ['coleslaw', 'slaw'], 'id': 290, 'def': 'basically shredded cabbage', 'name': 'coleslaw'}, {'frequency': 'r', 'synset': 'coloring_material.n.01', 'synonyms': ['coloring_material', 'colouring_material'], 'id': 291, 
'def': 'any material used for its color', 'name': 'coloring_material'}, {'frequency': 'r', 'synset': 'combination_lock.n.01', 'synonyms': ['combination_lock'], 'id': 292, 'def': 'lock that can be opened only by turning dials in a special sequence', 'name': 'combination_lock'}, {'frequency': 'c', 'synset': 'comforter.n.04', 'synonyms': ['pacifier', 'teething_ring'], 'id': 293, 'def': 'device used for an infant to suck or bite on', 'name': 'pacifier'}, {'frequency': 'r', 'synset': 'comic_book.n.01', 'synonyms': ['comic_book'], 'id': 294, 'def': 'a magazine devoted to comic strips', 'name': 'comic_book'}, {'frequency': 'r', 'synset': 'compass.n.01', 'synonyms': ['compass'], 'id': 295, 'def': 'navigational instrument for finding directions', 'name': 'compass'}, {'frequency': 'f', 'synset': 'computer_keyboard.n.01', 'synonyms': ['computer_keyboard', 'keyboard_(computer)'], 'id': 296, 'def': 'a keyboard that is a data input device for computers', 'name': 'computer_keyboard'}, {'frequency': 'f', 'synset': 'condiment.n.01', 'synonyms': ['condiment'], 'id': 297, 'def': 'a preparation (a sauce or relish or spice) to enhance flavor or enjoyment', 'name': 'condiment'}, {'frequency': 'f', 'synset': 'cone.n.01', 'synonyms': ['cone', 'traffic_cone'], 'id': 298, 'def': 'a cone-shaped object used to direct traffic', 'name': 'cone'}, {'frequency': 'f', 'synset': 'control.n.09', 'synonyms': ['control', 'controller'], 'id': 299, 'def': 'a mechanism that controls the operation of a machine', 'name': 'control'}, {'frequency': 'r', 'synset': 'convertible.n.01', 'synonyms': ['convertible_(automobile)'], 'id': 300, 'def': 'a car that has top that can be folded or removed', 'name': 'convertible_(automobile)'}, {'frequency': 'r', 'synset': 'convertible.n.03', 'synonyms': ['sofa_bed'], 'id': 301, 'def': 'a sofa that can be converted into a bed', 'name': 'sofa_bed'}, {'frequency': 'r', 'synset': 'cooker.n.01', 'synonyms': ['cooker'], 'id': 302, 'def': 'a utensil for cooking', 'name': 'cooker'}, {'frequency': 'f', 'synset': 'cookie.n.01', 'synonyms': ['cookie', 'cooky', 'biscuit_(cookie)'], 'id': 303, 'def': "any of various small flat sweet cakes (`biscuit' is the British term)", 'name': 'cookie'}, {'frequency': 'r', 'synset': 'cooking_utensil.n.01', 'synonyms': ['cooking_utensil'], 'id': 304, 'def': 'a kitchen utensil made of material that does not melt easily; used for cooking', 'name': 'cooking_utensil'}, {'frequency': 'f', 'synset': 'cooler.n.01', 'synonyms': ['cooler_(for_food)', 'ice_chest'], 'id': 305, 'def': 'an insulated box for storing food often with ice', 'name': 'cooler_(for_food)'}, {'frequency': 'f', 'synset': 'cork.n.04', 'synonyms': ['cork_(bottle_plug)', 'bottle_cork'], 'id': 306, 'def': 'the plug in the mouth of a bottle (especially a wine bottle)', 'name': 'cork_(bottle_plug)'}, {'frequency': 'r', 'synset': 'corkboard.n.01', 'synonyms': ['corkboard'], 'id': 307, 'def': 'a sheet consisting of cork granules', 'name': 'corkboard'}, {'frequency': 'c', 'synset': 'corkscrew.n.01', 'synonyms': ['corkscrew', 'bottle_screw'], 'id': 308, 'def': 'a bottle opener that pulls corks', 'name': 'corkscrew'}, {'frequency': 'f', 'synset': 'corn.n.03', 'synonyms': ['edible_corn', 'corn', 'maize'], 'id': 309, 'def': 'ears or kernels of corn that can be prepared and served for human food (only mark individual ears or kernels)', 'name': 'edible_corn'}, {'frequency': 'r', 'synset': 'cornbread.n.01', 'synonyms': ['cornbread'], 'id': 310, 'def': 'bread made primarily of cornmeal', 'name': 'cornbread'}, {'frequency': 'c', 
'synset': 'cornet.n.01', 'synonyms': ['cornet', 'horn', 'trumpet'], 'id': 311, 'def': 'a brass musical instrument with a narrow tube and a flared bell and many valves', 'name': 'cornet'}, {'frequency': 'c', 'synset': 'cornice.n.01', 'synonyms': ['cornice', 'valance', 'valance_board', 'pelmet'], 'id': 312, 'def': 'a decorative framework to conceal curtain fixtures at the top of a window casing', 'name': 'cornice'}, {'frequency': 'r', 'synset': 'cornmeal.n.01', 'synonyms': ['cornmeal'], 'id': 313, 'def': 'coarsely ground corn', 'name': 'cornmeal'}, {'frequency': 'c', 'synset': 'corset.n.01', 'synonyms': ['corset', 'girdle'], 'id': 314, 'def': "a woman's close-fitting foundation garment", 'name': 'corset'}, {'frequency': 'c', 'synset': 'costume.n.04', 'synonyms': ['costume'], 'id': 315, 'def': 'the attire characteristic of a country or a time or a social class', 'name': 'costume'}, {'frequency': 'r', 'synset': 'cougar.n.01', 'synonyms': ['cougar', 'puma', 'catamount', 'mountain_lion', 'panther'], 'id': 316, 'def': 'large American feline resembling a lion', 'name': 'cougar'}, {'frequency': 'r', 'synset': 'coverall.n.01', 'synonyms': ['coverall'], 'id': 317, 'def': 'a loose-fitting protective garment that is worn over other clothing', 'name': 'coverall'}, {'frequency': 'c', 'synset': 'cowbell.n.01', 'synonyms': ['cowbell'], 'id': 318, 'def': 'a bell hung around the neck of cow so that the cow can be easily located', 'name': 'cowbell'}, {'frequency': 'f', 'synset': 'cowboy_hat.n.01', 'synonyms': ['cowboy_hat', 'ten-gallon_hat'], 'id': 319, 'def': 'a hat with a wide brim and a soft crown; worn by American ranch hands', 'name': 'cowboy_hat'}, {'frequency': 'c', 'synset': 'crab.n.01', 'synonyms': ['crab_(animal)'], 'id': 320, 'def': 'decapod having eyes on short stalks and a broad flattened shell and pincers', 'name': 'crab_(animal)'}, {'frequency': 'r', 'synset': 'crab.n.05', 'synonyms': ['crabmeat'], 'id': 321, 'def': 'the edible flesh of any of various crabs', 'name': 'crabmeat'}, {'frequency': 'c', 'synset': 'cracker.n.01', 'synonyms': ['cracker'], 'id': 322, 'def': 'a thin crisp wafer', 'name': 'cracker'}, {'frequency': 'r', 'synset': 'crape.n.01', 'synonyms': ['crape', 'crepe', 'French_pancake'], 'id': 323, 'def': 'small very thin pancake', 'name': 'crape'}, {'frequency': 'f', 'synset': 'crate.n.01', 'synonyms': ['crate'], 'id': 324, 'def': 'a rugged box (usually made of wood); used for shipping', 'name': 'crate'}, {'frequency': 'c', 'synset': 'crayon.n.01', 'synonyms': ['crayon', 'wax_crayon'], 'id': 325, 'def': 'writing or drawing implement made of a colored stick of composition wax', 'name': 'crayon'}, {'frequency': 'r', 'synset': 'cream_pitcher.n.01', 'synonyms': ['cream_pitcher'], 'id': 326, 'def': 'a small pitcher for serving cream', 'name': 'cream_pitcher'}, {'frequency': 'c', 'synset': 'crescent_roll.n.01', 'synonyms': ['crescent_roll', 'croissant'], 'id': 327, 'def': 'very rich flaky crescent-shaped roll', 'name': 'crescent_roll'}, {'frequency': 'c', 'synset': 'crib.n.01', 'synonyms': ['crib', 'cot'], 'id': 328, 'def': 'baby bed with high sides made of slats', 'name': 'crib'}, {'frequency': 'c', 'synset': 'crock.n.03', 'synonyms': ['crock_pot', 'earthenware_jar'], 'id': 329, 'def': 'an earthen jar (made of baked clay) or a modern electric crockpot', 'name': 'crock_pot'}, {'frequency': 'f', 'synset': 'crossbar.n.01', 'synonyms': ['crossbar'], 'id': 330, 'def': 'a horizontal bar that goes across something', 'name': 'crossbar'}, {'frequency': 'r', 'synset': 'crouton.n.01', 'synonyms': 
['crouton'], 'id': 331, 'def': 'a small piece of toasted or fried bread; served in soup or salads', 'name': 'crouton'}, {'frequency': 'c', 'synset': 'crow.n.01', 'synonyms': ['crow'], 'id': 332, 'def': 'black birds having a raucous call', 'name': 'crow'}, {'frequency': 'r', 'synset': 'crowbar.n.01', 'synonyms': ['crowbar', 'wrecking_bar', 'pry_bar'], 'id': 333, 'def': 'a heavy iron lever with one end forged into a wedge', 'name': 'crowbar'}, {'frequency': 'c', 'synset': 'crown.n.04', 'synonyms': ['crown'], 'id': 334, 'def': 'an ornamental jeweled headdress signifying sovereignty', 'name': 'crown'}, {'frequency': 'c', 'synset': 'crucifix.n.01', 'synonyms': ['crucifix'], 'id': 335, 'def': 'representation of the cross on which Jesus died', 'name': 'crucifix'}, {'frequency': 'c', 'synset': 'cruise_ship.n.01', 'synonyms': ['cruise_ship', 'cruise_liner'], 'id': 336, 'def': 'a passenger ship used commercially for pleasure cruises', 'name': 'cruise_ship'}, {'frequency': 'c', 'synset': 'cruiser.n.01', 'synonyms': ['police_cruiser', 'patrol_car', 'police_car', 'squad_car'], 'id': 337, 'def': 'a car in which policemen cruise the streets', 'name': 'police_cruiser'}, {'frequency': 'f', 'synset': 'crumb.n.03', 'synonyms': ['crumb'], 'id': 338, 'def': 'small piece of e.g. bread or cake', 'name': 'crumb'}, {'frequency': 'c', 'synset': 'crutch.n.01', 'synonyms': ['crutch'], 'id': 339, 'def': 'a wooden or metal staff that fits under the armpit and reaches to the ground', 'name': 'crutch'}, {'frequency': 'c', 'synset': 'cub.n.03', 'synonyms': ['cub_(animal)'], 'id': 340, 'def': 'the young of certain carnivorous mammals such as the bear or wolf or lion', 'name': 'cub_(animal)'}, {'frequency': 'c', 'synset': 'cube.n.05', 'synonyms': ['cube', 'square_block'], 'id': 341, 'def': 'a block in the (approximate) shape of a cube', 'name': 'cube'}, {'frequency': 'f', 'synset': 'cucumber.n.02', 'synonyms': ['cucumber', 'cuke'], 'id': 342, 'def': 'cylindrical green fruit with thin green rind and white flesh eaten as a vegetable', 'name': 'cucumber'}, {'frequency': 'c', 'synset': 'cufflink.n.01', 'synonyms': ['cufflink'], 'id': 343, 'def': 'jewelry consisting of linked buttons used to fasten the cuffs of a shirt', 'name': 'cufflink'}, {'frequency': 'f', 'synset': 'cup.n.01', 'synonyms': ['cup'], 'id': 344, 'def': 'a small open container usually used for drinking; usually has a handle', 'name': 'cup'}, {'frequency': 'c', 'synset': 'cup.n.08', 'synonyms': ['trophy_cup'], 'id': 345, 'def': 'a metal award or cup-shaped vessel with handles that is awarded as a trophy to a competition winner', 'name': 'trophy_cup'}, {'frequency': 'f', 'synset': 'cupboard.n.01', 'synonyms': ['cupboard', 'closet'], 'id': 346, 'def': 'a small room (or recess) or cabinet used for storage space', 'name': 'cupboard'}, {'frequency': 'f', 'synset': 'cupcake.n.01', 'synonyms': ['cupcake'], 'id': 347, 'def': 'small cake baked in a muffin tin', 'name': 'cupcake'}, {'frequency': 'r', 'synset': 'curler.n.01', 'synonyms': ['hair_curler', 'hair_roller', 'hair_crimper'], 'id': 348, 'def': 'a cylindrical tube around which the hair is wound to curl it', 'name': 'hair_curler'}, {'frequency': 'r', 'synset': 'curling_iron.n.01', 'synonyms': ['curling_iron'], 'id': 349, 'def': 'a cylindrical home appliance that heats hair that has been curled around it', 'name': 'curling_iron'}, {'frequency': 'f', 'synset': 'curtain.n.01', 'synonyms': ['curtain', 'drapery'], 'id': 350, 'def': 'hanging cloth used as a blind (especially for a window)', 'name': 'curtain'}, 
{'frequency': 'f', 'synset': 'cushion.n.03', 'synonyms': ['cushion'], 'id': 351, 'def': 'a soft bag filled with air or padding such as feathers or foam rubber', 'name': 'cushion'}, {'frequency': 'r', 'synset': 'cylinder.n.04', 'synonyms': ['cylinder'], 'id': 352, 'def': 'a cylindrical container', 'name': 'cylinder'}, {'frequency': 'r', 'synset': 'cymbal.n.01', 'synonyms': ['cymbal'], 'id': 353, 'def': 'a percussion instrument consisting of a concave brass disk', 'name': 'cymbal'}, {'frequency': 'r', 'synset': 'dagger.n.01', 'synonyms': ['dagger'], 'id': 354, 'def': 'a short knife with a pointed blade used for piercing or stabbing', 'name': 'dagger'}, {'frequency': 'r', 'synset': 'dalmatian.n.02', 'synonyms': ['dalmatian'], 'id': 355, 'def': 'a large breed having a smooth white coat with black or brown spots', 'name': 'dalmatian'}, {'frequency': 'c', 'synset': 'dartboard.n.01', 'synonyms': ['dartboard'], 'id': 356, 'def': 'a circular board of wood or cork used as the target in the game of darts', 'name': 'dartboard'}, {'frequency': 'r', 'synset': 'date.n.08', 'synonyms': ['date_(fruit)'], 'id': 357, 'def': 'sweet edible fruit of the date palm with a single long woody seed', 'name': 'date_(fruit)'}, {'frequency': 'f', 'synset': 'deck_chair.n.01', 'synonyms': ['deck_chair', 'beach_chair'], 'id': 358, 'def': 'a folding chair for use outdoors; a wooden frame supports a length of canvas', 'name': 'deck_chair'}, {'frequency': 'c', 'synset': 'deer.n.01', 'synonyms': ['deer', 'cervid'], 'id': 359, 'def': "distinguished from Bovidae by the male's having solid deciduous antlers", 'name': 'deer'}, {'frequency': 'c', 'synset': 'dental_floss.n.01', 'synonyms': ['dental_floss', 'floss'], 'id': 360, 'def': 'a soft thread for cleaning the spaces between the teeth', 'name': 'dental_floss'}, {'frequency': 'f', 'synset': 'desk.n.01', 'synonyms': ['desk'], 'id': 361, 'def': 'a piece of furniture with a writing surface and usually drawers or other compartments', 'name': 'desk'}, {'frequency': 'r', 'synset': 'detergent.n.01', 'synonyms': ['detergent'], 'id': 362, 'def': 'a surface-active chemical widely used in industry and laundering', 'name': 'detergent'}, {'frequency': 'c', 'synset': 'diaper.n.01', 'synonyms': ['diaper'], 'id': 363, 'def': 'garment consisting of a folded cloth drawn up between the legs and fastened at the waist', 'name': 'diaper'}, {'frequency': 'r', 'synset': 'diary.n.01', 'synonyms': ['diary', 'journal'], 'id': 364, 'def': 'yearly planner book', 'name': 'diary'}, {'frequency': 'r', 'synset': 'die.n.01', 'synonyms': ['die', 'dice'], 'id': 365, 'def': 'a small cube with 1 to 6 spots on the six faces; used in gambling', 'name': 'die'}, {'frequency': 'r', 'synset': 'dinghy.n.01', 'synonyms': ['dinghy', 'dory', 'rowboat'], 'id': 366, 'def': 'a small boat of shallow draft with seats and oars with which it is propelled', 'name': 'dinghy'}, {'frequency': 'f', 'synset': 'dining_table.n.01', 'synonyms': ['dining_table'], 'id': 367, 'def': 'a table at which meals are served', 'name': 'dining_table'}, {'frequency': 'r', 'synset': 'dinner_jacket.n.01', 'synonyms': ['tux', 'tuxedo'], 'id': 368, 'def': 'semiformal evening dress for men', 'name': 'tux'}, {'frequency': 'f', 'synset': 'dish.n.01', 'synonyms': ['dish'], 'id': 369, 'def': 'a piece of dishware normally used as a container for holding or serving food', 'name': 'dish'}, {'frequency': 'c', 'synset': 'dish.n.05', 'synonyms': ['dish_antenna'], 'id': 370, 'def': 'directional antenna consisting of a parabolic reflector', 'name': 'dish_antenna'}, 
{'frequency': 'c', 'synset': 'dishrag.n.01', 'synonyms': ['dishrag', 'dishcloth'], 'id': 371, 'def': 'a cloth for washing dishes or cleaning in general', 'name': 'dishrag'}, {'frequency': 'f', 'synset': 'dishtowel.n.01', 'synonyms': ['dishtowel', 'tea_towel'], 'id': 372, 'def': 'a towel for drying dishes', 'name': 'dishtowel'}, {'frequency': 'f', 'synset': 'dishwasher.n.01', 'synonyms': ['dishwasher', 'dishwashing_machine'], 'id': 373, 'def': 'a machine for washing dishes', 'name': 'dishwasher'}, {'frequency': 'r', 'synset': 'dishwasher_detergent.n.01', 'synonyms': ['dishwasher_detergent', 'dishwashing_detergent', 'dishwashing_liquid', 'dishsoap'], 'id': 374, 'def': 'dishsoap or dish detergent designed for use in dishwashers', 'name': 'dishwasher_detergent'}, {'frequency': 'f', 'synset': 'dispenser.n.01', 'synonyms': ['dispenser'], 'id': 375, 'def': 'a container so designed that the contents can be used in prescribed amounts', 'name': 'dispenser'}, {'frequency': 'r', 'synset': 'diving_board.n.01', 'synonyms': ['diving_board'], 'id': 376, 'def': 'a springboard from which swimmers can dive', 'name': 'diving_board'}, {'frequency': 'f', 'synset': 'dixie_cup.n.01', 'synonyms': ['Dixie_cup', 'paper_cup'], 'id': 377, 'def': 'a disposable cup made of paper; for holding drinks', 'name': 'Dixie_cup'}, {'frequency': 'f', 'synset': 'dog.n.01', 'synonyms': ['dog'], 'id': 378, 'def': 'a common domesticated dog', 'name': 'dog'}, {'frequency': 'f', 'synset': 'dog_collar.n.01', 'synonyms': ['dog_collar'], 'id': 379, 'def': 'a collar for a dog', 'name': 'dog_collar'}, {'frequency': 'f', 'synset': 'doll.n.01', 'synonyms': ['doll'], 'id': 380, 'def': 'a toy replica of a HUMAN (NOT AN ANIMAL)', 'name': 'doll'}, {'frequency': 'r', 'synset': 'dollar.n.02', 'synonyms': ['dollar', 'dollar_bill', 'one_dollar_bill'], 'id': 381, 'def': 'a piece of paper money worth one dollar', 'name': 'dollar'}, {'frequency': 'r', 'synset': 'dollhouse.n.01', 'synonyms': ['dollhouse', "doll's_house"], 'id': 382, 'def': "a house so small that it is likened to a child's plaything", 'name': 'dollhouse'}, {'frequency': 'c', 'synset': 'dolphin.n.02', 'synonyms': ['dolphin'], 'id': 383, 'def': 'any of various small toothed whales with a beaklike snout; larger than porpoises', 'name': 'dolphin'}, {'frequency': 'c', 'synset': 'domestic_ass.n.01', 'synonyms': ['domestic_ass', 'donkey'], 'id': 384, 'def': 'domestic beast of burden descended from the African wild ass; patient but stubborn', 'name': 'domestic_ass'}, {'frequency': 'f', 'synset': 'doorknob.n.01', 'synonyms': ['doorknob', 'doorhandle'], 'id': 385, 'def': "a knob used to open a door (often called `doorhandle' in Great Britain)", 'name': 'doorknob'}, {'frequency': 'c', 'synset': 'doormat.n.02', 'synonyms': ['doormat', 'welcome_mat'], 'id': 386, 'def': 'a mat placed outside an exterior door for wiping the shoes before entering', 'name': 'doormat'}, {'frequency': 'f', 'synset': 'doughnut.n.02', 'synonyms': ['doughnut', 'donut'], 'id': 387, 'def': 'a small ring-shaped friedcake', 'name': 'doughnut'}, {'frequency': 'r', 'synset': 'dove.n.01', 'synonyms': ['dove'], 'id': 388, 'def': 'any of numerous small pigeons', 'name': 'dove'}, {'frequency': 'r', 'synset': 'dragonfly.n.01', 'synonyms': ['dragonfly'], 'id': 389, 'def': 'slender-bodied non-stinging insect having iridescent wings that are outspread at rest', 'name': 'dragonfly'}, {'frequency': 'f', 'synset': 'drawer.n.01', 'synonyms': ['drawer'], 'id': 390, 'def': 'a boxlike container in a piece of furniture; made so as to slide in and 
out', 'name': 'drawer'}, {'frequency': 'c', 'synset': 'drawers.n.01', 'synonyms': ['underdrawers', 'boxers', 'boxershorts'], 'id': 391, 'def': 'underpants worn by men', 'name': 'underdrawers'}, {'frequency': 'f', 'synset': 'dress.n.01', 'synonyms': ['dress', 'frock'], 'id': 392, 'def': 'a one-piece garment for a woman; has skirt and bodice', 'name': 'dress'}, {'frequency': 'c', 'synset': 'dress_hat.n.01', 'synonyms': ['dress_hat', 'high_hat', 'opera_hat', 'silk_hat', 'top_hat'], 'id': 393, 'def': "a man's hat with a tall crown; usually covered with silk or with beaver fur", 'name': 'dress_hat'}, {'frequency': 'f', 'synset': 'dress_suit.n.01', 'synonyms': ['dress_suit'], 'id': 394, 'def': 'formalwear consisting of full evening dress for men', 'name': 'dress_suit'}, {'frequency': 'f', 'synset': 'dresser.n.05', 'synonyms': ['dresser'], 'id': 395, 'def': 'a cabinet with shelves', 'name': 'dresser'}, {'frequency': 'c', 'synset': 'drill.n.01', 'synonyms': ['drill'], 'id': 396, 'def': 'a tool with a sharp rotating point for making holes in hard materials', 'name': 'drill'}, {'frequency': 'r', 'synset': 'drone.n.04', 'synonyms': ['drone'], 'id': 397, 'def': 'an aircraft without a pilot that is operated by remote control', 'name': 'drone'}, {'frequency': 'r', 'synset': 'dropper.n.01', 'synonyms': ['dropper', 'eye_dropper'], 'id': 398, 'def': 'pipet consisting of a small tube with a vacuum bulb at one end for drawing liquid in and releasing it a drop at a time', 'name': 'dropper'}, {'frequency': 'c', 'synset': 'drum.n.01', 'synonyms': ['drum_(musical_instrument)'], 'id': 399, 'def': 'a musical percussion instrument; usually consists of a hollow cylinder with a membrane stretched across each end', 'name': 'drum_(musical_instrument)'}, {'frequency': 'r', 'synset': 'drumstick.n.02', 'synonyms': ['drumstick'], 'id': 400, 'def': 'a stick used for playing a drum', 'name': 'drumstick'}, {'frequency': 'f', 'synset': 'duck.n.01', 'synonyms': ['duck'], 'id': 401, 'def': 'small web-footed broad-billed swimming bird', 'name': 'duck'}, {'frequency': 'c', 'synset': 'duckling.n.02', 'synonyms': ['duckling'], 'id': 402, 'def': 'young duck', 'name': 'duckling'}, {'frequency': 'c', 'synset': 'duct_tape.n.01', 'synonyms': ['duct_tape'], 'id': 403, 'def': 'a wide silvery adhesive tape', 'name': 'duct_tape'}, {'frequency': 'f', 'synset': 'duffel_bag.n.01', 'synonyms': ['duffel_bag', 'duffle_bag', 'duffel', 'duffle'], 'id': 404, 'def': 'a large cylindrical bag of heavy cloth (does not include suitcases)', 'name': 'duffel_bag'}, {'frequency': 'r', 'synset': 'dumbbell.n.01', 'synonyms': ['dumbbell'], 'id': 405, 'def': 'an exercising weight with two ball-like ends connected by a short handle', 'name': 'dumbbell'}, {'frequency': 'c', 'synset': 'dumpster.n.01', 'synonyms': ['dumpster'], 'id': 406, 'def': 'a container designed to receive and transport and dump waste', 'name': 'dumpster'}, {'frequency': 'r', 'synset': 'dustpan.n.02', 'synonyms': ['dustpan'], 'id': 407, 'def': 'a short-handled receptacle into which dust can be swept', 'name': 'dustpan'}, {'frequency': 'c', 'synset': 'eagle.n.01', 'synonyms': ['eagle'], 'id': 408, 'def': 'large birds of prey noted for their broad wings and strong soaring flight', 'name': 'eagle'}, {'frequency': 'f', 'synset': 'earphone.n.01', 'synonyms': ['earphone', 'earpiece', 'headphone'], 'id': 409, 'def': 'device for listening to audio that is held over or inserted into the ear', 'name': 'earphone'}, {'frequency': 'r', 'synset': 'earplug.n.01', 'synonyms': ['earplug'], 'id': 410, 'def': 'a 
soft plug that is inserted into the ear canal to block sound', 'name': 'earplug'}, {'frequency': 'f', 'synset': 'earring.n.01', 'synonyms': ['earring'], 'id': 411, 'def': 'jewelry to ornament the ear', 'name': 'earring'}, {'frequency': 'c', 'synset': 'easel.n.01', 'synonyms': ['easel'], 'id': 412, 'def': "an upright tripod for displaying something (usually an artist's canvas)", 'name': 'easel'}, {'frequency': 'r', 'synset': 'eclair.n.01', 'synonyms': ['eclair'], 'id': 413, 'def': 'oblong cream puff', 'name': 'eclair'}, {'frequency': 'r', 'synset': 'eel.n.01', 'synonyms': ['eel'], 'id': 414, 'def': 'an elongate fish with fatty flesh', 'name': 'eel'}, {'frequency': 'f', 'synset': 'egg.n.02', 'synonyms': ['egg', 'eggs'], 'id': 415, 'def': 'oval reproductive body of a fowl (especially a hen) used as food', 'name': 'egg'}, {'frequency': 'r', 'synset': 'egg_roll.n.01', 'synonyms': ['egg_roll', 'spring_roll'], 'id': 416, 'def': 'minced vegetables and meat wrapped in a pancake and fried', 'name': 'egg_roll'}, {'frequency': 'c', 'synset': 'egg_yolk.n.01', 'synonyms': ['egg_yolk', 'yolk_(egg)'], 'id': 417, 'def': 'the yellow spherical part of an egg', 'name': 'egg_yolk'}, {'frequency': 'c', 'synset': 'eggbeater.n.02', 'synonyms': ['eggbeater', 'eggwhisk'], 'id': 418, 'def': 'a mixer for beating eggs or whipping cream', 'name': 'eggbeater'}, {'frequency': 'c', 'synset': 'eggplant.n.01', 'synonyms': ['eggplant', 'aubergine'], 'id': 419, 'def': 'egg-shaped vegetable having a shiny skin typically dark purple', 'name': 'eggplant'}, {'frequency': 'r', 'synset': 'electric_chair.n.01', 'synonyms': ['electric_chair'], 'id': 420, 'def': 'a chair-shaped instrument of execution by electrocution', 'name': 'electric_chair'}, {'frequency': 'f', 'synset': 'electric_refrigerator.n.01', 'synonyms': ['refrigerator'], 'id': 421, 'def': 'a refrigerator in which the coolant is pumped around by an electric motor', 'name': 'refrigerator'}, {'frequency': 'f', 'synset': 'elephant.n.01', 'synonyms': ['elephant'], 'id': 422, 'def': 'a common elephant', 'name': 'elephant'}, {'frequency': 'c', 'synset': 'elk.n.01', 'synonyms': ['elk', 'moose'], 'id': 423, 'def': 'large northern deer with enormous flattened antlers in the male', 'name': 'elk'}, {'frequency': 'c', 'synset': 'envelope.n.01', 'synonyms': ['envelope'], 'id': 424, 'def': 'a flat (usually rectangular) container for a letter, thin package, etc.', 'name': 'envelope'}, {'frequency': 'c', 'synset': 'eraser.n.01', 'synonyms': ['eraser'], 'id': 425, 'def': 'an implement used to erase something', 'name': 'eraser'}, {'frequency': 'r', 'synset': 'escargot.n.01', 'synonyms': ['escargot'], 'id': 426, 'def': 'edible snail usually served in the shell with a sauce of melted butter and garlic', 'name': 'escargot'}, {'frequency': 'r', 'synset': 'eyepatch.n.01', 'synonyms': ['eyepatch'], 'id': 427, 'def': 'a protective cloth covering for an injured eye', 'name': 'eyepatch'}, {'frequency': 'r', 'synset': 'falcon.n.01', 'synonyms': ['falcon'], 'id': 428, 'def': 'birds of prey having long pointed powerful wings adapted for swift flight', 'name': 'falcon'}, {'frequency': 'f', 'synset': 'fan.n.01', 'synonyms': ['fan'], 'id': 429, 'def': 'a device for creating a current of air by movement of a surface or surfaces', 'name': 'fan'}, {'frequency': 'f', 'synset': 'faucet.n.01', 'synonyms': ['faucet', 'spigot', 'tap'], 'id': 430, 'def': 'a regulator for controlling the flow of a liquid from a reservoir', 'name': 'faucet'}, {'frequency': 'r', 'synset': 'fedora.n.01', 'synonyms': ['fedora'], 'id': 
431, 'def': 'a hat made of felt with a creased crown', 'name': 'fedora'}, {'frequency': 'r', 'synset': 'ferret.n.02', 'synonyms': ['ferret'], 'id': 432, 'def': 'domesticated albino variety of the European polecat bred for hunting rats and rabbits', 'name': 'ferret'}, {'frequency': 'c', 'synset': 'ferris_wheel.n.01', 'synonyms': ['Ferris_wheel'], 'id': 433, 'def': 'a large wheel with suspended seats that remain upright as the wheel rotates', 'name': 'Ferris_wheel'}, {'frequency': 'c', 'synset': 'ferry.n.01', 'synonyms': ['ferry', 'ferryboat'], 'id': 434, 'def': 'a boat that transports people or vehicles across a body of water and operates on a regular schedule', 'name': 'ferry'}, {'frequency': 'r', 'synset': 'fig.n.04', 'synonyms': ['fig_(fruit)'], 'id': 435, 'def': 'fleshy sweet pear-shaped yellowish or purple fruit eaten fresh or preserved or dried', 'name': 'fig_(fruit)'}, {'frequency': 'c', 'synset': 'fighter.n.02', 'synonyms': ['fighter_jet', 'fighter_aircraft', 'attack_aircraft'], 'id': 436, 'def': 'a high-speed military or naval airplane designed to destroy enemy targets', 'name': 'fighter_jet'}, {'frequency': 'f', 'synset': 'figurine.n.01', 'synonyms': ['figurine'], 'id': 437, 'def': 'a small carved or molded figure', 'name': 'figurine'}, {'frequency': 'c', 'synset': 'file.n.03', 'synonyms': ['file_cabinet', 'filing_cabinet'], 'id': 438, 'def': 'office furniture consisting of a container for keeping papers in order', 'name': 'file_cabinet'}, {'frequency': 'r', 'synset': 'file.n.04', 'synonyms': ['file_(tool)'], 'id': 439, 'def': 'a steel hand tool with small sharp teeth on some or all of its surfaces; used for smoothing wood or metal', 'name': 'file_(tool)'}, {'frequency': 'f', 'synset': 'fire_alarm.n.02', 'synonyms': ['fire_alarm', 'smoke_alarm'], 'id': 440, 'def': 'an alarm that is tripped off by fire or smoke', 'name': 'fire_alarm'}, {'frequency': 'f', 'synset': 'fire_engine.n.01', 'synonyms': ['fire_engine', 'fire_truck'], 'id': 441, 'def': 'large trucks that carry firefighters and equipment to the site of a fire', 'name': 'fire_engine'}, {'frequency': 'f', 'synset': 'fire_extinguisher.n.01', 'synonyms': ['fire_extinguisher', 'extinguisher'], 'id': 442, 'def': 'a manually operated device for extinguishing small fires', 'name': 'fire_extinguisher'}, {'frequency': 'c', 'synset': 'fire_hose.n.01', 'synonyms': ['fire_hose'], 'id': 443, 'def': 'a large hose that carries water from a fire hydrant to the site of the fire', 'name': 'fire_hose'}, {'frequency': 'f', 'synset': 'fireplace.n.01', 'synonyms': ['fireplace'], 'id': 444, 'def': 'an open recess in a wall at the base of a chimney where a fire can be built', 'name': 'fireplace'}, {'frequency': 'f', 'synset': 'fireplug.n.01', 'synonyms': ['fireplug', 'fire_hydrant', 'hydrant'], 'id': 445, 'def': 'an upright hydrant for drawing water to use in fighting a fire', 'name': 'fireplug'}, {'frequency': 'r', 'synset': 'first-aid_kit.n.01', 'synonyms': ['first-aid_kit'], 'id': 446, 'def': 'kit consisting of a set of bandages and medicines for giving first aid', 'name': 'first-aid_kit'}, {'frequency': 'f', 'synset': 'fish.n.01', 'synonyms': ['fish'], 'id': 447, 'def': 'any of various mostly cold-blooded aquatic vertebrates usually having scales and breathing through gills', 'name': 'fish'}, {'frequency': 'c', 'synset': 'fish.n.02', 'synonyms': ['fish_(food)'], 'id': 448, 'def': 'the flesh of fish used as food', 'name': 'fish_(food)'}, {'frequency': 'r', 'synset': 'fishbowl.n.02', 'synonyms': ['fishbowl', 'goldfish_bowl'], 'id': 449, 'def': 'a 
transparent bowl in which small fish are kept', 'name': 'fishbowl'}, {'frequency': 'c', 'synset': 'fishing_rod.n.01', 'synonyms': ['fishing_rod', 'fishing_pole'], 'id': 450, 'def': 'a rod that is used in fishing to extend the fishing line', 'name': 'fishing_rod'}, {'frequency': 'f', 'synset': 'flag.n.01', 'synonyms': ['flag'], 'id': 451, 'def': 'emblem usually consisting of a rectangular piece of cloth of distinctive design (do not include pole)', 'name': 'flag'}, {'frequency': 'f', 'synset': 'flagpole.n.02', 'synonyms': ['flagpole', 'flagstaff'], 'id': 452, 'def': 'a tall staff or pole on which a flag is raised', 'name': 'flagpole'}, {'frequency': 'c', 'synset': 'flamingo.n.01', 'synonyms': ['flamingo'], 'id': 453, 'def': 'large pink web-footed bird with down-bent bill', 'name': 'flamingo'}, {'frequency': 'c', 'synset': 'flannel.n.01', 'synonyms': ['flannel'], 'id': 454, 'def': 'a soft light woolen fabric; used for clothing', 'name': 'flannel'}, {'frequency': 'c', 'synset': 'flap.n.01', 'synonyms': ['flap'], 'id': 455, 'def': 'any broad thin covering attached at one edge, such as a mud flap next to a wheel or a flap on an airplane wing', 'name': 'flap'}, {'frequency': 'r', 'synset': 'flash.n.10', 'synonyms': ['flash', 'flashbulb'], 'id': 456, 'def': 'a lamp for providing momentary light to take a photograph', 'name': 'flash'}, {'frequency': 'c', 'synset': 'flashlight.n.01', 'synonyms': ['flashlight', 'torch'], 'id': 457, 'def': 'a small portable battery-powered electric lamp', 'name': 'flashlight'}, {'frequency': 'r', 'synset': 'fleece.n.03', 'synonyms': ['fleece'], 'id': 458, 'def': 'a soft bulky fabric with deep pile; used chiefly for clothing', 'name': 'fleece'}, {'frequency': 'f', 'synset': 'flip-flop.n.02', 'synonyms': ['flip-flop_(sandal)'], 'id': 459, 'def': 'a backless sandal held to the foot by a thong between two toes', 'name': 'flip-flop_(sandal)'}, {'frequency': 'c', 'synset': 'flipper.n.01', 'synonyms': ['flipper_(footwear)', 'fin_(footwear)'], 'id': 460, 'def': 'a shoe to aid a person in swimming', 'name': 'flipper_(footwear)'}, {'frequency': 'f', 'synset': 'flower_arrangement.n.01', 'synonyms': ['flower_arrangement', 'floral_arrangement'], 'id': 461, 'def': 'a decorative arrangement of flowers', 'name': 'flower_arrangement'}, {'frequency': 'c', 'synset': 'flute.n.02', 'synonyms': ['flute_glass', 'champagne_flute'], 'id': 462, 'def': 'a tall narrow wineglass', 'name': 'flute_glass'}, {'frequency': 'c', 'synset': 'foal.n.01', 'synonyms': ['foal'], 'id': 463, 'def': 'a young horse', 'name': 'foal'}, {'frequency': 'c', 'synset': 'folding_chair.n.01', 'synonyms': ['folding_chair'], 'id': 464, 'def': 'a chair that can be folded flat for storage', 'name': 'folding_chair'}, {'frequency': 'c', 'synset': 'food_processor.n.01', 'synonyms': ['food_processor'], 'id': 465, 'def': 'a kitchen appliance for shredding, blending, chopping, or slicing food', 'name': 'food_processor'}, {'frequency': 'c', 'synset': 'football.n.02', 'synonyms': ['football_(American)'], 'id': 466, 'def': 'the inflated oblong ball used in playing American football', 'name': 'football_(American)'}, {'frequency': 'r', 'synset': 'football_helmet.n.01', 'synonyms': ['football_helmet'], 'id': 467, 'def': 'a padded helmet with a face mask to protect the head of football players', 'name': 'football_helmet'}, {'frequency': 'c', 'synset': 'footstool.n.01', 'synonyms': ['footstool', 'footrest'], 'id': 468, 'def': 'a low seat or a stool to rest the feet of a seated person', 'name': 'footstool'}, {'frequency': 'f', 'synset': 
'fork.n.01', 'synonyms': ['fork'], 'id': 469, 'def': 'cutlery used for serving and eating food', 'name': 'fork'}, {'frequency': 'c', 'synset': 'forklift.n.01', 'synonyms': ['forklift'], 'id': 470, 'def': 'an industrial vehicle with a power operated fork in front that can be inserted under loads to lift and move them', 'name': 'forklift'}, {'frequency': 'c', 'synset': 'freight_car.n.01', 'synonyms': ['freight_car'], 'id': 471, 'def': 'a railway car that carries freight', 'name': 'freight_car'}, {'frequency': 'c', 'synset': 'french_toast.n.01', 'synonyms': ['French_toast'], 'id': 472, 'def': 'bread slice dipped in egg and milk and fried', 'name': 'French_toast'}, {'frequency': 'c', 'synset': 'freshener.n.01', 'synonyms': ['freshener', 'air_freshener'], 'id': 473, 'def': 'anything that freshens air by removing or covering odor', 'name': 'freshener'}, {'frequency': 'f', 'synset': 'frisbee.n.01', 'synonyms': ['frisbee'], 'id': 474, 'def': 'a light, plastic disk propelled with a flip of the wrist for recreation or competition', 'name': 'frisbee'}, {'frequency': 'c', 'synset': 'frog.n.01', 'synonyms': ['frog', 'toad', 'toad_frog'], 'id': 475, 'def': 'a tailless stout-bodied amphibians with long hind limbs for leaping', 'name': 'frog'}, {'frequency': 'c', 'synset': 'fruit_juice.n.01', 'synonyms': ['fruit_juice'], 'id': 476, 'def': 'drink produced by squeezing or crushing fruit', 'name': 'fruit_juice'}, {'frequency': 'f', 'synset': 'frying_pan.n.01', 'synonyms': ['frying_pan', 'frypan', 'skillet'], 'id': 477, 'def': 'a pan used for frying foods', 'name': 'frying_pan'}, {'frequency': 'r', 'synset': 'fudge.n.01', 'synonyms': ['fudge'], 'id': 478, 'def': 'soft creamy candy', 'name': 'fudge'}, {'frequency': 'r', 'synset': 'funnel.n.02', 'synonyms': ['funnel'], 'id': 479, 'def': 'a cone-shaped utensil used to channel a substance into a container with a small mouth', 'name': 'funnel'}, {'frequency': 'r', 'synset': 'futon.n.01', 'synonyms': ['futon'], 'id': 480, 'def': 'a pad that is used for sleeping on the floor or on a raised frame', 'name': 'futon'}, {'frequency': 'r', 'synset': 'gag.n.02', 'synonyms': ['gag', 'muzzle'], 'id': 481, 'def': "restraint put into a person's mouth to prevent speaking or shouting", 'name': 'gag'}, {'frequency': 'r', 'synset': 'garbage.n.03', 'synonyms': ['garbage'], 'id': 482, 'def': 'a receptacle where waste can be discarded', 'name': 'garbage'}, {'frequency': 'c', 'synset': 'garbage_truck.n.01', 'synonyms': ['garbage_truck'], 'id': 483, 'def': 'a truck for collecting domestic refuse', 'name': 'garbage_truck'}, {'frequency': 'c', 'synset': 'garden_hose.n.01', 'synonyms': ['garden_hose'], 'id': 484, 'def': 'a hose used for watering a lawn or garden', 'name': 'garden_hose'}, {'frequency': 'c', 'synset': 'gargle.n.01', 'synonyms': ['gargle', 'mouthwash'], 'id': 485, 'def': 'a medicated solution used for gargling and rinsing the mouth', 'name': 'gargle'}, {'frequency': 'r', 'synset': 'gargoyle.n.02', 'synonyms': ['gargoyle'], 'id': 486, 'def': 'an ornament consisting of a grotesquely carved figure of a person or animal', 'name': 'gargoyle'}, {'frequency': 'c', 'synset': 'garlic.n.02', 'synonyms': ['garlic', 'ail'], 'id': 487, 'def': 'aromatic bulb used as seasoning', 'name': 'garlic'}, {'frequency': 'r', 'synset': 'gasmask.n.01', 'synonyms': ['gasmask', 'respirator', 'gas_helmet'], 'id': 488, 'def': 'a protective face mask with a filter', 'name': 'gasmask'}, {'frequency': 'c', 'synset': 'gazelle.n.01', 'synonyms': ['gazelle'], 'id': 489, 'def': 'small swift graceful antelope of 
Africa and Asia having lustrous eyes', 'name': 'gazelle'}, {'frequency': 'c', 'synset': 'gelatin.n.02', 'synonyms': ['gelatin', 'jelly'], 'id': 490, 'def': 'an edible jelly made with gelatin and used as a dessert or salad base or a coating for foods', 'name': 'gelatin'}, {'frequency': 'r', 'synset': 'gem.n.02', 'synonyms': ['gemstone'], 'id': 491, 'def': 'a crystalline rock that can be cut and polished for jewelry', 'name': 'gemstone'}, {'frequency': 'r', 'synset': 'generator.n.02', 'synonyms': ['generator'], 'id': 492, 'def': 'engine that converts mechanical energy into electrical energy by electromagnetic induction', 'name': 'generator'}, {'frequency': 'c', 'synset': 'giant_panda.n.01', 'synonyms': ['giant_panda', 'panda', 'panda_bear'], 'id': 493, 'def': 'large black-and-white herbivorous mammal of bamboo forests of China and Tibet', 'name': 'giant_panda'}, {'frequency': 'c', 'synset': 'gift_wrap.n.01', 'synonyms': ['gift_wrap'], 'id': 494, 'def': 'attractive wrapping paper suitable for wrapping gifts', 'name': 'gift_wrap'}, {'frequency': 'c', 'synset': 'ginger.n.03', 'synonyms': ['ginger', 'gingerroot'], 'id': 495, 'def': 'the root of the common ginger plant; used fresh as a seasoning', 'name': 'ginger'}, {'frequency': 'f', 'synset': 'giraffe.n.01', 'synonyms': ['giraffe'], 'id': 496, 'def': 'tall animal having a spotted coat and small horns and very long neck and legs', 'name': 'giraffe'}, {'frequency': 'c', 'synset': 'girdle.n.02', 'synonyms': ['cincture', 'sash', 'waistband', 'waistcloth'], 'id': 497, 'def': 'a band of material around the waist that strengthens a skirt or trousers', 'name': 'cincture'}, {'frequency': 'f', 'synset': 'glass.n.02', 'synonyms': ['glass_(drink_container)', 'drinking_glass'], 'id': 498, 'def': 'a container for holding liquids while drinking', 'name': 'glass_(drink_container)'}, {'frequency': 'c', 'synset': 'globe.n.03', 'synonyms': ['globe'], 'id': 499, 'def': 'a sphere on which a map (especially of the earth) is represented', 'name': 'globe'}, {'frequency': 'f', 'synset': 'glove.n.02', 'synonyms': ['glove'], 'id': 500, 'def': 'handwear covering the hand', 'name': 'glove'}, {'frequency': 'c', 'synset': 'goat.n.01', 'synonyms': ['goat'], 'id': 501, 'def': 'a common goat', 'name': 'goat'}, {'frequency': 'f', 'synset': 'goggles.n.01', 'synonyms': ['goggles'], 'id': 502, 'def': 'tight-fitting spectacles worn to protect the eyes', 'name': 'goggles'}, {'frequency': 'r', 'synset': 'goldfish.n.01', 'synonyms': ['goldfish'], 'id': 503, 'def': 'small golden or orange-red freshwater fishes used as pond or aquarium pets', 'name': 'goldfish'}, {'frequency': 'c', 'synset': 'golf_club.n.02', 'synonyms': ['golf_club', 'golf-club'], 'id': 504, 'def': 'golf equipment used by a golfer to hit a golf ball', 'name': 'golf_club'}, {'frequency': 'c', 'synset': 'golfcart.n.01', 'synonyms': ['golfcart'], 'id': 505, 'def': 'a small motor vehicle in which golfers can ride between shots', 'name': 'golfcart'}, {'frequency': 'r', 'synset': 'gondola.n.02', 'synonyms': ['gondola_(boat)'], 'id': 506, 'def': 'long narrow flat-bottomed boat propelled by sculling; traditionally used on canals of Venice', 'name': 'gondola_(boat)'}, {'frequency': 'c', 'synset': 'goose.n.01', 'synonyms': ['goose'], 'id': 507, 'def': 'loud, web-footed long-necked aquatic birds usually larger than ducks', 'name': 'goose'}, {'frequency': 'r', 'synset': 'gorilla.n.01', 'synonyms': ['gorilla'], 'id': 508, 'def': 'largest ape', 'name': 'gorilla'}, {'frequency': 'r', 'synset': 'gourd.n.02', 'synonyms': ['gourd'], 
'id': 509, 'def': 'any of numerous inedible fruits with hard rinds', 'name': 'gourd'}, {'frequency': 'f', 'synset': 'grape.n.01', 'synonyms': ['grape'], 'id': 510, 'def': 'any of various juicy fruit with green or purple skins; grow in clusters', 'name': 'grape'}, {'frequency': 'c', 'synset': 'grater.n.01', 'synonyms': ['grater'], 'id': 511, 'def': 'utensil with sharp perforations for shredding foods (as vegetables or cheese)', 'name': 'grater'}, {'frequency': 'c', 'synset': 'gravestone.n.01', 'synonyms': ['gravestone', 'headstone', 'tombstone'], 'id': 512, 'def': 'a stone that is used to mark a grave', 'name': 'gravestone'}, {'frequency': 'r', 'synset': 'gravy_boat.n.01', 'synonyms': ['gravy_boat', 'gravy_holder'], 'id': 513, 'def': 'a dish (often boat-shaped) for serving gravy or sauce', 'name': 'gravy_boat'}, {'frequency': 'f', 'synset': 'green_bean.n.02', 'synonyms': ['green_bean'], 'id': 514, 'def': 'a common bean plant cultivated for its slender green edible pods', 'name': 'green_bean'}, {'frequency': 'f', 'synset': 'green_onion.n.01', 'synonyms': ['green_onion', 'spring_onion', 'scallion'], 'id': 515, 'def': 'a young onion before the bulb has enlarged', 'name': 'green_onion'}, {'frequency': 'r', 'synset': 'griddle.n.01', 'synonyms': ['griddle'], 'id': 516, 'def': 'cooking utensil consisting of a flat heated surface on which food is cooked', 'name': 'griddle'}, {'frequency': 'f', 'synset': 'grill.n.02', 'synonyms': ['grill', 'grille', 'grillwork', 'radiator_grille'], 'id': 517, 'def': 'a framework of metal bars used as a partition or a grate', 'name': 'grill'}, {'frequency': 'r', 'synset': 'grits.n.01', 'synonyms': ['grits', 'hominy_grits'], 'id': 518, 'def': 'coarsely ground corn boiled as a breakfast dish', 'name': 'grits'}, {'frequency': 'c', 'synset': 'grizzly.n.01', 'synonyms': ['grizzly', 'grizzly_bear'], 'id': 519, 'def': 'powerful brownish-yellow bear of the uplands of western North America', 'name': 'grizzly'}, {'frequency': 'c', 'synset': 'grocery_bag.n.01', 'synonyms': ['grocery_bag'], 'id': 520, 'def': "a sack for holding customer's groceries", 'name': 'grocery_bag'}, {'frequency': 'f', 'synset': 'guitar.n.01', 'synonyms': ['guitar'], 'id': 521, 'def': 'a stringed instrument usually having six strings; played by strumming or plucking', 'name': 'guitar'}, {'frequency': 'c', 'synset': 'gull.n.02', 'synonyms': ['gull', 'seagull'], 'id': 522, 'def': 'mostly white aquatic bird having long pointed wings and short legs', 'name': 'gull'}, {'frequency': 'c', 'synset': 'gun.n.01', 'synonyms': ['gun'], 'id': 523, 'def': 'a weapon that discharges a bullet at high velocity from a metal tube', 'name': 'gun'}, {'frequency': 'f', 'synset': 'hairbrush.n.01', 'synonyms': ['hairbrush'], 'id': 524, 'def': "a brush used to groom a person's hair", 'name': 'hairbrush'}, {'frequency': 'c', 'synset': 'hairnet.n.01', 'synonyms': ['hairnet'], 'id': 525, 'def': 'a small net that someone wears over their hair to keep it in place', 'name': 'hairnet'}, {'frequency': 'c', 'synset': 'hairpin.n.01', 'synonyms': ['hairpin'], 'id': 526, 'def': "a double pronged pin used to hold women's hair in place", 'name': 'hairpin'}, {'frequency': 'r', 'synset': 'halter.n.03', 'synonyms': ['halter_top'], 'id': 527, 'def': "a woman's top that fastens behind the back and neck leaving the back and arms uncovered", 'name': 'halter_top'}, {'frequency': 'f', 'synset': 'ham.n.01', 'synonyms': ['ham', 'jambon', 'gammon'], 'id': 528, 'def': 'meat cut from the thigh of a hog (usually smoked)', 'name': 'ham'}, {'frequency': 'c', 
'synset': 'hamburger.n.01', 'synonyms': ['hamburger', 'beefburger', 'burger'], 'id': 529, 'def': 'a sandwich consisting of a patty of minced beef served on a bun', 'name': 'hamburger'}, {'frequency': 'c', 'synset': 'hammer.n.02', 'synonyms': ['hammer'], 'id': 530, 'def': 'a hand tool with a heavy head and a handle; used to deliver an impulsive force by striking', 'name': 'hammer'}, {'frequency': 'c', 'synset': 'hammock.n.02', 'synonyms': ['hammock'], 'id': 531, 'def': 'a hanging bed of canvas or rope netting (usually suspended between two trees)', 'name': 'hammock'}, {'frequency': 'r', 'synset': 'hamper.n.02', 'synonyms': ['hamper'], 'id': 532, 'def': 'a basket usually with a cover', 'name': 'hamper'}, {'frequency': 'c', 'synset': 'hamster.n.01', 'synonyms': ['hamster'], 'id': 533, 'def': 'short-tailed burrowing rodent with large cheek pouches', 'name': 'hamster'}, {'frequency': 'f', 'synset': 'hand_blower.n.01', 'synonyms': ['hair_dryer'], 'id': 534, 'def': 'a hand-held electric blower that can blow warm air onto the hair', 'name': 'hair_dryer'}, {'frequency': 'r', 'synset': 'hand_glass.n.01', 'synonyms': ['hand_glass', 'hand_mirror'], 'id': 535, 'def': 'a mirror intended to be held in the hand', 'name': 'hand_glass'}, {'frequency': 'f', 'synset': 'hand_towel.n.01', 'synonyms': ['hand_towel', 'face_towel'], 'id': 536, 'def': 'a small towel used to dry the hands or face', 'name': 'hand_towel'}, {'frequency': 'c', 'synset': 'handcart.n.01', 'synonyms': ['handcart', 'pushcart', 'hand_truck'], 'id': 537, 'def': 'wheeled vehicle that can be pushed by a person', 'name': 'handcart'}, {'frequency': 'r', 'synset': 'handcuff.n.01', 'synonyms': ['handcuff'], 'id': 538, 'def': 'shackle that consists of a metal loop that can be locked around the wrist', 'name': 'handcuff'}, {'frequency': 'c', 'synset': 'handkerchief.n.01', 'synonyms': ['handkerchief'], 'id': 539, 'def': 'a square piece of cloth used for wiping the eyes or nose or as a costume accessory', 'name': 'handkerchief'}, {'frequency': 'f', 'synset': 'handle.n.01', 'synonyms': ['handle', 'grip', 'handgrip'], 'id': 540, 'def': 'the appendage to an object that is designed to be held in order to use or move it', 'name': 'handle'}, {'frequency': 'r', 'synset': 'handsaw.n.01', 'synonyms': ['handsaw', "carpenter's_saw"], 'id': 541, 'def': 'a saw used with one hand for cutting wood', 'name': 'handsaw'}, {'frequency': 'r', 'synset': 'hardback.n.01', 'synonyms': ['hardback_book', 'hardcover_book'], 'id': 542, 'def': 'a book with cardboard or cloth or leather covers', 'name': 'hardback_book'}, {'frequency': 'r', 'synset': 'harmonium.n.01', 'synonyms': ['harmonium', 'organ_(musical_instrument)', 'reed_organ_(musical_instrument)'], 'id': 543, 'def': 'a free-reed instrument in which air is forced through the reeds by bellows', 'name': 'harmonium'}, {'frequency': 'f', 'synset': 'hat.n.01', 'synonyms': ['hat'], 'id': 544, 'def': 'headwear that protects the head from bad weather, sun, or worn for fashion', 'name': 'hat'}, {'frequency': 'r', 'synset': 'hatbox.n.01', 'synonyms': ['hatbox'], 'id': 545, 'def': 'a round piece of luggage for carrying hats', 'name': 'hatbox'}, {'frequency': 'c', 'synset': 'head_covering.n.01', 'synonyms': ['veil'], 'id': 546, 'def': 'a garment that covers the head OR face', 'name': 'veil'}, {'frequency': 'f', 'synset': 'headband.n.01', 'synonyms': ['headband'], 'id': 547, 'def': 'a band worn around or over the head', 'name': 'headband'}, {'frequency': 'f', 'synset': 'headboard.n.01', 'synonyms': ['headboard'], 'id': 548, 'def': 'a 
vertical board or panel forming the head of a bedstead', 'name': 'headboard'}, {'frequency': 'f', 'synset': 'headlight.n.01', 'synonyms': ['headlight', 'headlamp'], 'id': 549, 'def': 'a powerful light with reflector; attached to the front of an automobile or locomotive', 'name': 'headlight'}, {'frequency': 'c', 'synset': 'headscarf.n.01', 'synonyms': ['headscarf'], 'id': 550, 'def': 'a kerchief worn over the head and tied under the chin', 'name': 'headscarf'}, {'frequency': 'r', 'synset': 'headset.n.01', 'synonyms': ['headset'], 'id': 551, 'def': 'receiver consisting of a pair of headphones', 'name': 'headset'}, {'frequency': 'c', 'synset': 'headstall.n.01', 'synonyms': ['headstall_(for_horses)', 'headpiece_(for_horses)'], 'id': 552, 'def': "the band that is the part of a bridle that fits around a horse's head", 'name': 'headstall_(for_horses)'}, {'frequency': 'c', 'synset': 'heart.n.02', 'synonyms': ['heart'], 'id': 553, 'def': 'a muscular organ; its contractions move the blood through the body', 'name': 'heart'}, {'frequency': 'c', 'synset': 'heater.n.01', 'synonyms': ['heater', 'warmer'], 'id': 554, 'def': 'device that heats water or supplies warmth to a room', 'name': 'heater'}, {'frequency': 'c', 'synset': 'helicopter.n.01', 'synonyms': ['helicopter'], 'id': 555, 'def': 'an aircraft without wings that obtains its lift from the rotation of overhead blades', 'name': 'helicopter'}, {'frequency': 'f', 'synset': 'helmet.n.02', 'synonyms': ['helmet'], 'id': 556, 'def': 'a protective headgear made of hard material to resist blows', 'name': 'helmet'}, {'frequency': 'r', 'synset': 'heron.n.02', 'synonyms': ['heron'], 'id': 557, 'def': 'grey or white wading bird with long neck and long legs and (usually) long bill', 'name': 'heron'}, {'frequency': 'c', 'synset': 'highchair.n.01', 'synonyms': ['highchair', 'feeding_chair'], 'id': 558, 'def': 'a chair for feeding a very young child', 'name': 'highchair'}, {'frequency': 'f', 'synset': 'hinge.n.01', 'synonyms': ['hinge'], 'id': 559, 'def': 'a joint that holds two parts together so that one can swing relative to the other', 'name': 'hinge'}, {'frequency': 'r', 'synset': 'hippopotamus.n.01', 'synonyms': ['hippopotamus'], 'id': 560, 'def': 'massive thick-skinned animal living in or around rivers of tropical Africa', 'name': 'hippopotamus'}, {'frequency': 'r', 'synset': 'hockey_stick.n.01', 'synonyms': ['hockey_stick'], 'id': 561, 'def': 'sports implement consisting of a stick used by hockey players to move the puck', 'name': 'hockey_stick'}, {'frequency': 'c', 'synset': 'hog.n.03', 'synonyms': ['hog', 'pig'], 'id': 562, 'def': 'domestic swine', 'name': 'hog'}, {'frequency': 'f', 'synset': 'home_plate.n.01', 'synonyms': ['home_plate_(baseball)', 'home_base_(baseball)'], 'id': 563, 'def': '(baseball) a rubber slab where the batter stands; it must be touched by a base runner in order to score', 'name': 'home_plate_(baseball)'}, {'frequency': 'c', 'synset': 'honey.n.01', 'synonyms': ['honey'], 'id': 564, 'def': 'a sweet yellow liquid produced by bees', 'name': 'honey'}, {'frequency': 'f', 'synset': 'hood.n.06', 'synonyms': ['fume_hood', 'exhaust_hood'], 'id': 565, 'def': 'metal covering leading to a vent that exhausts smoke or fumes', 'name': 'fume_hood'}, {'frequency': 'f', 'synset': 'hook.n.05', 'synonyms': ['hook'], 'id': 566, 'def': 'a curved or bent implement for suspending or pulling something', 'name': 'hook'}, {'frequency': 'r', 'synset': 'hookah.n.01', 'synonyms': ['hookah', 'narghile', 'nargileh', 'sheesha', 'shisha', 'water_pipe'], 'id': 567, 
'def': 'a tobacco pipe with a long flexible tube connected to a container where the smoke is cooled by passing through water', 'name': 'hookah'}, {'frequency': 'r', 'synset': 'hornet.n.01', 'synonyms': ['hornet'], 'id': 568, 'def': 'large stinging wasp', 'name': 'hornet'}, {'frequency': 'f', 'synset': 'horse.n.01', 'synonyms': ['horse'], 'id': 569, 'def': 'a common horse', 'name': 'horse'}, {'frequency': 'f', 'synset': 'hose.n.03', 'synonyms': ['hose', 'hosepipe'], 'id': 570, 'def': 'a flexible pipe for conveying a liquid or gas', 'name': 'hose'}, {'frequency': 'r', 'synset': 'hot-air_balloon.n.01', 'synonyms': ['hot-air_balloon'], 'id': 571, 'def': 'balloon for travel through the air in a basket suspended below a large bag of heated air', 'name': 'hot-air_balloon'}, {'frequency': 'r', 'synset': 'hot_plate.n.01', 'synonyms': ['hotplate'], 'id': 572, 'def': 'a portable electric appliance for heating or cooking or keeping food warm', 'name': 'hotplate'}, {'frequency': 'c', 'synset': 'hot_sauce.n.01', 'synonyms': ['hot_sauce'], 'id': 573, 'def': 'a pungent peppery sauce', 'name': 'hot_sauce'}, {'frequency': 'r', 'synset': 'hourglass.n.01', 'synonyms': ['hourglass'], 'id': 574, 'def': 'a sandglass timer that runs for sixty minutes', 'name': 'hourglass'}, {'frequency': 'r', 'synset': 'houseboat.n.01', 'synonyms': ['houseboat'], 'id': 575, 'def': 'a barge that is designed and equipped for use as a dwelling', 'name': 'houseboat'}, {'frequency': 'c', 'synset': 'hummingbird.n.01', 'synonyms': ['hummingbird'], 'id': 576, 'def': 'tiny American bird having brilliant iridescent plumage and long slender bills', 'name': 'hummingbird'}, {'frequency': 'r', 'synset': 'hummus.n.01', 'synonyms': ['hummus', 'humus', 'hommos', 'hoummos', 'humous'], 'id': 577, 'def': 'a thick spread made from mashed chickpeas', 'name': 'hummus'}, {'frequency': 'f', 'synset': 'ice_bear.n.01', 'synonyms': ['polar_bear'], 'id': 578, 'def': 'white bear of Arctic regions', 'name': 'polar_bear'}, {'frequency': 'c', 'synset': 'ice_cream.n.01', 'synonyms': ['icecream'], 'id': 579, 'def': 'frozen dessert containing cream and sugar and flavoring', 'name': 'icecream'}, {'frequency': 'r', 'synset': 'ice_lolly.n.01', 'synonyms': ['popsicle'], 'id': 580, 'def': 'ice cream or water ice on a small wooden stick', 'name': 'popsicle'}, {'frequency': 'c', 'synset': 'ice_maker.n.01', 'synonyms': ['ice_maker'], 'id': 581, 'def': 'an appliance included in some electric refrigerators for making ice cubes', 'name': 'ice_maker'}, {'frequency': 'r', 'synset': 'ice_pack.n.01', 'synonyms': ['ice_pack', 'ice_bag'], 'id': 582, 'def': 'a waterproof bag filled with ice: applied to the body (especially the head) to cool or reduce swelling', 'name': 'ice_pack'}, {'frequency': 'r', 'synset': 'ice_skate.n.01', 'synonyms': ['ice_skate'], 'id': 583, 'def': 'skate consisting of a boot with a steel blade fitted to the sole', 'name': 'ice_skate'}, {'frequency': 'c', 'synset': 'igniter.n.01', 'synonyms': ['igniter', 'ignitor', 'lighter'], 'id': 584, 'def': 'a substance or device used to start a fire', 'name': 'igniter'}, {'frequency': 'r', 'synset': 'inhaler.n.01', 'synonyms': ['inhaler', 'inhalator'], 'id': 585, 'def': 'a dispenser that produces a chemical vapor to be inhaled through mouth or nose', 'name': 'inhaler'}, {'frequency': 'f', 'synset': 'ipod.n.01', 'synonyms': ['iPod'], 'id': 586, 'def': 'a pocket-sized device used to play music files', 'name': 'iPod'}, {'frequency': 'c', 'synset': 'iron.n.04', 'synonyms': ['iron_(for_clothing)', 
'smoothing_iron_(for_clothing)'], 'id': 587, 'def': 'home appliance consisting of a flat metal base that is heated and used to smooth cloth', 'name': 'iron_(for_clothing)'}, {'frequency': 'c', 'synset': 'ironing_board.n.01', 'synonyms': ['ironing_board'], 'id': 588, 'def': 'narrow padded board on collapsible supports; used for ironing clothes', 'name': 'ironing_board'}, {'frequency': 'f', 'synset': 'jacket.n.01', 'synonyms': ['jacket'], 'id': 589, 'def': 'a waist-length coat', 'name': 'jacket'}, {'frequency': 'c', 'synset': 'jam.n.01', 'synonyms': ['jam'], 'id': 590, 'def': 'preserve of crushed fruit', 'name': 'jam'}, {'frequency': 'f', 'synset': 'jar.n.01', 'synonyms': ['jar'], 'id': 591, 'def': 'a vessel (usually cylindrical) with a wide mouth and without handles', 'name': 'jar'}, {'frequency': 'f', 'synset': 'jean.n.01', 'synonyms': ['jean', 'blue_jean', 'denim'], 'id': 592, 'def': '(usually plural) close-fitting trousers of heavy denim for manual work or casual wear', 'name': 'jean'}, {'frequency': 'c', 'synset': 'jeep.n.01', 'synonyms': ['jeep', 'landrover'], 'id': 593, 'def': 'a car suitable for traveling over rough terrain', 'name': 'jeep'}, {'frequency': 'r', 'synset': 'jelly_bean.n.01', 'synonyms': ['jelly_bean', 'jelly_egg'], 'id': 594, 'def': 'sugar-glazed jellied candy', 'name': 'jelly_bean'}, {'frequency': 'f', 'synset': 'jersey.n.03', 'synonyms': ['jersey', 'T-shirt', 'tee_shirt'], 'id': 595, 'def': 'a close-fitting pullover shirt', 'name': 'jersey'}, {'frequency': 'c', 'synset': 'jet.n.01', 'synonyms': ['jet_plane', 'jet-propelled_plane'], 'id': 596, 'def': 'an airplane powered by one or more jet engines', 'name': 'jet_plane'}, {'frequency': 'r', 'synset': 'jewel.n.01', 'synonyms': ['jewel', 'gem', 'precious_stone'], 'id': 597, 'def': 'a precious or semiprecious stone incorporated into a piece of jewelry', 'name': 'jewel'}, {'frequency': 'c', 'synset': 'jewelry.n.01', 'synonyms': ['jewelry', 'jewellery'], 'id': 598, 'def': 'an adornment (as a bracelet or ring or necklace) made of precious metals and set with gems (or imitation gems)', 'name': 'jewelry'}, {'frequency': 'r', 'synset': 'joystick.n.02', 'synonyms': ['joystick'], 'id': 599, 'def': 'a control device for computers consisting of a vertical handle that can move freely in two directions', 'name': 'joystick'}, {'frequency': 'c', 'synset': 'jump_suit.n.01', 'synonyms': ['jumpsuit'], 'id': 600, 'def': "one-piece garment fashioned after a parachutist's uniform", 'name': 'jumpsuit'}, {'frequency': 'c', 'synset': 'kayak.n.01', 'synonyms': ['kayak'], 'id': 601, 'def': 'a small canoe consisting of a light frame made watertight with animal skins', 'name': 'kayak'}, {'frequency': 'r', 'synset': 'keg.n.02', 'synonyms': ['keg'], 'id': 602, 'def': 'small cask or barrel', 'name': 'keg'}, {'frequency': 'r', 'synset': 'kennel.n.01', 'synonyms': ['kennel', 'doghouse'], 'id': 603, 'def': 'outbuilding that serves as a shelter for a dog', 'name': 'kennel'}, {'frequency': 'c', 'synset': 'kettle.n.01', 'synonyms': ['kettle', 'boiler'], 'id': 604, 'def': 'a metal pot for stewing or boiling; usually has a lid', 'name': 'kettle'}, {'frequency': 'f', 'synset': 'key.n.01', 'synonyms': ['key'], 'id': 605, 'def': 'metal instrument used to unlock a lock', 'name': 'key'}, {'frequency': 'r', 'synset': 'keycard.n.01', 'synonyms': ['keycard'], 'id': 606, 'def': 'a plastic card used to gain access typically to a door', 'name': 'keycard'}, {'frequency': 'c', 'synset': 'kilt.n.01', 'synonyms': ['kilt'], 'id': 607, 'def': 'a knee-length pleated tartan 
skirt worn by men as part of the traditional dress in the Highlands of northern Scotland', 'name': 'kilt'}, {'frequency': 'c', 'synset': 'kimono.n.01', 'synonyms': ['kimono'], 'id': 608, 'def': 'a loose robe; imitated from robes originally worn by Japanese', 'name': 'kimono'}, {'frequency': 'f', 'synset': 'kitchen_sink.n.01', 'synonyms': ['kitchen_sink'], 'id': 609, 'def': 'a sink in a kitchen', 'name': 'kitchen_sink'}, {'frequency': 'r', 'synset': 'kitchen_table.n.01', 'synonyms': ['kitchen_table'], 'id': 610, 'def': 'a table in the kitchen', 'name': 'kitchen_table'}, {'frequency': 'f', 'synset': 'kite.n.03', 'synonyms': ['kite'], 'id': 611, 'def': 'plaything consisting of a light frame covered with tissue paper; flown in wind at end of a string', 'name': 'kite'}, {'frequency': 'c', 'synset': 'kitten.n.01', 'synonyms': ['kitten', 'kitty'], 'id': 612, 'def': 'young domestic cat', 'name': 'kitten'}, {'frequency': 'c', 'synset': 'kiwi.n.03', 'synonyms': ['kiwi_fruit'], 'id': 613, 'def': 'fuzzy brown egg-shaped fruit with slightly tart green flesh', 'name': 'kiwi_fruit'}, {'frequency': 'f', 'synset': 'knee_pad.n.01', 'synonyms': ['knee_pad'], 'id': 614, 'def': 'protective garment consisting of a pad worn by football or baseball or hockey players', 'name': 'knee_pad'}, {'frequency': 'f', 'synset': 'knife.n.01', 'synonyms': ['knife'], 'id': 615, 'def': 'tool with a blade and point used as a cutting instrument', 'name': 'knife'}, {'frequency': 'r', 'synset': 'knitting_needle.n.01', 'synonyms': ['knitting_needle'], 'id': 616, 'def': 'needle consisting of a slender rod with pointed ends; usually used in pairs', 'name': 'knitting_needle'}, {'frequency': 'f', 'synset': 'knob.n.02', 'synonyms': ['knob'], 'id': 617, 'def': 'a round handle often found on a door', 'name': 'knob'}, {'frequency': 'r', 'synset': 'knocker.n.05', 'synonyms': ['knocker_(on_a_door)', 'doorknocker'], 'id': 618, 'def': 'a device (usually metal and ornamental) attached by a hinge to a door', 'name': 'knocker_(on_a_door)'}, {'frequency': 'r', 'synset': 'koala.n.01', 'synonyms': ['koala', 'koala_bear'], 'id': 619, 'def': 'sluggish tailless Australian marsupial with grey furry ears and coat', 'name': 'koala'}, {'frequency': 'r', 'synset': 'lab_coat.n.01', 'synonyms': ['lab_coat', 'laboratory_coat'], 'id': 620, 'def': 'a light coat worn to protect clothing from substances used while working in a laboratory', 'name': 'lab_coat'}, {'frequency': 'f', 'synset': 'ladder.n.01', 'synonyms': ['ladder'], 'id': 621, 'def': 'steps consisting of two parallel members connected by rungs', 'name': 'ladder'}, {'frequency': 'c', 'synset': 'ladle.n.01', 'synonyms': ['ladle'], 'id': 622, 'def': 'a spoon-shaped vessel with a long handle frequently used to transfer liquids', 'name': 'ladle'}, {'frequency': 'c', 'synset': 'ladybug.n.01', 'synonyms': ['ladybug', 'ladybeetle', 'ladybird_beetle'], 'id': 623, 'def': 'small round bright-colored and spotted beetle, typically red and black', 'name': 'ladybug'}, {'frequency': 'f', 'synset': 'lamb.n.01', 'synonyms': ['lamb_(animal)'], 'id': 624, 'def': 'young sheep', 'name': 'lamb_(animal)'}, {'frequency': 'r', 'synset': 'lamb_chop.n.01', 'synonyms': ['lamb-chop', 'lambchop'], 'id': 625, 'def': 'chop cut from a lamb', 'name': 'lamb-chop'}, {'frequency': 'f', 'synset': 'lamp.n.02', 'synonyms': ['lamp'], 'id': 626, 'def': 'a piece of furniture holding one or more electric light bulbs', 'name': 'lamp'}, {'frequency': 'f', 'synset': 'lamppost.n.01', 'synonyms': ['lamppost'], 'id': 627, 'def': 'a metal post supporting 
an outdoor lamp (such as a streetlight)', 'name': 'lamppost'}, {'frequency': 'f', 'synset': 'lampshade.n.01', 'synonyms': ['lampshade'], 'id': 628, 'def': 'a protective ornamental shade used to screen a light bulb from direct view', 'name': 'lampshade'}, {'frequency': 'c', 'synset': 'lantern.n.01', 'synonyms': ['lantern'], 'id': 629, 'def': 'light in a transparent protective case', 'name': 'lantern'}, {'frequency': 'f', 'synset': 'lanyard.n.02', 'synonyms': ['lanyard', 'laniard'], 'id': 630, 'def': 'a cord worn around the neck to hold a knife or whistle, etc.', 'name': 'lanyard'}, {'frequency': 'f', 'synset': 'laptop.n.01', 'synonyms': ['laptop_computer', 'notebook_computer'], 'id': 631, 'def': 'a portable computer small enough to use in your lap', 'name': 'laptop_computer'}, {'frequency': 'r', 'synset': 'lasagna.n.01', 'synonyms': ['lasagna', 'lasagne'], 'id': 632, 'def': 'baked dish of layers of lasagna pasta with sauce and cheese and meat or vegetables', 'name': 'lasagna'}, {'frequency': 'f', 'synset': 'latch.n.02', 'synonyms': ['latch'], 'id': 633, 'def': 'a bar that can be lowered or slid into a groove to fasten a door or gate', 'name': 'latch'}, {'frequency': 'r', 'synset': 'lawn_mower.n.01', 'synonyms': ['lawn_mower'], 'id': 634, 'def': 'garden tool for mowing grass on lawns', 'name': 'lawn_mower'}, {'frequency': 'r', 'synset': 'leather.n.01', 'synonyms': ['leather'], 'id': 635, 'def': 'an animal skin made smooth and flexible by removing the hair and then tanning', 'name': 'leather'}, {'frequency': 'c', 'synset': 'legging.n.01', 'synonyms': ['legging_(clothing)', 'leging_(clothing)', 'leg_covering'], 'id': 636, 'def': 'a garment covering the leg (usually extending from the knee to the ankle)', 'name': 'legging_(clothing)'}, {'frequency': 'c', 'synset': 'lego.n.01', 'synonyms': ['Lego', 'Lego_set'], 'id': 637, 'def': "a child's plastic construction set for making models from blocks", 'name': 'Lego'}, {'frequency': 'r', 'synset': 'legume.n.02', 'synonyms': ['legume'], 'id': 638, 'def': 'the fruit or seed of bean or pea plants', 'name': 'legume'}, {'frequency': 'f', 'synset': 'lemon.n.01', 'synonyms': ['lemon'], 'id': 639, 'def': 'yellow oval fruit with juicy acidic flesh', 'name': 'lemon'}, {'frequency': 'r', 'synset': 'lemonade.n.01', 'synonyms': ['lemonade'], 'id': 640, 'def': 'sweetened beverage of diluted lemon juice', 'name': 'lemonade'}, {'frequency': 'f', 'synset': 'lettuce.n.02', 'synonyms': ['lettuce'], 'id': 641, 'def': 'leafy plant commonly eaten in salad or on sandwiches', 'name': 'lettuce'}, {'frequency': 'f', 'synset': 'license_plate.n.01', 'synonyms': ['license_plate', 'numberplate'], 'id': 642, 'def': "a plate mounted on the front and back of car and bearing the car's registration number", 'name': 'license_plate'}, {'frequency': 'f', 'synset': 'life_buoy.n.01', 'synonyms': ['life_buoy', 'lifesaver', 'life_belt', 'life_ring'], 'id': 643, 'def': 'a ring-shaped life preserver used to prevent drowning (NOT a life-jacket or vest)', 'name': 'life_buoy'}, {'frequency': 'f', 'synset': 'life_jacket.n.01', 'synonyms': ['life_jacket', 'life_vest'], 'id': 644, 'def': 'life preserver consisting of a sleeveless jacket of buoyant or inflatable design', 'name': 'life_jacket'}, {'frequency': 'f', 'synset': 'light_bulb.n.01', 'synonyms': ['lightbulb'], 'id': 645, 'def': 'lightblub/source of light', 'name': 'lightbulb'}, {'frequency': 'r', 'synset': 'lightning_rod.n.02', 'synonyms': ['lightning_rod', 'lightning_conductor'], 'id': 646, 'def': 'a metallic conductor that is attached to a 
high point and leads to the ground', 'name': 'lightning_rod'}, {'frequency': 'f', 'synset': 'lime.n.06', 'synonyms': ['lime'], 'id': 647, 'def': 'the green acidic fruit of any of various lime trees', 'name': 'lime'}, {'frequency': 'r', 'synset': 'limousine.n.01', 'synonyms': ['limousine'], 'id': 648, 'def': 'long luxurious car; usually driven by a chauffeur', 'name': 'limousine'}, {'frequency': 'c', 'synset': 'lion.n.01', 'synonyms': ['lion'], 'id': 649, 'def': 'large gregarious predatory cat of Africa and India', 'name': 'lion'}, {'frequency': 'c', 'synset': 'lip_balm.n.01', 'synonyms': ['lip_balm'], 'id': 650, 'def': 'a balm applied to the lips', 'name': 'lip_balm'}, {'frequency': 'r', 'synset': 'liquor.n.01', 'synonyms': ['liquor', 'spirits', 'hard_liquor', 'liqueur', 'cordial'], 'id': 651, 'def': 'liquor or beer', 'name': 'liquor'}, {'frequency': 'c', 'synset': 'lizard.n.01', 'synonyms': ['lizard'], 'id': 652, 'def': 'a reptile with usually two pairs of legs and a tapering tail', 'name': 'lizard'}, {'frequency': 'f', 'synset': 'log.n.01', 'synonyms': ['log'], 'id': 653, 'def': 'a segment of the trunk of a tree when stripped of branches', 'name': 'log'}, {'frequency': 'c', 'synset': 'lollipop.n.02', 'synonyms': ['lollipop'], 'id': 654, 'def': 'hard candy on a stick', 'name': 'lollipop'}, {'frequency': 'f', 'synset': 'loudspeaker.n.01', 'synonyms': ['speaker_(stero_equipment)'], 'id': 655, 'def': 'electronic device that produces sound often as part of a stereo system', 'name': 'speaker_(stero_equipment)'}, {'frequency': 'c', 'synset': 'love_seat.n.01', 'synonyms': ['loveseat'], 'id': 656, 'def': 'small sofa that seats two people', 'name': 'loveseat'}, {'frequency': 'r', 'synset': 'machine_gun.n.01', 'synonyms': ['machine_gun'], 'id': 657, 'def': 'a rapidly firing automatic gun', 'name': 'machine_gun'}, {'frequency': 'f', 'synset': 'magazine.n.02', 'synonyms': ['magazine'], 'id': 658, 'def': 'a paperback periodic publication', 'name': 'magazine'}, {'frequency': 'f', 'synset': 'magnet.n.01', 'synonyms': ['magnet'], 'id': 659, 'def': 'a device that attracts iron and produces a magnetic field', 'name': 'magnet'}, {'frequency': 'c', 'synset': 'mail_slot.n.01', 'synonyms': ['mail_slot'], 'id': 660, 'def': 'a slot (usually in a door) through which mail can be delivered', 'name': 'mail_slot'}, {'frequency': 'f', 'synset': 'mailbox.n.01', 'synonyms': ['mailbox_(at_home)', 'letter_box_(at_home)'], 'id': 661, 'def': 'a private box for delivery of mail', 'name': 'mailbox_(at_home)'}, {'frequency': 'r', 'synset': 'mallard.n.01', 'synonyms': ['mallard'], 'id': 662, 'def': 'wild dabbling duck from which domestic ducks are descended', 'name': 'mallard'}, {'frequency': 'r', 'synset': 'mallet.n.01', 'synonyms': ['mallet'], 'id': 663, 'def': 'a sports implement with a long handle and a hammer-like head used to hit a ball', 'name': 'mallet'}, {'frequency': 'r', 'synset': 'mammoth.n.01', 'synonyms': ['mammoth'], 'id': 664, 'def': 'any of numerous extinct elephants widely distributed in the Pleistocene', 'name': 'mammoth'}, {'frequency': 'r', 'synset': 'manatee.n.01', 'synonyms': ['manatee'], 'id': 665, 'def': 'sirenian mammal of tropical coastal waters of America', 'name': 'manatee'}, {'frequency': 'c', 'synset': 'mandarin.n.05', 'synonyms': ['mandarin_orange'], 'id': 666, 'def': 'a somewhat flat reddish-orange loose skinned citrus of China', 'name': 'mandarin_orange'}, {'frequency': 'c', 'synset': 'manger.n.01', 'synonyms': ['manger', 'trough'], 'id': 667, 'def': 'a container (usually in a barn or stable) 
from which cattle or horses feed', 'name': 'manger'}, {'frequency': 'f', 'synset': 'manhole.n.01', 'synonyms': ['manhole'], 'id': 668, 'def': 'a hole (usually with a flush cover) through which a person can gain access to an underground structure', 'name': 'manhole'}, {'frequency': 'f', 'synset': 'map.n.01', 'synonyms': ['map'], 'id': 669, 'def': "a diagrammatic representation of the earth's surface (or part of it)", 'name': 'map'}, {'frequency': 'f', 'synset': 'marker.n.03', 'synonyms': ['marker'], 'id': 670, 'def': 'a writing implement for making a mark', 'name': 'marker'}, {'frequency': 'r', 'synset': 'martini.n.01', 'synonyms': ['martini'], 'id': 671, 'def': 'a cocktail made of gin (or vodka) with dry vermouth', 'name': 'martini'}, {'frequency': 'r', 'synset': 'mascot.n.01', 'synonyms': ['mascot'], 'id': 672, 'def': 'a person or animal that is adopted by a team or other group as a symbolic figure', 'name': 'mascot'}, {'frequency': 'c', 'synset': 'mashed_potato.n.01', 'synonyms': ['mashed_potato'], 'id': 673, 'def': 'potato that has been peeled and boiled and then mashed', 'name': 'mashed_potato'}, {'frequency': 'r', 'synset': 'masher.n.02', 'synonyms': ['masher'], 'id': 674, 'def': 'a kitchen utensil used for mashing (e.g. potatoes)', 'name': 'masher'}, {'frequency': 'f', 'synset': 'mask.n.04', 'synonyms': ['mask', 'facemask'], 'id': 675, 'def': 'a protective covering worn over the face', 'name': 'mask'}, {'frequency': 'f', 'synset': 'mast.n.01', 'synonyms': ['mast'], 'id': 676, 'def': 'a vertical spar for supporting sails', 'name': 'mast'}, {'frequency': 'c', 'synset': 'mat.n.03', 'synonyms': ['mat_(gym_equipment)', 'gym_mat'], 'id': 677, 'def': 'sports equipment consisting of a piece of thick padding on the floor for gymnastics', 'name': 'mat_(gym_equipment)'}, {'frequency': 'r', 'synset': 'matchbox.n.01', 'synonyms': ['matchbox'], 'id': 678, 'def': 'a box for holding matches', 'name': 'matchbox'}, {'frequency': 'f', 'synset': 'mattress.n.01', 'synonyms': ['mattress'], 'id': 679, 'def': 'a thick pad filled with resilient material used as a bed or part of a bed', 'name': 'mattress'}, {'frequency': 'c', 'synset': 'measuring_cup.n.01', 'synonyms': ['measuring_cup'], 'id': 680, 'def': 'graduated cup used to measure liquid or granular ingredients', 'name': 'measuring_cup'}, {'frequency': 'c', 'synset': 'measuring_stick.n.01', 'synonyms': ['measuring_stick', 'ruler_(measuring_stick)', 'measuring_rod'], 'id': 681, 'def': 'measuring instrument having a sequence of marks at regular intervals', 'name': 'measuring_stick'}, {'frequency': 'c', 'synset': 'meatball.n.01', 'synonyms': ['meatball'], 'id': 682, 'def': 'ground meat formed into a ball and fried or simmered in broth', 'name': 'meatball'}, {'frequency': 'c', 'synset': 'medicine.n.02', 'synonyms': ['medicine'], 'id': 683, 'def': 'something that treats or prevents or alleviates the symptoms of disease', 'name': 'medicine'}, {'frequency': 'c', 'synset': 'melon.n.01', 'synonyms': ['melon'], 'id': 684, 'def': 'fruit of the gourd family having a hard rind and sweet juicy flesh', 'name': 'melon'}, {'frequency': 'f', 'synset': 'microphone.n.01', 'synonyms': ['microphone'], 'id': 685, 'def': 'device for converting sound waves into electrical energy', 'name': 'microphone'}, {'frequency': 'r', 'synset': 'microscope.n.01', 'synonyms': ['microscope'], 'id': 686, 'def': 'magnifier of the image of small objects', 'name': 'microscope'}, {'frequency': 'f', 'synset': 'microwave.n.02', 'synonyms': ['microwave_oven'], 'id': 687, 'def': 'kitchen appliance that 
cooks food by passing an electromagnetic wave through it', 'name': 'microwave_oven'}, {'frequency': 'r', 'synset': 'milestone.n.01', 'synonyms': ['milestone', 'milepost'], 'id': 688, 'def': 'stone post at side of a road to show distances', 'name': 'milestone'}, {'frequency': 'f', 'synset': 'milk.n.01', 'synonyms': ['milk'], 'id': 689, 'def': 'a white nutritious liquid secreted by mammals and used as food by human beings', 'name': 'milk'}, {'frequency': 'r', 'synset': 'milk_can.n.01', 'synonyms': ['milk_can'], 'id': 690, 'def': 'can for transporting milk', 'name': 'milk_can'}, {'frequency': 'r', 'synset': 'milkshake.n.01', 'synonyms': ['milkshake'], 'id': 691, 'def': 'frothy drink of milk and flavoring and sometimes fruit or ice cream', 'name': 'milkshake'}, {'frequency': 'f', 'synset': 'minivan.n.01', 'synonyms': ['minivan'], 'id': 692, 'def': 'a small box-shaped passenger van', 'name': 'minivan'}, {'frequency': 'r', 'synset': 'mint.n.05', 'synonyms': ['mint_candy'], 'id': 693, 'def': 'a candy that is flavored with a mint oil', 'name': 'mint_candy'}, {'frequency': 'f', 'synset': 'mirror.n.01', 'synonyms': ['mirror'], 'id': 694, 'def': 'polished surface that forms images by reflecting light', 'name': 'mirror'}, {'frequency': 'c', 'synset': 'mitten.n.01', 'synonyms': ['mitten'], 'id': 695, 'def': 'glove that encases the thumb separately and the other four fingers together', 'name': 'mitten'}, {'frequency': 'c', 'synset': 'mixer.n.04', 'synonyms': ['mixer_(kitchen_tool)', 'stand_mixer'], 'id': 696, 'def': 'a kitchen utensil that is used for mixing foods', 'name': 'mixer_(kitchen_tool)'}, {'frequency': 'c', 'synset': 'money.n.03', 'synonyms': ['money'], 'id': 697, 'def': 'the official currency issued by a government or national bank', 'name': 'money'}, {'frequency': 'f', 'synset': 'monitor.n.04', 'synonyms': ['monitor_(computer_equipment) computer_monitor'], 'id': 698, 'def': 'a computer monitor', 'name': 'monitor_(computer_equipment) computer_monitor'}, {'frequency': 'c', 'synset': 'monkey.n.01', 'synonyms': ['monkey'], 'id': 699, 'def': 'any of various long-tailed primates', 'name': 'monkey'}, {'frequency': 'f', 'synset': 'motor.n.01', 'synonyms': ['motor'], 'id': 700, 'def': 'machine that converts other forms of energy into mechanical energy and so imparts motion', 'name': 'motor'}, {'frequency': 'f', 'synset': 'motor_scooter.n.01', 'synonyms': ['motor_scooter', 'scooter'], 'id': 701, 'def': 'a wheeled vehicle with small wheels and a low-powered engine', 'name': 'motor_scooter'}, {'frequency': 'r', 'synset': 'motor_vehicle.n.01', 'synonyms': ['motor_vehicle', 'automotive_vehicle'], 'id': 702, 'def': 'a self-propelled wheeled vehicle that does not run on rails', 'name': 'motor_vehicle'}, {'frequency': 'f', 'synset': 'motorcycle.n.01', 'synonyms': ['motorcycle'], 'id': 703, 'def': 'a motor vehicle with two wheels and a strong frame', 'name': 'motorcycle'}, {'frequency': 'f', 'synset': 'mound.n.01', 'synonyms': ['mound_(baseball)', "pitcher's_mound"], 'id': 704, 'def': '(baseball) the slight elevation on which the pitcher stands', 'name': 'mound_(baseball)'}, {'frequency': 'f', 'synset': 'mouse.n.04', 'synonyms': ['mouse_(computer_equipment)', 'computer_mouse'], 'id': 705, 'def': 'a computer input device that controls an on-screen pointer (does not include trackpads / touchpads)', 'name': 'mouse_(computer_equipment)'}, {'frequency': 'f', 'synset': 'mousepad.n.01', 'synonyms': ['mousepad'], 'id': 706, 'def': 'a small portable pad that provides an operating surface for a computer mouse', 'name': 
'mousepad'}, {'frequency': 'c', 'synset': 'muffin.n.01', 'synonyms': ['muffin'], 'id': 707, 'def': 'a sweet quick bread baked in a cup-shaped pan', 'name': 'muffin'}, {'frequency': 'f', 'synset': 'mug.n.04', 'synonyms': ['mug'], 'id': 708, 'def': 'with handle and usually cylindrical', 'name': 'mug'}, {'frequency': 'f', 'synset': 'mushroom.n.02', 'synonyms': ['mushroom'], 'id': 709, 'def': 'a common mushroom', 'name': 'mushroom'}, {'frequency': 'r', 'synset': 'music_stool.n.01', 'synonyms': ['music_stool', 'piano_stool'], 'id': 710, 'def': 'a stool for piano players; usually adjustable in height', 'name': 'music_stool'}, {'frequency': 'c', 'synset': 'musical_instrument.n.01', 'synonyms': ['musical_instrument', 'instrument_(musical)'], 'id': 711, 'def': 'any of various devices or contrivances that can be used to produce musical tones or sounds', 'name': 'musical_instrument'}, {'frequency': 'r', 'synset': 'nailfile.n.01', 'synonyms': ['nailfile'], 'id': 712, 'def': 'a small flat file for shaping the nails', 'name': 'nailfile'}, {'frequency': 'f', 'synset': 'napkin.n.01', 'synonyms': ['napkin', 'table_napkin', 'serviette'], 'id': 713, 'def': 'a small piece of table linen or paper that is used to wipe the mouth and to cover the lap in order to protect clothing', 'name': 'napkin'}, {'frequency': 'r', 'synset': 'neckerchief.n.01', 'synonyms': ['neckerchief'], 'id': 714, 'def': 'a kerchief worn around the neck', 'name': 'neckerchief'}, {'frequency': 'f', 'synset': 'necklace.n.01', 'synonyms': ['necklace'], 'id': 715, 'def': 'jewelry consisting of a cord or chain (often bearing gems) worn about the neck as an ornament', 'name': 'necklace'}, {'frequency': 'f', 'synset': 'necktie.n.01', 'synonyms': ['necktie', 'tie_(necktie)'], 'id': 716, 'def': 'neckwear consisting of a long narrow piece of material worn under a collar and tied in knot at the front', 'name': 'necktie'}, {'frequency': 'c', 'synset': 'needle.n.03', 'synonyms': ['needle'], 'id': 717, 'def': 'a sharp pointed implement (usually metal)', 'name': 'needle'}, {'frequency': 'c', 'synset': 'nest.n.01', 'synonyms': ['nest'], 'id': 718, 'def': 'a structure in which animals lay eggs or give birth to their young', 'name': 'nest'}, {'frequency': 'f', 'synset': 'newspaper.n.01', 'synonyms': ['newspaper', 'paper_(newspaper)'], 'id': 719, 'def': 'a daily or weekly publication on folded sheets containing news, articles, and advertisements', 'name': 'newspaper'}, {'frequency': 'c', 'synset': 'newsstand.n.01', 'synonyms': ['newsstand'], 'id': 720, 'def': 'a stall where newspapers and other periodicals are sold', 'name': 'newsstand'}, {'frequency': 'c', 'synset': 'nightwear.n.01', 'synonyms': ['nightshirt', 'nightwear', 'sleepwear', 'nightclothes'], 'id': 721, 'def': 'garments designed to be worn in bed', 'name': 'nightshirt'}, {'frequency': 'r', 'synset': 'nosebag.n.01', 'synonyms': ['nosebag_(for_animals)', 'feedbag'], 'id': 722, 'def': 'a canvas bag that is used to feed an animal (such as a horse); covers the muzzle and fastens at the top of the head', 'name': 'nosebag_(for_animals)'}, {'frequency': 'c', 'synset': 'noseband.n.01', 'synonyms': ['noseband_(for_animals)', 'nosepiece_(for_animals)'], 'id': 723, 'def': "a strap that is the part of a bridle that goes over the animal's nose", 'name': 'noseband_(for_animals)'}, {'frequency': 'f', 'synset': 'notebook.n.01', 'synonyms': ['notebook'], 'id': 724, 'def': 'a book with blank pages for recording notes or memoranda', 'name': 'notebook'}, {'frequency': 'c', 'synset': 'notepad.n.01', 'synonyms': 
['notepad'], 'id': 725, 'def': 'a pad of paper for keeping notes', 'name': 'notepad'}, {'frequency': 'f', 'synset': 'nut.n.03', 'synonyms': ['nut'], 'id': 726, 'def': 'a small metal block (usually square or hexagonal) with internal screw thread to be fitted onto a bolt', 'name': 'nut'}, {'frequency': 'r', 'synset': 'nutcracker.n.01', 'synonyms': ['nutcracker'], 'id': 727, 'def': 'a hand tool used to crack nuts open', 'name': 'nutcracker'}, {'frequency': 'f', 'synset': 'oar.n.01', 'synonyms': ['oar'], 'id': 728, 'def': 'an implement used to propel or steer a boat', 'name': 'oar'}, {'frequency': 'r', 'synset': 'octopus.n.01', 'synonyms': ['octopus_(food)'], 'id': 729, 'def': 'tentacles of octopus prepared as food', 'name': 'octopus_(food)'}, {'frequency': 'r', 'synset': 'octopus.n.02', 'synonyms': ['octopus_(animal)'], 'id': 730, 'def': 'bottom-living cephalopod having a soft oval body with eight long tentacles', 'name': 'octopus_(animal)'}, {'frequency': 'c', 'synset': 'oil_lamp.n.01', 'synonyms': ['oil_lamp', 'kerosene_lamp', 'kerosine_lamp'], 'id': 731, 'def': 'a lamp that burns oil (as kerosine) for light', 'name': 'oil_lamp'}, {'frequency': 'c', 'synset': 'olive_oil.n.01', 'synonyms': ['olive_oil'], 'id': 732, 'def': 'oil from olives', 'name': 'olive_oil'}, {'frequency': 'r', 'synset': 'omelet.n.01', 'synonyms': ['omelet', 'omelette'], 'id': 733, 'def': 'beaten eggs cooked until just set; may be folded around e.g. ham or cheese or jelly', 'name': 'omelet'}, {'frequency': 'f', 'synset': 'onion.n.01', 'synonyms': ['onion'], 'id': 734, 'def': 'the bulb of an onion plant', 'name': 'onion'}, {'frequency': 'f', 'synset': 'orange.n.01', 'synonyms': ['orange_(fruit)'], 'id': 735, 'def': 'orange (FRUIT of an orange tree)', 'name': 'orange_(fruit)'}, {'frequency': 'c', 'synset': 'orange_juice.n.01', 'synonyms': ['orange_juice'], 'id': 736, 'def': 'bottled or freshly squeezed juice of oranges', 'name': 'orange_juice'}, {'frequency': 'c', 'synset': 'ostrich.n.02', 'synonyms': ['ostrich'], 'id': 737, 'def': 'fast-running African flightless bird with two-toed feet; largest living bird', 'name': 'ostrich'}, {'frequency': 'f', 'synset': 'ottoman.n.03', 'synonyms': ['ottoman', 'pouf', 'pouffe', 'hassock'], 'id': 738, 'def': 'a thick standalone cushion used as a seat or footrest, often next to a chair', 'name': 'ottoman'}, {'frequency': 'f', 'synset': 'oven.n.01', 'synonyms': ['oven'], 'id': 739, 'def': 'kitchen appliance used for baking or roasting', 'name': 'oven'}, {'frequency': 'c', 'synset': 'overall.n.01', 'synonyms': ['overalls_(clothing)'], 'id': 740, 'def': 'work clothing consisting of denim trousers usually with a bib and shoulder straps', 'name': 'overalls_(clothing)'}, {'frequency': 'c', 'synset': 'owl.n.01', 'synonyms': ['owl'], 'id': 741, 'def': 'nocturnal bird of prey with hawk-like beak and claws and large head with front-facing eyes', 'name': 'owl'}, {'frequency': 'c', 'synset': 'packet.n.03', 'synonyms': ['packet'], 'id': 742, 'def': 'a small package or bundle', 'name': 'packet'}, {'frequency': 'r', 'synset': 'pad.n.03', 'synonyms': ['inkpad', 'inking_pad', 'stamp_pad'], 'id': 743, 'def': 'absorbent material saturated with ink used to transfer ink evenly to a rubber stamp', 'name': 'inkpad'}, {'frequency': 'c', 'synset': 'pad.n.04', 'synonyms': ['pad'], 'id': 744, 'def': 'mostly arm/knee pads labeled', 'name': 'pad'}, {'frequency': 'f', 'synset': 'paddle.n.04', 'synonyms': ['paddle', 'boat_paddle'], 'id': 745, 'def': 'a short light oar used without an oarlock to propel a canoe or small 
boat', 'name': 'paddle'}, {'frequency': 'c', 'synset': 'padlock.n.01', 'synonyms': ['padlock'], 'id': 746, 'def': 'a detachable, portable lock', 'name': 'padlock'}, {'frequency': 'c', 'synset': 'paintbrush.n.01', 'synonyms': ['paintbrush'], 'id': 747, 'def': 'a brush used as an applicator to apply paint', 'name': 'paintbrush'}, {'frequency': 'f', 'synset': 'painting.n.01', 'synonyms': ['painting'], 'id': 748, 'def': 'graphic art consisting of an artistic composition made by applying paints to a surface', 'name': 'painting'}, {'frequency': 'f', 'synset': 'pajama.n.02', 'synonyms': ['pajamas', 'pyjamas'], 'id': 749, 'def': 'loose-fitting nightclothes worn for sleeping or lounging', 'name': 'pajamas'}, {'frequency': 'c', 'synset': 'palette.n.02', 'synonyms': ['palette', 'pallet'], 'id': 750, 'def': 'board that provides a flat surface on which artists mix paints and the range of colors used', 'name': 'palette'}, {'frequency': 'f', 'synset': 'pan.n.01', 'synonyms': ['pan_(for_cooking)', 'cooking_pan'], 'id': 751, 'def': 'cooking utensil consisting of a wide metal vessel', 'name': 'pan_(for_cooking)'}, {'frequency': 'r', 'synset': 'pan.n.03', 'synonyms': ['pan_(metal_container)'], 'id': 752, 'def': 'shallow container made of metal', 'name': 'pan_(metal_container)'}, {'frequency': 'c', 'synset': 'pancake.n.01', 'synonyms': ['pancake'], 'id': 753, 'def': 'a flat cake of thin batter fried on both sides on a griddle', 'name': 'pancake'}, {'frequency': 'r', 'synset': 'pantyhose.n.01', 'synonyms': ['pantyhose'], 'id': 754, 'def': "a woman's tights consisting of underpants and stockings", 'name': 'pantyhose'}, {'frequency': 'r', 'synset': 'papaya.n.02', 'synonyms': ['papaya'], 'id': 755, 'def': 'large oval melon-like tropical fruit with yellowish flesh', 'name': 'papaya'}, {'frequency': 'f', 'synset': 'paper_plate.n.01', 'synonyms': ['paper_plate'], 'id': 756, 'def': 'a disposable plate made of cardboard', 'name': 'paper_plate'}, {'frequency': 'f', 'synset': 'paper_towel.n.01', 'synonyms': ['paper_towel'], 'id': 757, 'def': 'a disposable towel made of absorbent paper', 'name': 'paper_towel'}, {'frequency': 'r', 'synset': 'paperback_book.n.01', 'synonyms': ['paperback_book', 'paper-back_book', 'softback_book', 'soft-cover_book'], 'id': 758, 'def': 'a book with paper covers', 'name': 'paperback_book'}, {'frequency': 'r', 'synset': 'paperweight.n.01', 'synonyms': ['paperweight'], 'id': 759, 'def': 'a weight used to hold down a stack of papers', 'name': 'paperweight'}, {'frequency': 'c', 'synset': 'parachute.n.01', 'synonyms': ['parachute'], 'id': 760, 'def': 'rescue equipment consisting of a device that fills with air and retards your fall', 'name': 'parachute'}, {'frequency': 'c', 'synset': 'parakeet.n.01', 'synonyms': ['parakeet', 'parrakeet', 'parroket', 'paraquet', 'paroquet', 'parroquet'], 'id': 761, 'def': 'any of numerous small slender long-tailed parrots', 'name': 'parakeet'}, {'frequency': 'c', 'synset': 'parasail.n.01', 'synonyms': ['parasail_(sports)'], 'id': 762, 'def': 'parachute that will lift a person up into the air when it is towed by a motorboat or a car', 'name': 'parasail_(sports)'}, {'frequency': 'c', 'synset': 'parasol.n.01', 'synonyms': ['parasol', 'sunshade'], 'id': 763, 'def': 'a handheld collapsible source of shade', 'name': 'parasol'}, {'frequency': 'r', 'synset': 'parchment.n.01', 'synonyms': ['parchment'], 'id': 764, 'def': 'a superior paper resembling sheepskin', 'name': 'parchment'}, {'frequency': 'c', 'synset': 'parka.n.01', 'synonyms': ['parka', 'anorak'], 'id': 765, 
'def': "a kind of heavy jacket (`windcheater' is a British term)", 'name': 'parka'}, {'frequency': 'f', 'synset': 'parking_meter.n.01', 'synonyms': ['parking_meter'], 'id': 766, 'def': 'a coin-operated timer located next to a parking space', 'name': 'parking_meter'}, {'frequency': 'c', 'synset': 'parrot.n.01', 'synonyms': ['parrot'], 'id': 767, 'def': 'usually brightly colored tropical birds with short hooked beaks and the ability to mimic sounds', 'name': 'parrot'}, {'frequency': 'c', 'synset': 'passenger_car.n.01', 'synonyms': ['passenger_car_(part_of_a_train)', 'coach_(part_of_a_train)'], 'id': 768, 'def': 'a railcar where passengers ride', 'name': 'passenger_car_(part_of_a_train)'}, {'frequency': 'r', 'synset': 'passenger_ship.n.01', 'synonyms': ['passenger_ship'], 'id': 769, 'def': 'a ship built to carry passengers', 'name': 'passenger_ship'}, {'frequency': 'c', 'synset': 'passport.n.02', 'synonyms': ['passport'], 'id': 770, 'def': 'a document issued by a country to a citizen allowing that person to travel abroad and re-enter the home country', 'name': 'passport'}, {'frequency': 'f', 'synset': 'pastry.n.02', 'synonyms': ['pastry'], 'id': 771, 'def': 'any of various baked foods made of dough or batter', 'name': 'pastry'}, {'frequency': 'r', 'synset': 'patty.n.01', 'synonyms': ['patty_(food)'], 'id': 772, 'def': 'small flat mass of chopped food', 'name': 'patty_(food)'}, {'frequency': 'c', 'synset': 'pea.n.01', 'synonyms': ['pea_(food)'], 'id': 773, 'def': 'seed of a pea plant used for food', 'name': 'pea_(food)'}, {'frequency': 'c', 'synset': 'peach.n.03', 'synonyms': ['peach'], 'id': 774, 'def': 'downy juicy fruit with sweet yellowish or whitish flesh', 'name': 'peach'}, {'frequency': 'c', 'synset': 'peanut_butter.n.01', 'synonyms': ['peanut_butter'], 'id': 775, 'def': 'a spread made from ground peanuts', 'name': 'peanut_butter'}, {'frequency': 'f', 'synset': 'pear.n.01', 'synonyms': ['pear'], 'id': 776, 'def': 'sweet juicy gritty-textured fruit available in many varieties', 'name': 'pear'}, {'frequency': 'c', 'synset': 'peeler.n.03', 'synonyms': ['peeler_(tool_for_fruit_and_vegetables)'], 'id': 777, 'def': 'a device for peeling vegetables or fruits', 'name': 'peeler_(tool_for_fruit_and_vegetables)'}, {'frequency': 'r', 'synset': 'peg.n.04', 'synonyms': ['wooden_leg', 'pegleg'], 'id': 778, 'def': 'a prosthesis that replaces a missing leg', 'name': 'wooden_leg'}, {'frequency': 'r', 'synset': 'pegboard.n.01', 'synonyms': ['pegboard'], 'id': 779, 'def': 'a board perforated with regularly spaced holes into which pegs can be fitted', 'name': 'pegboard'}, {'frequency': 'c', 'synset': 'pelican.n.01', 'synonyms': ['pelican'], 'id': 780, 'def': 'large long-winged warm-water seabird having a large bill with a distensible pouch for fish', 'name': 'pelican'}, {'frequency': 'f', 'synset': 'pen.n.01', 'synonyms': ['pen'], 'id': 781, 'def': 'a writing implement with a point from which ink flows', 'name': 'pen'}, {'frequency': 'f', 'synset': 'pencil.n.01', 'synonyms': ['pencil'], 'id': 782, 'def': 'a thin cylindrical pointed writing implement made of wood and graphite', 'name': 'pencil'}, {'frequency': 'r', 'synset': 'pencil_box.n.01', 'synonyms': ['pencil_box', 'pencil_case'], 'id': 783, 'def': 'a box for holding pencils', 'name': 'pencil_box'}, {'frequency': 'r', 'synset': 'pencil_sharpener.n.01', 'synonyms': ['pencil_sharpener'], 'id': 784, 'def': 'a rotary implement for sharpening the point on pencils', 'name': 'pencil_sharpener'}, {'frequency': 'r', 'synset': 'pendulum.n.01', 'synonyms': 
['pendulum'], 'id': 785, 'def': 'an apparatus consisting of an object mounted so that it swings freely under the influence of gravity', 'name': 'pendulum'}, {'frequency': 'c', 'synset': 'penguin.n.01', 'synonyms': ['penguin'], 'id': 786, 'def': 'short-legged flightless birds of cold southern regions having webbed feet and wings modified as flippers', 'name': 'penguin'}, {'frequency': 'r', 'synset': 'pennant.n.02', 'synonyms': ['pennant'], 'id': 787, 'def': 'a flag longer than it is wide (and often tapering)', 'name': 'pennant'}, {'frequency': 'r', 'synset': 'penny.n.02', 'synonyms': ['penny_(coin)'], 'id': 788, 'def': 'a coin worth one-hundredth of the value of the basic unit', 'name': 'penny_(coin)'}, {'frequency': 'f', 'synset': 'pepper.n.03', 'synonyms': ['pepper', 'peppercorn'], 'id': 789, 'def': 'pungent seasoning from the berry of the common pepper plant; whole or ground', 'name': 'pepper'}, {'frequency': 'c', 'synset': 'pepper_mill.n.01', 'synonyms': ['pepper_mill', 'pepper_grinder'], 'id': 790, 'def': 'a mill for grinding pepper', 'name': 'pepper_mill'}, {'frequency': 'c', 'synset': 'perfume.n.02', 'synonyms': ['perfume'], 'id': 791, 'def': 'a toiletry that emits and diffuses a fragrant odor', 'name': 'perfume'}, {'frequency': 'r', 'synset': 'persimmon.n.02', 'synonyms': ['persimmon'], 'id': 792, 'def': 'orange fruit resembling a plum; edible when fully ripe', 'name': 'persimmon'}, {'frequency': 'f', 'synset': 'person.n.01', 'synonyms': ['person', 'baby', 'child', 'boy', 'girl', 'man', 'woman', 'human'], 'id': 793, 'def': 'a human being', 'name': 'person'}, {'frequency': 'c', 'synset': 'pet.n.01', 'synonyms': ['pet'], 'id': 794, 'def': 'a domesticated animal kept for companionship or amusement', 'name': 'pet'}, {'frequency': 'c', 'synset': 'pew.n.01', 'synonyms': ['pew_(church_bench)', 'church_bench'], 'id': 795, 'def': 'long bench with backs; used in church by the congregation', 'name': 'pew_(church_bench)'}, {'frequency': 'r', 'synset': 'phonebook.n.01', 'synonyms': ['phonebook', 'telephone_book', 'telephone_directory'], 'id': 796, 'def': 'a directory containing an alphabetical list of telephone subscribers and their telephone numbers', 'name': 'phonebook'}, {'frequency': 'c', 'synset': 'phonograph_record.n.01', 'synonyms': ['phonograph_record', 'phonograph_recording', 'record_(phonograph_recording)'], 'id': 797, 'def': 'sound recording consisting of a typically black disk with a continuous groove', 'name': 'phonograph_record'}, {'frequency': 'f', 'synset': 'piano.n.01', 'synonyms': ['piano'], 'id': 798, 'def': 'a keyboard instrument that is played by depressing keys that cause hammers to strike tuned strings and produce sounds', 'name': 'piano'}, {'frequency': 'f', 'synset': 'pickle.n.01', 'synonyms': ['pickle'], 'id': 799, 'def': 'vegetables (especially cucumbers) preserved in brine or vinegar', 'name': 'pickle'}, {'frequency': 'f', 'synset': 'pickup.n.01', 'synonyms': ['pickup_truck'], 'id': 800, 'def': 'a light truck with an open body and low sides and a tailboard', 'name': 'pickup_truck'}, {'frequency': 'c', 'synset': 'pie.n.01', 'synonyms': ['pie'], 'id': 801, 'def': 'dish baked in pastry-lined pan often with a pastry top', 'name': 'pie'}, {'frequency': 'c', 'synset': 'pigeon.n.01', 'synonyms': ['pigeon'], 'id': 802, 'def': 'wild and domesticated birds having a heavy body and short legs', 'name': 'pigeon'}, {'frequency': 'r', 'synset': 'piggy_bank.n.01', 'synonyms': ['piggy_bank', 'penny_bank'], 'id': 803, 'def': "a child's coin bank (often shaped like a pig)", 'name': 
'piggy_bank'}, {'frequency': 'f', 'synset': 'pillow.n.01', 'synonyms': ['pillow'], 'id': 804, 'def': 'a cushion to support the head of a sleeping person', 'name': 'pillow'}, {'frequency': 'r', 'synset': 'pin.n.09', 'synonyms': ['pin_(non_jewelry)'], 'id': 805, 'def': 'a small slender (often pointed) piece of wood or metal used to support or fasten or attach things', 'name': 'pin_(non_jewelry)'}, {'frequency': 'f', 'synset': 'pineapple.n.02', 'synonyms': ['pineapple'], 'id': 806, 'def': 'large sweet fleshy tropical fruit with a tuft of stiff leaves', 'name': 'pineapple'}, {'frequency': 'c', 'synset': 'pinecone.n.01', 'synonyms': ['pinecone'], 'id': 807, 'def': 'the seed-producing cone of a pine tree', 'name': 'pinecone'}, {'frequency': 'r', 'synset': 'ping-pong_ball.n.01', 'synonyms': ['ping-pong_ball'], 'id': 808, 'def': 'light hollow ball used in playing table tennis', 'name': 'ping-pong_ball'}, {'frequency': 'r', 'synset': 'pinwheel.n.03', 'synonyms': ['pinwheel'], 'id': 809, 'def': 'a toy consisting of vanes of colored paper or plastic that is pinned to a stick and spins when it is pointed into the wind', 'name': 'pinwheel'}, {'frequency': 'r', 'synset': 'pipe.n.01', 'synonyms': ['tobacco_pipe'], 'id': 810, 'def': 'a tube with a small bowl at one end; used for smoking tobacco', 'name': 'tobacco_pipe'}, {'frequency': 'f', 'synset': 'pipe.n.02', 'synonyms': ['pipe', 'piping'], 'id': 811, 'def': 'a long tube made of metal or plastic that is used to carry water or oil or gas etc.', 'name': 'pipe'}, {'frequency': 'r', 'synset': 'pistol.n.01', 'synonyms': ['pistol', 'handgun'], 'id': 812, 'def': 'a firearm that is held and fired with one hand', 'name': 'pistol'}, {'frequency': 'c', 'synset': 'pita.n.01', 'synonyms': ['pita_(bread)', 'pocket_bread'], 'id': 813, 'def': 'usually small round bread that can open into a pocket for filling', 'name': 'pita_(bread)'}, {'frequency': 'f', 'synset': 'pitcher.n.02', 'synonyms': ['pitcher_(vessel_for_liquid)', 'ewer'], 'id': 814, 'def': 'an open vessel with a handle and a spout for pouring', 'name': 'pitcher_(vessel_for_liquid)'}, {'frequency': 'r', 'synset': 'pitchfork.n.01', 'synonyms': ['pitchfork'], 'id': 815, 'def': 'a long-handled hand tool with sharp widely spaced prongs for lifting and pitching hay', 'name': 'pitchfork'}, {'frequency': 'f', 'synset': 'pizza.n.01', 'synonyms': ['pizza'], 'id': 816, 'def': 'Italian open pie made of thin bread dough spread with a spiced mixture of e.g. 
tomato sauce and cheese', 'name': 'pizza'}, {'frequency': 'f', 'synset': 'place_mat.n.01', 'synonyms': ['place_mat'], 'id': 817, 'def': 'a mat placed on a table for an individual place setting', 'name': 'place_mat'}, {'frequency': 'f', 'synset': 'plate.n.04', 'synonyms': ['plate'], 'id': 818, 'def': 'dish on which food is served or from which food is eaten', 'name': 'plate'}, {'frequency': 'c', 'synset': 'platter.n.01', 'synonyms': ['platter'], 'id': 819, 'def': 'a large shallow dish used for serving food', 'name': 'platter'}, {'frequency': 'r', 'synset': 'playpen.n.01', 'synonyms': ['playpen'], 'id': 820, 'def': 'a portable enclosure in which babies may be left to play', 'name': 'playpen'}, {'frequency': 'c', 'synset': 'pliers.n.01', 'synonyms': ['pliers', 'plyers'], 'id': 821, 'def': 'a gripping hand tool with two hinged arms and (usually) serrated jaws', 'name': 'pliers'}, {'frequency': 'r', 'synset': 'plow.n.01', 'synonyms': ['plow_(farm_equipment)', 'plough_(farm_equipment)'], 'id': 822, 'def': 'a farm tool having one or more heavy blades to break the soil and cut a furrow prior to sowing', 'name': 'plow_(farm_equipment)'}, {'frequency': 'r', 'synset': 'plume.n.02', 'synonyms': ['plume'], 'id': 823, 'def': 'a feather or cluster of feathers worn as an ornament', 'name': 'plume'}, {'frequency': 'r', 'synset': 'pocket_watch.n.01', 'synonyms': ['pocket_watch'], 'id': 824, 'def': 'a watch that is carried in a small watch pocket', 'name': 'pocket_watch'}, {'frequency': 'c', 'synset': 'pocketknife.n.01', 'synonyms': ['pocketknife'], 'id': 825, 'def': 'a knife with a blade that folds into the handle; suitable for carrying in the pocket', 'name': 'pocketknife'}, {'frequency': 'c', 'synset': 'poker.n.01', 'synonyms': ['poker_(fire_stirring_tool)', 'stove_poker', 'fire_hook'], 'id': 826, 'def': 'fire iron consisting of a metal rod with a handle; used to stir a fire', 'name': 'poker_(fire_stirring_tool)'}, {'frequency': 'f', 'synset': 'pole.n.01', 'synonyms': ['pole', 'post'], 'id': 827, 'def': 'a long (usually round) rod of wood or metal or plastic', 'name': 'pole'}, {'frequency': 'f', 'synset': 'polo_shirt.n.01', 'synonyms': ['polo_shirt', 'sport_shirt'], 'id': 828, 'def': 'a shirt with short sleeves designed for comfort and casual wear', 'name': 'polo_shirt'}, {'frequency': 'r', 'synset': 'poncho.n.01', 'synonyms': ['poncho'], 'id': 829, 'def': 'a blanket-like cloak with a hole in the center for the head', 'name': 'poncho'}, {'frequency': 'c', 'synset': 'pony.n.05', 'synonyms': ['pony'], 'id': 830, 'def': 'any of various breeds of small gentle horses usually less than five feet high at the shoulder', 'name': 'pony'}, {'frequency': 'r', 'synset': 'pool_table.n.01', 'synonyms': ['pool_table', 'billiard_table', 'snooker_table'], 'id': 831, 'def': 'game equipment consisting of a heavy table on which pool is played', 'name': 'pool_table'}, {'frequency': 'f', 'synset': 'pop.n.02', 'synonyms': ['pop_(soda)', 'soda_(pop)', 'tonic', 'soft_drink'], 'id': 832, 'def': 'a sweet drink containing carbonated water and flavoring', 'name': 'pop_(soda)'}, {'frequency': 'c', 'synset': 'postbox.n.01', 'synonyms': ['postbox_(public)', 'mailbox_(public)'], 'id': 833, 'def': 'public box for deposit of mail', 'name': 'postbox_(public)'}, {'frequency': 'c', 'synset': 'postcard.n.01', 'synonyms': ['postcard', 'postal_card', 'mailing-card'], 'id': 834, 'def': 'a card for sending messages by post without an envelope', 'name': 'postcard'}, {'frequency': 'f', 'synset': 'poster.n.01', 'synonyms': ['poster', 'placard'], 'id': 
835, 'def': 'a sign posted in a public place as an advertisement', 'name': 'poster'}, {'frequency': 'f', 'synset': 'pot.n.01', 'synonyms': ['pot'], 'id': 836, 'def': 'metal or earthenware cooking vessel that is usually round and deep; often has a handle and lid', 'name': 'pot'}, {'frequency': 'f', 'synset': 'pot.n.04', 'synonyms': ['flowerpot'], 'id': 837, 'def': 'a container in which plants are cultivated', 'name': 'flowerpot'}, {'frequency': 'f', 'synset': 'potato.n.01', 'synonyms': ['potato'], 'id': 838, 'def': 'an edible tuber native to South America', 'name': 'potato'}, {'frequency': 'c', 'synset': 'potholder.n.01', 'synonyms': ['potholder'], 'id': 839, 'def': 'an insulated pad for holding hot pots', 'name': 'potholder'}, {'frequency': 'c', 'synset': 'pottery.n.01', 'synonyms': ['pottery', 'clayware'], 'id': 840, 'def': 'ceramic ware made from clay and baked in a kiln', 'name': 'pottery'}, {'frequency': 'c', 'synset': 'pouch.n.01', 'synonyms': ['pouch'], 'id': 841, 'def': 'a small or medium size container for holding or carrying things', 'name': 'pouch'}, {'frequency': 'c', 'synset': 'power_shovel.n.01', 'synonyms': ['power_shovel', 'excavator', 'digger'], 'id': 842, 'def': 'a machine for excavating', 'name': 'power_shovel'}, {'frequency': 'c', 'synset': 'prawn.n.01', 'synonyms': ['prawn', 'shrimp'], 'id': 843, 'def': 'any of various edible decapod crustaceans', 'name': 'prawn'}, {'frequency': 'c', 'synset': 'pretzel.n.01', 'synonyms': ['pretzel'], 'id': 844, 'def': 'glazed and salted cracker typically in the shape of a loose knot', 'name': 'pretzel'}, {'frequency': 'f', 'synset': 'printer.n.03', 'synonyms': ['printer', 'printing_machine'], 'id': 845, 'def': 'a machine that prints', 'name': 'printer'}, {'frequency': 'c', 'synset': 'projectile.n.01', 'synonyms': ['projectile_(weapon)', 'missile'], 'id': 846, 'def': 'a weapon that is forcibly thrown or projected at a targets', 'name': 'projectile_(weapon)'}, {'frequency': 'c', 'synset': 'projector.n.02', 'synonyms': ['projector'], 'id': 847, 'def': 'an optical instrument that projects an enlarged image onto a screen', 'name': 'projector'}, {'frequency': 'f', 'synset': 'propeller.n.01', 'synonyms': ['propeller', 'propellor'], 'id': 848, 'def': 'a mechanical device that rotates to push against air or water', 'name': 'propeller'}, {'frequency': 'r', 'synset': 'prune.n.01', 'synonyms': ['prune'], 'id': 849, 'def': 'dried plum', 'name': 'prune'}, {'frequency': 'r', 'synset': 'pudding.n.01', 'synonyms': ['pudding'], 'id': 850, 'def': 'any of various soft thick unsweetened baked dishes', 'name': 'pudding'}, {'frequency': 'r', 'synset': 'puffer.n.02', 'synonyms': ['puffer_(fish)', 'pufferfish', 'blowfish', 'globefish'], 'id': 851, 'def': 'fishes whose elongated spiny body can inflate itself with water or air to form a globe', 'name': 'puffer_(fish)'}, {'frequency': 'r', 'synset': 'puffin.n.01', 'synonyms': ['puffin'], 'id': 852, 'def': 'seabirds having short necks and brightly colored compressed bills', 'name': 'puffin'}, {'frequency': 'r', 'synset': 'pug.n.01', 'synonyms': ['pug-dog'], 'id': 853, 'def': 'small compact smooth-coated breed of Asiatic origin having a tightly curled tail and broad flat wrinkled muzzle', 'name': 'pug-dog'}, {'frequency': 'c', 'synset': 'pumpkin.n.02', 'synonyms': ['pumpkin'], 'id': 854, 'def': 'usually large pulpy deep-yellow round fruit of the squash family maturing in late summer or early autumn', 'name': 'pumpkin'}, {'frequency': 'r', 'synset': 'punch.n.03', 'synonyms': ['puncher'], 'id': 855, 'def': 'a tool for 
making holes or indentations', 'name': 'puncher'}, {'frequency': 'r', 'synset': 'puppet.n.01', 'synonyms': ['puppet', 'marionette'], 'id': 856, 'def': 'a small figure of a person operated from above with strings by a puppeteer', 'name': 'puppet'}, {'frequency': 'c', 'synset': 'puppy.n.01', 'synonyms': ['puppy'], 'id': 857, 'def': 'a young dog', 'name': 'puppy'}, {'frequency': 'r', 'synset': 'quesadilla.n.01', 'synonyms': ['quesadilla'], 'id': 858, 'def': 'a tortilla that is filled with cheese and heated', 'name': 'quesadilla'}, {'frequency': 'r', 'synset': 'quiche.n.02', 'synonyms': ['quiche'], 'id': 859, 'def': 'a tart filled with rich unsweetened custard; often contains other ingredients (as cheese or ham or seafood or vegetables)', 'name': 'quiche'}, {'frequency': 'f', 'synset': 'quilt.n.01', 'synonyms': ['quilt', 'comforter'], 'id': 860, 'def': 'bedding made of two layers of cloth filled with stuffing and stitched together', 'name': 'quilt'}, {'frequency': 'c', 'synset': 'rabbit.n.01', 'synonyms': ['rabbit'], 'id': 861, 'def': 'any of various burrowing animals of the family Leporidae having long ears and short tails', 'name': 'rabbit'}, {'frequency': 'r', 'synset': 'racer.n.02', 'synonyms': ['race_car', 'racing_car'], 'id': 862, 'def': 'a fast car that competes in races', 'name': 'race_car'}, {'frequency': 'c', 'synset': 'racket.n.04', 'synonyms': ['racket', 'racquet'], 'id': 863, 'def': 'a sports implement used to strike a ball in various games', 'name': 'racket'}, {'frequency': 'r', 'synset': 'radar.n.01', 'synonyms': ['radar'], 'id': 864, 'def': 'measuring instrument in which the echo of a pulse of microwave radiation is used to detect and locate distant objects', 'name': 'radar'}, {'frequency': 'f', 'synset': 'radiator.n.03', 'synonyms': ['radiator'], 'id': 865, 'def': 'a mechanism consisting of a metal honeycomb through which hot fluids circulate', 'name': 'radiator'}, {'frequency': 'c', 'synset': 'radio_receiver.n.01', 'synonyms': ['radio_receiver', 'radio_set', 'radio', 'tuner_(radio)'], 'id': 866, 'def': 'an electronic receiver that detects and demodulates and amplifies transmitted radio signals', 'name': 'radio_receiver'}, {'frequency': 'c', 'synset': 'radish.n.03', 'synonyms': ['radish', 'daikon'], 'id': 867, 'def': 'pungent edible root of any of various cultivated radish plants', 'name': 'radish'}, {'frequency': 'c', 'synset': 'raft.n.01', 'synonyms': ['raft'], 'id': 868, 'def': 'a flat float (usually made of logs or planks) that can be used for transport or as a platform for swimmers', 'name': 'raft'}, {'frequency': 'r', 'synset': 'rag_doll.n.01', 'synonyms': ['rag_doll'], 'id': 869, 'def': 'a cloth doll that is stuffed and (usually) painted', 'name': 'rag_doll'}, {'frequency': 'c', 'synset': 'raincoat.n.01', 'synonyms': ['raincoat', 'waterproof_jacket'], 'id': 870, 'def': 'a water-resistant coat', 'name': 'raincoat'}, {'frequency': 'c', 'synset': 'ram.n.05', 'synonyms': ['ram_(animal)'], 'id': 871, 'def': 'uncastrated adult male sheep', 'name': 'ram_(animal)'}, {'frequency': 'c', 'synset': 'raspberry.n.02', 'synonyms': ['raspberry'], 'id': 872, 'def': 'red or black edible aggregate berries usually smaller than the related blackberries', 'name': 'raspberry'}, {'frequency': 'r', 'synset': 'rat.n.01', 'synonyms': ['rat'], 'id': 873, 'def': 'any of various long-tailed rodents similar to but larger than a mouse', 'name': 'rat'}, {'frequency': 'c', 'synset': 'razorblade.n.01', 'synonyms': ['razorblade'], 'id': 874, 'def': 'a blade that has very sharp edge', 'name': 
'razorblade'}, {'frequency': 'c', 'synset': 'reamer.n.01', 'synonyms': ['reamer_(juicer)', 'juicer', 'juice_reamer'], 'id': 875, 'def': 'a squeezer with a conical ridged center that is used for squeezing juice from citrus fruit', 'name': 'reamer_(juicer)'}, {'frequency': 'f', 'synset': 'rearview_mirror.n.01', 'synonyms': ['rearview_mirror'], 'id': 876, 'def': 'vehicle mirror (side or rearview)', 'name': 'rearview_mirror'}, {'frequency': 'c', 'synset': 'receipt.n.02', 'synonyms': ['receipt'], 'id': 877, 'def': 'an acknowledgment (usually tangible) that payment has been made', 'name': 'receipt'}, {'frequency': 'c', 'synset': 'recliner.n.01', 'synonyms': ['recliner', 'reclining_chair', 'lounger_(chair)'], 'id': 878, 'def': 'an armchair whose back can be lowered and foot can be raised to allow the sitter to recline in it', 'name': 'recliner'}, {'frequency': 'c', 'synset': 'record_player.n.01', 'synonyms': ['record_player', 'phonograph_(record_player)', 'turntable'], 'id': 879, 'def': 'machine in which rotating records cause a stylus to vibrate and the vibrations are amplified acoustically or electronically', 'name': 'record_player'}, {'frequency': 'f', 'synset': 'reflector.n.01', 'synonyms': ['reflector'], 'id': 880, 'def': 'device that reflects light, radiation, etc.', 'name': 'reflector'}, {'frequency': 'f', 'synset': 'remote_control.n.01', 'synonyms': ['remote_control'], 'id': 881, 'def': 'a device that can be used to control a machine or apparatus from a distance', 'name': 'remote_control'}, {'frequency': 'c', 'synset': 'rhinoceros.n.01', 'synonyms': ['rhinoceros'], 'id': 882, 'def': 'massive powerful herbivorous odd-toed ungulate of southeast Asia and Africa having very thick skin and one or two horns on the snout', 'name': 'rhinoceros'}, {'frequency': 'r', 'synset': 'rib.n.03', 'synonyms': ['rib_(food)'], 'id': 883, 'def': 'cut of meat including one or more ribs', 'name': 'rib_(food)'}, {'frequency': 'c', 'synset': 'rifle.n.01', 'synonyms': ['rifle'], 'id': 884, 'def': 'a shoulder firearm with a long barrel', 'name': 'rifle'}, {'frequency': 'f', 'synset': 'ring.n.08', 'synonyms': ['ring'], 'id': 885, 'def': 'jewelry consisting of a circlet of precious metal (often set with jewels) worn on the finger', 'name': 'ring'}, {'frequency': 'r', 'synset': 'river_boat.n.01', 'synonyms': ['river_boat'], 'id': 886, 'def': 'a boat used on rivers or to ply a river', 'name': 'river_boat'}, {'frequency': 'r', 'synset': 'road_map.n.02', 'synonyms': ['road_map'], 'id': 887, 'def': '(NOT A ROAD) a MAP showing roads (for automobile travel)', 'name': 'road_map'}, {'frequency': 'c', 'synset': 'robe.n.01', 'synonyms': ['robe'], 'id': 888, 'def': 'any loose flowing garment', 'name': 'robe'}, {'frequency': 'c', 'synset': 'rocking_chair.n.01', 'synonyms': ['rocking_chair'], 'id': 889, 'def': 'a chair mounted on rockers', 'name': 'rocking_chair'}, {'frequency': 'r', 'synset': 'rodent.n.01', 'synonyms': ['rodent'], 'id': 890, 'def': 'relatively small placental mammals having a single pair of constantly growing incisor teeth specialized for gnawing', 'name': 'rodent'}, {'frequency': 'r', 'synset': 'roller_skate.n.01', 'synonyms': ['roller_skate'], 'id': 891, 'def': 'a shoe with pairs of rollers (small hard wheels) fixed to the sole', 'name': 'roller_skate'}, {'frequency': 'r', 'synset': 'rollerblade.n.01', 'synonyms': ['Rollerblade'], 'id': 892, 'def': 'an in-line variant of a roller skate', 'name': 'Rollerblade'}, {'frequency': 'c', 'synset': 'rolling_pin.n.01', 'synonyms': ['rolling_pin'], 'id': 893, 'def': 
'utensil consisting of a cylinder (usually of wood) with a handle at each end; used to roll out dough', 'name': 'rolling_pin'}, {'frequency': 'r', 'synset': 'root_beer.n.01', 'synonyms': ['root_beer'], 'id': 894, 'def': 'carbonated drink containing extracts of roots and herbs', 'name': 'root_beer'}, {'frequency': 'c', 'synset': 'router.n.02', 'synonyms': ['router_(computer_equipment)'], 'id': 895, 'def': 'a device that forwards data packets between computer networks', 'name': 'router_(computer_equipment)'}, {'frequency': 'f', 'synset': 'rubber_band.n.01', 'synonyms': ['rubber_band', 'elastic_band'], 'id': 896, 'def': 'a narrow band of elastic rubber used to hold things (such as papers) together', 'name': 'rubber_band'}, {'frequency': 'c', 'synset': 'runner.n.08', 'synonyms': ['runner_(carpet)'], 'id': 897, 'def': 'a long narrow carpet', 'name': 'runner_(carpet)'}, {'frequency': 'f', 'synset': 'sack.n.01', 'synonyms': ['plastic_bag', 'paper_bag'], 'id': 898, 'def': "a bag made of paper or plastic for holding customer's purchases", 'name': 'plastic_bag'}, {'frequency': 'f', 'synset': 'saddle.n.01', 'synonyms': ['saddle_(on_an_animal)'], 'id': 899, 'def': 'a seat for the rider of a horse or camel', 'name': 'saddle_(on_an_animal)'}, {'frequency': 'f', 'synset': 'saddle_blanket.n.01', 'synonyms': ['saddle_blanket', 'saddlecloth', 'horse_blanket'], 'id': 900, 'def': 'stable gear consisting of a blanket placed under the saddle', 'name': 'saddle_blanket'}, {'frequency': 'c', 'synset': 'saddlebag.n.01', 'synonyms': ['saddlebag'], 'id': 901, 'def': 'a large bag (or pair of bags) hung over a saddle', 'name': 'saddlebag'}, {'frequency': 'r', 'synset': 'safety_pin.n.01', 'synonyms': ['safety_pin'], 'id': 902, 'def': 'a pin in the form of a clasp; has a guard so the point of the pin will not stick the user', 'name': 'safety_pin'}, {'frequency': 'f', 'synset': 'sail.n.01', 'synonyms': ['sail'], 'id': 903, 'def': 'a large piece of fabric by means of which wind is used to propel a sailing vessel', 'name': 'sail'}, {'frequency': 'f', 'synset': 'salad.n.01', 'synonyms': ['salad'], 'id': 904, 'def': 'food mixtures either arranged on a plate or tossed and served with a moist dressing; usually consisting of or including greens', 'name': 'salad'}, {'frequency': 'r', 'synset': 'salad_plate.n.01', 'synonyms': ['salad_plate', 'salad_bowl'], 'id': 905, 'def': 'a plate or bowl for individual servings of salad', 'name': 'salad_plate'}, {'frequency': 'c', 'synset': 'salami.n.01', 'synonyms': ['salami'], 'id': 906, 'def': 'highly seasoned fatty sausage of pork and beef usually dried', 'name': 'salami'}, {'frequency': 'c', 'synset': 'salmon.n.01', 'synonyms': ['salmon_(fish)'], 'id': 907, 'def': 'any of various large food and game fishes of northern waters', 'name': 'salmon_(fish)'}, {'frequency': 'r', 'synset': 'salmon.n.03', 'synonyms': ['salmon_(food)'], 'id': 908, 'def': 'flesh of any of various marine or freshwater fish of the family Salmonidae', 'name': 'salmon_(food)'}, {'frequency': 'c', 'synset': 'salsa.n.01', 'synonyms': ['salsa'], 'id': 909, 'def': 'spicy sauce of tomatoes and onions and chili peppers to accompany Mexican foods', 'name': 'salsa'}, {'frequency': 'f', 'synset': 'saltshaker.n.01', 'synonyms': ['saltshaker'], 'id': 910, 'def': 'a shaker with a perforated top for sprinkling salt', 'name': 'saltshaker'}, {'frequency': 'f', 'synset': 'sandal.n.01', 'synonyms': ['sandal_(type_of_shoe)'], 'id': 911, 'def': 'a shoe consisting of a sole fastened by straps to the foot', 'name': 'sandal_(type_of_shoe)'}, 
{'frequency': 'f', 'synset': 'sandwich.n.01', 'synonyms': ['sandwich'], 'id': 912, 'def': 'two (or more) slices of bread with a filling between them', 'name': 'sandwich'}, {'frequency': 'r', 'synset': 'satchel.n.01', 'synonyms': ['satchel'], 'id': 913, 'def': 'luggage consisting of a small case with a flat bottom and (usually) a shoulder strap', 'name': 'satchel'}, {'frequency': 'r', 'synset': 'saucepan.n.01', 'synonyms': ['saucepan'], 'id': 914, 'def': 'a deep pan with a handle; used for stewing or boiling', 'name': 'saucepan'}, {'frequency': 'f', 'synset': 'saucer.n.02', 'synonyms': ['saucer'], 'id': 915, 'def': 'a small shallow dish for holding a cup at the table', 'name': 'saucer'}, {'frequency': 'f', 'synset': 'sausage.n.01', 'synonyms': ['sausage'], 'id': 916, 'def': 'highly seasoned minced meat stuffed in casings', 'name': 'sausage'}, {'frequency': 'r', 'synset': 'sawhorse.n.01', 'synonyms': ['sawhorse', 'sawbuck'], 'id': 917, 'def': 'a framework for holding wood that is being sawed', 'name': 'sawhorse'}, {'frequency': 'r', 'synset': 'sax.n.02', 'synonyms': ['saxophone'], 'id': 918, 'def': "a wind instrument with a `J'-shaped form typically made of brass", 'name': 'saxophone'}, {'frequency': 'f', 'synset': 'scale.n.07', 'synonyms': ['scale_(measuring_instrument)'], 'id': 919, 'def': 'a measuring instrument for weighing; shows amount of mass', 'name': 'scale_(measuring_instrument)'}, {'frequency': 'r', 'synset': 'scarecrow.n.01', 'synonyms': ['scarecrow', 'strawman'], 'id': 920, 'def': 'an effigy in the shape of a man to frighten birds away from seeds', 'name': 'scarecrow'}, {'frequency': 'f', 'synset': 'scarf.n.01', 'synonyms': ['scarf'], 'id': 921, 'def': 'a garment worn around the head or neck or shoulders for warmth or decoration', 'name': 'scarf'}, {'frequency': 'c', 'synset': 'school_bus.n.01', 'synonyms': ['school_bus'], 'id': 922, 'def': 'a bus used to transport children to or from school', 'name': 'school_bus'}, {'frequency': 'f', 'synset': 'scissors.n.01', 'synonyms': ['scissors'], 'id': 923, 'def': 'a tool having two crossed pivoting blades with looped handles', 'name': 'scissors'}, {'frequency': 'f', 'synset': 'scoreboard.n.01', 'synonyms': ['scoreboard'], 'id': 924, 'def': 'a large board for displaying the score of a contest (and some other information)', 'name': 'scoreboard'}, {'frequency': 'r', 'synset': 'scraper.n.01', 'synonyms': ['scraper'], 'id': 925, 'def': 'any of various hand tools for scraping', 'name': 'scraper'}, {'frequency': 'c', 'synset': 'screwdriver.n.01', 'synonyms': ['screwdriver'], 'id': 926, 'def': 'a hand tool for driving screws; has a tip that fits into the head of a screw', 'name': 'screwdriver'}, {'frequency': 'f', 'synset': 'scrub_brush.n.01', 'synonyms': ['scrubbing_brush'], 'id': 927, 'def': 'a brush with short stiff bristles for heavy cleaning', 'name': 'scrubbing_brush'}, {'frequency': 'c', 'synset': 'sculpture.n.01', 'synonyms': ['sculpture'], 'id': 928, 'def': 'a three-dimensional work of art', 'name': 'sculpture'}, {'frequency': 'c', 'synset': 'seabird.n.01', 'synonyms': ['seabird', 'seafowl'], 'id': 929, 'def': 'a bird that frequents coastal waters and the open ocean: gulls; pelicans; gannets; cormorants; albatrosses; petrels; etc.', 'name': 'seabird'}, {'frequency': 'c', 'synset': 'seahorse.n.02', 'synonyms': ['seahorse'], 'id': 930, 'def': 'small fish with horse-like heads bent sharply downward and curled tails', 'name': 'seahorse'}, {'frequency': 'r', 'synset': 'seaplane.n.01', 'synonyms': ['seaplane', 'hydroplane'], 'id': 931, 'def': 
'an airplane that can land on or take off from water', 'name': 'seaplane'}, {'frequency': 'c', 'synset': 'seashell.n.01', 'synonyms': ['seashell'], 'id': 932, 'def': 'the shell of a marine organism', 'name': 'seashell'}, {'frequency': 'c', 'synset': 'sewing_machine.n.01', 'synonyms': ['sewing_machine'], 'id': 933, 'def': 'a textile machine used as a home appliance for sewing', 'name': 'sewing_machine'}, {'frequency': 'c', 'synset': 'shaker.n.03', 'synonyms': ['shaker'], 'id': 934, 'def': 'a container in which something can be shaken', 'name': 'shaker'}, {'frequency': 'c', 'synset': 'shampoo.n.01', 'synonyms': ['shampoo'], 'id': 935, 'def': 'cleansing agent consisting of soaps or detergents used for washing the hair', 'name': 'shampoo'}, {'frequency': 'c', 'synset': 'shark.n.01', 'synonyms': ['shark'], 'id': 936, 'def': 'typically large carnivorous fishes with sharpe teeth', 'name': 'shark'}, {'frequency': 'r', 'synset': 'sharpener.n.01', 'synonyms': ['sharpener'], 'id': 937, 'def': 'any implement that is used to make something (an edge or a point) sharper', 'name': 'sharpener'}, {'frequency': 'r', 'synset': 'sharpie.n.03', 'synonyms': ['Sharpie'], 'id': 938, 'def': 'a pen with indelible ink that will write on any surface', 'name': 'Sharpie'}, {'frequency': 'r', 'synset': 'shaver.n.03', 'synonyms': ['shaver_(electric)', 'electric_shaver', 'electric_razor'], 'id': 939, 'def': 'a razor powered by an electric motor', 'name': 'shaver_(electric)'}, {'frequency': 'c', 'synset': 'shaving_cream.n.01', 'synonyms': ['shaving_cream', 'shaving_soap'], 'id': 940, 'def': 'toiletry consisting that forms a rich lather for softening the beard before shaving', 'name': 'shaving_cream'}, {'frequency': 'r', 'synset': 'shawl.n.01', 'synonyms': ['shawl'], 'id': 941, 'def': 'cloak consisting of an oblong piece of cloth used to cover the head and shoulders', 'name': 'shawl'}, {'frequency': 'r', 'synset': 'shears.n.01', 'synonyms': ['shears'], 'id': 942, 'def': 'large scissors with strong blades', 'name': 'shears'}, {'frequency': 'f', 'synset': 'sheep.n.01', 'synonyms': ['sheep'], 'id': 943, 'def': 'woolly usually horned ruminant mammal related to the goat', 'name': 'sheep'}, {'frequency': 'r', 'synset': 'shepherd_dog.n.01', 'synonyms': ['shepherd_dog', 'sheepdog'], 'id': 944, 'def': 'any of various usually long-haired breeds of dog reared to herd and guard sheep', 'name': 'shepherd_dog'}, {'frequency': 'r', 'synset': 'sherbert.n.01', 'synonyms': ['sherbert', 'sherbet'], 'id': 945, 'def': 'a frozen dessert made primarily of fruit juice and sugar', 'name': 'sherbert'}, {'frequency': 'c', 'synset': 'shield.n.02', 'synonyms': ['shield'], 'id': 946, 'def': 'armor carried on the arm to intercept blows', 'name': 'shield'}, {'frequency': 'f', 'synset': 'shirt.n.01', 'synonyms': ['shirt'], 'id': 947, 'def': 'a garment worn on the upper half of the body', 'name': 'shirt'}, {'frequency': 'f', 'synset': 'shoe.n.01', 'synonyms': ['shoe', 'sneaker_(type_of_shoe)', 'tennis_shoe'], 'id': 948, 'def': 'common footwear covering the foot', 'name': 'shoe'}, {'frequency': 'f', 'synset': 'shopping_bag.n.01', 'synonyms': ['shopping_bag'], 'id': 949, 'def': 'a bag made of plastic or strong paper (often with handles); used to transport goods after shopping', 'name': 'shopping_bag'}, {'frequency': 'c', 'synset': 'shopping_cart.n.01', 'synonyms': ['shopping_cart'], 'id': 950, 'def': 'a handcart that holds groceries or other goods while shopping', 'name': 'shopping_cart'}, {'frequency': 'f', 'synset': 'short_pants.n.01', 'synonyms': 
['short_pants', 'shorts_(clothing)', 'trunks_(clothing)'], 'id': 951, 'def': 'trousers that end at or above the knee', 'name': 'short_pants'}, {'frequency': 'r', 'synset': 'shot_glass.n.01', 'synonyms': ['shot_glass'], 'id': 952, 'def': 'a small glass adequate to hold a single swallow of whiskey', 'name': 'shot_glass'}, {'frequency': 'f', 'synset': 'shoulder_bag.n.01', 'synonyms': ['shoulder_bag'], 'id': 953, 'def': 'a large handbag that can be carried by a strap looped over the shoulder', 'name': 'shoulder_bag'}, {'frequency': 'c', 'synset': 'shovel.n.01', 'synonyms': ['shovel'], 'id': 954, 'def': 'a hand tool for lifting loose material such as snow, dirt, etc.', 'name': 'shovel'}, {'frequency': 'f', 'synset': 'shower.n.01', 'synonyms': ['shower_head'], 'id': 955, 'def': 'a plumbing fixture that sprays water over you', 'name': 'shower_head'}, {'frequency': 'r', 'synset': 'shower_cap.n.01', 'synonyms': ['shower_cap'], 'id': 956, 'def': 'a tight cap worn to keep hair dry while showering', 'name': 'shower_cap'}, {'frequency': 'f', 'synset': 'shower_curtain.n.01', 'synonyms': ['shower_curtain'], 'id': 957, 'def': 'a curtain that keeps water from splashing out of the shower area', 'name': 'shower_curtain'}, {'frequency': 'r', 'synset': 'shredder.n.01', 'synonyms': ['shredder_(for_paper)'], 'id': 958, 'def': 'a device that shreds documents', 'name': 'shredder_(for_paper)'}, {'frequency': 'f', 'synset': 'signboard.n.01', 'synonyms': ['signboard'], 'id': 959, 'def': 'structure displaying a board on which advertisements can be posted', 'name': 'signboard'}, {'frequency': 'c', 'synset': 'silo.n.01', 'synonyms': ['silo'], 'id': 960, 'def': 'a cylindrical tower used for storing goods', 'name': 'silo'}, {'frequency': 'f', 'synset': 'sink.n.01', 'synonyms': ['sink'], 'id': 961, 'def': 'plumbing fixture consisting of a water basin fixed to a wall or floor and having a drainpipe', 'name': 'sink'}, {'frequency': 'f', 'synset': 'skateboard.n.01', 'synonyms': ['skateboard'], 'id': 962, 'def': 'a board with wheels that is ridden in a standing or crouching position and propelled by foot', 'name': 'skateboard'}, {'frequency': 'c', 'synset': 'skewer.n.01', 'synonyms': ['skewer'], 'id': 963, 'def': 'a long pin for holding meat in position while it is being roasted', 'name': 'skewer'}, {'frequency': 'f', 'synset': 'ski.n.01', 'synonyms': ['ski'], 'id': 964, 'def': 'sports equipment for skiing on snow', 'name': 'ski'}, {'frequency': 'f', 'synset': 'ski_boot.n.01', 'synonyms': ['ski_boot'], 'id': 965, 'def': 'a stiff boot that is fastened to a ski with a ski binding', 'name': 'ski_boot'}, {'frequency': 'f', 'synset': 'ski_parka.n.01', 'synonyms': ['ski_parka', 'ski_jacket'], 'id': 966, 'def': 'a parka to be worn while skiing', 'name': 'ski_parka'}, {'frequency': 'f', 'synset': 'ski_pole.n.01', 'synonyms': ['ski_pole'], 'id': 967, 'def': 'a pole with metal points used as an aid in skiing', 'name': 'ski_pole'}, {'frequency': 'f', 'synset': 'skirt.n.02', 'synonyms': ['skirt'], 'id': 968, 'def': 'a garment hanging from the waist; worn mainly by girls and women', 'name': 'skirt'}, {'frequency': 'r', 'synset': 'skullcap.n.01', 'synonyms': ['skullcap'], 'id': 969, 'def': 'rounded brimless cap fitting the crown of the head', 'name': 'skullcap'}, {'frequency': 'c', 'synset': 'sled.n.01', 'synonyms': ['sled', 'sledge', 'sleigh'], 'id': 970, 'def': 'a vehicle or flat object for transportation over snow by sliding or pulled by dogs, etc.', 'name': 'sled'}, {'frequency': 'c', 'synset': 'sleeping_bag.n.01', 'synonyms': 
['sleeping_bag'], 'id': 971, 'def': 'large padded bag designed to be slept in outdoors', 'name': 'sleeping_bag'}, {'frequency': 'r', 'synset': 'sling.n.05', 'synonyms': ['sling_(bandage)', 'triangular_bandage'], 'id': 972, 'def': 'bandage to support an injured forearm; slung over the shoulder or neck', 'name': 'sling_(bandage)'}, {'frequency': 'c', 'synset': 'slipper.n.01', 'synonyms': ['slipper_(footwear)', 'carpet_slipper_(footwear)'], 'id': 973, 'def': 'low footwear that can be slipped on and off easily; usually worn indoors', 'name': 'slipper_(footwear)'}, {'frequency': 'r', 'synset': 'smoothie.n.02', 'synonyms': ['smoothie'], 'id': 974, 'def': 'a thick smooth drink consisting of fresh fruit pureed with ice cream or yoghurt or milk', 'name': 'smoothie'}, {'frequency': 'r', 'synset': 'snake.n.01', 'synonyms': ['snake', 'serpent'], 'id': 975, 'def': 'limbless scaly elongate reptile; some are venomous', 'name': 'snake'}, {'frequency': 'f', 'synset': 'snowboard.n.01', 'synonyms': ['snowboard'], 'id': 976, 'def': 'a board that resembles a broad ski or a small surfboard; used in a standing position to slide down snow-covered slopes', 'name': 'snowboard'}, {'frequency': 'c', 'synset': 'snowman.n.01', 'synonyms': ['snowman'], 'id': 977, 'def': 'a figure of a person made of packed snow', 'name': 'snowman'}, {'frequency': 'c', 'synset': 'snowmobile.n.01', 'synonyms': ['snowmobile'], 'id': 978, 'def': 'tracked vehicle for travel on snow having skis in front', 'name': 'snowmobile'}, {'frequency': 'f', 'synset': 'soap.n.01', 'synonyms': ['soap'], 'id': 979, 'def': 'a cleansing agent made from the salts of vegetable or animal fats', 'name': 'soap'}, {'frequency': 'f', 'synset': 'soccer_ball.n.01', 'synonyms': ['soccer_ball'], 'id': 980, 'def': "an inflated ball used in playing soccer (called `football' outside of the United States)", 'name': 'soccer_ball'}, {'frequency': 'f', 'synset': 'sock.n.01', 'synonyms': ['sock'], 'id': 981, 'def': 'cloth covering for the foot; worn inside the shoe; reaches to between the ankle and the knee', 'name': 'sock'}, {'frequency': 'f', 'synset': 'sofa.n.01', 'synonyms': ['sofa', 'couch', 'lounge'], 'id': 982, 'def': 'an upholstered seat for more than one person', 'name': 'sofa'}, {'frequency': 'r', 'synset': 'softball.n.01', 'synonyms': ['softball'], 'id': 983, 'def': 'ball used in playing softball', 'name': 'softball'}, {'frequency': 'c', 'synset': 'solar_array.n.01', 'synonyms': ['solar_array', 'solar_battery', 'solar_panel'], 'id': 984, 'def': 'electrical device consisting of a large array of connected solar cells', 'name': 'solar_array'}, {'frequency': 'r', 'synset': 'sombrero.n.02', 'synonyms': ['sombrero'], 'id': 985, 'def': 'a straw hat with a tall crown and broad brim; worn in American southwest and in Mexico', 'name': 'sombrero'}, {'frequency': 'f', 'synset': 'soup.n.01', 'synonyms': ['soup'], 'id': 986, 'def': 'liquid food especially of meat or fish or vegetable stock often containing pieces of solid food', 'name': 'soup'}, {'frequency': 'r', 'synset': 'soup_bowl.n.01', 'synonyms': ['soup_bowl'], 'id': 987, 'def': 'a bowl for serving soup', 'name': 'soup_bowl'}, {'frequency': 'c', 'synset': 'soupspoon.n.01', 'synonyms': ['soupspoon'], 'id': 988, 'def': 'a spoon with a rounded bowl for eating soup', 'name': 'soupspoon'}, {'frequency': 'c', 'synset': 'sour_cream.n.01', 'synonyms': ['sour_cream', 'soured_cream'], 'id': 989, 'def': 'soured light cream', 'name': 'sour_cream'}, {'frequency': 'r', 'synset': 'soya_milk.n.01', 'synonyms': ['soya_milk', 
'soybean_milk', 'soymilk'], 'id': 990, 'def': 'a milk substitute containing soybean flour and water; used in some infant formulas and in making tofu', 'name': 'soya_milk'}, {'frequency': 'r', 'synset': 'space_shuttle.n.01', 'synonyms': ['space_shuttle'], 'id': 991, 'def': "a reusable spacecraft with wings for a controlled descent through the Earth's atmosphere", 'name': 'space_shuttle'}, {'frequency': 'r', 'synset': 'sparkler.n.02', 'synonyms': ['sparkler_(fireworks)'], 'id': 992, 'def': 'a firework that burns slowly and throws out a shower of sparks', 'name': 'sparkler_(fireworks)'}, {'frequency': 'f', 'synset': 'spatula.n.02', 'synonyms': ['spatula'], 'id': 993, 'def': 'a hand tool with a thin flexible blade used to mix or spread soft substances', 'name': 'spatula'}, {'frequency': 'r', 'synset': 'spear.n.01', 'synonyms': ['spear', 'lance'], 'id': 994, 'def': 'a long pointed rod used as a tool or weapon', 'name': 'spear'}, {'frequency': 'f', 'synset': 'spectacles.n.01', 'synonyms': ['spectacles', 'specs', 'eyeglasses', 'glasses'], 'id': 995, 'def': 'optical instrument consisting of a frame that holds a pair of lenses for correcting defective vision', 'name': 'spectacles'}, {'frequency': 'c', 'synset': 'spice_rack.n.01', 'synonyms': ['spice_rack'], 'id': 996, 'def': 'a rack for displaying containers filled with spices', 'name': 'spice_rack'}, {'frequency': 'c', 'synset': 'spider.n.01', 'synonyms': ['spider'], 'id': 997, 'def': 'predatory arachnid with eight legs, two poison fangs, two feelers, and usually two silk-spinning organs at the back end of the body', 'name': 'spider'}, {'frequency': 'r', 'synset': 'spiny_lobster.n.02', 'synonyms': ['crawfish', 'crayfish'], 'id': 998, 'def': 'large edible marine crustacean having a spiny carapace but lacking the large pincers of true lobsters', 'name': 'crawfish'}, {'frequency': 'c', 'synset': 'sponge.n.01', 'synonyms': ['sponge'], 'id': 999, 'def': 'a porous mass usable to absorb water typically used for cleaning', 'name': 'sponge'}, {'frequency': 'f', 'synset': 'spoon.n.01', 'synonyms': ['spoon'], 'id': 1000, 'def': 'a piece of cutlery with a shallow bowl-shaped container and a handle', 'name': 'spoon'}, {'frequency': 'c', 'synset': 'sportswear.n.01', 'synonyms': ['sportswear', 'athletic_wear', 'activewear'], 'id': 1001, 'def': 'attire worn for sport or for casual wear', 'name': 'sportswear'}, {'frequency': 'c', 'synset': 'spotlight.n.02', 'synonyms': ['spotlight'], 'id': 1002, 'def': 'a lamp that produces a strong beam of light to illuminate a restricted area; used to focus attention of a stage performer', 'name': 'spotlight'}, {'frequency': 'r', 'synset': 'squid.n.01', 'synonyms': ['squid_(food)', 'calamari', 'calamary'], 'id': 1003, 'def': '(Italian cuisine) squid prepared as food', 'name': 'squid_(food)'}, {'frequency': 'c', 'synset': 'squirrel.n.01', 'synonyms': ['squirrel'], 'id': 1004, 'def': 'a kind of arboreal rodent having a long bushy tail', 'name': 'squirrel'}, {'frequency': 'r', 'synset': 'stagecoach.n.01', 'synonyms': ['stagecoach'], 'id': 1005, 'def': 'a large coach-and-four formerly used to carry passengers and mail on regular routes between towns', 'name': 'stagecoach'}, {'frequency': 'c', 'synset': 'stapler.n.01', 'synonyms': ['stapler_(stapling_machine)'], 'id': 1006, 'def': 'a machine that inserts staples into sheets of paper in order to fasten them together', 'name': 'stapler_(stapling_machine)'}, {'frequency': 'c', 'synset': 'starfish.n.01', 'synonyms': ['starfish', 'sea_star'], 'id': 1007, 'def': 'echinoderms characterized 
by five arms extending from a central disk', 'name': 'starfish'}, {'frequency': 'f', 'synset': 'statue.n.01', 'synonyms': ['statue_(sculpture)'], 'id': 1008, 'def': 'a sculpture representing a human or animal', 'name': 'statue_(sculpture)'}, {'frequency': 'c', 'synset': 'steak.n.01', 'synonyms': ['steak_(food)'], 'id': 1009, 'def': 'a slice of meat cut from the fleshy part of an animal or large fish', 'name': 'steak_(food)'}, {'frequency': 'r', 'synset': 'steak_knife.n.01', 'synonyms': ['steak_knife'], 'id': 1010, 'def': 'a sharp table knife used in eating steak', 'name': 'steak_knife'}, {'frequency': 'f', 'synset': 'steering_wheel.n.01', 'synonyms': ['steering_wheel'], 'id': 1011, 'def': 'a handwheel that is used for steering', 'name': 'steering_wheel'}, {'frequency': 'r', 'synset': 'step_ladder.n.01', 'synonyms': ['stepladder'], 'id': 1012, 'def': 'a folding portable ladder hinged at the top', 'name': 'stepladder'}, {'frequency': 'c', 'synset': 'step_stool.n.01', 'synonyms': ['step_stool'], 'id': 1013, 'def': 'a stool that has one or two steps that fold under the seat', 'name': 'step_stool'}, {'frequency': 'c', 'synset': 'stereo.n.01', 'synonyms': ['stereo_(sound_system)'], 'id': 1014, 'def': 'electronic device for playing audio', 'name': 'stereo_(sound_system)'}, {'frequency': 'r', 'synset': 'stew.n.02', 'synonyms': ['stew'], 'id': 1015, 'def': 'food prepared by stewing especially meat or fish with vegetables', 'name': 'stew'}, {'frequency': 'r', 'synset': 'stirrer.n.02', 'synonyms': ['stirrer'], 'id': 1016, 'def': 'an implement used for stirring', 'name': 'stirrer'}, {'frequency': 'f', 'synset': 'stirrup.n.01', 'synonyms': ['stirrup'], 'id': 1017, 'def': "support consisting of metal loops into which rider's feet go", 'name': 'stirrup'}, {'frequency': 'f', 'synset': 'stool.n.01', 'synonyms': ['stool'], 'id': 1018, 'def': 'a simple seat without a back or arms', 'name': 'stool'}, {'frequency': 'f', 'synset': 'stop_sign.n.01', 'synonyms': ['stop_sign'], 'id': 1019, 'def': 'a traffic sign to notify drivers that they must come to a complete stop', 'name': 'stop_sign'}, {'frequency': 'f', 'synset': 'stoplight.n.01', 'synonyms': ['brake_light'], 'id': 1020, 'def': 'a red light on the rear of a motor vehicle that signals when the brakes are applied', 'name': 'brake_light'}, {'frequency': 'f', 'synset': 'stove.n.01', 'synonyms': ['stove', 'kitchen_stove', 'range_(kitchen_appliance)', 'kitchen_range', 'cooking_stove'], 'id': 1021, 'def': 'a kitchen appliance used for cooking food', 'name': 'stove'}, {'frequency': 'c', 'synset': 'strainer.n.01', 'synonyms': ['strainer'], 'id': 1022, 'def': 'a filter to retain larger pieces while smaller pieces and liquids pass through', 'name': 'strainer'}, {'frequency': 'f', 'synset': 'strap.n.01', 'synonyms': ['strap'], 'id': 1023, 'def': 'an elongated strip of material for binding things together or holding', 'name': 'strap'}, {'frequency': 'f', 'synset': 'straw.n.04', 'synonyms': ['straw_(for_drinking)', 'drinking_straw'], 'id': 1024, 'def': 'a thin paper or plastic tube used to suck liquids into the mouth', 'name': 'straw_(for_drinking)'}, {'frequency': 'f', 'synset': 'strawberry.n.01', 'synonyms': ['strawberry'], 'id': 1025, 'def': 'sweet fleshy red fruit', 'name': 'strawberry'}, {'frequency': 'f', 'synset': 'street_sign.n.01', 'synonyms': ['street_sign'], 'id': 1026, 'def': 'a sign visible from the street', 'name': 'street_sign'}, {'frequency': 'f', 'synset': 'streetlight.n.01', 'synonyms': ['streetlight', 'street_lamp'], 'id': 1027, 'def': 'a lamp 
supported on a lamppost; for illuminating a street', 'name': 'streetlight'}, {'frequency': 'r', 'synset': 'string_cheese.n.01', 'synonyms': ['string_cheese'], 'id': 1028, 'def': 'cheese formed in long strings twisted together', 'name': 'string_cheese'}, {'frequency': 'r', 'synset': 'stylus.n.02', 'synonyms': ['stylus'], 'id': 1029, 'def': 'a pointed tool for writing or drawing or engraving, including pens', 'name': 'stylus'}, {'frequency': 'r', 'synset': 'subwoofer.n.01', 'synonyms': ['subwoofer'], 'id': 1030, 'def': 'a loudspeaker that is designed to reproduce very low bass frequencies', 'name': 'subwoofer'}, {'frequency': 'r', 'synset': 'sugar_bowl.n.01', 'synonyms': ['sugar_bowl'], 'id': 1031, 'def': 'a dish in which sugar is served', 'name': 'sugar_bowl'}, {'frequency': 'r', 'synset': 'sugarcane.n.01', 'synonyms': ['sugarcane_(plant)'], 'id': 1032, 'def': 'juicy canes whose sap is a source of molasses and commercial sugar; fresh canes are sometimes chewed for the juice', 'name': 'sugarcane_(plant)'}, {'frequency': 'f', 'synset': 'suit.n.01', 'synonyms': ['suit_(clothing)'], 'id': 1033, 'def': 'a set of garments (usually including a jacket and trousers or skirt) for outerwear all of the same fabric and color', 'name': 'suit_(clothing)'}, {'frequency': 'c', 'synset': 'sunflower.n.01', 'synonyms': ['sunflower'], 'id': 1034, 'def': 'any plant of the genus Helianthus having large flower heads with dark disk florets and showy yellow rays', 'name': 'sunflower'}, {'frequency': 'f', 'synset': 'sunglasses.n.01', 'synonyms': ['sunglasses'], 'id': 1035, 'def': 'spectacles that are darkened or polarized to protect the eyes from the glare of the sun', 'name': 'sunglasses'}, {'frequency': 'c', 'synset': 'sunhat.n.01', 'synonyms': ['sunhat'], 'id': 1036, 'def': 'a hat with a broad brim that protects the face from direct exposure to the sun', 'name': 'sunhat'}, {'frequency': 'f', 'synset': 'surfboard.n.01', 'synonyms': ['surfboard'], 'id': 1037, 'def': 'a narrow buoyant board for riding surf', 'name': 'surfboard'}, {'frequency': 'c', 'synset': 'sushi.n.01', 'synonyms': ['sushi'], 'id': 1038, 'def': 'rice (with raw fish) wrapped in seaweed', 'name': 'sushi'}, {'frequency': 'c', 'synset': 'swab.n.02', 'synonyms': ['mop'], 'id': 1039, 'def': 'cleaning implement consisting of absorbent material fastened to a handle; for cleaning floors', 'name': 'mop'}, {'frequency': 'c', 'synset': 'sweat_pants.n.01', 'synonyms': ['sweat_pants'], 'id': 1040, 'def': 'loose-fitting trousers with elastic cuffs; worn by athletes', 'name': 'sweat_pants'}, {'frequency': 'c', 'synset': 'sweatband.n.02', 'synonyms': ['sweatband'], 'id': 1041, 'def': 'a band of material tied around the forehead or wrist to absorb sweat', 'name': 'sweatband'}, {'frequency': 'f', 'synset': 'sweater.n.01', 'synonyms': ['sweater'], 'id': 1042, 'def': 'a crocheted or knitted garment covering the upper part of the body', 'name': 'sweater'}, {'frequency': 'f', 'synset': 'sweatshirt.n.01', 'synonyms': ['sweatshirt'], 'id': 1043, 'def': 'cotton knit pullover with long sleeves worn during athletic activity', 'name': 'sweatshirt'}, {'frequency': 'c', 'synset': 'sweet_potato.n.02', 'synonyms': ['sweet_potato'], 'id': 1044, 'def': 'the edible tuberous root of the sweet potato vine', 'name': 'sweet_potato'}, {'frequency': 'f', 'synset': 'swimsuit.n.01', 'synonyms': ['swimsuit', 'swimwear', 'bathing_suit', 'swimming_costume', 'bathing_costume', 'swimming_trunks', 'bathing_trunks'], 'id': 1045, 'def': 'garment worn for swimming', 'name': 'swimsuit'}, {'frequency': 
'c', 'synset': 'sword.n.01', 'synonyms': ['sword'], 'id': 1046, 'def': 'a cutting or thrusting weapon that has a long metal blade', 'name': 'sword'}, {'frequency': 'r', 'synset': 'syringe.n.01', 'synonyms': ['syringe'], 'id': 1047, 'def': 'a medical instrument used to inject or withdraw fluids', 'name': 'syringe'}, {'frequency': 'r', 'synset': 'tabasco.n.02', 'synonyms': ['Tabasco_sauce'], 'id': 1048, 'def': 'very spicy sauce (trade name Tabasco) made from fully-aged red peppers', 'name': 'Tabasco_sauce'}, {'frequency': 'r', 'synset': 'table-tennis_table.n.01', 'synonyms': ['table-tennis_table', 'ping-pong_table'], 'id': 1049, 'def': 'a table used for playing table tennis', 'name': 'table-tennis_table'}, {'frequency': 'f', 'synset': 'table.n.02', 'synonyms': ['table'], 'id': 1050, 'def': 'a piece of furniture having a smooth flat top that is usually supported by one or more vertical legs', 'name': 'table'}, {'frequency': 'c', 'synset': 'table_lamp.n.01', 'synonyms': ['table_lamp'], 'id': 1051, 'def': 'a lamp that sits on a table', 'name': 'table_lamp'}, {'frequency': 'f', 'synset': 'tablecloth.n.01', 'synonyms': ['tablecloth'], 'id': 1052, 'def': 'a covering spread over a dining table', 'name': 'tablecloth'}, {'frequency': 'r', 'synset': 'tachometer.n.01', 'synonyms': ['tachometer'], 'id': 1053, 'def': 'measuring instrument for indicating speed of rotation', 'name': 'tachometer'}, {'frequency': 'r', 'synset': 'taco.n.02', 'synonyms': ['taco'], 'id': 1054, 'def': 'a small tortilla cupped around a filling', 'name': 'taco'}, {'frequency': 'f', 'synset': 'tag.n.02', 'synonyms': ['tag'], 'id': 1055, 'def': 'a label associated with something for the purpose of identification or information', 'name': 'tag'}, {'frequency': 'f', 'synset': 'taillight.n.01', 'synonyms': ['taillight', 'rear_light'], 'id': 1056, 'def': 'lamp (usually red) mounted at the rear of a motor vehicle', 'name': 'taillight'}, {'frequency': 'r', 'synset': 'tambourine.n.01', 'synonyms': ['tambourine'], 'id': 1057, 'def': 'a shallow drum with a single drumhead and with metallic disks in the sides', 'name': 'tambourine'}, {'frequency': 'r', 'synset': 'tank.n.01', 'synonyms': ['army_tank', 'armored_combat_vehicle', 'armoured_combat_vehicle'], 'id': 1058, 'def': 'an enclosed armored military vehicle; has a cannon and moves on caterpillar treads', 'name': 'army_tank'}, {'frequency': 'f', 'synset': 'tank.n.02', 'synonyms': ['tank_(storage_vessel)', 'storage_tank'], 'id': 1059, 'def': 'a large (usually metallic) vessel for holding gases or liquids', 'name': 'tank_(storage_vessel)'}, {'frequency': 'f', 'synset': 'tank_top.n.01', 'synonyms': ['tank_top_(clothing)'], 'id': 1060, 'def': 'a tight-fitting sleeveless shirt with wide shoulder straps and low neck and no front opening', 'name': 'tank_top_(clothing)'}, {'frequency': 'f', 'synset': 'tape.n.01', 'synonyms': ['tape_(sticky_cloth_or_paper)'], 'id': 1061, 'def': 'a long thin piece of cloth or paper as used for binding or fastening', 'name': 'tape_(sticky_cloth_or_paper)'}, {'frequency': 'c', 'synset': 'tape.n.04', 'synonyms': ['tape_measure', 'measuring_tape'], 'id': 1062, 'def': 'measuring instrument consisting of a narrow strip (cloth or metal) marked in inches or centimeters and used for measuring lengths', 'name': 'tape_measure'}, {'frequency': 'c', 'synset': 'tapestry.n.02', 'synonyms': ['tapestry'], 'id': 1063, 'def': 'a heavy textile with a woven design; used for curtains and upholstery', 'name': 'tapestry'}, {'frequency': 'f', 'synset': 'tarpaulin.n.01', 'synonyms': ['tarp'], 
'id': 1064, 'def': 'waterproofed canvas', 'name': 'tarp'}, {'frequency': 'c', 'synset': 'tartan.n.01', 'synonyms': ['tartan', 'plaid'], 'id': 1065, 'def': 'a cloth having a crisscross design', 'name': 'tartan'}, {'frequency': 'c', 'synset': 'tassel.n.01', 'synonyms': ['tassel'], 'id': 1066, 'def': 'adornment consisting of a bunch of cords fastened at one end', 'name': 'tassel'}, {'frequency': 'c', 'synset': 'tea_bag.n.01', 'synonyms': ['tea_bag'], 'id': 1067, 'def': 'a measured amount of tea in a bag for an individual serving of tea', 'name': 'tea_bag'}, {'frequency': 'c', 'synset': 'teacup.n.02', 'synonyms': ['teacup'], 'id': 1068, 'def': 'a cup from which tea is drunk', 'name': 'teacup'}, {'frequency': 'c', 'synset': 'teakettle.n.01', 'synonyms': ['teakettle'], 'id': 1069, 'def': 'kettle for boiling water to make tea', 'name': 'teakettle'}, {'frequency': 'f', 'synset': 'teapot.n.01', 'synonyms': ['teapot'], 'id': 1070, 'def': 'pot for brewing tea; usually has a spout and handle', 'name': 'teapot'}, {'frequency': 'f', 'synset': 'teddy.n.01', 'synonyms': ['teddy_bear'], 'id': 1071, 'def': "plaything consisting of a child's toy bear (usually plush and stuffed with soft materials)", 'name': 'teddy_bear'}, {'frequency': 'f', 'synset': 'telephone.n.01', 'synonyms': ['telephone', 'phone', 'telephone_set'], 'id': 1072, 'def': 'electronic device for communicating by voice over long distances (includes wired and wireless/cell phones)', 'name': 'telephone'}, {'frequency': 'c', 'synset': 'telephone_booth.n.01', 'synonyms': ['telephone_booth', 'phone_booth', 'call_box', 'telephone_box', 'telephone_kiosk'], 'id': 1073, 'def': 'booth for using a telephone', 'name': 'telephone_booth'}, {'frequency': 'f', 'synset': 'telephone_pole.n.01', 'synonyms': ['telephone_pole', 'telegraph_pole', 'telegraph_post'], 'id': 1074, 'def': 'tall pole supporting telephone wires', 'name': 'telephone_pole'}, {'frequency': 'r', 'synset': 'telephoto_lens.n.01', 'synonyms': ['telephoto_lens', 'zoom_lens'], 'id': 1075, 'def': 'a camera lens that magnifies the image', 'name': 'telephoto_lens'}, {'frequency': 'c', 'synset': 'television_camera.n.01', 'synonyms': ['television_camera', 'tv_camera'], 'id': 1076, 'def': 'television equipment for capturing and recording video', 'name': 'television_camera'}, {'frequency': 'f', 'synset': 'television_receiver.n.01', 'synonyms': ['television_set', 'tv', 'tv_set'], 'id': 1077, 'def': 'an electronic device that receives television signals and displays them on a screen', 'name': 'television_set'}, {'frequency': 'f', 'synset': 'tennis_ball.n.01', 'synonyms': ['tennis_ball'], 'id': 1078, 'def': 'ball about the size of a fist used in playing tennis', 'name': 'tennis_ball'}, {'frequency': 'f', 'synset': 'tennis_racket.n.01', 'synonyms': ['tennis_racket'], 'id': 1079, 'def': 'a racket used to play tennis', 'name': 'tennis_racket'}, {'frequency': 'r', 'synset': 'tequila.n.01', 'synonyms': ['tequila'], 'id': 1080, 'def': 'Mexican liquor made from fermented juices of an agave plant', 'name': 'tequila'}, {'frequency': 'c', 'synset': 'thermometer.n.01', 'synonyms': ['thermometer'], 'id': 1081, 'def': 'measuring instrument for measuring temperature', 'name': 'thermometer'}, {'frequency': 'c', 'synset': 'thermos.n.01', 'synonyms': ['thermos_bottle'], 'id': 1082, 'def': 'vacuum flask that preserves temperature of hot or cold drinks', 'name': 'thermos_bottle'}, {'frequency': 'f', 'synset': 'thermostat.n.01', 'synonyms': ['thermostat'], 'id': 1083, 'def': 'a regulator for automatically regulating 
temperature by starting or stopping the supply of heat', 'name': 'thermostat'}, {'frequency': 'r', 'synset': 'thimble.n.02', 'synonyms': ['thimble'], 'id': 1084, 'def': 'a small metal cap to protect the finger while sewing; can be used as a small container', 'name': 'thimble'}, {'frequency': 'c', 'synset': 'thread.n.01', 'synonyms': ['thread', 'yarn'], 'id': 1085, 'def': 'a fine cord of twisted fibers (of cotton or silk or wool or nylon etc.) used in sewing and weaving', 'name': 'thread'}, {'frequency': 'c', 'synset': 'thumbtack.n.01', 'synonyms': ['thumbtack', 'drawing_pin', 'pushpin'], 'id': 1086, 'def': 'a tack for attaching papers to a bulletin board or drawing board', 'name': 'thumbtack'}, {'frequency': 'c', 'synset': 'tiara.n.01', 'synonyms': ['tiara'], 'id': 1087, 'def': 'a jeweled headdress worn by women on formal occasions', 'name': 'tiara'}, {'frequency': 'c', 'synset': 'tiger.n.02', 'synonyms': ['tiger'], 'id': 1088, 'def': 'large feline of forests in most of Asia having a tawny coat with black stripes', 'name': 'tiger'}, {'frequency': 'c', 'synset': 'tights.n.01', 'synonyms': ['tights_(clothing)', 'leotards'], 'id': 1089, 'def': 'skintight knit hose covering the body from the waist to the feet worn by acrobats and dancers and as stockings by women and girls', 'name': 'tights_(clothing)'}, {'frequency': 'c', 'synset': 'timer.n.01', 'synonyms': ['timer', 'stopwatch'], 'id': 1090, 'def': 'a timepiece that measures a time interval and signals its end', 'name': 'timer'}, {'frequency': 'f', 'synset': 'tinfoil.n.01', 'synonyms': ['tinfoil'], 'id': 1091, 'def': 'foil made of tin or an alloy of tin and lead', 'name': 'tinfoil'}, {'frequency': 'c', 'synset': 'tinsel.n.01', 'synonyms': ['tinsel'], 'id': 1092, 'def': 'a showy decoration that is basically valueless', 'name': 'tinsel'}, {'frequency': 'f', 'synset': 'tissue.n.02', 'synonyms': ['tissue_paper'], 'id': 1093, 'def': 'a soft thin (usually translucent) paper', 'name': 'tissue_paper'}, {'frequency': 'c', 'synset': 'toast.n.01', 'synonyms': ['toast_(food)'], 'id': 1094, 'def': 'slice of bread that has been toasted', 'name': 'toast_(food)'}, {'frequency': 'f', 'synset': 'toaster.n.02', 'synonyms': ['toaster'], 'id': 1095, 'def': 'a kitchen appliance (usually electric) for toasting bread', 'name': 'toaster'}, {'frequency': 'f', 'synset': 'toaster_oven.n.01', 'synonyms': ['toaster_oven'], 'id': 1096, 'def': 'kitchen appliance consisting of a small electric oven for toasting or warming food', 'name': 'toaster_oven'}, {'frequency': 'f', 'synset': 'toilet.n.02', 'synonyms': ['toilet'], 'id': 1097, 'def': 'a plumbing fixture for defecation and urination', 'name': 'toilet'}, {'frequency': 'f', 'synset': 'toilet_tissue.n.01', 'synonyms': ['toilet_tissue', 'toilet_paper', 'bathroom_tissue'], 'id': 1098, 'def': 'a soft thin absorbent paper for use in toilets', 'name': 'toilet_tissue'}, {'frequency': 'f', 'synset': 'tomato.n.01', 'synonyms': ['tomato'], 'id': 1099, 'def': 'mildly acid red or yellow pulpy fruit eaten as a vegetable', 'name': 'tomato'}, {'frequency': 'f', 'synset': 'tongs.n.01', 'synonyms': ['tongs'], 'id': 1100, 'def': 'any of various devices for taking hold of objects; usually have two hinged legs with handles above and pointed hooks below', 'name': 'tongs'}, {'frequency': 'c', 'synset': 'toolbox.n.01', 'synonyms': ['toolbox'], 'id': 1101, 'def': 'a box or chest or cabinet for holding hand tools', 'name': 'toolbox'}, {'frequency': 'f', 'synset': 'toothbrush.n.01', 'synonyms': ['toothbrush'], 'id': 1102, 'def': 'small brush; has 
long handle; used to clean teeth', 'name': 'toothbrush'}, {'frequency': 'f', 'synset': 'toothpaste.n.01', 'synonyms': ['toothpaste'], 'id': 1103, 'def': 'a dentifrice in the form of a paste', 'name': 'toothpaste'}, {'frequency': 'f', 'synset': 'toothpick.n.01', 'synonyms': ['toothpick'], 'id': 1104, 'def': 'pick consisting of a small strip of wood or plastic; used to pick food from between the teeth', 'name': 'toothpick'}, {'frequency': 'f', 'synset': 'top.n.09', 'synonyms': ['cover'], 'id': 1105, 'def': 'covering for a hole (especially a hole in the top of a container)', 'name': 'cover'}, {'frequency': 'c', 'synset': 'tortilla.n.01', 'synonyms': ['tortilla'], 'id': 1106, 'def': 'thin unleavened pancake made from cornmeal or wheat flour', 'name': 'tortilla'}, {'frequency': 'c', 'synset': 'tow_truck.n.01', 'synonyms': ['tow_truck'], 'id': 1107, 'def': 'a truck equipped to hoist and pull wrecked cars (or to remove cars from no-parking zones)', 'name': 'tow_truck'}, {'frequency': 'f', 'synset': 'towel.n.01', 'synonyms': ['towel'], 'id': 1108, 'def': 'a rectangular piece of absorbent cloth (or paper) for drying or wiping', 'name': 'towel'}, {'frequency': 'f', 'synset': 'towel_rack.n.01', 'synonyms': ['towel_rack', 'towel_rail', 'towel_bar'], 'id': 1109, 'def': 'a rack consisting of one or more bars on which towels can be hung', 'name': 'towel_rack'}, {'frequency': 'f', 'synset': 'toy.n.03', 'synonyms': ['toy'], 'id': 1110, 'def': 'a device regarded as providing amusement', 'name': 'toy'}, {'frequency': 'c', 'synset': 'tractor.n.01', 'synonyms': ['tractor_(farm_equipment)'], 'id': 1111, 'def': 'a wheeled vehicle with large wheels; used in farming and other applications', 'name': 'tractor_(farm_equipment)'}, {'frequency': 'f', 'synset': 'traffic_light.n.01', 'synonyms': ['traffic_light'], 'id': 1112, 'def': 'a device to control vehicle traffic often consisting of three or more lights', 'name': 'traffic_light'}, {'frequency': 'c', 'synset': 'trail_bike.n.01', 'synonyms': ['dirt_bike'], 'id': 1113, 'def': 'a lightweight motorcycle equipped with rugged tires and suspension for off-road use', 'name': 'dirt_bike'}, {'frequency': 'f', 'synset': 'trailer_truck.n.01', 'synonyms': ['trailer_truck', 'tractor_trailer', 'trucking_rig', 'articulated_lorry', 'semi_truck'], 'id': 1114, 'def': 'a truck consisting of a tractor and trailer together', 'name': 'trailer_truck'}, {'frequency': 'f', 'synset': 'train.n.01', 'synonyms': ['train_(railroad_vehicle)', 'railroad_train'], 'id': 1115, 'def': 'public or private transport provided by a line of railway cars coupled together and drawn by a locomotive', 'name': 'train_(railroad_vehicle)'}, {'frequency': 'r', 'synset': 'trampoline.n.01', 'synonyms': ['trampoline'], 'id': 1116, 'def': 'gymnastic apparatus consisting of a strong canvas sheet attached with springs to a metal frame', 'name': 'trampoline'}, {'frequency': 'f', 'synset': 'tray.n.01', 'synonyms': ['tray'], 'id': 1117, 'def': 'an open receptacle for holding or displaying or serving articles or food', 'name': 'tray'}, {'frequency': 'r', 'synset': 'trench_coat.n.01', 'synonyms': ['trench_coat'], 'id': 1118, 'def': 'a military style raincoat; belted with deep pockets', 'name': 'trench_coat'}, {'frequency': 'r', 'synset': 'triangle.n.05', 'synonyms': ['triangle_(musical_instrument)'], 'id': 1119, 'def': 'a percussion instrument consisting of a metal bar bent in the shape of an open triangle', 'name': 'triangle_(musical_instrument)'}, {'frequency': 'c', 'synset': 'tricycle.n.01', 'synonyms': ['tricycle'], 'id': 
1120, 'def': 'a vehicle with three wheels that is moved by foot pedals', 'name': 'tricycle'}, {'frequency': 'f', 'synset': 'tripod.n.01', 'synonyms': ['tripod'], 'id': 1121, 'def': 'a three-legged rack used for support', 'name': 'tripod'}, {'frequency': 'f', 'synset': 'trouser.n.01', 'synonyms': ['trousers', 'pants_(clothing)'], 'id': 1122, 'def': 'a garment extending from the waist to the knee or ankle, covering each leg separately', 'name': 'trousers'}, {'frequency': 'f', 'synset': 'truck.n.01', 'synonyms': ['truck'], 'id': 1123, 'def': 'an automotive vehicle suitable for hauling', 'name': 'truck'}, {'frequency': 'r', 'synset': 'truffle.n.03', 'synonyms': ['truffle_(chocolate)', 'chocolate_truffle'], 'id': 1124, 'def': 'creamy chocolate candy', 'name': 'truffle_(chocolate)'}, {'frequency': 'c', 'synset': 'trunk.n.02', 'synonyms': ['trunk'], 'id': 1125, 'def': 'luggage consisting of a large strong case used when traveling or for storage', 'name': 'trunk'}, {'frequency': 'r', 'synset': 'tub.n.02', 'synonyms': ['vat'], 'id': 1126, 'def': 'a large vessel for holding or storing liquids', 'name': 'vat'}, {'frequency': 'c', 'synset': 'turban.n.01', 'synonyms': ['turban'], 'id': 1127, 'def': 'a traditional headdress consisting of a long scarf wrapped around the head', 'name': 'turban'}, {'frequency': 'c', 'synset': 'turkey.n.04', 'synonyms': ['turkey_(food)'], 'id': 1128, 'def': 'flesh of large domesticated fowl usually roasted', 'name': 'turkey_(food)'}, {'frequency': 'r', 'synset': 'turnip.n.01', 'synonyms': ['turnip'], 'id': 1129, 'def': 'widely cultivated plant having a large fleshy edible white or yellow root', 'name': 'turnip'}, {'frequency': 'c', 'synset': 'turtle.n.02', 'synonyms': ['turtle'], 'id': 1130, 'def': 'any of various aquatic and land reptiles having a bony shell and flipper-like limbs for swimming', 'name': 'turtle'}, {'frequency': 'c', 'synset': 'turtleneck.n.01', 'synonyms': ['turtleneck_(clothing)', 'polo-neck'], 'id': 1131, 'def': 'a sweater or jersey with a high close-fitting collar', 'name': 'turtleneck_(clothing)'}, {'frequency': 'c', 'synset': 'typewriter.n.01', 'synonyms': ['typewriter'], 'id': 1132, 'def': 'hand-operated character printer for printing written messages one character at a time', 'name': 'typewriter'}, {'frequency': 'f', 'synset': 'umbrella.n.01', 'synonyms': ['umbrella'], 'id': 1133, 'def': 'a lightweight handheld collapsible canopy', 'name': 'umbrella'}, {'frequency': 'f', 'synset': 'underwear.n.01', 'synonyms': ['underwear', 'underclothes', 'underclothing', 'underpants'], 'id': 1134, 'def': 'undergarment worn next to the skin and under the outer garments', 'name': 'underwear'}, {'frequency': 'r', 'synset': 'unicycle.n.01', 'synonyms': ['unicycle'], 'id': 1135, 'def': 'a vehicle with a single wheel that is driven by pedals', 'name': 'unicycle'}, {'frequency': 'f', 'synset': 'urinal.n.01', 'synonyms': ['urinal'], 'id': 1136, 'def': 'a plumbing fixture (usually attached to the wall) used by men to urinate', 'name': 'urinal'}, {'frequency': 'c', 'synset': 'urn.n.01', 'synonyms': ['urn'], 'id': 1137, 'def': 'a large vase that usually has a pedestal or feet', 'name': 'urn'}, {'frequency': 'c', 'synset': 'vacuum.n.04', 'synonyms': ['vacuum_cleaner'], 'id': 1138, 'def': 'an electrical home appliance that cleans by suction', 'name': 'vacuum_cleaner'}, {'frequency': 'f', 'synset': 'vase.n.01', 'synonyms': ['vase'], 'id': 1139, 'def': 'an open jar of glass or porcelain used as an ornament or to hold flowers', 'name': 'vase'}, {'frequency': 'c', 'synset': 
'vending_machine.n.01', 'synonyms': ['vending_machine'], 'id': 1140, 'def': 'a slot machine for selling goods', 'name': 'vending_machine'}, {'frequency': 'f', 'synset': 'vent.n.01', 'synonyms': ['vent', 'blowhole', 'air_vent'], 'id': 1141, 'def': 'a hole for the escape of gas or air', 'name': 'vent'}, {'frequency': 'f', 'synset': 'vest.n.01', 'synonyms': ['vest', 'waistcoat'], 'id': 1142, 'def': "a man's sleeveless garment worn underneath a coat", 'name': 'vest'}, {'frequency': 'c', 'synset': 'videotape.n.01', 'synonyms': ['videotape'], 'id': 1143, 'def': 'a video recording made on magnetic tape', 'name': 'videotape'}, {'frequency': 'r', 'synset': 'vinegar.n.01', 'synonyms': ['vinegar'], 'id': 1144, 'def': 'sour-tasting liquid produced usually by oxidation of the alcohol in wine or cider and used as a condiment or food preservative', 'name': 'vinegar'}, {'frequency': 'r', 'synset': 'violin.n.01', 'synonyms': ['violin', 'fiddle'], 'id': 1145, 'def': 'bowed stringed instrument that is the highest member of the violin family', 'name': 'violin'}, {'frequency': 'r', 'synset': 'vodka.n.01', 'synonyms': ['vodka'], 'id': 1146, 'def': 'unaged colorless liquor originating in Russia', 'name': 'vodka'}, {'frequency': 'c', 'synset': 'volleyball.n.02', 'synonyms': ['volleyball'], 'id': 1147, 'def': 'an inflated ball used in playing volleyball', 'name': 'volleyball'}, {'frequency': 'r', 'synset': 'vulture.n.01', 'synonyms': ['vulture'], 'id': 1148, 'def': 'any of various large birds of prey having naked heads and weak claws and feeding chiefly on carrion', 'name': 'vulture'}, {'frequency': 'c', 'synset': 'waffle.n.01', 'synonyms': ['waffle'], 'id': 1149, 'def': 'pancake batter baked in a waffle iron', 'name': 'waffle'}, {'frequency': 'r', 'synset': 'waffle_iron.n.01', 'synonyms': ['waffle_iron'], 'id': 1150, 'def': 'a kitchen appliance for baking waffles', 'name': 'waffle_iron'}, {'frequency': 'c', 'synset': 'wagon.n.01', 'synonyms': ['wagon'], 'id': 1151, 'def': 'any of various kinds of wheeled vehicles drawn by an animal or a tractor', 'name': 'wagon'}, {'frequency': 'c', 'synset': 'wagon_wheel.n.01', 'synonyms': ['wagon_wheel'], 'id': 1152, 'def': 'a wheel of a wagon', 'name': 'wagon_wheel'}, {'frequency': 'c', 'synset': 'walking_stick.n.01', 'synonyms': ['walking_stick'], 'id': 1153, 'def': 'a stick carried in the hand for support in walking', 'name': 'walking_stick'}, {'frequency': 'c', 'synset': 'wall_clock.n.01', 'synonyms': ['wall_clock'], 'id': 1154, 'def': 'a clock mounted on a wall', 'name': 'wall_clock'}, {'frequency': 'f', 'synset': 'wall_socket.n.01', 'synonyms': ['wall_socket', 'wall_plug', 'electric_outlet', 'electrical_outlet', 'outlet', 'electric_receptacle'], 'id': 1155, 'def': 'receptacle providing a place in a wiring system where current can be taken to run electrical devices', 'name': 'wall_socket'}, {'frequency': 'f', 'synset': 'wallet.n.01', 'synonyms': ['wallet', 'billfold'], 'id': 1156, 'def': 'a pocket-size case for holding papers and paper money', 'name': 'wallet'}, {'frequency': 'r', 'synset': 'walrus.n.01', 'synonyms': ['walrus'], 'id': 1157, 'def': 'either of two large northern marine mammals having ivory tusks and tough hide over thick blubber', 'name': 'walrus'}, {'frequency': 'r', 'synset': 'wardrobe.n.01', 'synonyms': ['wardrobe'], 'id': 1158, 'def': 'a tall piece of furniture that provides storage space for clothes; has a door and rails or hooks for hanging clothes', 'name': 'wardrobe'}, {'frequency': 'r', 'synset': 'washbasin.n.01', 'synonyms': ['washbasin', 
'basin_(for_washing)', 'washbowl', 'washstand', 'handbasin'], 'id': 1159, 'def': 'a bathroom sink that is permanently installed and connected to a water supply and drainpipe; where you can wash your hands and face', 'name': 'washbasin'}, {'frequency': 'c', 'synset': 'washer.n.03', 'synonyms': ['automatic_washer', 'washing_machine'], 'id': 1160, 'def': 'a home appliance for washing clothes and linens automatically', 'name': 'automatic_washer'}, {'frequency': 'f', 'synset': 'watch.n.01', 'synonyms': ['watch', 'wristwatch'], 'id': 1161, 'def': 'a small, portable timepiece', 'name': 'watch'}, {'frequency': 'f', 'synset': 'water_bottle.n.01', 'synonyms': ['water_bottle'], 'id': 1162, 'def': 'a bottle for holding water', 'name': 'water_bottle'}, {'frequency': 'c', 'synset': 'water_cooler.n.01', 'synonyms': ['water_cooler'], 'id': 1163, 'def': 'a device for cooling and dispensing drinking water', 'name': 'water_cooler'}, {'frequency': 'c', 'synset': 'water_faucet.n.01', 'synonyms': ['water_faucet', 'water_tap', 'tap_(water_faucet)'], 'id': 1164, 'def': 'a faucet for drawing water from a pipe or cask', 'name': 'water_faucet'}, {'frequency': 'r', 'synset': 'water_heater.n.01', 'synonyms': ['water_heater', 'hot-water_heater'], 'id': 1165, 'def': 'a heater and storage tank to supply heated water', 'name': 'water_heater'}, {'frequency': 'c', 'synset': 'water_jug.n.01', 'synonyms': ['water_jug'], 'id': 1166, 'def': 'a jug that holds water', 'name': 'water_jug'}, {'frequency': 'r', 'synset': 'water_pistol.n.01', 'synonyms': ['water_gun', 'squirt_gun'], 'id': 1167, 'def': 'plaything consisting of a toy pistol that squirts water', 'name': 'water_gun'}, {'frequency': 'c', 'synset': 'water_scooter.n.01', 'synonyms': ['water_scooter', 'sea_scooter', 'jet_ski'], 'id': 1168, 'def': 'a motorboat resembling a motor scooter (NOT A SURFBOARD OR WATER SKI)', 'name': 'water_scooter'}, {'frequency': 'c', 'synset': 'water_ski.n.01', 'synonyms': ['water_ski'], 'id': 1169, 'def': 'broad ski for skimming over water towed by a speedboat (DO NOT MARK WATER)', 'name': 'water_ski'}, {'frequency': 'c', 'synset': 'water_tower.n.01', 'synonyms': ['water_tower'], 'id': 1170, 'def': 'a large reservoir for water', 'name': 'water_tower'}, {'frequency': 'c', 'synset': 'watering_can.n.01', 'synonyms': ['watering_can'], 'id': 1171, 'def': 'a container with a handle and a spout with a perforated nozzle; used to sprinkle water over plants', 'name': 'watering_can'}, {'frequency': 'f', 'synset': 'watermelon.n.02', 'synonyms': ['watermelon'], 'id': 1172, 'def': 'large oblong or roundish melon with a hard green rind and sweet watery red or occasionally yellowish pulp', 'name': 'watermelon'}, {'frequency': 'f', 'synset': 'weathervane.n.01', 'synonyms': ['weathervane', 'vane_(weathervane)', 'wind_vane'], 'id': 1173, 'def': 'mechanical device attached to an elevated structure; rotates freely to show the direction of the wind', 'name': 'weathervane'}, {'frequency': 'c', 'synset': 'webcam.n.01', 'synonyms': ['webcam'], 'id': 1174, 'def': 'a digital camera designed to take digital photographs and transmit them over the internet', 'name': 'webcam'}, {'frequency': 'c', 'synset': 'wedding_cake.n.01', 'synonyms': ['wedding_cake', 'bridecake'], 'id': 1175, 'def': 'a rich cake with two or more tiers and covered with frosting and decorations; served at a wedding reception', 'name': 'wedding_cake'}, {'frequency': 'c', 'synset': 'wedding_ring.n.01', 'synonyms': ['wedding_ring', 'wedding_band'], 'id': 1176, 'def': 'a ring given to the bride and/or groom at 
the wedding', 'name': 'wedding_ring'}, {'frequency': 'f', 'synset': 'wet_suit.n.01', 'synonyms': ['wet_suit'], 'id': 1177, 'def': 'a close-fitting garment made of a permeable material; worn in cold water to retain body heat', 'name': 'wet_suit'}, {'frequency': 'f', 'synset': 'wheel.n.01', 'synonyms': ['wheel'], 'id': 1178, 'def': 'a circular frame with spokes (or a solid disc) that can rotate on a shaft or axle', 'name': 'wheel'}, {'frequency': 'c', 'synset': 'wheelchair.n.01', 'synonyms': ['wheelchair'], 'id': 1179, 'def': 'a movable chair mounted on large wheels', 'name': 'wheelchair'}, {'frequency': 'c', 'synset': 'whipped_cream.n.01', 'synonyms': ['whipped_cream'], 'id': 1180, 'def': 'cream that has been beaten until light and fluffy', 'name': 'whipped_cream'}, {'frequency': 'c', 'synset': 'whistle.n.03', 'synonyms': ['whistle'], 'id': 1181, 'def': 'a small wind instrument that produces a whistling sound by blowing into it', 'name': 'whistle'}, {'frequency': 'c', 'synset': 'wig.n.01', 'synonyms': ['wig'], 'id': 1182, 'def': 'hairpiece covering the head and made of real or synthetic hair', 'name': 'wig'}, {'frequency': 'c', 'synset': 'wind_chime.n.01', 'synonyms': ['wind_chime'], 'id': 1183, 'def': 'a decorative arrangement of pieces of metal or glass or pottery that hang together loosely so the wind can cause them to tinkle', 'name': 'wind_chime'}, {'frequency': 'c', 'synset': 'windmill.n.01', 'synonyms': ['windmill'], 'id': 1184, 'def': 'A mill or turbine that is powered by wind', 'name': 'windmill'}, {'frequency': 'c', 'synset': 'window_box.n.01', 'synonyms': ['window_box_(for_plants)'], 'id': 1185, 'def': 'a container for growing plants on a windowsill', 'name': 'window_box_(for_plants)'}, {'frequency': 'f', 'synset': 'windshield_wiper.n.01', 'synonyms': ['windshield_wiper', 'windscreen_wiper', 'wiper_(for_windshield/screen)'], 'id': 1186, 'def': 'a mechanical device that cleans the windshield', 'name': 'windshield_wiper'}, {'frequency': 'c', 'synset': 'windsock.n.01', 'synonyms': ['windsock', 'air_sock', 'air-sleeve', 'wind_sleeve', 'wind_cone'], 'id': 1187, 'def': 'a truncated cloth cone mounted on a mast/pole; shows wind direction', 'name': 'windsock'}, {'frequency': 'f', 'synset': 'wine_bottle.n.01', 'synonyms': ['wine_bottle'], 'id': 1188, 'def': 'a bottle for holding wine', 'name': 'wine_bottle'}, {'frequency': 'c', 'synset': 'wine_bucket.n.01', 'synonyms': ['wine_bucket', 'wine_cooler'], 'id': 1189, 'def': 'a bucket of ice used to chill a bottle of wine', 'name': 'wine_bucket'}, {'frequency': 'f', 'synset': 'wineglass.n.01', 'synonyms': ['wineglass'], 'id': 1190, 'def': 'a glass that has a stem and in which wine is served', 'name': 'wineglass'}, {'frequency': 'f', 'synset': 'winker.n.02', 'synonyms': ['blinder_(for_horses)'], 'id': 1191, 'def': 'blinds that prevent a horse from seeing something on either side', 'name': 'blinder_(for_horses)'}, {'frequency': 'c', 'synset': 'wok.n.01', 'synonyms': ['wok'], 'id': 1192, 'def': 'pan with a convex bottom; used for frying in Chinese cooking', 'name': 'wok'}, {'frequency': 'r', 'synset': 'wolf.n.01', 'synonyms': ['wolf'], 'id': 1193, 'def': 'a wild carnivorous mammal of the dog family, living and hunting in packs', 'name': 'wolf'}, {'frequency': 'c', 'synset': 'wooden_spoon.n.02', 'synonyms': ['wooden_spoon'], 'id': 1194, 'def': 'a spoon made of wood', 'name': 'wooden_spoon'}, {'frequency': 'c', 'synset': 'wreath.n.01', 'synonyms': ['wreath'], 'id': 1195, 'def': 'an arrangement of flowers, leaves, or stems fastened in a ring', 
'name': 'wreath'}, {'frequency': 'c', 'synset': 'wrench.n.03', 'synonyms': ['wrench', 'spanner'], 'id': 1196, 'def': 'a hand tool that is used to hold or twist a nut or bolt', 'name': 'wrench'}, {'frequency': 'f', 'synset': 'wristband.n.01', 'synonyms': ['wristband'], 'id': 1197, 'def': 'band consisting of a part of a sleeve that covers the wrist', 'name': 'wristband'}, {'frequency': 'f', 'synset': 'wristlet.n.01', 'synonyms': ['wristlet', 'wrist_band'], 'id': 1198, 'def': 'a band or bracelet worn around the wrist', 'name': 'wristlet'}, {'frequency': 'c', 'synset': 'yacht.n.01', 'synonyms': ['yacht'], 'id': 1199, 'def': 'an expensive vessel propelled by sail or power and used for cruising or racing', 'name': 'yacht'}, {'frequency': 'c', 'synset': 'yogurt.n.01', 'synonyms': ['yogurt', 'yoghurt', 'yoghourt'], 'id': 1200, 'def': 'a custard-like food made from curdled milk', 'name': 'yogurt'}, {'frequency': 'c', 'synset': 'yoke.n.07', 'synonyms': ['yoke_(animal_equipment)'], 'id': 1201, 'def': 'gear joining two animals at the neck; NOT egg yolk', 'name': 'yoke_(animal_equipment)'}, {'frequency': 'f', 'synset': 'zebra.n.01', 'synonyms': ['zebra'], 'id': 1202, 'def': 'any of several fleet black-and-white striped African equines', 'name': 'zebra'}, {'frequency': 'c', 'synset': 'zucchini.n.02', 'synonyms': ['zucchini', 'courgette'], 'id': 1203, 'def': 'small cucumber-shaped vegetable marrow; typically dark green', 'name': 'zucchini'}] # noqa
+# fmt: on
diff --git a/detectron2/data/datasets/lvis_v1_category_image_count.py b/detectron2/data/datasets/lvis_v1_category_image_count.py
new file mode 100644
index 0000000000000000000000000000000000000000..31bf0cfcd5096ab87835db86a28671d474514c40
--- /dev/null
+++ b/detectron2/data/datasets/lvis_v1_category_image_count.py
@@ -0,0 +1,20 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+# Autogen with
+# with open("lvis_v1_train.json", "r") as f:
+# a = json.load(f)
+# c = a["categories"]
+# for x in c:
+# del x["name"]
+# del x["instance_count"]
+# del x["def"]
+# del x["synonyms"]
+# del x["frequency"]
+# del x["synset"]
+# LVIS_CATEGORY_IMAGE_COUNT = repr(c) + " # noqa"
+# with open("/tmp/lvis_category_image_count.py", "wt") as f:
+# f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {LVIS_CATEGORY_IMAGE_COUNT}")
+# Then paste the contents of that file below
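+#
+# A minimal self-contained sketch of the same autogen step (assuming a local
+# copy of the LVIS v1 training annotations at "lvis_v1_train.json"; the output
+# path "/tmp/lvis_category_image_count.py" is only illustrative). Instead of
+# deleting the unused fields one by one, it keeps just the two fields that the
+# constant below actually contains:
+#
+# import json
+#
+# with open("lvis_v1_train.json", "r") as f:
+#     categories = json.load(f)["categories"]
+# # Keep only the category id and its per-category image count.
+# counts = [{"id": x["id"], "image_count": x["image_count"]} for x in categories]
+# with open("/tmp/lvis_category_image_count.py", "wt") as f:
+#     f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {counts!r}  # noqa")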
+
+# fmt: off
+LVIS_CATEGORY_IMAGE_COUNT = [{'id': 1, 'image_count': 64}, {'id': 2, 'image_count': 364}, {'id': 3, 'image_count': 1911}, {'id': 4, 'image_count': 149}, {'id': 5, 'image_count': 29}, {'id': 6, 'image_count': 26}, {'id': 7, 'image_count': 59}, {'id': 8, 'image_count': 22}, {'id': 9, 'image_count': 12}, {'id': 10, 'image_count': 28}, {'id': 11, 'image_count': 505}, {'id': 12, 'image_count': 1207}, {'id': 13, 'image_count': 4}, {'id': 14, 'image_count': 10}, {'id': 15, 'image_count': 500}, {'id': 16, 'image_count': 33}, {'id': 17, 'image_count': 3}, {'id': 18, 'image_count': 44}, {'id': 19, 'image_count': 561}, {'id': 20, 'image_count': 8}, {'id': 21, 'image_count': 9}, {'id': 22, 'image_count': 33}, {'id': 23, 'image_count': 1883}, {'id': 24, 'image_count': 98}, {'id': 25, 'image_count': 70}, {'id': 26, 'image_count': 46}, {'id': 27, 'image_count': 117}, {'id': 28, 'image_count': 41}, {'id': 29, 'image_count': 1395}, {'id': 30, 'image_count': 7}, {'id': 31, 'image_count': 1}, {'id': 32, 'image_count': 314}, {'id': 33, 'image_count': 31}, {'id': 34, 'image_count': 1905}, {'id': 35, 'image_count': 1859}, {'id': 36, 'image_count': 1623}, {'id': 37, 'image_count': 47}, {'id': 38, 'image_count': 3}, {'id': 39, 'image_count': 3}, {'id': 40, 'image_count': 1}, {'id': 41, 'image_count': 305}, {'id': 42, 'image_count': 6}, {'id': 43, 'image_count': 210}, {'id': 44, 'image_count': 36}, {'id': 45, 'image_count': 1787}, {'id': 46, 'image_count': 17}, {'id': 47, 'image_count': 51}, {'id': 48, 'image_count': 138}, {'id': 49, 'image_count': 3}, {'id': 50, 'image_count': 1470}, {'id': 51, 'image_count': 3}, {'id': 52, 'image_count': 2}, {'id': 53, 'image_count': 186}, {'id': 54, 'image_count': 76}, {'id': 55, 'image_count': 26}, {'id': 56, 'image_count': 303}, {'id': 57, 'image_count': 738}, {'id': 58, 'image_count': 1799}, {'id': 59, 'image_count': 1934}, {'id': 60, 'image_count': 1609}, {'id': 61, 'image_count': 1622}, {'id': 62, 'image_count': 41}, {'id': 63, 'image_count': 4}, {'id': 64, 'image_count': 11}, {'id': 65, 'image_count': 270}, {'id': 66, 'image_count': 349}, {'id': 67, 'image_count': 42}, {'id': 68, 'image_count': 823}, {'id': 69, 'image_count': 6}, {'id': 70, 'image_count': 48}, {'id': 71, 'image_count': 3}, {'id': 72, 'image_count': 42}, {'id': 73, 'image_count': 24}, {'id': 74, 'image_count': 16}, {'id': 75, 'image_count': 605}, {'id': 76, 'image_count': 646}, {'id': 77, 'image_count': 1765}, {'id': 78, 'image_count': 2}, {'id': 79, 'image_count': 125}, {'id': 80, 'image_count': 1420}, {'id': 81, 'image_count': 140}, {'id': 82, 'image_count': 4}, {'id': 83, 'image_count': 322}, {'id': 84, 'image_count': 60}, {'id': 85, 'image_count': 2}, {'id': 86, 'image_count': 231}, {'id': 87, 'image_count': 333}, {'id': 88, 'image_count': 1941}, {'id': 89, 'image_count': 367}, {'id': 90, 'image_count': 1922}, {'id': 91, 'image_count': 18}, {'id': 92, 'image_count': 81}, {'id': 93, 'image_count': 1}, {'id': 94, 'image_count': 1852}, {'id': 95, 'image_count': 430}, {'id': 96, 'image_count': 247}, {'id': 97, 'image_count': 94}, {'id': 98, 'image_count': 21}, {'id': 99, 'image_count': 1821}, {'id': 100, 'image_count': 16}, {'id': 101, 'image_count': 12}, {'id': 102, 'image_count': 25}, {'id': 103, 'image_count': 41}, {'id': 104, 'image_count': 244}, {'id': 105, 'image_count': 7}, {'id': 106, 'image_count': 1}, {'id': 107, 'image_count': 40}, {'id': 108, 'image_count': 40}, {'id': 109, 'image_count': 104}, {'id': 110, 'image_count': 1671}, {'id': 111, 'image_count': 49}, {'id': 112, 'image_count': 243}, 
{'id': 113, 'image_count': 2}, {'id': 114, 'image_count': 242}, {'id': 115, 'image_count': 271}, {'id': 116, 'image_count': 104}, {'id': 117, 'image_count': 8}, {'id': 118, 'image_count': 1758}, {'id': 119, 'image_count': 1}, {'id': 120, 'image_count': 48}, {'id': 121, 'image_count': 14}, {'id': 122, 'image_count': 40}, {'id': 123, 'image_count': 1}, {'id': 124, 'image_count': 37}, {'id': 125, 'image_count': 1510}, {'id': 126, 'image_count': 6}, {'id': 127, 'image_count': 1903}, {'id': 128, 'image_count': 70}, {'id': 129, 'image_count': 86}, {'id': 130, 'image_count': 7}, {'id': 131, 'image_count': 5}, {'id': 132, 'image_count': 1406}, {'id': 133, 'image_count': 1901}, {'id': 134, 'image_count': 15}, {'id': 135, 'image_count': 28}, {'id': 136, 'image_count': 6}, {'id': 137, 'image_count': 494}, {'id': 138, 'image_count': 234}, {'id': 139, 'image_count': 1922}, {'id': 140, 'image_count': 1}, {'id': 141, 'image_count': 35}, {'id': 142, 'image_count': 5}, {'id': 143, 'image_count': 1828}, {'id': 144, 'image_count': 8}, {'id': 145, 'image_count': 63}, {'id': 146, 'image_count': 1668}, {'id': 147, 'image_count': 4}, {'id': 148, 'image_count': 95}, {'id': 149, 'image_count': 17}, {'id': 150, 'image_count': 1567}, {'id': 151, 'image_count': 2}, {'id': 152, 'image_count': 103}, {'id': 153, 'image_count': 50}, {'id': 154, 'image_count': 1309}, {'id': 155, 'image_count': 6}, {'id': 156, 'image_count': 92}, {'id': 157, 'image_count': 19}, {'id': 158, 'image_count': 37}, {'id': 159, 'image_count': 4}, {'id': 160, 'image_count': 709}, {'id': 161, 'image_count': 9}, {'id': 162, 'image_count': 82}, {'id': 163, 'image_count': 15}, {'id': 164, 'image_count': 3}, {'id': 165, 'image_count': 61}, {'id': 166, 'image_count': 51}, {'id': 167, 'image_count': 5}, {'id': 168, 'image_count': 13}, {'id': 169, 'image_count': 642}, {'id': 170, 'image_count': 24}, {'id': 171, 'image_count': 255}, {'id': 172, 'image_count': 9}, {'id': 173, 'image_count': 1808}, {'id': 174, 'image_count': 31}, {'id': 175, 'image_count': 158}, {'id': 176, 'image_count': 80}, {'id': 177, 'image_count': 1884}, {'id': 178, 'image_count': 158}, {'id': 179, 'image_count': 2}, {'id': 180, 'image_count': 12}, {'id': 181, 'image_count': 1659}, {'id': 182, 'image_count': 7}, {'id': 183, 'image_count': 834}, {'id': 184, 'image_count': 57}, {'id': 185, 'image_count': 174}, {'id': 186, 'image_count': 95}, {'id': 187, 'image_count': 27}, {'id': 188, 'image_count': 22}, {'id': 189, 'image_count': 1391}, {'id': 190, 'image_count': 90}, {'id': 191, 'image_count': 40}, {'id': 192, 'image_count': 445}, {'id': 193, 'image_count': 21}, {'id': 194, 'image_count': 1132}, {'id': 195, 'image_count': 177}, {'id': 196, 'image_count': 4}, {'id': 197, 'image_count': 17}, {'id': 198, 'image_count': 84}, {'id': 199, 'image_count': 55}, {'id': 200, 'image_count': 30}, {'id': 201, 'image_count': 25}, {'id': 202, 'image_count': 2}, {'id': 203, 'image_count': 125}, {'id': 204, 'image_count': 1135}, {'id': 205, 'image_count': 19}, {'id': 206, 'image_count': 72}, {'id': 207, 'image_count': 1926}, {'id': 208, 'image_count': 159}, {'id': 209, 'image_count': 7}, {'id': 210, 'image_count': 1}, {'id': 211, 'image_count': 13}, {'id': 212, 'image_count': 35}, {'id': 213, 'image_count': 18}, {'id': 214, 'image_count': 8}, {'id': 215, 'image_count': 6}, {'id': 216, 'image_count': 35}, {'id': 217, 'image_count': 1222}, {'id': 218, 'image_count': 103}, {'id': 219, 'image_count': 28}, {'id': 220, 'image_count': 63}, {'id': 221, 'image_count': 28}, {'id': 222, 'image_count': 5}, {'id': 
223, 'image_count': 7}, {'id': 224, 'image_count': 14}, {'id': 225, 'image_count': 1918}, {'id': 226, 'image_count': 133}, {'id': 227, 'image_count': 16}, {'id': 228, 'image_count': 27}, {'id': 229, 'image_count': 110}, {'id': 230, 'image_count': 1895}, {'id': 231, 'image_count': 4}, {'id': 232, 'image_count': 1927}, {'id': 233, 'image_count': 8}, {'id': 234, 'image_count': 1}, {'id': 235, 'image_count': 263}, {'id': 236, 'image_count': 10}, {'id': 237, 'image_count': 2}, {'id': 238, 'image_count': 3}, {'id': 239, 'image_count': 87}, {'id': 240, 'image_count': 9}, {'id': 241, 'image_count': 71}, {'id': 242, 'image_count': 13}, {'id': 243, 'image_count': 18}, {'id': 244, 'image_count': 2}, {'id': 245, 'image_count': 5}, {'id': 246, 'image_count': 45}, {'id': 247, 'image_count': 1}, {'id': 248, 'image_count': 23}, {'id': 249, 'image_count': 32}, {'id': 250, 'image_count': 4}, {'id': 251, 'image_count': 1}, {'id': 252, 'image_count': 858}, {'id': 253, 'image_count': 661}, {'id': 254, 'image_count': 168}, {'id': 255, 'image_count': 210}, {'id': 256, 'image_count': 65}, {'id': 257, 'image_count': 4}, {'id': 258, 'image_count': 2}, {'id': 259, 'image_count': 159}, {'id': 260, 'image_count': 31}, {'id': 261, 'image_count': 811}, {'id': 262, 'image_count': 1}, {'id': 263, 'image_count': 42}, {'id': 264, 'image_count': 27}, {'id': 265, 'image_count': 2}, {'id': 266, 'image_count': 5}, {'id': 267, 'image_count': 95}, {'id': 268, 'image_count': 32}, {'id': 269, 'image_count': 1}, {'id': 270, 'image_count': 1}, {'id': 271, 'image_count': 1844}, {'id': 272, 'image_count': 897}, {'id': 273, 'image_count': 31}, {'id': 274, 'image_count': 23}, {'id': 275, 'image_count': 1}, {'id': 276, 'image_count': 202}, {'id': 277, 'image_count': 746}, {'id': 278, 'image_count': 44}, {'id': 279, 'image_count': 14}, {'id': 280, 'image_count': 26}, {'id': 281, 'image_count': 1}, {'id': 282, 'image_count': 2}, {'id': 283, 'image_count': 25}, {'id': 284, 'image_count': 238}, {'id': 285, 'image_count': 592}, {'id': 286, 'image_count': 26}, {'id': 287, 'image_count': 5}, {'id': 288, 'image_count': 42}, {'id': 289, 'image_count': 13}, {'id': 290, 'image_count': 46}, {'id': 291, 'image_count': 1}, {'id': 292, 'image_count': 8}, {'id': 293, 'image_count': 34}, {'id': 294, 'image_count': 5}, {'id': 295, 'image_count': 1}, {'id': 296, 'image_count': 1871}, {'id': 297, 'image_count': 717}, {'id': 298, 'image_count': 1010}, {'id': 299, 'image_count': 679}, {'id': 300, 'image_count': 3}, {'id': 301, 'image_count': 4}, {'id': 302, 'image_count': 1}, {'id': 303, 'image_count': 166}, {'id': 304, 'image_count': 2}, {'id': 305, 'image_count': 266}, {'id': 306, 'image_count': 101}, {'id': 307, 'image_count': 6}, {'id': 308, 'image_count': 14}, {'id': 309, 'image_count': 133}, {'id': 310, 'image_count': 2}, {'id': 311, 'image_count': 38}, {'id': 312, 'image_count': 95}, {'id': 313, 'image_count': 1}, {'id': 314, 'image_count': 12}, {'id': 315, 'image_count': 49}, {'id': 316, 'image_count': 5}, {'id': 317, 'image_count': 5}, {'id': 318, 'image_count': 16}, {'id': 319, 'image_count': 216}, {'id': 320, 'image_count': 12}, {'id': 321, 'image_count': 1}, {'id': 322, 'image_count': 54}, {'id': 323, 'image_count': 5}, {'id': 324, 'image_count': 245}, {'id': 325, 'image_count': 12}, {'id': 326, 'image_count': 7}, {'id': 327, 'image_count': 35}, {'id': 328, 'image_count': 36}, {'id': 329, 'image_count': 32}, {'id': 330, 'image_count': 1027}, {'id': 331, 'image_count': 10}, {'id': 332, 'image_count': 12}, {'id': 333, 'image_count': 1}, {'id': 334, 
'image_count': 67}, {'id': 335, 'image_count': 71}, {'id': 336, 'image_count': 30}, {'id': 337, 'image_count': 48}, {'id': 338, 'image_count': 249}, {'id': 339, 'image_count': 13}, {'id': 340, 'image_count': 29}, {'id': 341, 'image_count': 14}, {'id': 342, 'image_count': 236}, {'id': 343, 'image_count': 15}, {'id': 344, 'image_count': 1521}, {'id': 345, 'image_count': 25}, {'id': 346, 'image_count': 249}, {'id': 347, 'image_count': 139}, {'id': 348, 'image_count': 2}, {'id': 349, 'image_count': 2}, {'id': 350, 'image_count': 1890}, {'id': 351, 'image_count': 1240}, {'id': 352, 'image_count': 1}, {'id': 353, 'image_count': 9}, {'id': 354, 'image_count': 1}, {'id': 355, 'image_count': 3}, {'id': 356, 'image_count': 11}, {'id': 357, 'image_count': 4}, {'id': 358, 'image_count': 236}, {'id': 359, 'image_count': 44}, {'id': 360, 'image_count': 19}, {'id': 361, 'image_count': 1100}, {'id': 362, 'image_count': 7}, {'id': 363, 'image_count': 69}, {'id': 364, 'image_count': 2}, {'id': 365, 'image_count': 8}, {'id': 366, 'image_count': 5}, {'id': 367, 'image_count': 227}, {'id': 368, 'image_count': 6}, {'id': 369, 'image_count': 106}, {'id': 370, 'image_count': 81}, {'id': 371, 'image_count': 17}, {'id': 372, 'image_count': 134}, {'id': 373, 'image_count': 312}, {'id': 374, 'image_count': 8}, {'id': 375, 'image_count': 271}, {'id': 376, 'image_count': 2}, {'id': 377, 'image_count': 103}, {'id': 378, 'image_count': 1938}, {'id': 379, 'image_count': 574}, {'id': 380, 'image_count': 120}, {'id': 381, 'image_count': 2}, {'id': 382, 'image_count': 2}, {'id': 383, 'image_count': 13}, {'id': 384, 'image_count': 29}, {'id': 385, 'image_count': 1710}, {'id': 386, 'image_count': 66}, {'id': 387, 'image_count': 1008}, {'id': 388, 'image_count': 1}, {'id': 389, 'image_count': 3}, {'id': 390, 'image_count': 1942}, {'id': 391, 'image_count': 19}, {'id': 392, 'image_count': 1488}, {'id': 393, 'image_count': 46}, {'id': 394, 'image_count': 106}, {'id': 395, 'image_count': 115}, {'id': 396, 'image_count': 19}, {'id': 397, 'image_count': 2}, {'id': 398, 'image_count': 1}, {'id': 399, 'image_count': 28}, {'id': 400, 'image_count': 9}, {'id': 401, 'image_count': 192}, {'id': 402, 'image_count': 12}, {'id': 403, 'image_count': 21}, {'id': 404, 'image_count': 247}, {'id': 405, 'image_count': 6}, {'id': 406, 'image_count': 64}, {'id': 407, 'image_count': 7}, {'id': 408, 'image_count': 40}, {'id': 409, 'image_count': 542}, {'id': 410, 'image_count': 2}, {'id': 411, 'image_count': 1898}, {'id': 412, 'image_count': 36}, {'id': 413, 'image_count': 4}, {'id': 414, 'image_count': 1}, {'id': 415, 'image_count': 191}, {'id': 416, 'image_count': 6}, {'id': 417, 'image_count': 41}, {'id': 418, 'image_count': 39}, {'id': 419, 'image_count': 46}, {'id': 420, 'image_count': 1}, {'id': 421, 'image_count': 1451}, {'id': 422, 'image_count': 1878}, {'id': 423, 'image_count': 11}, {'id': 424, 'image_count': 82}, {'id': 425, 'image_count': 18}, {'id': 426, 'image_count': 1}, {'id': 427, 'image_count': 7}, {'id': 428, 'image_count': 3}, {'id': 429, 'image_count': 575}, {'id': 430, 'image_count': 1907}, {'id': 431, 'image_count': 8}, {'id': 432, 'image_count': 4}, {'id': 433, 'image_count': 32}, {'id': 434, 'image_count': 11}, {'id': 435, 'image_count': 4}, {'id': 436, 'image_count': 54}, {'id': 437, 'image_count': 202}, {'id': 438, 'image_count': 32}, {'id': 439, 'image_count': 3}, {'id': 440, 'image_count': 130}, {'id': 441, 'image_count': 119}, {'id': 442, 'image_count': 141}, {'id': 443, 'image_count': 29}, {'id': 444, 'image_count': 
525}, {'id': 445, 'image_count': 1323}, {'id': 446, 'image_count': 2}, {'id': 447, 'image_count': 113}, {'id': 448, 'image_count': 16}, {'id': 449, 'image_count': 7}, {'id': 450, 'image_count': 35}, {'id': 451, 'image_count': 1908}, {'id': 452, 'image_count': 353}, {'id': 453, 'image_count': 18}, {'id': 454, 'image_count': 14}, {'id': 455, 'image_count': 77}, {'id': 456, 'image_count': 8}, {'id': 457, 'image_count': 37}, {'id': 458, 'image_count': 1}, {'id': 459, 'image_count': 346}, {'id': 460, 'image_count': 19}, {'id': 461, 'image_count': 1779}, {'id': 462, 'image_count': 23}, {'id': 463, 'image_count': 25}, {'id': 464, 'image_count': 67}, {'id': 465, 'image_count': 19}, {'id': 466, 'image_count': 28}, {'id': 467, 'image_count': 4}, {'id': 468, 'image_count': 27}, {'id': 469, 'image_count': 1861}, {'id': 470, 'image_count': 11}, {'id': 471, 'image_count': 13}, {'id': 472, 'image_count': 13}, {'id': 473, 'image_count': 32}, {'id': 474, 'image_count': 1767}, {'id': 475, 'image_count': 42}, {'id': 476, 'image_count': 17}, {'id': 477, 'image_count': 128}, {'id': 478, 'image_count': 1}, {'id': 479, 'image_count': 9}, {'id': 480, 'image_count': 10}, {'id': 481, 'image_count': 4}, {'id': 482, 'image_count': 9}, {'id': 483, 'image_count': 18}, {'id': 484, 'image_count': 41}, {'id': 485, 'image_count': 28}, {'id': 486, 'image_count': 3}, {'id': 487, 'image_count': 65}, {'id': 488, 'image_count': 9}, {'id': 489, 'image_count': 23}, {'id': 490, 'image_count': 24}, {'id': 491, 'image_count': 1}, {'id': 492, 'image_count': 2}, {'id': 493, 'image_count': 59}, {'id': 494, 'image_count': 48}, {'id': 495, 'image_count': 17}, {'id': 496, 'image_count': 1877}, {'id': 497, 'image_count': 18}, {'id': 498, 'image_count': 1920}, {'id': 499, 'image_count': 50}, {'id': 500, 'image_count': 1890}, {'id': 501, 'image_count': 99}, {'id': 502, 'image_count': 1530}, {'id': 503, 'image_count': 3}, {'id': 504, 'image_count': 11}, {'id': 505, 'image_count': 19}, {'id': 506, 'image_count': 3}, {'id': 507, 'image_count': 63}, {'id': 508, 'image_count': 5}, {'id': 509, 'image_count': 6}, {'id': 510, 'image_count': 233}, {'id': 511, 'image_count': 54}, {'id': 512, 'image_count': 36}, {'id': 513, 'image_count': 10}, {'id': 514, 'image_count': 124}, {'id': 515, 'image_count': 101}, {'id': 516, 'image_count': 3}, {'id': 517, 'image_count': 363}, {'id': 518, 'image_count': 3}, {'id': 519, 'image_count': 30}, {'id': 520, 'image_count': 18}, {'id': 521, 'image_count': 199}, {'id': 522, 'image_count': 97}, {'id': 523, 'image_count': 32}, {'id': 524, 'image_count': 121}, {'id': 525, 'image_count': 16}, {'id': 526, 'image_count': 12}, {'id': 527, 'image_count': 2}, {'id': 528, 'image_count': 214}, {'id': 529, 'image_count': 48}, {'id': 530, 'image_count': 26}, {'id': 531, 'image_count': 13}, {'id': 532, 'image_count': 4}, {'id': 533, 'image_count': 11}, {'id': 534, 'image_count': 123}, {'id': 535, 'image_count': 7}, {'id': 536, 'image_count': 200}, {'id': 537, 'image_count': 91}, {'id': 538, 'image_count': 9}, {'id': 539, 'image_count': 72}, {'id': 540, 'image_count': 1886}, {'id': 541, 'image_count': 4}, {'id': 542, 'image_count': 1}, {'id': 543, 'image_count': 1}, {'id': 544, 'image_count': 1932}, {'id': 545, 'image_count': 4}, {'id': 546, 'image_count': 56}, {'id': 547, 'image_count': 854}, {'id': 548, 'image_count': 755}, {'id': 549, 'image_count': 1843}, {'id': 550, 'image_count': 96}, {'id': 551, 'image_count': 7}, {'id': 552, 'image_count': 74}, {'id': 553, 'image_count': 66}, {'id': 554, 'image_count': 57}, {'id': 555, 
'image_count': 44}, {'id': 556, 'image_count': 1905}, {'id': 557, 'image_count': 4}, {'id': 558, 'image_count': 90}, {'id': 559, 'image_count': 1635}, {'id': 560, 'image_count': 8}, {'id': 561, 'image_count': 5}, {'id': 562, 'image_count': 50}, {'id': 563, 'image_count': 545}, {'id': 564, 'image_count': 20}, {'id': 565, 'image_count': 193}, {'id': 566, 'image_count': 285}, {'id': 567, 'image_count': 3}, {'id': 568, 'image_count': 1}, {'id': 569, 'image_count': 1904}, {'id': 570, 'image_count': 294}, {'id': 571, 'image_count': 3}, {'id': 572, 'image_count': 5}, {'id': 573, 'image_count': 24}, {'id': 574, 'image_count': 2}, {'id': 575, 'image_count': 2}, {'id': 576, 'image_count': 16}, {'id': 577, 'image_count': 8}, {'id': 578, 'image_count': 154}, {'id': 579, 'image_count': 66}, {'id': 580, 'image_count': 1}, {'id': 581, 'image_count': 24}, {'id': 582, 'image_count': 1}, {'id': 583, 'image_count': 4}, {'id': 584, 'image_count': 75}, {'id': 585, 'image_count': 6}, {'id': 586, 'image_count': 126}, {'id': 587, 'image_count': 24}, {'id': 588, 'image_count': 22}, {'id': 589, 'image_count': 1872}, {'id': 590, 'image_count': 16}, {'id': 591, 'image_count': 423}, {'id': 592, 'image_count': 1927}, {'id': 593, 'image_count': 38}, {'id': 594, 'image_count': 3}, {'id': 595, 'image_count': 1945}, {'id': 596, 'image_count': 35}, {'id': 597, 'image_count': 1}, {'id': 598, 'image_count': 13}, {'id': 599, 'image_count': 9}, {'id': 600, 'image_count': 14}, {'id': 601, 'image_count': 37}, {'id': 602, 'image_count': 3}, {'id': 603, 'image_count': 4}, {'id': 604, 'image_count': 100}, {'id': 605, 'image_count': 195}, {'id': 606, 'image_count': 1}, {'id': 607, 'image_count': 12}, {'id': 608, 'image_count': 24}, {'id': 609, 'image_count': 489}, {'id': 610, 'image_count': 10}, {'id': 611, 'image_count': 1689}, {'id': 612, 'image_count': 42}, {'id': 613, 'image_count': 81}, {'id': 614, 'image_count': 894}, {'id': 615, 'image_count': 1868}, {'id': 616, 'image_count': 7}, {'id': 617, 'image_count': 1567}, {'id': 618, 'image_count': 10}, {'id': 619, 'image_count': 8}, {'id': 620, 'image_count': 7}, {'id': 621, 'image_count': 629}, {'id': 622, 'image_count': 89}, {'id': 623, 'image_count': 15}, {'id': 624, 'image_count': 134}, {'id': 625, 'image_count': 4}, {'id': 626, 'image_count': 1802}, {'id': 627, 'image_count': 595}, {'id': 628, 'image_count': 1210}, {'id': 629, 'image_count': 48}, {'id': 630, 'image_count': 418}, {'id': 631, 'image_count': 1846}, {'id': 632, 'image_count': 5}, {'id': 633, 'image_count': 221}, {'id': 634, 'image_count': 10}, {'id': 635, 'image_count': 7}, {'id': 636, 'image_count': 76}, {'id': 637, 'image_count': 22}, {'id': 638, 'image_count': 10}, {'id': 639, 'image_count': 341}, {'id': 640, 'image_count': 1}, {'id': 641, 'image_count': 705}, {'id': 642, 'image_count': 1900}, {'id': 643, 'image_count': 188}, {'id': 644, 'image_count': 227}, {'id': 645, 'image_count': 861}, {'id': 646, 'image_count': 6}, {'id': 647, 'image_count': 115}, {'id': 648, 'image_count': 5}, {'id': 649, 'image_count': 43}, {'id': 650, 'image_count': 14}, {'id': 651, 'image_count': 6}, {'id': 652, 'image_count': 15}, {'id': 653, 'image_count': 1167}, {'id': 654, 'image_count': 15}, {'id': 655, 'image_count': 994}, {'id': 656, 'image_count': 28}, {'id': 657, 'image_count': 2}, {'id': 658, 'image_count': 338}, {'id': 659, 'image_count': 334}, {'id': 660, 'image_count': 15}, {'id': 661, 'image_count': 102}, {'id': 662, 'image_count': 1}, {'id': 663, 'image_count': 8}, {'id': 664, 'image_count': 1}, {'id': 665, 'image_count': 
1}, {'id': 666, 'image_count': 28}, {'id': 667, 'image_count': 91}, {'id': 668, 'image_count': 260}, {'id': 669, 'image_count': 131}, {'id': 670, 'image_count': 128}, {'id': 671, 'image_count': 3}, {'id': 672, 'image_count': 10}, {'id': 673, 'image_count': 39}, {'id': 674, 'image_count': 2}, {'id': 675, 'image_count': 925}, {'id': 676, 'image_count': 354}, {'id': 677, 'image_count': 31}, {'id': 678, 'image_count': 10}, {'id': 679, 'image_count': 215}, {'id': 680, 'image_count': 71}, {'id': 681, 'image_count': 43}, {'id': 682, 'image_count': 28}, {'id': 683, 'image_count': 34}, {'id': 684, 'image_count': 16}, {'id': 685, 'image_count': 273}, {'id': 686, 'image_count': 2}, {'id': 687, 'image_count': 999}, {'id': 688, 'image_count': 4}, {'id': 689, 'image_count': 107}, {'id': 690, 'image_count': 2}, {'id': 691, 'image_count': 1}, {'id': 692, 'image_count': 454}, {'id': 693, 'image_count': 9}, {'id': 694, 'image_count': 1901}, {'id': 695, 'image_count': 61}, {'id': 696, 'image_count': 91}, {'id': 697, 'image_count': 46}, {'id': 698, 'image_count': 1402}, {'id': 699, 'image_count': 74}, {'id': 700, 'image_count': 421}, {'id': 701, 'image_count': 226}, {'id': 702, 'image_count': 10}, {'id': 703, 'image_count': 1720}, {'id': 704, 'image_count': 261}, {'id': 705, 'image_count': 1337}, {'id': 706, 'image_count': 293}, {'id': 707, 'image_count': 62}, {'id': 708, 'image_count': 814}, {'id': 709, 'image_count': 407}, {'id': 710, 'image_count': 6}, {'id': 711, 'image_count': 16}, {'id': 712, 'image_count': 7}, {'id': 713, 'image_count': 1791}, {'id': 714, 'image_count': 2}, {'id': 715, 'image_count': 1915}, {'id': 716, 'image_count': 1940}, {'id': 717, 'image_count': 13}, {'id': 718, 'image_count': 16}, {'id': 719, 'image_count': 448}, {'id': 720, 'image_count': 12}, {'id': 721, 'image_count': 18}, {'id': 722, 'image_count': 4}, {'id': 723, 'image_count': 71}, {'id': 724, 'image_count': 189}, {'id': 725, 'image_count': 74}, {'id': 726, 'image_count': 103}, {'id': 727, 'image_count': 3}, {'id': 728, 'image_count': 110}, {'id': 729, 'image_count': 5}, {'id': 730, 'image_count': 9}, {'id': 731, 'image_count': 15}, {'id': 732, 'image_count': 25}, {'id': 733, 'image_count': 7}, {'id': 734, 'image_count': 647}, {'id': 735, 'image_count': 824}, {'id': 736, 'image_count': 100}, {'id': 737, 'image_count': 47}, {'id': 738, 'image_count': 121}, {'id': 739, 'image_count': 731}, {'id': 740, 'image_count': 73}, {'id': 741, 'image_count': 49}, {'id': 742, 'image_count': 23}, {'id': 743, 'image_count': 4}, {'id': 744, 'image_count': 62}, {'id': 745, 'image_count': 118}, {'id': 746, 'image_count': 99}, {'id': 747, 'image_count': 40}, {'id': 748, 'image_count': 1036}, {'id': 749, 'image_count': 105}, {'id': 750, 'image_count': 21}, {'id': 751, 'image_count': 229}, {'id': 752, 'image_count': 7}, {'id': 753, 'image_count': 72}, {'id': 754, 'image_count': 9}, {'id': 755, 'image_count': 10}, {'id': 756, 'image_count': 328}, {'id': 757, 'image_count': 468}, {'id': 758, 'image_count': 1}, {'id': 759, 'image_count': 2}, {'id': 760, 'image_count': 24}, {'id': 761, 'image_count': 11}, {'id': 762, 'image_count': 72}, {'id': 763, 'image_count': 17}, {'id': 764, 'image_count': 10}, {'id': 765, 'image_count': 17}, {'id': 766, 'image_count': 489}, {'id': 767, 'image_count': 47}, {'id': 768, 'image_count': 93}, {'id': 769, 'image_count': 1}, {'id': 770, 'image_count': 12}, {'id': 771, 'image_count': 228}, {'id': 772, 'image_count': 5}, {'id': 773, 'image_count': 76}, {'id': 774, 'image_count': 71}, {'id': 775, 'image_count': 30}, 
{'id': 776, 'image_count': 109}, {'id': 777, 'image_count': 14}, {'id': 778, 'image_count': 1}, {'id': 779, 'image_count': 8}, {'id': 780, 'image_count': 26}, {'id': 781, 'image_count': 339}, {'id': 782, 'image_count': 153}, {'id': 783, 'image_count': 2}, {'id': 784, 'image_count': 3}, {'id': 785, 'image_count': 8}, {'id': 786, 'image_count': 47}, {'id': 787, 'image_count': 8}, {'id': 788, 'image_count': 6}, {'id': 789, 'image_count': 116}, {'id': 790, 'image_count': 69}, {'id': 791, 'image_count': 13}, {'id': 792, 'image_count': 6}, {'id': 793, 'image_count': 1928}, {'id': 794, 'image_count': 79}, {'id': 795, 'image_count': 14}, {'id': 796, 'image_count': 7}, {'id': 797, 'image_count': 20}, {'id': 798, 'image_count': 114}, {'id': 799, 'image_count': 221}, {'id': 800, 'image_count': 502}, {'id': 801, 'image_count': 62}, {'id': 802, 'image_count': 87}, {'id': 803, 'image_count': 4}, {'id': 804, 'image_count': 1912}, {'id': 805, 'image_count': 7}, {'id': 806, 'image_count': 186}, {'id': 807, 'image_count': 18}, {'id': 808, 'image_count': 4}, {'id': 809, 'image_count': 3}, {'id': 810, 'image_count': 7}, {'id': 811, 'image_count': 1413}, {'id': 812, 'image_count': 7}, {'id': 813, 'image_count': 12}, {'id': 814, 'image_count': 248}, {'id': 815, 'image_count': 4}, {'id': 816, 'image_count': 1881}, {'id': 817, 'image_count': 529}, {'id': 818, 'image_count': 1932}, {'id': 819, 'image_count': 50}, {'id': 820, 'image_count': 3}, {'id': 821, 'image_count': 28}, {'id': 822, 'image_count': 10}, {'id': 823, 'image_count': 5}, {'id': 824, 'image_count': 5}, {'id': 825, 'image_count': 18}, {'id': 826, 'image_count': 14}, {'id': 827, 'image_count': 1890}, {'id': 828, 'image_count': 660}, {'id': 829, 'image_count': 8}, {'id': 830, 'image_count': 25}, {'id': 831, 'image_count': 10}, {'id': 832, 'image_count': 218}, {'id': 833, 'image_count': 36}, {'id': 834, 'image_count': 16}, {'id': 835, 'image_count': 808}, {'id': 836, 'image_count': 479}, {'id': 837, 'image_count': 1404}, {'id': 838, 'image_count': 307}, {'id': 839, 'image_count': 57}, {'id': 840, 'image_count': 28}, {'id': 841, 'image_count': 80}, {'id': 842, 'image_count': 11}, {'id': 843, 'image_count': 92}, {'id': 844, 'image_count': 20}, {'id': 845, 'image_count': 194}, {'id': 846, 'image_count': 23}, {'id': 847, 'image_count': 52}, {'id': 848, 'image_count': 673}, {'id': 849, 'image_count': 2}, {'id': 850, 'image_count': 2}, {'id': 851, 'image_count': 1}, {'id': 852, 'image_count': 2}, {'id': 853, 'image_count': 8}, {'id': 854, 'image_count': 80}, {'id': 855, 'image_count': 3}, {'id': 856, 'image_count': 3}, {'id': 857, 'image_count': 15}, {'id': 858, 'image_count': 2}, {'id': 859, 'image_count': 10}, {'id': 860, 'image_count': 386}, {'id': 861, 'image_count': 65}, {'id': 862, 'image_count': 3}, {'id': 863, 'image_count': 35}, {'id': 864, 'image_count': 5}, {'id': 865, 'image_count': 180}, {'id': 866, 'image_count': 99}, {'id': 867, 'image_count': 49}, {'id': 868, 'image_count': 28}, {'id': 869, 'image_count': 1}, {'id': 870, 'image_count': 52}, {'id': 871, 'image_count': 36}, {'id': 872, 'image_count': 70}, {'id': 873, 'image_count': 6}, {'id': 874, 'image_count': 29}, {'id': 875, 'image_count': 24}, {'id': 876, 'image_count': 1115}, {'id': 877, 'image_count': 61}, {'id': 878, 'image_count': 18}, {'id': 879, 'image_count': 18}, {'id': 880, 'image_count': 665}, {'id': 881, 'image_count': 1096}, {'id': 882, 'image_count': 29}, {'id': 883, 'image_count': 8}, {'id': 884, 'image_count': 14}, {'id': 885, 'image_count': 1622}, {'id': 886, 'image_count': 
2}, {'id': 887, 'image_count': 3}, {'id': 888, 'image_count': 32}, {'id': 889, 'image_count': 55}, {'id': 890, 'image_count': 1}, {'id': 891, 'image_count': 10}, {'id': 892, 'image_count': 10}, {'id': 893, 'image_count': 47}, {'id': 894, 'image_count': 3}, {'id': 895, 'image_count': 29}, {'id': 896, 'image_count': 342}, {'id': 897, 'image_count': 25}, {'id': 898, 'image_count': 1469}, {'id': 899, 'image_count': 521}, {'id': 900, 'image_count': 347}, {'id': 901, 'image_count': 35}, {'id': 902, 'image_count': 7}, {'id': 903, 'image_count': 207}, {'id': 904, 'image_count': 108}, {'id': 905, 'image_count': 2}, {'id': 906, 'image_count': 34}, {'id': 907, 'image_count': 12}, {'id': 908, 'image_count': 10}, {'id': 909, 'image_count': 13}, {'id': 910, 'image_count': 361}, {'id': 911, 'image_count': 1023}, {'id': 912, 'image_count': 782}, {'id': 913, 'image_count': 2}, {'id': 914, 'image_count': 5}, {'id': 915, 'image_count': 247}, {'id': 916, 'image_count': 221}, {'id': 917, 'image_count': 4}, {'id': 918, 'image_count': 8}, {'id': 919, 'image_count': 158}, {'id': 920, 'image_count': 3}, {'id': 921, 'image_count': 752}, {'id': 922, 'image_count': 64}, {'id': 923, 'image_count': 707}, {'id': 924, 'image_count': 143}, {'id': 925, 'image_count': 1}, {'id': 926, 'image_count': 49}, {'id': 927, 'image_count': 126}, {'id': 928, 'image_count': 76}, {'id': 929, 'image_count': 11}, {'id': 930, 'image_count': 11}, {'id': 931, 'image_count': 4}, {'id': 932, 'image_count': 39}, {'id': 933, 'image_count': 11}, {'id': 934, 'image_count': 13}, {'id': 935, 'image_count': 91}, {'id': 936, 'image_count': 14}, {'id': 937, 'image_count': 5}, {'id': 938, 'image_count': 3}, {'id': 939, 'image_count': 10}, {'id': 940, 'image_count': 18}, {'id': 941, 'image_count': 9}, {'id': 942, 'image_count': 6}, {'id': 943, 'image_count': 951}, {'id': 944, 'image_count': 2}, {'id': 945, 'image_count': 1}, {'id': 946, 'image_count': 19}, {'id': 947, 'image_count': 1942}, {'id': 948, 'image_count': 1916}, {'id': 949, 'image_count': 139}, {'id': 950, 'image_count': 43}, {'id': 951, 'image_count': 1969}, {'id': 952, 'image_count': 5}, {'id': 953, 'image_count': 134}, {'id': 954, 'image_count': 74}, {'id': 955, 'image_count': 381}, {'id': 956, 'image_count': 1}, {'id': 957, 'image_count': 381}, {'id': 958, 'image_count': 6}, {'id': 959, 'image_count': 1826}, {'id': 960, 'image_count': 28}, {'id': 961, 'image_count': 1635}, {'id': 962, 'image_count': 1967}, {'id': 963, 'image_count': 16}, {'id': 964, 'image_count': 1926}, {'id': 965, 'image_count': 1789}, {'id': 966, 'image_count': 401}, {'id': 967, 'image_count': 1968}, {'id': 968, 'image_count': 1167}, {'id': 969, 'image_count': 1}, {'id': 970, 'image_count': 56}, {'id': 971, 'image_count': 17}, {'id': 972, 'image_count': 1}, {'id': 973, 'image_count': 58}, {'id': 974, 'image_count': 9}, {'id': 975, 'image_count': 8}, {'id': 976, 'image_count': 1124}, {'id': 977, 'image_count': 31}, {'id': 978, 'image_count': 16}, {'id': 979, 'image_count': 491}, {'id': 980, 'image_count': 432}, {'id': 981, 'image_count': 1945}, {'id': 982, 'image_count': 1899}, {'id': 983, 'image_count': 5}, {'id': 984, 'image_count': 28}, {'id': 985, 'image_count': 7}, {'id': 986, 'image_count': 146}, {'id': 987, 'image_count': 1}, {'id': 988, 'image_count': 25}, {'id': 989, 'image_count': 22}, {'id': 990, 'image_count': 1}, {'id': 991, 'image_count': 10}, {'id': 992, 'image_count': 9}, {'id': 993, 'image_count': 308}, {'id': 994, 'image_count': 4}, {'id': 995, 'image_count': 1969}, {'id': 996, 'image_count': 45}, 
{'id': 997, 'image_count': 12}, {'id': 998, 'image_count': 1}, {'id': 999, 'image_count': 85}, {'id': 1000, 'image_count': 1127}, {'id': 1001, 'image_count': 11}, {'id': 1002, 'image_count': 60}, {'id': 1003, 'image_count': 1}, {'id': 1004, 'image_count': 16}, {'id': 1005, 'image_count': 1}, {'id': 1006, 'image_count': 65}, {'id': 1007, 'image_count': 13}, {'id': 1008, 'image_count': 655}, {'id': 1009, 'image_count': 51}, {'id': 1010, 'image_count': 1}, {'id': 1011, 'image_count': 673}, {'id': 1012, 'image_count': 5}, {'id': 1013, 'image_count': 36}, {'id': 1014, 'image_count': 54}, {'id': 1015, 'image_count': 5}, {'id': 1016, 'image_count': 8}, {'id': 1017, 'image_count': 305}, {'id': 1018, 'image_count': 297}, {'id': 1019, 'image_count': 1053}, {'id': 1020, 'image_count': 223}, {'id': 1021, 'image_count': 1037}, {'id': 1022, 'image_count': 63}, {'id': 1023, 'image_count': 1881}, {'id': 1024, 'image_count': 507}, {'id': 1025, 'image_count': 333}, {'id': 1026, 'image_count': 1911}, {'id': 1027, 'image_count': 1765}, {'id': 1028, 'image_count': 1}, {'id': 1029, 'image_count': 5}, {'id': 1030, 'image_count': 1}, {'id': 1031, 'image_count': 9}, {'id': 1032, 'image_count': 2}, {'id': 1033, 'image_count': 151}, {'id': 1034, 'image_count': 82}, {'id': 1035, 'image_count': 1931}, {'id': 1036, 'image_count': 41}, {'id': 1037, 'image_count': 1895}, {'id': 1038, 'image_count': 24}, {'id': 1039, 'image_count': 22}, {'id': 1040, 'image_count': 35}, {'id': 1041, 'image_count': 69}, {'id': 1042, 'image_count': 962}, {'id': 1043, 'image_count': 588}, {'id': 1044, 'image_count': 21}, {'id': 1045, 'image_count': 825}, {'id': 1046, 'image_count': 52}, {'id': 1047, 'image_count': 5}, {'id': 1048, 'image_count': 5}, {'id': 1049, 'image_count': 5}, {'id': 1050, 'image_count': 1860}, {'id': 1051, 'image_count': 56}, {'id': 1052, 'image_count': 1582}, {'id': 1053, 'image_count': 7}, {'id': 1054, 'image_count': 2}, {'id': 1055, 'image_count': 1562}, {'id': 1056, 'image_count': 1885}, {'id': 1057, 'image_count': 1}, {'id': 1058, 'image_count': 5}, {'id': 1059, 'image_count': 137}, {'id': 1060, 'image_count': 1094}, {'id': 1061, 'image_count': 134}, {'id': 1062, 'image_count': 29}, {'id': 1063, 'image_count': 22}, {'id': 1064, 'image_count': 522}, {'id': 1065, 'image_count': 50}, {'id': 1066, 'image_count': 68}, {'id': 1067, 'image_count': 16}, {'id': 1068, 'image_count': 40}, {'id': 1069, 'image_count': 35}, {'id': 1070, 'image_count': 135}, {'id': 1071, 'image_count': 1413}, {'id': 1072, 'image_count': 772}, {'id': 1073, 'image_count': 50}, {'id': 1074, 'image_count': 1015}, {'id': 1075, 'image_count': 1}, {'id': 1076, 'image_count': 65}, {'id': 1077, 'image_count': 1900}, {'id': 1078, 'image_count': 1302}, {'id': 1079, 'image_count': 1977}, {'id': 1080, 'image_count': 2}, {'id': 1081, 'image_count': 29}, {'id': 1082, 'image_count': 36}, {'id': 1083, 'image_count': 138}, {'id': 1084, 'image_count': 4}, {'id': 1085, 'image_count': 67}, {'id': 1086, 'image_count': 26}, {'id': 1087, 'image_count': 25}, {'id': 1088, 'image_count': 33}, {'id': 1089, 'image_count': 37}, {'id': 1090, 'image_count': 50}, {'id': 1091, 'image_count': 270}, {'id': 1092, 'image_count': 12}, {'id': 1093, 'image_count': 316}, {'id': 1094, 'image_count': 41}, {'id': 1095, 'image_count': 224}, {'id': 1096, 'image_count': 105}, {'id': 1097, 'image_count': 1925}, {'id': 1098, 'image_count': 1021}, {'id': 1099, 'image_count': 1213}, {'id': 1100, 'image_count': 172}, {'id': 1101, 'image_count': 28}, {'id': 1102, 'image_count': 745}, {'id': 1103, 
'image_count': 187}, {'id': 1104, 'image_count': 147}, {'id': 1105, 'image_count': 136}, {'id': 1106, 'image_count': 34}, {'id': 1107, 'image_count': 41}, {'id': 1108, 'image_count': 636}, {'id': 1109, 'image_count': 570}, {'id': 1110, 'image_count': 1149}, {'id': 1111, 'image_count': 61}, {'id': 1112, 'image_count': 1890}, {'id': 1113, 'image_count': 18}, {'id': 1114, 'image_count': 143}, {'id': 1115, 'image_count': 1517}, {'id': 1116, 'image_count': 7}, {'id': 1117, 'image_count': 943}, {'id': 1118, 'image_count': 6}, {'id': 1119, 'image_count': 1}, {'id': 1120, 'image_count': 11}, {'id': 1121, 'image_count': 101}, {'id': 1122, 'image_count': 1909}, {'id': 1123, 'image_count': 800}, {'id': 1124, 'image_count': 1}, {'id': 1125, 'image_count': 44}, {'id': 1126, 'image_count': 3}, {'id': 1127, 'image_count': 44}, {'id': 1128, 'image_count': 31}, {'id': 1129, 'image_count': 7}, {'id': 1130, 'image_count': 20}, {'id': 1131, 'image_count': 11}, {'id': 1132, 'image_count': 13}, {'id': 1133, 'image_count': 1924}, {'id': 1134, 'image_count': 113}, {'id': 1135, 'image_count': 2}, {'id': 1136, 'image_count': 139}, {'id': 1137, 'image_count': 12}, {'id': 1138, 'image_count': 37}, {'id': 1139, 'image_count': 1866}, {'id': 1140, 'image_count': 47}, {'id': 1141, 'image_count': 1468}, {'id': 1142, 'image_count': 729}, {'id': 1143, 'image_count': 24}, {'id': 1144, 'image_count': 1}, {'id': 1145, 'image_count': 10}, {'id': 1146, 'image_count': 3}, {'id': 1147, 'image_count': 14}, {'id': 1148, 'image_count': 4}, {'id': 1149, 'image_count': 29}, {'id': 1150, 'image_count': 4}, {'id': 1151, 'image_count': 70}, {'id': 1152, 'image_count': 46}, {'id': 1153, 'image_count': 14}, {'id': 1154, 'image_count': 48}, {'id': 1155, 'image_count': 1855}, {'id': 1156, 'image_count': 113}, {'id': 1157, 'image_count': 1}, {'id': 1158, 'image_count': 1}, {'id': 1159, 'image_count': 10}, {'id': 1160, 'image_count': 54}, {'id': 1161, 'image_count': 1923}, {'id': 1162, 'image_count': 630}, {'id': 1163, 'image_count': 31}, {'id': 1164, 'image_count': 69}, {'id': 1165, 'image_count': 7}, {'id': 1166, 'image_count': 11}, {'id': 1167, 'image_count': 1}, {'id': 1168, 'image_count': 30}, {'id': 1169, 'image_count': 50}, {'id': 1170, 'image_count': 45}, {'id': 1171, 'image_count': 28}, {'id': 1172, 'image_count': 114}, {'id': 1173, 'image_count': 193}, {'id': 1174, 'image_count': 21}, {'id': 1175, 'image_count': 91}, {'id': 1176, 'image_count': 31}, {'id': 1177, 'image_count': 1469}, {'id': 1178, 'image_count': 1924}, {'id': 1179, 'image_count': 87}, {'id': 1180, 'image_count': 77}, {'id': 1181, 'image_count': 11}, {'id': 1182, 'image_count': 47}, {'id': 1183, 'image_count': 21}, {'id': 1184, 'image_count': 47}, {'id': 1185, 'image_count': 70}, {'id': 1186, 'image_count': 1838}, {'id': 1187, 'image_count': 19}, {'id': 1188, 'image_count': 531}, {'id': 1189, 'image_count': 11}, {'id': 1190, 'image_count': 941}, {'id': 1191, 'image_count': 113}, {'id': 1192, 'image_count': 26}, {'id': 1193, 'image_count': 5}, {'id': 1194, 'image_count': 56}, {'id': 1195, 'image_count': 73}, {'id': 1196, 'image_count': 32}, {'id': 1197, 'image_count': 128}, {'id': 1198, 'image_count': 623}, {'id': 1199, 'image_count': 12}, {'id': 1200, 'image_count': 52}, {'id': 1201, 'image_count': 11}, {'id': 1202, 'image_count': 1674}, {'id': 1203, 'image_count': 81}] # noqa
+# fmt: on
diff --git a/detectron2/data/datasets/pascal_voc.py b/detectron2/data/datasets/pascal_voc.py
new file mode 100644
index 0000000000000000000000000000000000000000..46f8536ad26f4d47a53a95bed62548d8aff5047e
--- /dev/null
+++ b/detectron2/data/datasets/pascal_voc.py
@@ -0,0 +1,82 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import numpy as np
+import os
+import xml.etree.ElementTree as ET
+from typing import List, Tuple, Union
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.structures import BoxMode
+from detectron2.utils.file_io import PathManager
+
+__all__ = ["load_voc_instances", "register_pascal_voc"]
+
+
+# fmt: off
+CLASS_NAMES = (
+ "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
+ "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
+ "pottedplant", "sheep", "sofa", "train", "tvmonitor"
+)
+# fmt: on
+
+
+def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
+ """
+ Load Pascal VOC detection annotations to Detectron2 format.
+
+ Args:
+ dirname: directory containing "Annotations", "ImageSets", "JPEGImages"
+ split (str): one of "train", "test", "val", "trainval"
+ class_names: list or tuple of class names
+ """
+ with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
+ fileids = np.loadtxt(f, dtype=str)
+
+ # Needs to read many small annotation files; get a local copy of the directory first.
+ annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
+ dicts = []
+ for fileid in fileids:
+ anno_file = os.path.join(annotation_dirname, fileid + ".xml")
+ jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
+
+ with PathManager.open(anno_file) as f:
+ tree = ET.parse(f)
+
+ r = {
+ "file_name": jpeg_file,
+ "image_id": fileid,
+ "height": int(tree.findall("./size/height")[0].text),
+ "width": int(tree.findall("./size/width")[0].text),
+ }
+ instances = []
+
+ for obj in tree.findall("object"):
+ cls = obj.find("name").text
+ # We include "difficult" samples in training.
+ # Based on limited experiments, they don't hurt accuracy.
+ # difficult = int(obj.find("difficult").text)
+ # if difficult == 1:
+ # continue
+ bbox = obj.find("bndbox")
+ bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
+ # Original annotations are integers in the range [1, W or H]
+ # Assuming they mean 1-based pixel indices (inclusive),
+ # a box with annotation (xmin=1, xmax=W) covers the whole image.
+ # In coordinate space this is represented by (xmin=0, xmax=W)
+ bbox[0] -= 1.0
+ bbox[1] -= 1.0
+ instances.append(
+ {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
+ )
+ r["annotations"] = instances
+ dicts.append(r)
+ return dicts
+
+
+def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
+ DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
+ MetadataCatalog.get(name).set(
+ thing_classes=list(class_names), dirname=dirname, year=year, split=split
+ )
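+
+
+# Illustrative usage (hypothetical dataset name and local directory), sketching how a
+# VOC-style dataset would typically be registered and loaded with this module:
+#
+#   register_pascal_voc("voc_2007_trainval_custom", "datasets/VOC2007", "trainval", 2007)
+#   dicts = DatasetCatalog.get("voc_2007_trainval_custom")
+#   # each dict carries "file_name", "image_id", "height", "width" and "annotations"
+#   # with XYXY_ABS boxes and category ids indexing into CLASS_NAMES.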
diff --git a/detectron2/data/datasets/register_coco.py b/detectron2/data/datasets/register_coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..e564438d5bf016bcdbb65b4bbdc215d79f579f8a
--- /dev/null
+++ b/detectron2/data/datasets/register_coco.py
@@ -0,0 +1,3 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .coco import register_coco_instances # noqa
+from .coco_panoptic import register_coco_panoptic_separated # noqa
diff --git a/detectron2/data/detection_utils.py b/detectron2/data/detection_utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..ada19bdb4a2aa74874da4dba5d179ce38201c85d
--- /dev/null
+++ b/detectron2/data/detection_utils.py
@@ -0,0 +1,659 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+Common data processing utilities that are used in a
+typical object detection data pipeline.
+"""
+import logging
+import numpy as np
+from typing import List, Union
+import pycocotools.mask as mask_util
+import torch
+from PIL import Image
+
+from detectron2.structures import (
+ BitMasks,
+ Boxes,
+ BoxMode,
+ Instances,
+ Keypoints,
+ PolygonMasks,
+ RotatedBoxes,
+ polygons_to_bitmask,
+)
+from detectron2.utils.file_io import PathManager
+
+from . import transforms as T
+from .catalog import MetadataCatalog
+
+__all__ = [
+ "SizeMismatchError",
+ "convert_image_to_rgb",
+ "check_image_size",
+ "transform_proposals",
+ "transform_instance_annotations",
+ "annotations_to_instances",
+ "annotations_to_instances_rotated",
+ "build_augmentation",
+ "build_transform_gen",
+ "create_keypoint_hflip_indices",
+ "filter_empty_instances",
+ "read_image",
+]
+
+
+class SizeMismatchError(ValueError):
+ """
+ Raised when the loaded image has a different width/height than the annotation.
+ """
+
+
+# https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
+_M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
+_M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
+
+# https://www.exiv2.org/tags.html
+_EXIF_ORIENT = 274 # exif 'Orientation' tag
+
+
+def convert_PIL_to_numpy(image, format):
+ """
+ Convert PIL image to numpy array of target format.
+
+ Args:
+ image (PIL.Image): a PIL image
+ format (str): the format of output image
+
+ Returns:
+ (np.ndarray): also see `read_image`
+ """
+ if format is not None:
+ # PIL only supports RGB, so convert to RGB and flip channels over below
+ conversion_format = format
+ if format in ["BGR", "YUV-BT.601"]:
+ conversion_format = "RGB"
+ image = image.convert(conversion_format)
+ image = np.asarray(image)
+ # PIL squeezes out the channel dimension for "L", so make it HWC
+ if format == "L":
+ image = np.expand_dims(image, -1)
+
+ # handle formats not supported by PIL
+ elif format == "BGR":
+ # flip channels if needed
+ image = image[:, :, ::-1]
+ elif format == "YUV-BT.601":
+ image = image / 255.0
+ image = np.dot(image, np.array(_M_RGB2YUV).T)
+
+ return image
+
+
+def convert_image_to_rgb(image, format):
+ """
+ Convert an image from given format to RGB.
+
+ Args:
+ image (np.ndarray or Tensor): an HWC image
+ format (str): the format of input image, also see `read_image`
+
+ Returns:
+ (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
+ """
+ if isinstance(image, torch.Tensor):
+ image = image.cpu().numpy()
+ if format == "BGR":
+ image = image[:, :, [2, 1, 0]]
+ elif format == "YUV-BT.601":
+ image = np.dot(image, np.array(_M_YUV2RGB).T)
+ image = image * 255.0
+ else:
+ if format == "L":
+ image = image[:, :, 0]
+ image = image.astype(np.uint8)
+ image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
+ return image
+
+
+def _apply_exif_orientation(image):
+ """
+ Applies the exif orientation correctly.
+
+ This code exists because of a bug in `ImageOps.exif_transpose`:
+ https://github.com/python-pillow/Pillow/issues/3973
+ The Pillow implementation raises errors with various methods, especially
+ `tobytes`, so the orientation is applied manually here.
+
+ Function based on:
+ https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
+ https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
+
+ Args:
+ image (PIL.Image): a PIL image
+
+ Returns:
+ (PIL.Image): the PIL image with exif orientation applied, if applicable
+ """
+ if not hasattr(image, "getexif"):
+ return image
+
+ try:
+ exif = image.getexif()
+ except Exception: # https://github.com/facebookresearch/detectron2/issues/1885
+ exif = None
+
+ if exif is None:
+ return image
+
+ orientation = exif.get(_EXIF_ORIENT)
+
+ method = {
+ 2: Image.FLIP_LEFT_RIGHT,
+ 3: Image.ROTATE_180,
+ 4: Image.FLIP_TOP_BOTTOM,
+ 5: Image.TRANSPOSE,
+ 6: Image.ROTATE_270,
+ 7: Image.TRANSVERSE,
+ 8: Image.ROTATE_90,
+ }.get(orientation)
+
+ if method is not None:
+ return image.transpose(method)
+ return image
+
+
+def read_image(file_name, format=None):
+ """
+ Read an image into the given format.
+ Will apply rotation and flipping if the image has such exif information.
+
+ Args:
+ file_name (str): image file path
+ format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
+
+ Returns:
+ image (np.ndarray):
+ an HWC image in the given format, which is 0-255, uint8 for
+ supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
+ """
+ with PathManager.open(file_name, "rb") as f:
+ image = Image.open(f)
+
+ # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
+ image = _apply_exif_orientation(image)
+ return convert_PIL_to_numpy(image, format)
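+
+
+# Illustrative usage (hypothetical path; "BGR" matches detectron2's default cfg.INPUT.FORMAT):
+#
+#   img = read_image("datasets/demo/input.jpg", format="BGR")
+#   # img is an HxWx3 uint8 array with channels in BGR order and EXIF rotation applied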
+
+
+def check_image_size(dataset_dict, image):
+ """
+ Raise an error if the image does not match the size specified in the dict.
+ """
+ if "width" in dataset_dict or "height" in dataset_dict:
+ image_wh = (image.shape[1], image.shape[0])
+ expected_wh = (dataset_dict["width"], dataset_dict["height"])
+ if image_wh != expected_wh:
+ raise SizeMismatchError(
+ "Mismatched image shape{}, got {}, expect {}.".format(
+ " for image " + dataset_dict["file_name"]
+ if "file_name" in dataset_dict
+ else "",
+ image_wh,
+ expected_wh,
+ )
+ + " Please check the width/height in your annotation."
+ )
+
+ # To ensure bboxes always remap to the original image size
+ if "width" not in dataset_dict:
+ dataset_dict["width"] = image.shape[1]
+ if "height" not in dataset_dict:
+ dataset_dict["height"] = image.shape[0]
+
+
+def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
+ """
+ Apply transformations to the proposals in dataset_dict, if any.
+
+ Args:
+ dataset_dict (dict): a dict read from the dataset, possibly
+ contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
+ image_shape (tuple): height, width
+ transforms (TransformList):
+ proposal_topk (int): only keep top-K scoring proposals
+ min_box_size (int): proposals with either side smaller than this
+ threshold are removed
+
+ The input dict is modified in-place, with abovementioned keys removed. A new
+ key "proposals" will be added. Its value is an `Instances`
+ object which contains the transformed proposals in its field
+ "proposal_boxes" and "objectness_logits".
+ """
+ if "proposal_boxes" in dataset_dict:
+ # Transform proposal boxes
+ boxes = transforms.apply_box(
+ BoxMode.convert(
+ dataset_dict.pop("proposal_boxes"),
+ dataset_dict.pop("proposal_bbox_mode"),
+ BoxMode.XYXY_ABS,
+ )
+ )
+ boxes = Boxes(boxes)
+ objectness_logits = torch.as_tensor(
+ dataset_dict.pop("proposal_objectness_logits").astype("float32")
+ )
+
+ boxes.clip(image_shape)
+ keep = boxes.nonempty(threshold=min_box_size)
+ boxes = boxes[keep]
+ objectness_logits = objectness_logits[keep]
+
+ proposals = Instances(image_shape)
+ proposals.proposal_boxes = boxes[:proposal_topk]
+ proposals.objectness_logits = objectness_logits[:proposal_topk]
+ dataset_dict["proposals"] = proposals
+
+
+def get_bbox(annotation):
+ """
+ Get the instance bounding box, converted to XYXY_ABS format.
+ Args:
+ annotation (dict): dict of instance annotations for a single instance.
+ Returns:
+ bbox (ndarray): x1, y1, x2, y2 coordinates
+ """
+ # bbox is 1d (per-instance bounding box)
+ bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
+ return bbox
+
+
+def transform_instance_annotations(
+ annotation, transforms, image_size, *, keypoint_hflip_indices=None
+):
+ """
+ Apply transforms to box, segmentation and keypoints annotations of a single instance.
+
+ It will use `transforms.apply_box` for the box, and
+ `transforms.apply_coords` for segmentation polygons & keypoints.
+ If you need anything more specially designed for each data structure,
+ you'll need to implement your own version of this function or the transforms.
+
+ Args:
+ annotation (dict): dict of instance annotations for a single instance.
+ It will be modified in-place.
+ transforms (TransformList or list[Transform]):
+ image_size (tuple): the height, width of the transformed image
+ keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
+
+ Returns:
+ dict:
+ the same input dict with fields "bbox", "segmentation", "keypoints"
+ transformed according to `transforms`.
+ The "bbox_mode" field will be set to XYXY_ABS.
+ """
+ if isinstance(transforms, (tuple, list)):
+ transforms = T.TransformList(transforms)
+ # bbox is 1d (per-instance bounding box)
+ bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
+ # clip transformed bbox to image size
+ bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
+ annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
+ annotation["bbox_mode"] = BoxMode.XYXY_ABS
+
+ if "segmentation" in annotation:
+ # each instance contains 1 or more polygons
+ segm = annotation["segmentation"]
+ if isinstance(segm, list):
+ # polygons
+ polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
+ annotation["segmentation"] = [
+ p.reshape(-1) for p in transforms.apply_polygons(polygons)
+ ]
+ elif isinstance(segm, dict):
+ # RLE
+ mask = mask_util.decode(segm)
+ mask = transforms.apply_segmentation(mask)
+ assert tuple(mask.shape[:2]) == image_size
+ annotation["segmentation"] = mask
+ else:
+ raise ValueError(
+ "Cannot transform segmentation of type '{}'!"
+ "Supported types are: polygons as list[list[float] or ndarray],"
+ " COCO-style RLE as a dict.".format(type(segm))
+ )
+
+ if "keypoints" in annotation:
+ keypoints = transform_keypoint_annotations(
+ annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
+ )
+ annotation["keypoints"] = keypoints
+
+ return annotation
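+
+
+# Illustrative sketch (hypothetical numbers): horizontally flipping one XYWH_ABS box
+# on a 640x480 image. The box is converted to XYXY_ABS and mapped via x -> 640 - x:
+#
+#   anno = {"bbox": [10, 10, 50, 80], "bbox_mode": BoxMode.XYWH_ABS}
+#   transform_instance_annotations(anno, [T.HFlipTransform(width=640)], (480, 640))
+#   # anno["bbox"] becomes [580., 10., 630., 90.] and anno["bbox_mode"] == BoxMode.XYXY_ABS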
+
+
+def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
+ """
+ Transform keypoint annotations of an image.
+ If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
+
+ Args:
+ keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
+ Each point is represented by (x, y, visibility).
+ transforms (TransformList):
+ image_size (tuple): the height, width of the transformed image
+ keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
+ When `transforms` includes horizontal flip, will use the index
+ mapping to flip keypoints.
+ """
+ # (N*3,) -> (N, 3)
+ keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
+ keypoints_xy = transforms.apply_coords(keypoints[:, :2])
+
+ # Set all out-of-boundary points to "unlabeled"
+ inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
+ inside = inside.all(axis=1)
+ keypoints[:, :2] = keypoints_xy
+ keypoints[:, 2][~inside] = 0
+
+ # This assumes that HorizFlipTransform is the only one that does flip
+ do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
+
+ # Alternative way: check if probe points were horizontally flipped.
+ # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
+ # probe_aug = transforms.apply_coords(probe.copy())
+ # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0]) # noqa
+
+ # If flipped, swap each keypoint with its opposite-handed equivalent
+ if do_hflip:
+ if keypoint_hflip_indices is None:
+ raise ValueError("Cannot flip keypoints without providing flip indices!")
+ if len(keypoints) != len(keypoint_hflip_indices):
+ raise ValueError(
+ "Keypoint data has {} points, but metadata "
+ "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
+ )
+ keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
+
+ # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
+ keypoints[keypoints[:, 2] == 0] = 0
+ return keypoints
+
+
+def annotations_to_instances(annos, image_size, mask_format="polygon"):
+ """
+ Create an :class:`Instances` object used by the models,
+ from instance annotations in the dataset dict.
+
+ Args:
+ annos (list[dict]): a list of instance annotations in one image, each
+ element for one instance.
+ image_size (tuple): height, width
+
+ Returns:
+ Instances:
+ It will contain fields "gt_boxes", "gt_classes",
+ "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
+ This is the format that builtin models expect.
+ """
+ boxes = (
+ np.stack(
+ [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
+ )
+ if len(annos)
+ else np.zeros((0, 4))
+ )
+ target = Instances(image_size)
+ target.gt_boxes = Boxes(boxes)
+
+ classes = [int(obj["category_id"]) for obj in annos]
+ classes = torch.tensor(classes, dtype=torch.int64)
+ target.gt_classes = classes
+
+ if len(annos) and "segmentation" in annos[0]:
+ segms = [obj["segmentation"] for obj in annos]
+ if mask_format == "polygon":
+ try:
+ masks = PolygonMasks(segms)
+ except ValueError as e:
+ raise ValueError(
+ "Failed to use mask_format=='polygon' from the given annotations!"
+ ) from e
+ else:
+ assert mask_format == "bitmask", mask_format
+ masks = []
+ for segm in segms:
+ if isinstance(segm, list):
+ # polygon
+ masks.append(polygons_to_bitmask(segm, *image_size))
+ elif isinstance(segm, dict):
+ # COCO RLE
+ masks.append(mask_util.decode(segm))
+ elif isinstance(segm, np.ndarray):
+ assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
+ segm.ndim
+ )
+ # mask array
+ masks.append(segm)
+ else:
+ raise ValueError(
+ "Cannot convert segmentation of type '{}' to BitMasks!"
+ "Supported types are: polygons as list[list[float] or ndarray],"
+ " COCO-style RLE as a dict, or a binary segmentation mask "
+ " in a 2D numpy array of shape HxW.".format(type(segm))
+ )
+ # torch.from_numpy does not support array with negative stride.
+ masks = BitMasks(
+ torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
+ )
+ target.gt_masks = masks
+
+ if len(annos) and "keypoints" in annos[0]:
+ kpts = [obj.get("keypoints", []) for obj in annos]
+ target.gt_keypoints = Keypoints(kpts)
+
+ return target
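+
+
+# Illustrative sketch (hypothetical values): two box-only annotations on a 480x640 image:
+#
+#   annos = [
+#       {"bbox": [0, 0, 10, 10], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 0},
+#       {"bbox": [5, 5, 25, 40], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 3},
+#   ]
+#   inst = annotations_to_instances(annos, (480, 640))
+#   # inst.gt_boxes holds a (2, 4) Boxes object and inst.gt_classes == tensor([0, 3])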
+
+
+def annotations_to_instances_rotated(annos, image_size):
+ """
+ Create an :class:`Instances` object used by the models,
+ from instance annotations in the dataset dict.
+ Compared to `annotations_to_instances`, this function is for rotated boxes only.
+
+ Args:
+ annos (list[dict]): a list of instance annotations in one image, each
+ element for one instance.
+ image_size (tuple): height, width
+
+ Returns:
+ Instances:
+ Containing fields "gt_boxes", "gt_classes",
+ if they can be obtained from `annos`.
+ This is the format that builtin models expect.
+ """
+ boxes = [obj["bbox"] for obj in annos]
+ target = Instances(image_size)
+ boxes = target.gt_boxes = RotatedBoxes(boxes)
+ boxes.clip(image_size)
+
+ classes = [obj["category_id"] for obj in annos]
+ classes = torch.tensor(classes, dtype=torch.int64)
+ target.gt_classes = classes
+
+ return target
+
+
+def filter_empty_instances(
+ instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
+):
+ """
+ Filter out empty instances in an `Instances` object.
+
+ Args:
+ instances (Instances):
+ by_box (bool): whether to filter out instances with empty boxes
+ by_mask (bool): whether to filter out instances with empty masks
+ box_threshold (float): minimum width and height to be considered non-empty
+ return_mask (bool): whether to return boolean mask of filtered instances
+
+ Returns:
+ Instances: the filtered instances.
+ tensor[bool], optional: boolean mask of filtered instances
+ """
+ assert by_box or by_mask
+ r = []
+ if by_box:
+ r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
+ if instances.has("gt_masks") and by_mask:
+ r.append(instances.gt_masks.nonempty())
+
+ # TODO: can also filter visible keypoints
+
+ if not r:
+ return instances
+ m = r[0]
+ for x in r[1:]:
+ m = m & x
+ if return_mask:
+ return instances[m], m
+ return instances[m]
+
+
+def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
+ """
+ Args:
+ dataset_names: list of dataset names
+
+ Returns:
+ list[int]: a list of size=#keypoints, storing the
+ horizontally-flipped keypoint indices.
+ """
+ if isinstance(dataset_names, str):
+ dataset_names = [dataset_names]
+
+ check_metadata_consistency("keypoint_names", dataset_names)
+ check_metadata_consistency("keypoint_flip_map", dataset_names)
+
+ meta = MetadataCatalog.get(dataset_names[0])
+ names = meta.keypoint_names
+ # TODO flip -> hflip
+ flip_map = dict(meta.keypoint_flip_map)
+ flip_map.update({v: k for k, v in flip_map.items()})
+ flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
+ flip_indices = [names.index(i) for i in flipped_names]
+ return flip_indices
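+
+
+# Illustrative sketch: with metadata keypoint_names = ("nose", "left_eye", "right_eye")
+# and keypoint_flip_map = (("left_eye", "right_eye"),), the returned indices are
+# [0, 2, 1]: after a horizontal flip the left/right eye channels swap while the nose
+# keeps its index.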
+
+
+def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0):
+ """
+ Get frequency weight for each class sorted by class id.
+ The frequency weight of each class is computed as image_count raised to the power freq_weight_power.
+
+ Args:
+ dataset_names: list of dataset names
+ freq_weight_power: power value
+ """
+ if isinstance(dataset_names, str):
+ dataset_names = [dataset_names]
+
+ check_metadata_consistency("class_image_count", dataset_names)
+
+ meta = MetadataCatalog.get(dataset_names[0])
+ class_freq_meta = meta.class_image_count
+ class_freq = torch.tensor(
+ [c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])]
+ )
+ class_freq_weight = class_freq.float() ** freq_weight_power
+ return class_freq_weight
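+
+
+# Illustrative sketch (hypothetical metadata): with class_image_count =
+# [{"id": 1, "image_count": 100}, {"id": 2, "image_count": 1}] and freq_weight_power=0.5,
+# the returned weights are tensor([10., 1.]), i.e. image_count ** 0.5 per class, sorted by id.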
+
+
+def gen_crop_transform_with_instance(crop_size, image_size, instance):
+ """
+ Generate a CropTransform so that the cropping region contains
+ the center of the given instance.
+
+ Args:
+ crop_size (tuple): h, w in pixels
+ image_size (tuple): h, w
+ instance (dict): an annotation dict of one instance, in Detectron2's
+ dataset format.
+ """
+ crop_size = np.asarray(crop_size, dtype=np.int32)
+ bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
+ center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
+ assert (
+ image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
+ ), "The annotation bounding box is outside of the image!"
+ assert (
+ image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
+ ), "Crop size is larger than image size!"
+
+ min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
+ max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
+ max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
+
+ y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
+ x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
+ return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
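+
+
+# Illustrative sketch (hypothetical numbers): cropping 300x300 from a 480x640 image
+# around an instance whose box center is at (y, x) = (240, 320). The sampled window
+# origin (y0, x0) then falls in [0, 180] x [20, 320], so every possible crop contains
+# the instance center.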
+
+
+def check_metadata_consistency(key, dataset_names):
+ """
+ Check that the datasets have consistent metadata.
+
+ Args:
+ key (str): a metadata key
+ dataset_names (list[str]): a list of dataset names
+
+ Raises:
+ AttributeError: if the key does not exist in the metadata
+ ValueError: if the given datasets do not have the same metadata values defined by key
+ """
+ if len(dataset_names) == 0:
+ return
+ logger = logging.getLogger(__name__)
+ entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
+ for idx, entry in enumerate(entries_per_dataset):
+ if entry != entries_per_dataset[0]:
+ logger.error(
+ "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
+ )
+ logger.error(
+ "Metadata '{}' for dataset '{}' is '{}'".format(
+ key, dataset_names[0], str(entries_per_dataset[0])
+ )
+ )
+ raise ValueError("Datasets have different metadata '{}'!".format(key))
+
+
+def build_augmentation(cfg, is_train):
+ """
+ Create a list of default :class:`Augmentation` from config.
+ Now it includes resizing and flipping.
+
+ Returns:
+ list[Augmentation]
+ """
+ if is_train:
+ min_size = cfg.INPUT.MIN_SIZE_TRAIN
+ max_size = cfg.INPUT.MAX_SIZE_TRAIN
+ sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
+ else:
+ min_size = cfg.INPUT.MIN_SIZE_TEST
+ max_size = cfg.INPUT.MAX_SIZE_TEST
+ sample_style = "choice"
+ augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
+ if is_train and cfg.INPUT.RANDOM_FLIP != "none":
+ augmentation.append(
+ T.RandomFlip(
+ horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
+ vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
+ )
+ )
+ return augmentation
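+
+
+# Illustrative sketch (assumed, typical config values): with MIN_SIZE_TRAIN=(640, 672, 704),
+# MIN_SIZE_TRAIN_SAMPLING="choice", MAX_SIZE_TRAIN=1333 and RANDOM_FLIP="horizontal",
+# build_augmentation(cfg, is_train=True) returns
+# [ResizeShortestEdge((640, 672, 704), 1333, "choice"), RandomFlip(horizontal=True, vertical=False)].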
+
+
+build_transform_gen = build_augmentation
+"""
+Alias for backward-compatibility.
+"""
diff --git a/detectron2/data/samplers/__init__.py b/detectron2/data/samplers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..85c9f1a9df8a4038fbd4246239b699402e382309
--- /dev/null
+++ b/detectron2/data/samplers/__init__.py
@@ -0,0 +1,17 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .distributed_sampler import (
+ InferenceSampler,
+ RandomSubsetTrainingSampler,
+ RepeatFactorTrainingSampler,
+ TrainingSampler,
+)
+
+from .grouped_batch_sampler import GroupedBatchSampler
+
+__all__ = [
+ "GroupedBatchSampler",
+ "TrainingSampler",
+ "RandomSubsetTrainingSampler",
+ "InferenceSampler",
+ "RepeatFactorTrainingSampler",
+]
diff --git a/detectron2/data/samplers/distributed_sampler.py b/detectron2/data/samplers/distributed_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..a098e6ac07c1b193fddcb69e6e54aced82e6081c
--- /dev/null
+++ b/detectron2/data/samplers/distributed_sampler.py
@@ -0,0 +1,278 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import logging
+import math
+from collections import defaultdict
+from typing import Optional
+import torch
+from torch.utils.data.sampler import Sampler
+
+from detectron2.utils import comm
+
+logger = logging.getLogger(__name__)
+
+
+class TrainingSampler(Sampler):
+ """
+ In training, we only care about the "infinite stream" of training data.
+ So this sampler produces an infinite stream of indices and
+ all workers cooperate to correctly shuffle the indices and sample different indices.
+
+ The sampler in each worker effectively produces `indices[worker_id::num_workers]`
+ where `indices` is an infinite stream of indices consisting of
+ `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
+ or `range(size) + range(size) + ...` (if shuffle is False)
+
+ Note that this sampler does not shard based on pytorch DataLoader worker id.
+ A sampler passed to pytorch DataLoader is used only with map-style dataset
+ and will not be executed inside workers.
+ But if this sampler is used in a way that it gets executed inside a dataloader
+ worker, then extra work needs to be done to shard its outputs based on worker id.
+ This is required so that workers don't produce identical data.
+ :class:`ToIterableDataset` implements this logic.
+ This note is true for all samplers in detectron2.
+ """
+
+ def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
+ """
+ Args:
+ size (int): the total number of samples in the underlying dataset to sample from
+ shuffle (bool): whether to shuffle the indices or not
+ seed (int): the initial seed of the shuffle. Must be the same
+ across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ """
+ if not isinstance(size, int):
+ raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.")
+ if size <= 0:
+ raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.")
+ self._size = size
+ self._shuffle = shuffle
+ if seed is None:
+ seed = comm.shared_random_seed()
+ self._seed = int(seed)
+
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+
+ def __iter__(self):
+ start = self._rank
+ yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed)
+ while True:
+ if self._shuffle:
+ yield from torch.randperm(self._size, generator=g).tolist()
+ else:
+ yield from torch.arange(self._size).tolist()
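+
+    # Illustrative sketch: with size=5, world_size=2 and shuffle=False, the infinite
+    # stream is 0, 1, 2, 3, 4, 0, 1, ...; rank 0 takes positions 0, 2, 4, ... of that
+    # stream and yields 0, 2, 4, 1, 3, 0, ..., while rank 1 yields 1, 3, 0, 2, 4, ...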
+
+
+class RandomSubsetTrainingSampler(TrainingSampler):
+ """
+ Similar to TrainingSampler, but only samples a random subset of indices.
+ This is useful when you want to estimate accuracy vs. dataset-size curves by
+ training the model with different values of subset_ratio.
+ """
+
+ def __init__(
+ self,
+ size: int,
+ subset_ratio: float,
+ shuffle: bool = True,
+ seed_shuffle: Optional[int] = None,
+ seed_subset: Optional[int] = None,
+ ):
+ """
+ Args:
+ size (int): the total number of samples in the underlying dataset to sample from
+ subset_ratio (float): the ratio of subset data to sample from the underlying dataset
+ shuffle (bool): whether to shuffle the indices or not
+ seed_shuffle (int): the initial seed of the shuffle. Must be the same
+ across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ seed_subset (int): the seed to randomize the subset to be sampled.
+ Must be the same across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ """
+ super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)
+
+ assert 0.0 < subset_ratio <= 1.0
+ self._size_subset = int(size * subset_ratio)
+ assert self._size_subset > 0
+ if seed_subset is None:
+ seed_subset = comm.shared_random_seed()
+ self._seed_subset = int(seed_subset)
+
+ # randomly generate the subset indexes to be sampled from
+ g = torch.Generator()
+ g.manual_seed(self._seed_subset)
+ indexes_randperm = torch.randperm(self._size, generator=g)
+ self._indexes_subset = indexes_randperm[: self._size_subset]
+
+ logger.info("Using RandomSubsetTrainingSampler......")
+ logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data")
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed) # self._seed equals seed_shuffle from __init__()
+ while True:
+ if self._shuffle:
+ # generate a random permutation to shuffle self._indexes_subset
+ randperm = torch.randperm(self._size_subset, generator=g)
+ yield from self._indexes_subset[randperm].tolist()
+ else:
+ yield from self._indexes_subset.tolist()
+
+
+class RepeatFactorTrainingSampler(Sampler):
+ """
+ Similar to TrainingSampler, but a sample may appear more times than others based
+ on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS.
+ """
+
+ def __init__(self, repeat_factors, *, shuffle=True, seed=None):
+ """
+ Args:
+ repeat_factors (Tensor): a float vector, the repeat factor for each index. When it's
+ full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
+ shuffle (bool): whether to shuffle the indices or not
+ seed (int): the initial seed of the shuffle. Must be the same
+ across all workers. If None, will use a random seed shared
+ among workers (require synchronization among all workers).
+ """
+ self._shuffle = shuffle
+ if seed is None:
+ seed = comm.shared_random_seed()
+ self._seed = int(seed)
+
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+
+ # Split into whole number (_int_part) and fractional (_frac_part) parts.
+ self._int_part = torch.trunc(repeat_factors)
+ self._frac_part = repeat_factors - self._int_part
+
+ @staticmethod
+ def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
+ """
+ Compute (fractional) per-image repeat factors based on category frequency.
+ The repeat factor for an image is a function of the frequency of the rarest
+ category labeled in that image. The "frequency of category c" in [0, 1] is defined
+ as the fraction of images in the training set (without repeats) in which category c
+ appears.
+ See :paper:`lvis` (>= v2) Appendix B.2.
+
+ Args:
+ dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
+ repeat_thresh (float): frequency threshold below which data is repeated.
+ A category with frequency f below this threshold gets a category-level repeat
+ factor of sqrt(repeat_thresh / f); for example, an image whose rarest category
+ has frequency repeat_thresh / 4 is repeated twice in expectation.
+
+ Returns:
+ torch.Tensor:
+ the i-th element is the repeat factor for the dataset image at index i.
+ """
+ # 1. For each category c, compute the fraction of images that contain it: f(c)
+ category_freq = defaultdict(int)
+ for dataset_dict in dataset_dicts: # For each image (without repeats)
+ cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
+ for cat_id in cat_ids:
+ category_freq[cat_id] += 1
+ num_images = len(dataset_dicts)
+ for k, v in category_freq.items():
+ category_freq[k] = v / num_images
+
+ # 2. For each category c, compute the category-level repeat factor:
+ # r(c) = max(1, sqrt(t / f(c)))
+ category_rep = {
+ cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
+ for cat_id, cat_freq in category_freq.items()
+ }
+
+ # 3. For each image I, compute the image-level repeat factor:
+ # r(I) = max_{c in I} r(c)
+ rep_factors = []
+ for dataset_dict in dataset_dicts:
+ cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
+ rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
+ rep_factors.append(rep_factor)
+
+ return torch.tensor(rep_factors, dtype=torch.float32)
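+
+    # Illustrative sketch (hypothetical numbers): with repeat_thresh=0.001, a category
+    # appearing in 0.01% of images (f = 0.0001) gets r(c) = sqrt(0.001 / 0.0001) ~= 3.16,
+    # so images whose rarest category is c are repeated ~3.16x in expectation, while
+    # categories with f >= repeat_thresh keep a repeat factor of 1.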
+
+ def _get_epoch_indices(self, generator):
+ """
+ Create a list of dataset indices (with repeats) to use for one epoch.
+
+ Args:
+ generator (torch.Generator): pseudo random number generator used for
+ stochastic rounding.
+
+ Returns:
+ torch.Tensor: list of dataset indices to use in one epoch. Each index
+ is repeated based on its calculated repeat factor.
+ """
+ # Since repeat factors are fractional, we use stochastic rounding so
+ # that the target repeat factor is achieved in expectation over the
+ # course of training
+ rands = torch.rand(len(self._frac_part), generator=generator)
+ rep_factors = self._int_part + (rands < self._frac_part).float()
+ # Construct a list of indices in which we repeat images as specified
+ indices = []
+ for dataset_index, rep_factor in enumerate(rep_factors):
+ indices.extend([dataset_index] * int(rep_factor.item()))
+ return torch.tensor(indices, dtype=torch.int64)
+
+ def __iter__(self):
+ start = self._rank
+ yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+ def _infinite_indices(self):
+ g = torch.Generator()
+ g.manual_seed(self._seed)
+ while True:
+ # Sample indices with repeats determined by stochastic rounding; each
+ # "epoch" may have a slightly different size due to the rounding.
+ indices = self._get_epoch_indices(g)
+ if self._shuffle:
+ randperm = torch.randperm(len(indices), generator=g)
+ yield from indices[randperm].tolist()
+ else:
+ yield from indices.tolist()
+
+
+class InferenceSampler(Sampler):
+ """
+ Produce indices for inference across all workers.
+ Inference needs to run on the __exact__ set of samples;
+ therefore, when the total number of samples is not divisible by the number of workers,
+ this sampler produces a different number of samples on different workers.
+ """
+
+ def __init__(self, size: int):
+ """
+ Args:
+ size (int): the total number of samples in the underlying dataset to sample from
+ """
+ self._size = size
+ assert size > 0
+ self._rank = comm.get_rank()
+ self._world_size = comm.get_world_size()
+ self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+
+ @staticmethod
+ def _get_local_indices(total_size, world_size, rank):
+ shard_size = total_size // world_size
+ left = total_size % world_size
+ shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+
+ begin = sum(shard_sizes[:rank])
+ end = min(sum(shard_sizes[: rank + 1]), total_size)
+ return range(begin, end)
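+
+    # Illustrative sketch: total_size=10 with world_size=3 gives shard_sizes [4, 3, 3],
+    # so rank 0 covers range(0, 4), rank 1 range(4, 7) and rank 2 range(7, 10); together
+    # the ranks cover every sample exactly once.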
+
+ def __iter__(self):
+ yield from self._local_indices
+
+ def __len__(self):
+ return len(self._local_indices)
diff --git a/detectron2/data/samplers/grouped_batch_sampler.py b/detectron2/data/samplers/grouped_batch_sampler.py
new file mode 100644
index 0000000000000000000000000000000000000000..5b247730aacd04dd0c752664acde3257c4eddd71
--- /dev/null
+++ b/detectron2/data/samplers/grouped_batch_sampler.py
@@ -0,0 +1,47 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import numpy as np
+from torch.utils.data.sampler import BatchSampler, Sampler
+
+
+class GroupedBatchSampler(BatchSampler):
+ """
+ Wraps another sampler to yield a mini-batch of indices.
+ It enforces that each batch only contains elements from the same group.
+ It also tries to provide mini-batches that follow an ordering as close
+ as possible to the ordering from the original sampler.
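+
+ Example (illustrative; assumes a base index sampler and precomputed
+ `group_ids`, e.g. 0/1 for landscape/portrait images)::
+
+ batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=16)
+ for batch in batch_sampler:
+ pass # each `batch` is a list of 16 indices sharing one group id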
+ """
+
+ def __init__(self, sampler, group_ids, batch_size):
+ """
+ Args:
+ sampler (Sampler): Base sampler.
+ group_ids (list[int]): If the sampler produces indices in range [0, N),
+ `group_ids` must be a list of `N` ints which contains the group id of each sample.
+ The group ids must be a set of integers in the range [0, num_groups).
+ batch_size (int): Size of mini-batch.
+ """
+ if not isinstance(sampler, Sampler):
+ raise ValueError(
+ "sampler should be an instance of "
+ "torch.utils.data.Sampler, but got sampler={}".format(sampler)
+ )
+ self.sampler = sampler
+ self.group_ids = np.asarray(group_ids)
+ assert self.group_ids.ndim == 1
+ self.batch_size = batch_size
+ groups = np.unique(self.group_ids).tolist()
+
+ # buffer the indices of each group until batch size is reached
+ self.buffer_per_group = {k: [] for k in groups}
+
+ def __iter__(self):
+ for idx in self.sampler:
+ group_id = self.group_ids[idx]
+ group_buffer = self.buffer_per_group[group_id]
+ group_buffer.append(idx)
+ if len(group_buffer) == self.batch_size:
+ yield group_buffer[:] # yield a copy of the list
+ del group_buffer[:]
+
+ def __len__(self):
+ raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
diff --git a/detectron2/data/transforms/__init__.py b/detectron2/data/transforms/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab3c63b5b456a7fb878757e25768a3634f76ae5b
--- /dev/null
+++ b/detectron2/data/transforms/__init__.py
@@ -0,0 +1,14 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from fvcore.transforms.transform import Transform, TransformList # order them first
+from fvcore.transforms.transform import *
+from .transform import *
+from .augmentation import *
+from .augmentation_impl import *
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
+
+
+from detectron2.utils.env import fixup_module_metadata
+
+fixup_module_metadata(__name__, globals(), __all__)
+del fixup_module_metadata
diff --git a/detectron2/data/transforms/augmentation.py b/detectron2/data/transforms/augmentation.py
new file mode 100644
index 0000000000000000000000000000000000000000..63dd41aef658c9b51c7246880399405a029c5580
--- /dev/null
+++ b/detectron2/data/transforms/augmentation.py
@@ -0,0 +1,380 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import inspect
+import numpy as np
+import pprint
+from typing import Any, List, Optional, Tuple, Union
+from fvcore.transforms.transform import Transform, TransformList
+
+"""
+See "Data Augmentation" tutorial for an overview of the system:
+https://detectron2.readthedocs.io/tutorials/augmentation.html
+"""
+
+
+__all__ = [
+ "Augmentation",
+ "AugmentationList",
+ "AugInput",
+ "TransformGen",
+ "apply_transform_gens",
+ "StandardAugInput",
+ "apply_augmentations",
+]
+
+
+def _check_img_dtype(img):
+ assert isinstance(img, np.ndarray), "[Augmentation] Needs a numpy array, but got a {}!".format(
+ type(img)
+ )
+ assert not np.issubdtype(img.dtype, np.integer) or (
+ img.dtype == np.uint8
+ ), "[Augmentation] Got image of type {}, use uint8 or floating points instead!".format(
+ img.dtype
+ )
+ assert img.ndim in [2, 3], img.ndim
+
+
+def _get_aug_input_args(aug, aug_input) -> List[Any]:
+ """
+ Get the arguments to be passed to ``aug.get_transform`` from the input ``aug_input``.
+ """
+ if aug.input_args is None:
+ # Decide what attributes are needed automatically
+ prms = list(inspect.signature(aug.get_transform).parameters.items())
+ # The default behavior is: if there is one parameter, then it's "image"
+ # (this works automatically for the majority of use cases and avoids breaking
+ # backward compatibility); otherwise, use the argument names.
+ if len(prms) == 1:
+ names = ("image",)
+ else:
+ names = []
+ for name, prm in prms:
+ if prm.kind in (
+ inspect.Parameter.VAR_POSITIONAL,
+ inspect.Parameter.VAR_KEYWORD,
+ ):
+ raise TypeError(
+ f""" \
+The default implementation of `{type(aug)}.__call__` does not allow \
+`{type(aug)}.get_transform` to use variable-length arguments (*args, **kwargs)! \
+If arguments are unknown, reimplement `__call__` instead. \
+"""
+ )
+ names.append(name)
+ aug.input_args = tuple(names)
+
+ args = []
+ for f in aug.input_args:
+ try:
+ args.append(getattr(aug_input, f))
+ except AttributeError as e:
+ raise AttributeError(
+ f"{type(aug)}.get_transform needs input attribute '{f}', "
+ f"but it is not an attribute of {type(aug_input)}!"
+ ) from e
+ return args
+
+
+class Augmentation:
+ """
+ Augmentation defines (often random) policies/strategies to generate :class:`Transform`
+ from data. It is often used for pre-processing of input data.
+
+ A "policy" that generates a :class:`Transform` may, in the most general case,
+ need arbitrary information from input data in order to determine what transforms
+ to apply. Therefore, each :class:`Augmentation` instance defines the arguments
+ needed by its :meth:`get_transform` method. When called with the positional arguments,
+ the :meth:`get_transform` method executes the policy.
+
+ Note that :class:`Augmentation` defines the policies to create a :class:`Transform`,
+ but not how to execute the actual transform operations to those data.
+ Its :meth:`__call__` method will use :meth:`AugInput.transform` to execute the transform.
+
+ The returned `Transform` object is meant to describe a deterministic transformation, which means
+ it can be re-applied to associated data, e.g. the geometry of an image and its segmentation
+ masks need to be transformed together.
+ (If such re-application is not needed, then determinism is not a crucial requirement.)
+ """
+
+ input_args: Optional[Tuple[str]] = None
+ """
+ Stores the attribute names needed by :meth:`get_transform`, e.g. ``("image", "sem_seg")``.
+ By default, it is just a tuple of argument names in :meth:`self.get_transform`, which often only
+ contain "image". As long as the argument name convention is followed, there is no need for
+ users to touch this attribute.
+ """
+
+ def _init(self, params=None):
+ if params:
+ for k, v in params.items():
+ if k != "self" and not k.startswith("_"):
+ setattr(self, k, v)
+
+ def get_transform(self, *args) -> Transform:
+ """
+ Execute the policy based on input data, and decide what transform to apply to inputs.
+
+ Args:
+ args: Any fixed-length positional arguments. By default, the name of the arguments
+ should exist in the :class:`AugInput` to be used.
+
+ Returns:
+ Transform: Returns the deterministic transform to apply to the input.
+
+ Examples:
+ ::
+ class MyAug:
+ # if a policy needs to know both image and semantic segmentation
+ def get_transform(self, image, sem_seg) -> T.Transform:
+ pass
+ tfm: Transform = MyAug().get_transform(image, sem_seg)
+ new_image = tfm.apply_image(image)
+
+ Notes:
+ Users can freely use arbitrary new argument names in custom
+ :meth:`get_transform` method, as long as they are available in the
+ input data. In detectron2 we use the following convention:
+
+ * image: (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
+ floating point in range [0, 1] or [0, 255].
+ * boxes: (N,4) ndarray of float32. It represents the instance bounding boxes
+ of N instances. Each is in XYXY format in unit of absolute coordinates.
+ * sem_seg: (H,W) ndarray of type uint8. Each element is an integer label of pixel.
+
+ We do not specify convention for other types and do not include builtin
+ :class:`Augmentation` that uses other types in detectron2.
+ """
+ raise NotImplementedError
+
+ def __call__(self, aug_input) -> Transform:
+ """
+ Augment the given `aug_input` **in-place**, and return the transform that's used.
+
+ This method will be called to apply the augmentation. For most augmentations, it
+ is enough to use the default implementation, which calls :meth:`get_transform`
+ using the inputs. But a subclass can override it to have more complicated logic.
+
+ Args:
+ aug_input (AugInput): an object that has attributes needed by this augmentation
+ (defined by ``self.get_transform``). Its ``transform`` method will be called
+ to in-place transform it.
+
+ Returns:
+ Transform: the transform that is applied on the input.
+ """
+ args = _get_aug_input_args(self, aug_input)
+ tfm = self.get_transform(*args)
+ assert isinstance(tfm, (Transform, TransformList)), (
+ f"{type(self)}.get_transform must return an instance of Transform! "
+ f"Got {type(tfm)} instead."
+ )
+ aug_input.transform(tfm)
+ return tfm
+
+ def _rand_range(self, low=1.0, high=None, size=None):
+ """
+ Uniform float random number between low and high.
+ """
+ if high is None:
+ low, high = 0, low
+ if size is None:
+ size = []
+ return np.random.uniform(low, high, size)
+
+ def __repr__(self):
+ """
+ Produce something like:
+ "MyAugmentation(field1={self.field1}, field2={self.field2})"
+ """
+ try:
+ sig = inspect.signature(self.__init__)
+ classname = type(self).__name__
+ argstr = []
+ for name, param in sig.parameters.items():
+ assert (
+ param.kind != param.VAR_POSITIONAL and param.kind != param.VAR_KEYWORD
+ ), "The default __repr__ doesn't support *args or **kwargs"
+ assert hasattr(self, name), (
+ "Attribute {} not found! "
+ "Default __repr__ only works if attributes match the constructor.".format(name)
+ )
+ attr = getattr(self, name)
+ default = param.default
+ if default is attr:
+ continue
+ attr_str = pprint.pformat(attr)
+ if "\n" in attr_str:
+ # don't show it if pformat decides to use >1 lines
+ attr_str = "..."
+ argstr.append("{}={}".format(name, attr_str))
+ return "{}({})".format(classname, ", ".join(argstr))
+ except AssertionError:
+ return super().__repr__()
+
+ __str__ = __repr__
+
+
+class _TransformToAug(Augmentation):
+ def __init__(self, tfm: Transform):
+ self.tfm = tfm
+
+ def get_transform(self, *args):
+ return self.tfm
+
+ def __repr__(self):
+ return repr(self.tfm)
+
+ __str__ = __repr__
+
+
+def _transform_to_aug(tfm_or_aug):
+ """
+ Wrap Transform into Augmentation.
+ Private, used internally to implement augmentations.
+ """
+ assert isinstance(tfm_or_aug, (Transform, Augmentation)), tfm_or_aug
+ if isinstance(tfm_or_aug, Augmentation):
+ return tfm_or_aug
+ else:
+ return _TransformToAug(tfm_or_aug)
+
+
+class AugmentationList(Augmentation):
+ """
+ Apply a sequence of augmentations.
+
+ It has ``__call__`` method to apply the augmentations.
+
+ Note that :meth:`get_transform` method is impossible (will throw error if called)
+ for :class:`AugmentationList`, because in order to apply a sequence of augmentations,
+ the kth augmentation must be applied first, to provide inputs needed by the (k+1)th
+ augmentation.
+ """
+
+ def __init__(self, augs):
+ """
+ Args:
+ augs (list[Augmentation or Transform]):
+ """
+ super().__init__()
+ self.augs = [_transform_to_aug(x) for x in augs]
+
+ def __call__(self, aug_input) -> TransformList:
+ tfms = []
+ for x in self.augs:
+ tfm = x(aug_input)
+ tfms.append(tfm)
+ return TransformList(tfms)
+
+ def __repr__(self):
+ msgs = [str(x) for x in self.augs]
+ return "AugmentationList[{}]".format(", ".join(msgs))
+
+ __str__ = __repr__
+
+
+class AugInput:
+ """
+ Input that can be used with :meth:`Augmentation.__call__`.
+ This is a standard implementation for the majority of use cases.
+ This class provides the standard attributes **"image", "boxes", "sem_seg"**
+ defined in :meth:`__init__` and they may be needed by different augmentations.
+ Most augmentation policies do not need attributes beyond these three.
+
+ After applying augmentations to these attributes (using :meth:`AugInput.transform`),
+ the returned transforms can then be used to transform other data structures that users have.
+
+ Examples:
+ ::
+ input = AugInput(image, boxes=boxes)
+ tfms = augmentation(input)
+ transformed_image = input.image
+ transformed_boxes = input.boxes
+ transformed_other_data = tfms.apply_other(other_data)
+
+ An extended project that works with new data types may implement augmentation policies
+ that need other inputs. An algorithm may need to transform inputs in a way different
+ from the standard approach defined in this class. In those rare situations, users can
+ implement a class similar to this class, that satisfies the following conditions:
+
+ * The input must provide access to these data in the form of attribute access
+ (``getattr``). For example, if an :class:`Augmentation` to be applied needs "image"
+ and "sem_seg" arguments, its input must have the attribute "image" and "sem_seg".
+ * The input must have a ``transform(tfm: Transform) -> None`` method which
+ in-place transforms all its attributes.
+ """
+
+ # TODO maybe should support more builtin data types here
+ def __init__(
+ self,
+ image: np.ndarray,
+ *,
+ boxes: Optional[np.ndarray] = None,
+ sem_seg: Optional[np.ndarray] = None,
+ ):
+ """
+ Args:
+ image (ndarray): (H,W) or (H,W,C) ndarray of type uint8 in range [0, 255], or
+ floating point in range [0, 1] or [0, 255]. The meaning of C is up
+ to users.
+ boxes (ndarray or None): Nx4 float32 boxes in XYXY_ABS mode
+ sem_seg (ndarray or None): HxW uint8 semantic segmentation mask. Each element
+ is an integer label of pixel.
+ """
+ _check_img_dtype(image)
+ self.image = image
+ self.boxes = boxes
+ self.sem_seg = sem_seg
+
+ def transform(self, tfm: Transform) -> None:
+ """
+ In-place transform all attributes of this class.
+
+ By "in-place", it means after calling this method, accessing an attribute such
+ as ``self.image`` will return transformed data.
+ """
+ self.image = tfm.apply_image(self.image)
+ if self.boxes is not None:
+ self.boxes = tfm.apply_box(self.boxes)
+ if self.sem_seg is not None:
+ self.sem_seg = tfm.apply_segmentation(self.sem_seg)
+
+ def apply_augmentations(
+ self, augmentations: List[Union[Augmentation, Transform]]
+ ) -> TransformList:
+ """
+ Equivalent of ``AugmentationList(augmentations)(self)``
+ """
+ return AugmentationList(augmentations)(self)
+
+
+def apply_augmentations(augmentations: List[Union[Transform, Augmentation]], inputs):
+ """
+ Use ``T.AugmentationList(augmentations)(inputs)`` instead.
+ """
+ if isinstance(inputs, np.ndarray):
+ # handle the common case of image-only Augmentation, also for backward compatibility
+ image_only = True
+ inputs = AugInput(inputs)
+ else:
+ image_only = False
+ tfms = inputs.apply_augmentations(augmentations)
+ return inputs.image if image_only else inputs, tfms
+
+
+apply_transform_gens = apply_augmentations
+"""
+Alias for backward-compatibility.
+"""
+
+TransformGen = Augmentation
+"""
+Alias for Augmentation, since it is something that generates :class:`Transform`s
+"""
+
+StandardAugInput = AugInput
+"""
+Alias for compatibility. It's not worth the complexity to have two classes.
+"""
diff --git a/detectron2/data/transforms/augmentation_impl.py b/detectron2/data/transforms/augmentation_impl.py
new file mode 100644
index 0000000000000000000000000000000000000000..7cc7b28be66cdf14bff493745c6c567da55aeb34
--- /dev/null
+++ b/detectron2/data/transforms/augmentation_impl.py
@@ -0,0 +1,736 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+"""
+Implement many useful :class:`Augmentation`.
+"""
+import numpy as np
+import sys
+from numpy import random
+from typing import Tuple
+import torch
+from fvcore.transforms.transform import (
+ BlendTransform,
+ CropTransform,
+ HFlipTransform,
+ NoOpTransform,
+ PadTransform,
+ Transform,
+ TransformList,
+ VFlipTransform,
+)
+from PIL import Image
+
+from detectron2.structures import Boxes, pairwise_iou
+
+from .augmentation import Augmentation, _transform_to_aug
+from .transform import ExtentTransform, ResizeTransform, RotationTransform
+
+__all__ = [
+ "FixedSizeCrop",
+ "RandomApply",
+ "RandomBrightness",
+ "RandomContrast",
+ "RandomCrop",
+ "RandomExtent",
+ "RandomFlip",
+ "RandomSaturation",
+ "RandomLighting",
+ "RandomRotation",
+ "Resize",
+ "ResizeScale",
+ "ResizeShortestEdge",
+ "RandomCrop_CategoryAreaConstraint",
+ "RandomResize",
+ "MinIoURandomCrop",
+]
+
+
+class RandomApply(Augmentation):
+ """
+ Randomly apply an augmentation with a given probability.
+ """
+
+ def __init__(self, tfm_or_aug, prob=0.5):
+ """
+ Args:
+ tfm_or_aug (Transform, Augmentation): the transform or augmentation
+ to be applied. It can either be a `Transform` or `Augmentation`
+ instance.
+ prob (float): probability between 0.0 and 1.0 that
+ the wrapped transformation is applied
+ """
+ super().__init__()
+ self.aug = _transform_to_aug(tfm_or_aug)
+ assert 0.0 <= prob <= 1.0, f"Probability must be between 0.0 and 1.0 (given: {prob})"
+ self.prob = prob
+
+ def get_transform(self, *args):
+ do = self._rand_range() < self.prob
+ if do:
+ return self.aug.get_transform(*args)
+ else:
+ return NoOpTransform()
+
+ def __call__(self, aug_input):
+ do = self._rand_range() < self.prob
+ if do:
+ return self.aug(aug_input)
+ else:
+ return NoOpTransform()
+
+
+class RandomFlip(Augmentation):
+ """
+ Flip the image horizontally or vertically with the given probability.
+ """
+
+ def __init__(self, prob=0.5, *, horizontal=True, vertical=False):
+ """
+ Args:
+ prob (float): probability of flip.
+ horizontal (boolean): whether to apply horizontal flipping
+ vertical (boolean): whether to apply vertical flipping
+ """
+ super().__init__()
+
+ if horizontal and vertical:
+ raise ValueError("Cannot do both horiz and vert. Please use two Flip instead.")
+ if not horizontal and not vertical:
+ raise ValueError("At least one of horiz or vert has to be True!")
+ self._init(locals())
+
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ do = self._rand_range() < self.prob
+ if do:
+ if self.horizontal:
+ return HFlipTransform(w)
+ elif self.vertical:
+ return VFlipTransform(h)
+ else:
+ return NoOpTransform()
+
+
+class Resize(Augmentation):
+ """Resize image to a fixed target size"""
+
+ def __init__(self, shape, interp=Image.BILINEAR):
+ """
+ Args:
+ shape: (h, w) tuple or an int
+ interp: PIL interpolation method
+ """
+ if isinstance(shape, int):
+ shape = (shape, shape)
+ shape = tuple(shape)
+ self._init(locals())
+
+ def get_transform(self, image):
+ return ResizeTransform(
+ image.shape[0], image.shape[1], self.shape[0], self.shape[1], self.interp
+ )
+
+
+class ResizeShortestEdge(Augmentation):
+ """
+ Resize the image while keeping the aspect ratio unchanged.
+ It attempts to scale the shorter edge to the given `short_edge_length`,
+ as long as the longer edge does not exceed `max_size`.
+ If `max_size` is reached, then downscale so that the longer edge does not exceed max_size.
+ """
+
+ @torch.jit.unused
+ def __init__(
+ self, short_edge_length, max_size=sys.maxsize, sample_style="range", interp=Image.BILINEAR
+ ):
+ """
+ Args:
+ short_edge_length (list[int]): If ``sample_style=="range"``,
+ a [min, max] interval from which to sample the shortest edge length.
+ If ``sample_style=="choice"``, a list of shortest edge lengths to sample from.
+ max_size (int): maximum allowed longest edge length.
+ sample_style (str): either "range" or "choice".
+ """
+ super().__init__()
+ assert sample_style in ["range", "choice"], sample_style
+
+ self.is_range = sample_style == "range"
+ if isinstance(short_edge_length, int):
+ short_edge_length = (short_edge_length, short_edge_length)
+ if self.is_range:
+ assert len(short_edge_length) == 2, (
+ "short_edge_length must be two values using 'range' sample style."
+ f" Got {short_edge_length}!"
+ )
+ self._init(locals())
+
+ @torch.jit.unused
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ if self.is_range:
+ size = np.random.randint(self.short_edge_length[0], self.short_edge_length[1] + 1)
+ else:
+ size = np.random.choice(self.short_edge_length)
+ if size == 0:
+ return NoOpTransform()
+
+ newh, neww = ResizeShortestEdge.get_output_shape(h, w, size, self.max_size)
+ return ResizeTransform(h, w, newh, neww, self.interp)
+
+ @staticmethod
+ def get_output_shape(
+ oldh: int, oldw: int, short_edge_length: int, max_size: int
+ ) -> Tuple[int, int]:
+ """
+ Compute the output size given input size and target short edge length.
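+
+ For example (illustrative): an input of (h, w) = (720, 1280) with
+ short_edge_length=800 and max_size=1333 first scales to (800, 1422);
+ since 1422 > max_size, it is rescaled by 1333 / 1422 to (750, 1333).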
+ """
+ h, w = oldh, oldw
+ size = short_edge_length * 1.0
+ scale = size / min(h, w)
+ if h < w:
+ newh, neww = size, scale * w
+ else:
+ newh, neww = scale * h, size
+ if max(newh, neww) > max_size:
+ scale = max_size * 1.0 / max(newh, neww)
+ newh = newh * scale
+ neww = neww * scale
+ neww = int(neww + 0.5)
+ newh = int(newh + 0.5)
+ return (newh, neww)
+
+
+class ResizeScale(Augmentation):
+ """
+ Takes target size as input and randomly scales the given target size between `min_scale`
+ and `max_scale`. It then scales the input image such that it fits inside the scaled target
+ box, keeping the aspect ratio constant.
+ This implements the resize part of Google's 'resize_and_crop' data augmentation:
+ https://github.com/tensorflow/tpu/blob/master/models/official/detection/utils/input_utils.py#L127
+ """
+
+ def __init__(
+ self,
+ min_scale: float,
+ max_scale: float,
+ target_height: int,
+ target_width: int,
+ interp: int = Image.BILINEAR,
+ ):
+ """
+ Args:
+ min_scale: minimum image scale range.
+ max_scale: maximum image scale range.
+ target_height: target image height.
+ target_width: target image width.
+ interp: image interpolation method.
+ """
+ super().__init__()
+ self._init(locals())
+
+ def _get_resize(self, image: np.ndarray, scale: float) -> Transform:
+ input_size = image.shape[:2]
+
+ # Compute new target size given a scale.
+ target_size = (self.target_height, self.target_width)
+ target_scale_size = np.multiply(target_size, scale)
+
+ # Compute actual rescaling applied to input image and output size.
+ output_scale = np.minimum(
+ target_scale_size[0] / input_size[0], target_scale_size[1] / input_size[1]
+ )
+ output_size = np.round(np.multiply(input_size, output_scale)).astype(int)
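+ # e.g. (illustrative) a 480x640 input with target 1024x1024 and scale 0.5 must fit
+ # inside a 512x512 box, giving output_scale = 0.8 and an output size of 384x512.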
+
+ return ResizeTransform(
+ input_size[0], input_size[1], int(output_size[0]), int(output_size[1]), self.interp
+ )
+
+ def get_transform(self, image: np.ndarray) -> Transform:
+ random_scale = np.random.uniform(self.min_scale, self.max_scale)
+ return self._get_resize(image, random_scale)
+
+
+class RandomRotation(Augmentation):
+ """
+ This augmentation returns a copy of the image, rotated by a randomly sampled
+ angle (in degrees) counter clockwise around the given center.
+ """
+
+ def __init__(self, angle, expand=True, center=None, sample_style="range", interp=None):
+ """
+ Args:
+ angle (list[float]): If ``sample_style=="range"``,
+ a [min, max] interval from which to sample the angle (in degrees).
+ If ``sample_style=="choice"``, a list of angles to sample from
+ expand (bool): choose if the image should be resized to fit the whole
+ rotated image (default), or simply cropped
+ center (list[[float, float]]): If ``sample_style=="range"``,
+ a [[minx, miny], [maxx, maxy]] relative interval from which to sample the center,
+ [0, 0] being the top left of the image and [1, 1] the bottom right.
+ If ``sample_style=="choice"``, a list of centers to sample from
+ Default: None, which means that the center of rotation is the center of the image
+ center has no effect if expand=True because it only affects shifting
+ """
+ super().__init__()
+ assert sample_style in ["range", "choice"], sample_style
+ self.is_range = sample_style == "range"
+ if isinstance(angle, (float, int)):
+ angle = (angle, angle)
+ if center is not None and isinstance(center[0], (float, int)):
+ center = (center, center)
+ self._init(locals())
+
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ center = None
+ if self.is_range:
+ angle = np.random.uniform(self.angle[0], self.angle[1])
+ if self.center is not None:
+ center = (
+ np.random.uniform(self.center[0][0], self.center[1][0]),
+ np.random.uniform(self.center[0][1], self.center[1][1]),
+ )
+ else:
+ angle = np.random.choice(self.angle)
+ if self.center is not None:
+ center = np.random.choice(self.center)
+
+ if center is not None:
+ center = (w * center[0], h * center[1]) # Convert to absolute coordinates
+
+ if angle % 360 == 0:
+ return NoOpTransform()
+
+ return RotationTransform(h, w, angle, expand=self.expand, center=center, interp=self.interp)
+
+
+class FixedSizeCrop(Augmentation):
+ """
+ If `crop_size` is smaller than the input image size, then it uses a random crop of
+ the crop size. If `crop_size` is larger than the input image size, then it pads
+ the right and the bottom of the image to the crop size if `pad` is True, otherwise
+ it returns the smaller image.
+ """
+
+ def __init__(
+ self,
+ crop_size: Tuple[int],
+ pad: bool = True,
+ pad_value: float = 128.0,
+ seg_pad_value: int = 255,
+ ):
+ """
+ Args:
+ crop_size: target image (height, width).
+ pad: if True, will pad images smaller than `crop_size` up to `crop_size`
+ pad_value: the padding value to the image.
+ seg_pad_value: the padding value to the segmentation mask.
+ """
+ super().__init__()
+ self._init(locals())
+
+ def _get_crop(self, image: np.ndarray) -> Transform:
+ # Compute the image scale and scaled size.
+ input_size = image.shape[:2]
+ output_size = self.crop_size
+
+ # Add random crop if the image is scaled up.
+ max_offset = np.subtract(input_size, output_size)
+ max_offset = np.maximum(max_offset, 0)
+ offset = np.multiply(max_offset, np.random.uniform(0.0, 1.0))
+ offset = np.round(offset).astype(int)
+ return CropTransform(
+ offset[1], offset[0], output_size[1], output_size[0], input_size[1], input_size[0]
+ )
+
+ def _get_pad(self, image: np.ndarray) -> Transform:
+ # Compute the image scale and scaled size.
+ input_size = image.shape[:2]
+ output_size = self.crop_size
+
+ # Add padding if the image is scaled down.
+ pad_size = np.subtract(output_size, input_size)
+ pad_size = np.maximum(pad_size, 0)
+ original_size = np.minimum(input_size, output_size)
+ return PadTransform(
+ 0,
+ 0,
+ pad_size[1],
+ pad_size[0],
+ original_size[1],
+ original_size[0],
+ self.pad_value,
+ self.seg_pad_value,
+ )
+
+ def get_transform(self, image: np.ndarray) -> TransformList:
+ transforms = [self._get_crop(image)]
+ if self.pad:
+ transforms.append(self._get_pad(image))
+ return TransformList(transforms)
+
+
+class RandomCrop(Augmentation):
+ """
+ Randomly crop a rectangle region out of an image.
+ """
+
+ def __init__(self, crop_type: str, crop_size):
+ """
+ Args:
+ crop_type (str): one of "relative_range", "relative", "absolute", "absolute_range".
+ crop_size (tuple[float, float]): two floats, explained below.
+
+ - "relative": crop a (H * crop_size[0], W * crop_size[1]) region from an input image of
+ size (H, W). crop size should be in (0, 1]
+ - "relative_range": uniformly sample two values from [crop_size[0], 1]
+ and [crop_size[1]], 1], and use them as in "relative" crop type.
+ - "absolute" crop a (crop_size[0], crop_size[1]) region from input image.
+ crop_size must be smaller than the input image size.
+ - "absolute_range", for an input of size (H, W), uniformly sample H_crop in
+ [crop_size[0], min(H, crop_size[1])] and W_crop in [crop_size[0], min(W, crop_size[1])].
+ Then crop a region (H_crop, W_crop).
+ """
+ # TODO style of relative_range and absolute_range are not consistent:
+ # one takes (h, w) but another takes (min, max)
+ super().__init__()
+ assert crop_type in ["relative_range", "relative", "absolute", "absolute_range"]
+ self._init(locals())
+
+ def get_transform(self, image):
+ h, w = image.shape[:2]
+ croph, cropw = self.get_crop_size((h, w))
+ assert h >= croph and w >= cropw, "Shape computation in {} has bugs.".format(self)
+ h0 = np.random.randint(h - croph + 1)
+ w0 = np.random.randint(w - cropw + 1)
+ return CropTransform(w0, h0, cropw, croph)
+
+ def get_crop_size(self, image_size):
+ """
+ Args:
+ image_size (tuple): height, width
+
+ Returns:
+ crop_size (tuple): height, width in absolute pixels
+ """
+ h, w = image_size
+ if self.crop_type == "relative":
+ ch, cw = self.crop_size
+ return int(h * ch + 0.5), int(w * cw + 0.5)
+ elif self.crop_type == "relative_range":
+ crop_size = np.asarray(self.crop_size, dtype=np.float32)
+ ch, cw = crop_size + np.random.rand(2) * (1 - crop_size)
+ return int(h * ch + 0.5), int(w * cw + 0.5)
+ elif self.crop_type == "absolute":
+ return (min(self.crop_size[0], h), min(self.crop_size[1], w))
+ elif self.crop_type == "absolute_range":
+ assert self.crop_size[0] <= self.crop_size[1]
+ ch = np.random.randint(min(h, self.crop_size[0]), min(h, self.crop_size[1]) + 1)
+ cw = np.random.randint(min(w, self.crop_size[0]), min(w, self.crop_size[1]) + 1)
+ return ch, cw
+ else:
+ raise NotImplementedError("Unknown crop type {}".format(self.crop_type))
+
+
+class RandomCrop_CategoryAreaConstraint(Augmentation):
+ """
+ Similar to :class:`RandomCrop`, but finds a cropping window such that no single category
+ occupies an area ratio of more than `single_category_max_area` in the semantic segmentation
+ ground truth, which could otherwise cause instability in training. It attempts to find such
+ a valid cropping window at most 10 times.
+ """
+
+ def __init__(
+ self,
+ crop_type: str,
+ crop_size,
+ single_category_max_area: float = 1.0,
+ ignored_category: int = None,
+ ):
+ """
+ Args:
+ crop_type, crop_size: same as in :class:`RandomCrop`
+ single_category_max_area: the maximum allowed area ratio of a
+ category. Set to 1.0 to disable
+ ignored_category: allow this category in the semantic segmentation
+ ground truth to exceed the area ratio. Usually set to the category
+ that's ignored in training.
+ """
+ self.crop_aug = RandomCrop(crop_type, crop_size)
+ self._init(locals())
+
+ def get_transform(self, image, sem_seg):
+ if self.single_category_max_area >= 1.0:
+ return self.crop_aug.get_transform(image)
+ else:
+ h, w = sem_seg.shape
+ for _ in range(10):
+ crop_size = self.crop_aug.get_crop_size((h, w))
+ y0 = np.random.randint(h - crop_size[0] + 1)
+ x0 = np.random.randint(w - crop_size[1] + 1)
+ sem_seg_temp = sem_seg[y0 : y0 + crop_size[0], x0 : x0 + crop_size[1]]
+ labels, cnt = np.unique(sem_seg_temp, return_counts=True)
+ if self.ignored_category is not None:
+ cnt = cnt[labels != self.ignored_category]
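+ # accept this crop once more than one category remains and no single
+ # category covers `single_category_max_area` or more of the crop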
+ if len(cnt) > 1 and np.max(cnt) < np.sum(cnt) * self.single_category_max_area:
+ break
+ crop_tfm = CropTransform(x0, y0, crop_size[1], crop_size[0])
+ return crop_tfm
+
+
+class RandomExtent(Augmentation):
+ """
+ Outputs an image by cropping a random "subrect" of the source image.
+
+ The subrect can be parameterized to include pixels outside the source image,
+ in which case they will be set to zeros (i.e. black). The size of the output
+ image will vary with the size of the random subrect.
+ """
+
+ def __init__(self, scale_range, shift_range):
+ """
+ Args:
+ scale_range (l, h): Range of input-to-output size scaling factor
+ shift_range (x, y): Range of shifts of the cropped subrect. The rect
+ is shifted by [w / 2 * Uniform(-x, x), h / 2 * Uniform(-y, y)],
+ where (w, h) is the (width, height) of the input image. Set each
+ component to zero to crop at the image's center.
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ img_h, img_w = image.shape[:2]
+
+ # Initialize src_rect to fit the input image.
+ src_rect = np.array([-0.5 * img_w, -0.5 * img_h, 0.5 * img_w, 0.5 * img_h])
+
+ # Apply a random scaling to the src_rect.
+ src_rect *= np.random.uniform(self.scale_range[0], self.scale_range[1])
+
+ # Apply a random shift to the coordinates origin.
+ src_rect[0::2] += self.shift_range[0] * img_w * (np.random.rand() - 0.5)
+ src_rect[1::2] += self.shift_range[1] * img_h * (np.random.rand() - 0.5)
+
+ # Map src_rect coordinates into image coordinates (center at corner).
+ src_rect[0::2] += 0.5 * img_w
+ src_rect[1::2] += 0.5 * img_h
+
+ return ExtentTransform(
+ src_rect=(src_rect[0], src_rect[1], src_rect[2], src_rect[3]),
+ output_size=(int(src_rect[3] - src_rect[1]), int(src_rect[2] - src_rect[0])),
+ )
+
+
+class RandomContrast(Augmentation):
+ """
+ Randomly transforms image contrast.
+
+ Contrast intensity is uniformly sampled in (intensity_min, intensity_max).
+ - intensity < 1 will reduce contrast
+ - intensity = 1 will preserve the input image
+ - intensity > 1 will increase contrast
+
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
+ """
+
+ def __init__(self, intensity_min, intensity_max):
+ """
+ Args:
+ intensity_min (float): Minimum augmentation
+ intensity_max (float): Maximum augmentation
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
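+ # BlendTransform computes (1 - w) * image.mean() + w * image, so w < 1 pulls
+ # pixels toward the mean (less contrast) and w > 1 pushes them away (more contrast).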
+ return BlendTransform(src_image=image.mean(), src_weight=1 - w, dst_weight=w)
+
+
+class RandomBrightness(Augmentation):
+ """
+ Randomly transforms image brightness.
+
+ Brightness intensity is uniformly sampled in (intensity_min, intensity_max).
+ - intensity < 1 will reduce brightness
+ - intensity = 1 will preserve the input image
+ - intensity > 1 will increase brightness
+
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
+ """
+
+ def __init__(self, intensity_min, intensity_max):
+ """
+ Args:
+ intensity_min (float): Minimum augmentation
+ intensity_max (float): Maximum augmentation
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
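+ # Blending with a zero (black) image gives w * image: w < 1 darkens the image,
+ # w > 1 brightens it.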
+ return BlendTransform(src_image=0, src_weight=1 - w, dst_weight=w)
+
+
+class RandomSaturation(Augmentation):
+ """
+ Randomly transforms saturation of an RGB image.
+ Input images are assumed to have 'RGB' channel order.
+
+ Saturation intensity is uniformly sampled in (intensity_min, intensity_max).
+ - intensity < 1 will reduce saturation (make the image more grayscale)
+ - intensity = 1 will preserve the input image
+ - intensity > 1 will increase saturation
+
+ See: https://pillow.readthedocs.io/en/3.0.x/reference/ImageEnhance.html
+ """
+
+ def __init__(self, intensity_min, intensity_max):
+ """
+ Args:
+ intensity_min (float): Minimum augmentation (1 preserves input).
+ intensity_max (float): Maximum augmentation (1 preserves input).
+ """
+ super().__init__()
+ self._init(locals())
+
+ def get_transform(self, image):
+ assert image.shape[-1] == 3, "RandomSaturation only works on RGB images"
+ w = np.random.uniform(self.intensity_min, self.intensity_max)
+ grayscale = image.dot([0.299, 0.587, 0.114])[:, :, np.newaxis]
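+ # [0.299, 0.587, 0.114] are the Rec. 601 luma weights; blending toward this
+ # grayscale image (w < 1) reduces saturation, while w > 1 increases it.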
+ return BlendTransform(src_image=grayscale, src_weight=1 - w, dst_weight=w)
+
+
+class RandomLighting(Augmentation):
+ """
+ The "lighting" augmentation described in AlexNet, using fixed PCA over ImageNet.
+ Input images are assumed to have 'RGB' channel order.
+
+ The degree of color jittering is randomly sampled via a normal distribution,
+ with standard deviation given by the scale parameter.
+ """
+
+ def __init__(self, scale):
+ """
+ Args:
+ scale (float): Standard deviation of principal component weighting.
+ """
+ super().__init__()
+ self._init(locals())
+ self.eigen_vecs = np.array(
+ [[-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203]]
+ )
+ self.eigen_vals = np.array([0.2175, 0.0188, 0.0045])
+
+ def get_transform(self, image):
+ assert image.shape[-1] == 3, "RandomLighting only works on RGB images"
+ weights = np.random.normal(scale=self.scale, size=3)
+ return BlendTransform(
+ src_image=self.eigen_vecs.dot(weights * self.eigen_vals), src_weight=1.0, dst_weight=1.0
+ )
+
+
+class RandomResize(Augmentation):
+ """Randomly resize image to a target size in shape_list"""
+
+ def __init__(self, shape_list, interp=Image.BILINEAR):
+ """
+ Args:
+ shape_list: a list of shapes in (h, w)
+ interp: PIL interpolation method
+ """
+ self.shape_list = shape_list
+ self._init(locals())
+
+ def get_transform(self, image):
+ shape_idx = np.random.randint(low=0, high=len(self.shape_list))
+ h, w = self.shape_list[shape_idx]
+ return ResizeTransform(image.shape[0], image.shape[1], h, w, self.interp)
+
+
+class MinIoURandomCrop(Augmentation):
+ """Random crop the image & bboxes, the cropped patches have minimum IoU
+ requirement with original image & bboxes, the IoU threshold is randomly
+ selected from min_ious.
+
+ Args:
+ min_ious (tuple): minimum IoU threshold for all intersections with
+ bounding boxes
+ min_crop_size (float): minimum crop's size (i.e. h,w := a*h, a*w,
+ where a >= min_crop_size)
+ mode_trials: number of trials for sampling an IoU threshold (mode)
+ crop_trials: number of crop attempts for each sampled IoU threshold
+ """
+
+ def __init__(
+ self,
+ min_ious=(0.1, 0.3, 0.5, 0.7, 0.9),
+ min_crop_size=0.3,
+ mode_trials=1000,
+ crop_trials=50,
+ ):
+ self.min_ious = min_ious
+ self.sample_mode = (1, *min_ious, 0)
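+ # mode 1 keeps the original image unchanged; mode 0 applies a crop with no IoU constraint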
+ self.min_crop_size = min_crop_size
+ self.mode_trials = mode_trials
+ self.crop_trials = crop_trials
+
+ def get_transform(self, image, boxes):
+ """Call function to crop images and bounding boxes with minimum IoU
+ constraint.
+
+ Args:
+ boxes: ground truth boxes in (x1, y1, x2, y2) format
+ """
+ if boxes is None:
+ return NoOpTransform()
+ h, w, c = image.shape
+ for _ in range(self.mode_trials):
+ mode = random.choice(self.sample_mode)
+ self.mode = mode
+ if mode == 1:
+ return NoOpTransform()
+
+ min_iou = mode
+ for _ in range(self.crop_trials):
+ new_w = random.uniform(self.min_crop_size * w, w)
+ new_h = random.uniform(self.min_crop_size * h, h)
+
+ # h / w in [0.5, 2]
+ if new_h / new_w < 0.5 or new_h / new_w > 2:
+ continue
+
+ left = random.uniform(w - new_w)
+ top = random.uniform(h - new_h)
+
+ patch = np.array((int(left), int(top), int(left + new_w), int(top + new_h)))
+ # Line or point crop is not allowed
+ if patch[2] == patch[0] or patch[3] == patch[1]:
+ continue
+ overlaps = pairwise_iou(
+ Boxes(patch.reshape(-1, 4)), Boxes(boxes.reshape(-1, 4))
+ ).reshape(-1)
+ if len(overlaps) > 0 and overlaps.min() < min_iou:
+ continue
+
+ # centers of the boxes should be inside the cropped image
+ # only adjust boxes and instance masks when the gt is not empty
+ if len(overlaps) > 0:
+ # adjust boxes
+ def is_center_of_bboxes_in_patch(boxes, patch):
+ center = (boxes[:, :2] + boxes[:, 2:]) / 2
+ mask = (
+ (center[:, 0] > patch[0])
+ * (center[:, 1] > patch[1])
+ * (center[:, 0] < patch[2])
+ * (center[:, 1] < patch[3])
+ )
+ return mask
+
+ mask = is_center_of_bboxes_in_patch(boxes, patch)
+ if not mask.any():
+ continue
+ return CropTransform(int(left), int(top), int(new_w), int(new_h))
diff --git a/detectron2/data/transforms/transform.py b/detectron2/data/transforms/transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..46769a2569ffc6223a95990f8db5973757e7d23f
--- /dev/null
+++ b/detectron2/data/transforms/transform.py
@@ -0,0 +1,351 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+See "Data Augmentation" tutorial for an overview of the system:
+https://detectron2.readthedocs.io/tutorials/augmentation.html
+"""
+
+import numpy as np
+import torch
+import torch.nn.functional as F
+from fvcore.transforms.transform import (
+ CropTransform,
+ HFlipTransform,
+ NoOpTransform,
+ Transform,
+ TransformList,
+)
+from PIL import Image
+
+try:
+ import cv2 # noqa
+except ImportError:
+ # OpenCV is an optional dependency at the moment
+ pass
+
+__all__ = [
+ "ExtentTransform",
+ "ResizeTransform",
+ "RotationTransform",
+ "ColorTransform",
+ "PILColorTransform",
+]
+
+
+class ExtentTransform(Transform):
+ """
+ Extracts a subregion from the source image and scales it to the output size.
+
+ The fill color is used to map pixels from the source rect that fall outside
+ the source image.
+
+ See: https://pillow.readthedocs.io/en/latest/PIL.html#PIL.ImageTransform.ExtentTransform
+ """
+
+ def __init__(self, src_rect, output_size, interp=Image.BILINEAR, fill=0):
+ """
+ Args:
+ src_rect (x0, y0, x1, y1): src coordinates
+ output_size (h, w): dst image size
+ interp: PIL interpolation methods
+ fill: Fill color used when src_rect extends outside image
+ """
+ super().__init__()
+ self._set_attributes(locals())
+
+ def apply_image(self, img, interp=None):
+ h, w = self.output_size
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ pil_image = Image.fromarray(img[:, :, 0], mode="L")
+ else:
+ pil_image = Image.fromarray(img)
+ pil_image = pil_image.transform(
+ size=(w, h),
+ method=Image.EXTENT,
+ data=self.src_rect,
+ resample=interp if interp else self.interp,
+ fill=self.fill,
+ )
+ ret = np.asarray(pil_image)
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ ret = np.expand_dims(ret, -1)
+ return ret
+
+ def apply_coords(self, coords):
+ # Transform image center from source coordinates into output coordinates
+ # and then map the new origin to the corner of the output image.
+ h, w = self.output_size
+ x0, y0, x1, y1 = self.src_rect
+ new_coords = coords.astype(np.float32)
+ new_coords[:, 0] -= 0.5 * (x0 + x1)
+ new_coords[:, 1] -= 0.5 * (y0 + y1)
+ new_coords[:, 0] *= w / (x1 - x0)
+ new_coords[:, 1] *= h / (y1 - y0)
+ new_coords[:, 0] += 0.5 * w
+ new_coords[:, 1] += 0.5 * h
+ return new_coords
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
+ return segmentation
+
+
+class ResizeTransform(Transform):
+ """
+ Resize the image to a target size.
+ """
+
+ def __init__(self, h, w, new_h, new_w, interp=None):
+ """
+ Args:
+ h, w (int): original image size
+ new_h, new_w (int): new image size
+ interp: PIL interpolation methods, defaults to bilinear.
+ """
+ # TODO decide on PIL vs opencv
+ super().__init__()
+ if interp is None:
+ interp = Image.BILINEAR
+ self._set_attributes(locals())
+
+ def apply_image(self, img, interp=None):
+ assert img.shape[:2] == (self.h, self.w)
+ assert len(img.shape) <= 4
+ interp_method = interp if interp is not None else self.interp
+
+ if img.dtype == np.uint8:
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ pil_image = Image.fromarray(img[:, :, 0], mode="L")
+ else:
+ pil_image = Image.fromarray(img)
+ pil_image = pil_image.resize((self.new_w, self.new_h), interp_method)
+ ret = np.asarray(pil_image)
+ if len(img.shape) > 2 and img.shape[2] == 1:
+ ret = np.expand_dims(ret, -1)
+ else:
+ # PIL only supports uint8
+ if any(x < 0 for x in img.strides):
+ img = np.ascontiguousarray(img)
+ img = torch.from_numpy(img)
+ shape = list(img.shape)
+ shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
+ img = img.view(shape_4d).permute(2, 3, 0, 1) # hw(c) -> nchw
+ _PIL_RESIZE_TO_INTERPOLATE_MODE = {
+ Image.NEAREST: "nearest",
+ Image.BILINEAR: "bilinear",
+ Image.BICUBIC: "bicubic",
+ }
+ mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[interp_method]
+ align_corners = None if mode == "nearest" else False
+ img = F.interpolate(
+ img, (self.new_h, self.new_w), mode=mode, align_corners=align_corners
+ )
+ shape[:2] = (self.new_h, self.new_w)
+ ret = img.permute(2, 3, 0, 1).view(shape).numpy() # nchw -> hw(c)
+
+ return ret
+
+ def apply_coords(self, coords):
+ coords[:, 0] = coords[:, 0] * (self.new_w * 1.0 / self.w)
+ coords[:, 1] = coords[:, 1] * (self.new_h * 1.0 / self.h)
+ return coords
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
+ return segmentation
+
+ def inverse(self):
+ return ResizeTransform(self.new_h, self.new_w, self.h, self.w, self.interp)
+
+
+class RotationTransform(Transform):
+ """
+ Return a copy of the image, rotated the given number of degrees
+ counter clockwise around the given center (the image center by default).
+ """
+
+ def __init__(self, h, w, angle, expand=True, center=None, interp=None):
+ """
+ Args:
+ h, w (int): original image size
+ angle (float): degrees for rotation
+ expand (bool): choose if the image should be resized to fit the whole
+ rotated image (default), or simply cropped
+ center (tuple (width, height)): coordinates of the rotation center
+ if left as None, the rotation center defaults to the center of the image;
+ center has no effect if expand=True because it only affects shifting
+ interp: cv2 interpolation method, default cv2.INTER_LINEAR
+ """
+ super().__init__()
+ image_center = np.array((w / 2, h / 2))
+ if center is None:
+ center = image_center
+ if interp is None:
+ interp = cv2.INTER_LINEAR
+ abs_cos, abs_sin = (abs(np.cos(np.deg2rad(angle))), abs(np.sin(np.deg2rad(angle))))
+ if expand:
+ # find the new width and height bounds
+ bound_w, bound_h = np.rint(
+ [h * abs_sin + w * abs_cos, h * abs_cos + w * abs_sin]
+ ).astype(int)
+ else:
+ bound_w, bound_h = w, h
+
+ self._set_attributes(locals())
+ self.rm_coords = self.create_rotation_matrix()
+ # Needed because of this problem https://github.com/opencv/opencv/issues/11784
+ self.rm_image = self.create_rotation_matrix(offset=-0.5)
+
+ def apply_image(self, img, interp=None):
+ """
+ img should be a numpy array, formatted as Height * Width * Nchannels
+ """
+ if len(img) == 0 or self.angle % 360 == 0:
+ return img
+ assert img.shape[:2] == (self.h, self.w)
+ interp = interp if interp is not None else self.interp
+ return cv2.warpAffine(img, self.rm_image, (self.bound_w, self.bound_h), flags=interp)
+
+ def apply_coords(self, coords):
+ """
+ coords should be an N * 2 array-like, containing N pairs of (x, y) points
+ """
+ coords = np.asarray(coords, dtype=float)
+ if len(coords) == 0 or self.angle % 360 == 0:
+ return coords
+ return cv2.transform(coords[:, np.newaxis, :], self.rm_coords)[:, 0, :]
+
+ def apply_segmentation(self, segmentation):
+ segmentation = self.apply_image(segmentation, interp=cv2.INTER_NEAREST)
+ return segmentation
+
+ def create_rotation_matrix(self, offset=0):
+ center = (self.center[0] + offset, self.center[1] + offset)
+ rm = cv2.getRotationMatrix2D(tuple(center), self.angle, 1)
+ if self.expand:
+ # Find the coordinates of the center of rotation in the new image
+ # The only point for which we know the future coordinates is the center of the image
+ rot_im_center = cv2.transform(self.image_center[None, None, :] + offset, rm)[0, 0, :]
+ new_center = np.array([self.bound_w / 2, self.bound_h / 2]) + offset - rot_im_center
+ # shift the rotation center to the new coordinates
+ rm[:, 2] += new_center
+ return rm
+
+ def inverse(self):
+ """
+ The inverse is to rotate it back with expand, and crop to get the original shape.
+ """
+ if not self.expand: # Not possible to invert if a part of the image is lost
+ raise NotImplementedError()
+ rotation = RotationTransform(
+ self.bound_h, self.bound_w, -self.angle, True, None, self.interp
+ )
+ crop = CropTransform(
+ (rotation.bound_w - self.w) // 2, (rotation.bound_h - self.h) // 2, self.w, self.h
+ )
+ return TransformList([rotation, crop])
+
+
+class ColorTransform(Transform):
+ """
+ Generic wrapper for any photometric transforms.
+ These transformations should only affect the color space and
+ not the coordinate space of the image (e.g. annotation
+ coordinates such as bounding boxes should not be changed)
+ """
+
+ def __init__(self, op):
+ """
+ Args:
+ op (Callable): operation to be applied to the image,
+ which takes in an ndarray and returns an ndarray.
+ """
+ if not callable(op):
+ raise ValueError("op parameter should be callable")
+ super().__init__()
+ self._set_attributes(locals())
+
+ def apply_image(self, img):
+ return self.op(img)
+
+ def apply_coords(self, coords):
+ return coords
+
+ def inverse(self):
+ return NoOpTransform()
+
+ def apply_segmentation(self, segmentation):
+ return segmentation
+
+
+class PILColorTransform(ColorTransform):
+ """
+ Generic wrapper for PIL Photometric image transforms,
+ which affect the color space and not the coordinate
+ space of the image
+ """
+
+ def __init__(self, op):
+ """
+ Args:
+ op (Callable): operation to be applied to the image,
+ which takes in a PIL Image and returns a transformed
+ PIL Image.
+ For reference on possible operations see:
+ - https://pillow.readthedocs.io/en/stable/
+ """
+ if not callable(op):
+ raise ValueError("op parameter should be callable")
+ super().__init__(op)
+
+ def apply_image(self, img):
+ img = Image.fromarray(img)
+ return np.asarray(super().apply_image(img))
+
+
+def HFlip_rotated_box(transform, rotated_boxes):
+ """
+ Apply the horizontal flip transform on rotated boxes.
+
+ Args:
+ rotated_boxes (ndarray): Nx5 floating point array of
+ (x_center, y_center, width, height, angle_degrees) format
+ in absolute coordinates.
+ """
+ # Transform x_center
+ rotated_boxes[:, 0] = transform.width - rotated_boxes[:, 0]
+ # Transform angle
+ rotated_boxes[:, 4] = -rotated_boxes[:, 4]
+ return rotated_boxes
+
+
+def Resize_rotated_box(transform, rotated_boxes):
+ """
+ Apply the resizing transform on rotated boxes. For details of how these (approximation)
+ formulas are derived, please refer to :meth:`RotatedBoxes.scale`.
+
+ Args:
+ rotated_boxes (ndarray): Nx5 floating point array of
+ (x_center, y_center, width, height, angle_degrees) format
+ in absolute coordinates.
+ """
+ scale_factor_x = transform.new_w * 1.0 / transform.w
+ scale_factor_y = transform.new_h * 1.0 / transform.h
+ rotated_boxes[:, 0] *= scale_factor_x
+ rotated_boxes[:, 1] *= scale_factor_y
+ theta = rotated_boxes[:, 4] * np.pi / 180.0
+ c = np.cos(theta)
+ s = np.sin(theta)
+ rotated_boxes[:, 2] *= np.sqrt(np.square(scale_factor_x * c) + np.square(scale_factor_y * s))
+ rotated_boxes[:, 3] *= np.sqrt(np.square(scale_factor_x * s) + np.square(scale_factor_y * c))
+ rotated_boxes[:, 4] = np.arctan2(scale_factor_x * s, scale_factor_y * c) * 180 / np.pi
+
+ return rotated_boxes
+
+
+HFlipTransform.register_type("rotated_box", HFlip_rotated_box)
+ResizeTransform.register_type("rotated_box", Resize_rotated_box)
+
+# not necessary any more with latest fvcore
+NoOpTransform.register_type("rotated_box", lambda t, x: x)
diff --git a/detectron2/engine/__init__.py b/detectron2/engine/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e6e4d673dedd10419b612755cfcb9744fc4999f8
--- /dev/null
+++ b/detectron2/engine/__init__.py
@@ -0,0 +1,19 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from .launch import *
+from .train_loop import *
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
+
+
+# prefer to let hooks and defaults live in separate namespaces (therefore not in __all__)
+# but still make them available here
+from .hooks import *
+from .defaults import (
+ create_ddp_model,
+ default_argument_parser,
+ default_setup,
+ default_writers,
+ DefaultPredictor,
+ DefaultTrainer,
+)
diff --git a/detectron2/engine/defaults.py b/detectron2/engine/defaults.py
new file mode 100644
index 0000000000000000000000000000000000000000..ff5625ae86364c9c47ff4f63f5607b992855c6e3
--- /dev/null
+++ b/detectron2/engine/defaults.py
@@ -0,0 +1,717 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+"""
+This file contains components with some default boilerplate logic that users may need
+in training / testing. They will not work for everyone, but many users may find them useful.
+
+The behavior of functions/classes in this file is subject to change,
+since they are meant to represent the "common default behavior" people need in their projects.
+"""
+
+import argparse
+import logging
+import os
+import sys
+import weakref
+from collections import OrderedDict
+from typing import Optional
+import torch
+from fvcore.nn.precise_bn import get_bn_modules
+from omegaconf import OmegaConf
+from torch.nn.parallel import DistributedDataParallel
+
+import detectron2.data.transforms as T
+from detectron2.checkpoint import DetectionCheckpointer
+from detectron2.config import CfgNode, LazyConfig
+from detectron2.data import (
+ MetadataCatalog,
+ build_detection_test_loader,
+ build_detection_train_loader,
+)
+from detectron2.evaluation import (
+ DatasetEvaluator,
+ inference_on_dataset,
+ print_csv_format,
+ verify_results,
+)
+from detectron2.modeling import build_model
+from detectron2.solver import build_lr_scheduler, build_optimizer
+from detectron2.utils import comm
+from detectron2.utils.collect_env import collect_env_info
+from detectron2.utils.env import seed_all_rng
+from detectron2.utils.events import CommonMetricPrinter, JSONWriter, TensorboardXWriter
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import setup_logger
+
+from . import hooks
+from .train_loop import AMPTrainer, SimpleTrainer, TrainerBase
+
+__all__ = [
+ "create_ddp_model",
+ "default_argument_parser",
+ "default_setup",
+ "default_writers",
+ "DefaultPredictor",
+ "DefaultTrainer",
+]
+
+
+def create_ddp_model(model, *, fp16_compression=False, **kwargs):
+ """
+ Create a DistributedDataParallel model if there are >1 processes.
+
+ Args:
+ model: a torch.nn.Module
+ fp16_compression: add fp16 compression hooks to the ddp object.
+ See more at https://pytorch.org/docs/stable/ddp_comm_hooks.html#torch.distributed.algorithms.ddp_comm_hooks.default_hooks.fp16_compress_hook
+ kwargs: other arguments of :module:`torch.nn.parallel.DistributedDataParallel`.
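+
+ Example (illustrative)::
+
+ model = create_ddp_model(model, broadcast_buffers=False)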
+ """ # noqa
+ if comm.get_world_size() == 1:
+ return model
+ if "device_ids" not in kwargs:
+ kwargs["device_ids"] = [comm.get_local_rank()]
+ ddp = DistributedDataParallel(model, **kwargs)
+ if fp16_compression:
+ from torch.distributed.algorithms.ddp_comm_hooks import default as comm_hooks
+
+ ddp.register_comm_hook(state=None, hook=comm_hooks.fp16_compress_hook)
+ return ddp
+
+
+def default_argument_parser(epilog=None):
+ """
+ Create a parser with some common arguments used by detectron2 users.
+
+ Args:
+ epilog (str): epilog passed to ArgumentParser describing the usage.
+
+ Returns:
+ argparse.ArgumentParser:
+ """
+ parser = argparse.ArgumentParser(
+ epilog=epilog
+ or f"""
+Examples:
+
+Run on single machine:
+ $ {sys.argv[0]} --num-gpus 8 --config-file cfg.yaml
+
+Change some config options:
+ $ {sys.argv[0]} --config-file cfg.yaml MODEL.WEIGHTS /path/to/weight.pth SOLVER.BASE_LR 0.001
+
+Run on multiple machines:
+ (machine0)$ {sys.argv[0]} --machine-rank 0 --num-machines 2 --dist-url [--other-flags]
+ (machine1)$ {sys.argv[0]} --machine-rank 1 --num-machines 2 --dist-url [--other-flags]
+""",
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ )
+ parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
+ parser.add_argument(
+ "--resume",
+ action="store_true",
+ help="Whether to attempt to resume from the checkpoint directory. "
+ "See documentation of `DefaultTrainer.resume_or_load()` for what it means.",
+ )
+ parser.add_argument("--eval-only", action="store_true", help="perform evaluation only")
+ parser.add_argument("--num-gpus", type=int, default=1, help="number of gpus *per machine*")
+ parser.add_argument("--num-machines", type=int, default=1, help="total number of machines")
+ parser.add_argument(
+ "--machine-rank", type=int, default=0, help="the rank of this machine (unique per machine)"
+ )
+
+ # PyTorch still may leave orphan processes in multi-gpu training.
+ # Therefore we use a deterministic way to obtain port,
+ # so that users are aware of orphan processes by seeing the port occupied.
+ port = 2**15 + 2**14 + hash(os.getuid() if sys.platform != "win32" else 1) % 2**14
+ parser.add_argument(
+ "--dist-url",
+ default="tcp://127.0.0.1:{}".format(port),
+ help="initialization URL for pytorch distributed backend. See "
+ "https://pytorch.org/docs/stable/distributed.html for details.",
+ )
+ parser.add_argument(
+ "opts",
+ help="""
+Modify config options at the end of the command. For Yacs configs, use
+space-separated "PATH.KEY VALUE" pairs.
+For python-based LazyConfig, use "path.key=value".
+ """.strip(),
+ default=None,
+ nargs=argparse.REMAINDER,
+ )
+ return parser
+
+
+def _try_get_key(cfg, *keys, default=None):
+ """
+ Try to select keys from cfg until the first key that exists. Otherwise return default.
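+ For example, ``_try_get_key(cfg, "OUTPUT_DIR", "train.output_dir")`` returns the
+ first of those keys that exists in ``cfg``, or ``None`` when neither is present.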
+ """
+ if isinstance(cfg, CfgNode):
+ cfg = OmegaConf.create(cfg.dump())
+ for k in keys:
+ none = object()
+ p = OmegaConf.select(cfg, k, default=none)
+ if p is not none:
+ return p
+ return default
+
+
+def _highlight(code, filename):
+ try:
+ import pygments
+ except ImportError:
+ return code
+
+ from pygments.lexers import Python3Lexer, YamlLexer
+ from pygments.formatters import Terminal256Formatter
+
+ lexer = Python3Lexer() if filename.endswith(".py") else YamlLexer()
+ code = pygments.highlight(code, lexer, Terminal256Formatter(style="monokai"))
+ return code
+
+
+def default_setup(cfg, args):
+ """
+ Perform some basic common setups at the beginning of a job, including:
+
+ 1. Set up the detectron2 logger
+ 2. Log basic information about environment, cmdline arguments, and config
+ 3. Backup the config to the output directory
+
+ Args:
+ cfg (CfgNode or omegaconf.DictConfig): the full config to be used
+ args (argparse.Namespace): the command line arguments to be logged
+ """
+ output_dir = _try_get_key(cfg, "OUTPUT_DIR", "output_dir", "train.output_dir")
+ if comm.is_main_process() and output_dir:
+ PathManager.mkdirs(output_dir)
+
+ rank = comm.get_rank()
+ setup_logger(output_dir, distributed_rank=rank, name="fvcore")
+ logger = setup_logger(output_dir, distributed_rank=rank)
+
+ logger.info("Rank of current process: {}. World size: {}".format(rank, comm.get_world_size()))
+ logger.info("Environment info:\n" + collect_env_info())
+
+ logger.info("Command line arguments: " + str(args))
+ if hasattr(args, "config_file") and args.config_file != "":
+ logger.info(
+ "Contents of args.config_file={}:\n{}".format(
+ args.config_file,
+ _highlight(PathManager.open(args.config_file, "r").read(), args.config_file),
+ )
+ )
+
+ if comm.is_main_process() and output_dir:
+ # Note: some of our scripts may expect the existence of
+ # config.yaml in output directory
+ path = os.path.join(output_dir, "config.yaml")
+ if isinstance(cfg, CfgNode):
+ logger.info("Running with full config:\n{}".format(_highlight(cfg.dump(), ".yaml")))
+ with PathManager.open(path, "w") as f:
+ f.write(cfg.dump())
+ else:
+ LazyConfig.save(cfg, path)
+ logger.info("Full config saved to {}".format(path))
+
+ # make sure each worker has a different, yet deterministic seed if specified
+ seed = _try_get_key(cfg, "SEED", "train.seed", default=-1)
+ seed_all_rng(None if seed < 0 else seed + rank)
+
+ # cudnn benchmark has a large overhead. It shouldn't be used considering the small size of
+ # a typical validation set.
+ if not (hasattr(args, "eval_only") and args.eval_only):
+ torch.backends.cudnn.benchmark = _try_get_key(
+ cfg, "CUDNN_BENCHMARK", "train.cudnn_benchmark", default=False
+ )
+
+
+def default_writers(output_dir: str, max_iter: Optional[int] = None):
+ """
+ Build a list of :class:`EventWriter` to be used.
+ It now consists of a :class:`CommonMetricPrinter`,
+ :class:`TensorboardXWriter` and :class:`JSONWriter`.
+
+ Args:
+ output_dir: directory to store JSON metrics and tensorboard events
+ max_iter: the total number of iterations
+
+ Returns:
+ list[EventWriter]: a list of :class:`EventWriter` objects.
+ """
+ PathManager.mkdirs(output_dir)
+ return [
+ # It may not always print what you want to see, since it prints "common" metrics only.
+ CommonMetricPrinter(max_iter),
+ JSONWriter(os.path.join(output_dir, "metrics.json")),
+ TensorboardXWriter(output_dir),
+ ]
+
+
+class DefaultPredictor:
+ """
+ Create a simple end-to-end predictor with the given config that runs on
+ single device for a single input image.
+
+ Compared to using the model directly, this class additionally does the following:
+
+ 1. Load checkpoint from `cfg.MODEL.WEIGHTS`.
+ 2. Always take BGR image as the input and apply conversion defined by `cfg.INPUT.FORMAT`.
+ 3. Apply resizing defined by `cfg.INPUT.{MIN,MAX}_SIZE_TEST`.
+ 4. Take one input image and produce a single output, instead of a batch.
+
+ This is meant for simple demo purposes, so it does the above steps automatically.
+ This is not meant for benchmarks or running complicated inference logic.
+ If you'd like to do anything more complicated, please refer to its source code as
+ examples to build and use the model manually.
+
+ Attributes:
+ metadata (Metadata): the metadata of the underlying dataset, obtained from
+ cfg.DATASETS.TEST.
+
+ Examples:
+ ::
+ pred = DefaultPredictor(cfg)
+ inputs = cv2.imread("input.jpg")
+ outputs = pred(inputs)
+ """
+
+ def __init__(self, cfg):
+ self.cfg = cfg.clone() # cfg can be modified by model
+ self.model = build_model(self.cfg)
+ self.model.eval()
+ if len(cfg.DATASETS.TEST):
+ self.metadata = MetadataCatalog.get(cfg.DATASETS.TEST[0])
+
+ checkpointer = DetectionCheckpointer(self.model)
+ checkpointer.load(cfg.MODEL.WEIGHTS)
+
+ self.aug = T.ResizeShortestEdge(
+ [cfg.INPUT.MIN_SIZE_TEST, cfg.INPUT.MIN_SIZE_TEST], cfg.INPUT.MAX_SIZE_TEST
+ )
+
+ self.input_format = cfg.INPUT.FORMAT
+ assert self.input_format in ["RGB", "BGR"], self.input_format
+
+ def __call__(self, original_image):
+ """
+ Args:
+ original_image (np.ndarray): an image of shape (H, W, C) (in BGR order).
+
+ Returns:
+ predictions (dict):
+ the output of the model for one image only.
+ See :doc:`/tutorials/models` for details about the format.
+ """
+ with torch.no_grad(): # https://github.com/sphinx-doc/sphinx/issues/4258
+ # Apply pre-processing to image.
+ if self.input_format == "RGB":
+ # whether the model expects BGR inputs or RGB
+ original_image = original_image[:, :, ::-1]
+ height, width = original_image.shape[:2]
+ image = self.aug.get_transform(original_image).apply_image(original_image)
+ image = torch.as_tensor(image.astype("float32").transpose(2, 0, 1))
+ image = image.to(self.cfg.MODEL.DEVICE)
+
+ inputs = {"image": image, "height": height, "width": width}
+
+ predictions = self.model([inputs])[0]
+ return predictions
+
+
+class DefaultTrainer(TrainerBase):
+ """
+ A trainer with default training logic. It does the following:
+
+ 1. Create a :class:`SimpleTrainer` using model, optimizer, dataloader
+ defined by the given config. Create a LR scheduler defined by the config.
+ 2. Load the last checkpoint or `cfg.MODEL.WEIGHTS`, if exists, when
+ `resume_or_load` is called.
+ 3. Register a few common hooks defined by the config.
+
+ It is created to simplify the **standard model training workflow** and reduce code boilerplate
+ for users who only need the standard training workflow, with standard features.
+ It means this class makes *many assumptions* about your training logic that
+ may easily become invalid in new research. In fact, any assumptions beyond those made in the
+ :class:`SimpleTrainer` are too much for research.
+
+ The code of this class has been annotated with the restrictive assumptions it makes.
+ When they do not work for you, you're encouraged to:
+
+ 1. Overwrite methods of this class, OR:
+ 2. Use :class:`SimpleTrainer`, which only does minimal SGD training and
+ nothing else. You can then add your own hooks if needed. OR:
+ 3. Write your own training loop similar to `tools/plain_train_net.py`.
+
+ See the :doc:`/tutorials/training` tutorials for more details.
+
+ Note that the behavior of this class, like other functions/classes in
+ this file, is not stable, since it is meant to represent the "common default behavior".
+ It is only guaranteed to work well with the standard models and training workflow in detectron2.
+ To obtain more stable behavior, write your own training logic with other public APIs.
+
+ Examples:
+ ::
+ trainer = DefaultTrainer(cfg)
+ trainer.resume_or_load() # load last checkpoint or MODEL.WEIGHTS
+ trainer.train()
+
+ Attributes:
+ scheduler:
+ checkpointer (DetectionCheckpointer):
+ cfg (CfgNode):
+ """
+
+ def __init__(self, cfg):
+ """
+ Args:
+ cfg (CfgNode):
+ """
+ super().__init__()
+ logger = logging.getLogger("detectron2")
+ if not logger.isEnabledFor(logging.INFO): # setup_logger is not called for d2
+ setup_logger()
+ cfg = DefaultTrainer.auto_scale_workers(cfg, comm.get_world_size())
+
+ # Assume these objects must be constructed in this order.
+ model = self.build_model(cfg)
+ optimizer = self.build_optimizer(cfg, model)
+ data_loader = self.build_train_loader(cfg)
+
+ model = create_ddp_model(model, broadcast_buffers=False)
+ self._trainer = (AMPTrainer if cfg.SOLVER.AMP.ENABLED else SimpleTrainer)(
+ model, data_loader, optimizer
+ )
+
+ self.scheduler = self.build_lr_scheduler(cfg, optimizer)
+ self.checkpointer = DetectionCheckpointer(
+ # Assume you want to save checkpoints together with logs/statistics
+ model,
+ cfg.OUTPUT_DIR,
+ trainer=weakref.proxy(self),
+ )
+ self.start_iter = 0
+ self.max_iter = cfg.SOLVER.MAX_ITER
+ self.cfg = cfg
+
+ self.register_hooks(self.build_hooks())
+
+ def resume_or_load(self, resume=True):
+ """
+ If `resume==True` and `cfg.OUTPUT_DIR` contains the last checkpoint (defined by
+ a `last_checkpoint` file), resume from the file. Resuming means loading all
+ available states (e.g. optimizer and scheduler) and updating the iteration counter
+ from the checkpoint. ``cfg.MODEL.WEIGHTS`` will not be used.
+
+ Otherwise, this is treated as an independent training run. The method will load model
+ weights from the file `cfg.MODEL.WEIGHTS` (but will not load other states) and start
+ from iteration 0.
+
+ Args:
+ resume (bool): whether to do resume or not
+ """
+ self.checkpointer.resume_or_load(self.cfg.MODEL.WEIGHTS, resume=resume)
+ if resume and self.checkpointer.has_checkpoint():
+ # The checkpoint stores the training iteration that just finished, thus we start
+ # at the next iteration
+ self.start_iter = self.iter + 1
+
+ def build_hooks(self):
+ """
+ Build a list of default hooks, including timing, evaluation,
+ checkpointing, lr scheduling, precise BN, writing events.
+
+ Returns:
+ list[HookBase]:
+ """
+ cfg = self.cfg.clone()
+ cfg.defrost()
+ cfg.DATALOADER.NUM_WORKERS = 0 # save some memory and time for PreciseBN
+
+ ret = [
+ hooks.IterationTimer(),
+ hooks.LRScheduler(),
+ hooks.PreciseBN(
+ # Run at the same freq as (but before) evaluation.
+ cfg.TEST.EVAL_PERIOD,
+ self.model,
+ # Build a new data loader to not affect training
+ self.build_train_loader(cfg),
+ cfg.TEST.PRECISE_BN.NUM_ITER,
+ )
+ if cfg.TEST.PRECISE_BN.ENABLED and get_bn_modules(self.model)
+ else None,
+ ]
+
+ # Do PreciseBN before the checkpointer, because it updates the model and needs to
+ # be saved by the checkpointer.
+ # This is not always the best: if checkpointing has a different frequency,
+ # some checkpoints may have more precise statistics than others.
+ if comm.is_main_process():
+ ret.append(hooks.PeriodicCheckpointer(self.checkpointer, cfg.SOLVER.CHECKPOINT_PERIOD))
+
+ def test_and_save_results():
+ self._last_eval_results = self.test(self.cfg, self.model)
+ return self._last_eval_results
+
+ # Do evaluation after checkpointer, because then if it fails,
+ # we can use the saved checkpoint to debug.
+ ret.append(hooks.EvalHook(cfg.TEST.EVAL_PERIOD, test_and_save_results))
+
+ if comm.is_main_process():
+ # Here the default print/log frequency of each writer is used.
+ # run writers in the end, so that evaluation metrics are written
+ ret.append(hooks.PeriodicWriter(self.build_writers(), period=20))
+ return ret
+
+ def build_writers(self):
+ """
+ Build a list of writers to be used using :func:`default_writers()`.
+ If you'd like a different list of writers, you can overwrite it in
+ your trainer.
+
+ Returns:
+ list[EventWriter]: a list of :class:`EventWriter` objects.
+ """
+ return default_writers(self.cfg.OUTPUT_DIR, self.max_iter)
+
+ def train(self):
+ """
+ Run training.
+
+ Returns:
+ OrderedDict of results, if evaluation is enabled. Otherwise None.
+ """
+ super().train(self.start_iter, self.max_iter)
+ if len(self.cfg.TEST.EXPECTED_RESULTS) and comm.is_main_process():
+ assert hasattr(
+ self, "_last_eval_results"
+ ), "No evaluation results obtained during training!"
+ verify_results(self.cfg, self._last_eval_results)
+ return self._last_eval_results
+
+ def run_step(self):
+ self._trainer.iter = self.iter
+ self._trainer.run_step()
+
+ def state_dict(self):
+ ret = super().state_dict()
+ ret["_trainer"] = self._trainer.state_dict()
+ return ret
+
+ def load_state_dict(self, state_dict):
+ super().load_state_dict(state_dict)
+ self._trainer.load_state_dict(state_dict["_trainer"])
+
+ @classmethod
+ def build_model(cls, cfg):
+ """
+ Returns:
+ torch.nn.Module:
+
+ It now calls :func:`detectron2.modeling.build_model`.
+ Overwrite it if you'd like a different model.
+ """
+ model = build_model(cfg)
+ logger = logging.getLogger(__name__)
+ logger.info("Model:\n{}".format(model))
+ return model
+
+ @classmethod
+ def build_optimizer(cls, cfg, model):
+ """
+ Returns:
+ torch.optim.Optimizer:
+
+ It now calls :func:`detectron2.solver.build_optimizer`.
+ Overwrite it if you'd like a different optimizer.
+ """
+ return build_optimizer(cfg, model)
+
+ @classmethod
+ def build_lr_scheduler(cls, cfg, optimizer):
+ """
+ It now calls :func:`detectron2.solver.build_lr_scheduler`.
+ Overwrite it if you'd like a different scheduler.
+ """
+ return build_lr_scheduler(cfg, optimizer)
+
+ @classmethod
+ def build_train_loader(cls, cfg):
+ """
+ Returns:
+ iterable
+
+ It now calls :func:`detectron2.data.build_detection_train_loader`.
+ Overwrite it if you'd like a different data loader.
+ """
+ return build_detection_train_loader(cfg)
+
+ @classmethod
+ def build_test_loader(cls, cfg, dataset_name):
+ """
+ Returns:
+ iterable
+
+ It now calls :func:`detectron2.data.build_detection_test_loader`.
+ Overwrite it if you'd like a different data loader.
+ """
+ return build_detection_test_loader(cfg, dataset_name)
+
+ @classmethod
+ def build_evaluator(cls, cfg, dataset_name):
+ """
+ Returns:
+ DatasetEvaluator or None
+
+ It is not implemented by default.
+ """
+ raise NotImplementedError(
+ """
+If you want DefaultTrainer to automatically run evaluation,
+please implement `build_evaluator()` in subclasses (see train_net.py for example).
+Alternatively, you can call evaluation functions yourself (see Colab balloon tutorial for example).
+"""
+ )
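A sketch of the kind of subclass override the message above asks for, assuming a COCO-format dataset so that ``COCOEvaluator`` applies; other dataset types need other evaluators.

    from detectron2.evaluation import COCOEvaluator

    class Trainer(DefaultTrainer):
        @classmethod
        def build_evaluator(cls, cfg, dataset_name):
            # Assumes a COCO-format dataset registered under `dataset_name`.
            return COCOEvaluator(dataset_name, output_dir=cfg.OUTPUT_DIR)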
+
+ @classmethod
+ def test(cls, cfg, model, evaluators=None):
+ """
+ Evaluate the given model. The given model is expected to already contain
+ weights to evaluate.
+
+ Args:
+ cfg (CfgNode):
+ model (nn.Module):
+ evaluators (list[DatasetEvaluator] or None): if None, will call
+ :meth:`build_evaluator`. Otherwise, must have the same length as
+ ``cfg.DATASETS.TEST``.
+
+ Returns:
+ dict: a dict of result metrics
+ """
+ logger = logging.getLogger(__name__)
+ if isinstance(evaluators, DatasetEvaluator):
+ evaluators = [evaluators]
+ if evaluators is not None:
+ assert len(cfg.DATASETS.TEST) == len(evaluators), "{} != {}".format(
+ len(cfg.DATASETS.TEST), len(evaluators)
+ )
+
+ results = OrderedDict()
+ for idx, dataset_name in enumerate(cfg.DATASETS.TEST):
+ data_loader = cls.build_test_loader(cfg, dataset_name)
+ # When evaluators are passed in as arguments,
+ # implicitly assume that evaluators can be created before data_loader.
+ if evaluators is not None:
+ evaluator = evaluators[idx]
+ else:
+ try:
+ evaluator = cls.build_evaluator(cfg, dataset_name)
+ except NotImplementedError:
+ logger.warning(
+ "No evaluator found. Use `DefaultTrainer.test(evaluators=)`, "
+ "or implement its `build_evaluator` method."
+ )
+ results[dataset_name] = {}
+ continue
+ results_i = inference_on_dataset(model, data_loader, evaluator)
+ results[dataset_name] = results_i
+ if comm.is_main_process():
+ assert isinstance(
+ results_i, dict
+ ), "Evaluator must return a dict on the main process. Got {} instead.".format(
+ results_i
+ )
+ logger.info("Evaluation results for {} in csv format:".format(dataset_name))
+ print_csv_format(results_i)
+
+ if len(results) == 1:
+ results = list(results.values())[0]
+ return results
+
+ @staticmethod
+ def auto_scale_workers(cfg, num_workers: int):
+ """
+ When the config is defined for certain number of workers (according to
+ ``cfg.SOLVER.REFERENCE_WORLD_SIZE``) that's different from the number of
+ workers currently in use, returns a new cfg where the total batch size
+ is scaled so that the per-GPU batch size stays the same as the
+ original ``IMS_PER_BATCH // REFERENCE_WORLD_SIZE``.
+
+ Other config options are also scaled accordingly:
+ * training steps and warmup steps are scaled inversely proportionally.
+ * learning rate is scaled proportionally, following :paper:`ImageNet in 1h`.
+
+ For example, with the original config like the following:
+
+ .. code-block:: yaml
+
+ IMS_PER_BATCH: 16
+ BASE_LR: 0.1
+ REFERENCE_WORLD_SIZE: 8
+ MAX_ITER: 5000
+ STEPS: (4000,)
+ CHECKPOINT_PERIOD: 1000
+
+ When this config is used on 16 GPUs instead of the reference number 8,
+ calling this method will return a new config with:
+
+ .. code-block:: yaml
+
+ IMS_PER_BATCH: 32
+ BASE_LR: 0.2
+ REFERENCE_WORLD_SIZE: 16
+ MAX_ITER: 2500
+ STEPS: (2000,)
+ CHECKPOINT_PERIOD: 500
+
+ Note that both the original config and this new config can be trained on 16 GPUs.
+ It's up to the user whether to enable this feature (by setting ``REFERENCE_WORLD_SIZE``).
+
+ Returns:
+ CfgNode: a new config. Same as original if ``cfg.SOLVER.REFERENCE_WORLD_SIZE==0``.
+ """
+ old_world_size = cfg.SOLVER.REFERENCE_WORLD_SIZE
+ if old_world_size == 0 or old_world_size == num_workers:
+ return cfg
+ cfg = cfg.clone()
+ frozen = cfg.is_frozen()
+ cfg.defrost()
+
+ assert (
+ cfg.SOLVER.IMS_PER_BATCH % old_world_size == 0
+ ), "Invalid REFERENCE_WORLD_SIZE in config!"
+ scale = num_workers / old_world_size
+ bs = cfg.SOLVER.IMS_PER_BATCH = int(round(cfg.SOLVER.IMS_PER_BATCH * scale))
+ lr = cfg.SOLVER.BASE_LR = cfg.SOLVER.BASE_LR * scale
+ max_iter = cfg.SOLVER.MAX_ITER = int(round(cfg.SOLVER.MAX_ITER / scale))
+ warmup_iter = cfg.SOLVER.WARMUP_ITERS = int(round(cfg.SOLVER.WARMUP_ITERS / scale))
+ cfg.SOLVER.STEPS = tuple(int(round(s / scale)) for s in cfg.SOLVER.STEPS)
+ cfg.TEST.EVAL_PERIOD = int(round(cfg.TEST.EVAL_PERIOD / scale))
+ cfg.SOLVER.CHECKPOINT_PERIOD = int(round(cfg.SOLVER.CHECKPOINT_PERIOD / scale))
+ cfg.SOLVER.REFERENCE_WORLD_SIZE = num_workers # maintain invariant
+ logger = logging.getLogger(__name__)
+ logger.info(
+ f"Auto-scaling the config to batch_size={bs}, learning_rate={lr}, "
+ f"max_iter={max_iter}, warmup={warmup_iter}."
+ )
+
+ if frozen:
+ cfg.freeze()
+ return cfg
+
+
+# Access basic attributes from the underlying trainer
+for _attr in ["model", "data_loader", "optimizer"]:
+ setattr(
+ DefaultTrainer,
+ _attr,
+ property(
+ # getter
+ lambda self, x=_attr: getattr(self._trainer, x),
+ # setter
+ lambda self, value, x=_attr: setattr(self._trainer, x, value),
+ ),
+ )
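A minimal sketch of how the defaults above compose into a training entry point, in the style of ``tools/train_net.py``; the ``get_cfg()``-based setup and the config file path are placeholders, not part of this diff.

    from detectron2.config import get_cfg
    from detectron2.engine import DefaultTrainer, default_argument_parser, default_setup, launch

    def setup(args):
        # Assemble a frozen config from the file and command-line overrides.
        cfg = get_cfg()
        cfg.merge_from_file(args.config_file)
        cfg.merge_from_list(args.opts)
        cfg.freeze()
        default_setup(cfg, args)
        return cfg

    def main(args):
        cfg = setup(args)
        trainer = DefaultTrainer(cfg)
        trainer.resume_or_load(resume=args.resume)
        return trainer.train()

    if __name__ == "__main__":
        args = default_argument_parser().parse_args()
        launch(
            main,
            args.num_gpus,
            num_machines=args.num_machines,
            machine_rank=args.machine_rank,
            dist_url=args.dist_url,
            args=(args,),
        )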
diff --git a/detectron2/engine/hooks.py b/detectron2/engine/hooks.py
new file mode 100644
index 0000000000000000000000000000000000000000..fc37af0fd3a276eb389f7667be113b41ca53f012
--- /dev/null
+++ b/detectron2/engine/hooks.py
@@ -0,0 +1,690 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import datetime
+import itertools
+import logging
+import math
+import operator
+import os
+import tempfile
+import time
+import warnings
+from collections import Counter
+import torch
+from fvcore.common.checkpoint import Checkpointer
+from fvcore.common.checkpoint import PeriodicCheckpointer as _PeriodicCheckpointer
+from fvcore.common.param_scheduler import ParamScheduler
+from fvcore.common.timer import Timer
+from fvcore.nn.precise_bn import get_bn_modules, update_bn_stats
+
+import detectron2.utils.comm as comm
+from detectron2.evaluation.testing import flatten_results_dict
+from detectron2.solver import LRMultiplier
+from detectron2.solver import LRScheduler as _LRScheduler
+from detectron2.utils.events import EventStorage, EventWriter
+from detectron2.utils.file_io import PathManager
+
+from .train_loop import HookBase
+
+__all__ = [
+ "CallbackHook",
+ "IterationTimer",
+ "PeriodicWriter",
+ "PeriodicCheckpointer",
+ "BestCheckpointer",
+ "LRScheduler",
+ "AutogradProfiler",
+ "EvalHook",
+ "PreciseBN",
+ "TorchProfiler",
+ "TorchMemoryStats",
+]
+
+
+"""
+Implement some common hooks.
+"""
+
+
+class CallbackHook(HookBase):
+ """
+ Create a hook using callback functions provided by the user.
+ """
+
+ def __init__(self, *, before_train=None, after_train=None, before_step=None, after_step=None):
+ """
+ Each argument is a function that takes one argument: the trainer.
+ """
+ self._before_train = before_train
+ self._before_step = before_step
+ self._after_step = after_step
+ self._after_train = after_train
+
+ def before_train(self):
+ if self._before_train:
+ self._before_train(self.trainer)
+
+ def after_train(self):
+ if self._after_train:
+ self._after_train(self.trainer)
+ # The functions may be closures that hold reference to the trainer
+ # Therefore, delete them to avoid circular reference.
+ del self._before_train, self._after_train
+ del self._before_step, self._after_step
+
+ def before_step(self):
+ if self._before_step:
+ self._before_step(self.trainer)
+
+ def after_step(self):
+ if self._after_step:
+ self._after_step(self.trainer)
+
+
+class IterationTimer(HookBase):
+ """
+ Track the time spent for each iteration (each run_step call in the trainer).
+ Print a summary at the end of training.
+
+ This hook uses the time between the call to its :meth:`before_step`
+ and :meth:`after_step` methods.
+ Under the convention that :meth:`before_step` of all hooks should only
+ take a negligible amount of time, the :class:`IterationTimer` hook should be
+ placed at the beginning of the list of hooks to obtain accurate timing.
+ """
+
+ def __init__(self, warmup_iter=3):
+ """
+ Args:
+ warmup_iter (int): the number of iterations at the beginning to exclude
+ from timing.
+ """
+ self._warmup_iter = warmup_iter
+ self._step_timer = Timer()
+ self._start_time = time.perf_counter()
+ self._total_timer = Timer()
+
+ def before_train(self):
+ self._start_time = time.perf_counter()
+ self._total_timer.reset()
+ self._total_timer.pause()
+
+ def after_train(self):
+ logger = logging.getLogger(__name__)
+ total_time = time.perf_counter() - self._start_time
+ total_time_minus_hooks = self._total_timer.seconds()
+ hook_time = total_time - total_time_minus_hooks
+
+ num_iter = self.trainer.storage.iter + 1 - self.trainer.start_iter - self._warmup_iter
+
+ if num_iter > 0 and total_time_minus_hooks > 0:
+ # Speed is meaningful only after warmup
+ # NOTE this format is parsed by grep in some scripts
+ logger.info(
+ "Overall training speed: {} iterations in {} ({:.4f} s / it)".format(
+ num_iter,
+ str(datetime.timedelta(seconds=int(total_time_minus_hooks))),
+ total_time_minus_hooks / num_iter,
+ )
+ )
+
+ logger.info(
+ "Total training time: {} ({} on hooks)".format(
+ str(datetime.timedelta(seconds=int(total_time))),
+ str(datetime.timedelta(seconds=int(hook_time))),
+ )
+ )
+
+ def before_step(self):
+ self._step_timer.reset()
+ self._total_timer.resume()
+
+ def after_step(self):
+ # +1 because we're in after_step, the current step is done
+ # but not yet counted
+ iter_done = self.trainer.storage.iter - self.trainer.start_iter + 1
+ if iter_done >= self._warmup_iter:
+ sec = self._step_timer.seconds()
+ self.trainer.storage.put_scalars(time=sec)
+ else:
+ self._start_time = time.perf_counter()
+ self._total_timer.reset()
+
+ self._total_timer.pause()
+
+
+class PeriodicWriter(HookBase):
+ """
+ Write events to EventStorage (by calling ``writer.write()``) periodically.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ Note that ``period`` does not affect how data is smoothed by each writer.
+ """
+
+ def __init__(self, writers, period=20):
+ """
+ Args:
+ writers (list[EventWriter]): a list of EventWriter objects
+ period (int):
+ """
+ self._writers = writers
+ for w in writers:
+ assert isinstance(w, EventWriter), w
+ self._period = period
+
+ def after_step(self):
+ if (self.trainer.iter + 1) % self._period == 0 or (
+ self.trainer.iter == self.trainer.max_iter - 1
+ ):
+ for writer in self._writers:
+ writer.write()
+
+ def after_train(self):
+ for writer in self._writers:
+ # If any new data is found (e.g. produced by other after_train),
+ # write them before closing
+ writer.write()
+ writer.close()
+
+
+class PeriodicCheckpointer(_PeriodicCheckpointer, HookBase):
+ """
+ Same as :class:`detectron2.checkpoint.PeriodicCheckpointer`, but as a hook.
+
+ Note that when used as a hook,
+ it is unable to save additional data other than what's defined
+ by the given `checkpointer`.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ """
+
+ def before_train(self):
+ self.max_iter = self.trainer.max_iter
+
+ def after_step(self):
+ # No way to use **kwargs
+ self.step(self.trainer.iter)
+
+
+class BestCheckpointer(HookBase):
+ """
+ Checkpoints the best model weights based on a given metric.
+
+ This hook should be used in conjunction with, and executed after, the hook
+ that produces the metric, e.g. `EvalHook`.
+ """
+
+ def __init__(
+ self,
+ eval_period: int,
+ checkpointer: Checkpointer,
+ val_metric: str,
+ mode: str = "max",
+ file_prefix: str = "model_best",
+ ) -> None:
+ """
+ Args:
+ eval_period (int): the period `EvalHook` is set to run.
+ checkpointer: the checkpointer object used to save checkpoints.
+ val_metric (str): validation metric to track for best checkpoint, e.g. "bbox/AP50"
+ mode (str): one of {'max', 'min'}. controls whether the chosen val metric should be
+ maximized or minimized, e.g. for "bbox/AP50" it should be "max"
+ file_prefix (str): the prefix of checkpoint's filename, defaults to "model_best"
+ """
+ self._logger = logging.getLogger(__name__)
+ self._period = eval_period
+ self._val_metric = val_metric
+ assert mode in [
+ "max",
+ "min",
+ ], f'Mode "{mode}" to `BestCheckpointer` is unknown. It should be one of {"max", "min"}.'
+ if mode == "max":
+ self._compare = operator.gt
+ else:
+ self._compare = operator.lt
+ self._checkpointer = checkpointer
+ self._file_prefix = file_prefix
+ self.best_metric = None
+ self.best_iter = None
+
+ def _update_best(self, val, iteration):
+ if math.isnan(val) or math.isinf(val):
+ return False
+ self.best_metric = val
+ self.best_iter = iteration
+ return True
+
+ def _best_checking(self):
+ metric_tuple = self.trainer.storage.latest().get(self._val_metric)
+ if metric_tuple is None:
+ self._logger.warning(
+ f"Given val metric {self._val_metric} does not seem to be computed/stored."
+ " Will not be checkpointing based on it."
+ )
+ return
+ else:
+ latest_metric, metric_iter = metric_tuple
+
+ if self.best_metric is None:
+ if self._update_best(latest_metric, metric_iter):
+ additional_state = {"iteration": metric_iter}
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
+ self._logger.info(
+ f"Saved first model at {self.best_metric:0.5f} @ {self.best_iter} steps"
+ )
+ elif self._compare(latest_metric, self.best_metric):
+ additional_state = {"iteration": metric_iter}
+ self._checkpointer.save(f"{self._file_prefix}", **additional_state)
+ self._logger.info(
+ f"Saved best model as latest eval score for {self._val_metric} is "
+ f"{latest_metric:0.5f}, better than last best score "
+ f"{self.best_metric:0.5f} @ iteration {self.best_iter}."
+ )
+ self._update_best(latest_metric, metric_iter)
+ else:
+ self._logger.info(
+ f"Not saving as latest eval score for {self._val_metric} is {latest_metric:0.5f}, "
+ f"not better than best score {self.best_metric:0.5f} @ iteration {self.best_iter}."
+ )
+
+ def after_step(self):
+ # same conditions as `EvalHook`
+ next_iter = self.trainer.iter + 1
+ if (
+ self._period > 0
+ and next_iter % self._period == 0
+ and next_iter != self.trainer.max_iter
+ ):
+ self._best_checking()
+
+ def after_train(self):
+ # same conditions as `EvalHook`
+ if self.trainer.iter + 1 >= self.trainer.max_iter:
+ self._best_checking()
+
+
+class LRScheduler(HookBase):
+ """
+ A hook which executes a torch builtin LR scheduler and summarizes the LR.
+ It is executed after every iteration.
+ """
+
+ def __init__(self, optimizer=None, scheduler=None):
+ """
+ Args:
+ optimizer (torch.optim.Optimizer):
+ scheduler (torch.optim.LRScheduler or fvcore.common.param_scheduler.ParamScheduler):
+ if a :class:`ParamScheduler` object, it defines the multiplier over the base LR
+ in the optimizer.
+
+ If any argument is not given, will try to obtain it from the trainer.
+ """
+ self._optimizer = optimizer
+ self._scheduler = scheduler
+
+ def before_train(self):
+ self._optimizer = self._optimizer or self.trainer.optimizer
+ if isinstance(self.scheduler, ParamScheduler):
+ self._scheduler = LRMultiplier(
+ self._optimizer,
+ self.scheduler,
+ self.trainer.max_iter,
+ last_iter=self.trainer.iter - 1,
+ )
+ self._best_param_group_id = LRScheduler.get_best_param_group_id(self._optimizer)
+
+ @staticmethod
+ def get_best_param_group_id(optimizer):
+ # NOTE: some heuristics on what LR to summarize
+ # summarize the param group with the most parameters
+ largest_group = max(len(g["params"]) for g in optimizer.param_groups)
+
+ if largest_group == 1:
+ # If all groups have one parameter,
+ # then find the most common initial LR, and use it for summary
+ lr_count = Counter([g["lr"] for g in optimizer.param_groups])
+ lr = lr_count.most_common()[0][0]
+ for i, g in enumerate(optimizer.param_groups):
+ if g["lr"] == lr:
+ return i
+ else:
+ for i, g in enumerate(optimizer.param_groups):
+ if len(g["params"]) == largest_group:
+ return i
+
+ def after_step(self):
+ lr = self._optimizer.param_groups[self._best_param_group_id]["lr"]
+ self.trainer.storage.put_scalar("lr", lr, smoothing_hint=False)
+ self.scheduler.step()
+
+ @property
+ def scheduler(self):
+ return self._scheduler or self.trainer.scheduler
+
+ def state_dict(self):
+ if isinstance(self.scheduler, _LRScheduler):
+ return self.scheduler.state_dict()
+ return {}
+
+ def load_state_dict(self, state_dict):
+ if isinstance(self.scheduler, _LRScheduler):
+ logger = logging.getLogger(__name__)
+ logger.info("Loading scheduler from state_dict ...")
+ self.scheduler.load_state_dict(state_dict)
+
+
+class TorchProfiler(HookBase):
+ """
+ A hook which runs `torch.profiler.profile`.
+
+ Examples:
+ ::
+ hooks.TorchProfiler(
+ lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
+ )
+
+ The above example will run the profiler for iterations 10-20 and dump
+ results to ``OUTPUT_DIR``. We did not profile the first few iterations
+ because they are typically slower than the rest.
+ The result files can be loaded in the ``chrome://tracing`` page of the Chrome browser,
+ and the tensorboard results can be visualized with
+ ``tensorboard --logdir OUTPUT_DIR/log``
+ """
+
+ def __init__(self, enable_predicate, output_dir, *, activities=None, save_tensorboard=True):
+ """
+ Args:
+ enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
+ and returns whether to enable the profiler.
+ It will be called once every step, and can be used to select which steps to profile.
+ output_dir (str): the output directory to dump tracing files.
+ activities (iterable): same as in `torch.profiler.profile`.
+ save_tensorboard (bool): whether to save tensorboard visualizations at (output_dir)/log/
+ """
+ self._enable_predicate = enable_predicate
+ self._activities = activities
+ self._output_dir = output_dir
+ self._save_tensorboard = save_tensorboard
+
+ def before_step(self):
+ if self._enable_predicate(self.trainer):
+ if self._save_tensorboard:
+ on_trace_ready = torch.profiler.tensorboard_trace_handler(
+ os.path.join(
+ self._output_dir,
+ "log",
+ "profiler-tensorboard-iter{}".format(self.trainer.iter),
+ ),
+ f"worker{comm.get_rank()}",
+ )
+ else:
+ on_trace_ready = None
+ self._profiler = torch.profiler.profile(
+ activities=self._activities,
+ on_trace_ready=on_trace_ready,
+ record_shapes=True,
+ profile_memory=True,
+ with_stack=True,
+ with_flops=True,
+ )
+ self._profiler.__enter__()
+ else:
+ self._profiler = None
+
+ def after_step(self):
+ if self._profiler is None:
+ return
+ self._profiler.__exit__(None, None, None)
+ if not self._save_tensorboard:
+ PathManager.mkdirs(self._output_dir)
+ out_file = os.path.join(
+ self._output_dir, "profiler-trace-iter{}.json".format(self.trainer.iter)
+ )
+ if "://" not in out_file:
+ self._profiler.export_chrome_trace(out_file)
+ else:
+ # Support non-posix filesystems
+ with tempfile.TemporaryDirectory(prefix="detectron2_profiler") as d:
+ tmp_file = os.path.join(d, "tmp.json")
+ self._profiler.export_chrome_trace(tmp_file)
+ with open(tmp_file) as f:
+ content = f.read()
+ with PathManager.open(out_file, "w") as f:
+ f.write(content)
+
+
+class AutogradProfiler(TorchProfiler):
+ """
+ A hook which runs `torch.autograd.profiler.profile`.
+
+ Examples:
+ ::
+ hooks.AutogradProfiler(
+ lambda trainer: 10 < trainer.iter < 20, self.cfg.OUTPUT_DIR
+ )
+
+ The above example will run the profiler for iterations 10-20 and dump
+ results to ``OUTPUT_DIR``. We did not profile the first few iterations
+ because they are typically slower than the rest.
+ The result files can be loaded in the ``chrome://tracing`` page of the Chrome browser.
+
+ Note:
+ When used together with NCCL on older versions of GPUs,
+ autograd profiler may cause deadlock because it unnecessarily allocates
+ memory on every device it sees. The memory management calls, if
+ interleaved with NCCL calls, lead to deadlock on GPUs that do not
+ support ``cudaLaunchCooperativeKernelMultiDevice``.
+ """
+
+ def __init__(self, enable_predicate, output_dir, *, use_cuda=True):
+ """
+ Args:
+ enable_predicate (callable[trainer -> bool]): a function which takes a trainer,
+ and returns whether to enable the profiler.
+ It will be called once every step, and can be used to select which steps to profile.
+ output_dir (str): the output directory to dump tracing files.
+ use_cuda (bool): same as in `torch.autograd.profiler.profile`.
+ """
+ warnings.warn("AutogradProfiler has been deprecated in favor of TorchProfiler.")
+ self._enable_predicate = enable_predicate
+ self._use_cuda = use_cuda
+ self._output_dir = output_dir
+
+ def before_step(self):
+ if self._enable_predicate(self.trainer):
+ self._profiler = torch.autograd.profiler.profile(use_cuda=self._use_cuda)
+ self._profiler.__enter__()
+ else:
+ self._profiler = None
+
+
+class EvalHook(HookBase):
+ """
+ Run an evaluation function periodically, and at the end of training.
+
+ It is executed every ``eval_period`` iterations and after the last iteration.
+ """
+
+ def __init__(self, eval_period, eval_function, eval_after_train=True):
+ """
+ Args:
+ eval_period (int): the period to run `eval_function`. Set to 0 to
+ not evaluate periodically (but still evaluate after the last iteration
+ if `eval_after_train` is True).
+ eval_function (callable): a function which takes no arguments, and
+ returns a nested dict of evaluation metrics.
+ eval_after_train (bool): whether to evaluate after the last iteration
+
+ Note:
+ This hook must be enabled in all workers or in none of them.
+ If you would like only certain workers to perform evaluation,
+ give other workers a no-op function (`eval_function=lambda: None`).
+ """
+ self._period = eval_period
+ self._func = eval_function
+ self._eval_after_train = eval_after_train
+
+ def _do_eval(self):
+ results = self._func()
+
+ if results:
+ assert isinstance(
+ results, dict
+ ), "Eval function must return a dict. Got {} instead.".format(results)
+
+ flattened_results = flatten_results_dict(results)
+ for k, v in flattened_results.items():
+ try:
+ v = float(v)
+ except Exception as e:
+ raise ValueError(
+ "[EvalHook] eval_function should return a nested dict of float. "
+ "Got '{}: {}' instead.".format(k, v)
+ ) from e
+ self.trainer.storage.put_scalars(**flattened_results, smoothing_hint=False)
+
+ # Evaluation may take a different amount of time on each worker.
+ # A barrier makes them start the next iteration together.
+ comm.synchronize()
+
+ def after_step(self):
+ next_iter = self.trainer.iter + 1
+ if self._period > 0 and next_iter % self._period == 0:
+ # do the last eval in after_train
+ if next_iter != self.trainer.max_iter:
+ self._do_eval()
+
+ def after_train(self):
+ # This condition is to prevent the eval from running after a failed training
+ if self._eval_after_train and self.trainer.iter + 1 >= self.trainer.max_iter:
+ self._do_eval()
+ # func is likely a closure that holds reference to the trainer
+ # therefore we clean it to avoid circular reference in the end
+ del self._func
+
+
+class PreciseBN(HookBase):
+ """
+ The standard implementation of BatchNorm uses EMA in inference, which is
+ sometimes suboptimal.
+ This class computes the true average of statistics rather than the moving average,
+ and puts the true averages into every BN layer in the given model.
+
+ It is executed every ``period`` iterations and after the last iteration.
+ """
+
+ def __init__(self, period, model, data_loader, num_iter):
+ """
+ Args:
+ period (int): the period this hook is run, or 0 to not run during training.
+ The hook will always run in the end of training.
+ model (nn.Module): a module whose all BN layers in training mode will be
+ updated by precise BN.
+ Note that the user is responsible for ensuring that the BN layers to be
+ updated are in training mode when this hook is triggered.
+ data_loader (iterable): it will produce data to be run by `model(data)`.
+ num_iter (int): number of iterations used to compute the precise
+ statistics.
+ """
+ self._logger = logging.getLogger(__name__)
+ if len(get_bn_modules(model)) == 0:
+ self._logger.info(
+ "PreciseBN is disabled because model does not contain BN layers in training mode."
+ )
+ self._disabled = True
+ return
+
+ self._model = model
+ self._data_loader = data_loader
+ self._num_iter = num_iter
+ self._period = period
+ self._disabled = False
+
+ self._data_iter = None
+
+ def after_step(self):
+ next_iter = self.trainer.iter + 1
+ is_final = next_iter == self.trainer.max_iter
+ if is_final or (self._period > 0 and next_iter % self._period == 0):
+ self.update_stats()
+
+ def update_stats(self):
+ """
+ Update the model with precise statistics. Users can manually call this method.
+ """
+ if self._disabled:
+ return
+
+ if self._data_iter is None:
+ self._data_iter = iter(self._data_loader)
+
+ def data_loader():
+ for num_iter in itertools.count(1):
+ if num_iter % 100 == 0:
+ self._logger.info(
+ "Running precise-BN ... {}/{} iterations.".format(num_iter, self._num_iter)
+ )
+ # This way we can reuse the same iterator
+ yield next(self._data_iter)
+
+ with EventStorage(): # capture events in a new storage to discard them
+ self._logger.info(
+ "Running precise-BN for {} iterations... ".format(self._num_iter)
+ + "Note that this could produce different statistics every time."
+ )
+ update_bn_stats(self._model, data_loader(), self._num_iter)
+
+
+class TorchMemoryStats(HookBase):
+ """
+ Writes PyTorch's CUDA memory statistics periodically.
+ """
+
+ def __init__(self, period=20, max_runs=10):
+ """
+ Args:
+ period (int): Output stats every ``period`` iterations
+ max_runs (int): Stop the logging after ``max_runs`` outputs
+ """
+
+ self._logger = logging.getLogger(__name__)
+ self._period = period
+ self._max_runs = max_runs
+ self._runs = 0
+
+ def after_step(self):
+ if self._runs > self._max_runs:
+ return
+
+ if (self.trainer.iter + 1) % self._period == 0 or (
+ self.trainer.iter == self.trainer.max_iter - 1
+ ):
+ if torch.cuda.is_available():
+ max_reserved_mb = torch.cuda.max_memory_reserved() / 1024.0 / 1024.0
+ reserved_mb = torch.cuda.memory_reserved() / 1024.0 / 1024.0
+ max_allocated_mb = torch.cuda.max_memory_allocated() / 1024.0 / 1024.0
+ allocated_mb = torch.cuda.memory_allocated() / 1024.0 / 1024.0
+
+ self._logger.info(
+ (
+ " iter: {} "
+ " max_reserved_mem: {:.0f}MB "
+ " reserved_mem: {:.0f}MB "
+ " max_allocated_mem: {:.0f}MB "
+ " allocated_mem: {:.0f}MB "
+ ).format(
+ self.trainer.iter,
+ max_reserved_mb,
+ reserved_mb,
+ max_allocated_mb,
+ allocated_mb,
+ )
+ )
+
+ self._runs += 1
+ if self._runs == self._max_runs:
+ mem_summary = torch.cuda.memory_summary()
+ self._logger.info("\n" + mem_summary)
+
+ torch.cuda.reset_peak_memory_stats()
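A sketch of pairing ``EvalHook`` with ``BestCheckpointer`` on an existing trainer; ``trainer`` and the metric key ``"bbox/AP"`` are assumptions for illustration, and the key must match what the evaluation function actually reports.

    from detectron2.engine import hooks

    def eval_and_store():
        # Returns a nested dict of metrics; EvalHook flattens and logs it.
        return trainer.test(trainer.cfg, trainer.model)

    # BestCheckpointer is registered after EvalHook so it sees the freshly logged metric.
    trainer.register_hooks([
        hooks.EvalHook(trainer.cfg.TEST.EVAL_PERIOD, eval_and_store),
        hooks.BestCheckpointer(
            eval_period=trainer.cfg.TEST.EVAL_PERIOD,
            checkpointer=trainer.checkpointer,
            val_metric="bbox/AP",
            mode="max",
        ),
    ])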
diff --git a/detectron2/engine/launch.py b/detectron2/engine/launch.py
new file mode 100644
index 0000000000000000000000000000000000000000..7052c5040e4d9e6553a1b371518cb53fb056524e
--- /dev/null
+++ b/detectron2/engine/launch.py
@@ -0,0 +1,123 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+from datetime import timedelta
+import torch
+import torch.distributed as dist
+import torch.multiprocessing as mp
+
+from detectron2.utils import comm
+
+__all__ = ["DEFAULT_TIMEOUT", "launch"]
+
+DEFAULT_TIMEOUT = timedelta(minutes=30)
+
+
+def _find_free_port():
+ import socket
+
+ sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
+ # Binding to port 0 will cause the OS to find an available port for us
+ sock.bind(("", 0))
+ port = sock.getsockname()[1]
+ sock.close()
+ # NOTE: there is still a chance the port could be taken by other processes.
+ return port
+
+
+def launch(
+ main_func,
+ # Should be num_processes_per_machine, but kept for compatibility.
+ num_gpus_per_machine,
+ num_machines=1,
+ machine_rank=0,
+ dist_url=None,
+ args=(),
+ timeout=DEFAULT_TIMEOUT,
+):
+ """
+ Launch multi-process or distributed training.
+ This function must be called on all machines involved in the training.
+ It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine.
+
+ Args:
+ main_func: a function that will be called by `main_func(*args)`
+ num_gpus_per_machine (int): number of processes per machine. When
+ using GPUs, this should be the number of GPUs.
+ num_machines (int): the total number of machines
+ machine_rank (int): the rank of this machine
+ dist_url (str): url to connect to for distributed jobs, including protocol
+ e.g. "tcp://127.0.0.1:8686".
+ Can be set to "auto" to automatically select a free port on localhost
+ timeout (timedelta): timeout of the distributed workers
+ args (tuple): arguments passed to main_func
+ """
+ world_size = num_machines * num_gpus_per_machine
+ if world_size > 1:
+ # https://github.com/pytorch/pytorch/pull/14391
+ # TODO prctl in spawned processes
+
+ if dist_url == "auto":
+ assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs."
+ port = _find_free_port()
+ dist_url = f"tcp://127.0.0.1:{port}"
+ if num_machines > 1 and dist_url.startswith("file://"):
+ logger = logging.getLogger(__name__)
+ logger.warning(
+ "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://"
+ )
+
+ mp.start_processes(
+ _distributed_worker,
+ nprocs=num_gpus_per_machine,
+ args=(
+ main_func,
+ world_size,
+ num_gpus_per_machine,
+ machine_rank,
+ dist_url,
+ args,
+ timeout,
+ ),
+ daemon=False,
+ )
+ else:
+ main_func(*args)
+
+
+def _distributed_worker(
+ local_rank,
+ main_func,
+ world_size,
+ num_gpus_per_machine,
+ machine_rank,
+ dist_url,
+ args,
+ timeout=DEFAULT_TIMEOUT,
+):
+ has_gpu = torch.cuda.is_available()
+ if has_gpu:
+ assert num_gpus_per_machine <= torch.cuda.device_count()
+ global_rank = machine_rank * num_gpus_per_machine + local_rank
+ try:
+ dist.init_process_group(
+ backend="NCCL" if has_gpu else "GLOO",
+ init_method=dist_url,
+ world_size=world_size,
+ rank=global_rank,
+ timeout=timeout,
+ )
+ except Exception as e:
+ logger = logging.getLogger(__name__)
+ logger.error("Process group URL: {}".format(dist_url))
+ raise e
+
+ # Setup the local process group.
+ comm.create_local_process_group(num_gpus_per_machine)
+ if has_gpu:
+ torch.cuda.set_device(local_rank)
+
+ # synchronize is needed here to prevent a possible timeout after calling init_process_group
+ # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172
+ comm.synchronize()
+
+ main_func(*args)
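A sketch of launching a function on the GPUs of a single machine; the worker count and message are placeholders. With ``dist_url="auto"`` a free local port is picked, and inside the worker the usual ``detectron2.utils.comm`` helpers report the rank.

    from detectron2.engine import launch
    from detectron2.utils import comm

    def main(message):
        # Each spawned worker runs this function with its own rank.
        print(f"rank {comm.get_rank()}/{comm.get_world_size()}: {message}")

    if __name__ == "__main__":
        launch(main, num_gpus_per_machine=2, num_machines=1, machine_rank=0,
               dist_url="auto", args=("hello",))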
diff --git a/detectron2/engine/train_loop.py b/detectron2/engine/train_loop.py
new file mode 100644
index 0000000000000000000000000000000000000000..738a69de946ae7741e2e16d322592076b3d1014d
--- /dev/null
+++ b/detectron2/engine/train_loop.py
@@ -0,0 +1,530 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+import concurrent.futures
+import logging
+import numpy as np
+import time
+import weakref
+from typing import List, Mapping, Optional
+import torch
+from torch.nn.parallel import DataParallel, DistributedDataParallel
+
+import detectron2.utils.comm as comm
+from detectron2.utils.events import EventStorage, get_event_storage
+from detectron2.utils.logger import _log_api_usage
+
+__all__ = ["HookBase", "TrainerBase", "SimpleTrainer", "AMPTrainer"]
+
+
+class HookBase:
+ """
+ Base class for hooks that can be registered with :class:`TrainerBase`.
+
+ Each hook can implement 4 methods. The way they are called is demonstrated
+ in the following snippet:
+ ::
+ hook.before_train()
+ for iter in range(start_iter, max_iter):
+ hook.before_step()
+ trainer.run_step()
+ hook.after_step()
+ iter += 1
+ hook.after_train()
+
+ Notes:
+ 1. In the hook method, users can access ``self.trainer`` to access more
+ properties about the context (e.g., model, current iteration, or config
+ if using :class:`DefaultTrainer`).
+
+ 2. A hook that does something in :meth:`before_step` can often be
+ implemented equivalently in :meth:`after_step`.
+ If the hook takes non-trivial time, it is strongly recommended to
+ implement the hook in :meth:`after_step` instead of :meth:`before_step`.
+ The convention is that :meth:`before_step` should only take negligible time.
+
+ Following this convention will allow hooks that do care about the difference
+ between :meth:`before_step` and :meth:`after_step` (e.g., timer) to
+ function properly.
+
+ """
+
+ trainer: "TrainerBase" = None
+ """
+ A weak reference to the trainer object. Set by the trainer when the hook is registered.
+ """
+
+ def before_train(self):
+ """
+ Called before the first iteration.
+ """
+ pass
+
+ def after_train(self):
+ """
+ Called after the last iteration.
+ """
+ pass
+
+ def before_step(self):
+ """
+ Called before each iteration.
+ """
+ pass
+
+ def after_backward(self):
+ """
+ Called after the backward pass of each iteration.
+ """
+ pass
+
+ def after_step(self):
+ """
+ Called after each iteration.
+ """
+ pass
+
+ def state_dict(self):
+ """
+ Hooks are stateless by default, but can be made checkpointable by
+ implementing `state_dict` and `load_state_dict`.
+ """
+ return {}
+
+
+class TrainerBase:
+ """
+ Base class for iterative trainer with hooks.
+
+ The only assumption we make here is that the training runs in a loop.
+ A subclass can implement what the loop is.
+ We make no assumptions about the existence of a dataloader, optimizer, model, etc.
+
+ Attributes:
+ iter(int): the current iteration.
+
+ start_iter(int): The iteration to start with.
+ By convention the minimum possible value is 0.
+
+ max_iter(int): The iteration to end training.
+
+ storage(EventStorage): An EventStorage that's opened during the course of training.
+ """
+
+ def __init__(self) -> None:
+ self._hooks: List[HookBase] = []
+ self.iter: int = 0
+ self.start_iter: int = 0
+ self.max_iter: int
+ self.storage: EventStorage
+ _log_api_usage("trainer." + self.__class__.__name__)
+
+ def register_hooks(self, hooks: List[Optional[HookBase]]) -> None:
+ """
+ Register hooks to the trainer. The hooks are executed in the order
+ they are registered.
+
+ Args:
+ hooks (list[Optional[HookBase]]): list of hooks
+ """
+ hooks = [h for h in hooks if h is not None]
+ for h in hooks:
+ assert isinstance(h, HookBase)
+ # To avoid circular reference, hooks and trainer cannot own each other.
+ # This normally does not matter, but will cause memory leak if the
+ # involved objects contain __del__:
+ # See http://engineering.hearsaysocial.com/2013/06/16/circular-references-in-python/
+ h.trainer = weakref.proxy(self)
+ self._hooks.extend(hooks)
+
+ def train(self, start_iter: int, max_iter: int):
+ """
+ Args:
+ start_iter, max_iter (int): See docs above
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Starting training from iteration {}".format(start_iter))
+
+ self.iter = self.start_iter = start_iter
+ self.max_iter = max_iter
+
+ with EventStorage(start_iter) as self.storage:
+ try:
+ self.before_train()
+ for self.iter in range(start_iter, max_iter):
+ self.before_step()
+ self.run_step()
+ self.after_step()
+ # self.iter == max_iter can be used by `after_train` to
+ # tell whether the training successfully finished or failed
+ # due to exceptions.
+ self.iter += 1
+ except Exception:
+ logger.exception("Exception during training:")
+ raise
+ finally:
+ self.after_train()
+
+ def before_train(self):
+ for h in self._hooks:
+ h.before_train()
+
+ def after_train(self):
+ self.storage.iter = self.iter
+ for h in self._hooks:
+ h.after_train()
+
+ def before_step(self):
+ # Maintain the invariant that storage.iter == trainer.iter
+ # for the entire execution of each step
+ self.storage.iter = self.iter
+
+ for h in self._hooks:
+ h.before_step()
+
+ def after_backward(self):
+ for h in self._hooks:
+ h.after_backward()
+
+ def after_step(self):
+ for h in self._hooks:
+ h.after_step()
+
+ def run_step(self):
+ raise NotImplementedError
+
+ def state_dict(self):
+ ret = {"iteration": self.iter}
+ hooks_state = {}
+ for h in self._hooks:
+ sd = h.state_dict()
+ if sd:
+ name = type(h).__qualname__
+ if name in hooks_state:
+ # TODO handle repetitive stateful hooks
+ continue
+ hooks_state[name] = sd
+ if hooks_state:
+ ret["hooks"] = hooks_state
+ return ret
+
+ def load_state_dict(self, state_dict):
+ logger = logging.getLogger(__name__)
+ self.iter = state_dict["iteration"]
+ for key, value in state_dict.get("hooks", {}).items():
+ for h in self._hooks:
+ try:
+ name = type(h).__qualname__
+ except AttributeError:
+ continue
+ if name == key:
+ h.load_state_dict(value)
+ break
+ else:
+ logger.warning(f"Cannot find the hook '{key}', its state_dict is ignored.")
+
+
+class SimpleTrainer(TrainerBase):
+ """
+ A simple trainer for the most common type of task:
+ single-cost single-optimizer single-data-source iterative optimization,
+ optionally using data-parallelism.
+ It assumes that every step, you:
+
+ 1. Compute the loss with data from the data_loader.
+ 2. Compute the gradients with the above loss.
+ 3. Update the model with the optimizer.
+
+ All other tasks during training (checkpointing, logging, evaluation, LR schedule)
+ are maintained by hooks, which can be registered by :meth:`TrainerBase.register_hooks`.
+
+ If you want to do anything fancier than this,
+ either subclass TrainerBase and implement your own `run_step`,
+ or write your own training loop.
+ """
+
+ def __init__(
+ self,
+ model,
+ data_loader,
+ optimizer,
+ gather_metric_period=1,
+ zero_grad_before_forward=False,
+ async_write_metrics=False,
+ ):
+ """
+ Args:
+ model: a torch Module. Takes a data from data_loader and returns a
+ dict of losses.
+ data_loader: an iterable. Contains data to be used to call model.
+ optimizer: a torch optimizer.
+ gather_metric_period: an int. Every gather_metric_period iterations
+ the metrics are gathered from all the ranks to rank 0 and logged.
+ zero_grad_before_forward: whether to zero the gradients before the forward.
+ async_write_metrics: bool. If True, then write metrics asynchronously to improve
+ training speed
+ """
+ super().__init__()
+
+ """
+ We set the model to training mode in the trainer.
+ However it's valid to train a model that's in eval mode.
+ If you want your model (or a submodule of it) to behave
+ like evaluation during training, you can overwrite its train() method.
+ """
+ model.train()
+
+ self.model = model
+ self.data_loader = data_loader
+ # to access the data loader iterator, call `self._data_loader_iter`
+ self._data_loader_iter_obj = None
+ self.optimizer = optimizer
+ self.gather_metric_period = gather_metric_period
+ self.zero_grad_before_forward = zero_grad_before_forward
+ self.async_write_metrics = async_write_metrics
+ # Create a thread pool that can execute non-critical logic in run_step asynchronously.
+ # Use only 1 worker so tasks are executed in the order they are submitted.
+ self.concurrent_executor = concurrent.futures.ThreadPoolExecutor(max_workers=1)
+
+ def run_step(self):
+ """
+ Implement the standard training logic described above.
+ """
+ assert self.model.training, "[SimpleTrainer] model was changed to eval mode!"
+ start = time.perf_counter()
+ """
+ If you want to do something with the data, you can wrap the dataloader.
+ """
+ data = next(self._data_loader_iter)
+ data_time = time.perf_counter() - start
+
+ if self.zero_grad_before_forward:
+ """
+ If you need to accumulate gradients or do something similar, you can
+ wrap the optimizer with your custom `zero_grad()` method.
+ """
+ self.optimizer.zero_grad()
+
+ """
+ If you want to do something with the losses, you can wrap the model.
+ """
+ loss_dict = self.model(data)
+ if isinstance(loss_dict, torch.Tensor):
+ losses = loss_dict
+ loss_dict = {"total_loss": loss_dict}
+ else:
+ losses = sum(loss_dict.values())
+ if not self.zero_grad_before_forward:
+ """
+ If you need to accumulate gradients or do something similar, you can
+ wrap the optimizer with your custom `zero_grad()` method.
+ """
+ self.optimizer.zero_grad()
+ losses.backward()
+
+ self.after_backward()
+
+ if self.async_write_metrics:
+ # write metrics asynchronously
+ self.concurrent_executor.submit(
+ self._write_metrics, loss_dict, data_time, iter=self.iter
+ )
+ else:
+ self._write_metrics(loss_dict, data_time)
+
+ """
+ If you need gradient clipping/scaling or other processing, you can
+ wrap the optimizer with your custom `step()` method. But it is
+ suboptimal as explained in https://arxiv.org/abs/2006.15704 Sec 3.2.4
+ """
+ self.optimizer.step()
+
+ @property
+ def _data_loader_iter(self):
+ # only create the data loader iterator when it is used
+ if self._data_loader_iter_obj is None:
+ self._data_loader_iter_obj = iter(self.data_loader)
+ return self._data_loader_iter_obj
+
+ def reset_data_loader(self, data_loader_builder):
+ """
+ Delete and replace the current data loader with a new one, which will be created
+ by calling `data_loader_builder` (without argument).
+ """
+ del self.data_loader
+ data_loader = data_loader_builder()
+ self.data_loader = data_loader
+ self._data_loader_iter_obj = None
+
+ def _write_metrics(
+ self,
+ loss_dict: Mapping[str, torch.Tensor],
+ data_time: float,
+ prefix: str = "",
+ iter: Optional[int] = None,
+ ) -> None:
+ logger = logging.getLogger(__name__)
+
+ iter = self.iter if iter is None else iter
+ if (iter + 1) % self.gather_metric_period == 0:
+ try:
+ SimpleTrainer.write_metrics(loss_dict, data_time, iter, prefix)
+ except Exception:
+ logger.exception("Exception in writing metrics: ")
+ raise
+
+ @staticmethod
+ def write_metrics(
+ loss_dict: Mapping[str, torch.Tensor],
+ data_time: float,
+ cur_iter: int,
+ prefix: str = "",
+ ) -> None:
+ """
+ Args:
+ loss_dict (dict): dict of scalar losses
+ data_time (float): time taken by the dataloader iteration
+ data_time (float): time taken by the dataloader iteration
+ cur_iter (int): the iteration under which the metrics are logged
+ """
+ metrics_dict = {k: v.detach().cpu().item() for k, v in loss_dict.items()}
+ metrics_dict["data_time"] = data_time
+
+ storage = get_event_storage()
+ # Keep track of data time per rank
+ storage.put_scalar("rank_data_time", data_time, cur_iter=cur_iter)
+
+ # Gather metrics among all workers for logging
+ # This assumes we do DDP-style training, which is currently the only
+ # supported method in detectron2.
+ all_metrics_dict = comm.gather(metrics_dict)
+
+ if comm.is_main_process():
+ # data_time among workers can have high variance. The actual latency
+ # caused by data_time is the maximum among workers.
+ data_time = np.max([x.pop("data_time") for x in all_metrics_dict])
+ storage.put_scalar("data_time", data_time, cur_iter=cur_iter)
+
+ # average the remaining metrics
+ metrics_dict = {
+ k: np.mean([x[k] for x in all_metrics_dict]) for k in all_metrics_dict[0].keys()
+ }
+ total_losses_reduced = sum(metrics_dict.values())
+ if not np.isfinite(total_losses_reduced):
+ raise FloatingPointError(
+ f"Loss became infinite or NaN at iteration={cur_iter}!\n"
+ f"loss_dict = {metrics_dict}"
+ )
+
+ storage.put_scalar(
+ "{}total_loss".format(prefix), total_losses_reduced, cur_iter=cur_iter
+ )
+ if len(metrics_dict) > 1:
+ storage.put_scalars(cur_iter=cur_iter, **metrics_dict)
+
+ def state_dict(self):
+ ret = super().state_dict()
+ ret["optimizer"] = self.optimizer.state_dict()
+ return ret
+
+ def load_state_dict(self, state_dict):
+ super().load_state_dict(state_dict)
+ self.optimizer.load_state_dict(state_dict["optimizer"])
+
+ def after_train(self):
+ super().after_train()
+ self.concurrent_executor.shutdown(wait=True)
+
+
+class AMPTrainer(SimpleTrainer):
+ """
+ Like :class:`SimpleTrainer`, but uses PyTorch's native automatic mixed precision
+ in the training loop.
+ """
+
+ def __init__(
+ self,
+ model,
+ data_loader,
+ optimizer,
+ gather_metric_period=1,
+ zero_grad_before_forward=False,
+ grad_scaler=None,
+ precision: torch.dtype = torch.float16,
+ log_grad_scaler: bool = False,
+ async_write_metrics=False,
+ ):
+ """
+ Args:
+ model, data_loader, optimizer, gather_metric_period, zero_grad_before_forward,
+ async_write_metrics: same as in :class:`SimpleTrainer`.
+ grad_scaler: torch GradScaler to automatically scale gradients.
+ precision: torch.dtype as the target precision to cast to in computations
+ """
+ unsupported = "AMPTrainer does not support single-process multi-device training!"
+ if isinstance(model, DistributedDataParallel):
+ assert not (model.device_ids and len(model.device_ids) > 1), unsupported
+ assert not isinstance(model, DataParallel), unsupported
+
+ super().__init__(
+ model, data_loader, optimizer, gather_metric_period, zero_grad_before_forward
+ )
+
+ if grad_scaler is None:
+ from torch.cuda.amp import GradScaler
+
+ grad_scaler = GradScaler()
+ self.grad_scaler = grad_scaler
+ self.precision = precision
+ self.log_grad_scaler = log_grad_scaler
+
+ def run_step(self):
+ """
+ Implement the AMP training logic.
+ """
+ assert self.model.training, "[AMPTrainer] model was changed to eval mode!"
+ assert torch.cuda.is_available(), "[AMPTrainer] CUDA is required for AMP training!"
+ from torch.cuda.amp import autocast
+
+ start = time.perf_counter()
+ data = next(self._data_loader_iter)
+ data_time = time.perf_counter() - start
+
+ if self.zero_grad_before_forward:
+ self.optimizer.zero_grad()
+ with autocast(dtype=self.precision):
+ loss_dict = self.model(data)
+ if isinstance(loss_dict, torch.Tensor):
+ losses = loss_dict
+ loss_dict = {"total_loss": loss_dict}
+ else:
+ losses = sum(loss_dict.values())
+
+ if not self.zero_grad_before_forward:
+ self.optimizer.zero_grad()
+
+ self.grad_scaler.scale(losses).backward()
+
+ if self.log_grad_scaler:
+ storage = get_event_storage()
+ storage.put_scalar("[metric]grad_scaler", self.grad_scaler.get_scale())
+
+ self.after_backward()
+
+ if self.async_write_metrics:
+            # write metrics asynchronously
+ self.concurrent_executor.submit(
+ self._write_metrics, loss_dict, data_time, iter=self.iter
+ )
+ else:
+ self._write_metrics(loss_dict, data_time)
+
+ self.grad_scaler.step(self.optimizer)
+ self.grad_scaler.update()
+
+ def state_dict(self):
+ ret = super().state_dict()
+ ret["grad_scaler"] = self.grad_scaler.state_dict()
+ return ret
+
+ def load_state_dict(self, state_dict):
+ super().load_state_dict(state_dict)
+ self.grad_scaler.load_state_dict(state_dict["grad_scaler"])
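+
+
+# Illustrative usage sketch (comment only): assuming `model`, `data_loader` and
+# `optimizer` have already been built elsewhere, and using the hook/loop machinery
+# provided by the trainer base class earlier in this file, an AMP training run could
+# be driven roughly like this:
+#
+#   trainer = AMPTrainer(model, data_loader, optimizer, precision=torch.float16)
+#   trainer.register_hooks([...])  # e.g. checkpointing or LR scheduling hooks
+#   trainer.train(0, max_iter)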
diff --git a/detectron2/evaluation/__init__.py b/detectron2/evaluation/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..d96609e8f2261a6800fe85fcf3e1eaeaa44455c6
--- /dev/null
+++ b/detectron2/evaluation/__init__.py
@@ -0,0 +1,12 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .cityscapes_evaluation import CityscapesInstanceEvaluator, CityscapesSemSegEvaluator
+from .coco_evaluation import COCOEvaluator
+from .rotated_coco_evaluation import RotatedCOCOEvaluator
+from .evaluator import DatasetEvaluator, DatasetEvaluators, inference_context, inference_on_dataset
+from .lvis_evaluation import LVISEvaluator
+from .panoptic_evaluation import COCOPanopticEvaluator
+from .pascal_voc_evaluation import PascalVOCDetectionEvaluator
+from .sem_seg_evaluation import SemSegEvaluator
+from .testing import print_csv_format, verify_results
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/detectron2/evaluation/cityscapes_evaluation.py b/detectron2/evaluation/cityscapes_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cc7888f0f88ed9b44eae942353a9f4dd4b8782a
--- /dev/null
+++ b/detectron2/evaluation/cityscapes_evaluation.py
@@ -0,0 +1,197 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import glob
+import logging
+import numpy as np
+import os
+import tempfile
+from collections import OrderedDict
+import torch
+from PIL import Image
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+
+class CityscapesEvaluator(DatasetEvaluator):
+ """
+ Base class for evaluation using cityscapes API.
+ """
+
+ def __init__(self, dataset_name):
+ """
+ Args:
+ dataset_name (str): the name of the dataset.
+ It must have the following metadata associated with it:
+ "thing_classes", "gt_dir".
+ """
+ self._metadata = MetadataCatalog.get(dataset_name)
+ self._cpu_device = torch.device("cpu")
+ self._logger = logging.getLogger(__name__)
+
+ def reset(self):
+ self._working_dir = tempfile.TemporaryDirectory(prefix="cityscapes_eval_")
+ self._temp_dir = self._working_dir.name
+ # All workers will write to the same results directory
+ # TODO this does not work in distributed training
+ assert (
+ comm.get_local_size() == comm.get_world_size()
+        ), "CityscapesEvaluator currently does not work with multiple machines."
+ self._temp_dir = comm.all_gather(self._temp_dir)[0]
+ if self._temp_dir != self._working_dir.name:
+ self._working_dir.cleanup()
+ self._logger.info(
+ "Writing cityscapes results to temporary directory {} ...".format(self._temp_dir)
+ )
+
+
+class CityscapesInstanceEvaluator(CityscapesEvaluator):
+ """
+ Evaluate instance segmentation results on cityscapes dataset using cityscapes API.
+
+ Note:
+ * It does not work in multi-machine distributed training.
+ * It contains a synchronization, therefore has to be used on all ranks.
+ * Only the main process runs evaluation.
+ """
+
+ def process(self, inputs, outputs):
+ from cityscapesscripts.helpers.labels import name2label
+
+ for input, output in zip(inputs, outputs):
+ file_name = input["file_name"]
+ basename = os.path.splitext(os.path.basename(file_name))[0]
+ pred_txt = os.path.join(self._temp_dir, basename + "_pred.txt")
+
+ if "instances" in output:
+ output = output["instances"].to(self._cpu_device)
+ num_instances = len(output)
+ with open(pred_txt, "w") as fout:
+ for i in range(num_instances):
+ pred_class = output.pred_classes[i]
+ classes = self._metadata.thing_classes[pred_class]
+ class_id = name2label[classes].id
+ score = output.scores[i]
+ mask = output.pred_masks[i].numpy().astype("uint8")
+ png_filename = os.path.join(
+ self._temp_dir, basename + "_{}_{}.png".format(i, classes)
+ )
+
+ Image.fromarray(mask * 255).save(png_filename)
+ fout.write(
+ "{} {} {}\n".format(os.path.basename(png_filename), class_id, score)
+ )
+ else:
+ # Cityscapes requires a prediction file for every ground truth image.
+ with open(pred_txt, "w") as fout:
+ pass
+
+ def evaluate(self):
+ """
+ Returns:
+ dict: has a key "segm", whose value is a dict of "AP" and "AP50".
+ """
+ comm.synchronize()
+ if comm.get_rank() > 0:
+ return
+ import cityscapesscripts.evaluation.evalInstanceLevelSemanticLabeling as cityscapes_eval
+
+ self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
+
+ # set some global states in cityscapes evaluation API, before evaluating
+ cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
+ cityscapes_eval.args.predictionWalk = None
+ cityscapes_eval.args.JSONOutput = False
+ cityscapes_eval.args.colorized = False
+ cityscapes_eval.args.gtInstancesFile = os.path.join(self._temp_dir, "gtInstances.json")
+
+ # These lines are adopted from
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalInstanceLevelSemanticLabeling.py # noqa
+ gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
+ groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_instanceIds.png"))
+ assert len(
+ groundTruthImgList
+ ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
+ cityscapes_eval.args.groundTruthSearch
+ )
+ predictionImgList = []
+ for gt in groundTruthImgList:
+ predictionImgList.append(cityscapes_eval.getPrediction(gt, cityscapes_eval.args))
+ results = cityscapes_eval.evaluateImgLists(
+ predictionImgList, groundTruthImgList, cityscapes_eval.args
+ )["averages"]
+
+ ret = OrderedDict()
+ ret["segm"] = {"AP": results["allAp"] * 100, "AP50": results["allAp50%"] * 100}
+ self._working_dir.cleanup()
+ return ret
+
+
+class CityscapesSemSegEvaluator(CityscapesEvaluator):
+ """
+ Evaluate semantic segmentation results on cityscapes dataset using cityscapes API.
+
+ Note:
+ * It does not work in multi-machine distributed training.
+ * It contains a synchronization, therefore has to be used on all ranks.
+ * Only the main process runs evaluation.
+ """
+
+ def process(self, inputs, outputs):
+ from cityscapesscripts.helpers.labels import trainId2label
+
+ for input, output in zip(inputs, outputs):
+ file_name = input["file_name"]
+ basename = os.path.splitext(os.path.basename(file_name))[0]
+ pred_filename = os.path.join(self._temp_dir, basename + "_pred.png")
+
+ output = output["sem_seg"].argmax(dim=0).to(self._cpu_device).numpy()
+ pred = 255 * np.ones(output.shape, dtype=np.uint8)
+ for train_id, label in trainId2label.items():
+ if label.ignoreInEval:
+ continue
+ pred[output == train_id] = label.id
+ Image.fromarray(pred).save(pred_filename)
+
+ def evaluate(self):
+ comm.synchronize()
+ if comm.get_rank() > 0:
+ return
+ # Load the Cityscapes eval script *after* setting the required env var,
+ # since the script reads CITYSCAPES_DATASET into global variables at load time.
+ import cityscapesscripts.evaluation.evalPixelLevelSemanticLabeling as cityscapes_eval
+
+ self._logger.info("Evaluating results under {} ...".format(self._temp_dir))
+
+ # set some global states in cityscapes evaluation API, before evaluating
+ cityscapes_eval.args.predictionPath = os.path.abspath(self._temp_dir)
+ cityscapes_eval.args.predictionWalk = None
+ cityscapes_eval.args.JSONOutput = False
+ cityscapes_eval.args.colorized = False
+
+ # These lines are adopted from
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/evalPixelLevelSemanticLabeling.py # noqa
+ gt_dir = PathManager.get_local_path(self._metadata.gt_dir)
+ groundTruthImgList = glob.glob(os.path.join(gt_dir, "*", "*_gtFine_labelIds.png"))
+ assert len(
+ groundTruthImgList
+ ), "Cannot find any ground truth images to use for evaluation. Searched for: {}".format(
+ cityscapes_eval.args.groundTruthSearch
+ )
+ predictionImgList = []
+ for gt in groundTruthImgList:
+ predictionImgList.append(cityscapes_eval.getPrediction(cityscapes_eval.args, gt))
+ results = cityscapes_eval.evaluateImgLists(
+ predictionImgList, groundTruthImgList, cityscapes_eval.args
+ )
+ ret = OrderedDict()
+ ret["sem_seg"] = {
+ "IoU": 100.0 * results["averageScoreClasses"],
+ "iIoU": 100.0 * results["averageScoreInstClasses"],
+ "IoU_sup": 100.0 * results["averageScoreCategories"],
+ "iIoU_sup": 100.0 * results["averageScoreInstCategories"],
+ }
+ self._working_dir.cleanup()
+ return ret
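+
+
+# Illustrative usage sketch (comment only): a hypothetical evaluation run could pair
+# these evaluators with `inference_on_dataset` from detectron2.evaluation, e.g.
+#
+#   evaluator = CityscapesInstanceEvaluator("cityscapes_fine_instance_seg_val")
+#   results = inference_on_dataset(model, val_loader, evaluator)
+#
+# where `model` and `val_loader` are assumed to be built elsewhere, and the dataset
+# name is one example of a registered Cityscapes split.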
diff --git a/detectron2/evaluation/coco_evaluation.py b/detectron2/evaluation/coco_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe8142cda29613ce1cf78523e422bf598128f590
--- /dev/null
+++ b/detectron2/evaluation/coco_evaluation.py
@@ -0,0 +1,722 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import copy
+import io
+import itertools
+import json
+import logging
+import numpy as np
+import os
+import pickle
+from collections import OrderedDict
+import pycocotools.mask as mask_util
+import torch
+from pycocotools.coco import COCO
+from pycocotools.cocoeval import COCOeval
+from tabulate import tabulate
+
+import detectron2.utils.comm as comm
+from detectron2.config import CfgNode
+from detectron2.data import MetadataCatalog
+from detectron2.data.datasets.coco import convert_to_coco_json
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import create_small_table
+
+from .evaluator import DatasetEvaluator
+
+try:
+ from detectron2.evaluation.fast_eval_api import COCOeval_opt
+except ImportError:
+ COCOeval_opt = COCOeval
+
+
+class COCOEvaluator(DatasetEvaluator):
+ """
+ Evaluate AR for object proposals, AP for instance detection/segmentation, AP
+ for keypoint detection outputs using COCO's metrics.
+ See http://cocodataset.org/#detection-eval and
+ http://cocodataset.org/#keypoints-eval to understand its metrics.
+ The metrics range from 0 to 100 (instead of 0 to 1), where a -1 or NaN means
+ the metric cannot be computed (e.g. due to no predictions made).
+
+ In addition to COCO, this evaluator is able to support any bounding box detection,
+ instance segmentation, or keypoint detection dataset.
+ """
+
+ def __init__(
+ self,
+ dataset_name,
+ tasks=None,
+ distributed=True,
+ output_dir=None,
+ *,
+ max_dets_per_image=None,
+ use_fast_impl=True,
+ kpt_oks_sigmas=(),
+ allow_cached_coco=True,
+ ):
+ """
+ Args:
+ dataset_name (str): name of the dataset to be evaluated.
+ It must have either the following corresponding metadata:
+
+ "json_file": the path to the COCO format annotation
+
+ Or it must be in detectron2's standard dataset format
+ so it can be converted to COCO format automatically.
+ tasks (tuple[str]): tasks that can be evaluated under the given
+ configuration. A task is one of "bbox", "segm", "keypoints".
+ By default, will infer this automatically from predictions.
+ distributed (True): if True, will collect results from all ranks and run evaluation
+ in the main process.
+ Otherwise, will only evaluate the results in the current process.
+ output_dir (str): optional, an output directory to dump all
+ results predicted on the dataset. The dump contains two files:
+
+ 1. "instances_predictions.pth" a file that can be loaded with `torch.load` and
+ contains all the results in the format they are produced by the model.
+ 2. "coco_instances_results.json" a json file in COCO's result format.
+ max_dets_per_image (int): limit on the maximum number of detections per image.
+                By default in COCO, this limit is 100, but it can be customized
+ to be greater, as is needed in evaluation metrics AP fixed and AP pool
+ (see https://arxiv.org/pdf/2102.01066.pdf)
+ This doesn't affect keypoint evaluation.
+ use_fast_impl (bool): use a fast but **unofficial** implementation to compute AP.
+ Although the results should be very close to the official implementation in COCO
+ API, it is still recommended to compute results with the official API for use in
+ papers. The faster implementation also uses more RAM.
+ kpt_oks_sigmas (list[float]): The sigmas used to calculate keypoint OKS.
+ See http://cocodataset.org/#keypoints-eval
+ When empty, it will use the defaults in COCO.
+ Otherwise it should be the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
+ allow_cached_coco (bool): Whether to use cached coco json from previous validation
+ runs. You should set this to False if you need to use different validation data.
+ Defaults to True.
+ """
+ self._logger = logging.getLogger(__name__)
+ self._distributed = distributed
+ self._output_dir = output_dir
+
+ if use_fast_impl and (COCOeval_opt is COCOeval):
+ self._logger.info("Fast COCO eval is not built. Falling back to official COCO eval.")
+ use_fast_impl = False
+ self._use_fast_impl = use_fast_impl
+
+ # COCOeval requires the limit on the number of detections per image (maxDets) to be a list
+ # with at least 3 elements. The default maxDets in COCOeval is [1, 10, 100], in which the
+ # 3rd element (100) is used as the limit on the number of detections per image when
+ # evaluating AP. COCOEvaluator expects an integer for max_dets_per_image, so for COCOeval,
+ # we reformat max_dets_per_image into [1, 10, max_dets_per_image], based on the defaults.
+ if max_dets_per_image is None:
+ max_dets_per_image = [1, 10, 100]
+ else:
+ max_dets_per_image = [1, 10, max_dets_per_image]
+ self._max_dets_per_image = max_dets_per_image
+
+ if tasks is not None and isinstance(tasks, CfgNode):
+ kpt_oks_sigmas = (
+ tasks.TEST.KEYPOINT_OKS_SIGMAS if not kpt_oks_sigmas else kpt_oks_sigmas
+ )
+ self._logger.warn(
+ "COCO Evaluator instantiated using config, this is deprecated behavior."
+ " Please pass in explicit arguments instead."
+ )
+            self._tasks = None  # Inferring it from predictions should be better
+ else:
+ self._tasks = tasks
+
+ self._cpu_device = torch.device("cpu")
+
+ self._metadata = MetadataCatalog.get(dataset_name)
+ if not hasattr(self._metadata, "json_file"):
+ if output_dir is None:
+ raise ValueError(
+ "output_dir must be provided to COCOEvaluator "
+ "for datasets not in COCO format."
+ )
+ self._logger.info(f"Trying to convert '{dataset_name}' to COCO format ...")
+
+ cache_path = os.path.join(output_dir, f"{dataset_name}_coco_format.json")
+ self._metadata.json_file = cache_path
+ convert_to_coco_json(dataset_name, cache_path, allow_cached=allow_cached_coco)
+
+ json_file = PathManager.get_local_path(self._metadata.json_file)
+ with contextlib.redirect_stdout(io.StringIO()):
+ self._coco_api = COCO(json_file)
+
+ # Test set json files do not contain annotations (evaluation must be
+ # performed using the COCO evaluation server).
+ self._do_evaluation = "annotations" in self._coco_api.dataset
+ if self._do_evaluation:
+ self._kpt_oks_sigmas = kpt_oks_sigmas
+
+ def reset(self):
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a COCO model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ """
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
+ if "proposals" in output:
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
+ if len(prediction) > 1:
+ self._predictions.append(prediction)
+
+ def evaluate(self, img_ids=None):
+ """
+ Args:
+ img_ids: a list of image IDs to evaluate on. Default to None for the whole dataset
+ """
+ if self._distributed:
+ comm.synchronize()
+ predictions = comm.gather(self._predictions, dst=0)
+ predictions = list(itertools.chain(*predictions))
+
+ if not comm.is_main_process():
+ return {}
+ else:
+ predictions = self._predictions
+
+ if len(predictions) == 0:
+ self._logger.warning("[COCOEvaluator] Did not receive valid predictions.")
+ return {}
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "instances_predictions.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(predictions, f)
+
+ self._results = OrderedDict()
+ if "proposals" in predictions[0]:
+ self._eval_box_proposals(predictions)
+ if "instances" in predictions[0]:
+ self._eval_predictions(predictions, img_ids=img_ids)
+ # Copy so the caller can do whatever with results
+ return copy.deepcopy(self._results)
+
+ def _tasks_from_predictions(self, predictions):
+ """
+ Get COCO API "tasks" (i.e. iou_type) from COCO-format predictions.
+ """
+ tasks = {"bbox"}
+ for pred in predictions:
+ if "segmentation" in pred:
+ tasks.add("segm")
+ if "keypoints" in pred:
+ tasks.add("keypoints")
+ return sorted(tasks)
+
+ def _eval_predictions(self, predictions, img_ids=None):
+ """
+ Evaluate predictions. Fill self._results with the metrics of the tasks.
+ """
+ self._logger.info("Preparing results for COCO format ...")
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+ tasks = self._tasks or self._tasks_from_predictions(coco_results)
+
+ # unmap the category ids for COCO
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ dataset_id_to_contiguous_id = self._metadata.thing_dataset_id_to_contiguous_id
+ all_contiguous_ids = list(dataset_id_to_contiguous_id.values())
+ num_classes = len(all_contiguous_ids)
+ assert min(all_contiguous_ids) == 0 and max(all_contiguous_ids) == num_classes - 1
+
+ reverse_id_mapping = {v: k for k, v in dataset_id_to_contiguous_id.items()}
+ for result in coco_results:
+ category_id = result["category_id"]
+ assert category_id < num_classes, (
+ f"A prediction has class={category_id}, "
+ f"but the dataset only has {num_classes} classes and "
+ f"predicted class id should be in [0, {num_classes - 1}]."
+ )
+ result["category_id"] = reverse_id_mapping[category_id]
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "coco_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(coco_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info(
+ "Evaluating predictions with {} COCO API...".format(
+ "unofficial" if self._use_fast_impl else "official"
+ )
+ )
+ for task in sorted(tasks):
+ assert task in {"bbox", "segm", "keypoints"}, f"Got unknown task: {task}!"
+ coco_eval = (
+ _evaluate_predictions_on_coco(
+ self._coco_api,
+ coco_results,
+ task,
+ kpt_oks_sigmas=self._kpt_oks_sigmas,
+ cocoeval_fn=COCOeval_opt if self._use_fast_impl else COCOeval,
+ img_ids=img_ids,
+ max_dets_per_image=self._max_dets_per_image,
+ )
+ if len(coco_results) > 0
+ else None # cocoapi does not handle empty results very well
+ )
+
+ res = self._derive_coco_results(
+ coco_eval, task, class_names=self._metadata.get("thing_classes")
+ )
+ self._results[task] = res
+
+ def _eval_box_proposals(self, predictions):
+ """
+ Evaluate the box proposals in predictions.
+ Fill self._results with the metrics for "box_proposals" task.
+ """
+ if self._output_dir:
+ # Saving generated box proposals to file.
+ # Predicted box_proposals are in XYXY_ABS mode.
+ bbox_mode = BoxMode.XYXY_ABS.value
+ ids, boxes, objectness_logits = [], [], []
+ for prediction in predictions:
+ ids.append(prediction["image_id"])
+ boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
+ objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
+
+ proposal_data = {
+ "boxes": boxes,
+ "objectness_logits": objectness_logits,
+ "ids": ids,
+ "bbox_mode": bbox_mode,
+ }
+ with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
+ pickle.dump(proposal_data, f)
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating bbox proposals ...")
+ res = {}
+ areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
+ for limit in [100, 1000]:
+ for area, suffix in areas.items():
+ stats = _evaluate_box_proposals(predictions, self._coco_api, area=area, limit=limit)
+ key = "AR{}@{:d}".format(suffix, limit)
+ res[key] = float(stats["ar"].item() * 100)
+ self._logger.info("Proposal metrics: \n" + create_small_table(res))
+ self._results["box_proposals"] = res
+
+ def _derive_coco_results(self, coco_eval, iou_type, class_names=None):
+ """
+ Derive the desired score numbers from summarized COCOeval.
+
+ Args:
+ coco_eval (None or COCOEval): None represents no predictions from model.
+ iou_type (str):
+            class_names (None or list[str]): if provided, will use it to report
+ per-category AP.
+
+ Returns:
+ a dict of {metric name: score}
+ """
+
+ metrics = {
+ "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+ "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl"],
+ "keypoints": ["AP", "AP50", "AP75", "APm", "APl"],
+ }[iou_type]
+
+ if coco_eval is None:
+ self._logger.warn("No predictions from the model!")
+ return {metric: float("nan") for metric in metrics}
+
+ # the standard metrics
+ results = {
+ metric: float(coco_eval.stats[idx] * 100 if coco_eval.stats[idx] >= 0 else "nan")
+ for idx, metric in enumerate(metrics)
+ }
+ self._logger.info(
+ "Evaluation results for {}: \n".format(iou_type) + create_small_table(results)
+ )
+ if not np.isfinite(sum(results.values())):
+            self._logger.info("Some metrics cannot be computed and are shown as NaN.")
+
+ if class_names is None or len(class_names) <= 1:
+ return results
+ # Compute per-category AP
+ # from https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L222-L252 # noqa
+ precisions = coco_eval.eval["precision"]
+ # precision has dims (iou, recall, cls, area range, max dets)
+ assert len(class_names) == precisions.shape[2]
+
+ results_per_category = []
+ for idx, name in enumerate(class_names):
+ # area range index 0: all area ranges
+ # max dets index -1: typically 100 per image
+ precision = precisions[:, :, idx, 0, -1]
+ precision = precision[precision > -1]
+ ap = np.mean(precision) if precision.size else float("nan")
+ results_per_category.append(("{}".format(name), float(ap * 100)))
+
+ # tabulate it
+ N_COLS = min(6, len(results_per_category) * 2)
+ results_flatten = list(itertools.chain(*results_per_category))
+ results_2d = itertools.zip_longest(*[results_flatten[i::N_COLS] for i in range(N_COLS)])
+ table = tabulate(
+ results_2d,
+ tablefmt="pipe",
+ floatfmt=".3f",
+ headers=["category", "AP"] * (N_COLS // 2),
+ numalign="left",
+ )
+ self._logger.info("Per-category {} AP: \n".format(iou_type) + table)
+
+ results.update({"AP-" + name: ap for name, ap in results_per_category})
+ return results
+
+
+def instances_to_coco_json(instances, img_id):
+ """
+ Dump an "Instances" object to a COCO-format json that's used for evaluation.
+
+ Args:
+ instances (Instances):
+ img_id (int): the image id
+
+ Returns:
+ list[dict]: list of json annotations in COCO format.
+ """
+ num_instance = len(instances)
+ if num_instance == 0:
+ return []
+
+ boxes = instances.pred_boxes.tensor.numpy()
+ boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ boxes = boxes.tolist()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+
+ has_mask = instances.has("pred_masks")
+ if has_mask:
+        # use RLE to encode the masks, because they are too large and take memory
+ # since this evaluator stores outputs of the entire dataset
+ rles = [
+ mask_util.encode(np.array(mask[:, :, None], order="F", dtype="uint8"))[0]
+ for mask in instances.pred_masks
+ ]
+ for rle in rles:
+ # "counts" is an array encoded by mask_util as a byte-stream. Python3's
+ # json writer which always produces strings cannot serialize a bytestream
+ # unless you decode it. Thankfully, utf-8 works out (which is also what
+ # the pycocotools/_mask.pyx does).
+ rle["counts"] = rle["counts"].decode("utf-8")
+
+ has_keypoints = instances.has("pred_keypoints")
+ if has_keypoints:
+ keypoints = instances.pred_keypoints
+
+ results = []
+ for k in range(num_instance):
+ result = {
+ "image_id": img_id,
+ "category_id": classes[k],
+ "bbox": boxes[k],
+ "score": scores[k],
+ }
+ if has_mask:
+ result["segmentation"] = rles[k]
+ if has_keypoints:
+ # In COCO annotations,
+ # keypoints coordinates are pixel indices.
+ # However our predictions are floating point coordinates.
+ # Therefore we subtract 0.5 to be consistent with the annotation format.
+ # This is the inverse of data loading logic in `datasets/coco.py`.
+ keypoints[k][:, :2] -= 0.5
+ result["keypoints"] = keypoints[k].flatten().tolist()
+ results.append(result)
+ return results
+
+
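+# For reference, each entry produced by `instances_to_coco_json` above is a plain dict
+# in COCO result format; a made-up example with a mask prediction would look roughly like
+#
+#   {"image_id": 42, "category_id": 3, "bbox": [10.0, 20.0, 50.0, 80.0], "score": 0.87,
+#    "segmentation": {"size": [480, 640], "counts": "..."}}
+#
+# with "bbox" in XYWH_ABS mode and "segmentation" as an RLE-encoded mask.
+
+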
+# inspired from Detectron:
+# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
+def _evaluate_box_proposals(dataset_predictions, coco_api, thresholds=None, area="all", limit=None):
+ """
+ Evaluate detection proposal recall metrics. This function is a much
+ faster alternative to the official COCO API recall evaluation code. However,
+ it produces slightly different results.
+ """
+ # Record max overlap value for each gt box
+ # Return vector of overlap values
+ areas = {
+ "all": 0,
+ "small": 1,
+ "medium": 2,
+ "large": 3,
+ "96-128": 4,
+ "128-256": 5,
+ "256-512": 6,
+ "512-inf": 7,
+ }
+ area_ranges = [
+ [0**2, 1e5**2], # all
+ [0**2, 32**2], # small
+ [32**2, 96**2], # medium
+ [96**2, 1e5**2], # large
+ [96**2, 128**2], # 96-128
+ [128**2, 256**2], # 128-256
+ [256**2, 512**2], # 256-512
+ [512**2, 1e5**2],
+ ] # 512-inf
+ assert area in areas, "Unknown area range: {}".format(area)
+ area_range = area_ranges[areas[area]]
+ gt_overlaps = []
+ num_pos = 0
+
+ for prediction_dict in dataset_predictions:
+ predictions = prediction_dict["proposals"]
+
+ # sort predictions in descending order
+ # TODO maybe remove this and make it explicit in the documentation
+ inds = predictions.objectness_logits.sort(descending=True)[1]
+ predictions = predictions[inds]
+
+ ann_ids = coco_api.getAnnIds(imgIds=prediction_dict["image_id"])
+ anno = coco_api.loadAnns(ann_ids)
+ gt_boxes = [
+ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
+ for obj in anno
+ if obj["iscrowd"] == 0
+ ]
+ gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
+ gt_boxes = Boxes(gt_boxes)
+ gt_areas = torch.as_tensor([obj["area"] for obj in anno if obj["iscrowd"] == 0])
+
+ if len(gt_boxes) == 0 or len(predictions) == 0:
+ continue
+
+ valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
+ gt_boxes = gt_boxes[valid_gt_inds]
+
+ num_pos += len(gt_boxes)
+
+ if len(gt_boxes) == 0:
+ continue
+
+ if limit is not None and len(predictions) > limit:
+ predictions = predictions[:limit]
+
+ overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
+
+ _gt_overlaps = torch.zeros(len(gt_boxes))
+ for j in range(min(len(predictions), len(gt_boxes))):
+ # find which proposal box maximally covers each gt box
+ # and get the iou amount of coverage for each gt box
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ # find which gt box is 'best' covered (i.e. 'best' = most iou)
+ gt_ovr, gt_ind = max_overlaps.max(dim=0)
+ assert gt_ovr >= 0
+ # find the proposal box that covers the best covered gt box
+ box_ind = argmax_overlaps[gt_ind]
+ # record the iou coverage of this gt box
+ _gt_overlaps[j] = overlaps[box_ind, gt_ind]
+ assert _gt_overlaps[j] == gt_ovr
+ # mark the proposal box and the gt box as used
+ overlaps[box_ind, :] = -1
+ overlaps[:, gt_ind] = -1
+
+ # append recorded iou coverage level
+ gt_overlaps.append(_gt_overlaps)
+ gt_overlaps = (
+ torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
+ )
+ gt_overlaps, _ = torch.sort(gt_overlaps)
+
+ if thresholds is None:
+ step = 0.05
+ thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
+ recalls = torch.zeros_like(thresholds)
+ # compute recall for each iou threshold
+ for i, t in enumerate(thresholds):
+ recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
+ # ar = 2 * np.trapz(recalls, thresholds)
+ ar = recalls.mean()
+ return {
+ "ar": ar,
+ "recalls": recalls,
+ "thresholds": thresholds,
+ "gt_overlaps": gt_overlaps,
+ "num_pos": num_pos,
+ }
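+
+
+# Worked example of the recall/AR computation above (illustrative numbers only): with the
+# default thresholds 0.50, 0.55, ..., 0.95, recalls[i] is the fraction of (area-filtered)
+# ground-truth boxes whose best-matching proposal reaches IoU >= thresholds[i], and the
+# reported "ar" is simply recalls.mean(); _eval_box_proposals then multiplies it by 100.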
+
+
+def _evaluate_predictions_on_coco(
+ coco_gt,
+ coco_results,
+ iou_type,
+ kpt_oks_sigmas=None,
+ cocoeval_fn=COCOeval_opt,
+ img_ids=None,
+ max_dets_per_image=None,
+):
+ """
+ Evaluate the coco results using COCOEval API.
+ """
+ assert len(coco_results) > 0
+
+ if iou_type == "segm":
+ coco_results = copy.deepcopy(coco_results)
+ # When evaluating mask AP, if the results contain bbox, cocoapi will
+ # use the box area as the area of the instance, instead of the mask area.
+ # This leads to a different definition of small/medium/large.
+ # We remove the bbox field to let mask AP use mask area.
+ for c in coco_results:
+ c.pop("bbox", None)
+
+ coco_dt = coco_gt.loadRes(coco_results)
+ coco_eval = cocoeval_fn(coco_gt, coco_dt, iou_type)
+ # For COCO, the default max_dets_per_image is [1, 10, 100].
+ if max_dets_per_image is None:
+ max_dets_per_image = [1, 10, 100] # Default from COCOEval
+ else:
+ assert (
+ len(max_dets_per_image) >= 3
+ ), "COCOeval requires maxDets (and max_dets_per_image) to have length at least 3"
+ # In the case that user supplies a custom input for max_dets_per_image,
+ # apply COCOevalMaxDets to evaluate AP with the custom input.
+ if max_dets_per_image[2] != 100:
+ coco_eval = COCOevalMaxDets(coco_gt, coco_dt, iou_type)
+ if iou_type != "keypoints":
+ coco_eval.params.maxDets = max_dets_per_image
+
+ if img_ids is not None:
+ coco_eval.params.imgIds = img_ids
+
+ if iou_type == "keypoints":
+ # Use the COCO default keypoint OKS sigmas unless overrides are specified
+ if kpt_oks_sigmas:
+ assert hasattr(coco_eval.params, "kpt_oks_sigmas"), "pycocotools is too old!"
+ coco_eval.params.kpt_oks_sigmas = np.array(kpt_oks_sigmas)
+ # COCOAPI requires every detection and every gt to have keypoints, so
+ # we just take the first entry from both
+ num_keypoints_dt = len(coco_results[0]["keypoints"]) // 3
+ num_keypoints_gt = len(next(iter(coco_gt.anns.values()))["keypoints"]) // 3
+ num_keypoints_oks = len(coco_eval.params.kpt_oks_sigmas)
+ assert num_keypoints_oks == num_keypoints_dt == num_keypoints_gt, (
+ f"[COCOEvaluator] Prediction contain {num_keypoints_dt} keypoints. "
+ f"Ground truth contains {num_keypoints_gt} keypoints. "
+ f"The length of cfg.TEST.KEYPOINT_OKS_SIGMAS is {num_keypoints_oks}. "
+ "They have to agree with each other. For meaning of OKS, please refer to "
+ "http://cocodataset.org/#keypoints-eval."
+ )
+
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+
+ return coco_eval
+
+
+class COCOevalMaxDets(COCOeval):
+ """
+ Modified version of COCOeval for evaluating AP with a custom
+ maxDets (by default for COCO, maxDets is 100)
+ """
+
+ def summarize(self):
+ """
+ Compute and display summary metrics for evaluation results given
+ a custom value for max_dets_per_image
+ """
+
+ def _summarize(ap=1, iouThr=None, areaRng="all", maxDets=100):
+ p = self.params
+ iStr = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} ] = {:0.3f}"
+ titleStr = "Average Precision" if ap == 1 else "Average Recall"
+ typeStr = "(AP)" if ap == 1 else "(AR)"
+ iouStr = (
+ "{:0.2f}:{:0.2f}".format(p.iouThrs[0], p.iouThrs[-1])
+ if iouThr is None
+ else "{:0.2f}".format(iouThr)
+ )
+
+ aind = [i for i, aRng in enumerate(p.areaRngLbl) if aRng == areaRng]
+ mind = [i for i, mDet in enumerate(p.maxDets) if mDet == maxDets]
+ if ap == 1:
+ # dimension of precision: [TxRxKxAxM]
+ s = self.eval["precision"]
+ # IoU
+ if iouThr is not None:
+ t = np.where(iouThr == p.iouThrs)[0]
+ s = s[t]
+ s = s[:, :, :, aind, mind]
+ else:
+ # dimension of recall: [TxKxAxM]
+ s = self.eval["recall"]
+ if iouThr is not None:
+ t = np.where(iouThr == p.iouThrs)[0]
+ s = s[t]
+ s = s[:, :, aind, mind]
+ if len(s[s > -1]) == 0:
+ mean_s = -1
+ else:
+ mean_s = np.mean(s[s > -1])
+ print(iStr.format(titleStr, typeStr, iouStr, areaRng, maxDets, mean_s))
+ return mean_s
+
+ def _summarizeDets():
+ stats = np.zeros((12,))
+ # Evaluate AP using the custom limit on maximum detections per image
+ stats[0] = _summarize(1, maxDets=self.params.maxDets[2])
+ stats[1] = _summarize(1, iouThr=0.5, maxDets=self.params.maxDets[2])
+ stats[2] = _summarize(1, iouThr=0.75, maxDets=self.params.maxDets[2])
+ stats[3] = _summarize(1, areaRng="small", maxDets=self.params.maxDets[2])
+ stats[4] = _summarize(1, areaRng="medium", maxDets=self.params.maxDets[2])
+ stats[5] = _summarize(1, areaRng="large", maxDets=self.params.maxDets[2])
+ stats[6] = _summarize(0, maxDets=self.params.maxDets[0])
+ stats[7] = _summarize(0, maxDets=self.params.maxDets[1])
+ stats[8] = _summarize(0, maxDets=self.params.maxDets[2])
+ stats[9] = _summarize(0, areaRng="small", maxDets=self.params.maxDets[2])
+ stats[10] = _summarize(0, areaRng="medium", maxDets=self.params.maxDets[2])
+ stats[11] = _summarize(0, areaRng="large", maxDets=self.params.maxDets[2])
+ return stats
+
+ def _summarizeKps():
+ stats = np.zeros((10,))
+ stats[0] = _summarize(1, maxDets=20)
+ stats[1] = _summarize(1, maxDets=20, iouThr=0.5)
+ stats[2] = _summarize(1, maxDets=20, iouThr=0.75)
+ stats[3] = _summarize(1, maxDets=20, areaRng="medium")
+ stats[4] = _summarize(1, maxDets=20, areaRng="large")
+ stats[5] = _summarize(0, maxDets=20)
+ stats[6] = _summarize(0, maxDets=20, iouThr=0.5)
+ stats[7] = _summarize(0, maxDets=20, iouThr=0.75)
+ stats[8] = _summarize(0, maxDets=20, areaRng="medium")
+ stats[9] = _summarize(0, maxDets=20, areaRng="large")
+ return stats
+
+ if not self.eval:
+ raise Exception("Please run accumulate() first")
+ iouType = self.params.iouType
+ if iouType == "segm" or iouType == "bbox":
+ summarize = _summarizeDets
+ elif iouType == "keypoints":
+ summarize = _summarizeKps
+ self.stats = summarize()
+
+ def __str__(self):
+ self.summarize()
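+
+
+# Illustrative usage sketch (comment only): a hypothetical COCO evaluation with a larger
+# per-image detection limit, which is what routes evaluation through COCOevalMaxDets above:
+#
+#   evaluator = COCOEvaluator("coco_2017_val", output_dir="./output", max_dets_per_image=1000)
+#   results = inference_on_dataset(model, val_loader, evaluator)
+#
+# "coco_2017_val" is one of detectron2's builtin dataset names; `model` and `val_loader`
+# are assumed to exist already.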
diff --git a/detectron2/evaluation/evaluator.py b/detectron2/evaluation/evaluator.py
new file mode 100644
index 0000000000000000000000000000000000000000..9c0e33e0269dd90fcff82a636f476791868e8dd7
--- /dev/null
+++ b/detectron2/evaluation/evaluator.py
@@ -0,0 +1,233 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import datetime
+import logging
+import time
+from collections import OrderedDict, abc
+from contextlib import ExitStack, contextmanager
+from typing import List, Union
+import torch
+from torch import nn
+
+from detectron2.utils.comm import get_world_size, is_main_process
+from detectron2.utils.logger import log_every_n_seconds
+
+
+class DatasetEvaluator:
+ """
+ Base class for a dataset evaluator.
+
+ The function :func:`inference_on_dataset` runs the model over
+    all samples in the dataset, and uses a DatasetEvaluator to process the inputs/outputs.
+
+    This class will accumulate information about the inputs/outputs (by :meth:`process`),
+    and produce evaluation results at the end (by :meth:`evaluate`).
+ """
+
+ def reset(self):
+ """
+ Preparation for a new round of evaluation.
+ Should be called before starting a round of evaluation.
+ """
+ pass
+
+ def process(self, inputs, outputs):
+ """
+ Process the pair of inputs and outputs.
+ If they contain batches, the pairs can be consumed one-by-one using `zip`:
+
+ .. code-block:: python
+
+ for input_, output in zip(inputs, outputs):
+ # do evaluation on single input/output pair
+ ...
+
+ Args:
+            inputs (list): the inputs that are used to call the model.
+ outputs (list): the return value of `model(inputs)`
+ """
+ pass
+
+ def evaluate(self):
+ """
+ Evaluate/summarize the performance, after processing all input/output pairs.
+
+ Returns:
+ dict:
+ A new evaluator class can return a dict of arbitrary format
+ as long as the user can process the results.
+ In our train_net.py, we expect the following format:
+
+ * key: the name of the task (e.g., bbox)
+ * value: a dict of {metric name: score}, e.g.: {"AP50": 80}
+ """
+ pass
+
+
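+# A minimal sketch of a concrete evaluator (hypothetical, comment only): subclasses only
+# need to implement reset/process/evaluate, e.g. an evaluator that simply counts the
+# predicted instances could look like
+#
+#   class InstanceCounter(DatasetEvaluator):
+#       def reset(self):
+#           self.count = 0
+#
+#       def process(self, inputs, outputs):
+#           for output in outputs:
+#               self.count += len(output["instances"])
+#
+#       def evaluate(self):
+#           return {"n_instances": self.count}
+
+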
+class DatasetEvaluators(DatasetEvaluator):
+ """
+ Wrapper class to combine multiple :class:`DatasetEvaluator` instances.
+
+ This class dispatches every evaluation call to
+ all of its :class:`DatasetEvaluator`.
+ """
+
+ def __init__(self, evaluators):
+ """
+ Args:
+ evaluators (list): the evaluators to combine.
+ """
+ super().__init__()
+ self._evaluators = evaluators
+
+ def reset(self):
+ for evaluator in self._evaluators:
+ evaluator.reset()
+
+ def process(self, inputs, outputs):
+ for evaluator in self._evaluators:
+ evaluator.process(inputs, outputs)
+
+ def evaluate(self):
+ results = OrderedDict()
+ for evaluator in self._evaluators:
+ result = evaluator.evaluate()
+ if is_main_process() and result is not None:
+ for k, v in result.items():
+ assert (
+ k not in results
+ ), "Different evaluators produce results with the same key {}".format(k)
+ results[k] = v
+ return results
+
+
+def inference_on_dataset(
+ model,
+ data_loader,
+ evaluator: Union[DatasetEvaluator, List[DatasetEvaluator], None],
+ callbacks=None,
+):
+ """
+ Run model on the data_loader and evaluate the metrics with evaluator.
+ Also benchmark the inference speed of `model.__call__` accurately.
+ The model will be used in eval mode.
+
+ Args:
+ model (callable): a callable which takes an object from
+ `data_loader` and returns some outputs.
+
+ If it's an nn.Module, it will be temporarily set to `eval` mode.
+ If you wish to evaluate a model in `training` mode instead, you can
+ wrap the given model and override its behavior of `.eval()` and `.train()`.
+ data_loader: an iterable object with a length.
+ The elements it generates will be the inputs to the model.
+ evaluator: the evaluator(s) to run. Use `None` if you only want to benchmark,
+ but don't want to do any evaluation.
+ callbacks (dict of callables): a dictionary of callback functions which can be
+ called at each stage of inference.
+
+ Returns:
+ The return value of `evaluator.evaluate()`
+ """
+ num_devices = get_world_size()
+ logger = logging.getLogger(__name__)
+ logger.info("Start inference on {} batches".format(len(data_loader)))
+
+ total = len(data_loader) # inference data loader must have a fixed length
+ if evaluator is None:
+ # create a no-op evaluator
+ evaluator = DatasetEvaluators([])
+ if isinstance(evaluator, abc.MutableSequence):
+ evaluator = DatasetEvaluators(evaluator)
+ evaluator.reset()
+
+ num_warmup = min(5, total - 1)
+ start_time = time.perf_counter()
+ total_data_time = 0
+ total_compute_time = 0
+ total_eval_time = 0
+ with ExitStack() as stack:
+ if isinstance(model, nn.Module):
+ stack.enter_context(inference_context(model))
+ stack.enter_context(torch.no_grad())
+
+ start_data_time = time.perf_counter()
+ dict.get(callbacks or {}, "on_start", lambda: None)()
+ for idx, inputs in enumerate(data_loader):
+ total_data_time += time.perf_counter() - start_data_time
+ if idx == num_warmup:
+ start_time = time.perf_counter()
+ total_data_time = 0
+ total_compute_time = 0
+ total_eval_time = 0
+
+ start_compute_time = time.perf_counter()
+ dict.get(callbacks or {}, "before_inference", lambda: None)()
+ outputs = model(inputs)
+ dict.get(callbacks or {}, "after_inference", lambda: None)()
+ if torch.cuda.is_available():
+ torch.cuda.synchronize()
+ total_compute_time += time.perf_counter() - start_compute_time
+
+ start_eval_time = time.perf_counter()
+ evaluator.process(inputs, outputs)
+ total_eval_time += time.perf_counter() - start_eval_time
+
+ iters_after_start = idx + 1 - num_warmup * int(idx >= num_warmup)
+ data_seconds_per_iter = total_data_time / iters_after_start
+ compute_seconds_per_iter = total_compute_time / iters_after_start
+ eval_seconds_per_iter = total_eval_time / iters_after_start
+ total_seconds_per_iter = (time.perf_counter() - start_time) / iters_after_start
+ if idx >= num_warmup * 2 or compute_seconds_per_iter > 5:
+ eta = datetime.timedelta(seconds=int(total_seconds_per_iter * (total - idx - 1)))
+ log_every_n_seconds(
+ logging.INFO,
+ (
+ f"Inference done {idx + 1}/{total}. "
+ f"Dataloading: {data_seconds_per_iter:.4f} s/iter. "
+ f"Inference: {compute_seconds_per_iter:.4f} s/iter. "
+ f"Eval: {eval_seconds_per_iter:.4f} s/iter. "
+ f"Total: {total_seconds_per_iter:.4f} s/iter. "
+ f"ETA={eta}"
+ ),
+ n=5,
+ )
+ start_data_time = time.perf_counter()
+ dict.get(callbacks or {}, "on_end", lambda: None)()
+
+ # Measure the time only for this worker (before the synchronization barrier)
+ total_time = time.perf_counter() - start_time
+ total_time_str = str(datetime.timedelta(seconds=total_time))
+ # NOTE this format is parsed by grep
+ logger.info(
+ "Total inference time: {} ({:.6f} s / iter per device, on {} devices)".format(
+ total_time_str, total_time / (total - num_warmup), num_devices
+ )
+ )
+ total_compute_time_str = str(datetime.timedelta(seconds=int(total_compute_time)))
+ logger.info(
+ "Total inference pure compute time: {} ({:.6f} s / iter per device, on {} devices)".format(
+ total_compute_time_str, total_compute_time / (total - num_warmup), num_devices
+ )
+ )
+
+ results = evaluator.evaluate()
+ # An evaluator may return None when not in main process.
+ # Replace it by an empty dict instead to make it easier for downstream code to handle
+ if results is None:
+ results = {}
+ return results
+
+
+@contextmanager
+def inference_context(model):
+ """
+ A context where the model is temporarily changed to eval mode,
+ and restored to previous mode afterwards.
+
+ Args:
+ model: a torch Module
+ """
+ training_mode = model.training
+ model.eval()
+ yield
+ model.train(training_mode)
diff --git a/detectron2/evaluation/fast_eval_api.py b/detectron2/evaluation/fast_eval_api.py
new file mode 100644
index 0000000000000000000000000000000000000000..2eb202bd5efa3ec3d366027b1debffc269ae8b17
--- /dev/null
+++ b/detectron2/evaluation/fast_eval_api.py
@@ -0,0 +1,121 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import numpy as np
+import time
+from pycocotools.cocoeval import COCOeval
+
+from detectron2 import _C
+
+logger = logging.getLogger(__name__)
+
+
+class COCOeval_opt(COCOeval):
+ """
+ This is a slightly modified version of the original COCO API, where the functions evaluateImg()
+    and accumulate() are implemented in C++ to speed up evaluation.
+ """
+
+ def evaluate(self):
+ """
+        Run per-image evaluation on the given images and store results in self.evalImgs_cpp, a
+        data structure that isn't readable from Python but is used by a C++ implementation of
+        accumulate(). Unlike the original COCO PythonAPI, we don't populate the data structure
+        self.evalImgs because it is a computational bottleneck.
+ :return: None
+ """
+ tic = time.time()
+
+ p = self.params
+ # add backward compatibility if useSegm is specified in params
+ if p.useSegm is not None:
+ p.iouType = "segm" if p.useSegm == 1 else "bbox"
+ logger.info("Evaluate annotation type *{}*".format(p.iouType))
+ p.imgIds = list(np.unique(p.imgIds))
+ if p.useCats:
+ p.catIds = list(np.unique(p.catIds))
+ p.maxDets = sorted(p.maxDets)
+ self.params = p
+
+ self._prepare() # bottleneck
+
+ # loop through images, area range, max detection number
+ catIds = p.catIds if p.useCats else [-1]
+
+ if p.iouType == "segm" or p.iouType == "bbox":
+ computeIoU = self.computeIoU
+ elif p.iouType == "keypoints":
+ computeIoU = self.computeOks
+ self.ious = {
+ (imgId, catId): computeIoU(imgId, catId) for imgId in p.imgIds for catId in catIds
+ } # bottleneck
+
+ maxDet = p.maxDets[-1]
+
+ # <<<< Beginning of code differences with original COCO API
+ def convert_instances_to_cpp(instances, is_det=False):
+ # Convert annotations for a list of instances in an image to a format that's fast
+ # to access in C++
+ instances_cpp = []
+ for instance in instances:
+ instance_cpp = _C.InstanceAnnotation(
+ int(instance["id"]),
+ instance["score"] if is_det else instance.get("score", 0.0),
+ instance["area"],
+ bool(instance.get("iscrowd", 0)),
+ bool(instance.get("ignore", 0)),
+ )
+ instances_cpp.append(instance_cpp)
+ return instances_cpp
+
+ # Convert GT annotations, detections, and IOUs to a format that's fast to access in C++
+ ground_truth_instances = [
+ [convert_instances_to_cpp(self._gts[imgId, catId]) for catId in p.catIds]
+ for imgId in p.imgIds
+ ]
+ detected_instances = [
+ [convert_instances_to_cpp(self._dts[imgId, catId], is_det=True) for catId in p.catIds]
+ for imgId in p.imgIds
+ ]
+ ious = [[self.ious[imgId, catId] for catId in catIds] for imgId in p.imgIds]
+
+ if not p.useCats:
+ # For each image, flatten per-category lists into a single list
+ ground_truth_instances = [[[o for c in i for o in c]] for i in ground_truth_instances]
+ detected_instances = [[[o for c in i for o in c]] for i in detected_instances]
+
+ # Call C++ implementation of self.evaluateImgs()
+ self._evalImgs_cpp = _C.COCOevalEvaluateImages(
+ p.areaRng, maxDet, p.iouThrs, ious, ground_truth_instances, detected_instances
+ )
+ self._evalImgs = None
+
+ self._paramsEval = copy.deepcopy(self.params)
+ toc = time.time()
+ logger.info("COCOeval_opt.evaluate() finished in {:0.2f} seconds.".format(toc - tic))
+ # >>>> End of code differences with original COCO API
+
+ def accumulate(self):
+ """
+ Accumulate per image evaluation results and store the result in self.eval. Does not
+ support changing parameter settings from those used by self.evaluate()
+ """
+ logger.info("Accumulating evaluation results...")
+ tic = time.time()
+ assert hasattr(
+ self, "_evalImgs_cpp"
+        ), "evaluate() must be called before accumulate() is called."
+
+ self.eval = _C.COCOevalAccumulate(self._paramsEval, self._evalImgs_cpp)
+
+ # recall is num_iou_thresholds X num_categories X num_area_ranges X num_max_detections
+ self.eval["recall"] = np.array(self.eval["recall"]).reshape(
+ self.eval["counts"][:1] + self.eval["counts"][2:]
+ )
+
+ # precision and scores are num_iou_thresholds X num_recall_thresholds X num_categories X
+ # num_area_ranges X num_max_detections
+ self.eval["precision"] = np.array(self.eval["precision"]).reshape(self.eval["counts"])
+ self.eval["scores"] = np.array(self.eval["scores"]).reshape(self.eval["counts"])
+ toc = time.time()
+ logger.info("COCOeval_opt.accumulate() finished in {:0.2f} seconds.".format(toc - tic))
diff --git a/detectron2/evaluation/lvis_evaluation.py b/detectron2/evaluation/lvis_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..6cc854a157dc469be99a9be1bb7d570068adc891
--- /dev/null
+++ b/detectron2/evaluation/lvis_evaluation.py
@@ -0,0 +1,380 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import itertools
+import json
+import logging
+import os
+import pickle
+from collections import OrderedDict
+import torch
+
+import detectron2.utils.comm as comm
+from detectron2.config import CfgNode
+from detectron2.data import MetadataCatalog
+from detectron2.structures import Boxes, BoxMode, pairwise_iou
+from detectron2.utils.file_io import PathManager
+from detectron2.utils.logger import create_small_table
+
+from .coco_evaluation import instances_to_coco_json
+from .evaluator import DatasetEvaluator
+
+
+class LVISEvaluator(DatasetEvaluator):
+ """
+ Evaluate object proposal and instance detection/segmentation outputs using
+ LVIS's metrics and evaluation API.
+ """
+
+ def __init__(
+ self,
+ dataset_name,
+ tasks=None,
+ distributed=True,
+ output_dir=None,
+ *,
+ max_dets_per_image=None,
+ ):
+ """
+ Args:
+ dataset_name (str): name of the dataset to be evaluated.
+ It must have the following corresponding metadata:
+ "json_file": the path to the LVIS format annotation
+ tasks (tuple[str]): tasks that can be evaluated under the given
+ configuration. A task is one of "bbox", "segm".
+ By default, will infer this automatically from predictions.
+ distributed (True): if True, will collect results from all ranks for evaluation.
+ Otherwise, will evaluate the results in the current process.
+ output_dir (str): optional, an output directory to dump results.
+            max_dets_per_image (None or int): limit on the maximum number of detections per image
+                when evaluating AP. By default, this limit is 300 for the LVIS dataset.
+ """
+ from lvis import LVIS
+
+ self._logger = logging.getLogger(__name__)
+
+ if tasks is not None and isinstance(tasks, CfgNode):
+ self._logger.warn(
+                "LVIS Evaluator instantiated using config, this is deprecated behavior."
+ " Please pass in explicit arguments instead."
+ )
+            self._tasks = None  # Inferring it from predictions should be better
+ else:
+ self._tasks = tasks
+
+ self._distributed = distributed
+ self._output_dir = output_dir
+ self._max_dets_per_image = max_dets_per_image
+
+ self._cpu_device = torch.device("cpu")
+
+ self._metadata = MetadataCatalog.get(dataset_name)
+ json_file = PathManager.get_local_path(self._metadata.json_file)
+ self._lvis_api = LVIS(json_file)
+ # Test set json files do not contain annotations (evaluation must be
+ # performed using the LVIS evaluation server).
+ self._do_evaluation = len(self._lvis_api.get_ann_ids()) > 0
+
+ def reset(self):
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a LVIS model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a LVIS model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ """
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+ prediction["instances"] = instances_to_coco_json(instances, input["image_id"])
+ if "proposals" in output:
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
+ self._predictions.append(prediction)
+
+ def evaluate(self):
+ if self._distributed:
+ comm.synchronize()
+ predictions = comm.gather(self._predictions, dst=0)
+ predictions = list(itertools.chain(*predictions))
+
+ if not comm.is_main_process():
+ return
+ else:
+ predictions = self._predictions
+
+ if len(predictions) == 0:
+ self._logger.warning("[LVISEvaluator] Did not receive valid predictions.")
+ return {}
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "instances_predictions.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(predictions, f)
+
+ self._results = OrderedDict()
+ if "proposals" in predictions[0]:
+ self._eval_box_proposals(predictions)
+ if "instances" in predictions[0]:
+ self._eval_predictions(predictions)
+ # Copy so the caller can do whatever with results
+ return copy.deepcopy(self._results)
+
+ def _tasks_from_predictions(self, predictions):
+ for pred in predictions:
+ if "segmentation" in pred:
+ return ("bbox", "segm")
+ return ("bbox",)
+
+ def _eval_predictions(self, predictions):
+ """
+ Evaluate predictions. Fill self._results with the metrics of the tasks.
+
+ Args:
+ predictions (list[dict]): list of outputs from the model
+ """
+ self._logger.info("Preparing results in the LVIS format ...")
+ lvis_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+ tasks = self._tasks or self._tasks_from_predictions(lvis_results)
+
+ # LVIS evaluator can be used to evaluate results for COCO dataset categories.
+ # In this case `_metadata` variable will have a field with COCO-specific category mapping.
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {
+ v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ for result in lvis_results:
+ result["category_id"] = reverse_id_mapping[result["category_id"]]
+ else:
+ # unmap the category ids for LVIS (from 0-indexed to 1-indexed)
+ for result in lvis_results:
+ result["category_id"] += 1
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "lvis_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(lvis_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating predictions ...")
+ for task in sorted(tasks):
+ res = _evaluate_predictions_on_lvis(
+ self._lvis_api,
+ lvis_results,
+ task,
+ max_dets_per_image=self._max_dets_per_image,
+ class_names=self._metadata.get("thing_classes"),
+ )
+ self._results[task] = res
+
+ def _eval_box_proposals(self, predictions):
+ """
+ Evaluate the box proposals in predictions.
+ Fill self._results with the metrics for "box_proposals" task.
+ """
+ if self._output_dir:
+ # Saving generated box proposals to file.
+ # Predicted box_proposals are in XYXY_ABS mode.
+ bbox_mode = BoxMode.XYXY_ABS.value
+ ids, boxes, objectness_logits = [], [], []
+ for prediction in predictions:
+ ids.append(prediction["image_id"])
+ boxes.append(prediction["proposals"].proposal_boxes.tensor.numpy())
+ objectness_logits.append(prediction["proposals"].objectness_logits.numpy())
+
+ proposal_data = {
+ "boxes": boxes,
+ "objectness_logits": objectness_logits,
+ "ids": ids,
+ "bbox_mode": bbox_mode,
+ }
+ with PathManager.open(os.path.join(self._output_dir, "box_proposals.pkl"), "wb") as f:
+ pickle.dump(proposal_data, f)
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating bbox proposals ...")
+ res = {}
+ areas = {"all": "", "small": "s", "medium": "m", "large": "l"}
+ for limit in [100, 1000]:
+ for area, suffix in areas.items():
+ stats = _evaluate_box_proposals(predictions, self._lvis_api, area=area, limit=limit)
+ key = "AR{}@{:d}".format(suffix, limit)
+ res[key] = float(stats["ar"].item() * 100)
+ self._logger.info("Proposal metrics: \n" + create_small_table(res))
+ self._results["box_proposals"] = res
+
+
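+# Illustrative usage sketch (comment only): LVIS evaluation follows the same pattern as
+# the COCO evaluator, e.g. (with `model` and `val_loader` assumed to be built elsewhere):
+#
+#   evaluator = LVISEvaluator("lvis_v1_val", output_dir="./output", max_dets_per_image=300)
+#   results = inference_on_dataset(model, val_loader, evaluator)
+
+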
+# inspired from Detectron:
+# https://github.com/facebookresearch/Detectron/blob/a6a835f5b8208c45d0dce217ce9bbda915f44df7/detectron/datasets/json_dataset_evaluator.py#L255 # noqa
+def _evaluate_box_proposals(dataset_predictions, lvis_api, thresholds=None, area="all", limit=None):
+ """
+ Evaluate detection proposal recall metrics. This function is a much
+ faster alternative to the official LVIS API recall evaluation code. However,
+ it produces slightly different results.
+ """
+ # Record max overlap value for each gt box
+ # Return vector of overlap values
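+ # Sketch of the metric computed below (with illustrative numbers): given
+ # num_pos = 4 gt boxes whose best-proposal IoUs are [0.9, 0.7, 0.55, 0.3],
+ # recall at IoU 0.5 is 3/4 and recall at IoU 0.75 is 1/4; "ar" is the mean of
+ # these recalls over the thresholds 0.5:0.05:0.95.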
+ areas = {
+ "all": 0,
+ "small": 1,
+ "medium": 2,
+ "large": 3,
+ "96-128": 4,
+ "128-256": 5,
+ "256-512": 6,
+ "512-inf": 7,
+ }
+ area_ranges = [
+ [0**2, 1e5**2], # all
+ [0**2, 32**2], # small
+ [32**2, 96**2], # medium
+ [96**2, 1e5**2], # large
+ [96**2, 128**2], # 96-128
+ [128**2, 256**2], # 128-256
+ [256**2, 512**2], # 256-512
+ [512**2, 1e5**2], # 512-inf
+ ]
+ assert area in areas, "Unknown area range: {}".format(area)
+ area_range = area_ranges[areas[area]]
+ gt_overlaps = []
+ num_pos = 0
+
+ for prediction_dict in dataset_predictions:
+ predictions = prediction_dict["proposals"]
+
+ # sort predictions in descending order
+ # TODO maybe remove this and make it explicit in the documentation
+ inds = predictions.objectness_logits.sort(descending=True)[1]
+ predictions = predictions[inds]
+
+ ann_ids = lvis_api.get_ann_ids(img_ids=[prediction_dict["image_id"]])
+ anno = lvis_api.load_anns(ann_ids)
+ gt_boxes = [
+ BoxMode.convert(obj["bbox"], BoxMode.XYWH_ABS, BoxMode.XYXY_ABS) for obj in anno
+ ]
+ gt_boxes = torch.as_tensor(gt_boxes).reshape(-1, 4) # guard against no boxes
+ gt_boxes = Boxes(gt_boxes)
+ gt_areas = torch.as_tensor([obj["area"] for obj in anno])
+
+ if len(gt_boxes) == 0 or len(predictions) == 0:
+ continue
+
+ valid_gt_inds = (gt_areas >= area_range[0]) & (gt_areas <= area_range[1])
+ gt_boxes = gt_boxes[valid_gt_inds]
+
+ num_pos += len(gt_boxes)
+
+ if len(gt_boxes) == 0:
+ continue
+
+ if limit is not None and len(predictions) > limit:
+ predictions = predictions[:limit]
+
+ overlaps = pairwise_iou(predictions.proposal_boxes, gt_boxes)
+
+ _gt_overlaps = torch.zeros(len(gt_boxes))
+ for j in range(min(len(predictions), len(gt_boxes))):
+ # find which proposal box maximally covers each gt box
+ # and get the iou amount of coverage for each gt box
+ max_overlaps, argmax_overlaps = overlaps.max(dim=0)
+
+ # find which gt box is 'best' covered (i.e. 'best' = most iou)
+ gt_ovr, gt_ind = max_overlaps.max(dim=0)
+ assert gt_ovr >= 0
+ # find the proposal box that covers the best covered gt box
+ box_ind = argmax_overlaps[gt_ind]
+ # record the iou coverage of this gt box
+ _gt_overlaps[j] = overlaps[box_ind, gt_ind]
+ assert _gt_overlaps[j] == gt_ovr
+ # mark the proposal box and the gt box as used
+ overlaps[box_ind, :] = -1
+ overlaps[:, gt_ind] = -1
+
+ # append recorded iou coverage level
+ gt_overlaps.append(_gt_overlaps)
+ gt_overlaps = (
+ torch.cat(gt_overlaps, dim=0) if len(gt_overlaps) else torch.zeros(0, dtype=torch.float32)
+ )
+ gt_overlaps, _ = torch.sort(gt_overlaps)
+
+ if thresholds is None:
+ step = 0.05
+ thresholds = torch.arange(0.5, 0.95 + 1e-5, step, dtype=torch.float32)
+ recalls = torch.zeros_like(thresholds)
+ # compute recall for each iou threshold
+ for i, t in enumerate(thresholds):
+ recalls[i] = (gt_overlaps >= t).float().sum() / float(num_pos)
+ # ar = 2 * np.trapz(recalls, thresholds)
+ ar = recalls.mean()
+ return {
+ "ar": ar,
+ "recalls": recalls,
+ "thresholds": thresholds,
+ "gt_overlaps": gt_overlaps,
+ "num_pos": num_pos,
+ }
+
+
+def _evaluate_predictions_on_lvis(
+ lvis_gt, lvis_results, iou_type, max_dets_per_image=None, class_names=None
+):
+ """
+ Args:
+ iou_type (str):
+ max_dets_per_image (None or int): limit on maximum detections per image in evaluating AP
+ For the LVIS dataset, this limit defaults to 300.
+ class_names (None or list[str]): if provided, will use it to report
+ per-category AP.
+
+ Returns:
+ a dict of {metric name: score}
+ """
+ metrics = {
+ "bbox": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
+ "segm": ["AP", "AP50", "AP75", "APs", "APm", "APl", "APr", "APc", "APf"],
+ }[iou_type]
+
+ logger = logging.getLogger(__name__)
+
+ if len(lvis_results) == 0: # TODO: check if needed
+ logger.warning("No predictions from the model!")
+ return {metric: float("nan") for metric in metrics}
+
+ if iou_type == "segm":
+ lvis_results = copy.deepcopy(lvis_results)
+ # When evaluating mask AP, if the results contain bbox, LVIS API will
+ # use the box area as the area of the instance, instead of the mask area.
+ # This leads to a different definition of small/medium/large.
+ # We remove the bbox field to let mask AP use mask area.
+ for c in lvis_results:
+ c.pop("bbox", None)
+
+ if max_dets_per_image is None:
+ max_dets_per_image = 300 # Default for LVIS dataset
+
+ from lvis import LVISEval, LVISResults
+
+ logger.info(f"Evaluating with max detections per image = {max_dets_per_image}")
+ lvis_results = LVISResults(lvis_gt, lvis_results, max_dets=max_dets_per_image)
+ lvis_eval = LVISEval(lvis_gt, lvis_results, iou_type)
+ lvis_eval.run()
+ lvis_eval.print_results()
+
+ # Pull the standard metrics from the LVIS results
+ results = lvis_eval.get_results()
+ results = {metric: float(results[metric] * 100) for metric in metrics}
+ logger.info("Evaluation results for {}: \n".format(iou_type) + create_small_table(results))
+ return results
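+
+
+# Illustrative sketch of how the helper above can be driven directly (the file
+# names are hypothetical; normally the evaluator above calls it internally):
+#
+#   from lvis import LVIS
+#   lvis_api = LVIS("lvis_val_annotations.json")  # hypothetical annotation file
+#   with open("lvis_instances_results.json") as f:
+#       lvis_results = json.load(f)
+#   res = _evaluate_predictions_on_lvis(lvis_api, lvis_results, "bbox")
+#   # res -> {"AP": ..., "AP50": ..., "APr": ..., "APc": ..., "APf": ...}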
diff --git a/detectron2/evaluation/panoptic_evaluation.py b/detectron2/evaluation/panoptic_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..9fb3462b7f9abf6feaa499976bfed526ebd17e31
--- /dev/null
+++ b/detectron2/evaluation/panoptic_evaluation.py
@@ -0,0 +1,199 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import contextlib
+import io
+import itertools
+import json
+import logging
+import numpy as np
+import os
+import tempfile
+from collections import OrderedDict
+from typing import Optional
+from PIL import Image
+from tabulate import tabulate
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+logger = logging.getLogger(__name__)
+
+
+class COCOPanopticEvaluator(DatasetEvaluator):
+ """
+ Evaluate Panoptic Quality metrics on COCO using PanopticAPI.
+ It saves panoptic segmentation predictions in `output_dir`.
+
+ It contains a synchronize call and has to be called from all workers.
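+
+ Example (an illustrative sketch; ``model`` and ``data_loader`` are assumed to be
+ built elsewhere, and the dataset name is only an example)::
+
+ evaluator = COCOPanopticEvaluator("coco_2017_val_panoptic", output_dir="./output")
+ results = inference_on_dataset(model, data_loader, evaluator)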
+ """
+
+ def __init__(self, dataset_name: str, output_dir: Optional[str] = None):
+ """
+ Args:
+ dataset_name: name of the dataset
+ output_dir: output directory to save results for evaluation.
+ """
+ self._metadata = MetadataCatalog.get(dataset_name)
+ self._thing_contiguous_id_to_dataset_id = {
+ v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ self._stuff_contiguous_id_to_dataset_id = {
+ v: k for k, v in self._metadata.stuff_dataset_id_to_contiguous_id.items()
+ }
+
+ self._output_dir = output_dir
+ if self._output_dir is not None:
+ PathManager.mkdirs(self._output_dir)
+
+ def reset(self):
+ self._predictions = []
+
+ def _convert_category_id(self, segment_info):
+ isthing = segment_info.pop("isthing", None)
+ if isthing is None:
+ # the model produces panoptic category id directly. No more conversion needed
+ return segment_info
+ if isthing is True:
+ segment_info["category_id"] = self._thing_contiguous_id_to_dataset_id[
+ segment_info["category_id"]
+ ]
+ else:
+ segment_info["category_id"] = self._stuff_contiguous_id_to_dataset_id[
+ segment_info["category_id"]
+ ]
+ return segment_info
+
+ def process(self, inputs, outputs):
+ from panopticapi.utils import id2rgb
+
+ for input, output in zip(inputs, outputs):
+ panoptic_img, segments_info = output["panoptic_seg"]
+ panoptic_img = panoptic_img.cpu().numpy()
+ if segments_info is None:
+ # If "segments_info" is None, we assume "panoptic_img" is a
+ # H*W int32 image storing the panoptic_id in the format of
+ # category_id * label_divisor + instance_id. We reserve -1 for
+ # VOID label, and add 1 to panoptic_img since the official
+ # evaluation script uses 0 for VOID label.
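+ # For example (illustrative): with label_divisor=1000, a pixel value of
+ # 23 * 1000 + 5 = 23005 decodes to category_id 23 and instance_id 5; after
+ # the "+ 1" shift below, VOID (-1) becomes 0.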
+ label_divisor = self._metadata.label_divisor
+ segments_info = []
+ for panoptic_label in np.unique(panoptic_img):
+ if panoptic_label == -1:
+ # VOID region.
+ continue
+ pred_class = panoptic_label // label_divisor
+ isthing = (
+ pred_class in self._metadata.thing_dataset_id_to_contiguous_id.values()
+ )
+ segments_info.append(
+ {
+ "id": int(panoptic_label) + 1,
+ "category_id": int(pred_class),
+ "isthing": bool(isthing),
+ }
+ )
+ # Official evaluation script uses 0 for VOID label.
+ panoptic_img += 1
+
+ file_name = os.path.basename(input["file_name"])
+ file_name_png = os.path.splitext(file_name)[0] + ".png"
+ with io.BytesIO() as out:
+ Image.fromarray(id2rgb(panoptic_img)).save(out, format="PNG")
+ segments_info = [self._convert_category_id(x) for x in segments_info]
+ self._predictions.append(
+ {
+ "image_id": input["image_id"],
+ "file_name": file_name_png,
+ "png_string": out.getvalue(),
+ "segments_info": segments_info,
+ }
+ )
+
+ def evaluate(self):
+ comm.synchronize()
+
+ self._predictions = comm.gather(self._predictions)
+ self._predictions = list(itertools.chain(*self._predictions))
+ if not comm.is_main_process():
+ return
+
+ # PanopticApi requires local files
+ gt_json = PathManager.get_local_path(self._metadata.panoptic_json)
+ gt_folder = PathManager.get_local_path(self._metadata.panoptic_root)
+
+ with tempfile.TemporaryDirectory(prefix="panoptic_eval") as pred_dir:
+ logger.info("Writing all panoptic predictions to {} ...".format(pred_dir))
+ for p in self._predictions:
+ with open(os.path.join(pred_dir, p["file_name"]), "wb") as f:
+ f.write(p.pop("png_string"))
+
+ with open(gt_json, "r") as f:
+ json_data = json.load(f)
+ json_data["annotations"] = self._predictions
+
+ output_dir = self._output_dir or pred_dir
+ predictions_json = os.path.join(output_dir, "predictions.json")
+ with PathManager.open(predictions_json, "w") as f:
+ f.write(json.dumps(json_data))
+
+ from panopticapi.evaluation import pq_compute
+
+ with contextlib.redirect_stdout(io.StringIO()):
+ pq_res = pq_compute(
+ gt_json,
+ PathManager.get_local_path(predictions_json),
+ gt_folder=gt_folder,
+ pred_folder=pred_dir,
+ )
+
+ res = {}
+ res["PQ"] = 100 * pq_res["All"]["pq"]
+ res["SQ"] = 100 * pq_res["All"]["sq"]
+ res["RQ"] = 100 * pq_res["All"]["rq"]
+ res["PQ_th"] = 100 * pq_res["Things"]["pq"]
+ res["SQ_th"] = 100 * pq_res["Things"]["sq"]
+ res["RQ_th"] = 100 * pq_res["Things"]["rq"]
+ res["PQ_st"] = 100 * pq_res["Stuff"]["pq"]
+ res["SQ_st"] = 100 * pq_res["Stuff"]["sq"]
+ res["RQ_st"] = 100 * pq_res["Stuff"]["rq"]
+
+ results = OrderedDict({"panoptic_seg": res})
+ _print_panoptic_results(pq_res)
+
+ return results
+
+
+def _print_panoptic_results(pq_res):
+ headers = ["", "PQ", "SQ", "RQ", "#categories"]
+ data = []
+ for name in ["All", "Things", "Stuff"]:
+ row = [name] + [pq_res[name][k] * 100 for k in ["pq", "sq", "rq"]] + [pq_res[name]["n"]]
+ data.append(row)
+ table = tabulate(
+ data, headers=headers, tablefmt="pipe", floatfmt=".3f", stralign="center", numalign="center"
+ )
+ logger.info("Panoptic Evaluation Results:\n" + table)
+
+
+if __name__ == "__main__":
+ from detectron2.utils.logger import setup_logger
+
+ logger = setup_logger()
+ import argparse
+
+ parser = argparse.ArgumentParser()
+ parser.add_argument("--gt-json")
+ parser.add_argument("--gt-dir")
+ parser.add_argument("--pred-json")
+ parser.add_argument("--pred-dir")
+ args = parser.parse_args()
+
+ from panopticapi.evaluation import pq_compute
+
+ with contextlib.redirect_stdout(io.StringIO()):
+ pq_res = pq_compute(
+ args.gt_json, args.pred_json, gt_folder=args.gt_dir, pred_folder=args.pred_dir
+ )
+ _print_panoptic_results(pq_res)
diff --git a/detectron2/evaluation/pascal_voc_evaluation.py b/detectron2/evaluation/pascal_voc_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..88bb42e6f75f5f0faa4b774ddf16938477a37d2b
--- /dev/null
+++ b/detectron2/evaluation/pascal_voc_evaluation.py
@@ -0,0 +1,300 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import logging
+import numpy as np
+import os
+import tempfile
+import xml.etree.ElementTree as ET
+from collections import OrderedDict, defaultdict
+from functools import lru_cache
+import torch
+
+from detectron2.data import MetadataCatalog
+from detectron2.utils import comm
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+
+class PascalVOCDetectionEvaluator(DatasetEvaluator):
+ """
+ Evaluate Pascal VOC style AP for Pascal VOC dataset.
+ It contains a synchronization, therefore has to be called from all ranks.
+
+ Note that the concept of AP can be implemented in different ways and may not
+ produce identical results. This class mimics the implementation of the official
+ Pascal VOC Matlab API, and should produce similar but not identical results to the
+ official API.
+ """
+
+ def __init__(self, dataset_name):
+ """
+ Args:
+ dataset_name (str): name of the dataset, e.g., "voc_2007_test"
+ """
+ self._dataset_name = dataset_name
+ meta = MetadataCatalog.get(dataset_name)
+
+ # Too many tiny files, download all to local for speed.
+ annotation_dir_local = PathManager.get_local_path(
+ os.path.join(meta.dirname, "Annotations/")
+ )
+ self._anno_file_template = os.path.join(annotation_dir_local, "{}.xml")
+ self._image_set_path = os.path.join(meta.dirname, "ImageSets", "Main", meta.split + ".txt")
+ self._class_names = meta.thing_classes
+ assert meta.year in [2007, 2012], meta.year
+ self._is_2007 = meta.year == 2007
+ self._cpu_device = torch.device("cpu")
+ self._logger = logging.getLogger(__name__)
+
+ def reset(self):
+ self._predictions = defaultdict(list) # class name -> list of prediction strings
+
+ def process(self, inputs, outputs):
+ for input, output in zip(inputs, outputs):
+ image_id = input["image_id"]
+ instances = output["instances"].to(self._cpu_device)
+ boxes = instances.pred_boxes.tensor.numpy()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+ for box, score, cls in zip(boxes, scores, classes):
+ xmin, ymin, xmax, ymax = box
+ # The inverse of data loading logic in `datasets/pascal_voc.py`
+ xmin += 1
+ ymin += 1
+ self._predictions[cls].append(
+ f"{image_id} {score:.3f} {xmin:.1f} {ymin:.1f} {xmax:.1f} {ymax:.1f}"
+ )
+
+ def evaluate(self):
+ """
+ Returns:
+ dict: has a key "bbox", whose value is a dict of "AP", "AP50", and "AP75".
+ """
+ all_predictions = comm.gather(self._predictions, dst=0)
+ if not comm.is_main_process():
+ return
+ predictions = defaultdict(list)
+ for predictions_per_rank in all_predictions:
+ for clsid, lines in predictions_per_rank.items():
+ predictions[clsid].extend(lines)
+ del all_predictions
+
+ self._logger.info(
+ "Evaluating {} using {} metric. "
+ "Note that results do not use the official Matlab API.".format(
+ self._dataset_name, 2007 if self._is_2007 else 2012
+ )
+ )
+
+ with tempfile.TemporaryDirectory(prefix="pascal_voc_eval_") as dirname:
+ res_file_template = os.path.join(dirname, "{}.txt")
+
+ aps = defaultdict(list) # iou -> ap per class
+ for cls_id, cls_name in enumerate(self._class_names):
+ lines = predictions.get(cls_id, [""])
+
+ with open(res_file_template.format(cls_name), "w") as f:
+ f.write("\n".join(lines))
+
+ for thresh in range(50, 100, 5):
+ rec, prec, ap = voc_eval(
+ res_file_template,
+ self._anno_file_template,
+ self._image_set_path,
+ cls_name,
+ ovthresh=thresh / 100.0,
+ use_07_metric=self._is_2007,
+ )
+ aps[thresh].append(ap * 100)
+
+ ret = OrderedDict()
+ mAP = {iou: np.mean(x) for iou, x in aps.items()}
+ ret["bbox"] = {"AP": np.mean(list(mAP.values())), "AP50": mAP[50], "AP75": mAP[75]}
+ return ret
+
+
+##############################################################################
+#
+# Below code is modified from
+# https://github.com/rbgirshick/py-faster-rcnn/blob/master/lib/datasets/voc_eval.py
+# --------------------------------------------------------
+# Fast/er R-CNN
+# Licensed under The MIT License [see LICENSE for details]
+# Written by Bharath Hariharan
+# --------------------------------------------------------
+
+"""Python implementation of the PASCAL VOC devkit's AP evaluation code."""
+
+
+@lru_cache(maxsize=None)
+def parse_rec(filename):
+ """Parse a PASCAL VOC xml file."""
+ with PathManager.open(filename) as f:
+ tree = ET.parse(f)
+ objects = []
+ for obj in tree.findall("object"):
+ obj_struct = {}
+ obj_struct["name"] = obj.find("name").text
+ obj_struct["pose"] = obj.find("pose").text
+ obj_struct["truncated"] = int(obj.find("truncated").text)
+ obj_struct["difficult"] = int(obj.find("difficult").text)
+ bbox = obj.find("bndbox")
+ obj_struct["bbox"] = [
+ int(bbox.find("xmin").text),
+ int(bbox.find("ymin").text),
+ int(bbox.find("xmax").text),
+ int(bbox.find("ymax").text),
+ ]
+ objects.append(obj_struct)
+
+ return objects
+
+
+def voc_ap(rec, prec, use_07_metric=False):
+ """Compute VOC AP given precision and recall. If use_07_metric is true, uses
+ the VOC 07 11-point method (default: False).
+ """
+ if use_07_metric:
+ # 11 point metric
+ ap = 0.0
+ for t in np.arange(0.0, 1.1, 0.1):
+ if np.sum(rec >= t) == 0:
+ p = 0
+ else:
+ p = np.max(prec[rec >= t])
+ ap = ap + p / 11.0
+ else:
+ # correct AP calculation
+ # first append sentinel values at the end
+ mrec = np.concatenate(([0.0], rec, [1.0]))
+ mpre = np.concatenate(([0.0], prec, [0.0]))
+
+ # compute the precision envelope
+ for i in range(mpre.size - 1, 0, -1):
+ mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i])
+
+ # to calculate area under PR curve, look for points
+ # where X axis (recall) changes value
+ i = np.where(mrec[1:] != mrec[:-1])[0]
+
+ # and sum (\Delta recall) * prec
+ ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1])
+ return ap
+
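+# Illustrative use of `voc_ap` (made-up precision/recall points, not real data):
+#
+#   rec = np.array([0.5, 1.0])
+#   prec = np.array([1.0, 0.5])
+#   voc_ap(rec, prec)  # -> 0.75, the area under the interpolated PR curve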
+
+def voc_eval(detpath, annopath, imagesetfile, classname, ovthresh=0.5, use_07_metric=False):
+ """rec, prec, ap = voc_eval(detpath,
+ annopath,
+ imagesetfile,
+ classname,
+ [ovthresh],
+ [use_07_metric])
+
+ Top level function that does the PASCAL VOC evaluation.
+
+ detpath: Path to detections
+ detpath.format(classname) should produce the detection results file.
+ annopath: Path to annotations
+ annopath.format(imagename) should be the xml annotations file.
+ imagesetfile: Text file containing the list of images, one image per line.
+ classname: Category name
+ [ovthresh]: Overlap threshold (default = 0.5)
+ [use_07_metric]: Whether to use VOC07's 11 point AP computation
+ (default False)
+ """
+ # assumes detections are in detpath.format(classname)
+ # assumes annotations are in annopath.format(imagename)
+ # assumes imagesetfile is a text file with each line an image name
+
+ # first load gt
+ # read list of images
+ with PathManager.open(imagesetfile, "r") as f:
+ lines = f.readlines()
+ imagenames = [x.strip() for x in lines]
+
+ # load annots
+ recs = {}
+ for imagename in imagenames:
+ recs[imagename] = parse_rec(annopath.format(imagename))
+
+ # extract gt objects for this class
+ class_recs = {}
+ npos = 0
+ for imagename in imagenames:
+ R = [obj for obj in recs[imagename] if obj["name"] == classname]
+ bbox = np.array([x["bbox"] for x in R])
+ difficult = np.array([x["difficult"] for x in R]).astype(bool)
+ # difficult = np.array([False for x in R]).astype(bool) # treat all "difficult" as GT
+ det = [False] * len(R)
+ npos = npos + sum(~difficult)
+ class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det}
+
+ # read dets
+ detfile = detpath.format(classname)
+ with open(detfile, "r") as f:
+ lines = f.readlines()
+
+ splitlines = [x.strip().split(" ") for x in lines]
+ image_ids = [x[0] for x in splitlines]
+ confidence = np.array([float(x[1]) for x in splitlines])
+ BB = np.array([[float(z) for z in x[2:]] for x in splitlines]).reshape(-1, 4)
+
+ # sort by confidence
+ sorted_ind = np.argsort(-confidence)
+ BB = BB[sorted_ind, :]
+ image_ids = [image_ids[x] for x in sorted_ind]
+
+ # go down dets and mark TPs and FPs
+ nd = len(image_ids)
+ tp = np.zeros(nd)
+ fp = np.zeros(nd)
+ for d in range(nd):
+ R = class_recs[image_ids[d]]
+ bb = BB[d, :].astype(float)
+ ovmax = -np.inf
+ BBGT = R["bbox"].astype(float)
+
+ if BBGT.size > 0:
+ # compute overlaps
+ # intersection
+ ixmin = np.maximum(BBGT[:, 0], bb[0])
+ iymin = np.maximum(BBGT[:, 1], bb[1])
+ ixmax = np.minimum(BBGT[:, 2], bb[2])
+ iymax = np.minimum(BBGT[:, 3], bb[3])
+ iw = np.maximum(ixmax - ixmin + 1.0, 0.0)
+ ih = np.maximum(iymax - iymin + 1.0, 0.0)
+ inters = iw * ih
+
+ # union
+ uni = (
+ (bb[2] - bb[0] + 1.0) * (bb[3] - bb[1] + 1.0)
+ + (BBGT[:, 2] - BBGT[:, 0] + 1.0) * (BBGT[:, 3] - BBGT[:, 1] + 1.0)
+ - inters
+ )
+
+ overlaps = inters / uni
+ ovmax = np.max(overlaps)
+ jmax = np.argmax(overlaps)
+
+ if ovmax > ovthresh:
+ if not R["difficult"][jmax]:
+ if not R["det"][jmax]:
+ tp[d] = 1.0
+ R["det"][jmax] = 1
+ else:
+ fp[d] = 1.0
+ else:
+ fp[d] = 1.0
+
+ # compute precision recall
+ fp = np.cumsum(fp)
+ tp = np.cumsum(tp)
+ rec = tp / float(npos)
+ # avoid divide by zero in case the first detection matches a difficult
+ # ground truth
+ prec = tp / np.maximum(tp + fp, np.finfo(np.float64).eps)
+ ap = voc_ap(rec, prec, use_07_metric)
+
+ return rec, prec, ap
diff --git a/detectron2/evaluation/rotated_coco_evaluation.py b/detectron2/evaluation/rotated_coco_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..c9782558881c9ad651accf8ac57ae158f3e46a96
--- /dev/null
+++ b/detectron2/evaluation/rotated_coco_evaluation.py
@@ -0,0 +1,209 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import json
+import numpy as np
+import os
+import torch
+from pycocotools.cocoeval import COCOeval, maskUtils
+
+from detectron2.structures import BoxMode, RotatedBoxes, pairwise_iou_rotated
+from detectron2.utils.file_io import PathManager
+
+from .coco_evaluation import COCOEvaluator
+
+
+class RotatedCOCOeval(COCOeval):
+ @staticmethod
+ def is_rotated(box_list):
+ if type(box_list) == np.ndarray:
+ return box_list.shape[1] == 5
+ elif type(box_list) == list:
+ if box_list == []: # cannot decide the box_dim
+ return False
+ return np.all(
+ np.array(
+ [
+ (len(obj) == 5) and ((type(obj) == list) or (type(obj) == np.ndarray))
+ for obj in box_list
+ ]
+ )
+ )
+ return False
+
+ @staticmethod
+ def boxlist_to_tensor(boxlist, output_box_dim):
+ if type(boxlist) == np.ndarray:
+ box_tensor = torch.from_numpy(boxlist)
+ elif type(boxlist) == list:
+ if boxlist == []:
+ return torch.zeros((0, output_box_dim), dtype=torch.float32)
+ else:
+ box_tensor = torch.FloatTensor(boxlist)
+ else:
+ raise Exception("Unrecognized boxlist type")
+
+ input_box_dim = box_tensor.shape[1]
+ if input_box_dim != output_box_dim:
+ if input_box_dim == 4 and output_box_dim == 5:
+ box_tensor = BoxMode.convert(box_tensor, BoxMode.XYWH_ABS, BoxMode.XYWHA_ABS)
+ else:
+ raise Exception(
+ "Unable to convert from {}-dim box to {}-dim box".format(
+ input_box_dim, output_box_dim
+ )
+ )
+ return box_tensor
+
+ def compute_iou_dt_gt(self, dt, gt, is_crowd):
+ if self.is_rotated(dt) or self.is_rotated(gt):
+ # TODO: take is_crowd into consideration
+ assert all(c == 0 for c in is_crowd)
+ dt = RotatedBoxes(self.boxlist_to_tensor(dt, output_box_dim=5))
+ gt = RotatedBoxes(self.boxlist_to_tensor(gt, output_box_dim=5))
+ return pairwise_iou_rotated(dt, gt)
+ else:
+ # This is the same as the classical COCO evaluation
+ return maskUtils.iou(dt, gt, is_crowd)
+
+ def computeIoU(self, imgId: int, catId: int):
+ p = self.params
+ if p.useCats:
+ gt = self._gts[imgId, catId]
+ dt = self._dts[imgId, catId]
+ else:
+ gt = [_ for cId in p.catIds for _ in self._gts[imgId, cId]]
+ dt = [_ for cId in p.catIds for _ in self._dts[imgId, cId]]
+
+ if len(gt) == 0 or len(dt) == 0:
+ return []
+
+ inds = np.argsort([-d["score"] for d in dt], kind="mergesort")
+ dt = [dt[i] for i in inds]
+ if len(dt) > p.maxDets[-1]:
+ dt = dt[0 : p.maxDets[-1]]
+
+ assert p.iouType == "bbox", "unsupported iouType for iou computation"
+
+ g = [g["bbox"] for g in gt]
+ d = [d["bbox"] for d in dt]
+
+ # compute iou between each dt and gt region
+ iscrowd = [int(o["iscrowd"]) for o in gt]
+
+ # Note: this function is copied from cocoeval.py in cocoapi
+ # and the major difference is here.
+ ious = self.compute_iou_dt_gt(d, g, iscrowd)
+ return ious
+
+
+class RotatedCOCOEvaluator(COCOEvaluator):
+ """
+ Evaluate object proposal/instance detection outputs using COCO-like metrics and APIs,
+ with rotated boxes support.
+ Note: this uses IOU only and does not consider angle differences.
+ """
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a COCO model (e.g., GeneralizedRCNN).
+ It is a list of dict. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name", "image_id".
+ outputs: the outputs of a COCO model. It is a list of dicts with key
+ "instances" that contains :class:`Instances`.
+ """
+ for input, output in zip(inputs, outputs):
+ prediction = {"image_id": input["image_id"]}
+
+ if "instances" in output:
+ instances = output["instances"].to(self._cpu_device)
+
+ prediction["instances"] = self.instances_to_json(instances, input["image_id"])
+ if "proposals" in output:
+ prediction["proposals"] = output["proposals"].to(self._cpu_device)
+ self._predictions.append(prediction)
+
+ def instances_to_json(self, instances, img_id):
+ num_instance = len(instances)
+ if num_instance == 0:
+ return []
+
+ boxes = instances.pred_boxes.tensor.numpy()
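+ # Rotated predictions are Nx5 (cx, cy, w, h, angle in degrees) and are kept
+ # as-is; axis-aligned Nx4 boxes are converted from XYXY to COCO's XYWH below.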
+ if boxes.shape[1] == 4:
+ boxes = BoxMode.convert(boxes, BoxMode.XYXY_ABS, BoxMode.XYWH_ABS)
+ boxes = boxes.tolist()
+ scores = instances.scores.tolist()
+ classes = instances.pred_classes.tolist()
+
+ results = []
+ for k in range(num_instance):
+ result = {
+ "image_id": img_id,
+ "category_id": classes[k],
+ "bbox": boxes[k],
+ "score": scores[k],
+ }
+
+ results.append(result)
+ return results
+
+ def _eval_predictions(self, predictions, img_ids=None): # img_ids: unused
+ """
+ Evaluate predictions on the given tasks.
+ Fill self._results with the metrics of the tasks.
+ """
+ self._logger.info("Preparing results for COCO format ...")
+ coco_results = list(itertools.chain(*[x["instances"] for x in predictions]))
+
+ # unmap the category ids for COCO
+ if hasattr(self._metadata, "thing_dataset_id_to_contiguous_id"):
+ reverse_id_mapping = {
+ v: k for k, v in self._metadata.thing_dataset_id_to_contiguous_id.items()
+ }
+ for result in coco_results:
+ result["category_id"] = reverse_id_mapping[result["category_id"]]
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "coco_instances_results.json")
+ self._logger.info("Saving results to {}".format(file_path))
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(coco_results))
+ f.flush()
+
+ if not self._do_evaluation:
+ self._logger.info("Annotations are not available for evaluation.")
+ return
+
+ self._logger.info("Evaluating predictions ...")
+
+ assert self._tasks is None or set(self._tasks) == {
+ "bbox"
+ }, "[RotatedCOCOEvaluator] Only bbox evaluation is supported"
+ coco_eval = (
+ self._evaluate_predictions_on_coco(self._coco_api, coco_results)
+ if len(coco_results) > 0
+ else None # cocoapi does not handle empty results very well
+ )
+
+ task = "bbox"
+ res = self._derive_coco_results(
+ coco_eval, task, class_names=self._metadata.get("thing_classes")
+ )
+ self._results[task] = res
+
+ def _evaluate_predictions_on_coco(self, coco_gt, coco_results):
+ """
+ Evaluate the coco results using COCOEval API.
+ """
+ assert len(coco_results) > 0
+
+ coco_dt = coco_gt.loadRes(coco_results)
+
+ # Only bbox is supported for now
+ coco_eval = RotatedCOCOeval(coco_gt, coco_dt, iouType="bbox")
+
+ coco_eval.evaluate()
+ coco_eval.accumulate()
+ coco_eval.summarize()
+
+ return coco_eval
diff --git a/detectron2/evaluation/sem_seg_evaluation.py b/detectron2/evaluation/sem_seg_evaluation.py
new file mode 100644
index 0000000000000000000000000000000000000000..f87043b02f08777c4bea801eb1a9bcb1da747774
--- /dev/null
+++ b/detectron2/evaluation/sem_seg_evaluation.py
@@ -0,0 +1,265 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import itertools
+import json
+import logging
+import numpy as np
+import os
+from collections import OrderedDict
+from typing import Optional, Union
+import pycocotools.mask as mask_util
+import torch
+from PIL import Image
+
+from detectron2.data import DatasetCatalog, MetadataCatalog
+from detectron2.utils.comm import all_gather, is_main_process, synchronize
+from detectron2.utils.file_io import PathManager
+
+from .evaluator import DatasetEvaluator
+
+_CV2_IMPORTED = True
+try:
+ import cv2 # noqa
+except ImportError:
+ # OpenCV is an optional dependency at the moment
+ _CV2_IMPORTED = False
+
+
+def load_image_into_numpy_array(
+ filename: str,
+ copy: bool = False,
+ dtype: Optional[Union[np.dtype, str]] = None,
+) -> np.ndarray:
+ with PathManager.open(filename, "rb") as f:
+ array = np.array(Image.open(f), copy=copy, dtype=dtype)
+ return array
+
+
+class SemSegEvaluator(DatasetEvaluator):
+ """
+ Evaluate semantic segmentation metrics.
+ """
+
+ def __init__(
+ self,
+ dataset_name,
+ distributed=True,
+ output_dir=None,
+ *,
+ sem_seg_loading_fn=load_image_into_numpy_array,
+ num_classes=None,
+ ignore_label=None,
+ ):
+ """
+ Args:
+ dataset_name (str): name of the dataset to be evaluated.
+ distributed (bool): if True, will collect results from all ranks for evaluation.
+ Otherwise, will evaluate the results in the current process.
+ output_dir (str): an output directory to dump results.
+ sem_seg_loading_fn: function to read sem seg file and load into numpy array.
+ Default provided, but projects can customize.
+ num_classes, ignore_label: deprecated arguments
+ """
+ self._logger = logging.getLogger(__name__)
+ if num_classes is not None:
+ self._logger.warning(
+ "SemSegEvaluator(num_classes) is deprecated! It should be obtained from metadata."
+ )
+ if ignore_label is not None:
+ self._logger.warning(
+ "SemSegEvaluator(ignore_label) is deprecated! It should be obtained from metadata."
+ )
+ self._dataset_name = dataset_name
+ self._distributed = distributed
+ self._output_dir = output_dir
+
+ self._cpu_device = torch.device("cpu")
+
+ self.input_file_to_gt_file = {
+ dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
+ for dataset_record in DatasetCatalog.get(dataset_name)
+ }
+
+ meta = MetadataCatalog.get(dataset_name)
+ # Dict that maps contiguous training ids to COCO category ids
+ try:
+ c2d = meta.stuff_dataset_id_to_contiguous_id
+ self._contiguous_id_to_dataset_id = {v: k for k, v in c2d.items()}
+ except AttributeError:
+ self._contiguous_id_to_dataset_id = None
+ self._class_names = meta.stuff_classes
+ self.sem_seg_loading_fn = sem_seg_loading_fn
+ self._num_classes = len(meta.stuff_classes)
+ if num_classes is not None:
+ assert self._num_classes == num_classes, f"{self._num_classes} != {num_classes}"
+ self._ignore_label = ignore_label if ignore_label is not None else meta.ignore_label
+
+ # cv2.erode does not work for int datatypes; it only works for uint8, so boundary masks are computed on uint8 arrays.
+ self._compute_boundary_iou = True
+ if not _CV2_IMPORTED:
+ self._compute_boundary_iou = False
+ self._logger.warning(
+ """Boundary IoU calculation requires OpenCV. B-IoU metrics are
+ not going to be computed because OpenCV is not available to import."""
+ )
+ if self._num_classes >= np.iinfo(np.uint8).max:
+ self._compute_boundary_iou = False
+ self._logger.warning(
+ f"""SemSegEvaluator(num_classes) exceeds the maximum value supported for Boundary IoU calculation!
+ B-IoU metrics are not going to be computed. Max allowed value (exclusive)
+ for num_classes for calculating Boundary IoU is {np.iinfo(np.uint8).max}.
+ The number of classes of dataset {self._dataset_name} is {self._num_classes}"""
+ )
+
+ def reset(self):
+ self._conf_matrix = np.zeros((self._num_classes + 1, self._num_classes + 1), dtype=np.int64)
+ self._b_conf_matrix = np.zeros(
+ (self._num_classes + 1, self._num_classes + 1), dtype=np.int64
+ )
+ self._predictions = []
+
+ def process(self, inputs, outputs):
+ """
+ Args:
+ inputs: the inputs to a model.
+ It is a list of dicts. Each dict corresponds to an image and
+ contains keys like "height", "width", "file_name".
+ outputs: the outputs of a model. It is either list of semantic segmentation predictions
+ (Tensor [H, W]) or list of dicts with key "sem_seg" that contains semantic
+ segmentation prediction in the same format.
+ """
+ for input, output in zip(inputs, outputs):
+ output = output["sem_seg"].argmax(dim=0).to(self._cpu_device)
+ pred = np.array(output, dtype=int)
+ gt_filename = self.input_file_to_gt_file[input["file_name"]]
+ gt = self.sem_seg_loading_fn(gt_filename, dtype=int)
+
+ gt[gt == self._ignore_label] = self._num_classes
+
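+ # Each (pred, gt) pixel pair is encoded as a single index
+ # pred * (num_classes + 1) + gt, so bincount over these indices yields the
+ # flattened (C+1)x(C+1) confusion matrix (rows: prediction, cols: ground truth).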
+ self._conf_matrix += np.bincount(
+ (self._num_classes + 1) * pred.reshape(-1) + gt.reshape(-1),
+ minlength=self._conf_matrix.size,
+ ).reshape(self._conf_matrix.shape)
+
+ if self._compute_boundary_iou:
+ b_gt = self._mask_to_boundary(gt.astype(np.uint8))
+ b_pred = self._mask_to_boundary(pred.astype(np.uint8))
+
+ self._b_conf_matrix += np.bincount(
+ (self._num_classes + 1) * b_pred.reshape(-1) + b_gt.reshape(-1),
+ minlength=self._conf_matrix.size,
+ ).reshape(self._conf_matrix.shape)
+
+ self._predictions.extend(self.encode_json_sem_seg(pred, input["file_name"]))
+
+ def evaluate(self):
+ """
+ Evaluates standard semantic segmentation metrics (http://cocodataset.org/#stuff-eval):
+
+ * Mean intersection-over-union averaged across classes (mIoU)
+ * Frequency Weighted IoU (fwIoU)
+ * Mean pixel accuracy averaged across classes (mACC)
+ * Pixel Accuracy (pACC)
+ """
+ if self._distributed:
+ synchronize()
+ conf_matrix_list = all_gather(self._conf_matrix)
+ b_conf_matrix_list = all_gather(self._b_conf_matrix)
+ self._predictions = all_gather(self._predictions)
+ self._predictions = list(itertools.chain(*self._predictions))
+ if not is_main_process():
+ return
+
+ self._conf_matrix = np.zeros_like(self._conf_matrix)
+ for conf_matrix in conf_matrix_list:
+ self._conf_matrix += conf_matrix
+
+ self._b_conf_matrix = np.zeros_like(self._b_conf_matrix)
+ for b_conf_matrix in b_conf_matrix_list:
+ self._b_conf_matrix += b_conf_matrix
+
+ if self._output_dir:
+ PathManager.mkdirs(self._output_dir)
+ file_path = os.path.join(self._output_dir, "sem_seg_predictions.json")
+ with PathManager.open(file_path, "w") as f:
+ f.write(json.dumps(self._predictions))
+
+ acc = np.full(self._num_classes, np.nan, dtype=float)
+ iou = np.full(self._num_classes, np.nan, dtype=float)
+ tp = self._conf_matrix.diagonal()[:-1].astype(float)
+ pos_gt = np.sum(self._conf_matrix[:-1, :-1], axis=0).astype(float)
+ class_weights = pos_gt / np.sum(pos_gt)
+ pos_pred = np.sum(self._conf_matrix[:-1, :-1], axis=1).astype(float)
+ acc_valid = pos_gt > 0
+ acc[acc_valid] = tp[acc_valid] / pos_gt[acc_valid]
+ union = pos_gt + pos_pred - tp
+ iou_valid = np.logical_and(acc_valid, union > 0)
+ iou[iou_valid] = tp[iou_valid] / union[iou_valid]
+ macc = np.sum(acc[acc_valid]) / np.sum(acc_valid)
+ miou = np.sum(iou[iou_valid]) / np.sum(iou_valid)
+ fiou = np.sum(iou[iou_valid] * class_weights[iou_valid])
+ pacc = np.sum(tp) / np.sum(pos_gt)
+
+ if self._compute_boundary_iou:
+ b_iou = np.full(self._num_classes, np.nan, dtype=float)
+ b_tp = self._b_conf_matrix.diagonal()[:-1].astype(float)
+ b_pos_gt = np.sum(self._b_conf_matrix[:-1, :-1], axis=0).astype(float)
+ b_pos_pred = np.sum(self._b_conf_matrix[:-1, :-1], axis=1).astype(float)
+ b_union = b_pos_gt + b_pos_pred - b_tp
+ b_iou_valid = b_union > 0
+ b_iou[b_iou_valid] = b_tp[b_iou_valid] / b_union[b_iou_valid]
+
+ res = {}
+ res["mIoU"] = 100 * miou
+ res["fwIoU"] = 100 * fiou
+ for i, name in enumerate(self._class_names):
+ res[f"IoU-{name}"] = 100 * iou[i]
+ if self._compute_boundary_iou:
+ res[f"BoundaryIoU-{name}"] = 100 * b_iou[i]
+ res[f"min(IoU, B-Iou)-{name}"] = 100 * min(iou[i], b_iou[i])
+ res["mACC"] = 100 * macc
+ res["pACC"] = 100 * pacc
+ for i, name in enumerate(self._class_names):
+ res[f"ACC-{name}"] = 100 * acc[i]
+
+ if self._output_dir:
+ file_path = os.path.join(self._output_dir, "sem_seg_evaluation.pth")
+ with PathManager.open(file_path, "wb") as f:
+ torch.save(res, f)
+ results = OrderedDict({"sem_seg": res})
+ self._logger.info(results)
+ return results
+
+ def encode_json_sem_seg(self, sem_seg, input_file_name):
+ """
+ Convert semantic segmentation to COCO stuff format with segments encoded as RLEs.
+ See http://cocodataset.org/#format-results
+ """
+ json_list = []
+ for label in np.unique(sem_seg):
+ if self._contiguous_id_to_dataset_id is not None:
+ assert (
+ label in self._contiguous_id_to_dataset_id
+ ), "Label {} is not in the metadata info for {}".format(label, self._dataset_name)
+ dataset_id = self._contiguous_id_to_dataset_id[label]
+ else:
+ dataset_id = int(label)
+ mask = (sem_seg == label).astype(np.uint8)
+ mask_rle = mask_util.encode(np.array(mask[:, :, None], order="F"))[0]
+ mask_rle["counts"] = mask_rle["counts"].decode("utf-8")
+ json_list.append(
+ {"file_name": input_file_name, "category_id": dataset_id, "segmentation": mask_rle}
+ )
+ return json_list
+
+ def _mask_to_boundary(self, mask: np.ndarray, dilation_ratio=0.02):
+ assert mask.ndim == 2, "mask_to_boundary expects a 2-dimensional image"
+ h, w = mask.shape
+ diag_len = np.sqrt(h**2 + w**2)
+ dilation = max(1, int(round(dilation_ratio * diag_len)))
+ kernel = np.ones((3, 3), dtype=np.uint8)
+
+ padded_mask = cv2.copyMakeBorder(mask, 1, 1, 1, 1, cv2.BORDER_CONSTANT, value=0)
+ eroded_mask_with_padding = cv2.erode(padded_mask, kernel, iterations=dilation)
+ eroded_mask = eroded_mask_with_padding[1:-1, 1:-1]
+ boundary = mask - eroded_mask
+ return boundary
diff --git a/detectron2/evaluation/testing.py b/detectron2/evaluation/testing.py
new file mode 100644
index 0000000000000000000000000000000000000000..9e5ae625bb0593fc20739dd3ea549157e4df4f3d
--- /dev/null
+++ b/detectron2/evaluation/testing.py
@@ -0,0 +1,85 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import logging
+import numpy as np
+import pprint
+import sys
+from collections.abc import Mapping
+
+
+def print_csv_format(results):
+ """
+ Print main metrics in a format similar to Detectron,
+ so that they are easy to copypaste into a spreadsheet.
+
+ Args:
+ results (OrderedDict[dict]): task_name -> {metric -> score}
+ unordered dict can also be printed, but in arbitrary order
+ """
+ assert isinstance(results, Mapping) or not len(results), results
+ logger = logging.getLogger(__name__)
+ for task, res in results.items():
+ if isinstance(res, Mapping):
+ # Don't print "AP-category" metrics since they are usually not tracked.
+ important_res = [(k, v) for k, v in res.items() if "-" not in k]
+ logger.info("copypaste: Task: {}".format(task))
+ logger.info("copypaste: " + ",".join([k[0] for k in important_res]))
+ logger.info("copypaste: " + ",".join(["{0:.4f}".format(k[1]) for k in important_res]))
+ else:
+ logger.info(f"copypaste: {task}={res}")
+
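+# Illustrative output for results = {"bbox": {"AP": 39.4, "AP50": 58.0, "AP-person": 52.1}}
+# (per-category "AP-person" is filtered out because its name contains "-"):
+#
+#   copypaste: Task: bbox
+#   copypaste: AP,AP50
+#   copypaste: 39.4000,58.0000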
+
+def verify_results(cfg, results):
+ """
+ Args:
+ results (OrderedDict[dict]): task_name -> {metric -> score}
+
+ Returns:
+ bool: whether the verification succeeds or not
+ """
+ expected_results = cfg.TEST.EXPECTED_RESULTS
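+ # cfg.TEST.EXPECTED_RESULTS is a list of [task, metric, expected, tolerance]
+ # entries, e.g. (an illustrative value) [["bbox", "AP", 38.5, 0.2]].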
+ if not len(expected_results):
+ return True
+
+ ok = True
+ for task, metric, expected, tolerance in expected_results:
+ actual = results[task].get(metric, None)
+ if actual is None:
+ ok = False
+ continue
+ if not np.isfinite(actual):
+ ok = False
+ continue
+ diff = abs(actual - expected)
+ if diff > tolerance:
+ ok = False
+
+ logger = logging.getLogger(__name__)
+ if not ok:
+ logger.error("Result verification failed!")
+ logger.error("Expected Results: " + str(expected_results))
+ logger.error("Actual Results: " + pprint.pformat(results))
+
+ sys.exit(1)
+ else:
+ logger.info("Results verification passed.")
+ return ok
+
+
+def flatten_results_dict(results):
+ """
+ Expand a hierarchical dict of scalars into a flat dict of scalars.
+ If results[k1][k2][k3] = v, the returned dict will have the entry
+ {"k1/k2/k3": v}.
+
+ Args:
+ results (dict):
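+
+ Example (illustrative)::
+
+ flatten_results_dict({"bbox": {"AP": 40.0, "AP50": 60.0}})
+ # -> {"bbox/AP": 40.0, "bbox/AP50": 60.0}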
+ """
+ r = {}
+ for k, v in results.items():
+ if isinstance(v, Mapping):
+ v = flatten_results_dict(v)
+ for kk, vv in v.items():
+ r[k + "/" + kk] = vv
+ else:
+ r[k] = v
+ return r
diff --git a/detectron2/export/README.md b/detectron2/export/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..c86ff62516f4e8e4b1a6c1f33f11192933cf3861
--- /dev/null
+++ b/detectron2/export/README.md
@@ -0,0 +1,15 @@
+
+This directory contains code to prepare a detectron2 model for deployment.
+Currently it supports exporting a detectron2 model to TorchScript, ONNX, or (deprecated) Caffe2 format.
+
+Please see [documentation](https://detectron2.readthedocs.io/tutorials/deployment.html) for its usage.
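+
+A minimal sketch of tracing-based TorchScript export with `TracingAdapter` (assuming
+`model` is a loaded detectron2 model in eval mode and `inputs` is a list containing one
+standard input dict; this is only an illustration, see the tutorial above for the full
+workflow):
+
+```python
+import torch
+from detectron2.export import TracingAdapter
+
+# wrap the model so its dict-based inputs/outputs become flattened tensors for tracing
+adapter = TracingAdapter(model, inputs)
+ts_model = torch.jit.trace(adapter, adapter.flattened_inputs)
+ts_model.save("model.ts")
+```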
+
+
+### Acknowledgements
+
+Thanks to the Mobile Vision team at Facebook for developing the Caffe2 conversion tools.
+
+Thanks to the Computing Platform Department - PAI team at Alibaba Group (@bddpqq, @chenbohua3) who
+helped export Detectron2 models to TorchScript.
+
+Thanks to the ONNX Converter team at Microsoft who helped export Detectron2 models to ONNX.
diff --git a/detectron2/export/__init__.py b/detectron2/export/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..5a58758f64aae6071fa688be4400622ce6036efa
--- /dev/null
+++ b/detectron2/export/__init__.py
@@ -0,0 +1,30 @@
+# -*- coding: utf-8 -*-
+
+import warnings
+
+from .flatten import TracingAdapter
+from .torchscript import dump_torchscript_IR, scripting_with_instances
+
+try:
+ from caffe2.proto import caffe2_pb2 as _tmp
+ from caffe2.python import core
+
+ # caffe2 is optional
+except ImportError:
+ pass
+else:
+ from .api import *
+
+
+# TODO: Update ONNX Opset version and run tests when a newer PyTorch is supported
+STABLE_ONNX_OPSET_VERSION = 11
+
+
+def add_export_config(cfg):
+ warnings.warn(
+ "add_export_config has been deprecated and behaves as no-op function.", DeprecationWarning
+ )
+ return cfg
+
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/detectron2/export/api.py b/detectron2/export/api.py
new file mode 100644
index 0000000000000000000000000000000000000000..1a272fed929217f18e04f731365f4bf7472110fc
--- /dev/null
+++ b/detectron2/export/api.py
@@ -0,0 +1,230 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import copy
+import logging
+import os
+import torch
+from caffe2.proto import caffe2_pb2
+from torch import nn
+
+from detectron2.config import CfgNode
+from detectron2.utils.file_io import PathManager
+
+from .caffe2_inference import ProtobufDetectionModel
+from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
+from .shared import get_pb_arg_vali, get_pb_arg_vals, save_graph
+
+__all__ = [
+ "Caffe2Model",
+ "Caffe2Tracer",
+]
+
+
+class Caffe2Tracer:
+ """
+ Make a detectron2 model traceable with Caffe2 operators.
+ This class creates a traceable version of a detectron2 model which:
+
+ 1. Rewrites parts of the model using ops in Caffe2. Note that some ops do
+ not have a GPU implementation in Caffe2.
+ 2. Removes post-processing and only produces raw layer outputs.
+
+ After making a traceable model, the class provides methods to export such a
+ model to different deployment formats.
+ Exported graphs produced by this class take two input tensors:
+
+ 1. (1, C, H, W) float "data" which is an image (usually in [0, 255]).
+ (H, W) often has to be padded to a multiple of 32 (depending on the model
+ architecture).
+ 2. 1x3 float "im_info", each row of which is (height, width, 1.0).
+ Height and width are true image shapes before padding.
+
+ The class currently only supports models using builtin meta architectures.
+ Batch inference is not supported, and contributions are welcome.
+ """
+
+ def __init__(self, cfg: CfgNode, model: nn.Module, inputs):
+ """
+ Args:
+ cfg (CfgNode): a detectron2 config used to construct caffe2-compatible model.
+ model (nn.Module): An original pytorch model. Must be among a few official models
+ in detectron2 that can be converted to become caffe2-compatible automatically.
+ Weights have to be already loaded to this model.
+ inputs: sample inputs that the given model takes for inference.
+ Will be used to trace the model. For most models, random inputs with
+ no detected objects will not work as they lead to wrong traces.
+ """
+ assert isinstance(cfg, CfgNode), cfg
+ assert isinstance(model, torch.nn.Module), type(model)
+
+ # TODO make it support custom models, by passing in c2 model directly
+ C2MetaArch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[cfg.MODEL.META_ARCHITECTURE]
+ self.traceable_model = C2MetaArch(cfg, copy.deepcopy(model))
+ self.inputs = inputs
+ self.traceable_inputs = self.traceable_model.get_caffe2_inputs(inputs)
+
+ def export_caffe2(self):
+ """
+ Export the model to Caffe2's protobuf format.
+ The returned object can be saved with its :meth:`.save_protobuf()` method.
+ The result can be loaded and executed using Caffe2 runtime.
+
+ Returns:
+ :class:`Caffe2Model`
+ """
+ from .caffe2_export import export_caffe2_detection_model
+
+ predict_net, init_net = export_caffe2_detection_model(
+ self.traceable_model, self.traceable_inputs
+ )
+ return Caffe2Model(predict_net, init_net)
+
+ def export_onnx(self):
+ """
+ Export the model to ONNX format.
+ Note that the exported model contains custom ops only available in caffe2, therefore it
+ cannot be directly executed by other runtimes (such as onnxruntime or TensorRT).
+ Post-processing or transformation passes may be applied on the model to accommodate
+ different runtimes, but we currently do not provide support for them.
+
+ Returns:
+ onnx.ModelProto: an onnx model.
+ """
+ from .caffe2_export import export_onnx_model as export_onnx_model_impl
+
+ return export_onnx_model_impl(self.traceable_model, (self.traceable_inputs,))
+
+ def export_torchscript(self):
+ """
+ Export the model to a ``torch.jit.TracedModule`` by tracing.
+ The returned object can be saved to a file by ``.save()``.
+
+ Returns:
+ torch.jit.TracedModule: a torch TracedModule
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Tracing the model with torch.jit.trace ...")
+ with torch.no_grad():
+ return torch.jit.trace(self.traceable_model, (self.traceable_inputs,))
+
+
+class Caffe2Model(nn.Module):
+ """
+ A wrapper around the traced model in Caffe2's protobuf format.
+ The exported graph has different inputs/outputs from the original Pytorch
+ model, as explained in :class:`Caffe2Tracer`. This class wraps around the
+ exported graph to simulate the same interface as the original Pytorch model.
+ It also provides functions to save/load models in Caffe2's format.
+
+ Examples:
+ ::
+ c2_model = Caffe2Tracer(cfg, torch_model, inputs).export_caffe2()
+ inputs = [{"image": img_tensor_CHW}]
+ outputs = c2_model(inputs)
+ orig_outputs = torch_model(inputs)
+ """
+
+ def __init__(self, predict_net, init_net):
+ super().__init__()
+ self.eval() # always in eval mode
+ self._predict_net = predict_net
+ self._init_net = init_net
+ self._predictor = None
+
+ __init__.__HIDE_SPHINX_DOC__ = True
+
+ @property
+ def predict_net(self):
+ """
+ caffe2.core.Net: the underlying caffe2 predict net
+ """
+ return self._predict_net
+
+ @property
+ def init_net(self):
+ """
+ caffe2.core.Net: the underlying caffe2 init net
+ """
+ return self._init_net
+
+ def save_protobuf(self, output_dir):
+ """
+ Save the model as caffe2's protobuf format.
+ It saves the following files:
+
+ * "model.pb": definition of the graph. Can be visualized with
+ tools like `netron <https://github.com/lutzroeder/netron>`_.
+ * "model_init.pb": model parameters
+ * "model.pbtxt": human-readable definition of the graph. Not
+ needed for deployment.
+
+ Args:
+ output_dir (str): the output directory to save protobuf files.
+ """
+ logger = logging.getLogger(__name__)
+ logger.info("Saving model to {} ...".format(output_dir))
+ if not PathManager.exists(output_dir):
+ PathManager.mkdirs(output_dir)
+
+ with PathManager.open(os.path.join(output_dir, "model.pb"), "wb") as f:
+ f.write(self._predict_net.SerializeToString())
+ with PathManager.open(os.path.join(output_dir, "model.pbtxt"), "w") as f:
+ f.write(str(self._predict_net))
+ with PathManager.open(os.path.join(output_dir, "model_init.pb"), "wb") as f:
+ f.write(self._init_net.SerializeToString())
+
+ def save_graph(self, output_file, inputs=None):
+ """
+ Save the graph as SVG format.
+
+ Args:
+ output_file (str): a SVG file
+ inputs: optional inputs given to the model.
+ If given, the inputs will be used to run the graph to record
+ shape of every tensor. The shape information will be
+ saved together with the graph.
+ """
+ from .caffe2_export import run_and_save_graph
+
+ if inputs is None:
+ save_graph(self._predict_net, output_file, op_only=False)
+ else:
+ size_divisibility = get_pb_arg_vali(self._predict_net, "size_divisibility", 0)
+ device = get_pb_arg_vals(self._predict_net, "device", b"cpu").decode("ascii")
+ inputs = convert_batched_inputs_to_c2_format(inputs, size_divisibility, device)
+ inputs = [x.cpu().numpy() for x in inputs]
+ run_and_save_graph(self._predict_net, self._init_net, inputs, output_file)
+
+ @staticmethod
+ def load_protobuf(dir):
+ """
+ Args:
+ dir (str): a directory used to save Caffe2Model with
+ :meth:`save_protobuf`.
+ The files "model.pb" and "model_init.pb" are needed.
+
+ Returns:
+ Caffe2Model: the caffe2 model loaded from this directory.
+ """
+ predict_net = caffe2_pb2.NetDef()
+ with PathManager.open(os.path.join(dir, "model.pb"), "rb") as f:
+ predict_net.ParseFromString(f.read())
+
+ init_net = caffe2_pb2.NetDef()
+ with PathManager.open(os.path.join(dir, "model_init.pb"), "rb") as f:
+ init_net.ParseFromString(f.read())
+
+ return Caffe2Model(predict_net, init_net)
+
+ def __call__(self, inputs):
+ """
+ An interface that wraps around a Caffe2 model and mimics detectron2's models'
+ input/output format. See details about the format at :doc:`/tutorials/models`.
+ This is used to compare the outputs of caffe2 model with its original torch model.
+
+ Due to the extra conversion between Pytorch/Caffe2, this method is not meant for
+ benchmarking. Because of the conversion, this method also has a dependency
+ on detectron2 in order to convert to detectron2's output format.
+ """
+ if self._predictor is None:
+ self._predictor = ProtobufDetectionModel(self._predict_net, self._init_net)
+ return self._predictor(inputs)
diff --git a/detectron2/export/c10.py b/detectron2/export/c10.py
new file mode 100644
index 0000000000000000000000000000000000000000..adbc62bea70b67f8ba6fef83f29826f165dc7c4d
--- /dev/null
+++ b/detectron2/export/c10.py
@@ -0,0 +1,571 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import math
+from typing import Dict
+import torch
+import torch.nn.functional as F
+
+from detectron2.layers import ShapeSpec, cat
+from detectron2.layers.roi_align_rotated import ROIAlignRotated
+from detectron2.modeling import poolers
+from detectron2.modeling.proposal_generator import rpn
+from detectron2.modeling.roi_heads.mask_head import mask_rcnn_inference
+from detectron2.structures import Boxes, ImageList, Instances, Keypoints, RotatedBoxes
+
+from .shared import alias, to_device
+
+
+"""
+This file contains caffe2-compatible implementation of several detectron2 components.
+"""
+
+
+class Caffe2Boxes(Boxes):
+ """
+ Representing a list of detectron2.structures.Boxes from a minibatch, each box
+ is represented by a 5d vector (batch index + 4 coordinates), or a 6d vector
+ (batch index + 5 coordinates) for RotatedBoxes.
+ """
+
+ def __init__(self, tensor):
+ assert isinstance(tensor, torch.Tensor)
+ assert tensor.dim() == 2 and tensor.size(-1) in [4, 5, 6], tensor.size()
+ # TODO: make tensor immutable when dim is Nx5 for Boxes,
+ # and Nx6 for RotatedBoxes?
+ self.tensor = tensor
+
+
+# TODO clean up this class, maybe just extend Instances
+class InstancesList:
+ """
+ Tensor representation of a list of Instances objects for a batch of images.
+
+ When dealing with a batch of images with Caffe2 ops, a list of bboxes
+ (instances) is usually represented by a single Tensor with size
+ (sigma(Ni), 5) or (sigma(Ni), 4) plus a batch split Tensor. This class is
+ for providing common functions to convert between these two representations.
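+
+ For example (illustrative): two images with 3 and 2 instances are stored as a
+ single (5, 5) boxes tensor whose first column is the batch index
+ [0, 0, 0, 1, 1], rather than as separate (3, 4) and (2, 4) tensors.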
+ """
+
+ def __init__(self, im_info, indices, extra_fields=None):
+ # [N, 3] -> (H, W, Scale)
+ self.im_info = im_info
+ # [N,] -> index of the batch to which the instance belongs
+ self.indices = indices
+ # [N, ...]
+ self.batch_extra_fields = extra_fields or {}
+
+ self.image_size = self.im_info
+
+ def get_fields(self):
+ """like `get_fields` in the Instances object,
+ but returns each field in its tensor representation"""
+ ret = {}
+ for k, v in self.batch_extra_fields.items():
+ # if isinstance(v, torch.Tensor):
+ # tensor_rep = v
+ # elif isinstance(v, (Boxes, Keypoints)):
+ # tensor_rep = v.tensor
+ # else:
+ # raise ValueError("Can't find tensor representation for: {}".format())
+ ret[k] = v
+ return ret
+
+ def has(self, name):
+ return name in self.batch_extra_fields
+
+ def set(self, name, value):
+ # len(tensor) is a bad practice that generates ONNX constants during tracing.
+ # Although not a problem for the `assert` statement below, torch ONNX exporter
+ # still raises a misleading warning, as it does not know this call comes from `assert`
+ if isinstance(value, Boxes):
+ data_len = value.tensor.shape[0]
+ elif isinstance(value, torch.Tensor):
+ data_len = value.shape[0]
+ else:
+ data_len = len(value)
+ if len(self.batch_extra_fields):
+ assert (
+ len(self) == data_len
+ ), "Adding a field of length {} to an Instances of length {}".format(data_len, len(self))
+ self.batch_extra_fields[name] = value
+
+ def __getattr__(self, name):
+ if name not in self.batch_extra_fields:
+ raise AttributeError("Cannot find field '{}' in the given Instances!".format(name))
+ return self.batch_extra_fields[name]
+
+ def __len__(self):
+ return len(self.indices)
+
+ def flatten(self):
+ ret = []
+ for _, v in self.batch_extra_fields.items():
+ if isinstance(v, (Boxes, Keypoints)):
+ ret.append(v.tensor)
+ else:
+ ret.append(v)
+ return ret
+
+ @staticmethod
+ def to_d2_instances_list(instances_list):
+ """
+ Convert InstancesList to List[Instances]. The input `instances_list` can
+ also be a List[Instances], in this case this method is a non-op.
+ """
+ if not isinstance(instances_list, InstancesList):
+ assert all(isinstance(x, Instances) for x in instances_list)
+ return instances_list
+
+ ret = []
+ for i, info in enumerate(instances_list.im_info):
+ instances = Instances(torch.Size([int(info[0].item()), int(info[1].item())]))
+
+ ids = instances_list.indices == i
+ for k, v in instances_list.batch_extra_fields.items():
+ if isinstance(v, torch.Tensor):
+ instances.set(k, v[ids])
+ continue
+ elif isinstance(v, Boxes):
+ instances.set(k, v[ids, -4:])
+ continue
+
+ target_type, tensor_source = v
+ assert isinstance(tensor_source, torch.Tensor)
+ assert tensor_source.shape[0] == instances_list.indices.shape[0]
+ tensor_source = tensor_source[ids]
+
+ if issubclass(target_type, Boxes):
+ instances.set(k, Boxes(tensor_source[:, -4:]))
+ elif issubclass(target_type, Keypoints):
+ instances.set(k, Keypoints(tensor_source))
+ elif issubclass(target_type, torch.Tensor):
+ instances.set(k, tensor_source)
+ else:
+ raise ValueError("Can't handle target type: {}".format(target_type))
+
+ ret.append(instances)
+ return ret
+
+
+class Caffe2Compatible:
+ """
+ A model can inherit this class to indicate that it can be traced and deployed with caffe2.
+ """
+
+ def _get_tensor_mode(self):
+ return self._tensor_mode
+
+ def _set_tensor_mode(self, v):
+ self._tensor_mode = v
+
+ tensor_mode = property(_get_tensor_mode, _set_tensor_mode)
+ """
+ If true, the model expects C2-style tensor only inputs/outputs format.
+ """
+
+
+class Caffe2RPN(Caffe2Compatible, rpn.RPN):
+ @classmethod
+ def from_config(cls, cfg, input_shape: Dict[str, ShapeSpec]):
+ ret = super(Caffe2Compatible, cls).from_config(cfg, input_shape)
+ assert tuple(cfg.MODEL.RPN.BBOX_REG_WEIGHTS) == (1.0, 1.0, 1.0, 1.0) or tuple(
+ cfg.MODEL.RPN.BBOX_REG_WEIGHTS
+ ) == (1.0, 1.0, 1.0, 1.0, 1.0)
+ return ret
+
+ def _generate_proposals(
+ self, images, objectness_logits_pred, anchor_deltas_pred, gt_instances=None
+ ):
+ assert isinstance(images, ImageList)
+ if self.tensor_mode:
+ im_info = images.image_sizes
+ else:
+ im_info = torch.tensor([[im_sz[0], im_sz[1], 1.0] for im_sz in images.image_sizes]).to(
+ images.tensor.device
+ )
+ assert isinstance(im_info, torch.Tensor)
+
+ rpn_rois_list = []
+ rpn_roi_probs_list = []
+ for scores, bbox_deltas, cell_anchors_tensor, feat_stride in zip(
+ objectness_logits_pred,
+ anchor_deltas_pred,
+ [b for (n, b) in self.anchor_generator.cell_anchors.named_buffers()],
+ self.anchor_generator.strides,
+ ):
+ scores = scores.detach()
+ bbox_deltas = bbox_deltas.detach()
+
+ rpn_rois, rpn_roi_probs = torch.ops._caffe2.GenerateProposals(
+ scores,
+ bbox_deltas,
+ im_info,
+ cell_anchors_tensor,
+ spatial_scale=1.0 / feat_stride,
+ pre_nms_topN=self.pre_nms_topk[self.training],
+ post_nms_topN=self.post_nms_topk[self.training],
+ nms_thresh=self.nms_thresh,
+ min_size=self.min_box_size,
+ # correct_transform_coords=True, # deprecated argument
+ angle_bound_on=True, # Default
+ angle_bound_lo=-180,
+ angle_bound_hi=180,
+ clip_angle_thresh=1.0, # Default
+ legacy_plus_one=False,
+ )
+ rpn_rois_list.append(rpn_rois)
+ rpn_roi_probs_list.append(rpn_roi_probs)
+
+        # For FPN in D2, in RPN all proposals from different levels are concatenated
+ # together, ranked and picked by top post_nms_topk. Then in ROIPooler
+ # it calculates level_assignments and calls the RoIAlign from
+ # the corresponding level.
+
+ if len(objectness_logits_pred) == 1:
+ rpn_rois = rpn_rois_list[0]
+ rpn_roi_probs = rpn_roi_probs_list[0]
+ else:
+ assert len(rpn_rois_list) == len(rpn_roi_probs_list)
+ rpn_post_nms_topN = self.post_nms_topk[self.training]
+
+ device = rpn_rois_list[0].device
+ input_list = [to_device(x, "cpu") for x in (rpn_rois_list + rpn_roi_probs_list)]
+
+ # TODO remove this after confirming rpn_max_level/rpn_min_level
+            # are not needed in CollectRpnProposals.
+ feature_strides = list(self.anchor_generator.strides)
+ rpn_min_level = int(math.log2(feature_strides[0]))
+ rpn_max_level = int(math.log2(feature_strides[-1]))
+ assert (rpn_max_level - rpn_min_level + 1) == len(
+ rpn_rois_list
+ ), "CollectRpnProposals requires continuous levels"
+
+ rpn_rois = torch.ops._caffe2.CollectRpnProposals(
+ input_list,
+ # NOTE: in current implementation, rpn_max_level and rpn_min_level
+                # are not needed; only the difference between the two matters, and it
+                # can be inferred from the number of inputs. Keep them for now for
+                # consistency.
+ rpn_max_level=2 + len(rpn_rois_list) - 1,
+ rpn_min_level=2,
+ rpn_post_nms_topN=rpn_post_nms_topN,
+ )
+ rpn_rois = to_device(rpn_rois, device)
+ rpn_roi_probs = []
+
+ proposals = self.c2_postprocess(im_info, rpn_rois, rpn_roi_probs, self.tensor_mode)
+ return proposals, {}
+
+ def forward(self, images, features, gt_instances=None):
+ assert not self.training
+ features = [features[f] for f in self.in_features]
+ objectness_logits_pred, anchor_deltas_pred = self.rpn_head(features)
+ return self._generate_proposals(
+ images,
+ objectness_logits_pred,
+ anchor_deltas_pred,
+ gt_instances,
+ )
+
+ @staticmethod
+ def c2_postprocess(im_info, rpn_rois, rpn_roi_probs, tensor_mode):
+ proposals = InstancesList(
+ im_info=im_info,
+ indices=rpn_rois[:, 0],
+ extra_fields={
+ "proposal_boxes": Caffe2Boxes(rpn_rois),
+ "objectness_logits": (torch.Tensor, rpn_roi_probs),
+ },
+ )
+ if not tensor_mode:
+ proposals = InstancesList.to_d2_instances_list(proposals)
+ else:
+ proposals = [proposals]
+ return proposals
+
+
+class Caffe2ROIPooler(Caffe2Compatible, poolers.ROIPooler):
+ @staticmethod
+ def c2_preprocess(box_lists):
+ assert all(isinstance(x, Boxes) for x in box_lists)
+ if all(isinstance(x, Caffe2Boxes) for x in box_lists):
+ # input is pure-tensor based
+ assert len(box_lists) == 1
+ pooler_fmt_boxes = box_lists[0].tensor
+ else:
+ pooler_fmt_boxes = poolers.convert_boxes_to_pooler_format(box_lists)
+ return pooler_fmt_boxes
+
+ def forward(self, x, box_lists):
+ assert not self.training
+
+ pooler_fmt_boxes = self.c2_preprocess(box_lists)
+ num_level_assignments = len(self.level_poolers)
+
+ if num_level_assignments == 1:
+ if isinstance(self.level_poolers[0], ROIAlignRotated):
+ c2_roi_align = torch.ops._caffe2.RoIAlignRotated
+ aligned = True
+ else:
+ c2_roi_align = torch.ops._caffe2.RoIAlign
+ aligned = self.level_poolers[0].aligned
+
+ x0 = x[0]
+ if x0.is_quantized:
+ x0 = x0.dequantize()
+
+ out = c2_roi_align(
+ x0,
+ pooler_fmt_boxes,
+ order="NCHW",
+ spatial_scale=float(self.level_poolers[0].spatial_scale),
+ pooled_h=int(self.output_size[0]),
+ pooled_w=int(self.output_size[1]),
+ sampling_ratio=int(self.level_poolers[0].sampling_ratio),
+ aligned=aligned,
+ )
+ return out
+
+ device = pooler_fmt_boxes.device
+ assert (
+ self.max_level - self.min_level + 1 == 4
+ ), "Currently DistributeFpnProposals only support 4 levels"
+ fpn_outputs = torch.ops._caffe2.DistributeFpnProposals(
+ to_device(pooler_fmt_boxes, "cpu"),
+ roi_canonical_scale=self.canonical_box_size,
+ roi_canonical_level=self.canonical_level,
+ roi_max_level=self.max_level,
+ roi_min_level=self.min_level,
+ legacy_plus_one=False,
+ )
+ fpn_outputs = [to_device(x, device) for x in fpn_outputs]
+
+ rois_fpn_list = fpn_outputs[:-1]
+ rois_idx_restore_int32 = fpn_outputs[-1]
+
+ roi_feat_fpn_list = []
+ for roi_fpn, x_level, pooler in zip(rois_fpn_list, x, self.level_poolers):
+ if isinstance(pooler, ROIAlignRotated):
+ c2_roi_align = torch.ops._caffe2.RoIAlignRotated
+ aligned = True
+ else:
+ c2_roi_align = torch.ops._caffe2.RoIAlign
+ aligned = bool(pooler.aligned)
+
+ if x_level.is_quantized:
+ x_level = x_level.dequantize()
+
+ roi_feat_fpn = c2_roi_align(
+ x_level,
+ roi_fpn,
+ order="NCHW",
+ spatial_scale=float(pooler.spatial_scale),
+ pooled_h=int(self.output_size[0]),
+ pooled_w=int(self.output_size[1]),
+ sampling_ratio=int(pooler.sampling_ratio),
+ aligned=aligned,
+ )
+ roi_feat_fpn_list.append(roi_feat_fpn)
+
+ roi_feat_shuffled = cat(roi_feat_fpn_list, dim=0)
+ assert roi_feat_shuffled.numel() > 0 and rois_idx_restore_int32.numel() > 0, (
+ "Caffe2 export requires tracing with a model checkpoint + input that can produce valid"
+ " detections. But no detections were obtained with the given checkpoint and input!"
+ )
+ roi_feat = torch.ops._caffe2.BatchPermutation(roi_feat_shuffled, rois_idx_restore_int32)
+ return roi_feat
+
+
+def caffe2_fast_rcnn_outputs_inference(tensor_mode, box_predictor, predictions, proposals):
+ """equivalent to FastRCNNOutputLayers.inference"""
+ num_classes = box_predictor.num_classes
+ score_thresh = box_predictor.test_score_thresh
+ nms_thresh = box_predictor.test_nms_thresh
+ topk_per_image = box_predictor.test_topk_per_image
+ is_rotated = len(box_predictor.box2box_transform.weights) == 5
+
+ if is_rotated:
+ box_dim = 5
+ assert box_predictor.box2box_transform.weights[4] == 1, (
+ "The weights for Rotated BBoxTransform in C2 have only 4 dimensions,"
+ + " thus enforcing the angle weight to be 1 for now"
+ )
+ box2box_transform_weights = box_predictor.box2box_transform.weights[:4]
+ else:
+ box_dim = 4
+ box2box_transform_weights = box_predictor.box2box_transform.weights
+
+ class_logits, box_regression = predictions
+ if num_classes + 1 == class_logits.shape[1]:
+ class_prob = F.softmax(class_logits, -1)
+ else:
+ assert num_classes == class_logits.shape[1]
+ class_prob = F.sigmoid(class_logits)
+        # BoxWithNMSLimit will infer num_classes from the shape of the class_prob,
+        # so append a zero column as a placeholder for the background class.
+ class_prob = torch.cat((class_prob, torch.zeros(class_prob.shape[0], 1)), dim=1)
+
+ assert box_regression.shape[1] % box_dim == 0
+ cls_agnostic_bbox_reg = box_regression.shape[1] // box_dim == 1
+
+ input_tensor_mode = proposals[0].proposal_boxes.tensor.shape[1] == box_dim + 1
+
+ proposal_boxes = proposals[0].proposal_boxes
+ if isinstance(proposal_boxes, Caffe2Boxes):
+ rois = Caffe2Boxes.cat([p.proposal_boxes for p in proposals])
+ elif isinstance(proposal_boxes, RotatedBoxes):
+ rois = RotatedBoxes.cat([p.proposal_boxes for p in proposals])
+ elif isinstance(proposal_boxes, Boxes):
+ rois = Boxes.cat([p.proposal_boxes for p in proposals])
+ else:
+ raise NotImplementedError(
+ 'Expected proposals[0].proposal_boxes to be type "Boxes", '
+ f"instead got {type(proposal_boxes)}"
+ )
+
+ device, dtype = rois.tensor.device, rois.tensor.dtype
+ if input_tensor_mode:
+ im_info = proposals[0].image_size
+ rois = rois.tensor
+ else:
+ im_info = torch.tensor([[sz[0], sz[1], 1.0] for sz in [x.image_size for x in proposals]])
+ batch_ids = cat(
+ [
+ torch.full((b, 1), i, dtype=dtype, device=device)
+ for i, b in enumerate(len(p) for p in proposals)
+ ],
+ dim=0,
+ )
+ rois = torch.cat([batch_ids, rois.tensor], dim=1)
+
+ roi_pred_bbox, roi_batch_splits = torch.ops._caffe2.BBoxTransform(
+ to_device(rois, "cpu"),
+ to_device(box_regression, "cpu"),
+ to_device(im_info, "cpu"),
+ weights=box2box_transform_weights,
+ apply_scale=True,
+ rotated=is_rotated,
+ angle_bound_on=True,
+ angle_bound_lo=-180,
+ angle_bound_hi=180,
+ clip_angle_thresh=1.0,
+ legacy_plus_one=False,
+ )
+ roi_pred_bbox = to_device(roi_pred_bbox, device)
+ roi_batch_splits = to_device(roi_batch_splits, device)
+
+ nms_outputs = torch.ops._caffe2.BoxWithNMSLimit(
+ to_device(class_prob, "cpu"),
+ to_device(roi_pred_bbox, "cpu"),
+ to_device(roi_batch_splits, "cpu"),
+ score_thresh=float(score_thresh),
+ nms=float(nms_thresh),
+ detections_per_im=int(topk_per_image),
+ soft_nms_enabled=False,
+ soft_nms_method="linear",
+ soft_nms_sigma=0.5,
+ soft_nms_min_score_thres=0.001,
+ rotated=is_rotated,
+ cls_agnostic_bbox_reg=cls_agnostic_bbox_reg,
+ input_boxes_include_bg_cls=False,
+ output_classes_include_bg_cls=False,
+ legacy_plus_one=False,
+ )
+ roi_score_nms = to_device(nms_outputs[0], device)
+ roi_bbox_nms = to_device(nms_outputs[1], device)
+ roi_class_nms = to_device(nms_outputs[2], device)
+ roi_batch_splits_nms = to_device(nms_outputs[3], device)
+ roi_keeps_nms = to_device(nms_outputs[4], device)
+ roi_keeps_size_nms = to_device(nms_outputs[5], device)
+ if not tensor_mode:
+ roi_class_nms = roi_class_nms.to(torch.int64)
+
+ roi_batch_ids = cat(
+ [
+ torch.full((b, 1), i, dtype=dtype, device=device)
+ for i, b in enumerate(int(x.item()) for x in roi_batch_splits_nms)
+ ],
+ dim=0,
+ )
+
+ roi_class_nms = alias(roi_class_nms, "class_nms")
+ roi_score_nms = alias(roi_score_nms, "score_nms")
+ roi_bbox_nms = alias(roi_bbox_nms, "bbox_nms")
+ roi_batch_splits_nms = alias(roi_batch_splits_nms, "batch_splits_nms")
+ roi_keeps_nms = alias(roi_keeps_nms, "keeps_nms")
+ roi_keeps_size_nms = alias(roi_keeps_size_nms, "keeps_size_nms")
+
+ results = InstancesList(
+ im_info=im_info,
+ indices=roi_batch_ids[:, 0],
+ extra_fields={
+ "pred_boxes": Caffe2Boxes(roi_bbox_nms),
+ "scores": roi_score_nms,
+ "pred_classes": roi_class_nms,
+ },
+ )
+
+ if not tensor_mode:
+ results = InstancesList.to_d2_instances_list(results)
+ batch_splits = roi_batch_splits_nms.int().tolist()
+ kept_indices = list(roi_keeps_nms.to(torch.int64).split(batch_splits))
+ else:
+ results = [results]
+ kept_indices = [roi_keeps_nms]
+
+ return results, kept_indices
+
+
+class Caffe2FastRCNNOutputsInference:
+ def __init__(self, tensor_mode):
+ self.tensor_mode = tensor_mode # whether the output is caffe2 tensor mode
+
+ def __call__(self, box_predictor, predictions, proposals):
+ return caffe2_fast_rcnn_outputs_inference(
+ self.tensor_mode, box_predictor, predictions, proposals
+ )
+
+
+def caffe2_mask_rcnn_inference(pred_mask_logits, pred_instances):
+ """equivalent to mask_head.mask_rcnn_inference"""
+ if all(isinstance(x, InstancesList) for x in pred_instances):
+ assert len(pred_instances) == 1
+ mask_probs_pred = pred_mask_logits.sigmoid()
+ mask_probs_pred = alias(mask_probs_pred, "mask_fcn_probs")
+ pred_instances[0].set("pred_masks", mask_probs_pred)
+ else:
+ mask_rcnn_inference(pred_mask_logits, pred_instances)
+
+
+class Caffe2MaskRCNNInference:
+ def __call__(self, pred_mask_logits, pred_instances):
+ return caffe2_mask_rcnn_inference(pred_mask_logits, pred_instances)
+
+
+def caffe2_keypoint_rcnn_inference(use_heatmap_max_keypoint, pred_keypoint_logits, pred_instances):
+ # just return the keypoint heatmap for now,
+    # there will be an option to call HeatmapMaxKeypointOp
+ output = alias(pred_keypoint_logits, "kps_score")
+ if all(isinstance(x, InstancesList) for x in pred_instances):
+ assert len(pred_instances) == 1
+ if use_heatmap_max_keypoint:
+ device = output.device
+ output = torch.ops._caffe2.HeatmapMaxKeypoint(
+ to_device(output, "cpu"),
+ pred_instances[0].pred_boxes.tensor,
+                should_output_softmax=True,  # worth making it configurable?
+ )
+ output = to_device(output, device)
+ output = alias(output, "keypoints_out")
+ pred_instances[0].set("pred_keypoints", output)
+ return pred_keypoint_logits
+
+
+class Caffe2KeypointRCNNInference:
+ def __init__(self, use_heatmap_max_keypoint):
+ self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
+
+ def __call__(self, pred_keypoint_logits, pred_instances):
+ return caffe2_keypoint_rcnn_inference(
+ self.use_heatmap_max_keypoint, pred_keypoint_logits, pred_instances
+ )
diff --git a/detectron2/export/caffe2_export.py b/detectron2/export/caffe2_export.py
new file mode 100644
index 0000000000000000000000000000000000000000..d609c27c7deb396352967dbcbc79b1e00f2a2de1
--- /dev/null
+++ b/detectron2/export/caffe2_export.py
@@ -0,0 +1,203 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import copy
+import io
+import logging
+import numpy as np
+from typing import List
+import onnx
+import onnx.optimizer
+import torch
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core
+from caffe2.python.onnx.backend import Caffe2Backend
+from tabulate import tabulate
+from termcolor import colored
+from torch.onnx import OperatorExportTypes
+
+from .shared import (
+ ScopedWS,
+ construct_init_net_from_params,
+ fuse_alias_placeholder,
+ fuse_copy_between_cpu_and_gpu,
+ get_params_from_init_net,
+ group_norm_replace_aten_with_caffe2,
+ infer_device_type,
+ remove_dead_end_ops,
+ remove_reshape_for_fc,
+ save_graph,
+)
+
+logger = logging.getLogger(__name__)
+
+
+def export_onnx_model(model, inputs):
+ """
+ Trace and export a model to onnx format.
+
+ Args:
+ model (nn.Module):
+ inputs (tuple[args]): the model will be called by `model(*inputs)`
+
+ Returns:
+ an onnx model
+ """
+ assert isinstance(model, torch.nn.Module)
+
+ # make sure all modules are in eval mode, onnx may change the training state
+ # of the module if the states are not consistent
+ def _check_eval(module):
+ assert not module.training
+
+ model.apply(_check_eval)
+
+ # Export the model to ONNX
+ with torch.no_grad():
+ with io.BytesIO() as f:
+ torch.onnx.export(
+ model,
+ inputs,
+ f,
+ operator_export_type=OperatorExportTypes.ONNX_ATEN_FALLBACK,
+ # verbose=True, # NOTE: uncomment this for debugging
+ # export_params=True,
+ )
+ onnx_model = onnx.load_from_string(f.getvalue())
+
+ return onnx_model
+
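+# Minimal usage sketch (hypothetical names; assumes `model` is in eval mode and is
+# called positionally as model(image_tensor, im_info)):
+#
+#   onnx_model = export_onnx_model(model, (image_tensor, im_info))
+#   onnx.save(onnx_model, "model.onnx")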
+
+def _op_stats(net_def):
+ type_count = {}
+ for t in [op.type for op in net_def.op]:
+ type_count[t] = type_count.get(t, 0) + 1
+ type_count_list = sorted(type_count.items(), key=lambda kv: kv[0]) # alphabet
+ type_count_list = sorted(type_count_list, key=lambda kv: -kv[1]) # count
+ return "\n".join("{:>4}x {}".format(count, name) for name, count in type_count_list)
+
+
+def _assign_device_option(
+ predict_net: caffe2_pb2.NetDef, init_net: caffe2_pb2.NetDef, tensor_inputs: List[torch.Tensor]
+):
+ """
+    An ONNX-exported network doesn't have the concept of device; assign the necessary
+    device option to each op in order to make it runnable on a GPU runtime.
+ """
+
+ def _get_device_type(torch_tensor):
+ assert torch_tensor.device.type in ["cpu", "cuda"]
+ assert torch_tensor.device.index == 0
+ return torch_tensor.device.type
+
+ def _assign_op_device_option(net_proto, net_ssa, blob_device_types):
+ for op, ssa_i in zip(net_proto.op, net_ssa):
+ if op.type in ["CopyCPUToGPU", "CopyGPUToCPU"]:
+ op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
+ else:
+ devices = [blob_device_types[b] for b in ssa_i[0] + ssa_i[1]]
+ assert all(d == devices[0] for d in devices)
+ if devices[0] == "cuda":
+ op.device_option.CopyFrom(core.DeviceOption(caffe2_pb2.CUDA, 0))
+
+ # update ops in predict_net
+ predict_net_input_device_types = {
+ (name, 0): _get_device_type(tensor)
+ for name, tensor in zip(predict_net.external_input, tensor_inputs)
+ }
+ predict_net_device_types = infer_device_type(
+ predict_net, known_status=predict_net_input_device_types, device_name_style="pytorch"
+ )
+ predict_net_ssa, _ = core.get_ssa(predict_net)
+ _assign_op_device_option(predict_net, predict_net_ssa, predict_net_device_types)
+
+ # update ops in init_net
+ init_net_ssa, versions = core.get_ssa(init_net)
+ init_net_output_device_types = {
+ (name, versions[name]): predict_net_device_types[(name, 0)]
+ for name in init_net.external_output
+ }
+ init_net_device_types = infer_device_type(
+ init_net, known_status=init_net_output_device_types, device_name_style="pytorch"
+ )
+ _assign_op_device_option(init_net, init_net_ssa, init_net_device_types)
+
+
+def export_caffe2_detection_model(model: torch.nn.Module, tensor_inputs: List[torch.Tensor]):
+ """
+ Export a caffe2-compatible Detectron2 model to caffe2 format via ONNX.
+
+ Arg:
+ model: a caffe2-compatible version of detectron2 model, defined in caffe2_modeling.py
+ tensor_inputs: a list of tensors that caffe2 model takes as input.
+ """
+ model = copy.deepcopy(model)
+ assert isinstance(model, torch.nn.Module)
+ assert hasattr(model, "encode_additional_info")
+
+ # Export via ONNX
+ logger.info(
+ "Exporting a {} model via ONNX ...".format(type(model).__name__)
+        + " Some warnings from ONNX are expected and are usually nothing to worry about."
+ )
+ onnx_model = export_onnx_model(model, (tensor_inputs,))
+ # Convert ONNX model to Caffe2 protobuf
+ init_net, predict_net = Caffe2Backend.onnx_graph_to_caffe2_net(onnx_model)
+ ops_table = [[op.type, op.input, op.output] for op in predict_net.op]
+ table = tabulate(ops_table, headers=["type", "input", "output"], tablefmt="pipe")
+ logger.info(
+ "ONNX export Done. Exported predict_net (before optimizations):\n" + colored(table, "cyan")
+ )
+
+ # Apply protobuf optimization
+ fuse_alias_placeholder(predict_net, init_net)
+ if any(t.device.type != "cpu" for t in tensor_inputs):
+ fuse_copy_between_cpu_and_gpu(predict_net)
+ remove_dead_end_ops(init_net)
+ _assign_device_option(predict_net, init_net, tensor_inputs)
+ params, device_options = get_params_from_init_net(init_net)
+ predict_net, params = remove_reshape_for_fc(predict_net, params)
+ init_net = construct_init_net_from_params(params, device_options)
+ group_norm_replace_aten_with_caffe2(predict_net)
+
+ # Record necessary information for running the pb model in Detectron2 system.
+ model.encode_additional_info(predict_net, init_net)
+
+ logger.info("Operators used in predict_net: \n{}".format(_op_stats(predict_net)))
+ logger.info("Operators used in init_net: \n{}".format(_op_stats(init_net)))
+
+ return predict_net, init_net
+
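+# Illustrative call sequence (hypothetical variable names); the caffe2-compatible
+# model is one of the wrappers defined in caffe2_modeling.py:
+#
+#   c2_model = Caffe2GeneralizedRCNN(cfg, torch_model)
+#   tensor_inputs = c2_model.get_caffe2_inputs(batched_inputs)
+#   predict_net, init_net = export_caffe2_detection_model(c2_model, tensor_inputs)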
+
+def run_and_save_graph(predict_net, init_net, tensor_inputs, graph_save_path):
+ """
+    Run the caffe2 model on the given inputs, recording the blob shapes and drawing the graph.
+
+ predict_net/init_net: caffe2 model.
+ tensor_inputs: a list of tensors that caffe2 model takes as input.
+ graph_save_path: path for saving graph of exported model.
+ """
+
+ logger.info("Saving graph of ONNX exported model to {} ...".format(graph_save_path))
+ save_graph(predict_net, graph_save_path, op_only=False)
+
+ # Run the exported Caffe2 net
+ logger.info("Running ONNX exported model ...")
+ with ScopedWS("__ws_tmp__", True) as ws:
+ ws.RunNetOnce(init_net)
+ initialized_blobs = set(ws.Blobs())
+ uninitialized = [inp for inp in predict_net.external_input if inp not in initialized_blobs]
+ for name, blob in zip(uninitialized, tensor_inputs):
+ ws.FeedBlob(name, blob)
+
+ try:
+ ws.RunNetOnce(predict_net)
+ except RuntimeError as e:
+ logger.warning("Encountered RuntimeError: \n{}".format(str(e)))
+
+ ws_blobs = {b: ws.FetchBlob(b) for b in ws.Blobs()}
+ blob_sizes = {b: ws_blobs[b].shape for b in ws_blobs if isinstance(ws_blobs[b], np.ndarray)}
+
+ logger.info("Saving graph with blob shapes to {} ...".format(graph_save_path))
+ save_graph(predict_net, graph_save_path, op_only=False, blob_sizes=blob_sizes)
+
+ return ws_blobs
diff --git a/detectron2/export/caffe2_inference.py b/detectron2/export/caffe2_inference.py
new file mode 100644
index 0000000000000000000000000000000000000000..deb886c0417285ed1d5ad85eb941fa1ac757cdab
--- /dev/null
+++ b/detectron2/export/caffe2_inference.py
@@ -0,0 +1,161 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import logging
+import numpy as np
+from itertools import count
+import torch
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core
+
+from .caffe2_modeling import META_ARCH_CAFFE2_EXPORT_TYPE_MAP, convert_batched_inputs_to_c2_format
+from .shared import ScopedWS, get_pb_arg_vali, get_pb_arg_vals, infer_device_type
+
+logger = logging.getLogger(__name__)
+
+
+# ===== ref: mobile-vision predictor's 'Caffe2Wrapper' class ======
+class ProtobufModel(torch.nn.Module):
+ """
+ Wrapper of a caffe2's protobuf model.
+    It works just like an nn.Module, but runs caffe2 under the hood.
+    Inputs/outputs are tuple[tensor] that match the caffe2 net's external_input/output.
+ """
+
+ _ids = count(0)
+
+ def __init__(self, predict_net, init_net):
+ logger.info(f"Initializing ProtobufModel for: {predict_net.name} ...")
+ super().__init__()
+ assert isinstance(predict_net, caffe2_pb2.NetDef)
+ assert isinstance(init_net, caffe2_pb2.NetDef)
+ # create unique temporary workspace for each instance
+ self.ws_name = "__tmp_ProtobufModel_{}__".format(next(self._ids))
+ self.net = core.Net(predict_net)
+
+ logger.info("Running init_net once to fill the parameters ...")
+ with ScopedWS(self.ws_name, is_reset=True, is_cleanup=False) as ws:
+ ws.RunNetOnce(init_net)
+ uninitialized_external_input = []
+ for blob in self.net.Proto().external_input:
+ if blob not in ws.Blobs():
+ uninitialized_external_input.append(blob)
+ ws.CreateBlob(blob)
+ ws.CreateNet(self.net)
+
+ self._error_msgs = set()
+ self._input_blobs = uninitialized_external_input
+
+ def _infer_output_devices(self, inputs):
+ """
+ Returns:
+ list[str]: list of device for each external output
+ """
+
+ def _get_device_type(torch_tensor):
+ assert torch_tensor.device.type in ["cpu", "cuda"]
+ assert torch_tensor.device.index == 0
+ return torch_tensor.device.type
+
+ predict_net = self.net.Proto()
+ input_device_types = {
+ (name, 0): _get_device_type(tensor) for name, tensor in zip(self._input_blobs, inputs)
+ }
+ device_type_map = infer_device_type(
+ predict_net, known_status=input_device_types, device_name_style="pytorch"
+ )
+ ssa, versions = core.get_ssa(predict_net)
+ versioned_outputs = [(name, versions[name]) for name in predict_net.external_output]
+ output_devices = [device_type_map[outp] for outp in versioned_outputs]
+ return output_devices
+
+ def forward(self, inputs):
+ """
+ Args:
+ inputs (tuple[torch.Tensor])
+
+ Returns:
+ tuple[torch.Tensor]
+ """
+ assert len(inputs) == len(self._input_blobs), (
+ f"Length of inputs ({len(inputs)}) "
+ f"doesn't match the required input blobs: {self._input_blobs}"
+ )
+
+ with ScopedWS(self.ws_name, is_reset=False, is_cleanup=False) as ws:
+ for b, tensor in zip(self._input_blobs, inputs):
+ ws.FeedBlob(b, tensor)
+
+ try:
+ ws.RunNet(self.net.Proto().name)
+ except RuntimeError as e:
+ if not str(e) in self._error_msgs:
+ self._error_msgs.add(str(e))
+ logger.warning("Encountered new RuntimeError: \n{}".format(str(e)))
+ logger.warning("Catch the error and use partial results.")
+
+ c2_outputs = [ws.FetchBlob(b) for b in self.net.Proto().external_output]
+            # Remove outputs of the current run; this is necessary in order to
+            # prevent fetching the result from a previous run if the model fails
+            # in the middle.
+ for b in self.net.Proto().external_output:
+                # Needs to create an uninitialized blob to make the net runnable.
+                # This is "equivalent" to: ws.RemoveBlob(b) then ws.CreateBlob(b),
+                # but there's no such API.
+ ws.FeedBlob(b, f"{b}, a C++ native class of type nullptr (uninitialized).")
+
+ # Cast output to torch.Tensor on the desired device
+ output_devices = (
+ self._infer_output_devices(inputs)
+ if any(t.device.type != "cpu" for t in inputs)
+ else ["cpu" for _ in self.net.Proto().external_output]
+ )
+
+ outputs = []
+ for name, c2_output, device in zip(
+ self.net.Proto().external_output, c2_outputs, output_devices
+ ):
+ if not isinstance(c2_output, np.ndarray):
+ raise RuntimeError(
+ "Invalid output for blob {}, received: {}".format(name, c2_output)
+ )
+ outputs.append(torch.tensor(c2_output).to(device=device))
+ return tuple(outputs)
+
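+# Usage sketch (hypothetical): run the protobuf nets directly on a tuple of tensors
+# that matches the net's uninitialized external inputs, e.g. (data, im_info).
+#
+#   pb_model = ProtobufModel(predict_net, init_net)
+#   outputs = pb_model((data, im_info))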
+
+class ProtobufDetectionModel(torch.nn.Module):
+ """
+    A class that works just like a pytorch meta arch in terms of inference, but runs
+    a caffe2 model under the hood.
+ """
+
+ def __init__(self, predict_net, init_net, *, convert_outputs=None):
+ """
+ Args:
+ predict_net, init_net (core.Net): caffe2 nets
+            convert_outputs (callable): a function that converts caffe2
+ outputs to the same format of the original pytorch model.
+ By default, use the one defined in the caffe2 meta_arch.
+ """
+ super().__init__()
+ self.protobuf_model = ProtobufModel(predict_net, init_net)
+ self.size_divisibility = get_pb_arg_vali(predict_net, "size_divisibility", 0)
+ self.device = get_pb_arg_vals(predict_net, "device", b"cpu").decode("ascii")
+
+ if convert_outputs is None:
+ meta_arch = get_pb_arg_vals(predict_net, "meta_architecture", b"GeneralizedRCNN")
+ meta_arch = META_ARCH_CAFFE2_EXPORT_TYPE_MAP[meta_arch.decode("ascii")]
+ self._convert_outputs = meta_arch.get_outputs_converter(predict_net, init_net)
+ else:
+ self._convert_outputs = convert_outputs
+
+ def _convert_inputs(self, batched_inputs):
+ # currently all models convert inputs in the same way
+ return convert_batched_inputs_to_c2_format(
+ batched_inputs, self.size_divisibility, self.device
+ )
+
+ def forward(self, batched_inputs):
+ c2_inputs = self._convert_inputs(batched_inputs)
+ c2_results = self.protobuf_model(c2_inputs)
+ c2_results = dict(zip(self.protobuf_model.net.Proto().external_output, c2_results))
+ return self._convert_outputs(batched_inputs, c2_inputs, c2_results)
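+
+
+# Usage sketch (hypothetical inputs): wrap the exported nets so they accept and
+# return detectron2's standard inference format.
+#
+#   detector = ProtobufDetectionModel(predict_net, init_net)
+#   results = detector([{"image": image_chw, "height": 480, "width": 640}])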
diff --git a/detectron2/export/caffe2_modeling.py b/detectron2/export/caffe2_modeling.py
new file mode 100644
index 0000000000000000000000000000000000000000..3e675c45d62f7b363a298099cd520c417832d58c
--- /dev/null
+++ b/detectron2/export/caffe2_modeling.py
@@ -0,0 +1,420 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import functools
+import io
+import struct
+import types
+import torch
+
+from detectron2.modeling import meta_arch
+from detectron2.modeling.box_regression import Box2BoxTransform
+from detectron2.modeling.roi_heads import keypoint_head
+from detectron2.structures import Boxes, ImageList, Instances, RotatedBoxes
+
+from .c10 import Caffe2Compatible
+from .caffe2_patch import ROIHeadsPatcher, patch_generalized_rcnn
+from .shared import (
+ alias,
+ check_set_pb_arg,
+ get_pb_arg_floats,
+ get_pb_arg_valf,
+ get_pb_arg_vali,
+ get_pb_arg_vals,
+ mock_torch_nn_functional_interpolate,
+)
+
+
+def assemble_rcnn_outputs_by_name(image_sizes, tensor_outputs, force_mask_on=False):
+ """
+    A function to assemble a caffe2 model's outputs (i.e. Dict[str, Tensor])
+    into detectron2's format (i.e. a list of Instances).
+ This only works when the model follows the Caffe2 detectron's naming convention.
+
+ Args:
+ image_sizes (List[List[int, int]]): [H, W] of every image.
+ tensor_outputs (Dict[str, Tensor]): external_output to its tensor.
+
+        force_mask_on (Bool): if true, make sure there'll be pred_masks even
+            if the mask is not found in tensor_outputs (usually due to a model crash)
+ """
+
+ results = [Instances(image_size) for image_size in image_sizes]
+
+ batch_splits = tensor_outputs.get("batch_splits", None)
+ if batch_splits:
+ raise NotImplementedError()
+ assert len(image_sizes) == 1
+ result = results[0]
+
+ bbox_nms = tensor_outputs["bbox_nms"]
+ score_nms = tensor_outputs["score_nms"]
+ class_nms = tensor_outputs["class_nms"]
+    # Detection will always succeed because Conv supports 0-batch
+ assert bbox_nms is not None
+ assert score_nms is not None
+ assert class_nms is not None
+ if bbox_nms.shape[1] == 5:
+ result.pred_boxes = RotatedBoxes(bbox_nms)
+ else:
+ result.pred_boxes = Boxes(bbox_nms)
+ result.scores = score_nms
+ result.pred_classes = class_nms.to(torch.int64)
+
+ mask_fcn_probs = tensor_outputs.get("mask_fcn_probs", None)
+ if mask_fcn_probs is not None:
+ # finish the mask pred
+ mask_probs_pred = mask_fcn_probs
+ num_masks = mask_probs_pred.shape[0]
+ class_pred = result.pred_classes
+ indices = torch.arange(num_masks, device=class_pred.device)
+ mask_probs_pred = mask_probs_pred[indices, class_pred][:, None]
+ result.pred_masks = mask_probs_pred
+ elif force_mask_on:
+        # NOTE: there's no way to know the height/width of the mask here; it won't be
+ # used anyway when batch size is 0, so just set them to 0.
+ result.pred_masks = torch.zeros([0, 1, 0, 0], dtype=torch.uint8)
+
+ keypoints_out = tensor_outputs.get("keypoints_out", None)
+ kps_score = tensor_outputs.get("kps_score", None)
+ if keypoints_out is not None:
+        # keypoints_out: [N, 4, #keypoints], where 4 is in order of (x, y, score, prob)
+ keypoints_tensor = keypoints_out
+ # NOTE: it's possible that prob is not calculated if "should_output_softmax"
+        # is set to False in HeatmapMaxKeypoint, so just use the raw score; it
+        # doesn't seem to affect mAP. TODO: check more carefully.
+ keypoint_xyp = keypoints_tensor.transpose(1, 2)[:, :, [0, 1, 2]]
+ result.pred_keypoints = keypoint_xyp
+ elif kps_score is not None:
+ # keypoint heatmap to sparse data structure
+ pred_keypoint_logits = kps_score
+ keypoint_head.keypoint_rcnn_inference(pred_keypoint_logits, [result])
+
+ return results
+
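+# Illustrative sketch (tensor names are hypothetical): assemble Instances from the
+# caffe2 net's named external outputs for a single image.
+#
+#   results = assemble_rcnn_outputs_by_name(
+#       image_sizes=[[480, 640]],
+#       tensor_outputs={
+#           "bbox_nms": bbox_nms,    # Nx4 boxes (or Nx5 for rotated boxes)
+#           "score_nms": score_nms,  # N scores
+#           "class_nms": class_nms,  # N class ids
+#       },
+#   )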
+
+def _cast_to_f32(f64):
+ return struct.unpack("f", struct.pack("f", f64))[0]
+
+
+def set_caffe2_compatible_tensor_mode(model, enable=True):
+ def _fn(m):
+ if isinstance(m, Caffe2Compatible):
+ m.tensor_mode = enable
+
+ model.apply(_fn)
+
+
+def convert_batched_inputs_to_c2_format(batched_inputs, size_divisibility, device):
+ """
+ See get_caffe2_inputs() below.
+ """
+ assert all(isinstance(x, dict) for x in batched_inputs)
+ assert all(x["image"].dim() == 3 for x in batched_inputs)
+
+ images = [x["image"] for x in batched_inputs]
+ images = ImageList.from_tensors(images, size_divisibility)
+
+ im_info = []
+ for input_per_image, image_size in zip(batched_inputs, images.image_sizes):
+ target_height = input_per_image.get("height", image_size[0])
+ target_width = input_per_image.get("width", image_size[1]) # noqa
+ # NOTE: The scale inside im_info is kept as convention and for providing
+ # post-processing information if further processing is needed. For
+ # current Caffe2 model definitions that don't include post-processing inside
+ # the model, this number is not used.
+        # NOTE: There can be a slight difference between the width and height
+        # scales; using a single number can result in numerical differences
+        # compared with D2's post-processing.
+ scale = target_height / image_size[0]
+ im_info.append([image_size[0], image_size[1], scale])
+ im_info = torch.Tensor(im_info)
+
+ return images.tensor.to(device), im_info.to(device)
+
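+# Illustrative call (hypothetical CHW image tensors): batch two images into the
+# (data, im_info) tensors the caffe2 graph expects.
+#
+#   batched_inputs = [{"image": img0}, {"image": img1, "height": 480, "width": 640}]
+#   data, im_info = convert_batched_inputs_to_c2_format(
+#       batched_inputs, size_divisibility=32, device="cpu"
+#   )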
+
+class Caffe2MetaArch(Caffe2Compatible, torch.nn.Module):
+ """
+ Base class for caffe2-compatible implementation of a meta architecture.
+ The forward is traceable and its traced graph can be converted to caffe2
+ graph through ONNX.
+ """
+
+ def __init__(self, cfg, torch_model, enable_tensor_mode=True):
+ """
+ Args:
+ cfg (CfgNode):
+ torch_model (nn.Module): the detectron2 model (meta_arch) to be
+ converted.
+ """
+ super().__init__()
+ self._wrapped_model = torch_model
+ self.eval()
+ set_caffe2_compatible_tensor_mode(self, enable_tensor_mode)
+
+ def get_caffe2_inputs(self, batched_inputs):
+ """
+ Convert pytorch-style structured inputs to caffe2-style inputs that
+ are tuples of tensors.
+
+ Args:
+ batched_inputs (list[dict]): inputs to a detectron2 model
+ in its standard format. Each dict has "image" (CHW tensor), and optionally
+ "height" and "width".
+
+ Returns:
+ tuple[Tensor]:
+ tuple of tensors that will be the inputs to the
+ :meth:`forward` method. For existing models, the first
+ is an NCHW tensor (padded and batched); the second is
+ a im_info Nx3 tensor, where the rows are
+ (height, width, unused legacy parameter)
+ """
+ return convert_batched_inputs_to_c2_format(
+ batched_inputs,
+ self._wrapped_model.backbone.size_divisibility,
+ self._wrapped_model.device,
+ )
+
+ def encode_additional_info(self, predict_net, init_net):
+ """
+ Save extra metadata that will be used by inference in the output protobuf.
+ """
+ pass
+
+ def forward(self, inputs):
+ """
+ Run the forward in caffe2-style. It has to use caffe2-compatible ops
+ and the method will be used for tracing.
+
+ Args:
+ inputs (tuple[Tensor]): inputs defined by :meth:`get_caffe2_input`.
+ They will be the inputs of the converted caffe2 graph.
+
+ Returns:
+ tuple[Tensor]: output tensors. They will be the outputs of the
+ converted caffe2 graph.
+ """
+ raise NotImplementedError
+
+ def _caffe2_preprocess_image(self, inputs):
+ """
+ Caffe2 implementation of preprocess_image, which is called inside each MetaArch's forward.
+ It normalizes the input images, and the final caffe2 graph assumes the
+ inputs have been batched already.
+ """
+ data, im_info = inputs
+ data = alias(data, "data")
+ im_info = alias(im_info, "im_info")
+ mean, std = self._wrapped_model.pixel_mean, self._wrapped_model.pixel_std
+ normalized_data = (data - mean) / std
+ normalized_data = alias(normalized_data, "normalized_data")
+
+ # Pack (data, im_info) into ImageList which is recognized by self.inference.
+ images = ImageList(tensor=normalized_data, image_sizes=im_info)
+ return images
+
+ @staticmethod
+ def get_outputs_converter(predict_net, init_net):
+ """
+ Creates a function that converts outputs of the caffe2 model to
+ detectron2's standard format.
+ The function uses information in `predict_net` and `init_net` that are
+        available at inference time. Therefore the function logic can be used in inference.
+
+ The returned function has the following signature:
+
+ def convert(batched_inputs, c2_inputs, c2_results) -> detectron2_outputs
+
+ Where
+
+ * batched_inputs (list[dict]): the original input format of the meta arch
+ * c2_inputs (tuple[Tensor]): the caffe2 inputs.
+ * c2_results (dict[str, Tensor]): the caffe2 output format,
+ corresponding to the outputs of the :meth:`forward` function.
+ * detectron2_outputs: the original output format of the meta arch.
+
+ This function can be used to compare the outputs of the original meta arch and
+ the converted caffe2 graph.
+
+ Returns:
+ callable: a callable of the above signature.
+ """
+ raise NotImplementedError
+
+
+class Caffe2GeneralizedRCNN(Caffe2MetaArch):
+ def __init__(self, cfg, torch_model, enable_tensor_mode=True):
+ assert isinstance(torch_model, meta_arch.GeneralizedRCNN)
+ torch_model = patch_generalized_rcnn(torch_model)
+ super().__init__(cfg, torch_model, enable_tensor_mode)
+
+ try:
+ use_heatmap_max_keypoint = cfg.EXPORT_CAFFE2.USE_HEATMAP_MAX_KEYPOINT
+ except AttributeError:
+ use_heatmap_max_keypoint = False
+ self.roi_heads_patcher = ROIHeadsPatcher(
+ self._wrapped_model.roi_heads, use_heatmap_max_keypoint
+ )
+ if self.tensor_mode:
+ self.roi_heads_patcher.patch_roi_heads()
+
+ def encode_additional_info(self, predict_net, init_net):
+ size_divisibility = self._wrapped_model.backbone.size_divisibility
+ check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
+ check_set_pb_arg(
+ predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
+ )
+ check_set_pb_arg(predict_net, "meta_architecture", "s", b"GeneralizedRCNN")
+
+ @mock_torch_nn_functional_interpolate()
+ def forward(self, inputs):
+ if not self.tensor_mode:
+ return self._wrapped_model.inference(inputs)
+ images = self._caffe2_preprocess_image(inputs)
+ features = self._wrapped_model.backbone(images.tensor)
+ proposals, _ = self._wrapped_model.proposal_generator(images, features)
+ detector_results, _ = self._wrapped_model.roi_heads(images, features, proposals)
+ return tuple(detector_results[0].flatten())
+
+ @staticmethod
+ def get_outputs_converter(predict_net, init_net):
+ def f(batched_inputs, c2_inputs, c2_results):
+ _, im_info = c2_inputs
+ image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
+ results = assemble_rcnn_outputs_by_name(image_sizes, c2_results)
+ return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
+
+ return f
+
+
+class Caffe2RetinaNet(Caffe2MetaArch):
+ def __init__(self, cfg, torch_model):
+ assert isinstance(torch_model, meta_arch.RetinaNet)
+ super().__init__(cfg, torch_model)
+
+ @mock_torch_nn_functional_interpolate()
+ def forward(self, inputs):
+ assert self.tensor_mode
+ images = self._caffe2_preprocess_image(inputs)
+
+        # explicitly return the image sizes so that ONNX doesn't remove "im_info",
+        # since it's not used in the forward path
+ return_tensors = [images.image_sizes]
+
+ features = self._wrapped_model.backbone(images.tensor)
+ features = [features[f] for f in self._wrapped_model.head_in_features]
+ for i, feature_i in enumerate(features):
+ features[i] = alias(feature_i, "feature_{}".format(i), is_backward=True)
+ return_tensors.append(features[i])
+
+ pred_logits, pred_anchor_deltas = self._wrapped_model.head(features)
+ for i, (box_cls_i, box_delta_i) in enumerate(zip(pred_logits, pred_anchor_deltas)):
+ return_tensors.append(alias(box_cls_i, "box_cls_{}".format(i)))
+ return_tensors.append(alias(box_delta_i, "box_delta_{}".format(i)))
+
+ return tuple(return_tensors)
+
+ def encode_additional_info(self, predict_net, init_net):
+ size_divisibility = self._wrapped_model.backbone.size_divisibility
+ check_set_pb_arg(predict_net, "size_divisibility", "i", size_divisibility)
+ check_set_pb_arg(
+ predict_net, "device", "s", str.encode(str(self._wrapped_model.device), "ascii")
+ )
+ check_set_pb_arg(predict_net, "meta_architecture", "s", b"RetinaNet")
+
+ # Inference parameters:
+ check_set_pb_arg(
+ predict_net, "score_threshold", "f", _cast_to_f32(self._wrapped_model.test_score_thresh)
+ )
+ check_set_pb_arg(
+ predict_net, "topk_candidates", "i", self._wrapped_model.test_topk_candidates
+ )
+ check_set_pb_arg(
+ predict_net, "nms_threshold", "f", _cast_to_f32(self._wrapped_model.test_nms_thresh)
+ )
+ check_set_pb_arg(
+ predict_net,
+ "max_detections_per_image",
+ "i",
+ self._wrapped_model.max_detections_per_image,
+ )
+
+ check_set_pb_arg(
+ predict_net,
+ "bbox_reg_weights",
+ "floats",
+ [_cast_to_f32(w) for w in self._wrapped_model.box2box_transform.weights],
+ )
+ self._encode_anchor_generator_cfg(predict_net)
+
+ def _encode_anchor_generator_cfg(self, predict_net):
+ # serialize anchor_generator for future use
+ serialized_anchor_generator = io.BytesIO()
+ torch.save(self._wrapped_model.anchor_generator, serialized_anchor_generator)
+        # Ideally we could put anchor generation inside the model; then we
+        # wouldn't need to store this information.
+ bytes = serialized_anchor_generator.getvalue()
+ check_set_pb_arg(predict_net, "serialized_anchor_generator", "s", bytes)
+
+ @staticmethod
+ def get_outputs_converter(predict_net, init_net):
+ self = types.SimpleNamespace()
+ serialized_anchor_generator = io.BytesIO(
+ get_pb_arg_vals(predict_net, "serialized_anchor_generator", None)
+ )
+ self.anchor_generator = torch.load(serialized_anchor_generator)
+ bbox_reg_weights = get_pb_arg_floats(predict_net, "bbox_reg_weights", None)
+ self.box2box_transform = Box2BoxTransform(weights=tuple(bbox_reg_weights))
+ self.test_score_thresh = get_pb_arg_valf(predict_net, "score_threshold", None)
+ self.test_topk_candidates = get_pb_arg_vali(predict_net, "topk_candidates", None)
+ self.test_nms_thresh = get_pb_arg_valf(predict_net, "nms_threshold", None)
+ self.max_detections_per_image = get_pb_arg_vali(
+ predict_net, "max_detections_per_image", None
+ )
+
+ # hack to reuse inference code from RetinaNet
+ for meth in [
+ "forward_inference",
+ "inference_single_image",
+ "_transpose_dense_predictions",
+ "_decode_multi_level_predictions",
+ "_decode_per_level_predictions",
+ ]:
+ setattr(self, meth, functools.partial(getattr(meta_arch.RetinaNet, meth), self))
+
+ def f(batched_inputs, c2_inputs, c2_results):
+ _, im_info = c2_inputs
+ image_sizes = [[int(im[0]), int(im[1])] for im in im_info]
+ dummy_images = ImageList(
+ torch.randn(
+ (
+ len(im_info),
+ 3,
+ )
+ + tuple(image_sizes[0])
+ ),
+ image_sizes,
+ )
+
+ num_features = len([x for x in c2_results.keys() if x.startswith("box_cls_")])
+ pred_logits = [c2_results["box_cls_{}".format(i)] for i in range(num_features)]
+ pred_anchor_deltas = [c2_results["box_delta_{}".format(i)] for i in range(num_features)]
+
+ # For each feature level, feature should have the same batch size and
+ # spatial dimension as the box_cls and box_delta.
+ dummy_features = [x.clone()[:, 0:0, :, :] for x in pred_logits]
+            # self.num_classes can be inferred
+ self.num_classes = pred_logits[0].shape[1] // (pred_anchor_deltas[0].shape[1] // 4)
+
+ results = self.forward_inference(
+ dummy_images, dummy_features, [pred_logits, pred_anchor_deltas]
+ )
+ return meta_arch.GeneralizedRCNN._postprocess(results, batched_inputs, image_sizes)
+
+ return f
+
+
+META_ARCH_CAFFE2_EXPORT_TYPE_MAP = {
+ "GeneralizedRCNN": Caffe2GeneralizedRCNN,
+ "RetinaNet": Caffe2RetinaNet,
+}
diff --git a/detectron2/export/caffe2_patch.py b/detectron2/export/caffe2_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..4ddc2c1c6c5cff3e70df9b6001fcf43aae1d732d
--- /dev/null
+++ b/detectron2/export/caffe2_patch.py
@@ -0,0 +1,189 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import contextlib
+from unittest import mock
+import torch
+
+from detectron2.modeling import poolers
+from detectron2.modeling.proposal_generator import rpn
+from detectron2.modeling.roi_heads import keypoint_head, mask_head
+from detectron2.modeling.roi_heads.fast_rcnn import FastRCNNOutputLayers
+
+from .c10 import (
+ Caffe2Compatible,
+ Caffe2FastRCNNOutputsInference,
+ Caffe2KeypointRCNNInference,
+ Caffe2MaskRCNNInference,
+ Caffe2ROIPooler,
+ Caffe2RPN,
+ caffe2_fast_rcnn_outputs_inference,
+ caffe2_keypoint_rcnn_inference,
+ caffe2_mask_rcnn_inference,
+)
+
+
+class GenericMixin:
+ pass
+
+
+class Caffe2CompatibleConverter:
+ """
+    A GenericUpdater which implements the `create_from` interface by modifying
+    the module object in place and assigning it another class, replaceCls.
+ """
+
+ def __init__(self, replaceCls):
+ self.replaceCls = replaceCls
+
+ def create_from(self, module):
+ # update module's class to the new class
+ assert isinstance(module, torch.nn.Module)
+ if issubclass(self.replaceCls, GenericMixin):
+            # replaceCls should act as a mixin; create a new class on-the-fly
+ new_class = type(
+ "{}MixedWith{}".format(self.replaceCls.__name__, module.__class__.__name__),
+ (self.replaceCls, module.__class__),
+ {}, # {"new_method": lambda self: ...},
+ )
+ module.__class__ = new_class
+ else:
+            # replaceCls is a complete class; this allows arbitrary class swaps
+ module.__class__ = self.replaceCls
+
+ # initialize Caffe2Compatible
+ if isinstance(module, Caffe2Compatible):
+ module.tensor_mode = False
+
+ return module
+
+
+def patch(model, target, updater, *args, **kwargs):
+ """
+    Recursively (post-order) update all modules of the target type and its
+    subclasses, making an initialization/composition/inheritance/... via
+    updater.create_from.
+ """
+ for name, module in model.named_children():
+ model._modules[name] = patch(module, target, updater, *args, **kwargs)
+ if isinstance(model, target):
+ return updater.create_from(model, *args, **kwargs)
+ return model
+
+
+def patch_generalized_rcnn(model):
+ ccc = Caffe2CompatibleConverter
+ model = patch(model, rpn.RPN, ccc(Caffe2RPN))
+ model = patch(model, poolers.ROIPooler, ccc(Caffe2ROIPooler))
+
+ return model
+
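+# Sketch of how `patch` swaps module classes in place (hypothetical example,
+# assuming `model` is a GeneralizedRCNN): every rpn.RPN submodule becomes a
+# Caffe2RPN while keeping its parameters and buffers.
+#
+#   model = patch(model, rpn.RPN, Caffe2CompatibleConverter(Caffe2RPN))
+#   assert isinstance(model.proposal_generator, Caffe2RPN)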
+
+@contextlib.contextmanager
+def mock_fastrcnn_outputs_inference(
+ tensor_mode, check=True, box_predictor_type=FastRCNNOutputLayers
+):
+ with mock.patch.object(
+ box_predictor_type,
+ "inference",
+ autospec=True,
+ side_effect=Caffe2FastRCNNOutputsInference(tensor_mode),
+ ) as mocked_func:
+ yield
+ if check:
+ assert mocked_func.call_count > 0
+
+
+@contextlib.contextmanager
+def mock_mask_rcnn_inference(tensor_mode, patched_module, check=True):
+ with mock.patch(
+ "{}.mask_rcnn_inference".format(patched_module), side_effect=Caffe2MaskRCNNInference()
+ ) as mocked_func:
+ yield
+ if check:
+ assert mocked_func.call_count > 0
+
+
+@contextlib.contextmanager
+def mock_keypoint_rcnn_inference(tensor_mode, patched_module, use_heatmap_max_keypoint, check=True):
+ with mock.patch(
+ "{}.keypoint_rcnn_inference".format(patched_module),
+ side_effect=Caffe2KeypointRCNNInference(use_heatmap_max_keypoint),
+ ) as mocked_func:
+ yield
+ if check:
+ assert mocked_func.call_count > 0
+
+
+class ROIHeadsPatcher:
+ def __init__(self, heads, use_heatmap_max_keypoint):
+ self.heads = heads
+ self.use_heatmap_max_keypoint = use_heatmap_max_keypoint
+ self.previous_patched = {}
+
+ @contextlib.contextmanager
+ def mock_roi_heads(self, tensor_mode=True):
+ """
+ Patching several inference functions inside ROIHeads and its subclasses
+
+ Args:
+ tensor_mode (bool): whether the inputs/outputs are caffe2's tensor
+ format or not. Default to True.
+ """
+        # NOTE: this requires that `keypoint_rcnn_inference` and `mask_rcnn_inference`
+        # are called inside the same file as BaseXxxHead, due to the use of mock.patch.
+ kpt_heads_mod = keypoint_head.BaseKeypointRCNNHead.__module__
+ mask_head_mod = mask_head.BaseMaskRCNNHead.__module__
+
+ mock_ctx_managers = [
+ mock_fastrcnn_outputs_inference(
+ tensor_mode=tensor_mode,
+ check=True,
+ box_predictor_type=type(self.heads.box_predictor),
+ )
+ ]
+ if getattr(self.heads, "keypoint_on", False):
+ mock_ctx_managers += [
+ mock_keypoint_rcnn_inference(
+ tensor_mode, kpt_heads_mod, self.use_heatmap_max_keypoint
+ )
+ ]
+ if getattr(self.heads, "mask_on", False):
+ mock_ctx_managers += [mock_mask_rcnn_inference(tensor_mode, mask_head_mod)]
+
+ with contextlib.ExitStack() as stack: # python 3.3+
+ for mgr in mock_ctx_managers:
+ stack.enter_context(mgr)
+ yield
+
+ def patch_roi_heads(self, tensor_mode=True):
+ self.previous_patched["box_predictor"] = self.heads.box_predictor.inference
+ self.previous_patched["keypoint_rcnn"] = keypoint_head.keypoint_rcnn_inference
+ self.previous_patched["mask_rcnn"] = mask_head.mask_rcnn_inference
+
+ def patched_fastrcnn_outputs_inference(predictions, proposal):
+ return caffe2_fast_rcnn_outputs_inference(
+ True, self.heads.box_predictor, predictions, proposal
+ )
+
+ self.heads.box_predictor.inference = patched_fastrcnn_outputs_inference
+
+ if getattr(self.heads, "keypoint_on", False):
+
+ def patched_keypoint_rcnn_inference(pred_keypoint_logits, pred_instances):
+ return caffe2_keypoint_rcnn_inference(
+ self.use_heatmap_max_keypoint, pred_keypoint_logits, pred_instances
+ )
+
+ keypoint_head.keypoint_rcnn_inference = patched_keypoint_rcnn_inference
+
+ if getattr(self.heads, "mask_on", False):
+
+ def patched_mask_rcnn_inference(pred_mask_logits, pred_instances):
+ return caffe2_mask_rcnn_inference(pred_mask_logits, pred_instances)
+
+ mask_head.mask_rcnn_inference = patched_mask_rcnn_inference
+
+ def unpatch_roi_heads(self):
+ self.heads.box_predictor.inference = self.previous_patched["box_predictor"]
+ keypoint_head.keypoint_rcnn_inference = self.previous_patched["keypoint_rcnn"]
+ mask_head.mask_rcnn_inference = self.previous_patched["mask_rcnn"]
diff --git a/detectron2/export/flatten.py b/detectron2/export/flatten.py
new file mode 100644
index 0000000000000000000000000000000000000000..f5ba4297567d650f147eebeed361e9d62fab899d
--- /dev/null
+++ b/detectron2/export/flatten.py
@@ -0,0 +1,330 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import collections
+from dataclasses import dataclass
+from typing import Callable, List, Optional, Tuple
+import torch
+from torch import nn
+
+from detectron2.structures import Boxes, Instances, ROIMasks
+from detectron2.utils.registry import _convert_target_to_string, locate
+
+from .torchscript_patch import patch_builtin_len
+
+
+@dataclass
+class Schema:
+ """
+ A Schema defines how to flatten a possibly hierarchical object into tuple of
+ primitive objects, so it can be used as inputs/outputs of PyTorch's tracing.
+
+ PyTorch does not support tracing a function that produces rich output
+ structures (e.g. dict, Instances, Boxes). To trace such a function, we
+    flatten the rich object into a tuple of tensors, and return this tuple of tensors
+ instead. Meanwhile, we also need to know how to "rebuild" the original object
+ from the flattened results, so we can evaluate the flattened results.
+ A Schema defines how to flatten an object, and while flattening it, it records
+ necessary schemas so that the object can be rebuilt using the flattened outputs.
+
+    The flattened object and the schema object are returned by the ``.flatten`` classmethod.
+ Then the original object can be rebuilt with the ``__call__`` method of schema.
+
+ A Schema is a dataclass that can be serialized easily.
+ """
+
+ # inspired by FetchMapper in tensorflow/python/client/session.py
+
+ @classmethod
+ def flatten(cls, obj):
+ raise NotImplementedError
+
+ def __call__(self, values):
+ raise NotImplementedError
+
+ @staticmethod
+ def _concat(values):
+ ret = ()
+ sizes = []
+ for v in values:
+ assert isinstance(v, tuple), "Flattened results must be a tuple"
+ ret = ret + v
+ sizes.append(len(v))
+ return ret, sizes
+
+ @staticmethod
+ def _split(values, sizes):
+ if len(sizes):
+ expected_len = sum(sizes)
+ assert (
+ len(values) == expected_len
+ ), f"Values has length {len(values)} but expect length {expected_len}."
+ ret = []
+ for k in range(len(sizes)):
+ begin, end = sum(sizes[:k]), sum(sizes[: k + 1])
+ ret.append(values[begin:end])
+ return ret
+
+
+@dataclass
+class ListSchema(Schema):
+ schemas: List[Schema] # the schemas that define how to flatten each element in the list
+ sizes: List[int] # the flattened length of each element
+
+ def __call__(self, values):
+ values = self._split(values, self.sizes)
+ if len(values) != len(self.schemas):
+ raise ValueError(
+ f"Values has length {len(values)} but schemas " f"has length {len(self.schemas)}!"
+ )
+ values = [m(v) for m, v in zip(self.schemas, values)]
+ return list(values)
+
+ @classmethod
+ def flatten(cls, obj):
+ res = [flatten_to_tuple(k) for k in obj]
+ values, sizes = cls._concat([k[0] for k in res])
+ return values, cls([k[1] for k in res], sizes)
+
+
+@dataclass
+class TupleSchema(ListSchema):
+ def __call__(self, values):
+ return tuple(super().__call__(values))
+
+
+@dataclass
+class IdentitySchema(Schema):
+ def __call__(self, values):
+ return values[0]
+
+ @classmethod
+ def flatten(cls, obj):
+ return (obj,), cls()
+
+
+@dataclass
+class DictSchema(ListSchema):
+ keys: List[str]
+
+ def __call__(self, values):
+ values = super().__call__(values)
+ return dict(zip(self.keys, values))
+
+ @classmethod
+ def flatten(cls, obj):
+ for k in obj.keys():
+ if not isinstance(k, str):
+ raise KeyError("Only support flattening dictionaries if keys are str.")
+ keys = sorted(obj.keys())
+ values = [obj[k] for k in keys]
+ ret, schema = ListSchema.flatten(values)
+ return ret, cls(schema.schemas, schema.sizes, keys)
+
+
+@dataclass
+class InstancesSchema(DictSchema):
+ def __call__(self, values):
+ image_size, fields = values[-1], values[:-1]
+ fields = super().__call__(fields)
+ return Instances(image_size, **fields)
+
+ @classmethod
+ def flatten(cls, obj):
+ ret, schema = super().flatten(obj.get_fields())
+ size = obj.image_size
+ if not isinstance(size, torch.Tensor):
+ size = torch.tensor(size)
+ return ret + (size,), schema
+
+
+@dataclass
+class TensorWrapSchema(Schema):
+ """
+ For classes that are simple wrapper of tensors, e.g.
+ Boxes, RotatedBoxes, BitMasks
+ """
+
+ class_name: str
+
+ def __call__(self, values):
+ return locate(self.class_name)(values[0])
+
+ @classmethod
+ def flatten(cls, obj):
+ return (obj.tensor,), cls(_convert_target_to_string(type(obj)))
+
+
+# if more custom structures needed in the future, can allow
+# passing in extra schemas for custom types
+def flatten_to_tuple(obj):
+ """
+ Flatten an object so it can be used for PyTorch tracing.
+ Also returns how to rebuild the original object from the flattened outputs.
+
+ Returns:
+ res (tuple): the flattened results that can be used as tracing outputs
+ schema: an object with a ``__call__`` method such that ``schema(res) == obj``.
+ It is a pure dataclass that can be serialized.
+ """
+ schemas = [
+ ((str, bytes), IdentitySchema),
+ (list, ListSchema),
+ (tuple, TupleSchema),
+ (collections.abc.Mapping, DictSchema),
+ (Instances, InstancesSchema),
+ ((Boxes, ROIMasks), TensorWrapSchema),
+ ]
+ for klass, schema in schemas:
+ if isinstance(obj, klass):
+ F = schema
+ break
+ else:
+ F = IdentitySchema
+
+ return F.flatten(obj)
+
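+# Small sketch (shapes are illustrative): flatten an Instances into a tuple of
+# tensors plus a schema that rebuilds an equivalent object.
+#
+#   inst = Instances((480, 640))
+#   inst.pred_boxes = Boxes(torch.rand(3, 4))
+#   inst.scores = torch.rand(3)
+#   flat, schema = flatten_to_tuple(inst)  # (boxes_tensor, scores, image_size)
+#   rebuilt = schema(flat)                 # an Instances with the same fields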
+
+class TracingAdapter(nn.Module):
+ """
+ A model may take rich input/output format (e.g. dict or custom classes),
+ but `torch.jit.trace` requires tuple of tensors as input/output.
+ This adapter flattens input/output format of a model so it becomes traceable.
+
+ It also records the necessary schema to rebuild model's inputs/outputs from flattened
+ inputs/outputs.
+
+ Example:
+ ::
+ outputs = model(inputs) # inputs/outputs may be rich structure
+ adapter = TracingAdapter(model, inputs)
+
+ # can now trace the model, with adapter.flattened_inputs, or another
+ # tuple of tensors with the same length and meaning
+ traced = torch.jit.trace(adapter, adapter.flattened_inputs)
+
+ # traced model can only produce flattened outputs (tuple of tensors)
+ flattened_outputs = traced(*adapter.flattened_inputs)
+ # adapter knows the schema to convert it back (new_outputs == outputs)
+ new_outputs = adapter.outputs_schema(flattened_outputs)
+ """
+
+ flattened_inputs: Tuple[torch.Tensor] = None
+ """
+ Flattened version of inputs given to this class's constructor.
+ """
+
+ inputs_schema: Schema = None
+ """
+ Schema of the inputs given to this class's constructor.
+ """
+
+ outputs_schema: Schema = None
+ """
+ Schema of the output produced by calling the given model with inputs.
+ """
+
+ def __init__(
+ self,
+ model: nn.Module,
+ inputs,
+ inference_func: Optional[Callable] = None,
+ allow_non_tensor: bool = False,
+ ):
+ """
+ Args:
+ model: an nn.Module
+ inputs: An input argument or a tuple of input arguments used to call model.
+ After flattening, it has to only consist of tensors.
+ inference_func: a callable that takes (model, *inputs), calls the
+ model with inputs, and return outputs. By default it
+                is ``lambda model, *inputs: model(*inputs)``. Can be overridden
+ if you need to call the model differently.
+ allow_non_tensor: allow inputs/outputs to contain non-tensor objects.
+ This option will filter out non-tensor objects to make the
+ model traceable, but ``inputs_schema``/``outputs_schema`` cannot be
+ used anymore because inputs/outputs cannot be rebuilt from pure tensors.
+ This is useful when you're only interested in the single trace of
+ execution (e.g. for flop count), but not interested in
+ generalizing the traced graph to new inputs.
+ """
+ super().__init__()
+ if isinstance(model, (nn.parallel.distributed.DistributedDataParallel, nn.DataParallel)):
+ model = model.module
+ self.model = model
+ if not isinstance(inputs, tuple):
+ inputs = (inputs,)
+ self.inputs = inputs
+ self.allow_non_tensor = allow_non_tensor
+
+ if inference_func is None:
+ inference_func = lambda model, *inputs: model(*inputs) # noqa
+ self.inference_func = inference_func
+
+ self.flattened_inputs, self.inputs_schema = flatten_to_tuple(inputs)
+
+ if all(isinstance(x, torch.Tensor) for x in self.flattened_inputs):
+ return
+ if self.allow_non_tensor:
+ self.flattened_inputs = tuple(
+ [x for x in self.flattened_inputs if isinstance(x, torch.Tensor)]
+ )
+ self.inputs_schema = None
+ else:
+ for input in self.flattened_inputs:
+ if not isinstance(input, torch.Tensor):
+ raise ValueError(
+ "Inputs for tracing must only contain tensors. "
+ f"Got a {type(input)} instead."
+ )
+
+ def forward(self, *args: torch.Tensor):
+ with torch.no_grad(), patch_builtin_len():
+ if self.inputs_schema is not None:
+ inputs_orig_format = self.inputs_schema(args)
+ else:
+ if len(args) != len(self.flattened_inputs) or any(
+ x is not y for x, y in zip(args, self.flattened_inputs)
+ ):
+ raise ValueError(
+ "TracingAdapter does not contain valid inputs_schema."
+ " So it cannot generalize to other inputs and must be"
+ " traced with `.flattened_inputs`."
+ )
+ inputs_orig_format = self.inputs
+
+ outputs = self.inference_func(self.model, *inputs_orig_format)
+ flattened_outputs, schema = flatten_to_tuple(outputs)
+
+ flattened_output_tensors = tuple(
+ [x for x in flattened_outputs if isinstance(x, torch.Tensor)]
+ )
+ if len(flattened_output_tensors) < len(flattened_outputs):
+ if self.allow_non_tensor:
+ flattened_outputs = flattened_output_tensors
+ self.outputs_schema = None
+ else:
+ raise ValueError(
+ "Model cannot be traced because some model outputs "
+ "cannot flatten to tensors."
+ )
+ else: # schema is valid
+ if self.outputs_schema is None:
+ self.outputs_schema = schema
+ else:
+ assert self.outputs_schema == schema, (
+ "Model should always return outputs with the same "
+ "structure so it can be traced!"
+ )
+ return flattened_outputs
+
+ def _create_wrapper(self, traced_model):
+ """
+ Return a function that has an input/output interface the same as the
+ original model, but it calls the given traced model under the hood.
+ """
+
+ def forward(*args):
+ flattened_inputs, _ = flatten_to_tuple(args)
+ flattened_outputs = traced_model(*flattened_inputs)
+ return self.outputs_schema(flattened_outputs)
+
+ return forward
diff --git a/detectron2/export/shared.py b/detectron2/export/shared.py
new file mode 100644
index 0000000000000000000000000000000000000000..fe5b790fa301b911d2e00e0fdd0c0a3d8e27cbf2
--- /dev/null
+++ b/detectron2/export/shared.py
@@ -0,0 +1,1039 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import collections
+import copy
+import functools
+import logging
+import numpy as np
+import os
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+from unittest import mock
+import caffe2.python.utils as putils
+import torch
+import torch.nn.functional as F
+from caffe2.proto import caffe2_pb2
+from caffe2.python import core, net_drawer, workspace
+from torch.nn.functional import interpolate as interp
+
+logger = logging.getLogger(__name__)
+
+
+# ==== torch/utils_toffee/cast.py =======================================
+
+
+def to_device(t, device_str):
+ """
+    This function is a replacement for .to(another_device) that allows the
+    cast to be traced properly by explicitly calling the underlying copy ops.
+    It also avoids introducing an unnecessary op when casting to the same device.
+ """
+ src = t.device
+ dst = torch.device(device_str)
+
+ if src == dst:
+ return t
+ elif src.type == "cuda" and dst.type == "cpu":
+ return torch.ops._caffe2.CopyGPUToCPU(t)
+ elif src.type == "cpu" and dst.type == "cuda":
+ return torch.ops._caffe2.CopyCPUToGPU(t)
+ else:
+ raise RuntimeError("Can't cast tensor from device {} to device {}".format(src, dst))
+
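+# A minimal usage sketch (the tensor below is hypothetical); to_device behaves
+# like Tensor.to() but stays traceable:
+#
+#   x = torch.rand(2, 3)       # a CPU tensor
+#   y = to_device(x, "cpu")    # same device: returns x unchanged
+#   z = to_device(x, "cuda")   # uses the explicit CopyCPUToGPU op (assumes CUDA)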
+
+# ==== torch/utils_toffee/interpolate.py =======================================
+
+
+# Note: borrowed from vision/detection/fair/detectron/detectron/modeling/detector.py
+def BilinearInterpolation(tensor_in, up_scale):
+ assert up_scale % 2 == 0, "Scale should be even"
+
+ def upsample_filt(size):
+ factor = (size + 1) // 2
+ if size % 2 == 1:
+ center = factor - 1
+ else:
+ center = factor - 0.5
+
+ og = np.ogrid[:size, :size]
+ return (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)
+
+ kernel_size = int(up_scale) * 2
+ bil_filt = upsample_filt(kernel_size)
+
+ dim = int(tensor_in.shape[1])
+ kernel = np.zeros((dim, dim, kernel_size, kernel_size), dtype=np.float32)
+ kernel[range(dim), range(dim), :, :] = bil_filt
+
+ tensor_out = F.conv_transpose2d(
+ tensor_in,
+ weight=to_device(torch.Tensor(kernel), tensor_in.device),
+ bias=None,
+ stride=int(up_scale),
+ padding=int(up_scale / 2),
+ )
+
+ return tensor_out
+
+
+# NOTE: ONNX is incompatible with traced torch.nn.functional.interpolate if
+# using dynamic `scale_factor` rather than static `size`. (T43166860)
+# NOTE: Caffe2 Int8 conversion might not be able to quantize `size` properly.
+def onnx_compatibale_interpolate(
+ input, size=None, scale_factor=None, mode="nearest", align_corners=None
+):
+ # NOTE: The input dimensions are interpreted in the form:
+ # `mini-batch x channels x [optional depth] x [optional height] x width`.
+ if size is None and scale_factor is not None:
+ if input.dim() == 4:
+ if isinstance(scale_factor, (int, float)):
+ height_scale, width_scale = (scale_factor, scale_factor)
+ else:
+ assert isinstance(scale_factor, (tuple, list))
+ assert len(scale_factor) == 2
+ height_scale, width_scale = scale_factor
+
+ assert not align_corners, "No matching C2 op for align_corners == True"
+ if mode == "nearest":
+ return torch.ops._caffe2.ResizeNearest(
+ input, order="NCHW", width_scale=width_scale, height_scale=height_scale
+ )
+ elif mode == "bilinear":
+ logger.warning(
+                    "Use F.conv_transpose2d for bilinear interpolation"
+                    " because there's no such C2 op; this may cause a significant"
+                    " slowdown, and the boundary pixels won't be the same as"
+                    " with F.interpolate, due to padding."
+ )
+ assert height_scale == width_scale
+ return BilinearInterpolation(input, up_scale=height_scale)
+        logger.warning("Output size is not static; this might cause ONNX conversion issues")
+
+ return interp(input, size, scale_factor, mode, align_corners)
+
+
+def mock_torch_nn_functional_interpolate():
+ def decorator(func):
+ @functools.wraps(func)
+ def _mock_torch_nn_functional_interpolate(*args, **kwargs):
+ if torch.onnx.is_in_onnx_export():
+ with mock.patch(
+ "torch.nn.functional.interpolate", side_effect=onnx_compatibale_interpolate
+ ):
+ return func(*args, **kwargs)
+ else:
+ return func(*args, **kwargs)
+
+ return _mock_torch_nn_functional_interpolate
+
+ return decorator
+
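+# A usage sketch (MyHead and its forward are hypothetical): decorating a forward
+# method swaps F.interpolate for the ONNX/Caffe2-compatible version above, but
+# only while torch.onnx.is_in_onnx_export() is True:
+#
+#   class MyHead(torch.nn.Module):
+#       @mock_torch_nn_functional_interpolate()
+#       def forward(self, x):
+#           return F.interpolate(x, scale_factor=2, mode="nearest")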
+
+# ==== torch/utils_caffe2/ws_utils.py ==========================================
+
+
+class ScopedWS:
+ def __init__(self, ws_name, is_reset, is_cleanup=False):
+ self.ws_name = ws_name
+ self.is_reset = is_reset
+ self.is_cleanup = is_cleanup
+ self.org_ws = ""
+
+ def __enter__(self):
+ self.org_ws = workspace.CurrentWorkspace()
+ if self.ws_name is not None:
+ workspace.SwitchWorkspace(self.ws_name, True)
+ if self.is_reset:
+ workspace.ResetWorkspace()
+
+ return workspace
+
+ def __exit__(self, *args):
+ if self.is_cleanup:
+ workspace.ResetWorkspace()
+ if self.ws_name is not None:
+ workspace.SwitchWorkspace(self.org_ws)
+
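+# A usage sketch ("scratch" is an arbitrary workspace name, init_net is an assumed
+# caffe2_pb2.NetDef): run a net in an isolated workspace that is reset on entry
+# and cleaned up on exit:
+#
+#   with ScopedWS("scratch", is_reset=True, is_cleanup=True) as ws:
+#       ws.RunNetOnce(init_net)
+#       value = ws.FetchBlob("some_blob")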
+
+def fetch_any_blob(name):
+ bb = None
+ try:
+ bb = workspace.FetchBlob(name)
+ except TypeError:
+ bb = workspace.FetchInt8Blob(name)
+ except Exception as e:
+ logger.error("Get blob {} error: {}".format(name, e))
+
+ return bb
+
+
+# ==== torch/utils_caffe2/protobuf.py ==========================================
+
+
+def get_pb_arg(pb, arg_name):
+ for x in pb.arg:
+ if x.name == arg_name:
+ return x
+ return None
+
+
+def get_pb_arg_valf(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return arg.f if arg is not None else default_val
+
+
+def get_pb_arg_floats(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return list(map(float, arg.floats)) if arg is not None else default_val
+
+
+def get_pb_arg_ints(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return list(map(int, arg.ints)) if arg is not None else default_val
+
+
+def get_pb_arg_vali(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return arg.i if arg is not None else default_val
+
+
+def get_pb_arg_vals(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return arg.s if arg is not None else default_val
+
+
+def get_pb_arg_valstrings(pb, arg_name, default_val):
+ arg = get_pb_arg(pb, arg_name)
+ return list(arg.strings) if arg is not None else default_val
+
+
+def check_set_pb_arg(pb, arg_name, arg_attr, arg_value, allow_override=False):
+ arg = get_pb_arg(pb, arg_name)
+ if arg is None:
+ arg = putils.MakeArgument(arg_name, arg_value)
+ assert hasattr(arg, arg_attr)
+ pb.arg.extend([arg])
+ if allow_override and getattr(arg, arg_attr) != arg_value:
+ logger.warning(
+ "Override argument {}: {} -> {}".format(arg_name, getattr(arg, arg_attr), arg_value)
+ )
+ setattr(arg, arg_attr, arg_value)
+ else:
+ assert arg is not None
+ assert getattr(arg, arg_attr) == arg_value, "Existing value {}, new value {}".format(
+ getattr(arg, arg_attr), arg_value
+ )
+
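+# A reading/writing sketch on a hypothetical OperatorDef `op`, using the helpers
+# above:
+#
+#   stride = get_pb_arg_vali(op, "stride", 1)         # int arg with a default
+#   scales = get_pb_arg_floats(op, "scales", [1.0])   # repeated float arg
+#   check_set_pb_arg(op, "group", "i", 32)            # set an int arg if absent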
+
+def _create_const_fill_op_from_numpy(name, tensor, device_option=None):
+ assert type(tensor) == np.ndarray
+ kTypeNameMapper = {
+ np.dtype("float32"): "GivenTensorFill",
+ np.dtype("int32"): "GivenTensorIntFill",
+ np.dtype("int64"): "GivenTensorInt64Fill",
+ np.dtype("uint8"): "GivenTensorStringFill",
+ }
+
+ args_dict = {}
+ if tensor.dtype == np.dtype("uint8"):
+ args_dict.update({"values": [str(tensor.data)], "shape": [1]})
+ else:
+ args_dict.update({"values": tensor, "shape": tensor.shape})
+
+ if device_option is not None:
+ args_dict["device_option"] = device_option
+
+ return core.CreateOperator(kTypeNameMapper[tensor.dtype], [], [name], **args_dict)
+
+
+def _create_const_fill_op_from_c2_int8_tensor(name, int8_tensor):
+ assert type(int8_tensor) == workspace.Int8Tensor
+ kTypeNameMapper = {
+ np.dtype("int32"): "Int8GivenIntTensorFill",
+ np.dtype("uint8"): "Int8GivenTensorFill",
+ }
+
+ tensor = int8_tensor.data
+ assert tensor.dtype in [np.dtype("uint8"), np.dtype("int32")]
+ values = tensor.tobytes() if tensor.dtype == np.dtype("uint8") else tensor
+
+ return core.CreateOperator(
+ kTypeNameMapper[tensor.dtype],
+ [],
+ [name],
+ values=values,
+ shape=tensor.shape,
+ Y_scale=int8_tensor.scale,
+ Y_zero_point=int8_tensor.zero_point,
+ )
+
+
+def create_const_fill_op(
+ name: str,
+ blob: Union[np.ndarray, workspace.Int8Tensor],
+ device_option: Optional[caffe2_pb2.DeviceOption] = None,
+) -> caffe2_pb2.OperatorDef:
+ """
+    Given a blob object, return the Caffe2 operator that creates this blob
+    as a constant. Currently supports NumPy tensors and Caffe2 Int8Tensor.
+ """
+
+ tensor_type = type(blob)
+ assert tensor_type in [
+ np.ndarray,
+ workspace.Int8Tensor,
+ ], 'Error when creating const fill op for "{}", unsupported blob type: {}'.format(
+ name, type(blob)
+ )
+
+ if tensor_type == np.ndarray:
+ return _create_const_fill_op_from_numpy(name, blob, device_option)
+ elif tensor_type == workspace.Int8Tensor:
+ assert device_option is None
+ return _create_const_fill_op_from_c2_int8_tensor(name, blob)
+
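+# A minimal sketch: turn a NumPy weight into a constant-fill op for an init net
+# (the blob name "conv1_w" is hypothetical):
+#
+#   w = np.zeros((64, 3, 7, 7), dtype=np.float32)
+#   op = create_const_fill_op("conv1_w", w)   # -> a GivenTensorFill OperatorDef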
+
+def construct_init_net_from_params(
+ params: Dict[str, Any], device_options: Optional[Dict[str, caffe2_pb2.DeviceOption]] = None
+) -> caffe2_pb2.NetDef:
+ """
+ Construct the init_net from params dictionary
+ """
+ init_net = caffe2_pb2.NetDef()
+ device_options = device_options or {}
+ for name, blob in params.items():
+ if isinstance(blob, str):
+ logger.warning(
+ (
+ "Blob {} with type {} is not supported in generating init net,"
+ " skipped.".format(name, type(blob))
+ )
+ )
+ continue
+ init_net.op.extend(
+ [create_const_fill_op(name, blob, device_option=device_options.get(name, None))]
+ )
+ init_net.external_output.append(name)
+ return init_net
+
+
+def get_producer_map(ssa):
+ """
+ Return dict from versioned blob to (i, j),
+ where i is index of producer op, j is the index of output of that op.
+ """
+ producer_map = {}
+ for i in range(len(ssa)):
+ outputs = ssa[i][1]
+ for j, outp in enumerate(outputs):
+ producer_map[outp] = (i, j)
+ return producer_map
+
+
+def get_consumer_map(ssa):
+ """
+ Return dict from versioned blob to list of (i, j),
+ where i is index of consumer op, j is the index of input of that op.
+ """
+ consumer_map = collections.defaultdict(list)
+ for i in range(len(ssa)):
+ inputs = ssa[i][0]
+ for j, inp in enumerate(inputs):
+ consumer_map[inp].append((i, j))
+ return consumer_map
+
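+# A tiny sketch of the ssa format these helpers expect (two ops; blobs are
+# (name, version) tuples, as returned by core.get_ssa):
+#
+#   ssa = [([("x", 0)], [("y", 0)]),   # op 0: x -> y
+#          ([("y", 0)], [("z", 0)])]   # op 1: y -> z
+#   get_producer_map(ssa)[("y", 0)]    # -> (0, 0)
+#   get_consumer_map(ssa)[("y", 0)]    # -> [(1, 0)]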
+
+def get_params_from_init_net(
+ init_net: caffe2_pb2.NetDef,
+) -> [Dict[str, Any], Dict[str, caffe2_pb2.DeviceOption]]:
+ """
+ Take the output blobs from init_net by running it.
+ Outputs:
+ params: dict from blob name to numpy array
+ device_options: dict from blob name to the device option of its creating op
+ """
+    # NOTE: this assumes that the params are determined by their producer op, with
+    # the only exception being CopyGPUToCPU, which is a CUDA op but returns a CPU tensor.
+ def _get_device_option(producer_op):
+ if producer_op.type == "CopyGPUToCPU":
+ return caffe2_pb2.DeviceOption()
+ else:
+ return producer_op.device_option
+
+ with ScopedWS("__get_params_from_init_net__", is_reset=True, is_cleanup=True) as ws:
+ ws.RunNetOnce(init_net)
+ params = {b: fetch_any_blob(b) for b in init_net.external_output}
+ ssa, versions = core.get_ssa(init_net)
+ producer_map = get_producer_map(ssa)
+ device_options = {
+ b: _get_device_option(init_net.op[producer_map[(b, versions[b])][0]])
+ for b in init_net.external_output
+ }
+ return params, device_options
+
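+# A usage sketch, given an init_net NetDef loaded elsewhere (the blob name is
+# hypothetical):
+#
+#   params, device_options = get_params_from_init_net(init_net)
+#   weight = params["conv1_w"]   # a NumPy array (or an Int8 blob)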
+
+def _updater_raise(op, input_types, output_types):
+ raise RuntimeError(
+ "Failed to apply updater for op {} given input_types {} and"
+ " output_types {}".format(op, input_types, output_types)
+ )
+
+
+def _generic_status_identifier(
+ predict_net: caffe2_pb2.NetDef,
+ status_updater: Callable,
+ known_status: Dict[Tuple[str, int], Any],
+) -> Dict[Tuple[str, int], Any]:
+ """
+    Statically infer the status of each blob; the status can be, e.g., device type
+    (CPU/GPU), layout (NCHW/NHWC), or data type (float32/int8). "Blob" here
+    means a versioned blob (Tuple[str, int]) in the format compatible with ssa.
+ Inputs:
+ predict_net: the caffe2 network
+ status_updater: a callable, given an op and the status of its input/output,
+ it returns the updated status of input/output. `None` is used for
+ representing unknown status.
+ known_status: a dict containing known status, used as initialization.
+ Outputs:
+ A dict mapping from versioned blob to its status
+ """
+ ssa, versions = core.get_ssa(predict_net)
+ versioned_ext_input = [(b, 0) for b in predict_net.external_input]
+ versioned_ext_output = [(b, versions[b]) for b in predict_net.external_output]
+ all_versioned_blobs = set().union(*[set(x[0] + x[1]) for x in ssa])
+
+ allowed_vbs = all_versioned_blobs.union(versioned_ext_input).union(versioned_ext_output)
+ assert all(k in allowed_vbs for k in known_status)
+ assert all(v is not None for v in known_status.values())
+ _known_status = copy.deepcopy(known_status)
+
+ def _check_and_update(key, value):
+ assert value is not None
+ if key in _known_status:
+ if not _known_status[key] == value:
+ raise RuntimeError(
+                    "Conflicting status for {}: existing status {}, new status {}".format(
+ key, _known_status[key], value
+ )
+ )
+ _known_status[key] = value
+
+ def _update_i(op, ssa_i):
+ versioned_inputs = ssa_i[0]
+ versioned_outputs = ssa_i[1]
+
+ inputs_status = [_known_status.get(b, None) for b in versioned_inputs]
+ outputs_status = [_known_status.get(b, None) for b in versioned_outputs]
+
+ new_inputs_status, new_outputs_status = status_updater(op, inputs_status, outputs_status)
+
+ for versioned_blob, status in zip(
+ versioned_inputs + versioned_outputs, new_inputs_status + new_outputs_status
+ ):
+ if status is not None:
+ _check_and_update(versioned_blob, status)
+
+ for op, ssa_i in zip(predict_net.op, ssa):
+ _update_i(op, ssa_i)
+ for op, ssa_i in zip(reversed(predict_net.op), reversed(ssa)):
+ _update_i(op, ssa_i)
+
+    # NOTE: This strictly checks that every blob from predict_net is assigned a
+    # known status. However, sometimes that's impossible (e.g. when there are
+    # dead-end ops); we may relax this constraint in the future if needed.
+ for k in all_versioned_blobs:
+ if k not in _known_status:
+ raise NotImplementedError(
+                "Cannot infer the status for {}. Currently this only supports the case where"
+                " a single forward and backward pass can identify status for all blobs.".format(k)
+ )
+
+ return _known_status
+
+
+def infer_device_type(
+ predict_net: caffe2_pb2.NetDef,
+ known_status: Dict[Tuple[str, int], Any],
+ device_name_style: str = "caffe2",
+) -> Dict[Tuple[str, int], str]:
+ """Return the device type ("cpu" or "gpu"/"cuda") of each (versioned) blob"""
+
+ assert device_name_style in ["caffe2", "pytorch"]
+ _CPU_STR = "cpu"
+ _GPU_STR = "gpu" if device_name_style == "caffe2" else "cuda"
+
+ def _copy_cpu_to_gpu_updater(op, input_types, output_types):
+ if input_types[0] == _GPU_STR or output_types[0] == _CPU_STR:
+ _updater_raise(op, input_types, output_types)
+ return ([_CPU_STR], [_GPU_STR])
+
+ def _copy_gpu_to_cpu_updater(op, input_types, output_types):
+ if input_types[0] == _CPU_STR or output_types[0] == _GPU_STR:
+ _updater_raise(op, input_types, output_types)
+ return ([_GPU_STR], [_CPU_STR])
+
+ def _other_ops_updater(op, input_types, output_types):
+ non_none_types = [x for x in input_types + output_types if x is not None]
+ if len(non_none_types) > 0:
+ the_type = non_none_types[0]
+ if not all(x == the_type for x in non_none_types):
+ _updater_raise(op, input_types, output_types)
+ else:
+ the_type = None
+ return ([the_type for _ in op.input], [the_type for _ in op.output])
+
+ def _device_updater(op, *args, **kwargs):
+ return {
+ "CopyCPUToGPU": _copy_cpu_to_gpu_updater,
+ "CopyGPUToCPU": _copy_gpu_to_cpu_updater,
+ }.get(op.type, _other_ops_updater)(op, *args, **kwargs)
+
+ return _generic_status_identifier(predict_net, _device_updater, known_status)
+
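+# A usage sketch: seed the inference with the device of an external input
+# (blob names below are hypothetical) and read back per-blob devices:
+#
+#   known = {("data", 0): "gpu"}
+#   blob_devices = infer_device_type(predict_net, known, device_name_style="caffe2")
+#   blob_devices[("cls_prob", 1)]   # -> "cpu" or "gpu"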
+
+# ==== torch/utils_caffe2/vis.py ===============================================
+
+
+def _modify_blob_names(ops, blob_rename_f):
+ ret = []
+
+ def _replace_list(blob_list, replaced_list):
+ del blob_list[:]
+ blob_list.extend(replaced_list)
+
+ for x in ops:
+ cur = copy.deepcopy(x)
+ _replace_list(cur.input, list(map(blob_rename_f, cur.input)))
+ _replace_list(cur.output, list(map(blob_rename_f, cur.output)))
+ ret.append(cur)
+
+ return ret
+
+
+def _rename_blob(name, blob_sizes, blob_ranges):
+ def _list_to_str(bsize):
+ ret = ", ".join([str(x) for x in bsize])
+ ret = "[" + ret + "]"
+ return ret
+
+ ret = name
+ if blob_sizes is not None and name in blob_sizes:
+ ret += "\n" + _list_to_str(blob_sizes[name])
+ if blob_ranges is not None and name in blob_ranges:
+ ret += "\n" + _list_to_str(blob_ranges[name])
+
+ return ret
+
+
+# graph_name must not contain the word 'graph'
+def save_graph(net, file_name, graph_name="net", op_only=True, blob_sizes=None, blob_ranges=None):
+ blob_rename_f = functools.partial(_rename_blob, blob_sizes=blob_sizes, blob_ranges=blob_ranges)
+ return save_graph_base(net, file_name, graph_name, op_only, blob_rename_f)
+
+
+def save_graph_base(net, file_name, graph_name="net", op_only=True, blob_rename_func=None):
+ graph = None
+ ops = net.op
+ if blob_rename_func is not None:
+ ops = _modify_blob_names(ops, blob_rename_func)
+ if not op_only:
+ graph = net_drawer.GetPydotGraph(ops, graph_name, rankdir="TB")
+ else:
+ graph = net_drawer.GetPydotGraphMinimal(
+ ops, graph_name, rankdir="TB", minimal_dependency=True
+ )
+
+ try:
+ par_dir = os.path.dirname(file_name)
+ if not os.path.exists(par_dir):
+ os.makedirs(par_dir)
+
+ format = os.path.splitext(os.path.basename(file_name))[-1]
+ if format == ".png":
+ graph.write_png(file_name)
+ elif format == ".pdf":
+ graph.write_pdf(file_name)
+ elif format == ".svg":
+ graph.write_svg(file_name)
+ else:
+ print("Incorrect format {}".format(format))
+ except Exception as e:
+ print("Error when writing graph to image {}".format(e))
+
+ return graph
+
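+# A usage sketch: render predict_net as an SVG; the format is taken from the file
+# extension and graph_name must not contain the word "graph":
+#
+#   save_graph(predict_net, "/tmp/predict_net.svg", graph_name="net", op_only=True)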
+
+# ==== torch/utils_toffee/aten_to_caffe2.py ====================================
+
+
+def group_norm_replace_aten_with_caffe2(predict_net: caffe2_pb2.NetDef):
+ """
+ For ONNX exported model, GroupNorm will be represented as ATen op,
+    In an ONNX-exported model, GroupNorm is represented as an ATen op;
+    this function replaces the ATen op with Caffe2's GroupNorm in place.
+ count = 0
+ for op in predict_net.op:
+ if op.type == "ATen":
+ op_name = get_pb_arg_vals(op, "operator", None) # return byte in py3
+ if op_name and op_name.decode() == "group_norm":
+ op.arg.remove(get_pb_arg(op, "operator"))
+
+ if get_pb_arg_vali(op, "cudnn_enabled", None):
+ op.arg.remove(get_pb_arg(op, "cudnn_enabled"))
+
+ num_groups = get_pb_arg_vali(op, "num_groups", None)
+ if num_groups is not None:
+ op.arg.remove(get_pb_arg(op, "num_groups"))
+ check_set_pb_arg(op, "group", "i", num_groups)
+
+ op.type = "GroupNorm"
+ count += 1
+ if count > 1:
+        logger.info("Replaced {} ATen operators with GroupNorm".format(count))
+
+
+# ==== torch/utils_toffee/alias.py =============================================
+
+
+def alias(x, name, is_backward=False):
+ if not torch.onnx.is_in_onnx_export():
+ return x
+ assert isinstance(x, torch.Tensor)
+ return torch.ops._caffe2.AliasWithName(x, name, is_backward=is_backward)
+
+
+def fuse_alias_placeholder(predict_net, init_net):
+ """Remove AliasWithName placeholder and rename the input/output of it"""
+ # First we finish all the re-naming
+ for i, op in enumerate(predict_net.op):
+ if op.type == "AliasWithName":
+ assert len(op.input) == 1
+ assert len(op.output) == 1
+ name = get_pb_arg_vals(op, "name", None).decode()
+ is_backward = bool(get_pb_arg_vali(op, "is_backward", 0))
+ rename_op_input(predict_net, init_net, i, 0, name, from_producer=is_backward)
+ rename_op_output(predict_net, i, 0, name)
+
+ # Remove AliasWithName, should be very safe since it's a non-op
+ new_ops = []
+ for op in predict_net.op:
+ if op.type != "AliasWithName":
+ new_ops.append(op)
+ else:
+ # safety check
+ assert op.input == op.output
+ assert op.input[0] == op.arg[0].s.decode()
+ del predict_net.op[:]
+ predict_net.op.extend(new_ops)
+
+
+# ==== torch/utils_caffe2/graph_transform.py ===================================
+
+
+class IllegalGraphTransformError(ValueError):
+ """When a graph transform function call can't be executed."""
+
+
+def _rename_versioned_blob_in_proto(
+ proto: caffe2_pb2.NetDef,
+ old_name: str,
+ new_name: str,
+ version: int,
+ ssa: List[Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]],
+ start_versions: Dict[str, int],
+ end_versions: Dict[str, int],
+):
+ """In given proto, rename all blobs with matched version"""
+ # Operater list
+ for op, i_th_ssa in zip(proto.op, ssa):
+ versioned_inputs, versioned_outputs = i_th_ssa
+ for i in range(len(op.input)):
+ if versioned_inputs[i] == (old_name, version):
+ op.input[i] = new_name
+ for i in range(len(op.output)):
+ if versioned_outputs[i] == (old_name, version):
+ op.output[i] = new_name
+ # external_input
+ if start_versions.get(old_name, 0) == version:
+ for i in range(len(proto.external_input)):
+ if proto.external_input[i] == old_name:
+ proto.external_input[i] = new_name
+ # external_output
+ if end_versions.get(old_name, 0) == version:
+ for i in range(len(proto.external_output)):
+ if proto.external_output[i] == old_name:
+ proto.external_output[i] = new_name
+
+
+def rename_op_input(
+ predict_net: caffe2_pb2.NetDef,
+ init_net: caffe2_pb2.NetDef,
+ op_id: int,
+ input_id: int,
+ new_name: str,
+ from_producer: bool = False,
+):
+ """
+    For the op_id-th operator in predict_net, rename its input_id-th input to
+    the new_name. It also does the automatic re-routing and changes
+    external_input and init_net if necessary.
+    - It requires that the input is only consumed by this op.
+    - This function modifies predict_net and init_net in-place.
+    - When from_producer is enabled, this also updates other operators that consume
+      the same input. Be cautious because this may trigger unintended behavior.
+ """
+ assert isinstance(predict_net, caffe2_pb2.NetDef)
+ assert isinstance(init_net, caffe2_pb2.NetDef)
+
+ init_net_ssa, init_net_versions = core.get_ssa(init_net)
+ predict_net_ssa, predict_net_versions = core.get_ssa(
+ predict_net, copy.deepcopy(init_net_versions)
+ )
+
+ versioned_inputs, versioned_outputs = predict_net_ssa[op_id]
+ old_name, version = versioned_inputs[input_id]
+
+ if from_producer:
+ producer_map = get_producer_map(predict_net_ssa)
+ if not (old_name, version) in producer_map:
+ raise NotImplementedError(
+ "Can't find producer, the input {} is probably from"
+ " init_net, this is not supported yet.".format(old_name)
+ )
+ producer = producer_map[(old_name, version)]
+ rename_op_output(predict_net, producer[0], producer[1], new_name)
+ return
+
+ def contain_targets(op_ssa):
+ return (old_name, version) in op_ssa[0]
+
+ is_consumer = [contain_targets(op_ssa) for op_ssa in predict_net_ssa]
+ if sum(is_consumer) > 1:
+ raise IllegalGraphTransformError(
+ (
+                "Input '{}' of operator(#{}) is consumed by other ops, please use"
+                + " rename_op_output on the producer instead. Offending op: \n{}"
+ ).format(old_name, op_id, predict_net.op[op_id])
+ )
+
+ # update init_net
+ _rename_versioned_blob_in_proto(
+ init_net, old_name, new_name, version, init_net_ssa, {}, init_net_versions
+ )
+ # update predict_net
+ _rename_versioned_blob_in_proto(
+ predict_net,
+ old_name,
+ new_name,
+ version,
+ predict_net_ssa,
+ init_net_versions,
+ predict_net_versions,
+ )
+
+
+def rename_op_output(predict_net: caffe2_pb2.NetDef, op_id: int, output_id: int, new_name: str):
+ """
+    For the op_id-th operator in predict_net, rename its output_id-th output to
+    the new_name. It also does the automatic re-routing and changes
+    external_output if necessary.
+    - It allows multiple consumers of its output.
+    - This function modifies predict_net in-place and doesn't need init_net.
+ """
+ assert isinstance(predict_net, caffe2_pb2.NetDef)
+
+ ssa, blob_versions = core.get_ssa(predict_net)
+
+ versioned_inputs, versioned_outputs = ssa[op_id]
+ old_name, version = versioned_outputs[output_id]
+
+ # update predict_net
+ _rename_versioned_blob_in_proto(
+ predict_net, old_name, new_name, version, ssa, {}, blob_versions
+ )
+
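+# A usage sketch (op/blob indices and names are hypothetical): rename the first
+# output of op #3, letting downstream references be re-routed automatically, or
+# rename an input that is consumed only by op #7:
+#
+#   rename_op_output(predict_net, 3, 0, "scores")
+#   rename_op_input(predict_net, init_net, 7, 0, "feat")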
+
+def get_sub_graph_external_input_output(
+ predict_net: caffe2_pb2.NetDef, sub_graph_op_indices: List[int]
+) -> Tuple[List[Tuple[str, int]], List[Tuple[str, int]]]:
+ """
+    Return the lists of external inputs/outputs of the sub-graph;
+    each element is a tuple of the blob name and its corresponding version in predict_net.
+
+    External input/output is defined the same way as for a caffe2 NetDef.
+ """
+ ssa, versions = core.get_ssa(predict_net)
+
+ all_inputs = []
+ all_outputs = []
+ for op_id in sub_graph_op_indices:
+ all_inputs += [inp for inp in ssa[op_id][0] if inp not in all_inputs]
+ all_outputs += list(ssa[op_id][1]) # ssa output won't repeat
+
+    # for versioned blobs, external inputs are just those blobs that are in
+    # all_inputs but not in all_outputs
+ ext_inputs = [inp for inp in all_inputs if inp not in all_outputs]
+
+ # external outputs are essentially outputs of this subgraph that are used
+ # outside of this sub-graph (including predict_net.external_output)
+ all_other_inputs = sum(
+ (ssa[i][0] for i in range(len(ssa)) if i not in sub_graph_op_indices),
+ [(outp, versions[outp]) for outp in predict_net.external_output],
+ )
+ ext_outputs = [outp for outp in all_outputs if outp in set(all_other_inputs)]
+
+ return ext_inputs, ext_outputs
+
+
+class DiGraph:
+    """A DAG representation of a caffe2 graph; each vertex is a versioned blob."""
+
+ def __init__(self):
+ self.vertices = set()
+ self.graph = collections.defaultdict(list)
+
+ def add_edge(self, u, v):
+ self.graph[u].append(v)
+ self.vertices.add(u)
+ self.vertices.add(v)
+
+ # grab from https://www.geeksforgeeks.org/find-paths-given-source-destination/
+ def get_all_paths(self, s, d):
+ visited = {k: False for k in self.vertices}
+ path = []
+ all_paths = []
+
+ def _get_all_paths_util(graph, u, d, visited, path):
+ visited[u] = True
+ path.append(u)
+ if u == d:
+ all_paths.append(copy.deepcopy(path))
+ else:
+ for i in graph[u]:
+ if not visited[i]:
+ _get_all_paths_util(graph, i, d, visited, path)
+ path.pop()
+ visited[u] = False
+
+ _get_all_paths_util(self.graph, s, d, visited, path)
+ return all_paths
+
+ @staticmethod
+ def from_ssa(ssa):
+ graph = DiGraph()
+ for op_id in range(len(ssa)):
+ for inp in ssa[op_id][0]:
+ for outp in ssa[op_id][1]:
+ graph.add_edge(inp, outp)
+ return graph
+
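+# A tiny sketch: enumerate all paths between two versioned blobs:
+#
+#   g = DiGraph()
+#   g.add_edge(("x", 0), ("y", 0))
+#   g.add_edge(("y", 0), ("z", 0))
+#   g.get_all_paths(("x", 0), ("z", 0))   # -> [[("x", 0), ("y", 0), ("z", 0)]]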
+
+def _get_dependency_chain(ssa, versioned_target, versioned_source):
+ """
+    Return the list of indices of the operators needed to produce the target blob
+    from the source blob; if there's no dependency, return an empty list.
+ """
+
+    # Finding all paths between nodes can be O(N!), thus we only search within
+    # the sub-graph of ops ranging from the first consumer of the source blob
+    # to the producer of the target blob.
+ consumer_map = get_consumer_map(ssa)
+ producer_map = get_producer_map(ssa)
+ start_op = min(x[0] for x in consumer_map[versioned_source]) - 15
+ end_op = (
+ producer_map[versioned_target][0] + 15 if versioned_target in producer_map else start_op
+ )
+ sub_graph_ssa = ssa[start_op : end_op + 1]
+ if len(sub_graph_ssa) > 30:
+ logger.warning(
+            "Subgraph between {} and {} is large (from op#{} to op#{}); it"
+            " might take non-trivial time to find all paths between them.".format(
+ versioned_source, versioned_target, start_op, end_op
+ )
+ )
+
+ dag = DiGraph.from_ssa(sub_graph_ssa)
+ paths = dag.get_all_paths(versioned_source, versioned_target) # include two ends
+ ops_in_paths = [[producer_map[blob][0] for blob in path[1:]] for path in paths]
+ return sorted(set().union(*[set(ops) for ops in ops_in_paths]))
+
+
+def identify_reshape_sub_graph(predict_net: caffe2_pb2.NetDef) -> List[List[int]]:
+ """
+    Identify the reshape sub-graphs in a protobuf.
+ The reshape sub-graph is defined as matching the following pattern:
+
+ (input_blob) -> Op_1 -> ... -> Op_N -> (new_shape) -─┐
+ └-------------------------------------------> Reshape -> (output_blob)
+
+ Return:
+        List of sub-graphs; each sub-graph is represented as a list of indices
+        of the relevant ops, [Op_1, Op_2, ..., Op_N, Reshape]
+ """
+
+ ssa, _ = core.get_ssa(predict_net)
+
+ ret = []
+ for i, op in enumerate(predict_net.op):
+ if op.type == "Reshape":
+ assert len(op.input) == 2
+ input_ssa = ssa[i][0]
+ data_source = input_ssa[0]
+ shape_source = input_ssa[1]
+ op_indices = _get_dependency_chain(ssa, shape_source, data_source)
+ ret.append(op_indices + [i])
+ return ret
+
+
+def remove_reshape_for_fc(predict_net, params):
+ """
+    In PyTorch, nn.Linear has to take a 2D tensor, which often leads to reshaping
+    a 4D tensor to 2D by calling .view(). However, this (dynamic) reshaping
+    doesn't work well with ONNX and Int8 tools, and causes extra
+    ops (e.g. ExpandDims) that might not be available on mobile.
+    Luckily Caffe2 supports 4D tensors for FC, so we can remove those reshapes
+    after exporting the ONNX model.
+ """
+ from caffe2.python import core
+
+    # Find all reshape sub-graphs that can be removed, which for now means all
+    # Reshape sub-graphs whose output is only consumed by FC.
+    # TODO: to make it safer, we may need the actual values to better determine
+    # whether a Reshape before FC is removable.
+ reshape_sub_graphs = identify_reshape_sub_graph(predict_net)
+ sub_graphs_to_remove = []
+ for reshape_sub_graph in reshape_sub_graphs:
+ reshape_op_id = reshape_sub_graph[-1]
+ assert predict_net.op[reshape_op_id].type == "Reshape"
+ ssa, _ = core.get_ssa(predict_net)
+ reshape_output = ssa[reshape_op_id][1][0]
+ consumers = [i for i in range(len(ssa)) if reshape_output in ssa[i][0]]
+ if all(predict_net.op[consumer].type == "FC" for consumer in consumers):
+            # safety check that the sub-graph is isolated; for this reshape sub-graph,
+            # it means it has one non-param external input and one external output.
+ ext_inputs, ext_outputs = get_sub_graph_external_input_output(
+ predict_net, reshape_sub_graph
+ )
+ non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
+ if len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1:
+ sub_graphs_to_remove.append(reshape_sub_graph)
+
+    # Remove each sub-graph by:
+    # 1: renaming the Reshape's output to its input, so the sub-graph can be
+    #    seen as an in-place identity whose external input and output are the same;
+    # 2: simply removing those ops.
+ remove_op_ids = []
+ params_to_remove = []
+ for sub_graph in sub_graphs_to_remove:
+ logger.info(
+ "Remove Reshape sub-graph:\n{}".format(
+ "".join(["(#{:>4})\n{}".format(i, predict_net.op[i]) for i in sub_graph])
+ )
+ )
+ reshape_op_id = sub_graph[-1]
+        new_reshape_output = predict_net.op[reshape_op_id].input[0]
+        rename_op_output(predict_net, reshape_op_id, 0, new_reshape_output)
+ ext_inputs, ext_outputs = get_sub_graph_external_input_output(predict_net, sub_graph)
+ non_params_ext_inputs = [inp for inp in ext_inputs if inp[1] != 0]
+ params_ext_inputs = [inp for inp in ext_inputs if inp[1] == 0]
+ assert len(non_params_ext_inputs) == 1 and len(ext_outputs) == 1
+ assert ext_outputs[0][0] == non_params_ext_inputs[0][0]
+ assert ext_outputs[0][1] == non_params_ext_inputs[0][1] + 1
+ remove_op_ids.extend(sub_graph)
+ params_to_remove.extend(params_ext_inputs)
+
+ predict_net = copy.deepcopy(predict_net)
+ new_ops = [op for i, op in enumerate(predict_net.op) if i not in remove_op_ids]
+ del predict_net.op[:]
+ predict_net.op.extend(new_ops)
+ for versioned_params in params_to_remove:
+ name = versioned_params[0]
+ logger.info("Remove params: {} from init_net and predict_net.external_input".format(name))
+ del params[name]
+ predict_net.external_input.remove(name)
+
+ return predict_net, params
+
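+# A usage sketch: the function returns a (possibly rewritten) NetDef and the
+# pruned params dict, so re-bind both return values:
+#
+#   predict_net, params = remove_reshape_for_fc(predict_net, params)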
+
+def fuse_copy_between_cpu_and_gpu(predict_net: caffe2_pb2.NetDef):
+ """
+ In-place fuse extra copy ops between cpu/gpu for the following case:
+ a -CopyAToB-> b -CopyBToA> c1 -NextOp1-> d1
+ -CopyBToA> c2 -NextOp2-> d2
+ The fused network will look like:
+ a -NextOp1-> d1
+ -NextOp2-> d2
+ """
+
+ _COPY_OPS = ["CopyCPUToGPU", "CopyGPUToCPU"]
+
+ def _fuse_once(predict_net):
+ ssa, blob_versions = core.get_ssa(predict_net)
+ consumer_map = get_consumer_map(ssa)
+ versioned_external_output = [
+ (name, blob_versions[name]) for name in predict_net.external_output
+ ]
+
+ for op_id, op in enumerate(predict_net.op):
+ if op.type in _COPY_OPS:
+ fw_copy_versioned_output = ssa[op_id][1][0]
+ consumer_ids = [x[0] for x in consumer_map[fw_copy_versioned_output]]
+ reverse_op_type = _COPY_OPS[1 - _COPY_OPS.index(op.type)]
+
+ is_fusable = (
+ len(consumer_ids) > 0
+ and fw_copy_versioned_output not in versioned_external_output
+ and all(
+ predict_net.op[_op_id].type == reverse_op_type
+ and ssa[_op_id][1][0] not in versioned_external_output
+ for _op_id in consumer_ids
+ )
+ )
+
+ if is_fusable:
+ for rv_copy_op_id in consumer_ids:
+                        # make each NextOp use "a" directly, then remove the Copy ops
+ rs_copy_versioned_output = ssa[rv_copy_op_id][1][0]
+ next_op_id, inp_id = consumer_map[rs_copy_versioned_output][0]
+ predict_net.op[next_op_id].input[inp_id] = op.input[0]
+ # remove CopyOps
+ new_ops = [
+ op
+ for i, op in enumerate(predict_net.op)
+ if i != op_id and i not in consumer_ids
+ ]
+ del predict_net.op[:]
+ predict_net.op.extend(new_ops)
+ return True
+
+ return False
+
+    # _fuse_once returns False if nothing can be fused
+ while _fuse_once(predict_net):
+ pass
+
+
+def remove_dead_end_ops(net_def: caffe2_pb2.NetDef):
+    """Remove ops whose outputs are neither in external_output nor consumed by any remaining op"""
+ ssa, versions = core.get_ssa(net_def)
+ versioned_external_output = [(name, versions[name]) for name in net_def.external_output]
+ consumer_map = get_consumer_map(ssa)
+ removed_op_ids = set()
+
+ def _is_dead_end(versioned_blob):
+ return not (
+ versioned_blob in versioned_external_output
+ or (
+ len(consumer_map[versioned_blob]) > 0
+ and all(x[0] not in removed_op_ids for x in consumer_map[versioned_blob])
+ )
+ )
+
+ for i, ssa_i in reversed(list(enumerate(ssa))):
+ versioned_outputs = ssa_i[1]
+ if all(_is_dead_end(outp) for outp in versioned_outputs):
+ removed_op_ids.add(i)
+
+    # simply removing those dead-end ops should have no effect on external_output
+ new_ops = [op for i, op in enumerate(net_def.op) if i not in removed_op_ids]
+ del net_def.op[:]
+ net_def.op.extend(new_ops)
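+
+
+# A typical cleanup sketch after graph surgery (both helpers operate in-place on
+# a predict_net NetDef assumed to exist):
+#
+#   fuse_copy_between_cpu_and_gpu(predict_net)
+#   remove_dead_end_ops(predict_net)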
diff --git a/detectron2/export/torchscript.py b/detectron2/export/torchscript.py
new file mode 100644
index 0000000000000000000000000000000000000000..24fe59bda44225324928542df3f2ef1745375dfd
--- /dev/null
+++ b/detectron2/export/torchscript.py
@@ -0,0 +1,132 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import os
+import torch
+
+from detectron2.utils.file_io import PathManager
+
+from .torchscript_patch import freeze_training_mode, patch_instances
+
+__all__ = ["scripting_with_instances", "dump_torchscript_IR"]
+
+
+def scripting_with_instances(model, fields):
+ """
+    Run :func:`torch.jit.script` on a model that uses the :class:`Instances` class. Since
+    attributes of :class:`Instances` are "dynamically" added in eager mode, it is difficult
+    for scripting to support them out of the box. This function is made to support scripting
+    a model that uses :class:`Instances`. It does the following:
+
+    1. Create a scriptable ``new_Instances`` class which behaves similarly to ``Instances``,
+       but with all attributes being "static".
+       The attributes need to be statically declared in the ``fields`` argument.
+    2. Register ``new_Instances``, and force the scripting compiler to
+       use it when trying to compile ``Instances``.
+
+    After this function returns, the changes are reverted, so the user can script another
+    model using different fields.
+
+ Example:
+ Assume that ``Instances`` in the model consist of two attributes named
+ ``proposal_boxes`` and ``objectness_logits`` with type :class:`Boxes` and
+ :class:`Tensor` respectively during inference. You can call this function like:
+ ::
+ fields = {"proposal_boxes": Boxes, "objectness_logits": torch.Tensor}
+        torchscript_model = scripting_with_instances(model, fields)
+
+ Note:
+        It only supports models in evaluation mode.
+
+ Args:
+ model (nn.Module): The input model to be exported by scripting.
+ fields (Dict[str, type]): Attribute names and corresponding type that
+ ``Instances`` will use in the model. Note that all attributes used in ``Instances``
+ need to be added, regardless of whether they are inputs/outputs of the model.
+            Data types not defined in detectron2 are not supported for now.
+
+ Returns:
+ torch.jit.ScriptModule: the model in torchscript format
+ """
+ assert (
+ not model.training
+ ), "Currently we only support exporting models in evaluation mode to torchscript"
+
+ with freeze_training_mode(model), patch_instances(fields):
+ scripted_model = torch.jit.script(model)
+ return scripted_model
+
+
+# alias for old name
+export_torchscript_with_instances = scripting_with_instances
+
+
+def dump_torchscript_IR(model, dir):
+ """
+    Dump IR of a TracedModule/ScriptModule/Function in various formats (code, graph,
+ inlined graph). Useful for debugging.
+
+ Args:
+        model (TracedModule/ScriptModule/ScriptFunction): traced or scripted module
+ dir (str): output directory to dump files.
+ """
+ dir = os.path.expanduser(dir)
+ PathManager.mkdirs(dir)
+
+ def _get_script_mod(mod):
+ if isinstance(mod, torch.jit.TracedModule):
+ return mod._actual_script_module
+ return mod
+
+ # Dump pretty-printed code: https://pytorch.org/docs/stable/jit.html#inspecting-code
+ with PathManager.open(os.path.join(dir, "model_ts_code.txt"), "w") as f:
+
+ def get_code(mod):
+ # Try a few ways to get code using private attributes.
+ try:
+ # This contains more information than just `mod.code`
+ return _get_script_mod(mod)._c.code
+ except AttributeError:
+ pass
+ try:
+ return mod.code
+ except AttributeError:
+ return None
+
+ def dump_code(prefix, mod):
+ code = get_code(mod)
+ name = prefix or "root model"
+ if code is None:
+                f.write(f"Could not find code for {name} (type={mod.original_name})\n")
+ f.write("\n")
+ else:
+ f.write(f"\nCode for {name}, type={mod.original_name}:\n")
+ f.write(code)
+ f.write("\n")
+ f.write("-" * 80)
+
+ for name, m in mod.named_children():
+ dump_code(prefix + "." + name, m)
+
+ if isinstance(model, torch.jit.ScriptFunction):
+ f.write(get_code(model))
+ else:
+ dump_code("", model)
+
+ def _get_graph(model):
+ try:
+ # Recursively dump IR of all modules
+ return _get_script_mod(model)._c.dump_to_str(True, False, False)
+ except AttributeError:
+ return model.graph.str()
+
+ with PathManager.open(os.path.join(dir, "model_ts_IR.txt"), "w") as f:
+ f.write(_get_graph(model))
+
+ # Dump IR of the entire graph (all submodules inlined)
+ with PathManager.open(os.path.join(dir, "model_ts_IR_inlined.txt"), "w") as f:
+ f.write(str(model.inlined_graph))
+
+ if not isinstance(model, torch.jit.ScriptFunction):
+ # Dump the model structure in pytorch style
+ with PathManager.open(os.path.join(dir, "model.txt"), "w") as f:
+ f.write(str(model))
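+
+
+# A combined usage sketch (field names and the output directory are illustrative;
+# Boxes comes from detectron2.structures):
+#
+#   fields = {"pred_boxes": Boxes, "scores": torch.Tensor}
+#   ts_model = scripting_with_instances(model.eval(), fields)
+#   dump_torchscript_IR(ts_model, "./ts_dump")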
diff --git a/detectron2/export/torchscript_patch.py b/detectron2/export/torchscript_patch.py
new file mode 100644
index 0000000000000000000000000000000000000000..da9b324f1582e31d1a16d2fe462ac2989bea56ea
--- /dev/null
+++ b/detectron2/export/torchscript_patch.py
@@ -0,0 +1,406 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import os
+import sys
+import tempfile
+from contextlib import ExitStack, contextmanager
+from copy import deepcopy
+from unittest import mock
+import torch
+from torch import nn
+
+# need some explicit imports due to https://github.com/pytorch/pytorch/issues/38964
+import detectron2 # noqa F401
+from detectron2.structures import Boxes, Instances
+from detectron2.utils.env import _import_file
+
+_counter = 0
+
+
+def _clear_jit_cache():
+ from torch.jit._recursive import concrete_type_store
+ from torch.jit._state import _jit_caching_layer
+
+ concrete_type_store.type_store.clear() # for modules
+ _jit_caching_layer.clear() # for free functions
+
+
+def _add_instances_conversion_methods(newInstances):
+ """
+ Add from_instances methods to the scripted Instances class.
+ """
+ cls_name = newInstances.__name__
+
+ @torch.jit.unused
+ def from_instances(instances: Instances):
+ """
+ Create scripted Instances from original Instances
+ """
+ fields = instances.get_fields()
+ image_size = instances.image_size
+ ret = newInstances(image_size)
+ for name, val in fields.items():
+ assert hasattr(ret, f"_{name}"), f"No attribute named {name} in {cls_name}"
+ setattr(ret, name, deepcopy(val))
+ return ret
+
+ newInstances.from_instances = from_instances
+
+
+@contextmanager
+def patch_instances(fields):
+ """
+ A contextmanager, under which the Instances class in detectron2 is replaced
+ by a statically-typed scriptable class, defined by `fields`.
+ See more in `scripting_with_instances`.
+ """
+
+ with tempfile.TemporaryDirectory(prefix="detectron2") as dir, tempfile.NamedTemporaryFile(
+ mode="w", encoding="utf-8", suffix=".py", dir=dir, delete=False
+ ) as f:
+ try:
+ # Objects that use Instances should not reuse previously-compiled
+ # results in cache, because `Instances` could be a new class each time.
+ _clear_jit_cache()
+
+ cls_name, s = _gen_instance_module(fields)
+ f.write(s)
+ f.flush()
+ f.close()
+
+ module = _import(f.name)
+ new_instances = getattr(module, cls_name)
+ _ = torch.jit.script(new_instances)
+ # let torchscript think Instances was scripted already
+ Instances.__torch_script_class__ = True
+ # let torchscript find new_instances when looking for the jit type of Instances
+ Instances._jit_override_qualname = torch._jit_internal._qualified_name(new_instances)
+
+ _add_instances_conversion_methods(new_instances)
+ yield new_instances
+ finally:
+ try:
+ del Instances.__torch_script_class__
+ del Instances._jit_override_qualname
+ except AttributeError:
+ pass
+ sys.modules.pop(module.__name__)
+
+
+def _gen_instance_class(fields):
+ """
+ Args:
+ fields (dict[name: type])
+ """
+
+ class _FieldType:
+ def __init__(self, name, type_):
+ assert isinstance(name, str), f"Field name must be str, got {name}"
+ self.name = name
+ self.type_ = type_
+ self.annotation = f"{type_.__module__}.{type_.__name__}"
+
+ fields = [_FieldType(k, v) for k, v in fields.items()]
+
+ def indent(level, s):
+ return " " * 4 * level + s
+
+ lines = []
+
+ global _counter
+ _counter += 1
+
+ cls_name = "ScriptedInstances{}".format(_counter)
+
+ field_names = tuple(x.name for x in fields)
+ extra_args = ", ".join([f"{f.name}: Optional[{f.annotation}] = None" for f in fields])
+ lines.append(
+ f"""
+class {cls_name}:
+ def __init__(self, image_size: Tuple[int, int], {extra_args}):
+ self.image_size = image_size
+ self._field_names = {field_names}
+"""
+ )
+
+ for f in fields:
+ lines.append(
+ indent(2, f"self._{f.name} = torch.jit.annotate(Optional[{f.annotation}], {f.name})")
+ )
+
+ for f in fields:
+ lines.append(
+ f"""
+ @property
+ def {f.name}(self) -> {f.annotation}:
+ # has to use a local for type refinement
+ # https://pytorch.org/docs/stable/jit_language_reference.html#optional-type-refinement
+ t = self._{f.name}
+ assert t is not None, "{f.name} is None and cannot be accessed!"
+ return t
+
+ @{f.name}.setter
+ def {f.name}(self, value: {f.annotation}) -> None:
+ self._{f.name} = value
+"""
+ )
+
+ # support method `__len__`
+ lines.append(
+ """
+ def __len__(self) -> int:
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ return len(t)
+"""
+ )
+ lines.append(
+ """
+ raise NotImplementedError("Empty Instances does not support __len__!")
+"""
+ )
+
+ # support method `has`
+ lines.append(
+ """
+ def has(self, name: str) -> bool:
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ if name == "{f.name}":
+ return self._{f.name} is not None
+"""
+ )
+ lines.append(
+ """
+ return False
+"""
+ )
+
+ # support method `to`
+ none_args = ", None" * len(fields)
+ lines.append(
+ f"""
+ def to(self, device: torch.device) -> "{cls_name}":
+ ret = {cls_name}(self.image_size{none_args})
+"""
+ )
+ for f in fields:
+ if hasattr(f.type_, "to"):
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ ret._{f.name} = t.to(device)
+"""
+ )
+ else:
+ # For now, ignore fields that cannot be moved to devices.
+ # Maybe can support other tensor-like classes (e.g. __torch_function__)
+ pass
+ lines.append(
+ """
+ return ret
+"""
+ )
+
+ # support method `getitem`
+ none_args = ", None" * len(fields)
+ lines.append(
+ f"""
+ def __getitem__(self, item) -> "{cls_name}":
+ ret = {cls_name}(self.image_size{none_args})
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ ret._{f.name} = t[item]
+"""
+ )
+ lines.append(
+ """
+ return ret
+"""
+ )
+
+ # support method `cat`
+ # this version does not contain checks that all instances have same size and fields
+ none_args = ", None" * len(fields)
+ lines.append(
+ f"""
+ def cat(self, instances: List["{cls_name}"]) -> "{cls_name}":
+ ret = {cls_name}(self.image_size{none_args})
+"""
+ )
+ for f in fields:
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ values: List[{f.annotation}] = [x.{f.name} for x in instances]
+ if torch.jit.isinstance(t, torch.Tensor):
+ ret._{f.name} = torch.cat(values, dim=0)
+ else:
+ ret._{f.name} = t.cat(values)
+"""
+ )
+ lines.append(
+ """
+ return ret"""
+ )
+
+ # support method `get_fields()`
+ lines.append(
+ """
+ def get_fields(self) -> Dict[str, Tensor]:
+ ret = {}
+ """
+ )
+ for f in fields:
+ if f.type_ == Boxes:
+ stmt = "t.tensor"
+ elif f.type_ == torch.Tensor:
+ stmt = "t"
+ else:
+ stmt = f'assert False, "unsupported type {str(f.type_)}"'
+ lines.append(
+ f"""
+ t = self._{f.name}
+ if t is not None:
+ ret["{f.name}"] = {stmt}
+ """
+ )
+ lines.append(
+ """
+ return ret"""
+ )
+ return cls_name, os.linesep.join(lines)
+
+
+def _gen_instance_module(fields):
+ # TODO: find a more automatic way to enable import of other classes
+ s = """
+from copy import deepcopy
+import torch
+from torch import Tensor
+import typing
+from typing import *
+
+import detectron2
+from detectron2.structures import Boxes, Instances
+
+"""
+
+ cls_name, cls_def = _gen_instance_class(fields)
+ s += cls_def
+ return cls_name, s
+
+
+def _import(path):
+ return _import_file(
+ "{}{}".format(sys.modules[__name__].__name__, _counter), path, make_importable=True
+ )
+
+
+@contextmanager
+def patch_builtin_len(modules=()):
+ """
+ Patch the builtin len() function of a few detectron2 modules
+ to use __len__ instead, because __len__ does not convert values to
+ integers and therefore is friendly to tracing.
+
+ Args:
+        modules (list[str]): names of extra modules to patch len(), in
+ addition to those in detectron2.
+ """
+
+ def _new_len(obj):
+ return obj.__len__()
+
+ with ExitStack() as stack:
+ MODULES = [
+ "detectron2.modeling.roi_heads.fast_rcnn",
+ "detectron2.modeling.roi_heads.mask_head",
+ "detectron2.modeling.roi_heads.keypoint_head",
+ ] + list(modules)
+ ctxs = [stack.enter_context(mock.patch(mod + ".len")) for mod in MODULES]
+ for m in ctxs:
+ m.side_effect = _new_len
+ yield
+
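+# A usage sketch: trace a model while len() inside the patched detectron2 modules
+# dispatches to __len__ (the model and example inputs are assumed to exist):
+#
+#   with patch_builtin_len():
+#       traced = torch.jit.trace(model, example_inputs)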
+
+def patch_nonscriptable_classes():
+ """
+ Apply patches on a few nonscriptable detectron2 classes.
+ Should not have side-effects on eager usage.
+ """
+ # __prepare_scriptable__ can also be added to models for easier maintenance.
+ # But it complicates the clean model code.
+
+ from detectron2.modeling.backbone import ResNet, FPN
+
+ # Due to https://github.com/pytorch/pytorch/issues/36061,
+ # we change backbone to use ModuleList for scripting.
+ # (note: this changes param names in state_dict)
+
+ def prepare_resnet(self):
+ ret = deepcopy(self)
+ ret.stages = nn.ModuleList(ret.stages)
+ for k in self.stage_names:
+ delattr(ret, k)
+ return ret
+
+ ResNet.__prepare_scriptable__ = prepare_resnet
+
+ def prepare_fpn(self):
+ ret = deepcopy(self)
+ ret.lateral_convs = nn.ModuleList(ret.lateral_convs)
+ ret.output_convs = nn.ModuleList(ret.output_convs)
+ for name, _ in self.named_children():
+ if name.startswith("fpn_"):
+ delattr(ret, name)
+ return ret
+
+ FPN.__prepare_scriptable__ = prepare_fpn
+
+ # Annotate some attributes to be constants for the purpose of scripting,
+ # even though they are not constants in eager mode.
+ from detectron2.modeling.roi_heads import StandardROIHeads
+
+ if hasattr(StandardROIHeads, "__annotations__"):
+ # copy first to avoid editing annotations of base class
+ StandardROIHeads.__annotations__ = deepcopy(StandardROIHeads.__annotations__)
+ StandardROIHeads.__annotations__["mask_on"] = torch.jit.Final[bool]
+ StandardROIHeads.__annotations__["keypoint_on"] = torch.jit.Final[bool]
+
+
+# These patches are not supposed to have side-effects.
+patch_nonscriptable_classes()
+
+
+@contextmanager
+def freeze_training_mode(model):
+ """
+    A context manager that annotates the "training" attribute of every submodule
+    as constant, so that the training codepath in these modules can be
+    meta-compiled away. Upon exit, the annotations are reverted.
+ """
+ classes = {type(x) for x in model.modules()}
+    # __constants__ is the old way to annotate constants and is not compatible
+    # with __annotations__.
+ classes = {x for x in classes if not hasattr(x, "__constants__")}
+ for cls in classes:
+ cls.__annotations__["training"] = torch.jit.Final[bool]
+ yield
+ for cls in classes:
+ cls.__annotations__["training"] = bool
diff --git a/detectron2/layers/__init__.py b/detectron2/layers/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..761a3d1c7afa049e9779ee9fc4d299e9aae38cad
--- /dev/null
+++ b/detectron2/layers/__init__.py
@@ -0,0 +1,26 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+from .batch_norm import FrozenBatchNorm2d, get_norm, NaiveSyncBatchNorm, CycleBatchNormList
+from .deform_conv import DeformConv, ModulatedDeformConv
+from .mask_ops import paste_masks_in_image
+from .nms import batched_nms, batched_nms_rotated, nms, nms_rotated
+from .roi_align import ROIAlign, roi_align
+from .roi_align_rotated import ROIAlignRotated, roi_align_rotated
+from .shape_spec import ShapeSpec
+from .wrappers import (
+ BatchNorm2d,
+ Conv2d,
+ ConvTranspose2d,
+ cat,
+ interpolate,
+ Linear,
+ nonzero_tuple,
+ cross_entropy,
+ empty_input_loss_func_wrapper,
+ shapes_to_tensor,
+ move_device_like,
+)
+from .blocks import CNNBlockBase, DepthwiseSeparableConv2d
+from .aspp import ASPP
+from .losses import ciou_loss, diou_loss
+
+__all__ = [k for k in globals().keys() if not k.startswith("_")]
diff --git a/detectron2/layers/aspp.py b/detectron2/layers/aspp.py
new file mode 100644
index 0000000000000000000000000000000000000000..14861aa9ede4fea6a69a49f189bcab997b558148
--- /dev/null
+++ b/detectron2/layers/aspp.py
@@ -0,0 +1,144 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+from copy import deepcopy
+import fvcore.nn.weight_init as weight_init
+import torch
+from torch import nn
+from torch.nn import functional as F
+
+from .batch_norm import get_norm
+from .blocks import DepthwiseSeparableConv2d
+from .wrappers import Conv2d
+
+
+class ASPP(nn.Module):
+ """
+ Atrous Spatial Pyramid Pooling (ASPP).
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ dilations,
+ *,
+ norm,
+ activation,
+ pool_kernel_size=None,
+ dropout: float = 0.0,
+ use_depthwise_separable_conv=False,
+ ):
+ """
+ Args:
+ in_channels (int): number of input channels for ASPP.
+ out_channels (int): number of output channels.
+ dilations (list): a list of 3 dilations in ASPP.
+ norm (str or callable): normalization for all conv layers.
+ See :func:`layers.get_norm` for supported format. norm is
+ applied to all conv layers except the conv following
+ global average pooling.
+ activation (callable): activation function.
+            pool_kernel_size (tuple, list): the average pooling size (kh, kw)
+                for image pooling layer in ASPP. If set to None, it always
+                performs global average pooling. If not None, the input shape
+                in forward() must be divisible by it. It is recommended
+                to use a fixed input feature size in training, and set this
+                option to match this size, so that it performs global average
+                pooling in training, and the size of the pooling window stays
+                consistent in inference.
+ dropout (float): apply dropout on the output of ASPP. It is used in
+ the official DeepLab implementation with a rate of 0.1:
+ https://github.com/tensorflow/models/blob/21b73d22f3ed05b650e85ac50849408dd36de32e/research/deeplab/model.py#L532 # noqa
+ use_depthwise_separable_conv (bool): use DepthwiseSeparableConv2d
+ for 3x3 convs in ASPP, proposed in :paper:`DeepLabV3+`.
+ """
+ super(ASPP, self).__init__()
+ assert len(dilations) == 3, "ASPP expects 3 dilations, got {}".format(len(dilations))
+ self.pool_kernel_size = pool_kernel_size
+ self.dropout = dropout
+ use_bias = norm == ""
+ self.convs = nn.ModuleList()
+ # conv 1x1
+ self.convs.append(
+ Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ bias=use_bias,
+ norm=get_norm(norm, out_channels),
+ activation=deepcopy(activation),
+ )
+ )
+ weight_init.c2_xavier_fill(self.convs[-1])
+ # atrous convs
+ for dilation in dilations:
+ if use_depthwise_separable_conv:
+ self.convs.append(
+ DepthwiseSeparableConv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=dilation,
+ dilation=dilation,
+ norm1=norm,
+ activation1=deepcopy(activation),
+ norm2=norm,
+ activation2=deepcopy(activation),
+ )
+ )
+ else:
+ self.convs.append(
+ Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=dilation,
+ dilation=dilation,
+ bias=use_bias,
+ norm=get_norm(norm, out_channels),
+ activation=deepcopy(activation),
+ )
+ )
+ weight_init.c2_xavier_fill(self.convs[-1])
+ # image pooling
+        # We do not add BatchNorm because the spatial resolution is 1x1;
+        # note that the original TF implementation does have BatchNorm here.
+ if pool_kernel_size is None:
+ image_pooling = nn.Sequential(
+ nn.AdaptiveAvgPool2d(1),
+ Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
+ )
+ else:
+ image_pooling = nn.Sequential(
+ nn.AvgPool2d(kernel_size=pool_kernel_size, stride=1),
+ Conv2d(in_channels, out_channels, 1, bias=True, activation=deepcopy(activation)),
+ )
+ weight_init.c2_xavier_fill(image_pooling[1])
+ self.convs.append(image_pooling)
+
+ self.project = Conv2d(
+ 5 * out_channels,
+ out_channels,
+ kernel_size=1,
+ bias=use_bias,
+ norm=get_norm(norm, out_channels),
+ activation=deepcopy(activation),
+ )
+ weight_init.c2_xavier_fill(self.project)
+
+ def forward(self, x):
+ size = x.shape[-2:]
+ if self.pool_kernel_size is not None:
+ if size[0] % self.pool_kernel_size[0] or size[1] % self.pool_kernel_size[1]:
+ raise ValueError(
+                    "Input size must be divisible by `pool_kernel_size`. "
+ "Input size: {} `pool_kernel_size`: {}".format(size, self.pool_kernel_size)
+ )
+ res = []
+ for conv in self.convs:
+ res.append(conv(x))
+ res[-1] = F.interpolate(res[-1], size=size, mode="bilinear", align_corners=False)
+ res = torch.cat(res, dim=1)
+ res = self.project(res)
+ res = F.dropout(res, self.dropout, training=self.training) if self.dropout > 0 else res
+ return res
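+
+
+# A construction sketch with typical DeepLabV3-style settings (channel sizes and
+# the input tensor are illustrative):
+#
+#   aspp = ASPP(2048, 256, dilations=[6, 12, 18], norm="BN", activation=F.relu)
+#   y = aspp(torch.rand(1, 2048, 32, 32))   # -> shape (1, 256, 32, 32)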
diff --git a/detectron2/layers/batch_norm.py b/detectron2/layers/batch_norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..d304061ecf36dc1ebacccf19a154b8ba2fe8e785
--- /dev/null
+++ b/detectron2/layers/batch_norm.py
@@ -0,0 +1,353 @@
+# Copyright (c) Facebook, Inc. and its affiliates.
+import torch
+import torch.distributed as dist
+from fvcore.nn.distributed import differentiable_all_reduce
+from torch import nn
+from torch.nn import functional as F
+
+from detectron2.utils import comm, env
+
+from .wrappers import BatchNorm2d
+
+
+class FrozenBatchNorm2d(nn.Module):
+ """
+ BatchNorm2d where the batch statistics and the affine parameters are fixed.
+
+ It contains non-trainable buffers called
+ "weight" and "bias", "running_mean", "running_var",
+ initialized to perform identity transformation.
+
+ The pre-trained backbone models from Caffe2 only contain "weight" and "bias",
+ which are computed from the original four parameters of BN.
+ The affine transform `x * weight + bias` will perform the equivalent
+ computation of `(x - running_mean) / sqrt(running_var) * weight + bias`.
+ When loading a backbone model from Caffe2, "running_mean" and "running_var"
+ will be left unchanged as identity transformation.
+
+ Other pre-trained backbone models may contain all 4 parameters.
+
+ The forward is implemented by `F.batch_norm(..., training=False)`.
+ """
+
+ _version = 3
+
+ def __init__(self, num_features, eps=1e-5):
+ super().__init__()
+ self.num_features = num_features
+ self.eps = eps
+ self.register_buffer("weight", torch.ones(num_features))
+ self.register_buffer("bias", torch.zeros(num_features))
+ self.register_buffer("running_mean", torch.zeros(num_features))
+ self.register_buffer("running_var", torch.ones(num_features) - eps)
+ self.register_buffer("num_batches_tracked", None)
+
+ def forward(self, x):
+ if x.requires_grad:
+ # When gradients are needed, F.batch_norm will use extra memory
+ # because its backward op computes gradients for weight/bias as well.
+ scale = self.weight * (self.running_var + self.eps).rsqrt()
+ bias = self.bias - self.running_mean * scale
+ scale = scale.reshape(1, -1, 1, 1)
+ bias = bias.reshape(1, -1, 1, 1)
+ out_dtype = x.dtype # may be half
+ return x * scale.to(out_dtype) + bias.to(out_dtype)
+ else:
+ # When gradients are not needed, F.batch_norm is a single fused op
+ # and provides more optimization opportunities.
+ return F.batch_norm(
+ x,
+ self.running_mean,
+ self.running_var,
+ self.weight,
+ self.bias,
+ training=False,
+ eps=self.eps,
+ )
+
+ def _load_from_state_dict(
+ self,
+ state_dict,
+ prefix,
+ local_metadata,
+ strict,
+ missing_keys,
+ unexpected_keys,
+ error_msgs,
+ ):
+ version = local_metadata.get("version", None)
+
+ if version is None or version < 2:
+ # No running_mean/var in early versions
+ # This silences the warnings about missing keys.
+ if prefix + "running_mean" not in state_dict:
+ state_dict[prefix + "running_mean"] = torch.zeros_like(self.running_mean)
+ if prefix + "running_var" not in state_dict:
+ state_dict[prefix + "running_var"] = torch.ones_like(self.running_var)
+
+ super()._load_from_state_dict(
+ state_dict,
+ prefix,
+ local_metadata,
+ strict,
+ missing_keys,
+ unexpected_keys,
+ error_msgs,
+ )
+
+ def __repr__(self):
+ return "FrozenBatchNorm2d(num_features={}, eps={})".format(self.num_features, self.eps)
+
+ @classmethod
+ def convert_frozen_batchnorm(cls, module):
+ """
+ Convert all BatchNorm/SyncBatchNorm in module into FrozenBatchNorm.
+
+ Args:
+ module (torch.nn.Module):
+
+ Returns:
+ If module is BatchNorm/SyncBatchNorm, returns a new module.
+ Otherwise, converts the module in place and returns it.
+
+ Similar to convert_sync_batchnorm in
+ https://github.com/pytorch/pytorch/blob/master/torch/nn/modules/batchnorm.py
+ """
+ bn_module = nn.modules.batchnorm
+ bn_module = (bn_module.BatchNorm2d, bn_module.SyncBatchNorm)
+ res = module
+ if isinstance(module, bn_module):
+ res = cls(module.num_features)
+ if module.affine:
+ res.weight.data = module.weight.data.clone().detach()
+ res.bias.data = module.bias.data.clone().detach()
+ res.running_mean.data = module.running_mean.data
+ res.running_var.data = module.running_var.data
+ res.eps = module.eps
+ res.num_batches_tracked = module.num_batches_tracked
+ else:
+ for name, child in module.named_children():
+ new_child = cls.convert_frozen_batchnorm(child)
+ if new_child is not child:
+ res.add_module(name, new_child)
+ return res
+
+ @classmethod
+ def convert_frozenbatchnorm2d_to_batchnorm2d(cls, module: nn.Module) -> nn.Module:
+ """
+ Convert all FrozenBatchNorm2d to BatchNorm2d
+
+ Args:
+ module (torch.nn.Module):
+
+ Returns:
+ If module is FrozenBatchNorm2d, returns a new module.
+ Otherwise, converts the module in place and returns it.
+
+ This is needed for quantization:
+ https://fb.workplace.com/groups/1043663463248667/permalink/1296330057982005/
+ """
+
+ res = module
+ if isinstance(module, FrozenBatchNorm2d):
+ res = torch.nn.BatchNorm2d(module.num_features, module.eps)
+
+ res.weight.data = module.weight.data.clone().detach()
+ res.bias.data = module.bias.data.clone().detach()
+ res.running_mean.data = module.running_mean.data.clone().detach()
+ res.running_var.data = module.running_var.data.clone().detach()
+ res.eps = module.eps
+ res.num_batches_tracked = module.num_batches_tracked
+ else:
+ for name, child in module.named_children():
+ new_child = cls.convert_frozenbatchnorm2d_to_batchnorm2d(child)
+ if new_child is not child:
+ res.add_module(name, new_child)
+ return res
+
+
+def get_norm(norm, out_channels):
+ """
+ Args:
+ norm (str or callable): either one of BN, SyncBN, FrozenBN, GN;
+ or a callable that takes a channel number and returns
+ the normalization layer as a nn.Module.
+
+ Returns:
+ nn.Module or None: the normalization layer
+ """
+ if norm is None:
+ return None
+ if isinstance(norm, str):
+ if len(norm) == 0:
+ return None
+ norm = {
+ "BN": BatchNorm2d,
+ # Fixed in https://github.com/pytorch/pytorch/pull/36382
+ "SyncBN": NaiveSyncBatchNorm if env.TORCH_VERSION <= (1, 5) else nn.SyncBatchNorm,
+ "FrozenBN": FrozenBatchNorm2d,
+ "GN": lambda channels: nn.GroupNorm(32, channels),
+ # for debugging:
+ "nnSyncBN": nn.SyncBatchNorm,
+ "naiveSyncBN": NaiveSyncBatchNorm,
+ # expose stats_mode N as an option to caller, required for zero-len inputs
+ "naiveSyncBN_N": lambda channels: NaiveSyncBatchNorm(channels, stats_mode="N"),
+ "LN": lambda channels: LayerNorm(channels),
+ }[norm]
+ return norm(out_channels)
+
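+# Example usage (channel count is arbitrary):
+#   get_norm("GN", 256)  # -> nn.GroupNorm(32, 256)
+#   get_norm("BN", 256)  # -> BatchNorm2d(256)
+#   get_norm("", 256)    # -> None (no normalization)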
+
+class NaiveSyncBatchNorm(BatchNorm2d):
+ """
+ In PyTorch<=1.5, ``nn.SyncBatchNorm`` has incorrect gradient
+ when the batch size on each worker is different.
+ (e.g., when scale augmentation is used, or when it is applied to mask head).
+
+ This is a slower but correct alternative to `nn.SyncBatchNorm`.
+
+ Note:
+ There isn't a single definition of Sync BatchNorm.
+
+ When ``stats_mode==""``, this module computes overall statistics by using
+ statistics of each worker with equal weight. The result is true statistics
+ of all samples (as if they are all on one worker) only when all workers
+ have the same (N, H, W). This mode does not support inputs with zero batch size.
+
+ When ``stats_mode=="N"``, this module computes overall statistics by weighting
+ the statistics of each worker by their ``N``. The result is true statistics
+ of all samples (as if they are all on one worker) only when all workers
+ have the same (H, W). It is slower than ``stats_mode==""``.
+
+ Even though the result of this module may not be the true statistics of all samples,
+ it may still be reasonable because it might be preferable to assign equal weights
+ to all workers, regardless of their (H, W) dimension, instead of putting larger weight
+ on larger images. From preliminary experiments, little difference is found between such
+ a simplified implementation and an accurate computation of overall mean & variance.
+ """
+
+ def __init__(self, *args, stats_mode="", **kwargs):
+ super().__init__(*args, **kwargs)
+ assert stats_mode in ["", "N"]
+ self._stats_mode = stats_mode
+
+ def forward(self, input):
+ if comm.get_world_size() == 1 or not self.training:
+ return super().forward(input)
+
+ B, C = input.shape[0], input.shape[1]
+
+ half_input = input.dtype == torch.float16
+ if half_input:
+ # fp16 does not have good enough numerics for the reduction here
+ input = input.float()
+ mean = torch.mean(input, dim=[0, 2, 3])
+ meansqr = torch.mean(input * input, dim=[0, 2, 3])
+
+ if self._stats_mode == "":
+ assert B > 0, 'SyncBatchNorm(stats_mode="") does not support zero batch size.'
+ vec = torch.cat([mean, meansqr], dim=0)
+ vec = differentiable_all_reduce(vec) * (1.0 / dist.get_world_size())
+ mean, meansqr = torch.split(vec, C)
+ momentum = self.momentum
+ else:
+ if B == 0:
+ vec = torch.zeros([2 * C + 1], device=mean.device, dtype=mean.dtype)
+ vec = vec + input.sum() # make sure there is gradient w.r.t input
+ else:
+ vec = torch.cat(
+ [
+ mean,
+ meansqr,
+ torch.ones([1], device=mean.device, dtype=mean.dtype),
+ ],
+ dim=0,
+ )
+ vec = differentiable_all_reduce(vec * B)
+
+ total_batch = vec[-1].detach()
+ momentum = total_batch.clamp(max=1) * self.momentum # no update if total_batch is 0
+ mean, meansqr, _ = torch.split(vec / total_batch.clamp(min=1), C) # avoid div-by-zero
+
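+ # recover variance from the reduced statistics: Var[x] = E[x^2] - E[x]^2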
+ var = meansqr - mean * mean
+ invstd = torch.rsqrt(var + self.eps)
+ scale = self.weight * invstd
+ bias = self.bias - mean * scale
+ scale = scale.reshape(1, -1, 1, 1)
+ bias = bias.reshape(1, -1, 1, 1)
+
+ self.running_mean += momentum * (mean.detach() - self.running_mean)
+ self.running_var += momentum * (var.detach() - self.running_var)
+ ret = input * scale + bias
+ if half_input:
+ ret = ret.half()
+ return ret
+
+
+class CycleBatchNormList(nn.ModuleList):
+ """
+ Implement domain-specific BatchNorm by cycling.
+
+ When a BatchNorm layer is used for multiple input domains or input
+ features, it might need to maintain separate test-time statistics
+ for each domain. See Sec 5.2 in :paper:`rethinking-batchnorm`.
+
+ This module implements it by using N separate BN layers
+ and it cycles through them every time a forward() is called.
+
+ NOTE: The caller of this module MUST guarantee to always call
+ this module a multiple of N times. Otherwise its test-time statistics
+ will be incorrect.
+ """
+
+ def __init__(self, length: int, bn_class=nn.BatchNorm2d, **kwargs):
+ """
+ Args:
+ length: number of BatchNorm layers to cycle.
+ bn_class: the BatchNorm class to use
+ kwargs: arguments of the BatchNorm class, such as num_features.
+ """
+ self._affine = kwargs.pop("affine", True)
+ super().__init__([bn_class(**kwargs, affine=False) for k in range(length)])
+ if self._affine:
+ # shared affine, domain-specific BN
+ channels = self[0].num_features
+ self.weight = nn.Parameter(torch.ones(channels))
+ self.bias = nn.Parameter(torch.zeros(channels))
+ self._pos = 0
+
+ def forward(self, x):
+ ret = self[self._pos](x)
+ self._pos = (self._pos + 1) % len(self)
+
+ if self._affine:
+ w = self.weight.reshape(1, -1, 1, 1)
+ b = self.bias.reshape(1, -1, 1, 1)
+ return ret * w + b
+ else:
+ return ret
+
+ def extra_repr(self):
+ return f"affine={self._affine}"
+
+
+class LayerNorm(nn.Module):
+ """
+ A LayerNorm variant, popularized by Transformers, that performs point-wise mean and
+ variance normalization over the channel dimension for inputs that have shape
+ (batch_size, channels, height, width).
+ https://github.com/facebookresearch/ConvNeXt/blob/d1fa8f6fef0a165b27399986cc2bdacc92777e40/models/convnext.py#L119 # noqa B950
+ """
+
+ def __init__(self, normalized_shape, eps=1e-6):
+ super().__init__()
+ self.weight = nn.Parameter(torch.ones(normalized_shape))
+ self.bias = nn.Parameter(torch.zeros(normalized_shape))
+ self.eps = eps
+ self.normalized_shape = (normalized_shape,)
+
+ def forward(self, x):
+ u = x.mean(1, keepdim=True)
+ s = (x - u).pow(2).mean(1, keepdim=True)
+ x = (x - u) / torch.sqrt(s + self.eps)
+ x = self.weight[:, None, None] * x + self.bias[:, None, None]
+ return x
diff --git a/detectron2/layers/blocks.py b/detectron2/layers/blocks.py
new file mode 100644
index 0000000000000000000000000000000000000000..1995a4bf7339e8deb7eaaffda4f819dda55e7ac7
--- /dev/null
+++ b/detectron2/layers/blocks.py
@@ -0,0 +1,111 @@
+# -*- coding: utf-8 -*-
+# Copyright (c) Facebook, Inc. and its affiliates.
+
+import fvcore.nn.weight_init as weight_init
+from torch import nn
+
+from .batch_norm import FrozenBatchNorm2d, get_norm
+from .wrappers import Conv2d
+
+
+"""
+CNN building blocks.
+"""
+
+
+class CNNBlockBase(nn.Module):
+ """
+ A CNN block is assumed to have input channels, output channels and a stride.
+ The input and output of `forward()` method must be NCHW tensors.
+ The method can perform arbitrary computation but must match the given
+ channels and stride specification.
+
+ Attributes:
+ in_channels (int):
+ out_channels (int):
+ stride (int):
+ """
+
+ def __init__(self, in_channels, out_channels, stride):
+ """
+ The `__init__` method of any subclass should also contain these arguments.
+
+ Args:
+ in_channels (int):
+ out_channels (int):
+ stride (int):
+ """
+ super().__init__()
+ self.in_channels = in_channels
+ self.out_channels = out_channels
+ self.stride = stride
+
+ def freeze(self):
+ """
+ Make this block not trainable.
+ This method sets all parameters to `requires_grad=False`,
+ and converts all BatchNorm layers to FrozenBatchNorm.
+
+ Returns:
+ the block itself
+ """
+ for p in self.parameters():
+ p.requires_grad = False
+ FrozenBatchNorm2d.convert_frozen_batchnorm(self)
+ return self
+
+
+class DepthwiseSeparableConv2d(nn.Module):
+ """
+ A kxk depthwise convolution + a 1x1 convolution.
+
+ In :paper:`xception`, norm & activation are applied on the second conv.
+ :paper:`mobilenet` uses norm & activation on both convs.
+ """
+
+ def __init__(
+ self,
+ in_channels,
+ out_channels,
+ kernel_size=3,
+ padding=1,
+ dilation=1,
+ *,
+ norm1=None,
+ activation1=None,
+ norm2=None,
+ activation2=None,
+ ):
+ """
+ Args:
+ norm1, norm2 (str or callable): normalization for the two conv layers.
+ activation1, activation2 (callable(Tensor) -> Tensor): activation
+ function for the two conv layers.
+ """
+ super().__init__()
+ self.depthwise = Conv2d(
+ in_channels,
+ in_channels,
+ kernel_size=kernel_size,
+ padding=padding,
+ dilation=dilation,
+ groups=in_channels,
+ bias=not norm1,
+ norm=get_norm(norm1, in_channels),
+ activation=activation1,
+ )
+ self.pointwise = Conv2d(
+ in_channels,
+ out_channels,
+ kernel_size=1,
+ bias=not norm2,
+ norm=get_norm(norm2, out_channels),
+ activation=activation2,
+ )
+
+ # default initialization
+ weight_init.c2_msra_fill(self.depthwise)
+ weight_init.c2_msra_fill(self.pointwise)
+
+ def forward(self, x):
+ return self.pointwise(self.depthwise(x))
diff --git a/detectron2/layers/csrc/README.md b/detectron2/layers/csrc/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..778ed3da0bae89820831bcd8a72ff7b9cad8d4dd
--- /dev/null
+++ b/detectron2/layers/csrc/README.md
@@ -0,0 +1,7 @@
+
+
+To add a new Op:
+
+1. Create a new directory
+2. Implement new ops there
+3. Declare its Python interface in `vision.cpp`.
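+
+For reference, the declaration is typically a pybind11 binding; a minimal sketch (with `my_op` as a placeholder name):
+
+```cpp
+// inside the PYBIND11_MODULE block of vision.cpp (sketch)
+m.def("my_op", &my_op, "short description of my_op");
+```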
diff --git a/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
new file mode 100644
index 0000000000000000000000000000000000000000..03f4211003f42f601f0cfcf4a690f5da4a0a1f67
--- /dev/null
+++ b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated.h
@@ -0,0 +1,115 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+#include <torch/types.h>
+
+namespace detectron2 {
+
+at::Tensor ROIAlignRotated_forward_cpu(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio);
+
+at::Tensor ROIAlignRotated_backward_cpu(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio);
+
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+at::Tensor ROIAlignRotated_forward_cuda(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio);
+
+at::Tensor ROIAlignRotated_backward_cuda(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio);
+#endif
+
+// Interface for Python
+inline at::Tensor ROIAlignRotated_forward(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const double spatial_scale,
+ const int64_t pooled_height,
+ const int64_t pooled_width,
+ const int64_t sampling_ratio) {
+ if (input.is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+ return ROIAlignRotated_forward_cuda(
+ input,
+ rois,
+ spatial_scale,
+ pooled_height,
+ pooled_width,
+ sampling_ratio);
+#else
+ AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+ }
+ return ROIAlignRotated_forward_cpu(
+ input, rois, spatial_scale, pooled_height, pooled_width, sampling_ratio);
+}
+
+inline at::Tensor ROIAlignRotated_backward(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const double spatial_scale,
+ const int64_t pooled_height,
+ const int64_t pooled_width,
+ const int64_t batch_size,
+ const int64_t channels,
+ const int64_t height,
+ const int64_t width,
+ const int64_t sampling_ratio) {
+ if (grad.is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+ return ROIAlignRotated_backward_cuda(
+ grad,
+ rois,
+ spatial_scale,
+ pooled_height,
+ pooled_width,
+ batch_size,
+ channels,
+ height,
+ width,
+ sampling_ratio);
+#else
+ AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+ }
+ return ROIAlignRotated_backward_cpu(
+ grad,
+ rois,
+ spatial_scale,
+ pooled_height,
+ pooled_width,
+ batch_size,
+ channels,
+ height,
+ width,
+ sampling_ratio);
+}
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..2a3d3056cc71a4acaafb570739a9dd247a7eb1ed
--- /dev/null
+++ b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cpu.cpp
@@ -0,0 +1,522 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include <ATen/TensorUtils.h>
+#include "ROIAlignRotated.h"
+
+// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
+// and PyTorch ROIAlign (non-rotated) Op implementations.
+// The key difference between this implementation and those ones is
+// we don't do "legacy offset" in this version, as there aren't many previous
+// works, if any, using the "legacy" ROIAlignRotated Op.
+// This would make the interface a bit cleaner.
+
+namespace detectron2 {
+
+namespace {
+template <typename T>
+struct PreCalc {
+ int pos1;
+ int pos2;
+ int pos3;
+ int pos4;
+ T w1;
+ T w2;
+ T w3;
+ T w4;
+};
+
+template <typename T>
+void pre_calc_for_bilinear_interpolate(
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int iy_upper,
+ const int ix_upper,
+ T roi_start_h,
+ T roi_start_w,
+ T bin_size_h,
+ T bin_size_w,
+ int roi_bin_grid_h,
+ int roi_bin_grid_w,
+ T roi_center_h,
+ T roi_center_w,
+ T cos_theta,
+ T sin_theta,
+ std::vector<PreCalc<T>>& pre_calc) {
+ int pre_calc_index = 0;
+ for (int ph = 0; ph < pooled_height; ph++) {
+ for (int pw = 0; pw < pooled_width; pw++) {
+ for (int iy = 0; iy < iy_upper; iy++) {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast<T>(iy + .5f) * bin_size_h /
+ static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < ix_upper; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast<T>(ix + .5f) * bin_size_w /
+ static_cast<T>(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ // In image space, (y, x) is the order for Right Handed System,
+ // and this is essentially multiplying the point by a rotation matrix
+ // to rotate it counterclockwise through angle theta.
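+ // Equivalently (the transform applied just below):
+ // [y]   [cos(theta)  -sin(theta)] [yy]   [roi_center_h]
+ // [x] = [sin(theta)   cos(theta)] [xx] + [roi_center_w]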
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+ // deal with: inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ PreCalc<T> pc;
+ pc.pos1 = 0;
+ pc.pos2 = 0;
+ pc.pos3 = 0;
+ pc.pos4 = 0;
+ pc.w1 = 0;
+ pc.w2 = 0;
+ pc.w3 = 0;
+ pc.w4 = 0;
+ pre_calc[pre_calc_index] = pc;
+ pre_calc_index += 1;
+ continue;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+ if (x < 0) {
+ x = 0;
+ }
+
+ int y_low = (int)y;
+ int x_low = (int)x;
+ int y_high;
+ int x_high;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+ T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ // save weights and indices
+ PreCalc<T> pc;
+ pc.pos1 = y_low * width + x_low;
+ pc.pos2 = y_low * width + x_high;
+ pc.pos3 = y_high * width + x_low;
+ pc.pos4 = y_high * width + x_high;
+ pc.w1 = w1;
+ pc.w2 = w2;
+ pc.w3 = w3;
+ pc.w4 = w4;
+ pre_calc[pre_calc_index] = pc;
+
+ pre_calc_index += 1;
+ }
+ }
+ }
+ }
+}
+
+template <typename T>
+void bilinear_interpolate_gradient(
+ const int height,
+ const int width,
+ T y,
+ T x,
+ T& w1,
+ T& w2,
+ T& w3,
+ T& w4,
+ int& x_low,
+ int& x_high,
+ int& y_low,
+ int& y_high) {
+ // deal with cases that inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ w1 = w2 = w3 = w4 = 0.;
+ x_low = x_high = y_low = y_high = -1;
+ return;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+
+ if (x < 0) {
+ x = 0;
+ }
+
+ y_low = (int)y;
+ x_low = (int)x;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+
+ // reference in forward
+ // T v1 = input[y_low * width + x_low];
+ // T v2 = input[y_low * width + x_high];
+ // T v3 = input[y_high * width + x_low];
+ // T v4 = input[y_high * width + x_high];
+ // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+ w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ return;
+}
+
+template <typename T>
+inline void add(T* address, const T& val) {
+ *address += val;
+}
+
+} // namespace
+
+template <typename T>
+void ROIAlignRotatedForward(
+ const int nthreads,
+ const T* input,
+ const T& spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ const T* rois,
+ T* output) {
+ int n_rois = nthreads / channels / pooled_width / pooled_height;
+ // (n, c, ph, pw) is an element in the pooled output
+ // can be parallelized using omp
+ // #pragma omp parallel for num_threads(32)
+ for (int n = 0; n < n_rois; n++) {
+ int index_n = n * channels * pooled_width * pooled_height;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ AT_ASSERTM(
+ roi_width >= 0 && roi_height >= 0,
+ "ROIs in ROIAlignRotated do not have non-negative size!");
+
+ T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+ T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // We do average (integral) pooling inside a bin
+ const T count = std::max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
+
+ // we want to precalculate indices and weights shared by all channels,
+ // this is the key point of optimization
+ std::vector<PreCalc<T>> pre_calc(
+ roi_bin_grid_h * roi_bin_grid_w * pooled_width * pooled_height);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ pre_calc_for_bilinear_interpolate(
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ roi_bin_grid_h,
+ roi_bin_grid_w,
+ roi_start_h,
+ roi_start_w,
+ bin_size_h,
+ bin_size_w,
+ roi_bin_grid_h,
+ roi_bin_grid_w,
+ roi_center_h,
+ roi_center_w,
+ cos_theta,
+ sin_theta,
+ pre_calc);
+
+ for (int c = 0; c < channels; c++) {
+ int index_n_c = index_n + c * pooled_width * pooled_height;
+ const T* offset_input =
+ input + (roi_batch_ind * channels + c) * height * width;
+ int pre_calc_index = 0;
+
+ for (int ph = 0; ph < pooled_height; ph++) {
+ for (int pw = 0; pw < pooled_width; pw++) {
+ int index = index_n_c + ph * pooled_width + pw;
+
+ T output_val = 0.;
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ PreCalc<T> pc = pre_calc[pre_calc_index];
+ output_val += pc.w1 * offset_input[pc.pos1] +
+ pc.w2 * offset_input[pc.pos2] +
+ pc.w3 * offset_input[pc.pos3] + pc.w4 * offset_input[pc.pos4];
+
+ pre_calc_index += 1;
+ }
+ }
+ output_val /= count;
+
+ output[index] = output_val;
+ } // for pw
+ } // for ph
+ } // for c
+ } // for n
+}
+
+template <typename T>
+void ROIAlignRotatedBackward(
+ const int nthreads,
+ // may not be contiguous. should index using n_stride, etc
+ const T* grad_output,
+ const T& spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ T* grad_input,
+ const T* rois,
+ const int n_stride,
+ const int c_stride,
+ const int h_stride,
+ const int w_stride) {
+ for (int index = 0; index < nthreads; index++) {
+ // (n, c, ph, pw) is an element in the pooled output
+ int pw = index % pooled_width;
+ int ph = (index / pooled_width) % pooled_height;
+ int c = (index / pooled_width / pooled_height) % channels;
+ int n = index / pooled_width / pooled_height / channels;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ AT_ASSERTM(
+ roi_width >= 0 && roi_height >= 0,
+ "ROIs in ROIAlignRotated do not have non-negative size!");
+
+ T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+ T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+ T* offset_grad_input =
+ grad_input + ((roi_batch_ind * channels + c) * height * width);
+
+ int output_offset = n * n_stride + c * c_stride;
+ const T* offset_grad_output = grad_output + output_offset;
+ const T grad_output_this_bin =
+ offset_grad_output[ph * h_stride + pw * w_stride];
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ // We do average (integral) pooling inside a bin
+ const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
+
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast<T>(iy + .5f) * bin_size_h /
+ static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast<T>(ix + .5f) * bin_size_w /
+ static_cast<T>(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+ T w1, w2, w3, w4;
+ int x_low, x_high, y_low, y_high;
+
+ bilinear_interpolate_gradient(
+ height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
+
+ T g1 = grad_output_this_bin * w1 / count;
+ T g2 = grad_output_this_bin * w2 / count;
+ T g3 = grad_output_this_bin * w3 / count;
+ T g4 = grad_output_this_bin * w4 / count;
+
+ if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+ // atomic add is not needed for now since it is single threaded
+ add(offset_grad_input + y_low * width + x_low, static_cast<T>(g1));
+ add(offset_grad_input + y_low * width + x_high, static_cast<T>(g2));
+ add(offset_grad_input + y_high * width + x_low, static_cast<T>(g3));
+ add(offset_grad_input + y_high * width + x_high, static_cast<T>(g4));
+ } // if
+ } // ix
+ } // iy
+ } // for
+} // ROIAlignRotatedBackward
+
+at::Tensor ROIAlignRotated_forward_cpu(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio) {
+ AT_ASSERTM(input.device().is_cpu(), "input must be a CPU tensor");
+ AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
+
+ at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
+
+ at::CheckedFrom c = "ROIAlign_forward_cpu";
+ at::checkAllSameType(c, {input_t, rois_t});
+
+ auto num_rois = rois.size(0);
+ auto channels = input.size(1);
+ auto height = input.size(2);
+ auto width = input.size(3);
+
+ at::Tensor output = at::zeros(
+ {num_rois, channels, pooled_height, pooled_width}, input.options());
+
+ auto output_size = num_rois * pooled_height * pooled_width * channels;
+
+ if (output.numel() == 0) {
+ return output;
+ }
+
+ auto input_ = input.contiguous(), rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+ input.scalar_type(), "ROIAlignRotated_forward", [&] {
+ ROIAlignRotatedForward<scalar_t>(
+ output_size,
+ input_.data_ptr<scalar_t>(),
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ rois_.data_ptr<scalar_t>(),
+ output.data_ptr<scalar_t>());
+ });
+ return output;
+}
+
+at::Tensor ROIAlignRotated_backward_cpu(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio) {
+ AT_ASSERTM(grad.device().is_cpu(), "grad must be a CPU tensor");
+ AT_ASSERTM(rois.device().is_cpu(), "rois must be a CPU tensor");
+
+ at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
+
+ at::CheckedFrom c = "ROIAlignRotated_backward_cpu";
+ at::checkAllSameType(c, {grad_t, rois_t});
+
+ at::Tensor grad_input =
+ at::zeros({batch_size, channels, height, width}, grad.options());
+
+ // handle possibly empty gradients
+ if (grad.numel() == 0) {
+ return grad_input;
+ }
+
+ // get stride values to ensure indexing into gradients is correct.
+ int n_stride = grad.stride(0);
+ int c_stride = grad.stride(1);
+ int h_stride = grad.stride(2);
+ int w_stride = grad.stride(3);
+
+ auto rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES_AND_HALF(
+ grad.scalar_type(), "ROIAlignRotated_forward", [&] {
+ ROIAlignRotatedBackward<scalar_t>(
+ grad.numel(),
+ grad.data_ptr<scalar_t>(),
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ grad_input.data_ptr<scalar_t>(),
+ rois_.data_ptr<scalar_t>(),
+ n_stride,
+ c_stride,
+ h_stride,
+ w_stride);
+ });
+ return grad_input;
+}
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..fca186519143b168a912c880a4cf495a0a5a9322
--- /dev/null
+++ b/detectron2/layers/csrc/ROIAlignRotated/ROIAlignRotated_cuda.cu
@@ -0,0 +1,443 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+
+// TODO make it in a common file
+#define CUDA_1D_KERNEL_LOOP(i, n) \
+ for (int i = blockIdx.x * blockDim.x + threadIdx.x; i < n; \
+ i += blockDim.x * gridDim.x)
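+// (a grid-stride loop: each thread handles indices i, i + blockDim.x * gridDim.x, ...)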
+
+// Note: this implementation originates from the Caffe2 ROIAlignRotated Op
+// and PyTorch ROIAlign (non-rotated) Op implementations.
+// The key difference between this implementation and those ones is
+// we don't do "legacy offset" in this version, as there aren't many previous
+// works, if any, using the "legacy" ROIAlignRotated Op.
+// This would make the interface a bit cleaner.
+
+namespace detectron2 {
+
+namespace {
+
+template <typename T>
+__device__ T bilinear_interpolate(
+ const T* input,
+ const int height,
+ const int width,
+ T y,
+ T x) {
+ // deal with cases that inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ return 0;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+
+ if (x < 0) {
+ x = 0;
+ }
+
+ int y_low = (int)y;
+ int x_low = (int)x;
+ int y_high;
+ int x_high;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+ // do bilinear interpolation
+ T v1 = input[y_low * width + x_low];
+ T v2 = input[y_low * width + x_high];
+ T v3 = input[y_high * width + x_low];
+ T v4 = input[y_high * width + x_high];
+ T w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+ return val;
+}
+
+template <typename T>
+__device__ void bilinear_interpolate_gradient(
+ const int height,
+ const int width,
+ T y,
+ T x,
+ T& w1,
+ T& w2,
+ T& w3,
+ T& w4,
+ int& x_low,
+ int& x_high,
+ int& y_low,
+ int& y_high) {
+ // deal with cases that inverse elements are out of feature map boundary
+ if (y < -1.0 || y > height || x < -1.0 || x > width) {
+ // empty
+ w1 = w2 = w3 = w4 = 0.;
+ x_low = x_high = y_low = y_high = -1;
+ return;
+ }
+
+ if (y < 0) {
+ y = 0;
+ }
+
+ if (x < 0) {
+ x = 0;
+ }
+
+ y_low = (int)y;
+ x_low = (int)x;
+
+ if (y_low >= height - 1) {
+ y_high = y_low = height - 1;
+ y = (T)y_low;
+ } else {
+ y_high = y_low + 1;
+ }
+
+ if (x_low >= width - 1) {
+ x_high = x_low = width - 1;
+ x = (T)x_low;
+ } else {
+ x_high = x_low + 1;
+ }
+
+ T ly = y - y_low;
+ T lx = x - x_low;
+ T hy = 1. - ly, hx = 1. - lx;
+
+ // reference in forward
+ // T v1 = input[y_low * width + x_low];
+ // T v2 = input[y_low * width + x_high];
+ // T v3 = input[y_high * width + x_low];
+ // T v4 = input[y_high * width + x_high];
+ // T val = (w1 * v1 + w2 * v2 + w3 * v3 + w4 * v4);
+
+ w1 = hy * hx, w2 = hy * lx, w3 = ly * hx, w4 = ly * lx;
+
+ return;
+}
+
+} // namespace
+
+template <typename T>
+__global__ void RoIAlignRotatedForward(
+ const int nthreads,
+ const T* input,
+ const T spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ const T* rois,
+ T* top_data) {
+ CUDA_1D_KERNEL_LOOP(index, nthreads) {
+ // (n, c, ph, pw) is an element in the pooled output
+ int pw = index % pooled_width;
+ int ph = (index / pooled_width) % pooled_height;
+ int c = (index / pooled_width / pooled_height) % channels;
+ int n = index / pooled_width / pooled_height / channels;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+ T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+ const T* offset_input =
+ input + (roi_batch_ind * channels + c) * height * width;
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ // We do average (integral) pooling inside a bin
+ const T count = max(roi_bin_grid_h * roi_bin_grid_w, 1); // e.g. = 4
+
+ T output_val = 0.;
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
+ {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast<T>(iy + .5f) * bin_size_h /
+ static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast<T>(ix + .5f) * bin_size_w /
+ static_cast<T>(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+ T val = bilinear_interpolate(offset_input, height, width, y, x);
+ output_val += val;
+ }
+ }
+ output_val /= count;
+
+ top_data[index] = output_val;
+ }
+}
+
+template <typename T>
+__global__ void RoIAlignRotatedBackwardFeature(
+ const int nthreads,
+ const T* top_diff,
+ const int num_rois,
+ const T spatial_scale,
+ const int channels,
+ const int height,
+ const int width,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio,
+ T* bottom_diff,
+ const T* rois) {
+ CUDA_1D_KERNEL_LOOP(index, nthreads) {
+ // (n, c, ph, pw) is an element in the pooled output
+ int pw = index % pooled_width;
+ int ph = (index / pooled_width) % pooled_height;
+ int c = (index / pooled_width / pooled_height) % channels;
+ int n = index / pooled_width / pooled_height / channels;
+
+ const T* current_roi = rois + n * 6;
+ int roi_batch_ind = current_roi[0];
+
+ // Do not use rounding; this implementation detail is critical
+ // ROIAlignRotated supports align == true, i.e., continuous coordinate
+ // by default, thus the 0.5 offset
+ T offset = (T)0.5;
+ T roi_center_w = current_roi[1] * spatial_scale - offset;
+ T roi_center_h = current_roi[2] * spatial_scale - offset;
+ T roi_width = current_roi[3] * spatial_scale;
+ T roi_height = current_roi[4] * spatial_scale;
+ T theta = current_roi[5] * M_PI / 180.0;
+ T cos_theta = cos(theta);
+ T sin_theta = sin(theta);
+
+ T bin_size_h = static_cast<T>(roi_height) / static_cast<T>(pooled_height);
+ T bin_size_w = static_cast<T>(roi_width) / static_cast<T>(pooled_width);
+
+ T* offset_bottom_diff =
+ bottom_diff + (roi_batch_ind * channels + c) * height * width;
+
+ int top_offset = (n * channels + c) * pooled_height * pooled_width;
+ const T* offset_top_diff = top_diff + top_offset;
+ const T top_diff_this_bin = offset_top_diff[ph * pooled_width + pw];
+
+ // We use roi_bin_grid to sample the grid and mimic integral
+ int roi_bin_grid_h = (sampling_ratio > 0)
+ ? sampling_ratio
+ : ceil(roi_height / pooled_height); // e.g., = 2
+ int roi_bin_grid_w =
+ (sampling_ratio > 0) ? sampling_ratio : ceil(roi_width / pooled_width);
+
+ // roi_start_h and roi_start_w are computed wrt the center of RoI (x, y).
+ // Appropriate translation needs to be applied after.
+ T roi_start_h = -roi_height / 2.0;
+ T roi_start_w = -roi_width / 2.0;
+
+ // We do average (integral) pooling inside a bin
+ const T count = roi_bin_grid_h * roi_bin_grid_w; // e.g. = 4
+
+ for (int iy = 0; iy < roi_bin_grid_h; iy++) // e.g., iy = 0, 1
+ {
+ const T yy = roi_start_h + ph * bin_size_h +
+ static_cast<T>(iy + .5f) * bin_size_h /
+ static_cast<T>(roi_bin_grid_h); // e.g., 0.5, 1.5
+ for (int ix = 0; ix < roi_bin_grid_w; ix++) {
+ const T xx = roi_start_w + pw * bin_size_w +
+ static_cast<T>(ix + .5f) * bin_size_w /
+ static_cast<T>(roi_bin_grid_w);
+
+ // Rotate by theta around the center and translate
+ T y = yy * cos_theta - xx * sin_theta + roi_center_h;
+ T x = yy * sin_theta + xx * cos_theta + roi_center_w;
+
+ T w1, w2, w3, w4;
+ int x_low, x_high, y_low, y_high;
+
+ bilinear_interpolate_gradient(
+ height, width, y, x, w1, w2, w3, w4, x_low, x_high, y_low, y_high);
+
+ T g1 = top_diff_this_bin * w1 / count;
+ T g2 = top_diff_this_bin * w2 / count;
+ T g3 = top_diff_this_bin * w3 / count;
+ T g4 = top_diff_this_bin * w4 / count;
+
+ if (x_low >= 0 && x_high >= 0 && y_low >= 0 && y_high >= 0) {
+ atomicAdd(
+ offset_bottom_diff + y_low * width + x_low, static_cast<T>(g1));
+ atomicAdd(
+ offset_bottom_diff + y_low * width + x_high, static_cast<T>(g2));
+ atomicAdd(
+ offset_bottom_diff + y_high * width + x_low, static_cast<T>(g3));
+ atomicAdd(
+ offset_bottom_diff + y_high * width + x_high, static_cast<T>(g4));
+ } // if
+ } // ix
+ } // iy
+ } // CUDA_1D_KERNEL_LOOP
+} // RoIAlignRotatedBackward
+
+at::Tensor ROIAlignRotated_forward_cuda(
+ const at::Tensor& input,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int sampling_ratio) {
+ AT_ASSERTM(input.device().is_cuda(), "input must be a CUDA tensor");
+ AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+ at::TensorArg input_t{input, "input", 1}, rois_t{rois, "rois", 2};
+
+ at::CheckedFrom c = "ROIAlignRotated_forward_cuda";
+ at::checkAllSameGPU(c, {input_t, rois_t});
+ at::checkAllSameType(c, {input_t, rois_t});
+ at::cuda::CUDAGuard device_guard(input.device());
+
+ auto num_rois = rois.size(0);
+ auto channels = input.size(1);
+ auto height = input.size(2);
+ auto width = input.size(3);
+
+ auto output = at::empty(
+ {num_rois, channels, pooled_height, pooled_width}, input.options());
+ auto output_size = num_rois * pooled_height * pooled_width * channels;
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ dim3 grid(std::min(
+ at::cuda::ATenCeilDiv(
+ static_cast<int64_t>(output_size), static_cast<int64_t>(512)),
+ static_cast<int64_t>(4096)));
+ dim3 block(512);
+
+ if (output.numel() == 0) {
+ AT_CUDA_CHECK(cudaGetLastError());
+ return output;
+ }
+
+ auto input_ = input.contiguous(), rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES(
+ input.scalar_type(), "ROIAlignRotated_forward", [&] {
+ RoIAlignRotatedForward<scalar_t><<<grid, block, 0, stream>>>(
+ output_size,
+ input_.data_ptr<scalar_t>(),
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ rois_.data_ptr<scalar_t>(),
+ output.data_ptr<scalar_t>());
+ });
+ cudaDeviceSynchronize();
+ AT_CUDA_CHECK(cudaGetLastError());
+ return output;
+}
+
+// TODO remove the dependency on input and use instead its sizes -> save memory
+at::Tensor ROIAlignRotated_backward_cuda(
+ const at::Tensor& grad,
+ const at::Tensor& rois,
+ const float spatial_scale,
+ const int pooled_height,
+ const int pooled_width,
+ const int batch_size,
+ const int channels,
+ const int height,
+ const int width,
+ const int sampling_ratio) {
+ AT_ASSERTM(grad.device().is_cuda(), "grad must be a CUDA tensor");
+ AT_ASSERTM(rois.device().is_cuda(), "rois must be a CUDA tensor");
+
+ at::TensorArg grad_t{grad, "grad", 1}, rois_t{rois, "rois", 2};
+ at::CheckedFrom c = "ROIAlign_backward_cuda";
+ at::checkAllSameGPU(c, {grad_t, rois_t});
+ at::checkAllSameType(c, {grad_t, rois_t});
+ at::cuda::CUDAGuard device_guard(grad.device());
+
+ auto num_rois = rois.size(0);
+ auto grad_input =
+ at::zeros({batch_size, channels, height, width}, grad.options());
+
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ dim3 grid(std::min(
+ at::cuda::ATenCeilDiv(
+ static_cast<int64_t>(grad.numel()), static_cast<int64_t>(512)),
+ static_cast<int64_t>(4096)));
+ dim3 block(512);
+
+ // handle possibly empty gradients
+ if (grad.numel() == 0) {
+ AT_CUDA_CHECK(cudaGetLastError());
+ return grad_input;
+ }
+
+ auto grad_ = grad.contiguous(), rois_ = rois.contiguous();
+ AT_DISPATCH_FLOATING_TYPES(
+ grad.scalar_type(), "ROIAlignRotated_backward", [&] {
+ RoIAlignRotatedBackwardFeature<scalar_t><<<grid, block, 0, stream>>>(
+ grad.numel(),
+ grad_.data_ptr<scalar_t>(),
+ num_rois,
+ spatial_scale,
+ channels,
+ height,
+ width,
+ pooled_height,
+ pooled_width,
+ sampling_ratio,
+ grad_input.data_ptr<scalar_t>(),
+ rois_.data_ptr<scalar_t>());
+ });
+ AT_CUDA_CHECK(cudaGetLastError());
+ return grad_input;
+}
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h
new file mode 100644
index 0000000000000000000000000000000000000000..3bf383b8ed9b358b5313d433a9682c294dfb77e4
--- /dev/null
+++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated.h
@@ -0,0 +1,35 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+#include <torch/types.h>
+
+namespace detectron2 {
+
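+// Each box is (x_ctr, y_ctr, width, height, angle_in_degrees), stored as one
+// 5-float row; see box_iou_rotated_utils.h for the geometry.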
+at::Tensor box_iou_rotated_cpu(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2);
+
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+at::Tensor box_iou_rotated_cuda(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2);
+#endif
+
+// Interface for Python
+// inline is needed to prevent multiple function definitions when this header is
+// included by different cpps
+inline at::Tensor box_iou_rotated(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2) {
+ assert(boxes1.device().is_cuda() == boxes2.device().is_cuda());
+ if (boxes1.device().is_cuda()) {
+#if defined(WITH_CUDA) || defined(WITH_HIP)
+ return box_iou_rotated_cuda(boxes1.contiguous(), boxes2.contiguous());
+#else
+ AT_ERROR("Detectron2 is not compiled with GPU support!");
+#endif
+ }
+
+ return box_iou_rotated_cpu(boxes1.contiguous(), boxes2.contiguous());
+}
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..c843487b5fa4e8077dd27402ec99009266ddda8d
--- /dev/null
+++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cpu.cpp
@@ -0,0 +1,39 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include "box_iou_rotated.h"
+#include "box_iou_rotated_utils.h"
+
+namespace detectron2 {
+
+template <typename T>
+void box_iou_rotated_cpu_kernel(
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2,
+ at::Tensor& ious) {
+ auto num_boxes1 = boxes1.size(0);
+ auto num_boxes2 = boxes2.size(0);
+
+ for (int i = 0; i < num_boxes1; i++) {
+ for (int j = 0; j < num_boxes2; j++) {
+ ious[i * num_boxes2 + j] = single_box_iou_rotated<T>(
+ boxes1[i].data_ptr<T>(), boxes2[j].data_ptr<T>());
+ }
+ }
+}
+
+at::Tensor box_iou_rotated_cpu(
+ // input must be contiguous:
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2) {
+ auto num_boxes1 = boxes1.size(0);
+ auto num_boxes2 = boxes2.size(0);
+ at::Tensor ious =
+ at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
+
+ box_iou_rotated_cpu_kernel<float>(boxes1, boxes2, ious);
+
+ // reshape from 1d array to 2d array
+ auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
+ return ious.reshape(shape);
+}
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
new file mode 100644
index 0000000000000000000000000000000000000000..952710e53041187907fbd113f8d0d0fa24134a86
--- /dev/null
+++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_cuda.cu
@@ -0,0 +1,130 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include <ATen/ATen.h>
+#include <ATen/cuda/CUDAContext.h>
+#include <c10/cuda/CUDAGuard.h>
+#include <ATen/cuda/CUDAApplyUtils.cuh>
+#include "box_iou_rotated_utils.h"
+
+namespace detectron2 {
+
+// 2D block with 32 * 16 = 512 threads per block
+const int BLOCK_DIM_X = 32;
+const int BLOCK_DIM_Y = 16;
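+// Each block computes IoUs for a 32 x 16 tile of (box1, box2) pairs; the
+// tile's boxes are first staged into shared memory.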
+
+template <typename T>
+__global__ void box_iou_rotated_cuda_kernel(
+ const int n_boxes1,
+ const int n_boxes2,
+ const T* dev_boxes1,
+ const T* dev_boxes2,
+ T* dev_ious) {
+ const int row_start = blockIdx.x * blockDim.x;
+ const int col_start = blockIdx.y * blockDim.y;
+
+ const int row_size = min(n_boxes1 - row_start, blockDim.x);
+ const int col_size = min(n_boxes2 - col_start, blockDim.y);
+
+ __shared__ float block_boxes1[BLOCK_DIM_X * 5];
+ __shared__ float block_boxes2[BLOCK_DIM_Y * 5];
+
+ // It's safe to copy using threadIdx.x since BLOCK_DIM_X >= BLOCK_DIM_Y
+ if (threadIdx.x < row_size && threadIdx.y == 0) {
+ block_boxes1[threadIdx.x * 5 + 0] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 0];
+ block_boxes1[threadIdx.x * 5 + 1] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 1];
+ block_boxes1[threadIdx.x * 5 + 2] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 2];
+ block_boxes1[threadIdx.x * 5 + 3] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 3];
+ block_boxes1[threadIdx.x * 5 + 4] =
+ dev_boxes1[(row_start + threadIdx.x) * 5 + 4];
+ }
+
+ if (threadIdx.x < col_size && threadIdx.y == 0) {
+ block_boxes2[threadIdx.x * 5 + 0] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 0];
+ block_boxes2[threadIdx.x * 5 + 1] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 1];
+ block_boxes2[threadIdx.x * 5 + 2] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 2];
+ block_boxes2[threadIdx.x * 5 + 3] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 3];
+ block_boxes2[threadIdx.x * 5 + 4] =
+ dev_boxes2[(col_start + threadIdx.x) * 5 + 4];
+ }
+ __syncthreads();
+
+ if (threadIdx.x < row_size && threadIdx.y < col_size) {
+ int offset = (row_start + threadIdx.x) * n_boxes2 + col_start + threadIdx.y;
+ dev_ious[offset] = single_box_iou_rotated(
+ block_boxes1 + threadIdx.x * 5, block_boxes2 + threadIdx.y * 5);
+ }
+}
+
+at::Tensor box_iou_rotated_cuda(
+ // input must be contiguous
+ const at::Tensor& boxes1,
+ const at::Tensor& boxes2) {
+ using scalar_t = float;
+ AT_ASSERTM(
+ boxes1.scalar_type() == at::kFloat, "boxes1 must be a float tensor");
+ AT_ASSERTM(
+ boxes2.scalar_type() == at::kFloat, "boxes2 must be a float tensor");
+ AT_ASSERTM(boxes1.is_cuda(), "boxes1 must be a CUDA tensor");
+ AT_ASSERTM(boxes2.is_cuda(), "boxes2 must be a CUDA tensor");
+ at::cuda::CUDAGuard device_guard(boxes1.device());
+
+ auto num_boxes1 = boxes1.size(0);
+ auto num_boxes2 = boxes2.size(0);
+
+ at::Tensor ious =
+ at::empty({num_boxes1 * num_boxes2}, boxes1.options().dtype(at::kFloat));
+
+ bool transpose = false;
+ if (num_boxes1 > 0 && num_boxes2 > 0) {
+ scalar_t *data1 = boxes1.data_ptr<scalar_t>(),
+ *data2 = boxes2.data_ptr<scalar_t>();
+
+ if (num_boxes2 > 65535 * BLOCK_DIM_Y) {
+ AT_ASSERTM(
+ num_boxes1 <= 65535 * BLOCK_DIM_Y,
+ "Too many boxes for box_iou_rotated_cuda!");
+ // x dim is allowed to be large, but y dim cannot,
+ // so we transpose the two to avoid "invalid configuration argument"
+ // error. We assume one of them is small. Otherwise the result is hard to
+ // fit in memory anyway.
+ std::swap(num_boxes1, num_boxes2);
+ std::swap(data1, data2);
+ transpose = true;
+ }
+
+ const int blocks_x =
+ at::cuda::ATenCeilDiv(static_cast<int>(num_boxes1), BLOCK_DIM_X);
+ const int blocks_y =
+ at::cuda::ATenCeilDiv(static_cast<int>(num_boxes2), BLOCK_DIM_Y);
+
+ dim3 blocks(blocks_x, blocks_y);
+ dim3 threads(BLOCK_DIM_X, BLOCK_DIM_Y);
+ cudaStream_t stream = at::cuda::getCurrentCUDAStream();
+
+ box_iou_rotated_cuda_kernel<scalar_t><<<blocks, threads, 0, stream>>>(
+ num_boxes1,
+ num_boxes2,
+ data1,
+ data2,
+ (scalar_t*)ious.data_ptr());
+
+ AT_CUDA_CHECK(cudaGetLastError());
+ }
+
+ // reshape from 1d array to 2d array
+ auto shape = std::vector<int64_t>{num_boxes1, num_boxes2};
+ if (transpose) {
+ return ious.view(shape).t();
+ } else {
+ return ious.view(shape);
+ }
+}
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h
new file mode 100644
index 0000000000000000000000000000000000000000..bc6967a76884a40581a94554e91e6e72c6f8b527
--- /dev/null
+++ b/detectron2/layers/csrc/box_iou_rotated/box_iou_rotated_utils.h
@@ -0,0 +1,391 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#pragma once
+
+#include <cassert>
+#include <cmath>
+
+#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
+// Designates functions callable from the host (CPU) and the device (GPU)
+#define HOST_DEVICE __host__ __device__
+#define HOST_DEVICE_INLINE HOST_DEVICE __forceinline__
+#else
+#include <algorithm>
+#define HOST_DEVICE
+#define HOST_DEVICE_INLINE HOST_DEVICE inline
+#endif
+
+namespace detectron2 {
+
+namespace {
+
+template <typename T>
+struct RotatedBox {
+ T x_ctr, y_ctr, w, h, a;
+};
+
+template <typename T>
+struct Point {
+ T x, y;
+ HOST_DEVICE_INLINE Point(const T& px = 0, const T& py = 0) : x(px), y(py) {}
+ HOST_DEVICE_INLINE Point operator+(const Point& p) const {
+ return Point(x + p.x, y + p.y);
+ }
+ HOST_DEVICE_INLINE Point& operator+=(const Point& p) {
+ x += p.x;
+ y += p.y;
+ return *this;
+ }
+ HOST_DEVICE_INLINE Point operator-(const Point& p) const {
+ return Point(x - p.x, y - p.y);
+ }
+ HOST_DEVICE_INLINE Point operator*(const T coeff) const {
+ return Point(x * coeff, y * coeff);
+ }
+};
+
+template <typename T>
+HOST_DEVICE_INLINE T dot_2d(const Point<T>& A, const Point<T>& B) {
+ return A.x * B.x + A.y * B.y;
+}
+
+// R: result type. can be different from input type
+template <typename T, typename R = T>
+HOST_DEVICE_INLINE R cross_2d(const Point<T>& A, const Point<T>& B) {
+ return static_cast<R>(A.x) * static_cast<R>(B.y) -
+ static_cast<R>(B.x) * static_cast<R>(A.y);
+}
+
+template <typename T>
+HOST_DEVICE_INLINE void get_rotated_vertices(
+ const RotatedBox<T>& box,
+ Point<T> (&pts)[4]) {
+ // M_PI / 180. == 0.01745329251
+ double theta = box.a * 0.01745329251;
+ T cosTheta2 = (T)cos(theta) * 0.5f;
+ T sinTheta2 = (T)sin(theta) * 0.5f;
+
+ // y: top --> down; x: left --> right
+ pts[0].x = box.x_ctr + sinTheta2 * box.h + cosTheta2 * box.w;
+ pts[0].y = box.y_ctr + cosTheta2 * box.h - sinTheta2 * box.w;
+ pts[1].x = box.x_ctr - sinTheta2 * box.h + cosTheta2 * box.w;
+ pts[1].y = box.y_ctr - cosTheta2 * box.h - sinTheta2 * box.w;
+ pts[2].x = 2 * box.x_ctr - pts[0].x;
+ pts[2].y = 2 * box.y_ctr - pts[0].y;
+ pts[3].x = 2 * box.x_ctr - pts[1].x;
+ pts[3].y = 2 * box.y_ctr - pts[1].y;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int get_intersection_points(
+ const Point<T> (&pts1)[4],
+ const Point<T> (&pts2)[4],
+ Point<T> (&intersections)[24]) {
+ // Line vector
+ // A line from p1 to p2 is: p1 + (p2-p1)*t, t=[0,1]
+ Point<T> vec1[4], vec2[4];
+ for (int i = 0; i < 4; i++) {
+ vec1[i] = pts1[(i + 1) % 4] - pts1[i];
+ vec2[i] = pts2[(i + 1) % 4] - pts2[i];
+ }
+
+ // When computing the intersection area, it doesn't hurt if we have
+ // more (duplicated/approximate) intersections/vertices than needed,
+ // while it can cause drastic difference if we miss an intersection/vertex.
+ // Therefore, we add an epsilon to relax the comparisons between
+ // the float point numbers that decide the intersection points.
+ double EPS = 1e-5;
+
+ // Line test - test all line combos for intersection
+ int num = 0; // number of intersections
+ for (int i = 0; i < 4; i++) {
+ for (int j = 0; j < 4; j++) {
+ // Solve for 2x2 Ax=b
+ T det = cross_2d(vec2[j], vec1[i]);
+
+ // This takes care of parallel lines
+ if (fabs(det) <= 1e-14) {
+ continue;
+ }
+
+ auto vec12 = pts2[j] - pts1[i];
+
+ T t1 = cross_2d(vec2[j], vec12) / det;
+ T t2 = cross_2d(vec1[i], vec12) / det;
+
+ if (t1 > -EPS && t1 < 1.0f + EPS && t2 > -EPS && t2 < 1.0f + EPS) {
+ intersections[num++] = pts1[i] + vec1[i] * t1;
+ }
+ }
+ }
+
+ // Check for vertices of rect1 inside rect2
+ {
+ const auto& AB = vec2[0];
+ const auto& DA = vec2[3];
+ auto ABdotAB = dot_2d(AB, AB);
+ auto ADdotAD = dot_2d(DA, DA);
+ for (int i = 0; i < 4; i++) {
+ // assume ABCD is the rectangle, and P is the point to be judged
+ // P is inside ABCD iff. P's projection on AB lies within AB
+ // and P's projection on AD lies within AD
+
+ auto AP = pts1[i] - pts2[0];
+
+ auto APdotAB = dot_2d(AP, AB);
+ auto APdotAD = -dot_2d(AP, DA);
+
+ if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
+ (APdotAD < ADdotAD + EPS)) {
+ intersections[num++] = pts1[i];
+ }
+ }
+ }
+
+ // Reverse the check - check for vertices of rect2 inside rect1
+ {
+ const auto& AB = vec1[0];
+ const auto& DA = vec1[3];
+ auto ABdotAB = dot_2d(AB, AB);
+ auto ADdotAD = dot_2d(DA, DA);
+ for (int i = 0; i < 4; i++) {
+ auto AP = pts2[i] - pts1[0];
+
+ auto APdotAB = dot_2d(AP, AB);
+ auto APdotAD = -dot_2d(AP, DA);
+
+ if ((APdotAB > -EPS) && (APdotAD > -EPS) && (APdotAB < ABdotAB + EPS) &&
+ (APdotAD < ADdotAD + EPS)) {
+ intersections[num++] = pts2[i];
+ }
+ }
+ }
+
+ return num;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE int convex_hull_graham(
+ const Point<T> (&p)[24],
+ const int& num_in,
+ Point<T> (&q)[24],
+ bool shift_to_zero = false) {
+ assert(num_in >= 2);
+
+ // Step 1:
+ // Find point with minimum y
+ // if more than 1 points have the same minimum y,
+ // pick the one with the minimum x.
+ int t = 0;
+ for (int i = 1; i < num_in; i++) {
+ if (p[i].y < p[t].y || (p[i].y == p[t].y && p[i].x < p[t].x)) {
+ t = i;
+ }
+ }
+ auto& start = p[t]; // starting point
+
+ // Step 2:
+ // Subtract starting point from every point (for sorting in the next step)
+ for (int i = 0; i < num_in; i++) {
+ q[i] = p[i] - start;
+ }
+
+ // Swap the starting point to position 0
+ auto tmp = q[0];
+ q[0] = q[t];
+ q[t] = tmp;
+
+ // Step 3:
+ // Sort point 1 ~ num_in according to their relative cross-product values
+ // (essentially sorting according to angles)
+ // If the angles are the same, sort according to their distance to origin
+ T dist[24];
+#if defined(__CUDACC__) || __HCC__ == 1 || __HIP__ == 1
+ // compute distance to origin before sort, and sort them together with the
+ // points
+ for (int i = 0; i < num_in; i++) {
+ dist[i] = dot_2d<T>(q[i], q[i]);
+ }
+
+ // CUDA version
+ // In the future, we can potentially use thrust
+ // for sorting here to improve speed (though not guaranteed)
+ for (int i = 1; i < num_in - 1; i++) {
+ for (int j = i + 1; j < num_in; j++) {
+ T crossProduct = cross_2d<T>(q[i], q[j]);
+ if ((crossProduct < -1e-6) ||
+ (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
+ auto q_tmp = q[i];
+ q[i] = q[j];
+ q[j] = q_tmp;
+ auto dist_tmp = dist[i];
+ dist[i] = dist[j];
+ dist[j] = dist_tmp;
+ }
+ }
+ }
+#else
+ // CPU version
+ // std::sort(
+ // q + 1, q + num_in, [](const Point<T>& A, const Point<T>& B) -> bool {
+ // T temp = cross_2d<T>(A, B);
+
+ // if (fabs(temp) < 1e-6) {
+ // return dot_2d<T>(A, A) < dot_2d<T>(B, B);
+ // } else {
+ // return temp > 0;
+ // }
+ // });
+ for (int i = 0; i < num_in; i++) {
+ dist[i] = dot_2d<T>(q[i], q[i]);
+ }
+
+ for (int i = 1; i < num_in - 1; i++) {
+ for (int j = i + 1; j < num_in; j++) {
+ T crossProduct = cross_2d<T>(q[i], q[j]);
+ if ((crossProduct < -1e-6) ||
+ (fabs(crossProduct) < 1e-6 && dist[i] > dist[j])) {
+ auto q_tmp = q[i];
+ q[i] = q[j];
+ q[j] = q_tmp;
+ auto dist_tmp = dist[i];
+ dist[i] = dist[j];
+ dist[j] = dist_tmp;
+ }
+ }
+ }
+
+ // compute distance to origin after sort, since the points are now different.
+ for (int i = 0; i < num_in; i++) {
+ dist[i] = dot_2d<T>(q[i], q[i]);
+ }
+
+#endif
+
+ // Step 4:
+ // Make sure there are at least 2 points (that don't overlap with each other)
+ // in the stack
+ int k; // index of the non-overlapped second point
+ for (k = 1; k < num_in; k++) {
+ if (dist[k] > 1e-8) {
+ break;
+ }
+ }
+ if (k == num_in) {
+ // We reach the end, which means the convex hull is just one point
+ q[0] = p[t];
+ return 1;
+ }
+ q[1] = q[k];
+ int m = 2; // 2 points in the stack
+ // Step 5:
+ // Finally we can start the scanning process.
+ // When a non-convex relationship between the 3 points is found
+ // (either concave shape or duplicated points),
+ // we pop the previous point from the stack
+ // until the 3-point relationship is convex again, or
+ // until the stack only contains two points
+ for (int i = k + 1; i < num_in; i++) {
+ while (m > 1) {
+ auto q1 = q[i] - q[m - 2], q2 = q[m - 1] - q[m - 2];
+ // cross_2d() uses FMA and therefore computes round(round(q1.x*q2.y) -
+ // q2.x*q1.y) So it may not return 0 even when q1==q2. Therefore we
+ // compare round(q1.x*q2.y) and round(q2.x*q1.y) directly. (round means
+ // round to nearest floating point).
+ if (q1.x * q2.y >= q2.x * q1.y)
+ m--;
+ else
+ break;
+ }
+ // Using double also helps, but float can solve the issue for now.
+ // while (m > 1 && cross_2d(q[i] - q[m - 2], q[m - 1] - q[m - 2])
+ // >= 0) {
+ // m--;
+ // }
+ q[m++] = q[i];
+ }
+
+ // Step 6 (Optional):
+ // In general sense we need the original coordinates, so we
+ // need to shift the points back (reverting Step 2)
+ // But if we're only interested in getting the area/perimeter of the shape
+ // We can simply return.
+ if (!shift_to_zero) {
+ for (int i = 0; i < m; i++) {
+ q[i] += start;
+ }
+ }
+
+ return m;
+}
+
+template <typename T>
+HOST_DEVICE_INLINE T polygon_area(const Point<T> (&q)[24], const int& m) {
+ if (m <= 2) {
+ return 0;
+ }
+
+ T area = 0;
+ for (int i = 1; i < m - 1; i++) {
+ area += fabs(cross_2d<T>(q[i] - q[0], q[i + 1] - q[0]));
+ }
+
+ return area / 2.0;
+}
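+
+// Note: polygon_area is the triangle-fan (shoelace) formula: the polygon is
+// split into triangles (q[0], q[i], q[i + 1]) whose doubled areas are
+// |cross_2d(q[i] - q[0], q[i + 1] - q[0])|. For a unit square the two
+// triangle terms sum to 2, giving an area of 1.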
+
+template <typename T>
+HOST_DEVICE_INLINE T rotated_boxes_intersection(
+ const RotatedBox<T>& box1,
+ const RotatedBox<T>& box2) {
+ // There are up to 4 x 4 + 4 + 4 = 24 intersections (including dups) returned
+ // from rotated_rect_intersection_pts
+ Point<T> intersectPts[24], orderedPts[24];
+
+ Point<T> pts1[4];
+ Point<T> pts2[4];
+ get_rotated_vertices<T>(box1, pts1);
+ get_rotated_vertices<T>(box2, pts2);
+
+ int num = get_intersection_points<T>(pts1, pts2, intersectPts);
+
+ if (num <= 2) {
+ return 0.0;
+ }
+
+ // Convex Hull to order the intersection points in clockwise order and find
+ // the contour area.
+ int num_convex = convex_hull_graham<T>(intersectPts, num, orderedPts, true);
+ return polygon_area<T>(orderedPts, num_convex);
+}
+
+} // namespace
+
+template <typename T>
+HOST_DEVICE_INLINE T
+single_box_iou_rotated(T const* const box1_raw, T const* const box2_raw) {
+ // shift center to the middle point to achieve higher precision in result
+ RotatedBox<T> box1, box2;
+ auto center_shift_x = (box1_raw[0] + box2_raw[0]) / 2.0;
+ auto center_shift_y = (box1_raw[1] + box2_raw[1]) / 2.0;
+ box1.x_ctr = box1_raw[0] - center_shift_x;
+ box1.y_ctr = box1_raw[1] - center_shift_y;
+ box1.w = box1_raw[2];
+ box1.h = box1_raw[3];
+ box1.a = box1_raw[4];
+ box2.x_ctr = box2_raw[0] - center_shift_x;
+ box2.y_ctr = box2_raw[1] - center_shift_y;
+ box2.w = box2_raw[2];
+ box2.h = box2_raw[3];
+ box2.a = box2_raw[4];
+
+ T area1 = box1.w * box1.h;
+ T area2 = box2.w * box2.h;
+ if (area1 < 1e-14 || area2 < 1e-14) {
+ return 0.f;
+ }
+
+ T intersection = rotated_boxes_intersection<T>(box1, box2);
+ T iou = intersection / (area1 + area2 - intersection);
+ return iou;
+}
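+
+// Usage sketch: for two identical boxes such as {0, 0, 2, 2, 30} the
+// intersection equals each box's area (4), so the IoU evaluates to
+// 4 / (4 + 4 - 4) == 1; degenerate boxes (area below 1e-14) return 0 early.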
+
+} // namespace detectron2
diff --git a/detectron2/layers/csrc/cocoeval/cocoeval.cpp b/detectron2/layers/csrc/cocoeval/cocoeval.cpp
new file mode 100644
index 0000000000000000000000000000000000000000..0a5b7b907c06720fefc77b0dfd921b8ec3ecf2be
--- /dev/null
+++ b/detectron2/layers/csrc/cocoeval/cocoeval.cpp
@@ -0,0 +1,507 @@
+// Copyright (c) Facebook, Inc. and its affiliates.
+#include "cocoeval.h"
+#include <time.h>
+#include <algorithm>
+#include <cstdint>
+#include <numeric>
+
+using namespace pybind11::literals;
+
+namespace detectron2 {
+
+namespace COCOeval {
+
+// Sort detections from highest score to lowest, such that
+// detection_instances[detection_sorted_indices[t]] >=
+// detection_instances[detection_sorted_indices[t+1]]. Use stable_sort to match
+// original COCO API
+void SortInstancesByDetectionScore(
+ const std::vector<InstanceAnnotation>& detection_instances,
+ std::vector<uint64_t>* detection_sorted_indices) {
+ detection_sorted_indices->resize(detection_instances.size());
+ std::iota(
+ detection_sorted_indices->begin(), detection_sorted_indices->end(), 0);
+ std::stable_sort(
+ detection_sorted_indices->begin(),
+ detection_sorted_indices->end(),
+ [&detection_instances](size_t j1, size_t j2) {
+ return detection_instances[j1].score > detection_instances[j2].score;
+ });
+}
+
+// Partition the ground truth objects based on whether or not to ignore them
+// based on area
+void SortInstancesByIgnore(
+ const std::array<double, 2>& area_range,
+ const std::vector<InstanceAnnotation>& ground_truth_instances,
+ std::vector<uint64_t>* ground_truth_sorted_indices,
+ std::vector<bool>* ignores) {
+ ignores->clear();
+ ignores->reserve(ground_truth_instances.size());
+ for (auto o : ground_truth_instances) {
+ ignores->push_back(
+ o.ignore || o.area < area_range[0] || o.area > area_range[1]);
+ }
+
+ ground_truth_sorted_indices->resize(ground_truth_instances.size());
+ std::iota(
+ ground_truth_sorted_indices->begin(),
+ ground_truth_sorted_indices->end(),
+ 0);
+ std::stable_sort(
+ ground_truth_sorted_indices->begin(),
+ ground_truth_sorted_indices->end(),
+ [&ignores](size_t j1, size_t j2) {
+ return (int)(*ignores)[j1] < (int)(*ignores)[j2];
+ });
+}
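+
+// Note: the stable sort above places all non-ignored ground truth instances
+// before the ignored ones (keeping their original relative order), which lets
+// MatchDetectionsToGroundTruth stop scanning ground truth as soon as it
+// reaches the first ignored instance after finding a regular match.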
+
+// For each IOU threshold, greedily match each detected instance to a ground
+// truth instance (if possible) and store the results
+void MatchDetectionsToGroundTruth(
+ const std::vector<InstanceAnnotation>& detection_instances,
+ const std::vector<uint64_t>& detection_sorted_indices,
+ const std::vector<InstanceAnnotation>& ground_truth_instances,
+ const std::vector<uint64_t>& ground_truth_sorted_indices,
+ const std::vector<bool>& ignores,
+ const std::vector<std::vector<double>>& ious,
+ const std::vector<double>& iou_thresholds,
+ const std::array<double, 2>& area_range,
+ ImageEvaluation* results) {
+ // Initialize memory to store return data matches and ignore
+ const int num_iou_thresholds = iou_thresholds.size();
+ const int num_ground_truth = ground_truth_sorted_indices.size();
+ const int num_detections = detection_sorted_indices.size();
+ std::vector<uint64_t> ground_truth_matches(
+ num_iou_thresholds * num_ground_truth, 0);
+ std::vector<uint64_t>& detection_matches = results->detection_matches;
+ std::vector<bool>& detection_ignores = results->detection_ignores;
+ std::vector<bool>& ground_truth_ignores = results->ground_truth_ignores;
+ detection_matches.resize(num_iou_thresholds * num_detections, 0);
+ detection_ignores.resize(num_iou_thresholds * num_detections, false);
+ ground_truth_ignores.resize(num_ground_truth);
+ for (auto g = 0; g < num_ground_truth; ++g) {
+ ground_truth_ignores[g] = ignores[ground_truth_sorted_indices[g]];
+ }
+
+ for (auto t = 0; t < num_iou_thresholds; ++t) {
+ for (auto d = 0; d < num_detections; ++d) {
+ // information about best match so far (match=-1 -> unmatched)
+ double best_iou = std::min(iou_thresholds[t], 1 - 1e-10);
+ int match = -1;
+ for (auto g = 0; g < num_ground_truth; ++g) {
+ // if this ground truth instance is already matched and not a
+ // crowd, it cannot be matched to another detection
+ if (ground_truth_matches[t * num_ground_truth + g] > 0 &&
+ !ground_truth_instances[ground_truth_sorted_indices[g]].is_crowd) {
+ continue;
+ }
+
+ // if detected instance matched to a regular ground truth
+ // instance, we can break on the first ground truth instance
+ // tagged as ignore (because they are sorted by the ignore tag)
+ if (match >= 0 && !ground_truth_ignores[match] &&
+ ground_truth_ignores[g]) {
+ break;
+ }
+
+ // if IOU overlap is the best so far, store the match appropriately
+ if (ious[d][ground_truth_sorted_indices[g]] >= best_iou) {
+ best_iou = ious[d][ground_truth_sorted_indices[g]];
+ match = g;
+ }
+ }
+ // if match was made, store id of match for both detection and
+ // ground truth
+ if (match >= 0) {
+ detection_ignores[t * num_detections + d] = ground_truth_ignores[match];
+ detection_matches[t * num_detections + d] =
+ ground_truth_instances[ground_truth_sorted_indices[match]].id;
+ ground_truth_matches[t * num_ground_truth + match] =
+ detection_instances[detection_sorted_indices[d]].id;
+ }
+
+ // set unmatched detections outside of area range to ignore
+ const InstanceAnnotation& detection =
+ detection_instances[detection_sorted_indices[d]];
+ detection_ignores[t * num_detections + d] =
+ detection_ignores[t * num_detections + d] ||
+ (detection_matches[t * num_detections + d] == 0 &&
+ (detection.area < area_range[0] || detection.area > area_range[1]));
+ }
+ }
+
+ // store detection score results
+ results->detection_scores.resize(detection_sorted_indices.size());
+ for (size_t d = 0; d < detection_sorted_indices.size(); ++d) {
+ results->detection_scores[d] =
+ detection_instances[detection_sorted_indices[d]].score;
+ }
+}
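+
+// Note: detection_matches and detection_ignores above are flattened
+// [num_iou_thresholds x num_detections] arrays indexed as
+// t * num_detections + d; ground_truth_matches is likewise indexed as
+// t * num_ground_truth + g.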
+
+std::vector<ImageEvaluation> EvaluateImages(
+ const std::vector<std::array<double, 2>>& area_ranges,
+ int max_detections,
+ const std::vector<double>& iou_thresholds,
+ const ImageCategoryInstances<std::vector<double>>& image_category_ious,
+ const ImageCategoryInstances<InstanceAnnotation>&
+ image_category_ground_truth_instances,
+ const ImageCategoryInstances<InstanceAnnotation>&
+ image_category_detection_instances) {
+ const int num_area_ranges = area_ranges.size();
+ const int num_images = image_category_ground_truth_instances.size();
+ const int num_categories =
+ image_category_ious.size() > 0 ? image_category_ious[0].size() : 0;
+ std::vector<uint64_t> detection_sorted_indices;
+ std::vector<uint64_t> ground_truth_sorted_indices;
+ std::vector<bool> ignores;
+ std::vector<ImageEvaluation> results_all(
+ num_images * num_area_ranges * num_categories);
+
+ // Store results for each image, category, and area range combination. Results
+ // for each IOU threshold are packed into the same ImageEvaluation object
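+ // (the flat index used below is
+ // c * num_area_ranges * num_images + a * num_images + i, i.e. results are
+ // grouped by category, then by area range, then by image)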
+ for (auto i = 0; i < num_images; ++i) {
+ for (auto c = 0; c < num_categories; ++c) {
+ const std::vector<InstanceAnnotation>& ground_truth_instances =
+ image_category_ground_truth_instances[i][c];
+ const std::vector<InstanceAnnotation>& detection_instances =
+ image_category_detection_instances[i][c];
+
+ SortInstancesByDetectionScore(
+ detection_instances, &detection_sorted_indices);
+ if ((int)detection_sorted_indices.size() > max_detections) {
+ detection_sorted_indices.resize(max_detections);
+ }
+
+ for (size_t a = 0; a < area_ranges.size(); ++a) {
+ SortInstancesByIgnore(
+ area_ranges[a],
+ ground_truth_instances,
+ &ground_truth_sorted_indices,
+ &ignores);
+
+ MatchDetectionsToGroundTruth(
+ detection_instances,
+ detection_sorted_indices,
+ ground_truth_instances,
+ ground_truth_sorted_indices,
+ ignores,
+ image_category_ious[i][c],
+ iou_thresholds,
+ area_ranges[a],
+ &results_all
+ [c * num_area_ranges * num_images + a * num_images + i]);
+ }
+ }
+ }
+
+ return results_all;
+}
+
+// Convert a python list to a vector
+template <typename T>
+std::vector