AK391 committed on
Commit
159f437
1 Parent(s): 82a3407
This view is limited to 50 files because the commit contains more changes; see the raw diff for the rest.
Files changed (50)
  1. configs/Base-C2_L_R5021k_640b64_4x.yaml +82 -0
  2. configs/Base-DeformDETR_L_R50_4x.yaml +59 -0
  3. configs/Base_OVCOCO_C4_1x.yaml +31 -0
  4. configs/BoxSup-C2_LCOCO_CLIP_SwinB_896b32_4x.yaml +19 -0
  5. configs/BoxSup-C2_L_CLIP_R5021k_640b64_4x.yaml +4 -0
  6. configs/BoxSup-C2_L_CLIP_SwinB_896b32_4x.yaml +17 -0
  7. configs/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.yaml +6 -0
  8. configs/BoxSup-C2_Lbase_CLIP_SwinB_896b32_4x.yaml +19 -0
  9. configs/BoxSup-DeformDETR_L_R50_2x.yaml +3 -0
  10. configs/BoxSup-DeformDETR_L_R50_4x.yaml +1 -0
  11. configs/BoxSup_OVCOCO_CLIP_R50_1x.yaml +1 -0
  12. configs/Detic_DeformDETR_LI_R50_4x_ft4x.yaml +22 -0
  13. configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +43 -0
  14. configs/Detic_LI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +43 -0
  15. configs/Detic_LI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +27 -0
  16. configs/Detic_LI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +33 -0
  17. configs/Detic_LbaseCCcapimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +30 -0
  18. configs/Detic_LbaseCCimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +27 -0
  19. configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml +27 -0
  20. configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_predicted.yaml +27 -0
  21. configs/Detic_LbaseI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml +33 -0
  22. configs/Detic_OVCOCO_CLIP_R50_1x_caption.yaml +33 -0
  23. configs/Detic_OVCOCO_CLIP_R50_1x_max-size.yaml +30 -0
  24. configs/Detic_OVCOCO_CLIP_R50_1x_max-size_caption.yaml +35 -0
  25. datasets/.DS_Store +0 -0
  26. datasets/README.md +207 -0
  27. datasets/metadata/Objects365_names_fix.csv +365 -0
  28. datasets/metadata/coco_clip_a+cname.npy +0 -0
  29. datasets/metadata/lvis_v1_clip_a+cname.npy +0 -0
  30. datasets/metadata/lvis_v1_train_cat_info.json +0 -0
  31. datasets/metadata/o365_clip_a+cnamefix.npy +0 -0
  32. datasets/metadata/oid_clip_a+cname.npy +0 -0
  33. demo.py +204 -0
  34. detic/__init__.py +19 -0
  35. detic/config.py +131 -0
  36. detic/custom_solver.py +78 -0
  37. detic/data/custom_build_augmentation.py +51 -0
  38. detic/data/custom_dataset_dataloader.py +331 -0
  39. detic/data/custom_dataset_mapper.py +280 -0
  40. detic/data/datasets/cc.py +23 -0
  41. detic/data/datasets/coco_zeroshot.py +121 -0
  42. detic/data/datasets/imagenet.py +41 -0
  43. detic/data/datasets/lvis_22k_categories.py +0 -0
  44. detic/data/datasets/lvis_v1.py +155 -0
  45. detic/data/datasets/objects365.py +770 -0
  46. detic/data/datasets/oid.py +535 -0
  47. detic/data/datasets/register_oid.py +122 -0
  48. detic/data/tar_dataset.py +138 -0
  49. detic/data/transforms/custom_augmentation_impl.py +60 -0
  50. detic/data/transforms/custom_transform.py +114 -0
configs/Base-C2_L_R5021k_640b64_4x.yaml ADDED
@@ -0,0 +1,82 @@
+ MODEL:
+   META_ARCHITECTURE: "CustomRCNN"
+   MASK_ON: True
+   PROPOSAL_GENERATOR:
+     NAME: "CenterNet"
+   WEIGHTS: "models/resnet50_miil_21k.pkl"
+   BACKBONE:
+     NAME: build_p67_timm_fpn_backbone
+   TIMM:
+     BASE_NAME: resnet50_in21k
+   FPN:
+     IN_FEATURES: ["layer3", "layer4", "layer5"]
+   PIXEL_MEAN: [123.675, 116.280, 103.530]
+   PIXEL_STD: [58.395, 57.12, 57.375]
+   ROI_HEADS:
+     NAME: DeticCascadeROIHeads
+     IN_FEATURES: ["p3", "p4", "p5"]
+     IOU_THRESHOLDS: [0.6]
+     NUM_CLASSES: 1203
+     SCORE_THRESH_TEST: 0.02
+     NMS_THRESH_TEST: 0.5
+   ROI_BOX_CASCADE_HEAD:
+     IOUS: [0.6, 0.7, 0.8]
+   ROI_BOX_HEAD:
+     NAME: "FastRCNNConvFCHead"
+     NUM_FC: 2
+     POOLER_RESOLUTION: 7
+     CLS_AGNOSTIC_BBOX_REG: True
+     MULT_PROPOSAL_SCORE: True
+
+     USE_SIGMOID_CE: True
+     USE_FED_LOSS: True
+   ROI_MASK_HEAD:
+     NAME: "MaskRCNNConvUpsampleHead"
+     NUM_CONV: 4
+     POOLER_RESOLUTION: 14
+     CLS_AGNOSTIC_MASK: True
+   CENTERNET:
+     NUM_CLASSES: 1203
+     REG_WEIGHT: 1.
+     NOT_NORM_REG: True
+     ONLY_PROPOSAL: True
+     WITH_AGN_HM: True
+     INFERENCE_TH: 0.0001
+     PRE_NMS_TOPK_TRAIN: 4000
+     POST_NMS_TOPK_TRAIN: 2000
+     PRE_NMS_TOPK_TEST: 1000
+     POST_NMS_TOPK_TEST: 256
+     NMS_TH_TRAIN: 0.9
+     NMS_TH_TEST: 0.9
+     POS_WEIGHT: 0.5
+     NEG_WEIGHT: 0.5
+     IGNORE_HIGH_FP: 0.85
+ DATASETS:
+   TRAIN: ("lvis_v1_train",)
+   TEST: ("lvis_v1_val",)
+ DATALOADER:
+   SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+   REPEAT_THRESHOLD: 0.001
+   NUM_WORKERS: 8
+ TEST:
+   DETECTIONS_PER_IMAGE: 300
+ SOLVER:
+   LR_SCHEDULER_NAME: "WarmupCosineLR"
+   CHECKPOINT_PERIOD: 1000000000
+   WARMUP_ITERS: 10000
+   WARMUP_FACTOR: 0.0001
+   USE_CUSTOM_SOLVER: True
+   OPTIMIZER: "ADAMW"
+   MAX_ITER: 90000
+   IMS_PER_BATCH: 64
+   BASE_LR: 0.0002
+   CLIP_GRADIENTS:
+     ENABLED: True
+ INPUT:
+   FORMAT: RGB
+   CUSTOM_AUG: EfficientDetResizeCrop
+   TRAIN_SIZE: 640
+ OUTPUT_DIR: "./output/Detic/auto"
+ EVAL_PROPOSAL_AR: False
+ VERSION: 2
+ FP16: True
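The configs that follow inherit from this file via `_BASE_`. For orientation, a minimal sketch of loading it through detectron2's config system, mirroring the `setup_cfg` logic in `demo.py` later in this commit (the `sys.path` insert assumes the CenterNet2 submodule layout used there):

```python
# Minimal sketch of loading this base config (mirrors setup_cfg in demo.py below).
import sys
sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')

from detectron2.config import get_cfg
from centernet.config import add_centernet_config
from detic.config import add_detic_config

cfg = get_cfg()
add_centernet_config(cfg)  # registers the MODEL.CENTERNET.* keys used above
add_detic_config(cfg)      # registers the Detic keys (see detic/config.py in this commit)
cfg.merge_from_file('configs/Base-C2_L_R5021k_640b64_4x.yaml')
print(cfg.MODEL.ROI_HEADS.NUM_CLASSES)  # 1203 (LVIS v1 categories)
```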
configs/Base-DeformDETR_L_R50_4x.yaml ADDED
@@ -0,0 +1,59 @@
+ MODEL:
+   META_ARCHITECTURE: "DeformableDetr"
+   WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
+   PIXEL_MEAN: [123.675, 116.280, 103.530]
+   PIXEL_STD: [58.395, 57.120, 57.375]
+   MASK_ON: False
+   RESNETS:
+     DEPTH: 50
+     STRIDE_IN_1X1: False
+     OUT_FEATURES: ["res3", "res4", "res5"]
+   DETR:
+     CLS_WEIGHT: 2.0
+     GIOU_WEIGHT: 2.0
+     L1_WEIGHT: 5.0
+     NUM_OBJECT_QUERIES: 300
+     DIM_FEEDFORWARD: 1024
+     WITH_BOX_REFINE: True
+     TWO_STAGE: True
+     NUM_CLASSES: 1203
+     USE_FED_LOSS: True
+ DATASETS:
+   TRAIN: ("lvis_v1_train",)
+   TEST: ("lvis_v1_val",)
+ SOLVER:
+   CHECKPOINT_PERIOD: 10000000
+   USE_CUSTOM_SOLVER: True
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0002
+   STEPS: (150000,)
+   MAX_ITER: 180000
+   WARMUP_FACTOR: 1.0
+   WARMUP_ITERS: 10
+   WEIGHT_DECAY: 0.0001
+   OPTIMIZER: "ADAMW"
+   BACKBONE_MULTIPLIER: 0.1
+   CLIP_GRADIENTS:
+     ENABLED: True
+     CLIP_TYPE: "full_model"
+     CLIP_VALUE: 0.01
+     NORM_TYPE: 2.0
+   CUSTOM_MULTIPLIER: 0.1
+   CUSTOM_MULTIPLIER_NAME: ['reference_points', 'sampling_offsets']
+ INPUT:
+   FORMAT: "RGB"
+   MIN_SIZE_TRAIN: (480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800)
+   CROP:
+     ENABLED: True
+     TYPE: "absolute_range"
+     SIZE: (384, 600)
+   CUSTOM_AUG: "DETR"
+ TEST:
+   DETECTIONS_PER_IMAGE: 300
+ DATALOADER:
+   FILTER_EMPTY_ANNOTATIONS: False
+   NUM_WORKERS: 4
+   SAMPLER_TRAIN: "RepeatFactorTrainingSampler"
+   REPEAT_THRESHOLD: 0.001
+ OUTPUT_DIR: "output/Detic/auto"
+ VERSION: 2
configs/Base_OVCOCO_C4_1x.yaml ADDED
@@ -0,0 +1,31 @@
+ MODEL:
+   META_ARCHITECTURE: "CustomRCNN"
+   RPN:
+     PRE_NMS_TOPK_TEST: 6000
+     POST_NMS_TOPK_TEST: 1000
+   ROI_HEADS:
+     NAME: "CustomRes5ROIHeads"
+   WEIGHTS: "detectron2://ImageNetPretrained/MSRA/R-50.pkl"
+   RESNETS:
+     DEPTH: 50
+   ROI_BOX_HEAD:
+     CLS_AGNOSTIC_BBOX_REG: True
+     USE_SIGMOID_CE: True
+     USE_ZEROSHOT_CLS: True
+     ZEROSHOT_WEIGHT_PATH: 'datasets/metadata/coco_clip_a+cname.npy'
+     IGNORE_ZERO_CATS: True
+     CAT_FREQ_PATH: 'datasets/coco/zero-shot/instances_train2017_seen_2_oriorder_cat_info.json'
+ DATASETS:
+   TRAIN: ("coco_zeroshot_train_oriorder",)
+   TEST: ("coco_generalized_zeroshot_val",)
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.02
+   STEPS: (60000, 80000)
+   MAX_ITER: 90000
+   CHECKPOINT_PERIOD: 1000000000
+ INPUT:
+   MIN_SIZE_TRAIN: (800,)
+ VERSION: 2
+ OUTPUT_DIR: output/Detic-COCO/auto
+ FP16: True
configs/BoxSup-C2_LCOCO_CLIP_SwinB_896b32_4x.yaml ADDED
@@ -0,0 +1,19 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+   WEIGHTS: "models/swin_base_patch4_window7_224_22k.pkl"
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+ INPUT:
+   TRAIN_SIZE: 896
+ DATASETS:
+   TRAIN: ("lvis_v1_train+coco",)
configs/BoxSup-C2_L_CLIP_R5021k_640b64_4x.yaml ADDED
@@ -0,0 +1,4 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
configs/BoxSup-C2_L_CLIP_SwinB_896b32_4x.yaml ADDED
@@ -0,0 +1,17 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+   WEIGHTS: "models/swin_base_patch4_window7_224_22k.pkl"
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+ INPUT:
+   TRAIN_SIZE: 896
configs/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.yaml ADDED
@@ -0,0 +1,6 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare",)
configs/BoxSup-C2_Lbase_CLIP_SwinB_896b32_4x.yaml ADDED
@@ -0,0 +1,19 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+   WEIGHTS: "models/swin_base_patch4_window7_224_22k.pkl"
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+ INPUT:
+   TRAIN_SIZE: 896
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare",)
configs/BoxSup-DeformDETR_L_R50_2x.yaml ADDED
@@ -0,0 +1,3 @@
+ _BASE_: "Base-DeformDETR_L_R50_4x.yaml"
+ SOLVER:
+   IMS_PER_BATCH: 16
configs/BoxSup-DeformDETR_L_R50_4x.yaml ADDED
@@ -0,0 +1 @@
+ _BASE_: "Base-DeformDETR_L_R50_4x.yaml"
configs/BoxSup_OVCOCO_CLIP_R50_1x.yaml ADDED
@@ -0,0 +1 @@
+ _BASE_: "Base_OVCOCO_C4_1x.yaml"
configs/Detic_DeformDETR_LI_R50_4x_ft4x.yaml ADDED
@@ -0,0 +1,22 @@
+ _BASE_: "Base-DeformDETR_L_R50_4x.yaml"
+ MODEL:
+   WEIGHTS: "models/BoxSup-DeformDETR_L_R50_4x.pth"
+ INPUT:
+   CUSTOM_AUG: ResizeShortestEdge
+   MIN_SIZE_TRAIN_SAMPLING: range
+   MIN_SIZE_TRAIN: [480, 800]
+ DATASETS:
+   TRAIN: ("lvis_v1_train","imagenet_lvis_v1")
+   TEST: ("lvis_v1_val",)
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [4, 16]
+   USE_RFS: [True, False]
+   DATASET_MIN_SIZES: [[480, 800], [240, 400]]
+   DATASET_MAX_SIZES: [1333, 667]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+ WITH_IMAGE_LABELS: True
configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,43 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   WEIGHTS: "models/BoxSup-C2_LCOCO_CLIP_SwinB_896b32_4x.pth"
+   DYNAMIC_CLASSIFIER: True
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+     ZEROSHOT_WEIGHT_PATH: 'datasets/metadata/lvis-21k_clip_a+cname.npy'
+     USE_FED_LOSS: False # Federated loss is enabled when DYNAMIC_CLASSIFIER is on
+   ROI_HEADS:
+     NUM_CLASSES: 22047
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+   RESET_CLS_TESTS: True
+   TEST_CLASSIFIERS: ("datasets/metadata/oid_clip_a+cname.npy","datasets/metadata/o365_clip_a+cnamefix.npy")
+   TEST_NUM_CLASSES: [500, 365]
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train+coco","imagenet_lvis-22k")
+   TEST: ('oid_val_expanded', 'objects365_v2_val')
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 16]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [4, 16]
+   DATASET_INPUT_SIZE: [896, 448]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 4
+   USE_TAR_DATASET: True
+ WITH_IMAGE_LABELS: True
configs/Detic_LI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,43 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   WEIGHTS: "models/BoxSup-C2_L_CLIP_SwinB_896b32_4x.pth"
+   DYNAMIC_CLASSIFIER: True
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+     ZEROSHOT_WEIGHT_PATH: 'datasets/metadata/lvis-21k_clip_a+cname.npy'
+     USE_FED_LOSS: False # Federated loss is enabled when DYNAMIC_CLASSIFIER is on
+   ROI_HEADS:
+     NUM_CLASSES: 22047
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+   RESET_CLS_TESTS: True
+   TEST_CLASSIFIERS: ("datasets/metadata/oid_clip_a+cname.npy","datasets/metadata/o365_clip_a+cnamefix.npy")
+   TEST_NUM_CLASSES: [500, 365]
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train","imagenet_lvis-22k")
+   TEST: ('oid_val_expanded', 'objects365_v2_val')
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 16]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [4, 16]
+   DATASET_INPUT_SIZE: [896, 448]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 4
+   USE_TAR_DATASET: True
+ WITH_IMAGE_LABELS: True
configs/Detic_LI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,27 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+   WEIGHTS: "models/BoxSup-C2_L_CLIP_R5021k_640b64_4x.pth"
+ SOLVER:
+   MAX_ITER: 90000
+   IMS_PER_BATCH: 64
+   BASE_LR: 0.0002
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train","imagenet_lvis_v1")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [8, 32]
+   DATASET_INPUT_SIZE: [640, 320]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_LI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,33 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+   WEIGHTS: "models/BoxSup-C2_L_CLIP_SwinB_896b32_4x.pth"
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train","imagenet_lvis_v1")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [4, 16]
+   DATASET_INPUT_SIZE: [896, 448]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_LbaseCCcapimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,30 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   WITH_CAPTION: True
+   SYNC_CAPTION_BATCH: True
+   ROI_BOX_HEAD:
+     ADD_IMAGE_BOX: True # caption loss is added to the image-box
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+   WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
+ SOLVER:
+   MAX_ITER: 90000
+   IMS_PER_BATCH: 64
+   BASE_LR: 0.0002
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare","cc3m_v1_train_tags")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [8, 32]
+   DATASET_INPUT_SIZE: [640, 320]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'captiontag']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_LbaseCCimg_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,27 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+   WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
+ SOLVER:
+   MAX_ITER: 90000
+   IMS_PER_BATCH: 64
+   BASE_LR: 0.0002
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare","cc3m_v1_train_tags")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [8, 32]
+   DATASET_INPUT_SIZE: [640, 320]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,27 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+   WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
+ SOLVER:
+   MAX_ITER: 90000
+   IMS_PER_BATCH: 64
+   BASE_LR: 0.0002
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare","imagenet_lvis_v1")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [8, 32]
+   DATASET_INPUT_SIZE: [640, 320]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_LbaseI_CLIP_R5021k_640b64_4x_ft4x_predicted.yaml ADDED
@@ -0,0 +1,27 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_score'
+   WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_R5021k_640b64_4x.pth"
+ SOLVER:
+   MAX_ITER: 90000
+   IMS_PER_BATCH: 64
+   BASE_LR: 0.0002
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare","imagenet_lvis_v1")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [8, 32]
+   DATASET_INPUT_SIZE: [640, 320]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_LbaseI_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml ADDED
@@ -0,0 +1,33 @@
+ _BASE_: "Base-C2_L_R5021k_640b64_4x.yaml"
+ MODEL:
+   ROI_BOX_HEAD:
+     USE_ZEROSHOT_CLS: True
+     IMAGE_LABEL_LOSS: 'max_size'
+   BACKBONE:
+     NAME: build_swintransformer_fpn_backbone
+   SWIN:
+     SIZE: B-22k
+   FPN:
+     IN_FEATURES: ["swin1", "swin2", "swin3"]
+   WEIGHTS: "models/BoxSup-C2_Lbase_CLIP_SwinB_896b32_4x.pth"
+ SOLVER:
+   MAX_ITER: 180000
+   IMS_PER_BATCH: 32
+   BASE_LR: 0.0001
+   WARMUP_ITERS: 1000
+   WARMUP_FACTOR: 0.001
+ DATASETS:
+   TRAIN: ("lvis_v1_train_norare","imagenet_lvis_v1")
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [4, 16]
+   DATASET_INPUT_SIZE: [896, 448]
+   USE_RFS: [True, False]
+   DATASET_INPUT_SCALE: [[0.1, 2.0], [0.5, 1.5]]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_OVCOCO_CLIP_R50_1x_caption.yaml ADDED
@@ -0,0 +1,33 @@
+ _BASE_: "Base_OVCOCO_C4_1x.yaml"
+ MODEL:
+   WEIGHTS: "models/BoxSup_OVCOCO_CLIP_R50_1x.pth"
+   WITH_CAPTION: True
+   SYNC_CAPTION_BATCH: True
+   ROI_BOX_HEAD:
+     WS_NUM_PROPS: 1
+     ADD_IMAGE_BOX: True
+     NEG_CAP_WEIGHT: 1.0
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.02
+   STEPS: (60000, 80000)
+   MAX_ITER: 90000
+ DATASETS:
+   TRAIN: ("coco_zeroshot_train_oriorder", "coco_caption_train_tags")
+ INPUT:
+   CUSTOM_AUG: ResizeShortestEdge
+   MIN_SIZE_TRAIN_SAMPLING: range
+   MIN_SIZE_TRAIN: (800, 800)
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [2, 8]
+   USE_RFS: [False, False]
+   DATASET_MIN_SIZES: [[800, 800], [400, 400]]
+   DATASET_MAX_SIZES: [1333, 667]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'caption']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_OVCOCO_CLIP_R50_1x_max-size.yaml ADDED
@@ -0,0 +1,30 @@
+ _BASE_: "Base_OVCOCO_C4_1x.yaml"
+ MODEL:
+   WEIGHTS: "models/BoxSup_OVCOCO_CLIP_R50_1x.pth"
+   ROI_BOX_HEAD:
+     WS_NUM_PROPS: 32
+     IMAGE_LABEL_LOSS: 'max_size'
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.02
+   STEPS: (60000, 80000)
+   MAX_ITER: 90000
+ DATASETS:
+   TRAIN: ("coco_zeroshot_train_oriorder", "coco_caption_train_tags")
+ INPUT:
+   CUSTOM_AUG: ResizeShortestEdge
+   MIN_SIZE_TRAIN_SAMPLING: range
+   MIN_SIZE_TRAIN: (800, 800)
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [2, 8]
+   USE_RFS: [False, False]
+   DATASET_MIN_SIZES: [[800, 800], [400, 400]]
+   DATASET_MAX_SIZES: [1333, 667]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'image']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
configs/Detic_OVCOCO_CLIP_R50_1x_max-size_caption.yaml ADDED
@@ -0,0 +1,35 @@
+ _BASE_: "Base_OVCOCO_C4_1x.yaml"
+ MODEL:
+   WEIGHTS: "models/BoxSup_OVCOCO_CLIP_R50_1x.pth"
+   WITH_CAPTION: True
+   SYNC_CAPTION_BATCH: True
+   ROI_BOX_HEAD:
+     WS_NUM_PROPS: 32
+     ADD_IMAGE_BOX: True # caption loss is added to the image-box
+     IMAGE_LABEL_LOSS: 'max_size'
+
+     NEG_CAP_WEIGHT: 1.0
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.02
+   STEPS: (60000, 80000)
+   MAX_ITER: 90000
+ DATASETS:
+   TRAIN: ("coco_zeroshot_train_oriorder", "coco_caption_train_tags")
+ INPUT:
+   CUSTOM_AUG: ResizeShortestEdge
+   MIN_SIZE_TRAIN_SAMPLING: range
+   MIN_SIZE_TRAIN: (800, 800)
+ DATALOADER:
+   SAMPLER_TRAIN: "MultiDatasetSampler"
+   DATASET_RATIO: [1, 4]
+   USE_DIFF_BS_SIZE: True
+   DATASET_BS: [2, 8]
+   USE_RFS: [False, False]
+   DATASET_MIN_SIZES: [[800, 800], [400, 400]]
+   DATASET_MAX_SIZES: [1333, 667]
+   FILTER_EMPTY_ANNOTATIONS: False
+   MULTI_DATASET_GROUPING: True
+   DATASET_ANN: ['box', 'captiontag']
+   NUM_WORKERS: 8
+ WITH_IMAGE_LABELS: True
datasets/.DS_Store ADDED
Binary file (6.15 kB).
datasets/README.md ADDED
@@ -0,0 +1,207 @@
+ # Prepare datasets for Detic
+
+ The basic training of our model uses [LVIS](https://www.lvisdataset.org/) (which uses [COCO](https://cocodataset.org/) images) and [ImageNet-21K](https://www.image-net.org/download.php).
+ Some models are trained on [Conceptual Caption (CC3M)](https://ai.google.com/research/ConceptualCaptions/).
+ Optionally, we use [Objects365](https://www.objects365.org/) and [OpenImages (Challenge 2019 version)](https://storage.googleapis.com/openimages/web/challenge2019.html) for cross-dataset evaluation.
+ Before processing, please download the (selected) datasets from the official websites and place or symlink them under `$Detic_ROOT/datasets/`.
+
+ ```
+ $Detic_ROOT/datasets/
+   metadata/
+   lvis/
+   coco/
+   imagenet/
+   cc3m/
+   objects365/
+   oid/
+ ```
+ `metadata/` is our preprocessed meta-data (included in the repo). See the [Metadata](#Metadata) section below for details.
+ Please follow the instructions below to pre-process the individual datasets.
+
+ ### COCO and LVIS
+
+ First, download the COCO and LVIS data and place them as follows:
+
+ ```
+ lvis/
+   lvis_v1_train.json
+   lvis_v1_val.json
+ coco/
+   train2017/
+   val2017/
+   annotations/
+     captions_train2017.json
+     instances_train2017.json
+     instances_val2017.json
+ ```
+
+ Next, prepare the open-vocabulary LVIS training set using
+
+ ```
+ python tools/remove_lvis_rare.py --ann datasets/lvis/lvis_v1_train.json
+ ```
+
+ This will generate `datasets/lvis/lvis_v1_train_norare.json`.
+
+ ### ImageNet-21K
+
+ The ImageNet-21K folder should look like:
+ ```
+ imagenet/
+   ImageNet-21K/
+     n01593028.tar
+     n01593282.tar
+     ...
+ ```
+
+ We first unzip the classes that overlap with LVIS (we work directly with the .tar files for the remaining classes) and convert them into the LVIS annotation format.
+
+ ~~~
+ mkdir imagenet/annotations
+ python tools/unzip_imagenet_lvis.py --dst_path datasets/imagenet/ImageNet-LVIS
+ python tools/create_imagenetlvis_json.py --imagenet_path datasets/imagenet/ImageNet-LVIS --out_path datasets/imagenet/annotations/imagenet_lvis_image_info.json
+ ~~~
+ This creates `datasets/imagenet/annotations/imagenet_lvis_image_info.json`.
+
+ [Optional] To train with all the 21K classes, run
+
+ ~~~
+ python tools/get_imagenet_21k_full_tar_json.py
+ python tools/create_lvis_21k.py
+ ~~~
+ This creates `datasets/imagenet/annotations/imagenet-21k_image_info_lvis-21k.json` and `datasets/lvis/lvis_v1_train_lvis-21k.json` (with combined LVIS and ImageNet-21K classes in `categories`).
+
+ [Optional] To train on combined LVIS and COCO, run
+
+ ~~~
+ python tools/merge_lvis_coco.py
+ ~~~
+ This creates `datasets/lvis/lvis_v1_train+coco_mask.json`.
+
+ ### Conceptual Caption
+
+
+ Download the dataset from [this](https://ai.google.com/research/ConceptualCaptions/download) page and place it as:
+ ```
+ cc3m/
+   GCC-training.tsv
+ ```
+
+ Run the following commands to download the images and convert the annotations to LVIS format (note: downloading the images takes a long time).
+
+ ~~~
+ python tools/download_cc.py --ann datasets/cc3m/GCC-training.tsv --save_image_path datasets/cc3m/training/ --out_path datasets/cc3m/train_image_info.json
+ python tools/get_cc_tags.py
+ ~~~
+
+ This creates `datasets/cc3m/train_image_info_tags.json`.
+
+ ### Objects365
+ Download Objects365 (v2) from the website. We only need the validation set in this project:
+ ```
+ objects365/
+   annotations/
+     zhiyuan_objv2_val.json
+   val/
+     images/
+       v1/
+         patch0/
+         ...
+         patch15/
+       v2/
+         patch16/
+         ...
+         patch49/
+
+ ```
+
+ The original annotations contain typos in the class names; we first fix them for our later use of language embeddings.
+
+ ```
+ python tools/fix_o365_names.py --ann datasets/objects365/annotations/zhiyuan_objv2_val.json
+ ```
+ This creates `datasets/objects365/zhiyuan_objv2_val_fixname.json`.
+
+ To train on Objects365, download the training images and use the command above. Note that some images in the training annotations do not exist.
+ We use the following command to filter out the missing images.
+ ~~~
+ python tools/fix_0365_path.py
+ ~~~
+ This creates `datasets/objects365/zhiyuan_objv2_train_fixname_fixmiss.json`.
+
+ ### OpenImages
+
+ We follow the instructions in [UniDet](https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet_docs/DATASETS.md#openimages) to convert the metadata for OpenImages.
+
+ The converted folder should look like
+
+ ```
+ oid/
+   annotations/
+     oid_challenge_2019_train_bbox.json
+     oid_challenge_2019_val_expanded.json
+   images/
+     0/
+     1/
+     2/
+     ...
+ ```
+
+ ### Open-vocabulary COCO
+
+ We first follow [OVR-CNN](https://github.com/alirezazareian/ovr-cnn/blob/master/ipynb/003.ipynb) to create the open-vocabulary COCO split. The converted files should look like
+
+ ```
+ coco/
+   zero-shot/
+     instances_train2017_seen_2.json
+     instances_val2017_all_2.json
+ ```
+
+ We further pre-process the annotation format for easier evaluation:
+
+ ```
+ python tools/get_coco_zeroshot_oriorder.py --data_path datasets/coco/zero-shot/instances_train2017_seen_2.json
+ python tools/get_coco_zeroshot_oriorder.py --data_path datasets/coco/zero-shot/instances_val2017_all_2.json
+ ```
+
+ Next, we preprocess the COCO caption data:
+
+ ```
+ python tools/get_cc_tags.py --cc_ann datasets/coco/annotations/captions_train2017.json --out_path datasets/coco/captions_train2017_tags_allcaps.json --allcaps --convert_caption
+ ```
+ This creates `datasets/coco/captions_train2017_tags_allcaps.json`.
+
+ ### Metadata
+
+ ```
+ metadata/
+   lvis_v1_train_cat_info.json
+   coco_clip_a+cname.npy
+   lvis_v1_clip_a+cname.npy
+   o365_clip_a+cnamefix.npy
+   oid_clip_a+cname.npy
+   imagenet_lvis_wnid.txt
+   Objects365_names_fix.csv
+ ```
+
+ `lvis_v1_train_cat_info.json` is used by the Federated loss.
+ It is created by
+ ~~~
+ python tools/get_lvis_cat_info.py --ann datasets/lvis/lvis_v1_train.json
+ ~~~
+
+ The `*_clip_a+cname.npy` files are the pre-computed CLIP embeddings for each dataset.
+ They are created by (taking LVIS as an example)
+ ~~~
+ python tools/dump_clip_features.py --ann datasets/lvis/lvis_v1_val.json --out_path metadata/lvis_v1_clip_a+cname.npy
+ ~~~
+ Note that we do not include the 21K class embeddings due to the large file size.
+ To create them, run
+ ~~~
+ python tools/dump_clip_features.py --ann datasets/lvis/lvis_v1_val_lvis-21k.json --out_path datasets/metadata/lvis-21k_clip_a+cname.npy
+ ~~~
+
+ `imagenet_lvis_wnid.txt` is the list of matched classes between ImageNet-21K and LVIS.
+
+ `Objects365_names_fix.csv` is our manual fix of the Objects365 class names.
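For reference, the `*_clip_a+cname.npy` files described above can be inspected with NumPy. A minimal sketch, assuming each file stores one row per category with a 512-dimensional CLIP text embedding, per the `ZEROSHOT_WEIGHT_DIM = 512` default in `detic/config.py`:

```python
import numpy as np

# Hypothetical inspection of a pre-computed CLIP classifier file.
# The (num_categories, 512) shape is an assumption based on
# MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_DIM = 512 in detic/config.py.
emb = np.load('datasets/metadata/lvis_v1_clip_a+cname.npy')
print(emb.shape)  # expected: (1203, 512) for the 1203 LVIS v1 categories
```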
datasets/metadata/Objects365_names_fix.csv ADDED
@@ -0,0 +1,365 @@
+ 1,Person,Person
+ 2,Sneakers,Sneakers
+ 3,Chair,Chair
+ 4,Other Shoes,Other Shoes
+ 5,Hat,Hat
+ 6,Car,Car
+ 7,Lamp,Lamp
+ 8,Glasses,Glasses
+ 9,Bottle,Bottle
+ 10,Desk,Desk
+ 11,Cup,Cup
+ 12,Street Lights,Street Lights
+ 13,Cabinet/shelf,Cabinet/shelf
+ 14,Handbag/Satchel,Handbag/Satchel
+ 15,Bracelet,Bracelet
+ 16,Plate,Plate
+ 17,Picture/Frame,Picture/Frame
+ 18,Helmet,Helmet
+ 19,Book,Book
+ 20,Gloves,Gloves
+ 21,Storage box,Storage box
+ 22,Boat,Boat
+ 23,Leather Shoes,Leather Shoes
+ 24,Flower,Flower
+ 25,Bench,Bench
+ 26,Potted Plant,Potted Plant
+ 27,Bowl/Basin,Bowl/Basin
+ 28,Flag,Flag
+ 29,Pillow,Pillow
+ 30,Boots,Boots
+ 31,Vase,Vase
+ 32,Microphone,Microphone
+ 33,Necklace,Necklace
+ 34,Ring,Ring
+ 35,SUV,SUV
+ 36,Wine Glass,Wine Glass
+ 37,Belt,Belt
+ 38,Moniter/TV,Monitor/TV
+ 39,Backpack,Backpack
+ 40,Umbrella,Umbrella
+ 41,Traffic Light,Traffic Light
+ 42,Speaker,Speaker
+ 43,Watch,Watch
+ 44,Tie,Tie
+ 45,Trash bin Can,Trash bin Can
+ 46,Slippers,Slippers
+ 47,Bicycle,Bicycle
+ 48,Stool,Stool
+ 49,Barrel/bucket,Barrel/bucket
+ 50,Van,Van
+ 51,Couch,Couch
+ 52,Sandals,Sandals
+ 53,Bakset,Basket
+ 54,Drum,Drum
+ 55,Pen/Pencil,Pen/Pencil
+ 56,Bus,Bus
+ 57,Wild Bird,Wild Bird
+ 58,High Heels,High Heels
+ 59,Motorcycle,Motorcycle
+ 60,Guitar,Guitar
+ 61,Carpet,Carpet
+ 62,Cell Phone,Cell Phone
+ 63,Bread,Bread
+ 64,Camera,Camera
+ 65,Canned,Canned
+ 66,Truck,Truck
+ 67,Traffic cone,Traffic cone
+ 68,Cymbal,Cymbal
+ 69,Lifesaver,Lifesaver
+ 70,Towel,Towel
+ 71,Stuffed Toy,Stuffed Toy
+ 72,Candle,Candle
+ 73,Sailboat,Sailboat
+ 74,Laptop,Laptop
+ 75,Awning,Awning
+ 76,Bed,Bed
+ 77,Faucet,Faucet
+ 78,Tent,Tent
+ 79,Horse,Horse
+ 80,Mirror,Mirror
+ 81,Power outlet,Power outlet
+ 82,Sink,Sink
+ 83,Apple,Apple
+ 84,Air Conditioner,Air Conditioner
+ 85,Knife,Knife
+ 86,Hockey Stick,Hockey Stick
+ 87,Paddle,Paddle
+ 88,Pickup Truck,Pickup Truck
+ 89,Fork,Fork
+ 90,Traffic Sign,Traffic Sign
+ 91,Ballon,Ballon
+ 92,Tripod,Tripod
+ 93,Dog,Dog
+ 94,Spoon,Spoon
+ 95,Clock,Clock
+ 96,Pot,Pot
+ 97,Cow,Cow
+ 98,Cake,Cake
+ 99,Dinning Table,Dining Table
+ 100,Sheep,Sheep
+ 101,Hanger,Hanger
+ 102,Blackboard/Whiteboard,Blackboard/Whiteboard
+ 103,Napkin,Napkin
+ 104,Other Fish,Other Fish
+ 105,Orange/Tangerine,Orange/Tangerine
+ 106,Toiletry,Toiletry
+ 107,Keyboard,Keyboard
+ 108,Tomato,Tomato
+ 109,Lantern,Lantern
+ 110,Machinery Vehicle,Machinery Vehicle
+ 111,Fan,Fan
+ 112,Green Vegetables,Green Vegetables
+ 113,Banana,Banana
+ 114,Baseball Glove,Baseball Glove
+ 115,Airplane,Airplane
+ 116,Mouse,Mouse
+ 117,Train,Train
+ 118,Pumpkin,Pumpkin
+ 119,Soccer,Soccer
+ 120,Skiboard,Skiboard
+ 121,Luggage,Luggage
+ 122,Nightstand,Nightstand
+ 123,Tea pot,Teapot
+ 124,Telephone,Telephone
+ 125,Trolley,Trolley
+ 126,Head Phone,Head Phone
+ 127,Sports Car,Sports Car
+ 128,Stop Sign,Stop Sign
+ 129,Dessert,Dessert
+ 130,Scooter,Scooter
+ 131,Stroller,Stroller
+ 132,Crane,Crane
+ 133,Remote,Remote
+ 134,Refrigerator,Refrigerator
+ 135,Oven,Oven
+ 136,Lemon,Lemon
+ 137,Duck,Duck
+ 138,Baseball Bat,Baseball Bat
+ 139,Surveillance Camera,Surveillance Camera
+ 140,Cat,Cat
+ 141,Jug,Jug
+ 142,Broccoli,Broccoli
+ 143,Piano,Piano
+ 144,Pizza,Pizza
+ 145,Elephant,Elephant
+ 146,Skateboard,Skateboard
+ 147,Surfboard,Surfboard
+ 148,Gun,Gun
+ 149,Skating and Skiing shoes,Skating and Skiing shoes
+ 150,Gas stove,Gas stove
+ 151,Donut,Donut
+ 152,Bow Tie,Bow Tie
+ 153,Carrot,Carrot
+ 154,Toilet,Toilet
+ 155,Kite,Kite
+ 156,Strawberry,Strawberry
+ 157,Other Balls,Other Balls
+ 158,Shovel,Shovel
+ 159,Pepper,Pepper
+ 160,Computer Box,Computer Box
+ 161,Toilet Paper,Toilet Paper
+ 162,Cleaning Products,Cleaning Products
+ 163,Chopsticks,Chopsticks
+ 164,Microwave,Microwave
+ 165,Pigeon,Pigeon
+ 166,Baseball,Baseball
+ 167,Cutting/chopping Board,Cutting/chopping Board
+ 168,Coffee Table,Coffee Table
+ 169,Side Table,Side Table
+ 170,Scissors,Scissors
+ 171,Marker,Marker
+ 172,Pie,Pie
+ 173,Ladder,Ladder
+ 174,Snowboard,Snowboard
+ 175,Cookies,Cookies
+ 176,Radiator,Radiator
+ 177,Fire Hydrant,Fire Hydrant
+ 178,Basketball,Basketball
+ 179,Zebra,Zebra
+ 180,Grape,Grape
+ 181,Giraffe,Giraffe
+ 182,Potato,Potato
+ 183,Sausage,Sausage
+ 184,Tricycle,Tricycle
+ 185,Violin,Violin
+ 186,Egg,Egg
+ 187,Fire Extinguisher,Fire Extinguisher
+ 188,Candy,Candy
+ 189,Fire Truck,Fire Truck
+ 190,Billards,Billards
+ 191,Converter,Converter
+ 192,Bathtub,Bathtub
+ 193,Wheelchair,Wheelchair
+ 194,Golf Club,Golf Club
+ 195,Briefcase,Briefcase
+ 196,Cucumber,Cucumber
+ 197,Cigar/Cigarette,Cigar/Cigarette
+ 198,Paint Brush,Paint Brush
+ 199,Pear,Pear
+ 200,Heavy Truck,Heavy Truck
+ 201,Hamburger,Hamburger
+ 202,Extractor,Extractor
+ 203,Extention Cord,Extension Cord
+ 204,Tong,Tong
+ 205,Tennis Racket,Tennis Racket
+ 206,Folder,Folder
+ 207,American Football,American Football
+ 208,earphone,earphone
+ 209,Mask,Mask
+ 210,Kettle,Kettle
+ 211,Tennis,Tennis
+ 212,Ship,Ship
+ 213,Swing,Swing
+ 214,Coffee Machine,Coffee Machine
+ 215,Slide,Slide
+ 216,Carriage,Carriage
+ 217,Onion,Onion
+ 218,Green beans,Green beans
+ 219,Projector,Projector
+ 220,Frisbee,Frisbee
+ 221,Washing Machine/Drying Machine,Washing Machine/Drying Machine
+ 222,Chicken,Chicken
+ 223,Printer,Printer
+ 224,Watermelon,Watermelon
+ 225,Saxophone,Saxophone
+ 226,Tissue,Tissue
+ 227,Toothbrush,Toothbrush
+ 228,Ice cream,Ice cream
+ 229,Hotair ballon,Hot air balloon
+ 230,Cello,Cello
+ 231,French Fries,French Fries
+ 232,Scale,Scale
+ 233,Trophy,Trophy
+ 234,Cabbage,Cabbage
+ 235,Hot dog,Hot dog
+ 236,Blender,Blender
+ 237,Peach,Peach
+ 238,Rice,Rice
+ 239,Wallet/Purse,Wallet/Purse
+ 240,Volleyball,Volleyball
+ 241,Deer,Deer
+ 242,Goose,Goose
+ 243,Tape,Tape
+ 244,Tablet,Tablet
+ 245,Cosmetics,Cosmetics
+ 246,Trumpet,Trumpet
+ 247,Pineapple,Pineapple
+ 248,Golf Ball,Golf Ball
+ 249,Ambulance,Ambulance
+ 250,Parking meter,Parking meter
+ 251,Mango,Mango
+ 252,Key,Key
+ 253,Hurdle,Hurdle
+ 254,Fishing Rod,Fishing Rod
+ 255,Medal,Medal
+ 256,Flute,Flute
+ 257,Brush,Brush
+ 258,Penguin,Penguin
+ 259,Megaphone,Megaphone
+ 260,Corn,Corn
+ 261,Lettuce,Lettuce
+ 262,Garlic,Garlic
+ 263,Swan,Swan
+ 264,Helicopter,Helicopter
+ 265,Green Onion,Green Onion
+ 266,Sandwich,Sandwich
+ 267,Nuts,Nuts
+ 268,Speed Limit Sign,Speed Limit Sign
+ 269,Induction Cooker,Induction Cooker
+ 270,Broom,Broom
+ 271,Trombone,Trombone
+ 272,Plum,Plum
+ 273,Rickshaw,Rickshaw
+ 274,Goldfish,Goldfish
+ 275,Kiwi fruit,Kiwi fruit
+ 276,Router/modem,Router/modem
+ 277,Poker Card,Poker Card
+ 278,Toaster,Toaster
+ 279,Shrimp,Shrimp
+ 280,Sushi,Sushi
+ 281,Cheese,Cheese
+ 282,Notepaper,Notepaper
+ 283,Cherry,Cherry
+ 284,Pliers,Pliers
+ 285,CD,CD
+ 286,Pasta,Pasta
+ 287,Hammer,Hammer
+ 288,Cue,Cue
+ 289,Avocado,Avocado
+ 290,Hamimelon,Hami melon
+ 291,Flask,Flask
+ 292,Mushroon,Mushroom
+ 293,Screwdriver,Screwdriver
+ 294,Soap,Soap
+ 295,Recorder,Recorder
+ 296,Bear,Bear
+ 297,Eggplant,Eggplant
+ 298,Board Eraser,Board Eraser
+ 299,Coconut,Coconut
+ 300,Tape Measur/ Ruler,Tape Measure/ Ruler
+ 301,Pig,Pig
+ 302,Showerhead,Showerhead
+ 303,Globe,Globe
+ 304,Chips,Chips
+ 305,Steak,Steak
+ 306,Crosswalk Sign,Crosswalk Sign
+ 307,Stapler,Stapler
+ 308,Campel,Camel
+ 309,Formula 1,Formula 1
+ 310,Pomegranate,Pomegranate
+ 311,Dishwasher,Dishwasher
+ 312,Crab,Crab
+ 313,Hoverboard,Hoverboard
+ 314,Meat ball,Meatball
+ 315,Rice Cooker,Rice Cooker
+ 316,Tuba,Tuba
+ 317,Calculator,Calculator
+ 318,Papaya,Papaya
+ 319,Antelope,Antelope
+ 320,Parrot,Parrot
+ 321,Seal,Seal
+ 322,Buttefly,Butterfly
+ 323,Dumbbell,Dumbbell
+ 324,Donkey,Donkey
+ 325,Lion,Lion
+ 326,Urinal,Urinal
+ 327,Dolphin,Dolphin
+ 328,Electric Drill,Electric Drill
+ 329,Hair Dryer,Hair Dryer
+ 330,Egg tart,Egg tart
+ 331,Jellyfish,Jellyfish
+ 332,Treadmill,Treadmill
+ 333,Lighter,Lighter
+ 334,Grapefruit,Grapefruit
+ 335,Game board,Game board
+ 336,Mop,Mop
+ 337,Radish,Radish
+ 338,Baozi,Baozi
+ 339,Target,Target
+ 340,French,French
+ 341,Spring Rolls,Spring Rolls
+ 342,Monkey,Monkey
+ 343,Rabbit,Rabbit
+ 344,Pencil Case,Pencil Case
+ 345,Yak,Yak
+ 346,Red Cabbage,Red Cabbage
+ 347,Binoculars,Binoculars
+ 348,Asparagus,Asparagus
+ 349,Barbell,Barbell
+ 350,Scallop,Scallop
+ 351,Noddles,Noddles
+ 352,Comb,Comb
+ 353,Dumpling,Dumpling
+ 354,Oyster,Oyster
+ 355,Table Teniis paddle,Table Tennis paddle
+ 356,Cosmetics Brush/Eyeliner Pencil,Cosmetics Brush/Eyeliner Pencil
+ 357,Chainsaw,Chainsaw
+ 358,Eraser,Eraser
+ 359,Lobster,Lobster
+ 360,Durian,Durian
+ 361,Okra,Okra
+ 362,Lipstick,Lipstick
+ 363,Cosmetics Mirror,Cosmetics Mirror
+ 364,Curling,Curling
+ 365,Table Tennis,Table Tennis
datasets/metadata/coco_clip_a+cname.npy ADDED
Binary file (82 kB).
datasets/metadata/lvis_v1_clip_a+cname.npy ADDED
Binary file (1.23 MB).
datasets/metadata/lvis_v1_train_cat_info.json ADDED
The diff for this file is too large to render.
datasets/metadata/o365_clip_a+cnamefix.npy ADDED
Binary file (374 kB).
datasets/metadata/oid_clip_a+cname.npy ADDED
Binary file (512 kB).
demo.py ADDED
@@ -0,0 +1,204 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import argparse
+ import glob
+ import multiprocessing as mp
+ import numpy as np
+ import os
+ import tempfile
+ import time
+ import warnings
+ import cv2
+ import tqdm
+ import sys
+
+ from detectron2.config import get_cfg
+ from detectron2.data.detection_utils import read_image
+ from detectron2.utils.logger import setup_logger
+
+ sys.path.insert(0, 'third_party/CenterNet2/projects/CenterNet2/')
+ from centernet.config import add_centernet_config
+ from detic.config import add_detic_config
+
+ from detic.predictor import VisualizationDemo
+
+
+ # constants
+ WINDOW_NAME = "Detic"
+
+ def setup_cfg(args):
+     cfg = get_cfg()
+     add_centernet_config(cfg)
+     add_detic_config(cfg)
+     cfg.merge_from_file(args.config_file)
+     cfg.merge_from_list(args.opts)
+     # Set score_threshold for builtin models
+     cfg.MODEL.RETINANET.SCORE_THRESH_TEST = args.confidence_threshold
+     cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = args.confidence_threshold
+     cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = args.confidence_threshold
+     cfg.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'rand' # load later
+     if not args.pred_all_class:
+         cfg.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = True
+     cfg.freeze()
+     return cfg
+
+
+ def get_parser():
+     parser = argparse.ArgumentParser(description="Detectron2 demo for builtin configs")
+     parser.add_argument(
+         "--config-file",
+         default="configs/quick_schedules/mask_rcnn_R_50_FPN_inference_acc_test.yaml",
+         metavar="FILE",
+         help="path to config file",
+     )
+     parser.add_argument("--webcam", action="store_true", help="Take inputs from webcam.")
+     parser.add_argument("--video-input", help="Path to video file.")
+     parser.add_argument(
+         "--input",
+         nargs="+",
+         help="A list of space separated input images; "
+         "or a single glob pattern such as 'directory/*.jpg'",
+     )
+     parser.add_argument(
+         "--output",
+         help="A file or directory to save output visualizations. "
+         "If not given, will show output in an OpenCV window.",
+     )
+     parser.add_argument(
+         "--vocabulary",
+         default="lvis",
+         choices=['lvis', 'openimages', 'objects365', 'coco', 'custom'],
+         help="",
+     )
+     parser.add_argument(
+         "--custom_vocabulary",
+         default="",
+         help="",
+     )
+     parser.add_argument("--pred_all_class", action='store_true')
+     parser.add_argument(
+         "--confidence-threshold",
+         type=float,
+         default=0.5,
+         help="Minimum score for instance predictions to be shown",
+     )
+     parser.add_argument(
+         "--opts",
+         help="Modify config options using the command-line 'KEY VALUE' pairs",
+         default=[],
+         nargs=argparse.REMAINDER,
+     )
+     return parser
+
+
+ def test_opencv_video_format(codec, file_ext):
+     with tempfile.TemporaryDirectory(prefix="video_format_test") as dir:
+         filename = os.path.join(dir, "test_file" + file_ext)
+         writer = cv2.VideoWriter(
+             filename=filename,
+             fourcc=cv2.VideoWriter_fourcc(*codec),
+             fps=float(30),
+             frameSize=(10, 10),
+             isColor=True,
+         )
+         [writer.write(np.zeros((10, 10, 3), np.uint8)) for _ in range(30)]
+         writer.release()
+         if os.path.isfile(filename):
+             return True
+         return False
+
+
+ if __name__ == "__main__":
+     mp.set_start_method("spawn", force=True)
+     args = get_parser().parse_args()
+     setup_logger(name="fvcore")
+     logger = setup_logger()
+     logger.info("Arguments: " + str(args))
+
+     cfg = setup_cfg(args)
+
+     demo = VisualizationDemo(cfg, args)
+
+     if args.input:
+         if len(args.input) == 1:
+             args.input = glob.glob(os.path.expanduser(args.input[0]))
+             assert args.input, "The input path(s) was not found"
+         for path in tqdm.tqdm(args.input, disable=not args.output):
+             img = read_image(path, format="BGR")
+             start_time = time.time()
+             predictions, visualized_output = demo.run_on_image(img)
+             logger.info(
+                 "{}: {} in {:.2f}s".format(
+                     path,
+                     "detected {} instances".format(len(predictions["instances"]))
+                     if "instances" in predictions
+                     else "finished",
+                     time.time() - start_time,
+                 )
+             )
+
+             if args.output:
+                 if os.path.isdir(args.output):
+                     assert os.path.isdir(args.output), args.output
+                     out_filename = os.path.join(args.output, os.path.basename(path))
+                 else:
+                     assert len(args.input) == 1, "Please specify a directory with args.output"
+                     out_filename = args.output
+                 visualized_output.save(out_filename)
+             else:
+                 cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
+                 cv2.imshow(WINDOW_NAME, visualized_output.get_image()[:, :, ::-1])
+                 if cv2.waitKey(0) == 27:
+                     break  # esc to quit
+     elif args.webcam:
+         assert args.input is None, "Cannot have both --input and --webcam!"
+         assert args.output is None, "output not yet supported with --webcam!"
+         cam = cv2.VideoCapture(0)
+         for vis in tqdm.tqdm(demo.run_on_video(cam)):
+             cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
+             cv2.imshow(WINDOW_NAME, vis)
+             if cv2.waitKey(1) == 27:
+                 break  # esc to quit
+         cam.release()
+         cv2.destroyAllWindows()
+     elif args.video_input:
+         video = cv2.VideoCapture(args.video_input)
+         width = int(video.get(cv2.CAP_PROP_FRAME_WIDTH))
+         height = int(video.get(cv2.CAP_PROP_FRAME_HEIGHT))
+         frames_per_second = video.get(cv2.CAP_PROP_FPS)
+         num_frames = int(video.get(cv2.CAP_PROP_FRAME_COUNT))
+         basename = os.path.basename(args.video_input)
+         codec, file_ext = (
+             ("x264", ".mkv") if test_opencv_video_format("x264", ".mkv") else ("mp4v", ".mp4")
+         )
+         if codec == "mp4v":
+             warnings.warn("x264 codec not available, switching to mp4v")
+         if args.output:
+             if os.path.isdir(args.output):
+                 output_fname = os.path.join(args.output, basename)
+                 output_fname = os.path.splitext(output_fname)[0] + file_ext
+             else:
+                 output_fname = args.output
+             assert not os.path.isfile(output_fname), output_fname
+             output_file = cv2.VideoWriter(
+                 filename=output_fname,
+                 # some installation of opencv may not support x264 (due to its license),
+                 # you can try other format (e.g. MPEG)
+                 fourcc=cv2.VideoWriter_fourcc(*codec),
+                 fps=float(frames_per_second),
+                 frameSize=(width, height),
+                 isColor=True,
+             )
+         assert os.path.isfile(args.video_input)
+         for vis_frame in tqdm.tqdm(demo.run_on_video(video), total=num_frames):
+             if args.output:
+                 output_file.write(vis_frame)
+             else:
+                 cv2.namedWindow(basename, cv2.WINDOW_NORMAL)
+                 cv2.imshow(basename, vis_frame)
+                 if cv2.waitKey(1) == 27:
+                     break  # esc to quit
+         video.release()
+         if args.output:
+             output_file.release()
+         else:
+             cv2.destroyAllWindows()
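For reference, a typical invocation of this demo, assembled only from the argparse flags defined above; the config, weights, and image paths are illustrative:

```
python demo.py --config-file configs/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.yaml \
  --input input.jpg --output out.jpg \
  --vocabulary custom --custom_vocabulary headphone,webcam,paper \
  --confidence-threshold 0.5 \
  --opts MODEL.WEIGHTS models/Detic_LCOCOI21k_CLIP_SwinB_896b32_4x_ft4x_max-size.pth
```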
detic/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .modeling.meta_arch import custom_rcnn
+ from .modeling.roi_heads import detic_roi_heads
+ from .modeling.roi_heads import res5_roi_heads
+ from .modeling.backbone import swintransformer
+ from .modeling.backbone import timm
+
+
+ from .data.datasets import lvis_v1
+ from .data.datasets import imagenet
+ from .data.datasets import cc
+ from .data.datasets import objects365
+ from .data.datasets import oid
+ from .data.datasets import coco_zeroshot
+
+ try:
+     from .modeling.meta_arch import d2_deformable_detr
+ except:
+     pass
detic/config.py ADDED
@@ -0,0 +1,131 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from detectron2.config import CfgNode as CN
3
+
4
+ def add_detic_config(cfg):
5
+ _C = cfg
6
+
7
+ _C.WITH_IMAGE_LABELS = False # Turn on co-training with classification data
8
+
9
+ # Open-vocabulary classifier
10
+ _C.MODEL.ROI_BOX_HEAD.USE_ZEROSHOT_CLS = False # Use fixed classifier for open-vocabulary detection
11
+ _C.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_PATH = 'datasets/metadata/lvis_v1_clip_a+cname.npy'
12
+ _C.MODEL.ROI_BOX_HEAD.ZEROSHOT_WEIGHT_DIM = 512
13
+ _C.MODEL.ROI_BOX_HEAD.NORM_WEIGHT = True
14
+ _C.MODEL.ROI_BOX_HEAD.NORM_TEMP = 50.0
15
+ _C.MODEL.ROI_BOX_HEAD.IGNORE_ZERO_CATS = False
16
+ _C.MODEL.ROI_BOX_HEAD.USE_BIAS = 0.0 # >= 0: not use
17
+
18
+ _C.MODEL.ROI_BOX_HEAD.MULT_PROPOSAL_SCORE = False # CenterNet2
19
+ _C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
20
+ _C.MODEL.ROI_BOX_HEAD.PRIOR_PROB = 0.01
21
+ _C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False # Federated Loss
22
+ _C.MODEL.ROI_BOX_HEAD.CAT_FREQ_PATH = \
23
+ 'datasets/metadata/lvis_v1_train_cat_info.json'
24
+ _C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CAT = 50
25
+ _C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT = 0.5
26
+
27
+ # Classification data configs
28
+ _C.MODEL.ROI_BOX_HEAD.IMAGE_LABEL_LOSS = 'max_size' # max, softmax, sum
29
+ _C.MODEL.ROI_BOX_HEAD.IMAGE_LOSS_WEIGHT = 0.1
30
+ _C.MODEL.ROI_BOX_HEAD.IMAGE_BOX_SIZE = 1.0
31
+ _C.MODEL.ROI_BOX_HEAD.ADD_IMAGE_BOX = False # Used for image-box loss and caption loss
32
+ _C.MODEL.ROI_BOX_HEAD.WS_NUM_PROPS = 128 # num proposals for image-labeled data
33
+ _C.MODEL.ROI_BOX_HEAD.WITH_SOFTMAX_PROP = False # Used for WSDDN
34
+ _C.MODEL.ROI_BOX_HEAD.CAPTION_WEIGHT = 1.0 # Caption loss weight
35
+ _C.MODEL.ROI_BOX_HEAD.NEG_CAP_WEIGHT = 0.125 # Caption loss hyper-parameter
36
+ _C.MODEL.ROI_BOX_HEAD.ADD_FEATURE_TO_PROP = False # Used for WSDDN
37
+ _C.MODEL.ROI_BOX_HEAD.SOFTMAX_WEAK_LOSS = False # Used when USE_SIGMOID_CE is False
38
+
39
+ _C.MODEL.ROI_HEADS.MASK_WEIGHT = 1.0
40
+ _C.MODEL.ROI_HEADS.ONE_CLASS_PER_PROPOSAL = False # For demo only
41
+
42
+ # Caption losses
43
+ _C.MODEL.CAP_BATCH_RATIO = 4 # Ratio between detection data and caption data
44
+ _C.MODEL.WITH_CAPTION = False
45
+ _C.MODEL.SYNC_CAPTION_BATCH = False # synchronize across GPUs to enlarge # "classes"
46
+
47
+ # dynamic class sampling when training with 21K classes
48
+ _C.MODEL.DYNAMIC_CLASSIFIER = False
49
+ _C.MODEL.NUM_SAMPLE_CATS = 50
50
+
51
+ # Different classifiers in testing, used in cross-dataset evaluation
52
+ _C.MODEL.RESET_CLS_TESTS = False
53
+ _C.MODEL.TEST_CLASSIFIERS = []
54
+ _C.MODEL.TEST_NUM_CLASSES = []
55
+
56
+ # Backbones
57
+ _C.MODEL.SWIN = CN()
58
+ _C.MODEL.SWIN.SIZE = 'T' # 'T', 'S', 'B'
59
+ _C.MODEL.SWIN.USE_CHECKPOINT = False
60
+ _C.MODEL.SWIN.OUT_FEATURES = (1, 2, 3) # FPN stride 8 - 32
61
+
62
+ _C.MODEL.TIMM = CN()
63
+ _C.MODEL.TIMM.BASE_NAME = 'resnet50'
64
+ _C.MODEL.TIMM.OUT_LEVELS = (3, 4, 5)
65
+ _C.MODEL.TIMM.NORM = 'FrozenBN'
66
+ _C.MODEL.TIMM.FREEZE_AT = 0
67
+ _C.MODEL.DATASET_LOSS_WEIGHT = []
68
+
69
+ # Multi-dataset dataloader
70
+ _C.DATALOADER.DATASET_RATIO = [1, 1] # sample ratio
71
+ _C.DATALOADER.USE_RFS = [False, False]
72
+ _C.DATALOADER.MULTI_DATASET_GROUPING = False # Always true when multi-dataset is enabled
73
+ _C.DATALOADER.DATASET_ANN = ['box', 'box'] # Annotation type of each dataset
74
+ _C.DATALOADER.USE_DIFF_BS_SIZE = False # Use different batchsize for each dataset
75
+ _C.DATALOADER.DATASET_BS = [8, 32] # Used when USE_DIFF_BS_SIZE is on
76
+ _C.DATALOADER.DATASET_INPUT_SIZE = [896, 384] # Used when USE_DIFF_BS_SIZE is on
77
+ _C.DATALOADER.DATASET_INPUT_SCALE = [(0.1, 2.0), (0.5, 1.5)] # Used when USE_DIFF_BS_SIZE is on
78
+ _C.DATALOADER.DATASET_MIN_SIZES = [(640, 800), (320, 400)] # Used when USE_DIFF_BS_SIZE is on
79
+ _C.DATALOADER.DATASET_MAX_SIZES = [1333, 667] # Used when USE_DIFF_BS_SIZE is on
80
+ _C.DATALOADER.USE_TAR_DATASET = False # for ImageNet-21K, directly reading from unziped files
81
+ _C.DATALOADER.TARFILE_PATH = 'datasets/imagenet/metadata-22k/tar_files.npy'
82
+ _C.DATALOADER.TAR_INDEX_DIR = 'datasets/imagenet/metadata-22k/tarindex_npy'
83
+
84
+ _C.SOLVER.USE_CUSTOM_SOLVER = False
85
+ _C.SOLVER.OPTIMIZER = 'SGD'
86
+ _C.SOLVER.BACKBONE_MULTIPLIER = 1.0 # Used in DETR
87
+ _C.SOLVER.CUSTOM_MULTIPLIER = 1.0 # Used in DETR
88
+ _C.SOLVER.CUSTOM_MULTIPLIER_NAME = [] # Used in DETR
89
+
90
+ # Deformable DETR
91
+ _C.MODEL.DETR = CN()
92
+ _C.MODEL.DETR.NUM_CLASSES = 80
93
+ _C.MODEL.DETR.FROZEN_WEIGHTS = '' # For Segmentation
94
+ _C.MODEL.DETR.GIOU_WEIGHT = 2.0
95
+ _C.MODEL.DETR.L1_WEIGHT = 5.0
96
+ _C.MODEL.DETR.DEEP_SUPERVISION = True
97
+ _C.MODEL.DETR.NO_OBJECT_WEIGHT = 0.1
98
+ _C.MODEL.DETR.CLS_WEIGHT = 2.0
99
+ _C.MODEL.DETR.NUM_FEATURE_LEVELS = 4
100
+ _C.MODEL.DETR.TWO_STAGE = False
101
+ _C.MODEL.DETR.WITH_BOX_REFINE = False
102
+ _C.MODEL.DETR.FOCAL_ALPHA = 0.25
103
+ _C.MODEL.DETR.NHEADS = 8
104
+ _C.MODEL.DETR.DROPOUT = 0.1
105
+ _C.MODEL.DETR.DIM_FEEDFORWARD = 2048
106
+ _C.MODEL.DETR.ENC_LAYERS = 6
107
+ _C.MODEL.DETR.DEC_LAYERS = 6
108
+ _C.MODEL.DETR.PRE_NORM = False
109
+ _C.MODEL.DETR.HIDDEN_DIM = 256
110
+ _C.MODEL.DETR.NUM_OBJECT_QUERIES = 100
111
+
112
+ _C.MODEL.DETR.USE_FED_LOSS = False
113
+ _C.MODEL.DETR.WEAK_WEIGHT = 0.1
114
+
115
+ _C.INPUT.CUSTOM_AUG = ''
116
+ _C.INPUT.TRAIN_SIZE = 640
117
+ _C.INPUT.TEST_SIZE = 640
118
+ _C.INPUT.SCALE_RANGE = (0.1, 2.)
119
+ # 'default' for fixed short/long edge; 'square' for max size = INPUT.SIZE
120
+ _C.INPUT.TEST_INPUT_TYPE = 'default'
121
+
122
+ _C.FIND_UNUSED_PARAM = True
123
+ _C.EVAL_PRED_AR = False
124
+ _C.EVAL_PROPOSAL_AR = False
125
+ _C.EVAL_CAT_SPEC_AR = False
126
+ _C.IS_DEBUG = False
127
+ _C.QUICK_DEBUG = False
128
+ _C.FP16 = False
129
+ _C.EVAL_AP_FIX = False
130
+ _C.GEN_PSEDO_LABELS = False
131
+ _C.SAVE_DEBUG_PATH = 'output/save_debug/'
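
For orientation, a minimal usage sketch (not part of this diff) of how the keys above are typically consumed; it assumes the `add_detic_config(cfg)` entry point defined earlier in this file:

```python
# Hedged sketch: wiring the custom keys into a detectron2 config.
# `add_detic_config` is assumed to be the registration function defined
# near the top of detic/config.py (not shown in this hunk).
from detectron2.config import get_cfg
from detic.config import add_detic_config

cfg = get_cfg()
add_detic_config(cfg)  # registers the _C.* defaults listed above
cfg.merge_from_file('configs/Base-C2_L_R5021k_640b64_4x.yaml')
cfg.MODEL.ROI_BOX_HEAD.IMAGE_LABEL_LOSS = 'max_size'  # one of: max, softmax, sum, max_size
print(cfg.DATALOADER.DATASET_RATIO)                   # [1, 1] unless the yaml overrides it
```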
detic/custom_solver.py ADDED
@@ -0,0 +1,78 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
+ from enum import Enum
3
+ import itertools
4
+ from typing import Any, Callable, Dict, Iterable, List, Set, Type, Union
5
+ import torch
6
+
7
+ from detectron2.config import CfgNode
8
+
9
+ from detectron2.solver.build import maybe_add_gradient_clipping
10
+
11
+ def match_name_keywords(n, name_keywords):
12
+ out = False
13
+ for b in name_keywords:
14
+ if b in n:
15
+ out = True
16
+ break
17
+ return out
18
+
19
+ def build_custom_optimizer(cfg: CfgNode, model: torch.nn.Module) -> torch.optim.Optimizer:
20
+ """
21
+ Build an optimizer from config.
22
+ """
23
+ params: List[Dict[str, Any]] = []
24
+ memo: Set[torch.nn.parameter.Parameter] = set()
25
+ custom_multiplier_name = cfg.SOLVER.CUSTOM_MULTIPLIER_NAME
26
+ optimizer_type = cfg.SOLVER.OPTIMIZER
27
+ for key, value in model.named_parameters(recurse=True):
28
+ if not value.requires_grad:
29
+ continue
30
+ # Avoid duplicating parameters
31
+ if value in memo:
32
+ continue
33
+ memo.add(value)
34
+ lr = cfg.SOLVER.BASE_LR
35
+ weight_decay = cfg.SOLVER.WEIGHT_DECAY
36
+ if "backbone" in key:
37
+ lr = lr * cfg.SOLVER.BACKBONE_MULTIPLIER
38
+ if match_name_keywords(key, custom_multiplier_name):
39
+ lr = lr * cfg.SOLVER.CUSTOM_MULTIPLIER
40
+ print('Custom LR', key, lr)
41
+ param = {"params": [value], "lr": lr}
42
+ if optimizer_type != 'ADAMW':
43
+ param['weight_decay'] = weight_decay
44
+ params += [param]
45
+
46
+ def maybe_add_full_model_gradient_clipping(optim): # optim: the optimizer class
47
+ # detectron2 doesn't have full model gradient clipping now
48
+ clip_norm_val = cfg.SOLVER.CLIP_GRADIENTS.CLIP_VALUE
49
+ enable = (
50
+ cfg.SOLVER.CLIP_GRADIENTS.ENABLED
51
+ and cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model"
52
+ and clip_norm_val > 0.0
53
+ )
54
+
55
+ class FullModelGradientClippingOptimizer(optim):
56
+ def step(self, closure=None):
57
+ all_params = itertools.chain(*[x["params"] for x in self.param_groups])
58
+ torch.nn.utils.clip_grad_norm_(all_params, clip_norm_val)
59
+ super().step(closure=closure)
60
+
61
+ return FullModelGradientClippingOptimizer if enable else optim
62
+
63
+
64
+ if optimizer_type == 'SGD':
65
+ optimizer = maybe_add_full_model_gradient_clipping(torch.optim.SGD)(
66
+ params, cfg.SOLVER.BASE_LR, momentum=cfg.SOLVER.MOMENTUM,
67
+ nesterov=cfg.SOLVER.NESTEROV
68
+ )
69
+ elif optimizer_type == 'ADAMW':
70
+ optimizer = maybe_add_full_model_gradient_clipping(torch.optim.AdamW)(
71
+ params, cfg.SOLVER.BASE_LR,
72
+ weight_decay=cfg.SOLVER.WEIGHT_DECAY
73
+ )
74
+ else:
75
+ raise NotImplementedError(f"no optimizer type {optimizer_type}")
76
+ if not cfg.SOLVER.CLIP_GRADIENTS.CLIP_TYPE == "full_model":
77
+ optimizer = maybe_add_gradient_clipping(cfg, optimizer)
78
+ return optimizer
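
A usage sketch for the builder above; `cfg` and `model` are assumed to come from an existing detectron2 setup:

```python
# Sketch, assuming cfg/model from a detectron2 trainer. With ADAMW the
# per-group weight_decay is skipped and passed to the optimizer instead,
# matching the code above.
from detic.custom_solver import build_custom_optimizer

cfg.SOLVER.USE_CUSTOM_SOLVER = True
cfg.SOLVER.OPTIMIZER = 'ADAMW'
cfg.SOLVER.BACKBONE_MULTIPLIER = 0.1   # 10x smaller LR for backbone parameters
optimizer = build_custom_optimizer(cfg, model)
```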
detic/data/custom_build_augmentation.py ADDED
@@ -0,0 +1,51 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import numpy as np
4
+ import pycocotools.mask as mask_util
5
+ import torch
6
+ from fvcore.common.file_io import PathManager
7
+ from PIL import Image
8
+
9
+
10
+ from detectron2.data import transforms as T
11
+ from .transforms.custom_augmentation_impl import EfficientDetResizeCrop
12
+
13
+ def build_custom_augmentation(cfg, is_train, scale=None, size=None, \
14
+ min_size=None, max_size=None):
15
+ """
16
+ Create a list of default :class:`Augmentation` from config.
17
+ Now it includes resizing and flipping.
18
+
19
+ Returns:
20
+ list[Augmentation]
21
+ """
22
+ if cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge':
23
+ if is_train:
24
+ min_size = cfg.INPUT.MIN_SIZE_TRAIN if min_size is None else min_size
25
+ max_size = cfg.INPUT.MAX_SIZE_TRAIN if max_size is None else max_size
26
+ sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
27
+ else:
28
+ min_size = cfg.INPUT.MIN_SIZE_TEST
29
+ max_size = cfg.INPUT.MAX_SIZE_TEST
30
+ sample_style = "choice"
31
+ augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
32
+ elif cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
33
+ if is_train:
34
+ scale = cfg.INPUT.SCALE_RANGE if scale is None else scale
35
+ size = cfg.INPUT.TRAIN_SIZE if size is None else size
36
+ else:
37
+ scale = (1, 1)
38
+ size = cfg.INPUT.TEST_SIZE
39
+ augmentation = [EfficientDetResizeCrop(size, scale)]
40
+ else:
41
+ assert 0, cfg.INPUT.CUSTOM_AUG
42
+
43
+ if is_train:
44
+ augmentation.append(T.RandomFlip())
45
+ return augmentation
46
+
47
+
48
+ build_custom_transform_gen = build_custom_augmentation
49
+ """
50
+ Alias for backward-compatibility.
51
+ """
detic/data/custom_dataset_dataloader.py ADDED
@@ -0,0 +1,331 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/multi_dataset_dataloader.py (Apache-2.0 License)
3
+ import copy
4
+ import logging
5
+ import numpy as np
6
+ import operator
7
+ import torch
8
+ import torch.utils.data
9
+ import json
10
+ from detectron2.utils.comm import get_world_size
11
+ from detectron2.utils.logger import _log_api_usage, log_first_n
12
+
13
+ from detectron2.config import configurable
14
+ from detectron2.data import samplers
15
+ from torch.utils.data.sampler import BatchSampler, Sampler
16
+ from detectron2.data.common import DatasetFromList, MapDataset
17
+ from detectron2.data.dataset_mapper import DatasetMapper
18
+ from detectron2.data.build import get_detection_dataset_dicts, build_batch_data_loader
19
+ from detectron2.data.samplers import TrainingSampler, RepeatFactorTrainingSampler
20
+ from detectron2.data.build import worker_init_reset_seed, print_instances_class_histogram
21
+ from detectron2.data.build import filter_images_with_only_crowd_annotations
22
+ from detectron2.data.build import filter_images_with_few_keypoints
23
+ from detectron2.data.build import check_metadata_consistency
24
+ from detectron2.data.catalog import MetadataCatalog, DatasetCatalog
25
+ from detectron2.utils import comm
26
+ import itertools
27
+ import math
28
+ from collections import defaultdict
29
+ from typing import Optional
30
+
31
+
32
+ def _custom_train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
33
+ sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
34
+ if 'MultiDataset' in sampler_name:
35
+ dataset_dicts = get_detection_dataset_dicts_with_source(
36
+ cfg.DATASETS.TRAIN,
37
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
38
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
39
+ if cfg.MODEL.KEYPOINT_ON else 0,
40
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
41
+ )
42
+ else:
43
+ dataset_dicts = get_detection_dataset_dicts(
44
+ cfg.DATASETS.TRAIN,
45
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
46
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
47
+ if cfg.MODEL.KEYPOINT_ON else 0,
48
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
49
+ )
50
+
51
+ if mapper is None:
52
+ mapper = DatasetMapper(cfg, True)
53
+
54
+ if sampler is not None:
55
+ pass
56
+ elif sampler_name == "TrainingSampler":
57
+ sampler = TrainingSampler(len(dataset_dicts))
58
+ elif sampler_name == "MultiDatasetSampler":
59
+ sampler = MultiDatasetSampler(
60
+ dataset_dicts,
61
+ dataset_ratio = cfg.DATALOADER.DATASET_RATIO,
62
+ use_rfs = cfg.DATALOADER.USE_RFS,
63
+ dataset_ann = cfg.DATALOADER.DATASET_ANN,
64
+ repeat_threshold = cfg.DATALOADER.REPEAT_THRESHOLD,
65
+ )
66
+ elif sampler_name == "RepeatFactorTrainingSampler":
67
+ repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
68
+ dataset_dicts, cfg.DATALOADER.REPEAT_THRESHOLD
69
+ )
70
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
71
+ else:
72
+ raise ValueError("Unknown training sampler: {}".format(sampler_name))
73
+
74
+ return {
75
+ "dataset": dataset_dicts,
76
+ "sampler": sampler,
77
+ "mapper": mapper,
78
+ "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
79
+ "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
80
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
81
+ 'multi_dataset_grouping': cfg.DATALOADER.MULTI_DATASET_GROUPING,
82
+ 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE,
83
+ 'dataset_bs': cfg.DATALOADER.DATASET_BS,
84
+ 'num_datasets': len(cfg.DATASETS.TRAIN)
85
+ }
86
+
87
+
88
+ @configurable(from_config=_custom_train_loader_from_config)
89
+ def build_custom_train_loader(
90
+ dataset, *, mapper, sampler,
91
+ total_batch_size=16,
92
+ aspect_ratio_grouping=True,
93
+ num_workers=0,
94
+ num_datasets=1,
95
+ multi_dataset_grouping=False,
96
+ use_diff_bs_size=False,
97
+ dataset_bs=[]
98
+ ):
99
+ """
100
+ Modified from detectron2.data.build.build_detection_train_loader, but
101
+ supports different samplers.
102
+ """
103
+ if isinstance(dataset, list):
104
+ dataset = DatasetFromList(dataset, copy=False)
105
+ if mapper is not None:
106
+ dataset = MapDataset(dataset, mapper)
107
+ if sampler is None:
108
+ sampler = TrainingSampler(len(dataset))
109
+ assert isinstance(sampler, torch.utils.data.sampler.Sampler)
110
+ if multi_dataset_grouping:
111
+ return build_multi_dataset_batch_data_loader(
112
+ use_diff_bs_size,
113
+ dataset_bs,
114
+ dataset,
115
+ sampler,
116
+ total_batch_size,
117
+ num_datasets=num_datasets,
118
+ num_workers=num_workers,
119
+ )
120
+ else:
121
+ return build_batch_data_loader(
122
+ dataset,
123
+ sampler,
124
+ total_batch_size,
125
+ aspect_ratio_grouping=aspect_ratio_grouping,
126
+ num_workers=num_workers,
127
+ )
128
+
129
+
130
+ def build_multi_dataset_batch_data_loader(
131
+ use_diff_bs_size, dataset_bs,
132
+ dataset, sampler, total_batch_size, num_datasets, num_workers=0
133
+ ):
134
+ """
135
+ """
136
+ world_size = get_world_size()
137
+ assert (
138
+ total_batch_size > 0 and total_batch_size % world_size == 0
139
+ ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
140
+ total_batch_size, world_size
141
+ )
142
+
143
+ batch_size = total_batch_size // world_size
144
+ data_loader = torch.utils.data.DataLoader(
145
+ dataset,
146
+ sampler=sampler,
147
+ num_workers=num_workers,
148
+ batch_sampler=None,
149
+ collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
150
+ worker_init_fn=worker_init_reset_seed,
151
+ ) # yield individual mapped dict
152
+ if use_diff_bs_size:
153
+ return DIFFMDAspectRatioGroupedDataset(
154
+ data_loader, dataset_bs, num_datasets)
155
+ else:
156
+ return MDAspectRatioGroupedDataset(
157
+ data_loader, batch_size, num_datasets)
158
+
159
+
160
+ def get_detection_dataset_dicts_with_source(
161
+ dataset_names, filter_empty=True, min_keypoints=0, proposal_files=None
162
+ ):
163
+ assert len(dataset_names)
164
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in dataset_names]
165
+ for dataset_name, dicts in zip(dataset_names, dataset_dicts):
166
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
167
+
168
+ for source_id, (dataset_name, dicts) in \
169
+ enumerate(zip(dataset_names, dataset_dicts)):
170
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
171
+ for d in dicts:
172
+ d['dataset_source'] = source_id
173
+
174
+ if "annotations" in dicts[0]:
175
+ try:
176
+ class_names = MetadataCatalog.get(dataset_name).thing_classes
177
+ check_metadata_consistency("thing_classes", dataset_name)
178
+ print_instances_class_histogram(dicts, class_names)
179
+ except AttributeError: # class names are not available for this dataset
180
+ pass
181
+
182
+ assert proposal_files is None
183
+
184
+ dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
185
+
186
+ has_instances = "annotations" in dataset_dicts[0]
187
+ if filter_empty and has_instances:
188
+ dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
189
+ if min_keypoints > 0 and has_instances:
190
+ dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
191
+
192
+ return dataset_dicts
193
+
194
+
195
+ class MultiDatasetSampler(Sampler):
196
+ def __init__(
197
+ self,
198
+ dataset_dicts,
199
+ dataset_ratio,
200
+ use_rfs,
201
+ dataset_ann,
202
+ repeat_threshold=0.001,
203
+ seed: Optional[int] = None,
204
+ ):
205
+ """
206
+ """
207
+ sizes = [0 for _ in range(len(dataset_ratio))]
208
+ for d in dataset_dicts:
209
+ sizes[d['dataset_source']] += 1
210
+ print('dataset sizes', sizes)
211
+ self.sizes = sizes
212
+ assert len(dataset_ratio) == len(sizes), \
213
+ 'length of dataset_ratio {} should equal the number of datasets {}'.format(
214
+ len(dataset_ratio), len(sizes)
215
+ )
216
+ if seed is None:
217
+ seed = comm.shared_random_seed()
218
+ self._seed = int(seed)
219
+ self._rank = comm.get_rank()
220
+ self._world_size = comm.get_world_size()
221
+
222
+ self.dataset_ids = torch.tensor(
223
+ [d['dataset_source'] for d in dataset_dicts], dtype=torch.long)
224
+
225
+ dataset_weight = [torch.ones(s) * max(sizes) / s * r / sum(dataset_ratio) \
226
+ for i, (r, s) in enumerate(zip(dataset_ratio, sizes))]
227
+ dataset_weight = torch.cat(dataset_weight)
228
+
229
+ rfs_factors = []
230
+ st = 0
231
+ for i, s in enumerate(sizes):
232
+ if use_rfs[i]:
233
+ if dataset_ann[i] == 'box':
234
+ rfs_func = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency
235
+ else:
236
+ rfs_func = repeat_factors_from_tag_frequency
237
+ rfs_factor = rfs_func(
238
+ dataset_dicts[st: st + s],
239
+ repeat_thresh=repeat_threshold)
240
+ rfs_factor = rfs_factor * (s / rfs_factor.sum())
241
+ else:
242
+ rfs_factor = torch.ones(s)
243
+ rfs_factors.append(rfs_factor)
244
+ st = st + s
245
+ rfs_factors = torch.cat(rfs_factors)
246
+
247
+ self.weights = dataset_weight * rfs_factors
248
+ self.sample_epoch_size = len(self.weights)
249
+
250
+ def __iter__(self):
251
+ start = self._rank
252
+ yield from itertools.islice(
253
+ self._infinite_indices(), start, None, self._world_size)
254
+
255
+
256
+ def _infinite_indices(self):
257
+ g = torch.Generator()
258
+ g.manual_seed(self._seed)
259
+ while True:
260
+ ids = torch.multinomial(
261
+ self.weights, self.sample_epoch_size, generator=g,
262
+ replacement=True)
263
+ # per-dataset sample counts (currently unused; handy for debugging)
+ nums = [(self.dataset_ids[ids] == i).sum().int().item() \
264
+ for i in range(len(self.sizes))]
265
+ yield from ids
266
+
267
+
268
+ class MDAspectRatioGroupedDataset(torch.utils.data.IterableDataset):
269
+ def __init__(self, dataset, batch_size, num_datasets):
270
+ """
271
+ """
272
+ self.dataset = dataset
273
+ self.batch_size = batch_size
274
+ self._buckets = [[] for _ in range(2 * num_datasets)]
275
+
276
+ def __iter__(self):
277
+ for d in self.dataset:
278
+ w, h = d["width"], d["height"]
279
+ aspect_ratio_bucket_id = 0 if w > h else 1
280
+ bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
281
+ bucket = self._buckets[bucket_id]
282
+ bucket.append(d)
283
+ if len(bucket) == self.batch_size:
284
+ yield bucket[:]
285
+ del bucket[:]
286
+
287
+
288
+ class DIFFMDAspectRatioGroupedDataset(torch.utils.data.IterableDataset):
289
+ def __init__(self, dataset, batch_sizes, num_datasets):
290
+ """
291
+ """
292
+ self.dataset = dataset
293
+ self.batch_sizes = batch_sizes
294
+ self._buckets = [[] for _ in range(2 * num_datasets)]
295
+
296
+ def __iter__(self):
297
+ for d in self.dataset:
298
+ w, h = d["width"], d["height"]
299
+ aspect_ratio_bucket_id = 0 if w > h else 1
300
+ bucket_id = d['dataset_source'] * 2 + aspect_ratio_bucket_id
301
+ bucket = self._buckets[bucket_id]
302
+ bucket.append(d)
303
+ if len(bucket) == self.batch_sizes[d['dataset_source']]:
304
+ yield bucket[:]
305
+ del bucket[:]
306
+
307
+
308
+ def repeat_factors_from_tag_frequency(dataset_dicts, repeat_thresh):
309
+ """
310
+ """
311
+ category_freq = defaultdict(int)
312
+ for dataset_dict in dataset_dicts:
313
+ cat_ids = dataset_dict['pos_category_ids']
314
+ for cat_id in cat_ids:
315
+ category_freq[cat_id] += 1
316
+ num_images = len(dataset_dicts)
317
+ for k, v in category_freq.items():
318
+ category_freq[k] = v / num_images
319
+
320
+ category_rep = {
321
+ cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
322
+ for cat_id, cat_freq in category_freq.items()
323
+ }
324
+
325
+ rep_factors = []
326
+ for dataset_dict in dataset_dicts:
327
+ cat_ids = dataset_dict['pos_category_ids']
328
+ rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
329
+ rep_factors.append(rep_factor)
330
+
331
+ return torch.tensor(rep_factors, dtype=torch.float32)
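
A toy check of `repeat_factors_from_tag_frequency` (illustrative numbers only, using the `pos_category_ids` field the function expects):

```python
# With repeat_thresh=0.5, a tag seen in 1 of 4 images (frequency 0.25)
# gets factor sqrt(0.5 / 0.25) ~= 1.414; tags at or above the threshold
# stay at 1.0, and each image takes the max over its tags.
from detic.data.custom_dataset_dataloader import repeat_factors_from_tag_frequency

dicts = [
    {'pos_category_ids': [0]},
    {'pos_category_ids': [0]},
    {'pos_category_ids': [0]},
    {'pos_category_ids': [0, 1]},  # the rare tag 1 appears only here
]
print(repeat_factors_from_tag_frequency(dicts, repeat_thresh=0.5))
# tensor([1.0000, 1.0000, 1.0000, 1.4142])
```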
detic/data/custom_dataset_mapper.py ADDED
@@ -0,0 +1,280 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
2
+ import copy
3
+ import logging
4
+ import numpy as np
5
+ from typing import List, Optional, Union
6
+ import torch
7
+ import pycocotools.mask as mask_util
8
+
9
+ from detectron2.config import configurable
10
+
11
+ from detectron2.data import detection_utils as utils
12
+ from detectron2.data.detection_utils import transform_keypoint_annotations
13
+ from detectron2.data import transforms as T
14
+ from detectron2.data.dataset_mapper import DatasetMapper
15
+ from detectron2.structures import Boxes, BoxMode, Instances
16
+ from detectron2.structures import Keypoints, PolygonMasks, BitMasks
17
+ from fvcore.transforms.transform import TransformList
18
+ from .custom_build_augmentation import build_custom_augmentation
19
+ from .tar_dataset import DiskTarDataset
20
+
21
+ __all__ = ["CustomDatasetMapper"]
22
+
23
+ class CustomDatasetMapper(DatasetMapper):
24
+ @configurable
25
+ def __init__(self, is_train: bool,
26
+ with_ann_type=False,
27
+ dataset_ann=[],
28
+ use_diff_bs_size=False,
29
+ dataset_augs=[],
30
+ is_debug=False,
31
+ use_tar_dataset=False,
32
+ tarfile_path='',
33
+ tar_index_dir='',
34
+ **kwargs):
35
+ """
36
+ Extends DatasetMapper to carry image-label (tag / caption) annotations.
37
+ """
38
+ self.with_ann_type = with_ann_type
39
+ self.dataset_ann = dataset_ann
40
+ self.use_diff_bs_size = use_diff_bs_size
41
+ if self.use_diff_bs_size and is_train:
42
+ self.dataset_augs = [T.AugmentationList(x) for x in dataset_augs]
43
+ self.is_debug = is_debug
44
+ self.use_tar_dataset = use_tar_dataset
45
+ if self.use_tar_dataset:
46
+ print('Using tar dataset')
47
+ self.tar_dataset = DiskTarDataset(tarfile_path, tar_index_dir)
48
+ super().__init__(is_train, **kwargs)
49
+
50
+
51
+ @classmethod
52
+ def from_config(cls, cfg, is_train: bool = True):
53
+ ret = super().from_config(cfg, is_train)
54
+ ret.update({
55
+ 'with_ann_type': cfg.WITH_IMAGE_LABELS,
56
+ 'dataset_ann': cfg.DATALOADER.DATASET_ANN,
57
+ 'use_diff_bs_size': cfg.DATALOADER.USE_DIFF_BS_SIZE,
58
+ 'is_debug': cfg.IS_DEBUG,
59
+ 'use_tar_dataset': cfg.DATALOADER.USE_TAR_DATASET,
60
+ 'tarfile_path': cfg.DATALOADER.TARFILE_PATH,
61
+ 'tar_index_dir': cfg.DATALOADER.TAR_INDEX_DIR,
62
+ })
63
+ if ret['use_diff_bs_size'] and is_train:
64
+ if cfg.INPUT.CUSTOM_AUG == 'EfficientDetResizeCrop':
65
+ dataset_scales = cfg.DATALOADER.DATASET_INPUT_SCALE
66
+ dataset_sizes = cfg.DATALOADER.DATASET_INPUT_SIZE
67
+ ret['dataset_augs'] = [
68
+ build_custom_augmentation(cfg, True, scale, size) \
69
+ for scale, size in zip(dataset_scales, dataset_sizes)]
70
+ else:
71
+ assert cfg.INPUT.CUSTOM_AUG == 'ResizeShortestEdge'
72
+ min_sizes = cfg.DATALOADER.DATASET_MIN_SIZES
73
+ max_sizes = cfg.DATALOADER.DATASET_MAX_SIZES
74
+ ret['dataset_augs'] = [
75
+ build_custom_augmentation(
76
+ cfg, True, min_size=mi, max_size=ma) \
77
+ for mi, ma in zip(min_sizes, max_sizes)]
78
+ else:
79
+ ret['dataset_augs'] = []
80
+
81
+ return ret
82
+
83
+ def __call__(self, dataset_dict):
84
+ """
85
+ Same as DatasetMapper.__call__, but also attaches image-label fields.
86
+ """
87
+ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
88
+ # USER: Write your own image loading if it's not from a file
89
+ if 'file_name' in dataset_dict:
90
+ ori_image = utils.read_image(
91
+ dataset_dict["file_name"], format=self.image_format)
92
+ else:
93
+ ori_image, _, _ = self.tar_dataset[dataset_dict["tar_index"]]
94
+ ori_image = utils._apply_exif_orientation(ori_image)
95
+ ori_image = utils.convert_PIL_to_numpy(ori_image, self.image_format)
96
+ utils.check_image_size(dataset_dict, ori_image)
97
+
98
+ # USER: Remove if you don't do semantic/panoptic segmentation.
99
+ if "sem_seg_file_name" in dataset_dict:
100
+ sem_seg_gt = utils.read_image(
101
+ dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
102
+ else:
103
+ sem_seg_gt = None
104
+
105
+ if self.is_debug:
106
+ dataset_dict['dataset_source'] = 0
107
+
108
+ not_full_labeled = 'dataset_source' in dataset_dict and \
109
+ self.with_ann_type and \
110
+ self.dataset_ann[dataset_dict['dataset_source']] != 'box'
111
+
112
+ aug_input = T.AugInput(copy.deepcopy(ori_image), sem_seg=sem_seg_gt)
113
+ if self.use_diff_bs_size and self.is_train:
114
+ transforms = \
115
+ self.dataset_augs[dataset_dict['dataset_source']](aug_input)
116
+ else:
117
+ transforms = self.augmentations(aug_input)
118
+ image, sem_seg_gt = aug_input.image, aug_input.sem_seg
119
+
120
+ image_shape = image.shape[:2] # h, w
121
+ dataset_dict["image"] = torch.as_tensor(
122
+ np.ascontiguousarray(image.transpose(2, 0, 1)))
123
+
124
+ if sem_seg_gt is not None:
125
+ dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
126
+
127
+ # USER: Remove if you don't use pre-computed proposals.
128
+ # Most users would not need this feature.
129
+ if self.proposal_topk is not None:
130
+ utils.transform_proposals(
131
+ dataset_dict, image_shape, transforms,
132
+ proposal_topk=self.proposal_topk
133
+ )
134
+
135
+ if not self.is_train:
136
+ # USER: Modify this if you want to keep them for some reason.
137
+ dataset_dict.pop("annotations", None)
138
+ dataset_dict.pop("sem_seg_file_name", None)
139
+ return dataset_dict
140
+
141
+ if "annotations" in dataset_dict:
142
+ # USER: Modify this if you want to keep them for some reason.
143
+ for anno in dataset_dict["annotations"]:
144
+ if not self.use_instance_mask:
145
+ anno.pop("segmentation", None)
146
+ if not self.use_keypoint:
147
+ anno.pop("keypoints", None)
148
+
149
+ # USER: Implement additional transformations if you have other types of data
150
+ all_annos = [
151
+ (utils.transform_instance_annotations(
152
+ obj, transforms, image_shape,
153
+ keypoint_hflip_indices=self.keypoint_hflip_indices,
154
+ ), obj.get("iscrowd", 0))
155
+ for obj in dataset_dict.pop("annotations")
156
+ ]
157
+ annos = [ann[0] for ann in all_annos if ann[1] == 0]
158
+ instances = utils.annotations_to_instances(
159
+ annos, image_shape, mask_format=self.instance_mask_format
160
+ )
161
+
162
+ del all_annos
163
+ if self.recompute_boxes:
164
+ instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
165
+ dataset_dict["instances"] = utils.filter_empty_instances(instances)
166
+ if self.with_ann_type:
167
+ dataset_dict["pos_category_ids"] = dataset_dict.get(
168
+ 'pos_category_ids', [])
169
+ dataset_dict["ann_type"] = \
170
+ self.dataset_ann[dataset_dict['dataset_source']]
171
+ if self.is_debug and (('pos_category_ids' not in dataset_dict) or \
172
+ (dataset_dict['pos_category_ids'] == [])):
173
+ dataset_dict['pos_category_ids'] = [x for x in sorted(set(
174
+ dataset_dict['instances'].gt_classes.tolist()
175
+ ))]
176
+ return dataset_dict
177
+
178
+ # DETR augmentation
179
+ def build_transform_gen(cfg, is_train):
180
+ """
181
+ """
182
+ if is_train:
183
+ min_size = cfg.INPUT.MIN_SIZE_TRAIN
184
+ max_size = cfg.INPUT.MAX_SIZE_TRAIN
185
+ sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
186
+ else:
187
+ min_size = cfg.INPUT.MIN_SIZE_TEST
188
+ max_size = cfg.INPUT.MAX_SIZE_TEST
189
+ sample_style = "choice"
190
+ if sample_style == "range":
191
+ assert len(min_size) == 2, "more than 2 ({}) min_size(s) are provided for ranges".format(len(min_size))
192
+
193
+ logger = logging.getLogger(__name__)
194
+ tfm_gens = []
195
+ if is_train:
196
+ tfm_gens.append(T.RandomFlip())
197
+ tfm_gens.append(T.ResizeShortestEdge(min_size, max_size, sample_style))
198
+ if is_train:
199
+ logger.info("TransformGens used in training: " + str(tfm_gens))
200
+ return tfm_gens
201
+
202
+
203
+ class DetrDatasetMapper:
204
+ """
205
+ A callable which takes a dataset dict in Detectron2 Dataset format,
206
+ and maps it into a format used by DETR.
207
+ The callable currently does the following:
208
+ 1. Reads the image from "file_name"
209
+ 2. Applies geometric transforms to the image and annotations
210
+ 3. Finds and applies suitable cropping to the image and annotations
211
+ 4. Prepares the image and annotations as Tensors
212
+ """
213
+
214
+ def __init__(self, cfg, is_train=True):
215
+ if cfg.INPUT.CROP.ENABLED and is_train:
216
+ self.crop_gen = [
217
+ T.ResizeShortestEdge([400, 500, 600], sample_style="choice"),
218
+ T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE),
219
+ ]
220
+ else:
221
+ self.crop_gen = None
222
+
223
+ self.mask_on = cfg.MODEL.MASK_ON
224
+ self.tfm_gens = build_transform_gen(cfg, is_train)
225
+ logging.getLogger(__name__).info(
226
+ "Full TransformGens used in training: {}, crop: {}".format(str(self.tfm_gens), str(self.crop_gen))
227
+ )
228
+
229
+ self.img_format = cfg.INPUT.FORMAT
230
+ self.is_train = is_train
231
+
232
+ def __call__(self, dataset_dict):
233
+ """
234
+ Args:
235
+ dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
236
+ Returns:
237
+ dict: a format that builtin models in detectron2 accept
238
+ """
239
+ dataset_dict = copy.deepcopy(dataset_dict) # it will be modified by code below
240
+ image = utils.read_image(dataset_dict["file_name"], format=self.img_format)
241
+ utils.check_image_size(dataset_dict, image)
242
+
243
+ if self.crop_gen is None:
244
+ image, transforms = T.apply_transform_gens(self.tfm_gens, image)
245
+ else:
246
+ if np.random.rand() > 0.5:
247
+ image, transforms = T.apply_transform_gens(self.tfm_gens, image)
248
+ else:
249
+ image, transforms = T.apply_transform_gens(
250
+ self.tfm_gens[:-1] + self.crop_gen + self.tfm_gens[-1:], image
251
+ )
252
+
253
+ image_shape = image.shape[:2] # h, w
254
+
255
+ # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
256
+ # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
257
+ # Therefore it's important to use torch.Tensor.
258
+ dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
259
+
260
+ if not self.is_train:
261
+ # USER: Modify this if you want to keep them for some reason.
262
+ dataset_dict.pop("annotations", None)
263
+ return dataset_dict
264
+
265
+ if "annotations" in dataset_dict:
266
+ # USER: Modify this if you want to keep them for some reason.
267
+ for anno in dataset_dict["annotations"]:
268
+ if not self.mask_on:
269
+ anno.pop("segmentation", None)
270
+ anno.pop("keypoints", None)
271
+
272
+ # USER: Implement additional transformations if you have other types of data
273
+ annos = [
274
+ utils.transform_instance_annotations(obj, transforms, image_shape)
275
+ for obj in dataset_dict.pop("annotations")
276
+ if obj.get("iscrowd", 0) == 0
277
+ ]
278
+ instances = utils.annotations_to_instances(annos, image_shape)
279
+ dataset_dict["instances"] = utils.filter_empty_instances(instances)
280
+ return dataset_dict
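
A sketch of plugging the mapper into the multi-dataset loader from the previous file; the config values are assumptions for illustration:

```python
# Assumes cfg.DATASETS.TRAIN lists one box-labeled and one image-labeled
# split and cfg.DATALOADER.SAMPLER_TRAIN = 'MultiDatasetSampler'.
from detic.data.custom_dataset_mapper import CustomDatasetMapper
from detic.data.custom_dataset_dataloader import build_custom_train_loader

mapper = CustomDatasetMapper(cfg, True)                 # from_config path
loader = build_custom_train_loader(cfg, mapper=mapper)
batch = next(iter(loader))                              # a per-dataset grouped batch
```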
detic/data/datasets/cc.py ADDED
@@ -0,0 +1,23 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import os
4
+
5
+ from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
6
+ from detectron2.data.datasets.lvis import get_lvis_instances_meta
7
+ from .lvis_v1 import custom_register_lvis_instances
8
+
9
+ _CUSTOM_SPLITS = {
10
+ "cc3m_v1_val": ("cc3m/validation/", "cc3m/val_image_info.json"),
11
+ "cc3m_v1_train": ("cc3m/training/", "cc3m/train_image_info.json"),
12
+ "cc3m_v1_train_tags": ("cc3m/training/", "cc3m/train_image_info_tags.json"),
13
+
14
+ }
15
+
16
+ for key, (image_root, json_file) in _CUSTOM_SPLITS.items():
17
+ custom_register_lvis_instances(
18
+ key,
19
+ get_lvis_instances_meta('lvis_v1'),
20
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
21
+ os.path.join("datasets", image_root),
22
+ )
23
+
detic/data/datasets/coco_zeroshot.py ADDED
@@ -0,0 +1,121 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import os
3
+
4
+ from detectron2.data.datasets.register_coco import register_coco_instances
5
+ from detectron2.data.datasets.builtin_meta import _get_builtin_metadata
6
+ from .lvis_v1 import custom_register_lvis_instances
7
+
8
+ categories_seen = [
9
+ {'id': 1, 'name': 'person'},
10
+ {'id': 2, 'name': 'bicycle'},
11
+ {'id': 3, 'name': 'car'},
12
+ {'id': 4, 'name': 'motorcycle'},
13
+ {'id': 7, 'name': 'train'},
14
+ {'id': 8, 'name': 'truck'},
15
+ {'id': 9, 'name': 'boat'},
16
+ {'id': 15, 'name': 'bench'},
17
+ {'id': 16, 'name': 'bird'},
18
+ {'id': 19, 'name': 'horse'},
19
+ {'id': 20, 'name': 'sheep'},
20
+ {'id': 23, 'name': 'bear'},
21
+ {'id': 24, 'name': 'zebra'},
22
+ {'id': 25, 'name': 'giraffe'},
23
+ {'id': 27, 'name': 'backpack'},
24
+ {'id': 31, 'name': 'handbag'},
25
+ {'id': 33, 'name': 'suitcase'},
26
+ {'id': 34, 'name': 'frisbee'},
27
+ {'id': 35, 'name': 'skis'},
28
+ {'id': 38, 'name': 'kite'},
29
+ {'id': 42, 'name': 'surfboard'},
30
+ {'id': 44, 'name': 'bottle'},
31
+ {'id': 48, 'name': 'fork'},
32
+ {'id': 50, 'name': 'spoon'},
33
+ {'id': 51, 'name': 'bowl'},
34
+ {'id': 52, 'name': 'banana'},
35
+ {'id': 53, 'name': 'apple'},
36
+ {'id': 54, 'name': 'sandwich'},
37
+ {'id': 55, 'name': 'orange'},
38
+ {'id': 56, 'name': 'broccoli'},
39
+ {'id': 57, 'name': 'carrot'},
40
+ {'id': 59, 'name': 'pizza'},
41
+ {'id': 60, 'name': 'donut'},
42
+ {'id': 62, 'name': 'chair'},
43
+ {'id': 65, 'name': 'bed'},
44
+ {'id': 70, 'name': 'toilet'},
45
+ {'id': 72, 'name': 'tv'},
46
+ {'id': 73, 'name': 'laptop'},
47
+ {'id': 74, 'name': 'mouse'},
48
+ {'id': 75, 'name': 'remote'},
49
+ {'id': 78, 'name': 'microwave'},
50
+ {'id': 79, 'name': 'oven'},
51
+ {'id': 80, 'name': 'toaster'},
52
+ {'id': 82, 'name': 'refrigerator'},
53
+ {'id': 84, 'name': 'book'},
54
+ {'id': 85, 'name': 'clock'},
55
+ {'id': 86, 'name': 'vase'},
56
+ {'id': 90, 'name': 'toothbrush'},
57
+ ]
58
+
59
+ categories_unseen = [
60
+ {'id': 5, 'name': 'airplane'},
61
+ {'id': 6, 'name': 'bus'},
62
+ {'id': 17, 'name': 'cat'},
63
+ {'id': 18, 'name': 'dog'},
64
+ {'id': 21, 'name': 'cow'},
65
+ {'id': 22, 'name': 'elephant'},
66
+ {'id': 28, 'name': 'umbrella'},
67
+ {'id': 32, 'name': 'tie'},
68
+ {'id': 36, 'name': 'snowboard'},
69
+ {'id': 41, 'name': 'skateboard'},
70
+ {'id': 47, 'name': 'cup'},
71
+ {'id': 49, 'name': 'knife'},
72
+ {'id': 61, 'name': 'cake'},
73
+ {'id': 63, 'name': 'couch'},
74
+ {'id': 76, 'name': 'keyboard'},
75
+ {'id': 81, 'name': 'sink'},
76
+ {'id': 87, 'name': 'scissors'},
77
+ ]
78
+
79
+ def _get_metadata(cat):
80
+ if cat == 'all':
81
+ return _get_builtin_metadata('coco')
82
+ elif cat == 'seen':
83
+ id_to_name = {x['id']: x['name'] for x in categories_seen}
84
+ else:
85
+ assert cat == 'unseen'
86
+ id_to_name = {x['id']: x['name'] for x in categories_unseen}
87
+
88
+ thing_dataset_id_to_contiguous_id = {
89
+ x: i for i, x in enumerate(sorted(id_to_name))}
90
+ thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
91
+ return {
92
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
93
+ "thing_classes": thing_classes}
94
+
95
+ _PREDEFINED_SPLITS_COCO = {
96
+ "coco_zeroshot_train": ("coco/train2017", "coco/zero-shot/instances_train2017_seen_2.json", 'seen'),
97
+ "coco_zeroshot_val": ("coco/val2017", "coco/zero-shot/instances_val2017_unseen_2.json", 'unseen'),
98
+ "coco_not_zeroshot_val": ("coco/val2017", "coco/zero-shot/instances_val2017_seen_2.json", 'seen'),
99
+ "coco_generalized_zeroshot_val": ("coco/val2017", "coco/zero-shot/instances_val2017_all_2_oriorder.json", 'all'),
100
+ "coco_zeroshot_train_oriorder": ("coco/train2017", "coco/zero-shot/instances_train2017_seen_2_oriorder.json", 'all'),
101
+ }
102
+
103
+ for key, (image_root, json_file, cat) in _PREDEFINED_SPLITS_COCO.items():
104
+ register_coco_instances(
105
+ key,
106
+ _get_metadata(cat),
107
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
108
+ os.path.join("datasets", image_root),
109
+ )
110
+
111
+ _CUSTOM_SPLITS_COCO = {
112
+ "cc3m_coco_train_tags": ("cc3m/training/", "cc3m/coco_train_image_info_tags.json"),
113
+ "coco_caption_train_tags": ("coco/train2017/", "coco/annotations/captions_train2017_tags_allcaps.json"),}
114
+
115
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_COCO.items():
116
+ custom_register_lvis_instances(
117
+ key,
118
+ _get_builtin_metadata('coco'),
119
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
120
+ os.path.join("datasets", image_root),
121
+ )
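
For illustration, `_get_metadata('unseen')` maps the 17 unseen ids above to contiguous indices in sorted-id order:

```python
# Illustration only; values follow directly from categories_unseen above.
from detic.data.datasets.coco_zeroshot import _get_metadata

meta = _get_metadata('unseen')
print(meta['thing_dataset_id_to_contiguous_id'])  # {5: 0, 6: 1, 17: 2, ...}
print(meta['thing_classes'][:3])                  # ['airplane', 'bus', 'cat']
```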
detic/data/datasets/imagenet.py ADDED
@@ -0,0 +1,41 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import os
4
+
5
+ from detectron2.data import DatasetCatalog, MetadataCatalog
6
+ from detectron2.data.datasets.lvis import get_lvis_instances_meta
7
+ from .lvis_v1 import custom_load_lvis_json, get_lvis_22k_meta
8
+ def custom_register_imagenet_instances(name, metadata, json_file, image_root):
9
+ """
10
+ """
11
+ DatasetCatalog.register(name, lambda: custom_load_lvis_json(
12
+ json_file, image_root, name))
13
+ MetadataCatalog.get(name).set(
14
+ json_file=json_file, image_root=image_root,
15
+ evaluator_type="imagenet", **metadata
16
+ )
17
+
18
+ _CUSTOM_SPLITS_IMAGENET = {
19
+ "imagenet_lvis_v1": ("imagenet/ImageNet-LVIS/", "imagenet/annotations/imagenet_lvis_image_info.json"),
20
+ }
21
+
22
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_IMAGENET.items():
23
+ custom_register_imagenet_instances(
24
+ key,
25
+ get_lvis_instances_meta('lvis_v1'),
26
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
27
+ os.path.join("datasets", image_root),
28
+ )
29
+
30
+
31
+ _CUSTOM_SPLITS_IMAGENET_22K = {
32
+ "imagenet_lvis-22k": ("imagenet/ImageNet-LVIS/", "imagenet/annotations/imagenet-22k_image_info_lvis-22k.json"),
33
+ }
34
+
35
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_IMAGENET_22K.items():
36
+ custom_register_imagenet_instances(
37
+ key,
38
+ get_lvis_22k_meta(),
39
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
40
+ os.path.join("datasets", image_root),
41
+ )
detic/data/datasets/lvis_22k_categories.py ADDED
The diff for this file is too large to render. See raw diff
detic/data/datasets/lvis_v1.py ADDED
@@ -0,0 +1,155 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import logging
3
+ import os
4
+
5
+ from fvcore.common.timer import Timer
6
+ from detectron2.structures import BoxMode
7
+ from fvcore.common.file_io import PathManager
8
+ from detectron2.data import DatasetCatalog, MetadataCatalog
9
+ from detectron2.data.datasets.lvis import get_lvis_instances_meta
10
+
11
+ logger = logging.getLogger(__name__)
12
+
13
+ __all__ = ["custom_load_lvis_json", "custom_register_lvis_instances"]
14
+
15
+
16
+ def custom_register_lvis_instances(name, metadata, json_file, image_root):
17
+ """
18
+ """
19
+ DatasetCatalog.register(name, lambda: custom_load_lvis_json(
20
+ json_file, image_root, name))
21
+ MetadataCatalog.get(name).set(
22
+ json_file=json_file, image_root=image_root,
23
+ evaluator_type="lvis", **metadata
24
+ )
25
+
26
+
27
+ def custom_load_lvis_json(json_file, image_root, dataset_name=None):
28
+ '''
29
+ Modifications:
30
+ use `file_name`
31
+ convert neg_category_ids
32
+ add pos_category_ids
33
+ '''
34
+ from lvis import LVIS
35
+
36
+ json_file = PathManager.get_local_path(json_file)
37
+
38
+ timer = Timer()
39
+ lvis_api = LVIS(json_file)
40
+ if timer.seconds() > 1:
41
+ logger.info("Loading {} takes {:.2f} seconds.".format(
42
+ json_file, timer.seconds()))
43
+
44
+ catid2contid = {x['id']: i for i, x in enumerate(
45
+ sorted(lvis_api.dataset['categories'], key=lambda x: x['id']))}
46
+ if len(lvis_api.dataset['categories']) == 1203:
47
+ for x in lvis_api.dataset['categories']:
48
+ assert catid2contid[x['id']] == x['id'] - 1
49
+ img_ids = sorted(lvis_api.imgs.keys())
50
+ imgs = lvis_api.load_imgs(img_ids)
51
+ anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
52
+
53
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
54
+ assert len(set(ann_ids)) == len(ann_ids), \
55
+ "Annotation ids in '{}' are not unique".format(json_file)
56
+
57
+ imgs_anns = list(zip(imgs, anns))
58
+ logger.info("Loaded {} images in the LVIS v1 format from {}".format(
59
+ len(imgs_anns), json_file))
60
+
61
+ dataset_dicts = []
62
+
63
+ for (img_dict, anno_dict_list) in imgs_anns:
64
+ record = {}
65
+ if "file_name" in img_dict:
66
+ file_name = img_dict["file_name"]
67
+ if img_dict["file_name"].startswith("COCO"):
68
+ file_name = file_name[-16:]
69
+ record["file_name"] = os.path.join(image_root, file_name)
70
+ elif 'coco_url' in img_dict:
71
+ # e.g., http://images.cocodataset.org/train2017/000000391895.jpg
72
+ file_name = img_dict["coco_url"][30:]
73
+ record["file_name"] = os.path.join(image_root, file_name)
74
+ elif 'tar_index' in img_dict:
75
+ record['tar_index'] = img_dict['tar_index']
76
+
77
+ record["height"] = img_dict["height"]
78
+ record["width"] = img_dict["width"]
79
+ record["not_exhaustive_category_ids"] = img_dict.get(
80
+ "not_exhaustive_category_ids", [])
81
+ record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
82
+ # NOTE: modified by Xingyi: convert to 0-based
83
+ record["neg_category_ids"] = [
84
+ catid2contid[x] for x in record["neg_category_ids"]]
85
+ if 'pos_category_ids' in img_dict:
86
+ record['pos_category_ids'] = [
87
+ catid2contid[x] for x in img_dict.get("pos_category_ids", [])]
88
+ if 'captions' in img_dict:
89
+ record['captions'] = img_dict['captions']
90
+ if 'caption_features' in img_dict:
91
+ record['caption_features'] = img_dict['caption_features']
92
+ image_id = record["image_id"] = img_dict["id"]
93
+
94
+ objs = []
95
+ for anno in anno_dict_list:
96
+ assert anno["image_id"] == image_id
97
+ if anno.get('iscrowd', 0) > 0:
98
+ continue
99
+ obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
100
+ obj["category_id"] = catid2contid[anno['category_id']]
101
+ if 'segmentation' in anno:
102
+ segm = anno["segmentation"]
103
+ valid_segm = [poly for poly in segm \
104
+ if len(poly) % 2 == 0 and len(poly) >= 6]
105
+ # assert len(segm) == len(
106
+ # valid_segm
107
+ # ), "Annotation contains an invalid polygon with < 3 points"
108
+ if not len(segm) == len(valid_segm):
109
+ print('Annotation contains an invalid polygon with < 3 points')
110
+ assert len(segm) > 0
111
+ obj["segmentation"] = segm
112
+ objs.append(obj)
113
+ record["annotations"] = objs
114
+ dataset_dicts.append(record)
115
+
116
+ return dataset_dicts
117
+
118
+ _CUSTOM_SPLITS_LVIS = {
119
+ "lvis_v1_train+coco": ("coco/", "lvis/lvis_v1_train+coco_mask.json"),
120
+ "lvis_v1_train_norare": ("coco/", "lvis/lvis_v1_train_norare.json"),
121
+ }
122
+
123
+
124
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS.items():
125
+ custom_register_lvis_instances(
126
+ key,
127
+ get_lvis_instances_meta(key),
128
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
129
+ os.path.join("datasets", image_root),
130
+ )
131
+
132
+
133
+ def get_lvis_22k_meta():
134
+ from .lvis_22k_categories import CATEGORIES
135
+ cat_ids = [k["id"] for k in CATEGORIES]
136
+ assert min(cat_ids) == 1 and max(cat_ids) == len(
137
+ cat_ids
138
+ ), "Category ids are not in [1, #categories], as expected"
139
+ # Ensure that the category list is sorted by id
140
+ lvis_categories = sorted(CATEGORIES, key=lambda x: x["id"])
141
+ thing_classes = [k["name"] for k in lvis_categories]
142
+ meta = {"thing_classes": thing_classes}
143
+ return meta
144
+
145
+ _CUSTOM_SPLITS_LVIS_22K = {
146
+ "lvis_v1_train_22k": ("coco/", "lvis/lvis_v1_train_lvis-22k.json"),
147
+ }
148
+
149
+ for key, (image_root, json_file) in _CUSTOM_SPLITS_LVIS_22K.items():
150
+ custom_register_lvis_instances(
151
+ key,
152
+ get_lvis_22k_meta(),
153
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
154
+ os.path.join("datasets", image_root),
155
+ )
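
Once registered (registration runs at import time, as above), a split loads like any detectron2 dataset; this sketch assumes the json and image files exist under `datasets/`:

```python
# Importing the module triggers the registration loops above; the json is
# only read when DatasetCatalog.get() is called.
import detic.data.datasets.lvis_v1  # noqa: F401
from detectron2.data import DatasetCatalog, MetadataCatalog

dicts = DatasetCatalog.get('lvis_v1_train_norare')
print(len(dicts), MetadataCatalog.get('lvis_v1_train_norare').evaluator_type)  # ..., 'lvis'
```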
detic/data/datasets/objects365.py ADDED
@@ -0,0 +1,770 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ from detectron2.data.datasets.register_coco import register_coco_instances
3
+ import os
4
+
5
+ # categories_v2 = [
6
+ # {'id': 1, 'name': 'Person'},
7
+ # {'id': 2, 'name': 'Sneakers'},
8
+ # {'id': 3, 'name': 'Chair'},
9
+ # {'id': 4, 'name': 'Other Shoes'},
10
+ # {'id': 5, 'name': 'Hat'},
11
+ # {'id': 6, 'name': 'Car'},
12
+ # {'id': 7, 'name': 'Lamp'},
13
+ # {'id': 8, 'name': 'Glasses'},
14
+ # {'id': 9, 'name': 'Bottle'},
15
+ # {'id': 10, 'name': 'Desk'},
16
+ # {'id': 11, 'name': 'Cup'},
17
+ # {'id': 12, 'name': 'Street Lights'},
18
+ # {'id': 13, 'name': 'Cabinet/shelf'},
19
+ # {'id': 14, 'name': 'Handbag/Satchel'},
20
+ # {'id': 15, 'name': 'Bracelet'},
21
+ # {'id': 16, 'name': 'Plate'},
22
+ # {'id': 17, 'name': 'Picture/Frame'},
23
+ # {'id': 18, 'name': 'Helmet'},
24
+ # {'id': 19, 'name': 'Book'},
25
+ # {'id': 20, 'name': 'Gloves'},
26
+ # {'id': 21, 'name': 'Storage box'},
27
+ # {'id': 22, 'name': 'Boat'},
28
+ # {'id': 23, 'name': 'Leather Shoes'},
29
+ # {'id': 24, 'name': 'Flower'},
30
+ # {'id': 25, 'name': 'Bench'},
31
+ # {'id': 26, 'name': 'Potted Plant'},
32
+ # {'id': 27, 'name': 'Bowl/Basin'},
33
+ # {'id': 28, 'name': 'Flag'},
34
+ # {'id': 29, 'name': 'Pillow'},
35
+ # {'id': 30, 'name': 'Boots'},
36
+ # {'id': 31, 'name': 'Vase'},
37
+ # {'id': 32, 'name': 'Microphone'},
38
+ # {'id': 33, 'name': 'Necklace'},
39
+ # {'id': 34, 'name': 'Ring'},
40
+ # {'id': 35, 'name': 'SUV'},
41
+ # {'id': 36, 'name': 'Wine Glass'},
42
+ # {'id': 37, 'name': 'Belt'},
43
+ # {'id': 38, 'name': 'Moniter/TV'},
44
+ # {'id': 39, 'name': 'Backpack'},
45
+ # {'id': 40, 'name': 'Umbrella'},
46
+ # {'id': 41, 'name': 'Traffic Light'},
47
+ # {'id': 42, 'name': 'Speaker'},
48
+ # {'id': 43, 'name': 'Watch'},
49
+ # {'id': 44, 'name': 'Tie'},
50
+ # {'id': 45, 'name': 'Trash bin Can'},
51
+ # {'id': 46, 'name': 'Slippers'},
52
+ # {'id': 47, 'name': 'Bicycle'},
53
+ # {'id': 48, 'name': 'Stool'},
54
+ # {'id': 49, 'name': 'Barrel/bucket'},
55
+ # {'id': 50, 'name': 'Van'},
56
+ # {'id': 51, 'name': 'Couch'},
57
+ # {'id': 52, 'name': 'Sandals'},
58
+ # {'id': 53, 'name': 'Bakset'},
59
+ # {'id': 54, 'name': 'Drum'},
60
+ # {'id': 55, 'name': 'Pen/Pencil'},
61
+ # {'id': 56, 'name': 'Bus'},
62
+ # {'id': 57, 'name': 'Wild Bird'},
63
+ # {'id': 58, 'name': 'High Heels'},
64
+ # {'id': 59, 'name': 'Motorcycle'},
65
+ # {'id': 60, 'name': 'Guitar'},
66
+ # {'id': 61, 'name': 'Carpet'},
67
+ # {'id': 62, 'name': 'Cell Phone'},
68
+ # {'id': 63, 'name': 'Bread'},
69
+ # {'id': 64, 'name': 'Camera'},
70
+ # {'id': 65, 'name': 'Canned'},
71
+ # {'id': 66, 'name': 'Truck'},
72
+ # {'id': 67, 'name': 'Traffic cone'},
73
+ # {'id': 68, 'name': 'Cymbal'},
74
+ # {'id': 69, 'name': 'Lifesaver'},
75
+ # {'id': 70, 'name': 'Towel'},
76
+ # {'id': 71, 'name': 'Stuffed Toy'},
77
+ # {'id': 72, 'name': 'Candle'},
78
+ # {'id': 73, 'name': 'Sailboat'},
79
+ # {'id': 74, 'name': 'Laptop'},
80
+ # {'id': 75, 'name': 'Awning'},
81
+ # {'id': 76, 'name': 'Bed'},
82
+ # {'id': 77, 'name': 'Faucet'},
83
+ # {'id': 78, 'name': 'Tent'},
84
+ # {'id': 79, 'name': 'Horse'},
85
+ # {'id': 80, 'name': 'Mirror'},
86
+ # {'id': 81, 'name': 'Power outlet'},
87
+ # {'id': 82, 'name': 'Sink'},
88
+ # {'id': 83, 'name': 'Apple'},
89
+ # {'id': 84, 'name': 'Air Conditioner'},
90
+ # {'id': 85, 'name': 'Knife'},
91
+ # {'id': 86, 'name': 'Hockey Stick'},
92
+ # {'id': 87, 'name': 'Paddle'},
93
+ # {'id': 88, 'name': 'Pickup Truck'},
94
+ # {'id': 89, 'name': 'Fork'},
95
+ # {'id': 90, 'name': 'Traffic Sign'},
96
+ # {'id': 91, 'name': 'Ballon'},
97
+ # {'id': 92, 'name': 'Tripod'},
98
+ # {'id': 93, 'name': 'Dog'},
99
+ # {'id': 94, 'name': 'Spoon'},
100
+ # {'id': 95, 'name': 'Clock'},
101
+ # {'id': 96, 'name': 'Pot'},
102
+ # {'id': 97, 'name': 'Cow'},
103
+ # {'id': 98, 'name': 'Cake'},
104
+ # {'id': 99, 'name': 'Dinning Table'},
105
+ # {'id': 100, 'name': 'Sheep'},
106
+ # {'id': 101, 'name': 'Hanger'},
107
+ # {'id': 102, 'name': 'Blackboard/Whiteboard'},
108
+ # {'id': 103, 'name': 'Napkin'},
109
+ # {'id': 104, 'name': 'Other Fish'},
110
+ # {'id': 105, 'name': 'Orange/Tangerine'},
111
+ # {'id': 106, 'name': 'Toiletry'},
112
+ # {'id': 107, 'name': 'Keyboard'},
113
+ # {'id': 108, 'name': 'Tomato'},
114
+ # {'id': 109, 'name': 'Lantern'},
115
+ # {'id': 110, 'name': 'Machinery Vehicle'},
116
+ # {'id': 111, 'name': 'Fan'},
117
+ # {'id': 112, 'name': 'Green Vegetables'},
118
+ # {'id': 113, 'name': 'Banana'},
119
+ # {'id': 114, 'name': 'Baseball Glove'},
120
+ # {'id': 115, 'name': 'Airplane'},
121
+ # {'id': 116, 'name': 'Mouse'},
122
+ # {'id': 117, 'name': 'Train'},
123
+ # {'id': 118, 'name': 'Pumpkin'},
124
+ # {'id': 119, 'name': 'Soccer'},
125
+ # {'id': 120, 'name': 'Skiboard'},
126
+ # {'id': 121, 'name': 'Luggage'},
127
+ # {'id': 122, 'name': 'Nightstand'},
128
+ # {'id': 123, 'name': 'Tea pot'},
129
+ # {'id': 124, 'name': 'Telephone'},
130
+ # {'id': 125, 'name': 'Trolley'},
131
+ # {'id': 126, 'name': 'Head Phone'},
132
+ # {'id': 127, 'name': 'Sports Car'},
133
+ # {'id': 128, 'name': 'Stop Sign'},
134
+ # {'id': 129, 'name': 'Dessert'},
135
+ # {'id': 130, 'name': 'Scooter'},
136
+ # {'id': 131, 'name': 'Stroller'},
137
+ # {'id': 132, 'name': 'Crane'},
138
+ # {'id': 133, 'name': 'Remote'},
139
+ # {'id': 134, 'name': 'Refrigerator'},
140
+ # {'id': 135, 'name': 'Oven'},
141
+ # {'id': 136, 'name': 'Lemon'},
142
+ # {'id': 137, 'name': 'Duck'},
143
+ # {'id': 138, 'name': 'Baseball Bat'},
144
+ # {'id': 139, 'name': 'Surveillance Camera'},
145
+ # {'id': 140, 'name': 'Cat'},
146
+ # {'id': 141, 'name': 'Jug'},
147
+ # {'id': 142, 'name': 'Broccoli'},
148
+ # {'id': 143, 'name': 'Piano'},
149
+ # {'id': 144, 'name': 'Pizza'},
150
+ # {'id': 145, 'name': 'Elephant'},
151
+ # {'id': 146, 'name': 'Skateboard'},
152
+ # {'id': 147, 'name': 'Surfboard'},
153
+ # {'id': 148, 'name': 'Gun'},
154
+ # {'id': 149, 'name': 'Skating and Skiing shoes'},
155
+ # {'id': 150, 'name': 'Gas stove'},
156
+ # {'id': 151, 'name': 'Donut'},
157
+ # {'id': 152, 'name': 'Bow Tie'},
158
+ # {'id': 153, 'name': 'Carrot'},
159
+ # {'id': 154, 'name': 'Toilet'},
160
+ # {'id': 155, 'name': 'Kite'},
161
+ # {'id': 156, 'name': 'Strawberry'},
162
+ # {'id': 157, 'name': 'Other Balls'},
163
+ # {'id': 158, 'name': 'Shovel'},
164
+ # {'id': 159, 'name': 'Pepper'},
165
+ # {'id': 160, 'name': 'Computer Box'},
166
+ # {'id': 161, 'name': 'Toilet Paper'},
167
+ # {'id': 162, 'name': 'Cleaning Products'},
168
+ # {'id': 163, 'name': 'Chopsticks'},
169
+ # {'id': 164, 'name': 'Microwave'},
170
+ # {'id': 165, 'name': 'Pigeon'},
171
+ # {'id': 166, 'name': 'Baseball'},
172
+ # {'id': 167, 'name': 'Cutting/chopping Board'},
173
+ # {'id': 168, 'name': 'Coffee Table'},
174
+ # {'id': 169, 'name': 'Side Table'},
175
+ # {'id': 170, 'name': 'Scissors'},
176
+ # {'id': 171, 'name': 'Marker'},
177
+ # {'id': 172, 'name': 'Pie'},
178
+ # {'id': 173, 'name': 'Ladder'},
179
+ # {'id': 174, 'name': 'Snowboard'},
180
+ # {'id': 175, 'name': 'Cookies'},
181
+ # {'id': 176, 'name': 'Radiator'},
182
+ # {'id': 177, 'name': 'Fire Hydrant'},
183
+ # {'id': 178, 'name': 'Basketball'},
184
+ # {'id': 179, 'name': 'Zebra'},
185
+ # {'id': 180, 'name': 'Grape'},
186
+ # {'id': 181, 'name': 'Giraffe'},
187
+ # {'id': 182, 'name': 'Potato'},
188
+ # {'id': 183, 'name': 'Sausage'},
189
+ # {'id': 184, 'name': 'Tricycle'},
190
+ # {'id': 185, 'name': 'Violin'},
191
+ # {'id': 186, 'name': 'Egg'},
192
+ # {'id': 187, 'name': 'Fire Extinguisher'},
193
+ # {'id': 188, 'name': 'Candy'},
194
+ # {'id': 189, 'name': 'Fire Truck'},
195
+ # {'id': 190, 'name': 'Billards'},
196
+ # {'id': 191, 'name': 'Converter'},
197
+ # {'id': 192, 'name': 'Bathtub'},
198
+ # {'id': 193, 'name': 'Wheelchair'},
199
+ # {'id': 194, 'name': 'Golf Club'},
200
+ # {'id': 195, 'name': 'Briefcase'},
201
+ # {'id': 196, 'name': 'Cucumber'},
202
+ # {'id': 197, 'name': 'Cigar/Cigarette '},
203
+ # {'id': 198, 'name': 'Paint Brush'},
204
+ # {'id': 199, 'name': 'Pear'},
205
+ # {'id': 200, 'name': 'Heavy Truck'},
206
+ # {'id': 201, 'name': 'Hamburger'},
207
+ # {'id': 202, 'name': 'Extractor'},
208
+ # {'id': 203, 'name': 'Extention Cord'},
209
+ # {'id': 204, 'name': 'Tong'},
210
+ # {'id': 205, 'name': 'Tennis Racket'},
211
+ # {'id': 206, 'name': 'Folder'},
212
+ # {'id': 207, 'name': 'American Football'},
213
+ # {'id': 208, 'name': 'earphone'},
214
+ # {'id': 209, 'name': 'Mask'},
215
+ # {'id': 210, 'name': 'Kettle'},
216
+ # {'id': 211, 'name': 'Tennis'},
217
+ # {'id': 212, 'name': 'Ship'},
218
+ # {'id': 213, 'name': 'Swing'},
219
+ # {'id': 214, 'name': 'Coffee Machine'},
220
+ # {'id': 215, 'name': 'Slide'},
221
+ # {'id': 216, 'name': 'Carriage'},
222
+ # {'id': 217, 'name': 'Onion'},
223
+ # {'id': 218, 'name': 'Green beans'},
224
+ # {'id': 219, 'name': 'Projector'},
225
+ # {'id': 220, 'name': 'Frisbee'},
226
+ # {'id': 221, 'name': 'Washing Machine/Drying Machine'},
227
+ # {'id': 222, 'name': 'Chicken'},
228
+ # {'id': 223, 'name': 'Printer'},
229
+ # {'id': 224, 'name': 'Watermelon'},
230
+ # {'id': 225, 'name': 'Saxophone'},
231
+ # {'id': 226, 'name': 'Tissue'},
232
+ # {'id': 227, 'name': 'Toothbrush'},
233
+ # {'id': 228, 'name': 'Ice cream'},
234
+ # {'id': 229, 'name': 'Hotair ballon'},
235
+ # {'id': 230, 'name': 'Cello'},
236
+ # {'id': 231, 'name': 'French Fries'},
237
+ # {'id': 232, 'name': 'Scale'},
238
+ # {'id': 233, 'name': 'Trophy'},
239
+ # {'id': 234, 'name': 'Cabbage'},
240
+ # {'id': 235, 'name': 'Hot dog'},
241
+ # {'id': 236, 'name': 'Blender'},
242
+ # {'id': 237, 'name': 'Peach'},
243
+ # {'id': 238, 'name': 'Rice'},
244
+ # {'id': 239, 'name': 'Wallet/Purse'},
245
+ # {'id': 240, 'name': 'Volleyball'},
246
+ # {'id': 241, 'name': 'Deer'},
247
+ # {'id': 242, 'name': 'Goose'},
248
+ # {'id': 243, 'name': 'Tape'},
249
+ # {'id': 244, 'name': 'Tablet'},
250
+ # {'id': 245, 'name': 'Cosmetics'},
251
+ # {'id': 246, 'name': 'Trumpet'},
252
+ # {'id': 247, 'name': 'Pineapple'},
253
+ # {'id': 248, 'name': 'Golf Ball'},
254
+ # {'id': 249, 'name': 'Ambulance'},
255
+ # {'id': 250, 'name': 'Parking meter'},
256
+ # {'id': 251, 'name': 'Mango'},
257
+ # {'id': 252, 'name': 'Key'},
258
+ # {'id': 253, 'name': 'Hurdle'},
259
+ # {'id': 254, 'name': 'Fishing Rod'},
260
+ # {'id': 255, 'name': 'Medal'},
261
+ # {'id': 256, 'name': 'Flute'},
262
+ # {'id': 257, 'name': 'Brush'},
263
+ # {'id': 258, 'name': 'Penguin'},
264
+ # {'id': 259, 'name': 'Megaphone'},
265
+ # {'id': 260, 'name': 'Corn'},
266
+ # {'id': 261, 'name': 'Lettuce'},
267
+ # {'id': 262, 'name': 'Garlic'},
268
+ # {'id': 263, 'name': 'Swan'},
269
+ # {'id': 264, 'name': 'Helicopter'},
270
+ # {'id': 265, 'name': 'Green Onion'},
271
+ # {'id': 266, 'name': 'Sandwich'},
272
+ # {'id': 267, 'name': 'Nuts'},
273
+ # {'id': 268, 'name': 'Speed Limit Sign'},
274
+ # {'id': 269, 'name': 'Induction Cooker'},
275
+ # {'id': 270, 'name': 'Broom'},
276
+ # {'id': 271, 'name': 'Trombone'},
277
+ # {'id': 272, 'name': 'Plum'},
278
+ # {'id': 273, 'name': 'Rickshaw'},
279
+ # {'id': 274, 'name': 'Goldfish'},
280
+ # {'id': 275, 'name': 'Kiwi fruit'},
281
+ # {'id': 276, 'name': 'Router/modem'},
282
+ # {'id': 277, 'name': 'Poker Card'},
283
+ # {'id': 278, 'name': 'Toaster'},
284
+ # {'id': 279, 'name': 'Shrimp'},
285
+ # {'id': 280, 'name': 'Sushi'},
286
+ # {'id': 281, 'name': 'Cheese'},
287
+ # {'id': 282, 'name': 'Notepaper'},
288
+ # {'id': 283, 'name': 'Cherry'},
289
+ # {'id': 284, 'name': 'Pliers'},
290
+ # {'id': 285, 'name': 'CD'},
291
+ # {'id': 286, 'name': 'Pasta'},
292
+ # {'id': 287, 'name': 'Hammer'},
293
+ # {'id': 288, 'name': 'Cue'},
294
+ # {'id': 289, 'name': 'Avocado'},
295
+ # {'id': 290, 'name': 'Hamimelon'},
296
+ # {'id': 291, 'name': 'Flask'},
297
+ # {'id': 292, 'name': 'Mushroon'},
298
+ # {'id': 293, 'name': 'Screwdriver'},
299
+ # {'id': 294, 'name': 'Soap'},
300
+ # {'id': 295, 'name': 'Recorder'},
301
+ # {'id': 296, 'name': 'Bear'},
302
+ # {'id': 297, 'name': 'Eggplant'},
303
+ # {'id': 298, 'name': 'Board Eraser'},
304
+ # {'id': 299, 'name': 'Coconut'},
305
+ # {'id': 300, 'name': 'Tape Measur/ Ruler'},
306
+ # {'id': 301, 'name': 'Pig'},
307
+ # {'id': 302, 'name': 'Showerhead'},
308
+ # {'id': 303, 'name': 'Globe'},
309
+ # {'id': 304, 'name': 'Chips'},
310
+ # {'id': 305, 'name': 'Steak'},
311
+ # {'id': 306, 'name': 'Crosswalk Sign'},
312
+ # {'id': 307, 'name': 'Stapler'},
313
+ # {'id': 308, 'name': 'Campel'},
314
+ # {'id': 309, 'name': 'Formula 1 '},
315
+ # {'id': 310, 'name': 'Pomegranate'},
316
+ # {'id': 311, 'name': 'Dishwasher'},
317
+ # {'id': 312, 'name': 'Crab'},
318
+ # {'id': 313, 'name': 'Hoverboard'},
319
+ # {'id': 314, 'name': 'Meat ball'},
320
+ # {'id': 315, 'name': 'Rice Cooker'},
321
+ # {'id': 316, 'name': 'Tuba'},
322
+ # {'id': 317, 'name': 'Calculator'},
323
+ # {'id': 318, 'name': 'Papaya'},
324
+ # {'id': 319, 'name': 'Antelope'},
325
+ # {'id': 320, 'name': 'Parrot'},
326
+ # {'id': 321, 'name': 'Seal'},
327
+ # {'id': 322, 'name': 'Buttefly'},
328
+ # {'id': 323, 'name': 'Dumbbell'},
329
+ # {'id': 324, 'name': 'Donkey'},
330
+ # {'id': 325, 'name': 'Lion'},
331
+ # {'id': 326, 'name': 'Urinal'},
332
+ # {'id': 327, 'name': 'Dolphin'},
333
+ # {'id': 328, 'name': 'Electric Drill'},
334
+ # {'id': 329, 'name': 'Hair Dryer'},
335
+ # {'id': 330, 'name': 'Egg tart'},
336
+ # {'id': 331, 'name': 'Jellyfish'},
337
+ # {'id': 332, 'name': 'Treadmill'},
338
+ # {'id': 333, 'name': 'Lighter'},
339
+ # {'id': 334, 'name': 'Grapefruit'},
340
+ # {'id': 335, 'name': 'Game board'},
341
+ # {'id': 336, 'name': 'Mop'},
342
+ # {'id': 337, 'name': 'Radish'},
343
+ # {'id': 338, 'name': 'Baozi'},
344
+ # {'id': 339, 'name': 'Target'},
345
+ # {'id': 340, 'name': 'French'},
346
+ # {'id': 341, 'name': 'Spring Rolls'},
347
+ # {'id': 342, 'name': 'Monkey'},
348
+ # {'id': 343, 'name': 'Rabbit'},
349
+ # {'id': 344, 'name': 'Pencil Case'},
350
+ # {'id': 345, 'name': 'Yak'},
351
+ # {'id': 346, 'name': 'Red Cabbage'},
352
+ # {'id': 347, 'name': 'Binoculars'},
353
+ # {'id': 348, 'name': 'Asparagus'},
354
+ # {'id': 349, 'name': 'Barbell'},
355
+ # {'id': 350, 'name': 'Scallop'},
356
+ # {'id': 351, 'name': 'Noddles'},
357
+ # {'id': 352, 'name': 'Comb'},
358
+ # {'id': 353, 'name': 'Dumpling'},
359
+ # {'id': 354, 'name': 'Oyster'},
360
+ # {'id': 355, 'name': 'Table Teniis paddle'},
361
+ # {'id': 356, 'name': 'Cosmetics Brush/Eyeliner Pencil'},
362
+ # {'id': 357, 'name': 'Chainsaw'},
363
+ # {'id': 358, 'name': 'Eraser'},
364
+ # {'id': 359, 'name': 'Lobster'},
365
+ # {'id': 360, 'name': 'Durian'},
366
+ # {'id': 361, 'name': 'Okra'},
367
+ # {'id': 362, 'name': 'Lipstick'},
368
+ # {'id': 363, 'name': 'Cosmetics Mirror'},
369
+ # {'id': 364, 'name': 'Curling'},
370
+ # {'id': 365, 'name': 'Table Tennis '},
371
+ # ]
372
+
373
+ '''
374
+ The official Objects365 category names contain typos.
375
+ Below is a manual fix.
376
+ '''
377
+ categories_v2_fix = [
378
+ {'id': 1, 'name': 'Person'},
379
+ {'id': 2, 'name': 'Sneakers'},
380
+ {'id': 3, 'name': 'Chair'},
381
+ {'id': 4, 'name': 'Other Shoes'},
382
+ {'id': 5, 'name': 'Hat'},
383
+ {'id': 6, 'name': 'Car'},
384
+ {'id': 7, 'name': 'Lamp'},
385
+ {'id': 8, 'name': 'Glasses'},
386
+ {'id': 9, 'name': 'Bottle'},
387
+ {'id': 10, 'name': 'Desk'},
388
+ {'id': 11, 'name': 'Cup'},
389
+ {'id': 12, 'name': 'Street Lights'},
390
+ {'id': 13, 'name': 'Cabinet/shelf'},
391
+ {'id': 14, 'name': 'Handbag/Satchel'},
392
+ {'id': 15, 'name': 'Bracelet'},
393
+ {'id': 16, 'name': 'Plate'},
394
+ {'id': 17, 'name': 'Picture/Frame'},
395
+ {'id': 18, 'name': 'Helmet'},
396
+ {'id': 19, 'name': 'Book'},
397
+ {'id': 20, 'name': 'Gloves'},
398
+ {'id': 21, 'name': 'Storage box'},
399
+ {'id': 22, 'name': 'Boat'},
400
+ {'id': 23, 'name': 'Leather Shoes'},
401
+ {'id': 24, 'name': 'Flower'},
402
+ {'id': 25, 'name': 'Bench'},
403
+ {'id': 26, 'name': 'Potted Plant'},
404
+ {'id': 27, 'name': 'Bowl/Basin'},
405
+ {'id': 28, 'name': 'Flag'},
406
+ {'id': 29, 'name': 'Pillow'},
407
+ {'id': 30, 'name': 'Boots'},
408
+ {'id': 31, 'name': 'Vase'},
409
+ {'id': 32, 'name': 'Microphone'},
410
+ {'id': 33, 'name': 'Necklace'},
411
+ {'id': 34, 'name': 'Ring'},
412
+ {'id': 35, 'name': 'SUV'},
413
+ {'id': 36, 'name': 'Wine Glass'},
414
+ {'id': 37, 'name': 'Belt'},
415
+ {'id': 38, 'name': 'Monitor/TV'},
416
+ {'id': 39, 'name': 'Backpack'},
417
+ {'id': 40, 'name': 'Umbrella'},
418
+ {'id': 41, 'name': 'Traffic Light'},
419
+ {'id': 42, 'name': 'Speaker'},
420
+ {'id': 43, 'name': 'Watch'},
421
+ {'id': 44, 'name': 'Tie'},
422
+ {'id': 45, 'name': 'Trash bin Can'},
423
+ {'id': 46, 'name': 'Slippers'},
424
+ {'id': 47, 'name': 'Bicycle'},
425
+ {'id': 48, 'name': 'Stool'},
426
+ {'id': 49, 'name': 'Barrel/bucket'},
427
+ {'id': 50, 'name': 'Van'},
428
+ {'id': 51, 'name': 'Couch'},
429
+ {'id': 52, 'name': 'Sandals'},
430
+ {'id': 53, 'name': 'Basket'},
431
+ {'id': 54, 'name': 'Drum'},
432
+ {'id': 55, 'name': 'Pen/Pencil'},
433
+ {'id': 56, 'name': 'Bus'},
434
+ {'id': 57, 'name': 'Wild Bird'},
435
+ {'id': 58, 'name': 'High Heels'},
436
+ {'id': 59, 'name': 'Motorcycle'},
437
+ {'id': 60, 'name': 'Guitar'},
438
+ {'id': 61, 'name': 'Carpet'},
439
+ {'id': 62, 'name': 'Cell Phone'},
440
+ {'id': 63, 'name': 'Bread'},
441
+ {'id': 64, 'name': 'Camera'},
442
+ {'id': 65, 'name': 'Canned'},
443
+ {'id': 66, 'name': 'Truck'},
444
+ {'id': 67, 'name': 'Traffic cone'},
445
+ {'id': 68, 'name': 'Cymbal'},
446
+ {'id': 69, 'name': 'Lifesaver'},
447
+ {'id': 70, 'name': 'Towel'},
448
+ {'id': 71, 'name': 'Stuffed Toy'},
449
+ {'id': 72, 'name': 'Candle'},
450
+ {'id': 73, 'name': 'Sailboat'},
451
+ {'id': 74, 'name': 'Laptop'},
452
+ {'id': 75, 'name': 'Awning'},
453
+ {'id': 76, 'name': 'Bed'},
454
+ {'id': 77, 'name': 'Faucet'},
455
+ {'id': 78, 'name': 'Tent'},
456
+ {'id': 79, 'name': 'Horse'},
457
+ {'id': 80, 'name': 'Mirror'},
458
+ {'id': 81, 'name': 'Power outlet'},
459
+ {'id': 82, 'name': 'Sink'},
460
+ {'id': 83, 'name': 'Apple'},
461
+ {'id': 84, 'name': 'Air Conditioner'},
462
+ {'id': 85, 'name': 'Knife'},
463
+ {'id': 86, 'name': 'Hockey Stick'},
464
+ {'id': 87, 'name': 'Paddle'},
465
+ {'id': 88, 'name': 'Pickup Truck'},
466
+ {'id': 89, 'name': 'Fork'},
467
+ {'id': 90, 'name': 'Traffic Sign'},
468
+ {'id': 91, 'name': 'Ballon'},
469
+ {'id': 92, 'name': 'Tripod'},
470
+ {'id': 93, 'name': 'Dog'},
471
+ {'id': 94, 'name': 'Spoon'},
472
+ {'id': 95, 'name': 'Clock'},
473
+ {'id': 96, 'name': 'Pot'},
474
+ {'id': 97, 'name': 'Cow'},
475
+ {'id': 98, 'name': 'Cake'},
476
+ {'id': 99, 'name': 'Dining Table'},
477
+ {'id': 100, 'name': 'Sheep'},
478
+ {'id': 101, 'name': 'Hanger'},
479
+ {'id': 102, 'name': 'Blackboard/Whiteboard'},
480
+ {'id': 103, 'name': 'Napkin'},
481
+ {'id': 104, 'name': 'Other Fish'},
482
+ {'id': 105, 'name': 'Orange/Tangerine'},
483
+ {'id': 106, 'name': 'Toiletry'},
484
+ {'id': 107, 'name': 'Keyboard'},
485
+ {'id': 108, 'name': 'Tomato'},
486
+ {'id': 109, 'name': 'Lantern'},
487
+ {'id': 110, 'name': 'Machinery Vehicle'},
488
+ {'id': 111, 'name': 'Fan'},
489
+ {'id': 112, 'name': 'Green Vegetables'},
490
+ {'id': 113, 'name': 'Banana'},
491
+ {'id': 114, 'name': 'Baseball Glove'},
492
+ {'id': 115, 'name': 'Airplane'},
493
+ {'id': 116, 'name': 'Mouse'},
494
+ {'id': 117, 'name': 'Train'},
495
+ {'id': 118, 'name': 'Pumpkin'},
496
+ {'id': 119, 'name': 'Soccer'},
497
+ {'id': 120, 'name': 'Skiboard'},
498
+ {'id': 121, 'name': 'Luggage'},
499
+ {'id': 122, 'name': 'Nightstand'},
500
+ {'id': 123, 'name': 'Teapot'},
501
+ {'id': 124, 'name': 'Telephone'},
502
+ {'id': 125, 'name': 'Trolley'},
503
+ {'id': 126, 'name': 'Head Phone'},
504
+ {'id': 127, 'name': 'Sports Car'},
505
+ {'id': 128, 'name': 'Stop Sign'},
506
+ {'id': 129, 'name': 'Dessert'},
507
+ {'id': 130, 'name': 'Scooter'},
508
+ {'id': 131, 'name': 'Stroller'},
509
+ {'id': 132, 'name': 'Crane'},
510
+ {'id': 133, 'name': 'Remote'},
511
+ {'id': 134, 'name': 'Refrigerator'},
512
+ {'id': 135, 'name': 'Oven'},
513
+ {'id': 136, 'name': 'Lemon'},
514
+ {'id': 137, 'name': 'Duck'},
515
+ {'id': 138, 'name': 'Baseball Bat'},
516
+ {'id': 139, 'name': 'Surveillance Camera'},
517
+ {'id': 140, 'name': 'Cat'},
518
+ {'id': 141, 'name': 'Jug'},
519
+ {'id': 142, 'name': 'Broccoli'},
520
+ {'id': 143, 'name': 'Piano'},
521
+ {'id': 144, 'name': 'Pizza'},
522
+ {'id': 145, 'name': 'Elephant'},
523
+ {'id': 146, 'name': 'Skateboard'},
524
+ {'id': 147, 'name': 'Surfboard'},
525
+ {'id': 148, 'name': 'Gun'},
526
+ {'id': 149, 'name': 'Skating and Skiing shoes'},
527
+ {'id': 150, 'name': 'Gas stove'},
528
+ {'id': 151, 'name': 'Donut'},
529
+ {'id': 152, 'name': 'Bow Tie'},
530
+ {'id': 153, 'name': 'Carrot'},
531
+ {'id': 154, 'name': 'Toilet'},
532
+ {'id': 155, 'name': 'Kite'},
533
+ {'id': 156, 'name': 'Strawberry'},
534
+ {'id': 157, 'name': 'Other Balls'},
535
+ {'id': 158, 'name': 'Shovel'},
536
+ {'id': 159, 'name': 'Pepper'},
537
+ {'id': 160, 'name': 'Computer Box'},
538
+ {'id': 161, 'name': 'Toilet Paper'},
539
+ {'id': 162, 'name': 'Cleaning Products'},
540
+ {'id': 163, 'name': 'Chopsticks'},
541
+ {'id': 164, 'name': 'Microwave'},
542
+ {'id': 165, 'name': 'Pigeon'},
543
+ {'id': 166, 'name': 'Baseball'},
544
+ {'id': 167, 'name': 'Cutting/chopping Board'},
545
+ {'id': 168, 'name': 'Coffee Table'},
546
+ {'id': 169, 'name': 'Side Table'},
547
+ {'id': 170, 'name': 'Scissors'},
548
+ {'id': 171, 'name': 'Marker'},
549
+ {'id': 172, 'name': 'Pie'},
550
+ {'id': 173, 'name': 'Ladder'},
551
+ {'id': 174, 'name': 'Snowboard'},
552
+ {'id': 175, 'name': 'Cookies'},
553
+ {'id': 176, 'name': 'Radiator'},
554
+ {'id': 177, 'name': 'Fire Hydrant'},
555
+ {'id': 178, 'name': 'Basketball'},
556
+ {'id': 179, 'name': 'Zebra'},
557
+ {'id': 180, 'name': 'Grape'},
558
+ {'id': 181, 'name': 'Giraffe'},
559
+ {'id': 182, 'name': 'Potato'},
560
+ {'id': 183, 'name': 'Sausage'},
561
+ {'id': 184, 'name': 'Tricycle'},
562
+ {'id': 185, 'name': 'Violin'},
563
+ {'id': 186, 'name': 'Egg'},
564
+ {'id': 187, 'name': 'Fire Extinguisher'},
565
+ {'id': 188, 'name': 'Candy'},
566
+ {'id': 189, 'name': 'Fire Truck'},
567
+ {'id': 190, 'name': 'Billards'},
568
+ {'id': 191, 'name': 'Converter'},
569
+ {'id': 192, 'name': 'Bathtub'},
570
+ {'id': 193, 'name': 'Wheelchair'},
571
+ {'id': 194, 'name': 'Golf Club'},
572
+ {'id': 195, 'name': 'Briefcase'},
573
+ {'id': 196, 'name': 'Cucumber'},
574
+ {'id': 197, 'name': 'Cigar/Cigarette '},
575
+ {'id': 198, 'name': 'Paint Brush'},
576
+ {'id': 199, 'name': 'Pear'},
577
+ {'id': 200, 'name': 'Heavy Truck'},
578
+ {'id': 201, 'name': 'Hamburger'},
579
+ {'id': 202, 'name': 'Extractor'},
580
+ {'id': 203, 'name': 'Extension Cord'},
581
+ {'id': 204, 'name': 'Tong'},
582
+ {'id': 205, 'name': 'Tennis Racket'},
583
+ {'id': 206, 'name': 'Folder'},
584
+ {'id': 207, 'name': 'American Football'},
585
+ {'id': 208, 'name': 'earphone'},
586
+ {'id': 209, 'name': 'Mask'},
587
+ {'id': 210, 'name': 'Kettle'},
588
+ {'id': 211, 'name': 'Tennis'},
589
+ {'id': 212, 'name': 'Ship'},
590
+ {'id': 213, 'name': 'Swing'},
591
+ {'id': 214, 'name': 'Coffee Machine'},
592
+ {'id': 215, 'name': 'Slide'},
593
+ {'id': 216, 'name': 'Carriage'},
594
+ {'id': 217, 'name': 'Onion'},
595
+ {'id': 218, 'name': 'Green beans'},
596
+ {'id': 219, 'name': 'Projector'},
597
+ {'id': 220, 'name': 'Frisbee'},
598
+ {'id': 221, 'name': 'Washing Machine/Drying Machine'},
599
+ {'id': 222, 'name': 'Chicken'},
600
+ {'id': 223, 'name': 'Printer'},
601
+ {'id': 224, 'name': 'Watermelon'},
602
+ {'id': 225, 'name': 'Saxophone'},
603
+ {'id': 226, 'name': 'Tissue'},
604
+ {'id': 227, 'name': 'Toothbrush'},
605
+ {'id': 228, 'name': 'Ice cream'},
606
+ {'id': 229, 'name': 'Hot air balloon'},
607
+ {'id': 230, 'name': 'Cello'},
608
+ {'id': 231, 'name': 'French Fries'},
609
+ {'id': 232, 'name': 'Scale'},
610
+ {'id': 233, 'name': 'Trophy'},
611
+ {'id': 234, 'name': 'Cabbage'},
612
+ {'id': 235, 'name': 'Hot dog'},
613
+ {'id': 236, 'name': 'Blender'},
614
+ {'id': 237, 'name': 'Peach'},
615
+ {'id': 238, 'name': 'Rice'},
616
+ {'id': 239, 'name': 'Wallet/Purse'},
617
+ {'id': 240, 'name': 'Volleyball'},
618
+ {'id': 241, 'name': 'Deer'},
619
+ {'id': 242, 'name': 'Goose'},
620
+ {'id': 243, 'name': 'Tape'},
621
+ {'id': 244, 'name': 'Tablet'},
622
+ {'id': 245, 'name': 'Cosmetics'},
623
+ {'id': 246, 'name': 'Trumpet'},
624
+ {'id': 247, 'name': 'Pineapple'},
625
+ {'id': 248, 'name': 'Golf Ball'},
626
+ {'id': 249, 'name': 'Ambulance'},
627
+ {'id': 250, 'name': 'Parking meter'},
628
+ {'id': 251, 'name': 'Mango'},
629
+ {'id': 252, 'name': 'Key'},
630
+ {'id': 253, 'name': 'Hurdle'},
631
+ {'id': 254, 'name': 'Fishing Rod'},
632
+ {'id': 255, 'name': 'Medal'},
633
+ {'id': 256, 'name': 'Flute'},
634
+ {'id': 257, 'name': 'Brush'},
635
+ {'id': 258, 'name': 'Penguin'},
636
+ {'id': 259, 'name': 'Megaphone'},
637
+ {'id': 260, 'name': 'Corn'},
638
+ {'id': 261, 'name': 'Lettuce'},
639
+ {'id': 262, 'name': 'Garlic'},
640
+ {'id': 263, 'name': 'Swan'},
641
+ {'id': 264, 'name': 'Helicopter'},
642
+ {'id': 265, 'name': 'Green Onion'},
643
+ {'id': 266, 'name': 'Sandwich'},
644
+ {'id': 267, 'name': 'Nuts'},
645
+ {'id': 268, 'name': 'Speed Limit Sign'},
646
+ {'id': 269, 'name': 'Induction Cooker'},
647
+ {'id': 270, 'name': 'Broom'},
648
+ {'id': 271, 'name': 'Trombone'},
649
+ {'id': 272, 'name': 'Plum'},
650
+ {'id': 273, 'name': 'Rickshaw'},
651
+ {'id': 274, 'name': 'Goldfish'},
652
+ {'id': 275, 'name': 'Kiwi fruit'},
653
+ {'id': 276, 'name': 'Router/modem'},
654
+ {'id': 277, 'name': 'Poker Card'},
655
+ {'id': 278, 'name': 'Toaster'},
656
+ {'id': 279, 'name': 'Shrimp'},
657
+ {'id': 280, 'name': 'Sushi'},
658
+ {'id': 281, 'name': 'Cheese'},
659
+ {'id': 282, 'name': 'Notepaper'},
660
+ {'id': 283, 'name': 'Cherry'},
661
+ {'id': 284, 'name': 'Pliers'},
662
+ {'id': 285, 'name': 'CD'},
663
+ {'id': 286, 'name': 'Pasta'},
664
+ {'id': 287, 'name': 'Hammer'},
665
+ {'id': 288, 'name': 'Cue'},
666
+ {'id': 289, 'name': 'Avocado'},
667
+ {'id': 290, 'name': 'Hami melon'},
668
+ {'id': 291, 'name': 'Flask'},
669
+ {'id': 292, 'name': 'Mushroom'},
670
+ {'id': 293, 'name': 'Screwdriver'},
671
+ {'id': 294, 'name': 'Soap'},
672
+ {'id': 295, 'name': 'Recorder'},
673
+ {'id': 296, 'name': 'Bear'},
674
+ {'id': 297, 'name': 'Eggplant'},
675
+ {'id': 298, 'name': 'Board Eraser'},
676
+ {'id': 299, 'name': 'Coconut'},
677
+ {'id': 300, 'name': 'Tape Measure/ Ruler'},
678
+ {'id': 301, 'name': 'Pig'},
679
+ {'id': 302, 'name': 'Showerhead'},
680
+ {'id': 303, 'name': 'Globe'},
681
+ {'id': 304, 'name': 'Chips'},
682
+ {'id': 305, 'name': 'Steak'},
683
+ {'id': 306, 'name': 'Crosswalk Sign'},
684
+ {'id': 307, 'name': 'Stapler'},
685
+ {'id': 308, 'name': 'Camel'},
686
+ {'id': 309, 'name': 'Formula 1 '},
687
+ {'id': 310, 'name': 'Pomegranate'},
688
+ {'id': 311, 'name': 'Dishwasher'},
689
+ {'id': 312, 'name': 'Crab'},
690
+ {'id': 313, 'name': 'Hoverboard'},
691
+ {'id': 314, 'name': 'Meatball'},
692
+ {'id': 315, 'name': 'Rice Cooker'},
693
+ {'id': 316, 'name': 'Tuba'},
694
+ {'id': 317, 'name': 'Calculator'},
695
+ {'id': 318, 'name': 'Papaya'},
696
+ {'id': 319, 'name': 'Antelope'},
697
+ {'id': 320, 'name': 'Parrot'},
698
+ {'id': 321, 'name': 'Seal'},
699
+ {'id': 322, 'name': 'Butterfly'},
700
+ {'id': 323, 'name': 'Dumbbell'},
701
+ {'id': 324, 'name': 'Donkey'},
702
+ {'id': 325, 'name': 'Lion'},
703
+ {'id': 326, 'name': 'Urinal'},
704
+ {'id': 327, 'name': 'Dolphin'},
705
+ {'id': 328, 'name': 'Electric Drill'},
706
+ {'id': 329, 'name': 'Hair Dryer'},
707
+ {'id': 330, 'name': 'Egg tart'},
708
+ {'id': 331, 'name': 'Jellyfish'},
709
+ {'id': 332, 'name': 'Treadmill'},
710
+ {'id': 333, 'name': 'Lighter'},
711
+ {'id': 334, 'name': 'Grapefruit'},
712
+ {'id': 335, 'name': 'Game board'},
713
+ {'id': 336, 'name': 'Mop'},
714
+ {'id': 337, 'name': 'Radish'},
715
+ {'id': 338, 'name': 'Baozi'},
716
+ {'id': 339, 'name': 'Target'},
717
+ {'id': 340, 'name': 'French'},
718
+ {'id': 341, 'name': 'Spring Rolls'},
719
+ {'id': 342, 'name': 'Monkey'},
720
+ {'id': 343, 'name': 'Rabbit'},
721
+ {'id': 344, 'name': 'Pencil Case'},
722
+ {'id': 345, 'name': 'Yak'},
723
+ {'id': 346, 'name': 'Red Cabbage'},
724
+ {'id': 347, 'name': 'Binoculars'},
725
+ {'id': 348, 'name': 'Asparagus'},
726
+ {'id': 349, 'name': 'Barbell'},
727
+ {'id': 350, 'name': 'Scallop'},
728
+ {'id': 351, 'name': 'Noddles'},
729
+ {'id': 352, 'name': 'Comb'},
730
+ {'id': 353, 'name': 'Dumpling'},
731
+ {'id': 354, 'name': 'Oyster'},
732
+ {'id': 355, 'name': 'Table Tennis paddle'},
733
+ {'id': 356, 'name': 'Cosmetics Brush/Eyeliner Pencil'},
734
+ {'id': 357, 'name': 'Chainsaw'},
735
+ {'id': 358, 'name': 'Eraser'},
736
+ {'id': 359, 'name': 'Lobster'},
737
+ {'id': 360, 'name': 'Durian'},
738
+ {'id': 361, 'name': 'Okra'},
739
+ {'id': 362, 'name': 'Lipstick'},
740
+ {'id': 363, 'name': 'Cosmetics Mirror'},
741
+ {'id': 364, 'name': 'Curling'},
742
+ {'id': 365, 'name': 'Table Tennis '},
743
+ ]
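Editor's note: a hand-maintained 365-entry vocabulary like categories_v2_fix is easy to break when edited, and these exact strings typically feed class-name embeddings downstream. A minimal sanity-check sketch, assuming only the list defined above (an editor's addition, not part of the diff):

    # Check the manual fix list: ids contiguous 1..365, names non-empty and unique.
    ids = [c['id'] for c in categories_v2_fix]
    names = [c['name'].strip() for c in categories_v2_fix]
    assert ids == list(range(1, 366)), 'ids must be exactly 1..365'
    assert all(names), 'empty category name'
    assert len(set(names)) == len(names), 'duplicate category names'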
744
+
745
+
746
+ def _get_builtin_metadata():
747
+ id_to_name = {x['id']: x['name'] for x in categories_v2_fix}
748
+ thing_dataset_id_to_contiguous_id = {
749
+ x['id']: i for i, x in enumerate(
750
+ sorted(categories_v2_fix, key=lambda x: x['id']))}
751
+ thing_classes = [id_to_name[k] for k in sorted(id_to_name)]
752
+ return {
753
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
754
+ "thing_classes": thing_classes}
755
+
756
+
757
+ _PREDEFINED_SPLITS_OBJECTS365 = {
758
+ "objects365_v2_train": ("objects365/train", "objects365/annotations/zhiyuan_objv2_train_fixname_fixmiss.json"),
759
+ # 80,000 images, 1,240,587 annotations
760
+ "objects365_v2_val": ("objects365/val", "objects365/annotations/zhiyuan_objv2_val_fixname.json"),
761
+ "objects365_v2_val_rare": ("objects365/val", "objects365/annotations/zhiyuan_objv2_val_fixname_rare.json"),
762
+ }
763
+
764
+ for key, (image_root, json_file) in _PREDEFINED_SPLITS_OBJECTS365.items():
765
+ register_coco_instances(
766
+ key,
767
+ _get_builtin_metadata(),
768
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
769
+ os.path.join("datasets", image_root),
770
+ )
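Once this module is imported, the splits resolve through detectron2's catalogs like any built-in dataset. A hedged usage sketch (assumes the jsons and images exist under datasets/ as laid out above):

    from detectron2.data import DatasetCatalog, MetadataCatalog
    import detic.data.datasets.objects365  # noqa: F401 -- runs the registration loop

    meta = MetadataCatalog.get('objects365_v2_val')
    print(len(meta.thing_classes))                   # 365
    dicts = DatasetCatalog.get('objects365_v2_val')  # the json is parsed at this call
    print(dicts[0]['file_name'], len(dicts[0]['annotations']))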
detic/data/datasets/oid.py ADDED
@@ -0,0 +1,535 @@
1
+ # Part of the code is from https://github.com/xingyizhou/UniDet/blob/master/projects/UniDet/unidet/data/datasets/oid.py
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+ from .register_oid import register_oid_instances
4
+ import os
5
+
6
+ categories = [
7
+ {'id': 1, 'name': 'Infant bed', 'freebase_id': '/m/061hd_'},
8
+ {'id': 2, 'name': 'Rose', 'freebase_id': '/m/06m11'},
9
+ {'id': 3, 'name': 'Flag', 'freebase_id': '/m/03120'},
10
+ {'id': 4, 'name': 'Flashlight', 'freebase_id': '/m/01kb5b'},
11
+ {'id': 5, 'name': 'Sea turtle', 'freebase_id': '/m/0120dh'},
12
+ {'id': 6, 'name': 'Camera', 'freebase_id': '/m/0dv5r'},
13
+ {'id': 7, 'name': 'Animal', 'freebase_id': '/m/0jbk'},
14
+ {'id': 8, 'name': 'Glove', 'freebase_id': '/m/0174n1'},
15
+ {'id': 9, 'name': 'Crocodile', 'freebase_id': '/m/09f_2'},
16
+ {'id': 10, 'name': 'Cattle', 'freebase_id': '/m/01xq0k1'},
17
+ {'id': 11, 'name': 'House', 'freebase_id': '/m/03jm5'},
18
+ {'id': 12, 'name': 'Guacamole', 'freebase_id': '/m/02g30s'},
19
+ {'id': 13, 'name': 'Penguin', 'freebase_id': '/m/05z6w'},
20
+ {'id': 14, 'name': 'Vehicle registration plate', 'freebase_id': '/m/01jfm_'},
21
+ {'id': 15, 'name': 'Bench', 'freebase_id': '/m/076lb9'},
22
+ {'id': 16, 'name': 'Ladybug', 'freebase_id': '/m/0gj37'},
23
+ {'id': 17, 'name': 'Human nose', 'freebase_id': '/m/0k0pj'},
24
+ {'id': 18, 'name': 'Watermelon', 'freebase_id': '/m/0kpqd'},
25
+ {'id': 19, 'name': 'Flute', 'freebase_id': '/m/0l14j_'},
26
+ {'id': 20, 'name': 'Butterfly', 'freebase_id': '/m/0cyf8'},
27
+ {'id': 21, 'name': 'Washing machine', 'freebase_id': '/m/0174k2'},
28
+ {'id': 22, 'name': 'Raccoon', 'freebase_id': '/m/0dq75'},
29
+ {'id': 23, 'name': 'Segway', 'freebase_id': '/m/076bq'},
30
+ {'id': 24, 'name': 'Taco', 'freebase_id': '/m/07crc'},
31
+ {'id': 25, 'name': 'Jellyfish', 'freebase_id': '/m/0d8zb'},
32
+ {'id': 26, 'name': 'Cake', 'freebase_id': '/m/0fszt'},
33
+ {'id': 27, 'name': 'Pen', 'freebase_id': '/m/0k1tl'},
34
+ {'id': 28, 'name': 'Cannon', 'freebase_id': '/m/020kz'},
35
+ {'id': 29, 'name': 'Bread', 'freebase_id': '/m/09728'},
36
+ {'id': 30, 'name': 'Tree', 'freebase_id': '/m/07j7r'},
37
+ {'id': 31, 'name': 'Shellfish', 'freebase_id': '/m/0fbdv'},
38
+ {'id': 32, 'name': 'Bed', 'freebase_id': '/m/03ssj5'},
39
+ {'id': 33, 'name': 'Hamster', 'freebase_id': '/m/03qrc'},
40
+ {'id': 34, 'name': 'Hat', 'freebase_id': '/m/02dl1y'},
41
+ {'id': 35, 'name': 'Toaster', 'freebase_id': '/m/01k6s3'},
42
+ {'id': 36, 'name': 'Sombrero', 'freebase_id': '/m/02jfl0'},
43
+ {'id': 37, 'name': 'Tiara', 'freebase_id': '/m/01krhy'},
44
+ {'id': 38, 'name': 'Bowl', 'freebase_id': '/m/04kkgm'},
45
+ {'id': 39, 'name': 'Dragonfly', 'freebase_id': '/m/0ft9s'},
46
+ {'id': 40, 'name': 'Moths and butterflies', 'freebase_id': '/m/0d_2m'},
47
+ {'id': 41, 'name': 'Antelope', 'freebase_id': '/m/0czz2'},
48
+ {'id': 42, 'name': 'Vegetable', 'freebase_id': '/m/0f4s2w'},
49
+ {'id': 43, 'name': 'Torch', 'freebase_id': '/m/07dd4'},
50
+ {'id': 44, 'name': 'Building', 'freebase_id': '/m/0cgh4'},
51
+ {'id': 45, 'name': 'Power plugs and sockets', 'freebase_id': '/m/03bbps'},
52
+ {'id': 46, 'name': 'Blender', 'freebase_id': '/m/02pjr4'},
53
+ {'id': 47, 'name': 'Billiard table', 'freebase_id': '/m/04p0qw'},
54
+ {'id': 48, 'name': 'Cutting board', 'freebase_id': '/m/02pdsw'},
55
+ {'id': 49, 'name': 'Bronze sculpture', 'freebase_id': '/m/01yx86'},
56
+ {'id': 50, 'name': 'Turtle', 'freebase_id': '/m/09dzg'},
57
+ {'id': 51, 'name': 'Broccoli', 'freebase_id': '/m/0hkxq'},
58
+ {'id': 52, 'name': 'Tiger', 'freebase_id': '/m/07dm6'},
59
+ {'id': 53, 'name': 'Mirror', 'freebase_id': '/m/054_l'},
60
+ {'id': 54, 'name': 'Bear', 'freebase_id': '/m/01dws'},
61
+ {'id': 55, 'name': 'Zucchini', 'freebase_id': '/m/027pcv'},
62
+ {'id': 56, 'name': 'Dress', 'freebase_id': '/m/01d40f'},
63
+ {'id': 57, 'name': 'Volleyball', 'freebase_id': '/m/02rgn06'},
64
+ {'id': 58, 'name': 'Guitar', 'freebase_id': '/m/0342h'},
65
+ {'id': 59, 'name': 'Reptile', 'freebase_id': '/m/06bt6'},
66
+ {'id': 60, 'name': 'Golf cart', 'freebase_id': '/m/0323sq'},
67
+ {'id': 61, 'name': 'Tart', 'freebase_id': '/m/02zvsm'},
68
+ {'id': 62, 'name': 'Fedora', 'freebase_id': '/m/02fq_6'},
69
+ {'id': 63, 'name': 'Carnivore', 'freebase_id': '/m/01lrl'},
70
+ {'id': 64, 'name': 'Car', 'freebase_id': '/m/0k4j'},
71
+ {'id': 65, 'name': 'Lighthouse', 'freebase_id': '/m/04h7h'},
72
+ {'id': 66, 'name': 'Coffeemaker', 'freebase_id': '/m/07xyvk'},
73
+ {'id': 67, 'name': 'Food processor', 'freebase_id': '/m/03y6mg'},
74
+ {'id': 68, 'name': 'Truck', 'freebase_id': '/m/07r04'},
75
+ {'id': 69, 'name': 'Bookcase', 'freebase_id': '/m/03__z0'},
76
+ {'id': 70, 'name': 'Surfboard', 'freebase_id': '/m/019w40'},
77
+ {'id': 71, 'name': 'Footwear', 'freebase_id': '/m/09j5n'},
78
+ {'id': 72, 'name': 'Bench', 'freebase_id': '/m/0cvnqh'},
79
+ {'id': 73, 'name': 'Necklace', 'freebase_id': '/m/01llwg'},
80
+ {'id': 74, 'name': 'Flower', 'freebase_id': '/m/0c9ph5'},
81
+ {'id': 75, 'name': 'Radish', 'freebase_id': '/m/015x5n'},
82
+ {'id': 76, 'name': 'Marine mammal', 'freebase_id': '/m/0gd2v'},
83
+ {'id': 77, 'name': 'Frying pan', 'freebase_id': '/m/04v6l4'},
84
+ {'id': 78, 'name': 'Tap', 'freebase_id': '/m/02jz0l'},
85
+ {'id': 79, 'name': 'Peach', 'freebase_id': '/m/0dj6p'},
86
+ {'id': 80, 'name': 'Knife', 'freebase_id': '/m/04ctx'},
87
+ {'id': 81, 'name': 'Handbag', 'freebase_id': '/m/080hkjn'},
88
+ {'id': 82, 'name': 'Laptop', 'freebase_id': '/m/01c648'},
89
+ {'id': 83, 'name': 'Tent', 'freebase_id': '/m/01j61q'},
90
+ {'id': 84, 'name': 'Ambulance', 'freebase_id': '/m/012n7d'},
91
+ {'id': 85, 'name': 'Christmas tree', 'freebase_id': '/m/025nd'},
92
+ {'id': 86, 'name': 'Eagle', 'freebase_id': '/m/09csl'},
93
+ {'id': 87, 'name': 'Limousine', 'freebase_id': '/m/01lcw4'},
94
+ {'id': 88, 'name': 'Kitchen & dining room table', 'freebase_id': '/m/0h8n5zk'},
95
+ {'id': 89, 'name': 'Polar bear', 'freebase_id': '/m/0633h'},
96
+ {'id': 90, 'name': 'Tower', 'freebase_id': '/m/01fdzj'},
97
+ {'id': 91, 'name': 'Football', 'freebase_id': '/m/01226z'},
98
+ {'id': 92, 'name': 'Willow', 'freebase_id': '/m/0mw_6'},
99
+ {'id': 93, 'name': 'Human head', 'freebase_id': '/m/04hgtk'},
100
+ {'id': 94, 'name': 'Stop sign', 'freebase_id': '/m/02pv19'},
101
+ {'id': 95, 'name': 'Banana', 'freebase_id': '/m/09qck'},
102
+ {'id': 96, 'name': 'Mixer', 'freebase_id': '/m/063rgb'},
103
+ {'id': 97, 'name': 'Binoculars', 'freebase_id': '/m/0lt4_'},
104
+ {'id': 98, 'name': 'Dessert', 'freebase_id': '/m/0270h'},
105
+ {'id': 99, 'name': 'Bee', 'freebase_id': '/m/01h3n'},
106
+ {'id': 100, 'name': 'Chair', 'freebase_id': '/m/01mzpv'},
107
+ {'id': 101, 'name': 'Wood-burning stove', 'freebase_id': '/m/04169hn'},
108
+ {'id': 102, 'name': 'Flowerpot', 'freebase_id': '/m/0fm3zh'},
109
+ {'id': 103, 'name': 'Beaker', 'freebase_id': '/m/0d20w4'},
110
+ {'id': 104, 'name': 'Oyster', 'freebase_id': '/m/0_cp5'},
111
+ {'id': 105, 'name': 'Woodpecker', 'freebase_id': '/m/01dy8n'},
112
+ {'id': 106, 'name': 'Harp', 'freebase_id': '/m/03m5k'},
113
+ {'id': 107, 'name': 'Bathtub', 'freebase_id': '/m/03dnzn'},
114
+ {'id': 108, 'name': 'Wall clock', 'freebase_id': '/m/0h8mzrc'},
115
+ {'id': 109, 'name': 'Sports uniform', 'freebase_id': '/m/0h8mhzd'},
116
+ {'id': 110, 'name': 'Rhinoceros', 'freebase_id': '/m/03d443'},
117
+ {'id': 111, 'name': 'Beehive', 'freebase_id': '/m/01gllr'},
118
+ {'id': 112, 'name': 'Cupboard', 'freebase_id': '/m/0642b4'},
119
+ {'id': 113, 'name': 'Chicken', 'freebase_id': '/m/09b5t'},
120
+ {'id': 114, 'name': 'Man', 'freebase_id': '/m/04yx4'},
121
+ {'id': 115, 'name': 'Blue jay', 'freebase_id': '/m/01f8m5'},
122
+ {'id': 116, 'name': 'Cucumber', 'freebase_id': '/m/015x4r'},
123
+ {'id': 117, 'name': 'Balloon', 'freebase_id': '/m/01j51'},
124
+ {'id': 118, 'name': 'Kite', 'freebase_id': '/m/02zt3'},
125
+ {'id': 119, 'name': 'Fireplace', 'freebase_id': '/m/03tw93'},
126
+ {'id': 120, 'name': 'Lantern', 'freebase_id': '/m/01jfsr'},
127
+ {'id': 121, 'name': 'Missile', 'freebase_id': '/m/04ylt'},
128
+ {'id': 122, 'name': 'Book', 'freebase_id': '/m/0bt_c3'},
129
+ {'id': 123, 'name': 'Spoon', 'freebase_id': '/m/0cmx8'},
130
+ {'id': 124, 'name': 'Grapefruit', 'freebase_id': '/m/0hqkz'},
131
+ {'id': 125, 'name': 'Squirrel', 'freebase_id': '/m/071qp'},
132
+ {'id': 126, 'name': 'Orange', 'freebase_id': '/m/0cyhj_'},
133
+ {'id': 127, 'name': 'Coat', 'freebase_id': '/m/01xygc'},
134
+ {'id': 128, 'name': 'Punching bag', 'freebase_id': '/m/0420v5'},
135
+ {'id': 129, 'name': 'Zebra', 'freebase_id': '/m/0898b'},
136
+ {'id': 130, 'name': 'Billboard', 'freebase_id': '/m/01knjb'},
137
+ {'id': 131, 'name': 'Bicycle', 'freebase_id': '/m/0199g'},
138
+ {'id': 132, 'name': 'Door handle', 'freebase_id': '/m/03c7gz'},
139
+ {'id': 133, 'name': 'Mechanical fan', 'freebase_id': '/m/02x984l'},
140
+ {'id': 134, 'name': 'Ring binder', 'freebase_id': '/m/04zwwv'},
141
+ {'id': 135, 'name': 'Table', 'freebase_id': '/m/04bcr3'},
142
+ {'id': 136, 'name': 'Parrot', 'freebase_id': '/m/0gv1x'},
143
+ {'id': 137, 'name': 'Sock', 'freebase_id': '/m/01nq26'},
144
+ {'id': 138, 'name': 'Vase', 'freebase_id': '/m/02s195'},
145
+ {'id': 139, 'name': 'Weapon', 'freebase_id': '/m/083kb'},
146
+ {'id': 140, 'name': 'Shotgun', 'freebase_id': '/m/06nrc'},
147
+ {'id': 141, 'name': 'Glasses', 'freebase_id': '/m/0jyfg'},
148
+ {'id': 142, 'name': 'Seahorse', 'freebase_id': '/m/0nybt'},
149
+ {'id': 143, 'name': 'Belt', 'freebase_id': '/m/0176mf'},
150
+ {'id': 144, 'name': 'Watercraft', 'freebase_id': '/m/01rzcn'},
151
+ {'id': 145, 'name': 'Window', 'freebase_id': '/m/0d4v4'},
152
+ {'id': 146, 'name': 'Giraffe', 'freebase_id': '/m/03bk1'},
153
+ {'id': 147, 'name': 'Lion', 'freebase_id': '/m/096mb'},
154
+ {'id': 148, 'name': 'Tire', 'freebase_id': '/m/0h9mv'},
155
+ {'id': 149, 'name': 'Vehicle', 'freebase_id': '/m/07yv9'},
156
+ {'id': 150, 'name': 'Canoe', 'freebase_id': '/m/0ph39'},
157
+ {'id': 151, 'name': 'Tie', 'freebase_id': '/m/01rkbr'},
158
+ {'id': 152, 'name': 'Shelf', 'freebase_id': '/m/0gjbg72'},
159
+ {'id': 153, 'name': 'Picture frame', 'freebase_id': '/m/06z37_'},
160
+ {'id': 154, 'name': 'Printer', 'freebase_id': '/m/01m4t'},
161
+ {'id': 155, 'name': 'Human leg', 'freebase_id': '/m/035r7c'},
162
+ {'id': 156, 'name': 'Boat', 'freebase_id': '/m/019jd'},
163
+ {'id': 157, 'name': 'Slow cooker', 'freebase_id': '/m/02tsc9'},
164
+ {'id': 158, 'name': 'Croissant', 'freebase_id': '/m/015wgc'},
165
+ {'id': 159, 'name': 'Candle', 'freebase_id': '/m/0c06p'},
166
+ {'id': 160, 'name': 'Pancake', 'freebase_id': '/m/01dwwc'},
167
+ {'id': 161, 'name': 'Pillow', 'freebase_id': '/m/034c16'},
168
+ {'id': 162, 'name': 'Coin', 'freebase_id': '/m/0242l'},
169
+ {'id': 163, 'name': 'Stretcher', 'freebase_id': '/m/02lbcq'},
170
+ {'id': 164, 'name': 'Sandal', 'freebase_id': '/m/03nfch'},
171
+ {'id': 165, 'name': 'Woman', 'freebase_id': '/m/03bt1vf'},
172
+ {'id': 166, 'name': 'Stairs', 'freebase_id': '/m/01lynh'},
173
+ {'id': 167, 'name': 'Harpsichord', 'freebase_id': '/m/03q5t'},
174
+ {'id': 168, 'name': 'Stool', 'freebase_id': '/m/0fqt361'},
175
+ {'id': 169, 'name': 'Bus', 'freebase_id': '/m/01bjv'},
176
+ {'id': 170, 'name': 'Suitcase', 'freebase_id': '/m/01s55n'},
177
+ {'id': 171, 'name': 'Human mouth', 'freebase_id': '/m/0283dt1'},
178
+ {'id': 172, 'name': 'Juice', 'freebase_id': '/m/01z1kdw'},
179
+ {'id': 173, 'name': 'Skull', 'freebase_id': '/m/016m2d'},
180
+ {'id': 174, 'name': 'Door', 'freebase_id': '/m/02dgv'},
181
+ {'id': 175, 'name': 'Violin', 'freebase_id': '/m/07y_7'},
182
+ {'id': 176, 'name': 'Chopsticks', 'freebase_id': '/m/01_5g'},
183
+ {'id': 177, 'name': 'Digital clock', 'freebase_id': '/m/06_72j'},
184
+ {'id': 178, 'name': 'Sunflower', 'freebase_id': '/m/0ftb8'},
185
+ {'id': 179, 'name': 'Leopard', 'freebase_id': '/m/0c29q'},
186
+ {'id': 180, 'name': 'Bell pepper', 'freebase_id': '/m/0jg57'},
187
+ {'id': 181, 'name': 'Harbor seal', 'freebase_id': '/m/02l8p9'},
188
+ {'id': 182, 'name': 'Snake', 'freebase_id': '/m/078jl'},
189
+ {'id': 183, 'name': 'Sewing machine', 'freebase_id': '/m/0llzx'},
190
+ {'id': 184, 'name': 'Goose', 'freebase_id': '/m/0dbvp'},
191
+ {'id': 185, 'name': 'Helicopter', 'freebase_id': '/m/09ct_'},
192
+ {'id': 186, 'name': 'Seat belt', 'freebase_id': '/m/0dkzw'},
193
+ {'id': 187, 'name': 'Coffee cup', 'freebase_id': '/m/02p5f1q'},
194
+ {'id': 188, 'name': 'Microwave oven', 'freebase_id': '/m/0fx9l'},
195
+ {'id': 189, 'name': 'Hot dog', 'freebase_id': '/m/01b9xk'},
196
+ {'id': 190, 'name': 'Countertop', 'freebase_id': '/m/0b3fp9'},
197
+ {'id': 191, 'name': 'Serving tray', 'freebase_id': '/m/0h8n27j'},
198
+ {'id': 192, 'name': 'Dog bed', 'freebase_id': '/m/0h8n6f9'},
199
+ {'id': 193, 'name': 'Beer', 'freebase_id': '/m/01599'},
200
+ {'id': 194, 'name': 'Sunglasses', 'freebase_id': '/m/017ftj'},
201
+ {'id': 195, 'name': 'Golf ball', 'freebase_id': '/m/044r5d'},
202
+ {'id': 196, 'name': 'Waffle', 'freebase_id': '/m/01dwsz'},
203
+ {'id': 197, 'name': 'Palm tree', 'freebase_id': '/m/0cdl1'},
204
+ {'id': 198, 'name': 'Trumpet', 'freebase_id': '/m/07gql'},
205
+ {'id': 199, 'name': 'Ruler', 'freebase_id': '/m/0hdln'},
206
+ {'id': 200, 'name': 'Helmet', 'freebase_id': '/m/0zvk5'},
207
+ {'id': 201, 'name': 'Ladder', 'freebase_id': '/m/012w5l'},
208
+ {'id': 202, 'name': 'Office building', 'freebase_id': '/m/021sj1'},
209
+ {'id': 203, 'name': 'Tablet computer', 'freebase_id': '/m/0bh9flk'},
210
+ {'id': 204, 'name': 'Toilet paper', 'freebase_id': '/m/09gtd'},
211
+ {'id': 205, 'name': 'Pomegranate', 'freebase_id': '/m/0jwn_'},
212
+ {'id': 206, 'name': 'Skirt', 'freebase_id': '/m/02wv6h6'},
213
+ {'id': 207, 'name': 'Gas stove', 'freebase_id': '/m/02wv84t'},
214
+ {'id': 208, 'name': 'Cookie', 'freebase_id': '/m/021mn'},
215
+ {'id': 209, 'name': 'Cart', 'freebase_id': '/m/018p4k'},
216
+ {'id': 210, 'name': 'Raven', 'freebase_id': '/m/06j2d'},
217
+ {'id': 211, 'name': 'Egg', 'freebase_id': '/m/033cnk'},
218
+ {'id': 212, 'name': 'Burrito', 'freebase_id': '/m/01j3zr'},
219
+ {'id': 213, 'name': 'Goat', 'freebase_id': '/m/03fwl'},
220
+ {'id': 214, 'name': 'Kitchen knife', 'freebase_id': '/m/058qzx'},
221
+ {'id': 215, 'name': 'Skateboard', 'freebase_id': '/m/06_fw'},
222
+ {'id': 216, 'name': 'Salt and pepper shakers', 'freebase_id': '/m/02x8cch'},
223
+ {'id': 217, 'name': 'Lynx', 'freebase_id': '/m/04g2r'},
224
+ {'id': 218, 'name': 'Boot', 'freebase_id': '/m/01b638'},
225
+ {'id': 219, 'name': 'Platter', 'freebase_id': '/m/099ssp'},
226
+ {'id': 220, 'name': 'Ski', 'freebase_id': '/m/071p9'},
227
+ {'id': 221, 'name': 'Swimwear', 'freebase_id': '/m/01gkx_'},
228
+ {'id': 222, 'name': 'Swimming pool', 'freebase_id': '/m/0b_rs'},
229
+ {'id': 223, 'name': 'Drinking straw', 'freebase_id': '/m/03v5tg'},
230
+ {'id': 224, 'name': 'Wrench', 'freebase_id': '/m/01j5ks'},
231
+ {'id': 225, 'name': 'Drum', 'freebase_id': '/m/026t6'},
232
+ {'id': 226, 'name': 'Ant', 'freebase_id': '/m/0_k2'},
233
+ {'id': 227, 'name': 'Human ear', 'freebase_id': '/m/039xj_'},
234
+ {'id': 228, 'name': 'Headphones', 'freebase_id': '/m/01b7fy'},
235
+ {'id': 229, 'name': 'Fountain', 'freebase_id': '/m/0220r2'},
236
+ {'id': 230, 'name': 'Bird', 'freebase_id': '/m/015p6'},
237
+ {'id': 231, 'name': 'Jeans', 'freebase_id': '/m/0fly7'},
238
+ {'id': 232, 'name': 'Television', 'freebase_id': '/m/07c52'},
239
+ {'id': 233, 'name': 'Crab', 'freebase_id': '/m/0n28_'},
240
+ {'id': 234, 'name': 'Microphone', 'freebase_id': '/m/0hg7b'},
241
+ {'id': 235, 'name': 'Home appliance', 'freebase_id': '/m/019dx1'},
242
+ {'id': 236, 'name': 'Snowplow', 'freebase_id': '/m/04vv5k'},
243
+ {'id': 237, 'name': 'Beetle', 'freebase_id': '/m/020jm'},
244
+ {'id': 238, 'name': 'Artichoke', 'freebase_id': '/m/047v4b'},
245
+ {'id': 239, 'name': 'Jet ski', 'freebase_id': '/m/01xs3r'},
246
+ {'id': 240, 'name': 'Stationary bicycle', 'freebase_id': '/m/03kt2w'},
247
+ {'id': 241, 'name': 'Human hair', 'freebase_id': '/m/03q69'},
248
+ {'id': 242, 'name': 'Brown bear', 'freebase_id': '/m/01dxs'},
249
+ {'id': 243, 'name': 'Starfish', 'freebase_id': '/m/01h8tj'},
250
+ {'id': 244, 'name': 'Fork', 'freebase_id': '/m/0dt3t'},
251
+ {'id': 245, 'name': 'Lobster', 'freebase_id': '/m/0cjq5'},
252
+ {'id': 246, 'name': 'Corded phone', 'freebase_id': '/m/0h8lkj8'},
253
+ {'id': 247, 'name': 'Drink', 'freebase_id': '/m/0271t'},
254
+ {'id': 248, 'name': 'Saucer', 'freebase_id': '/m/03q5c7'},
255
+ {'id': 249, 'name': 'Carrot', 'freebase_id': '/m/0fj52s'},
256
+ {'id': 250, 'name': 'Insect', 'freebase_id': '/m/03vt0'},
257
+ {'id': 251, 'name': 'Clock', 'freebase_id': '/m/01x3z'},
258
+ {'id': 252, 'name': 'Castle', 'freebase_id': '/m/0d5gx'},
259
+ {'id': 253, 'name': 'Tennis racket', 'freebase_id': '/m/0h8my_4'},
260
+ {'id': 254, 'name': 'Ceiling fan', 'freebase_id': '/m/03ldnb'},
261
+ {'id': 255, 'name': 'Asparagus', 'freebase_id': '/m/0cjs7'},
262
+ {'id': 256, 'name': 'Jaguar', 'freebase_id': '/m/0449p'},
263
+ {'id': 257, 'name': 'Musical instrument', 'freebase_id': '/m/04szw'},
264
+ {'id': 258, 'name': 'Train', 'freebase_id': '/m/07jdr'},
265
+ {'id': 259, 'name': 'Cat', 'freebase_id': '/m/01yrx'},
266
+ {'id': 260, 'name': 'Rifle', 'freebase_id': '/m/06c54'},
267
+ {'id': 261, 'name': 'Dumbbell', 'freebase_id': '/m/04h8sr'},
268
+ {'id': 262, 'name': 'Mobile phone', 'freebase_id': '/m/050k8'},
269
+ {'id': 263, 'name': 'Taxi', 'freebase_id': '/m/0pg52'},
270
+ {'id': 264, 'name': 'Shower', 'freebase_id': '/m/02f9f_'},
271
+ {'id': 265, 'name': 'Pitcher', 'freebase_id': '/m/054fyh'},
272
+ {'id': 266, 'name': 'Lemon', 'freebase_id': '/m/09k_b'},
273
+ {'id': 267, 'name': 'Invertebrate', 'freebase_id': '/m/03xxp'},
274
+ {'id': 268, 'name': 'Turkey', 'freebase_id': '/m/0jly1'},
275
+ {'id': 269, 'name': 'High heels', 'freebase_id': '/m/06k2mb'},
276
+ {'id': 270, 'name': 'Bust', 'freebase_id': '/m/04yqq2'},
277
+ {'id': 271, 'name': 'Elephant', 'freebase_id': '/m/0bwd_0j'},
278
+ {'id': 272, 'name': 'Scarf', 'freebase_id': '/m/02h19r'},
279
+ {'id': 273, 'name': 'Barrel', 'freebase_id': '/m/02zn6n'},
280
+ {'id': 274, 'name': 'Trombone', 'freebase_id': '/m/07c6l'},
281
+ {'id': 275, 'name': 'Pumpkin', 'freebase_id': '/m/05zsy'},
282
+ {'id': 276, 'name': 'Box', 'freebase_id': '/m/025dyy'},
283
+ {'id': 277, 'name': 'Tomato', 'freebase_id': '/m/07j87'},
284
+ {'id': 278, 'name': 'Frog', 'freebase_id': '/m/09ld4'},
285
+ {'id': 279, 'name': 'Bidet', 'freebase_id': '/m/01vbnl'},
286
+ {'id': 280, 'name': 'Human face', 'freebase_id': '/m/0dzct'},
287
+ {'id': 281, 'name': 'Houseplant', 'freebase_id': '/m/03fp41'},
288
+ {'id': 282, 'name': 'Van', 'freebase_id': '/m/0h2r6'},
289
+ {'id': 283, 'name': 'Shark', 'freebase_id': '/m/0by6g'},
290
+ {'id': 284, 'name': 'Ice cream', 'freebase_id': '/m/0cxn2'},
291
+ {'id': 285, 'name': 'Swim cap', 'freebase_id': '/m/04tn4x'},
292
+ {'id': 286, 'name': 'Falcon', 'freebase_id': '/m/0f6wt'},
293
+ {'id': 287, 'name': 'Ostrich', 'freebase_id': '/m/05n4y'},
294
+ {'id': 288, 'name': 'Handgun', 'freebase_id': '/m/0gxl3'},
295
+ {'id': 289, 'name': 'Whiteboard', 'freebase_id': '/m/02d9qx'},
296
+ {'id': 290, 'name': 'Lizard', 'freebase_id': '/m/04m9y'},
297
+ {'id': 291, 'name': 'Pasta', 'freebase_id': '/m/05z55'},
298
+ {'id': 292, 'name': 'Snowmobile', 'freebase_id': '/m/01x3jk'},
299
+ {'id': 293, 'name': 'Light bulb', 'freebase_id': '/m/0h8l4fh'},
300
+ {'id': 294, 'name': 'Window blind', 'freebase_id': '/m/031b6r'},
301
+ {'id': 295, 'name': 'Muffin', 'freebase_id': '/m/01tcjp'},
302
+ {'id': 296, 'name': 'Pretzel', 'freebase_id': '/m/01f91_'},
303
+ {'id': 297, 'name': 'Computer monitor', 'freebase_id': '/m/02522'},
304
+ {'id': 298, 'name': 'Horn', 'freebase_id': '/m/0319l'},
305
+ {'id': 299, 'name': 'Furniture', 'freebase_id': '/m/0c_jw'},
306
+ {'id': 300, 'name': 'Sandwich', 'freebase_id': '/m/0l515'},
307
+ {'id': 301, 'name': 'Fox', 'freebase_id': '/m/0306r'},
308
+ {'id': 302, 'name': 'Convenience store', 'freebase_id': '/m/0crjs'},
309
+ {'id': 303, 'name': 'Fish', 'freebase_id': '/m/0ch_cf'},
310
+ {'id': 304, 'name': 'Fruit', 'freebase_id': '/m/02xwb'},
311
+ {'id': 305, 'name': 'Earrings', 'freebase_id': '/m/01r546'},
312
+ {'id': 306, 'name': 'Curtain', 'freebase_id': '/m/03rszm'},
313
+ {'id': 307, 'name': 'Grape', 'freebase_id': '/m/0388q'},
314
+ {'id': 308, 'name': 'Sofa bed', 'freebase_id': '/m/03m3pdh'},
315
+ {'id': 309, 'name': 'Horse', 'freebase_id': '/m/03k3r'},
316
+ {'id': 310, 'name': 'Luggage and bags', 'freebase_id': '/m/0hf58v5'},
317
+ {'id': 311, 'name': 'Desk', 'freebase_id': '/m/01y9k5'},
318
+ {'id': 312, 'name': 'Crutch', 'freebase_id': '/m/05441v'},
319
+ {'id': 313, 'name': 'Bicycle helmet', 'freebase_id': '/m/03p3bw'},
320
+ {'id': 314, 'name': 'Tick', 'freebase_id': '/m/0175cv'},
321
+ {'id': 315, 'name': 'Airplane', 'freebase_id': '/m/0cmf2'},
322
+ {'id': 316, 'name': 'Canary', 'freebase_id': '/m/0ccs93'},
323
+ {'id': 317, 'name': 'Spatula', 'freebase_id': '/m/02d1br'},
324
+ {'id': 318, 'name': 'Watch', 'freebase_id': '/m/0gjkl'},
325
+ {'id': 319, 'name': 'Lily', 'freebase_id': '/m/0jqgx'},
326
+ {'id': 320, 'name': 'Kitchen appliance', 'freebase_id': '/m/0h99cwc'},
327
+ {'id': 321, 'name': 'Filing cabinet', 'freebase_id': '/m/047j0r'},
328
+ {'id': 322, 'name': 'Aircraft', 'freebase_id': '/m/0k5j'},
329
+ {'id': 323, 'name': 'Cake stand', 'freebase_id': '/m/0h8n6ft'},
330
+ {'id': 324, 'name': 'Candy', 'freebase_id': '/m/0gm28'},
331
+ {'id': 325, 'name': 'Sink', 'freebase_id': '/m/0130jx'},
332
+ {'id': 326, 'name': 'Mouse', 'freebase_id': '/m/04rmv'},
333
+ {'id': 327, 'name': 'Wine', 'freebase_id': '/m/081qc'},
334
+ {'id': 328, 'name': 'Wheelchair', 'freebase_id': '/m/0qmmr'},
335
+ {'id': 329, 'name': 'Goldfish', 'freebase_id': '/m/03fj2'},
336
+ {'id': 330, 'name': 'Refrigerator', 'freebase_id': '/m/040b_t'},
337
+ {'id': 331, 'name': 'French fries', 'freebase_id': '/m/02y6n'},
338
+ {'id': 332, 'name': 'Drawer', 'freebase_id': '/m/0fqfqc'},
339
+ {'id': 333, 'name': 'Treadmill', 'freebase_id': '/m/030610'},
340
+ {'id': 334, 'name': 'Picnic basket', 'freebase_id': '/m/07kng9'},
341
+ {'id': 335, 'name': 'Dice', 'freebase_id': '/m/029b3'},
342
+ {'id': 336, 'name': 'Cabbage', 'freebase_id': '/m/0fbw6'},
343
+ {'id': 337, 'name': 'Football helmet', 'freebase_id': '/m/07qxg_'},
344
+ {'id': 338, 'name': 'Pig', 'freebase_id': '/m/068zj'},
345
+ {'id': 339, 'name': 'Person', 'freebase_id': '/m/01g317'},
346
+ {'id': 340, 'name': 'Shorts', 'freebase_id': '/m/01bfm9'},
347
+ {'id': 341, 'name': 'Gondola', 'freebase_id': '/m/02068x'},
348
+ {'id': 342, 'name': 'Honeycomb', 'freebase_id': '/m/0fz0h'},
349
+ {'id': 343, 'name': 'Doughnut', 'freebase_id': '/m/0jy4k'},
350
+ {'id': 344, 'name': 'Chest of drawers', 'freebase_id': '/m/05kyg_'},
351
+ {'id': 345, 'name': 'Land vehicle', 'freebase_id': '/m/01prls'},
352
+ {'id': 346, 'name': 'Bat', 'freebase_id': '/m/01h44'},
353
+ {'id': 347, 'name': 'Monkey', 'freebase_id': '/m/08pbxl'},
354
+ {'id': 348, 'name': 'Dagger', 'freebase_id': '/m/02gzp'},
355
+ {'id': 349, 'name': 'Tableware', 'freebase_id': '/m/04brg2'},
356
+ {'id': 350, 'name': 'Human foot', 'freebase_id': '/m/031n1'},
357
+ {'id': 351, 'name': 'Mug', 'freebase_id': '/m/02jvh9'},
358
+ {'id': 352, 'name': 'Alarm clock', 'freebase_id': '/m/046dlr'},
359
+ {'id': 353, 'name': 'Pressure cooker', 'freebase_id': '/m/0h8ntjv'},
360
+ {'id': 354, 'name': 'Human hand', 'freebase_id': '/m/0k65p'},
361
+ {'id': 355, 'name': 'Tortoise', 'freebase_id': '/m/011k07'},
362
+ {'id': 356, 'name': 'Baseball glove', 'freebase_id': '/m/03grzl'},
363
+ {'id': 357, 'name': 'Sword', 'freebase_id': '/m/06y5r'},
364
+ {'id': 358, 'name': 'Pear', 'freebase_id': '/m/061_f'},
365
+ {'id': 359, 'name': 'Miniskirt', 'freebase_id': '/m/01cmb2'},
366
+ {'id': 360, 'name': 'Traffic sign', 'freebase_id': '/m/01mqdt'},
367
+ {'id': 361, 'name': 'Girl', 'freebase_id': '/m/05r655'},
368
+ {'id': 362, 'name': 'Roller skates', 'freebase_id': '/m/02p3w7d'},
369
+ {'id': 363, 'name': 'Dinosaur', 'freebase_id': '/m/029tx'},
370
+ {'id': 364, 'name': 'Porch', 'freebase_id': '/m/04m6gz'},
371
+ {'id': 365, 'name': 'Human beard', 'freebase_id': '/m/015h_t'},
372
+ {'id': 366, 'name': 'Submarine sandwich', 'freebase_id': '/m/06pcq'},
373
+ {'id': 367, 'name': 'Screwdriver', 'freebase_id': '/m/01bms0'},
374
+ {'id': 368, 'name': 'Strawberry', 'freebase_id': '/m/07fbm7'},
375
+ {'id': 369, 'name': 'Wine glass', 'freebase_id': '/m/09tvcd'},
376
+ {'id': 370, 'name': 'Seafood', 'freebase_id': '/m/06nwz'},
377
+ {'id': 371, 'name': 'Racket', 'freebase_id': '/m/0dv9c'},
378
+ {'id': 372, 'name': 'Wheel', 'freebase_id': '/m/083wq'},
379
+ {'id': 373, 'name': 'Sea lion', 'freebase_id': '/m/0gd36'},
380
+ {'id': 374, 'name': 'Toy', 'freebase_id': '/m/0138tl'},
381
+ {'id': 375, 'name': 'Tea', 'freebase_id': '/m/07clx'},
382
+ {'id': 376, 'name': 'Tennis ball', 'freebase_id': '/m/05ctyq'},
383
+ {'id': 377, 'name': 'Waste container', 'freebase_id': '/m/0bjyj5'},
384
+ {'id': 378, 'name': 'Mule', 'freebase_id': '/m/0dbzx'},
385
+ {'id': 379, 'name': 'Cricket ball', 'freebase_id': '/m/02ctlc'},
386
+ {'id': 380, 'name': 'Pineapple', 'freebase_id': '/m/0fp6w'},
387
+ {'id': 381, 'name': 'Coconut', 'freebase_id': '/m/0djtd'},
388
+ {'id': 382, 'name': 'Doll', 'freebase_id': '/m/0167gd'},
389
+ {'id': 383, 'name': 'Coffee table', 'freebase_id': '/m/078n6m'},
390
+ {'id': 384, 'name': 'Snowman', 'freebase_id': '/m/0152hh'},
391
+ {'id': 385, 'name': 'Lavender', 'freebase_id': '/m/04gth'},
392
+ {'id': 386, 'name': 'Shrimp', 'freebase_id': '/m/0ll1f78'},
393
+ {'id': 387, 'name': 'Maple', 'freebase_id': '/m/0cffdh'},
394
+ {'id': 388, 'name': 'Cowboy hat', 'freebase_id': '/m/025rp__'},
395
+ {'id': 389, 'name': 'Goggles', 'freebase_id': '/m/02_n6y'},
396
+ {'id': 390, 'name': 'Rugby ball', 'freebase_id': '/m/0wdt60w'},
397
+ {'id': 391, 'name': 'Caterpillar', 'freebase_id': '/m/0cydv'},
398
+ {'id': 392, 'name': 'Poster', 'freebase_id': '/m/01n5jq'},
399
+ {'id': 393, 'name': 'Rocket', 'freebase_id': '/m/09rvcxw'},
400
+ {'id': 394, 'name': 'Organ', 'freebase_id': '/m/013y1f'},
401
+ {'id': 395, 'name': 'Saxophone', 'freebase_id': '/m/06ncr'},
402
+ {'id': 396, 'name': 'Traffic light', 'freebase_id': '/m/015qff'},
403
+ {'id': 397, 'name': 'Cocktail', 'freebase_id': '/m/024g6'},
404
+ {'id': 398, 'name': 'Plastic bag', 'freebase_id': '/m/05gqfk'},
405
+ {'id': 399, 'name': 'Squash', 'freebase_id': '/m/0dv77'},
406
+ {'id': 400, 'name': 'Mushroom', 'freebase_id': '/m/052sf'},
407
+ {'id': 401, 'name': 'Hamburger', 'freebase_id': '/m/0cdn1'},
408
+ {'id': 402, 'name': 'Light switch', 'freebase_id': '/m/03jbxj'},
409
+ {'id': 403, 'name': 'Parachute', 'freebase_id': '/m/0cyfs'},
410
+ {'id': 404, 'name': 'Teddy bear', 'freebase_id': '/m/0kmg4'},
411
+ {'id': 405, 'name': 'Winter melon', 'freebase_id': '/m/02cvgx'},
412
+ {'id': 406, 'name': 'Deer', 'freebase_id': '/m/09kx5'},
413
+ {'id': 407, 'name': 'Musical keyboard', 'freebase_id': '/m/057cc'},
414
+ {'id': 408, 'name': 'Plumbing fixture', 'freebase_id': '/m/02pkr5'},
415
+ {'id': 409, 'name': 'Scoreboard', 'freebase_id': '/m/057p5t'},
416
+ {'id': 410, 'name': 'Baseball bat', 'freebase_id': '/m/03g8mr'},
417
+ {'id': 411, 'name': 'Envelope', 'freebase_id': '/m/0frqm'},
418
+ {'id': 412, 'name': 'Adhesive tape', 'freebase_id': '/m/03m3vtv'},
419
+ {'id': 413, 'name': 'Briefcase', 'freebase_id': '/m/0584n8'},
420
+ {'id': 414, 'name': 'Paddle', 'freebase_id': '/m/014y4n'},
421
+ {'id': 415, 'name': 'Bow and arrow', 'freebase_id': '/m/01g3x7'},
422
+ {'id': 416, 'name': 'Telephone', 'freebase_id': '/m/07cx4'},
423
+ {'id': 417, 'name': 'Sheep', 'freebase_id': '/m/07bgp'},
424
+ {'id': 418, 'name': 'Jacket', 'freebase_id': '/m/032b3c'},
425
+ {'id': 419, 'name': 'Boy', 'freebase_id': '/m/01bl7v'},
426
+ {'id': 420, 'name': 'Pizza', 'freebase_id': '/m/0663v'},
427
+ {'id': 421, 'name': 'Otter', 'freebase_id': '/m/0cn6p'},
428
+ {'id': 422, 'name': 'Office supplies', 'freebase_id': '/m/02rdsp'},
429
+ {'id': 423, 'name': 'Couch', 'freebase_id': '/m/02crq1'},
430
+ {'id': 424, 'name': 'Cello', 'freebase_id': '/m/01xqw'},
431
+ {'id': 425, 'name': 'Bull', 'freebase_id': '/m/0cnyhnx'},
432
+ {'id': 426, 'name': 'Camel', 'freebase_id': '/m/01x_v'},
433
+ {'id': 427, 'name': 'Ball', 'freebase_id': '/m/018xm'},
434
+ {'id': 428, 'name': 'Duck', 'freebase_id': '/m/09ddx'},
435
+ {'id': 429, 'name': 'Whale', 'freebase_id': '/m/084zz'},
436
+ {'id': 430, 'name': 'Shirt', 'freebase_id': '/m/01n4qj'},
437
+ {'id': 431, 'name': 'Tank', 'freebase_id': '/m/07cmd'},
438
+ {'id': 432, 'name': 'Motorcycle', 'freebase_id': '/m/04_sv'},
439
+ {'id': 433, 'name': 'Accordion', 'freebase_id': '/m/0mkg'},
440
+ {'id': 434, 'name': 'Owl', 'freebase_id': '/m/09d5_'},
441
+ {'id': 435, 'name': 'Porcupine', 'freebase_id': '/m/0c568'},
442
+ {'id': 436, 'name': 'Sun hat', 'freebase_id': '/m/02wbtzl'},
443
+ {'id': 437, 'name': 'Nail', 'freebase_id': '/m/05bm6'},
444
+ {'id': 438, 'name': 'Scissors', 'freebase_id': '/m/01lsmm'},
445
+ {'id': 439, 'name': 'Swan', 'freebase_id': '/m/0dftk'},
446
+ {'id': 440, 'name': 'Lamp', 'freebase_id': '/m/0dtln'},
447
+ {'id': 441, 'name': 'Crown', 'freebase_id': '/m/0nl46'},
448
+ {'id': 442, 'name': 'Piano', 'freebase_id': '/m/05r5c'},
449
+ {'id': 443, 'name': 'Sculpture', 'freebase_id': '/m/06msq'},
450
+ {'id': 444, 'name': 'Cheetah', 'freebase_id': '/m/0cd4d'},
451
+ {'id': 445, 'name': 'Oboe', 'freebase_id': '/m/05kms'},
452
+ {'id': 446, 'name': 'Tin can', 'freebase_id': '/m/02jnhm'},
453
+ {'id': 447, 'name': 'Mango', 'freebase_id': '/m/0fldg'},
454
+ {'id': 448, 'name': 'Tripod', 'freebase_id': '/m/073bxn'},
455
+ {'id': 449, 'name': 'Oven', 'freebase_id': '/m/029bxz'},
456
+ {'id': 450, 'name': 'Mouse', 'freebase_id': '/m/020lf'},
457
+ {'id': 451, 'name': 'Barge', 'freebase_id': '/m/01btn'},
458
+ {'id': 452, 'name': 'Coffee', 'freebase_id': '/m/02vqfm'},
459
+ {'id': 453, 'name': 'Snowboard', 'freebase_id': '/m/06__v'},
460
+ {'id': 454, 'name': 'Common fig', 'freebase_id': '/m/043nyj'},
461
+ {'id': 455, 'name': 'Salad', 'freebase_id': '/m/0grw1'},
462
+ {'id': 456, 'name': 'Marine invertebrates', 'freebase_id': '/m/03hl4l9'},
463
+ {'id': 457, 'name': 'Umbrella', 'freebase_id': '/m/0hnnb'},
464
+ {'id': 458, 'name': 'Kangaroo', 'freebase_id': '/m/04c0y'},
465
+ {'id': 459, 'name': 'Human arm', 'freebase_id': '/m/0dzf4'},
466
+ {'id': 460, 'name': 'Measuring cup', 'freebase_id': '/m/07v9_z'},
467
+ {'id': 461, 'name': 'Snail', 'freebase_id': '/m/0f9_l'},
468
+ {'id': 462, 'name': 'Loveseat', 'freebase_id': '/m/0703r8'},
469
+ {'id': 463, 'name': 'Suit', 'freebase_id': '/m/01xyhv'},
470
+ {'id': 464, 'name': 'Teapot', 'freebase_id': '/m/01fh4r'},
471
+ {'id': 465, 'name': 'Bottle', 'freebase_id': '/m/04dr76w'},
472
+ {'id': 466, 'name': 'Alpaca', 'freebase_id': '/m/0pcr'},
473
+ {'id': 467, 'name': 'Kettle', 'freebase_id': '/m/03s_tn'},
474
+ {'id': 468, 'name': 'Trousers', 'freebase_id': '/m/07mhn'},
475
+ {'id': 469, 'name': 'Popcorn', 'freebase_id': '/m/01hrv5'},
476
+ {'id': 470, 'name': 'Centipede', 'freebase_id': '/m/019h78'},
477
+ {'id': 471, 'name': 'Spider', 'freebase_id': '/m/09kmb'},
478
+ {'id': 472, 'name': 'Sparrow', 'freebase_id': '/m/0h23m'},
479
+ {'id': 473, 'name': 'Plate', 'freebase_id': '/m/050gv4'},
480
+ {'id': 474, 'name': 'Bagel', 'freebase_id': '/m/01fb_0'},
481
+ {'id': 475, 'name': 'Personal care', 'freebase_id': '/m/02w3_ws'},
482
+ {'id': 476, 'name': 'Apple', 'freebase_id': '/m/014j1m'},
483
+ {'id': 477, 'name': 'Brassiere', 'freebase_id': '/m/01gmv2'},
484
+ {'id': 478, 'name': 'Bathroom cabinet', 'freebase_id': '/m/04y4h8h'},
485
+ {'id': 479, 'name': 'studio couch', 'freebase_id': '/m/026qbn5'},
486
+ {'id': 480, 'name': 'Computer keyboard', 'freebase_id': '/m/01m2v'},
487
+ {'id': 481, 'name': 'Table tennis racket', 'freebase_id': '/m/05_5p_0'},
488
+ {'id': 482, 'name': 'Sushi', 'freebase_id': '/m/07030'},
489
+ {'id': 483, 'name': 'Cabinetry', 'freebase_id': '/m/01s105'},
490
+ {'id': 484, 'name': 'Street light', 'freebase_id': '/m/033rq4'},
491
+ {'id': 485, 'name': 'Towel', 'freebase_id': '/m/0162_1'},
492
+ {'id': 486, 'name': 'Nightstand', 'freebase_id': '/m/02z51p'},
493
+ {'id': 487, 'name': 'Rabbit', 'freebase_id': '/m/06mf6'},
494
+ {'id': 488, 'name': 'Dolphin', 'freebase_id': '/m/02hj4'},
495
+ {'id': 489, 'name': 'Dog', 'freebase_id': '/m/0bt9lr'},
496
+ {'id': 490, 'name': 'Jug', 'freebase_id': '/m/08hvt4'},
497
+ {'id': 491, 'name': 'Wok', 'freebase_id': '/m/084rd'},
498
+ {'id': 492, 'name': 'Fire hydrant', 'freebase_id': '/m/01pns0'},
499
+ {'id': 493, 'name': 'Human eye', 'freebase_id': '/m/014sv8'},
500
+ {'id': 494, 'name': 'Skyscraper', 'freebase_id': '/m/079cl'},
501
+ {'id': 495, 'name': 'Backpack', 'freebase_id': '/m/01940j'},
502
+ {'id': 496, 'name': 'Potato', 'freebase_id': '/m/05vtc'},
503
+ {'id': 497, 'name': 'Paper towel', 'freebase_id': '/m/02w3r3'},
504
+ {'id': 498, 'name': 'Lifejacket', 'freebase_id': '/m/054xkw'},
505
+ {'id': 499, 'name': 'Bicycle wheel', 'freebase_id': '/m/01bqk0'},
506
+ {'id': 500, 'name': 'Toilet', 'freebase_id': '/m/09g1w'},
507
+ ]
508
+
509
+
510
+ def _get_builtin_metadata(cats):
511
+ id_to_name = {x['id']: x['name'] for x in cats}
512
+ thing_dataset_id_to_contiguous_id = {i + 1: i for i in range(len(cats))}
513
+ thing_classes = [x['name'] for x in sorted(cats, key=lambda x: x['id'])]
514
+ return {
515
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
516
+ "thing_classes": thing_classes}
517
+
518
+ _PREDEFINED_SPLITS_OID = {
519
+ # cat threshold: 500, 1500: r 170, c 151, f 179
520
+ "oid_train": ("oid/images/", "oid/annotations/oid_challenge_2019_train_bbox.json"),
521
+ # "expanded" duplicates annotations to their father classes based on the official
522
+ # hierarchy. This is used in the official evaulation protocol.
523
+ # https://storage.googleapis.com/openimages/web/evaluation.html
524
+ "oid_val_expanded": ("oid/images/validation/", "oid/annotations/oid_challenge_2019_val_expanded.json"),
525
+ "oid_val_expanded_rare": ("oid/images/validation/", "oid/annotations/oid_challenge_2019_val_expanded_rare.json"),
526
+ }
527
+
528
+
529
+ for key, (image_root, json_file) in _PREDEFINED_SPLITS_OID.items():
530
+ register_oid_instances(
531
+ key,
532
+ _get_builtin_metadata(categories),
533
+ os.path.join("datasets", json_file) if "://" not in json_file else json_file,
534
+ os.path.join("datasets", image_root),
535
+ )
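Unlike the Objects365 table, each entry here also carries its Freebase MID ('freebase_id'), the key used in the official Open Images CSVs. Because ids are exactly 1..500, id - 1 is the contiguous index that _get_builtin_metadata(categories) produces. A small lookup sketch (editor's addition, not part of the diff):

    # Map an official Open Images MID to its contiguous index and display name.
    mid_to_idx = {c['freebase_id']: c['id'] - 1 for c in categories}
    idx = mid_to_idx['/m/0bt9lr']
    print(idx, categories[idx]['name'])  # 488 Dog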
detic/data/datasets/register_oid.py ADDED
@@ -0,0 +1,122 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ # Modified by Xingyi Zhou from https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/coco.py
3
+ import copy
4
+ import io
5
+ import logging
6
+ import contextlib
7
+ import os
8
+ import datetime
9
+ import json
10
+ import numpy as np
11
+
12
+ from PIL import Image
13
+
14
+ from fvcore.common.timer import Timer
15
+ from fvcore.common.file_io import PathManager, file_lock
16
+ from detectron2.structures import BoxMode, PolygonMasks, Boxes
17
+ from detectron2.data import DatasetCatalog, MetadataCatalog
18
+
19
+ logger = logging.getLogger(__name__)
20
+
21
+ """
22
+ This file contains functions to register Open Images (OID) datasets, stored in COCO json format, to the DatasetCatalog.
23
+ """
24
+
25
+ __all__ = ["register_oid_instances"]
26
+
27
+
28
+
29
+ def register_oid_instances(name, metadata, json_file, image_root):
30
+ """
31
+ """
32
+ # 1. register a function which returns dicts
33
+ DatasetCatalog.register(name, lambda: load_coco_json_mem_efficient(
34
+ json_file, image_root, name))
35
+
36
+ # 2. Optionally, add metadata about this dataset,
37
+ # since they might be useful in evaluation, visualization or logging
38
+ MetadataCatalog.get(name).set(
39
+ json_file=json_file, image_root=image_root, evaluator_type="oid", **metadata
40
+ )
41
+
42
+
43
+ def load_coco_json_mem_efficient(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
44
+ """
45
+ Like load_coco_json, but frees the COCO API object after parsing.
+ (Despite the name, not actually memory efficient.)
46
+ """
47
+ from pycocotools.coco import COCO
48
+
49
+ timer = Timer()
50
+ json_file = PathManager.get_local_path(json_file)
51
+ with contextlib.redirect_stdout(io.StringIO()):
52
+ coco_api = COCO(json_file)
53
+ if timer.seconds() > 1:
54
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
55
+
56
+ id_map = None
57
+ if dataset_name is not None:
58
+ meta = MetadataCatalog.get(dataset_name)
59
+ cat_ids = sorted(coco_api.getCatIds())
60
+ cats = coco_api.loadCats(cat_ids)
61
+ # The categories in a custom json file may not be sorted.
62
+ thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
63
+ meta.thing_classes = thing_classes
64
+
65
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
66
+ if "coco" not in dataset_name:
67
+ logger.warning(
68
+ """
69
+ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
70
+ """
71
+ )
72
+ id_map = {v: i for i, v in enumerate(cat_ids)}
73
+ meta.thing_dataset_id_to_contiguous_id = id_map
74
+
75
+ # sort indices for reproducible results
76
+ img_ids = sorted(coco_api.imgs.keys())
77
+ imgs = coco_api.loadImgs(img_ids)
78
+ logger.info("Loaded {} images in COCO format from {}".format(len(imgs), json_file))
79
+
80
+ dataset_dicts = []
81
+
82
+ ann_keys = ["iscrowd", "bbox", "category_id"] + (extra_annotation_keys or [])
83
+
84
+ for img_dict in imgs:
85
+ record = {}
86
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
87
+ record["height"] = img_dict["height"]
88
+ record["width"] = img_dict["width"]
89
+ image_id = record["image_id"] = img_dict["id"]
90
+ anno_dict_list = coco_api.imgToAnns[image_id]
91
+ if 'neg_category_ids' in img_dict:
92
+ record['neg_category_ids'] = \
93
+ [id_map[x] for x in img_dict['neg_category_ids']]
94
+
95
+ objs = []
96
+ for anno in anno_dict_list:
97
+ assert anno["image_id"] == image_id
98
+
99
+ assert anno.get("ignore", 0) == 0
100
+
101
+ obj = {key: anno[key] for key in ann_keys if key in anno}
102
+
103
+ segm = anno.get("segmentation", None)
104
+ if segm: # either list[list[float]] or dict(RLE)
105
+ if not isinstance(segm, dict):
106
+ # filter out invalid polygons (< 3 points)
107
+ segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
108
+ if len(segm) == 0:
109
+ num_instances_without_valid_segmentation += 1
110
+ continue # ignore this instance
111
+ obj["segmentation"] = segm
112
+
113
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
114
+
115
+ if id_map:
116
+ obj["category_id"] = id_map[obj["category_id"]]
117
+ objs.append(obj)
118
+ record["annotations"] = objs
119
+ dataset_dicts.append(record)
120
+
121
+ del coco_api
122
+ return dataset_dicts
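The same helper can register any COCO-format json. A hypothetical example (the dataset name, paths, and class list are invented for illustration):

from detic.data.datasets.register_oid import register_oid_instances

register_oid_instances(
    "my_oid_subset_val",                      # hypothetical dataset name
    {"thing_classes": ["cat", "dog"]},        # metadata stored in MetadataCatalog
    "datasets/custom/annotations/val.json",   # COCO-style annotation json
    "datasets/custom/images/",                # image root
)
# Category ids in the json need not be contiguous: load_coco_json_mem_efficient
# builds thing_dataset_id_to_contiguous_id when they are not in [1, #categories].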
detic/data/tar_dataset.py ADDED
@@ -0,0 +1,138 @@
+ #!/usr/bin/env python3
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import os
+ import gzip
+ import numpy as np
+ import io
+ from PIL import Image
+ from torch.utils.data import Dataset
+ 
+ try:
+     from PIL import UnidentifiedImageError
+     unidentified_error_available = True
+ except ImportError:
+     # UnidentifiedImageError isn't available in older versions of PIL
+     unidentified_error_available = False
+ 
+ 
+ class DiskTarDataset(Dataset):
+     def __init__(self,
+         tarfile_path='dataset/imagenet/ImageNet-21k/metadata/tar_files.npy',
+         tar_index_dir='dataset/imagenet/ImageNet-21k/metadata/tarindex_npy',
+         preload=False,
+         num_synsets="all"):
+         """
+         - preload (bool): memory-map every tar file at construction time;
+           recommended to keep this False.
+         - num_synsets (int or the string "all"): set to a small number for
+           debugging; this loads only a subset of the dataset.
+         """
+         tar_files = np.load(tarfile_path)
+ 
+         chunk_datasets = []
+         dataset_lens = []
+         if isinstance(num_synsets, int):
+             assert num_synsets < len(tar_files)
+             tar_files = tar_files[:num_synsets]
+         for tar_file in tar_files:
+             dataset = _TarDataset(tar_file, tar_index_dir, preload=preload)
+             chunk_datasets.append(dataset)
+             dataset_lens.append(len(dataset))
+ 
+         self.chunk_datasets = chunk_datasets
+         self.dataset_lens = np.array(dataset_lens).astype(np.int32)
+         self.dataset_cumsums = np.cumsum(self.dataset_lens)
+         self.num_samples = sum(self.dataset_lens)
+         # each sample's label is the index of the tar file (synset) it came from
+         labels = np.zeros(self.dataset_lens.sum(), dtype=np.int64)
+         sI = 0
+         for k in range(len(self.dataset_lens)):
+             assert (sI + self.dataset_lens[k]) <= len(labels), f"{k} {sI + self.dataset_lens[k]} vs. {len(labels)}"
+             labels[sI:(sI + self.dataset_lens[k])] = k
+             sI += self.dataset_lens[k]
+         self.labels = labels
+ 
+     def __len__(self):
+         return self.num_samples
+ 
+     def __getitem__(self, index):
+         assert index >= 0 and index < len(self)
+         # find the dataset file we need to go to
+         d_index = np.searchsorted(self.dataset_cumsums, index)
+ 
+         # edge case: if index sits exactly on a chunk boundary, move right
+         if index in self.dataset_cumsums:
+             d_index += 1
+ 
+         assert d_index == self.labels[index], f"{d_index} vs. {self.labels[index]} mismatch for {index}"
+ 
+         # change index to local dataset index
+         if d_index == 0:
+             local_index = index
+         else:
+             local_index = index - self.dataset_cumsums[d_index - 1]
+         data_bytes = self.chunk_datasets[d_index][local_index]
+         exception_to_catch = UnidentifiedImageError if unidentified_error_available else Exception
+         try:
+             image = Image.open(data_bytes).convert("RGB")
+         except exception_to_catch:
+             # fall back to a gray dummy image and flag the sample with label -1
+             image = Image.fromarray(np.ones((224, 224, 3), dtype=np.uint8) * 128)
+             d_index = -1
+ 
+         # label is the dataset (synset) we indexed into
+         return image, d_index, index
+ 
+     def __repr__(self):
+         return f"DiskTarDataset(subdatasets={len(self.dataset_lens)},samples={self.num_samples})"
+ 
+ 
+ class _TarDataset(object):
+ 
+     def __init__(self, filename, npy_index_dir, preload=False):
+         # translated from
+         # fbcode/experimental/deeplearning/matthijs/comp_descs/tardataset.lua
+         self.filename = filename
+         self.names = []
+         self.offsets = []
+         self.npy_index_dir = npy_index_dir
+         names, offsets = self.load_index()
+ 
+         self.num_samples = len(names)
+         if preload:
+             self.data = np.memmap(filename, mode='r', dtype='uint8')
+             self.offsets = offsets
+         else:
+             # memory-map lazily, on first access
+             self.data = None
+ 
+     def __len__(self):
+         return self.num_samples
+ 
+     def load_index(self):
+         basename = os.path.basename(self.filename)
+         basename = os.path.splitext(basename)[0]
+         names = np.load(os.path.join(self.npy_index_dir, f"{basename}_names.npy"))
+         offsets = np.load(os.path.join(self.npy_index_dir, f"{basename}_offsets.npy"))
+         return names, offsets
+ 
+     def __getitem__(self, idx):
+         if self.data is None:
+             self.data = np.memmap(self.filename, mode='r', dtype='uint8')
+             _, self.offsets = self.load_index()
+ 
+         # tar archives are organized in 512-byte blocks; the index stores
+         # block offsets, so byte offsets are multiples of 512
+         ofs = self.offsets[idx] * 512
+         fsize = 512 * (self.offsets[idx + 1] - self.offsets[idx])
+         data = self.data[ofs:ofs + fsize]
+ 
+         # skip the tar header block; GNU long-name entries carry an extra
+         # '././@LongLink' header plus a name block, i.e. three blocks in total
+         if data[:13].tobytes() == b'././@LongLink':
+             data = data[3 * 512:]
+         else:
+             data = data[512:]
+ 
+         # just to make it more fun a few JPEGs are GZIP compressed...
+         # catch this case
+         if tuple(data[:2]) == (0x1f, 0x8b):
+             s = io.BytesIO(data.tobytes())
+             g = gzip.GzipFile(None, 'r', 0, s)
+             sdata = g.read()
+         else:
+             sdata = data.tobytes()
+         return io.BytesIO(sdata)
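A hedged smoke test for DiskTarDataset; the metadata paths are the defaults from __init__ and the *_names.npy / *_offsets.npy index files are assumed to have been built offline:

from detic.data.tar_dataset import DiskTarDataset

ds = DiskTarDataset(num_synsets=5)     # small subset for a quick check
print(ds)                              # DiskTarDataset(subdatasets=5,samples=...)
image, synset_idx, sample_idx = ds[0]
# synset_idx == -1 marks a corrupt file replaced by the gray dummy image
print(image.size, synset_idx, sample_idx)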
detic/data/transforms/custom_augmentation_impl.py ADDED
@@ -0,0 +1,60 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ # Part of the code is from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/data/transforms.py
+ # Modified by Xingyi Zhou
+ # The original code is under Apache-2.0 License
+ import numpy as np
+ import sys
+ from fvcore.transforms.transform import (
+     BlendTransform,
+     CropTransform,
+     HFlipTransform,
+     NoOpTransform,
+     Transform,
+     VFlipTransform,
+ )
+ from PIL import Image
+ 
+ from detectron2.data.transforms.augmentation import Augmentation
+ from .custom_transform import EfficientDetResizeCropTransform
+ 
+ __all__ = [
+     "EfficientDetResizeCrop",
+ ]
+ 
+ 
+ class EfficientDetResizeCrop(Augmentation):
+     """
+     Randomly rescale the image to fit within a jittered `size` x `size` target
+     (factor drawn from `scale`, aspect ratio preserved), then take a random
+     crop of at most `size` x `size` from the result.
+     """
+ 
+     def __init__(
+         self, size, scale, interp=Image.BILINEAR
+     ):
+         """
+         Args:
+             size (int): side length of the square target size.
+             scale (tuple[float]): range to sample the random scale factor from.
+             interp: PIL interpolation method.
+         """
+         super().__init__()
+         self.target_size = (size, size)
+         self.scale = scale
+         self.interp = interp
+ 
+     def get_transform(self, img):
+         # Select a random scale factor.
+         scale_factor = np.random.uniform(*self.scale)
+         scaled_target_height = scale_factor * self.target_size[0]
+         scaled_target_width = scale_factor * self.target_size[1]
+         # Recompute the accurate scale_factor using rounded scaled image size.
+         width, height = img.shape[1], img.shape[0]
+         img_scale_y = scaled_target_height / height
+         img_scale_x = scaled_target_width / width
+         img_scale = min(img_scale_y, img_scale_x)
+ 
+         # Select non-zero random offset (x, y) if scaled image is larger than target size
+         scaled_h = int(height * img_scale)
+         scaled_w = int(width * img_scale)
+         offset_y = scaled_h - self.target_size[0]
+         offset_x = scaled_w - self.target_size[1]
+         offset_y = int(max(0.0, float(offset_y)) * np.random.uniform(0, 1))
+         offset_x = int(max(0.0, float(offset_x)) * np.random.uniform(0, 1))
+         return EfficientDetResizeCropTransform(
+             scaled_h, scaled_w, offset_y, offset_x, img_scale, self.target_size, self.interp)
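Applied standalone, the augmentation samples a fresh transform per image. A sketch with an arbitrary image and an arbitrary jitter range:

import numpy as np
from detic.data.transforms.custom_augmentation_impl import EfficientDetResizeCrop

aug = EfficientDetResizeCrop(size=640, scale=(0.8, 1.2))  # arbitrary jitter range
img = np.random.randint(0, 255, (480, 768, 3), dtype=np.uint8)
tfm = aug.get_transform(img)           # samples the random scale and crop offsets
out = tfm.apply_image(img)             # at most 640x640 after resize + crop
boxes = tfm.apply_box(np.array([[10., 20., 200., 300.]]))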
detic/data/transforms/custom_transform.py ADDED
@@ -0,0 +1,114 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+ # Part of the code is from https://github.com/rwightman/efficientdet-pytorch/blob/master/effdet/data/transforms.py
+ # Modified by Xingyi Zhou
+ # The original code is under Apache-2.0 License
+ import numpy as np
+ import torch
+ import torch.nn.functional as F
+ from fvcore.transforms.transform import (
+     CropTransform,
+     HFlipTransform,
+     NoOpTransform,
+     Transform,
+     TransformList,
+ )
+ from PIL import Image
+ 
+ try:
+     import cv2  # noqa
+ except ImportError:
+     # OpenCV is an optional dependency at the moment
+     pass
+ 
+ __all__ = [
+     "EfficientDetResizeCropTransform",
+ ]
+ 
+ 
+ class EfficientDetResizeCropTransform(Transform):
+     """
+     Resize the image by `img_scale`, then crop a `target_size` window
+     starting at (`offset_y`, `offset_x`).
+     """
+ 
+     def __init__(self, scaled_h, scaled_w, offset_y, offset_x, img_scale,
+                  target_size, interp=None):
+         """
+         Args:
+             scaled_h, scaled_w (int): image size after resizing.
+             offset_y, offset_x (int): top-left corner of the crop window.
+             img_scale (float): scale factor from the original to the resized image.
+             target_size (tuple[int]): (height, width) of the crop window.
+             interp: PIL interpolation method, defaults to bilinear.
+         """
+         # TODO decide on PIL vs opencv
+         super().__init__()
+         if interp is None:
+             interp = Image.BILINEAR
+         self._set_attributes(locals())
+ 
+     def apply_image(self, img, interp=None):
+         assert len(img.shape) <= 4
+         right = min(self.scaled_w, self.offset_x + self.target_size[1])
+         lower = min(self.scaled_h, self.offset_y + self.target_size[0])
+ 
+         if img.dtype == np.uint8:
+             pil_image = Image.fromarray(img)
+             interp_method = interp if interp is not None else self.interp
+             pil_image = pil_image.resize((self.scaled_w, self.scaled_h), interp_method)
+             ret = np.asarray(pil_image)
+         else:
+             # PIL only supports uint8; resize other dtypes with torch
+             img = torch.from_numpy(img)
+             shape = list(img.shape)
+             shape_4d = shape[:2] + [1] * (4 - len(shape)) + shape[2:]
+             img = img.view(shape_4d).permute(2, 3, 0, 1)  # hw(c) -> nchw
+             _PIL_RESIZE_TO_INTERPOLATE_MODE = {Image.BILINEAR: "bilinear", Image.BICUBIC: "bicubic"}
+             mode = _PIL_RESIZE_TO_INTERPOLATE_MODE[self.interp]
+             img = F.interpolate(img, (self.scaled_h, self.scaled_w), mode=mode, align_corners=False)
+             shape[:2] = (self.scaled_h, self.scaled_w)
+             ret = img.permute(2, 3, 0, 1).view(shape).numpy()  # nchw -> hw(c)
+ 
+         if len(ret.shape) <= 3:
+             ret = ret[self.offset_y: lower, self.offset_x: right]
+         else:
+             ret = ret[..., self.offset_y: lower, self.offset_x: right, :]
+         return ret
+ 
+     def apply_coords(self, coords):
+         coords[:, 0] = coords[:, 0] * self.img_scale
+         coords[:, 1] = coords[:, 1] * self.img_scale
+         coords[:, 0] -= self.offset_x
+         coords[:, 1] -= self.offset_y
+         return coords
+ 
+     def apply_segmentation(self, segmentation):
+         segmentation = self.apply_image(segmentation, interp=Image.NEAREST)
+         return segmentation
+ 
+     def inverse(self):
+         raise NotImplementedError
+ 
+     def inverse_apply_coords(self, coords):
+         coords[:, 0] += self.offset_x
+         coords[:, 1] += self.offset_y
+         coords[:, 0] = coords[:, 0] / self.img_scale
+         coords[:, 1] = coords[:, 1] / self.img_scale
+         return coords
+ 
+     def inverse_apply_box(self, box: np.ndarray) -> np.ndarray:
+         """
+         Map boxes in the transformed image back to the original image.
+         """
+         idxs = np.array([(0, 1), (2, 1), (0, 3), (2, 3)]).flatten()
+         coords = np.asarray(box).reshape(-1, 4)[:, idxs].reshape(-1, 2)
+         coords = self.inverse_apply_coords(coords).reshape((-1, 4, 2))
+         minxy = coords.min(axis=1)
+         maxxy = coords.max(axis=1)
+         trans_boxes = np.concatenate((minxy, maxxy), axis=1)
+         return trans_boxes
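Finally, a hedged round-trip check of the inverse mapping; every number is invented for illustration, and apply_box is inherited from fvcore's Transform base class:

import numpy as np
from detic.data.transforms.custom_transform import EfficientDetResizeCropTransform

t = EfficientDetResizeCropTransform(
    scaled_h=384, scaled_w=512, offset_y=0, offset_x=64,
    img_scale=0.5, target_size=(384, 384))

box = np.array([[100., 200., 400., 600.]])  # XYXY in the original image
cropped = t.apply_box(box)                  # forward: scale by 0.5, shift by the offsets
recovered = t.inverse_apply_box(cropped)    # back to original coordinates
assert np.allclose(recovered, box)          # scale-then-shift is exactly invertible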