toto10 committed on
Commit
4a29d15
1 Parent(s): 6b84065

38afb5f5719662db57c3f6c655cff696b6dad0307227f85cdcdcd033eed31c4e

Files changed (50)
  1. extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/efficientnet_repo/setup.py +47 -0
  2. extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/efficientnet_repo/utils.py +52 -0
  3. extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/efficientnet_repo/validate.py +166 -0
  4. extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/encoder.py +34 -0
  5. extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/submodules.py +140 -0
  6. extensions/microsoftexcel-controlnet/annotator/oneformer/LICENSE +21 -0
  7. extensions/microsoftexcel-controlnet/annotator/oneformer/__init__.py +45 -0
  8. extensions/microsoftexcel-controlnet/annotator/oneformer/api.py +39 -0
  9. extensions/microsoftexcel-controlnet/annotator/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml +68 -0
  10. extensions/microsoftexcel-controlnet/annotator/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml +58 -0
  11. extensions/microsoftexcel-controlnet/annotator/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml +40 -0
  12. extensions/microsoftexcel-controlnet/annotator/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml +54 -0
  13. extensions/microsoftexcel-controlnet/annotator/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml +59 -0
  14. extensions/microsoftexcel-controlnet/annotator/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml +25 -0
  15. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/__init__.py +10 -0
  16. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/__init__.py +10 -0
  17. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/c2_model_loading.py +412 -0
  18. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/catalog.py +115 -0
  19. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/detection_checkpoint.py +145 -0
  20. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/__init__.py +24 -0
  21. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/compat.py +229 -0
  22. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/config.py +265 -0
  23. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/defaults.py +650 -0
  24. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/instantiate.py +88 -0
  25. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/lazy.py +435 -0
  26. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/__init__.py +19 -0
  27. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/benchmark.py +225 -0
  28. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/build.py +556 -0
  29. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/catalog.py +236 -0
  30. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/common.py +301 -0
  31. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/dataset_mapper.py +191 -0
  32. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/README.md +9 -0
  33. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/__init__.py +9 -0
  34. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/builtin.py +259 -0
  35. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/builtin_meta.py +350 -0
  36. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/cityscapes.py +329 -0
  37. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/cityscapes_panoptic.py +187 -0
  38. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/coco.py +539 -0
  39. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/coco_panoptic.py +228 -0
  40. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis.py +241 -0
  41. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis_v0_5_categories.py +0 -0
  42. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis_v1_categories.py +0 -0
  43. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis_v1_category_image_count.py +20 -0
  44. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/pascal_voc.py +82 -0
  45. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/register_coco.py +3 -0
  46. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/detection_utils.py +659 -0
  47. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/samplers/__init__.py +17 -0
  48. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/samplers/distributed_sampler.py +278 -0
  49. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/samplers/grouped_batch_sampler.py +47 -0
  50. extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/transforms/__init__.py +14 -0
extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/efficientnet_repo/setup.py ADDED
@@ -0,0 +1,47 @@
+ """ Setup
+ """
+ from setuptools import setup, find_packages
+ from codecs import open
+ from os import path
+
+ here = path.abspath(path.dirname(__file__))
+
+ # Get the long description from the README file
+ with open(path.join(here, 'README.md'), encoding='utf-8') as f:
+     long_description = f.read()
+
+ exec(open('geffnet/version.py').read())
+ setup(
+     name='geffnet',
+     version=__version__,
+     description='(Generic) EfficientNets for PyTorch',
+     long_description=long_description,
+     long_description_content_type='text/markdown',
+     url='https://github.com/rwightman/gen-efficientnet-pytorch',
+     author='Ross Wightman',
+     author_email='hello@rwightman.com',
+     classifiers=[
+         # How mature is this project? Common values are
+         #   3 - Alpha
+         #   4 - Beta
+         #   5 - Production/Stable
+         'Development Status :: 3 - Alpha',
+         'Intended Audience :: Education',
+         'Intended Audience :: Science/Research',
+         'License :: OSI Approved :: Apache Software License',
+         'Programming Language :: Python :: 3.6',
+         'Programming Language :: Python :: 3.7',
+         'Programming Language :: Python :: 3.8',
+         'Topic :: Scientific/Engineering',
+         'Topic :: Scientific/Engineering :: Artificial Intelligence',
+         'Topic :: Software Development',
+         'Topic :: Software Development :: Libraries',
+         'Topic :: Software Development :: Libraries :: Python Modules',
+     ],
+
+     # Note that this is a string of words separated by whitespace, not a list.
+     keywords='pytorch pretrained models efficientnet mixnet mobilenetv3 mnasnet',
+     packages=find_packages(exclude=['data']),
+     install_requires=['torch >= 1.4', 'torchvision'],
+     python_requires='>=3.6',
+ )
extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/efficientnet_repo/utils.py ADDED
@@ -0,0 +1,52 @@
+ import os
+
+
+ class AverageMeter:
+     """Computes and stores the average and current value"""
+     def __init__(self):
+         self.reset()
+
+     def reset(self):
+         self.val = 0
+         self.avg = 0
+         self.sum = 0
+         self.count = 0
+
+     def update(self, val, n=1):
+         self.val = val
+         self.sum += val * n
+         self.count += n
+         self.avg = self.sum / self.count
+
+
+ def accuracy(output, target, topk=(1,)):
+     """Computes the precision@k for the specified values of k"""
+     maxk = max(topk)
+     batch_size = target.size(0)
+
+     _, pred = output.topk(maxk, 1, True, True)
+     pred = pred.t()
+     correct = pred.eq(target.view(1, -1).expand_as(pred))
+
+     res = []
+     for k in topk:
+         correct_k = correct[:k].reshape(-1).float().sum(0)
+         res.append(correct_k.mul_(100.0 / batch_size))
+     return res
+
+
+ def get_outdir(path, *paths, inc=False):
+     outdir = os.path.join(path, *paths)
+     if not os.path.exists(outdir):
+         os.makedirs(outdir)
+     elif inc:
+         count = 1
+         outdir_inc = outdir + '-' + str(count)
+         while os.path.exists(outdir_inc):
+             count = count + 1
+             outdir_inc = outdir + '-' + str(count)
+             assert count < 100
+         outdir = outdir_inc
+         os.makedirs(outdir)
+     return outdir
+
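A minimal sketch of how these helpers fit together during evaluation (the tensors are synthetic, and the flat `from utils import ...` path assumes you run from this directory):

    import torch
    from utils import AverageMeter, accuracy

    top1 = AverageMeter()
    logits = torch.randn(8, 1000)             # fake model output for a batch of 8
    labels = torch.randint(0, 1000, (8,))
    prec1, prec5 = accuracy(logits, labels, topk=(1, 5))
    top1.update(prec1.item(), n=logits.size(0))   # running average, weighted by batch size
    print('top-1 so far: {:.2f}%'.format(top1.avg))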
extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/efficientnet_repo/validate.py ADDED
@@ -0,0 +1,166 @@
+ from __future__ import absolute_import
+ from __future__ import division
+ from __future__ import print_function
+
+ import argparse
+ import time
+ import torch
+ import torch.nn as nn
+ import torch.nn.parallel
+ from contextlib import suppress
+
+ import geffnet
+ from data import Dataset, create_loader, resolve_data_config
+ from utils import accuracy, AverageMeter
+
+ has_native_amp = False
+ try:
+     if getattr(torch.cuda.amp, 'autocast') is not None:
+         has_native_amp = True
+ except AttributeError:
+     pass
+
+ torch.backends.cudnn.benchmark = True
+
+ parser = argparse.ArgumentParser(description='PyTorch ImageNet Validation')
+ parser.add_argument('data', metavar='DIR',
+                     help='path to dataset')
+ parser.add_argument('--model', '-m', metavar='MODEL', default='spnasnet1_00',
+                     help='model architecture (default: spnasnet1_00)')
+ parser.add_argument('-j', '--workers', default=4, type=int, metavar='N',
+                     help='number of data loading workers (default: 4)')
+ parser.add_argument('-b', '--batch-size', default=256, type=int,
+                     metavar='N', help='mini-batch size (default: 256)')
+ parser.add_argument('--img-size', default=None, type=int,
+                     metavar='N', help='Input image dimension, uses model default if empty')
+ parser.add_argument('--mean', type=float, nargs='+', default=None, metavar='MEAN',
+                     help='Override mean pixel value of dataset')
+ parser.add_argument('--std', type=float, nargs='+', default=None, metavar='STD',
+                     help='Override std deviation of dataset')
+ parser.add_argument('--crop-pct', type=float, default=None, metavar='PCT',
+                     help='Override default crop pct of 0.875')
+ parser.add_argument('--interpolation', default='', type=str, metavar='NAME',
+                     help='Image resize interpolation type (overrides model)')
+ parser.add_argument('--num-classes', type=int, default=1000,
+                     help='Number of classes in dataset')
+ parser.add_argument('--print-freq', '-p', default=10, type=int,
+                     metavar='N', help='print frequency (default: 10)')
+ parser.add_argument('--checkpoint', default='', type=str, metavar='PATH',
+                     help='path to latest checkpoint (default: none)')
+ parser.add_argument('--pretrained', dest='pretrained', action='store_true',
+                     help='use pre-trained model')
+ parser.add_argument('--torchscript', dest='torchscript', action='store_true',
+                     help='convert model to torchscript for inference')
+ parser.add_argument('--num-gpu', type=int, default=1,
+                     help='Number of GPUs to use')
+ parser.add_argument('--tf-preprocessing', dest='tf_preprocessing', action='store_true',
+                     help='use tensorflow mnasnet preprocessing')
+ parser.add_argument('--no-cuda', dest='no_cuda', action='store_true',
+                     help='run on CPU, do not use CUDA')
+ parser.add_argument('--channels-last', action='store_true', default=False,
+                     help='Use channels_last memory layout')
+ parser.add_argument('--amp', action='store_true', default=False,
+                     help='Use native Torch AMP mixed precision.')
+
+
+ def main():
+     args = parser.parse_args()
+
+     if not args.checkpoint and not args.pretrained:
+         args.pretrained = True
+
+     amp_autocast = suppress  # do nothing
+     if args.amp:
+         if not has_native_amp:
+             print("Native Torch AMP is not available (requires torch >= 1.6), using FP32.")
+         else:
+             amp_autocast = torch.cuda.amp.autocast
+
+     # create model
+     model = geffnet.create_model(
+         args.model,
+         num_classes=args.num_classes,
+         in_chans=3,
+         pretrained=args.pretrained,
+         checkpoint_path=args.checkpoint,
+         scriptable=args.torchscript)
+
+     if args.channels_last:
+         model = model.to(memory_format=torch.channels_last)
+
+     if args.torchscript:
+         torch.jit.optimized_execution(True)
+         model = torch.jit.script(model)
+
+     print('Model %s created, param count: %d' %
+           (args.model, sum([m.numel() for m in model.parameters()])))
+
+     data_config = resolve_data_config(model, args)
+
+     criterion = nn.CrossEntropyLoss()
+
+     if not args.no_cuda:
+         if args.num_gpu > 1:
+             model = torch.nn.DataParallel(model, device_ids=list(range(args.num_gpu))).cuda()
+         else:
+             model = model.cuda()
+         criterion = criterion.cuda()
+
+     loader = create_loader(
+         Dataset(args.data, load_bytes=args.tf_preprocessing),
+         input_size=data_config['input_size'],
+         batch_size=args.batch_size,
+         use_prefetcher=not args.no_cuda,
+         interpolation=data_config['interpolation'],
+         mean=data_config['mean'],
+         std=data_config['std'],
+         num_workers=args.workers,
+         crop_pct=data_config['crop_pct'],
+         tensorflow_preprocessing=args.tf_preprocessing)
+
+     batch_time = AverageMeter()
+     losses = AverageMeter()
+     top1 = AverageMeter()
+     top5 = AverageMeter()
+
+     model.eval()
+     end = time.time()
+     with torch.no_grad():
+         for i, (input, target) in enumerate(loader):
+             if not args.no_cuda:
+                 target = target.cuda()
+                 input = input.cuda()
+             if args.channels_last:
+                 input = input.contiguous(memory_format=torch.channels_last)
+
+             # compute output
+             with amp_autocast():
+                 output = model(input)
+             loss = criterion(output, target)
+
+             # measure accuracy and record loss
+             prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
+             losses.update(loss.item(), input.size(0))
+             top1.update(prec1.item(), input.size(0))
+             top5.update(prec5.item(), input.size(0))
+
+             # measure elapsed time
+             batch_time.update(time.time() - end)
+             end = time.time()
+
+             if i % args.print_freq == 0:
+                 print('Test: [{0}/{1}]\t'
+                       'Time {batch_time.val:.3f} ({batch_time.avg:.3f}, {rate_avg:.3f}/s) \t'
+                       'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
+                       'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
+                       'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
+                           i, len(loader), batch_time=batch_time,
+                           rate_avg=input.size(0) / batch_time.avg,
+                           loss=losses, top1=top1, top5=top5))
+
+     print(' * Prec@1 {top1.avg:.3f} ({top1a:.3f}) Prec@5 {top5.avg:.3f} ({top5a:.3f})'.format(
+         top1=top1, top1a=100 - top1.avg, top5=top5, top5a=100. - top5.avg))
+
+
+ if __name__ == '__main__':
+     main()
extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/encoder.py ADDED
@@ -0,0 +1,34 @@
+ import os
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ class Encoder(nn.Module):
+     def __init__(self):
+         super(Encoder, self).__init__()
+
+         basemodel_name = 'tf_efficientnet_b5_ap'
+         print('Loading base model {}...'.format(basemodel_name), end='')
+         repo_path = os.path.join(os.path.dirname(__file__), 'efficientnet_repo')
+         basemodel = torch.hub.load(repo_path, basemodel_name, pretrained=False, source='local')
+         print('Done.')
+
+         # Remove last layer
+         print('Removing last two layers (global_pool & classifier).')
+         basemodel.global_pool = nn.Identity()
+         basemodel.classifier = nn.Identity()
+
+         self.original_model = basemodel
+
+     def forward(self, x):
+         features = [x]
+         for k, v in self.original_model._modules.items():
+             if (k == 'blocks'):
+                 for ki, vi in v._modules.items():
+                     features.append(vi(features[-1]))
+             else:
+                 features.append(v(features[-1]))
+         return features
+
+
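For orientation, a hedged sketch of what `Encoder.forward` produces (the input size is arbitrary, the flat import path is illustrative, and instantiating `Encoder` requires the bundled `efficientnet_repo` to be present next to this file):

    import torch
    from encoder import Encoder

    enc = Encoder()                    # loads tf_efficientnet_b5_ap locally via torch.hub
    x = torch.randn(1, 3, 480, 640)
    feats = enc(x)                     # list of tensors: the input plus one entry per
                                       # top-level module, with 'blocks' expanded per block
    print(len(feats), feats[-1].shape)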
extensions/microsoftexcel-controlnet/annotator/normalbae/models/submodules/submodules.py ADDED
@@ -0,0 +1,140 @@
+ import torch
+ import torch.nn as nn
+ import torch.nn.functional as F
+
+
+ ########################################################################################################################
+
+
+ # Upsample + BatchNorm
+ class UpSampleBN(nn.Module):
+     def __init__(self, skip_input, output_features):
+         super(UpSampleBN, self).__init__()
+
+         self._net = nn.Sequential(nn.Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),
+                                   nn.BatchNorm2d(output_features),
+                                   nn.LeakyReLU(),
+                                   nn.Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),
+                                   nn.BatchNorm2d(output_features),
+                                   nn.LeakyReLU())
+
+     def forward(self, x, concat_with):
+         up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True)
+         f = torch.cat([up_x, concat_with], dim=1)
+         return self._net(f)
+
+
+ # Upsample + GroupNorm + Weight Standardization
+ class UpSampleGN(nn.Module):
+     def __init__(self, skip_input, output_features):
+         super(UpSampleGN, self).__init__()
+
+         self._net = nn.Sequential(Conv2d(skip_input, output_features, kernel_size=3, stride=1, padding=1),
+                                   nn.GroupNorm(8, output_features),
+                                   nn.LeakyReLU(),
+                                   Conv2d(output_features, output_features, kernel_size=3, stride=1, padding=1),
+                                   nn.GroupNorm(8, output_features),
+                                   nn.LeakyReLU())
+
+     def forward(self, x, concat_with):
+         up_x = F.interpolate(x, size=[concat_with.size(2), concat_with.size(3)], mode='bilinear', align_corners=True)
+         f = torch.cat([up_x, concat_with], dim=1)
+         return self._net(f)
+
+
+ # Conv2d with weight standardization
+ class Conv2d(nn.Conv2d):
+     def __init__(self, in_channels, out_channels, kernel_size, stride=1,
+                  padding=0, dilation=1, groups=1, bias=True):
+         super(Conv2d, self).__init__(in_channels, out_channels, kernel_size, stride,
+                                      padding, dilation, groups, bias)
+
+     def forward(self, x):
+         weight = self.weight
+         weight_mean = weight.mean(dim=1, keepdim=True).mean(dim=2,
+                                                             keepdim=True).mean(dim=3, keepdim=True)
+         weight = weight - weight_mean
+         std = weight.view(weight.size(0), -1).std(dim=1).view(-1, 1, 1, 1) + 1e-5
+         weight = weight / std.expand_as(weight)
+         return F.conv2d(x, weight, self.bias, self.stride,
+                         self.padding, self.dilation, self.groups)
+
+
+ # normalize
+ def norm_normalize(norm_out):
+     min_kappa = 0.01
+     norm_x, norm_y, norm_z, kappa = torch.split(norm_out, 1, dim=1)
+     norm = torch.sqrt(norm_x ** 2.0 + norm_y ** 2.0 + norm_z ** 2.0) + 1e-10
+     kappa = F.elu(kappa) + 1.0 + min_kappa
+     final_out = torch.cat([norm_x / norm, norm_y / norm, norm_z / norm, kappa], dim=1)
+     return final_out
+
+
+ # uncertainty-guided sampling (only used during training)
+ @torch.no_grad()
+ def sample_points(init_normal, gt_norm_mask, sampling_ratio, beta):
+     device = init_normal.device
+     B, _, H, W = init_normal.shape
+     N = int(sampling_ratio * H * W)
+
+     # uncertainty map
+     uncertainty_map = -1 * init_normal[:, 3, :, :]  # B, H, W
+
+     # gt_invalid_mask (B, H, W)
+     if gt_norm_mask is not None:
+         gt_invalid_mask = F.interpolate(gt_norm_mask.float(), size=[H, W], mode='nearest')
+         gt_invalid_mask = gt_invalid_mask[:, 0, :, :] < 0.5
+         uncertainty_map[gt_invalid_mask] = -1e4
+
+     # (B, H*W)
+     _, idx = uncertainty_map.view(B, -1).sort(1, descending=True)
+
+     # importance sampling
+     if int(beta * N) > 0:
+         importance = idx[:, :int(beta * N)]  # B, beta*N
+
+         # remaining
+         remaining = idx[:, int(beta * N):]  # B, H*W - beta*N
+
+         # coverage
+         num_coverage = N - int(beta * N)
+
+         if num_coverage <= 0:
+             samples = importance
+         else:
+             coverage_list = []
+             for i in range(B):
+                 idx_c = torch.randperm(remaining.size()[1])  # shuffles "H*W - beta*N" indices
+                 coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1))  # 1, N - beta*N
+             coverage = torch.cat(coverage_list, dim=0)  # B, N - beta*N
+             samples = torch.cat((importance, coverage), dim=1)  # B, N
+
+     else:
+         # all samples come from random coverage
+         remaining = idx[:, :]  # B, H*W
+         num_coverage = N
+
+         coverage_list = []
+         for i in range(B):
+             idx_c = torch.randperm(remaining.size()[1])  # shuffles "H*W" indices
+             coverage_list.append(remaining[i, :][idx_c[:num_coverage]].view(1, -1))  # 1, N
+         coverage = torch.cat(coverage_list, dim=0)  # B, N
+         samples = coverage
+
+     # point coordinates
+     rows_int = samples // W       # 0 for first row, H-1 for last row
+     rows_float = rows_int / float(H - 1)   # 0 to 1.0
+     rows_float = (rows_float * 2.0) - 1.0  # -1.0 to 1.0
+
+     cols_int = samples % W        # 0 for first column, W-1 for last column
+     cols_float = cols_int / float(W - 1)   # 0 to 1.0
+     cols_float = (cols_float * 2.0) - 1.0  # -1.0 to 1.0
+
+     point_coords = torch.zeros(B, 1, N, 2)
+     point_coords[:, 0, :, 0] = cols_float  # x coord
+     point_coords[:, 0, :, 1] = rows_float  # y coord
+     point_coords = point_coords.to(device)
+     return point_coords, rows_int, cols_int
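A small sketch of the two helpers above on synthetic data (shapes only; the 4 channels are (nx, ny, nz, kappa) as `norm_normalize` assumes):

    import torch
    from submodules import norm_normalize, sample_points  # flat import path is illustrative

    raw = torch.randn(2, 4, 8, 8)             # fake network output
    out = norm_normalize(raw)
    print(out[:, :3].norm(dim=1).mean())      # ~1.0: first three channels are unit normals

    # sample N = int(0.1 * 64) = 6 points per image, half by uncertainty, half random coverage
    coords, rows, cols = sample_points(out, gt_norm_mask=None, sampling_ratio=0.1, beta=0.5)
    print(coords.shape)                       # torch.Size([2, 1, 6, 2]), in [-1, 1] grid coords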
extensions/microsoftexcel-controlnet/annotator/oneformer/LICENSE ADDED
@@ -0,0 +1,21 @@
+ MIT License
+
+ Copyright (c) 2022 Caroline Chan
+
+ Permission is hereby granted, free of charge, to any person obtaining a copy
+ of this software and associated documentation files (the "Software"), to deal
+ in the Software without restriction, including without limitation the rights
+ to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+ copies of the Software, and to permit persons to whom the Software is
+ furnished to do so, subject to the following conditions:
+
+ The above copyright notice and this permission notice shall be included in all
+ copies or substantial portions of the Software.
+
+ THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+ AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+ LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+ OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+ SOFTWARE.
extensions/microsoftexcel-controlnet/annotator/oneformer/__init__.py ADDED
@@ -0,0 +1,45 @@
+ import os
+ from modules import devices
+ from annotator.annotator_path import models_path
+ from .api import make_detectron2_model, semantic_run
+
+
+ class OneformerDetector:
+     model_dir = os.path.join(models_path, "oneformer")
+     configs = {
+         "coco": {
+             "name": "150_16_swin_l_oneformer_coco_100ep.pth",
+             "config": 'configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml'
+         },
+         "ade20k": {
+             "name": "250_16_swin_l_oneformer_ade20k_160k.pth",
+             "config": 'configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml'
+         }
+     }
+
+     def __init__(self, config):
+         self.model = None
+         self.metadata = None
+         self.config = config
+         self.device = devices.get_device_for("controlnet")
+
+     def load_model(self):
+         remote_model_path = "https://huggingface.co/lllyasviel/Annotators/resolve/main/" + self.config["name"]
+         modelpath = os.path.join(self.model_dir, self.config["name"])
+         if not os.path.exists(modelpath):
+             from basicsr.utils.download_util import load_file_from_url
+             load_file_from_url(remote_model_path, model_dir=self.model_dir)
+         config = os.path.join(os.path.dirname(__file__), self.config["config"])
+         model, self.metadata = make_detectron2_model(config, modelpath)
+         self.model = model
+
+     def unload_model(self):
+         if self.model is not None:
+             self.model.model.cpu()
+
+     def __call__(self, img):
+         if self.model is None:
+             self.load_model()
+
+         self.model.model.to(self.device)
+         return semantic_run(img, self.model, self.metadata)
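A hedged usage sketch (this class expects to run inside the WebUI process, since it imports `modules.devices`; the dummy image below is only a placeholder):

    import numpy as np
    from annotator.oneformer import OneformerDetector

    detector = OneformerDetector(OneformerDetector.configs["ade20k"])
    img = np.zeros((512, 512, 3), dtype=np.uint8)   # HWC RGB input, as semantic_run expects
    seg = detector(img)       # first call downloads the weights, then runs semantic segmentation
    detector.unload_model()   # moves the network back to CPU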
extensions/microsoftexcel-controlnet/annotator/oneformer/api.py ADDED
@@ -0,0 +1,39 @@
+ import os
+ os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"
+
+ import torch
+
+ from annotator.oneformer.detectron2.config import get_cfg
+ from annotator.oneformer.detectron2.projects.deeplab import add_deeplab_config
+ from annotator.oneformer.detectron2.data import MetadataCatalog
+
+ from annotator.oneformer.oneformer import (
+     add_oneformer_config,
+     add_common_config,
+     add_swin_config,
+     add_dinat_config,
+ )
+
+ from annotator.oneformer.oneformer.demo.defaults import DefaultPredictor
+ from annotator.oneformer.oneformer.demo.visualizer import Visualizer, ColorMode
+
+
+ def make_detectron2_model(config_path, ckpt_path):
+     cfg = get_cfg()
+     add_deeplab_config(cfg)
+     add_common_config(cfg)
+     add_swin_config(cfg)
+     add_oneformer_config(cfg)
+     add_dinat_config(cfg)
+     cfg.merge_from_file(config_path)
+     cfg.MODEL.WEIGHTS = ckpt_path
+     cfg.freeze()
+     metadata = MetadataCatalog.get(cfg.DATASETS.TEST_PANOPTIC[0] if len(cfg.DATASETS.TEST_PANOPTIC) else "__unused")
+     return DefaultPredictor(cfg), metadata
+
+
+ def semantic_run(img, predictor, metadata):
+     predictions = predictor(img[:, :, ::-1], "semantic")  # OneFormer's predictor expects a BGR image
+     visualizer_map = Visualizer(img, is_img=False, metadata=metadata, instance_mode=ColorMode.IMAGE)
+     out_map = visualizer_map.draw_sem_seg(predictions["sem_seg"].argmax(dim=0).cpu(), alpha=1, is_text=False).get_image()
+     return out_map
extensions/microsoftexcel-controlnet/annotator/oneformer/configs/ade20k/Base-ADE20K-UnifiedSegmentation.yaml ADDED
@@ -0,0 +1,68 @@
+ MODEL:
+   BACKBONE:
+     FREEZE_AT: 0
+     NAME: "build_resnet_backbone"
+   WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
+   PIXEL_MEAN: [123.675, 116.280, 103.530]
+   PIXEL_STD: [58.395, 57.120, 57.375]
+   RESNETS:
+     DEPTH: 50
+     STEM_TYPE: "basic"  # not used
+     STEM_OUT_CHANNELS: 64
+     STRIDE_IN_1X1: False
+     OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+     # NORM: "SyncBN"
+     RES5_MULTI_GRID: [1, 1, 1]  # not used
+ DATASETS:
+   TRAIN: ("ade20k_panoptic_train",)
+   TEST_PANOPTIC: ("ade20k_panoptic_val",)
+   TEST_INSTANCE: ("ade20k_instance_val",)
+   TEST_SEMANTIC: ("ade20k_sem_seg_val",)
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.0001
+   MAX_ITER: 160000
+   WARMUP_FACTOR: 1.0
+   WARMUP_ITERS: 0
+   WEIGHT_DECAY: 0.05
+   OPTIMIZER: "ADAMW"
+   LR_SCHEDULER_NAME: "WarmupPolyLR"
+   BACKBONE_MULTIPLIER: 0.1
+   CLIP_GRADIENTS:
+     ENABLED: True
+     CLIP_TYPE: "full_model"
+     CLIP_VALUE: 0.01
+     NORM_TYPE: 2.0
+   AMP:
+     ENABLED: True
+ INPUT:
+   MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 512) for x in range(5, 21)]"]
+   MIN_SIZE_TRAIN_SAMPLING: "choice"
+   MIN_SIZE_TEST: 512
+   MAX_SIZE_TRAIN: 2048
+   MAX_SIZE_TEST: 2048
+   CROP:
+     ENABLED: True
+     TYPE: "absolute"
+     SIZE: (512, 512)
+     SINGLE_CATEGORY_MAX_AREA: 1.0
+   COLOR_AUG_SSD: True
+   SIZE_DIVISIBILITY: 512  # used in dataset mapper
+   FORMAT: "RGB"
+   DATASET_MAPPER_NAME: "oneformer_unified"
+   MAX_SEQ_LEN: 77
+   TASK_SEQ_LEN: 77
+   TASK_PROB:
+     SEMANTIC: 0.33
+     INSTANCE: 0.66
+ TEST:
+   EVAL_PERIOD: 5000
+   AUG:
+     ENABLED: False
+     MIN_SIZES: [256, 384, 512, 640, 768, 896]
+     MAX_SIZE: 3584
+     FLIP: True
+ DATALOADER:
+   FILTER_EMPTY_ANNOTATIONS: True
+   NUM_WORKERS: 4
+ VERSION: 2
extensions/microsoftexcel-controlnet/annotator/oneformer/configs/ade20k/oneformer_R50_bs16_160k.yaml ADDED
@@ -0,0 +1,58 @@
+ _BASE_: Base-ADE20K-UnifiedSegmentation.yaml
+ MODEL:
+   META_ARCHITECTURE: "OneFormer"
+   SEM_SEG_HEAD:
+     NAME: "OneFormerHead"
+     IGNORE_VALUE: 255
+     NUM_CLASSES: 150
+     LOSS_WEIGHT: 1.0
+     CONVS_DIM: 256
+     MASK_DIM: 256
+     NORM: "GN"
+     # pixel decoder
+     PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
+     IN_FEATURES: ["res2", "res3", "res4", "res5"]
+     DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
+     COMMON_STRIDE: 4
+     TRANSFORMER_ENC_LAYERS: 6
+   ONE_FORMER:
+     TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder"
+     TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
+     DEEP_SUPERVISION: True
+     NO_OBJECT_WEIGHT: 0.1
+     CLASS_WEIGHT: 2.0
+     MASK_WEIGHT: 5.0
+     DICE_WEIGHT: 5.0
+     CONTRASTIVE_WEIGHT: 0.5
+     CONTRASTIVE_TEMPERATURE: 0.07
+     HIDDEN_DIM: 256
+     NUM_OBJECT_QUERIES: 150
+     USE_TASK_NORM: True
+     NHEADS: 8
+     DROPOUT: 0.1
+     DIM_FEEDFORWARD: 2048
+     ENC_LAYERS: 0
+     PRE_NORM: False
+     ENFORCE_INPUT_PROJ: False
+     SIZE_DIVISIBILITY: 32
+     CLASS_DEC_LAYERS: 2
+     DEC_LAYERS: 10  # 9 decoder layers, add one for the loss on learnable query
+     TRAIN_NUM_POINTS: 12544
+     OVERSAMPLE_RATIO: 3.0
+     IMPORTANCE_SAMPLE_RATIO: 0.75
+   TEXT_ENCODER:
+     WIDTH: 256
+     CONTEXT_LENGTH: 77
+     NUM_LAYERS: 6
+     VOCAB_SIZE: 49408
+     PROJ_NUM_LAYERS: 2
+     N_CTX: 16
+   TEST:
+     SEMANTIC_ON: True
+     INSTANCE_ON: True
+     PANOPTIC_ON: True
+     OVERLAP_THRESHOLD: 0.8
+     OBJECT_MASK_THRESHOLD: 0.8
+     TASK: "panoptic"
+ TEST:
+   DETECTIONS_PER_IMAGE: 150
extensions/microsoftexcel-controlnet/annotator/oneformer/configs/ade20k/oneformer_swin_large_IN21k_384_bs16_160k.yaml ADDED
@@ -0,0 +1,40 @@
+ _BASE_: oneformer_R50_bs16_160k.yaml
+ MODEL:
+   BACKBONE:
+     NAME: "D2SwinTransformer"
+   SWIN:
+     EMBED_DIM: 192
+     DEPTHS: [2, 2, 18, 2]
+     NUM_HEADS: [6, 12, 24, 48]
+     WINDOW_SIZE: 12
+     APE: False
+     DROP_PATH_RATE: 0.3
+     PATCH_NORM: True
+     PRETRAIN_IMG_SIZE: 384
+   WEIGHTS: "swin_large_patch4_window12_384_22k.pkl"
+   PIXEL_MEAN: [123.675, 116.280, 103.530]
+   PIXEL_STD: [58.395, 57.120, 57.375]
+   ONE_FORMER:
+     NUM_OBJECT_QUERIES: 250
+ INPUT:
+   MIN_SIZE_TRAIN: !!python/object/apply:eval ["[int(x * 0.1 * 640) for x in range(5, 21)]"]
+   MIN_SIZE_TRAIN_SAMPLING: "choice"
+   MIN_SIZE_TEST: 640
+   MAX_SIZE_TRAIN: 2560
+   MAX_SIZE_TEST: 2560
+   CROP:
+     ENABLED: True
+     TYPE: "absolute"
+     SIZE: (640, 640)
+     SINGLE_CATEGORY_MAX_AREA: 1.0
+   COLOR_AUG_SSD: True
+   SIZE_DIVISIBILITY: 640  # used in dataset mapper
+   FORMAT: "RGB"
+ TEST:
+   DETECTIONS_PER_IMAGE: 250
+   EVAL_PERIOD: 5000
+   AUG:
+     ENABLED: False
+     MIN_SIZES: [320, 480, 640, 800, 960, 1120]
+     MAX_SIZE: 4480
+     FLIP: True
extensions/microsoftexcel-controlnet/annotator/oneformer/configs/coco/Base-COCO-UnifiedSegmentation.yaml ADDED
@@ -0,0 +1,54 @@
+ MODEL:
+   BACKBONE:
+     FREEZE_AT: 0
+     NAME: "build_resnet_backbone"
+   WEIGHTS: "detectron2://ImageNetPretrained/torchvision/R-50.pkl"
+   PIXEL_MEAN: [123.675, 116.280, 103.530]
+   PIXEL_STD: [58.395, 57.120, 57.375]
+   RESNETS:
+     DEPTH: 50
+     STEM_TYPE: "basic"  # not used
+     STEM_OUT_CHANNELS: 64
+     STRIDE_IN_1X1: False
+     OUT_FEATURES: ["res2", "res3", "res4", "res5"]
+     # NORM: "SyncBN"
+     RES5_MULTI_GRID: [1, 1, 1]  # not used
+ DATASETS:
+   TRAIN: ("coco_2017_train_panoptic_with_sem_seg",)
+   TEST_PANOPTIC: ("coco_2017_val_panoptic_with_sem_seg",)  # to evaluate instance and semantic performance as well
+   TEST_INSTANCE: ("coco_2017_val",)
+   TEST_SEMANTIC: ("coco_2017_val_panoptic_with_sem_seg",)
+ SOLVER:
+   IMS_PER_BATCH: 16
+   BASE_LR: 0.0001
+   STEPS: (327778, 355092)
+   MAX_ITER: 368750
+   WARMUP_FACTOR: 1.0
+   WARMUP_ITERS: 10
+   WEIGHT_DECAY: 0.05
+   OPTIMIZER: "ADAMW"
+   BACKBONE_MULTIPLIER: 0.1
+   CLIP_GRADIENTS:
+     ENABLED: True
+     CLIP_TYPE: "full_model"
+     CLIP_VALUE: 0.01
+     NORM_TYPE: 2.0
+   AMP:
+     ENABLED: True
+ INPUT:
+   IMAGE_SIZE: 1024
+   MIN_SCALE: 0.1
+   MAX_SCALE: 2.0
+   FORMAT: "RGB"
+   DATASET_MAPPER_NAME: "coco_unified_lsj"
+   MAX_SEQ_LEN: 77
+   TASK_SEQ_LEN: 77
+   TASK_PROB:
+     SEMANTIC: 0.33
+     INSTANCE: 0.66
+ TEST:
+   EVAL_PERIOD: 5000
+ DATALOADER:
+   FILTER_EMPTY_ANNOTATIONS: True
+   NUM_WORKERS: 4
+ VERSION: 2
extensions/microsoftexcel-controlnet/annotator/oneformer/configs/coco/oneformer_R50_bs16_50ep.yaml ADDED
@@ -0,0 +1,59 @@
+ _BASE_: Base-COCO-UnifiedSegmentation.yaml
+ MODEL:
+   META_ARCHITECTURE: "OneFormer"
+   SEM_SEG_HEAD:
+     NAME: "OneFormerHead"
+     IGNORE_VALUE: 255
+     NUM_CLASSES: 133
+     LOSS_WEIGHT: 1.0
+     CONVS_DIM: 256
+     MASK_DIM: 256
+     NORM: "GN"
+     # pixel decoder
+     PIXEL_DECODER_NAME: "MSDeformAttnPixelDecoder"
+     IN_FEATURES: ["res2", "res3", "res4", "res5"]
+     DEFORMABLE_TRANSFORMER_ENCODER_IN_FEATURES: ["res3", "res4", "res5"]
+     COMMON_STRIDE: 4
+     TRANSFORMER_ENC_LAYERS: 6
+   ONE_FORMER:
+     TRANSFORMER_DECODER_NAME: "ContrastiveMultiScaleMaskedTransformerDecoder"
+     TRANSFORMER_IN_FEATURE: "multi_scale_pixel_decoder"
+     DEEP_SUPERVISION: True
+     NO_OBJECT_WEIGHT: 0.1
+     CLASS_WEIGHT: 2.0
+     MASK_WEIGHT: 5.0
+     DICE_WEIGHT: 5.0
+     CONTRASTIVE_WEIGHT: 0.5
+     CONTRASTIVE_TEMPERATURE: 0.07
+     HIDDEN_DIM: 256
+     NUM_OBJECT_QUERIES: 150
+     USE_TASK_NORM: True
+     NHEADS: 8
+     DROPOUT: 0.1
+     DIM_FEEDFORWARD: 2048
+     ENC_LAYERS: 0
+     PRE_NORM: False
+     ENFORCE_INPUT_PROJ: False
+     SIZE_DIVISIBILITY: 32
+     CLASS_DEC_LAYERS: 2
+     DEC_LAYERS: 10  # 9 decoder layers, add one for the loss on learnable query
+     TRAIN_NUM_POINTS: 12544
+     OVERSAMPLE_RATIO: 3.0
+     IMPORTANCE_SAMPLE_RATIO: 0.75
+   TEXT_ENCODER:
+     WIDTH: 256
+     CONTEXT_LENGTH: 77
+     NUM_LAYERS: 6
+     VOCAB_SIZE: 49408
+     PROJ_NUM_LAYERS: 2
+     N_CTX: 16
+   TEST:
+     SEMANTIC_ON: True
+     INSTANCE_ON: True
+     PANOPTIC_ON: True
+     DETECTION_ON: False
+     OVERLAP_THRESHOLD: 0.8
+     OBJECT_MASK_THRESHOLD: 0.8
+     TASK: "panoptic"
+ TEST:
+   DETECTIONS_PER_IMAGE: 150
extensions/microsoftexcel-controlnet/annotator/oneformer/configs/coco/oneformer_swin_large_IN21k_384_bs16_100ep.yaml ADDED
@@ -0,0 +1,25 @@
+ _BASE_: oneformer_R50_bs16_50ep.yaml
+ MODEL:
+   BACKBONE:
+     NAME: "D2SwinTransformer"
+   SWIN:
+     EMBED_DIM: 192
+     DEPTHS: [2, 2, 18, 2]
+     NUM_HEADS: [6, 12, 24, 48]
+     WINDOW_SIZE: 12
+     APE: False
+     DROP_PATH_RATE: 0.3
+     PATCH_NORM: True
+     PRETRAIN_IMG_SIZE: 384
+   WEIGHTS: "swin_large_patch4_window12_384_22k.pkl"
+   PIXEL_MEAN: [123.675, 116.280, 103.530]
+   PIXEL_STD: [58.395, 57.120, 57.375]
+   ONE_FORMER:
+     NUM_OBJECT_QUERIES: 150
+ SOLVER:
+   STEPS: (655556, 735184)
+   MAX_ITER: 737500
+   AMP:
+     ENABLED: False
+ TEST:
+   DETECTIONS_PER_IMAGE: 150
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/__init__.py ADDED
@@ -0,0 +1,10 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ from .utils.env import setup_environment
+
+ setup_environment()
+
+
+ # This line will be programmatically read/written by setup.py.
+ # Leave it at the bottom of this file and don't touch it.
+ __version__ = "0.6"
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/__init__.py ADDED
@@ -0,0 +1,10 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # File:
+
+
+ from . import catalog as _UNUSED  # register the handler
+ from .detection_checkpoint import DetectionCheckpointer
+ from fvcore.common.checkpoint import Checkpointer, PeriodicCheckpointer
+
+ __all__ = ["Checkpointer", "PeriodicCheckpointer", "DetectionCheckpointer"]
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/c2_model_loading.py ADDED
@@ -0,0 +1,412 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import copy
+ import logging
+ import re
+ from typing import Dict, List
+ import torch
+ from tabulate import tabulate
+
+
+ def convert_basic_c2_names(original_keys):
+     """
+     Apply some basic name conversion to names in C2 weights.
+     It only deals with typical backbone models.
+
+     Args:
+         original_keys (list[str]):
+     Returns:
+         list[str]: The same number of strings matching those in original_keys.
+     """
+     layer_keys = copy.deepcopy(original_keys)
+     layer_keys = [
+         {"pred_b": "linear_b", "pred_w": "linear_w"}.get(k, k) for k in layer_keys
+     ]  # some hard-coded mappings
+
+     layer_keys = [k.replace("_", ".") for k in layer_keys]
+     layer_keys = [re.sub("\\.b$", ".bias", k) for k in layer_keys]
+     layer_keys = [re.sub("\\.w$", ".weight", k) for k in layer_keys]
+     # Uniform both bn and gn names to "norm"
+     layer_keys = [re.sub("bn\\.s$", "norm.weight", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.bias$", "norm.bias", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.rm", "norm.running_mean", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.running.mean$", "norm.running_mean", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.riv$", "norm.running_var", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.running.var$", "norm.running_var", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.gamma$", "norm.weight", k) for k in layer_keys]
+     layer_keys = [re.sub("bn\\.beta$", "norm.bias", k) for k in layer_keys]
+     layer_keys = [re.sub("gn\\.s$", "norm.weight", k) for k in layer_keys]
+     layer_keys = [re.sub("gn\\.bias$", "norm.bias", k) for k in layer_keys]
+
+     # stem
+     layer_keys = [re.sub("^res\\.conv1\\.norm\\.", "conv1.norm.", k) for k in layer_keys]
+     # to avoid mis-matching with "conv1" in other components (e.g. detection head)
+     layer_keys = [re.sub("^conv1\\.", "stem.conv1.", k) for k in layer_keys]
+
+     # layer1-4 is used by torchvision, however we follow the C2 naming strategy (res2-5)
+     # layer_keys = [re.sub("^res2.", "layer1.", k) for k in layer_keys]
+     # layer_keys = [re.sub("^res3.", "layer2.", k) for k in layer_keys]
+     # layer_keys = [re.sub("^res4.", "layer3.", k) for k in layer_keys]
+     # layer_keys = [re.sub("^res5.", "layer4.", k) for k in layer_keys]
+
+     # blocks
+     layer_keys = [k.replace(".branch1.", ".shortcut.") for k in layer_keys]
+     layer_keys = [k.replace(".branch2a.", ".conv1.") for k in layer_keys]
+     layer_keys = [k.replace(".branch2b.", ".conv2.") for k in layer_keys]
+     layer_keys = [k.replace(".branch2c.", ".conv3.") for k in layer_keys]
+
+     # DensePose substitutions
+     layer_keys = [re.sub("^body.conv.fcn", "body_conv_fcn", k) for k in layer_keys]
+     layer_keys = [k.replace("AnnIndex.lowres", "ann_index_lowres") for k in layer_keys]
+     layer_keys = [k.replace("Index.UV.lowres", "index_uv_lowres") for k in layer_keys]
+     layer_keys = [k.replace("U.lowres", "u_lowres") for k in layer_keys]
+     layer_keys = [k.replace("V.lowres", "v_lowres") for k in layer_keys]
+     return layer_keys
+
+
+ def convert_c2_detectron_names(weights):
+     """
+     Map Caffe2 Detectron weight names to Detectron2 names.
+
+     Args:
+         weights (dict): name -> tensor
+
+     Returns:
+         dict: detectron2 names -> tensor
+         dict: detectron2 names -> C2 names
+     """
+     logger = logging.getLogger(__name__)
+     logger.info("Renaming Caffe2 weights ......")
+     original_keys = sorted(weights.keys())
+     layer_keys = copy.deepcopy(original_keys)
+
+     layer_keys = convert_basic_c2_names(layer_keys)
+
+     # --------------------------------------------------------------------------
+     # RPN hidden representation conv
+     # --------------------------------------------------------------------------
+     # FPN case
+     # In the C2 model, the RPN hidden layer conv is defined for FPN level 2 and then
+     # shared for all other levels, hence the appearance of "fpn2"
+     layer_keys = [
+         k.replace("conv.rpn.fpn2", "proposal_generator.rpn_head.conv") for k in layer_keys
+     ]
+     # Non-FPN case
+     layer_keys = [k.replace("conv.rpn", "proposal_generator.rpn_head.conv") for k in layer_keys]
+
+     # --------------------------------------------------------------------------
+     # RPN box transformation conv
+     # --------------------------------------------------------------------------
+     # FPN case (see note above about "fpn2")
+     layer_keys = [
+         k.replace("rpn.bbox.pred.fpn2", "proposal_generator.rpn_head.anchor_deltas")
+         for k in layer_keys
+     ]
+     layer_keys = [
+         k.replace("rpn.cls.logits.fpn2", "proposal_generator.rpn_head.objectness_logits")
+         for k in layer_keys
+     ]
+     # Non-FPN case
+     layer_keys = [
+         k.replace("rpn.bbox.pred", "proposal_generator.rpn_head.anchor_deltas") for k in layer_keys
+     ]
+     layer_keys = [
+         k.replace("rpn.cls.logits", "proposal_generator.rpn_head.objectness_logits")
+         for k in layer_keys
+     ]
+
+     # --------------------------------------------------------------------------
+     # Fast R-CNN box head
+     # --------------------------------------------------------------------------
+     layer_keys = [re.sub("^bbox\\.pred", "bbox_pred", k) for k in layer_keys]
+     layer_keys = [re.sub("^cls\\.score", "cls_score", k) for k in layer_keys]
+     layer_keys = [re.sub("^fc6\\.", "box_head.fc1.", k) for k in layer_keys]
+     layer_keys = [re.sub("^fc7\\.", "box_head.fc2.", k) for k in layer_keys]
+     # 4conv1fc head tensor names: head_conv1_w, head_conv1_gn_s
+     layer_keys = [re.sub("^head\\.conv", "box_head.conv", k) for k in layer_keys]
+
+     # --------------------------------------------------------------------------
+     # FPN lateral and output convolutions
+     # --------------------------------------------------------------------------
+     def fpn_map(name):
+         """
+         Look for keys with the following patterns:
+         1) Starts with "fpn.inner."
+            Example: "fpn.inner.res2.2.sum.lateral.weight"
+            Meaning: These are lateral pathway convolutions
+         2) Starts with "fpn.res"
+            Example: "fpn.res2.2.sum.weight"
+            Meaning: These are FPN output convolutions
+         """
+         splits = name.split(".")
+         norm = ".norm" if "norm" in splits else ""
+         if name.startswith("fpn.inner."):
+             # splits example: ['fpn', 'inner', 'res2', '2', 'sum', 'lateral', 'weight']
+             stage = int(splits[2][len("res") :])
+             return "fpn_lateral{}{}.{}".format(stage, norm, splits[-1])
+         elif name.startswith("fpn.res"):
+             # splits example: ['fpn', 'res2', '2', 'sum', 'weight']
+             stage = int(splits[1][len("res") :])
+             return "fpn_output{}{}.{}".format(stage, norm, splits[-1])
+         return name
+
+     layer_keys = [fpn_map(k) for k in layer_keys]
+
+     # --------------------------------------------------------------------------
+     # Mask R-CNN mask head
+     # --------------------------------------------------------------------------
+     # roi_heads.StandardROIHeads case
+     layer_keys = [k.replace(".[mask].fcn", "mask_head.mask_fcn") for k in layer_keys]
+     layer_keys = [re.sub("^\\.mask\\.fcn", "mask_head.mask_fcn", k) for k in layer_keys]
+     layer_keys = [k.replace("mask.fcn.logits", "mask_head.predictor") for k in layer_keys]
+     # roi_heads.Res5ROIHeads case
+     layer_keys = [k.replace("conv5.mask", "mask_head.deconv") for k in layer_keys]
+
+     # --------------------------------------------------------------------------
+     # Keypoint R-CNN head
+     # --------------------------------------------------------------------------
+     # interestingly, the keypoint head convs have blob names that are simply "conv_fcnX"
+     layer_keys = [k.replace("conv.fcn", "roi_heads.keypoint_head.conv_fcn") for k in layer_keys]
+     layer_keys = [
+         k.replace("kps.score.lowres", "roi_heads.keypoint_head.score_lowres") for k in layer_keys
+     ]
+     layer_keys = [k.replace("kps.score.", "roi_heads.keypoint_head.score.") for k in layer_keys]
+
+     # --------------------------------------------------------------------------
+     # Done with replacements
+     # --------------------------------------------------------------------------
+     assert len(set(layer_keys)) == len(layer_keys)
+     assert len(original_keys) == len(layer_keys)
+
+     new_weights = {}
+     new_keys_to_original_keys = {}
+     for orig, renamed in zip(original_keys, layer_keys):
+         new_keys_to_original_keys[renamed] = orig
+         if renamed.startswith("bbox_pred.") or renamed.startswith("mask_head.predictor."):
+             # remove the meaningless prediction weight for background class
+             new_start_idx = 4 if renamed.startswith("bbox_pred.") else 1
+             new_weights[renamed] = weights[orig][new_start_idx:]
+             logger.info(
+                 "Remove prediction weight for background class in {}. The shape changes from "
+                 "{} to {}.".format(
+                     renamed, tuple(weights[orig].shape), tuple(new_weights[renamed].shape)
+                 )
+             )
+         elif renamed.startswith("cls_score."):
+             # move weights of bg class from original index 0 to last index
+             logger.info(
+                 "Move classification weights for background class in {} from index 0 to "
+                 "index {}.".format(renamed, weights[orig].shape[0] - 1)
+             )
+             new_weights[renamed] = torch.cat([weights[orig][1:], weights[orig][:1]])
+         else:
+             new_weights[renamed] = weights[orig]
+
+     return new_weights, new_keys_to_original_keys
+
+
+ # Note the current matching is not symmetric.
+ # it assumes model_state_dict will have longer names.
+ def align_and_update_state_dicts(model_state_dict, ckpt_state_dict, c2_conversion=True):
+     """
+     Match names between the two state dicts, and return a new ckpt_state_dict with names
+     converted to match model_state_dict with heuristics. The returned dict can be later
+     loaded with fvcore checkpointer.
+     If `c2_conversion==True`, `ckpt_state_dict` is assumed to be a Caffe2
+     model and will be renamed at first.
+
+     Strategy: suppose that the models that we will create will have prefixes appended
+     to each of its keys, for example due to an extra level of nesting that the original
+     pre-trained weights from ImageNet won't contain. For example, model.state_dict()
+     might return backbone[0].body.res2.conv1.weight, while the pre-trained model contains
+     res2.conv1.weight. We thus want to match both parameters together.
+     For that, we look for each model weight, look among all loaded keys if there is one
+     that is a suffix of the current weight name, and use it if that's the case.
+     If multiple matches exist, take the one with longest size
+     of the corresponding name. For example, for the same model as before, the pretrained
+     weight file can contain both res2.conv1.weight, as well as conv1.weight. In this case,
+     we want to match backbone[0].body.conv1.weight to conv1.weight, and
+     backbone[0].body.res2.conv1.weight to res2.conv1.weight.
+     """
+     model_keys = sorted(model_state_dict.keys())
+     if c2_conversion:
+         ckpt_state_dict, original_keys = convert_c2_detectron_names(ckpt_state_dict)
+         # original_keys: the name in the original dict (before renaming)
+     else:
+         original_keys = {x: x for x in ckpt_state_dict.keys()}
+     ckpt_keys = sorted(ckpt_state_dict.keys())
+
+     def match(a, b):
+         # Matched ckpt_key should be a complete (starts with '.') suffix.
+         # For example, roi_heads.mesh_head.whatever_conv1 does not match conv1,
+         # but matches whatever_conv1 or mesh_head.whatever_conv1.
+         return a == b or a.endswith("." + b)
+
+     # get a matrix of string matches, where each (i, j) entry correspond to the size of the
+     # ckpt_key string, if it matches
+     match_matrix = [len(j) if match(i, j) else 0 for i in model_keys for j in ckpt_keys]
+     match_matrix = torch.as_tensor(match_matrix).view(len(model_keys), len(ckpt_keys))
+     # use the matched one with longest size in case of multiple matches
+     max_match_size, idxs = match_matrix.max(1)
+     # remove indices that correspond to no-match
+     idxs[max_match_size == 0] = -1
+
+     logger = logging.getLogger(__name__)
+     # matched_pairs (matched checkpoint key --> matched model key)
+     matched_keys = {}
+     result_state_dict = {}
+     for idx_model, idx_ckpt in enumerate(idxs.tolist()):
+         if idx_ckpt == -1:
+             continue
+         key_model = model_keys[idx_model]
+         key_ckpt = ckpt_keys[idx_ckpt]
+         value_ckpt = ckpt_state_dict[key_ckpt]
+         shape_in_model = model_state_dict[key_model].shape
+
+         if shape_in_model != value_ckpt.shape:
+             logger.warning(
+                 "Shape of {} in checkpoint is {}, while shape of {} in model is {}.".format(
+                     key_ckpt, value_ckpt.shape, key_model, shape_in_model
+                 )
+             )
+             logger.warning(
+                 "{} will not be loaded. Please double check and see if this is desired.".format(
+                     key_ckpt
+                 )
+             )
+             continue
+
+         assert key_model not in result_state_dict
+         result_state_dict[key_model] = value_ckpt
+         if key_ckpt in matched_keys:  # already added to matched_keys
+             logger.error(
+                 "Ambiguity found for {} in checkpoint!"
+                 "It matches at least two keys in the model ({} and {}).".format(
+                     key_ckpt, key_model, matched_keys[key_ckpt]
+                 )
+             )
+             raise ValueError("Cannot match one checkpoint key to multiple keys in the model.")
+
+         matched_keys[key_ckpt] = key_model
+
+     # logging:
+     matched_model_keys = sorted(matched_keys.values())
+     if len(matched_model_keys) == 0:
+         logger.warning("No weights in checkpoint matched with model.")
+         return ckpt_state_dict
+     common_prefix = _longest_common_prefix(matched_model_keys)
+     rev_matched_keys = {v: k for k, v in matched_keys.items()}
+     original_keys = {k: original_keys[rev_matched_keys[k]] for k in matched_model_keys}
+
+     model_key_groups = _group_keys_by_module(matched_model_keys, original_keys)
+     table = []
+     memo = set()
+     for key_model in matched_model_keys:
+         if key_model in memo:
+             continue
+         if key_model in model_key_groups:
+             group = model_key_groups[key_model]
+             memo |= set(group)
+             shapes = [tuple(model_state_dict[k].shape) for k in group]
+             table.append(
+                 (
+                     _longest_common_prefix([k[len(common_prefix) :] for k in group]) + "*",
+                     _group_str([original_keys[k] for k in group]),
+                     " ".join([str(x).replace(" ", "") for x in shapes]),
+                 )
+             )
+         else:
+             key_checkpoint = original_keys[key_model]
+             shape = str(tuple(model_state_dict[key_model].shape))
+             table.append((key_model[len(common_prefix) :], key_checkpoint, shape))
+     table_str = tabulate(
+         table, tablefmt="pipe", headers=["Names in Model", "Names in Checkpoint", "Shapes"]
+     )
+     logger.info(
+         "Following weights matched with "
+         + (f"submodule {common_prefix[:-1]}" if common_prefix else "model")
+         + ":\n"
+         + table_str
+     )
+
+     unmatched_ckpt_keys = [k for k in ckpt_keys if k not in set(matched_keys.keys())]
+     for k in unmatched_ckpt_keys:
+         result_state_dict[k] = ckpt_state_dict[k]
+     return result_state_dict
+
+
+ def _group_keys_by_module(keys: List[str], original_names: Dict[str, str]):
+     """
+     Params in the same submodule are grouped together.
+
+     Args:
+         keys: names of all parameters
+         original_names: mapping from parameter name to their name in the checkpoint
+
+     Returns:
+         dict[name -> all other names in the same group]
+     """
+
+     def _submodule_name(key):
+         pos = key.rfind(".")
+         if pos < 0:
+             return None
+         prefix = key[: pos + 1]
+         return prefix
+
+     all_submodules = [_submodule_name(k) for k in keys]
+     all_submodules = [x for x in all_submodules if x]
+     all_submodules = sorted(all_submodules, key=len)
+
+     ret = {}
+     for prefix in all_submodules:
+         group = [k for k in keys if k.startswith(prefix)]
+         if len(group) <= 1:
+             continue
+         original_name_lcp = _longest_common_prefix_str([original_names[k] for k in group])
+         if len(original_name_lcp) == 0:
+             # don't group weights if original names don't share prefix
+             continue
+
+         for k in group:
+             if k in ret:
+                 continue
+             ret[k] = group
+     return ret
+
+
+ def _longest_common_prefix(names: List[str]) -> str:
+     """
+     ["abc.zfg", "abc.zef"] -> "abc."
+     """
+     names = [n.split(".") for n in names]
+     m1, m2 = min(names), max(names)
+     ret = [a for a, b in zip(m1, m2) if a == b]
+     ret = ".".join(ret) + "." if len(ret) else ""
+     return ret
+
+
+ def _longest_common_prefix_str(names: List[str]) -> str:
+     m1, m2 = min(names), max(names)
+     lcp = []
+     for a, b in zip(m1, m2):
+         if a == b:
+             lcp.append(a)
+         else:
+             break
+     lcp = "".join(lcp)
+     return lcp
+
+
+ def _group_str(names: List[str]) -> str:
+     """
+     Turn "common1", "common2", "common3" into "common{1,2,3}"
+     """
+     lcp = _longest_common_prefix_str(names)
+     rest = [x[len(lcp) :] for x in names]
+     rest = "{" + ",".join(rest) + "}"
+     ret = lcp + rest
+
+     # add some simplification for BN specifically
+     ret = ret.replace("bn_{beta,running_mean,running_var,gamma}", "bn_*")
+     ret = ret.replace("bn_beta,bn_running_mean,bn_running_var,bn_gamma", "bn_*")
+     return ret
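A minimal sketch of the calling convention of `align_and_update_state_dicts` on a plain torchvision model (no Caffe2 renaming, so `c2_conversion=False`; the flat import path is illustrative):

    import torchvision
    from c2_model_loading import align_and_update_state_dicts

    model = torchvision.models.resnet18()
    ckpt = {k: v.clone() for k, v in model.state_dict().items()}  # stand-in checkpoint
    aligned = align_and_update_state_dicts(model.state_dict(), ckpt, c2_conversion=False)
    model.load_state_dict(aligned)   # keys were matched by suffix, shapes verified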
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/catalog.py ADDED
@@ -0,0 +1,115 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+
+ from annotator.oneformer.detectron2.utils.file_io import PathHandler, PathManager
+
+
+ class ModelCatalog(object):
+     """
+     Store mappings from names to third-party models.
+     """
+
+     S3_C2_DETECTRON_PREFIX = "https://dl.fbaipublicfiles.com/detectron"
+
+     # MSRA models have STRIDE_IN_1X1=True. False otherwise.
+     # NOTE: all BN models here have fused BN into an affine layer.
+     # As a result, you should only load them to a model with "FrozenBN".
+     # Loading them to a model with regular BN or SyncBN is wrong.
+     # Even when loaded to FrozenBN, it is still different from affine by an epsilon,
+     # which should be negligible for training.
+     # NOTE: all models here use PIXEL_STD=[1,1,1]
+     # NOTE: Most of the BN models here are no longer used. We use the
+     # re-converted pre-trained models under detectron2 model zoo instead.
+     C2_IMAGENET_MODELS = {
+         "MSRA/R-50": "ImageNetPretrained/MSRA/R-50.pkl",
+         "MSRA/R-101": "ImageNetPretrained/MSRA/R-101.pkl",
+         "FAIR/R-50-GN": "ImageNetPretrained/47261647/R-50-GN.pkl",
+         "FAIR/R-101-GN": "ImageNetPretrained/47592356/R-101-GN.pkl",
+         "FAIR/X-101-32x8d": "ImageNetPretrained/20171220/X-101-32x8d.pkl",
+         "FAIR/X-101-64x4d": "ImageNetPretrained/FBResNeXt/X-101-64x4d.pkl",
+         "FAIR/X-152-32x8d-IN5k": "ImageNetPretrained/25093814/X-152-32x8d-IN5k.pkl",
+     }
+
+     C2_DETECTRON_PATH_FORMAT = (
+         "{prefix}/{url}/output/train/{dataset}/{type}/model_final.pkl"  # noqa B950
+     )
+
+     C2_DATASET_COCO = "coco_2014_train%3Acoco_2014_valminusminival"
+     C2_DATASET_COCO_KEYPOINTS = "keypoints_coco_2014_train%3Akeypoints_coco_2014_valminusminival"
+
+     # format: {model_name} -> part of the url
+     C2_DETECTRON_MODELS = {
+         "35857197/e2e_faster_rcnn_R-50-C4_1x": "35857197/12_2017_baselines/e2e_faster_rcnn_R-50-C4_1x.yaml.01_33_49.iAX0mXvW",  # noqa B950
+         "35857345/e2e_faster_rcnn_R-50-FPN_1x": "35857345/12_2017_baselines/e2e_faster_rcnn_R-50-FPN_1x.yaml.01_36_30.cUF7QR7I",  # noqa B950
+         "35857890/e2e_faster_rcnn_R-101-FPN_1x": "35857890/12_2017_baselines/e2e_faster_rcnn_R-101-FPN_1x.yaml.01_38_50.sNxI7sX7",  # noqa B950
+         "36761737/e2e_faster_rcnn_X-101-32x8d-FPN_1x": "36761737/12_2017_baselines/e2e_faster_rcnn_X-101-32x8d-FPN_1x.yaml.06_31_39.5MIHi1fZ",  # noqa B950
+         "35858791/e2e_mask_rcnn_R-50-C4_1x": "35858791/12_2017_baselines/e2e_mask_rcnn_R-50-C4_1x.yaml.01_45_57.ZgkA7hPB",  # noqa B950
+         "35858933/e2e_mask_rcnn_R-50-FPN_1x": "35858933/12_2017_baselines/e2e_mask_rcnn_R-50-FPN_1x.yaml.01_48_14.DzEQe4wC",  # noqa B950
+         "35861795/e2e_mask_rcnn_R-101-FPN_1x": "35861795/12_2017_baselines/e2e_mask_rcnn_R-101-FPN_1x.yaml.02_31_37.KqyEK4tT",  # noqa B950
+         "36761843/e2e_mask_rcnn_X-101-32x8d-FPN_1x": "36761843/12_2017_baselines/e2e_mask_rcnn_X-101-32x8d-FPN_1x.yaml.06_35_59.RZotkLKI",  # noqa B950
+         "48616381/e2e_mask_rcnn_R-50-FPN_2x_gn": "GN/48616381/04_2018_gn_baselines/e2e_mask_rcnn_R-50-FPN_2x_gn_0416.13_23_38.bTlTI97Q",  # noqa B950
+         "37697547/e2e_keypoint_rcnn_R-50-FPN_1x": "37697547/12_2017_baselines/e2e_keypoint_rcnn_R-50-FPN_1x.yaml.08_42_54.kdzV35ao",  # noqa B950
+         "35998355/rpn_R-50-C4_1x": "35998355/12_2017_baselines/rpn_R-50-C4_1x.yaml.08_00_43.njH5oD9L",  # noqa B950
+         "35998814/rpn_R-50-FPN_1x": "35998814/12_2017_baselines/rpn_R-50-FPN_1x.yaml.08_06_03.Axg0r179",  # noqa B950
+         "36225147/fast_R-50-FPN_1x": "36225147/12_2017_baselines/fast_rcnn_R-50-FPN_1x.yaml.08_39_09.L3obSdQ2",  # noqa B950
+     }
+
+     @staticmethod
+     def get(name):
+         if name.startswith("Caffe2Detectron/COCO"):
+             return ModelCatalog._get_c2_detectron_baseline(name)
+         if name.startswith("ImageNetPretrained/"):
+             return ModelCatalog._get_c2_imagenet_pretrained(name)
+         raise RuntimeError("model not present in the catalog: {}".format(name))
+
+     @staticmethod
+     def _get_c2_imagenet_pretrained(name):
+         prefix = ModelCatalog.S3_C2_DETECTRON_PREFIX
+         name = name[len("ImageNetPretrained/") :]
+         name = ModelCatalog.C2_IMAGENET_MODELS[name]
+         url = "/".join([prefix, name])
+         return url
+
+     @staticmethod
+     def _get_c2_detectron_baseline(name):
+         name = name[len("Caffe2Detectron/COCO/") :]
+         url = ModelCatalog.C2_DETECTRON_MODELS[name]
+         if "keypoint_rcnn" in name:
+             dataset = ModelCatalog.C2_DATASET_COCO_KEYPOINTS
+         else:
+             dataset = ModelCatalog.C2_DATASET_COCO
+
+         if "35998355/rpn_R-50-C4_1x" in name:
+             # this one model is somehow different from others ..
+             type = "rpn"
+         else:
+             type = "generalized_rcnn"
+
+         # Detectron C2 models are stored in the structure defined in `C2_DETECTRON_PATH_FORMAT`.
+         url = ModelCatalog.C2_DETECTRON_PATH_FORMAT.format(
+             prefix=ModelCatalog.S3_C2_DETECTRON_PREFIX, url=url, type=type, dataset=dataset
+         )
+         return url
+
+
+ class ModelCatalogHandler(PathHandler):
+     """
+     Resolve URL like catalog://.
+     """
+
+     PREFIX = "catalog://"
+
+     def _get_supported_prefixes(self):
+         return [self.PREFIX]
+
+     def _get_local_path(self, path, **kwargs):
+         logger = logging.getLogger(__name__)
+         catalog_path = ModelCatalog.get(path[len(self.PREFIX) :])
+         logger.info("Catalog entry {} points to {}".format(path, catalog_path))
+         return PathManager.get_local_path(catalog_path, **kwargs)
+
+     def _open(self, path, mode="r", **kwargs):
+         return PathManager.open(self._get_local_path(path), mode, **kwargs)
+
+
+ PathManager.register_handler(ModelCatalogHandler())
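Once the handler above is registered, a `catalog://` name resolves through `ModelCatalog.get` and then through the usual `PathManager` machinery. A hedged usage sketch (the lookup key is one of the real `C2_IMAGENET_MODELS` entries above; `get_local_path` downloads and caches the file, so it needs network access):

from annotator.oneformer.detectron2.utils.file_io import PathManager

# Direct lookup: returns the S3 URL for a pretrained backbone.
url = ModelCatalog.get("ImageNetPretrained/MSRA/R-50")
# -> "https://dl.fbaipublicfiles.com/detectron/ImageNetPretrained/MSRA/R-50.pkl"

# Same lookup via the registered PathHandler, as a plain path string.
local_path = PathManager.get_local_path("catalog://ImageNetPretrained/MSRA/R-50")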
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/checkpoint/detection_checkpoint.py ADDED
@@ -0,0 +1,145 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import os
+ import pickle
+ from urllib.parse import parse_qs, urlparse
+ import torch
+ from fvcore.common.checkpoint import Checkpointer
+ from torch.nn.parallel import DistributedDataParallel
+
+ import annotator.oneformer.detectron2.utils.comm as comm
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+
+ from .c2_model_loading import align_and_update_state_dicts
+
+
+ class DetectionCheckpointer(Checkpointer):
+     """
+     Same as :class:`Checkpointer`, but is able to:
+     1. handle models in detectron & detectron2 model zoo, and apply conversions for legacy models.
+     2. correctly load checkpoints that are only available on the master worker
+     """
+
+     def __init__(self, model, save_dir="", *, save_to_disk=None, **checkpointables):
+         is_main_process = comm.is_main_process()
+         super().__init__(
+             model,
+             save_dir,
+             save_to_disk=is_main_process if save_to_disk is None else save_to_disk,
+             **checkpointables,
+         )
+         self.path_manager = PathManager
+         self._parsed_url_during_load = None
+
+     def load(self, path, *args, **kwargs):
+         assert self._parsed_url_during_load is None
+         need_sync = False
+         logger = logging.getLogger(__name__)
+         logger.info("[DetectionCheckpointer] Loading from {} ...".format(path))
+
+         if path and isinstance(self.model, DistributedDataParallel):
+             path = self.path_manager.get_local_path(path)
+             has_file = os.path.isfile(path)
+             all_has_file = comm.all_gather(has_file)
+             if not all_has_file[0]:
+                 raise OSError(f"File {path} not found on main worker.")
+             if not all(all_has_file):
+                 logger.warning(
+                     f"Not all workers can read checkpoint {path}. "
+                     "Training may fail to fully resume."
+                 )
+                 # TODO: broadcast the checkpoint file contents from main
+                 # worker, and load from it instead.
+                 need_sync = True
+             if not has_file:
+                 path = None  # don't load if not readable
+
+         if path:
+             parsed_url = urlparse(path)
+             self._parsed_url_during_load = parsed_url
+             path = parsed_url._replace(query="").geturl()  # remove query from filename
+             path = self.path_manager.get_local_path(path)
+
+         self.logger.setLevel('CRITICAL')
+         ret = super().load(path, *args, **kwargs)
+
+         if need_sync:
+             logger.info("Broadcasting model states from main worker ...")
+             self.model._sync_params_and_buffers()
+         self._parsed_url_during_load = None  # reset to None
+         return ret
+
+     def _load_file(self, filename):
+         if filename.endswith(".pkl"):
+             with PathManager.open(filename, "rb") as f:
+                 data = pickle.load(f, encoding="latin1")
+             if "model" in data and "__author__" in data:
+                 # file is in Detectron2 model zoo format
+                 self.logger.info("Reading a file from '{}'".format(data["__author__"]))
+                 return data
+             else:
+                 # assume file is from Caffe2 / Detectron1 model zoo
+                 if "blobs" in data:
+                     # Detection models have "blobs", but ImageNet models don't
+                     data = data["blobs"]
+                 data = {k: v for k, v in data.items() if not k.endswith("_momentum")}
+                 return {"model": data, "__author__": "Caffe2", "matching_heuristics": True}
+         elif filename.endswith(".pyth"):
+             # assume file is from pycls; no one else seems to use the ".pyth" extension
+             with PathManager.open(filename, "rb") as f:
+                 data = torch.load(f)
+             assert (
+                 "model_state" in data
+             ), f"Cannot load .pyth file {filename}; pycls checkpoints must contain 'model_state'."
+             model_state = {
+                 k: v
+                 for k, v in data["model_state"].items()
+                 if not k.endswith("num_batches_tracked")
+             }
+             return {"model": model_state, "__author__": "pycls", "matching_heuristics": True}
+
+         loaded = self._torch_load(filename)
+         if "model" not in loaded:
+             loaded = {"model": loaded}
+         assert self._parsed_url_during_load is not None, "`_load_file` must be called inside `load`"
+         parsed_url = self._parsed_url_during_load
+         queries = parse_qs(parsed_url.query)
+         if queries.pop("matching_heuristics", "False") == ["True"]:
+             loaded["matching_heuristics"] = True
+         if len(queries) > 0:
+             raise ValueError(
+                 f"Unsupported query remaining: {queries}, original filename: {parsed_url.geturl()}"
+             )
+         return loaded
+
+     def _torch_load(self, f):
+         return super()._load_file(f)
+
+     def _load_model(self, checkpoint):
+         if checkpoint.get("matching_heuristics", False):
+             self._convert_ndarray_to_tensor(checkpoint["model"])
+             # convert weights by name-matching heuristics
+             checkpoint["model"] = align_and_update_state_dicts(
+                 self.model.state_dict(),
+                 checkpoint["model"],
+                 c2_conversion=checkpoint.get("__author__", None) == "Caffe2",
+             )
+         # for non-caffe2 models, use standard ways to load it
+         incompatible = super()._load_model(checkpoint)
+
+         model_buffers = dict(self.model.named_buffers(recurse=False))
+         for k in ["pixel_mean", "pixel_std"]:
+             # Ignore missing key message about pixel_mean/std.
+             # Though they may be missing in old checkpoints, they will be correctly
+             # initialized from config anyway.
+             if k in model_buffers:
+                 try:
+                     incompatible.missing_keys.remove(k)
+                 except ValueError:
+                     pass
+         for k in incompatible.unexpected_keys[:]:
+             # Ignore unexpected keys about cell anchors. They exist in old checkpoints
+             # but now they are non-persistent buffers and will not be in new checkpoints.
+             if "anchor_generator.cell_anchors" in k:
+                 incompatible.unexpected_keys.remove(k)
+         return incompatible
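A hedged usage sketch of the query-string mechanism in `load`/`_load_file` above. The import path assumes the package `__init__` re-exports `DetectionCheckpointer` as upstream detectron2 does, and the model and file name are placeholders:

import torch.nn as nn
from annotator.oneformer.detectron2.checkpoint import DetectionCheckpointer  # assumed re-export

model = nn.Linear(4, 2)  # stand-in for a real detection model
checkpointer = DetectionCheckpointer(model, save_dir="./out")
# The "?matching_heuristics=True" query never reaches the filesystem: load()
# strips it from the path, and _load_file() reads it back from the parsed URL
# to force Caffe2-style name matching for this one checkpoint.
checkpointer.load("./out/weights.pth?matching_heuristics=True")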
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/__init__.py ADDED
@@ -0,0 +1,24 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .compat import downgrade_config, upgrade_config
+ from .config import CfgNode, get_cfg, global_cfg, set_global_cfg, configurable
+ from .instantiate import instantiate
+ from .lazy import LazyCall, LazyConfig
+
+ __all__ = [
+     "CfgNode",
+     "get_cfg",
+     "global_cfg",
+     "set_global_cfg",
+     "downgrade_config",
+     "upgrade_config",
+     "configurable",
+     "instantiate",
+     "LazyCall",
+     "LazyConfig",
+ ]
+
+
+ from annotator.oneformer.detectron2.utils.env import fixup_module_metadata
+
+ fixup_module_metadata(__name__, globals(), __all__)
+ del fixup_module_metadata
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/compat.py ADDED
@@ -0,0 +1,229 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ """
+ Backward compatibility of configs.
+
+ Instructions to bump version:
+ + It's not needed to bump version if new keys are added.
+   It's only needed when backward-incompatible changes happen
+   (i.e., some existing keys disappear, or the meaning of a key changes)
+ + To bump version, do the following:
+     1. Increment _C.VERSION in defaults.py
+     2. Add a converter in this file.
+
+        Each ConverterVX has a function "upgrade" which in-place upgrades config from X-1 to X,
+        and a function "downgrade" which in-place downgrades config from X to X-1
+
+        In each function, VERSION is left unchanged.
+
+        Each converter assumes that its input has the relevant keys
+        (i.e., the input is not a partial config).
+     3. Run the tests (test_config.py) to make sure the upgrade & downgrade
+        functions are consistent.
+ """
+
+ import logging
+ from typing import List, Optional, Tuple
+
+ from .config import CfgNode as CN
+ from .defaults import _C
+
+ __all__ = ["upgrade_config", "downgrade_config"]
+
+
+ def upgrade_config(cfg: CN, to_version: Optional[int] = None) -> CN:
+     """
+     Upgrade a config from its current version to a newer version.
+
+     Args:
+         cfg (CfgNode):
+         to_version (int): defaults to the latest version.
+     """
+     cfg = cfg.clone()
+     if to_version is None:
+         to_version = _C.VERSION
+
+     assert cfg.VERSION <= to_version, "Cannot upgrade from v{} to v{}!".format(
+         cfg.VERSION, to_version
+     )
+     for k in range(cfg.VERSION, to_version):
+         converter = globals()["ConverterV" + str(k + 1)]
+         converter.upgrade(cfg)
+         cfg.VERSION = k + 1
+     return cfg
+
+
+ def downgrade_config(cfg: CN, to_version: int) -> CN:
+     """
+     Downgrade a config from its current version to an older version.
+
+     Args:
+         cfg (CfgNode):
+         to_version (int):
+
+     Note:
+         A general downgrade of arbitrary configs is not always possible due to the
+         different functionalities in different versions.
+         The purpose of downgrade is only to recover the defaults in old versions,
+         allowing it to load an old partial yaml config.
+         Therefore, the implementation only needs to fill in the default values
+         in the old version when a general downgrade is not possible.
+     """
+     cfg = cfg.clone()
+     assert cfg.VERSION >= to_version, "Cannot downgrade from v{} to v{}!".format(
+         cfg.VERSION, to_version
+     )
+     for k in range(cfg.VERSION, to_version, -1):
+         converter = globals()["ConverterV" + str(k)]
+         converter.downgrade(cfg)
+         cfg.VERSION = k - 1
+     return cfg
+
+
+ def guess_version(cfg: CN, filename: str) -> int:
+     """
+     Guess the version of a partial config where the VERSION field is not specified.
+     Returns the version, or the latest if it cannot make a guess.
+
+     This makes it easier for users to migrate.
+     """
+     logger = logging.getLogger(__name__)
+
+     def _has(name: str) -> bool:
+         cur = cfg
+         for n in name.split("."):
+             if n not in cur:
+                 return False
+             cur = cur[n]
+         return True
+
+     # Most users' partial configs have "MODEL.WEIGHT", so guess on it
+     ret = None
+     if _has("MODEL.WEIGHT") or _has("TEST.AUG_ON"):
+         ret = 1
+
+     if ret is not None:
+         logger.warning("Config '{}' has no VERSION. Assuming it to be v{}.".format(filename, ret))
+     else:
+         ret = _C.VERSION
+         logger.warning(
+             "Config '{}' has no VERSION. Assuming it to be compatible with latest v{}.".format(
+                 filename, ret
+             )
+         )
+     return ret
+
+
+ def _rename(cfg: CN, old: str, new: str) -> None:
+     old_keys = old.split(".")
+     new_keys = new.split(".")
+
+     def _set(key_seq: List[str], val: str) -> None:
+         cur = cfg
+         for k in key_seq[:-1]:
+             if k not in cur:
+                 cur[k] = CN()
+             cur = cur[k]
+         cur[key_seq[-1]] = val
+
+     def _get(key_seq: List[str]) -> CN:
+         cur = cfg
+         for k in key_seq:
+             cur = cur[k]
+         return cur
+
+     def _del(key_seq: List[str]) -> None:
+         cur = cfg
+         for k in key_seq[:-1]:
+             cur = cur[k]
+         del cur[key_seq[-1]]
+         if len(cur) == 0 and len(key_seq) > 1:
+             _del(key_seq[:-1])
+
+     _set(new_keys, _get(old_keys))
+     _del(old_keys)
+
+
+ class _RenameConverter:
+     """
+     A converter that handles simple rename.
+     """
+
+     RENAME: List[Tuple[str, str]] = []  # list of tuples of (old name, new name)
+
+     @classmethod
+     def upgrade(cls, cfg: CN) -> None:
+         for old, new in cls.RENAME:
+             _rename(cfg, old, new)
+
+     @classmethod
+     def downgrade(cls, cfg: CN) -> None:
+         for old, new in cls.RENAME[::-1]:
+             _rename(cfg, new, old)
+
+
+ class ConverterV1(_RenameConverter):
+     RENAME = [("MODEL.RPN_HEAD.NAME", "MODEL.RPN.HEAD_NAME")]
+
+
+ class ConverterV2(_RenameConverter):
+     """
+     A large bulk of rename, before public release.
+     """
+
+     RENAME = [
+         ("MODEL.WEIGHT", "MODEL.WEIGHTS"),
+         ("MODEL.PANOPTIC_FPN.SEMANTIC_LOSS_SCALE", "MODEL.SEM_SEG_HEAD.LOSS_WEIGHT"),
+         ("MODEL.PANOPTIC_FPN.RPN_LOSS_SCALE", "MODEL.RPN.LOSS_WEIGHT"),
+         ("MODEL.PANOPTIC_FPN.INSTANCE_LOSS_SCALE", "MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT"),
+         ("MODEL.PANOPTIC_FPN.COMBINE_ON", "MODEL.PANOPTIC_FPN.COMBINE.ENABLED"),
+         (
+             "MODEL.PANOPTIC_FPN.COMBINE_OVERLAP_THRESHOLD",
+             "MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH",
+         ),
+         (
+             "MODEL.PANOPTIC_FPN.COMBINE_STUFF_AREA_LIMIT",
+             "MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT",
+         ),
+         (
+             "MODEL.PANOPTIC_FPN.COMBINE_INSTANCES_CONFIDENCE_THRESHOLD",
+             "MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH",
+         ),
+         ("MODEL.ROI_HEADS.SCORE_THRESH", "MODEL.ROI_HEADS.SCORE_THRESH_TEST"),
+         ("MODEL.ROI_HEADS.NMS", "MODEL.ROI_HEADS.NMS_THRESH_TEST"),
+         ("MODEL.RETINANET.INFERENCE_SCORE_THRESHOLD", "MODEL.RETINANET.SCORE_THRESH_TEST"),
+         ("MODEL.RETINANET.INFERENCE_TOPK_CANDIDATES", "MODEL.RETINANET.TOPK_CANDIDATES_TEST"),
+         ("MODEL.RETINANET.INFERENCE_NMS_THRESHOLD", "MODEL.RETINANET.NMS_THRESH_TEST"),
+         ("TEST.DETECTIONS_PER_IMG", "TEST.DETECTIONS_PER_IMAGE"),
+         ("TEST.AUG_ON", "TEST.AUG.ENABLED"),
+         ("TEST.AUG_MIN_SIZES", "TEST.AUG.MIN_SIZES"),
+         ("TEST.AUG_MAX_SIZE", "TEST.AUG.MAX_SIZE"),
+         ("TEST.AUG_FLIP", "TEST.AUG.FLIP"),
+     ]
+
+     @classmethod
+     def upgrade(cls, cfg: CN) -> None:
+         super().upgrade(cfg)
+
+         if cfg.MODEL.META_ARCHITECTURE == "RetinaNet":
+             _rename(
+                 cfg, "MODEL.RETINANET.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS"
+             )
+             _rename(cfg, "MODEL.RETINANET.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
+             del cfg["MODEL"]["RPN"]["ANCHOR_SIZES"]
+             del cfg["MODEL"]["RPN"]["ANCHOR_ASPECT_RATIOS"]
+         else:
+             _rename(cfg, "MODEL.RPN.ANCHOR_ASPECT_RATIOS", "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS")
+             _rename(cfg, "MODEL.RPN.ANCHOR_SIZES", "MODEL.ANCHOR_GENERATOR.SIZES")
+             del cfg["MODEL"]["RETINANET"]["ANCHOR_SIZES"]
+             del cfg["MODEL"]["RETINANET"]["ANCHOR_ASPECT_RATIOS"]
+         del cfg["MODEL"]["RETINANET"]["ANCHOR_STRIDES"]
+
+     @classmethod
+     def downgrade(cls, cfg: CN) -> None:
+         super().downgrade(cfg)
+
+         _rename(cfg, "MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS", "MODEL.RPN.ANCHOR_ASPECT_RATIOS")
+         _rename(cfg, "MODEL.ANCHOR_GENERATOR.SIZES", "MODEL.RPN.ANCHOR_SIZES")
+         cfg.MODEL.RETINANET.ANCHOR_ASPECT_RATIOS = cfg.MODEL.RPN.ANCHOR_ASPECT_RATIOS
+         cfg.MODEL.RETINANET.ANCHOR_SIZES = cfg.MODEL.RPN.ANCHOR_SIZES
+         cfg.MODEL.RETINANET.ANCHOR_STRIDES = []  # this is not used anywhere in any version
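A toy walk-through of what `_rename` does, runnable in this module's namespace (the key pair is ConverterV2's first RENAME entry; the value is a placeholder):

from .config import CfgNode as CN  # the same import this module uses

cfg = CN()
cfg.MODEL = CN()
cfg.MODEL.WEIGHT = "model.pkl"                 # v1 spelling
_rename(cfg, "MODEL.WEIGHT", "MODEL.WEIGHTS")  # copy to the new key, delete the old
assert cfg.MODEL.WEIGHTS == "model.pkl"
assert "WEIGHT" not in cfg.MODEL               # old key (and any emptied parent node) removed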
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/config.py ADDED
@@ -0,0 +1,265 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ import functools
+ import inspect
+ import logging
+ from fvcore.common.config import CfgNode as _CfgNode
+
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+
+
+ class CfgNode(_CfgNode):
+     """
+     The same as `fvcore.common.config.CfgNode`, but different in:
+
+     1. Use unsafe yaml loading by default.
+        Note that this may lead to arbitrary code execution: you must not
+        load a config file from untrusted sources before manually inspecting
+        the content of the file.
+     2. Support config versioning.
+        When attempting to merge an old config, it will convert the old config automatically.
+
+     .. automethod:: clone
+     .. automethod:: freeze
+     .. automethod:: defrost
+     .. automethod:: is_frozen
+     .. automethod:: load_yaml_with_base
+     .. automethod:: merge_from_list
+     .. automethod:: merge_from_other_cfg
+     """
+
+     @classmethod
+     def _open_cfg(cls, filename):
+         return PathManager.open(filename, "r")
+
+     # Note that the default value of allow_unsafe is changed to True
+     def merge_from_file(self, cfg_filename: str, allow_unsafe: bool = True) -> None:
+         """
+         Load content from the given config file and merge it into self.
+
+         Args:
+             cfg_filename: config filename
+             allow_unsafe: allow unsafe yaml syntax
+         """
+         assert PathManager.isfile(cfg_filename), f"Config file '{cfg_filename}' does not exist!"
+         loaded_cfg = self.load_yaml_with_base(cfg_filename, allow_unsafe=allow_unsafe)
+         loaded_cfg = type(self)(loaded_cfg)
+
+         # defaults.py needs to import CfgNode
+         from .defaults import _C
+
+         latest_ver = _C.VERSION
+         assert (
+             latest_ver == self.VERSION
+         ), "CfgNode.merge_from_file is only allowed on a config object of latest version!"
+
+         logger = logging.getLogger(__name__)
+
+         loaded_ver = loaded_cfg.get("VERSION", None)
+         if loaded_ver is None:
+             from .compat import guess_version
+
+             loaded_ver = guess_version(loaded_cfg, cfg_filename)
+         assert loaded_ver <= self.VERSION, "Cannot merge a v{} config into a v{} config.".format(
+             loaded_ver, self.VERSION
+         )
+
+         if loaded_ver == self.VERSION:
+             self.merge_from_other_cfg(loaded_cfg)
+         else:
+             # compat.py needs to import CfgNode
+             from .compat import upgrade_config, downgrade_config
+
+             logger.warning(
+                 "Loading an old v{} config file '{}' by automatically upgrading to v{}. "
+                 "See docs/CHANGELOG.md for instructions to update your files.".format(
+                     loaded_ver, cfg_filename, self.VERSION
+                 )
+             )
+             # To convert, first obtain a full config at an old version
+             old_self = downgrade_config(self, to_version=loaded_ver)
+             old_self.merge_from_other_cfg(loaded_cfg)
+             new_config = upgrade_config(old_self)
+             self.clear()
+             self.update(new_config)
+
+     def dump(self, *args, **kwargs):
+         """
+         Returns:
+             str: a yaml string representation of the config
+         """
+         # to make it show up in docs
+         return super().dump(*args, **kwargs)
+
+
+ global_cfg = CfgNode()
+
+
+ def get_cfg() -> CfgNode:
+     """
+     Get a copy of the default config.
+
+     Returns:
+         a detectron2 CfgNode instance.
+     """
+     from .defaults import _C
+
+     return _C.clone()
+
+
+ def set_global_cfg(cfg: CfgNode) -> None:
+     """
+     Let the global config point to the given cfg.
+
+     Assume that the given "cfg" has the key "KEY", after calling
+     `set_global_cfg(cfg)`, the key can be accessed by:
+     ::
+         from annotator.oneformer.detectron2.config import global_cfg
+         print(global_cfg.KEY)
+
+     By using a hacky global config, you can access these configs anywhere,
+     without having to pass the config object or the values deep into the code.
+     This is a hacky feature introduced for quick prototyping / research exploration.
+     """
+     global global_cfg
+     global_cfg.clear()
+     global_cfg.update(cfg)
+
+
+ def configurable(init_func=None, *, from_config=None):
+     """
+     Decorate a function or a class's __init__ method so that it can be called
+     with a :class:`CfgNode` object using a :func:`from_config` function that translates
+     :class:`CfgNode` to arguments.
+
+     Examples:
+     ::
+         # Usage 1: Decorator on __init__:
+         class A:
+             @configurable
+             def __init__(self, a, b=2, c=3):
+                 pass
+
+             @classmethod
+             def from_config(cls, cfg):   # 'cfg' must be the first argument
+                 # Returns kwargs to be passed to __init__
+                 return {"a": cfg.A, "b": cfg.B}
+
+         a1 = A(a=1, b=2)  # regular construction
+         a2 = A(cfg)       # construct with a cfg
+         a3 = A(cfg, b=3, c=4)  # construct with extra overwrite
+
+         # Usage 2: Decorator on any function. Needs an extra from_config argument:
+         @configurable(from_config=lambda cfg: {"a": cfg.A, "b": cfg.B})
+         def a_func(a, b=2, c=3):
+             pass
+
+         a1 = a_func(a=1, b=2)  # regular call
+         a2 = a_func(cfg)       # call with a cfg
+         a3 = a_func(cfg, b=3, c=4)  # call with extra overwrite
+
+     Args:
+         init_func (callable): a class's ``__init__`` method in usage 1. The
+             class must have a ``from_config`` classmethod which takes `cfg` as
+             the first argument.
+         from_config (callable): the from_config function in usage 2. It must take `cfg`
+             as its first argument.
+     """
+
+     if init_func is not None:
+         assert (
+             inspect.isfunction(init_func)
+             and from_config is None
+             and init_func.__name__ == "__init__"
+         ), "Incorrect use of @configurable. Check API documentation for examples."
+
+         @functools.wraps(init_func)
+         def wrapped(self, *args, **kwargs):
+             try:
+                 from_config_func = type(self).from_config
+             except AttributeError as e:
+                 raise AttributeError(
+                     "Class with @configurable must have a 'from_config' classmethod."
+                 ) from e
+             if not inspect.ismethod(from_config_func):
+                 raise TypeError("Class with @configurable must have a 'from_config' classmethod.")
+
+             if _called_with_cfg(*args, **kwargs):
+                 explicit_args = _get_args_from_config(from_config_func, *args, **kwargs)
+                 init_func(self, **explicit_args)
+             else:
+                 init_func(self, *args, **kwargs)
+
+         return wrapped
+
+     else:
+         if from_config is None:
+             return configurable  # @configurable() is made equivalent to @configurable
+         assert inspect.isfunction(
+             from_config
+         ), "from_config argument of configurable must be a function!"
+
+         def wrapper(orig_func):
+             @functools.wraps(orig_func)
+             def wrapped(*args, **kwargs):
+                 if _called_with_cfg(*args, **kwargs):
+                     explicit_args = _get_args_from_config(from_config, *args, **kwargs)
+                     return orig_func(**explicit_args)
+                 else:
+                     return orig_func(*args, **kwargs)
+
+             wrapped.from_config = from_config
+             return wrapped
+
+         return wrapper
+
+
+ def _get_args_from_config(from_config_func, *args, **kwargs):
+     """
+     Use `from_config` to obtain explicit arguments.
+
+     Returns:
+         dict: arguments to be used for cls.__init__
+     """
+     signature = inspect.signature(from_config_func)
+     if list(signature.parameters.keys())[0] != "cfg":
+         if inspect.isfunction(from_config_func):
+             name = from_config_func.__name__
+         else:
+             name = f"{from_config_func.__self__}.from_config"
+         raise TypeError(f"{name} must take 'cfg' as the first argument!")
+     support_var_arg = any(
+         param.kind in [param.VAR_POSITIONAL, param.VAR_KEYWORD]
+         for param in signature.parameters.values()
+     )
+     if support_var_arg:  # forward all arguments to from_config, if from_config accepts them
+         ret = from_config_func(*args, **kwargs)
+     else:
+         # forward supported arguments to from_config
+         supported_arg_names = set(signature.parameters.keys())
+         extra_kwargs = {}
+         for name in list(kwargs.keys()):
+             if name not in supported_arg_names:
+                 extra_kwargs[name] = kwargs.pop(name)
+         ret = from_config_func(*args, **kwargs)
+         # forward the other arguments to __init__
+         ret.update(extra_kwargs)
+     return ret
+
+
+ def _called_with_cfg(*args, **kwargs):
+     """
+     Returns:
+         bool: whether the arguments contain CfgNode and should be considered
+             forwarded to from_config.
+     """
+     from omegaconf import DictConfig
+
+     if len(args) and isinstance(args[0], (_CfgNode, DictConfig)):
+         return True
+     if isinstance(kwargs.pop("cfg", None), (_CfgNode, DictConfig)):
+         return True
+     # `from_config`'s first argument is forced to be "cfg".
+     # So the above check covers all cases.
+     return False
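To make the two call paths of `@configurable` concrete, a small hypothetical class (`ToyHead` and its fields are illustrative, not from this repo; `MODEL.ROI_HEADS.NUM_CLASSES` is a real key from defaults.py below):

from annotator.oneformer.detectron2.config import configurable, get_cfg

class ToyHead:
    @configurable
    def __init__(self, num_classes, hidden_dim=256):
        self.num_classes = num_classes
        self.hidden_dim = hidden_dim

    @classmethod
    def from_config(cls, cfg):
        # translate a CfgNode into __init__ kwargs
        return {"num_classes": cfg.MODEL.ROI_HEADS.NUM_CLASSES}

h1 = ToyHead(num_classes=3)            # plain construction; from_config is bypassed
h2 = ToyHead(get_cfg())                # cfg detected -> routed through from_config
h3 = ToyHead(get_cfg(), hidden_dim=1)  # explicit kwargs extend/overwrite cfg-derived ones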
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/defaults.py ADDED
@@ -0,0 +1,650 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .config import CfgNode as CN
+
+ # NOTE: given the new config system
+ # (https://detectron2.readthedocs.io/en/latest/tutorials/lazyconfigs.html),
+ # we will stop adding new functionalities to default CfgNode.
+
+ # -----------------------------------------------------------------------------
+ # Convention about Training / Test specific parameters
+ # -----------------------------------------------------------------------------
+ # Whenever an argument can be either used for training or for testing, the
+ # corresponding name will be post-fixed by a _TRAIN for a training parameter,
+ # or _TEST for a test-specific parameter.
+ # For example, the number of images during training will be
+ # IMAGES_PER_BATCH_TRAIN, while the number of images for testing will be
+ # IMAGES_PER_BATCH_TEST
+
+ # -----------------------------------------------------------------------------
+ # Config definition
+ # -----------------------------------------------------------------------------
+
+ _C = CN()
+
+ # The version number, to upgrade from old configs to new ones if any
+ # changes happen. It's recommended to keep a VERSION in your config file.
+ _C.VERSION = 2
+
+ _C.MODEL = CN()
+ _C.MODEL.LOAD_PROPOSALS = False
+ _C.MODEL.MASK_ON = False
+ _C.MODEL.KEYPOINT_ON = False
+ _C.MODEL.DEVICE = "cuda"
+ _C.MODEL.META_ARCHITECTURE = "GeneralizedRCNN"
+
+ # Path (a file path, or URL like detectron2://.., https://..) to a checkpoint file
+ # to be loaded to the model. You can find available models in the model zoo.
+ _C.MODEL.WEIGHTS = ""
+
+ # Values to be used for image normalization (BGR order, since INPUT.FORMAT defaults to BGR).
+ # To train on images of different number of channels, just set different mean & std.
+ # Default values are the mean pixel value from ImageNet: [103.53, 116.28, 123.675]
+ _C.MODEL.PIXEL_MEAN = [103.530, 116.280, 123.675]
+ # When using pre-trained models in Detectron1 or any MSRA models,
+ # std has been absorbed into its conv1 weights, so the std needs to be set 1.
+ # Otherwise, you can use [57.375, 57.120, 58.395] (ImageNet std)
+ _C.MODEL.PIXEL_STD = [1.0, 1.0, 1.0]
+
+
+ # -----------------------------------------------------------------------------
+ # INPUT
+ # -----------------------------------------------------------------------------
+ _C.INPUT = CN()
+ # By default, {MIN,MAX}_SIZE options are used in transforms.ResizeShortestEdge.
+ # Please refer to ResizeShortestEdge for detailed definition.
+ # Size of the smallest side of the image during training
+ _C.INPUT.MIN_SIZE_TRAIN = (800,)
+ # Sample size of smallest side by choice or random selection from range given by
+ # INPUT.MIN_SIZE_TRAIN
+ _C.INPUT.MIN_SIZE_TRAIN_SAMPLING = "choice"
+ # Maximum size of the side of the image during training
+ _C.INPUT.MAX_SIZE_TRAIN = 1333
+ # Size of the smallest side of the image during testing. Set to zero to disable resize in testing.
+ _C.INPUT.MIN_SIZE_TEST = 800
+ # Maximum size of the side of the image during testing
+ _C.INPUT.MAX_SIZE_TEST = 1333
+ # Mode for flipping images used in data augmentation during training
+ # choose one of ["horizontal", "vertical", "none"]
+ _C.INPUT.RANDOM_FLIP = "horizontal"
+
+ # `True` if cropping is used for data augmentation during training
+ _C.INPUT.CROP = CN({"ENABLED": False})
+ # Cropping type. See documentation of `detectron2.data.transforms.RandomCrop` for explanation.
+ _C.INPUT.CROP.TYPE = "relative_range"
+ # Size of crop in range (0, 1] if CROP.TYPE is "relative" or "relative_range" and in number of
+ # pixels if CROP.TYPE is "absolute"
+ _C.INPUT.CROP.SIZE = [0.9, 0.9]
+
+
+ # Whether the model needs RGB, YUV, HSV etc.
+ # Should be one of the modes defined here, as we use PIL to read the image:
+ # https://pillow.readthedocs.io/en/stable/handbook/concepts.html#concept-modes
+ # with BGR being the one exception. One can set image format to BGR, we will
+ # internally use RGB for conversion and flip the channels over
+ _C.INPUT.FORMAT = "BGR"
+ # The ground truth mask format that the model will use.
+ # Mask R-CNN supports either "polygon" or "bitmask" as ground truth.
+ _C.INPUT.MASK_FORMAT = "polygon"  # alternative: "bitmask"
+
+
+ # -----------------------------------------------------------------------------
+ # Dataset
+ # -----------------------------------------------------------------------------
+ _C.DATASETS = CN()
+ # List of the dataset names for training. Must be registered in DatasetCatalog
+ # Samples from these datasets will be merged and used as one dataset.
+ _C.DATASETS.TRAIN = ()
+ # List of the pre-computed proposal files for training, which must be consistent
+ # with datasets listed in DATASETS.TRAIN.
+ _C.DATASETS.PROPOSAL_FILES_TRAIN = ()
+ # Number of top scoring precomputed proposals to keep for training
+ _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN = 2000
+ # List of the dataset names for testing. Must be registered in DatasetCatalog
+ _C.DATASETS.TEST = ()
+ # List of the pre-computed proposal files for test, which must be consistent
+ # with datasets listed in DATASETS.TEST.
+ _C.DATASETS.PROPOSAL_FILES_TEST = ()
+ # Number of top scoring precomputed proposals to keep for test
+ _C.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST = 1000
+
+ # -----------------------------------------------------------------------------
+ # DataLoader
+ # -----------------------------------------------------------------------------
+ _C.DATALOADER = CN()
+ # Number of data loading threads
+ _C.DATALOADER.NUM_WORKERS = 4
+ # If True, each batch should contain only images for which the aspect ratio
+ # is compatible. This groups portrait images together, and landscape images
+ # are not batched with portrait images.
+ _C.DATALOADER.ASPECT_RATIO_GROUPING = True
+ # Options: TrainingSampler, RepeatFactorTrainingSampler
+ _C.DATALOADER.SAMPLER_TRAIN = "TrainingSampler"
+ # Repeat threshold for RepeatFactorTrainingSampler
+ _C.DATALOADER.REPEAT_THRESHOLD = 0.0
+ # If True, when working on datasets that have instance annotations, the
+ # training dataloader will filter out images without associated annotations
+ _C.DATALOADER.FILTER_EMPTY_ANNOTATIONS = True
+
+ # ---------------------------------------------------------------------------- #
+ # Backbone options
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.BACKBONE = CN()
+
+ _C.MODEL.BACKBONE.NAME = "build_resnet_backbone"
+ # Freeze the first several stages so they are not trained.
+ # There are 5 stages in ResNet. The first is a convolution, and the following
+ # stages are each group of residual blocks.
+ _C.MODEL.BACKBONE.FREEZE_AT = 2
+
+
+ # ---------------------------------------------------------------------------- #
+ # FPN options
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.FPN = CN()
+ # Names of the input feature maps to be used by FPN
+ # They must have contiguous power of 2 strides
+ # e.g., ["res2", "res3", "res4", "res5"]
+ _C.MODEL.FPN.IN_FEATURES = []
+ _C.MODEL.FPN.OUT_CHANNELS = 256
+
+ # Options: "" (no norm), "GN"
+ _C.MODEL.FPN.NORM = ""
+
+ # Types for fusing the FPN top-down and lateral features. Can be either "sum" or "avg"
+ _C.MODEL.FPN.FUSE_TYPE = "sum"
+
+
+ # ---------------------------------------------------------------------------- #
+ # Proposal generator options
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.PROPOSAL_GENERATOR = CN()
+ # Current proposal generators include "RPN", "RRPN" and "PrecomputedProposals"
+ _C.MODEL.PROPOSAL_GENERATOR.NAME = "RPN"
+ # Proposal height and width both need to be greater than MIN_SIZE
+ # (at the scale used during training or inference)
+ _C.MODEL.PROPOSAL_GENERATOR.MIN_SIZE = 0
+
+
+ # ---------------------------------------------------------------------------- #
+ # Anchor generator options
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.ANCHOR_GENERATOR = CN()
+ # The generator can be any name in the ANCHOR_GENERATOR registry
+ _C.MODEL.ANCHOR_GENERATOR.NAME = "DefaultAnchorGenerator"
+ # Anchor sizes (i.e. sqrt of area) in absolute pixels w.r.t. the network input.
+ # Format: list[list[float]]. SIZES[i] specifies the list of sizes to use for
+ # IN_FEATURES[i]; len(SIZES) must be equal to len(IN_FEATURES) or 1.
+ # When len(SIZES) == 1, SIZES[0] is used for all IN_FEATURES.
+ _C.MODEL.ANCHOR_GENERATOR.SIZES = [[32, 64, 128, 256, 512]]
+ # Anchor aspect ratios. For each area given in `SIZES`, anchors with different aspect
+ # ratios are generated by an anchor generator.
+ # Format: list[list[float]]. ASPECT_RATIOS[i] specifies the list of aspect ratios (H/W)
+ # to use for IN_FEATURES[i]; len(ASPECT_RATIOS) == len(IN_FEATURES) must be true,
+ # or len(ASPECT_RATIOS) == 1 is true and aspect ratio list ASPECT_RATIOS[0] is used
+ # for all IN_FEATURES.
+ _C.MODEL.ANCHOR_GENERATOR.ASPECT_RATIOS = [[0.5, 1.0, 2.0]]
+ # Anchor angles.
+ # list[list[float]], the angle in degrees, for each input feature map.
+ # ANGLES[i] specifies the list of angles for IN_FEATURES[i].
+ _C.MODEL.ANCHOR_GENERATOR.ANGLES = [[-90, 0, 90]]
+ # Relative offset between the center of the first anchor and the top-left corner of the image
+ # Value has to be in [0, 1). Recommend to use 0.5, which means half stride.
+ # The value is not expected to affect model accuracy.
+ _C.MODEL.ANCHOR_GENERATOR.OFFSET = 0.0
+
+ # ---------------------------------------------------------------------------- #
+ # RPN options
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.RPN = CN()
+ _C.MODEL.RPN.HEAD_NAME = "StandardRPNHead"  # used by RPN_HEAD_REGISTRY
+
+ # Names of the input feature maps to be used by RPN
+ # e.g., ["p2", "p3", "p4", "p5", "p6"] for FPN
+ _C.MODEL.RPN.IN_FEATURES = ["res4"]
+ # Remove RPN anchors that go outside the image by BOUNDARY_THRESH pixels
+ # Set to -1 or a large value, e.g. 100000, to disable pruning anchors
+ _C.MODEL.RPN.BOUNDARY_THRESH = -1
+ # IOU overlap ratios [BG_IOU_THRESHOLD, FG_IOU_THRESHOLD]
+ # Minimum overlap required between an anchor and ground-truth box for the
+ # (anchor, gt box) pair to be a positive example (IoU >= FG_IOU_THRESHOLD
+ # ==> positive RPN example: 1)
+ # Maximum overlap allowed between an anchor and ground-truth box for the
+ # (anchor, gt box) pair to be a negative example (IoU < BG_IOU_THRESHOLD
+ # ==> negative RPN example: 0)
+ # Anchors with overlap in between (BG_IOU_THRESHOLD <= IoU < FG_IOU_THRESHOLD)
+ # are ignored (-1)
+ _C.MODEL.RPN.IOU_THRESHOLDS = [0.3, 0.7]
+ _C.MODEL.RPN.IOU_LABELS = [0, -1, 1]
+ # Number of regions per image used to train RPN
+ _C.MODEL.RPN.BATCH_SIZE_PER_IMAGE = 256
+ # Target fraction of foreground (positive) examples per RPN minibatch
+ _C.MODEL.RPN.POSITIVE_FRACTION = 0.5
+ # Options are: "smooth_l1", "giou", "diou", "ciou"
+ _C.MODEL.RPN.BBOX_REG_LOSS_TYPE = "smooth_l1"
+ _C.MODEL.RPN.BBOX_REG_LOSS_WEIGHT = 1.0
+ # Weights on (dx, dy, dw, dh) for normalizing RPN anchor regression targets
+ _C.MODEL.RPN.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+ # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
+ _C.MODEL.RPN.SMOOTH_L1_BETA = 0.0
+ _C.MODEL.RPN.LOSS_WEIGHT = 1.0
+ # Number of top scoring RPN proposals to keep before applying NMS
+ # When FPN is used, this is *per FPN level* (not total)
+ _C.MODEL.RPN.PRE_NMS_TOPK_TRAIN = 12000
+ _C.MODEL.RPN.PRE_NMS_TOPK_TEST = 6000
+ # Number of top scoring RPN proposals to keep after applying NMS
+ # When FPN is used, this limit is applied per level and then again to the union
+ # of proposals from all levels
+ # NOTE: When FPN is used, the meaning of this config is different from Detectron1.
+ # It means per-batch topk in Detectron1, but per-image topk here.
+ # See the "find_top_rpn_proposals" function for details.
+ _C.MODEL.RPN.POST_NMS_TOPK_TRAIN = 2000
+ _C.MODEL.RPN.POST_NMS_TOPK_TEST = 1000
+ # NMS threshold used on RPN proposals
+ _C.MODEL.RPN.NMS_THRESH = 0.7
+ # Set this to -1 to use the same number of output channels as input channels.
+ _C.MODEL.RPN.CONV_DIMS = [-1]
+
+ # ---------------------------------------------------------------------------- #
+ # ROI HEADS options
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.ROI_HEADS = CN()
+ _C.MODEL.ROI_HEADS.NAME = "Res5ROIHeads"
+ # Number of foreground classes
+ _C.MODEL.ROI_HEADS.NUM_CLASSES = 80
+ # Names of the input feature maps to be used by ROI heads
+ # Currently all heads (box, mask, ...) use the same input feature map list
+ # e.g., ["p2", "p3", "p4", "p5"] is commonly used for FPN
+ _C.MODEL.ROI_HEADS.IN_FEATURES = ["res4"]
+ # IOU overlap ratios [IOU_THRESHOLD]
+ # Overlap threshold for an RoI to be considered background (if < IOU_THRESHOLD)
+ # Overlap threshold for an RoI to be considered foreground (if >= IOU_THRESHOLD)
+ _C.MODEL.ROI_HEADS.IOU_THRESHOLDS = [0.5]
+ _C.MODEL.ROI_HEADS.IOU_LABELS = [0, 1]
+ # RoI minibatch size *per image* (number of regions of interest [ROIs]) during training
+ # Total number of RoIs per training minibatch =
+ #   ROI_HEADS.BATCH_SIZE_PER_IMAGE * SOLVER.IMS_PER_BATCH
+ # E.g., a common configuration is: 512 * 16 = 8192
+ _C.MODEL.ROI_HEADS.BATCH_SIZE_PER_IMAGE = 512
+ # Target fraction of RoI minibatch that is labeled foreground (i.e. class > 0)
+ _C.MODEL.ROI_HEADS.POSITIVE_FRACTION = 0.25
+
+ # Only used on test mode
+
+ # Minimum score threshold (assuming scores in a [0, 1] range); a value chosen to
+ # balance obtaining high recall with not having too many low precision
+ # detections that will slow down inference post processing steps (like NMS)
+ # A default threshold of 0.0 increases AP by ~0.2-0.3 but significantly slows down
+ # inference.
+ _C.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.05
+ # Overlap threshold used for non-maximum suppression (suppress boxes with
+ # IoU >= this threshold)
+ _C.MODEL.ROI_HEADS.NMS_THRESH_TEST = 0.5
+ # If True, augment proposals with ground-truth boxes before sampling proposals to
+ # train ROI heads.
+ _C.MODEL.ROI_HEADS.PROPOSAL_APPEND_GT = True
+
+ # ---------------------------------------------------------------------------- #
+ # Box Head
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.ROI_BOX_HEAD = CN()
+ # C4 doesn't use the head name option
+ # Options for non-C4 models: FastRCNNConvFCHead,
+ _C.MODEL.ROI_BOX_HEAD.NAME = ""
+ # Options are: "smooth_l1", "giou", "diou", "ciou"
+ _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_TYPE = "smooth_l1"
+ # The final scaling coefficient on the box regression loss, used to balance the magnitude of its
+ # gradients with other losses in the model. See also `MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT`.
+ _C.MODEL.ROI_BOX_HEAD.BBOX_REG_LOSS_WEIGHT = 1.0
+ # Default weights on (dx, dy, dw, dh) for normalizing bbox regression targets
+ # These are empirically chosen to approximately lead to unit variance targets
+ _C.MODEL.ROI_BOX_HEAD.BBOX_REG_WEIGHTS = (10.0, 10.0, 5.0, 5.0)
+ # The transition point from L1 to L2 loss. Set to 0.0 to make the loss simply L1.
+ _C.MODEL.ROI_BOX_HEAD.SMOOTH_L1_BETA = 0.0
+ _C.MODEL.ROI_BOX_HEAD.POOLER_RESOLUTION = 14
+ _C.MODEL.ROI_BOX_HEAD.POOLER_SAMPLING_RATIO = 0
+ # Type of pooling operation applied to the incoming feature map for each RoI
+ _C.MODEL.ROI_BOX_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+ _C.MODEL.ROI_BOX_HEAD.NUM_FC = 0
+ # Hidden layer dimension for FC layers in the RoI box head
+ _C.MODEL.ROI_BOX_HEAD.FC_DIM = 1024
+ _C.MODEL.ROI_BOX_HEAD.NUM_CONV = 0
+ # Channel dimension for Conv layers in the RoI box head
+ _C.MODEL.ROI_BOX_HEAD.CONV_DIM = 256
+ # Normalization method for the convolution layers.
+ # Options: "" (no norm), "GN", "SyncBN".
+ _C.MODEL.ROI_BOX_HEAD.NORM = ""
+ # Whether to use class agnostic for bbox regression
+ _C.MODEL.ROI_BOX_HEAD.CLS_AGNOSTIC_BBOX_REG = False
+ # If true, RoI heads use bounding boxes predicted by the box head rather than proposal boxes.
+ _C.MODEL.ROI_BOX_HEAD.TRAIN_ON_PRED_BOXES = False
+
+ # Federated loss can be used to improve the training of LVIS
+ _C.MODEL.ROI_BOX_HEAD.USE_FED_LOSS = False
+ # Sigmoid cross entropy is used with federated loss
+ _C.MODEL.ROI_BOX_HEAD.USE_SIGMOID_CE = False
+ # The power value applied to image_count when calculating frequency weight
+ _C.MODEL.ROI_BOX_HEAD.FED_LOSS_FREQ_WEIGHT_POWER = 0.5
+ # Number of classes to keep in total
+ _C.MODEL.ROI_BOX_HEAD.FED_LOSS_NUM_CLASSES = 50
+
+ # ---------------------------------------------------------------------------- #
+ # Cascaded Box Head
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.ROI_BOX_CASCADE_HEAD = CN()
+ # The number of cascade stages is implicitly defined by the length of the following two configs.
+ _C.MODEL.ROI_BOX_CASCADE_HEAD.BBOX_REG_WEIGHTS = (
+     (10.0, 10.0, 5.0, 5.0),
+     (20.0, 20.0, 10.0, 10.0),
+     (30.0, 30.0, 15.0, 15.0),
+ )
+ _C.MODEL.ROI_BOX_CASCADE_HEAD.IOUS = (0.5, 0.6, 0.7)
+
+
+ # ---------------------------------------------------------------------------- #
+ # Mask Head
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.ROI_MASK_HEAD = CN()
+ _C.MODEL.ROI_MASK_HEAD.NAME = "MaskRCNNConvUpsampleHead"
+ _C.MODEL.ROI_MASK_HEAD.POOLER_RESOLUTION = 14
+ _C.MODEL.ROI_MASK_HEAD.POOLER_SAMPLING_RATIO = 0
+ _C.MODEL.ROI_MASK_HEAD.NUM_CONV = 0  # The number of convs in the mask head
+ _C.MODEL.ROI_MASK_HEAD.CONV_DIM = 256
+ # Normalization method for the convolution layers.
+ # Options: "" (no norm), "GN", "SyncBN".
+ _C.MODEL.ROI_MASK_HEAD.NORM = ""
+ # Whether to use class agnostic for mask prediction
+ _C.MODEL.ROI_MASK_HEAD.CLS_AGNOSTIC_MASK = False
+ # Type of pooling operation applied to the incoming feature map for each RoI
+ _C.MODEL.ROI_MASK_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+
+ # ---------------------------------------------------------------------------- #
+ # Keypoint Head
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.ROI_KEYPOINT_HEAD = CN()
+ _C.MODEL.ROI_KEYPOINT_HEAD.NAME = "KRCNNConvDeconvUpsampleHead"
+ _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_RESOLUTION = 14
+ _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_SAMPLING_RATIO = 0
+ _C.MODEL.ROI_KEYPOINT_HEAD.CONV_DIMS = tuple(512 for _ in range(8))
+ _C.MODEL.ROI_KEYPOINT_HEAD.NUM_KEYPOINTS = 17  # 17 is the number of keypoints in COCO.
+
+ # Images with too few (or no) keypoints are excluded from training.
+ _C.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE = 1
+ # Normalize by the total number of visible keypoints in the minibatch if True.
+ # Otherwise, normalize by the total number of keypoints that could ever exist
+ # in the minibatch.
+ # The keypoint softmax loss is only calculated on visible keypoints.
+ # Since the number of visible keypoints can vary significantly between
+ # minibatches, this has the effect of up-weighting the importance of
+ # minibatches with few visible keypoints. (Imagine the extreme case of
+ # only one visible keypoint versus N: in the case of N, each one
+ # contributes 1/N to the gradient compared to the single keypoint
+ # determining the gradient direction). Instead, we can normalize the
+ # loss by the total number of keypoints, if it were the case that all
+ # keypoints were visible in a full minibatch. (Returning to the example,
+ # this means that the one visible keypoint contributes as much as each
+ # of the N keypoints.)
+ _C.MODEL.ROI_KEYPOINT_HEAD.NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS = True
+ # Multi-task loss weight to use for keypoints
+ # Recommended values:
+ #   - use 1.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is True
+ #   - use 4.0 if NORMALIZE_LOSS_BY_VISIBLE_KEYPOINTS is False
+ _C.MODEL.ROI_KEYPOINT_HEAD.LOSS_WEIGHT = 1.0
+ # Type of pooling operation applied to the incoming feature map for each RoI
+ _C.MODEL.ROI_KEYPOINT_HEAD.POOLER_TYPE = "ROIAlignV2"
+
+ # ---------------------------------------------------------------------------- #
+ # Semantic Segmentation Head
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.SEM_SEG_HEAD = CN()
+ _C.MODEL.SEM_SEG_HEAD.NAME = "SemSegFPNHead"
+ _C.MODEL.SEM_SEG_HEAD.IN_FEATURES = ["p2", "p3", "p4", "p5"]
+ # Label in the semantic segmentation ground truth that is ignored, i.e., no loss is calculated for
+ # the corresponding pixel.
+ _C.MODEL.SEM_SEG_HEAD.IGNORE_VALUE = 255
+ # Number of classes in the semantic segmentation head
+ _C.MODEL.SEM_SEG_HEAD.NUM_CLASSES = 54
+ # Number of channels in the 3x3 convs inside semantic-FPN heads.
+ _C.MODEL.SEM_SEG_HEAD.CONVS_DIM = 128
+ # Outputs from semantic-FPN heads are up-scaled to the COMMON_STRIDE stride.
+ _C.MODEL.SEM_SEG_HEAD.COMMON_STRIDE = 4
+ # Normalization method for the convolution layers. Options: "" (no norm), "GN".
+ _C.MODEL.SEM_SEG_HEAD.NORM = "GN"
+ _C.MODEL.SEM_SEG_HEAD.LOSS_WEIGHT = 1.0
+
+ _C.MODEL.PANOPTIC_FPN = CN()
+ # Scaling of all losses from instance detection / segmentation head.
+ _C.MODEL.PANOPTIC_FPN.INSTANCE_LOSS_WEIGHT = 1.0
+
+ # options when combining instance & semantic segmentation outputs
+ _C.MODEL.PANOPTIC_FPN.COMBINE = CN({"ENABLED": True})  # "COMBINE.ENABLED" is deprecated & not used
+ _C.MODEL.PANOPTIC_FPN.COMBINE.OVERLAP_THRESH = 0.5
+ _C.MODEL.PANOPTIC_FPN.COMBINE.STUFF_AREA_LIMIT = 4096
+ _C.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = 0.5
+
+
+ # ---------------------------------------------------------------------------- #
+ # RetinaNet Head
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.RETINANET = CN()
+
+ # This is the number of foreground classes.
+ _C.MODEL.RETINANET.NUM_CLASSES = 80
+
+ _C.MODEL.RETINANET.IN_FEATURES = ["p3", "p4", "p5", "p6", "p7"]
+
+ # Convolutions to use in the cls and bbox tower
+ # NOTE: this doesn't include the last conv for logits
+ _C.MODEL.RETINANET.NUM_CONVS = 4
+
+ # IoU overlap ratio [bg, fg] for labeling anchors.
+ # Anchors with < bg are labeled negative (0)
+ # Anchors with >= bg and < fg are ignored (-1)
+ # Anchors with >= fg are labeled positive (1)
+ _C.MODEL.RETINANET.IOU_THRESHOLDS = [0.4, 0.5]
+ _C.MODEL.RETINANET.IOU_LABELS = [0, -1, 1]
+
+ # Prior prob for rare case (i.e. foreground) at the beginning of training.
+ # This is used to set the bias for the logits layer of the classifier subnet.
+ # This improves training stability in the case of heavy class imbalance.
+ _C.MODEL.RETINANET.PRIOR_PROB = 0.01
+
+ # Inference cls score threshold, only anchors with score > INFERENCE_TH are
+ # considered for inference (to improve speed)
+ _C.MODEL.RETINANET.SCORE_THRESH_TEST = 0.05
+ # Select topk candidates before NMS
+ _C.MODEL.RETINANET.TOPK_CANDIDATES_TEST = 1000
+ _C.MODEL.RETINANET.NMS_THRESH_TEST = 0.5
+
+ # Weights on (dx, dy, dw, dh) for normalizing Retinanet anchor regression targets
+ _C.MODEL.RETINANET.BBOX_REG_WEIGHTS = (1.0, 1.0, 1.0, 1.0)
+
+ # Loss parameters
+ _C.MODEL.RETINANET.FOCAL_LOSS_GAMMA = 2.0
+ _C.MODEL.RETINANET.FOCAL_LOSS_ALPHA = 0.25
+ _C.MODEL.RETINANET.SMOOTH_L1_LOSS_BETA = 0.1
+ # Options are: "smooth_l1", "giou", "diou", "ciou"
+ _C.MODEL.RETINANET.BBOX_REG_LOSS_TYPE = "smooth_l1"
+
+ # One of BN, SyncBN, FrozenBN, GN
+ # Only supports GN until unshared norm is implemented
+ _C.MODEL.RETINANET.NORM = ""
+
+
+ # ---------------------------------------------------------------------------- #
+ # ResNe[X]t options (ResNets = {ResNet, ResNeXt})
+ # Note that parts of a resnet may be used for both the backbone and the head
+ # These options apply to both
+ # ---------------------------------------------------------------------------- #
+ _C.MODEL.RESNETS = CN()
+
+ _C.MODEL.RESNETS.DEPTH = 50
+ _C.MODEL.RESNETS.OUT_FEATURES = ["res4"]  # res4 for C4 backbone, res2..5 for FPN backbone
+
+ # Number of groups to use; 1 ==> ResNet; > 1 ==> ResNeXt
+ _C.MODEL.RESNETS.NUM_GROUPS = 1
+
+ # Options: FrozenBN, GN, "SyncBN", "BN"
+ _C.MODEL.RESNETS.NORM = "FrozenBN"
+
+ # Baseline width of each group.
+ # Scaling this parameter will scale the width of all bottleneck layers.
+ _C.MODEL.RESNETS.WIDTH_PER_GROUP = 64
+
+ # Place the stride 2 conv on the 1x1 filter
+ # Use True only for the original MSRA ResNet; use False for C2 and Torch models
+ _C.MODEL.RESNETS.STRIDE_IN_1X1 = True
+
+ # Apply dilation in stage "res5"
+ _C.MODEL.RESNETS.RES5_DILATION = 1
+
+ # Output width of res2. Scaling this parameter will scale the width of all 1x1 convs in ResNet
+ # For R18 and R34, this needs to be set to 64
+ _C.MODEL.RESNETS.RES2_OUT_CHANNELS = 256
+ _C.MODEL.RESNETS.STEM_OUT_CHANNELS = 64
+
+ # Apply Deformable Convolution in stages
+ # Specify if apply deform_conv on Res2, Res3, Res4, Res5
+ _C.MODEL.RESNETS.DEFORM_ON_PER_STAGE = [False, False, False, False]
+ # Use True to use modulated deform_conv (DeformableV2, https://arxiv.org/abs/1811.11168);
+ # Use False for DeformableV1.
+ _C.MODEL.RESNETS.DEFORM_MODULATED = False
+ # Number of groups in deformable conv.
+ _C.MODEL.RESNETS.DEFORM_NUM_GROUPS = 1
+
+
+ # ---------------------------------------------------------------------------- #
+ # Solver
+ # ---------------------------------------------------------------------------- #
+ _C.SOLVER = CN()
+
+ # Options: WarmupMultiStepLR, WarmupCosineLR.
+ # See detectron2/solver/build.py for definition.
+ _C.SOLVER.LR_SCHEDULER_NAME = "WarmupMultiStepLR"
+
+ _C.SOLVER.MAX_ITER = 40000
+
+ _C.SOLVER.BASE_LR = 0.001
+ # The end lr, only used by WarmupCosineLR
+ _C.SOLVER.BASE_LR_END = 0.0
+
+ _C.SOLVER.MOMENTUM = 0.9
+
+ _C.SOLVER.NESTEROV = False
+
+ _C.SOLVER.WEIGHT_DECAY = 0.0001
+ # The weight decay that's applied to parameters of normalization layers
+ # (typically the affine transformation)
+ _C.SOLVER.WEIGHT_DECAY_NORM = 0.0
+
+ _C.SOLVER.GAMMA = 0.1
+ # The iteration number to decrease learning rate by GAMMA.
+ _C.SOLVER.STEPS = (30000,)
+ # Number of decays in WarmupStepWithFixedGammaLR schedule
+ _C.SOLVER.NUM_DECAYS = 3
546
+
547
+ _C.SOLVER.WARMUP_FACTOR = 1.0 / 1000
548
+ _C.SOLVER.WARMUP_ITERS = 1000
549
+ _C.SOLVER.WARMUP_METHOD = "linear"
550
+ # Whether to rescale the interval for the learning schedule after warmup
551
+ _C.SOLVER.RESCALE_INTERVAL = False
552
+
553
+ # Save a checkpoint after every this number of iterations
554
+ _C.SOLVER.CHECKPOINT_PERIOD = 5000
555
+
556
+ # Number of images per batch across all machines. This is also the number
557
+ # of training images per step (i.e. per iteration). If we use 16 GPUs
558
+ # and IMS_PER_BATCH = 32, each GPU will see 2 images per batch.
559
+ # May be adjusted automatically if REFERENCE_WORLD_SIZE is set.
560
+ _C.SOLVER.IMS_PER_BATCH = 16
561
+
562
+ # The reference number of workers (GPUs) this config is meant to train with.
563
+ # It takes no effect when set to 0.
564
+ # With a non-zero value, it will be used by DefaultTrainer to compute a desired
565
+ # per-worker batch size, and then scale the other related configs (total batch size,
566
+ # learning rate, etc) to match the per-worker batch size.
567
+ # See documentation of `DefaultTrainer.auto_scale_workers` for details:
568
+ _C.SOLVER.REFERENCE_WORLD_SIZE = 0
569
+
570
+ # Detectron v1 (and previous detection code) used a 2x higher LR and 0 WD for
571
+ # biases. This is not useful (at least for recent models). You should avoid
572
+ # changing these and they exist only to reproduce Detectron v1 training if
573
+ # desired.
574
+ _C.SOLVER.BIAS_LR_FACTOR = 1.0
575
+ _C.SOLVER.WEIGHT_DECAY_BIAS = None # None means following WEIGHT_DECAY
576
+
577
+ # Gradient clipping
578
+ _C.SOLVER.CLIP_GRADIENTS = CN({"ENABLED": False})
579
+ # Type of gradient clipping, currently 2 values are supported:
580
+ # - "value": the absolute values of elements of each gradients are clipped
581
+ # - "norm": the norm of the gradient for each parameter is clipped thus
582
+ # affecting all elements in the parameter
583
+ _C.SOLVER.CLIP_GRADIENTS.CLIP_TYPE = "value"
584
+ # Maximum absolute value used for clipping gradients
585
+ _C.SOLVER.CLIP_GRADIENTS.CLIP_VALUE = 1.0
586
+ # Floating point number p for L-p norm to be used with the "norm"
587
+ # gradient clipping type; for L-inf, please specify .inf
588
+ _C.SOLVER.CLIP_GRADIENTS.NORM_TYPE = 2.0
589
+
590
+ # Enable automatic mixed precision for training
591
+ # Note that this does not change model's inference behavior.
592
+ # To use AMP in inference, run inference under autocast()
593
+ _C.SOLVER.AMP = CN({"ENABLED": False})
594
+
595
+ # ---------------------------------------------------------------------------- #
596
+ # Specific test options
597
+ # ---------------------------------------------------------------------------- #
598
+ _C.TEST = CN()
599
+ # For end-to-end tests to verify the expected accuracy.
600
+ # Each item is [task, metric, value, tolerance]
601
+ # e.g.: [['bbox', 'AP', 38.5, 0.2]]
602
+ _C.TEST.EXPECTED_RESULTS = []
603
+ # The period (in terms of steps) to evaluate the model during training.
604
+ # Set to 0 to disable.
605
+ _C.TEST.EVAL_PERIOD = 0
606
+ # The sigmas used to calculate keypoint OKS. See http://cocodataset.org/#keypoints-eval
607
+ # When empty, it will use the defaults in COCO.
608
+ # Otherwise it should be a list[float] with the same length as ROI_KEYPOINT_HEAD.NUM_KEYPOINTS.
609
+ _C.TEST.KEYPOINT_OKS_SIGMAS = []
610
+ # Maximum number of detections to return per image during inference (100 is
611
+ # based on the limit established for the COCO dataset).
612
+ _C.TEST.DETECTIONS_PER_IMAGE = 100
613
+
614
+ _C.TEST.AUG = CN({"ENABLED": False})
615
+ _C.TEST.AUG.MIN_SIZES = (400, 500, 600, 700, 800, 900, 1000, 1100, 1200)
616
+ _C.TEST.AUG.MAX_SIZE = 4000
617
+ _C.TEST.AUG.FLIP = True
618
+
619
+ _C.TEST.PRECISE_BN = CN({"ENABLED": False})
620
+ _C.TEST.PRECISE_BN.NUM_ITER = 200
621
+
622
+ # ---------------------------------------------------------------------------- #
623
+ # Misc options
624
+ # ---------------------------------------------------------------------------- #
625
+ # Directory where output files are written
626
+ _C.OUTPUT_DIR = "./output"
627
+ # Set seed to negative to fully randomize everything.
628
+ # Set seed to positive to use a fixed seed. Note that a fixed seed increases
629
+ # reproducibility but does not guarantee fully deterministic behavior.
630
+ # Disabling all parallelism further increases reproducibility.
631
+ _C.SEED = -1
632
+ # Benchmark different cudnn algorithms.
633
+ # If input images have very different sizes, this option will have large overhead
634
+ # for about 10k iterations. It usually hurts total time, but can benefit for certain models.
635
+ # If input images have the same or similar sizes, benchmark is often helpful.
636
+ _C.CUDNN_BENCHMARK = False
637
+ # The period (in terms of steps) for minibatch visualization at train time.
638
+ # Set to 0 to disable.
639
+ _C.VIS_PERIOD = 0
640
+
641
+ # global config is for quick hack purposes.
642
+ # You can set them in command line or config files,
643
+ # and access it with:
644
+ #
645
+ # from annotator.oneformer.detectron2.config import global_cfg
646
+ # print(global_cfg.HACK)
647
+ #
648
+ # Do not commit any configs into it.
649
+ _C.GLOBAL = CN()
650
+ _C.GLOBAL.HACK = 1.0
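
For context, a minimal sketch of how these defaults are consumed downstream. It assumes this vendored copy keeps detectron2's usual `get_cfg()` entry point (which returns a clone of `_C`); the YAML filename is illustrative:

    from annotator.oneformer.detectron2.config import get_cfg  # assumed re-export, as in upstream detectron2

    cfg = get_cfg()                           # a clone of the _C defaults defined above
    cfg.merge_from_file("my_retinanet.yaml")  # hypothetical YAML overriding a few keys
    cfg.SOLVER.IMS_PER_BATCH = 8              # or override individual keys in code
    cfg.freeze()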
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/instantiate.py ADDED
@@ -0,0 +1,88 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ import collections.abc as abc
+ import dataclasses
+ import logging
+ from typing import Any
+
+ from annotator.oneformer.detectron2.utils.registry import _convert_target_to_string, locate
+
+ __all__ = ["dump_dataclass", "instantiate"]
+
+
+ def dump_dataclass(obj: Any):
+     """
+     Dump a dataclass recursively into a dict that can be later instantiated.
+
+     Args:
+         obj: a dataclass object
+
+     Returns:
+         dict
+     """
+     assert dataclasses.is_dataclass(obj) and not isinstance(
+         obj, type
+     ), "dump_dataclass() requires an instance of a dataclass."
+     ret = {"_target_": _convert_target_to_string(type(obj))}
+     for f in dataclasses.fields(obj):
+         v = getattr(obj, f.name)
+         if dataclasses.is_dataclass(v):
+             v = dump_dataclass(v)
+         if isinstance(v, (list, tuple)):
+             v = [dump_dataclass(x) if dataclasses.is_dataclass(x) else x for x in v]
+         ret[f.name] = v
+     return ret
+
+
+ def instantiate(cfg):
+     """
+     Recursively instantiate objects defined in dictionaries by
+     "_target_" and arguments.
+
+     Args:
+         cfg: a dict-like object with "_target_" that defines the caller, and
+             other keys that define the arguments
+
+     Returns:
+         object instantiated by cfg
+     """
+     from omegaconf import ListConfig, DictConfig, OmegaConf
+
+     if isinstance(cfg, ListConfig):
+         lst = [instantiate(x) for x in cfg]
+         return ListConfig(lst, flags={"allow_objects": True})
+     if isinstance(cfg, list):
+         # Specialize for list, because many classes take
+         # list[objects] as arguments, such as ResNet, DatasetMapper
+         return [instantiate(x) for x in cfg]
+
+     # If input is a DictConfig backed by dataclasses (i.e. omegaconf's structured config),
+     # instantiate it to the actual dataclass.
+     if isinstance(cfg, DictConfig) and dataclasses.is_dataclass(cfg._metadata.object_type):
+         return OmegaConf.to_object(cfg)
+
+     if isinstance(cfg, abc.Mapping) and "_target_" in cfg:
+         # conceptually equivalent to hydra.utils.instantiate(cfg) with _convert_=all,
+         # but faster: https://github.com/facebookresearch/hydra/issues/1200
+         cfg = {k: instantiate(v) for k, v in cfg.items()}
+         cls = cfg.pop("_target_")
+         cls = instantiate(cls)
+
+         if isinstance(cls, str):
+             cls_name = cls
+             cls = locate(cls_name)
+             assert cls is not None, cls_name
+         else:
+             try:
+                 cls_name = cls.__module__ + "." + cls.__qualname__
+             except Exception:
+                 # target could be anything, so the above could fail
+                 cls_name = str(cls)
+         assert callable(cls), f"_target_ {cls} does not define a callable object"
+         try:
+             return cls(**cfg)
+         except TypeError:
+             logger = logging.getLogger(__name__)
+             logger.error(f"Error when instantiating {cls_name}!")
+             raise
+     return cfg  # return as-is if we don't know what to do
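
A short usage sketch of the `_target_` convention that `instantiate` expects (not part of this commit; omegaconf must be installed since the function imports it internally, and the target below is illustrative):

    from annotator.oneformer.detectron2.config.instantiate import instantiate

    # Any importable callable can be named by "_target_"; remaining keys become kwargs.
    cfg = {"_target_": "collections.OrderedDict"}
    obj = instantiate(cfg)  # equivalent to calling collections.OrderedDict()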
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/config/lazy.py ADDED
@@ -0,0 +1,435 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ import ast
+ import builtins
+ import collections.abc as abc
+ import importlib
+ import inspect
+ import logging
+ import os
+ import uuid
+ from contextlib import contextmanager
+ from copy import deepcopy
+ from dataclasses import is_dataclass
+ from typing import List, Tuple, Union
+ import yaml
+ from omegaconf import DictConfig, ListConfig, OmegaConf, SCMode
+
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+ from annotator.oneformer.detectron2.utils.registry import _convert_target_to_string
+
+ __all__ = ["LazyCall", "LazyConfig"]
+
+
+ class LazyCall:
+     """
+     Wrap a callable so that when it's called, the call will not be executed,
+     but returns a dict that describes the call.
+
+     LazyCall object has to be called with only keyword arguments. Positional
+     arguments are not yet supported.
+
+     Examples:
+     ::
+         from annotator.oneformer.detectron2.config import instantiate, LazyCall
+
+         layer_cfg = LazyCall(nn.Conv2d)(in_channels=32, out_channels=32)
+         layer_cfg.out_channels = 64    # can edit it afterwards
+         layer = instantiate(layer_cfg)
+     """
+
+     def __init__(self, target):
+         if not (callable(target) or isinstance(target, (str, abc.Mapping))):
+             raise TypeError(
+                 f"target of LazyCall must be a callable or defines a callable! Got {target}"
+             )
+         self._target = target
+
+     def __call__(self, **kwargs):
+         if is_dataclass(self._target):
+             # omegaconf object cannot hold dataclass type
+             # https://github.com/omry/omegaconf/issues/784
+             target = _convert_target_to_string(self._target)
+         else:
+             target = self._target
+         kwargs["_target_"] = target
+
+         return DictConfig(content=kwargs, flags={"allow_objects": True})
+
+
+ def _visit_dict_config(cfg, func):
+     """
+     Apply func recursively to all DictConfig in cfg.
+     """
+     if isinstance(cfg, DictConfig):
+         func(cfg)
+         for v in cfg.values():
+             _visit_dict_config(v, func)
+     elif isinstance(cfg, ListConfig):
+         for v in cfg:
+             _visit_dict_config(v, func)
+
+
+ def _validate_py_syntax(filename):
+     # see also https://github.com/open-mmlab/mmcv/blob/master/mmcv/utils/config.py
+     with PathManager.open(filename, "r") as f:
+         content = f.read()
+     try:
+         ast.parse(content)
+     except SyntaxError as e:
+         raise SyntaxError(f"Config file {filename} has syntax error!") from e
+
+
+ def _cast_to_config(obj):
+     # if given a dict, return DictConfig instead
+     if isinstance(obj, dict):
+         return DictConfig(obj, flags={"allow_objects": True})
+     return obj
+
+
+ _CFG_PACKAGE_NAME = "detectron2._cfg_loader"
+ """
+ A namespace to put all imported config into.
+ """
+
+
+ def _random_package_name(filename):
+     # generate a random package name when loading config files
+     return _CFG_PACKAGE_NAME + str(uuid.uuid4())[:4] + "." + os.path.basename(filename)
+
+
+ @contextmanager
+ def _patch_import():
+     """
+     Enhance relative import statements in config files, so that they:
+     1. locate files purely based on relative location, regardless of packages.
+        e.g. you can import file without having __init__
+     2. do not cache modules globally; modifications of module state have no side effect
+     3. support other storage system through PathManager, so config files can be in the cloud
+     4. imported dicts are turned into omegaconf.DictConfig automatically
+     """
+     old_import = builtins.__import__
+
+     def find_relative_file(original_file, relative_import_path, level):
+         # NOTE: "from . import x" is not handled. Because then it's unclear
+         # if such import should produce `x` as a python module or DictConfig.
+         # This can be discussed further if needed.
+         relative_import_err = """
+ Relative import of directories is not allowed within config files.
+ Within a config file, relative import can only import other config files.
+ """.replace(
+             "\n", " "
+         )
+         if not len(relative_import_path):
+             raise ImportError(relative_import_err)
+
+         cur_file = os.path.dirname(original_file)
+         for _ in range(level - 1):
+             cur_file = os.path.dirname(cur_file)
+         cur_name = relative_import_path.lstrip(".")
+         for part in cur_name.split("."):
+             cur_file = os.path.join(cur_file, part)
+         if not cur_file.endswith(".py"):
+             cur_file += ".py"
+         if not PathManager.isfile(cur_file):
+             cur_file_no_suffix = cur_file[: -len(".py")]
+             if PathManager.isdir(cur_file_no_suffix):
+                 raise ImportError(f"Cannot import from {cur_file_no_suffix}." + relative_import_err)
+             else:
+                 raise ImportError(
+                     f"Cannot import name {relative_import_path} from "
+                     f"{original_file}: {cur_file} does not exist."
+                 )
+         return cur_file
+
+     def new_import(name, globals=None, locals=None, fromlist=(), level=0):
+         if (
+             # Only deal with relative imports inside config files
+             level != 0
+             and globals is not None
+             and (globals.get("__package__", "") or "").startswith(_CFG_PACKAGE_NAME)
+         ):
+             cur_file = find_relative_file(globals["__file__"], name, level)
+             _validate_py_syntax(cur_file)
+             spec = importlib.machinery.ModuleSpec(
+                 _random_package_name(cur_file), None, origin=cur_file
+             )
+             module = importlib.util.module_from_spec(spec)
+             module.__file__ = cur_file
+             with PathManager.open(cur_file) as f:
+                 content = f.read()
+             exec(compile(content, cur_file, "exec"), module.__dict__)
+             for name in fromlist:  # turn imported dict into DictConfig automatically
+                 val = _cast_to_config(module.__dict__[name])
+                 module.__dict__[name] = val
+             return module
+         return old_import(name, globals, locals, fromlist=fromlist, level=level)
+
+     builtins.__import__ = new_import
+     yield new_import
+     builtins.__import__ = old_import
+
+
+ class LazyConfig:
+     """
+     Provide methods to save, load, and override an omegaconf config object
+     which may contain definition of lazily-constructed objects.
+     """
+
+     @staticmethod
+     def load_rel(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+         """
+         Similar to :meth:`load()`, but load path relative to the caller's
+         source file.
+
+         This has the same functionality as a relative import, except that this method
+         accepts filename as a string, so more characters are allowed in the filename.
+         """
+         caller_frame = inspect.stack()[1]
+         caller_fname = caller_frame[0].f_code.co_filename
+         assert caller_fname != "<string>", "load_rel unable to find caller"
+         caller_dir = os.path.dirname(caller_fname)
+         filename = os.path.join(caller_dir, filename)
+         return LazyConfig.load(filename, keys)
+
+     @staticmethod
+     def load(filename: str, keys: Union[None, str, Tuple[str, ...]] = None):
+         """
+         Load a config file.
+
+         Args:
+             filename: absolute path or relative path w.r.t. the current working directory
+             keys: keys to load and return. If not given, return all keys
+                 (whose values are config objects) in a dict.
+         """
+         has_keys = keys is not None
+         filename = filename.replace("/./", "/")  # redundant
+         if os.path.splitext(filename)[1] not in [".py", ".yaml", ".yml"]:
+             raise ValueError(f"Config file {filename} has to be a python or yaml file.")
+         if filename.endswith(".py"):
+             _validate_py_syntax(filename)
+
+             with _patch_import():
+                 # Record the filename
+                 module_namespace = {
+                     "__file__": filename,
+                     "__package__": _random_package_name(filename),
+                 }
+                 with PathManager.open(filename) as f:
+                     content = f.read()
+                 # Compile first with filename to:
+                 # 1. make filename appear in stacktrace
+                 # 2. make load_rel able to find its parent's (possibly remote) location
+                 exec(compile(content, filename, "exec"), module_namespace)
+
+             ret = module_namespace
+         else:
+             with PathManager.open(filename) as f:
+                 obj = yaml.unsafe_load(f)
+             ret = OmegaConf.create(obj, flags={"allow_objects": True})
+
+         if has_keys:
+             if isinstance(keys, str):
+                 return _cast_to_config(ret[keys])
+             else:
+                 return tuple(_cast_to_config(ret[a]) for a in keys)
+         else:
+             if filename.endswith(".py"):
+                 # when not specified, only load those that are config objects
+                 ret = DictConfig(
+                     {
+                         name: _cast_to_config(value)
+                         for name, value in ret.items()
+                         if isinstance(value, (DictConfig, ListConfig, dict))
+                         and not name.startswith("_")
+                     },
+                     flags={"allow_objects": True},
+                 )
+             return ret
+
+     @staticmethod
+     def save(cfg, filename: str):
+         """
+         Save a config object to a yaml file.
+         Note that when the config dictionary contains complex objects (e.g. lambda),
+         it can't be saved to yaml. In that case we will print an error and
+         attempt to save to a pkl file instead.
+
+         Args:
+             cfg: an omegaconf config object
+             filename: yaml file name to save the config file
+         """
+         logger = logging.getLogger(__name__)
+         try:
+             cfg = deepcopy(cfg)
+         except Exception:
+             pass
+         else:
+             # if it's deep-copyable, then...
+             def _replace_type_by_name(x):
+                 if "_target_" in x and callable(x._target_):
+                     try:
+                         x._target_ = _convert_target_to_string(x._target_)
+                     except AttributeError:
+                         pass
+
+             # not necessary, but makes the yaml look nicer
+             _visit_dict_config(cfg, _replace_type_by_name)
+
+         save_pkl = False
+         try:
+             dict = OmegaConf.to_container(
+                 cfg,
+                 # Do not resolve interpolation when saving, i.e. do not turn ${a} into
+                 # actual values when saving.
+                 resolve=False,
+                 # Save structures (dataclasses) in a format that can be instantiated later.
+                 # Without this option, the type information of the dataclass will be erased.
+                 structured_config_mode=SCMode.INSTANTIATE,
+             )
+             dumped = yaml.dump(dict, default_flow_style=None, allow_unicode=True, width=9999)
+             with PathManager.open(filename, "w") as f:
+                 f.write(dumped)
+
+             try:
+                 _ = yaml.unsafe_load(dumped)  # test that it is loadable
+             except Exception:
+                 logger.warning(
+                     "The config contains objects that cannot be serialized to valid yaml. "
+                     f"{filename} is human-readable but cannot be loaded."
+                 )
+                 save_pkl = True
+         except Exception:
+             logger.exception("Unable to serialize the config to yaml. Error:")
+             save_pkl = True
+
+         if save_pkl:
+             new_filename = filename + ".pkl"
+             # try:
+             #     # retry by pickle
+             #     with PathManager.open(new_filename, "wb") as f:
+             #         cloudpickle.dump(cfg, f)
+             #     logger.warning(f"Config is saved using cloudpickle at {new_filename}.")
+             # except Exception:
+             #     pass
+
+     @staticmethod
+     def apply_overrides(cfg, overrides: List[str]):
+         """
+         In-place override contents of cfg.
+
+         Args:
+             cfg: an omegaconf config object
+             overrides: list of strings in the format of "a=b" to override configs.
+                 See https://hydra.cc/docs/next/advanced/override_grammar/basic/
+                 for syntax.
+
+         Returns:
+             the cfg object
+         """
+
+         def safe_update(cfg, key, value):
+             parts = key.split(".")
+             for idx in range(1, len(parts)):
+                 prefix = ".".join(parts[:idx])
+                 v = OmegaConf.select(cfg, prefix, default=None)
+                 if v is None:
+                     break
+                 if not OmegaConf.is_config(v):
+                     raise KeyError(
+                         f"Trying to update key {key}, but {prefix} "
+                         f"is not a config, but has type {type(v)}."
+                     )
+             OmegaConf.update(cfg, key, value, merge=True)
+
+         try:
+             from hydra.core.override_parser.overrides_parser import OverridesParser
+
+             has_hydra = True
+         except ImportError:
+             has_hydra = False
+
+         if has_hydra:
+             parser = OverridesParser.create()
+             overrides = parser.parse_overrides(overrides)
+             for o in overrides:
+                 key = o.key_or_group
+                 value = o.value()
+                 if o.is_delete():
+                     # TODO support this
+                     raise NotImplementedError("deletion is not yet a supported override")
+                 safe_update(cfg, key, value)
+         else:
+             # Fallback. Does not support all the features and error checking like hydra.
+             for o in overrides:
+                 key, value = o.split("=")
+                 try:
+                     value = eval(value, {})
+                 except NameError:
+                     pass
+                 safe_update(cfg, key, value)
+         return cfg
+
+     # @staticmethod
+     # def to_py(cfg, prefix: str = "cfg."):
+     #     """
+     #     Try to convert a config object into Python-like pseudo code.
+     #
+     #     Note that perfect conversion is not always possible. So the returned
+     #     results are mainly meant to be human-readable, and not meant to be executed.
+     #
+     #     Args:
+     #         cfg: an omegaconf config object
+     #         prefix: root name for the resulting code (default: "cfg.")
+     #
+     #
+     #     Returns:
+     #         str of formatted Python code
+     #     """
+     #     import black
+     #
+     #     cfg = OmegaConf.to_container(cfg, resolve=True)
+     #
+     #     def _to_str(obj, prefix=None, inside_call=False):
+     #         if prefix is None:
+     #             prefix = []
+     #         if isinstance(obj, abc.Mapping) and "_target_" in obj:
+     #             # Dict representing a function call
+     #             target = _convert_target_to_string(obj.pop("_target_"))
+     #             args = []
+     #             for k, v in sorted(obj.items()):
+     #                 args.append(f"{k}={_to_str(v, inside_call=True)}")
+     #             args = ", ".join(args)
+     #             call = f"{target}({args})"
+     #             return "".join(prefix) + call
+     #         elif isinstance(obj, abc.Mapping) and not inside_call:
+     #             # Dict that is not inside a call is a list of top-level config objects that we
+     #             # render as one object per line with dot separated prefixes
+     #             key_list = []
+     #             for k, v in sorted(obj.items()):
+     #                 if isinstance(v, abc.Mapping) and "_target_" not in v:
+     #                     key_list.append(_to_str(v, prefix=prefix + [k + "."]))
+     #                 else:
+     #                     key = "".join(prefix) + k
+     #                     key_list.append(f"{key}={_to_str(v)}")
+     #             return "\n".join(key_list)
+     #         elif isinstance(obj, abc.Mapping):
+     #             # Dict that is inside a call is rendered as a regular dict
+     #             return (
+     #                 "{"
+     #                 + ",".join(
+     #                     f"{repr(k)}: {_to_str(v, inside_call=inside_call)}"
+     #                     for k, v in sorted(obj.items())
+     #                 )
+     #                 + "}"
+     #             )
+     #         elif isinstance(obj, list):
+     #             return "[" + ",".join(_to_str(x, inside_call=inside_call) for x in obj) + "]"
+     #         else:
+     #             return repr(obj)
+     #
+     #     py_str = _to_str(cfg, prefix=[prefix])
+     #     try:
+     #         return black.format_str(py_str, mode=black.Mode())
+     #     except black.InvalidInput:
+     #         return py_str
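
Taken together with `instantiate`, a brief sketch of the lazy-config workflow these classes enable (torch and omegaconf are assumed installed; the layer parameters are illustrative):

    import torch.nn as nn
    from annotator.oneformer.detectron2.config.instantiate import instantiate
    from annotator.oneformer.detectron2.config.lazy import LazyCall

    conv = LazyCall(nn.Conv2d)(in_channels=16, out_channels=16, kernel_size=3)
    conv.out_channels = 32     # still a DictConfig at this point, freely editable
    layer = instantiate(conv)  # only now is nn.Conv2d actually constructed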
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/__init__.py ADDED
@@ -0,0 +1,19 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from . import transforms  # isort:skip
+
+ from .build import (
+     build_batch_data_loader,
+     build_detection_test_loader,
+     build_detection_train_loader,
+     get_detection_dataset_dicts,
+     load_proposals_into_dataset,
+     print_instances_class_histogram,
+ )
+ from .catalog import DatasetCatalog, MetadataCatalog, Metadata
+ from .common import DatasetFromList, MapDataset, ToIterableDataset
+ from .dataset_mapper import DatasetMapper
+
+ # ensure the builtin datasets are registered
+ from . import datasets, samplers  # isort:skip
+
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
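
For orientation, a minimal sketch of how the catalogs re-exported here are used to register a custom dataset, mirroring the standard detectron2 pattern (the dataset name and fields below are made up):

    from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog

    # A loader returning dataset dicts in the standard format (illustrative content).
    DatasetCatalog.register("my_tiny_set", lambda: [{"file_name": "img.jpg", "image_id": 0}])
    MetadataCatalog.get("my_tiny_set").thing_classes = ["widget"]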
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/benchmark.py ADDED
@@ -0,0 +1,225 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import numpy as np
+ from itertools import count
+ from typing import List, Tuple
+ import torch
+ import tqdm
+ from fvcore.common.timer import Timer
+
+ from annotator.oneformer.detectron2.utils import comm
+
+ from .build import build_batch_data_loader
+ from .common import DatasetFromList, MapDataset
+ from .samplers import TrainingSampler
+
+ logger = logging.getLogger(__name__)
+
+
+ class _EmptyMapDataset(torch.utils.data.Dataset):
+     """
+     Map anything to emptiness.
+     """
+
+     def __init__(self, dataset):
+         self.ds = dataset
+
+     def __len__(self):
+         return len(self.ds)
+
+     def __getitem__(self, idx):
+         _ = self.ds[idx]
+         return [0]
+
+
+ def iter_benchmark(
+     iterator, num_iter: int, warmup: int = 5, max_time_seconds: float = 60
+ ) -> Tuple[float, List[float]]:
+     """
+     Benchmark an iterator/iterable for `num_iter` iterations with an extra
+     `warmup` iterations of warmup.
+     End early if `max_time_seconds` time is spent on iterations.
+
+     Returns:
+         float: average time (seconds) per iteration
+         list[float]: time spent on each iteration. Sometimes useful for further analysis.
+     """
+     num_iter, warmup = int(num_iter), int(warmup)
+
+     iterator = iter(iterator)
+     for _ in range(warmup):
+         next(iterator)
+     timer = Timer()
+     all_times = []
+     for curr_iter in tqdm.trange(num_iter):
+         start = timer.seconds()
+         if start > max_time_seconds:
+             num_iter = curr_iter
+             break
+         next(iterator)
+         all_times.append(timer.seconds() - start)
+     avg = timer.seconds() / num_iter
+     return avg, all_times
+
+
+ class DataLoaderBenchmark:
+     """
+     Some common benchmarks that help understand the perf bottlenecks of a standard
+     dataloader made of dataset, mapper and sampler.
+     """
+
+     def __init__(
+         self,
+         dataset,
+         *,
+         mapper,
+         sampler=None,
+         total_batch_size,
+         num_workers=0,
+         max_time_seconds: int = 90,
+     ):
+         """
+         Args:
+             max_time_seconds (int): maximum time to spend for each benchmark
+             other args: same as in `build.py:build_detection_train_loader`
+         """
+         if isinstance(dataset, list):
+             dataset = DatasetFromList(dataset, copy=False, serialize=True)
+         if sampler is None:
+             sampler = TrainingSampler(len(dataset))
+
+         self.dataset = dataset
+         self.mapper = mapper
+         self.sampler = sampler
+         self.total_batch_size = total_batch_size
+         self.num_workers = num_workers
+         self.per_gpu_batch_size = self.total_batch_size // comm.get_world_size()
+
+         self.max_time_seconds = max_time_seconds
+
+     def _benchmark(self, iterator, num_iter, warmup, msg=None):
+         avg, all_times = iter_benchmark(iterator, num_iter, warmup, self.max_time_seconds)
+         if msg is not None:
+             self._log_time(msg, avg, all_times)
+         return avg, all_times
+
+     def _log_time(self, msg, avg, all_times, distributed=False):
+         percentiles = [np.percentile(all_times, k, interpolation="nearest") for k in [1, 5, 95, 99]]
+         if not distributed:
+             logger.info(
+                 f"{msg}: avg={1.0/avg:.1f} it/s, "
+                 f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
+                 f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
+             )
+             return
+         avg_per_gpu = comm.all_gather(avg)
+         percentiles_per_gpu = comm.all_gather(percentiles)
+         if comm.get_rank() > 0:
+             return
+         for idx, avg, percentiles in zip(count(), avg_per_gpu, percentiles_per_gpu):
+             logger.info(
+                 f"GPU{idx} {msg}: avg={1.0/avg:.1f} it/s, "
+                 f"p1={percentiles[0]:.2g}s, p5={percentiles[1]:.2g}s, "
+                 f"p95={percentiles[2]:.2g}s, p99={percentiles[3]:.2g}s."
+             )
+
+     def benchmark_dataset(self, num_iter, warmup=5):
+         """
+         Benchmark the speed of taking raw samples from the dataset.
+         """
+
+         def loader():
+             while True:
+                 for k in self.sampler:
+                     yield self.dataset[k]
+
+         self._benchmark(loader(), num_iter, warmup, "Dataset Alone")
+
+     def benchmark_mapper(self, num_iter, warmup=5):
+         """
+         Benchmark the speed of taking raw samples from the dataset and mapping
+         them in a single process.
+         """
+
+         def loader():
+             while True:
+                 for k in self.sampler:
+                     yield self.mapper(self.dataset[k])
+
+         self._benchmark(loader(), num_iter, warmup, "Single Process Mapper (sec/sample)")
+
+     def benchmark_workers(self, num_iter, warmup=10):
+         """
+         Benchmark the dataloader by tuning num_workers to [0, 1, self.num_workers].
+         """
+         candidates = [0, 1]
+         if self.num_workers not in candidates:
+             candidates.append(self.num_workers)
+
+         dataset = MapDataset(self.dataset, self.mapper)
+         for n in candidates:
+             loader = build_batch_data_loader(
+                 dataset,
+                 self.sampler,
+                 self.total_batch_size,
+                 num_workers=n,
+             )
+             self._benchmark(
+                 iter(loader),
+                 num_iter * max(n, 1),
+                 warmup * max(n, 1),
+                 f"DataLoader ({n} workers, bs={self.per_gpu_batch_size})",
+             )
+             del loader
+
+     def benchmark_IPC(self, num_iter, warmup=10):
+         """
+         Benchmark the dataloader where each worker outputs nothing. This
+         eliminates the IPC overhead compared to the regular dataloader.
+
+         PyTorch multiprocessing's IPC only optimizes for torch tensors.
+         Large numpy arrays or other data structures may incur large IPC overhead.
+         """
+         n = self.num_workers
+         dataset = _EmptyMapDataset(MapDataset(self.dataset, self.mapper))
+         loader = build_batch_data_loader(
+             dataset, self.sampler, self.total_batch_size, num_workers=n
+         )
+         self._benchmark(
+             iter(loader),
+             num_iter * max(n, 1),
+             warmup * max(n, 1),
+             f"DataLoader ({n} workers, bs={self.per_gpu_batch_size}) w/o comm",
+         )
+
+     def benchmark_distributed(self, num_iter, warmup=10):
+         """
+         Benchmark the dataloader in each distributed worker, and log results of
+         all workers. This helps understand the final performance as well as
+         the variances among workers.
+
+         It also prints startup time (first iter) of the dataloader.
+         """
+         gpu = comm.get_world_size()
+         dataset = MapDataset(self.dataset, self.mapper)
+         n = self.num_workers
+         loader = build_batch_data_loader(
+             dataset, self.sampler, self.total_batch_size, num_workers=n
+         )
+
+         timer = Timer()
+         loader = iter(loader)
+         next(loader)
+         startup_time = timer.seconds()
+         logger.info("Dataloader startup time: {:.2f} seconds".format(startup_time))
+
+         comm.synchronize()
+
+         avg, all_times = self._benchmark(loader, num_iter * max(n, 1), warmup * max(n, 1))
+         del loader
+         self._log_time(
+             f"DataLoader ({gpu} GPUs x {n} workers, total bs={self.total_batch_size})",
+             avg,
+             all_times,
+             True,
+         )
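
A hedged usage sketch for `DataLoaderBenchmark` (single process; `cfg` stands for a populated config built from the defaults earlier in this commit, and the dataset name assumes a registered dataset, so both are placeholders):

    from annotator.oneformer.detectron2.data import DatasetMapper, get_detection_dataset_dicts
    from annotator.oneformer.detectron2.data.benchmark import DataLoaderBenchmark

    dicts = get_detection_dataset_dicts("coco_2017_val", filter_empty=False)  # assumed registered
    bench = DataLoaderBenchmark(
        dicts, mapper=DatasetMapper(cfg, True), total_batch_size=2, num_workers=2
    )
    bench.benchmark_dataset(100)  # raw dataset reads
    bench.benchmark_mapper(100)   # dataset reads plus mapping, in one process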
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/build.py ADDED
@@ -0,0 +1,556 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import itertools
3
+ import logging
4
+ import numpy as np
5
+ import operator
6
+ import pickle
7
+ from typing import Any, Callable, Dict, List, Optional, Union
8
+ import torch
9
+ import torch.utils.data as torchdata
10
+ from tabulate import tabulate
11
+ from termcolor import colored
12
+
13
+ from annotator.oneformer.detectron2.config import configurable
14
+ from annotator.oneformer.detectron2.structures import BoxMode
15
+ from annotator.oneformer.detectron2.utils.comm import get_world_size
16
+ from annotator.oneformer.detectron2.utils.env import seed_all_rng
17
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
18
+ from annotator.oneformer.detectron2.utils.logger import _log_api_usage, log_first_n
19
+
20
+ from .catalog import DatasetCatalog, MetadataCatalog
21
+ from .common import AspectRatioGroupedDataset, DatasetFromList, MapDataset, ToIterableDataset
22
+ from .dataset_mapper import DatasetMapper
23
+ from .detection_utils import check_metadata_consistency
24
+ from .samplers import (
25
+ InferenceSampler,
26
+ RandomSubsetTrainingSampler,
27
+ RepeatFactorTrainingSampler,
28
+ TrainingSampler,
29
+ )
30
+
31
+ """
32
+ This file contains the default logic to build a dataloader for training or testing.
33
+ """
34
+
35
+ __all__ = [
36
+ "build_batch_data_loader",
37
+ "build_detection_train_loader",
38
+ "build_detection_test_loader",
39
+ "get_detection_dataset_dicts",
40
+ "load_proposals_into_dataset",
41
+ "print_instances_class_histogram",
42
+ ]
43
+
44
+
45
+ def filter_images_with_only_crowd_annotations(dataset_dicts):
46
+ """
47
+ Filter out images with none annotations or only crowd annotations
48
+ (i.e., images without non-crowd annotations).
49
+ A common training-time preprocessing on COCO dataset.
50
+
51
+ Args:
52
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
53
+
54
+ Returns:
55
+ list[dict]: the same format, but filtered.
56
+ """
57
+ num_before = len(dataset_dicts)
58
+
59
+ def valid(anns):
60
+ for ann in anns:
61
+ if ann.get("iscrowd", 0) == 0:
62
+ return True
63
+ return False
64
+
65
+ dataset_dicts = [x for x in dataset_dicts if valid(x["annotations"])]
66
+ num_after = len(dataset_dicts)
67
+ logger = logging.getLogger(__name__)
68
+ logger.info(
69
+ "Removed {} images with no usable annotations. {} images left.".format(
70
+ num_before - num_after, num_after
71
+ )
72
+ )
73
+ return dataset_dicts
74
+
75
+
76
+ def filter_images_with_few_keypoints(dataset_dicts, min_keypoints_per_image):
77
+ """
78
+ Filter out images with too few number of keypoints.
79
+
80
+ Args:
81
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
82
+
83
+ Returns:
84
+ list[dict]: the same format as dataset_dicts, but filtered.
85
+ """
86
+ num_before = len(dataset_dicts)
87
+
88
+ def visible_keypoints_in_image(dic):
89
+ # Each keypoints field has the format [x1, y1, v1, ...], where v is visibility
90
+ annotations = dic["annotations"]
91
+ return sum(
92
+ (np.array(ann["keypoints"][2::3]) > 0).sum()
93
+ for ann in annotations
94
+ if "keypoints" in ann
95
+ )
96
+
97
+ dataset_dicts = [
98
+ x for x in dataset_dicts if visible_keypoints_in_image(x) >= min_keypoints_per_image
99
+ ]
100
+ num_after = len(dataset_dicts)
101
+ logger = logging.getLogger(__name__)
102
+ logger.info(
103
+ "Removed {} images with fewer than {} keypoints.".format(
104
+ num_before - num_after, min_keypoints_per_image
105
+ )
106
+ )
107
+ return dataset_dicts
108
+
109
+
110
+ def load_proposals_into_dataset(dataset_dicts, proposal_file):
111
+ """
112
+ Load precomputed object proposals into the dataset.
113
+
114
+ The proposal file should be a pickled dict with the following keys:
115
+
116
+ - "ids": list[int] or list[str], the image ids
117
+ - "boxes": list[np.ndarray], each is an Nx4 array of boxes corresponding to the image id
118
+ - "objectness_logits": list[np.ndarray], each is an N sized array of objectness scores
119
+ corresponding to the boxes.
120
+ - "bbox_mode": the BoxMode of the boxes array. Defaults to ``BoxMode.XYXY_ABS``.
121
+
122
+ Args:
123
+ dataset_dicts (list[dict]): annotations in Detectron2 Dataset format.
124
+ proposal_file (str): file path of pre-computed proposals, in pkl format.
125
+
126
+ Returns:
127
+ list[dict]: the same format as dataset_dicts, but added proposal field.
128
+ """
129
+ logger = logging.getLogger(__name__)
130
+ logger.info("Loading proposals from: {}".format(proposal_file))
131
+
132
+ with PathManager.open(proposal_file, "rb") as f:
133
+ proposals = pickle.load(f, encoding="latin1")
134
+
135
+ # Rename the key names in D1 proposal files
136
+ rename_keys = {"indexes": "ids", "scores": "objectness_logits"}
137
+ for key in rename_keys:
138
+ if key in proposals:
139
+ proposals[rename_keys[key]] = proposals.pop(key)
140
+
141
+ # Fetch the indexes of all proposals that are in the dataset
142
+ # Convert image_id to str since they could be int.
143
+ img_ids = set({str(record["image_id"]) for record in dataset_dicts})
144
+ id_to_index = {str(id): i for i, id in enumerate(proposals["ids"]) if str(id) in img_ids}
145
+
146
+ # Assuming default bbox_mode of precomputed proposals are 'XYXY_ABS'
147
+ bbox_mode = BoxMode(proposals["bbox_mode"]) if "bbox_mode" in proposals else BoxMode.XYXY_ABS
148
+
149
+ for record in dataset_dicts:
150
+ # Get the index of the proposal
151
+ i = id_to_index[str(record["image_id"])]
152
+
153
+ boxes = proposals["boxes"][i]
154
+ objectness_logits = proposals["objectness_logits"][i]
155
+ # Sort the proposals in descending order of the scores
156
+ inds = objectness_logits.argsort()[::-1]
157
+ record["proposal_boxes"] = boxes[inds]
158
+ record["proposal_objectness_logits"] = objectness_logits[inds]
159
+ record["proposal_bbox_mode"] = bbox_mode
160
+
161
+ return dataset_dicts
162
+
163
+
164
+ def print_instances_class_histogram(dataset_dicts, class_names):
165
+ """
166
+ Args:
167
+ dataset_dicts (list[dict]): list of dataset dicts.
168
+ class_names (list[str]): list of class names (zero-indexed).
169
+ """
170
+ num_classes = len(class_names)
171
+ hist_bins = np.arange(num_classes + 1)
172
+ histogram = np.zeros((num_classes,), dtype=np.int)
173
+ for entry in dataset_dicts:
174
+ annos = entry["annotations"]
175
+ classes = np.asarray(
176
+ [x["category_id"] for x in annos if not x.get("iscrowd", 0)], dtype=np.int
177
+ )
178
+ if len(classes):
179
+ assert classes.min() >= 0, f"Got an invalid category_id={classes.min()}"
180
+ assert (
181
+ classes.max() < num_classes
182
+ ), f"Got an invalid category_id={classes.max()} for a dataset of {num_classes} classes"
183
+ histogram += np.histogram(classes, bins=hist_bins)[0]
184
+
185
+ N_COLS = min(6, len(class_names) * 2)
186
+
187
+ def short_name(x):
188
+ # make long class names shorter. useful for lvis
189
+ if len(x) > 13:
190
+ return x[:11] + ".."
191
+ return x
192
+
193
+ data = list(
194
+ itertools.chain(*[[short_name(class_names[i]), int(v)] for i, v in enumerate(histogram)])
195
+ )
196
+ total_num_instances = sum(data[1::2])
197
+ data.extend([None] * (N_COLS - (len(data) % N_COLS)))
198
+ if num_classes > 1:
199
+ data.extend(["total", total_num_instances])
200
+ data = itertools.zip_longest(*[data[i::N_COLS] for i in range(N_COLS)])
201
+ table = tabulate(
202
+ data,
203
+ headers=["category", "#instances"] * (N_COLS // 2),
204
+ tablefmt="pipe",
205
+ numalign="left",
206
+ stralign="center",
207
+ )
208
+ log_first_n(
209
+ logging.INFO,
210
+ "Distribution of instances among all {} categories:\n".format(num_classes)
211
+ + colored(table, "cyan"),
212
+ key="message",
213
+ )
214
+
215
+
216
+ def get_detection_dataset_dicts(
217
+ names,
218
+ filter_empty=True,
219
+ min_keypoints=0,
220
+ proposal_files=None,
221
+ check_consistency=True,
222
+ ):
223
+ """
224
+ Load and prepare dataset dicts for instance detection/segmentation and semantic segmentation.
225
+
226
+ Args:
227
+ names (str or list[str]): a dataset name or a list of dataset names
228
+ filter_empty (bool): whether to filter out images without instance annotations
229
+ min_keypoints (int): filter out images with fewer keypoints than
230
+ `min_keypoints`. Set to 0 to do nothing.
231
+ proposal_files (list[str]): if given, a list of object proposal files
232
+ that match each dataset in `names`.
233
+ check_consistency (bool): whether to check if datasets have consistent metadata.
234
+
235
+ Returns:
236
+ list[dict]: a list of dicts following the standard dataset dict format.
237
+ """
238
+ if isinstance(names, str):
239
+ names = [names]
240
+ assert len(names), names
241
+ dataset_dicts = [DatasetCatalog.get(dataset_name) for dataset_name in names]
242
+
243
+ if isinstance(dataset_dicts[0], torchdata.Dataset):
244
+ if len(dataset_dicts) > 1:
245
+ # ConcatDataset does not work for iterable style dataset.
246
+ # We could support concat for iterable as well, but it's often
247
+ # not a good idea to concat iterables anyway.
248
+ return torchdata.ConcatDataset(dataset_dicts)
249
+ return dataset_dicts[0]
250
+
251
+ for dataset_name, dicts in zip(names, dataset_dicts):
252
+ assert len(dicts), "Dataset '{}' is empty!".format(dataset_name)
253
+
254
+ if proposal_files is not None:
255
+ assert len(names) == len(proposal_files)
256
+ # load precomputed proposals from proposal files
257
+ dataset_dicts = [
258
+ load_proposals_into_dataset(dataset_i_dicts, proposal_file)
259
+ for dataset_i_dicts, proposal_file in zip(dataset_dicts, proposal_files)
260
+ ]
261
+
262
+ dataset_dicts = list(itertools.chain.from_iterable(dataset_dicts))
263
+
264
+ has_instances = "annotations" in dataset_dicts[0]
265
+ if filter_empty and has_instances:
266
+ dataset_dicts = filter_images_with_only_crowd_annotations(dataset_dicts)
267
+ if min_keypoints > 0 and has_instances:
268
+ dataset_dicts = filter_images_with_few_keypoints(dataset_dicts, min_keypoints)
269
+
270
+ if check_consistency and has_instances:
271
+ try:
272
+ class_names = MetadataCatalog.get(names[0]).thing_classes
273
+ check_metadata_consistency("thing_classes", names)
274
+ print_instances_class_histogram(dataset_dicts, class_names)
275
+ except AttributeError: # class names are not available for this dataset
276
+ pass
277
+
278
+ assert len(dataset_dicts), "No valid data found in {}.".format(",".join(names))
279
+ return dataset_dicts
280
+
281
+
282
+ def build_batch_data_loader(
283
+ dataset,
284
+ sampler,
285
+ total_batch_size,
286
+ *,
287
+ aspect_ratio_grouping=False,
288
+ num_workers=0,
289
+ collate_fn=None,
290
+ ):
291
+ """
292
+ Build a batched dataloader. The main differences from `torch.utils.data.DataLoader` are:
293
+ 1. support aspect ratio grouping options
294
+ 2. use no "batch collation", because this is common for detection training
295
+
296
+ Args:
297
+ dataset (torch.utils.data.Dataset): a pytorch map-style or iterable dataset.
298
+ sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces indices.
299
+ Must be provided iff. ``dataset`` is a map-style dataset.
300
+ total_batch_size, aspect_ratio_grouping, num_workers, collate_fn: see
301
+ :func:`build_detection_train_loader`.
302
+
303
+ Returns:
304
+ iterable[list]. Length of each list is the batch size of the current
305
+ GPU. Each element in the list comes from the dataset.
306
+ """
307
+ world_size = get_world_size()
308
+ assert (
309
+ total_batch_size > 0 and total_batch_size % world_size == 0
310
+ ), "Total batch size ({}) must be divisible by the number of gpus ({}).".format(
311
+ total_batch_size, world_size
312
+ )
313
+ batch_size = total_batch_size // world_size
314
+
315
+ if isinstance(dataset, torchdata.IterableDataset):
316
+ assert sampler is None, "sampler must be None if dataset is IterableDataset"
317
+ else:
318
+ dataset = ToIterableDataset(dataset, sampler)
319
+
320
+ if aspect_ratio_grouping:
321
+ data_loader = torchdata.DataLoader(
322
+ dataset,
323
+ num_workers=num_workers,
324
+ collate_fn=operator.itemgetter(0), # don't batch, but yield individual elements
325
+ worker_init_fn=worker_init_reset_seed,
326
+ ) # yield individual mapped dict
327
+ data_loader = AspectRatioGroupedDataset(data_loader, batch_size)
328
+ if collate_fn is None:
329
+ return data_loader
330
+ return MapDataset(data_loader, collate_fn)
331
+ else:
332
+ return torchdata.DataLoader(
333
+ dataset,
334
+ batch_size=batch_size,
335
+ drop_last=True,
336
+ num_workers=num_workers,
337
+ collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
338
+ worker_init_fn=worker_init_reset_seed,
339
+ )
340
+
341
+
342
+ def _train_loader_from_config(cfg, mapper=None, *, dataset=None, sampler=None):
343
+ if dataset is None:
344
+ dataset = get_detection_dataset_dicts(
345
+ cfg.DATASETS.TRAIN,
346
+ filter_empty=cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS,
347
+ min_keypoints=cfg.MODEL.ROI_KEYPOINT_HEAD.MIN_KEYPOINTS_PER_IMAGE
348
+ if cfg.MODEL.KEYPOINT_ON
349
+ else 0,
350
+ proposal_files=cfg.DATASETS.PROPOSAL_FILES_TRAIN if cfg.MODEL.LOAD_PROPOSALS else None,
351
+ )
352
+ _log_api_usage("dataset." + cfg.DATASETS.TRAIN[0])
353
+
354
+ if mapper is None:
355
+ mapper = DatasetMapper(cfg, True)
356
+
357
+ if sampler is None:
358
+ sampler_name = cfg.DATALOADER.SAMPLER_TRAIN
359
+ logger = logging.getLogger(__name__)
360
+ if isinstance(dataset, torchdata.IterableDataset):
361
+ logger.info("Not using any sampler since the dataset is IterableDataset.")
362
+ sampler = None
363
+ else:
364
+ logger.info("Using training sampler {}".format(sampler_name))
365
+ if sampler_name == "TrainingSampler":
366
+ sampler = TrainingSampler(len(dataset))
367
+ elif sampler_name == "RepeatFactorTrainingSampler":
368
+ repeat_factors = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
369
+ dataset, cfg.DATALOADER.REPEAT_THRESHOLD
370
+ )
371
+ sampler = RepeatFactorTrainingSampler(repeat_factors)
372
+ elif sampler_name == "RandomSubsetTrainingSampler":
373
+ sampler = RandomSubsetTrainingSampler(
374
+ len(dataset), cfg.DATALOADER.RANDOM_SUBSET_RATIO
375
+ )
376
+ else:
377
+ raise ValueError("Unknown training sampler: {}".format(sampler_name))
378
+
379
+ return {
380
+ "dataset": dataset,
381
+ "sampler": sampler,
382
+ "mapper": mapper,
383
+ "total_batch_size": cfg.SOLVER.IMS_PER_BATCH,
384
+ "aspect_ratio_grouping": cfg.DATALOADER.ASPECT_RATIO_GROUPING,
385
+ "num_workers": cfg.DATALOADER.NUM_WORKERS,
386
+ }
387
+
388
+
389
+ @configurable(from_config=_train_loader_from_config)
390
+ def build_detection_train_loader(
391
+ dataset,
392
+ *,
393
+ mapper,
394
+ sampler=None,
395
+ total_batch_size,
396
+ aspect_ratio_grouping=True,
397
+ num_workers=0,
398
+ collate_fn=None,
399
+ ):
400
+ """
401
+ Build a dataloader for object detection with some default features.
402
+
403
+ Args:
404
+ dataset (list or torch.utils.data.Dataset): a list of dataset dicts,
405
+ or a pytorch dataset (either map-style or iterable). It can be obtained
406
+ by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
407
+ mapper (callable): a callable which takes a sample (dict) from dataset and
408
+ returns the format to be consumed by the model.
409
+ When using cfg, the default choice is ``DatasetMapper(cfg, is_train=True)``.
410
+         sampler (torch.utils.data.sampler.Sampler or None): a sampler that produces
+             indices to be applied on ``dataset``.
+             If ``dataset`` is map-style, the default sampler is a :class:`TrainingSampler`,
+             which coordinates an infinite random shuffle sequence across all workers.
+             Sampler must be None if ``dataset`` is iterable.
+         total_batch_size (int): total batch size across all workers.
+         aspect_ratio_grouping (bool): whether to group images with similar
+             aspect ratio for efficiency. When enabled, it requires each
+             element in dataset be a dict with keys "width" and "height".
+         num_workers (int): number of parallel data loading workers
+         collate_fn: a function that determines how to do batching, same as the argument of
+             `torch.utils.data.DataLoader`. Defaults to do no collation and return a list of
+             data. No collation is OK for small batch size and simple data structures.
+             If your batch size is large and each sample contains too many small tensors,
+             it's more efficient to collate them in data loader.
+ 
+     Returns:
+         torch.utils.data.DataLoader:
+             a dataloader. Each output from it is a ``list[mapped_element]`` of length
+             ``total_batch_size / num_workers``, where ``mapped_element`` is produced
+             by the ``mapper``.
+     """
+     if isinstance(dataset, list):
+         dataset = DatasetFromList(dataset, copy=False)
+     if mapper is not None:
+         dataset = MapDataset(dataset, mapper)
+ 
+     if isinstance(dataset, torchdata.IterableDataset):
+         assert sampler is None, "sampler must be None if dataset is IterableDataset"
+     else:
+         if sampler is None:
+             sampler = TrainingSampler(len(dataset))
+         assert isinstance(sampler, torchdata.Sampler), f"Expect a Sampler but got {type(sampler)}"
+     return build_batch_data_loader(
+         dataset,
+         sampler,
+         total_batch_size,
+         aspect_ratio_grouping=aspect_ratio_grouping,
+         num_workers=num_workers,
+         collate_fn=collate_fn,
+     )
+ 
+ 
+ def _test_loader_from_config(cfg, dataset_name, mapper=None):
+     """
+     Uses the given `dataset_name` argument (instead of the names in cfg), because the
+     standard practice is to evaluate each test set individually (not combining them).
+     """
+     if isinstance(dataset_name, str):
+         dataset_name = [dataset_name]
+ 
+     dataset = get_detection_dataset_dicts(
+         dataset_name,
+         filter_empty=False,
+         proposal_files=[
+             cfg.DATASETS.PROPOSAL_FILES_TEST[list(cfg.DATASETS.TEST).index(x)] for x in dataset_name
+         ]
+         if cfg.MODEL.LOAD_PROPOSALS
+         else None,
+     )
+     if mapper is None:
+         mapper = DatasetMapper(cfg, False)
+     return {
+         "dataset": dataset,
+         "mapper": mapper,
+         "num_workers": cfg.DATALOADER.NUM_WORKERS,
+         "sampler": InferenceSampler(len(dataset))
+         if not isinstance(dataset, torchdata.IterableDataset)
+         else None,
+     }
+ 
+ 
+ @configurable(from_config=_test_loader_from_config)
+ def build_detection_test_loader(
+     dataset: Union[List[Any], torchdata.Dataset],
+     *,
+     mapper: Callable[[Dict[str, Any]], Any],
+     sampler: Optional[torchdata.Sampler] = None,
+     batch_size: int = 1,
+     num_workers: int = 0,
+     collate_fn: Optional[Callable[[List[Any]], Any]] = None,
+ ) -> torchdata.DataLoader:
+     """
+     Similar to `build_detection_train_loader`, with default batch size = 1,
+     and sampler = :class:`InferenceSampler`. This sampler coordinates all workers
+     to produce the exact set of all samples.
+ 
+     Args:
+         dataset: a list of dataset dicts,
+             or a pytorch dataset (either map-style or iterable). They can be obtained
+             by using :func:`DatasetCatalog.get` or :func:`get_detection_dataset_dicts`.
+         mapper: a callable which takes a sample (dict) from dataset
+             and returns the format to be consumed by the model.
+             When using cfg, the default choice is ``DatasetMapper(cfg, is_train=False)``.
+         sampler: a sampler that produces
+             indices to be applied on ``dataset``. Default to :class:`InferenceSampler`,
+             which splits the dataset across all workers. Sampler must be None
+             if `dataset` is iterable.
+         batch_size: the batch size of the data loader to be created.
+             Default to 1 image per worker since this is the standard when reporting
+             inference time in papers.
+         num_workers: number of parallel data loading workers
+         collate_fn: same as the argument of `torch.utils.data.DataLoader`.
+             Defaults to do no collation and return a list of data.
+ 
+     Returns:
+         DataLoader: a torch DataLoader, that loads the given detection
+         dataset, with test-time transformation and batching.
+ 
+     Examples:
+     ::
+         data_loader = build_detection_test_loader(
+             DatasetRegistry.get("my_test"),
+             mapper=DatasetMapper(...))
+ 
+         # or, instantiate with a CfgNode:
+         data_loader = build_detection_test_loader(cfg, "my_test")
+     """
+     if isinstance(dataset, list):
+         dataset = DatasetFromList(dataset, copy=False)
+     if mapper is not None:
+         dataset = MapDataset(dataset, mapper)
+     if isinstance(dataset, torchdata.IterableDataset):
+         assert sampler is None, "sampler must be None if dataset is IterableDataset"
+     else:
+         if sampler is None:
+             sampler = InferenceSampler(len(dataset))
+     return torchdata.DataLoader(
+         dataset,
+         batch_size=batch_size,
+         sampler=sampler,
+         drop_last=False,
+         num_workers=num_workers,
+         collate_fn=trivial_batch_collator if collate_fn is None else collate_fn,
+     )
+ 
+ 
+ def trivial_batch_collator(batch):
+     """
+     A batch collator that does nothing.
+     """
+     return batch
+ 
+ 
+ def worker_init_reset_seed(worker_id):
+     initial_seed = torch.initial_seed() % 2**31
+     seed_all_rng(initial_seed + worker_id)
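For orientation (editor's sketch, not part of the commit): the test loader above can be driven directly from a plain list of dataset dicts. This assumes a standard `detectron2` install (in this extension the same module lives at `annotator.oneformer.detectron2.data.build`); the two dataset dicts and the identity mapper are invented for illustration.

```python
from detectron2.data import build_detection_test_loader

# Made-up dataset dicts; a real mapper would read the image and build Instances.
dataset_dicts = [
    {"file_name": "a.jpg", "width": 640, "height": 480},
    {"file_name": "b.jpg", "width": 512, "height": 512},
]

loader = build_detection_test_loader(
    dataset=dataset_dicts,
    mapper=lambda d: d,  # identity stand-in for DatasetMapper(cfg, is_train=False)
    batch_size=1,
    num_workers=0,
)
for batch in loader:  # trivial_batch_collator: each batch is a list[dict]
    print(batch)
```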
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/catalog.py ADDED
@@ -0,0 +1,236 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import copy
+ import logging
+ import types
+ from collections import UserDict
+ from typing import List
+ 
+ from annotator.oneformer.detectron2.utils.logger import log_first_n
+ 
+ __all__ = ["DatasetCatalog", "MetadataCatalog", "Metadata"]
+ 
+ 
+ class _DatasetCatalog(UserDict):
+     """
+     A global dictionary that stores information about the datasets and how to obtain them.
+ 
+     It contains a mapping from strings
+     (which are names that identify a dataset, e.g. "coco_2014_train")
+     to a function which parses the dataset and returns the samples in the
+     format of `list[dict]`.
+ 
+     The returned dicts should be in Detectron2 Dataset format (See DATASETS.md for details)
+     if used with the data loader functionalities in `data/build.py,data/detection_transform.py`.
+ 
+     The purpose of having this catalog is to make it easy to choose
+     different datasets, by just using the strings in the config.
+     """
+ 
+     def register(self, name, func):
+         """
+         Args:
+             name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+             func (callable): a callable which takes no arguments and returns a list of dicts.
+                 It must return the same results if called multiple times.
+         """
+         assert callable(func), "You must register a function with `DatasetCatalog.register`!"
+         assert name not in self, "Dataset '{}' is already registered!".format(name)
+         self[name] = func
+ 
+     def get(self, name):
+         """
+         Call the registered function and return its results.
+ 
+         Args:
+             name (str): the name that identifies a dataset, e.g. "coco_2014_train".
+ 
+         Returns:
+             list[dict]: dataset annotations.
+         """
+         try:
+             f = self[name]
+         except KeyError as e:
+             raise KeyError(
+                 "Dataset '{}' is not registered! Available datasets are: {}".format(
+                     name, ", ".join(list(self.keys()))
+                 )
+             ) from e
+         return f()
+ 
+     def list(self) -> List[str]:
+         """
+         List all registered datasets.
+ 
+         Returns:
+             list[str]
+         """
+         return list(self.keys())
+ 
+     def remove(self, name):
+         """
+         Alias of ``pop``.
+         """
+         self.pop(name)
+ 
+     def __str__(self):
+         return "DatasetCatalog(registered datasets: {})".format(", ".join(self.keys()))
+ 
+     __repr__ = __str__
+ 
+ 
+ DatasetCatalog = _DatasetCatalog()
+ DatasetCatalog.__doc__ = (
+     _DatasetCatalog.__doc__
+     + """
+     .. automethod:: detectron2.data.catalog.DatasetCatalog.register
+     .. automethod:: detectron2.data.catalog.DatasetCatalog.get
+ """
+ )
+ 
+ 
+ class Metadata(types.SimpleNamespace):
+     """
+     A class that supports simple attribute setter/getter.
+     It is intended for storing metadata of a dataset and making it accessible globally.
+ 
+     Examples:
+     ::
+         # somewhere when you load the data:
+         MetadataCatalog.get("mydataset").thing_classes = ["person", "dog"]
+ 
+         # somewhere when you print statistics or visualize:
+         classes = MetadataCatalog.get("mydataset").thing_classes
+     """
+ 
+     # the name of the dataset
+     # set default to N/A so that `self.name` in the errors will not trigger getattr again
+     name: str = "N/A"
+ 
+     _RENAMED = {
+         "class_names": "thing_classes",
+         "dataset_id_to_contiguous_id": "thing_dataset_id_to_contiguous_id",
+         "stuff_class_names": "stuff_classes",
+     }
+ 
+     def __getattr__(self, key):
+         if key in self._RENAMED:
+             log_first_n(
+                 logging.WARNING,
+                 "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+                 n=10,
+             )
+             return getattr(self, self._RENAMED[key])
+ 
+         # "name" exists in every metadata
+         if len(self.__dict__) > 1:
+             raise AttributeError(
+                 "Attribute '{}' does not exist in the metadata of dataset '{}'. Available "
+                 "keys are {}.".format(key, self.name, str(self.__dict__.keys()))
+             )
+         else:
+             raise AttributeError(
+                 f"Attribute '{key}' does not exist in the metadata of dataset '{self.name}': "
+                 "metadata is empty."
+             )
+ 
+     def __setattr__(self, key, val):
+         if key in self._RENAMED:
+             log_first_n(
+                 logging.WARNING,
+                 "Metadata '{}' was renamed to '{}'!".format(key, self._RENAMED[key]),
+                 n=10,
+             )
+             setattr(self, self._RENAMED[key], val)
+ 
+         # Ensure that metadata of the same name stays consistent
+         try:
+             oldval = getattr(self, key)
+             assert oldval == val, (
+                 "Attribute '{}' in the metadata of '{}' cannot be set "
+                 "to a different value!\n{} != {}".format(key, self.name, oldval, val)
+             )
+         except AttributeError:
+             super().__setattr__(key, val)
+ 
+     def as_dict(self):
+         """
+         Returns all the metadata as a dict.
+         Note that modifications to the returned dict will not reflect on the Metadata object.
+         """
+         return copy.copy(self.__dict__)
+ 
+     def set(self, **kwargs):
+         """
+         Set multiple metadata with kwargs.
+         """
+         for k, v in kwargs.items():
+             setattr(self, k, v)
+         return self
+ 
+     def get(self, key, default=None):
+         """
+         Access an attribute and return its value if it exists.
+         Otherwise return default.
+         """
+         try:
+             return getattr(self, key)
+         except AttributeError:
+             return default
+ 
+ 
+ class _MetadataCatalog(UserDict):
+     """
+     MetadataCatalog is a global dictionary that provides access to
+     :class:`Metadata` of a given dataset.
+ 
+     The metadata associated with a certain name is a singleton: once created, the
+     metadata will stay alive and will be returned by future calls to ``get(name)``.
+ 
+     It's like global variables, so don't abuse it.
+     It's meant for storing knowledge that's constant and shared across the execution
+     of the program, e.g.: the class names in COCO.
+     """
+ 
+     def get(self, name):
+         """
+         Args:
+             name (str): name of a dataset (e.g. coco_2014_train).
+ 
+         Returns:
+             Metadata: The :class:`Metadata` instance associated with this name,
+             or create an empty one if none is available.
+         """
+         assert len(name)
+         r = super().get(name, None)
+         if r is None:
+             r = self[name] = Metadata(name=name)
+         return r
+ 
+     def list(self):
+         """
+         List all registered metadata.
+ 
+         Returns:
+             list[str]: keys (names of datasets) of all registered metadata
+         """
+         return list(self.keys())
+ 
+     def remove(self, name):
+         """
+         Alias of ``pop``.
+         """
+         self.pop(name)
+ 
+     def __str__(self):
+         return "MetadataCatalog(registered metadata: {})".format(", ".join(self.keys()))
+ 
+     __repr__ = __str__
+ 
+ 
+ MetadataCatalog = _MetadataCatalog()
+ MetadataCatalog.__doc__ = (
+     _MetadataCatalog.__doc__
+     + """
+     .. automethod:: detectron2.data.catalog.MetadataCatalog.get
+ """
+ )
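For orientation (editor's sketch, not part of the commit): the two catalogs are used together when registering a dataset. The dataset name, loader function, and class list below are invented; the import path assumes a stock `detectron2` install (here the equivalent is `annotator.oneformer.detectron2.data`).

```python
from detectron2.data import DatasetCatalog, MetadataCatalog

def load_my_dataset():
    # Must return the same list[dict] on every call.
    return [{"file_name": "img_0.jpg", "width": 640, "height": 480, "annotations": []}]

DatasetCatalog.register("my_dataset_train", load_my_dataset)
MetadataCatalog.get("my_dataset_train").set(thing_classes=["widget"])

dicts = DatasetCatalog.get("my_dataset_train")  # invokes load_my_dataset()
print(MetadataCatalog.get("my_dataset_train").thing_classes)  # ['widget']
```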
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/common.py ADDED
@@ -0,0 +1,301 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import contextlib
+ import copy
+ import itertools
+ import logging
+ import numpy as np
+ import pickle
+ import random
+ from typing import Callable, Union
+ import torch
+ import torch.utils.data as data
+ from torch.utils.data.sampler import Sampler
+ 
+ from annotator.oneformer.detectron2.utils.serialize import PicklableWrapper
+ 
+ __all__ = ["MapDataset", "DatasetFromList", "AspectRatioGroupedDataset", "ToIterableDataset"]
+ 
+ logger = logging.getLogger(__name__)
+ 
+ 
+ def _shard_iterator_dataloader_worker(iterable):
+     # Shard the iterable if we're currently inside a pytorch dataloader worker.
+     worker_info = data.get_worker_info()
+     if worker_info is None or worker_info.num_workers == 1:
+         # do nothing
+         yield from iterable
+     else:
+         yield from itertools.islice(iterable, worker_info.id, None, worker_info.num_workers)
+ 
+ 
+ class _MapIterableDataset(data.IterableDataset):
+     """
+     Map a function over elements in an IterableDataset.
+ 
+     Similar to pytorch's MapIterDataPipe, but supports filtering when map_func
+     returns None.
+ 
+     This class is not public-facing. It is created by `MapDataset`.
+     """
+ 
+     def __init__(self, dataset, map_func):
+         self._dataset = dataset
+         self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work
+ 
+     def __len__(self):
+         return len(self._dataset)
+ 
+     def __iter__(self):
+         for x in map(self._map_func, self._dataset):
+             if x is not None:
+                 yield x
+ 
+ 
+ class MapDataset(data.Dataset):
+     """
+     Map a function over the elements in a dataset.
+     """
+ 
+     def __init__(self, dataset, map_func):
+         """
+         Args:
+             dataset: a dataset where map function is applied. Can be either
+                 map-style or iterable dataset. When given an iterable dataset,
+                 the returned object will also be an iterable dataset.
+             map_func: a callable which maps the element in dataset. map_func can
+                 return None to skip the data (e.g. in case of errors).
+                 How None is handled depends on the style of `dataset`.
+                 If `dataset` is map-style, it randomly tries other elements.
+                 If `dataset` is iterable, it skips the data and tries the next.
+         """
+         self._dataset = dataset
+         self._map_func = PicklableWrapper(map_func)  # wrap so that a lambda will work
+ 
+         self._rng = random.Random(42)
+         self._fallback_candidates = set(range(len(dataset)))
+ 
+     def __new__(cls, dataset, map_func):
+         is_iterable = isinstance(dataset, data.IterableDataset)
+         if is_iterable:
+             return _MapIterableDataset(dataset, map_func)
+         else:
+             return super().__new__(cls)
+ 
+     def __getnewargs__(self):
+         return self._dataset, self._map_func
+ 
+     def __len__(self):
+         return len(self._dataset)
+ 
+     def __getitem__(self, idx):
+         retry_count = 0
+         cur_idx = int(idx)
+ 
+         while True:
+             data = self._map_func(self._dataset[cur_idx])
+             if data is not None:
+                 self._fallback_candidates.add(cur_idx)
+                 return data
+ 
+             # _map_func fails for this idx, use a random new index from the pool
+             retry_count += 1
+             self._fallback_candidates.discard(cur_idx)
+             cur_idx = self._rng.sample(self._fallback_candidates, k=1)[0]
+ 
+             if retry_count >= 3:
+                 logger = logging.getLogger(__name__)
+                 logger.warning(
+                     "Failed to apply `_map_func` for idx: {}, retry count: {}".format(
+                         idx, retry_count
+                     )
+                 )
+ 
+ 
+ class _TorchSerializedList(object):
+     """
+     A list-like object whose items are serialized and stored in a torch tensor. When
+     launching a process that uses TorchSerializedList with "fork" start method,
+     the subprocess can read the same buffer without triggering copy-on-access. When
+     launching a process that uses TorchSerializedList with "spawn/forkserver" start
+     method, the list will be pickled by a special ForkingPickler registered by PyTorch
+     that moves data to shared memory. In both cases, this allows parent and child
+     processes to share RAM for the list data, hence avoids the issue in
+     https://github.com/pytorch/pytorch/issues/13246.
+ 
+     See also https://ppwwyyxx.com/blog/2022/Demystify-RAM-Usage-in-Multiprocess-DataLoader/
+     on how it works.
+     """
+ 
+     def __init__(self, lst: list):
+         self._lst = lst
+ 
+         def _serialize(data):
+             buffer = pickle.dumps(data, protocol=-1)
+             return np.frombuffer(buffer, dtype=np.uint8)
+ 
+         logger.info(
+             "Serializing {} elements to byte tensors and concatenating them all ...".format(
+                 len(self._lst)
+             )
+         )
+         self._lst = [_serialize(x) for x in self._lst]
+         self._addr = np.asarray([len(x) for x in self._lst], dtype=np.int64)
+         self._addr = torch.from_numpy(np.cumsum(self._addr))
+         self._lst = torch.from_numpy(np.concatenate(self._lst))
+         logger.info("Serialized dataset takes {:.2f} MiB".format(len(self._lst) / 1024**2))
+ 
+     def __len__(self):
+         return len(self._addr)
+ 
+     def __getitem__(self, idx):
+         start_addr = 0 if idx == 0 else self._addr[idx - 1].item()
+         end_addr = self._addr[idx].item()
+         bytes = memoryview(self._lst[start_addr:end_addr].numpy())
+ 
+         # @lint-ignore PYTHONPICKLEISBAD
+         return pickle.loads(bytes)
+ 
+ 
+ _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = _TorchSerializedList
+ 
+ 
+ @contextlib.contextmanager
+ def set_default_dataset_from_list_serialize_method(new):
+     """
+     Context manager for using a custom serialize function when creating DatasetFromList.
+     """
+ 
+     global _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+     orig = _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+     _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = new
+     yield
+     _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD = orig
+ 
+ 
+ class DatasetFromList(data.Dataset):
+     """
+     Wrap a list to a torch Dataset. It produces elements of the list as data.
+     """
+ 
+     def __init__(
+         self,
+         lst: list,
+         copy: bool = True,
+         serialize: Union[bool, Callable] = True,
+     ):
+         """
+         Args:
+             lst (list): a list which contains elements to produce.
+             copy (bool): whether to deepcopy the element when producing it,
+                 so that the result can be modified in place without affecting the
+                 source in the list.
+             serialize (bool or callable): whether to serialize the storage to another
+                 backend. If `True`, the default serialize method will be used; if given
+                 a callable, the callable will be used as the serialize method.
+         """
+         self._lst = lst
+         self._copy = copy
+         if not isinstance(serialize, (bool, Callable)):
+             raise TypeError(f"Unsupported type for argument `serialize`: {serialize}")
+         self._serialize = serialize is not False
+ 
+         if self._serialize:
+             serialize_method = (
+                 serialize
+                 if isinstance(serialize, Callable)
+                 else _DEFAULT_DATASET_FROM_LIST_SERIALIZE_METHOD
+             )
+             logger.info(f"Serializing the dataset using: {serialize_method}")
+             self._lst = serialize_method(self._lst)
+ 
+     def __len__(self):
+         return len(self._lst)
+ 
+     def __getitem__(self, idx):
+         if self._copy and not self._serialize:
+             return copy.deepcopy(self._lst[idx])
+         else:
+             return self._lst[idx]
+ 
+ 
+ class ToIterableDataset(data.IterableDataset):
+     """
+     Convert an old indices-based (also called map-style) dataset
+     to an iterable-style dataset.
+     """
+ 
+     def __init__(self, dataset: data.Dataset, sampler: Sampler, shard_sampler: bool = True):
+         """
+         Args:
+             dataset: an old-style dataset with ``__getitem__``
+             sampler: a cheap iterable that produces indices to be applied on ``dataset``.
+             shard_sampler: whether to shard the sampler based on the current pytorch data loader
+                 worker id. When an IterableDataset is forked by pytorch's DataLoader into multiple
+                 workers, it is responsible for sharding its data based on worker id so that workers
+                 don't produce identical data.
+ 
+                 Most samplers (like our TrainingSampler) do not shard based on dataloader worker id
+                 and this argument should be set to True. But certain samplers may be already
+                 sharded, in that case this argument should be set to False.
+         """
+         assert not isinstance(dataset, data.IterableDataset), dataset
+         assert isinstance(sampler, Sampler), sampler
+         self.dataset = dataset
+         self.sampler = sampler
+         self.shard_sampler = shard_sampler
+ 
+     def __iter__(self):
+         if not self.shard_sampler:
+             sampler = self.sampler
+         else:
+             # With a map-style dataset, `DataLoader(dataset, sampler)` runs the
+             # sampler in the main process only. But `DataLoader(ToIterableDataset(dataset, sampler))`
+             # will run the sampler in every one of the N workers. So we should only keep 1/N of
+             # the ids on each worker. The assumption is that the sampler is cheap to iterate,
+             # so it's fine to discard ids in workers.
+             sampler = _shard_iterator_dataloader_worker(self.sampler)
+         for idx in sampler:
+             yield self.dataset[idx]
+ 
+     def __len__(self):
+         return len(self.sampler)
+ 
+ 
+ class AspectRatioGroupedDataset(data.IterableDataset):
+     """
+     Batch data that have similar aspect ratio together.
+     In this implementation, images whose aspect ratio < (or >) 1 will
+     be batched together.
+     This improves training speed because the images then need less padding
+     to form a batch.
+ 
+     It assumes the underlying dataset produces dicts with "width" and "height" keys.
+     It will then produce a list of original dicts with length = batch_size,
+     all with similar aspect ratios.
+     """
+ 
+     def __init__(self, dataset, batch_size):
+         """
+         Args:
+             dataset: an iterable. Each element must be a dict with keys
+                 "width" and "height", which will be used to batch data.
+             batch_size (int):
+         """
+         self.dataset = dataset
+         self.batch_size = batch_size
+         self._buckets = [[] for _ in range(2)]
+         # Hard-coded two aspect ratio groups: w > h and w < h.
+         # Can add support for more aspect ratio groups, but doesn't seem useful.
+ 
+     def __iter__(self):
+         for d in self.dataset:
+             w, h = d["width"], d["height"]
+             bucket_id = 0 if w > h else 1
+             bucket = self._buckets[bucket_id]
+             bucket.append(d)
+             if len(bucket) == self.batch_size:
+                 data = bucket[:]
+                 # Clear bucket first, because code after yield is not
+                 # guaranteed to execute
+                 del bucket[:]
+                 yield data
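For orientation (editor's sketch, not part of the commit): `DatasetFromList` and `MapDataset` compose as below. The records and the filter function are invented; the import path assumes a stock `detectron2` install.

```python
from detectron2.data.common import DatasetFromList, MapDataset

raw = [{"id": i} for i in range(4)]
ds = DatasetFromList(raw, copy=False)  # serialized into one byte tensor by default

def keep_even(d):
    # Returning None makes the map-style MapDataset retry a random fallback index.
    return d if d["id"] % 2 == 0 else None

mapped = MapDataset(ds, keep_even)
print(mapped[0])  # {'id': 0}
print(mapped[1])  # falls back to an even element, e.g. {'id': 2}
```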
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/dataset_mapper.py ADDED
@@ -0,0 +1,191 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import copy
+ import logging
+ import numpy as np
+ from typing import List, Optional, Union
+ import torch
+ 
+ from annotator.oneformer.detectron2.config import configurable
+ 
+ from . import detection_utils as utils
+ from . import transforms as T
+ 
+ """
+ This file contains the default mapping that's applied to "dataset dicts".
+ """
+ 
+ __all__ = ["DatasetMapper"]
+ 
+ 
+ class DatasetMapper:
+     """
+     A callable which takes a dataset dict in Detectron2 Dataset format,
+     and maps it into a format used by the model.
+ 
+     This is the default callable to be used to map your dataset dict into training data.
+     You may need to follow it to implement your own one for customized logic,
+     such as a different way to read or transform images.
+     See :doc:`/tutorials/data_loading` for details.
+ 
+     The callable currently does the following:
+ 
+     1. Read the image from "file_name"
+     2. Applies cropping/geometric transforms to the image and annotations
+     3. Prepare data and annotations to Tensor and :class:`Instances`
+     """
+ 
+     @configurable
+     def __init__(
+         self,
+         is_train: bool,
+         *,
+         augmentations: List[Union[T.Augmentation, T.Transform]],
+         image_format: str,
+         use_instance_mask: bool = False,
+         use_keypoint: bool = False,
+         instance_mask_format: str = "polygon",
+         keypoint_hflip_indices: Optional[np.ndarray] = None,
+         precomputed_proposal_topk: Optional[int] = None,
+         recompute_boxes: bool = False,
+     ):
+         """
+         NOTE: this interface is experimental.
+ 
+         Args:
+             is_train: whether it's used in training or inference
+             augmentations: a list of augmentations or deterministic transforms to apply
+             image_format: an image format supported by :func:`detection_utils.read_image`.
+             use_instance_mask: whether to process instance segmentation annotations, if available
+             use_keypoint: whether to process keypoint annotations if available
+             instance_mask_format: one of "polygon" or "bitmask". Process instance segmentation
+                 masks into this format.
+             keypoint_hflip_indices: see :func:`detection_utils.create_keypoint_hflip_indices`
+             precomputed_proposal_topk: if given, will load pre-computed
+                 proposals from dataset_dict and keep the top k proposals for each image.
+             recompute_boxes: whether to overwrite bounding box annotations
+                 by computing tight bounding boxes from instance mask annotations.
+         """
+         if recompute_boxes:
+             assert use_instance_mask, "recompute_boxes requires instance masks"
+         # fmt: off
+         self.is_train               = is_train
+         self.augmentations          = T.AugmentationList(augmentations)
+         self.image_format           = image_format
+         self.use_instance_mask      = use_instance_mask
+         self.instance_mask_format   = instance_mask_format
+         self.use_keypoint           = use_keypoint
+         self.keypoint_hflip_indices = keypoint_hflip_indices
+         self.proposal_topk          = precomputed_proposal_topk
+         self.recompute_boxes        = recompute_boxes
+         # fmt: on
+         logger = logging.getLogger(__name__)
+         mode = "training" if is_train else "inference"
+         logger.info(f"[DatasetMapper] Augmentations used in {mode}: {augmentations}")
+ 
+     @classmethod
+     def from_config(cls, cfg, is_train: bool = True):
+         augs = utils.build_augmentation(cfg, is_train)
+         if cfg.INPUT.CROP.ENABLED and is_train:
+             augs.insert(0, T.RandomCrop(cfg.INPUT.CROP.TYPE, cfg.INPUT.CROP.SIZE))
+             recompute_boxes = cfg.MODEL.MASK_ON
+         else:
+             recompute_boxes = False
+ 
+         ret = {
+             "is_train": is_train,
+             "augmentations": augs,
+             "image_format": cfg.INPUT.FORMAT,
+             "use_instance_mask": cfg.MODEL.MASK_ON,
+             "instance_mask_format": cfg.INPUT.MASK_FORMAT,
+             "use_keypoint": cfg.MODEL.KEYPOINT_ON,
+             "recompute_boxes": recompute_boxes,
+         }
+ 
+         if cfg.MODEL.KEYPOINT_ON:
+             ret["keypoint_hflip_indices"] = utils.create_keypoint_hflip_indices(cfg.DATASETS.TRAIN)
+ 
+         if cfg.MODEL.LOAD_PROPOSALS:
+             ret["precomputed_proposal_topk"] = (
+                 cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TRAIN
+                 if is_train
+                 else cfg.DATASETS.PRECOMPUTED_PROPOSAL_TOPK_TEST
+             )
+         return ret
+ 
+     def _transform_annotations(self, dataset_dict, transforms, image_shape):
+         # USER: Modify this if you want to keep them for some reason.
+         for anno in dataset_dict["annotations"]:
+             if not self.use_instance_mask:
+                 anno.pop("segmentation", None)
+             if not self.use_keypoint:
+                 anno.pop("keypoints", None)
+ 
+         # USER: Implement additional transformations if you have other types of data
+         annos = [
+             utils.transform_instance_annotations(
+                 obj, transforms, image_shape, keypoint_hflip_indices=self.keypoint_hflip_indices
+             )
+             for obj in dataset_dict.pop("annotations")
+             if obj.get("iscrowd", 0) == 0
+         ]
+         instances = utils.annotations_to_instances(
+             annos, image_shape, mask_format=self.instance_mask_format
+         )
+ 
+         # After transforms such as cropping are applied, the bounding box may no longer
+         # tightly bound the object. As an example, imagine a triangle object
+         # [(0,0), (2,0), (0,2)] cropped by a box [(1,0),(2,2)] (XYXY format). The tight
+         # bounding box of the cropped triangle should be [(1,0),(2,1)], which is not equal to
+         # the intersection of original bounding box and the cropping box.
+         if self.recompute_boxes:
+             instances.gt_boxes = instances.gt_masks.get_bounding_boxes()
+         dataset_dict["instances"] = utils.filter_empty_instances(instances)
+ 
+     def __call__(self, dataset_dict):
+         """
+         Args:
+             dataset_dict (dict): Metadata of one image, in Detectron2 Dataset format.
+ 
+         Returns:
+             dict: a format that builtin models in detectron2 accept
+         """
+         dataset_dict = copy.deepcopy(dataset_dict)  # it will be modified by code below
+         # USER: Write your own image loading if it's not from a file
+         image = utils.read_image(dataset_dict["file_name"], format=self.image_format)
+         utils.check_image_size(dataset_dict, image)
+ 
+         # USER: Remove if you don't do semantic/panoptic segmentation.
+         if "sem_seg_file_name" in dataset_dict:
+             sem_seg_gt = utils.read_image(dataset_dict.pop("sem_seg_file_name"), "L").squeeze(2)
+         else:
+             sem_seg_gt = None
+ 
+         aug_input = T.AugInput(image, sem_seg=sem_seg_gt)
+         transforms = self.augmentations(aug_input)
+         image, sem_seg_gt = aug_input.image, aug_input.sem_seg
+ 
+         image_shape = image.shape[:2]  # h, w
+         # Pytorch's dataloader is efficient on torch.Tensor due to shared-memory,
+         # but not efficient on large generic data structures due to the use of pickle & mp.Queue.
+         # Therefore it's important to use torch.Tensor.
+         dataset_dict["image"] = torch.as_tensor(np.ascontiguousarray(image.transpose(2, 0, 1)))
+         if sem_seg_gt is not None:
+             dataset_dict["sem_seg"] = torch.as_tensor(sem_seg_gt.astype("long"))
+ 
+         # USER: Remove if you don't use pre-computed proposals.
+         # Most users would not need this feature.
+         if self.proposal_topk is not None:
+             utils.transform_proposals(
+                 dataset_dict, image_shape, transforms, proposal_topk=self.proposal_topk
+             )
+ 
+         if not self.is_train:
+             # USER: Modify this if you want to keep them for some reason.
+             dataset_dict.pop("annotations", None)
+             dataset_dict.pop("sem_seg_file_name", None)
+             return dataset_dict
+ 
+         if "annotations" in dataset_dict:
+             self._transform_annotations(dataset_dict, transforms, image_shape)
+ 
+         return dataset_dict
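For orientation (editor's sketch, not part of the commit): the mapper can also be constructed without a cfg by passing the keyword arguments directly. The augmentation choices below are examples, not defaults; the import paths assume a stock `detectron2` install.

```python
from detectron2.data import DatasetMapper
import detectron2.data.transforms as T

mapper = DatasetMapper(
    is_train=True,
    augmentations=[
        T.ResizeShortestEdge(short_edge_length=800, max_size=1333),
        T.RandomFlip(horizontal=True),
    ],
    image_format="BGR",
    use_instance_mask=True,
)
# mapper(dataset_dict) returns a dict with an "image" CHW tensor
# and, in training, an "instances" field.
```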
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/README.md ADDED
@@ -0,0 +1,9 @@
+ 
+ 
+ ### Common Datasets
+ 
+ The datasets implemented here do not need to load the data into the final format.
+ They should provide only the minimal data structure needed to use the dataset, so loading can be very efficient.
+ 
+ For example, for an image dataset, just provide the file names and labels, but don't read the images.
+ Let the downstream decide how to read.
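A minimal sketch of that convention (editor's illustration, not part of the commit; the directory layout and label scheme are invented):

```python
import os

def load_my_image_dataset(image_dir):
    # Return only lightweight records; let the mapper read pixels later.
    return [
        {"file_name": os.path.join(image_dir, f), "label": f.split("_")[0]}
        for f in sorted(os.listdir(image_dir))
        if f.endswith(".jpg")
    ]
```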
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/__init__.py ADDED
@@ -0,0 +1,9 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .coco import load_coco_json, load_sem_seg, register_coco_instances, convert_to_coco_json
+ from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+ from .lvis import load_lvis_json, register_lvis_instances, get_lvis_instances_meta
+ from .pascal_voc import load_voc_instances, register_pascal_voc
+ from . import builtin as _builtin  # ensure the builtin datasets are registered
+ 
+ 
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/builtin.py ADDED
@@ -0,0 +1,259 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ 
+ 
+ """
+ This file registers pre-defined datasets at hard-coded paths, and their metadata.
+ 
+ We hard-code metadata for common datasets. This will enable:
+ 1. Consistency check when loading the datasets
+ 2. Use models on these standard datasets directly and run demos,
+    without having to download the dataset annotations
+ 
+ We hard-code some paths to the dataset that's assumed to
+ exist in "./datasets/".
+ 
+ Users SHOULD NOT use this file to create new datasets / metadata for new datasets.
+ To add new datasets, refer to the tutorial "docs/DATASETS.md".
+ """
+ 
+ import os
+ 
+ from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
+ 
+ from .builtin_meta import ADE20K_SEM_SEG_CATEGORIES, _get_builtin_metadata
+ from .cityscapes import load_cityscapes_instances, load_cityscapes_semantic
+ from .cityscapes_panoptic import register_all_cityscapes_panoptic
+ from .coco import load_sem_seg, register_coco_instances
+ from .coco_panoptic import register_coco_panoptic, register_coco_panoptic_separated
+ from .lvis import get_lvis_instances_meta, register_lvis_instances
+ from .pascal_voc import register_pascal_voc
+ 
+ # ==== Predefined datasets and splits for COCO ==========
+ 
+ _PREDEFINED_SPLITS_COCO = {}
+ _PREDEFINED_SPLITS_COCO["coco"] = {
+     "coco_2014_train": ("coco/train2014", "coco/annotations/instances_train2014.json"),
+     "coco_2014_val": ("coco/val2014", "coco/annotations/instances_val2014.json"),
+     "coco_2014_minival": ("coco/val2014", "coco/annotations/instances_minival2014.json"),
+     "coco_2014_valminusminival": (
+         "coco/val2014",
+         "coco/annotations/instances_valminusminival2014.json",
+     ),
+     "coco_2017_train": ("coco/train2017", "coco/annotations/instances_train2017.json"),
+     "coco_2017_val": ("coco/val2017", "coco/annotations/instances_val2017.json"),
+     "coco_2017_test": ("coco/test2017", "coco/annotations/image_info_test2017.json"),
+     "coco_2017_test-dev": ("coco/test2017", "coco/annotations/image_info_test-dev2017.json"),
+     "coco_2017_val_100": ("coco/val2017", "coco/annotations/instances_val2017_100.json"),
+ }
+ 
+ _PREDEFINED_SPLITS_COCO["coco_person"] = {
+     "keypoints_coco_2014_train": (
+         "coco/train2014",
+         "coco/annotations/person_keypoints_train2014.json",
+     ),
+     "keypoints_coco_2014_val": ("coco/val2014", "coco/annotations/person_keypoints_val2014.json"),
+     "keypoints_coco_2014_minival": (
+         "coco/val2014",
+         "coco/annotations/person_keypoints_minival2014.json",
+     ),
+     "keypoints_coco_2014_valminusminival": (
+         "coco/val2014",
+         "coco/annotations/person_keypoints_valminusminival2014.json",
+     ),
+     "keypoints_coco_2017_train": (
+         "coco/train2017",
+         "coco/annotations/person_keypoints_train2017.json",
+     ),
+     "keypoints_coco_2017_val": ("coco/val2017", "coco/annotations/person_keypoints_val2017.json"),
+     "keypoints_coco_2017_val_100": (
+         "coco/val2017",
+         "coco/annotations/person_keypoints_val2017_100.json",
+     ),
+ }
+ 
+ 
+ _PREDEFINED_SPLITS_COCO_PANOPTIC = {
+     "coco_2017_train_panoptic": (
+         # This is the original panoptic annotation directory
+         "coco/panoptic_train2017",
+         "coco/annotations/panoptic_train2017.json",
+         # This directory contains semantic annotations that are
+         # converted from panoptic annotations.
+         # It is used by PanopticFPN.
+         # You can use the script at detectron2/datasets/prepare_panoptic_fpn.py
+         # to create these directories.
+         "coco/panoptic_stuff_train2017",
+     ),
+     "coco_2017_val_panoptic": (
+         "coco/panoptic_val2017",
+         "coco/annotations/panoptic_val2017.json",
+         "coco/panoptic_stuff_val2017",
+     ),
+     "coco_2017_val_100_panoptic": (
+         "coco/panoptic_val2017_100",
+         "coco/annotations/panoptic_val2017_100.json",
+         "coco/panoptic_stuff_val2017_100",
+     ),
+ }
+ 
+ 
+ def register_all_coco(root):
+     for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_COCO.items():
+         for key, (image_root, json_file) in splits_per_dataset.items():
+             # Assume pre-defined datasets live in `./datasets`.
+             register_coco_instances(
+                 key,
+                 _get_builtin_metadata(dataset_name),
+                 os.path.join(root, json_file) if "://" not in json_file else json_file,
+                 os.path.join(root, image_root),
+             )
+ 
+     for (
+         prefix,
+         (panoptic_root, panoptic_json, semantic_root),
+     ) in _PREDEFINED_SPLITS_COCO_PANOPTIC.items():
+         prefix_instances = prefix[: -len("_panoptic")]
+         instances_meta = MetadataCatalog.get(prefix_instances)
+         image_root, instances_json = instances_meta.image_root, instances_meta.json_file
+         # The "separated" version of COCO panoptic segmentation dataset,
+         # e.g. used by Panoptic FPN
+         register_coco_panoptic_separated(
+             prefix,
+             _get_builtin_metadata("coco_panoptic_separated"),
+             image_root,
+             os.path.join(root, panoptic_root),
+             os.path.join(root, panoptic_json),
+             os.path.join(root, semantic_root),
+             instances_json,
+         )
+         # The "standard" version of COCO panoptic segmentation dataset,
+         # e.g. used by Panoptic-DeepLab
+         register_coco_panoptic(
+             prefix,
+             _get_builtin_metadata("coco_panoptic_standard"),
+             image_root,
+             os.path.join(root, panoptic_root),
+             os.path.join(root, panoptic_json),
+             instances_json,
+         )
+ 
+ 
+ # ==== Predefined datasets and splits for LVIS ==========
+ 
+ 
+ _PREDEFINED_SPLITS_LVIS = {
+     "lvis_v1": {
+         "lvis_v1_train": ("coco/", "lvis/lvis_v1_train.json"),
+         "lvis_v1_val": ("coco/", "lvis/lvis_v1_val.json"),
+         "lvis_v1_test_dev": ("coco/", "lvis/lvis_v1_image_info_test_dev.json"),
+         "lvis_v1_test_challenge": ("coco/", "lvis/lvis_v1_image_info_test_challenge.json"),
+     },
+     "lvis_v0.5": {
+         "lvis_v0.5_train": ("coco/", "lvis/lvis_v0.5_train.json"),
+         "lvis_v0.5_val": ("coco/", "lvis/lvis_v0.5_val.json"),
+         "lvis_v0.5_val_rand_100": ("coco/", "lvis/lvis_v0.5_val_rand_100.json"),
+         "lvis_v0.5_test": ("coco/", "lvis/lvis_v0.5_image_info_test.json"),
+     },
+     "lvis_v0.5_cocofied": {
+         "lvis_v0.5_train_cocofied": ("coco/", "lvis/lvis_v0.5_train_cocofied.json"),
+         "lvis_v0.5_val_cocofied": ("coco/", "lvis/lvis_v0.5_val_cocofied.json"),
+     },
+ }
+ 
+ 
+ def register_all_lvis(root):
+     for dataset_name, splits_per_dataset in _PREDEFINED_SPLITS_LVIS.items():
+         for key, (image_root, json_file) in splits_per_dataset.items():
+             register_lvis_instances(
+                 key,
+                 get_lvis_instances_meta(dataset_name),
+                 os.path.join(root, json_file) if "://" not in json_file else json_file,
+                 os.path.join(root, image_root),
+             )
+ 
+ 
+ # ==== Predefined splits for raw cityscapes images ===========
+ _RAW_CITYSCAPES_SPLITS = {
+     "cityscapes_fine_{task}_train": ("cityscapes/leftImg8bit/train/", "cityscapes/gtFine/train/"),
+     "cityscapes_fine_{task}_val": ("cityscapes/leftImg8bit/val/", "cityscapes/gtFine/val/"),
+     "cityscapes_fine_{task}_test": ("cityscapes/leftImg8bit/test/", "cityscapes/gtFine/test/"),
+ }
+ 
+ 
+ def register_all_cityscapes(root):
+     for key, (image_dir, gt_dir) in _RAW_CITYSCAPES_SPLITS.items():
+         meta = _get_builtin_metadata("cityscapes")
+         image_dir = os.path.join(root, image_dir)
+         gt_dir = os.path.join(root, gt_dir)
+ 
+         inst_key = key.format(task="instance_seg")
+         DatasetCatalog.register(
+             inst_key,
+             lambda x=image_dir, y=gt_dir: load_cityscapes_instances(
+                 x, y, from_json=True, to_polygons=True
+             ),
+         )
+         MetadataCatalog.get(inst_key).set(
+             image_dir=image_dir, gt_dir=gt_dir, evaluator_type="cityscapes_instance", **meta
+         )
+ 
+         sem_key = key.format(task="sem_seg")
+         DatasetCatalog.register(
+             sem_key, lambda x=image_dir, y=gt_dir: load_cityscapes_semantic(x, y)
+         )
+         MetadataCatalog.get(sem_key).set(
+             image_dir=image_dir,
+             gt_dir=gt_dir,
+             evaluator_type="cityscapes_sem_seg",
+             ignore_label=255,
+             **meta,
+         )
+ 
+ 
+ # ==== Predefined splits for PASCAL VOC ===========
+ def register_all_pascal_voc(root):
+     SPLITS = [
+         ("voc_2007_trainval", "VOC2007", "trainval"),
+         ("voc_2007_train", "VOC2007", "train"),
+         ("voc_2007_val", "VOC2007", "val"),
+         ("voc_2007_test", "VOC2007", "test"),
+         ("voc_2012_trainval", "VOC2012", "trainval"),
+         ("voc_2012_train", "VOC2012", "train"),
+         ("voc_2012_val", "VOC2012", "val"),
+     ]
+     for name, dirname, split in SPLITS:
+         year = 2007 if "2007" in name else 2012
+         register_pascal_voc(name, os.path.join(root, dirname), split, year)
+         MetadataCatalog.get(name).evaluator_type = "pascal_voc"
+ 
+ 
+ def register_all_ade20k(root):
+     root = os.path.join(root, "ADEChallengeData2016")
+     for name, dirname in [("train", "training"), ("val", "validation")]:
+         image_dir = os.path.join(root, "images", dirname)
+         gt_dir = os.path.join(root, "annotations_detectron2", dirname)
+         name = f"ade20k_sem_seg_{name}"
+         DatasetCatalog.register(
+             name, lambda x=image_dir, y=gt_dir: load_sem_seg(y, x, gt_ext="png", image_ext="jpg")
+         )
+         MetadataCatalog.get(name).set(
+             stuff_classes=ADE20K_SEM_SEG_CATEGORIES[:],
+             image_root=image_dir,
+             sem_seg_root=gt_dir,
+             evaluator_type="sem_seg",
+             ignore_label=255,
+         )
+ 
+ 
+ # True for open source;
+ # Internally at fb, we register them elsewhere
+ if __name__.endswith(".builtin"):
+     # Assume pre-defined datasets live in `./datasets`.
+     _root = os.path.expanduser(os.getenv("DETECTRON2_DATASETS", "datasets"))
+     register_all_coco(_root)
+     register_all_lvis(_root)
+     register_all_cityscapes(_root)
+     register_all_cityscapes_panoptic(_root)
+     register_all_pascal_voc(_root)
+     register_all_ade20k(_root)
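Note that the builtin registrations above resolve paths under `$DETECTRON2_DATASETS` (default `./datasets`). A custom COCO-format split can be registered the same way (editor's sketch, not part of the commit; the name and paths below are placeholders):

```python
from detectron2.data.datasets import register_coco_instances

register_coco_instances(
    "my_coco_train",                      # name for DatasetCatalog/MetadataCatalog
    {},                                   # extra metadata; may be empty
    "datasets/my_coco/annotations.json",  # COCO-format annotation file
    "datasets/my_coco/images",            # directory containing the images
)
```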
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/builtin_meta.py ADDED
@@ -0,0 +1,350 @@
1
+ # -*- coding: utf-8 -*-
2
+ # Copyright (c) Facebook, Inc. and its affiliates.
3
+
4
+ """
5
+ Note:
6
+ For your custom dataset, there is no need to hard-code metadata anywhere in the code.
7
+ For example, for COCO-format dataset, metadata will be obtained automatically
8
+ when calling `load_coco_json`. For other dataset, metadata may also be obtained in other ways
9
+ during loading.
10
+
11
+ However, we hard-coded metadata for a few common dataset here.
12
+ The only goal is to allow users who don't have these dataset to use pre-trained models.
13
+ Users don't have to download a COCO json (which contains metadata), in order to visualize a
14
+ COCO model (with correct class names and colors).
15
+ """
16
+
17
+
18
+ # All coco categories, together with their nice-looking visualization colors
19
+ # It's from https://github.com/cocodataset/panopticapi/blob/master/panoptic_coco_categories.json
20
+ COCO_CATEGORIES = [
21
+ {"color": [220, 20, 60], "isthing": 1, "id": 1, "name": "person"},
22
+ {"color": [119, 11, 32], "isthing": 1, "id": 2, "name": "bicycle"},
23
+ {"color": [0, 0, 142], "isthing": 1, "id": 3, "name": "car"},
24
+ {"color": [0, 0, 230], "isthing": 1, "id": 4, "name": "motorcycle"},
25
+ {"color": [106, 0, 228], "isthing": 1, "id": 5, "name": "airplane"},
26
+ {"color": [0, 60, 100], "isthing": 1, "id": 6, "name": "bus"},
27
+ {"color": [0, 80, 100], "isthing": 1, "id": 7, "name": "train"},
28
+ {"color": [0, 0, 70], "isthing": 1, "id": 8, "name": "truck"},
29
+ {"color": [0, 0, 192], "isthing": 1, "id": 9, "name": "boat"},
30
+ {"color": [250, 170, 30], "isthing": 1, "id": 10, "name": "traffic light"},
31
+ {"color": [100, 170, 30], "isthing": 1, "id": 11, "name": "fire hydrant"},
32
+ {"color": [220, 220, 0], "isthing": 1, "id": 13, "name": "stop sign"},
33
+ {"color": [175, 116, 175], "isthing": 1, "id": 14, "name": "parking meter"},
34
+ {"color": [250, 0, 30], "isthing": 1, "id": 15, "name": "bench"},
35
+ {"color": [165, 42, 42], "isthing": 1, "id": 16, "name": "bird"},
36
+ {"color": [255, 77, 255], "isthing": 1, "id": 17, "name": "cat"},
37
+ {"color": [0, 226, 252], "isthing": 1, "id": 18, "name": "dog"},
38
+ {"color": [182, 182, 255], "isthing": 1, "id": 19, "name": "horse"},
39
+ {"color": [0, 82, 0], "isthing": 1, "id": 20, "name": "sheep"},
40
+ {"color": [120, 166, 157], "isthing": 1, "id": 21, "name": "cow"},
41
+ {"color": [110, 76, 0], "isthing": 1, "id": 22, "name": "elephant"},
42
+ {"color": [174, 57, 255], "isthing": 1, "id": 23, "name": "bear"},
43
+ {"color": [199, 100, 0], "isthing": 1, "id": 24, "name": "zebra"},
44
+ {"color": [72, 0, 118], "isthing": 1, "id": 25, "name": "giraffe"},
45
+ {"color": [255, 179, 240], "isthing": 1, "id": 27, "name": "backpack"},
46
+ {"color": [0, 125, 92], "isthing": 1, "id": 28, "name": "umbrella"},
47
+ {"color": [209, 0, 151], "isthing": 1, "id": 31, "name": "handbag"},
48
+ {"color": [188, 208, 182], "isthing": 1, "id": 32, "name": "tie"},
49
+ {"color": [0, 220, 176], "isthing": 1, "id": 33, "name": "suitcase"},
50
+ {"color": [255, 99, 164], "isthing": 1, "id": 34, "name": "frisbee"},
51
+ {"color": [92, 0, 73], "isthing": 1, "id": 35, "name": "skis"},
52
+ {"color": [133, 129, 255], "isthing": 1, "id": 36, "name": "snowboard"},
53
+ {"color": [78, 180, 255], "isthing": 1, "id": 37, "name": "sports ball"},
54
+ {"color": [0, 228, 0], "isthing": 1, "id": 38, "name": "kite"},
55
+ {"color": [174, 255, 243], "isthing": 1, "id": 39, "name": "baseball bat"},
56
+ {"color": [45, 89, 255], "isthing": 1, "id": 40, "name": "baseball glove"},
57
+ {"color": [134, 134, 103], "isthing": 1, "id": 41, "name": "skateboard"},
58
+ {"color": [145, 148, 174], "isthing": 1, "id": 42, "name": "surfboard"},
59
+ {"color": [255, 208, 186], "isthing": 1, "id": 43, "name": "tennis racket"},
60
+ {"color": [197, 226, 255], "isthing": 1, "id": 44, "name": "bottle"},
61
+ {"color": [171, 134, 1], "isthing": 1, "id": 46, "name": "wine glass"},
62
+ {"color": [109, 63, 54], "isthing": 1, "id": 47, "name": "cup"},
63
+ {"color": [207, 138, 255], "isthing": 1, "id": 48, "name": "fork"},
64
+ {"color": [151, 0, 95], "isthing": 1, "id": 49, "name": "knife"},
65
+ {"color": [9, 80, 61], "isthing": 1, "id": 50, "name": "spoon"},
66
+ {"color": [84, 105, 51], "isthing": 1, "id": 51, "name": "bowl"},
67
+ {"color": [74, 65, 105], "isthing": 1, "id": 52, "name": "banana"},
68
+ {"color": [166, 196, 102], "isthing": 1, "id": 53, "name": "apple"},
69
+ {"color": [208, 195, 210], "isthing": 1, "id": 54, "name": "sandwich"},
70
+ {"color": [255, 109, 65], "isthing": 1, "id": 55, "name": "orange"},
71
+ {"color": [0, 143, 149], "isthing": 1, "id": 56, "name": "broccoli"},
72
+ {"color": [179, 0, 194], "isthing": 1, "id": 57, "name": "carrot"},
73
+ {"color": [209, 99, 106], "isthing": 1, "id": 58, "name": "hot dog"},
74
+ {"color": [5, 121, 0], "isthing": 1, "id": 59, "name": "pizza"},
75
+ {"color": [227, 255, 205], "isthing": 1, "id": 60, "name": "donut"},
76
+ {"color": [147, 186, 208], "isthing": 1, "id": 61, "name": "cake"},
77
+ {"color": [153, 69, 1], "isthing": 1, "id": 62, "name": "chair"},
78
+ {"color": [3, 95, 161], "isthing": 1, "id": 63, "name": "couch"},
79
+ {"color": [163, 255, 0], "isthing": 1, "id": 64, "name": "potted plant"},
80
+ {"color": [119, 0, 170], "isthing": 1, "id": 65, "name": "bed"},
81
+ {"color": [0, 182, 199], "isthing": 1, "id": 67, "name": "dining table"},
82
+ {"color": [0, 165, 120], "isthing": 1, "id": 70, "name": "toilet"},
83
+ {"color": [183, 130, 88], "isthing": 1, "id": 72, "name": "tv"},
84
+ {"color": [95, 32, 0], "isthing": 1, "id": 73, "name": "laptop"},
85
+ {"color": [130, 114, 135], "isthing": 1, "id": 74, "name": "mouse"},
86
+ {"color": [110, 129, 133], "isthing": 1, "id": 75, "name": "remote"},
87
+ {"color": [166, 74, 118], "isthing": 1, "id": 76, "name": "keyboard"},
88
+ {"color": [219, 142, 185], "isthing": 1, "id": 77, "name": "cell phone"},
89
+ {"color": [79, 210, 114], "isthing": 1, "id": 78, "name": "microwave"},
90
+ {"color": [178, 90, 62], "isthing": 1, "id": 79, "name": "oven"},
91
+ {"color": [65, 70, 15], "isthing": 1, "id": 80, "name": "toaster"},
92
+ {"color": [127, 167, 115], "isthing": 1, "id": 81, "name": "sink"},
93
+ {"color": [59, 105, 106], "isthing": 1, "id": 82, "name": "refrigerator"},
94
+ {"color": [142, 108, 45], "isthing": 1, "id": 84, "name": "book"},
95
+ {"color": [196, 172, 0], "isthing": 1, "id": 85, "name": "clock"},
96
+ {"color": [95, 54, 80], "isthing": 1, "id": 86, "name": "vase"},
97
+ {"color": [128, 76, 255], "isthing": 1, "id": 87, "name": "scissors"},
98
+ {"color": [201, 57, 1], "isthing": 1, "id": 88, "name": "teddy bear"},
99
+ {"color": [246, 0, 122], "isthing": 1, "id": 89, "name": "hair drier"},
100
+ {"color": [191, 162, 208], "isthing": 1, "id": 90, "name": "toothbrush"},
101
+ {"color": [255, 255, 128], "isthing": 0, "id": 92, "name": "banner"},
102
+ {"color": [147, 211, 203], "isthing": 0, "id": 93, "name": "blanket"},
103
+ {"color": [150, 100, 100], "isthing": 0, "id": 95, "name": "bridge"},
104
+ {"color": [168, 171, 172], "isthing": 0, "id": 100, "name": "cardboard"},
105
+ {"color": [146, 112, 198], "isthing": 0, "id": 107, "name": "counter"},
106
+ {"color": [210, 170, 100], "isthing": 0, "id": 109, "name": "curtain"},
107
+ {"color": [92, 136, 89], "isthing": 0, "id": 112, "name": "door-stuff"},
108
+ {"color": [218, 88, 184], "isthing": 0, "id": 118, "name": "floor-wood"},
109
+ {"color": [241, 129, 0], "isthing": 0, "id": 119, "name": "flower"},
110
+ {"color": [217, 17, 255], "isthing": 0, "id": 122, "name": "fruit"},
111
+ {"color": [124, 74, 181], "isthing": 0, "id": 125, "name": "gravel"},
112
+ {"color": [70, 70, 70], "isthing": 0, "id": 128, "name": "house"},
113
+ {"color": [255, 228, 255], "isthing": 0, "id": 130, "name": "light"},
114
+ {"color": [154, 208, 0], "isthing": 0, "id": 133, "name": "mirror-stuff"},
115
+ {"color": [193, 0, 92], "isthing": 0, "id": 138, "name": "net"},
116
+ {"color": [76, 91, 113], "isthing": 0, "id": 141, "name": "pillow"},
117
+ {"color": [255, 180, 195], "isthing": 0, "id": 144, "name": "platform"},
118
+ {"color": [106, 154, 176], "isthing": 0, "id": 145, "name": "playingfield"},
119
+ {"color": [230, 150, 140], "isthing": 0, "id": 147, "name": "railroad"},
120
+ {"color": [60, 143, 255], "isthing": 0, "id": 148, "name": "river"},
121
+ {"color": [128, 64, 128], "isthing": 0, "id": 149, "name": "road"},
122
+ {"color": [92, 82, 55], "isthing": 0, "id": 151, "name": "roof"},
123
+ {"color": [254, 212, 124], "isthing": 0, "id": 154, "name": "sand"},
124
+ {"color": [73, 77, 174], "isthing": 0, "id": 155, "name": "sea"},
125
+ {"color": [255, 160, 98], "isthing": 0, "id": 156, "name": "shelf"},
126
+ {"color": [255, 255, 255], "isthing": 0, "id": 159, "name": "snow"},
127
+ {"color": [104, 84, 109], "isthing": 0, "id": 161, "name": "stairs"},
128
+ {"color": [169, 164, 131], "isthing": 0, "id": 166, "name": "tent"},
129
+ {"color": [225, 199, 255], "isthing": 0, "id": 168, "name": "towel"},
130
+ {"color": [137, 54, 74], "isthing": 0, "id": 171, "name": "wall-brick"},
131
+ {"color": [135, 158, 223], "isthing": 0, "id": 175, "name": "wall-stone"},
132
+ {"color": [7, 246, 231], "isthing": 0, "id": 176, "name": "wall-tile"},
133
+ {"color": [107, 255, 200], "isthing": 0, "id": 177, "name": "wall-wood"},
134
+ {"color": [58, 41, 149], "isthing": 0, "id": 178, "name": "water-other"},
135
+ {"color": [183, 121, 142], "isthing": 0, "id": 180, "name": "window-blind"},
136
+ {"color": [255, 73, 97], "isthing": 0, "id": 181, "name": "window-other"},
137
+ {"color": [107, 142, 35], "isthing": 0, "id": 184, "name": "tree-merged"},
138
+ {"color": [190, 153, 153], "isthing": 0, "id": 185, "name": "fence-merged"},
139
+ {"color": [146, 139, 141], "isthing": 0, "id": 186, "name": "ceiling-merged"},
140
+ {"color": [70, 130, 180], "isthing": 0, "id": 187, "name": "sky-other-merged"},
141
+ {"color": [134, 199, 156], "isthing": 0, "id": 188, "name": "cabinet-merged"},
142
+ {"color": [209, 226, 140], "isthing": 0, "id": 189, "name": "table-merged"},
143
+ {"color": [96, 36, 108], "isthing": 0, "id": 190, "name": "floor-other-merged"},
144
+ {"color": [96, 96, 96], "isthing": 0, "id": 191, "name": "pavement-merged"},
145
+ {"color": [64, 170, 64], "isthing": 0, "id": 192, "name": "mountain-merged"},
146
+ {"color": [152, 251, 152], "isthing": 0, "id": 193, "name": "grass-merged"},
147
+ {"color": [208, 229, 228], "isthing": 0, "id": 194, "name": "dirt-merged"},
148
+ {"color": [206, 186, 171], "isthing": 0, "id": 195, "name": "paper-merged"},
149
+ {"color": [152, 161, 64], "isthing": 0, "id": 196, "name": "food-other-merged"},
150
+ {"color": [116, 112, 0], "isthing": 0, "id": 197, "name": "building-other-merged"},
151
+ {"color": [0, 114, 143], "isthing": 0, "id": 198, "name": "rock-merged"},
152
+ {"color": [102, 102, 156], "isthing": 0, "id": 199, "name": "wall-other-merged"},
153
+ {"color": [250, 141, 255], "isthing": 0, "id": 200, "name": "rug-merged"},
154
+ ]
155
+
156
+ # fmt: off
157
+ COCO_PERSON_KEYPOINT_NAMES = (
158
+ "nose",
159
+ "left_eye", "right_eye",
160
+ "left_ear", "right_ear",
161
+ "left_shoulder", "right_shoulder",
162
+ "left_elbow", "right_elbow",
163
+ "left_wrist", "right_wrist",
164
+ "left_hip", "right_hip",
165
+ "left_knee", "right_knee",
166
+ "left_ankle", "right_ankle",
167
+ )
168
+ # fmt: on
169
+
170
+ # Pairs of keypoints that should be exchanged under horizontal flipping
171
+ COCO_PERSON_KEYPOINT_FLIP_MAP = (
172
+ ("left_eye", "right_eye"),
173
+ ("left_ear", "right_ear"),
174
+ ("left_shoulder", "right_shoulder"),
175
+ ("left_elbow", "right_elbow"),
176
+ ("left_wrist", "right_wrist"),
177
+ ("left_hip", "right_hip"),
178
+ ("left_knee", "right_knee"),
179
+ ("left_ankle", "right_ankle"),
180
+ )
181
+
182
+ # rules for pairs of keypoints to draw a line between, and the line color to use.
183
+ KEYPOINT_CONNECTION_RULES = [
184
+ # face
185
+ ("left_ear", "left_eye", (102, 204, 255)),
186
+ ("right_ear", "right_eye", (51, 153, 255)),
187
+ ("left_eye", "nose", (102, 0, 204)),
188
+ ("nose", "right_eye", (51, 102, 255)),
189
+ # upper-body
190
+ ("left_shoulder", "right_shoulder", (255, 128, 0)),
191
+ ("left_shoulder", "left_elbow", (153, 255, 204)),
192
+ ("right_shoulder", "right_elbow", (128, 229, 255)),
193
+ ("left_elbow", "left_wrist", (153, 255, 153)),
194
+ ("right_elbow", "right_wrist", (102, 255, 224)),
195
+ # lower-body
196
+ ("left_hip", "right_hip", (255, 102, 0)),
197
+ ("left_hip", "left_knee", (255, 255, 77)),
198
+ ("right_hip", "right_knee", (153, 255, 204)),
199
+ ("left_knee", "left_ankle", (191, 255, 128)),
200
+ ("right_knee", "right_ankle", (255, 195, 77)),
201
+ ]
202
+
203
+ # All Cityscapes categories, together with their nice-looking visualization colors
204
+ # It's from https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/helpers/labels.py # noqa
205
+ CITYSCAPES_CATEGORIES = [
206
+ {"color": (128, 64, 128), "isthing": 0, "id": 7, "trainId": 0, "name": "road"},
207
+ {"color": (244, 35, 232), "isthing": 0, "id": 8, "trainId": 1, "name": "sidewalk"},
208
+ {"color": (70, 70, 70), "isthing": 0, "id": 11, "trainId": 2, "name": "building"},
209
+ {"color": (102, 102, 156), "isthing": 0, "id": 12, "trainId": 3, "name": "wall"},
210
+ {"color": (190, 153, 153), "isthing": 0, "id": 13, "trainId": 4, "name": "fence"},
211
+ {"color": (153, 153, 153), "isthing": 0, "id": 17, "trainId": 5, "name": "pole"},
212
+ {"color": (250, 170, 30), "isthing": 0, "id": 19, "trainId": 6, "name": "traffic light"},
213
+ {"color": (220, 220, 0), "isthing": 0, "id": 20, "trainId": 7, "name": "traffic sign"},
214
+ {"color": (107, 142, 35), "isthing": 0, "id": 21, "trainId": 8, "name": "vegetation"},
215
+ {"color": (152, 251, 152), "isthing": 0, "id": 22, "trainId": 9, "name": "terrain"},
216
+ {"color": (70, 130, 180), "isthing": 0, "id": 23, "trainId": 10, "name": "sky"},
217
+ {"color": (220, 20, 60), "isthing": 1, "id": 24, "trainId": 11, "name": "person"},
218
+ {"color": (255, 0, 0), "isthing": 1, "id": 25, "trainId": 12, "name": "rider"},
219
+ {"color": (0, 0, 142), "isthing": 1, "id": 26, "trainId": 13, "name": "car"},
220
+ {"color": (0, 0, 70), "isthing": 1, "id": 27, "trainId": 14, "name": "truck"},
221
+ {"color": (0, 60, 100), "isthing": 1, "id": 28, "trainId": 15, "name": "bus"},
222
+ {"color": (0, 80, 100), "isthing": 1, "id": 31, "trainId": 16, "name": "train"},
223
+ {"color": (0, 0, 230), "isthing": 1, "id": 32, "trainId": 17, "name": "motorcycle"},
224
+ {"color": (119, 11, 32), "isthing": 1, "id": 33, "trainId": 18, "name": "bicycle"},
225
+ ]
226
+
227
+ # fmt: off
228
+ ADE20K_SEM_SEG_CATEGORIES = [
229
+ "wall", "building", "sky", "floor", "tree", "ceiling", "road, route", "bed", "window ", "grass", "cabinet", "sidewalk, pavement", "person", "earth, ground", "door", "table", "mountain, mount", "plant", "curtain", "chair", "car", "water", "painting, picture", "sofa", "shelf", "house", "sea", "mirror", "rug", "field", "armchair", "seat", "fence", "desk", "rock, stone", "wardrobe, closet, press", "lamp", "tub", "rail", "cushion", "base, pedestal, stand", "box", "column, pillar", "signboard, sign", "chest of drawers, chest, bureau, dresser", "counter", "sand", "sink", "skyscraper", "fireplace", "refrigerator, icebox", "grandstand, covered stand", "path", "stairs", "runway", "case, display case, showcase, vitrine", "pool table, billiard table, snooker table", "pillow", "screen door, screen", "stairway, staircase", "river", "bridge, span", "bookcase", "blind, screen", "coffee table", "toilet, can, commode, crapper, pot, potty, stool, throne", "flower", "book", "hill", "bench", "countertop", "stove", "palm, palm tree", "kitchen island", "computer", "swivel chair", "boat", "bar", "arcade machine", "hovel, hut, hutch, shack, shanty", "bus", "towel", "light", "truck", "tower", "chandelier", "awning, sunshade, sunblind", "street lamp", "booth", "tv", "plane", "dirt track", "clothes", "pole", "land, ground, soil", "bannister, banister, balustrade, balusters, handrail", "escalator, moving staircase, moving stairway", "ottoman, pouf, pouffe, puff, hassock", "bottle", "buffet, counter, sideboard", "poster, posting, placard, notice, bill, card", "stage", "van", "ship", "fountain", "conveyer belt, conveyor belt, conveyer, conveyor, transporter", "canopy", "washer, automatic washer, washing machine", "plaything, toy", "pool", "stool", "barrel, cask", "basket, handbasket", "falls", "tent", "bag", "minibike, motorbike", "cradle", "oven", "ball", "food, solid food", "step, stair", "tank, storage tank", "trade name", "microwave", "pot", "animal", "bicycle", "lake", "dishwasher", "screen", "blanket, cover", "sculpture", "hood, exhaust hood", "sconce", "vase", "traffic light", "tray", "trash can", "fan", "pier", "crt screen", "plate", "monitor", "bulletin board", "shower", "radiator", "glass, drinking glass", "clock", "flag", # noqa
230
+ ]
231
+ # After being processed by `prepare_ade20k_sem_seg.py`, id 255 means ignore
232
+ # fmt: on
233
+
234
+
235
+ def _get_coco_instances_meta():
236
+ thing_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 1]
237
+ thing_colors = [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 1]
238
+ assert len(thing_ids) == 80, len(thing_ids)
239
+ # Mapping from the incontiguous COCO category id to an id in [0, 79]
240
+ thing_dataset_id_to_contiguous_id = {k: i for i, k in enumerate(thing_ids)}
241
+ thing_classes = [k["name"] for k in COCO_CATEGORIES if k["isthing"] == 1]
242
+ ret = {
243
+ "thing_dataset_id_to_contiguous_id": thing_dataset_id_to_contiguous_id,
244
+ "thing_classes": thing_classes,
245
+ "thing_colors": thing_colors,
246
+ }
247
+ return ret
248
+
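+ # Usage sketch (editor's addition): with the full COCO_CATEGORIES list above,
+ # dataset category id 1 ("person") maps to contiguous id 0:
+ #
+ #     meta = _get_coco_instances_meta()
+ #     meta["thing_dataset_id_to_contiguous_id"][1]  # -> 0
+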
249
+
250
+ def _get_coco_panoptic_separated_meta():
251
+ """
252
+ Returns metadata for "separated" version of the panoptic segmentation dataset.
253
+ """
254
+ stuff_ids = [k["id"] for k in COCO_CATEGORIES if k["isthing"] == 0]
255
+ assert len(stuff_ids) == 53, len(stuff_ids)
256
+
257
+ # For semantic segmentation, this mapping maps dataset stuff ids to
258
+ # contiguous ids in [0, 53] (used in models); the inverse is used for processing results.
259
+ # The id 0 is mapped to an extra category "thing".
260
+ stuff_dataset_id_to_contiguous_id = {k: i + 1 for i, k in enumerate(stuff_ids)}
261
+ # When converting COCO panoptic annotations to semantic annotations,
262
+ # we label the "thing" category as 0
263
+ stuff_dataset_id_to_contiguous_id[0] = 0
264
+
265
+ # 54 names for COCO stuff categories (including "things")
266
+ stuff_classes = ["things"] + [
267
+ k["name"].replace("-other", "").replace("-merged", "")
268
+ for k in COCO_CATEGORIES
269
+ if k["isthing"] == 0
270
+ ]
271
+
272
+ # NOTE: I randomly picked a color for things
273
+ stuff_colors = [[82, 18, 128]] + [k["color"] for k in COCO_CATEGORIES if k["isthing"] == 0]
274
+ ret = {
275
+ "stuff_dataset_id_to_contiguous_id": stuff_dataset_id_to_contiguous_id,
276
+ "stuff_classes": stuff_classes,
277
+ "stuff_colors": stuff_colors,
278
+ }
279
+ ret.update(_get_coco_instances_meta())
280
+ return ret
281
+
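+ # Usage sketch (editor's addition): contiguous id 0 is the synthetic "things"
+ # class, so real stuff ids map to contiguous ids starting at 1:
+ #
+ #     meta = _get_coco_panoptic_separated_meta()
+ #     meta["stuff_dataset_id_to_contiguous_id"][0]  # -> 0 (synthetic "things")
+ #     meta["stuff_classes"][0]                      # -> "things"
+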
282
+
283
+ def _get_builtin_metadata(dataset_name):
284
+ if dataset_name == "coco":
285
+ return _get_coco_instances_meta()
286
+ if dataset_name == "coco_panoptic_separated":
287
+ return _get_coco_panoptic_separated_meta()
288
+ elif dataset_name == "coco_panoptic_standard":
289
+ meta = {}
290
+ # The following metadata maps contiguous id from [0, #thing categories +
291
+ # #stuff categories) to their names and colors. We have two replicas of the
292
+ # same name and color under "thing_*" and "stuff_*" because the current
293
+ # visualization function in D2 handles thing and stuff classes differently
294
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
295
+ # enable reusing existing visualization functions.
296
+ thing_classes = [k["name"] for k in COCO_CATEGORIES]
297
+ thing_colors = [k["color"] for k in COCO_CATEGORIES]
298
+ stuff_classes = [k["name"] for k in COCO_CATEGORIES]
299
+ stuff_colors = [k["color"] for k in COCO_CATEGORIES]
300
+
301
+ meta["thing_classes"] = thing_classes
302
+ meta["thing_colors"] = thing_colors
303
+ meta["stuff_classes"] = stuff_classes
304
+ meta["stuff_colors"] = stuff_colors
305
+
306
+ # Convert category id for training:
307
+ # category id: like semantic segmentation, it is the class id for each
308
+ # pixel. Since there are some classes not used in evaluation, the category
309
+ # id is not always contiguous and thus we have two sets of category ids:
310
+ # - original category id: category id in the original dataset, mainly
311
+ # used for evaluation.
312
+ # - contiguous category id: [0, #classes), in order to train the linear
313
+ # softmax classifier.
314
+ thing_dataset_id_to_contiguous_id = {}
315
+ stuff_dataset_id_to_contiguous_id = {}
316
+
317
+ for i, cat in enumerate(COCO_CATEGORIES):
318
+ if cat["isthing"]:
319
+ thing_dataset_id_to_contiguous_id[cat["id"]] = i
320
+ else:
321
+ stuff_dataset_id_to_contiguous_id[cat["id"]] = i
322
+
323
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
324
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
325
+
326
+ return meta
327
+ elif dataset_name == "coco_person":
328
+ return {
329
+ "thing_classes": ["person"],
330
+ "keypoint_names": COCO_PERSON_KEYPOINT_NAMES,
331
+ "keypoint_flip_map": COCO_PERSON_KEYPOINT_FLIP_MAP,
332
+ "keypoint_connection_rules": KEYPOINT_CONNECTION_RULES,
333
+ }
334
+ elif dataset_name == "cityscapes":
335
+ # fmt: off
336
+ CITYSCAPES_THING_CLASSES = [
337
+ "person", "rider", "car", "truck",
338
+ "bus", "train", "motorcycle", "bicycle",
339
+ ]
340
+ CITYSCAPES_STUFF_CLASSES = [
341
+ "road", "sidewalk", "building", "wall", "fence", "pole", "traffic light",
342
+ "traffic sign", "vegetation", "terrain", "sky", "person", "rider", "car",
343
+ "truck", "bus", "train", "motorcycle", "bicycle",
344
+ ]
345
+ # fmt: on
346
+ return {
347
+ "thing_classes": CITYSCAPES_THING_CLASSES,
348
+ "stuff_classes": CITYSCAPES_STUFF_CLASSES,
349
+ }
350
+ raise KeyError("No built-in metadata for dataset {}".format(dataset_name))
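+ # Usage sketch (editor's addition):
+ #
+ #     meta = _get_builtin_metadata("coco_person")
+ #     meta["keypoint_names"][0]  # -> "nose"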
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/cityscapes.py ADDED
@@ -0,0 +1,329 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import functools
3
+ import json
4
+ import logging
5
+ import multiprocessing as mp
6
+ import numpy as np
7
+ import os
8
+ from itertools import chain
9
+ import annotator.oneformer.pycocotools.mask as mask_util
10
+ from PIL import Image
11
+
12
+ from annotator.oneformer.detectron2.structures import BoxMode
13
+ from annotator.oneformer.detectron2.utils.comm import get_world_size
14
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
15
+ from annotator.oneformer.detectron2.utils.logger import setup_logger
16
+
17
+ try:
18
+ import cv2 # noqa
19
+ except ImportError:
20
+ # OpenCV is an optional dependency at the moment
21
+ pass
22
+
23
+
24
+ logger = logging.getLogger(__name__)
25
+
26
+
27
+ def _get_cityscapes_files(image_dir, gt_dir):
28
+ files = []
29
+ # scan through the directory
30
+ cities = PathManager.ls(image_dir)
31
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
32
+ for city in cities:
33
+ city_img_dir = os.path.join(image_dir, city)
34
+ city_gt_dir = os.path.join(gt_dir, city)
35
+ for basename in PathManager.ls(city_img_dir):
36
+ image_file = os.path.join(city_img_dir, basename)
37
+
38
+ suffix = "leftImg8bit.png"
39
+ assert basename.endswith(suffix), basename
40
+ basename = basename[: -len(suffix)]
41
+
42
+ instance_file = os.path.join(city_gt_dir, basename + "gtFine_instanceIds.png")
43
+ label_file = os.path.join(city_gt_dir, basename + "gtFine_labelIds.png")
44
+ json_file = os.path.join(city_gt_dir, basename + "gtFine_polygons.json")
45
+
46
+ files.append((image_file, instance_file, label_file, json_file))
47
+ assert len(files), "No images found in {}".format(image_dir)
48
+ for f in files[0]:
49
+ assert PathManager.isfile(f), f
50
+ return files
51
+
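+ # Editor's note: the three ground-truth files are derived from the image
+ # basename by suffix substitution, e.g. (hypothetical frame):
+ #
+ #     aachen/aachen_000000_000019_leftImg8bit.png
+ #     -> aachen/aachen_000000_000019_gtFine_instanceIds.png
+ #     -> aachen/aachen_000000_000019_gtFine_labelIds.png
+ #     -> aachen/aachen_000000_000019_gtFine_polygons.json
+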
52
+
53
+ def load_cityscapes_instances(image_dir, gt_dir, from_json=True, to_polygons=True):
54
+ """
55
+ Args:
56
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
57
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
58
+ from_json (bool): whether to read annotations from the raw json file or the png files.
59
+ to_polygons (bool): whether to represent the segmentation as polygons
60
+ (COCO's format) instead of masks (cityscapes's format).
61
+
62
+ Returns:
63
+ list[dict]: a list of dicts in Detectron2 standard format. (See
64
+ `Using Custom Datasets </tutorials/datasets.html>`_ )
65
+ """
66
+ if from_json:
67
+ assert to_polygons, (
68
+ "Cityscapes's json annotations are in polygon format. "
69
+ "Converting to mask format is not supported now."
70
+ )
71
+ files = _get_cityscapes_files(image_dir, gt_dir)
72
+
73
+ logger.info("Preprocessing cityscapes annotations ...")
74
+ # This is still not fast: all workers will execute duplicate work and will
75
+ # take up to 10m on an 8-GPU server.
76
+ pool = mp.Pool(processes=max(mp.cpu_count() // get_world_size() // 2, 4))
77
+
78
+ ret = pool.map(
79
+ functools.partial(_cityscapes_files_to_dict, from_json=from_json, to_polygons=to_polygons),
80
+ files,
81
+ )
82
+ logger.info("Loaded {} images from {}".format(len(ret), image_dir))
83
+
84
+ # Map cityscapes ids to contiguous ids
85
+ from cityscapesscripts.helpers.labels import labels
86
+
87
+ labels = [l for l in labels if l.hasInstances and not l.ignoreInEval]
88
+ dataset_id_to_contiguous_id = {l.id: idx for idx, l in enumerate(labels)}
89
+ for dict_per_image in ret:
90
+ for anno in dict_per_image["annotations"]:
91
+ anno["category_id"] = dataset_id_to_contiguous_id[anno["category_id"]]
92
+ return ret
93
+
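+ # Usage sketch (editor's addition; paths are hypothetical):
+ #
+ #     dicts = load_cityscapes_instances(
+ #         "datasets/cityscapes/leftImg8bit/train",
+ #         "datasets/cityscapes/gtFine/train",
+ #         from_json=True, to_polygons=True,
+ #     )
+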
94
+
95
+ def load_cityscapes_semantic(image_dir, gt_dir):
96
+ """
97
+ Args:
98
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
99
+ gt_dir (str): path to the raw annotations. e.g., "~/cityscapes/gtFine/train".
100
+
101
+ Returns:
102
+ list[dict]: a list of dict, each has "file_name" and
103
+ "sem_seg_file_name".
104
+ """
105
+ ret = []
106
+ # gt_dir is small and contains many small files, so it makes sense to fetch it to local storage first
107
+ gt_dir = PathManager.get_local_path(gt_dir)
108
+ for image_file, _, label_file, json_file in _get_cityscapes_files(image_dir, gt_dir):
109
+ label_file = label_file.replace("labelIds", "labelTrainIds")
110
+
111
+ with PathManager.open(json_file, "r") as f:
112
+ jsonobj = json.load(f)
113
+ ret.append(
114
+ {
115
+ "file_name": image_file,
116
+ "sem_seg_file_name": label_file,
117
+ "height": jsonobj["imgHeight"],
118
+ "width": jsonobj["imgWidth"],
119
+ }
120
+ )
121
+ assert len(ret), f"No images found in {image_dir}!"
122
+ assert PathManager.isfile(
123
+ ret[0]["sem_seg_file_name"]
124
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
125
+ return ret
126
+
127
+
128
+ def _cityscapes_files_to_dict(files, from_json, to_polygons):
129
+ """
130
+ Parse cityscapes annotation files to an instance segmentation dataset dict.
131
+
132
+ Args:
133
+ files (tuple): consists of (image_file, instance_id_file, label_id_file, json_file)
134
+ from_json (bool): whether to read annotations from the raw json file or the png files.
135
+ to_polygons (bool): whether to represent the segmentation as polygons
136
+ (COCO's format) instead of masks (cityscapes's format).
137
+
138
+ Returns:
139
+ A dict in Detectron2 Dataset format.
140
+ """
141
+ from cityscapesscripts.helpers.labels import id2label, name2label
142
+
143
+ image_file, instance_id_file, _, json_file = files
144
+
145
+ annos = []
146
+
147
+ if from_json:
148
+ from shapely.geometry import MultiPolygon, Polygon
149
+
150
+ with PathManager.open(json_file, "r") as f:
151
+ jsonobj = json.load(f)
152
+ ret = {
153
+ "file_name": image_file,
154
+ "image_id": os.path.basename(image_file),
155
+ "height": jsonobj["imgHeight"],
156
+ "width": jsonobj["imgWidth"],
157
+ }
158
+
159
+ # `polygons_union` contains the union of all valid polygons.
160
+ polygons_union = Polygon()
161
+
162
+ # CityscapesScripts draw the polygons in sequential order
163
+ # and each polygon *overwrites* existing ones. See
164
+ # (https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/preparation/json2instanceImg.py) # noqa
165
+ # We use reverse order, and each polygon *avoids* early ones.
166
+ # This will resolve the polygon overlaps in the same way as CityscapesScripts.
167
+ for obj in jsonobj["objects"][::-1]:
168
+ if "deleted" in obj: # cityscapes data format specific
169
+ continue
170
+ label_name = obj["label"]
171
+
172
+ try:
173
+ label = name2label[label_name]
174
+ except KeyError:
175
+ if label_name.endswith("group"): # crowd area
176
+ label = name2label[label_name[: -len("group")]]
177
+ else:
178
+ raise
179
+ if label.id < 0: # cityscapes data format
180
+ continue
181
+
182
+ # Cityscapes's raw annotations use integer coordinates
183
+ # Therefore +0.5 here
184
+ poly_coord = np.asarray(obj["polygon"], dtype="f4") + 0.5
185
+ # CityscapesScript uses PIL.ImageDraw.polygon to rasterize
186
+ # polygons for evaluation. This function operates in integer space
187
+ # and draws each pixel whose center falls into the polygon.
188
+ # Therefore it draws a polygon which is 0.5 "fatter" in expectation.
189
+ # We therefore dilate the input polygon by 0.5 as our input.
190
+ poly = Polygon(poly_coord).buffer(0.5, resolution=4)
191
+
192
+ if not label.hasInstances or label.ignoreInEval:
193
+ # even if we won't store the polygon, it still contributes to overlap resolution
194
+ polygons_union = polygons_union.union(poly)
195
+ continue
196
+
197
+ # Take non-overlapping part of the polygon
198
+ poly_wo_overlaps = poly.difference(polygons_union)
199
+ if poly_wo_overlaps.is_empty:
200
+ continue
201
+ polygons_union = polygons_union.union(poly)
202
+
203
+ anno = {}
204
+ anno["iscrowd"] = label_name.endswith("group")
205
+ anno["category_id"] = label.id
206
+
207
+ if isinstance(poly_wo_overlaps, Polygon):
208
+ poly_list = [poly_wo_overlaps]
209
+ elif isinstance(poly_wo_overlaps, MultiPolygon):
210
+ poly_list = poly_wo_overlaps.geoms
211
+ else:
212
+ raise NotImplementedError("Unknown geometric structure {}".format(poly_wo_overlaps))
213
+
214
+ poly_coord = []
215
+ for poly_el in poly_list:
216
+ # COCO API can work only with exterior boundaries now, hence we store only them.
217
+ # TODO: store both exterior and interior boundaries once other parts of the
218
+ # codebase support holes in polygons.
219
+ poly_coord.append(list(chain(*poly_el.exterior.coords)))
220
+ anno["segmentation"] = poly_coord
221
+ (xmin, ymin, xmax, ymax) = poly_wo_overlaps.bounds
222
+
223
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
224
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
225
+
226
+ annos.append(anno)
227
+ else:
228
+ # See also the official annotation parsing scripts at
229
+ # https://github.com/mcordts/cityscapesScripts/blob/master/cityscapesscripts/evaluation/instances2dict.py # noqa
230
+ with PathManager.open(instance_id_file, "rb") as f:
231
+ inst_image = np.asarray(Image.open(f), order="F")
232
+ # ids < 24 are stuff labels (filtering them first is about 5% faster)
233
+ flattened_ids = np.unique(inst_image[inst_image >= 24])
234
+
235
+ ret = {
236
+ "file_name": image_file,
237
+ "image_id": os.path.basename(image_file),
238
+ "height": inst_image.shape[0],
239
+ "width": inst_image.shape[1],
240
+ }
241
+
242
+ for instance_id in flattened_ids:
243
+ # For non-crowd annotations, instance_id // 1000 is the label_id
244
+ # Crowd annotations have <1000 instance ids
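+ # (e.g. instance_id 26001 encodes label_id 26, "car", instance 1, while a
+ # bare instance_id of 26 marks a crowd region of the same label)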
245
+ label_id = instance_id // 1000 if instance_id >= 1000 else instance_id
246
+ label = id2label[label_id]
247
+ if not label.hasInstances or label.ignoreInEval:
248
+ continue
249
+
250
+ anno = {}
251
+ anno["iscrowd"] = instance_id < 1000
252
+ anno["category_id"] = label.id
253
+
254
+ mask = np.asarray(inst_image == instance_id, dtype=np.uint8, order="F")
255
+
256
+ inds = np.nonzero(mask)
257
+ ymin, ymax = inds[0].min(), inds[0].max()
258
+ xmin, xmax = inds[1].min(), inds[1].max()
259
+ anno["bbox"] = (xmin, ymin, xmax, ymax)
260
+ if xmax <= xmin or ymax <= ymin:
261
+ continue
262
+ anno["bbox_mode"] = BoxMode.XYXY_ABS
263
+ if to_polygons:
264
+ # This conversion comes from D4809743 and D5171122,
265
+ # when Mask-RCNN was first developed.
266
+ contours = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[
267
+ -2
268
+ ]
269
+ polygons = [c.reshape(-1).tolist() for c in contours if len(c) >= 3]
270
+ # opencv can produce invalid polygons
271
+ if len(polygons) == 0:
272
+ continue
273
+ anno["segmentation"] = polygons
274
+ else:
275
+ anno["segmentation"] = mask_util.encode(mask[:, :, None])[0]
276
+ annos.append(anno)
277
+ ret["annotations"] = annos
278
+ return ret
279
+
280
+
281
+ if __name__ == "__main__":
282
+ """
283
+ Test the cityscapes dataset loader.
284
+
285
+ Usage:
286
+ python -m detectron2.data.datasets.cityscapes \
287
+ cityscapes/leftImg8bit/train cityscapes/gtFine/train
288
+ """
289
+ import argparse
290
+
291
+ parser = argparse.ArgumentParser()
292
+ parser.add_argument("image_dir")
293
+ parser.add_argument("gt_dir")
294
+ parser.add_argument("--type", choices=["instance", "semantic"], default="instance")
295
+ args = parser.parse_args()
296
+ from annotator.oneformer.detectron2.data.catalog import Metadata
297
+ from annotator.oneformer.detectron2.utils.visualizer import Visualizer
298
+ from cityscapesscripts.helpers.labels import labels
299
+
300
+ logger = setup_logger(name=__name__)
301
+
302
+ dirname = "cityscapes-data-vis"
303
+ os.makedirs(dirname, exist_ok=True)
304
+
305
+ if args.type == "instance":
306
+ dicts = load_cityscapes_instances(
307
+ args.image_dir, args.gt_dir, from_json=True, to_polygons=True
308
+ )
309
+ logger.info("Done loading {} samples.".format(len(dicts)))
310
+
311
+ thing_classes = [k.name for k in labels if k.hasInstances and not k.ignoreInEval]
312
+ meta = Metadata().set(thing_classes=thing_classes)
313
+
314
+ else:
315
+ dicts = load_cityscapes_semantic(args.image_dir, args.gt_dir)
316
+ logger.info("Done loading {} samples.".format(len(dicts)))
317
+
318
+ stuff_classes = [k.name for k in labels if k.trainId != 255]
319
+ stuff_colors = [k.color for k in labels if k.trainId != 255]
320
+ meta = Metadata().set(stuff_classes=stuff_classes, stuff_colors=stuff_colors)
321
+
322
+ for d in dicts:
323
+ img = np.array(Image.open(PathManager.open(d["file_name"], "rb")))
324
+ visualizer = Visualizer(img, metadata=meta)
325
+ vis = visualizer.draw_dataset_dict(d)
326
+ # cv2.imshow("a", vis.get_image()[:, :, ::-1])
327
+ # cv2.waitKey()
328
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
329
+ vis.save(fpath)
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/cityscapes_panoptic.py ADDED
@@ -0,0 +1,187 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import json
3
+ import logging
4
+ import os
5
+
6
+ from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
7
+ from annotator.oneformer.detectron2.data.datasets.builtin_meta import CITYSCAPES_CATEGORIES
8
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
9
+
10
+ """
11
+ This file contains functions to register the Cityscapes panoptic dataset to the DatasetCatalog.
12
+ """
13
+
14
+
15
+ logger = logging.getLogger(__name__)
16
+
17
+
18
+ def get_cityscapes_panoptic_files(image_dir, gt_dir, json_info):
19
+ files = []
20
+ # scan through the directory
21
+ cities = PathManager.ls(image_dir)
22
+ logger.info(f"{len(cities)} cities found in '{image_dir}'.")
23
+ image_dict = {}
24
+ for city in cities:
25
+ city_img_dir = os.path.join(image_dir, city)
26
+ for basename in PathManager.ls(city_img_dir):
27
+ image_file = os.path.join(city_img_dir, basename)
28
+
29
+ suffix = "_leftImg8bit.png"
30
+ assert basename.endswith(suffix), basename
31
+ basename = os.path.basename(basename)[: -len(suffix)]
32
+
33
+ image_dict[basename] = image_file
34
+
35
+ for ann in json_info["annotations"]:
36
+ image_file = image_dict.get(ann["image_id"], None)
37
+ assert image_file is not None, "No image {} found for annotation {}".format(
38
+ ann["image_id"], ann["file_name"]
39
+ )
40
+ label_file = os.path.join(gt_dir, ann["file_name"])
41
+ segments_info = ann["segments_info"]
42
+
43
+ files.append((image_file, label_file, segments_info))
44
+
45
+ assert len(files), "No images found in {}".format(image_dir)
46
+ assert PathManager.isfile(files[0][0]), files[0][0]
47
+ assert PathManager.isfile(files[0][1]), files[0][1]
48
+ return files
49
+
50
+
51
+ def load_cityscapes_panoptic(image_dir, gt_dir, gt_json, meta):
52
+ """
53
+ Args:
54
+ image_dir (str): path to the raw dataset. e.g., "~/cityscapes/leftImg8bit/train".
55
+ gt_dir (str): path to the raw annotations. e.g.,
56
+ "~/cityscapes/gtFine/cityscapes_panoptic_train".
57
+ gt_json (str): path to the json file. e.g.,
58
+ "~/cityscapes/gtFine/cityscapes_panoptic_train.json".
59
+ meta (dict): dictionary containing "thing_dataset_id_to_contiguous_id"
60
+ and "stuff_dataset_id_to_contiguous_id" to map category ids to
61
+ contiguous ids for training.
62
+
63
+ Returns:
64
+ list[dict]: a list of dicts in Detectron2 standard format. (See
65
+ `Using Custom Datasets </tutorials/datasets.html>`_ )
66
+ """
67
+
68
+ def _convert_category_id(segment_info, meta):
69
+ if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
70
+ segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
71
+ segment_info["category_id"]
72
+ ]
73
+ else:
74
+ segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
75
+ segment_info["category_id"]
76
+ ]
77
+ return segment_info
78
+
79
+ assert os.path.exists(
80
+ gt_json
81
+ ), "Please run `python cityscapesscripts/preparation/createPanopticImgs.py` to generate label files." # noqa
82
+ with open(gt_json) as f:
83
+ json_info = json.load(f)
84
+ files = get_cityscapes_panoptic_files(image_dir, gt_dir, json_info)
85
+ ret = []
86
+ for image_file, label_file, segments_info in files:
87
+ sem_label_file = (
88
+ image_file.replace("leftImg8bit", "gtFine").split(".")[0] + "_labelTrainIds.png"
89
+ )
90
+ segments_info = [_convert_category_id(x, meta) for x in segments_info]
91
+ ret.append(
92
+ {
93
+ "file_name": image_file,
94
+ "image_id": "_".join(
95
+ os.path.splitext(os.path.basename(image_file))[0].split("_")[:3]
96
+ ),
97
+ "sem_seg_file_name": sem_label_file,
98
+ "pan_seg_file_name": label_file,
99
+ "segments_info": segments_info,
100
+ }
101
+ )
102
+ assert len(ret), f"No images found in {image_dir}!"
103
+ assert PathManager.isfile(
104
+ ret[0]["sem_seg_file_name"]
105
+ ), "Please generate labelTrainIds.png with cityscapesscripts/preparation/createTrainIdLabelImgs.py" # noqa
106
+ assert PathManager.isfile(
107
+ ret[0]["pan_seg_file_name"]
108
+ ), "Please generate panoptic annotation with python cityscapesscripts/preparation/createPanopticImgs.py" # noqa
109
+ return ret
110
+
111
+
112
+ _RAW_CITYSCAPES_PANOPTIC_SPLITS = {
113
+ "cityscapes_fine_panoptic_train": (
114
+ "cityscapes/leftImg8bit/train",
115
+ "cityscapes/gtFine/cityscapes_panoptic_train",
116
+ "cityscapes/gtFine/cityscapes_panoptic_train.json",
117
+ ),
118
+ "cityscapes_fine_panoptic_val": (
119
+ "cityscapes/leftImg8bit/val",
120
+ "cityscapes/gtFine/cityscapes_panoptic_val",
121
+ "cityscapes/gtFine/cityscapes_panoptic_val.json",
122
+ ),
123
+ # "cityscapes_fine_panoptic_test": not supported yet
124
+ }
125
+
126
+
127
+ def register_all_cityscapes_panoptic(root):
128
+ meta = {}
129
+ # The following metadata maps contiguous id from [0, #thing categories +
130
+ # #stuff categories) to their names and colors. We have two replicas of the
131
+ # same name and color under "thing_*" and "stuff_*" because the current
132
+ # visualization function in D2 handles thing and stuff classes differently
133
+ # due to some heuristic used in Panoptic FPN. We keep the same naming to
134
+ # enable reusing existing visualization functions.
135
+ thing_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
136
+ thing_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
137
+ stuff_classes = [k["name"] for k in CITYSCAPES_CATEGORIES]
138
+ stuff_colors = [k["color"] for k in CITYSCAPES_CATEGORIES]
139
+
140
+ meta["thing_classes"] = thing_classes
141
+ meta["thing_colors"] = thing_colors
142
+ meta["stuff_classes"] = stuff_classes
143
+ meta["stuff_colors"] = stuff_colors
144
+
145
+ # There are three types of ids in cityscapes panoptic segmentation:
146
+ # (1) category id: like semantic segmentation, it is the class id for each
147
+ # pixel. Since there are some classes not used in evaluation, the category
148
+ # id is not always contiguous and thus we have two sets of category ids:
149
+ # - original category id: category id in the original dataset, mainly
150
+ # used for evaluation.
151
+ # - contiguous category id: [0, #classes), in order to train the classifier
152
+ # (2) instance id: this id is used to differentiate different instances from
153
+ # the same category. For "stuff" classes, the instance id is always 0; for
154
+ # "thing" classes, the instance id starts from 1 and 0 is reserved for
155
+ # ignored instances (e.g. crowd annotation).
156
+ # (3) panoptic id: this is the compact id that encodes both category and
157
+ # instance id by: category_id * 1000 + instance_id.
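+ # (e.g. panoptic id 26001 decodes to category_id 26001 // 1000 == 26 and
+ # instance_id 26001 % 1000 == 1)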
158
+ thing_dataset_id_to_contiguous_id = {}
159
+ stuff_dataset_id_to_contiguous_id = {}
160
+
161
+ for k in CITYSCAPES_CATEGORIES:
162
+ if k["isthing"] == 1:
163
+ thing_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
164
+ else:
165
+ stuff_dataset_id_to_contiguous_id[k["id"]] = k["trainId"]
166
+
167
+ meta["thing_dataset_id_to_contiguous_id"] = thing_dataset_id_to_contiguous_id
168
+ meta["stuff_dataset_id_to_contiguous_id"] = stuff_dataset_id_to_contiguous_id
169
+
170
+ for key, (image_dir, gt_dir, gt_json) in _RAW_CITYSCAPES_PANOPTIC_SPLITS.items():
171
+ image_dir = os.path.join(root, image_dir)
172
+ gt_dir = os.path.join(root, gt_dir)
173
+ gt_json = os.path.join(root, gt_json)
174
+
175
+ DatasetCatalog.register(
176
+ key, lambda x=image_dir, y=gt_dir, z=gt_json: load_cityscapes_panoptic(x, y, z, meta)
177
+ )
178
+ MetadataCatalog.get(key).set(
179
+ panoptic_root=gt_dir,
180
+ image_root=image_dir,
181
+ panoptic_json=gt_json,
182
+ gt_dir=gt_dir.replace("cityscapes_panoptic_", ""),
183
+ evaluator_type="cityscapes_panoptic_seg",
184
+ ignore_label=255,
185
+ label_divisor=1000,
186
+ **meta,
187
+ )
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/coco.py ADDED
@@ -0,0 +1,539 @@
1
+ # Copyright (c) Facebook, Inc. and its affiliates.
2
+ import contextlib
3
+ import datetime
4
+ import io
5
+ import json
6
+ import logging
7
+ import numpy as np
8
+ import os
9
+ import shutil
10
+ import annotator.oneformer.pycocotools.mask as mask_util
11
+ from fvcore.common.timer import Timer
12
+ from iopath.common.file_io import file_lock
13
+ from PIL import Image
14
+
15
+ from annotator.oneformer.detectron2.structures import Boxes, BoxMode, PolygonMasks, RotatedBoxes
16
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
17
+
18
+ from .. import DatasetCatalog, MetadataCatalog
19
+
20
+ """
21
+ This file contains functions to parse COCO-format annotations into dicts in "Detectron2 format".
22
+ """
23
+
24
+
25
+ logger = logging.getLogger(__name__)
26
+
27
+ __all__ = ["load_coco_json", "load_sem_seg", "convert_to_coco_json", "register_coco_instances"]
28
+
29
+
30
+ def load_coco_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
31
+ """
32
+ Load a json file with COCO's instances annotation format.
33
+ Currently supports instance detection, instance segmentation,
34
+ and person keypoints annotations.
35
+
36
+ Args:
37
+ json_file (str): full path to the json file in COCO instances annotation format.
38
+ image_root (str or path-like): the directory where the images in this json file exist.
39
+ dataset_name (str or None): the name of the dataset (e.g., coco_2017_train).
40
+ When provided, this function will also do the following:
41
+
42
+ * Put "thing_classes" into the metadata associated with this dataset.
43
+ * Map the category ids into a contiguous range (needed by standard dataset format),
44
+ and add "thing_dataset_id_to_contiguous_id" to the metadata associated
45
+ with this dataset.
46
+
47
+ This option should usually be provided, unless users need to load
48
+ the original json content and apply more processing manually.
49
+ extra_annotation_keys (list[str]): list of per-annotation keys that should also be
50
+ loaded into the dataset dict (besides "iscrowd", "bbox", "keypoints",
51
+ "category_id", "segmentation"). The values for these keys will be returned as-is.
52
+ For example, the densepose annotations are loaded in this way.
53
+
54
+ Returns:
55
+ list[dict]: a list of dicts in Detectron2 standard dataset dicts format (See
56
+ `Using Custom Datasets </tutorials/datasets.html>`_ ) when `dataset_name` is not None.
57
+ If `dataset_name` is None, the returned `category_ids` may be
58
+ incontiguous and may not conform to the Detectron2 standard format.
59
+
60
+ Notes:
61
+ 1. This function does not read the image files.
62
+ The results do not have the "image" field.
63
+ """
64
+ from annotator.oneformer.pycocotools.coco import COCO
65
+
66
+ timer = Timer()
67
+ json_file = PathManager.get_local_path(json_file)
68
+ with contextlib.redirect_stdout(io.StringIO()):
69
+ coco_api = COCO(json_file)
70
+ if timer.seconds() > 1:
71
+ logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
72
+
73
+ id_map = None
74
+ if dataset_name is not None:
75
+ meta = MetadataCatalog.get(dataset_name)
76
+ cat_ids = sorted(coco_api.getCatIds())
77
+ cats = coco_api.loadCats(cat_ids)
78
+ # The categories in a custom json file may not be sorted.
79
+ thing_classes = [c["name"] for c in sorted(cats, key=lambda x: x["id"])]
80
+ meta.thing_classes = thing_classes
81
+
82
+ # In COCO, certain category ids are artificially removed,
83
+ # and by convention they are always ignored.
84
+ # We deal with COCO's id issue and translate
85
+ # the category ids to contiguous ids in [0, 80).
86
+
87
+ # It works by looking at the "categories" field in the json, therefore
88
+ # if users' own json also have incontiguous ids, we'll
89
+ # apply this mapping as well but print a warning.
90
+ if not (min(cat_ids) == 1 and max(cat_ids) == len(cat_ids)):
91
+ if "coco" not in dataset_name:
92
+ logger.warning(
93
+ """
94
+ Category ids in annotations are not in [1, #categories]! We'll apply a mapping for you.
95
+ """
96
+ )
97
+ id_map = {v: i for i, v in enumerate(cat_ids)}
98
+ meta.thing_dataset_id_to_contiguous_id = id_map
99
+
100
+ # sort indices for reproducible results
101
+ img_ids = sorted(coco_api.imgs.keys())
102
+ # imgs is a list of dicts, each looks something like:
103
+ # {'license': 4,
104
+ # 'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
105
+ # 'file_name': 'COCO_val2014_000000001268.jpg',
106
+ # 'height': 427,
107
+ # 'width': 640,
108
+ # 'date_captured': '2013-11-17 05:57:24',
109
+ # 'id': 1268}
110
+ imgs = coco_api.loadImgs(img_ids)
111
+ # anns is a list[list[dict]], where each dict is an annotation
112
+ # record for an object. The inner list enumerates the objects in an image
113
+ # and the outer list enumerates over images. Example of anns[0]:
114
+ # [{'segmentation': [[192.81,
115
+ # 247.09,
116
+ # ...
117
+ # 219.03,
118
+ # 249.06]],
119
+ # 'area': 1035.749,
120
+ # 'iscrowd': 0,
121
+ # 'image_id': 1268,
122
+ # 'bbox': [192.81, 224.8, 74.73, 33.43],
123
+ # 'category_id': 16,
124
+ # 'id': 42986},
125
+ # ...]
126
+ anns = [coco_api.imgToAnns[img_id] for img_id in img_ids]
127
+ total_num_valid_anns = sum([len(x) for x in anns])
128
+ total_num_anns = len(coco_api.anns)
129
+ if total_num_valid_anns < total_num_anns:
130
+ logger.warning(
131
+ f"{json_file} contains {total_num_anns} annotations, but only "
132
+ f"{total_num_valid_anns} of them match to images in the file."
133
+ )
134
+
135
+ if "minival" not in json_file:
136
+ # The popular valminusminival & minival annotations for COCO2014 contain this bug.
137
+ # However the ratio of buggy annotations there is tiny and does not affect accuracy.
138
+ # Therefore we explicitly white-list them.
139
+ ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
140
+ assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique!".format(
141
+ json_file
142
+ )
143
+
144
+ imgs_anns = list(zip(imgs, anns))
145
+ logger.info("Loaded {} images in COCO format from {}".format(len(imgs_anns), json_file))
146
+
147
+ dataset_dicts = []
148
+
149
+ ann_keys = ["iscrowd", "bbox", "keypoints", "category_id"] + (extra_annotation_keys or [])
150
+
151
+ num_instances_without_valid_segmentation = 0
152
+
153
+ for (img_dict, anno_dict_list) in imgs_anns:
154
+ record = {}
155
+ record["file_name"] = os.path.join(image_root, img_dict["file_name"])
156
+ record["height"] = img_dict["height"]
157
+ record["width"] = img_dict["width"]
158
+ image_id = record["image_id"] = img_dict["id"]
159
+
160
+ objs = []
161
+ for anno in anno_dict_list:
162
+ # Check that the image_id in this annotation is the same as
163
+ # the image_id we're looking at.
164
+ # This fails only when the data parsing logic or the annotation file is buggy.
165
+
166
+ # The original COCO valminusminival2014 & minival2014 annotation files
167
+ # actually contains bugs that, together with certain ways of using COCO API,
168
+ # can trigger this assertion.
169
+ assert anno["image_id"] == image_id
170
+
171
+ assert anno.get("ignore", 0) == 0, '"ignore" in COCO json file is not supported.'
172
+
173
+ obj = {key: anno[key] for key in ann_keys if key in anno}
174
+ if "bbox" in obj and len(obj["bbox"]) == 0:
175
+ raise ValueError(
176
+ f"One annotation of image {image_id} contains empty 'bbox' value! "
177
+ "This json does not have valid COCO format."
178
+ )
179
+
180
+ segm = anno.get("segmentation", None)
181
+ if segm: # either list[list[float]] or dict(RLE)
182
+ if isinstance(segm, dict):
183
+ if isinstance(segm["counts"], list):
184
+ # convert to compressed RLE
185
+ segm = mask_util.frPyObjects(segm, *segm["size"])
186
+ else:
187
+ # filter out invalid polygons (< 3 points)
188
+ segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
189
+ if len(segm) == 0:
190
+ num_instances_without_valid_segmentation += 1
191
+ continue # ignore this instance
192
+ obj["segmentation"] = segm
193
+
194
+ keypts = anno.get("keypoints", None)
195
+ if keypts: # list[int]
196
+ for idx, v in enumerate(keypts):
197
+ if idx % 3 != 2:
198
+ # COCO's segmentation coordinates are floating points in [0, H or W],
199
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
200
+ # Therefore we assume the coordinates are "pixel indices" and
201
+ # add 0.5 to convert to floating point coordinates.
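+ # (e.g. an annotated keypoint at pixel index x == 10 becomes x == 10.5)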
202
+ keypts[idx] = v + 0.5
203
+ obj["keypoints"] = keypts
204
+
205
+ obj["bbox_mode"] = BoxMode.XYWH_ABS
206
+ if id_map:
207
+ annotation_category_id = obj["category_id"]
208
+ try:
209
+ obj["category_id"] = id_map[annotation_category_id]
210
+ except KeyError as e:
211
+ raise KeyError(
212
+ f"Encountered category_id={annotation_category_id} "
213
+ "but this id does not exist in 'categories' of the json file."
214
+ ) from e
215
+ objs.append(obj)
216
+ record["annotations"] = objs
217
+ dataset_dicts.append(record)
218
+
219
+ if num_instances_without_valid_segmentation > 0:
220
+ logger.warning(
221
+ "Filtered out {} instances without valid segmentation. ".format(
222
+ num_instances_without_valid_segmentation
223
+ )
224
+ + "There might be issues in your dataset generation process. Please "
225
+ "check https://detectron2.readthedocs.io/en/latest/tutorials/datasets.html carefully"
226
+ )
227
+ return dataset_dicts
228
+
229
+
230
+ def load_sem_seg(gt_root, image_root, gt_ext="png", image_ext="jpg"):
231
+ """
232
+ Load semantic segmentation datasets. All files under "gt_root" with "gt_ext" extension are
233
+ treated as ground truth annotations and all files under "image_root" with "image_ext" extension
234
+ as input images. Ground truth and input images are matched using file paths relative to
235
+ "gt_root" and "image_root" respectively without taking into account file extensions.
236
+ This works for COCO as well as some other datasets.
237
+
238
+ Args:
239
+ gt_root (str): full path to ground truth semantic segmentation files. Semantic segmentation
240
+ annotations are stored as images with integer values in pixels that represent
241
+ corresponding semantic labels.
242
+ image_root (str): the directory where the input images are.
243
+ gt_ext (str): file extension for ground truth annotations.
244
+ image_ext (str): file extension for input images.
245
+
246
+ Returns:
247
+ list[dict]:
248
+ a list of dicts in detectron2 standard format without instance-level
249
+ annotation.
250
+
251
+ Notes:
252
+ 1. This function does not read the image and ground truth files.
253
+ The results do not have the "image" and "sem_seg" fields.
254
+ """
255
+
256
+ # We match input images with ground truth based on their relative filepaths (without file
257
+ # extensions) starting from 'image_root' and 'gt_root' respectively.
258
+ def file2id(folder_path, file_path):
259
+ # extract relative path starting from `folder_path`
260
+ image_id = os.path.normpath(os.path.relpath(file_path, start=folder_path))
261
+ # remove file extension
262
+ image_id = os.path.splitext(image_id)[0]
263
+ return image_id
264
+
265
+ input_files = sorted(
266
+ (os.path.join(image_root, f) for f in PathManager.ls(image_root) if f.endswith(image_ext)),
267
+ key=lambda file_path: file2id(image_root, file_path),
268
+ )
269
+ gt_files = sorted(
270
+ (os.path.join(gt_root, f) for f in PathManager.ls(gt_root) if f.endswith(gt_ext)),
271
+ key=lambda file_path: file2id(gt_root, file_path),
272
+ )
273
+
274
+ assert len(gt_files) > 0, "No annotations found in {}.".format(gt_root)
275
+
276
+ # Use the intersection, so that val2017_100 annotations can run smoothly with val2017 images
277
+ if len(input_files) != len(gt_files):
278
+ logger.warn(
279
+ "Directory {} and {} has {} and {} files, respectively.".format(
280
+ image_root, gt_root, len(input_files), len(gt_files)
281
+ )
282
+ )
283
+ input_basenames = [os.path.basename(f)[: -len(image_ext)] for f in input_files]
284
+ gt_basenames = [os.path.basename(f)[: -len(gt_ext)] for f in gt_files]
285
+ intersect = list(set(input_basenames) & set(gt_basenames))
286
+ # sort, otherwise each worker may obtain a list[dict] in different order
287
+ intersect = sorted(intersect)
288
+ logger.warn("Will use their intersection of {} files.".format(len(intersect)))
289
+ input_files = [os.path.join(image_root, f + image_ext) for f in intersect]
290
+ gt_files = [os.path.join(gt_root, f + gt_ext) for f in intersect]
291
+
292
+ logger.info(
293
+ "Loaded {} images with semantic segmentation from {}".format(len(input_files), image_root)
294
+ )
295
+
296
+ dataset_dicts = []
297
+ for (img_path, gt_path) in zip(input_files, gt_files):
298
+ record = {}
299
+ record["file_name"] = img_path
300
+ record["sem_seg_file_name"] = gt_path
301
+ dataset_dicts.append(record)
302
+
303
+ return dataset_dicts
304
+
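+ # Usage sketch (editor's addition; paths are hypothetical). Images and labels
+ # are paired by relative path, so "images/a/b.jpg" matches "annotations/a/b.png":
+ #
+ #     dicts = load_sem_seg("datasets/my/annotations", "datasets/my/images")
+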
305
+
306
+ def convert_to_coco_dict(dataset_name):
307
+ """
308
+ Convert an instance detection/segmentation or keypoint detection dataset
309
+ in detectron2's standard format into COCO json format.
310
+
311
+ Generic dataset description can be found here:
312
+ https://detectron2.readthedocs.io/tutorials/datasets.html#register-a-dataset
313
+
314
+ COCO data format description can be found here:
315
+ http://cocodataset.org/#format-data
316
+
317
+ Args:
318
+ dataset_name (str):
319
+ name of the source dataset
320
+ Must be registered in DatasetCatalog and in detectron2's standard format.
321
+ Must have corresponding metadata "thing_classes"
322
+ Returns:
323
+ coco_dict: serializable dict in COCO json format
324
+ """
325
+
326
+ dataset_dicts = DatasetCatalog.get(dataset_name)
327
+ metadata = MetadataCatalog.get(dataset_name)
328
+
329
+ # unmap the category mapping ids for COCO
330
+ if hasattr(metadata, "thing_dataset_id_to_contiguous_id"):
331
+ reverse_id_mapping = {v: k for k, v in metadata.thing_dataset_id_to_contiguous_id.items()}
332
+ reverse_id_mapper = lambda contiguous_id: reverse_id_mapping[contiguous_id] # noqa
333
+ else:
334
+ reverse_id_mapper = lambda contiguous_id: contiguous_id # noqa
335
+
336
+ categories = [
337
+ {"id": reverse_id_mapper(id), "name": name}
338
+ for id, name in enumerate(metadata.thing_classes)
339
+ ]
340
+
341
+ logger.info("Converting dataset dicts into COCO format")
342
+ coco_images = []
343
+ coco_annotations = []
344
+
345
+ for image_id, image_dict in enumerate(dataset_dicts):
346
+ coco_image = {
347
+ "id": image_dict.get("image_id", image_id),
348
+ "width": int(image_dict["width"]),
349
+ "height": int(image_dict["height"]),
350
+ "file_name": str(image_dict["file_name"]),
351
+ }
352
+ coco_images.append(coco_image)
353
+
354
+ anns_per_image = image_dict.get("annotations", [])
355
+ for annotation in anns_per_image:
356
+ # create a new dict with only COCO fields
357
+ coco_annotation = {}
358
+
359
+ # COCO requirement: XYWH box format for axis-align and XYWHA for rotated
360
+ bbox = annotation["bbox"]
361
+ if isinstance(bbox, np.ndarray):
362
+ if bbox.ndim != 1:
363
+ raise ValueError(f"bbox has to be 1-dimensional. Got shape={bbox.shape}.")
364
+ bbox = bbox.tolist()
365
+ if len(bbox) not in [4, 5]:
366
+ raise ValueError(f"bbox has to has length 4 or 5. Got {bbox}.")
367
+ from_bbox_mode = annotation["bbox_mode"]
368
+ to_bbox_mode = BoxMode.XYWH_ABS if len(bbox) == 4 else BoxMode.XYWHA_ABS
369
+ bbox = BoxMode.convert(bbox, from_bbox_mode, to_bbox_mode)
370
+
371
+ # COCO requirement: instance area
372
+ if "segmentation" in annotation:
373
+ # Computing areas for instances by counting the pixels
374
+ segmentation = annotation["segmentation"]
375
+ # TODO: check segmentation type: RLE, BinaryMask or Polygon
376
+ if isinstance(segmentation, list):
377
+ polygons = PolygonMasks([segmentation])
378
+ area = polygons.area()[0].item()
379
+ elif isinstance(segmentation, dict): # RLE
380
+ area = mask_util.area(segmentation).item()
381
+ else:
382
+ raise TypeError(f"Unknown segmentation type {type(segmentation)}!")
383
+ else:
384
+ # Computing areas using bounding boxes
385
+ if to_bbox_mode == BoxMode.XYWH_ABS:
386
+ bbox_xy = BoxMode.convert(bbox, to_bbox_mode, BoxMode.XYXY_ABS)
387
+ area = Boxes([bbox_xy]).area()[0].item()
388
+ else:
389
+ area = RotatedBoxes([bbox]).area()[0].item()
390
+
391
+ if "keypoints" in annotation:
392
+ keypoints = annotation["keypoints"] # list[int]
393
+ for idx, v in enumerate(keypoints):
394
+ if idx % 3 != 2:
395
+ # COCO's segmentation coordinates are floating points in [0, H or W],
396
+ # but keypoint coordinates are integers in [0, H-1 or W-1]
397
+ # For COCO format consistency we subtract 0.5
398
+ # https://github.com/facebookresearch/detectron2/pull/175#issuecomment-551202163
399
+ keypoints[idx] = v - 0.5
400
+ if "num_keypoints" in annotation:
401
+ num_keypoints = annotation["num_keypoints"]
402
+ else:
403
+ num_keypoints = sum(kp > 0 for kp in keypoints[2::3])
404
+
405
+ # COCO requirement:
406
+ # linking annotations to images
407
+ # "id" field must start with 1
408
+ coco_annotation["id"] = len(coco_annotations) + 1
409
+ coco_annotation["image_id"] = coco_image["id"]
410
+ coco_annotation["bbox"] = [round(float(x), 3) for x in bbox]
411
+ coco_annotation["area"] = float(area)
412
+ coco_annotation["iscrowd"] = int(annotation.get("iscrowd", 0))
413
+ coco_annotation["category_id"] = int(reverse_id_mapper(annotation["category_id"]))
414
+
415
+ # Add optional fields
416
+ if "keypoints" in annotation:
417
+ coco_annotation["keypoints"] = keypoints
418
+ coco_annotation["num_keypoints"] = num_keypoints
419
+
420
+ if "segmentation" in annotation:
421
+ seg = coco_annotation["segmentation"] = annotation["segmentation"]
422
+ if isinstance(seg, dict): # RLE
423
+ counts = seg["counts"]
424
+ if not isinstance(counts, str):
425
+ # make it json-serializable
426
+ seg["counts"] = counts.decode("ascii")
427
+
428
+ coco_annotations.append(coco_annotation)
429
+
430
+ logger.info(
431
+ "Conversion finished, "
432
+ f"#images: {len(coco_images)}, #annotations: {len(coco_annotations)}"
433
+ )
434
+
435
+ info = {
436
+ "date_created": str(datetime.datetime.now()),
437
+ "description": "Automatically generated COCO json file for Detectron2.",
438
+ }
439
+ coco_dict = {"info": info, "images": coco_images, "categories": categories, "licenses": None}
440
+ if len(coco_annotations) > 0:
441
+ coco_dict["annotations"] = coco_annotations
442
+ return coco_dict
443
+
444
+
445
+ def convert_to_coco_json(dataset_name, output_file, allow_cached=True):
446
+ """
447
+ Converts dataset into COCO format and saves it to a json file.
448
+ dataset_name must be registered in DatasetCatalog and in detectron2's standard format.
449
+
450
+ Args:
451
+ dataset_name:
452
+ reference from the config file to the catalogs
453
+ must be registered in DatasetCatalog and in detectron2's standard format
454
+ output_file: path of json file that will be saved to
455
+ allow_cached: if json file is already present then skip conversion
456
+ """
457
+
458
+ # TODO: The dataset or the conversion script *may* change,
459
+ # a checksum would be useful for validating the cached data
460
+
461
+ PathManager.mkdirs(os.path.dirname(output_file))
462
+ with file_lock(output_file):
463
+ if PathManager.exists(output_file) and allow_cached:
464
+ logger.warning(
465
+ f"Using previously cached COCO format annotations at '{output_file}'. "
466
+ "You need to clear the cache file if your dataset has been modified."
467
+ )
468
+ else:
469
+ logger.info(f"Converting annotations of dataset '{dataset_name}' to COCO format ...)")
470
+ coco_dict = convert_to_coco_dict(dataset_name)
471
+
472
+ logger.info(f"Caching COCO format annotations at '{output_file}' ...")
473
+ tmp_file = output_file + ".tmp"
474
+ with PathManager.open(tmp_file, "w") as f:
475
+ json.dump(coco_dict, f)
476
+ shutil.move(tmp_file, output_file)
477
+
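+ # Usage sketch (editor's addition; name and path are hypothetical):
+ #
+ #     convert_to_coco_json("my_dataset_train", "output/my_dataset_train_coco.json")
+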
478
+
479
+ def register_coco_instances(name, metadata, json_file, image_root):
480
+ """
481
+ Register a dataset in COCO's json annotation format for
482
+ instance detection, instance segmentation and keypoint detection.
483
+ (i.e., Type 1 and 2 in http://cocodataset.org/#format-data.
484
+ `instances*.json` and `person_keypoints*.json` in the dataset).
485
+
486
+ This is an example of how to register a new dataset.
487
+ You can do something similar to this function, to register new datasets.
488
+
489
+ Args:
490
+ name (str): the name that identifies a dataset, e.g. "coco_2014_train".
491
+ metadata (dict): extra metadata associated with this dataset. You can
492
+ leave it as an empty dict.
493
+ json_file (str): path to the json instance annotation file.
494
+ image_root (str or path-like): directory which contains all the images.
495
+ """
496
+ assert isinstance(name, str), name
497
+ assert isinstance(json_file, (str, os.PathLike)), json_file
498
+ assert isinstance(image_root, (str, os.PathLike)), image_root
499
+ # 1. register a function which returns dicts
500
+ DatasetCatalog.register(name, lambda: load_coco_json(json_file, image_root, name))
501
+
502
+ # 2. Optionally, add metadata about this dataset,
503
+ # since they might be useful in evaluation, visualization or logging
504
+ MetadataCatalog.get(name).set(
505
+ json_file=json_file, image_root=image_root, evaluator_type="coco", **metadata
506
+ )
507
+
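+ # Usage sketch (editor's addition; paths are hypothetical):
+ #
+ #     register_coco_instances(
+ #         "my_dataset_train", {},
+ #         "datasets/my/annotations.json", "datasets/my/images",
+ #     )
+ #     dicts = DatasetCatalog.get("my_dataset_train")
+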
508
+
509
+ if __name__ == "__main__":
510
+ """
511
+ Test the COCO json dataset loader.
512
+
513
+ Usage:
514
+ python -m detectron2.data.datasets.coco \
515
+ path/to/json path/to/image_root dataset_name
516
+
517
+ "dataset_name" can be "coco_2014_minival_100", or other
518
+ pre-registered ones
519
+ """
520
+ from annotator.oneformer.detectron2.utils.logger import setup_logger
521
+ from annotator.oneformer.detectron2.utils.visualizer import Visualizer
522
+ import annotator.oneformer.detectron2.data.datasets # noqa # add pre-defined metadata
523
+ import sys
524
+
525
+ logger = setup_logger(name=__name__)
526
+ assert sys.argv[3] in DatasetCatalog.list()
527
+ meta = MetadataCatalog.get(sys.argv[3])
528
+
529
+ dicts = load_coco_json(sys.argv[1], sys.argv[2], sys.argv[3])
530
+ logger.info("Done loading {} samples.".format(len(dicts)))
531
+
532
+ dirname = "coco-data-vis"
533
+ os.makedirs(dirname, exist_ok=True)
534
+ for d in dicts:
535
+ img = np.array(Image.open(d["file_name"]))
536
+ visualizer = Visualizer(img, metadata=meta)
537
+ vis = visualizer.draw_dataset_dict(d)
538
+ fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
539
+ vis.save(fpath)
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/coco_panoptic.py ADDED
@@ -0,0 +1,228 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import copy
+ import json
+ import os
+
+ from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+
+ from .coco import load_coco_json, load_sem_seg
+
+ __all__ = ["register_coco_panoptic", "register_coco_panoptic_separated"]
+
+
+ def load_coco_panoptic_json(json_file, image_dir, gt_dir, meta):
+     """
+     Args:
+         image_dir (str): path to the raw dataset. e.g., "~/coco/train2017".
+         gt_dir (str): path to the raw annotations. e.g., "~/coco/panoptic_train2017".
+         json_file (str): path to the json file. e.g., "~/coco/annotations/panoptic_train2017.json".
+
+     Returns:
+         list[dict]: a list of dicts in Detectron2 standard format. (See
+         `Using Custom Datasets </tutorials/datasets.html>`_ )
+     """
+
+     def _convert_category_id(segment_info, meta):
+         if segment_info["category_id"] in meta["thing_dataset_id_to_contiguous_id"]:
+             segment_info["category_id"] = meta["thing_dataset_id_to_contiguous_id"][
+                 segment_info["category_id"]
+             ]
+             segment_info["isthing"] = True
+         else:
+             segment_info["category_id"] = meta["stuff_dataset_id_to_contiguous_id"][
+                 segment_info["category_id"]
+             ]
+             segment_info["isthing"] = False
+         return segment_info
+
+     with PathManager.open(json_file) as f:
+         json_info = json.load(f)
+
+     ret = []
+     for ann in json_info["annotations"]:
+         image_id = int(ann["image_id"])
+         # TODO: currently we assume image and label have the same filename but
+         # different extension, and images have extension ".jpg" for COCO. Need
+         # to make image extension a user-provided argument if we extend this
+         # function to support other COCO-like datasets.
+         image_file = os.path.join(image_dir, os.path.splitext(ann["file_name"])[0] + ".jpg")
+         label_file = os.path.join(gt_dir, ann["file_name"])
+         segments_info = [_convert_category_id(x, meta) for x in ann["segments_info"]]
+         ret.append(
+             {
+                 "file_name": image_file,
+                 "image_id": image_id,
+                 "pan_seg_file_name": label_file,
+                 "segments_info": segments_info,
+             }
+         )
+     assert len(ret), f"No images found in {image_dir}!"
+     assert PathManager.isfile(ret[0]["file_name"]), ret[0]["file_name"]
+     assert PathManager.isfile(ret[0]["pan_seg_file_name"]), ret[0]["pan_seg_file_name"]
+     return ret
+
+
+ def register_coco_panoptic(
+     name, metadata, image_root, panoptic_root, panoptic_json, instances_json=None
+ ):
+     """
+     Register a "standard" version of COCO panoptic segmentation dataset named `name`.
+     The dictionaries in this registered dataset follow detectron2's standard format.
+     Hence it's called "standard".
+
+     Args:
+         name (str): the name that identifies a dataset,
+             e.g. "coco_2017_train_panoptic"
+         metadata (dict): extra metadata associated with this dataset.
+         image_root (str): directory which contains all the images
+         panoptic_root (str): directory which contains panoptic annotation images in COCO format
+         panoptic_json (str): path to the json panoptic annotation file in COCO format
+         sem_seg_root (none): not used, to be consistent with
+             `register_coco_panoptic_separated`.
+         instances_json (str): path to the json instance annotation file
+     """
+     panoptic_name = name
+     DatasetCatalog.register(
+         panoptic_name,
+         lambda: load_coco_panoptic_json(panoptic_json, image_root, panoptic_root, metadata),
+     )
+     MetadataCatalog.get(panoptic_name).set(
+         panoptic_root=panoptic_root,
+         image_root=image_root,
+         panoptic_json=panoptic_json,
+         json_file=instances_json,
+         evaluator_type="coco_panoptic_seg",
+         ignore_label=255,
+         label_divisor=1000,
+         **metadata,
+     )
+
+
+ def register_coco_panoptic_separated(
+     name, metadata, image_root, panoptic_root, panoptic_json, sem_seg_root, instances_json
+ ):
+     """
+     Register a "separated" version of COCO panoptic segmentation dataset named `name`.
+     The annotations in this registered dataset will contain both instance annotations and
+     semantic annotations, each with its own contiguous ids. Hence it's called "separated".
+
+     It follows the setting used by the PanopticFPN paper:
+
+     1. The instance annotations directly come from polygons in the COCO
+        instances annotation task, rather than from the masks in the COCO panoptic annotations.
+
+        The two formats have small differences:
+        Polygons in the instance annotations may have overlaps.
+        The mask annotations are produced by labeling the overlapped polygons
+        with depth ordering.
+
+     2. The semantic annotations are converted from panoptic annotations, where
+        all "things" are assigned a semantic id of 0.
+        All semantic categories will therefore have ids in contiguous
+        range [1, #stuff_categories].
+
+     This function will also register a pure semantic segmentation dataset
+     named ``name + '_stuffonly'``.
+
+     Args:
+         name (str): the name that identifies a dataset,
+             e.g. "coco_2017_train_panoptic"
+         metadata (dict): extra metadata associated with this dataset.
+         image_root (str): directory which contains all the images
+         panoptic_root (str): directory which contains panoptic annotation images
+         panoptic_json (str): path to the json panoptic annotation file
+         sem_seg_root (str): directory which contains all the ground truth segmentation annotations.
+         instances_json (str): path to the json instance annotation file
+     """
+     panoptic_name = name + "_separated"
+     DatasetCatalog.register(
+         panoptic_name,
+         lambda: merge_to_panoptic(
+             load_coco_json(instances_json, image_root, panoptic_name),
+             load_sem_seg(sem_seg_root, image_root),
+         ),
+     )
+     MetadataCatalog.get(panoptic_name).set(
+         panoptic_root=panoptic_root,
+         image_root=image_root,
+         panoptic_json=panoptic_json,
+         sem_seg_root=sem_seg_root,
+         json_file=instances_json,  # TODO rename
+         evaluator_type="coco_panoptic_seg",
+         ignore_label=255,
+         **metadata,
+     )
+
+     semantic_name = name + "_stuffonly"
+     DatasetCatalog.register(semantic_name, lambda: load_sem_seg(sem_seg_root, image_root))
+     MetadataCatalog.get(semantic_name).set(
+         sem_seg_root=sem_seg_root,
+         image_root=image_root,
+         evaluator_type="sem_seg",
+         ignore_label=255,
+         **metadata,
+     )
+
+
+ def merge_to_panoptic(detection_dicts, sem_seg_dicts):
+     """
+     Create dataset dicts for panoptic segmentation, by
+     merging two dicts using "file_name" field to match their entries.
+
+     Args:
+         detection_dicts (list[dict]): lists of dicts for object detection or instance segmentation.
+         sem_seg_dicts (list[dict]): lists of dicts for semantic segmentation.
+
+     Returns:
+         list[dict] (one per input image): Each dict contains all (key, value) pairs from dicts in
+         both detection_dicts and sem_seg_dicts that correspond to the same image.
+         The function assumes that the same key in different dicts has the same value.
+     """
+     results = []
+     sem_seg_file_to_entry = {x["file_name"]: x for x in sem_seg_dicts}
+     assert len(sem_seg_file_to_entry) > 0
+
+     for det_dict in detection_dicts:
+         dic = copy.copy(det_dict)
+         dic.update(sem_seg_file_to_entry[dic["file_name"]])
+         results.append(dic)
+     return results
+
+
+ if __name__ == "__main__":
+     """
+     Test the COCO panoptic dataset loader.
+
+     Usage:
+         python -m detectron2.data.datasets.coco_panoptic \
+             path/to/image_root path/to/panoptic_root path/to/panoptic_json dataset_name 10
+
+     "dataset_name" can be "coco_2017_train_panoptic", or other
+     pre-registered ones
+     """
+     from annotator.oneformer.detectron2.utils.logger import setup_logger
+     from annotator.oneformer.detectron2.utils.visualizer import Visualizer
+     import annotator.oneformer.detectron2.data.datasets  # noqa # add pre-defined metadata
+     import sys
+     from PIL import Image
+     import numpy as np
+
+     logger = setup_logger(name=__name__)
+     assert sys.argv[4] in DatasetCatalog.list()
+     meta = MetadataCatalog.get(sys.argv[4])
+
+     dicts = load_coco_panoptic_json(sys.argv[3], sys.argv[1], sys.argv[2], meta.as_dict())
+     logger.info("Done loading {} samples.".format(len(dicts)))
+
+     dirname = "coco-data-vis"
+     os.makedirs(dirname, exist_ok=True)
+     num_imgs_to_vis = int(sys.argv[5])
+     for i, d in enumerate(dicts):
+         img = np.array(Image.open(d["file_name"]))
+         visualizer = Visualizer(img, metadata=meta)
+         vis = visualizer.draw_dataset_dict(d)
+         fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+         vis.save(fpath)
+         if i + 1 >= num_imgs_to_vis:
+             break
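
For orientation, here is a minimal sketch of how this registration API is typically driven. Everything in it is hypothetical: the dataset name and paths are placeholders, and the `_get_builtin_metadata("coco_panoptic_standard")` call assumes the vendored builtin_meta matches upstream detectron2, since `metadata` must supply the "thing_dataset_id_to_contiguous_id" / "stuff_dataset_id_to_contiguous_id" mappings that `load_coco_panoptic_json` reads:

from annotator.oneformer.detectron2.data import DatasetCatalog
from annotator.oneformer.detectron2.data.datasets.builtin_meta import _get_builtin_metadata
from annotator.oneformer.detectron2.data.datasets.coco_panoptic import register_coco_panoptic

# Hypothetical local paths laid out like the official COCO download.
register_coco_panoptic(
    name="my_coco_panoptic_val",
    metadata=_get_builtin_metadata("coco_panoptic_standard"),
    image_root="datasets/coco/val2017",
    panoptic_root="datasets/coco/panoptic_val2017",
    panoptic_json="datasets/coco/annotations/panoptic_val2017.json",
)
# Registration is lazy; this call is what actually runs load_coco_panoptic_json.
dicts = DatasetCatalog.get("my_coco_panoptic_val")
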
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis.py ADDED
@@ -0,0 +1,241 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import logging
+ import os
+ from fvcore.common.timer import Timer
+
+ from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
+ from annotator.oneformer.detectron2.structures import BoxMode
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+
+ from .builtin_meta import _get_coco_instances_meta
+ from .lvis_v0_5_categories import LVIS_CATEGORIES as LVIS_V0_5_CATEGORIES
+ from .lvis_v1_categories import LVIS_CATEGORIES as LVIS_V1_CATEGORIES
+ from .lvis_v1_category_image_count import LVIS_CATEGORY_IMAGE_COUNT as LVIS_V1_CATEGORY_IMAGE_COUNT
+
+ """
+ This file contains functions to parse LVIS-format annotations into dicts in the
+ "Detectron2 format".
+ """
+
+ logger = logging.getLogger(__name__)
+
+ __all__ = ["load_lvis_json", "register_lvis_instances", "get_lvis_instances_meta"]
+
+
+ def register_lvis_instances(name, metadata, json_file, image_root):
+     """
+     Register a dataset in LVIS's json annotation format for instance detection and segmentation.
+
+     Args:
+         name (str): a name that identifies the dataset, e.g. "lvis_v0.5_train".
+         metadata (dict): extra metadata associated with this dataset. It can be an empty dict.
+         json_file (str): path to the json instance annotation file.
+         image_root (str or path-like): directory which contains all the images.
+     """
+     DatasetCatalog.register(name, lambda: load_lvis_json(json_file, image_root, name))
+     MetadataCatalog.get(name).set(
+         json_file=json_file, image_root=image_root, evaluator_type="lvis", **metadata
+     )
+
+
+ def load_lvis_json(json_file, image_root, dataset_name=None, extra_annotation_keys=None):
+     """
+     Load a json file in LVIS's annotation format.
+
+     Args:
+         json_file (str): full path to the LVIS json annotation file.
+         image_root (str): the directory where the images in this json file exist.
+         dataset_name (str): the name of the dataset (e.g., "lvis_v0.5_train").
+             If provided, this function will put "thing_classes" into the metadata
+             associated with this dataset.
+         extra_annotation_keys (list[str]): list of per-annotation keys that should also be
+             loaded into the dataset dict (besides "bbox", "bbox_mode", "category_id",
+             "segmentation"). The values for these keys will be returned as-is.
+
+     Returns:
+         list[dict]: a list of dicts in Detectron2 standard format. (See
+         `Using Custom Datasets </tutorials/datasets.html>`_ )
+
+     Notes:
+         1. This function does not read the image files.
+            The results do not have the "image" field.
+     """
+     from lvis import LVIS
+
+     json_file = PathManager.get_local_path(json_file)
+
+     timer = Timer()
+     lvis_api = LVIS(json_file)
+     if timer.seconds() > 1:
+         logger.info("Loading {} takes {:.2f} seconds.".format(json_file, timer.seconds()))
+
+     if dataset_name is not None:
+         meta = get_lvis_instances_meta(dataset_name)
+         MetadataCatalog.get(dataset_name).set(**meta)
+
+     # sort indices for reproducible results
+     img_ids = sorted(lvis_api.imgs.keys())
+     # imgs is a list of dicts, each looks something like:
+     # {'license': 4,
+     #  'url': 'http://farm6.staticflickr.com/5454/9413846304_881d5e5c3b_z.jpg',
+     #  'file_name': 'COCO_val2014_000000001268.jpg',
+     #  'height': 427,
+     #  'width': 640,
+     #  'date_captured': '2013-11-17 05:57:24',
+     #  'id': 1268}
+     imgs = lvis_api.load_imgs(img_ids)
+     # anns is a list[list[dict]], where each dict is an annotation
+     # record for an object. The inner list enumerates the objects in an image
+     # and the outer list enumerates over images. Example of anns[0]:
+     # [{'segmentation': [[192.81,
+     #     247.09,
+     #     ...
+     #     219.03,
+     #     249.06]],
+     #   'area': 1035.749,
+     #   'image_id': 1268,
+     #   'bbox': [192.81, 224.8, 74.73, 33.43],
+     #   'category_id': 16,
+     #   'id': 42986},
+     #  ...]
+     anns = [lvis_api.img_ann_map[img_id] for img_id in img_ids]
+
+     # Sanity check that each annotation has a unique id
+     ann_ids = [ann["id"] for anns_per_image in anns for ann in anns_per_image]
+     assert len(set(ann_ids)) == len(ann_ids), "Annotation ids in '{}' are not unique".format(
+         json_file
+     )
+
+     imgs_anns = list(zip(imgs, anns))
+
+     logger.info("Loaded {} images in the LVIS format from {}".format(len(imgs_anns), json_file))
+
+     if extra_annotation_keys:
+         logger.info(
+             "The following extra annotation keys will be loaded: {} ".format(extra_annotation_keys)
+         )
+     else:
+         extra_annotation_keys = []
+
+     def get_file_name(img_root, img_dict):
+         # Determine the path including the split folder ("train2017", "val2017", "test2017") from
+         # the coco_url field. Example:
+         #   'coco_url': 'http://images.cocodataset.org/train2017/000000155379.jpg'
+         split_folder, file_name = img_dict["coco_url"].split("/")[-2:]
+         return os.path.join(img_root + split_folder, file_name)
+
+     dataset_dicts = []
+
+     for (img_dict, anno_dict_list) in imgs_anns:
+         record = {}
+         record["file_name"] = get_file_name(image_root, img_dict)
+         record["height"] = img_dict["height"]
+         record["width"] = img_dict["width"]
+         record["not_exhaustive_category_ids"] = img_dict.get("not_exhaustive_category_ids", [])
+         record["neg_category_ids"] = img_dict.get("neg_category_ids", [])
+         image_id = record["image_id"] = img_dict["id"]
+
+         objs = []
+         for anno in anno_dict_list:
+             # Check that the image_id in this annotation is the same as
+             # the image_id we're looking at.
+             # This fails only when the data parsing logic or the annotation file is buggy.
+             assert anno["image_id"] == image_id
+             obj = {"bbox": anno["bbox"], "bbox_mode": BoxMode.XYWH_ABS}
+             # LVIS data loader can be used to load COCO dataset categories. In this case `meta`
+             # variable will have a field with COCO-specific category mapping.
+             if dataset_name is not None and "thing_dataset_id_to_contiguous_id" in meta:
+                 obj["category_id"] = meta["thing_dataset_id_to_contiguous_id"][anno["category_id"]]
+             else:
+                 obj["category_id"] = anno["category_id"] - 1  # Convert 1-indexed to 0-indexed
+             segm = anno["segmentation"]  # list[list[float]]
+             # filter out invalid polygons (< 3 points)
+             valid_segm = [poly for poly in segm if len(poly) % 2 == 0 and len(poly) >= 6]
+             assert len(segm) == len(
+                 valid_segm
+             ), "Annotation contains an invalid polygon with < 3 points"
+             assert len(segm) > 0
+             obj["segmentation"] = segm
+             for extra_ann_key in extra_annotation_keys:
+                 obj[extra_ann_key] = anno[extra_ann_key]
+             objs.append(obj)
+         record["annotations"] = objs
+         dataset_dicts.append(record)
+
+     return dataset_dicts
+
+
+ def get_lvis_instances_meta(dataset_name):
+     """
+     Load LVIS metadata.
+
+     Args:
+         dataset_name (str): LVIS dataset name without the split name (e.g., "lvis_v0.5").
+
+     Returns:
+         dict: LVIS metadata with keys: thing_classes
+     """
+     if "cocofied" in dataset_name:
+         return _get_coco_instances_meta()
+     if "v0.5" in dataset_name:
+         return _get_lvis_instances_meta_v0_5()
+     elif "v1" in dataset_name:
+         return _get_lvis_instances_meta_v1()
+     raise ValueError("No built-in metadata for dataset {}".format(dataset_name))
+
+
+ def _get_lvis_instances_meta_v0_5():
+     assert len(LVIS_V0_5_CATEGORIES) == 1230
+     cat_ids = [k["id"] for k in LVIS_V0_5_CATEGORIES]
+     assert min(cat_ids) == 1 and max(cat_ids) == len(
+         cat_ids
+     ), "Category ids are not in [1, #categories], as expected"
+     # Ensure that the category list is sorted by id
+     lvis_categories = sorted(LVIS_V0_5_CATEGORIES, key=lambda x: x["id"])
+     thing_classes = [k["synonyms"][0] for k in lvis_categories]
+     meta = {"thing_classes": thing_classes}
+     return meta
+
+
+ def _get_lvis_instances_meta_v1():
+     assert len(LVIS_V1_CATEGORIES) == 1203
+     cat_ids = [k["id"] for k in LVIS_V1_CATEGORIES]
+     assert min(cat_ids) == 1 and max(cat_ids) == len(
+         cat_ids
+     ), "Category ids are not in [1, #categories], as expected"
+     # Ensure that the category list is sorted by id
+     lvis_categories = sorted(LVIS_V1_CATEGORIES, key=lambda x: x["id"])
+     thing_classes = [k["synonyms"][0] for k in lvis_categories]
+     meta = {"thing_classes": thing_classes, "class_image_count": LVIS_V1_CATEGORY_IMAGE_COUNT}
+     return meta
+
+
+ if __name__ == "__main__":
+     """
+     Test the LVIS json dataset loader.
+
+     Usage:
+         python -m detectron2.data.datasets.lvis \
+             path/to/json path/to/image_root dataset_name vis_limit
+     """
+     import sys
+     import numpy as np
+     from annotator.oneformer.detectron2.utils.logger import setup_logger
+     from PIL import Image
+     import annotator.oneformer.detectron2.data.datasets  # noqa # add pre-defined metadata
+     from annotator.oneformer.detectron2.utils.visualizer import Visualizer
+
+     logger = setup_logger(name=__name__)
+     meta = MetadataCatalog.get(sys.argv[3])
+
+     dicts = load_lvis_json(sys.argv[1], sys.argv[2], sys.argv[3])
+     logger.info("Done loading {} samples.".format(len(dicts)))
+
+     dirname = "lvis-data-vis"
+     os.makedirs(dirname, exist_ok=True)
+     for d in dicts[: int(sys.argv[4])]:
+         img = np.array(Image.open(d["file_name"]))
+         visualizer = Visualizer(img, metadata=meta)
+         vis = visualizer.draw_dataset_dict(d)
+         fpath = os.path.join(dirname, os.path.basename(d["file_name"]))
+         vis.save(fpath)
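
A similarly minimal sketch for the LVIS loader above. The name and paths are hypothetical; `load_lvis_json` additionally needs the `lvis` pip package at load time, and because `get_file_name` joins paths by plain string concatenation, `image_root` must keep its trailing slash:

from annotator.oneformer.detectron2.data import DatasetCatalog
from annotator.oneformer.detectron2.data.datasets.lvis import register_lvis_instances

# "v1" in the registered name is what selects the v1 built-in metadata
# inside get_lvis_instances_meta.
register_lvis_instances(
    name="my_lvis_v1_val",
    metadata={},
    json_file="datasets/lvis/lvis_v1_val.json",
    image_root="datasets/coco/",  # trailing slash matters: img_root + split_folder
)
dicts = DatasetCatalog.get("my_lvis_v1_val")  # triggers load_lvis_json
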
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis_v0_5_categories.py ADDED
The diff for this file is too large to render. See raw diff
 
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis_v1_categories.py ADDED
The diff for this file is too large to render. See raw diff
 
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/lvis_v1_category_image_count.py ADDED
@@ -0,0 +1,20 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ # Autogen with
+ # with open("lvis_v1_train.json", "r") as f:
+ #     a = json.load(f)
+ # c = a["categories"]
+ # for x in c:
+ #     del x["name"]
+ #     del x["instance_count"]
+ #     del x["def"]
+ #     del x["synonyms"]
+ #     del x["frequency"]
+ #     del x["synset"]
+ # LVIS_CATEGORY_IMAGE_COUNT = repr(c) + " # noqa"
+ # with open("/tmp/lvis_category_image_count.py", "wt") as f:
+ #     f.write(f"LVIS_CATEGORY_IMAGE_COUNT = {LVIS_CATEGORY_IMAGE_COUNT}")
+ # Then paste the contents of that file below
+
+ # fmt: off
+ LVIS_CATEGORY_IMAGE_COUNT = [{'id': 1, 'image_count': 64}, {'id': 2, 'image_count': 364}, {'id': 3, 'image_count': 1911}, {'id': 4, 'image_count': 149}, {'id': 5, 'image_count': 29}, {'id': 6, 'image_count': 26}, {'id': 7, 'image_count': 59}, {'id': 8, 'image_count': 22}, {'id': 9, 'image_count': 12}, {'id': 10, 'image_count': 28}, {'id': 11, 'image_count': 505}, {'id': 12, 'image_count': 1207}, {'id': 13, 'image_count': 4}, {'id': 14, 'image_count': 10}, {'id': 15, 'image_count': 500}, {'id': 16, 'image_count': 33}, {'id': 17, 'image_count': 3}, {'id': 18, 'image_count': 44}, {'id': 19, 'image_count': 561}, {'id': 20, 'image_count': 8}, {'id': 21, 'image_count': 9}, {'id': 22, 'image_count': 33}, {'id': 23, 'image_count': 1883}, {'id': 24, 'image_count': 98}, {'id': 25, 'image_count': 70}, {'id': 26, 'image_count': 46}, {'id': 27, 'image_count': 117}, {'id': 28, 'image_count': 41}, {'id': 29, 'image_count': 1395}, {'id': 30, 'image_count': 7}, {'id': 31, 'image_count': 1}, {'id': 32, 'image_count': 314}, {'id': 33, 'image_count': 31}, {'id': 34, 'image_count': 1905}, {'id': 35, 'image_count': 1859}, {'id': 36, 'image_count': 1623}, {'id': 37, 'image_count': 47}, {'id': 38, 'image_count': 3}, {'id': 39, 'image_count': 3}, {'id': 40, 'image_count': 1}, {'id': 41, 'image_count': 305}, {'id': 42, 'image_count': 6}, {'id': 43, 'image_count': 210}, {'id': 44, 'image_count': 36}, {'id': 45, 'image_count': 1787}, {'id': 46, 'image_count': 17}, {'id': 47, 'image_count': 51}, {'id': 48, 'image_count': 138}, {'id': 49, 'image_count': 3}, {'id': 50, 'image_count': 1470}, {'id': 51, 'image_count': 3}, {'id': 52, 'image_count': 2}, {'id': 53, 'image_count': 186}, {'id': 54, 'image_count': 76}, {'id': 55, 'image_count': 26}, {'id': 56, 'image_count': 303}, {'id': 57, 'image_count': 738}, {'id': 58, 'image_count': 1799}, {'id': 59, 'image_count': 1934}, {'id': 60, 'image_count': 1609}, {'id': 61, 'image_count': 1622}, {'id': 62, 'image_count': 41}, {'id': 63, 'image_count': 4}, {'id': 64, 'image_count': 11}, {'id': 65, 'image_count': 270}, {'id': 66, 'image_count': 349}, {'id': 67, 'image_count': 42}, {'id': 68, 'image_count': 823}, {'id': 69, 'image_count': 6}, {'id': 70, 'image_count': 48}, {'id': 71, 'image_count': 3}, {'id': 72, 'image_count': 42}, {'id': 73, 'image_count': 24}, {'id': 74, 'image_count': 16}, {'id': 75, 'image_count': 605}, {'id': 76, 'image_count': 646}, {'id': 77, 'image_count': 1765}, {'id': 78, 'image_count': 2}, {'id': 79, 'image_count': 125}, {'id': 80, 'image_count': 1420}, {'id': 81, 'image_count': 140}, {'id': 82, 'image_count': 4}, {'id': 83, 'image_count': 322}, {'id': 84, 'image_count': 60}, {'id': 85, 'image_count': 2}, {'id': 86, 'image_count': 231}, {'id': 87, 'image_count': 333}, {'id': 88, 'image_count': 1941}, {'id': 89, 'image_count': 367}, {'id': 90, 'image_count': 1922}, {'id': 91, 'image_count': 18}, {'id': 92, 'image_count': 81}, {'id': 93, 'image_count': 1}, {'id': 94, 'image_count': 1852}, {'id': 95, 'image_count': 430}, {'id': 96, 'image_count': 247}, {'id': 97, 'image_count': 94}, {'id': 98, 'image_count': 21}, {'id': 99, 'image_count': 1821}, {'id': 100, 'image_count': 16}, {'id': 101, 'image_count': 12}, {'id': 102, 'image_count': 25}, {'id': 103, 'image_count': 41}, {'id': 104, 'image_count': 244}, {'id': 105, 'image_count': 7}, {'id': 106, 'image_count': 1}, {'id': 107, 'image_count': 40}, {'id': 108, 'image_count': 40}, {'id': 109, 'image_count': 104}, {'id': 110, 'image_count': 1671}, {'id': 111, 'image_count': 49}, {'id': 112, 'image_count': 243}, 
{'id': 113, 'image_count': 2}, {'id': 114, 'image_count': 242}, {'id': 115, 'image_count': 271}, {'id': 116, 'image_count': 104}, {'id': 117, 'image_count': 8}, {'id': 118, 'image_count': 1758}, {'id': 119, 'image_count': 1}, {'id': 120, 'image_count': 48}, {'id': 121, 'image_count': 14}, {'id': 122, 'image_count': 40}, {'id': 123, 'image_count': 1}, {'id': 124, 'image_count': 37}, {'id': 125, 'image_count': 1510}, {'id': 126, 'image_count': 6}, {'id': 127, 'image_count': 1903}, {'id': 128, 'image_count': 70}, {'id': 129, 'image_count': 86}, {'id': 130, 'image_count': 7}, {'id': 131, 'image_count': 5}, {'id': 132, 'image_count': 1406}, {'id': 133, 'image_count': 1901}, {'id': 134, 'image_count': 15}, {'id': 135, 'image_count': 28}, {'id': 136, 'image_count': 6}, {'id': 137, 'image_count': 494}, {'id': 138, 'image_count': 234}, {'id': 139, 'image_count': 1922}, {'id': 140, 'image_count': 1}, {'id': 141, 'image_count': 35}, {'id': 142, 'image_count': 5}, {'id': 143, 'image_count': 1828}, {'id': 144, 'image_count': 8}, {'id': 145, 'image_count': 63}, {'id': 146, 'image_count': 1668}, {'id': 147, 'image_count': 4}, {'id': 148, 'image_count': 95}, {'id': 149, 'image_count': 17}, {'id': 150, 'image_count': 1567}, {'id': 151, 'image_count': 2}, {'id': 152, 'image_count': 103}, {'id': 153, 'image_count': 50}, {'id': 154, 'image_count': 1309}, {'id': 155, 'image_count': 6}, {'id': 156, 'image_count': 92}, {'id': 157, 'image_count': 19}, {'id': 158, 'image_count': 37}, {'id': 159, 'image_count': 4}, {'id': 160, 'image_count': 709}, {'id': 161, 'image_count': 9}, {'id': 162, 'image_count': 82}, {'id': 163, 'image_count': 15}, {'id': 164, 'image_count': 3}, {'id': 165, 'image_count': 61}, {'id': 166, 'image_count': 51}, {'id': 167, 'image_count': 5}, {'id': 168, 'image_count': 13}, {'id': 169, 'image_count': 642}, {'id': 170, 'image_count': 24}, {'id': 171, 'image_count': 255}, {'id': 172, 'image_count': 9}, {'id': 173, 'image_count': 1808}, {'id': 174, 'image_count': 31}, {'id': 175, 'image_count': 158}, {'id': 176, 'image_count': 80}, {'id': 177, 'image_count': 1884}, {'id': 178, 'image_count': 158}, {'id': 179, 'image_count': 2}, {'id': 180, 'image_count': 12}, {'id': 181, 'image_count': 1659}, {'id': 182, 'image_count': 7}, {'id': 183, 'image_count': 834}, {'id': 184, 'image_count': 57}, {'id': 185, 'image_count': 174}, {'id': 186, 'image_count': 95}, {'id': 187, 'image_count': 27}, {'id': 188, 'image_count': 22}, {'id': 189, 'image_count': 1391}, {'id': 190, 'image_count': 90}, {'id': 191, 'image_count': 40}, {'id': 192, 'image_count': 445}, {'id': 193, 'image_count': 21}, {'id': 194, 'image_count': 1132}, {'id': 195, 'image_count': 177}, {'id': 196, 'image_count': 4}, {'id': 197, 'image_count': 17}, {'id': 198, 'image_count': 84}, {'id': 199, 'image_count': 55}, {'id': 200, 'image_count': 30}, {'id': 201, 'image_count': 25}, {'id': 202, 'image_count': 2}, {'id': 203, 'image_count': 125}, {'id': 204, 'image_count': 1135}, {'id': 205, 'image_count': 19}, {'id': 206, 'image_count': 72}, {'id': 207, 'image_count': 1926}, {'id': 208, 'image_count': 159}, {'id': 209, 'image_count': 7}, {'id': 210, 'image_count': 1}, {'id': 211, 'image_count': 13}, {'id': 212, 'image_count': 35}, {'id': 213, 'image_count': 18}, {'id': 214, 'image_count': 8}, {'id': 215, 'image_count': 6}, {'id': 216, 'image_count': 35}, {'id': 217, 'image_count': 1222}, {'id': 218, 'image_count': 103}, {'id': 219, 'image_count': 28}, {'id': 220, 'image_count': 63}, {'id': 221, 'image_count': 28}, {'id': 222, 'image_count': 5}, {'id': 
223, 'image_count': 7}, {'id': 224, 'image_count': 14}, {'id': 225, 'image_count': 1918}, {'id': 226, 'image_count': 133}, {'id': 227, 'image_count': 16}, {'id': 228, 'image_count': 27}, {'id': 229, 'image_count': 110}, {'id': 230, 'image_count': 1895}, {'id': 231, 'image_count': 4}, {'id': 232, 'image_count': 1927}, {'id': 233, 'image_count': 8}, {'id': 234, 'image_count': 1}, {'id': 235, 'image_count': 263}, {'id': 236, 'image_count': 10}, {'id': 237, 'image_count': 2}, {'id': 238, 'image_count': 3}, {'id': 239, 'image_count': 87}, {'id': 240, 'image_count': 9}, {'id': 241, 'image_count': 71}, {'id': 242, 'image_count': 13}, {'id': 243, 'image_count': 18}, {'id': 244, 'image_count': 2}, {'id': 245, 'image_count': 5}, {'id': 246, 'image_count': 45}, {'id': 247, 'image_count': 1}, {'id': 248, 'image_count': 23}, {'id': 249, 'image_count': 32}, {'id': 250, 'image_count': 4}, {'id': 251, 'image_count': 1}, {'id': 252, 'image_count': 858}, {'id': 253, 'image_count': 661}, {'id': 254, 'image_count': 168}, {'id': 255, 'image_count': 210}, {'id': 256, 'image_count': 65}, {'id': 257, 'image_count': 4}, {'id': 258, 'image_count': 2}, {'id': 259, 'image_count': 159}, {'id': 260, 'image_count': 31}, {'id': 261, 'image_count': 811}, {'id': 262, 'image_count': 1}, {'id': 263, 'image_count': 42}, {'id': 264, 'image_count': 27}, {'id': 265, 'image_count': 2}, {'id': 266, 'image_count': 5}, {'id': 267, 'image_count': 95}, {'id': 268, 'image_count': 32}, {'id': 269, 'image_count': 1}, {'id': 270, 'image_count': 1}, {'id': 271, 'image_count': 1844}, {'id': 272, 'image_count': 897}, {'id': 273, 'image_count': 31}, {'id': 274, 'image_count': 23}, {'id': 275, 'image_count': 1}, {'id': 276, 'image_count': 202}, {'id': 277, 'image_count': 746}, {'id': 278, 'image_count': 44}, {'id': 279, 'image_count': 14}, {'id': 280, 'image_count': 26}, {'id': 281, 'image_count': 1}, {'id': 282, 'image_count': 2}, {'id': 283, 'image_count': 25}, {'id': 284, 'image_count': 238}, {'id': 285, 'image_count': 592}, {'id': 286, 'image_count': 26}, {'id': 287, 'image_count': 5}, {'id': 288, 'image_count': 42}, {'id': 289, 'image_count': 13}, {'id': 290, 'image_count': 46}, {'id': 291, 'image_count': 1}, {'id': 292, 'image_count': 8}, {'id': 293, 'image_count': 34}, {'id': 294, 'image_count': 5}, {'id': 295, 'image_count': 1}, {'id': 296, 'image_count': 1871}, {'id': 297, 'image_count': 717}, {'id': 298, 'image_count': 1010}, {'id': 299, 'image_count': 679}, {'id': 300, 'image_count': 3}, {'id': 301, 'image_count': 4}, {'id': 302, 'image_count': 1}, {'id': 303, 'image_count': 166}, {'id': 304, 'image_count': 2}, {'id': 305, 'image_count': 266}, {'id': 306, 'image_count': 101}, {'id': 307, 'image_count': 6}, {'id': 308, 'image_count': 14}, {'id': 309, 'image_count': 133}, {'id': 310, 'image_count': 2}, {'id': 311, 'image_count': 38}, {'id': 312, 'image_count': 95}, {'id': 313, 'image_count': 1}, {'id': 314, 'image_count': 12}, {'id': 315, 'image_count': 49}, {'id': 316, 'image_count': 5}, {'id': 317, 'image_count': 5}, {'id': 318, 'image_count': 16}, {'id': 319, 'image_count': 216}, {'id': 320, 'image_count': 12}, {'id': 321, 'image_count': 1}, {'id': 322, 'image_count': 54}, {'id': 323, 'image_count': 5}, {'id': 324, 'image_count': 245}, {'id': 325, 'image_count': 12}, {'id': 326, 'image_count': 7}, {'id': 327, 'image_count': 35}, {'id': 328, 'image_count': 36}, {'id': 329, 'image_count': 32}, {'id': 330, 'image_count': 1027}, {'id': 331, 'image_count': 10}, {'id': 332, 'image_count': 12}, {'id': 333, 'image_count': 1}, {'id': 334, 
'image_count': 67}, {'id': 335, 'image_count': 71}, {'id': 336, 'image_count': 30}, {'id': 337, 'image_count': 48}, {'id': 338, 'image_count': 249}, {'id': 339, 'image_count': 13}, {'id': 340, 'image_count': 29}, {'id': 341, 'image_count': 14}, {'id': 342, 'image_count': 236}, {'id': 343, 'image_count': 15}, {'id': 344, 'image_count': 1521}, {'id': 345, 'image_count': 25}, {'id': 346, 'image_count': 249}, {'id': 347, 'image_count': 139}, {'id': 348, 'image_count': 2}, {'id': 349, 'image_count': 2}, {'id': 350, 'image_count': 1890}, {'id': 351, 'image_count': 1240}, {'id': 352, 'image_count': 1}, {'id': 353, 'image_count': 9}, {'id': 354, 'image_count': 1}, {'id': 355, 'image_count': 3}, {'id': 356, 'image_count': 11}, {'id': 357, 'image_count': 4}, {'id': 358, 'image_count': 236}, {'id': 359, 'image_count': 44}, {'id': 360, 'image_count': 19}, {'id': 361, 'image_count': 1100}, {'id': 362, 'image_count': 7}, {'id': 363, 'image_count': 69}, {'id': 364, 'image_count': 2}, {'id': 365, 'image_count': 8}, {'id': 366, 'image_count': 5}, {'id': 367, 'image_count': 227}, {'id': 368, 'image_count': 6}, {'id': 369, 'image_count': 106}, {'id': 370, 'image_count': 81}, {'id': 371, 'image_count': 17}, {'id': 372, 'image_count': 134}, {'id': 373, 'image_count': 312}, {'id': 374, 'image_count': 8}, {'id': 375, 'image_count': 271}, {'id': 376, 'image_count': 2}, {'id': 377, 'image_count': 103}, {'id': 378, 'image_count': 1938}, {'id': 379, 'image_count': 574}, {'id': 380, 'image_count': 120}, {'id': 381, 'image_count': 2}, {'id': 382, 'image_count': 2}, {'id': 383, 'image_count': 13}, {'id': 384, 'image_count': 29}, {'id': 385, 'image_count': 1710}, {'id': 386, 'image_count': 66}, {'id': 387, 'image_count': 1008}, {'id': 388, 'image_count': 1}, {'id': 389, 'image_count': 3}, {'id': 390, 'image_count': 1942}, {'id': 391, 'image_count': 19}, {'id': 392, 'image_count': 1488}, {'id': 393, 'image_count': 46}, {'id': 394, 'image_count': 106}, {'id': 395, 'image_count': 115}, {'id': 396, 'image_count': 19}, {'id': 397, 'image_count': 2}, {'id': 398, 'image_count': 1}, {'id': 399, 'image_count': 28}, {'id': 400, 'image_count': 9}, {'id': 401, 'image_count': 192}, {'id': 402, 'image_count': 12}, {'id': 403, 'image_count': 21}, {'id': 404, 'image_count': 247}, {'id': 405, 'image_count': 6}, {'id': 406, 'image_count': 64}, {'id': 407, 'image_count': 7}, {'id': 408, 'image_count': 40}, {'id': 409, 'image_count': 542}, {'id': 410, 'image_count': 2}, {'id': 411, 'image_count': 1898}, {'id': 412, 'image_count': 36}, {'id': 413, 'image_count': 4}, {'id': 414, 'image_count': 1}, {'id': 415, 'image_count': 191}, {'id': 416, 'image_count': 6}, {'id': 417, 'image_count': 41}, {'id': 418, 'image_count': 39}, {'id': 419, 'image_count': 46}, {'id': 420, 'image_count': 1}, {'id': 421, 'image_count': 1451}, {'id': 422, 'image_count': 1878}, {'id': 423, 'image_count': 11}, {'id': 424, 'image_count': 82}, {'id': 425, 'image_count': 18}, {'id': 426, 'image_count': 1}, {'id': 427, 'image_count': 7}, {'id': 428, 'image_count': 3}, {'id': 429, 'image_count': 575}, {'id': 430, 'image_count': 1907}, {'id': 431, 'image_count': 8}, {'id': 432, 'image_count': 4}, {'id': 433, 'image_count': 32}, {'id': 434, 'image_count': 11}, {'id': 435, 'image_count': 4}, {'id': 436, 'image_count': 54}, {'id': 437, 'image_count': 202}, {'id': 438, 'image_count': 32}, {'id': 439, 'image_count': 3}, {'id': 440, 'image_count': 130}, {'id': 441, 'image_count': 119}, {'id': 442, 'image_count': 141}, {'id': 443, 'image_count': 29}, {'id': 444, 'image_count': 
525}, {'id': 445, 'image_count': 1323}, {'id': 446, 'image_count': 2}, {'id': 447, 'image_count': 113}, {'id': 448, 'image_count': 16}, {'id': 449, 'image_count': 7}, {'id': 450, 'image_count': 35}, {'id': 451, 'image_count': 1908}, {'id': 452, 'image_count': 353}, {'id': 453, 'image_count': 18}, {'id': 454, 'image_count': 14}, {'id': 455, 'image_count': 77}, {'id': 456, 'image_count': 8}, {'id': 457, 'image_count': 37}, {'id': 458, 'image_count': 1}, {'id': 459, 'image_count': 346}, {'id': 460, 'image_count': 19}, {'id': 461, 'image_count': 1779}, {'id': 462, 'image_count': 23}, {'id': 463, 'image_count': 25}, {'id': 464, 'image_count': 67}, {'id': 465, 'image_count': 19}, {'id': 466, 'image_count': 28}, {'id': 467, 'image_count': 4}, {'id': 468, 'image_count': 27}, {'id': 469, 'image_count': 1861}, {'id': 470, 'image_count': 11}, {'id': 471, 'image_count': 13}, {'id': 472, 'image_count': 13}, {'id': 473, 'image_count': 32}, {'id': 474, 'image_count': 1767}, {'id': 475, 'image_count': 42}, {'id': 476, 'image_count': 17}, {'id': 477, 'image_count': 128}, {'id': 478, 'image_count': 1}, {'id': 479, 'image_count': 9}, {'id': 480, 'image_count': 10}, {'id': 481, 'image_count': 4}, {'id': 482, 'image_count': 9}, {'id': 483, 'image_count': 18}, {'id': 484, 'image_count': 41}, {'id': 485, 'image_count': 28}, {'id': 486, 'image_count': 3}, {'id': 487, 'image_count': 65}, {'id': 488, 'image_count': 9}, {'id': 489, 'image_count': 23}, {'id': 490, 'image_count': 24}, {'id': 491, 'image_count': 1}, {'id': 492, 'image_count': 2}, {'id': 493, 'image_count': 59}, {'id': 494, 'image_count': 48}, {'id': 495, 'image_count': 17}, {'id': 496, 'image_count': 1877}, {'id': 497, 'image_count': 18}, {'id': 498, 'image_count': 1920}, {'id': 499, 'image_count': 50}, {'id': 500, 'image_count': 1890}, {'id': 501, 'image_count': 99}, {'id': 502, 'image_count': 1530}, {'id': 503, 'image_count': 3}, {'id': 504, 'image_count': 11}, {'id': 505, 'image_count': 19}, {'id': 506, 'image_count': 3}, {'id': 507, 'image_count': 63}, {'id': 508, 'image_count': 5}, {'id': 509, 'image_count': 6}, {'id': 510, 'image_count': 233}, {'id': 511, 'image_count': 54}, {'id': 512, 'image_count': 36}, {'id': 513, 'image_count': 10}, {'id': 514, 'image_count': 124}, {'id': 515, 'image_count': 101}, {'id': 516, 'image_count': 3}, {'id': 517, 'image_count': 363}, {'id': 518, 'image_count': 3}, {'id': 519, 'image_count': 30}, {'id': 520, 'image_count': 18}, {'id': 521, 'image_count': 199}, {'id': 522, 'image_count': 97}, {'id': 523, 'image_count': 32}, {'id': 524, 'image_count': 121}, {'id': 525, 'image_count': 16}, {'id': 526, 'image_count': 12}, {'id': 527, 'image_count': 2}, {'id': 528, 'image_count': 214}, {'id': 529, 'image_count': 48}, {'id': 530, 'image_count': 26}, {'id': 531, 'image_count': 13}, {'id': 532, 'image_count': 4}, {'id': 533, 'image_count': 11}, {'id': 534, 'image_count': 123}, {'id': 535, 'image_count': 7}, {'id': 536, 'image_count': 200}, {'id': 537, 'image_count': 91}, {'id': 538, 'image_count': 9}, {'id': 539, 'image_count': 72}, {'id': 540, 'image_count': 1886}, {'id': 541, 'image_count': 4}, {'id': 542, 'image_count': 1}, {'id': 543, 'image_count': 1}, {'id': 544, 'image_count': 1932}, {'id': 545, 'image_count': 4}, {'id': 546, 'image_count': 56}, {'id': 547, 'image_count': 854}, {'id': 548, 'image_count': 755}, {'id': 549, 'image_count': 1843}, {'id': 550, 'image_count': 96}, {'id': 551, 'image_count': 7}, {'id': 552, 'image_count': 74}, {'id': 553, 'image_count': 66}, {'id': 554, 'image_count': 57}, {'id': 555, 
'image_count': 44}, {'id': 556, 'image_count': 1905}, {'id': 557, 'image_count': 4}, {'id': 558, 'image_count': 90}, {'id': 559, 'image_count': 1635}, {'id': 560, 'image_count': 8}, {'id': 561, 'image_count': 5}, {'id': 562, 'image_count': 50}, {'id': 563, 'image_count': 545}, {'id': 564, 'image_count': 20}, {'id': 565, 'image_count': 193}, {'id': 566, 'image_count': 285}, {'id': 567, 'image_count': 3}, {'id': 568, 'image_count': 1}, {'id': 569, 'image_count': 1904}, {'id': 570, 'image_count': 294}, {'id': 571, 'image_count': 3}, {'id': 572, 'image_count': 5}, {'id': 573, 'image_count': 24}, {'id': 574, 'image_count': 2}, {'id': 575, 'image_count': 2}, {'id': 576, 'image_count': 16}, {'id': 577, 'image_count': 8}, {'id': 578, 'image_count': 154}, {'id': 579, 'image_count': 66}, {'id': 580, 'image_count': 1}, {'id': 581, 'image_count': 24}, {'id': 582, 'image_count': 1}, {'id': 583, 'image_count': 4}, {'id': 584, 'image_count': 75}, {'id': 585, 'image_count': 6}, {'id': 586, 'image_count': 126}, {'id': 587, 'image_count': 24}, {'id': 588, 'image_count': 22}, {'id': 589, 'image_count': 1872}, {'id': 590, 'image_count': 16}, {'id': 591, 'image_count': 423}, {'id': 592, 'image_count': 1927}, {'id': 593, 'image_count': 38}, {'id': 594, 'image_count': 3}, {'id': 595, 'image_count': 1945}, {'id': 596, 'image_count': 35}, {'id': 597, 'image_count': 1}, {'id': 598, 'image_count': 13}, {'id': 599, 'image_count': 9}, {'id': 600, 'image_count': 14}, {'id': 601, 'image_count': 37}, {'id': 602, 'image_count': 3}, {'id': 603, 'image_count': 4}, {'id': 604, 'image_count': 100}, {'id': 605, 'image_count': 195}, {'id': 606, 'image_count': 1}, {'id': 607, 'image_count': 12}, {'id': 608, 'image_count': 24}, {'id': 609, 'image_count': 489}, {'id': 610, 'image_count': 10}, {'id': 611, 'image_count': 1689}, {'id': 612, 'image_count': 42}, {'id': 613, 'image_count': 81}, {'id': 614, 'image_count': 894}, {'id': 615, 'image_count': 1868}, {'id': 616, 'image_count': 7}, {'id': 617, 'image_count': 1567}, {'id': 618, 'image_count': 10}, {'id': 619, 'image_count': 8}, {'id': 620, 'image_count': 7}, {'id': 621, 'image_count': 629}, {'id': 622, 'image_count': 89}, {'id': 623, 'image_count': 15}, {'id': 624, 'image_count': 134}, {'id': 625, 'image_count': 4}, {'id': 626, 'image_count': 1802}, {'id': 627, 'image_count': 595}, {'id': 628, 'image_count': 1210}, {'id': 629, 'image_count': 48}, {'id': 630, 'image_count': 418}, {'id': 631, 'image_count': 1846}, {'id': 632, 'image_count': 5}, {'id': 633, 'image_count': 221}, {'id': 634, 'image_count': 10}, {'id': 635, 'image_count': 7}, {'id': 636, 'image_count': 76}, {'id': 637, 'image_count': 22}, {'id': 638, 'image_count': 10}, {'id': 639, 'image_count': 341}, {'id': 640, 'image_count': 1}, {'id': 641, 'image_count': 705}, {'id': 642, 'image_count': 1900}, {'id': 643, 'image_count': 188}, {'id': 644, 'image_count': 227}, {'id': 645, 'image_count': 861}, {'id': 646, 'image_count': 6}, {'id': 647, 'image_count': 115}, {'id': 648, 'image_count': 5}, {'id': 649, 'image_count': 43}, {'id': 650, 'image_count': 14}, {'id': 651, 'image_count': 6}, {'id': 652, 'image_count': 15}, {'id': 653, 'image_count': 1167}, {'id': 654, 'image_count': 15}, {'id': 655, 'image_count': 994}, {'id': 656, 'image_count': 28}, {'id': 657, 'image_count': 2}, {'id': 658, 'image_count': 338}, {'id': 659, 'image_count': 334}, {'id': 660, 'image_count': 15}, {'id': 661, 'image_count': 102}, {'id': 662, 'image_count': 1}, {'id': 663, 'image_count': 8}, {'id': 664, 'image_count': 1}, {'id': 665, 'image_count': 
1}, {'id': 666, 'image_count': 28}, {'id': 667, 'image_count': 91}, {'id': 668, 'image_count': 260}, {'id': 669, 'image_count': 131}, {'id': 670, 'image_count': 128}, {'id': 671, 'image_count': 3}, {'id': 672, 'image_count': 10}, {'id': 673, 'image_count': 39}, {'id': 674, 'image_count': 2}, {'id': 675, 'image_count': 925}, {'id': 676, 'image_count': 354}, {'id': 677, 'image_count': 31}, {'id': 678, 'image_count': 10}, {'id': 679, 'image_count': 215}, {'id': 680, 'image_count': 71}, {'id': 681, 'image_count': 43}, {'id': 682, 'image_count': 28}, {'id': 683, 'image_count': 34}, {'id': 684, 'image_count': 16}, {'id': 685, 'image_count': 273}, {'id': 686, 'image_count': 2}, {'id': 687, 'image_count': 999}, {'id': 688, 'image_count': 4}, {'id': 689, 'image_count': 107}, {'id': 690, 'image_count': 2}, {'id': 691, 'image_count': 1}, {'id': 692, 'image_count': 454}, {'id': 693, 'image_count': 9}, {'id': 694, 'image_count': 1901}, {'id': 695, 'image_count': 61}, {'id': 696, 'image_count': 91}, {'id': 697, 'image_count': 46}, {'id': 698, 'image_count': 1402}, {'id': 699, 'image_count': 74}, {'id': 700, 'image_count': 421}, {'id': 701, 'image_count': 226}, {'id': 702, 'image_count': 10}, {'id': 703, 'image_count': 1720}, {'id': 704, 'image_count': 261}, {'id': 705, 'image_count': 1337}, {'id': 706, 'image_count': 293}, {'id': 707, 'image_count': 62}, {'id': 708, 'image_count': 814}, {'id': 709, 'image_count': 407}, {'id': 710, 'image_count': 6}, {'id': 711, 'image_count': 16}, {'id': 712, 'image_count': 7}, {'id': 713, 'image_count': 1791}, {'id': 714, 'image_count': 2}, {'id': 715, 'image_count': 1915}, {'id': 716, 'image_count': 1940}, {'id': 717, 'image_count': 13}, {'id': 718, 'image_count': 16}, {'id': 719, 'image_count': 448}, {'id': 720, 'image_count': 12}, {'id': 721, 'image_count': 18}, {'id': 722, 'image_count': 4}, {'id': 723, 'image_count': 71}, {'id': 724, 'image_count': 189}, {'id': 725, 'image_count': 74}, {'id': 726, 'image_count': 103}, {'id': 727, 'image_count': 3}, {'id': 728, 'image_count': 110}, {'id': 729, 'image_count': 5}, {'id': 730, 'image_count': 9}, {'id': 731, 'image_count': 15}, {'id': 732, 'image_count': 25}, {'id': 733, 'image_count': 7}, {'id': 734, 'image_count': 647}, {'id': 735, 'image_count': 824}, {'id': 736, 'image_count': 100}, {'id': 737, 'image_count': 47}, {'id': 738, 'image_count': 121}, {'id': 739, 'image_count': 731}, {'id': 740, 'image_count': 73}, {'id': 741, 'image_count': 49}, {'id': 742, 'image_count': 23}, {'id': 743, 'image_count': 4}, {'id': 744, 'image_count': 62}, {'id': 745, 'image_count': 118}, {'id': 746, 'image_count': 99}, {'id': 747, 'image_count': 40}, {'id': 748, 'image_count': 1036}, {'id': 749, 'image_count': 105}, {'id': 750, 'image_count': 21}, {'id': 751, 'image_count': 229}, {'id': 752, 'image_count': 7}, {'id': 753, 'image_count': 72}, {'id': 754, 'image_count': 9}, {'id': 755, 'image_count': 10}, {'id': 756, 'image_count': 328}, {'id': 757, 'image_count': 468}, {'id': 758, 'image_count': 1}, {'id': 759, 'image_count': 2}, {'id': 760, 'image_count': 24}, {'id': 761, 'image_count': 11}, {'id': 762, 'image_count': 72}, {'id': 763, 'image_count': 17}, {'id': 764, 'image_count': 10}, {'id': 765, 'image_count': 17}, {'id': 766, 'image_count': 489}, {'id': 767, 'image_count': 47}, {'id': 768, 'image_count': 93}, {'id': 769, 'image_count': 1}, {'id': 770, 'image_count': 12}, {'id': 771, 'image_count': 228}, {'id': 772, 'image_count': 5}, {'id': 773, 'image_count': 76}, {'id': 774, 'image_count': 71}, {'id': 775, 'image_count': 30}, 
{'id': 776, 'image_count': 109}, {'id': 777, 'image_count': 14}, {'id': 778, 'image_count': 1}, {'id': 779, 'image_count': 8}, {'id': 780, 'image_count': 26}, {'id': 781, 'image_count': 339}, {'id': 782, 'image_count': 153}, {'id': 783, 'image_count': 2}, {'id': 784, 'image_count': 3}, {'id': 785, 'image_count': 8}, {'id': 786, 'image_count': 47}, {'id': 787, 'image_count': 8}, {'id': 788, 'image_count': 6}, {'id': 789, 'image_count': 116}, {'id': 790, 'image_count': 69}, {'id': 791, 'image_count': 13}, {'id': 792, 'image_count': 6}, {'id': 793, 'image_count': 1928}, {'id': 794, 'image_count': 79}, {'id': 795, 'image_count': 14}, {'id': 796, 'image_count': 7}, {'id': 797, 'image_count': 20}, {'id': 798, 'image_count': 114}, {'id': 799, 'image_count': 221}, {'id': 800, 'image_count': 502}, {'id': 801, 'image_count': 62}, {'id': 802, 'image_count': 87}, {'id': 803, 'image_count': 4}, {'id': 804, 'image_count': 1912}, {'id': 805, 'image_count': 7}, {'id': 806, 'image_count': 186}, {'id': 807, 'image_count': 18}, {'id': 808, 'image_count': 4}, {'id': 809, 'image_count': 3}, {'id': 810, 'image_count': 7}, {'id': 811, 'image_count': 1413}, {'id': 812, 'image_count': 7}, {'id': 813, 'image_count': 12}, {'id': 814, 'image_count': 248}, {'id': 815, 'image_count': 4}, {'id': 816, 'image_count': 1881}, {'id': 817, 'image_count': 529}, {'id': 818, 'image_count': 1932}, {'id': 819, 'image_count': 50}, {'id': 820, 'image_count': 3}, {'id': 821, 'image_count': 28}, {'id': 822, 'image_count': 10}, {'id': 823, 'image_count': 5}, {'id': 824, 'image_count': 5}, {'id': 825, 'image_count': 18}, {'id': 826, 'image_count': 14}, {'id': 827, 'image_count': 1890}, {'id': 828, 'image_count': 660}, {'id': 829, 'image_count': 8}, {'id': 830, 'image_count': 25}, {'id': 831, 'image_count': 10}, {'id': 832, 'image_count': 218}, {'id': 833, 'image_count': 36}, {'id': 834, 'image_count': 16}, {'id': 835, 'image_count': 808}, {'id': 836, 'image_count': 479}, {'id': 837, 'image_count': 1404}, {'id': 838, 'image_count': 307}, {'id': 839, 'image_count': 57}, {'id': 840, 'image_count': 28}, {'id': 841, 'image_count': 80}, {'id': 842, 'image_count': 11}, {'id': 843, 'image_count': 92}, {'id': 844, 'image_count': 20}, {'id': 845, 'image_count': 194}, {'id': 846, 'image_count': 23}, {'id': 847, 'image_count': 52}, {'id': 848, 'image_count': 673}, {'id': 849, 'image_count': 2}, {'id': 850, 'image_count': 2}, {'id': 851, 'image_count': 1}, {'id': 852, 'image_count': 2}, {'id': 853, 'image_count': 8}, {'id': 854, 'image_count': 80}, {'id': 855, 'image_count': 3}, {'id': 856, 'image_count': 3}, {'id': 857, 'image_count': 15}, {'id': 858, 'image_count': 2}, {'id': 859, 'image_count': 10}, {'id': 860, 'image_count': 386}, {'id': 861, 'image_count': 65}, {'id': 862, 'image_count': 3}, {'id': 863, 'image_count': 35}, {'id': 864, 'image_count': 5}, {'id': 865, 'image_count': 180}, {'id': 866, 'image_count': 99}, {'id': 867, 'image_count': 49}, {'id': 868, 'image_count': 28}, {'id': 869, 'image_count': 1}, {'id': 870, 'image_count': 52}, {'id': 871, 'image_count': 36}, {'id': 872, 'image_count': 70}, {'id': 873, 'image_count': 6}, {'id': 874, 'image_count': 29}, {'id': 875, 'image_count': 24}, {'id': 876, 'image_count': 1115}, {'id': 877, 'image_count': 61}, {'id': 878, 'image_count': 18}, {'id': 879, 'image_count': 18}, {'id': 880, 'image_count': 665}, {'id': 881, 'image_count': 1096}, {'id': 882, 'image_count': 29}, {'id': 883, 'image_count': 8}, {'id': 884, 'image_count': 14}, {'id': 885, 'image_count': 1622}, {'id': 886, 'image_count': 
2}, {'id': 887, 'image_count': 3}, {'id': 888, 'image_count': 32}, {'id': 889, 'image_count': 55}, {'id': 890, 'image_count': 1}, {'id': 891, 'image_count': 10}, {'id': 892, 'image_count': 10}, {'id': 893, 'image_count': 47}, {'id': 894, 'image_count': 3}, {'id': 895, 'image_count': 29}, {'id': 896, 'image_count': 342}, {'id': 897, 'image_count': 25}, {'id': 898, 'image_count': 1469}, {'id': 899, 'image_count': 521}, {'id': 900, 'image_count': 347}, {'id': 901, 'image_count': 35}, {'id': 902, 'image_count': 7}, {'id': 903, 'image_count': 207}, {'id': 904, 'image_count': 108}, {'id': 905, 'image_count': 2}, {'id': 906, 'image_count': 34}, {'id': 907, 'image_count': 12}, {'id': 908, 'image_count': 10}, {'id': 909, 'image_count': 13}, {'id': 910, 'image_count': 361}, {'id': 911, 'image_count': 1023}, {'id': 912, 'image_count': 782}, {'id': 913, 'image_count': 2}, {'id': 914, 'image_count': 5}, {'id': 915, 'image_count': 247}, {'id': 916, 'image_count': 221}, {'id': 917, 'image_count': 4}, {'id': 918, 'image_count': 8}, {'id': 919, 'image_count': 158}, {'id': 920, 'image_count': 3}, {'id': 921, 'image_count': 752}, {'id': 922, 'image_count': 64}, {'id': 923, 'image_count': 707}, {'id': 924, 'image_count': 143}, {'id': 925, 'image_count': 1}, {'id': 926, 'image_count': 49}, {'id': 927, 'image_count': 126}, {'id': 928, 'image_count': 76}, {'id': 929, 'image_count': 11}, {'id': 930, 'image_count': 11}, {'id': 931, 'image_count': 4}, {'id': 932, 'image_count': 39}, {'id': 933, 'image_count': 11}, {'id': 934, 'image_count': 13}, {'id': 935, 'image_count': 91}, {'id': 936, 'image_count': 14}, {'id': 937, 'image_count': 5}, {'id': 938, 'image_count': 3}, {'id': 939, 'image_count': 10}, {'id': 940, 'image_count': 18}, {'id': 941, 'image_count': 9}, {'id': 942, 'image_count': 6}, {'id': 943, 'image_count': 951}, {'id': 944, 'image_count': 2}, {'id': 945, 'image_count': 1}, {'id': 946, 'image_count': 19}, {'id': 947, 'image_count': 1942}, {'id': 948, 'image_count': 1916}, {'id': 949, 'image_count': 139}, {'id': 950, 'image_count': 43}, {'id': 951, 'image_count': 1969}, {'id': 952, 'image_count': 5}, {'id': 953, 'image_count': 134}, {'id': 954, 'image_count': 74}, {'id': 955, 'image_count': 381}, {'id': 956, 'image_count': 1}, {'id': 957, 'image_count': 381}, {'id': 958, 'image_count': 6}, {'id': 959, 'image_count': 1826}, {'id': 960, 'image_count': 28}, {'id': 961, 'image_count': 1635}, {'id': 962, 'image_count': 1967}, {'id': 963, 'image_count': 16}, {'id': 964, 'image_count': 1926}, {'id': 965, 'image_count': 1789}, {'id': 966, 'image_count': 401}, {'id': 967, 'image_count': 1968}, {'id': 968, 'image_count': 1167}, {'id': 969, 'image_count': 1}, {'id': 970, 'image_count': 56}, {'id': 971, 'image_count': 17}, {'id': 972, 'image_count': 1}, {'id': 973, 'image_count': 58}, {'id': 974, 'image_count': 9}, {'id': 975, 'image_count': 8}, {'id': 976, 'image_count': 1124}, {'id': 977, 'image_count': 31}, {'id': 978, 'image_count': 16}, {'id': 979, 'image_count': 491}, {'id': 980, 'image_count': 432}, {'id': 981, 'image_count': 1945}, {'id': 982, 'image_count': 1899}, {'id': 983, 'image_count': 5}, {'id': 984, 'image_count': 28}, {'id': 985, 'image_count': 7}, {'id': 986, 'image_count': 146}, {'id': 987, 'image_count': 1}, {'id': 988, 'image_count': 25}, {'id': 989, 'image_count': 22}, {'id': 990, 'image_count': 1}, {'id': 991, 'image_count': 10}, {'id': 992, 'image_count': 9}, {'id': 993, 'image_count': 308}, {'id': 994, 'image_count': 4}, {'id': 995, 'image_count': 1969}, {'id': 996, 'image_count': 45}, 
{'id': 997, 'image_count': 12}, {'id': 998, 'image_count': 1}, {'id': 999, 'image_count': 85}, {'id': 1000, 'image_count': 1127}, {'id': 1001, 'image_count': 11}, {'id': 1002, 'image_count': 60}, {'id': 1003, 'image_count': 1}, {'id': 1004, 'image_count': 16}, {'id': 1005, 'image_count': 1}, {'id': 1006, 'image_count': 65}, {'id': 1007, 'image_count': 13}, {'id': 1008, 'image_count': 655}, {'id': 1009, 'image_count': 51}, {'id': 1010, 'image_count': 1}, {'id': 1011, 'image_count': 673}, {'id': 1012, 'image_count': 5}, {'id': 1013, 'image_count': 36}, {'id': 1014, 'image_count': 54}, {'id': 1015, 'image_count': 5}, {'id': 1016, 'image_count': 8}, {'id': 1017, 'image_count': 305}, {'id': 1018, 'image_count': 297}, {'id': 1019, 'image_count': 1053}, {'id': 1020, 'image_count': 223}, {'id': 1021, 'image_count': 1037}, {'id': 1022, 'image_count': 63}, {'id': 1023, 'image_count': 1881}, {'id': 1024, 'image_count': 507}, {'id': 1025, 'image_count': 333}, {'id': 1026, 'image_count': 1911}, {'id': 1027, 'image_count': 1765}, {'id': 1028, 'image_count': 1}, {'id': 1029, 'image_count': 5}, {'id': 1030, 'image_count': 1}, {'id': 1031, 'image_count': 9}, {'id': 1032, 'image_count': 2}, {'id': 1033, 'image_count': 151}, {'id': 1034, 'image_count': 82}, {'id': 1035, 'image_count': 1931}, {'id': 1036, 'image_count': 41}, {'id': 1037, 'image_count': 1895}, {'id': 1038, 'image_count': 24}, {'id': 1039, 'image_count': 22}, {'id': 1040, 'image_count': 35}, {'id': 1041, 'image_count': 69}, {'id': 1042, 'image_count': 962}, {'id': 1043, 'image_count': 588}, {'id': 1044, 'image_count': 21}, {'id': 1045, 'image_count': 825}, {'id': 1046, 'image_count': 52}, {'id': 1047, 'image_count': 5}, {'id': 1048, 'image_count': 5}, {'id': 1049, 'image_count': 5}, {'id': 1050, 'image_count': 1860}, {'id': 1051, 'image_count': 56}, {'id': 1052, 'image_count': 1582}, {'id': 1053, 'image_count': 7}, {'id': 1054, 'image_count': 2}, {'id': 1055, 'image_count': 1562}, {'id': 1056, 'image_count': 1885}, {'id': 1057, 'image_count': 1}, {'id': 1058, 'image_count': 5}, {'id': 1059, 'image_count': 137}, {'id': 1060, 'image_count': 1094}, {'id': 1061, 'image_count': 134}, {'id': 1062, 'image_count': 29}, {'id': 1063, 'image_count': 22}, {'id': 1064, 'image_count': 522}, {'id': 1065, 'image_count': 50}, {'id': 1066, 'image_count': 68}, {'id': 1067, 'image_count': 16}, {'id': 1068, 'image_count': 40}, {'id': 1069, 'image_count': 35}, {'id': 1070, 'image_count': 135}, {'id': 1071, 'image_count': 1413}, {'id': 1072, 'image_count': 772}, {'id': 1073, 'image_count': 50}, {'id': 1074, 'image_count': 1015}, {'id': 1075, 'image_count': 1}, {'id': 1076, 'image_count': 65}, {'id': 1077, 'image_count': 1900}, {'id': 1078, 'image_count': 1302}, {'id': 1079, 'image_count': 1977}, {'id': 1080, 'image_count': 2}, {'id': 1081, 'image_count': 29}, {'id': 1082, 'image_count': 36}, {'id': 1083, 'image_count': 138}, {'id': 1084, 'image_count': 4}, {'id': 1085, 'image_count': 67}, {'id': 1086, 'image_count': 26}, {'id': 1087, 'image_count': 25}, {'id': 1088, 'image_count': 33}, {'id': 1089, 'image_count': 37}, {'id': 1090, 'image_count': 50}, {'id': 1091, 'image_count': 270}, {'id': 1092, 'image_count': 12}, {'id': 1093, 'image_count': 316}, {'id': 1094, 'image_count': 41}, {'id': 1095, 'image_count': 224}, {'id': 1096, 'image_count': 105}, {'id': 1097, 'image_count': 1925}, {'id': 1098, 'image_count': 1021}, {'id': 1099, 'image_count': 1213}, {'id': 1100, 'image_count': 172}, {'id': 1101, 'image_count': 28}, {'id': 1102, 'image_count': 745}, {'id': 1103, 
'image_count': 187}, {'id': 1104, 'image_count': 147}, {'id': 1105, 'image_count': 136}, {'id': 1106, 'image_count': 34}, {'id': 1107, 'image_count': 41}, {'id': 1108, 'image_count': 636}, {'id': 1109, 'image_count': 570}, {'id': 1110, 'image_count': 1149}, {'id': 1111, 'image_count': 61}, {'id': 1112, 'image_count': 1890}, {'id': 1113, 'image_count': 18}, {'id': 1114, 'image_count': 143}, {'id': 1115, 'image_count': 1517}, {'id': 1116, 'image_count': 7}, {'id': 1117, 'image_count': 943}, {'id': 1118, 'image_count': 6}, {'id': 1119, 'image_count': 1}, {'id': 1120, 'image_count': 11}, {'id': 1121, 'image_count': 101}, {'id': 1122, 'image_count': 1909}, {'id': 1123, 'image_count': 800}, {'id': 1124, 'image_count': 1}, {'id': 1125, 'image_count': 44}, {'id': 1126, 'image_count': 3}, {'id': 1127, 'image_count': 44}, {'id': 1128, 'image_count': 31}, {'id': 1129, 'image_count': 7}, {'id': 1130, 'image_count': 20}, {'id': 1131, 'image_count': 11}, {'id': 1132, 'image_count': 13}, {'id': 1133, 'image_count': 1924}, {'id': 1134, 'image_count': 113}, {'id': 1135, 'image_count': 2}, {'id': 1136, 'image_count': 139}, {'id': 1137, 'image_count': 12}, {'id': 1138, 'image_count': 37}, {'id': 1139, 'image_count': 1866}, {'id': 1140, 'image_count': 47}, {'id': 1141, 'image_count': 1468}, {'id': 1142, 'image_count': 729}, {'id': 1143, 'image_count': 24}, {'id': 1144, 'image_count': 1}, {'id': 1145, 'image_count': 10}, {'id': 1146, 'image_count': 3}, {'id': 1147, 'image_count': 14}, {'id': 1148, 'image_count': 4}, {'id': 1149, 'image_count': 29}, {'id': 1150, 'image_count': 4}, {'id': 1151, 'image_count': 70}, {'id': 1152, 'image_count': 46}, {'id': 1153, 'image_count': 14}, {'id': 1154, 'image_count': 48}, {'id': 1155, 'image_count': 1855}, {'id': 1156, 'image_count': 113}, {'id': 1157, 'image_count': 1}, {'id': 1158, 'image_count': 1}, {'id': 1159, 'image_count': 10}, {'id': 1160, 'image_count': 54}, {'id': 1161, 'image_count': 1923}, {'id': 1162, 'image_count': 630}, {'id': 1163, 'image_count': 31}, {'id': 1164, 'image_count': 69}, {'id': 1165, 'image_count': 7}, {'id': 1166, 'image_count': 11}, {'id': 1167, 'image_count': 1}, {'id': 1168, 'image_count': 30}, {'id': 1169, 'image_count': 50}, {'id': 1170, 'image_count': 45}, {'id': 1171, 'image_count': 28}, {'id': 1172, 'image_count': 114}, {'id': 1173, 'image_count': 193}, {'id': 1174, 'image_count': 21}, {'id': 1175, 'image_count': 91}, {'id': 1176, 'image_count': 31}, {'id': 1177, 'image_count': 1469}, {'id': 1178, 'image_count': 1924}, {'id': 1179, 'image_count': 87}, {'id': 1180, 'image_count': 77}, {'id': 1181, 'image_count': 11}, {'id': 1182, 'image_count': 47}, {'id': 1183, 'image_count': 21}, {'id': 1184, 'image_count': 47}, {'id': 1185, 'image_count': 70}, {'id': 1186, 'image_count': 1838}, {'id': 1187, 'image_count': 19}, {'id': 1188, 'image_count': 531}, {'id': 1189, 'image_count': 11}, {'id': 1190, 'image_count': 941}, {'id': 1191, 'image_count': 113}, {'id': 1192, 'image_count': 26}, {'id': 1193, 'image_count': 5}, {'id': 1194, 'image_count': 56}, {'id': 1195, 'image_count': 73}, {'id': 1196, 'image_count': 32}, {'id': 1197, 'image_count': 128}, {'id': 1198, 'image_count': 623}, {'id': 1199, 'image_count': 12}, {'id': 1200, 'image_count': 52}, {'id': 1201, 'image_count': 11}, {'id': 1202, 'image_count': 1674}, {'id': 1203, 'image_count': 81}] # noqa
+ # fmt: on
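
The table above reaches user code through `get_lvis_instances_meta`, which exposes it as the "class_image_count" metadata key. A short sketch of consuming it (the threshold is a hypothetical example, assuming the lvis.py module above is importable):

from annotator.oneformer.detectron2.data.datasets.lvis import get_lvis_instances_meta

meta = get_lvis_instances_meta("lvis_v1")
counts = {c["id"]: c["image_count"] for c in meta["class_image_count"]}
rare_ids = [i for i, n in counts.items() if n < 10]  # categories seen in <10 images
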
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/pascal_voc.py ADDED
@@ -0,0 +1,82 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ import numpy as np
+ import os
+ import xml.etree.ElementTree as ET
+ from typing import List, Tuple, Union
+
+ from annotator.oneformer.detectron2.data import DatasetCatalog, MetadataCatalog
+ from annotator.oneformer.detectron2.structures import BoxMode
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+
+ __all__ = ["load_voc_instances", "register_pascal_voc"]
+
+
+ # fmt: off
+ CLASS_NAMES = (
+     "aeroplane", "bicycle", "bird", "boat", "bottle", "bus", "car", "cat",
+     "chair", "cow", "diningtable", "dog", "horse", "motorbike", "person",
+     "pottedplant", "sheep", "sofa", "train", "tvmonitor"
+ )
+ # fmt: on
+
+
+ def load_voc_instances(dirname: str, split: str, class_names: Union[List[str], Tuple[str, ...]]):
+     """
+     Load Pascal VOC detection annotations to Detectron2 format.
+
+     Args:
+         dirname: directory containing "Annotations", "ImageSets", "JPEGImages"
+         split (str): one of "train", "test", "val", "trainval"
+         class_names: list or tuple of class names
+     """
+     with PathManager.open(os.path.join(dirname, "ImageSets", "Main", split + ".txt")) as f:
+         fileids = np.loadtxt(f, dtype=str)  # np.str was removed in NumPy 1.24
+
+     # Needs to read many small annotation files; fetch a local copy of the directory first.
+     annotation_dirname = PathManager.get_local_path(os.path.join(dirname, "Annotations/"))
+     dicts = []
+     for fileid in fileids:
+         anno_file = os.path.join(annotation_dirname, fileid + ".xml")
+         jpeg_file = os.path.join(dirname, "JPEGImages", fileid + ".jpg")
+
+         with PathManager.open(anno_file) as f:
+             tree = ET.parse(f)
+
+         r = {
+             "file_name": jpeg_file,
+             "image_id": fileid,
+             "height": int(tree.findall("./size/height")[0].text),
+             "width": int(tree.findall("./size/width")[0].text),
+         }
+         instances = []
+
+         for obj in tree.findall("object"):
+             cls = obj.find("name").text
+             # We include "difficult" samples in training.
+             # Based on limited experiments, they don't hurt accuracy.
+             # difficult = int(obj.find("difficult").text)
+             # if difficult == 1:
+             #     continue
+             bbox = obj.find("bndbox")
+             bbox = [float(bbox.find(x).text) for x in ["xmin", "ymin", "xmax", "ymax"]]
+             # Original annotations are integers in the range [1, W or H].
+             # Assuming they mean 1-based pixel indices (inclusive),
+             # a box with annotation (xmin=1, xmax=W) covers the whole image.
+             # In coordinate space this is represented by (xmin=0, xmax=W).
+             bbox[0] -= 1.0
+             bbox[1] -= 1.0
+             instances.append(
+                 {"category_id": class_names.index(cls), "bbox": bbox, "bbox_mode": BoxMode.XYXY_ABS}
+             )
+         r["annotations"] = instances
+         dicts.append(r)
+     return dicts
+
+
+ def register_pascal_voc(name, dirname, split, year, class_names=CLASS_NAMES):
+     DatasetCatalog.register(name, lambda: load_voc_instances(dirname, split, class_names))
+     MetadataCatalog.get(name).set(
+         thing_classes=list(class_names), dirname=dirname, year=year, split=split
+     )
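A minimal usage sketch for the registration helper above, assuming a VOC-style directory at the hypothetical path `datasets/VOC2012` (the dataset name and path are illustrative, not part of the commit):

    from annotator.oneformer.detectron2.data import DatasetCatalog
    from annotator.oneformer.detectron2.data.datasets.pascal_voc import register_pascal_voc

    # Registration is lazy; load_voc_instances only runs when the catalog entry is fetched.
    register_pascal_voc("voc_2012_trainval", "datasets/VOC2012", "trainval", 2012)
    dicts = DatasetCatalog.get("voc_2012_trainval")
    print(dicts[0]["file_name"], len(dicts[0]["annotations"]))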
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/datasets/register_coco.py ADDED
@@ -0,0 +1,3 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .coco import register_coco_instances  # noqa
+ from .coco_panoptic import register_coco_panoptic_separated  # noqa
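For reference, a usage sketch of the re-exported helper with hypothetical paths (`register_coco_instances(name, metadata, json_file, image_root)` follows the upstream detectron2 signature):

    from annotator.oneformer.detectron2.data.datasets import register_coco_instances

    # Registers a COCO-format dataset in both DatasetCatalog and MetadataCatalog.
    register_coco_instances("my_coco_train", {}, "datasets/my/annotations.json", "datasets/my/images")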
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/detection_utils.py ADDED
@@ -0,0 +1,659 @@
+ # -*- coding: utf-8 -*-
+ # Copyright (c) Facebook, Inc. and its affiliates.
+
+ """
+ Common data processing utilities that are used in a
+ typical object detection data pipeline.
+ """
+ import logging
+ import numpy as np
+ from typing import List, Union
+ import annotator.oneformer.pycocotools.mask as mask_util
+ import torch
+ from PIL import Image
+
+ from annotator.oneformer.detectron2.structures import (
+     BitMasks,
+     Boxes,
+     BoxMode,
+     Instances,
+     Keypoints,
+     PolygonMasks,
+     RotatedBoxes,
+     polygons_to_bitmask,
+ )
+ from annotator.oneformer.detectron2.utils.file_io import PathManager
+
+ from . import transforms as T
+ from .catalog import MetadataCatalog
+
+ __all__ = [
+     "SizeMismatchError",
+     "convert_image_to_rgb",
+     "check_image_size",
+     "transform_proposals",
+     "transform_instance_annotations",
+     "annotations_to_instances",
+     "annotations_to_instances_rotated",
+     "build_augmentation",
+     "build_transform_gen",
+     "create_keypoint_hflip_indices",
+     "filter_empty_instances",
+     "read_image",
+ ]
+
+
+ class SizeMismatchError(ValueError):
+     """
+     Raised when a loaded image has a different width/height than the annotation.
+     """
+
+
+ # https://en.wikipedia.org/wiki/YUV#SDTV_with_BT.601
+ _M_RGB2YUV = [[0.299, 0.587, 0.114], [-0.14713, -0.28886, 0.436], [0.615, -0.51499, -0.10001]]
+ _M_YUV2RGB = [[1.0, 0.0, 1.13983], [1.0, -0.39465, -0.58060], [1.0, 2.03211, 0.0]]
+
+ # https://www.exiv2.org/tags.html
+ _EXIF_ORIENT = 274  # exif 'Orientation' tag
+
+
+ def convert_PIL_to_numpy(image, format):
+     """
+     Convert a PIL image to a numpy array of the target format.
+
+     Args:
+         image (PIL.Image): a PIL image
+         format (str): the format of the output image
+
+     Returns:
+         (np.ndarray): also see `read_image`
+     """
+     if format is not None:
+         # PIL only supports RGB, so convert to RGB and flip channels over below
+         conversion_format = format
+         if format in ["BGR", "YUV-BT.601"]:
+             conversion_format = "RGB"
+         image = image.convert(conversion_format)
+     image = np.asarray(image)
+     # PIL squeezes out the channel dimension for "L", so make it HWC
+     if format == "L":
+         image = np.expand_dims(image, -1)
+
+     # handle formats not supported by PIL
+     elif format == "BGR":
+         # flip channels if needed
+         image = image[:, :, ::-1]
+     elif format == "YUV-BT.601":
+         image = image / 255.0
+         image = np.dot(image, np.array(_M_RGB2YUV).T)
+
+     return image
+
+
+ def convert_image_to_rgb(image, format):
+     """
+     Convert an image from the given format to RGB.
+
+     Args:
+         image (np.ndarray or Tensor): an HWC image
+         format (str): the format of the input image, also see `read_image`
+
+     Returns:
+         (np.ndarray): (H,W,3) RGB image in 0-255 range, can be either float or uint8
+     """
+     if isinstance(image, torch.Tensor):
+         image = image.cpu().numpy()
+     if format == "BGR":
+         image = image[:, :, [2, 1, 0]]
+     elif format == "YUV-BT.601":
+         image = np.dot(image, np.array(_M_YUV2RGB).T)
+         image = image * 255.0
+     else:
+         if format == "L":
+             image = image[:, :, 0]
+         image = image.astype(np.uint8)
+         image = np.asarray(Image.fromarray(image, mode=format).convert("RGB"))
+     return image
+
+
+ def _apply_exif_orientation(image):
+     """
+     Applies the exif orientation correctly.
+
+     This code exists per the bug
+     https://github.com/python-pillow/Pillow/issues/3973 in
+     `ImageOps.exif_transpose`, whose Pillow implementation raises errors with
+     various methods, especially `tobytes`.
+
+     Function based on:
+     https://github.com/wkentaro/labelme/blob/v4.5.4/labelme/utils/image.py#L59
+     https://github.com/python-pillow/Pillow/blob/7.1.2/src/PIL/ImageOps.py#L527
+
+     Args:
+         image (PIL.Image): a PIL image
+
+     Returns:
+         (PIL.Image): the PIL image with exif orientation applied, if applicable
+     """
+     if not hasattr(image, "getexif"):
+         return image
+
+     try:
+         exif = image.getexif()
+     except Exception:  # https://github.com/facebookresearch/detectron2/issues/1885
+         exif = None
+
+     if exif is None:
+         return image
+
+     orientation = exif.get(_EXIF_ORIENT)
+
+     method = {
+         2: Image.FLIP_LEFT_RIGHT,
+         3: Image.ROTATE_180,
+         4: Image.FLIP_TOP_BOTTOM,
+         5: Image.TRANSPOSE,
+         6: Image.ROTATE_270,
+         7: Image.TRANSVERSE,
+         8: Image.ROTATE_90,
+     }.get(orientation)
+
+     if method is not None:
+         return image.transpose(method)
+     return image
+
+
+ def read_image(file_name, format=None):
+     """
+     Read an image into the given format.
+     Will apply rotation and flipping if the image has such exif information.
+
+     Args:
+         file_name (str): image file path
+         format (str): one of the supported image modes in PIL, or "BGR" or "YUV-BT.601".
+
+     Returns:
+         image (np.ndarray):
+             an HWC image in the given format, which is 0-255, uint8 for
+             supported image modes in PIL or "BGR"; float (0-1 for Y) for YUV-BT.601.
+     """
+     with PathManager.open(file_name, "rb") as f:
+         image = Image.open(f)
+
+         # work around this bug: https://github.com/python-pillow/Pillow/issues/3973
+         image = _apply_exif_orientation(image)
+         return convert_PIL_to_numpy(image, format)
+
+
+ def check_image_size(dataset_dict, image):
+     """
+     Raise an error if the image does not match the size specified in the dict.
+     """
+     if "width" in dataset_dict or "height" in dataset_dict:
+         image_wh = (image.shape[1], image.shape[0])
+         expected_wh = (dataset_dict["width"], dataset_dict["height"])
+         if not image_wh == expected_wh:
+             raise SizeMismatchError(
+                 "Mismatched image shape{}, got {}, expect {}.".format(
+                     " for image " + dataset_dict["file_name"]
+                     if "file_name" in dataset_dict
+                     else "",
+                     image_wh,
+                     expected_wh,
+                 )
+                 + " Please check the width/height in your annotation."
+             )
+
+     # To ensure bbox always remap to original image size
+     if "width" not in dataset_dict:
+         dataset_dict["width"] = image.shape[1]
+     if "height" not in dataset_dict:
+         dataset_dict["height"] = image.shape[0]
+
+
+ def transform_proposals(dataset_dict, image_shape, transforms, *, proposal_topk, min_box_size=0):
+     """
+     Apply transformations to the proposals in dataset_dict, if any.
+
+     Args:
+         dataset_dict (dict): a dict read from the dataset, possibly
+             contains fields "proposal_boxes", "proposal_objectness_logits", "proposal_bbox_mode"
+         image_shape (tuple): height, width
+         transforms (TransformList):
+         proposal_topk (int): only keep top-K scoring proposals
+         min_box_size (int): proposals with either side smaller than this
+             threshold are removed
+
+     The input dict is modified in-place, with the above-mentioned keys removed. A new
+     key "proposals" will be added. Its value is an `Instances`
+     object which contains the transformed proposals in its field
+     "proposal_boxes" and "objectness_logits".
+     """
+     if "proposal_boxes" in dataset_dict:
+         # Transform proposal boxes
+         boxes = transforms.apply_box(
+             BoxMode.convert(
+                 dataset_dict.pop("proposal_boxes"),
+                 dataset_dict.pop("proposal_bbox_mode"),
+                 BoxMode.XYXY_ABS,
+             )
+         )
+         boxes = Boxes(boxes)
+         objectness_logits = torch.as_tensor(
+             dataset_dict.pop("proposal_objectness_logits").astype("float32")
+         )
+
+         boxes.clip(image_shape)
+         keep = boxes.nonempty(threshold=min_box_size)
+         boxes = boxes[keep]
+         objectness_logits = objectness_logits[keep]
+
+         proposals = Instances(image_shape)
+         proposals.proposal_boxes = boxes[:proposal_topk]
+         proposals.objectness_logits = objectness_logits[:proposal_topk]
+         dataset_dict["proposals"] = proposals
+
+
+ def get_bbox(annotation):
+     """
+     Get the bbox from an annotation dict.
+     Args:
+         annotation (dict): dict of instance annotations for a single instance.
+     Returns:
+         bbox (ndarray): x1, y1, x2, y2 coordinates
+     """
+     # bbox is 1d (per-instance bounding box)
+     bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
+     return bbox
+
+
+ def transform_instance_annotations(
+     annotation, transforms, image_size, *, keypoint_hflip_indices=None
+ ):
+     """
+     Apply transforms to box, segmentation and keypoints annotations of a single instance.
+
+     It will use `transforms.apply_box` for the box, and
+     `transforms.apply_coords` for segmentation polygons & keypoints.
+     If you need anything more specially designed for each data structure,
+     you'll need to implement your own version of this function or the transforms.
+
+     Args:
+         annotation (dict): dict of instance annotations for a single instance.
+             It will be modified in-place.
+         transforms (TransformList or list[Transform]):
+         image_size (tuple): the height, width of the transformed image
+         keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
+
+     Returns:
+         dict:
+             the same input dict with fields "bbox", "segmentation", "keypoints"
+             transformed according to `transforms`.
+             The "bbox_mode" field will be set to XYXY_ABS.
+     """
+     if isinstance(transforms, (tuple, list)):
+         transforms = T.TransformList(transforms)
+     # bbox is 1d (per-instance bounding box)
+     bbox = BoxMode.convert(annotation["bbox"], annotation["bbox_mode"], BoxMode.XYXY_ABS)
+     # clip transformed bbox to image size
+     bbox = transforms.apply_box(np.array([bbox]))[0].clip(min=0)
+     annotation["bbox"] = np.minimum(bbox, list(image_size + image_size)[::-1])
+     annotation["bbox_mode"] = BoxMode.XYXY_ABS
+
+     if "segmentation" in annotation:
+         # each instance contains 1 or more polygons
+         segm = annotation["segmentation"]
+         if isinstance(segm, list):
+             # polygons
+             polygons = [np.asarray(p).reshape(-1, 2) for p in segm]
+             annotation["segmentation"] = [
+                 p.reshape(-1) for p in transforms.apply_polygons(polygons)
+             ]
+         elif isinstance(segm, dict):
+             # RLE
+             mask = mask_util.decode(segm)
+             mask = transforms.apply_segmentation(mask)
+             assert tuple(mask.shape[:2]) == image_size
+             annotation["segmentation"] = mask
+         else:
+             raise ValueError(
+                 "Cannot transform segmentation of type '{}'!"
+                 "Supported types are: polygons as list[list[float] or ndarray],"
+                 " COCO-style RLE as a dict.".format(type(segm))
+             )
+
+     if "keypoints" in annotation:
+         keypoints = transform_keypoint_annotations(
+             annotation["keypoints"], transforms, image_size, keypoint_hflip_indices
+         )
+         annotation["keypoints"] = keypoints
+
+     return annotation
+
+
+ def transform_keypoint_annotations(keypoints, transforms, image_size, keypoint_hflip_indices=None):
+     """
+     Transform keypoint annotations of an image.
+     If a keypoint is transformed out of image boundary, it will be marked "unlabeled" (visibility=0)
+
+     Args:
+         keypoints (list[float]): Nx3 float in Detectron2's Dataset format.
+             Each point is represented by (x, y, visibility).
+         transforms (TransformList):
+         image_size (tuple): the height, width of the transformed image
+         keypoint_hflip_indices (ndarray[int]): see `create_keypoint_hflip_indices`.
+             When `transforms` includes horizontal flip, will use the index
+             mapping to flip keypoints.
+     """
+     # (N*3,) -> (N, 3)
+     keypoints = np.asarray(keypoints, dtype="float64").reshape(-1, 3)
+     keypoints_xy = transforms.apply_coords(keypoints[:, :2])
+
+     # Set all out-of-boundary points to "unlabeled"
+     inside = (keypoints_xy >= np.array([0, 0])) & (keypoints_xy <= np.array(image_size[::-1]))
+     inside = inside.all(axis=1)
+     keypoints[:, :2] = keypoints_xy
+     keypoints[:, 2][~inside] = 0
+
+     # This assumes that HorizFlipTransform is the only one that does flip
+     do_hflip = sum(isinstance(t, T.HFlipTransform) for t in transforms.transforms) % 2 == 1
+
+     # Alternative way: check if probe points were horizontally flipped.
+     # probe = np.asarray([[0.0, 0.0], [image_width, 0.0]])
+     # probe_aug = transforms.apply_coords(probe.copy())
+     # do_hflip = np.sign(probe[1][0] - probe[0][0]) != np.sign(probe_aug[1][0] - probe_aug[0][0])  # noqa
+
+     # If flipped, swap each keypoint with its opposite-handed equivalent
+     if do_hflip:
+         if keypoint_hflip_indices is None:
+             raise ValueError("Cannot flip keypoints without providing flip indices!")
+         if len(keypoints) != len(keypoint_hflip_indices):
+             raise ValueError(
+                 "Keypoint data has {} points, but metadata "
+                 "contains {} points!".format(len(keypoints), len(keypoint_hflip_indices))
+             )
+         keypoints = keypoints[np.asarray(keypoint_hflip_indices, dtype=np.int32), :]
+
+     # Maintain COCO convention that if visibility == 0 (unlabeled), then x, y = 0
+     keypoints[keypoints[:, 2] == 0] = 0
+     return keypoints
+
+
+ def annotations_to_instances(annos, image_size, mask_format="polygon"):
+     """
+     Create an :class:`Instances` object used by the models,
+     from instance annotations in the dataset dict.
+
+     Args:
+         annos (list[dict]): a list of instance annotations in one image, each
+             element for one instance.
+         image_size (tuple): height, width
+
+     Returns:
+         Instances:
+             It will contain fields "gt_boxes", "gt_classes",
+             "gt_masks", "gt_keypoints", if they can be obtained from `annos`.
+             This is the format that builtin models expect.
+     """
+     boxes = (
+         np.stack(
+             [BoxMode.convert(obj["bbox"], obj["bbox_mode"], BoxMode.XYXY_ABS) for obj in annos]
+         )
+         if len(annos)
+         else np.zeros((0, 4))
+     )
+     target = Instances(image_size)
+     target.gt_boxes = Boxes(boxes)
+
+     classes = [int(obj["category_id"]) for obj in annos]
+     classes = torch.tensor(classes, dtype=torch.int64)
+     target.gt_classes = classes
+
+     if len(annos) and "segmentation" in annos[0]:
+         segms = [obj["segmentation"] for obj in annos]
+         if mask_format == "polygon":
+             try:
+                 masks = PolygonMasks(segms)
+             except ValueError as e:
+                 raise ValueError(
+                     "Failed to use mask_format=='polygon' from the given annotations!"
+                 ) from e
+         else:
+             assert mask_format == "bitmask", mask_format
+             masks = []
+             for segm in segms:
+                 if isinstance(segm, list):
+                     # polygon
+                     masks.append(polygons_to_bitmask(segm, *image_size))
+                 elif isinstance(segm, dict):
+                     # COCO RLE
+                     masks.append(mask_util.decode(segm))
+                 elif isinstance(segm, np.ndarray):
+                     assert segm.ndim == 2, "Expect segmentation of 2 dimensions, got {}.".format(
+                         segm.ndim
+                     )
+                     # mask array
+                     masks.append(segm)
+                 else:
+                     raise ValueError(
+                         "Cannot convert segmentation of type '{}' to BitMasks!"
+                         "Supported types are: polygons as list[list[float] or ndarray],"
+                         " COCO-style RLE as a dict, or a binary segmentation mask "
+                         " in a 2D numpy array of shape HxW.".format(type(segm))
+                     )
+             # torch.from_numpy does not support array with negative stride.
+             masks = BitMasks(
+                 torch.stack([torch.from_numpy(np.ascontiguousarray(x)) for x in masks])
+             )
+         target.gt_masks = masks
+
+     if len(annos) and "keypoints" in annos[0]:
+         kpts = [obj.get("keypoints", []) for obj in annos]
+         target.gt_keypoints = Keypoints(kpts)
+
+     return target
+
+
+ def annotations_to_instances_rotated(annos, image_size):
+     """
+     Create an :class:`Instances` object used by the models,
+     from instance annotations in the dataset dict.
+     Compared to `annotations_to_instances`, this function is for rotated boxes only.
+
+     Args:
+         annos (list[dict]): a list of instance annotations in one image, each
+             element for one instance.
+         image_size (tuple): height, width
+
+     Returns:
+         Instances:
+             Containing fields "gt_boxes", "gt_classes",
+             if they can be obtained from `annos`.
+             This is the format that builtin models expect.
+     """
+     boxes = [obj["bbox"] for obj in annos]
+     target = Instances(image_size)
+     boxes = target.gt_boxes = RotatedBoxes(boxes)
+     boxes.clip(image_size)
+
+     classes = [obj["category_id"] for obj in annos]
+     classes = torch.tensor(classes, dtype=torch.int64)
+     target.gt_classes = classes
+
+     return target
+
+
+ def filter_empty_instances(
+     instances, by_box=True, by_mask=True, box_threshold=1e-5, return_mask=False
+ ):
+     """
+     Filter out empty instances in an `Instances` object.
+
+     Args:
+         instances (Instances):
+         by_box (bool): whether to filter out instances with empty boxes
+         by_mask (bool): whether to filter out instances with empty masks
+         box_threshold (float): minimum width and height to be considered non-empty
+         return_mask (bool): whether to return boolean mask of filtered instances
+
+     Returns:
+         Instances: the filtered instances.
+         tensor[bool], optional: boolean mask of filtered instances
+     """
+     assert by_box or by_mask
+     r = []
+     if by_box:
+         r.append(instances.gt_boxes.nonempty(threshold=box_threshold))
+     if instances.has("gt_masks") and by_mask:
+         r.append(instances.gt_masks.nonempty())
+
+     # TODO: can also filter visible keypoints
+
+     if not r:
+         return instances
+     m = r[0]
+     for x in r[1:]:
+         m = m & x
+     if return_mask:
+         return instances[m], m
+     return instances[m]
+
+
+ def create_keypoint_hflip_indices(dataset_names: Union[str, List[str]]) -> List[int]:
+     """
+     Args:
+         dataset_names: list of dataset names
+
+     Returns:
+         list[int]: a list of size=#keypoints, storing the
+         horizontally-flipped keypoint indices.
+     """
+     if isinstance(dataset_names, str):
+         dataset_names = [dataset_names]
+
+     check_metadata_consistency("keypoint_names", dataset_names)
+     check_metadata_consistency("keypoint_flip_map", dataset_names)
+
+     meta = MetadataCatalog.get(dataset_names[0])
+     names = meta.keypoint_names
+     # TODO flip -> hflip
+     flip_map = dict(meta.keypoint_flip_map)
+     flip_map.update({v: k for k, v in flip_map.items()})
+     flipped_names = [i if i not in flip_map else flip_map[i] for i in names]
+     flip_indices = [names.index(i) for i in flipped_names]
+     return flip_indices
+
+
+ def get_fed_loss_cls_weights(dataset_names: Union[str, List[str]], freq_weight_power=1.0):
+     """
+     Get the frequency weight for each class, sorted by class id.
+     The frequency weight is computed as image_count raised to the power freq_weight_power.
+
+     Args:
+         dataset_names: list of dataset names
+         freq_weight_power: power value
+     """
+     if isinstance(dataset_names, str):
+         dataset_names = [dataset_names]
+
+     check_metadata_consistency("class_image_count", dataset_names)
+
+     meta = MetadataCatalog.get(dataset_names[0])
+     class_freq_meta = meta.class_image_count
+     class_freq = torch.tensor(
+         [c["image_count"] for c in sorted(class_freq_meta, key=lambda x: x["id"])]
+     )
+     class_freq_weight = class_freq.float() ** freq_weight_power
+     return class_freq_weight
+
+
+ def gen_crop_transform_with_instance(crop_size, image_size, instance):
+     """
+     Generate a CropTransform so that the cropping region contains
+     the center of the given instance.
+
+     Args:
+         crop_size (tuple): h, w in pixels
+         image_size (tuple): h, w
+         instance (dict): an annotation dict of one instance, in Detectron2's
+             dataset format.
+     """
+     crop_size = np.asarray(crop_size, dtype=np.int32)
+     bbox = BoxMode.convert(instance["bbox"], instance["bbox_mode"], BoxMode.XYXY_ABS)
+     center_yx = (bbox[1] + bbox[3]) * 0.5, (bbox[0] + bbox[2]) * 0.5
+     assert (
+         image_size[0] >= center_yx[0] and image_size[1] >= center_yx[1]
+     ), "The annotation bounding box is outside of the image!"
+     assert (
+         image_size[0] >= crop_size[0] and image_size[1] >= crop_size[1]
+     ), "Crop size is larger than image size!"
+
+     min_yx = np.maximum(np.floor(center_yx).astype(np.int32) - crop_size, 0)
+     max_yx = np.maximum(np.asarray(image_size, dtype=np.int32) - crop_size, 0)
+     max_yx = np.minimum(max_yx, np.ceil(center_yx).astype(np.int32))
+
+     y0 = np.random.randint(min_yx[0], max_yx[0] + 1)
+     x0 = np.random.randint(min_yx[1], max_yx[1] + 1)
+     return T.CropTransform(x0, y0, crop_size[1], crop_size[0])
+
+
+ def check_metadata_consistency(key, dataset_names):
+     """
+     Check that the datasets have consistent metadata.
+
+     Args:
+         key (str): a metadata key
+         dataset_names (list[str]): a list of dataset names
+
+     Raises:
+         AttributeError: if the key does not exist in the metadata
+         ValueError: if the given datasets do not have the same metadata values defined by key
+     """
+     if len(dataset_names) == 0:
+         return
+     logger = logging.getLogger(__name__)
+     entries_per_dataset = [getattr(MetadataCatalog.get(d), key) for d in dataset_names]
+     for idx, entry in enumerate(entries_per_dataset):
+         if entry != entries_per_dataset[0]:
+             logger.error(
+                 "Metadata '{}' for dataset '{}' is '{}'".format(key, dataset_names[idx], str(entry))
+             )
+             logger.error(
+                 "Metadata '{}' for dataset '{}' is '{}'".format(
+                     key, dataset_names[0], str(entries_per_dataset[0])
+                 )
+             )
+             raise ValueError("Datasets have different metadata '{}'!".format(key))
+
+
+ def build_augmentation(cfg, is_train):
+     """
+     Create a list of default :class:`Augmentation` from config.
+     Now it includes resizing and flipping.
+
+     Returns:
+         list[Augmentation]
+     """
+     if is_train:
+         min_size = cfg.INPUT.MIN_SIZE_TRAIN
+         max_size = cfg.INPUT.MAX_SIZE_TRAIN
+         sample_style = cfg.INPUT.MIN_SIZE_TRAIN_SAMPLING
+     else:
+         min_size = cfg.INPUT.MIN_SIZE_TEST
+         max_size = cfg.INPUT.MAX_SIZE_TEST
+         sample_style = "choice"
+     augmentation = [T.ResizeShortestEdge(min_size, max_size, sample_style)]
+     if is_train and cfg.INPUT.RANDOM_FLIP != "none":
+         augmentation.append(
+             T.RandomFlip(
+                 horizontal=cfg.INPUT.RANDOM_FLIP == "horizontal",
+                 vertical=cfg.INPUT.RANDOM_FLIP == "vertical",
+             )
+         )
+     return augmentation
+
+
+ build_transform_gen = build_augmentation
+ """
+ Alias for backward-compatibility.
+ """
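Taken together, these utilities implement the read → augment → transform-annotations → build-Instances path of a dataset mapper. A minimal sketch of that flow, assuming a hypothetical `image.jpg` and one hand-written box annotation (file name, sizes, and values are illustrative, not part of the commit):

    from annotator.oneformer.detectron2.data import detection_utils as utils
    from annotator.oneformer.detectron2.data import transforms as T
    from annotator.oneformer.detectron2.structures import BoxMode

    image = utils.read_image("image.jpg", format="BGR")   # HWC uint8, EXIF-corrected
    augs = T.AugmentationList([T.ResizeShortestEdge(640, 1333)])
    aug_input = T.AugInput(image)
    transforms = augs(aug_input)                          # returns a TransformList
    image = aug_input.image                               # augmented in place

    anno = {"bbox": [10.0, 20.0, 100.0, 200.0], "bbox_mode": BoxMode.XYXY_ABS, "category_id": 0}
    anno = utils.transform_instance_annotations(anno, transforms, image.shape[:2])
    instances = utils.annotations_to_instances([anno], image.shape[:2])
    instances = utils.filter_empty_instances(instances)   # drop boxes emptied by the transform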
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/samplers/__init__.py ADDED
@@ -0,0 +1,17 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from .distributed_sampler import (
+     InferenceSampler,
+     RandomSubsetTrainingSampler,
+     RepeatFactorTrainingSampler,
+     TrainingSampler,
+ )
+
+ from .grouped_batch_sampler import GroupedBatchSampler
+
+ __all__ = [
+     "GroupedBatchSampler",
+     "TrainingSampler",
+     "RandomSubsetTrainingSampler",
+     "InferenceSampler",
+     "RepeatFactorTrainingSampler",
+ ]
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/samplers/distributed_sampler.py ADDED
@@ -0,0 +1,278 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import itertools
+ import logging
+ import math
+ from collections import defaultdict
+ from typing import Optional
+ import torch
+ from torch.utils.data.sampler import Sampler
+
+ from annotator.oneformer.detectron2.utils import comm
+
+ logger = logging.getLogger(__name__)
+
+
+ class TrainingSampler(Sampler):
+     """
+     In training, we only care about the "infinite stream" of training data.
+     So this sampler produces an infinite stream of indices and
+     all workers cooperate to correctly shuffle the indices and sample different indices.
+
+     The sampler in each worker effectively produces `indices[worker_id::num_workers]`
+     where `indices` is an infinite stream of indices consisting of
+     `shuffle(range(size)) + shuffle(range(size)) + ...` (if shuffle is True)
+     or `range(size) + range(size) + ...` (if shuffle is False).
+
+     Note that this sampler does not shard based on pytorch DataLoader worker id.
+     A sampler passed to pytorch DataLoader is used only with map-style dataset
+     and will not be executed inside workers.
+     But if this sampler is used in a way that it gets executed inside a dataloader
+     worker, then extra work needs to be done to shard its outputs based on worker id.
+     This is required so that workers don't produce identical data.
+     :class:`ToIterableDataset` implements this logic.
+     This note is true for all samplers in detectron2.
+     """
+
+     def __init__(self, size: int, shuffle: bool = True, seed: Optional[int] = None):
+         """
+         Args:
+             size (int): the total number of data of the underlying dataset to sample from
+             shuffle (bool): whether to shuffle the indices or not
+             seed (int): the initial seed of the shuffle. Must be the same
+                 across all workers. If None, will use a random seed shared
+                 among workers (require synchronization among all workers).
+         """
+         if not isinstance(size, int):
+             raise TypeError(f"TrainingSampler(size=) expects an int. Got type {type(size)}.")
+         if size <= 0:
+             raise ValueError(f"TrainingSampler(size=) expects a positive int. Got {size}.")
+         self._size = size
+         self._shuffle = shuffle
+         if seed is None:
+             seed = comm.shared_random_seed()
+         self._seed = int(seed)
+
+         self._rank = comm.get_rank()
+         self._world_size = comm.get_world_size()
+
+     def __iter__(self):
+         start = self._rank
+         yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+     def _infinite_indices(self):
+         g = torch.Generator()
+         g.manual_seed(self._seed)
+         while True:
+             if self._shuffle:
+                 yield from torch.randperm(self._size, generator=g).tolist()
+             else:
+                 yield from torch.arange(self._size).tolist()
+
+
+ class RandomSubsetTrainingSampler(TrainingSampler):
+     """
+     Similar to TrainingSampler, but only samples a random subset of indices.
+     This is useful when you want to estimate the accuracy vs data-number curves by
+     training the model with different subset_ratio.
+     """
+
+     def __init__(
+         self,
+         size: int,
+         subset_ratio: float,
+         shuffle: bool = True,
+         seed_shuffle: Optional[int] = None,
+         seed_subset: Optional[int] = None,
+     ):
+         """
+         Args:
+             size (int): the total number of data of the underlying dataset to sample from
+             subset_ratio (float): the ratio of subset data to sample from the underlying dataset
+             shuffle (bool): whether to shuffle the indices or not
+             seed_shuffle (int): the initial seed of the shuffle. Must be the same
+                 across all workers. If None, will use a random seed shared
+                 among workers (require synchronization among all workers).
+             seed_subset (int): the seed to randomize the subset to be sampled.
+                 Must be the same across all workers. If None, will use a random seed shared
+                 among workers (require synchronization among all workers).
+         """
+         super().__init__(size=size, shuffle=shuffle, seed=seed_shuffle)
+
+         assert 0.0 < subset_ratio <= 1.0
+         self._size_subset = int(size * subset_ratio)
+         assert self._size_subset > 0
+         if seed_subset is None:
+             seed_subset = comm.shared_random_seed()
+         self._seed_subset = int(seed_subset)
+
+         # randomly generate the subset indexes to be sampled from
+         g = torch.Generator()
+         g.manual_seed(self._seed_subset)
+         indexes_randperm = torch.randperm(self._size, generator=g)
+         self._indexes_subset = indexes_randperm[: self._size_subset]
+
+         logger.info("Using RandomSubsetTrainingSampler......")
+         logger.info(f"Randomly sample {self._size_subset} data from the original {self._size} data")
+
+     def _infinite_indices(self):
+         g = torch.Generator()
+         g.manual_seed(self._seed)  # self._seed equals seed_shuffle from __init__()
+         while True:
+             if self._shuffle:
+                 # generate a random permutation to shuffle self._indexes_subset
+                 randperm = torch.randperm(self._size_subset, generator=g)
+                 yield from self._indexes_subset[randperm].tolist()
+             else:
+                 yield from self._indexes_subset.tolist()
+
+
+ class RepeatFactorTrainingSampler(Sampler):
+     """
+     Similar to TrainingSampler, but a sample may appear more times than others based
+     on its "repeat factor". This is suitable for training on class imbalanced datasets like LVIS.
+     """
+
+     def __init__(self, repeat_factors, *, shuffle=True, seed=None):
+         """
+         Args:
+             repeat_factors (Tensor): a float vector, the repeat factor for each index. When it's
+                 full of ones, it is equivalent to ``TrainingSampler(len(repeat_factors), ...)``.
+             shuffle (bool): whether to shuffle the indices or not
+             seed (int): the initial seed of the shuffle. Must be the same
+                 across all workers. If None, will use a random seed shared
+                 among workers (require synchronization among all workers).
+         """
+         self._shuffle = shuffle
+         if seed is None:
+             seed = comm.shared_random_seed()
+         self._seed = int(seed)
+
+         self._rank = comm.get_rank()
+         self._world_size = comm.get_world_size()
+
+         # Split into whole number (_int_part) and fractional (_frac_part) parts.
+         self._int_part = torch.trunc(repeat_factors)
+         self._frac_part = repeat_factors - self._int_part
+
+     @staticmethod
+     def repeat_factors_from_category_frequency(dataset_dicts, repeat_thresh):
+         """
+         Compute (fractional) per-image repeat factors based on category frequency.
+         The repeat factor for an image is a function of the frequency of the rarest
+         category labeled in that image. The "frequency of category c" in [0, 1] is defined
+         as the fraction of images in the training set (without repeats) in which category c
+         appears.
+         See :paper:`lvis` (>= v2) Appendix B.2.
+
+         Args:
+             dataset_dicts (list[dict]): annotations in Detectron2 dataset format.
+             repeat_thresh (float): frequency threshold below which data is repeated.
+                 If the frequency is half of `repeat_thresh`, the image will be
+                 repeated twice.
+
+         Returns:
+             torch.Tensor:
+                 the i-th element is the repeat factor for the dataset image at index i.
+         """
+         # 1. For each category c, compute the fraction of images that contain it: f(c)
+         category_freq = defaultdict(int)
+         for dataset_dict in dataset_dicts:  # For each image (without repeats)
+             cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
+             for cat_id in cat_ids:
+                 category_freq[cat_id] += 1
+         num_images = len(dataset_dicts)
+         for k, v in category_freq.items():
+             category_freq[k] = v / num_images
+
+         # 2. For each category c, compute the category-level repeat factor:
+         #    r(c) = max(1, sqrt(t / f(c)))
+         category_rep = {
+             cat_id: max(1.0, math.sqrt(repeat_thresh / cat_freq))
+             for cat_id, cat_freq in category_freq.items()
+         }
+
+         # 3. For each image I, compute the image-level repeat factor:
+         #    r(I) = max_{c in I} r(c)
+         rep_factors = []
+         for dataset_dict in dataset_dicts:
+             cat_ids = {ann["category_id"] for ann in dataset_dict["annotations"]}
+             rep_factor = max({category_rep[cat_id] for cat_id in cat_ids}, default=1.0)
+             rep_factors.append(rep_factor)
+
+         return torch.tensor(rep_factors, dtype=torch.float32)
+
+     def _get_epoch_indices(self, generator):
+         """
+         Create a list of dataset indices (with repeats) to use for one epoch.
+
+         Args:
+             generator (torch.Generator): pseudo random number generator used for
+                 stochastic rounding.
+
+         Returns:
+             torch.Tensor: list of dataset indices to use in one epoch. Each index
+             is repeated based on its calculated repeat factor.
+         """
+         # Since repeat factors are fractional, we use stochastic rounding so
+         # that the target repeat factor is achieved in expectation over the
+         # course of training
+         rands = torch.rand(len(self._frac_part), generator=generator)
+         rep_factors = self._int_part + (rands < self._frac_part).float()
+         # Construct a list of indices in which we repeat images as specified
+         indices = []
+         for dataset_index, rep_factor in enumerate(rep_factors):
+             indices.extend([dataset_index] * int(rep_factor.item()))
+         return torch.tensor(indices, dtype=torch.int64)
+
+     def __iter__(self):
+         start = self._rank
+         yield from itertools.islice(self._infinite_indices(), start, None, self._world_size)
+
+     def _infinite_indices(self):
+         g = torch.Generator()
+         g.manual_seed(self._seed)
+         while True:
+             # Sample indices with repeats determined by stochastic rounding; each
+             # "epoch" may have a slightly different size due to the rounding.
+             indices = self._get_epoch_indices(g)
+             if self._shuffle:
+                 randperm = torch.randperm(len(indices), generator=g)
+                 yield from indices[randperm].tolist()
+             else:
+                 yield from indices.tolist()
+
+
+ class InferenceSampler(Sampler):
+     """
+     Produce indices for inference across all workers.
+     Inference needs to run on the __exact__ set of samples,
+     therefore when the total number of samples is not divisible by the number of workers,
+     this sampler produces a different number of samples on different workers.
+     """
+
+     def __init__(self, size: int):
+         """
+         Args:
+             size (int): the total number of data of the underlying dataset to sample from
+         """
+         self._size = size
+         assert size > 0
+         self._rank = comm.get_rank()
+         self._world_size = comm.get_world_size()
+         self._local_indices = self._get_local_indices(size, self._world_size, self._rank)
+
+     @staticmethod
+     def _get_local_indices(total_size, world_size, rank):
+         shard_size = total_size // world_size
+         left = total_size % world_size
+         shard_sizes = [shard_size + int(r < left) for r in range(world_size)]
+
+         begin = sum(shard_sizes[:rank])
+         end = min(sum(shard_sizes[: rank + 1]), total_size)
+         return range(begin, end)
+
+     def __iter__(self):
+         yield from self._local_indices
+
+     def __len__(self):
+         return len(self._local_indices)
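To make the repeat-factor formula concrete: with threshold t and category frequency f(c), r(c) = max(1, sqrt(t / f(c))), so a category appearing in 1 of 8 images with t = 0.5 has f(c) = 0.125 and r(c) = sqrt(0.5 / 0.125) = 2, meaning every image containing it is repeated about twice per epoch in expectation. A small self-contained sketch on synthetic annotations (not part of the commit):

    import torch
    from annotator.oneformer.detectron2.data.samplers import RepeatFactorTrainingSampler

    # 7 images with only the common category 0, 1 image with the rare category 1.
    dataset_dicts = [{"annotations": [{"category_id": 0}]} for _ in range(7)]
    dataset_dicts.append({"annotations": [{"category_id": 1}]})

    rep = RepeatFactorTrainingSampler.repeat_factors_from_category_frequency(
        dataset_dicts, repeat_thresh=0.5
    )
    print(rep)  # tensor([1., 1., 1., 1., 1., 1., 1., 2.]) -- the rare image is repeated 2x
    sampler = RepeatFactorTrainingSampler(rep, seed=0)  # usable as a DataLoader sampler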
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/samplers/grouped_batch_sampler.py ADDED
@@ -0,0 +1,47 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ import numpy as np
+ from torch.utils.data.sampler import BatchSampler, Sampler
+
+
+ class GroupedBatchSampler(BatchSampler):
+     """
+     Wraps another sampler to yield a mini-batch of indices.
+     It enforces that each batch only contains elements from the same group.
+     It also tries to provide mini-batches that follow an ordering as close
+     as possible to the ordering of the original sampler.
+     """
+
+     def __init__(self, sampler, group_ids, batch_size):
+         """
+         Args:
+             sampler (Sampler): Base sampler.
+             group_ids (list[int]): If the sampler produces indices in range [0, N),
+                 `group_ids` must be a list of `N` ints which contains the group id of each sample.
+                 The group ids must be a set of integers in the range [0, num_groups).
+             batch_size (int): Size of mini-batch.
+         """
+         if not isinstance(sampler, Sampler):
+             raise ValueError(
+                 "sampler should be an instance of "
+                 "torch.utils.data.Sampler, but got sampler={}".format(sampler)
+             )
+         self.sampler = sampler
+         self.group_ids = np.asarray(group_ids)
+         assert self.group_ids.ndim == 1
+         self.batch_size = batch_size
+         groups = np.unique(self.group_ids).tolist()
+
+         # buffer the indices of each group until batch size is reached
+         self.buffer_per_group = {k: [] for k in groups}
+
+     def __iter__(self):
+         for idx in self.sampler:
+             group_id = self.group_ids[idx]
+             group_buffer = self.buffer_per_group[group_id]
+             group_buffer.append(idx)
+             if len(group_buffer) == self.batch_size:
+                 yield group_buffer[:]  # yield a copy of the list
+                 del group_buffer[:]
+
+     def __len__(self):
+         raise NotImplementedError("len() of GroupedBatchSampler is not well-defined.")
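A minimal sketch of how this is typically used (synthetic group ids, not part of the commit): detectron2 groups images by aspect ratio, e.g. 0 = wide and 1 = tall, so each mini-batch pads images of similar shape:

    from annotator.oneformer.detectron2.data.samplers import GroupedBatchSampler, TrainingSampler

    group_ids = [0, 1, 0, 0, 1, 1, 0, 1]  # one group id per dataset index
    sampler = TrainingSampler(len(group_ids), shuffle=False, seed=0)
    batch_sampler = GroupedBatchSampler(sampler, group_ids, batch_size=2)

    # The stream 0, 1, 2, ... is buffered per group; the first full batch is [0, 2].
    for batch in batch_sampler:
        print(batch)
        break  # the underlying TrainingSampler is infinite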
extensions/microsoftexcel-controlnet/annotator/oneformer/detectron2/data/transforms/__init__.py ADDED
@@ -0,0 +1,14 @@
+ # Copyright (c) Facebook, Inc. and its affiliates.
+ from fvcore.transforms.transform import Transform, TransformList  # order them first
+ from fvcore.transforms.transform import *
+ from .transform import *
+ from .augmentation import *
+ from .augmentation_impl import *
+
+ __all__ = [k for k in globals().keys() if not k.startswith("_")]
+
+
+ from annotator.oneformer.detectron2.utils.env import fixup_module_metadata
+
+ fixup_module_metadata(__name__, globals(), __all__)
+ del fixup_module_metadata