def resnet18(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNet-18 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet(in_channels, 'resnet18', BasicBlock, [2, 2, 2, 2], pretrained, progress, **kwargs)
def resnet34(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNet-34 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet(in_channels, 'resnet34', BasicBlock, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet50(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNet-50 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet(in_channels, 'resnet50', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnet101(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNet-101 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet(in_channels, 'resnet101', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def resnet152(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNet-152 model from
    `"Deep Residual Learning for Image Recognition" <https://arxiv.org/pdf/1512.03385.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
return _resnet(in_channels, 'resnet152', Bottleneck, [3, 8, 36, 3], pretrained, progress, **kwargs)
def resnext50_32x4d(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNeXt-50 32x4d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['groups'] = 32
kwargs['width_per_group'] = 4
return _resnet(in_channels, 'resnext50_32x4d', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def resnext101_32x8d(in_channels=3, pretrained=False, progress=True, **kwargs):
    """ResNeXt-101 32x8d model from
    `"Aggregated Residual Transformation for Deep Neural Networks" <https://arxiv.org/pdf/1611.05431.pdf>`_

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['groups'] = 32
kwargs['width_per_group'] = 8
return _resnet(in_channels, 'resnext101_32x8d', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
def wide_resnet50_2(in_channels=3, pretrained=False, progress=True, **kwargs):
    """Wide ResNet-50-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels,
    which is twice as large in every block. The number of channels in the outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['width_per_group'] = (64 * 2)
return _resnet(in_channels, 'wide_resnet50_2', Bottleneck, [3, 4, 6, 3], pretrained, progress, **kwargs)
def wide_resnet101_2(in_channels=3, pretrained=False, progress=True, **kwargs):
    """Wide ResNet-101-2 model from
    `"Wide Residual Networks" <https://arxiv.org/pdf/1605.07146.pdf>`_

    The model is the same as ResNet except for the bottleneck number of channels,
    which is twice as large in every block. The number of channels in the outer 1x1
    convolutions is the same, e.g. the last block in ResNet-50 has 2048-512-2048
    channels, while in Wide ResNet-50-2 it has 2048-1024-2048.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        progress (bool): If True, displays a progress bar of the download to stderr
    """
kwargs['width_per_group'] = (64 * 2)
return _resnet(in_channels, 'wide_resnet101_2', Bottleneck, [3, 4, 23, 3], pretrained, progress, **kwargs)
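
# Illustrative usage sketch: these wrappers mirror the torchvision
# ResNet/ResNeXt/Wide-ResNet constructors with an extra `in_channels` argument
# forwarded to `_resnet` (assumed, together with `BasicBlock` and `Bottleneck`,
# to be the torchvision-style helpers defined elsewhere in this module).
def _example_build_backbone():
    import torch
    # e.g. a single-channel (grayscale/depth) backbone instead of RGB
    backbone = resnet34(in_channels=1, pretrained=False, progress=False)
    dummy = torch.randn(2, 1, 160, 320)  # B x C x H x W
    return backbone(dummy)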
class ImageEncoder(nn.Module):
def __init__(self):
super(ImageEncoder, self).__init__()
self.backbone = resnet34(in_channels=3, pretrained=False, progress=True)
        # Earlier (unused) pixel-coordinate mesh, kept for reference:
        # input_mesh_np = np.meshgrid(np.linspace(start=0, stop=self.opt.img_W - 1, num=self.opt.img_W),
        #                             np.linspace(start=0, stop=self.opt.img_H - 1, num=self.opt.img_H))
        # input_mesh = torch.from_numpy(np.stack(input_mesh_np, axis=0).astype(np.float32)).to(self.opt.device)  # 2xHxW
        # self.input_mesh = input_mesh.unsqueeze(0).expand(self.opt.batch_size, 2, self.opt.img_H, self.opt.img_W)  # Bx2xHxW
def forward(self, x):
resnet_out = self.backbone(x)
return resnet_out
class ResidualConv(nn.Module):
def __init__(self, inplanes, planes, stride=1, kernel_1=False):
super(ResidualConv, self).__init__()
self.conv1 = conv3x3(inplanes, planes, stride)
self.bn1 = nn.BatchNorm2d(planes)
self.relu = nn.ReLU(inplace=True)
self.conv2 = conv3x3(planes, planes)
self.bn2 = nn.BatchNorm2d(planes)
if kernel_1:
self.conv_skip = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=1, bias=False), nn.BatchNorm2d(planes))
else:
self.conv_skip = nn.Sequential(nn.Conv2d(inplanes, planes, kernel_size=3, stride=stride, padding=1, bias=False), nn.BatchNorm2d(planes))
self.stride = stride
def forward(self, x):
identity = self.conv_skip(x)
out = self.conv1(x)
out = self.bn1(out)
out = self.relu(out)
out = self.conv2(out)
out = self.bn2(out)
out += identity
out = self.relu(out)
return out
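
# Illustrative shape sketch for ResidualConv: the skip branch always re-projects
# the input with its own conv + BN, so `inplanes` and `planes` may differ
# (`conv3x3` is assumed to be the usual torchvision-style 3x3 conv helper).
def _example_residual_conv():
    import torch
    block = ResidualConv(inplanes=64, planes=128, stride=1)
    x = torch.randn(2, 64, 40, 80)
    return block(x)  # -> shape (2, 128, 40, 80)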
class attention_pc2img(nn.Module):
def __init__(self, in_channel, output_channel):
super(attention_pc2img, self).__init__()
        # Earlier (unused) variant kept for reference:
        # self.conv = nn.Sequential(nn.Conv2d(in_channel, in_channel, 1), nn.BatchNorm2d(in_channel), nn.ReLU(),
        #                           nn.Conv2d(in_channel, in_channel, 1), nn.BatchNorm2d(in_channel), nn.ReLU(),
        #                           nn.Conv2d(in_channel, output_channel, 1), nn.BatchNorm2d(output_channel), nn.ReLU())
self.conv = nn.Sequential(ResidualConv(in_channel, in_channel), ResidualConv(in_channel, in_channel), nn.Conv2d(in_channel, output_channel, 1), nn.BatchNorm2d(output_channel), nn.ReLU())
def forward(self, pc_global_feature, img_local_feature, pc_local_feature):
(B, _, H, W) = img_local_feature.size()
feature = torch.cat([img_local_feature, pc_global_feature.unsqueeze((- 1)).unsqueeze((- 1)).repeat(1, 1, H, W)], dim=1)
feature = self.conv(feature)
attention = F.softmax(feature, dim=1)
feature_fusion = torch.sum((attention.unsqueeze(1) * pc_local_feature.unsqueeze((- 1)).unsqueeze((- 1))), dim=2)
return feature_fusion
class ImageUpSample(nn.Module):
def __init__(self, in_channel, output_channel):
super(ImageUpSample, self).__init__()
self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=False)
self.conv = nn.Sequential(ResidualConv(in_channel, output_channel), ResidualConv(output_channel, output_channel))
        # Earlier (unused) variant kept for reference:
        # self.conv = nn.Sequential(nn.Conv2d(in_channel, output_channel, 1, bias=False), nn.BatchNorm2d(output_channel), nn.ReLU(),
        #                           nn.Conv2d(output_channel, output_channel, 1, bias=False), nn.BatchNorm2d(output_channel), nn.ReLU(),
        #                           nn.Conv2d(output_channel, output_channel, 1, bias=False), nn.BatchNorm2d(output_channel), nn.ReLU())
def forward(self, x1, x2):
x1 = self.up(x1)
x = self.conv(torch.cat((x1, x2), dim=1))
return x
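
# Illustrative sketch of ImageUpSample as used by the decoder below: the coarse
# feature map is upsampled by 2, concatenated with a skip feature map of the
# matching resolution, and refined by two ResidualConv blocks.
def _example_image_upsample():
    import torch
    up = ImageUpSample(in_channel=256 + 128, output_channel=128)
    low = torch.randn(2, 256, 10, 20)   # coarse features
    skip = torch.randn(2, 128, 20, 40)  # skip connection at 2x resolution
    return up(low, skip)  # -> shape (2, 128, 20, 40)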
def desc_loss(img_features, pc_features, mask, pos_margin=0.1, neg_margin=1.4, log_scale=10, num_kpt=512):
pos_mask = mask
neg_mask = (1 - mask)
dists = (1 - torch.sum((img_features.unsqueeze((- 1)) * pc_features.unsqueeze((- 2))), dim=1))
pos = (dists - (100000.0 * neg_mask))
pos_weight = (pos - pos_margin).detach()
pos_weight = torch.max(torch.zeros_like(pos_weight), pos_weight)
lse_positive_row = torch.logsumexp(((log_scale * (pos - pos_margin)) * pos_weight), dim=(- 1))
lse_positive_col = torch.logsumexp(((log_scale * (pos - pos_margin)) * pos_weight), dim=(- 2))
neg = (dists + (100000.0 * pos_mask))
neg_weight = (neg_margin - neg).detach()
neg_weight = torch.max(torch.zeros_like(neg_weight), neg_weight)
lse_negative_row = torch.logsumexp(((log_scale * (neg_margin - neg)) * neg_weight), dim=(- 1))
lse_negative_col = torch.logsumexp(((log_scale * (neg_margin - neg)) * neg_weight), dim=(- 2))
loss_col = (F.softplus((lse_positive_row + lse_negative_row)) / log_scale)
loss_row = (F.softplus((lse_positive_col + lse_negative_col)) / log_scale)
loss = (loss_col + loss_row)
return (torch.mean(loss), dists)
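
# Illustrative toy call of desc_loss: both descriptor sets are expected to be
# L2-normalized along the channel dimension (shape B x C x N), and `mask` is a
# B x N x N matrix with 1 at corresponding image/point pairs; here the
# correspondence is simply the identity matching of 512 keypoints.
def _example_desc_loss():
    import torch
    import torch.nn.functional as F
    B, C, K = 2, 64, 512
    img_feat = F.normalize(torch.randn(B, C, K), dim=1)
    pc_feat = F.normalize(torch.randn(B, C, K), dim=1)
    mask = torch.eye(K).unsqueeze(0).expand(B, K, K)
    loss, dists = desc_loss(img_feat, pc_feat, mask)
    return loss, dists  # dists is the B x K x K cosine-distance matrix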
def desc_loss2(img_features, pc_features, mask, pos_margin=0.1, neg_margin=1.4, log_scale=10, num_kpt=512):
pos_mask = mask
neg_mask = (1 - mask)
dists = (1 - torch.sum((img_features.unsqueeze((- 1)) * pc_features.unsqueeze((- 2))), dim=1))
pos = (dists - (100000.0 * neg_mask))
pos_weight = (pos - pos_margin).detach()
pos_weight = torch.max(torch.zeros_like(pos_weight), pos_weight)
lse_positive_row = torch.logsumexp(((log_scale * (pos - pos_margin)) * pos_weight), dim=(- 1))
neg = (dists + (100000.0 * pos_mask))
neg_weight = (neg_margin - neg).detach()
neg_weight = torch.max(torch.zeros_like(neg_weight), neg_weight)
lse_negative_row = torch.logsumexp(((log_scale * (neg_margin - neg)) * neg_weight), dim=(- 1))
loss_col = (F.softplus((lse_positive_row + lse_negative_row)) / log_scale)
loss = loss_col
return (torch.mean(loss), dists)
def det_loss(img_score_inline, img_score_outline, pc_score_inline, pc_score_outline, dists, mask):
pids = torch.FloatTensor(np.arange(mask.size((- 1)))).to(mask.device)
diag_mask = torch.eq(torch.unsqueeze(pids, dim=1), torch.unsqueeze(pids, dim=0)).unsqueeze(0).expand(mask.size()).float()
(furthest_positive, _) = torch.max((dists * diag_mask), dim=1)
(closest_negative, _) = torch.min((dists + (100000.0 * mask)), dim=1)
loss_inline = torch.mean(((furthest_positive - closest_negative) * (img_score_inline.squeeze() + pc_score_inline.squeeze())))
loss_outline = (torch.mean(img_score_outline) + torch.mean(pc_score_outline))
return (loss_inline + loss_outline)
def det_loss2(img_score_inline, img_score_outline, pc_score_inline, pc_score_outline, dists, mask):
pids = torch.FloatTensor(np.arange(mask.size((- 1)))).to(mask.device)
diag_mask = torch.eq(torch.unsqueeze(pids, dim=1), torch.unsqueeze(pids, dim=0)).unsqueeze(0).expand(mask.size()).float()
(furthest_positive, _) = torch.max((dists * diag_mask), dim=1)
(closest_negative, _) = torch.min((dists + (100000.0 * mask)), dim=1)
loss_inline = (torch.mean((1 - img_score_inline)) + torch.mean((1 - pc_score_inline)))
loss_outline = (torch.mean(img_score_outline) + torch.mean(pc_score_outline))
return (loss_inline + loss_outline)
def cal_acc(img_features, pc_features, mask):
dist = torch.sum(((img_features.unsqueeze((- 1)) - pc_features.unsqueeze((- 2))) ** 2), dim=1)
(furthest_positive, _) = torch.max((dist * mask), dim=1)
(closest_negative, _) = torch.min((dist + (100000.0 * mask)), dim=1)
diff = (furthest_positive - closest_negative)
accuracy = ((diff < 0).sum(dim=1) / dist.size(1))
return accuracy
class CorrI2P(nn.Module):
def __init__(self, opt: Options):
super(CorrI2P, self).__init__()
self.opt = opt
self.pc_encoder = pointnet2.PCEncoder(opt, Ca=64, Cb=256, Cg=512)
self.img_encoder = imagenet.ImageEncoder()
self.H_fine_res = int(round((self.opt.img_H / self.opt.img_fine_resolution_scale)))
self.W_fine_res = int(round((self.opt.img_W / self.opt.img_fine_resolution_scale)))
self.node_b_attention_pn = layers_pc.PointNet((256 + 512), [256, (self.H_fine_res * self.W_fine_res)], activation=self.opt.activation, normalization=self.opt.normalization, norm_momentum=opt.norm_momentum, norm_act_at_last=False)
self.node_b_pn = layers_pc.PointNet((((256 + 512) + 512) + 512), [1024, 512, 512], activation=self.opt.activation, normalization=self.opt.normalization, norm_momentum=opt.norm_momentum, norm_act_at_last=False)
self.node_a_attention_pn = layers_pc.PointNet((64 + 512), [256, int(((self.H_fine_res * self.W_fine_res) * 4))], activation=self.opt.activation, normalization=self.opt.normalization, norm_momentum=opt.norm_momentum, norm_act_at_last=False)
self.node_a_pn = layers_pc.PointNet(((64 + 256) + 512), [512, 128, 128], activation=self.opt.activation, normalization=self.opt.normalization, norm_momentum=opt.norm_momentum, norm_act_at_last=False)
per_point_pn_in_channels = (((32 + 64) + 128) + 512)
self.per_point_pn = layers_pc.PointNet(per_point_pn_in_channels, [256, 256, 128], activation=self.opt.activation, normalization=self.opt.normalization, norm_momentum=opt.norm_momentum, norm_act_at_last=True)
self.pc_feature_layer = nn.Sequential(nn.Conv1d(128, 128, 1, bias=False), nn.BatchNorm1d(128), nn.ReLU(), nn.Conv1d(128, 128, 1, bias=False), nn.BatchNorm1d(128), nn.ReLU(), nn.Conv1d(128, 64, 1, bias=False))
self.pc_score_layer = nn.Sequential(nn.Conv1d(128, 128, 1, bias=False), nn.BatchNorm1d(128), nn.ReLU(), nn.Conv1d(128, 64, 1, bias=False), nn.BatchNorm1d(64), nn.ReLU(), nn.Conv1d(64, 1, 1, bias=False), nn.Sigmoid())
self.img_32_attention_conv = nn.Sequential(nn.Conv2d((512 + 512), 512, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, 512, 1, bias=False), nn.BatchNorm2d(512), nn.ReLU(), nn.Conv2d(512, self.opt.node_b_num, 1, bias=False))
self.img_16_attention_conv = nn.Sequential(nn.Conv2d((512 + 256), 256, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, 256, 1, bias=False), nn.BatchNorm2d(256), nn.ReLU(), nn.Conv2d(256, self.opt.node_a_num, 1, bias=False))
self.up_conv1 = ImageUpSample((768 + 320), 256)
self.up_conv2 = ImageUpSample((256 + 128), 128)
self.up_conv3 = ImageUpSample(((128 + 64) + 64), 64)
self.img_feature_layer = nn.Sequential(nn.Conv2d(64, 64, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 64, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 64, 1, bias=False))
self.img_score_layer = nn.Sequential(nn.Conv2d(64, 64, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 64, 1, bias=False), nn.BatchNorm2d(64), nn.ReLU(), nn.Conv2d(64, 1, 1, bias=False), nn.Sigmoid())
def gather_topk_features(self, min_k_idx, features):
        """
        Gather, for each of the N queries, the features of its k selected neighbours.
        :param min_k_idx: BxNxk
        :param features: BxCxM
        :return: gathered features of shape BxCxNxk
        """
(B, N, k) = (min_k_idx.size(0), min_k_idx.size(1), min_k_idx.size(2))
(C, M) = (features.size(1), features.size(2))
return torch.gather(features.unsqueeze(3).expand(B, C, M, k), index=min_k_idx.unsqueeze(1).expand(B, C, N, k), dim=2)
def upsample_by_interpolation(self, interp_ab_topk_idx, node_a, node_b, up_node_b_features):
interp_ab_topk_node_b = self.gather_topk_features(interp_ab_topk_idx, node_b)
interp_ab_node_diff = torch.norm((node_a.unsqueeze(3) - interp_ab_topk_node_b), dim=1, p=2, keepdim=False)
interp_ab_weight = (1 - (interp_ab_node_diff / torch.sum(interp_ab_node_diff, dim=2, keepdim=True)))
interp_ab_topk_node_b_features = self.gather_topk_features(interp_ab_topk_idx, up_node_b_features)
interp_ab_weighted_node_b_features = torch.sum((interp_ab_weight.unsqueeze(1) * interp_ab_topk_node_b_features), dim=3)
return interp_ab_weighted_node_b_features
def forward(self, pc, intensity, sn, img, node_a, node_b):
(B, N, Ma, Mb) = (pc.size(0), pc.size(2), node_a.size(2), node_b.size(2))
(pc_center, cluster_mean, node_a_min_k_idx, first_pn_out, second_pn_out, node_a_features, node_b_features, global_feature) = self.pc_encoder(pc, intensity, sn, node_a, node_b)
C_global = global_feature.size(1)
img_feature_set = self.img_encoder(img)
img_global_feature = img_feature_set[(- 1)]
img_s32_feature_map = img_feature_set[(- 2)]
img_s16_feature_map = img_feature_set[(- 3)]
img_s8_feature_map = img_feature_set[(- 4)]
img_s4_feature_map = img_feature_set[(- 5)]
img_s2_feature_map = img_feature_set[(- 6)]
img_s32_feature_map_pc_global_feature = torch.cat((img_s32_feature_map, global_feature.unsqueeze((- 1)).expand(B, global_feature.size(1), img_s32_feature_map.size((- 2)), img_s32_feature_map.size((- 1)))), dim=1)
img_32_attention = self.img_32_attention_conv(img_s32_feature_map_pc_global_feature)
img_32_attention = F.softmax(img_32_attention, dim=1)
img_s32_feature_map_fusion = torch.cat((torch.sum((img_32_attention.unsqueeze(1) * node_b_features.unsqueeze((- 1)).unsqueeze((- 1))), dim=2), img_s32_feature_map), dim=1)
img_s16_feature_map_pc_global_feature = torch.cat((img_s16_feature_map, global_feature.unsqueeze((- 1)).expand(B, global_feature.size(1), img_s16_feature_map.size((- 2)), img_s16_feature_map.size((- 1)))), dim=1)
img_16_attention = self.img_16_attention_conv(img_s16_feature_map_pc_global_feature)
img_16_attention = F.softmax(img_16_attention, dim=1)
img_s16_feature_map_fusion = torch.cat((torch.sum((img_16_attention.unsqueeze(1) * node_a_features.unsqueeze((- 1)).unsqueeze((- 1))), dim=2), img_s16_feature_map), dim=1)
image_feature_16 = self.up_conv1(img_s32_feature_map_fusion, img_s16_feature_map_fusion)
image_feature_8 = self.up_conv2(image_feature_16, img_s8_feature_map)
img_s4_feature_map = torch.cat((img_s4_feature_map, F.interpolate(img_s2_feature_map, scale_factor=0.5)), dim=1)
image_feature_mid = self.up_conv3(image_feature_8, img_s4_feature_map)
img_feature = self.img_feature_layer(image_feature_mid)
img_score = self.img_score_layer(image_feature_mid)
img_feature_norm = F.normalize(img_feature, dim=1, p=2)
C_img = img_global_feature.size(1)
img_s16_feature_map_BCHw = img_s16_feature_map.view(B, img_s16_feature_map.size(1), (- 1))
img_s32_feature_map_BCHw = img_s32_feature_map.view(B, img_s32_feature_map.size(1), (- 1))
img_global_feature_BCMa = img_global_feature.squeeze(3).expand(B, C_img, Ma)
img_global_feature_BCMb = img_global_feature.squeeze(3).expand(B, C_img, Mb)
node_b_attention_score = self.node_b_attention_pn(torch.cat((node_b_features, img_global_feature_BCMb), dim=1))
node_b_attention_score = F.softmax(node_b_attention_score, dim=1)
node_b_weighted_img_s32_feature_map = torch.sum((img_s32_feature_map_BCHw.unsqueeze(3) * node_b_attention_score.unsqueeze(1)), dim=2)
up_node_b_features = self.node_b_pn(torch.cat((node_b_features, global_feature.expand(B, C_global, Mb), node_b_weighted_img_s32_feature_map, img_global_feature_BCMb), dim=1))
pc_node_b_diff = torch.norm((pc.unsqueeze(3) - node_b.unsqueeze(2)), p=2, dim=1, keepdim=False)
(_, interp_pc_node_b_topk_idx) = torch.topk(pc_node_b_diff, k=self.opt.k_interp_point_b, dim=2, largest=False, sorted=True)
interp_pb_weighted_node_b_features = self.upsample_by_interpolation(interp_pc_node_b_topk_idx, pc, node_b, up_node_b_features)
node_a_attention_score = self.node_a_attention_pn(torch.cat((node_a_features, img_global_feature_BCMa), dim=1))
node_a_attention_score = F.softmax(node_a_attention_score, dim=1)
node_a_weighted_img_s16_feature_map = torch.sum((img_s16_feature_map_BCHw.unsqueeze(3) * node_a_attention_score.unsqueeze(1)), dim=2)
node_a_node_b_diff = torch.norm((node_a.unsqueeze(3) - node_b.unsqueeze(2)), p=2, dim=1, keepdim=False)
(_, interp_nodea_nodeb_topk_idx) = torch.topk(node_a_node_b_diff, k=self.opt.k_interp_ab, dim=2, largest=False, sorted=True)
interp_ab_weighted_node_b_features = self.upsample_by_interpolation(interp_nodea_nodeb_topk_idx, node_a, node_b, up_node_b_features)
up_node_a_features = self.node_a_pn(torch.cat((node_a_features, interp_ab_weighted_node_b_features, node_a_weighted_img_s16_feature_map), dim=1))
interp_pa_weighted_node_a_features = self.upsample_by_interpolation(node_a_min_k_idx, pc, node_a, up_node_a_features)
pc_label_scores = self.per_point_pn(torch.cat((interp_pa_weighted_node_a_features, interp_pb_weighted_node_b_features, first_pn_out, second_pn_out), dim=1))
pc_feature = self.pc_feature_layer(pc_label_scores)
pc_score = self.pc_score_layer(pc_label_scores)
pc_feature_norm = F.normalize(pc_feature, dim=1, p=2)
return (img_feature_norm, pc_feature_norm, img_score, pc_score)
class Options():
def __init__(self):
self.dataroot = '/extssd/jiaxin/nuscenes'
self.checkpoints_dir = 'checkpoints'
self.version = '3.3'
self.is_debug = False
self.is_fine_resolution = True
self.is_remove_ground = False
self.accumulation_frame_num = 3
self.accumulation_frame_skip = 4
self.translation_max = 10.0
self.test_translation_max = 10.0
self.range_radius = 100
self.crop_original_top_rows = 100
self.img_scale = 0.2
self.img_H = 160
self.img_W = 320
self.img_fine_resolution_scale = 32
self.num_kpt = 512
self.input_pt_num = 40960
self.node_a_num = 256
self.node_b_num = 256
self.k_ab = 32
self.k_interp_ab = 3
self.k_interp_point_a = 3
self.k_interp_point_b = 3
self.P_tx_amplitude = 10
self.P_ty_amplitude = 0
self.P_tz_amplitude = 10
self.P_Rx_amplitude = 0
self.P_Ry_amplitude = (2.0 * math.pi)
self.P_Rz_amplitude = 0
self.dataloader_threads = 10
self.batch_size = 12
self.gpu_ids = [1]
self.device = torch.device('cuda', self.gpu_ids[0])
self.normalization = 'batch'
self.norm_momentum = 0.1
self.activation = 'relu'
self.lr = 0.001
self.lr_decay_step = 15
self.lr_decay_scale = 0.5
self.vis_max_batch = 4
if self.is_fine_resolution:
self.coarse_loss_alpha = 50
else:
self.coarse_loss_alpha = 1
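
# Illustrative sanity check of the coarse grid CorrI2P derives from these
# options: img_H / img_fine_resolution_scale = 160 / 32 = 5 and
# img_W / img_fine_resolution_scale = 320 / 32 = 10, i.e. the node attention
# heads above operate over a 5 x 10 = 50-cell coarse image grid.
def _example_options_grid():
    opt = Options()
    h_fine = int(round(opt.img_H / opt.img_fine_resolution_scale))  # 5
    w_fine = int(round(opt.img_W / opt.img_fine_resolution_scale))  # 10
    return h_fine, w_fine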
def config_factory(configuration_name: str) -> Union[(DetectionConfig, TrackingConfig)]:
    """
    Creates a *Config instance that can be used to initialize a *Eval instance, where * stands for Detection/Tracking.
    Note that this only works if the config file is located in the nuscenes/eval/common/configs folder.
    :param configuration_name: Name of desired configuration in eval_detection_configs.
    :return: *Config instance.
    """
tokens = configuration_name.split('_')
    assert (len(tokens) > 1), 'Error: Configuration name must have the prefix "detection_" or "tracking_"!'
task = tokens[0]
this_dir = os.path.dirname(os.path.abspath(__file__))
cfg_path = os.path.join(this_dir, '..', task, 'configs', ('%s.json' % configuration_name))
assert os.path.exists(cfg_path), 'Requested unknown configuration {}'.format(configuration_name)
with open(cfg_path, 'r') as f:
data = json.load(f)
if (task == 'detection'):
cfg = DetectionConfig.deserialize(data)
elif (task == 'tracking'):
cfg = TrackingConfig.deserialize(data)
else:
raise Exception(('Error: Invalid config file name: %s' % configuration_name))
return cfg
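
# Illustrative use of config_factory, assuming the standard devkit layout where
# e.g. 'detection_cvpr_2019.json' lives under nuscenes/eval/detection/configs/.
def _example_config_factory():
    cfg = config_factory('detection_cvpr_2019')
    return cfg.dist_ths, cfg.dist_th_tp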
class EvalBox(abc.ABC):
' Abstract base class for data classes used during detection evaluation. Can be a prediction or ground truth.'
def __init__(self, sample_token: str='', translation: Tuple[(float, float, float)]=(0, 0, 0), size: Tuple[(float, float, float)]=(0, 0, 0), rotation: Tuple[(float, float, float, float)]=(0, 0, 0, 0), velocity: Tuple[(float, float)]=(0, 0), ego_translation: Tuple[(float, float, float)]=(0, 0, 0), num_pts: int=(- 1)):
assert (type(sample_token) == str), 'Error: sample_token must be a string!'
assert (len(translation) == 3), 'Error: Translation must have 3 elements!'
assert (not np.any(np.isnan(translation))), 'Error: Translation may not be NaN!'
assert (len(size) == 3), 'Error: Size must have 3 elements!'
assert (not np.any(np.isnan(size))), 'Error: Size may not be NaN!'
assert (len(rotation) == 4), 'Error: Rotation must have 4 elements!'
assert (not np.any(np.isnan(rotation))), 'Error: Rotation may not be NaN!'
assert (len(velocity) == 2), 'Error: Velocity must have 2 elements!'
assert (len(ego_translation) == 3), 'Error: Translation must have 3 elements!'
assert (not np.any(np.isnan(ego_translation))), 'Error: Translation may not be NaN!'
assert (type(num_pts) == int), 'Error: num_pts must be int!'
assert (not np.any(np.isnan(num_pts))), 'Error: num_pts may not be NaN!'
self.sample_token = sample_token
self.translation = translation
self.size = size
self.rotation = rotation
self.velocity = velocity
self.ego_translation = ego_translation
self.num_pts = num_pts
@property
def ego_dist(self) -> float:
' Compute the distance from this box to the ego vehicle in 2D. '
return np.sqrt(np.sum((np.array(self.ego_translation[:2]) ** 2)))
def __repr__(self):
return str(self.serialize())
@abc.abstractmethod
def serialize(self) -> dict:
pass
@classmethod
@abc.abstractmethod
def deserialize(cls, content: dict):
pass
class EvalBoxes():
' Data class that groups EvalBox instances by sample. '
def __init__(self):
        """ Initializes the EvalBoxes for GT or predictions. """
self.boxes = defaultdict(list)
def __repr__(self):
return 'EvalBoxes with {} boxes across {} samples'.format(len(self.all), len(self.sample_tokens))
def __getitem__(self, item) -> List[EvalBoxType]:
return self.boxes[item]
def __eq__(self, other):
if (not (set(self.sample_tokens) == set(other.sample_tokens))):
return False
for token in self.sample_tokens:
if (not (len(self[token]) == len(other[token]))):
return False
for (box1, box2) in zip(self[token], other[token]):
if (box1 != box2):
return False
return True
def __len__(self):
return len(self.boxes)
@property
def all(self) -> List[EvalBoxType]:
' Returns all EvalBoxes in a list. '
ab = []
for sample_token in self.sample_tokens:
ab.extend(self[sample_token])
return ab
@property
def sample_tokens(self) -> List[str]:
' Returns a list of all keys. '
return list(self.boxes.keys())
def add_boxes(self, sample_token: str, boxes: List[EvalBoxType]) -> None:
' Adds a list of boxes. '
self.boxes[sample_token].extend(boxes)
def serialize(self) -> dict:
' Serialize instance into json-friendly format. '
return {key: [box.serialize() for box in boxes] for (key, boxes) in self.boxes.items()}
@classmethod
def deserialize(cls, content: dict, box_cls):
        """
        Initialize from serialized content.
        :param content: A dictionary with the serialized content of the box.
        :param box_cls: The class of the boxes, DetectionBox or TrackingBox.
        """
eb = cls()
for (sample_token, boxes) in content.items():
eb.add_boxes(sample_token, [box_cls.deserialize(box) for box in boxes])
return eb
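
# Illustrative round trip for EvalBoxes: boxes grouped per sample serialize to a
# plain dict and can be restored with the concrete box class (DetectionBox is
# defined further below in this file).
def _example_evalboxes_roundtrip():
    boxes = EvalBoxes()
    boxes.add_boxes('sample_0', [DetectionBox(sample_token='sample_0')])
    restored = EvalBoxes.deserialize(boxes.serialize(), DetectionBox)
    return restored['sample_0'][0] == boxes['sample_0'][0]  # True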
class MetricData(abc.ABC):
' Abstract base class for the *MetricData classes specific to each task. '
@abc.abstractmethod
def serialize(self):
' Serialize instance into json-friendly format. '
pass
@classmethod
@abc.abstractmethod
def deserialize(cls, content: dict):
' Initialize from serialized content. '
pass
def load_prediction(result_path: str, max_boxes_per_sample: int, box_cls, verbose: bool=False) -> Tuple[(EvalBoxes, Dict)]:
    """
    Loads object predictions from file.
    :param result_path: Path to the .json result file provided by the user.
    :param max_boxes_per_sample: Maximum number of boxes allowed per sample.
    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
    :param verbose: Whether to print messages to stdout.
    :return: The deserialized results and meta data.
    """
with open(result_path) as f:
data = json.load(f)
    assert ('results' in data), 'Error: No field `results` in result file. Please note that the result format changed. See https://www.nuscenes.org/object-detection for more information.'
all_results = EvalBoxes.deserialize(data['results'], box_cls)
meta = data['meta']
if verbose:
print('Loaded results from {}. Found detections for {} samples.'.format(result_path, len(all_results.sample_tokens)))
for sample_token in all_results.sample_tokens:
assert (len(all_results.boxes[sample_token]) <= max_boxes_per_sample), ('Error: Only <= %d boxes per sample allowed!' % max_boxes_per_sample)
return (all_results, meta)
def load_gt(nusc: NuScenes, eval_split: str, box_cls, verbose: bool=False) -> EvalBoxes:
    """
    Loads ground truth boxes from DB.
    :param nusc: A NuScenes instance.
    :param eval_split: The evaluation split for which we load GT boxes.
    :param box_cls: Type of box to load, e.g. DetectionBox or TrackingBox.
    :param verbose: Whether to print messages to stdout.
    :return: The GT boxes.
    """
if (box_cls == DetectionBox):
attribute_map = {a['token']: a['name'] for a in nusc.attribute}
if verbose:
print('Loading annotations for {} split from nuScenes version: {}'.format(eval_split, nusc.version))
sample_tokens_all = [s['token'] for s in nusc.sample]
assert (len(sample_tokens_all) > 0), 'Error: Database has no samples!'
splits = create_splits_scenes()
version = nusc.version
if (eval_split in {'train', 'val', 'train_detect', 'train_track'}):
assert version.endswith('trainval'), 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
elif (eval_split in {'mini_train', 'mini_val'}):
assert version.endswith('mini'), 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
elif (eval_split == 'test'):
assert version.endswith('test'), 'Error: Requested split {} which is not compatible with NuScenes version {}'.format(eval_split, version)
else:
raise ValueError('Error: Requested split {} which this function cannot map to the correct NuScenes version.'.format(eval_split))
if (eval_split == 'test'):
assert (len(nusc.sample_annotation) > 0), 'Error: You are trying to evaluate on the test set but you do not have the annotations!'
sample_tokens = []
for sample_token in sample_tokens_all:
scene_token = nusc.get('sample', sample_token)['scene_token']
scene_record = nusc.get('scene', scene_token)
if (scene_record['name'] in splits[eval_split]):
sample_tokens.append(sample_token)
all_annotations = EvalBoxes()
tracking_id_set = set()
for sample_token in tqdm.tqdm(sample_tokens, leave=verbose):
sample = nusc.get('sample', sample_token)
sample_annotation_tokens = sample['anns']
sample_boxes = []
for sample_annotation_token in sample_annotation_tokens:
sample_annotation = nusc.get('sample_annotation', sample_annotation_token)
if (box_cls == DetectionBox):
detection_name = category_to_detection_name(sample_annotation['category_name'])
if (detection_name is None):
continue
attr_tokens = sample_annotation['attribute_tokens']
attr_count = len(attr_tokens)
if (attr_count == 0):
attribute_name = ''
elif (attr_count == 1):
attribute_name = attribute_map[attr_tokens[0]]
else:
raise Exception('Error: GT annotations must not have more than one attribute!')
sample_boxes.append(box_cls(sample_token=sample_token, translation=sample_annotation['translation'], size=sample_annotation['size'], rotation=sample_annotation['rotation'], velocity=nusc.box_velocity(sample_annotation['token'])[:2], num_pts=(sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts']), detection_name=detection_name, detection_score=(- 1.0), attribute_name=attribute_name))
elif (box_cls == TrackingBox):
tracking_id = sample_annotation['instance_token']
tracking_id_set.add(tracking_id)
from nuscenes.eval.tracking.utils import category_to_tracking_name
tracking_name = category_to_tracking_name(sample_annotation['category_name'])
if (tracking_name is None):
continue
sample_boxes.append(box_cls(sample_token=sample_token, translation=sample_annotation['translation'], size=sample_annotation['size'], rotation=sample_annotation['rotation'], velocity=nusc.box_velocity(sample_annotation['token'])[:2], num_pts=(sample_annotation['num_lidar_pts'] + sample_annotation['num_radar_pts']), tracking_id=tracking_id, tracking_name=tracking_name, tracking_score=(- 1.0)))
else:
raise NotImplementedError(('Error: Invalid box_cls %s!' % box_cls))
all_annotations.add_boxes(sample_token, sample_boxes)
if verbose:
print('Loaded ground truth annotations for {} samples.'.format(len(all_annotations.sample_tokens)))
return all_annotations
def add_center_dist(nusc: NuScenes, eval_boxes: EvalBoxes):
    """
    Adds the cylindrical (xy) center distance from ego vehicle to each box.
    :param nusc: The NuScenes instance.
    :param eval_boxes: A set of boxes, either GT or predictions.
    :return: eval_boxes augmented with center distances.
    """
for sample_token in eval_boxes.sample_tokens:
sample_rec = nusc.get('sample', sample_token)
sd_record = nusc.get('sample_data', sample_rec['data']['LIDAR_TOP'])
pose_record = nusc.get('ego_pose', sd_record['ego_pose_token'])
for box in eval_boxes[sample_token]:
ego_translation = ((box.translation[0] - pose_record['translation'][0]), (box.translation[1] - pose_record['translation'][1]), (box.translation[2] - pose_record['translation'][2]))
if (isinstance(box, DetectionBox) or isinstance(box, TrackingBox)):
box.ego_translation = ego_translation
else:
raise NotImplementedError
return eval_boxes
def filter_eval_boxes(nusc: NuScenes, eval_boxes: EvalBoxes, max_dist: Dict[(str, float)], verbose: bool=False) -> EvalBoxes:
    """
    Applies filtering to boxes. Distance, bike-racks and points per box.
    :param nusc: An instance of the NuScenes class.
    :param eval_boxes: An instance of the EvalBoxes class.
    :param max_dist: Maps the detection name to the eval distance threshold for that class.
    :param verbose: Whether to print to stdout.
    """
class_field = _get_box_class_field(eval_boxes)
(total, dist_filter, point_filter, bike_rack_filter) = (0, 0, 0, 0)
for (ind, sample_token) in enumerate(eval_boxes.sample_tokens):
total += len(eval_boxes[sample_token])
eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if (box.ego_dist < max_dist[box.__getattribute__(class_field)])]
dist_filter += len(eval_boxes[sample_token])
eval_boxes.boxes[sample_token] = [box for box in eval_boxes[sample_token] if (not (box.num_pts == 0))]
point_filter += len(eval_boxes[sample_token])
sample_anns = nusc.get('sample', sample_token)['anns']
bikerack_recs = [nusc.get('sample_annotation', ann) for ann in sample_anns if (nusc.get('sample_annotation', ann)['category_name'] == 'static_object.bicycle_rack')]
bikerack_boxes = [Box(rec['translation'], rec['size'], Quaternion(rec['rotation'])) for rec in bikerack_recs]
filtered_boxes = []
for box in eval_boxes[sample_token]:
if (box.__getattribute__(class_field) in ['bicycle', 'motorcycle']):
in_a_bikerack = False
for bikerack_box in bikerack_boxes:
if (np.sum(points_in_box(bikerack_box, np.expand_dims(np.array(box.translation), axis=1))) > 0):
in_a_bikerack = True
if (not in_a_bikerack):
filtered_boxes.append(box)
else:
filtered_boxes.append(box)
eval_boxes.boxes[sample_token] = filtered_boxes
bike_rack_filter += len(eval_boxes.boxes[sample_token])
if verbose:
print(('=> Original number of boxes: %d' % total))
print(('=> After distance based filtering: %d' % dist_filter))
print(('=> After LIDAR and RADAR points based filtering: %d' % point_filter))
print(('=> After bike rack filtering: %d' % bike_rack_filter))
return eval_boxes
def _get_box_class_field(eval_boxes: EvalBoxes) -> str:
    """
    Retrieve the name of the class field in the boxes.
    This parses through all boxes until it finds a valid box.
    If there are no valid boxes, this function throws an exception.
    :param eval_boxes: The EvalBoxes used for evaluation.
    :return: The name of the class field in the boxes, e.g. detection_name or tracking_name.
    """
assert (len(eval_boxes.boxes) > 0)
box = None
for val in eval_boxes.boxes.values():
if (len(val) > 0):
box = val[0]
break
if isinstance(box, DetectionBox):
class_field = 'detection_name'
elif isinstance(box, TrackingBox):
class_field = 'tracking_name'
else:
raise Exception(('Error: Invalid box type: %s' % box))
return class_field
def setup_axis(xlabel: str=None, ylabel: str=None, xlim: int=None, ylim: int=None, title: str=None, min_precision: float=None, min_recall: float=None, ax: Axis=None, show_spines: str='none'):
"\n Helper method that sets up the axis for a plot.\n :param xlabel: x label text.\n :param ylabel: y label text.\n :param xlim: Upper limit for x axis.\n :param ylim: Upper limit for y axis.\n :param title: Axis title.\n :param min_precision: Visualize minimum precision as horizontal line.\n :param min_recall: Visualize minimum recall as vertical line.\n :param ax: (optional) an existing axis to be modified.\n :param show_spines: Whether to show axes spines, set to 'none' by default.\n :return: The axes object.\n "
if (ax is None):
ax = plt.subplot()
ax.get_xaxis().tick_bottom()
ax.tick_params(labelsize=16)
ax.get_yaxis().tick_left()
if (show_spines in ['bottomleft', 'none']):
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
if (show_spines == 'none'):
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
elif (show_spines in ['all']):
pass
else:
raise NotImplementedError
if (title is not None):
ax.set_title(title, size=24)
if (xlabel is not None):
ax.set_xlabel(xlabel, size=16)
if (ylabel is not None):
ax.set_ylabel(ylabel, size=16)
if (xlim is not None):
ax.set_xlim(0, xlim)
if (ylim is not None):
ax.set_ylim(0, ylim)
if (min_recall is not None):
ax.axvline(x=min_recall, linestyle='--', color=(0, 0, 0, 0.3))
if (min_precision is not None):
ax.axhline(y=min_precision, linestyle='--', color=(0, 0, 0, 0.3))
return ax
def center_distance(gt_box: EvalBox, pred_box: EvalBox) -> float:
    """
    L2 distance between the box centers (xy only).
    :param gt_box: GT annotation sample.
    :param pred_box: Predicted sample.
    :return: L2 distance.
    """
return np.linalg.norm((np.array(pred_box.translation[:2]) - np.array(gt_box.translation[:2])))
def velocity_l2(gt_box: EvalBox, pred_box: EvalBox) -> float:
    """
    L2 distance between the velocity vectors (xy only).
    If the predicted velocities are nan, we return inf, which is subsequently clipped to 1.
    :param gt_box: GT annotation sample.
    :param pred_box: Predicted sample.
    :return: L2 distance.
    """
return np.linalg.norm((np.array(pred_box.velocity) - np.array(gt_box.velocity)))
def yaw_diff(gt_box: EvalBox, eval_box: EvalBox, period: float=(2 * np.pi)) -> float:
    """
    Returns the yaw angle difference between the orientation of two boxes.
    :param gt_box: Ground truth box.
    :param eval_box: Predicted box.
    :param period: Periodicity in radians for assessing angle difference.
    :return: Yaw angle difference in radians in [0, pi].
    """
yaw_gt = quaternion_yaw(Quaternion(gt_box.rotation))
yaw_est = quaternion_yaw(Quaternion(eval_box.rotation))
return abs(angle_diff(yaw_gt, yaw_est, period))
def angle_diff(x: float, y: float, period: float) -> float:
    """
    Get the smallest angle difference between 2 angles: the angle from y to x.
    :param x: To angle.
    :param y: From angle.
    :param period: Periodicity in radians for assessing angle difference.
    :return: <float>. Signed smallest between-angle difference in range (-pi, pi).
    """
diff = ((((x - y) + (period / 2)) % period) - (period / 2))
if (diff > np.pi):
diff = (diff - (2 * np.pi))
return diff
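
# Illustrative worked example for angle_diff: the difference is wrapped to lie
# within half a period of zero, so two nearly opposite headings give a small
# signed value rather than ~2*pi.
def _example_angle_diff():
    import numpy as np
    return angle_diff(0.9 * np.pi, -0.9 * np.pi, period=2 * np.pi)  # ~ -0.2 * pi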
def attr_acc(gt_box: DetectionBox, pred_box: DetectionBox) -> float:
    """
    Computes the classification accuracy for the attribute of this class (if any).
    If the GT class has no attributes or the annotation is missing attributes, we assign an accuracy of nan,
    which is ignored later on.
    :param gt_box: GT annotation sample.
    :param pred_box: Predicted sample.
    :return: Attribute classification accuracy (0 or 1) or nan if GT annotation does not have any attributes.
    """
if (gt_box.attribute_name == ''):
acc = np.nan
else:
acc = float((gt_box.attribute_name == pred_box.attribute_name))
return acc
def scale_iou(sample_annotation: EvalBox, sample_result: EvalBox) -> float:
    """
    This method compares predictions to the ground truth in terms of scale.
    It is equivalent to intersection over union (IOU) between the two boxes in 3D,
    if we assume that the boxes are aligned, i.e. translation and rotation are considered identical.
    :param sample_annotation: GT annotation sample.
    :param sample_result: Predicted sample.
    :return: Scale IOU.
    """
sa_size = np.array(sample_annotation.size)
sr_size = np.array(sample_result.size)
assert all((sa_size > 0)), 'Error: sample_annotation sizes must be >0.'
assert all((sr_size > 0)), 'Error: sample_result sizes must be >0.'
min_wlh = np.minimum(sa_size, sr_size)
volume_annotation = np.prod(sa_size)
volume_result = np.prod(sr_size)
intersection = np.prod(min_wlh)
union = ((volume_annotation + volume_result) - intersection)
iou = (intersection / union)
return iou
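
# Illustrative worked example for scale_iou: for aligned boxes of size (1, 1, 1)
# and (2, 2, 2) the intersection volume is 1, the union is 1 + 8 - 1 = 8,
# hence an IOU of 0.125.
def _example_scale_iou():
    gt = DetectionBox(sample_token='s', size=(1.0, 1.0, 1.0))
    pred = DetectionBox(sample_token='s', size=(2.0, 2.0, 2.0))
    return scale_iou(gt, pred)  # 0.125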
def quaternion_yaw(q: Quaternion) -> float:
    """
    Calculate the yaw angle from a quaternion.
    Note that this only works for a quaternion that represents a box in lidar or global coordinate frame.
    It does not work for a box in the camera frame.
    :param q: Quaternion of interest.
    :return: Yaw angle in radians.
    """
v = np.dot(q.rotation_matrix, np.array([1, 0, 0]))
yaw = np.arctan2(v[1], v[0])
return yaw
def boxes_to_sensor(boxes: List[EvalBox], pose_record: Dict, cs_record: Dict):
"\n Map boxes from global coordinates to the vehicle's sensor coordinate system.\n :param boxes: The boxes in global coordinates.\n :param pose_record: The pose record of the vehicle at the current timestamp.\n :param cs_record: The calibrated sensor record of the sensor.\n :return: The transformed boxes.\n "
boxes_out = []
for box in boxes:
box = Box(box.translation, box.size, Quaternion(box.rotation))
box.translate((- np.array(pose_record['translation'])))
box.rotate(Quaternion(pose_record['rotation']).inverse)
box.translate((- np.array(cs_record['translation'])))
box.rotate(Quaternion(cs_record['rotation']).inverse)
boxes_out.append(box)
return boxes_out
def cummean(x: np.array) -> np.array:
    """
    Computes the cumulative mean up to each position in a NaN-sensitive way.
    - If all values are NaN, return an array of ones.
    - If some values are NaN, accumulate arrays discarding those entries.
    """
if (sum(np.isnan(x)) == len(x)):
return np.ones(len(x))
else:
sum_vals = np.nancumsum(x.astype(float))
count_vals = np.cumsum((~ np.isnan(x)))
return np.divide(sum_vals, count_vals, out=np.zeros_like(sum_vals), where=(count_vals != 0))
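
# Illustrative worked example for cummean: NaNs are skipped in both the running
# sum and the running count, so [1, nan, 3] yields cumulative means [1, 1, 2].
def _example_cummean():
    import numpy as np
    return cummean(np.array([1.0, np.nan, 3.0]))  # array([1., 1., 2.])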
def accumulate(gt_boxes: EvalBoxes, pred_boxes: EvalBoxes, class_name: str, dist_fcn: Callable, dist_th: float, verbose: bool=False) -> DetectionMetricData:
    """
    Average Precision over predefined different recall thresholds for a single distance threshold.
    The recall/conf thresholds and other raw metrics will be used in secondary metrics.
    :param gt_boxes: Maps every sample_token to a list of its sample_annotations.
    :param pred_boxes: Maps every sample_token to a list of its sample_results.
    :param class_name: Class to compute AP on.
    :param dist_fcn: Distance function used to match detections and ground truths.
    :param dist_th: Distance threshold for a match.
    :param verbose: If true, print debug messages.
    :return: (average_prec, metrics). The average precision value and raw data for a number of metrics.
    """
npos = len([1 for gt_box in gt_boxes.all if (gt_box.detection_name == class_name)])
if verbose:
print('Found {} GT of class {} out of {} total across {} samples.'.format(npos, class_name, len(gt_boxes.all), len(gt_boxes.sample_tokens)))
if (npos == 0):
return DetectionMetricData.no_predictions()
pred_boxes_list = [box for box in pred_boxes.all if (box.detection_name == class_name)]
pred_confs = [box.detection_score for box in pred_boxes_list]
if verbose:
print('Found {} PRED of class {} out of {} total across {} samples.'.format(len(pred_confs), class_name, len(pred_boxes.all), len(pred_boxes.sample_tokens)))
sortind = [i for (v, i) in sorted(((v, i) for (i, v) in enumerate(pred_confs)))][::(- 1)]
tp = []
fp = []
conf = []
match_data = {'trans_err': [], 'vel_err': [], 'scale_err': [], 'orient_err': [], 'attr_err': [], 'conf': []}
taken = set()
for ind in sortind:
pred_box = pred_boxes_list[ind]
min_dist = np.inf
match_gt_idx = None
for (gt_idx, gt_box) in enumerate(gt_boxes[pred_box.sample_token]):
if ((gt_box.detection_name == class_name) and (not ((pred_box.sample_token, gt_idx) in taken))):
this_distance = dist_fcn(gt_box, pred_box)
if (this_distance < min_dist):
min_dist = this_distance
match_gt_idx = gt_idx
is_match = (min_dist < dist_th)
if is_match:
taken.add((pred_box.sample_token, match_gt_idx))
tp.append(1)
fp.append(0)
conf.append(pred_box.detection_score)
gt_box_match = gt_boxes[pred_box.sample_token][match_gt_idx]
match_data['trans_err'].append(center_distance(gt_box_match, pred_box))
match_data['vel_err'].append(velocity_l2(gt_box_match, pred_box))
match_data['scale_err'].append((1 - scale_iou(gt_box_match, pred_box)))
period = (np.pi if (class_name == 'barrier') else (2 * np.pi))
match_data['orient_err'].append(yaw_diff(gt_box_match, pred_box, period=period))
match_data['attr_err'].append((1 - attr_acc(gt_box_match, pred_box)))
match_data['conf'].append(pred_box.detection_score)
else:
tp.append(0)
fp.append(1)
conf.append(pred_box.detection_score)
if (len(match_data['trans_err']) == 0):
return DetectionMetricData.no_predictions()
tp = np.cumsum(tp).astype(float)
fp = np.cumsum(fp).astype(float)
conf = np.array(conf)
prec = (tp / (fp + tp))
rec = (tp / float(npos))
rec_interp = np.linspace(0, 1, DetectionMetricData.nelem)
prec = np.interp(rec_interp, rec, prec, right=0)
conf = np.interp(rec_interp, rec, conf, right=0)
rec = rec_interp
for key in match_data.keys():
if (key == 'conf'):
continue
else:
tmp = cummean(np.array(match_data[key]))
match_data[key] = np.interp(conf[::(- 1)], match_data['conf'][::(- 1)], tmp[::(- 1)])[::(- 1)]
return DetectionMetricData(recall=rec, precision=prec, confidence=conf, trans_err=match_data['trans_err'], vel_err=match_data['vel_err'], scale_err=match_data['scale_err'], orient_err=match_data['orient_err'], attr_err=match_data['attr_err'])
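
# Illustrative end-to-end sketch of accumulate: a single GT car and a single
# prediction 0.5 m away are matched under a 2.0 m center-distance threshold,
# giving interpolated precision of 1 at every recall point.
def _example_accumulate():
    gt = EvalBoxes()
    gt.add_boxes('s0', [DetectionBox(sample_token='s0', translation=(0.0, 0.0, 0.0),
                                     size=(1.0, 1.0, 1.0), rotation=(1.0, 0.0, 0.0, 0.0),
                                     detection_name='car')])
    pred = EvalBoxes()
    pred.add_boxes('s0', [DetectionBox(sample_token='s0', translation=(0.5, 0.0, 0.0),
                                       size=(1.0, 1.0, 1.0), rotation=(1.0, 0.0, 0.0, 0.0),
                                       detection_name='car', detection_score=0.9)])
    return accumulate(gt, pred, 'car', center_distance, dist_th=2.0)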
def calc_ap(md: DetectionMetricData, min_recall: float, min_precision: float) -> float:
' Calculated average precision. '
assert (0 <= min_precision < 1)
assert (0 <= min_recall <= 1)
prec = np.copy(md.precision)
prec = prec[(round((100 * min_recall)) + 1):]
prec -= min_precision
prec[(prec < 0)] = 0
return (float(np.mean(prec)) / (1.0 - min_precision))
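
# Illustrative sketch of calc_ap's clipping: with min_recall=0.1 and
# min_precision=0.1, the first 11 points of the 101-point recall grid are
# dropped, precision is shifted down by 0.1 and clipped at 0, and the mean is
# renormalized by 1 / (1 - 0.1).
def _example_calc_ap():
    md = DetectionMetricData.random_md()
    return calc_ap(md, min_recall=0.1, min_precision=0.1)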
def calc_tp(md: DetectionMetricData, min_recall: float, metric_name: str) -> float:
' Calculates true positive errors. '
first_ind = (round((100 * min_recall)) + 1)
last_ind = md.max_recall_ind
if (last_ind < first_ind):
return 1.0
else:
return float(np.mean(getattr(md, metric_name)[first_ind:(last_ind + 1)]))
def config_factory(configuration_name: str) -> DetectionConfig:
    """
    Creates a DetectionConfig instance that can be used to initialize a NuScenesEval instance.
    Note that this only works if the config file is located in the nuscenes/eval/detection/configs folder.
    :param configuration_name: Name of desired configuration in eval_detection_configs.
    :return: DetectionConfig instance.
    """
this_dir = os.path.dirname(os.path.abspath(__file__))
cfg_path = os.path.join(this_dir, 'configs', ('%s.json' % configuration_name))
assert os.path.exists(cfg_path), 'Requested unknown configuration {}'.format(configuration_name)
with open(cfg_path, 'r') as f:
data = json.load(f)
cfg = DetectionConfig.deserialize(data)
return cfg
class DetectionConfig():
' Data class that specifies the detection evaluation settings. '
def __init__(self, class_range: Dict[(str, int)], dist_fcn: str, dist_ths: List[float], dist_th_tp: float, min_recall: float, min_precision: float, max_boxes_per_sample: int, mean_ap_weight: int):
assert (set(class_range.keys()) == set(DETECTION_NAMES)), 'Class count mismatch.'
assert (dist_th_tp in dist_ths), 'dist_th_tp must be in set of dist_ths.'
self.class_range = class_range
self.dist_fcn = dist_fcn
self.dist_ths = dist_ths
self.dist_th_tp = dist_th_tp
self.min_recall = min_recall
self.min_precision = min_precision
self.max_boxes_per_sample = max_boxes_per_sample
self.mean_ap_weight = mean_ap_weight
self.class_names = self.class_range.keys()
def __eq__(self, other):
eq = True
for key in self.serialize().keys():
eq = (eq and np.array_equal(getattr(self, key), getattr(other, key)))
return eq
def serialize(self) -> dict:
' Serialize instance into json-friendly format. '
return {'class_range': self.class_range, 'dist_fcn': self.dist_fcn, 'dist_ths': self.dist_ths, 'dist_th_tp': self.dist_th_tp, 'min_recall': self.min_recall, 'min_precision': self.min_precision, 'max_boxes_per_sample': self.max_boxes_per_sample, 'mean_ap_weight': self.mean_ap_weight}
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized dictionary. '
return cls(content['class_range'], content['dist_fcn'], content['dist_ths'], content['dist_th_tp'], content['min_recall'], content['min_precision'], content['max_boxes_per_sample'], content['mean_ap_weight'])
@property
def dist_fcn_callable(self):
' Return the distance function corresponding to the dist_fcn string. '
if (self.dist_fcn == 'center_distance'):
return center_distance
else:
raise Exception(('Error: Unknown distance function %s!' % self.dist_fcn))
class DetectionMetricData(MetricData):
' This class holds accumulated and interpolated data required to calculate the detection metrics. '
nelem = 101
def __init__(self, recall: np.array, precision: np.array, confidence: np.array, trans_err: np.array, vel_err: np.array, scale_err: np.array, orient_err: np.array, attr_err: np.array):
assert (len(recall) == self.nelem)
assert (len(precision) == self.nelem)
assert (len(confidence) == self.nelem)
assert (len(trans_err) == self.nelem)
assert (len(vel_err) == self.nelem)
assert (len(scale_err) == self.nelem)
assert (len(orient_err) == self.nelem)
assert (len(attr_err) == self.nelem)
assert all((confidence == sorted(confidence, reverse=True)))
assert all((recall == sorted(recall)))
self.recall = recall
self.precision = precision
self.confidence = confidence
self.trans_err = trans_err
self.vel_err = vel_err
self.scale_err = scale_err
self.orient_err = orient_err
self.attr_err = attr_err
def __eq__(self, other):
eq = True
for key in self.serialize().keys():
eq = (eq and np.array_equal(getattr(self, key), getattr(other, key)))
return eq
@property
def max_recall_ind(self):
' Returns index of max recall achieved. '
non_zero = np.nonzero(self.confidence)[0]
if (len(non_zero) == 0):
max_recall_ind = 0
else:
max_recall_ind = non_zero[(- 1)]
return max_recall_ind
@property
def max_recall(self):
' Returns max recall achieved. '
return self.recall[self.max_recall_ind]
def serialize(self):
' Serialize instance into json-friendly format. '
return {'recall': self.recall.tolist(), 'precision': self.precision.tolist(), 'confidence': self.confidence.tolist(), 'trans_err': self.trans_err.tolist(), 'vel_err': self.vel_err.tolist(), 'scale_err': self.scale_err.tolist(), 'orient_err': self.orient_err.tolist(), 'attr_err': self.attr_err.tolist()}
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized content. '
return cls(recall=np.array(content['recall']), precision=np.array(content['precision']), confidence=np.array(content['confidence']), trans_err=np.array(content['trans_err']), vel_err=np.array(content['vel_err']), scale_err=np.array(content['scale_err']), orient_err=np.array(content['orient_err']), attr_err=np.array(content['attr_err']))
@classmethod
def no_predictions(cls):
' Returns a md instance corresponding to having no predictions. '
return cls(recall=np.linspace(0, 1, cls.nelem), precision=np.zeros(cls.nelem), confidence=np.zeros(cls.nelem), trans_err=np.ones(cls.nelem), vel_err=np.ones(cls.nelem), scale_err=np.ones(cls.nelem), orient_err=np.ones(cls.nelem), attr_err=np.ones(cls.nelem))
@classmethod
def random_md(cls):
' Returns an md instance corresponding to a random results. '
return cls(recall=np.linspace(0, 1, cls.nelem), precision=np.random.random(cls.nelem), confidence=np.linspace(0, 1, cls.nelem)[::(- 1)], trans_err=np.random.random(cls.nelem), vel_err=np.random.random(cls.nelem), scale_err=np.random.random(cls.nelem), orient_err=np.random.random(cls.nelem), attr_err=np.random.random(cls.nelem))
class DetectionMetrics():
' Stores average precision and true positive metric results. Provides properties to summarize. '
def __init__(self, cfg: DetectionConfig):
self.cfg = cfg
self._label_aps = defaultdict((lambda : defaultdict(float)))
self._label_tp_errors = defaultdict((lambda : defaultdict(float)))
self.eval_time = None
def add_label_ap(self, detection_name: str, dist_th: float, ap: float) -> None:
self._label_aps[detection_name][dist_th] = ap
def get_label_ap(self, detection_name: str, dist_th: float) -> float:
return self._label_aps[detection_name][dist_th]
def add_label_tp(self, detection_name: str, metric_name: str, tp: float):
self._label_tp_errors[detection_name][metric_name] = tp
def get_label_tp(self, detection_name: str, metric_name: str) -> float:
return self._label_tp_errors[detection_name][metric_name]
def add_runtime(self, eval_time: float) -> None:
self.eval_time = eval_time
@property
def mean_dist_aps(self) -> Dict[(str, float)]:
' Calculates the mean over distance thresholds for each label. '
return {class_name: np.mean(list(d.values())) for (class_name, d) in self._label_aps.items()}
@property
def mean_ap(self) -> float:
' Calculates the mean AP by averaging over distance thresholds and classes. '
return float(np.mean(list(self.mean_dist_aps.values())))
@property
def tp_errors(self) -> Dict[(str, float)]:
' Calculates the mean true positive error across all classes for each metric. '
errors = {}
for metric_name in TP_METRICS:
class_errors = []
for detection_name in self.cfg.class_names:
class_errors.append(self.get_label_tp(detection_name, metric_name))
errors[metric_name] = float(np.nanmean(class_errors))
return errors
@property
def tp_scores(self) -> Dict[(str, float)]:
scores = {}
tp_errors = self.tp_errors
for metric_name in TP_METRICS:
score = (1.0 - tp_errors[metric_name])
score = max(0.0, score)
scores[metric_name] = score
return scores
@property
def nd_score(self) -> float:
'\n Compute the nuScenes detection score (NDS, weighted sum of the individual scores).\n :return: The NDS.\n '
total = float(((self.cfg.mean_ap_weight * self.mean_ap) + np.sum(list(self.tp_scores.values()))))
total = (total / float((self.cfg.mean_ap_weight + len(self.tp_scores.keys()))))
return total
def serialize(self):
return {'label_aps': self._label_aps, 'mean_dist_aps': self.mean_dist_aps, 'mean_ap': self.mean_ap, 'label_tp_errors': self._label_tp_errors, 'tp_errors': self.tp_errors, 'tp_scores': self.tp_scores, 'nd_score': self.nd_score, 'eval_time': self.eval_time, 'cfg': self.cfg.serialize()}
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized dictionary. '
cfg = DetectionConfig.deserialize(content['cfg'])
metrics = cls(cfg=cfg)
metrics.add_runtime(content['eval_time'])
for (detection_name, label_aps) in content['label_aps'].items():
for (dist_th, ap) in label_aps.items():
metrics.add_label_ap(detection_name=detection_name, dist_th=float(dist_th), ap=float(ap))
for (detection_name, label_tps) in content['label_tp_errors'].items():
for (metric_name, tp) in label_tps.items():
metrics.add_label_tp(detection_name=detection_name, metric_name=metric_name, tp=float(tp))
return metrics
def __eq__(self, other):
eq = True
eq = (eq and (self._label_aps == other._label_aps))
eq = (eq and (self._label_tp_errors == other._label_tp_errors))
eq = (eq and (self.eval_time == other.eval_time))
eq = (eq and (self.cfg == other.cfg))
return eq
class DetectionBox(EvalBox):
' Data class used during detection evaluation. Can be a prediction or ground truth.'
def __init__(self, sample_token: str='', translation: Tuple[(float, float, float)]=(0, 0, 0), size: Tuple[(float, float, float)]=(0, 0, 0), rotation: Tuple[(float, float, float, float)]=(0, 0, 0, 0), velocity: Tuple[(float, float)]=(0, 0), ego_translation: [float, float, float]=(0, 0, 0), num_pts: int=(- 1), detection_name: str='car', detection_score: float=(- 1.0), attribute_name: str=''):
super().__init__(sample_token, translation, size, rotation, velocity, ego_translation, num_pts)
assert (detection_name is not None), 'Error: detection_name cannot be empty!'
assert (detection_name in DETECTION_NAMES), ('Error: Unknown detection_name %s' % detection_name)
assert ((attribute_name in ATTRIBUTE_NAMES) or (attribute_name == '')), ('Error: Unknown attribute_name %s' % attribute_name)
assert (type(detection_score) == float), 'Error: detection_score must be a float!'
assert (not np.any(np.isnan(detection_score))), 'Error: detection_score may not be NaN!'
self.detection_name = detection_name
self.detection_score = detection_score
self.attribute_name = attribute_name
def __eq__(self, other):
return ((self.sample_token == other.sample_token) and (self.translation == other.translation) and (self.size == other.size) and (self.rotation == other.rotation) and (self.velocity == other.velocity) and (self.ego_translation == other.ego_translation) and (self.num_pts == other.num_pts) and (self.detection_name == other.detection_name) and (self.detection_score == other.detection_score) and (self.attribute_name == other.attribute_name))
def serialize(self) -> dict:
' Serialize instance into json-friendly format. '
return {'sample_token': self.sample_token, 'translation': self.translation, 'size': self.size, 'rotation': self.rotation, 'velocity': self.velocity, 'ego_translation': self.ego_translation, 'num_pts': self.num_pts, 'detection_name': self.detection_name, 'detection_score': self.detection_score, 'attribute_name': self.attribute_name}
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized content. '
return cls(sample_token=content['sample_token'], translation=tuple(content['translation']), size=tuple(content['size']), rotation=tuple(content['rotation']), velocity=tuple(content['velocity']), ego_translation=((0.0, 0.0, 0.0) if ('ego_translation' not in content) else tuple(content['ego_translation'])), num_pts=((- 1) if ('num_pts' not in content) else int(content['num_pts'])), detection_name=content['detection_name'], detection_score=((- 1.0) if ('detection_score' not in content) else float(content['detection_score'])), attribute_name=content['attribute_name'])
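# Hedged usage sketch: constructing a DetectionBox by hand and round-tripping it through
# serialize()/deserialize(), mirroring the serialization tests further below. Shown as a comment
# because it is illustrative only; all field values are made up.
# box = DetectionBox(sample_token='dummy_token', translation=(10.0, 5.0, 1.0), size=(2.0, 4.5, 1.8),
#                    rotation=(1.0, 0.0, 0.0, 0.0), velocity=(0.5, 0.0),
#                    detection_name='car', detection_score=0.9, attribute_name='vehicle.moving')
# assert box == DetectionBox.deserialize(json.loads(json.dumps(box.serialize())))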
|
class DetectionMetricDataList():
' This stores a set of MetricData in a dict indexed by (name, match-distance). '
def __init__(self):
self.md = {}
def __getitem__(self, key):
return self.md[key]
def __eq__(self, other):
eq = True
for key in self.md.keys():
eq = (eq and (self[key] == other[key]))
return eq
def get_class_data(self, detection_name: str) -> List[Tuple[(DetectionMetricData, float)]]:
' Get all the MetricData entries for a certain detection_name. '
return [(md, dist_th) for ((name, dist_th), md) in self.md.items() if (name == detection_name)]
def get_dist_data(self, dist_th: float) -> List[Tuple[(DetectionMetricData, str)]]:
' Get all the MetricData entries for a certain match_distance. '
return [(md, detection_name) for ((detection_name, dist), md) in self.md.items() if (dist == dist_th)]
def set(self, detection_name: str, match_distance: float, data: DetectionMetricData):
' Sets the MetricData entry for a certain detection_name and match_distance. '
self.md[(detection_name, match_distance)] = data
def serialize(self) -> dict:
return {((key[0] + ':') + str(key[1])): value.serialize() for (key, value) in self.md.items()}
@classmethod
def deserialize(cls, content: dict):
mdl = cls()
for (key, md) in content.items():
(name, distance) = key.split(':')
mdl.set(name, float(distance), DetectionMetricData.deserialize(md))
return mdl
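# Hedged sketch of how DetectionMetricDataList keys its entries: data is stored under
# (detection_name, match_distance) tuples and serialized under 'name:distance' string keys, so the
# get_class_data()/get_dist_data() helpers are simple filters over those keys. Illustrative only.
# mdl = DetectionMetricDataList()
# mdl.set('car', 2.0, DetectionMetricData.random_md())
# mdl.set('car', 4.0, DetectionMetricData.random_md())
# assert len(mdl.get_class_data('car')) == 2            # Both thresholds for 'car'.
# assert len(mdl.get_dist_data(2.0)) == 1               # Only one class stored at threshold 2.0.
# assert set(mdl.serialize().keys()) == {'car:2.0', 'car:4.0'}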
|
class DetectionEval():
'\n This is the official nuScenes detection evaluation code.\n Results are written to the provided output_dir.\n\n nuScenes uses the following detection metrics:\n - Mean Average Precision (mAP): Uses center-distance as matching criterion; averaged over distance thresholds.\n - True Positive (TP) metrics: Average of translation, velocity, scale, orientation and attribute errors.\n - nuScenes Detection Score (NDS): The weighted sum of the above.\n\n Here is an overview of the functions in this class:\n - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.\n - run: Performs evaluation and dumps the metric data to disk.\n - render: Renders various plots and dumps to disk.\n\n We assume that:\n - Every sample_token is given in the results, although there may be no predictions for that sample.\n\n Please see https://www.nuscenes.org/object-detection for more details.\n '
def __init__(self, nusc: NuScenes, config: DetectionConfig, result_path: str, eval_set: str, output_dir: str=None, verbose: bool=True):
'\n Initialize a DetectionEval object.\n :param nusc: A NuScenes object.\n :param config: A DetectionConfig object.\n :param result_path: Path of the nuScenes JSON result file.\n :param eval_set: The dataset split to evaluate on, e.g. train, val or test.\n :param output_dir: Folder to save plots and results to.\n :param verbose: Whether to print to stdout.\n '
self.nusc = nusc
self.result_path = result_path
self.eval_set = eval_set
self.output_dir = output_dir
self.verbose = verbose
self.cfg = config
assert os.path.exists(result_path), 'Error: The result file does not exist!'
self.plot_dir = os.path.join(self.output_dir, 'plots')
if (not os.path.isdir(self.output_dir)):
os.makedirs(self.output_dir)
if (not os.path.isdir(self.plot_dir)):
os.makedirs(self.plot_dir)
if verbose:
print('Initializing nuScenes detection evaluation')
(self.pred_boxes, self.meta) = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, DetectionBox, verbose=verbose)
self.gt_boxes = load_gt(self.nusc, self.eval_set, DetectionBox, verbose=verbose)
assert (set(self.pred_boxes.sample_tokens) == set(self.gt_boxes.sample_tokens)), "Samples in split don't match samples in predictions."
self.pred_boxes = add_center_dist(nusc, self.pred_boxes)
self.gt_boxes = add_center_dist(nusc, self.gt_boxes)
if verbose:
print('Filtering predictions')
self.pred_boxes = filter_eval_boxes(nusc, self.pred_boxes, self.cfg.class_range, verbose=verbose)
if verbose:
print('Filtering ground truth annotations')
self.gt_boxes = filter_eval_boxes(nusc, self.gt_boxes, self.cfg.class_range, verbose=verbose)
self.sample_tokens = self.gt_boxes.sample_tokens
def evaluate(self) -> Tuple[(DetectionMetrics, DetectionMetricDataList)]:
'\n Performs the actual evaluation.\n :return: A tuple of high-level and the raw metric data.\n '
start_time = time.time()
if self.verbose:
print('Accumulating metric data...')
metric_data_list = DetectionMetricDataList()
for class_name in self.cfg.class_names:
for dist_th in self.cfg.dist_ths:
md = accumulate(self.gt_boxes, self.pred_boxes, class_name, self.cfg.dist_fcn_callable, dist_th)
metric_data_list.set(class_name, dist_th, md)
if self.verbose:
print('Calculating metrics...')
metrics = DetectionMetrics(self.cfg)
for class_name in self.cfg.class_names:
for dist_th in self.cfg.dist_ths:
metric_data = metric_data_list[(class_name, dist_th)]
ap = calc_ap(metric_data, self.cfg.min_recall, self.cfg.min_precision)
metrics.add_label_ap(class_name, dist_th, ap)
for metric_name in TP_METRICS:
metric_data = metric_data_list[(class_name, self.cfg.dist_th_tp)]
if ((class_name in ['traffic_cone']) and (metric_name in ['attr_err', 'vel_err', 'orient_err'])):
tp = np.nan
elif ((class_name in ['barrier']) and (metric_name in ['attr_err', 'vel_err'])):
tp = np.nan
else:
tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
metrics.add_label_tp(class_name, metric_name, tp)
metrics.add_runtime((time.time() - start_time))
return (metrics, metric_data_list)
def render(self, metrics: DetectionMetrics, md_list: DetectionMetricDataList) -> None:
'\n Renders various PR and TP curves.\n :param metrics: DetectionMetrics instance.\n :param md_list: DetectionMetricDataList instance.\n '
if self.verbose:
print('Rendering PR and TP curves')
def savepath(name):
return os.path.join(self.plot_dir, (name + '.pdf'))
summary_plot(md_list, metrics, min_precision=self.cfg.min_precision, min_recall=self.cfg.min_recall, dist_th_tp=self.cfg.dist_th_tp, savepath=savepath('summary'))
for detection_name in self.cfg.class_names:
class_pr_curve(md_list, metrics, detection_name, self.cfg.min_precision, self.cfg.min_recall, savepath=savepath((detection_name + '_pr')))
class_tp_curve(md_list, metrics, detection_name, self.cfg.min_recall, self.cfg.dist_th_tp, savepath=savepath((detection_name + '_tp')))
for dist_th in self.cfg.dist_ths:
dist_pr_curve(md_list, metrics, dist_th, self.cfg.min_precision, self.cfg.min_recall, savepath=savepath(('dist_pr_' + str(dist_th))))
def main(self, plot_examples: int=0, render_curves: bool=True) -> Dict[(str, Any)]:
'\n Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.\n :param plot_examples: How many example visualizations to write to disk.\n :param render_curves: Whether to render PR and TP curves to disk.\n :return: A dict that stores the high-level metrics and meta data.\n '
if (plot_examples > 0):
random.seed(42)
sample_tokens = list(self.sample_tokens)
random.shuffle(sample_tokens)
sample_tokens = sample_tokens[:plot_examples]
example_dir = os.path.join(self.output_dir, 'examples')
if (not os.path.isdir(example_dir)):
os.mkdir(example_dir)
for sample_token in sample_tokens:
visualize_sample(self.nusc, sample_token, (self.gt_boxes if (self.eval_set != 'test') else EvalBoxes()), self.pred_boxes, eval_range=max(self.cfg.class_range.values()), savepath=os.path.join(example_dir, '{}.png'.format(sample_token)))
(metrics, metric_data_list) = self.evaluate()
if render_curves:
self.render(metrics, metric_data_list)
if self.verbose:
print(('Saving metrics to: %s' % self.output_dir))
metrics_summary = metrics.serialize()
metrics_summary['meta'] = self.meta.copy()
with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
json.dump(metrics_summary, f, indent=2)
with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
json.dump(metric_data_list.serialize(), f, indent=2)
print(('mAP: %.4f' % metrics_summary['mean_ap']))
err_name_mapping = {'trans_err': 'mATE', 'scale_err': 'mASE', 'orient_err': 'mAOE', 'vel_err': 'mAVE', 'attr_err': 'mAAE'}
for (tp_name, tp_val) in metrics_summary['tp_errors'].items():
print(('%s: %.4f' % (err_name_mapping[tp_name], tp_val)))
print(('NDS: %.4f' % metrics_summary['nd_score']))
print(('Eval time: %.1fs' % metrics_summary['eval_time']))
print()
print('Per-class results:')
print('Object Class\tAP\tATE\tASE\tAOE\tAVE\tAAE')
class_aps = metrics_summary['mean_dist_aps']
class_tps = metrics_summary['label_tp_errors']
for class_name in class_aps.keys():
print(('%s\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f' % (class_name, class_aps[class_name], class_tps[class_name]['trans_err'], class_tps[class_name]['scale_err'], class_tps[class_name]['orient_err'], class_tps[class_name]['vel_err'], class_tps[class_name]['attr_err'])))
return metrics_summary
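# Hedged usage sketch of DetectionEval, mirroring what test_delta further below does. The result
# path, output directory and the NUSCENES environment variable are assumptions for illustration.
# nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=True)
# cfg = config_factory('detection_cvpr_2019')
# nusc_eval = DetectionEval(nusc, config=cfg, result_path='results_nusc.json',
#                           eval_set='mini_val', output_dir='detection_eval_out', verbose=True)
# metrics_summary = nusc_eval.main(plot_examples=10, render_curves=True)
# print(metrics_summary['nd_score'])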
|
class NuScenesEval(DetectionEval):
'\n Dummy class for backward-compatibility. Same as DetectionEval.\n '
|
class TestAlgo(unittest.TestCase):
cfg = config_factory('detection_cvpr_2019')
@staticmethod
def _mock_results(nsamples, ngt, npred, detection_name):
def random_attr():
'\n This is the most straightforward way to generate a random attribute.\n Not currently used because we want the test fixture to be backwards compatible.\n '
rel_attributes = detection_name_to_rel_attributes(detection_name)
if (len(rel_attributes) == 0):
return ''
else:
return rel_attributes[np.random.randint(0, len(rel_attributes))]
pred = EvalBoxes()
gt = EvalBoxes()
for sample_itt in range(nsamples):
this_gt = []
for box_itt in range(ngt):
translation_xy = tuple((np.random.rand(2) * 15))
this_gt.append(DetectionBox(sample_token=str(sample_itt), translation=(translation_xy[0], translation_xy[1], 0.0), size=tuple((np.random.rand(3) * 4)), rotation=tuple(np.random.rand(4)), velocity=tuple((np.random.rand(3)[:2] * 4)), detection_name=detection_name, detection_score=random.random(), attribute_name=random_attr(), ego_translation=((random.random() * 10), 0, 0)))
gt.add_boxes(str(sample_itt), this_gt)
for sample_itt in range(nsamples):
this_pred = []
for box_itt in range(npred):
translation_xy = tuple((np.random.rand(2) * 10))
this_pred.append(DetectionBox(sample_token=str(sample_itt), translation=(translation_xy[0], translation_xy[1], 0.0), size=tuple((np.random.rand(3) * 4)), rotation=tuple(np.random.rand(4)), velocity=tuple((np.random.rand(3)[:2] * 4)), detection_name=detection_name, detection_score=random.random(), attribute_name=random_attr(), ego_translation=((random.random() * 10), 0, 0)))
pred.add_boxes(str(sample_itt), this_pred)
return (gt, pred)
def test_nd_score(self):
'\n This test runs the full evaluation for an arbitrary random set of predictions.\n '
random.seed(42)
np.random.seed(42)
mdl = DetectionMetricDataList()
for class_name in self.cfg.class_names:
(gt, pred) = self._mock_results(30, 3, 25, class_name)
for dist_th in self.cfg.dist_ths:
mdl.set(class_name, dist_th, accumulate(gt, pred, class_name, center_distance, 2))
metrics = DetectionMetrics(self.cfg)
for class_name in self.cfg.class_names:
for dist_th in self.cfg.dist_ths:
ap = calc_ap(mdl[(class_name, dist_th)], self.cfg.min_recall, self.cfg.min_precision)
metrics.add_label_ap(class_name, dist_th, ap)
for metric_name in TP_METRICS:
metric_data = mdl[(class_name, self.cfg.dist_th_tp)]
if ((class_name in ['traffic_cone']) and (metric_name in ['attr_err', 'vel_err', 'orient_err'])):
tp = np.nan
elif ((class_name in ['barrier']) and (metric_name in ['attr_err', 'vel_err'])):
tp = np.nan
else:
tp = calc_tp(metric_data, self.cfg.min_recall, metric_name)
metrics.add_label_tp(class_name, metric_name, tp)
self.assertEqual(0.08606662159639042, metrics.nd_score)
def test_calc_tp(self):
'Test for calc_tp().'
random.seed(42)
np.random.seed(42)
md = DetectionMetricData.random_md()
self.assertEqual(1.0, calc_tp(md, min_recall=1, metric_name='trans_err'))
def test_calc_ap(self):
'Test for calc_ap().'
random.seed(42)
np.random.seed(42)
md = DetectionMetricData.random_md()
self.assertRaises(AssertionError, calc_ap, md, (- 0.5), 0.4)
self.assertRaises(AssertionError, calc_ap, md, 0.5, (- 0.8))
self.assertRaises(AssertionError, calc_ap, md, 0.7, 1)
self.assertRaises(AssertionError, calc_ap, md, 1.2, 0)
|
def get_metric_data(gts: Dict[(str, List[Dict])], preds: Dict[(str, List[Dict])], detection_name: str, dist_th: float) -> DetectionMetricData:
'\n Compute the DetectionMetricData for the given ground truth and predictions.\n :param gts: Ground truth data.\n :param preds: Predictions.\n :param detection_name: Name of the class we are interested in.\n :param dist_th: Distance threshold for matching.\n :return: The accumulated DetectionMetricData.\n '
defaults = {'trans': (0, 0, 0), 'size': (1, 1, 1), 'rot': (0, 0, 0, 0), 'vel': (0, 0), 'attr': 'vehicle.parked', 'score': (- 1.0), 'name': 'car'}
gt_eval_boxes = EvalBoxes()
for (sample_token, data) in gts.items():
gt_boxes = []
for gt in data:
gt = {**defaults, **gt}
eb = DetectionBox(sample_token=sample_token, translation=gt['trans'], size=gt['size'], rotation=gt['rot'], detection_name=gt['name'], attribute_name=gt['attr'], velocity=gt['vel'])
gt_boxes.append(eb)
gt_eval_boxes.add_boxes(sample_token, gt_boxes)
pred_eval_boxes = EvalBoxes()
for (sample_token, data) in preds.items():
pred_boxes = []
for pred in data:
pred = {**defaults, **pred}
eb = DetectionBox(sample_token=sample_token, translation=pred['trans'], size=pred['size'], rotation=pred['rot'], detection_name=pred['name'], detection_score=pred['score'], velocity=pred['vel'], attribute_name=pred['attr'])
pred_boxes.append(eb)
pred_eval_boxes.add_boxes(sample_token, pred_boxes)
metric_data = accumulate(gt_eval_boxes, pred_eval_boxes, class_name=detection_name, dist_fcn=center_distance, dist_th=dist_th)
return metric_data
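# Hedged note on the {**defaults, **gt} pattern used above: later keys win, so each test box only
# needs to spell out the fields it cares about. Tiny illustrative check with made-up values:
_defaults_demo = {'trans': (0, 0, 0), 'score': (- 1.0), 'name': 'car'}
assert ({**_defaults_demo, **{'trans': (1, 1, 1)}} == {'trans': (1, 1, 1), 'score': (- 1.0), 'name': 'car'})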
|
class TestAPSimple(unittest.TestCase):
' Tests the correctness of AP calculation for simple cases. '
def setUp(self):
self.car1 = {'trans': (1, 1, 1), 'name': 'car', 'score': 1.0}
self.car2 = {'trans': (3, 3, 1), 'name': 'car', 'score': 0.7}
self.bicycle1 = {'trans': (5, 5, 1), 'name': 'bicycle', 'score': 1.0}
self.bicycle2 = {'trans': (7, 7, 1), 'name': 'bicycle', 'score': 0.7}
def check_ap(self, gts: Dict[(str, List[Dict])], preds: Dict[(str, List[Dict])], target_ap: float, detection_name: str='car', dist_th: float=2.0, min_precision: float=0.1, min_recall: float=0.1) -> None:
'\n Calculate and check the AP value.\n :param gts: Ground truth data.\n :param preds: Predictions.\n :param target_ap: Expected Average Precision value.\n :param detection_name: Name of the class we are interested in.\n :param dist_th: Distance threshold for matching.\n :param min_precision: Minimum precision value.\n :param min_recall: Minimum recall value.\n '
metric_data = get_metric_data(gts, preds, detection_name, dist_th)
ap = calc_ap(metric_data, min_precision=min_precision, min_recall=min_recall)
self.assertGreaterEqual(0.01, abs((ap - target_ap)), msg='Incorrect AP')
def test_no_data(self):
' Test empty ground truth and/or predictions. '
gts = {'sample1': [self.car1]}
preds = {'sample1': [self.car1]}
empty = {'sample1': []}
self.check_ap(empty, preds, target_ap=0.0)
self.check_ap(gts, empty, target_ap=0.0)
self.check_ap(empty, empty, target_ap=0.0)
def test_one_sample(self):
' Test the single sample case. '
self.check_ap({'sample1': [self.car1]}, {'sample1': [self.car1]}, target_ap=1.0, detection_name='car')
self.check_ap({'sample1': [self.car1, self.car2]}, {'sample1': [self.car1]}, target_ap=(0.4 / 0.9), detection_name='car')
self.check_ap({'sample1': [self.car1]}, {'sample1': [self.car1, self.car2]}, target_ap=1.0, detection_name='car')
self.check_ap({'sample1': [self.car2]}, {'sample1': [self.car1, self.car2]}, target_ap=(((0.8 * 0.4) / 2) / (0.9 * 0.9)), detection_name='car')
self.check_ap({'sample1': [self.car1]}, {'sample1': [self.car1, self.bicycle1]}, target_ap=1.0, detection_name='car')
def test_two_samples(self):
' Test the case with more than one sample. '
self.check_ap({'sample1': [self.car1], 'sample2': [self.car2]}, {'sample1': [self.car1], 'sample2': [self.car2]}, target_ap=1.0, detection_name='car')
self.check_ap({'sample1': [self.car1], 'sample2': []}, {'sample1': [self.car1], 'sample2': []}, target_ap=1.0, detection_name='car')
self.check_ap({'sample1': [self.car1], 'sample2': [self.car2]}, {'sample1': [self.car1], 'sample2': []}, target_ap=(0.4 / 0.9), detection_name='car')
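# Hedged sketch (an assumption about calc_ap() internals, consistent with the expected values in
# TestAPSimple above): nuScenes-style AP averages max(0, precision - min_precision) over the
# 1%-spaced recall bins above min_recall and renormalizes by (1 - min_precision). With one
# prediction matching one of two ground truths, precision is 1.0 up to recall 0.5 and 0 beyond,
# which reproduces the 0.4 / 0.9 target used in test_one_sample().
def _sketch_ap(precision_per_recall_bin, min_precision: float = 0.1, min_recall: float = 0.1) -> float:
    prec = precision_per_recall_bin[(int(round((100 * min_recall))) + 1):]  # Drop bins at or below min_recall.
    prec = np.clip((prec - min_precision), 0.0, None)                       # Clip low precisions to zero.
    return (float(np.mean(prec)) / (1.0 - min_precision))
_prec_demo = np.zeros(101)   # Recall bins 0.00, 0.01, ..., 1.00.
_prec_demo[:51] = 1.0        # Precision 1.0 for recall <= 0.5, then 0.
assert (abs((_sketch_ap(_prec_demo) - (0.4 / 0.9))) < 1e-06)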
|
class TestTPSimple(unittest.TestCase):
' Tests the correctness of true positives metrics calculation for simple cases. '
def setUp(self):
self.car3 = {'trans': (3, 3, 1), 'size': (2, 4, 2), 'rot': Quaternion(axis=(0, 0, 1), angle=0), 'score': 1.0}
self.car4 = {'trans': (3, 3, 1), 'size': (2, 4, 2), 'rot': Quaternion(axis=(0, 0, 1), angle=0), 'score': 1.0}
def check_tp(self, gts: Dict[(str, List[Dict])], preds: Dict[(str, List[Dict])], target_error: float, metric_name: str, detection_name: str='car', min_recall: float=0.1):
'\n Calculate and check the TP error value.\n :param gts: Ground truth data.\n :param preds: Predictions.\n :param target_error: Expected error value.\n :param metric_name: Name of the TP metric.\n :param detection_name: Name of the class we are interested in.\n :param min_recall: Minimum recall value.\n '
metric_data = get_metric_data(gts, preds, detection_name, 2.0)
tp_error = calc_tp(metric_data, min_recall=min_recall, metric_name=metric_name)
self.assertGreaterEqual(0.01, abs((tp_error - target_error)), msg='Incorrect {} value'.format(metric_name))
def test_no_positives(self):
' Tests the error if there are no matches. The expected behaviour is to return an error of 1.0. '
car1 = {'trans': (1, 1, 1), 'score': 1.0}
car2 = {'trans': (3, 3, 1), 'score': 1.0}
bike1 = {'trans': (1, 1, 1), 'score': 1.0, 'name': 'bicycle', 'attr': 'cycle.with_rider'}
for metric_name in TP_METRICS:
self.check_tp({'sample1': [car1]}, {'sample1': [car2]}, target_error=1.0, metric_name=metric_name)
for metric_name in TP_METRICS:
self.check_tp({'sample1': [car1]}, {'sample1': [bike1]}, target_error=1.0, metric_name=metric_name)
def test_perfect(self):
' Tests when everything is estimated perfectly. '
car1 = {'trans': (1, 1, 1), 'score': 1.0}
car2 = {'trans': (1, 1, 1), 'score': 0.3}
for metric_name in TP_METRICS:
self.check_tp({'sample1': [car1]}, {'sample1': [car1]}, target_error=0.0, metric_name=metric_name)
self.check_tp({'sample1': [car1]}, {'sample1': [car2]}, target_error=0.0, metric_name=metric_name)
def test_one_img(self):
' Test single sample case. '
gt1 = {'trans': (1, 1, 1)}
gt2 = {'trans': (10, 10, 1), 'size': (2, 2, 2)}
gt3 = {'trans': (20, 20, 1), 'size': (2, 4, 2)}
pred1 = {'trans': (1, 1, 1), 'score': 1.0}
pred2 = {'trans': (11, 10, 1), 'size': (2, 2, 2), 'score': 0.9}
pred3 = {'trans': (100, 10, 1), 'size': (2, 2, 2), 'score': 0.8}
pred4 = {'trans': (20, 20, 1), 'size': (2, 4, 2), 'score': 0.7}
pred5 = {'trans': (21, 20, 1), 'size': (2, 4, 2), 'score': 0.7}
self.check_tp({'sample1': [gt2]}, {'sample1': [pred2]}, target_error=1, metric_name='trans_err')
target_error = ((((0 + 0) / 2) + ((0 + 0.5) / 2)) / (2 * 0.9))
self.check_tp({'sample1': [gt1, gt2]}, {'sample1': [pred1, pred2]}, target_error=target_error, metric_name='trans_err')
self.check_tp({'sample1': [gt1, gt2]}, {'sample1': [pred1, pred2, pred3]}, target_error=target_error, metric_name='trans_err')
target_error = (((((0 + 0) / 2) + ((0 + 0.5) / 2)) + ((0.5 + 0.33) / 2)) / (3 * 0.9))
self.check_tp({'sample1': [gt1, gt2, gt3]}, {'sample1': [pred1, pred2, pred4]}, target_error=target_error, metric_name='trans_err')
self.check_tp({'sample1': [gt2, gt3]}, {'sample1': [pred2, pred5]}, target_error=1.0, metric_name='trans_err')
def test_two_imgs(self):
' Test the case with more than one sample. '
gt1 = {'trans': (1, 1, 1)}
gt2 = {'trans': (10, 10, 1), 'size': (2, 2, 2)}
gt3 = {'trans': (20, 20, 1), 'size': (2, 4, 2)}
pred1 = {'trans': (1, 1, 1), 'score': 1.0}
pred2 = {'trans': (11, 10, 1), 'size': (2, 2, 2), 'score': 0.9}
pred3 = {'trans': (100, 10, 1), 'size': (2, 2, 2), 'score': 0.8}
pred4 = {'trans': (21, 20, 1), 'size': (2, 4, 2), 'score': 0.7}
self.check_tp({'sample1': [gt2]}, {'sample1': [pred2]}, target_error=1, metric_name='trans_err')
target_error = ((((0 + 0) / 2) + ((0 + 0.5) / 2)) / (2 * 0.9))
self.check_tp({'sample1': [gt1], 'sample2': [gt2]}, {'sample1': [pred1], 'sample2': [pred2]}, target_error=target_error, metric_name='trans_err')
self.check_tp({'sample1': [gt1], 'sample2': [gt2], 'sample3': []}, {'sample1': [pred1], 'sample2': [pred2, pred3], 'sample3': []}, target_error=target_error, metric_name='trans_err')
self.check_tp({'sample1': [gt2, gt3], 'sample2': [gt3]}, {'sample1': [pred2], 'sample2': [pred4]}, target_error=1.0, metric_name='trans_err')
|
class TestDetectionConfig(unittest.TestCase):
def test_serialization(self):
' Test that instance serialization protocol works with json encoding. '
this_dir = os.path.dirname(os.path.abspath(__file__))
cfg_name = 'detection_cvpr_2019'
config_path = os.path.join(this_dir, '..', 'configs', (cfg_name + '.json'))
with open(config_path) as f:
cfg = json.load(f)
detect_cfg = DetectionConfig.deserialize(cfg)
self.assertEqual(cfg, detect_cfg.serialize())
recovered = DetectionConfig.deserialize(json.loads(json.dumps(detect_cfg.serialize())))
self.assertEqual(detect_cfg, recovered)
|
class TestDetectionBox(unittest.TestCase):
def test_serialization(self):
' Test that instance serialization protocol works with json encoding. '
box = DetectionBox()
recovered = DetectionBox.deserialize(json.loads(json.dumps(box.serialize())))
self.assertEqual(box, recovered)
|
class TestEvalBoxes(unittest.TestCase):
def test_serialization(self):
' Test that instance serialization protocol works with json encoding. '
boxes = EvalBoxes()
for i in range(10):
boxes.add_boxes(str(i), [DetectionBox(), DetectionBox(), DetectionBox()])
recovered = EvalBoxes.deserialize(json.loads(json.dumps(boxes.serialize())), DetectionBox)
self.assertEqual(boxes, recovered)
|
class TestMetricData(unittest.TestCase):
def test_serialization(self):
' Test that instance serialization protocol works with json encoding. '
md = DetectionMetricData.random_md()
recovered = DetectionMetricData.deserialize(json.loads(json.dumps(md.serialize())))
self.assertEqual(md, recovered)
|
class TestDetectionMetricDataList(unittest.TestCase):
def test_serialization(self):
' Test that instance serialization protocol works with json encoding. '
mdl = DetectionMetricDataList()
for i in range(10):
mdl.set('name', 0.1, DetectionMetricData.random_md())
recovered = DetectionMetricDataList.deserialize(json.loads(json.dumps(mdl.serialize())))
self.assertEqual(mdl, recovered)
|
class TestDetectionMetrics(unittest.TestCase):
def test_serialization(self):
' Test that instance serialization protocol works with json encoding. '
cfg = {'class_range': {'car': 1.0, 'truck': 1.0, 'bus': 1.0, 'trailer': 1.0, 'construction_vehicle': 1.0, 'pedestrian': 1.0, 'motorcycle': 1.0, 'bicycle': 1.0, 'traffic_cone': 1.0, 'barrier': 1.0}, 'dist_fcn': 'distance', 'dist_ths': [0.0, 1.0], 'dist_th_tp': 1.0, 'min_recall': 0.0, 'min_precision': 0.0, 'max_boxes_per_sample': 1, 'mean_ap_weight': 1.0}
detect_config = DetectionConfig.deserialize(cfg)
metrics = DetectionMetrics(cfg=detect_config)
for (i, name) in enumerate(cfg['class_range'].keys()):
metrics.add_label_ap(name, 1.0, float(i))
for (j, tp_name) in enumerate(TP_METRICS):
metrics.add_label_tp(name, tp_name, float(j))
serialized = json.dumps(metrics.serialize())
deserialized = DetectionMetrics.deserialize(json.loads(serialized))
self.assertEqual(metrics, deserialized)
|
class TestMain(unittest.TestCase):
res_mockup = 'nusc_eval.json'
res_eval_folder = 'tmp'
def tearDown(self):
if os.path.exists(self.res_mockup):
os.remove(self.res_mockup)
if os.path.exists(self.res_eval_folder):
shutil.rmtree(self.res_eval_folder)
@staticmethod
def _mock_submission(nusc: NuScenes, split: str) -> Dict[(str, dict)]:
'\n Creates "reasonable" submission (results and metadata) by looping through the mini-val set, adding 1 GT\n prediction per sample. Predictions will be permuted randomly along all axes.\n '
def random_class(category_name: str) -> str:
class_names = sorted(DETECTION_NAMES)
tmp = category_to_detection_name(category_name)
if ((tmp is not None) and (np.random.rand() < 0.9)):
return tmp
else:
return class_names[np.random.randint(0, (len(class_names) - 1))]
def random_attr(name: str) -> str:
'\n This is the most straightforward way to generate a random attribute.\n Not currently used because we want the test fixture to be backwards compatible.\n '
rel_attributes = detection_name_to_rel_attributes(name)
if (len(rel_attributes) == 0):
return ''
else:
return rel_attributes[np.random.randint(0, len(rel_attributes))]
mock_meta = {'use_camera': False, 'use_lidar': True, 'use_radar': False, 'use_map': False, 'use_external': False}
mock_results = {}
splits = create_splits_scenes()
val_samples = []
for sample in nusc.sample:
if (nusc.get('scene', sample['scene_token'])['name'] in splits[split]):
val_samples.append(sample)
for sample in tqdm(val_samples, leave=False):
sample_res = []
for ann_token in sample['anns']:
ann = nusc.get('sample_annotation', ann_token)
detection_name = random_class(ann['category_name'])
sample_res.append({'sample_token': sample['token'], 'translation': list((np.array(ann['translation']) + (5 * (np.random.rand(3) - 0.5)))), 'size': list(((np.array(ann['size']) * 2) * (np.random.rand(3) + 0.5))), 'rotation': list((np.array(ann['rotation']) + ((np.random.rand(4) - 0.5) * 0.1))), 'velocity': list((nusc.box_velocity(ann_token)[:2] * (np.random.rand(3)[:2] + 0.5))), 'detection_name': detection_name, 'detection_score': random.random(), 'attribute_name': random_attr(detection_name)})
mock_results[sample['token']] = sample_res
mock_submission = {'meta': mock_meta, 'results': mock_results}
return mock_submission
def test_delta(self):
'\n This test runs the evaluation for an arbitrary random set of predictions.\n The resulting score is captured in this very test such that, if we change the eval code,\n this test will trigger if the results change.\n '
random.seed(42)
np.random.seed(42)
assert ('NUSCENES' in os.environ), 'Set NUSCENES env. variable to enable tests.'
nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
with open(self.res_mockup, 'w') as f:
json.dump(self._mock_submission(nusc, 'mini_val'), f, indent=2)
cfg = config_factory('detection_cvpr_2019')
nusc_eval = DetectionEval(nusc, cfg, self.res_mockup, eval_set='mini_val', output_dir=self.res_eval_folder, verbose=False)
(metrics, md_list) = nusc_eval.evaluate()
self.assertAlmostEqual(metrics.nd_score, 0.19449091580477748)
|
class TestLoader(unittest.TestCase):
def test_filter_eval_boxes(self):
'\n Tests filter_eval_boxes() on a set of hand-crafted boxes and the class ranges from the CVPR 2019 config.\n '
cfg = config_factory('detection_cvpr_2019')
max_dist = cfg.class_range
assert ('NUSCENES' in os.environ), 'Set NUSCENES env. variable to enable tests.'
nusc = NuScenes(version='v1.0-mini', dataroot=os.environ['NUSCENES'], verbose=False)
sample_token = '0af0feb5b1394b928dd13d648de898f5'
box1 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle')
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 0)
box2 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='motorcycle')
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1, box2])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 0)
box3 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='car')
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1, box2, box3])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 1)
self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name, 'car')
box4 = DetectionBox(sample_token=sample_token, translation=(68.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle')
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 2)
self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name, 'car')
self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name, 'bicycle')
self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0], 68.681)
box5 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle', ego_translation=(100.0, 0.0, 0.0))
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4, box5])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 2)
self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name, 'car')
self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name, 'bicycle')
self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0], 68.681)
box6 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle', num_pts=0)
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4, box5, box6])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 2)
self.assertEqual(filtered_boxes.boxes[sample_token][0].detection_name, 'car')
self.assertEqual(filtered_boxes.boxes[sample_token][1].detection_name, 'bicycle')
self.assertEqual(filtered_boxes.boxes[sample_token][1].translation[0], 68.681)
sample_token = 'ca9a282c9e77460f8360f564131a8af5'
box1 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle', ego_translation=(25.0, 0.0, 0.0))
box2 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='motorcycle', ego_translation=(45.0, 0.0, 0.0))
box3 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='car', ego_translation=(45.0, 0.0, 0.0))
box4 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='car', ego_translation=(55.0, 0.0, 0.0))
box5 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle', num_pts=1)
box6 = DetectionBox(sample_token=sample_token, translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle', num_pts=0)
eval_boxes = EvalBoxes()
eval_boxes.add_boxes(sample_token, [box1, box2, box3, box4, box5, box6])
filtered_boxes = filter_eval_boxes(nusc, eval_boxes, max_dist)
self.assertEqual(len(filtered_boxes.boxes[sample_token]), 3)
self.assertEqual(filtered_boxes.boxes[sample_token][0].ego_dist, 25.0)
self.assertEqual(filtered_boxes.boxes[sample_token][1].ego_dist, 45.0)
self.assertEqual(filtered_boxes.boxes[sample_token][2].num_pts, 1)
def test_get_box_class_field(self):
eval_boxes = EvalBoxes()
box1 = DetectionBox(sample_token='box1', translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='bicycle', ego_translation=(25.0, 0.0, 0.0))
box2 = DetectionBox(sample_token='box2', translation=(683.681, 1592.002, 0.809), size=(1, 1, 1), detection_name='motorcycle', ego_translation=(45.0, 0.0, 0.0))
eval_boxes.add_boxes('sample1', [])
eval_boxes.add_boxes('sample2', [box1, box2])
class_field = _get_box_class_field(eval_boxes)
self.assertEqual(class_field, 'detection_name')
|
class TestEval(unittest.TestCase):
def test_scale_iou(self):
'Test valid and invalid inputs for scale_iou().'
sa = DetectionBox(size=(4, 4, 4))
sr = DetectionBox(size=(4, 4, 4))
res = scale_iou(sa, sr)
self.assertEqual(res, 1)
sa = DetectionBox(size=(2, 2, 2))
sr = DetectionBox(size=(1, 1, 1))
res = scale_iou(sa, sr)
self.assertEqual(res, (1 / 8))
sa = DetectionBox(size=(1, 1, 1))
sr = DetectionBox(size=(2, 2, 2))
res = scale_iou(sa, sr)
self.assertEqual(res, (1 / 8))
sa = DetectionBox(size=(0.96, 0.37, 0.69))
sr = DetectionBox(size=(0.32, 0.01, 0.39))
res = scale_iou(sa, sr)
self.assertAlmostEqual(res, 0.00509204)
sa = DetectionBox(size=(0, 4, 4))
sr = DetectionBox(size=(4, 4, 4))
self.assertRaises(AssertionError, scale_iou, sa, sr)
sa = DetectionBox(size=(0, 4, 4))
sr = DetectionBox(size=(4, 0, 4))
self.assertRaises(AssertionError, scale_iou, sa, sr)
sa = DetectionBox(size=(4, 4, 4))
sr = DetectionBox(size=(4, (- 5), 4))
self.assertRaises(AssertionError, scale_iou, sa, sr)
def test_yaw_diff(self):
'Test valid and invalid inputs for yaw_diff().'
sa = DetectionBox(rotation=Quaternion(axis=(0, 0, 1), angle=(np.pi / 8)).elements)
sr = DetectionBox(rotation=Quaternion(axis=(0, 0, 1), angle=(np.pi / 8)).elements)
diff = yaw_diff(sa, sr)
self.assertAlmostEqual(diff, 0)
sa = DetectionBox(rotation=Quaternion(axis=(0, 0, 1), angle=(np.pi / 8)).elements)
sr = DetectionBox(rotation=Quaternion(axis=(0, 1, 0), angle=(np.pi / 8)).elements)
diff = yaw_diff(sa, sr)
self.assertAlmostEqual(diff, (np.pi / 8))
q0 = Quaternion(axis=(0, 0, 1), angle=0)
sa = DetectionBox(rotation=q0.elements)
for yaw_in in np.linspace((- 10), 10, 100):
q1 = Quaternion(axis=(0, 0, 1), angle=yaw_in)
sr = DetectionBox(rotation=q1.elements)
diff = yaw_diff(sa, sr)
yaw_true = (yaw_in % (2 * np.pi))
if (yaw_true > np.pi):
yaw_true = ((2 * np.pi) - yaw_true)
self.assertAlmostEqual(diff, yaw_true)
sa = DetectionBox(rotation=Quaternion(axis=(0, 0, 1), angle=(1.1 * np.pi)).elements)
sr = DetectionBox(rotation=Quaternion(axis=(0, 0, 1), angle=(0.9 * np.pi)).elements)
diff = yaw_diff(sa, sr)
self.assertAlmostEqual(diff, (0.2 * np.pi))
def test_angle_diff(self):
'Test valid and invalid inputs for angle_diff().'
def rad(x):
return ((x / 180) * np.pi)
a = 90.0
b = 0.0
period = 360
self.assertAlmostEqual(rad(90), abs(angle_diff(rad(a), rad(b), rad(period))))
a = 90.0
b = 0.0
period = 180
self.assertAlmostEqual(rad(90), abs(angle_diff(rad(a), rad(b), rad(period))))
a = 90.0
b = 0.0
period = 90
self.assertAlmostEqual(rad(0), abs(angle_diff(rad(a), rad(b), rad(period))))
a = 0.0
b = 90.0
period = 90
self.assertAlmostEqual(rad(0), abs(angle_diff(rad(a), rad(b), rad(period))))
a = 0.0
b = 180.0
period = 180
self.assertAlmostEqual(rad(0), abs(angle_diff(rad(a), rad(b), rad(period))))
a = 0.0
b = 180.0
period = 360
self.assertAlmostEqual(rad(180), abs(angle_diff(rad(a), rad(b), rad(period))))
a = 0.0
b = (180.0 + (360 * 200))
period = 360
self.assertAlmostEqual(rad(180), abs(angle_diff(rad(a), rad(b), rad(period))))
def test_center_distance(self):
'Test for center_distance().'
sa = DetectionBox(translation=(4, 4, 5))
sr = DetectionBox(translation=(4, 4, 5))
self.assertAlmostEqual(center_distance(sa, sr), 0)
sa = DetectionBox(size=(4, 4, 4))
sr = DetectionBox(size=(3, 3, 3))
self.assertAlmostEqual(center_distance(sa, sr), 0)
sa = DetectionBox(translation=(4, 4, 4))
sr = DetectionBox(translation=(3, 3, 3))
self.assertAlmostEqual(center_distance(sa, sr), np.sqrt((((3 - 4) ** 2) + ((3 - 4) ** 2))))
sa = DetectionBox(translation=((- 1), (- 1), (- 1)))
sr = DetectionBox(translation=(1, 1, 1))
self.assertAlmostEqual(center_distance(sa, sr), np.sqrt((((1 + 1) ** 2) + ((1 + 1) ** 2))))
sa = DetectionBox(translation=(4.2, 2.8, 4.2))
sr = DetectionBox(translation=((- 1.45), 3.5, 3.9))
self.assertAlmostEqual(center_distance(sa, sr), np.sqrt(((((- 1.45) - 4.2) ** 2) + ((3.5 - 2.8) ** 2))))
def test_velocity_l2(self):
'Test for velocity_l2().'
sa = DetectionBox(velocity=(4, 4))
sr = DetectionBox(velocity=(4, 4))
self.assertAlmostEqual(velocity_l2(sa, sr), 0)
sa = DetectionBox(velocity=((- 1), (- 1)))
sr = DetectionBox(velocity=(1, 1))
self.assertAlmostEqual(velocity_l2(sa, sr), np.sqrt((((1 + 1) ** 2) + ((1 + 1) ** 2))))
sa = DetectionBox(velocity=(8.2, 1.4))
sr = DetectionBox(velocity=(6.4, (- 9.4)))
self.assertAlmostEqual(velocity_l2(sa, sr), np.sqrt((((6.4 - 8.2) ** 2) + (((- 9.4) - 1.4) ** 2))))
def test_cummean(self):
'Test for cummean().'
x = np.array((np.nan, 5))
assert_array_almost_equal(cummean(x), np.array((0, 5)))
x = np.array((5, 2, np.nan))
assert_array_almost_equal(cummean(x), np.array((5, 3.5, 3.5)))
x = np.array((np.nan, 4.5, np.nan))
assert_array_almost_equal(cummean(x), np.array((0, 4.5, 4.5)))
x = np.array((np.nan, np.nan, np.nan, np.nan))
assert_array_almost_equal(cummean(x), np.array((1, 1, 1, 1)))
x = np.array([np.nan])
assert_array_almost_equal(cummean(x), np.array([1]))
x = np.array([4])
assert_array_almost_equal(cummean(x), np.array([4.0]))
x = np.array((np.nan, 3.58, 2.14, np.nan, 9, 1.48, np.nan))
assert_array_almost_equal(cummean(x), np.array((0, 3.58, 2.86, 2.86, 4.906666, 4.05, 4.05)))
def test_attr_acc(self):
'Test for attr_acc().'
sa = DetectionBox(attribute_name='vehicle.parked')
sr = DetectionBox(attribute_name='vehicle.parked')
self.assertAlmostEqual(attr_acc(sa, sr), 1.0)
sa = DetectionBox(attribute_name='vehicle.parked')
sr = DetectionBox(attribute_name='vehicle.moving')
self.assertAlmostEqual(attr_acc(sa, sr), 0.0)
sa = DetectionBox(attribute_name='')
sr = DetectionBox(attribute_name='vehicle.parked')
self.assertIs(attr_acc(sa, sr), np.nan)
|
def category_to_detection_name(category_name: str) -> Optional[str]:
'\n Default label mapping from nuScenes to nuScenes detection classes.\n Note that pedestrian does not include personal_mobility, stroller and wheelchair.\n :param category_name: Generic nuScenes class.\n :return: nuScenes detection class.\n '
detection_mapping = {'movable_object.barrier': 'barrier', 'vehicle.bicycle': 'bicycle', 'vehicle.bus.bendy': 'bus', 'vehicle.bus.rigid': 'bus', 'vehicle.car': 'car', 'vehicle.construction': 'construction_vehicle', 'vehicle.motorcycle': 'motorcycle', 'human.pedestrian.adult': 'pedestrian', 'human.pedestrian.child': 'pedestrian', 'human.pedestrian.construction_worker': 'pedestrian', 'human.pedestrian.police_officer': 'pedestrian', 'movable_object.trafficcone': 'traffic_cone', 'vehicle.trailer': 'trailer', 'vehicle.truck': 'truck'}
if (category_name in detection_mapping):
return detection_mapping[category_name]
else:
return None
|
def detection_name_to_rel_attributes(detection_name: str) -> List[str]:
'\n Returns a list of relevant attributes for a given detection class.\n :param detection_name: The detection class.\n :return: List of relevant attributes.\n '
if (detection_name in ['pedestrian']):
rel_attributes = ['pedestrian.moving', 'pedestrian.sitting_lying_down', 'pedestrian.standing']
elif (detection_name in ['bicycle', 'motorcycle']):
rel_attributes = ['cycle.with_rider', 'cycle.without_rider']
elif (detection_name in ['car', 'bus', 'construction_vehicle', 'trailer', 'truck']):
rel_attributes = ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped']
elif (detection_name in ['barrier', 'traffic_cone']):
rel_attributes = []
else:
raise ValueError(('Error: %s is not a valid detection class.' % detection_name))
return rel_attributes
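# Hedged usage sketch for the two mapping helpers above; the expected values are read directly
# from the mappings they hard-code.
assert (category_to_detection_name('vehicle.car') == 'car')
assert (category_to_detection_name('animal') is None)  # Unmapped categories map to None.
assert (detection_name_to_rel_attributes('car') == ['vehicle.moving', 'vehicle.parked', 'vehicle.stopped'])
assert (detection_name_to_rel_attributes('traffic_cone') == [])  # Barriers and cones carry no attributes.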
|
class LidarSegEval():
'\n This is the official nuScenes-lidarseg evaluation code.\n Results are written to the provided output_dir.\n\n nuScenes-lidarseg uses the following metrics:\n - Mean Intersection-over-Union (mIOU): We use the well-known IOU metric, which is defined as TP / (TP + FP + FN).\n The IOU score is calculated separately for each class, and then the mean is\n computed across classes. Note that in the challenge, index 0 is ignored in\n the calculation.\n - Frequency-weighted IOU (FWIOU): Instead of taking the mean of the IOUs across all the classes, each IOU is\n weighted by the point-level frequency of its class. Note that in the challenge,\n index 0 is ignored in the calculation. FWIOU is not used for the challenge.\n\n We assume that:\n - For each pointcloud, the prediction for every point is present in a .bin file, in the same order as that of the\n points stored in the corresponding .bin file.\n - The naming convention of the .bin files containing the predictions for a single point cloud is:\n <lidar_sample_data_token>_lidarseg.bin\n - The predictions are between 1 and 16 (inclusive); 0 is the index of the ignored class.\n\n Please see https://www.nuscenes.org/lidar-segmentation for more details.\n '
def __init__(self, nusc: NuScenes, results_folder: str, eval_set: str, verbose: bool=False):
'\n Initialize a LidarSegEval object.\n :param nusc: A NuScenes object.\n :param results_folder: Path to the folder.\n :param eval_set: The dataset split to evaluate on, e.g. train, val or test.\n :param verbose: Whether to print messages during the evaluation.\n '
assert (len(nusc.lidarseg) > 0), 'Error: No ground truth annotations found in {}.'.format(nusc.version)
self.results_folder = results_folder
self.results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
assert os.path.exists(self.results_bin_folder), 'Error: The folder containing the .bin files ({}) does not exist.'.format(self.results_bin_folder)
self.nusc = nusc
self.results_folder = results_folder
self.eval_set = eval_set
self.verbose = verbose
self.mapper = LidarsegClassMapper(self.nusc)
self.ignore_idx = self.mapper.ignore_class['index']
self.id2name = {idx: name for (name, idx) in self.mapper.coarse_name_2_coarse_idx_mapping.items()}
self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)
if self.verbose:
print('There are {} classes.'.format(self.num_classes))
self.global_cm = ConfusionMatrix(self.num_classes, self.ignore_idx)
self.sample_tokens = get_samples_in_eval_set(self.nusc, self.eval_set)
if self.verbose:
print('There are {} samples.'.format(len(self.sample_tokens)))
def evaluate(self) -> Dict:
'\n Performs the actual evaluation.\n :return: A dictionary containing the evaluated metrics.\n '
for sample_token in tqdm(self.sample_tokens, disable=(not self.verbose)):
sample = self.nusc.get('sample', sample_token)
sd_token = sample['data']['LIDAR_TOP']
lidarseg_label_filename = os.path.join(self.nusc.dataroot, self.nusc.get('lidarseg', sd_token)['filename'])
lidarseg_label = load_bin_file(lidarseg_label_filename)
lidarseg_label = self.mapper.convert_label(lidarseg_label)
lidarseg_pred_filename = os.path.join(self.results_folder, 'lidarseg', self.eval_set, (sd_token + '_lidarseg.bin'))
lidarseg_pred = load_bin_file(lidarseg_pred_filename)
self.global_cm.update(lidarseg_label, lidarseg_pred)
iou_per_class = self.global_cm.get_per_class_iou()
miou = self.global_cm.get_mean_iou()
freqweighted_iou = self.global_cm.get_freqweighted_iou()
results = {'iou_per_class': {self.id2name[i]: class_iou for (i, class_iou) in enumerate(iou_per_class)}, 'miou': miou, 'freq_weighted_iou': freqweighted_iou}
if self.verbose:
print('======\nnuScenes-lidarseg evaluation for {}'.format(self.eval_set))
print(json.dumps(results, indent=4, sort_keys=False))
print('======')
return results
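# Hedged sketch of the per-class IoU used above (an assumption about ConfusionMatrix internals):
# IoU = TP / (TP + FP + FN), which can be read off a confusion matrix. In this toy example rows are
# ground truth, columns are predictions, and index 0 is the ignored class, as in the evaluation.
_cm_demo = np.array([[0, 0, 0], [0, 8, 2], [0, 1, 9]], dtype=float)
_tp_demo = np.diag(_cm_demo)
_fp_demo = (_cm_demo.sum(axis=0) - _tp_demo)  # Column sums minus the diagonal.
_fn_demo = (_cm_demo.sum(axis=1) - _tp_demo)  # Row sums minus the diagonal.
_iou_demo = (_tp_demo / np.maximum(((_tp_demo + _fp_demo) + _fn_demo), 1e-09))
# _iou_demo[1] == 8 / (8 + 1 + 2) ~= 0.727 and _iou_demo[2] == 9 / (9 + 2 + 1) == 0.75;
# mIoU averages the IoUs over the classes, skipping the ignored index 0.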
|
def validate_submission(nusc: NuScenes, results_folder: str, eval_set: str, verbose: bool=False, zip_out: str=None) -> None:
'\n Checks if a results folder is valid. The following checks are performed:\n - Check that the submission folder matches the structure described in\n https://github.com/nutonomy/nuscenes-devkit/blob/master/python-sdk/nuscenes/eval/lidarseg/README.md\n - Check that the submission.json is of the following structure:\n {"meta": {"use_camera": false,\n "use_lidar": true,\n "use_radar": false,\n "use_map": false,\n "use_external": false}}\n - Check that each lidar sample data in the evaluation set is present and valid.\n\n :param nusc: A NuScenes object.\n :param results_folder: Path to the folder containing the submission.\n :param eval_set: The dataset split to evaluate on, e.g. train, val or test.\n :param verbose: Whether to print messages during the evaluation.\n :param zip_out: Path to zip results_folder to, if provided.\n '
mapper = LidarsegClassMapper(nusc)
num_classes = len(mapper.coarse_name_2_coarse_idx_mapping)
if verbose:
print('Checking if folder structure of {} is correct...'.format(results_folder))
results_meta_folder = os.path.join(results_folder, eval_set)
assert os.path.exists(results_meta_folder), 'Error: The folder containing the submission.json ({}) does not exist.'.format(results_meta_folder)
submisson_json_path = os.path.join(results_meta_folder, 'submission.json')
assert os.path.exists(submisson_json_path), 'Error: submission.json ({}) does not exist.'.format(submisson_json_path)
results_bin_folder = os.path.join(results_folder, 'lidarseg', eval_set)
assert os.path.exists(results_bin_folder), 'Error: The folder containing the .bin files ({}) does not exist.'.format(results_bin_folder)
if verbose:
print('\tPassed.')
if verbose:
print('Checking contents of {}...'.format(submisson_json_path))
with open(submisson_json_path) as f:
submission_meta = json.load(f)
valid_meta = {'use_camera', 'use_lidar', 'use_radar', 'use_map', 'use_external'}
assert (valid_meta == set(submission_meta['meta'].keys())), '{} must contain {}.'.format(submisson_json_path, valid_meta)
for meta_key in valid_meta:
meta_key_type = type(submission_meta['meta'][meta_key])
assert (meta_key_type == bool), 'Error: Value for {} should be bool, not {}.'.format(meta_key, meta_key_type)
if verbose:
print('\tPassed.')
if verbose:
print('Checking if all .bin files for {} exist and are valid...'.format(eval_set))
sample_tokens = get_samples_in_eval_set(nusc, eval_set)
for sample_token in tqdm(sample_tokens, disable=(not verbose)):
sample = nusc.get('sample', sample_token)
sd_token = sample['data']['LIDAR_TOP']
lidarseg_pred_filename = os.path.join(results_bin_folder, (sd_token + '_lidarseg.bin'))
assert os.path.exists(lidarseg_pred_filename), 'Error: The prediction .bin file {} does not exist.'.format(lidarseg_pred_filename)
lidarseg_pred = np.fromfile(lidarseg_pred_filename, dtype=np.uint8)
if (len(nusc.lidarseg) > 0):
lidarseg_label_filename = os.path.join(nusc.dataroot, nusc.get('lidarseg', sd_token)['filename'])
assert os.path.exists(lidarseg_label_filename), 'Error: The ground truth .bin file {} does not exist.'.format(lidarseg_label_filename)
lidarseg_label = np.fromfile(lidarseg_label_filename, dtype=np.uint8)
num_points = len(lidarseg_label)
else:
pointsensor = nusc.get('sample_data', sd_token)
pcl_path = os.path.join(nusc.dataroot, pointsensor['filename'])
pc = LidarPointCloud.from_file(pcl_path)
points = pc.points
num_points = points.shape[1]
assert (num_points == len(lidarseg_pred)), 'Error: There are {} predictions for lidar sample data token {} but there are only {} points in the point cloud.'.format(len(lidarseg_pred), sd_token, num_points)
assert all(((lidarseg_pred > 0) & (lidarseg_pred < num_classes))), 'Error: Array for predictions in {} must be between 1 and {} (inclusive).'.format(lidarseg_pred_filename, (num_classes - 1))
if verbose:
print('\tPassed.')
if verbose:
print('Results folder {} successfully validated!'.format(results_folder))
if zip_out:
assert os.path.exists(zip_out), 'Error: The folder {} to zip the results to does not exist.'.format(zip_out)
results_zip = os.path.join(zip_out, os.path.basename(os.path.normpath(results_folder)))
results_zip_name = shutil.make_archive(results_zip, 'zip', results_folder)
if verbose:
print('Results folder {} zipped to {}'.format(results_folder, results_zip_name))
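# Hedged sketch of the minimal submission.json that validate_submission() above expects, plus an
# illustrative call. The folder name and the NUSCENES environment variable are assumptions.
# submission_meta = {'meta': {'use_camera': False, 'use_lidar': True, 'use_radar': False,
#                             'use_map': False, 'use_external': False}}
# os.makedirs(os.path.join('my_results', 'test'), exist_ok=True)
# with open(os.path.join('my_results', 'test', 'submission.json'), 'w') as f:
#     json.dump(submission_meta, f, indent=2)
# # Predictions go to my_results/lidarseg/test/<lidar_sample_data_token>_lidarseg.bin.
# nusc = NuScenes(version='v1.0-test', dataroot=os.environ['NUSCENES'], verbose=False)
# validate_submission(nusc, results_folder='my_results', eval_set='test', verbose=True)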
|
def prepare_files(method_names: List[str], root_dir: str) -> None:
'\n Prepare the files containing the predictions of the various method names.\n :param method_names: A list of method names.\n :param root_dir: The directory where the predictions of the various methods are stored at.\n '
for method_name in method_names:
zip_path_to_predictions_by_method = os.path.join(root_dir, (method_name + '.zip'))
dir_path_to_predictions_by_method = os.path.join(root_dir, method_name)
assert os.path.exists(zip_path_to_predictions_by_method), 'Error: Zip file for method {} does not exist at {}.'.format(method_name, zip_path_to_predictions_by_method)
zip_ref = zipfile.ZipFile(zip_path_to_predictions_by_method, 'r')
zip_ref.extractall(dir_path_to_predictions_by_method)
zip_ref.close()
|
def get_prediction_json_path(prediction_dir: str) -> str:
'\n Get the name of the json file in a directory (abort if there is more than one).\n :param prediction_dir: Path to the directory to check for the json file.\n :return: Absolute path to the json file.\n '
files_in_dir = os.listdir(prediction_dir)
files_in_dir = [f for f in files_in_dir if f.endswith('.json')]
assert (len(files_in_dir) == 1), 'Error: The submission .zip file must contain exactly one .json file.'
prediction_json_path = os.path.join(prediction_dir, files_in_dir[0])
assert os.path.exists(prediction_json_path), 'Error: JSON result file {} does not exist!'.format(prediction_json_path)
return prediction_json_path
|
def panop_baselines_from_lidarseg_detect_track(out_dir: str, lidarseg_preds_dir: str, lidarseg_method_names: List[str], det_or_track_preds_dir: str, det_or_track_method_names: List[str], task: str='tracking', version: str='v1.0-test', dataroot: str='/data/sets/nuscenes', n_jobs: int=(- 1), verbose: bool=False) -> None:
'\n Create baselines for a given panoptic task by merging the predictions of lidarseg and either tracking or detection\n methods.\n :param out_dir: Path to save any output to.\n :param lidarseg_preds_dir: Path to the directory where the lidarseg predictions are stored.\n :param lidarseg_method_names: List of lidarseg method names.\n :param det_or_track_preds_dir: Path to the directory which contains the predictions from some methods to merge with\n those of lidarseg to create panoptic predictions of a particular task.\n :param det_or_track_method_names: List of tracking (or detection) method names to merge with lidarseg to create\n panoptic predictions.\n :param task: The task to create the panoptic predictions for and run evaluation on (either tracking or\n segmentation).\n :param version: Version of nuScenes to use (e.g. "v1.0", ...).\n :param dataroot: Path to the tables and data for the specified version of nuScenes.\n :param n_jobs: The maximum number of concurrently running jobs. If -1, all CPUs are used.\n :param verbose: Whether to print messages to stdout.\n '
prepare_files(lidarseg_method_names, lidarseg_preds_dir)
prepare_files(det_or_track_method_names, det_or_track_preds_dir)
baselines = list(itertools.product(lidarseg_method_names, det_or_track_method_names))
print('There are {} baselines: {}'.format(len(baselines), baselines))
print('Generating and evaluating {} panoptic {} baselines...'.format(len(baselines), task))
start_time = time.time()
with joblib.Parallel(n_jobs=n_jobs) as parallel:
parallel([joblib.delayed(generate_and_evaluate_baseline)(out_dir, lidarseg_preds_dir, lidarseg_method_name, det_or_track_preds_dir, det_or_track_method_name, task, version, dataroot, verbose) for (lidarseg_method_name, det_or_track_method_name) in baselines])
print('Generated and evaluated {} panoptic {} baselines in {} seconds.'.format(len(baselines), task, (time.time() - start_time)))
|
def generate_and_evaluate_baseline(out_dir: str, lidarseg_preds_dir: str, lidarseg_method_name: str, det_or_track_preds_dir: str, det_or_track_method_name: str, task: str='tracking', version: str='v1.0-test', dataroot: str='/data/sets/nuscenes', verbose: bool=False) -> None:
'\n Generate panoptic predictions by merging a lidarseg method and a tracking (or detection) method, and evaluate the\n panoptic predictions.\n :param out_dir: Path to save any output to.\n :param lidarseg_preds_dir: Path to the directory where the lidarseg predictions are stored.\n :param lidarseg_method_name: A lidarseg method name.\n :param det_or_track_preds_dir: Path to the directory which contains the predictions from some methods to merge with\n those of lidarseg to create panoptic predictions of a particular task.\n :param det_or_track_method_name: A tracking (or detection) method name to merge with lidarseg to create\n panoptic predictions.\n :param task: The task to create the panoptic predictions for and run evaluation on (either tracking or\n segmentation).\n :param version: Version of nuScenes to use (e.g. "v1.0", ...).\n :param dataroot: Path to the tables and data for the specified version of nuScenes.\n :param verbose: Whether to print messages to stdout.\n '
nusc = NuScenes(version=version, dataroot=dataroot, verbose=verbose)
eval_set = nusc.version.split('-')[(- 1)]
dir_to_save_panoptic_preds_to = os.path.join(out_dir, task, 'panoptic_predictions', '{}_with_{}'.format(lidarseg_method_name, det_or_track_method_name))
os.makedirs(dir_to_save_panoptic_preds_to, exist_ok=True)
dir_of_lidarseg_method_preds = os.path.join(lidarseg_preds_dir, lidarseg_method_name)
json_of_preds_by_det_or_track_method = get_prediction_json_path(os.path.join(det_or_track_preds_dir, det_or_track_method_name))
generate_panoptic_labels(nusc, dir_of_lidarseg_method_preds, json_of_preds_by_det_or_track_method, eval_set=eval_set, task=task, out_dir=dir_to_save_panoptic_preds_to)
dir_to_save_evaluation_results_to = os.path.join(out_dir, task, 'panoptic_eval_results', '{}_with_{}'.format(lidarseg_method_name, det_or_track_method_name))
os.makedirs(dir_to_save_evaluation_results_to, exist_ok=True)
dir_of_panoptic_preds = dir_to_save_panoptic_preds_to
evaluator = NuScenesPanopticEval(nusc=nusc, results_folder=dir_of_panoptic_preds, eval_set=eval_set, task=task, min_inst_points=15, out_dir=dir_to_save_evaluation_results_to, verbose=verbose)
evaluator.evaluate()
print('Evaluation for panoptic {} using predictions merged from {} and {} saved at {}.'.format(task, lidarseg_method_name, det_or_track_method_name, dir_to_save_evaluation_results_to))
|
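# Hedged worked example of the panoptic metrics described in the class docstring below (toy numbers,
# not devkit code). In the standard PQ definition, the IoU term is summed over matched (TP) segments.
_tp_seg, _fp_seg, _fn_seg = (8, 2, 2)   # Matched / spurious / missed segments for one class.
_sum_iou_matches = 6.0                  # Sum of IoUs over the 8 matched segments.
_pq_demo = (_sum_iou_matches / ((_tp_seg + (0.5 * _fp_seg)) + (0.5 * _fn_seg)))  # 6.0 / 10 = 0.6
_tq_demo = 0.5                          # Illustrative tracking quality.
_pat_demo = (((2 * _pq_demo) * _tq_demo) / (_pq_demo + _tq_demo))                # ~= 0.545 (harmonic mean)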
class NuScenesPanopticEval():
'\n This is the official Panoptic nuScenes evaluation code. Results are written to the provided output_dir.\n Panoptic nuScenes uses the following metrics:\n - Panoptic Segmentation: we use the PQ (Panoptic Quality) metric, which is defined as:\n PQ = IOU/(TP + 0.5*FP + 0.5*FN).\n - Multi-object Panoptic Tracking: we use the PAT (Panoptic Tracking) metric, which is defined as:\n PAT = 2*PQ*TQ / (PQ + TQ), where TQ is as defined in the paper:\n Panoptic nuScenes: A Large-Scale Benchmark for LiDAR Panoptic Segmentation and Tracking\n (https://arxiv.org/pdf/2109.03805.pdf)\n '
def __init__(self, nusc: NuScenes, results_folder: str, eval_set: str, task: str, min_inst_points: int, out_dir: str=None, verbose: bool=False):
"\n :param nusc: A NuScenes object.\n :param results_folder: Path to the folder.\n :param eval_set: The dataset split to evaluate on, e.g. train, val or test.\n :param task: What panoptic task to evaluate on, 'segmentation' for panoptic segmentation evaluation only;\n and 'tracking' for both panoptic segmentation and multi-object panoptic tracking evaluation.\n :param min_inst_points: minimal number of instance points.\n :param out_dir: Output directory. The results is saved as 'out_dir/{task}-result.json' file;\n :param verbose: Whether to print messages during the evaluation.\n "
assert (hasattr(nusc, 'panoptic') and (len(getattr(nusc, 'panoptic')) > 0)), f'Error: no panoptic ground truths found in {nusc.version}'
supported_tasks = ['segmentation', 'tracking']
if (task not in supported_tasks):
raise ValueError(f'Supported task must be one of: {supported_tasks}, got: {task} !')
results_npz_folder = os.path.join(results_folder, 'panoptic', eval_set)
assert os.path.exists(results_npz_folder), f'Error: The folder containing the .npz files ({results_npz_folder}) does not exist.'
self.nusc = nusc
self.results_folder = results_folder
self.eval_set = eval_set
self.task = task
self.verbose = verbose
self.min_inst_points = min_inst_points
self.out_dir = out_dir
self.mapper = PanopticClassMapper(self.nusc)
self.ignore_idx = self.mapper.ignore_class['index']
self.id2name = {idx: name for (name, idx) in self.mapper.coarse_name_2_coarse_idx_mapping.items()}
self.num_classes = len(self.mapper.coarse_name_2_coarse_idx_mapping)
self.things = self.mapper.things.keys()
self.stuff = self.mapper.stuff.keys()
self.sample_tokens = get_samples_in_panoptic_eval_set(self.nusc, self.eval_set)
if self.verbose:
print(f'There are {self.num_classes} classes, {len(self.sample_tokens)} samples.')
self.evaluator = dict(segmentation=PanopticEval(n_classes=self.num_classes, ignore=[self.ignore_idx], min_points=self.min_inst_points))
if (self.task == 'tracking'):
self.scene_name2tok = {rec['name']: rec['token'] for rec in nusc.scene}
self.evaluator['tracking'] = PanopticTrackingEval(n_classes=self.num_classes, min_stuff_cls_id=(len(self.things) + 1), ignore=[self.ignore_idx], min_points=self.min_inst_points)
self.eval_result_file = os.path.join(self.out_dir, (self.task + '-result.json'))
if os.path.isfile(self.eval_result_file):
os.remove(self.eval_result_file)
def evaluate(self) -> None:
'\n Evaluate metrics for task. For segmentation task, only panoptic segmentation metrics will be evaluated. For\n tracking task, besides the multi-object panoptic tracking metrics, single frame based panoptic segmentation\n metrics will be evaluated as well.\n '
eval_results = {'segmentation': self.evaluate_segmentation()}
if (self.task == 'tracking'):
eval_results['tracking'] = self.evaluate_tracking()
self.save_result(eval_results)
def evaluate_segmentation(self) -> Dict[(str, Any)]:
'\n Calculate panoptic segmentation metrics.\n :return: A dict of panoptic metrics for mean of all classes and each class.\n {\n "all": { "PQ": float, "SQ": float, "RQ": float, "mIoU": float, "PQ_dagger": float},\n "ignore": { "PQ": float, "SQ": float, "RQ": float, "IoU": float},\n "car": { "PQ": float, "SQ": float, "RQ": float, "IoU": float},\n ...\n }\n '
for sample_token in tqdm(self.sample_tokens, disable=(not self.verbose)):
sample = self.nusc.get('sample', sample_token)
sd_token = sample['data']['LIDAR_TOP']
panoptic_label_filename = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])
panoptic_label = load_bin_file(panoptic_label_filename, type='panoptic')
label_sem = self.mapper.convert_label((panoptic_label // 1000))
label_inst = panoptic_label
panoptic_pred_filename = os.path.join(self.results_folder, 'panoptic', self.eval_set, (sd_token + '_panoptic.npz'))
panoptic_pred = load_bin_file(panoptic_pred_filename, type='panoptic')
pred_sem = (panoptic_pred // 1000)
pred_inst = panoptic_pred
self.evaluator['segmentation'].addBatch(pred_sem, pred_inst, label_sem, label_inst)
(mean_pq, mean_sq, mean_rq, class_all_pq, class_all_sq, class_all_rq) = self.evaluator['segmentation'].getPQ()
(mean_iou, class_all_iou) = self.evaluator['segmentation'].getSemIoU()
results = self.wrap_result_segmentation(mean_pq, mean_sq, mean_rq, mean_iou, class_all_pq, class_all_sq, class_all_rq, class_all_iou)
return results
def wrap_result_segmentation(self, mean_pq: np.ndarray, mean_sq: np.ndarray, mean_rq: np.ndarray, mean_iou: np.ndarray, class_all_pq: np.ndarray, class_all_sq: np.ndarray, class_all_rq: np.ndarray, class_all_iou: np.ndarray) -> Dict[(str, Any)]:
'\n Wrap panoptic segmentation results to dict format.\n :param mean_pq: <float64: 1>, Mean Panoptic Quality over all classes.\n :param mean_sq: <float64: 1>, Mean Segmentation Quality over all classes.\n :param mean_rq: <float64: 1>, Mean Recognition Quality over all classes.\n :param mean_iou: <float64: 1>, Mean IoU score over all classes.\n :param class_all_pq: <float64: num_classes,>, Panoptic Quality for each class.\n :param class_all_sq: <float64: num_classes,> Segmentation Quality for each class.\n :param class_all_rq: <float64: num_classes,>, Recognition Quality for each class.\n :param class_all_iou: <float64: num_classes,>, IoU scores for each class.\n :return: A dict of panoptic segmentation metrics.\n '
(mean_pq, mean_sq, mean_rq, mean_iou) = (mean_pq.item(), mean_sq.item(), mean_rq.item(), mean_iou.item())
class_all_pq = class_all_pq.flatten().tolist()
class_all_sq = class_all_sq.flatten().tolist()
class_all_rq = class_all_rq.flatten().tolist()
class_all_iou = class_all_iou.flatten().tolist()
results = dict()
results['all'] = dict(PQ=mean_pq, SQ=mean_sq, RQ=mean_rq, mIoU=mean_iou)
for (idx, (pq, rq, sq, iou)) in enumerate(zip(class_all_pq, class_all_rq, class_all_sq, class_all_iou)):
results[self.id2name[idx]] = dict(PQ=pq, SQ=sq, RQ=rq, IoU=iou)
thing_pq_list = [float(results[c]['PQ']) for c in self.things]
stuff_iou_list = [float(results[c]['IoU']) for c in self.stuff]
results['all']['PQ_dagger'] = np.mean((thing_pq_list + stuff_iou_list))
return results
def evaluate_tracking(self) -> Dict[(str, Any)]:
'\n Calculate multi-object panoptic tracking metrics.\n :return: A dict of panoptic metrics for mean of all classes and each class.\n {\n "all": { "PAT": float, "PQ": float, "TQ": float, "PTQ": float, "sPTQ": float, "LSTQ": float,\n "mIoU": float, "S_assoc": float, "PTQ_dagger": float, "MOTSA": float, "sMOTSA": float,\n "MOTSP": float},\n "ignore": { "PTQ": float, "sPTQ": float, "IoU": float},\n "car": { "PTQ": float, "sPTQ": float, "IoU": float},\n ...\n }\n '
eval_scenes = create_splits_scenes(verbose=False)[self.eval_set]
for scene in tqdm(eval_scenes, disable=(not self.verbose)):
scene = self.nusc.get('scene', self.scene_name2tok[scene])
(cur_token, last_token) = (scene['first_sample_token'], scene['last_sample_token'])
(pred_sem, pred_inst, label_sem, label_inst) = ([None], [None], [None], [None])
while True:
cur_sample = self.nusc.get('sample', cur_token)
sd_token = cur_sample['data']['LIDAR_TOP']
gt_label_file = os.path.join(self.nusc.dataroot, self.nusc.get('panoptic', sd_token)['filename'])
panoptic_label = load_bin_file(gt_label_file, type='panoptic')
label_sem.append(self.mapper.convert_label((panoptic_label // 1000)))
label_sem = label_sem[(- 2):]
label_inst.append(panoptic_label)
label_inst = label_inst[(- 2):]
pred_file = os.path.join(self.results_folder, 'panoptic', self.eval_set, (sd_token + '_panoptic.npz'))
panoptic_pred = load_bin_file(pred_file, type='panoptic')
pred_sem.append((panoptic_pred // 1000))
pred_sem = pred_sem[(- 2):]
pred_inst.append(panoptic_pred)
pred_inst = pred_inst[(- 2):]
self.evaluator['tracking'].add_batch(scene['name'], pred_sem, pred_inst, label_sem, label_inst)
if (cur_token == last_token):
break
cur_token = cur_sample['next']
(pat, mean_pq, mean_tq) = self.evaluator['tracking'].get_pat()
(mean_ptq, class_all_ptq, mean_sptq, class_all_sptq) = self.evaluator['tracking'].get_ptq()
(mean_iou, class_all_iou) = self.evaluator['tracking'].getSemIoU()
(lstq, s_assoc) = self.evaluator['tracking'].get_lstq()
(mean_motsa, mean_s_motsa, mean_motsp) = self.evaluator['tracking'].get_motsa()
results = self.wrap_result_mopt(pat=pat, mean_pq=mean_pq, mean_tq=mean_tq, mean_ptq=mean_ptq, class_all_ptq=class_all_ptq, mean_sptq=mean_sptq, class_all_sptq=class_all_sptq, mean_iou=mean_iou, class_all_iou=class_all_iou, lstq=lstq, s_assoc=s_assoc, mean_motsa=mean_motsa, mean_s_motsa=mean_s_motsa, mean_motsp=mean_motsp)
return results
def wrap_result_mopt(self, pat: np.ndarray, mean_pq: np.ndarray, mean_tq: np.ndarray, mean_ptq: np.ndarray, class_all_ptq: np.ndarray, mean_sptq: np.ndarray, class_all_sptq: np.ndarray, mean_iou: np.ndarray, class_all_iou: np.ndarray, lstq: np.ndarray, s_assoc: np.ndarray, mean_motsa: np.ndarray, mean_s_motsa: np.ndarray, mean_motsp: np.ndarray) -> Dict[(str, Any)]:
'\n Wrap up MOPT results to dictionary.\n :param pat: <float64: 1>, Panoptic Tracking (PAT) score over all classes.\n :param mean_pq: <float64: 1>, Mean Panoptic Quality over all classes.\n :param mean_tq: <float64: 1>, Mean Tracking Quality over all temporally unique instances.\n :param mean_ptq: <float64: 1>, Mean PTQ score over all classes.\n :param mean_sptq: <float64: 1>, Mean soft-PTQ score over all classes.\n :param mean_iou: <float64: 1>, Mean IoU score over all classes.\n :param class_all_ptq: <float64: num_classes,>, PTQ scores for each class.\n :param class_all_sptq: <float64: num_classes,>, Soft-PTQ scores for each class.\n :param class_all_iou: <float64: num_classes,>, IoU scores for each class.\n :param lstq: <float64: 1>, LiDAR Segmentation and Tracking Quality (LSTQ) score over all classes.\n :param s_assoc: <float64: 1>, Association Score over all classes.\n :param mean_motsa: <float64: 1>, Mean MOTSA score over all thing classes.\n :param mean_s_motsa: <float64: 1>, Mean sMOTSA score over all thing classes.\n :param mean_motsp: <float64: 1>, Mean MOTSP score over all thing classes.\n :return: A dict of multi-object panoptic tracking metrics.\n '
(pat, mean_pq, mean_tq) = (pat.item(), mean_pq.item(), mean_tq.item())
(mean_ptq, mean_sptq, mean_iou) = (mean_ptq.item(), mean_sptq.item(), mean_iou.item())
class_all_ptq = class_all_ptq.flatten().tolist()
class_all_sptq = class_all_sptq.flatten().tolist()
class_all_iou = class_all_iou.flatten().tolist()
results = dict()
results['all'] = dict(PAT=pat, PQ=mean_pq, TQ=mean_tq, PTQ=mean_ptq, sPTQ=mean_sptq, LSTQ=lstq, mIoU=mean_iou, S_assoc=s_assoc, MOTSA=mean_motsa, sMOTSA=mean_s_motsa, MOTSP=mean_motsp)
for (idx, (ptq, sptq, iou)) in enumerate(zip(class_all_ptq, class_all_sptq, class_all_iou)):
results[self.id2name[idx]] = dict(PTQ=ptq, sPTQ=sptq, IoU=iou)
thing_ptq_list = [float(results[c]['PTQ']) for c in self.things]
stuff_iou_list = [float(results[c]['IoU']) for c in self.stuff]
results['all']['PTQ_dagger'] = np.mean((thing_ptq_list + stuff_iou_list))
return results
def save_result(self, results: Dict[(str, Dict[(str, Any)])]) -> None:
'\n Dump evaluation results to result.json\n :param results: {task_name: task_results}, evaluation results in a dictionary.\n '
if self.out_dir:
os.makedirs(self.out_dir, exist_ok=True)
with open(self.eval_result_file, 'w') as f:
json.dump(results, f, indent=2)
else:
raise ValueError(f'Invalid output dir: {self.out_dir}')
if self.verbose:
print(f'''======
Panoptic nuScenes {self.task} evaluation for {self.eval_set}''')
print(json.dumps(results, indent=4, sort_keys=False))
print('======')
|
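# A small worked example (toy numbers, not used by the evaluator above) of the PQ formula quoted in the
# NuScenesPanopticEval docstring: PQ = sum(IoU over matches) / (TP + 0.5*FP + 0.5*FN). It factors into
# SQ = sum(IoU)/TP and RQ = TP/(TP + 0.5*FP + 0.5*FN), i.e. PQ = SQ * RQ, which is the same factorisation
# used by PanopticTrackingEval.get_pat further below.
def _toy_pq_example() -> float:
    matched_ious = np.array([0.9, 0.8, 0.75])  # IoU of each true-positive match (made-up values).
    (tp, fp, fn) = (len(matched_ious), 1, 2)
    sq = (matched_ious.sum() / tp)
    rq = (tp / ((tp + (0.5 * fp)) + (0.5 * fn)))
    return (sq * rq)  # == matched_ious.sum() / (tp + 0.5*fp + 0.5*fn) ~= 0.544
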
def main():
parser = argparse.ArgumentParser(description='Evaluate Panoptic nuScenes results.')
parser.add_argument('--result_path', type=str, help='The path to the results folder.')
parser.add_argument('--eval_set', type=str, default='val', help='Which dataset split to evaluate on, train, val or test.')
parser.add_argument('--task', type=str, default='segmentation', help='What task to evaluate, segmentation or tracking.')
parser.add_argument('--dataroot', type=str, default='/data/sets/nuscenes', help='Default nuScenes data directory.')
parser.add_argument('--version', type=str, default='v1.0-trainval', help='Which version of the nuScenes dataset to evaluate on, e.g. v1.0-trainval.')
parser.add_argument('--min_inst_points', type=int, default=15, help='Lower bound for the number of points to be considered instance.')
parser.add_argument('--verbose', type=bool, default=False, help='Whether to print to stdout.')
parser.add_argument('--out_dir', type=str, default=None, help='Folder to write the panoptic labels to.')
args = parser.parse_args()
out_dir = (args.out_dir if (args.out_dir is not None) else f'Panoptic-nuScenes-{args.version}')
task = args.task
submission_file = os.path.join(args.result_path, args.eval_set, 'submission.json')
if os.path.exists(submission_file):
print(submission_file)
with open(submission_file, 'r') as f:
data = json.load(f)
if (('meta' in data) and ('task' in data['meta'])):
task = data['meta']['task']
supported_tasks = ['segmentation', 'tracking']
if (task not in supported_tasks):
raise ValueError(f'Task must be one of: {supported_tasks}, got: {task}.')
print(f'''Start {task} evaluation...
Arguments: {args}''')
nusc = NuScenes(version=args.version, dataroot=args.dataroot, verbose=args.verbose)
evaluator = NuScenesPanopticEval(nusc=nusc, results_folder=args.result_path, eval_set=args.eval_set, task=task, min_inst_points=args.min_inst_points, out_dir=out_dir, verbose=args.verbose)
evaluator.evaluate()
print(f'''Evaluation results saved at {out_dir}/{task}-result.json.
Finished {task} evaluation.''')
|
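# Example invocation of the evaluation entry point above (script name and all paths are placeholders):
#   python evaluate.py --result_path /path/to/panoptic_results --eval_set val \
#       --dataroot /data/sets/nuscenes --version v1.0-trainval --task segmentation \
#       --out_dir /path/to/eval_output --verbose True
# The metrics are written to <out_dir>/<task>-result.json, as printed by main().
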
class PanopticTrackingEval(PanopticEval):
' Panoptic tracking evaluator. '
def __init__(self, n_classes: int, min_stuff_cls_id: int, ignore: List[int]=None, offset: int=(2 ** 32), min_points: int=30, iou_thr: float=0.5):
'\n :param n_classes: Number of classes.\n :param min_stuff_cls_id: Minimum stuff class index, 11 for Panoptic nuScenes challenge classes.\n :param ignore: List of ignored class index.\n :param offset: Largest instance number in a frame.\n :param min_points: minimal number of points to consider instances in GT.\n :param iou_thr: IoU threshold to consider as a true positive. Note "iou_thr > 0.5" is required for Panoptic\n Quality metric and its variants.\n '
super().__init__(n_classes=n_classes, ignore=ignore, offset=offset, min_points=min_points)
self.iou_thr = iou_thr
assert (self.iou_thr >= 0.5), f'IoU threshold must be >= 0.5, got {self.iou_thr}.'
self.min_stuff_cls_id = min_stuff_cls_id
self.px_iou_conf_matrix = np.zeros((self.n_classes, self.n_classes), dtype=np.int64)
self.pan_ids = np.zeros(self.n_classes, dtype=np.int64)
self.pan_soft_ids = np.zeros(self.n_classes, dtype=np.double)
self.pan_tp = np.zeros(self.n_classes, dtype=np.int64)
self.pan_iou = np.zeros(self.n_classes, dtype=np.double)
self.pan_fp = np.zeros(self.n_classes, dtype=np.int64)
self.pan_fn = np.zeros(self.n_classes, dtype=np.int64)
self.sequences = []
self.preds = {}
self.gts = {}
self.intersects = {}
self.intersects_ovr = {}
self.instance_preds = {}
self.instance_gts = {}
self.pan_aq = np.zeros(self.n_classes, dtype=np.double)
self.pan_aq_ovr = 0.0
@staticmethod
def update_dict_stat(stat_dict: Dict[(int, int)], unique_ids: np.ndarray, unique_cnts: np.ndarray) -> None:
'\n Update stats dict with new combo of ids and counts.\n :param stat_dict: {class_id: counts}, a dict of stats for the counts of each class.\n :param unique_ids: <np.int64, <k,>>, an array of class IDs.\n :param unique_cnts: <np.int64, <k,>>, an array of counts for corresponding class IDs.\n '
for (uniqueid, counts) in zip(unique_ids, unique_cnts):
if (uniqueid in stat_dict):
stat_dict[uniqueid] += counts
else:
stat_dict[uniqueid] = counts
def get_panoptic_track_stats(self, x_inst_in_cl: np.ndarray, y_inst_in_cl: np.ndarray, x_inst_row: np.ndarray=None, scene: str=None, cl: int=None) -> Tuple[(np.ndarray, np.ndarray, np.ndarray, np.ndarray, Dict[(int, int)], Dict[(int, int)], np.ndarray)]:
'\n Calculate class-specific panoptic tracking stats given predicted instances and target instances.\n :param x_inst_in_cl: <np.int64: num_points>, instance IDs of each point for predicted instances.\n :param y_inst_in_cl: <np.int64: num_points>, instance IDs of each point for target instances.\n :param x_inst_row: <np.int64: num_points>, class-agnostic instance IDs of each point for predicted instances.\n :param scene: str, name of scene.\n :param cl: int, semantic class id.\n :return: A tuple of MOPT stats:\n {\n counts_pred, # <np.int64, num_instances>, point counts of each predicted instance.\n counts_gt, # <np.int64, num_instances>, point counts of each ground truth instance.\n gt_labels, # <np.int64, num_instances>, instance ID of each ground truth instance.\n pred_labels, # <np.int64, num_instances>, instance ID of each predicted instance.\n id2idx_gt, # {instance ID: array index}, instance ID to array index mapping for ground truth instances.\n id2idx_pred, # {instance ID: array index}, instance ID to array index mapping for predicted instances.\n ious, # <np.float32, num_instances>, IoU scores between prediction and ground truth instance pair.\n }\n '
(unique_pred, counts_pred) = np.unique(x_inst_in_cl[(x_inst_in_cl > 0)], return_counts=True)
id2idx_pred = {inst_id: idx for (idx, inst_id) in enumerate(unique_pred)}
(unique_gt, counts_gt) = np.unique(y_inst_in_cl[(y_inst_in_cl > 0)], return_counts=True)
id2idx_gt = {inst_id: idx for (idx, inst_id) in enumerate(unique_gt)}
valid_combos = np.logical_and((x_inst_in_cl > 0), (y_inst_in_cl > 0))
offset_combo = (x_inst_in_cl[valid_combos] + (self.offset * y_inst_in_cl[valid_combos]))
(unique_combo, counts_combo) = np.unique(offset_combo, return_counts=True)
if ((scene is not None) and (cl < self.min_stuff_cls_id)):
cl_preds = self.preds[scene]
cl_gts = self.gts[scene][cl]
cl_intersects = self.intersects[scene][cl]
self.update_dict_stat(cl_gts, unique_gt[(counts_gt > self.min_points)], counts_gt[(counts_gt > self.min_points)])
self.update_dict_stat(cl_preds, unique_pred[(counts_pred > self.min_points)], counts_pred[(counts_pred > self.min_points)])
valid_combos_min_point = np.zeros_like(y_inst_in_cl)
for valid_id in unique_gt[(counts_gt > self.min_points)]:
valid_combos_min_point = np.logical_or(valid_combos_min_point, (y_inst_in_cl == valid_id))
y_inst_in_cl = (y_inst_in_cl * valid_combos_min_point)
valid_combos_ = np.logical_and((x_inst_row > 0), (y_inst_in_cl > 0))
offset_combo_ = (x_inst_row[valid_combos_] + (self.offset * y_inst_in_cl[valid_combos_]))
(unique_combo_, counts_combo_) = np.unique(offset_combo_, return_counts=True)
self.update_dict_stat(cl_intersects, unique_combo_, counts_combo_)
(unique_gt_, counts_gt_) = np.unique(y_inst_in_cl[(y_inst_in_cl > 0)], return_counts=True)
id2idx_gt_ = {inst_id: idx for (idx, inst_id) in enumerate(unique_gt_)}
(unique_pred_, counts_pred_) = np.unique(x_inst_row[(x_inst_row > 0)], return_counts=True)
id2idx_pred_ = {inst_id: idx for (idx, inst_id) in enumerate(unique_pred_)}
gt_labels_ = (unique_combo_ // self.offset)
pred_labels_ = (unique_combo_ % self.offset)
gt_areas_ = np.array([counts_gt_[id2idx_gt_[g_id]] for g_id in gt_labels_])
pred_areas_ = np.array([counts_pred_[id2idx_pred_[p_id]] for p_id in pred_labels_])
intersections_ = counts_combo_
unions_ = ((gt_areas_ + pred_areas_) - intersections_)
ious_agnostic = (intersections_.astype(np.float32) / unions_.astype(np.float32))
tp_indexes_agnostic = (ious_agnostic > 0.5)
matched_gt_ = np.array(([False] * len(id2idx_gt_)))
matched_gt_[[id2idx_gt_[g_id] for g_id in gt_labels_[tp_indexes_agnostic]]] = True
for (idx, value) in enumerate(tp_indexes_agnostic):
if value:
g_label = gt_labels_[idx]
p_label = pred_labels_[idx]
if (g_label not in self.instance_gts[scene][cl]):
self.instance_gts[scene][cl][g_label] = [p_label]
else:
self.instance_gts[scene][cl][g_label].append(p_label)
for g_label in unique_gt_:
if (not matched_gt_[id2idx_gt_[g_label]]):
if (g_label not in self.instance_gts[scene][cl]):
self.instance_gts[scene][cl][g_label] = [1]
else:
self.instance_gts[scene][cl][g_label].append(1)
gt_labels = (unique_combo // self.offset)
pred_labels = (unique_combo % self.offset)
gt_areas = np.array([counts_gt[id2idx_gt[g_id]] for g_id in gt_labels])
pred_areas = np.array([counts_pred[id2idx_pred[p_id]] for p_id in pred_labels])
intersections = counts_combo
unions = ((gt_areas + pred_areas) - intersections)
ious = (intersections.astype(np.float32) / unions.astype(np.float32))
return (counts_pred, counts_gt, gt_labels, pred_labels, id2idx_gt, id2idx_pred, ious)
def add_batch_panoptic(self, scene: str, x_sem_row: List[np.ndarray], x_inst_row: List[np.ndarray], y_sem_row: List[np.ndarray], y_inst_row: List[np.ndarray]) -> None:
'\n Add panoptic tracking metrics for one frame/batch.\n :param scene: str, name of scene.\n :param x_sem_row: [None, <np.int64: num_points>], predicted semantics.\n :param x_inst_row: [None, <np.uint64: num_points>], predicted instances.\n :param y_sem_row: [None, <np.int64: num_points>], target semantics.\n :param y_inst_row: [None, <np.uint64: num_points>], target instances.\n '
if (scene not in self.sequences):
self.sequences.append(scene)
self.preds[scene] = {}
self.gts[scene] = [{} for _ in range(self.n_classes)]
self.intersects[scene] = [{} for _ in range(self.n_classes)]
self.intersects_ovr[scene] = [{} for _ in range(self.n_classes)]
self.instance_preds[scene] = {}
self.instance_gts[scene] = [{} for _ in range(self.n_classes)]
x_inst_row[1] = (x_inst_row[1] + 1)
y_inst_row[1] = (y_inst_row[1] + 1)
for cl in self.ignore:
gt_not_in_excl_mask = (y_sem_row[1] != cl)
x_sem_row[1] = x_sem_row[1][gt_not_in_excl_mask]
y_sem_row[1] = y_sem_row[1][gt_not_in_excl_mask]
x_inst_row[1] = x_inst_row[1][gt_not_in_excl_mask]
y_inst_row[1] = y_inst_row[1][gt_not_in_excl_mask]
if (x_sem_row[0] is not None):
gt_not_in_excl_mask = (y_sem_row[0] != cl)
x_sem_row[0] = x_sem_row[0][gt_not_in_excl_mask]
y_sem_row[0] = y_sem_row[0][gt_not_in_excl_mask]
x_inst_row[0] = x_inst_row[0][gt_not_in_excl_mask]
y_inst_row[0] = y_inst_row[0][gt_not_in_excl_mask]
(unique_pred_, counts_pred_) = np.unique(x_inst_row[1][(x_inst_row[1] > 0)], return_counts=True)
for p_id in unique_pred_[(counts_pred_ > self.min_points)]:
if (p_id not in self.instance_preds[scene]):
self.instance_preds[scene][p_id] = 1
else:
self.instance_preds[scene][p_id] += 1
for cl in self.include:
(inst_prev, gt_labels_prev, tp_indexes_prev) = (None, None, None)
if (x_sem_row[0] is not None):
x_inst_in_cl_mask = (x_sem_row[0] == cl)
y_inst_in_cl_mask = (y_sem_row[0] == cl)
x_inst_in_cl = (x_inst_row[0] * x_inst_in_cl_mask.astype(np.int64))
y_inst_in_cl = (y_inst_row[0] * y_inst_in_cl_mask.astype(np.int64))
(_, _, gt_labels_prev, inst_prev, _, _, ious) = self.get_panoptic_track_stats(x_inst_in_cl, y_inst_in_cl)
tp_indexes_prev = (ious > self.iou_thr)
x_inst_in_cl_mask = (x_sem_row[1] == cl)
y_inst_in_cl_mask = (y_sem_row[1] == cl)
x_inst_in_cl = (x_inst_row[1] * x_inst_in_cl_mask.astype(np.int64))
y_inst_in_cl = (y_inst_row[1] * y_inst_in_cl_mask.astype(np.int64))
(counts_pred, counts_gt, gt_labels, pred_labels, id2idx_gt, id2idx_pred, ious) = self.get_panoptic_track_stats(x_inst_in_cl, y_inst_in_cl, x_inst_row[1], scene, cl)
inst_cur = pred_labels
tp_indexes = (ious > 0.5)
self.pan_tp[cl] += np.sum(tp_indexes)
self.pan_iou[cl] += np.sum(ious[tp_indexes])
matched_gt = np.array(([False] * len(id2idx_gt)))
matched_gt[[id2idx_gt[g_id] for g_id in gt_labels[tp_indexes]]] = True
matched_pred = np.array(([False] * len(id2idx_pred)))
matched_pred[[id2idx_pred[p_id] for p_id in pred_labels[tp_indexes]]] = True
self.pan_fn[cl] += np.sum(np.logical_and((counts_gt >= self.min_points), np.logical_not(matched_gt)))
self.pan_fp[cl] += np.sum(np.logical_and((counts_pred >= self.min_points), np.logical_not(matched_pred)))
if ((x_sem_row[0] is not None) and (cl < self.min_stuff_cls_id)):
(gt_labels_prev, gt_labels) = (gt_labels_prev[tp_indexes_prev], gt_labels[tp_indexes])
(inst_prev, inst_cur) = (inst_prev[tp_indexes_prev], inst_cur[tp_indexes])
ious = ious[tp_indexes]
(_, prev_ind, cur_ind) = np.intersect1d(gt_labels_prev, gt_labels, return_indices=True)
(ids, soft_ids) = (0, 0.0)
for (prev_i, cur_i) in zip(prev_ind, cur_ind):
if (inst_prev[prev_i] != inst_cur[cur_i]):
ids += 1
soft_ids += ious[cur_i]
self.pan_ids[cl] += ids
self.pan_soft_ids[cl] += soft_ids
def get_ptq(self) -> Tuple[(np.ndarray, np.ndarray, np.ndarray, np.ndarray)]:
'\n Calculate PTQ metrics.\n :return: (mean_PTQ, all_class_PTQ, mean_sPTQ, all_class_sPTQ).\n mean_PTQ: <float64, 1>, mean PTQ score over all classes.\n all_class_PTQ: <float64, num_classes,>, PTQ scores for all classes.\n mean_sPTQ: <float64, 1>, mean soft-PTQ score over all classes.\n all_class_sPTQ: <float64, num_classes,>, soft-PTQ scores for all classes.\n '
iou = self.pan_iou.astype(np.double)
(ids, soft_ids) = (self.pan_ids.astype(np.double), self.pan_soft_ids.astype(np.double))
(tp, fp) = (self.pan_tp.astype(np.double), self.pan_fp.astype(np.double))
(tp_eps, fn) = (np.maximum(tp, self.eps), self.pan_fn.astype(np.double))
tp_half_fp_half_fn_eps = np.maximum(((tp + (0.5 * fp)) + (0.5 * fn)), self.eps)
ptq_all = (((iou - ids) / tp_eps) * (tp / tp_half_fp_half_fn_eps))
soft_ptq_all = (((iou - soft_ids) / tp_eps) * (tp / tp_half_fp_half_fn_eps))
ground_truths = (tp + fn)
valid_classes = (ground_truths > 0.5)
for i in range(valid_classes.shape[0]):
if (i not in self.include):
valid_classes[i] = False
mean_ptq = ptq_all[valid_classes].mean()
mean_soft_ptq = soft_ptq_all[valid_classes].mean()
return (mean_ptq, ptq_all, mean_soft_ptq, soft_ptq_all)
def get_motsa(self) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
'\n Calculate MOTSA metrics.\n :return: (mean_MOTSA, mean_sMOTSA, mean_MOTSP).\n mean_MOTSA: <float64, 1>, mean MOTSA score over all thing classes.\n mean_sMOTSA: <float64, 1>, mean soft-MOTSA score over all thing classes.\n mean_MOTSP: <float64, 1>, mean MOTSP score over all thing classes.\n '
iou = self.pan_iou[1:self.min_stuff_cls_id].astype(np.double)
ids = self.pan_ids[1:self.min_stuff_cls_id].astype(np.double)
tp = self.pan_tp[1:self.min_stuff_cls_id].astype(np.double)
fp = self.pan_fp[1:self.min_stuff_cls_id].astype(np.double)
(tp_eps, fn) = (np.maximum(tp, self.eps), self.pan_fn[1:self.min_stuff_cls_id].astype(np.double))
ground_truths = (tp + fn)
valid_classes = (ground_truths > 0.5)
for i in range(valid_classes.shape[0]):
if ((i + 1) not in self.include):
valid_classes[i] = False
motsa = (((tp - fp) - ids)[valid_classes] / (tp_eps + fn)[valid_classes])
s_motsa = (((iou - fp) - ids)[valid_classes] / (tp_eps + fn)[valid_classes])
motsp = (iou[valid_classes] / tp_eps[valid_classes])
mean_motsa = motsa.mean()
mean_s_motsa = s_motsa.mean()
mean_motsp = motsp.mean()
return (mean_motsa, mean_s_motsa, mean_motsp)
def get_lstq(self) -> Tuple[(np.ndarray, np.ndarray)]:
'\n Calculate Lidar Segmentation and Tracking Quality (LSTQ) metric. https://arxiv.org/pdf/2102.12472.pdf\n :return: (LSTQ, S_assoc). LSTQ: <float64, 1>, LSTQ score over all classes. S_assoc: <float64, 1,>, S_assoc for\n all classes.\n '
num_tubes = ([0] * self.n_classes)
for seq in self.sequences:
for cl in self.include:
cl_preds = self.preds[seq]
cl_gts = self.gts[seq][cl]
cl_intersects = self.intersects[seq][cl]
outer_sum_iou = 0.0
num_tubes[cl] += len(cl_gts)
for (gt_id, gt_size) in cl_gts.items():
inner_sum_iou = 0.0
for (pr_id, pr_size) in cl_preds.items():
tpa_key = (pr_id + (self.offset * gt_id))
if (tpa_key in cl_intersects):
tpa_ovr = cl_intersects[tpa_key]
inner_sum_iou += (tpa_ovr * (tpa_ovr / ((gt_size + pr_size) - tpa_ovr)))
outer_sum_iou += (inner_sum_iou / float(gt_size))
self.pan_aq[cl] += outer_sum_iou
self.pan_aq_ovr += outer_sum_iou
s_assoc = (np.sum(self.pan_aq) / np.sum(num_tubes[1:self.min_stuff_cls_id]))
(s_cls, iou) = self.getSemIoU()
lstq = np.sqrt((s_assoc * s_cls))
return (lstq, s_assoc)
def get_pat(self) -> Tuple[(np.ndarray, np.ndarray, np.ndarray)]:
'\n Calculate Panoptic Tracking (PAT) metric. https://arxiv.org/pdf/2109.03805.pdf\n :return: (PAT, mean_PQ, mean_TQ).\n PAT: <float64, 1>, PAT score over all classes.\n mean_PQ: <float64, 1>, mean PQ scores over all classes.\n mean_TQ: <float64, 1>, mean TQ score over all classes.\n '
sq_all = (self.pan_iou.astype(np.double) / np.maximum(self.pan_tp.astype(np.double), self.eps))
rq_all = (self.pan_tp.astype(np.double) / np.maximum(((self.pan_tp.astype(np.double) + (0.5 * self.pan_fp.astype(np.double))) + (0.5 * self.pan_fn.astype(np.double))), self.eps))
pq_all = (sq_all * rq_all)
pq = pq_all[self.include].mean()
accumulate_tq = 0.0
accumulate_norm = 0
for seq in self.sequences:
preds = self.instance_preds[seq]
for cl in self.include:
cls_gts = self.instance_gts[seq][cl]
for (gt_id, pr_ids) in cls_gts.items():
(unique_pr_id, counts_pr_id) = np.unique(pr_ids, return_counts=True)
track_length = len(pr_ids)
(unique_pr_id, counts_pr_id) = (unique_pr_id[(unique_pr_id != 1)], counts_pr_id[(unique_pr_id != 1)])
fp_pr_id = []
for (idx, uid) in enumerate(unique_pr_id):
if (uid in preds):
fp_pr_id.append((preds[uid] - counts_pr_id[idx]))
else:
fp_pr_id.append(0)
fp_pr_id = np.array(fp_pr_id)
gt_id_aq = (np.sum(((counts_pr_id ** 2) / np.double((track_length + fp_pr_id)))) / np.double(track_length))
gt_id_is = 1.0
if (track_length > 1):
s_id = (- 1)
ids = 0
total_ids = (track_length - 1)
for pr_id in pr_ids:
if (s_id != (- 1)):
if ((pr_id != s_id) or (s_id == 1)):
ids += 1
s_id = pr_id
gt_id_is = (1 - (ids / np.double(total_ids)))
accumulate_tq += np.sqrt((gt_id_aq * gt_id_is))
accumulate_norm += 1
tq = np.array((accumulate_tq / accumulate_norm))
pat = (((2 * pq) * tq) / (pq + tq))
return (pat, pq, tq)
def add_batch(self, scene: str, x_sem: List[np.ndarray], x_inst: List[np.ndarray], y_sem: List[np.ndarray], y_inst: List[np.ndarray]) -> None:
'\n Add semantic IoU and panoptic tracking metrics for one frame/batch.\n :param scene: str, name of scene.\n :param x_sem: [None, <np.int64: num_points>], predicted semantics.\n :param x_inst: [None, <np.uint64: num_points>], predicted instances.\n :param y_sem: [None, <np.int64: num_points>], target semantics.\n :param y_inst: [None, <np.uint64: num_points>], target instances.\n '
self.addBatchSemIoU(x_sem[1], y_sem[1])
self.add_batch_panoptic(scene, x_sem, x_inst, y_sem, y_inst)
|
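# A standalone toy example (not used by the evaluator) of the pairing trick in
# PanopticTrackingEval.get_panoptic_track_stats above: every overlapping (prediction, ground-truth)
# instance pair is encoded as pred_id + offset * gt_id, so a single np.unique over that encoding yields
# the per-pair intersection counts, and // offset and % offset recover the pair again for the IoU.
def _toy_pair_iou_example() -> List[Tuple[(int, int, float)]]:
    offset = (2 ** 32)
    x_inst = np.array([1, 1, 1, 2, 2, 0])  # predicted instance id per point (0 = no instance).
    y_inst = np.array([7, 7, 8, 8, 8, 8])  # ground-truth instance id per point.
    valid = np.logical_and((x_inst > 0), (y_inst > 0))
    combo = (x_inst[valid] + (offset * y_inst[valid]))
    (unique_combo, intersections) = np.unique(combo, return_counts=True)
    (gt_ids, pred_ids) = ((unique_combo // offset), (unique_combo % offset))
    gt_areas = np.array([np.sum((y_inst == g)) for g in gt_ids])
    pred_areas = np.array([np.sum((x_inst == p)) for p in pred_ids])
    ious = (intersections / ((gt_areas + pred_areas) - intersections))
    return [(int(g), int(p), float(i)) for (g, p, i) in zip(gt_ids, pred_ids, ious)]  # ~ [(7, 1, 0.67), (8, 1, 0.17), (8, 2, 0.5)]
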
class PanopticClassMapper(LidarsegClassMapper):
"\n Maps the general (fine) classes to the challenge (coarse) classes in the Panoptic nuScenes challenge.\n\n Example usage::\n nusc_ = NuScenes(version='v1.0-mini', dataroot='/data/sets/nuscenes', verbose=True)\n mapper_ = PanopticClassMapper(nusc_)\n "
def __init__(self, nusc: NuScenes):
'\n Initialize a PanopticClassMapper object.\n :param nusc: A NuScenes object.\n '
super(PanopticClassMapper, self).__init__(nusc)
self.things = self.get_things()
self.stuff = self.get_stuff()
def get_stuff(self) -> Dict[(str, int)]:
"\n Returns the mapping from the challenge (coarse) class names to the challenge class indices for stuff.\n :return: A dictionary containing the mapping from the challenge class names to the challenge class indices for\n stuff.\n {\n 'driveable_surface': 11,\n 'other_flat': 12,\n 'sidewalk': 13,\n 'terrain': 14,\n 'manmade': 15,\n 'vegetation': 16\n }\n "
stuff_names = {'driveable_surface', 'other_flat', 'sidewalk', 'terrain', 'manmade', 'vegetation'}
coarse_name_to_id = self.get_coarse2idx()
assert (stuff_names <= set(coarse_name_to_id.keys())), 'Invalid stuff names, please check!'
stuff_name_to_id = {name: coarse_name_to_id[name] for name in stuff_names}
return stuff_name_to_id
def get_things(self) -> Dict[(str, int)]:
"\n Returns the mapping from the challenge (coarse) class names to the challenge class indices for things.\n :return: A dictionary containing the mapping from the challenge class names to the challenge class indices for\n things.\n {\n 'barrier': 1,\n 'bicycle': 2,\n 'bus': 3,\n 'car': 4,\n 'construction_vehicle': 5,\n 'motorcycle': 6,\n 'pedestrian': 7,\n 'traffic_cone': 8,\n 'trailer': 9,\n 'truck': 10\n }\n "
thing_names = {'barrier', 'bicycle', 'bus', 'car', 'construction_vehicle', 'motorcycle', 'pedestrian', 'traffic_cone', 'trailer', 'truck'}
coarse_name_to_id = self.get_coarse2idx()
assert (thing_names <= set(coarse_name_to_id.keys())), 'Invalid thing names, please check!'
thing_name_to_id = {name: coarse_name_to_id[name] for name in thing_names}
return thing_name_to_id
|
def main(version: str, data_root: str, split_name: str, output_dir: str, config_name: str='predict_2020_icra.json') -> None:
'\n Performs inference for all of the baseline models defined in the physics model module.\n :param version: nuScenes dataset version.\n :param data_root: Directory where the NuScenes data is stored.\n :param split_name: nuScenes data split name, e.g. train, val, mini_train, etc.\n :param output_dir: Directory where predictions should be stored.\n :param config_name: Name of config file.\n '
nusc = NuScenes(version=version, dataroot=data_root)
helper = PredictHelper(nusc)
dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
config = load_prediction_config(helper, config_name)
oracle = PhysicsOracle(config.seconds, helper)
cv_heading = ConstantVelocityHeading(config.seconds, helper)
cv_preds = []
oracle_preds = []
for token in dataset:
cv_preds.append(cv_heading(token).serialize())
oracle_preds.append(oracle(token).serialize())
json.dump(cv_preds, open(os.path.join(output_dir, 'cv_preds.json'), 'w'))
json.dump(oracle_preds, open(os.path.join(output_dir, 'oracle_preds.json'), 'w'))
|
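# Purely illustrative sketch of what a constant-velocity baseline does conceptually: extrapolate the
# current 2D position with the current velocity at a fixed prediction rate. This is NOT the devkit's
# ConstantVelocityHeading or PhysicsOracle implementation (those live in the physics model module used
# by main() above); the 6 s horizon and 2 Hz rate mirror the PredictionConfig defaults further below.
def _constant_velocity_sketch(xy: np.ndarray, velocity_xy: np.ndarray, seconds: int=6, frequency: int=2) -> np.ndarray:
    """ Return a (seconds * frequency, 2) array of future positions under constant velocity. """
    step_times = (np.arange(1, ((seconds * frequency) + 1)) / float(frequency))  # 0.5 s, 1.0 s, ...
    return (xy[None, :] + (step_times[:, None] * velocity_xy[None, :]))
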
def compute_metrics(predictions: List[Dict[(str, Any)]], helper: PredictHelper, config: PredictionConfig) -> Dict[(str, Any)]:
'\n Computes metrics from a set of predictions.\n :param predictions: List of prediction JSON objects.\n :param helper: Instance of PredictHelper that wraps the nuScenes val set.\n :param config: Config file.\n :return: Metrics. Nested dictionary where keys are metric names and value is a dictionary\n mapping the Aggregator name to the results.\n '
n_preds = len(predictions)
containers = {metric.name: np.zeros((n_preds, metric.shape)) for metric in config.metrics}
for (i, prediction_str) in enumerate(predictions):
prediction = Prediction.deserialize(prediction_str)
ground_truth = helper.get_future_for_agent(prediction.instance, prediction.sample, config.seconds, in_agent_frame=False)
for metric in config.metrics:
containers[metric.name][i] = metric(ground_truth, prediction)
aggregations: Dict[(str, Dict[(str, List[float])])] = defaultdict(dict)
for metric in config.metrics:
for agg in metric.aggregators:
aggregations[metric.name][agg.name] = agg(containers[metric.name])
return aggregations
|
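# Illustrative sketch (with hypothetical stand-in values, not the devkit's Metric/Aggregator classes) of
# the shapes built in compute_metrics above: each metric fills one row of width metric.shape per
# prediction, and each aggregator reduces the resulting (n_preds, shape) array to a list of floats keyed
# by the aggregator's name.
def _toy_aggregation_example() -> Dict[(str, Dict[(str, List[float])])]:
    container = np.array([[1.0, 2.0], [3.0, 4.0]])  # two predictions, metric.shape == 2.
    aggregations: Dict[(str, Dict[(str, List[float])])] = defaultdict(dict)
    aggregations['toy_metric']['Mean'] = np.mean(container, axis=0).tolist()  # [2.0, 3.0]
    return aggregations
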
def main(version: str, data_root: str, submission_path: str, config_name: str='predict_2020_icra.json') -> None:
'\n Computes metrics for a submission stored in submission_path with a given submission_name with the metrics\n specified by the config_name.\n :param version: nuScenes dataset version.\n :param data_root: Directory storing NuScenes data.\n :param submission_path: Directory storing submission.\n :param config_name: Name of config file.\n '
predictions = json.load(open(submission_path, 'r'))
nusc = NuScenes(version=version, dataroot=data_root)
helper = PredictHelper(nusc)
config = load_prediction_config(helper, config_name)
results = compute_metrics(predictions, helper, config)
json.dump(results, open(submission_path.replace('.json', '_metrics.json'), 'w'), indent=2)
|
class PredictionConfig():
def __init__(self, metrics: List[Metric], seconds: int=6, frequency: int=2):
'\n Data class that specifies the prediction evaluation settings.\n Initialized with:\n metrics: List of nuscenes.eval.prediction.metric.Metric objects.\n seconds: Number of seconds to predict for each agent.\n frequency: Rate at which prediction is made, in Hz.\n '
self.metrics = metrics
self.seconds = seconds
self.frequency = frequency
def serialize(self) -> Dict[(str, Any)]:
' Serialize instance into json-friendly format. '
return {'metrics': [metric.serialize() for metric in self.metrics], 'seconds': self.seconds}
@classmethod
def deserialize(cls, content: Dict[(str, Any)], helper: PredictHelper):
' Initialize from serialized dictionary. '
return cls([deserialize_metric(metric, helper) for metric in content['metrics']], seconds=content['seconds'])
|
def load_prediction_config(helper: PredictHelper, config_name: str='predict_2020_icra.json') -> PredictionConfig:
'\n Loads a PredictionConfig from json file stored in eval/prediction/configs.\n :param helper: Instance of PredictHelper. Needed for OffRoadRate metric.\n :param config_name: Name of json config file.\n :return: PredictionConfig.\n '
this_dir = os.path.dirname(os.path.abspath(__file__))
cfg_path = os.path.join(this_dir, 'configs', config_name)
assert os.path.exists(cfg_path), f'Requested unknown configuration {cfg_path}'
with open(cfg_path, 'r') as f:
config = json.load(f)
return PredictionConfig.deserialize(config, helper)
|
class Prediction(MetricData):
'\n Stores predictions of Models.\n Metrics are calculated from Predictions.\n\n Attributes:\n instance: Instance token for prediction.\n sample: Sample token for prediction.\n prediction: Prediction of model [num_modes, n_timesteps, state_dim].\n probabilities: Probabilities of each mode [num_modes].\n '
def __init__(self, instance: str, sample: str, prediction: np.ndarray, probabilities: np.ndarray):
self.is_valid(instance, sample, prediction, probabilities)
self.instance = instance
self.sample = sample
self.prediction = prediction
self.probabilities = probabilities
@property
def number_of_modes(self) -> int:
return self.prediction.shape[0]
def serialize(self):
' Serialize to json. '
return {'instance': self.instance, 'sample': self.sample, 'prediction': self.prediction.tolist(), 'probabilities': self.probabilities.tolist()}
@classmethod
def deserialize(cls, content: Dict[(str, Any)]):
' Initialize from serialized content. '
return cls(instance=content['instance'], sample=content['sample'], prediction=np.array(content['prediction']), probabilities=np.array(content['probabilities']))
@staticmethod
def is_valid(instance, sample, prediction, probabilities):
if (not isinstance(prediction, np.ndarray)):
raise ValueError(f'Error: prediction must be of type np.ndarray. Received {str(type(prediction))}.')
if (not isinstance(probabilities, np.ndarray)):
raise ValueError(f'Error: probabilities must be of type np.ndarray. Received {type(probabilities)}.')
if (not isinstance(instance, str)):
raise ValueError(f'Error: instance token must be of type string. Received {type(instance)}')
if (not isinstance(sample, str)):
raise ValueError(f'Error: sample token must be of type string. Received {type(sample)}.')
if (prediction.ndim != 3):
raise ValueError(f'''Error: prediction must have three dimensions (number of modes, number of timesteps, 2).
Received {prediction.ndim}''')
if (probabilities.ndim != 1):
raise ValueError(f'Error: probabilities must be a single dimension. Received {probabilities.ndim}.')
if (len(probabilities) != prediction.shape[0]):
raise ValueError(f'''Error: there must be the same number of probabilities as predicted modes.
Received {len(probabilities)} probabilities and {prediction.shape[0]} modes.''')
if (prediction.shape[0] > MAX_NUMBER_OF_MODES):
raise ValueError(f'Error: prediction contains more than {MAX_NUMBER_OF_MODES} modes.')
def __repr__(self):
return f'Prediction(instance={self.instance}, sample={self.sample}, prediction={self.prediction}, probabilities={self.probabilities})'
|
def get_prediction_challenge_split(split: str, dataroot: str='/data/sets/nuscenes') -> List[str]:
"\n Gets a list of {instance_token}_{sample_token} strings for each split.\n :param split: One of 'mini_train', 'mini_val', 'train', 'val'.\n :param dataroot: Path to the nuScenes dataset.\n :return: List of tokens belonging to the split. Format {instance_token}_{sample_token}.\n "
if (split not in {'mini_train', 'mini_val', 'train', 'train_val', 'val'}):
raise ValueError('split must be one of (mini_train, mini_val, train, train_val, val)')
if (split == 'train_val'):
split_name = 'train'
else:
split_name = split
path_to_file = os.path.join(dataroot, 'maps', 'prediction', 'prediction_scenes.json')
prediction_scenes = json.load(open(path_to_file, 'r'))
scenes = create_splits_scenes()
scenes_for_split = scenes[split_name]
if (split == 'train'):
scenes_for_split = scenes_for_split[NUM_IN_TRAIN_VAL:]
if (split == 'train_val'):
scenes_for_split = scenes_for_split[:NUM_IN_TRAIN_VAL]
token_list_for_scenes = map((lambda scene: prediction_scenes.get(scene, [])), scenes_for_split)
return list(chain.from_iterable(token_list_for_scenes))
|
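# Minimal usage sketch for the split helper above: every returned token is the concatenation
# "<instance_token>_<sample_token>" (nuScenes tokens contain no underscores), matching the
# instance/sample fields carried by Prediction objects during evaluation. The helper name is hypothetical.
def _split_prediction_token(token: str) -> Tuple[(str, str)]:
    (instance_token, sample_token) = token.split('_')
    return (instance_token, sample_token)
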
def load_model(helper: PredictHelper, config: PredictionConfig, path_to_model_weights: str) -> Any:
' Loads model with desired weights. '
return ConstantVelocityHeading(config.seconds, helper)
|
def do_inference_for_submission(helper: PredictHelper, config: PredictionConfig, dataset_tokens: List[str]) -> List[Prediction]:
'\n Currently, this will make a submission with a constant velocity and heading model.\n Fill in all the code needed to run your model on the test set here. You do not need to worry\n about providing any of the parameters to this function since they are provided by the main function below.\n You can test if your script works by evaluating on the val set.\n :param helper: Instance of PredictHelper that wraps the nuScenes test set.\n :param config: Instance of PredictionConfig.\n :param dataset_tokens: Tokens of instance_sample pairs in the test set.\n :return: List of predictions.\n '
path_to_model_weights = ''
cv_heading = load_model(helper, config, path_to_model_weights)
cv_preds = []
for token in dataset_tokens:
cv_preds.append(cv_heading(token))
return cv_preds
|
def main(version: str, data_root: str, split_name: str, output_dir: str, submission_name: str, config_name: str) -> None:
'\n Makes predictions for a submission to the nuScenes prediction challenge.\n :param version: NuScenes version.\n :param data_root: Directory storing NuScenes data.\n :param split_name: Data split to run inference on.\n :param output_dir: Directory to store the output file.\n :param submission_name: Name of the submission to use for the results file.\n :param config_name: Name of config file to use.\n '
nusc = NuScenes(version=version, dataroot=data_root)
helper = PredictHelper(nusc)
dataset = get_prediction_challenge_split(split_name, dataroot=data_root)
config = load_prediction_config(helper, config_name)
predictions = do_inference_for_submission(helper, config, dataset)
predictions = [prediction.serialize() for prediction in predictions]
json.dump(predictions, open(os.path.join(output_dir, f'{submission_name}_inference.json'), 'w'))
|
class TestPrediction(unittest.TestCase):
def test(self):
prediction = Prediction('instance', 'sample', np.ones((2, 2, 2)), np.zeros(2))
self.assertEqual(prediction.number_of_modes, 2)
self.assertDictEqual(prediction.serialize(), {'instance': 'instance', 'sample': 'sample', 'prediction': [[[1, 1], [1, 1]], [[1, 1], [1, 1]]], 'probabilities': [0, 0]})
|
class TrackingEvaluation(object):
def __init__(self, tracks_gt: Dict[(str, Dict[(int, List[TrackingBox])])], tracks_pred: Dict[(str, Dict[(int, List[TrackingBox])])], class_name: str, dist_fcn: Callable, dist_th_tp: float, min_recall: float, num_thresholds: int, metric_worst: Dict[(str, float)], verbose: bool=True, output_dir: str=None, render_classes: List[str]=None):
'\n Create a TrackingEvaluation object which computes all metrics for a given class.\n :param tracks_gt: The ground-truth tracks.\n :param tracks_pred: The predicted tracks.\n :param class_name: The current class we are evaluating on.\n :param dist_fcn: The distance function used for evaluation.\n :param dist_th_tp: The distance threshold used to determine matches.\n :param min_recall: The minimum recall value below which we drop thresholds due to too much noise.\n :param num_thresholds: The number of recall thresholds from 0 to 1. Note that some of these may be dropped.\n :param metric_worst: Mapping from metric name to the fallback value assigned if a recall threshold\n is not achieved.\n :param verbose: Whether to print to stdout.\n :param output_dir: Output directory to save renders.\n :param render_classes: Classes to render to disk or None.\n\n Computes the metrics defined in:\n - Stiefelhagen 2008: Evaluating Multiple Object Tracking Performance: The CLEAR MOT Metrics.\n MOTA, MOTP\n - Nevatia 2008: Global Data Association for Multi-Object Tracking Using Network Flows.\n MT/PT/ML\n - Weng 2019: "A Baseline for 3D Multi-Object Tracking".\n AMOTA/AMOTP\n '
self.tracks_gt = tracks_gt
self.tracks_pred = tracks_pred
self.class_name = class_name
self.dist_fcn = dist_fcn
self.dist_th_tp = dist_th_tp
self.min_recall = min_recall
self.num_thresholds = num_thresholds
self.metric_worst = metric_worst
self.verbose = verbose
self.output_dir = output_dir
self.render_classes = ([] if (render_classes is None) else render_classes)
self.n_scenes = len(self.tracks_gt)
def name_gen(_threshold):
return ('thr_%.4f' % _threshold)
self.name_gen = name_gen
for metric_name in MOT_METRIC_MAP.values():
assert ((metric_name == '') or (metric_name in TRACKING_METRICS))
def accumulate(self) -> TrackingMetricData:
'\n Compute metrics for all recall thresholds of the current class.\n :return: TrackingMetricData instance which holds the metrics for each threshold.\n '
if self.verbose:
print(('Computing metrics for class %s...\n' % self.class_name))
accumulators = []
thresh_metrics = []
md = TrackingMetricData()
gt_box_count = 0
gt_track_ids = set()
for scene_tracks_gt in self.tracks_gt.values():
for frame_gt in scene_tracks_gt.values():
for box in frame_gt:
if (box.tracking_name == self.class_name):
gt_box_count += 1
gt_track_ids.add(box.tracking_id)
if (gt_box_count == 0):
return md
mh = create_motmetrics()
(thresholds, recalls) = self.compute_thresholds(gt_box_count)
md.confidence = thresholds
md.recall_hypo = recalls
if self.verbose:
print('Computed thresholds\n')
for (t, threshold) in enumerate(thresholds):
if np.isnan(threshold):
continue
if (threshold in thresholds[:t]):
continue
(acc, _) = self.accumulate_threshold(threshold)
accumulators.append(acc)
thresh_name = self.name_gen(threshold)
thresh_summary = mh.compute(acc, metrics=MOT_METRIC_MAP.keys(), name=thresh_name)
thresh_metrics.append(thresh_summary)
if self.verbose:
print_threshold_metrics(thresh_summary.to_dict())
if (len(thresh_metrics) == 0):
summary = []
else:
summary = pandas.concat(thresh_metrics)
unachieved_thresholds = np.sum(np.isnan(thresholds))
duplicate_thresholds = (len(thresholds) - len(np.unique(thresholds)))
assert (((unachieved_thresholds + duplicate_thresholds) + len(thresh_metrics)) == self.num_thresholds)
valid_thresholds = [t for t in thresholds if (not np.isnan(t))]
assert (valid_thresholds == sorted(valid_thresholds))
rep_counts = [np.sum((thresholds == t)) for t in np.unique(valid_thresholds)]
for (mot_name, metric_name) in MOT_METRIC_MAP.items():
if (metric_name == ''):
continue
if (len(thresh_metrics) == 0):
worst = self.metric_worst[metric_name]
if (worst == (- 1)):
if (metric_name == 'ml'):
worst = len(gt_track_ids)
elif (metric_name in ['gt', 'fn']):
worst = gt_box_count
elif (metric_name in ['fp', 'ids', 'frag']):
worst = np.nan
else:
raise NotImplementedError
all_values = ([worst] * TrackingMetricData.nelem)
else:
values = summary.get(mot_name).values
assert np.all((values[np.logical_not(np.isnan(values))] >= 0))
assert (len(rep_counts) == len(values))
values = np.concatenate([([v] * r) for (v, r) in zip(values, rep_counts)])
all_values = ([np.nan] * unachieved_thresholds)
all_values.extend(values)
assert (len(all_values) == TrackingMetricData.nelem)
md.set_metric(metric_name, all_values)
return md
def accumulate_threshold(self, threshold: float=None) -> Tuple[(pandas.DataFrame, List[float])]:
'\n Accumulate metrics for a particular recall threshold of the current class.\n The scores are only computed if threshold is set to None. This is used to infer the recall thresholds.\n :param threshold: score threshold used to determine positives and negatives.\n :return: (The MOTAccumulator that stores all the hits/misses/etc, Scores for each TP).\n '
accs = []
scores = []
for scene_id in tqdm.tqdm(self.tracks_gt.keys(), disable=(not self.verbose), leave=False):
acc = MOTAccumulatorCustom()
frame_id = 0
scene_tracks_gt = self.tracks_gt[scene_id]
scene_tracks_pred = self.tracks_pred[scene_id]
if ((self.class_name in self.render_classes) and (threshold is None)):
save_path = os.path.join(self.output_dir, 'render', str(scene_id), self.class_name)
os.makedirs(save_path, exist_ok=True)
renderer = TrackingRenderer(save_path)
else:
renderer = None
for timestamp in scene_tracks_gt.keys():
frame_gt = scene_tracks_gt[timestamp]
frame_pred = scene_tracks_pred[timestamp]
frame_gt = [f for f in frame_gt if (f.tracking_name == self.class_name)]
frame_pred = [f for f in frame_pred if (f.tracking_name == self.class_name)]
if (threshold is not None):
frame_pred = [f for f in frame_pred if (f.tracking_score >= threshold)]
gt_ids = [gg.tracking_id for gg in frame_gt]
pred_ids = [tt.tracking_id for tt in frame_pred]
if ((len(gt_ids) == 0) and (len(pred_ids) == 0)):
continue
assert (self.dist_fcn.__name__ == 'center_distance')
if ((len(frame_gt) == 0) or (len(frame_pred) == 0)):
distances = np.ones((0, 0))
else:
gt_boxes = np.array([b.translation[:2] for b in frame_gt])
pred_boxes = np.array([b.translation[:2] for b in frame_pred])
distances = sklearn.metrics.pairwise.euclidean_distances(gt_boxes, pred_boxes)
assert ((len(distances) == 0) or (not np.all(np.isnan(distances))))
distances[(distances >= self.dist_th_tp)] = np.nan
acc.update(gt_ids, pred_ids, distances, frameid=frame_id)
if (threshold is None):
events = acc.events.loc[frame_id]
matches = events[(events.Type == 'MATCH')]
match_ids = matches.HId.values
match_scores = [tt.tracking_score for tt in frame_pred if (tt.tracking_id in match_ids)]
scores.extend(match_scores)
else:
events = None
if ((self.class_name in self.render_classes) and (threshold is None)):
renderer.render(events, timestamp, frame_gt, frame_pred)
frame_id += 1
accs.append(acc)
acc_merged = MOTAccumulatorCustom.merge_event_dataframes(accs)
return (acc_merged, scores)
def compute_thresholds(self, gt_box_count: int) -> Tuple[(List[float], List[float])]:
'\n Compute the score thresholds for predefined recall values.\n AMOTA/AMOTP average over all thresholds, whereas MOTA/MOTP/.. pick the threshold with the highest MOTA.\n :param gt_box_count: The number of GT boxes for this class.\n :return: The lists of thresholds and their recall values.\n '
(_, scores) = self.accumulate_threshold(threshold=None)
if (len(scores) == 0):
return (([np.nan] * self.num_thresholds), ([np.nan] * self.num_thresholds))
scores = np.array(scores)
scores.sort()
scores = scores[::(- 1)]
tps = np.array(range(1, (len(scores) + 1)))
rec = (tps / gt_box_count)
assert ((len(scores) / gt_box_count) <= 1)
max_recall_achieved = np.max(rec)
rec_interp = np.linspace(self.min_recall, 1, self.num_thresholds).round(12)
thresholds = np.interp(rec_interp, rec, scores, right=0)
thresholds[(rec_interp > max_recall_achieved)] = np.nan
thresholds = list(thresholds.tolist())
rec_interp = list(rec_interp.tolist())
thresholds.reverse()
rec_interp.reverse()
assert (len(thresholds) == len(rec_interp) == self.num_thresholds)
return (thresholds, rec_interp)
|
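# A toy walk-through (values are made up, not used by TrackingEvaluation) of the recall-threshold
# interpolation in compute_thresholds above: sort the matched scores in descending order, turn their
# cumulative count into a recall curve, then read off the score needed to reach each target recall with
# np.interp; target recalls the tracker never reaches get NaN. The evaluator then reverses both lists.
def _toy_threshold_example() -> np.ndarray:
    scores = np.array([0.9, 0.8, 0.6, 0.3])  # matched tracking scores, already sorted descending.
    gt_box_count = 8
    rec = (np.arange(1, (len(scores) + 1)) / gt_box_count)  # 0.125, 0.25, 0.375, 0.5
    rec_interp = np.linspace(0.1, 1, 4).round(12)  # target recalls: 0.1, 0.4, 0.7, 1.0
    thresholds = np.interp(rec_interp, rec, scores, right=0)
    thresholds[(rec_interp > np.max(rec))] = np.nan
    return thresholds  # approx. [0.9, 0.54, nan, nan]
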
class TrackingConfig():
' Data class that specifies the tracking evaluation settings. '
def __init__(self, tracking_names: List[str], pretty_tracking_names: Dict[(str, str)], tracking_colors: Dict[(str, str)], class_range: Dict[(str, int)], dist_fcn: str, dist_th_tp: float, min_recall: float, max_boxes_per_sample: float, metric_worst: Dict[(str, float)], num_thresholds: int):
assert (set(class_range.keys()) == set(tracking_names)), 'Class count mismatch.'
global TRACKING_NAMES
TRACKING_NAMES = tracking_names
self.tracking_names = tracking_names
self.pretty_tracking_names = pretty_tracking_names
self.tracking_colors = tracking_colors
self.class_range = class_range
self.dist_fcn = dist_fcn
self.dist_th_tp = dist_th_tp
self.min_recall = min_recall
self.max_boxes_per_sample = max_boxes_per_sample
self.metric_worst = metric_worst
self.num_thresholds = num_thresholds
TrackingMetricData.set_nelem(num_thresholds)
self.class_names = sorted(self.class_range.keys())
def __eq__(self, other):
eq = True
for key in self.serialize().keys():
eq = (eq and np.array_equal(getattr(self, key), getattr(other, key)))
return eq
def serialize(self) -> dict:
' Serialize instance into json-friendly format. '
return {'tracking_names': self.tracking_names, 'pretty_tracking_names': self.pretty_tracking_names, 'tracking_colors': self.tracking_colors, 'class_range': self.class_range, 'dist_fcn': self.dist_fcn, 'dist_th_tp': self.dist_th_tp, 'min_recall': self.min_recall, 'max_boxes_per_sample': self.max_boxes_per_sample, 'metric_worst': self.metric_worst, 'num_thresholds': self.num_thresholds}
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized dictionary. '
return cls(content['tracking_names'], content['pretty_tracking_names'], content['tracking_colors'], content['class_range'], content['dist_fcn'], content['dist_th_tp'], content['min_recall'], content['max_boxes_per_sample'], content['metric_worst'], content['num_thresholds'])
@property
def dist_fcn_callable(self):
' Return the distance function corresponding to the dist_fcn string. '
if (self.dist_fcn == 'center_distance'):
return center_distance
else:
raise Exception(('Error: Unknown distance function %s!' % self.dist_fcn))
|
class TrackingMetricData(MetricData):
' This class holds accumulated and interpolated data required to calculate the tracking metrics. '
nelem = None
metrics = list((set(TRACKING_METRICS) - set(AMOT_METRICS)))
def __init__(self):
assert (TrackingMetricData.nelem is not None)
init = np.full(TrackingMetricData.nelem, np.nan)
self.confidence = init
self.recall_hypo = init
self.recall = init
self.motar = init
self.mota = init
self.motp = init
self.faf = init
self.gt = init
self.tp = init
self.mt = init
self.ml = init
self.fp = init
self.fn = init
self.ids = init
self.frag = init
self.tid = init
self.lgd = init
def __eq__(self, other):
eq = True
for key in self.serialize().keys():
eq = (eq and np.array_equal(getattr(self, key), getattr(other, key)))
return eq
def __setattr__(self, *args, **kwargs):
assert (len(args) == 2)
name = args[0]
values = np.array(args[1])
assert ((values is None) or (len(values) == TrackingMetricData.nelem))
super(TrackingMetricData, self).__setattr__(name, values)
def set_metric(self, metric_name: str, values: np.ndarray) -> None:
' Sets the specified metric. '
self.__setattr__(metric_name, values)
def get_metric(self, metric_name: str) -> np.ndarray:
' Returns the specified metric. '
return self.__getattribute__(metric_name)
@property
def max_recall_ind(self):
' Returns index of max recall achieved. '
non_zero = np.nonzero(self.confidence)[0]
if (len(non_zero) == 0):
max_recall_ind = 0
else:
max_recall_ind = non_zero[(- 1)]
return max_recall_ind
@property
def max_recall(self):
' Returns max recall achieved. '
return self.recall[self.max_recall_ind]
def serialize(self):
' Serialize instance into json-friendly format. '
ret_dict = dict()
for metric_name in (['confidence', 'recall_hypo'] + TrackingMetricData.metrics):
ret_dict[metric_name] = self.get_metric(metric_name).tolist()
return ret_dict
@classmethod
def set_nelem(cls, nelem: int) -> None:
cls.nelem = nelem
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized content. '
md = cls()
for metric in (['confidence', 'recall_hypo'] + TrackingMetricData.metrics):
md.set_metric(metric, content[metric])
return md
@classmethod
def no_predictions(cls):
' Returns an md instance corresponding to having no predictions. '
md = cls()
md.confidence = np.zeros(cls.nelem)
for metric in TrackingMetricData.metrics:
md.set_metric(metric, np.zeros(cls.nelem))
md.recall = np.linspace(0, 1, cls.nelem)
return md
@classmethod
def random_md(cls):
' Returns an md instance corresponding to random results. '
md = cls()
md.confidence = np.linspace(0, 1, cls.nelem)[::(- 1)]
for metric in TrackingMetricData.metrics:
md.set_metric(metric, np.random.random(cls.nelem))
md.recall = np.linspace(0, 1, cls.nelem)
return md
|
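# Small usage sketch for TrackingMetricData above: the number of recall thresholds must be set on the
# class before any instance is created (TrackingConfig.__init__ above does this via set_nelem), after
# which placeholder curves such as no_predictions() can be built.
def _toy_metric_data_example() -> TrackingMetricData:
    TrackingMetricData.set_nelem(10)
    md = TrackingMetricData.no_predictions()
    assert (len(md.recall) == TrackingMetricData.nelem)
    return md
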
class TrackingMetrics():
' Stores tracking metric results. Provides properties to summarize. '
def __init__(self, cfg: TrackingConfig):
self.cfg = cfg
self.eval_time = None
self.label_metrics: Dict[(str, Dict[(str, float)])] = {}
self.class_names = self.cfg.class_names
self.metric_names = [l for l in TRACKING_METRICS]
for metric_name in self.metric_names:
self.label_metrics[metric_name] = {}
for class_name in self.class_names:
self.label_metrics[metric_name][class_name] = np.nan
def add_label_metric(self, metric_name: str, tracking_name: str, value: float) -> None:
assert (metric_name in self.label_metrics)
self.label_metrics[metric_name][tracking_name] = float(value)
def add_runtime(self, eval_time: float) -> None:
self.eval_time = eval_time
def compute_metric(self, metric_name: str, class_name: str='all') -> float:
if (class_name == 'all'):
data = list(self.label_metrics[metric_name].values())
if (len(data) > 0):
if (metric_name in ['mt', 'ml', 'tp', 'fp', 'fn', 'ids', 'frag']):
return float(np.nansum(data))
else:
return float(np.nanmean(data))
else:
return np.nan
else:
return float(self.label_metrics[metric_name][class_name])
def serialize(self) -> Dict[(str, Any)]:
metrics = dict()
metrics['label_metrics'] = self.label_metrics
metrics['eval_time'] = self.eval_time
metrics['cfg'] = self.cfg.serialize()
for metric_name in self.label_metrics.keys():
metrics[metric_name] = self.compute_metric(metric_name)
return metrics
@classmethod
def deserialize(cls, content: dict) -> 'TrackingMetrics':
' Initialize from serialized dictionary. '
cfg = TrackingConfig.deserialize(content['cfg'])
tm = cls(cfg=cfg)
tm.add_runtime(content['eval_time'])
tm.label_metrics = content['label_metrics']
return tm
def __eq__(self, other):
eq = True
eq = (eq and (self.label_metrics == other.label_metrics))
eq = (eq and (self.eval_time == other.eval_time))
eq = (eq and (self.cfg == other.cfg))
return eq
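
# Sketch of how TrackingMetrics aggregates per-class values into 'all': count-style metrics are
# summed, score-style metrics are averaged over the classes that are not NaN. The import paths,
# the 'tracking_nips_2019' config name and the assumption that 'ids' and 'amota' are entries of
# TRACKING_METRICS are assumptions about the surrounding devkit.
from nuscenes.eval.common.config import config_factory            # assumed helper
from nuscenes.eval.tracking.data_classes import TrackingMetrics   # assumed module location

cfg = config_factory('tracking_nips_2019')
tm = TrackingMetrics(cfg)
tm.add_label_metric('ids', 'car', 3)          # identity switches are counts -> summed over classes
tm.add_label_metric('ids', 'bicycle', 2)
tm.add_label_metric('amota', 'car', 0.6)      # AMOTA is a score -> nanmean over non-NaN classes
print(tm.compute_metric('ids'))               # 5.0
print(tm.compute_metric('amota'))             # 0.6 (the other classes are still NaN and ignored)
print(tm.compute_metric('amota', 'car'))      # 0.6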
|
class TrackingBox(EvalBox):
' Data class used during tracking evaluation. Can be a prediction or ground truth.'
def __init__(self, sample_token: str='', translation: Tuple[(float, float, float)]=(0, 0, 0), size: Tuple[(float, float, float)]=(0, 0, 0), rotation: Tuple[(float, float, float, float)]=(0, 0, 0, 0), velocity: Tuple[(float, float)]=(0, 0), ego_translation: Tuple[(float, float, float)]=(0, 0, 0), num_pts: int=(- 1), tracking_id: str='', tracking_name: str='', tracking_score: float=(- 1.0)):
super().__init__(sample_token, translation, size, rotation, velocity, ego_translation, num_pts)
assert (tracking_name is not None), 'Error: tracking_name cannot be empty!'
assert (tracking_name in TRACKING_NAMES), ('Error: Unknown tracking_name %s' % tracking_name)
assert (type(tracking_score) == float), 'Error: tracking_score must be a float!'
assert (not np.any(np.isnan(tracking_score))), 'Error: tracking_score may not be NaN!'
self.tracking_id = tracking_id
self.tracking_name = tracking_name
self.tracking_score = tracking_score
def __eq__(self, other):
return ((self.sample_token == other.sample_token) and (self.translation == other.translation) and (self.size == other.size) and (self.rotation == other.rotation) and (self.velocity == other.velocity) and (self.ego_translation == other.ego_translation) and (self.num_pts == other.num_pts) and (self.tracking_id == other.tracking_id) and (self.tracking_name == other.tracking_name) and (self.tracking_score == other.tracking_score))
def serialize(self) -> dict:
' Serialize instance into json-friendly format. '
return {'sample_token': self.sample_token, 'translation': self.translation, 'size': self.size, 'rotation': self.rotation, 'velocity': self.velocity, 'ego_translation': self.ego_translation, 'num_pts': self.num_pts, 'tracking_id': self.tracking_id, 'tracking_name': self.tracking_name, 'tracking_score': self.tracking_score}
@classmethod
def deserialize(cls, content: dict):
' Initialize from serialized content. '
return cls(sample_token=content['sample_token'], translation=tuple(content['translation']), size=tuple(content['size']), rotation=tuple(content['rotation']), velocity=tuple(content['velocity']), ego_translation=((0.0, 0.0, 0.0) if ('ego_translation' not in content) else tuple(content['ego_translation'])), num_pts=((- 1) if ('num_pts' not in content) else int(content['num_pts'])), tracking_id=content['tracking_id'], tracking_name=content['tracking_name'], tracking_score=((- 1.0) if ('tracking_score' not in content) else float(content['tracking_score'])))
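
# Sketch of a TrackingBox serialize/deserialize round trip. The import path and the use of 'car'
# as a valid tracking_name are assumptions about the devkit constants (TRACKING_NAMES).
from nuscenes.eval.tracking.data_classes import TrackingBox  # assumed module location

box = TrackingBox(sample_token='sample_0', translation=(10.0, 5.0, 1.0), size=(2.0, 4.5, 1.6),
                  rotation=(1.0, 0.0, 0.0, 0.0), velocity=(1.0, 0.0),
                  tracking_id='track_7', tracking_name='car', tracking_score=0.9)
assert TrackingBox.deserialize(box.serialize()) == box   # every field survives the round trip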
|
class TrackingMetricDataList():
' This stores a set of MetricData in a dict indexed by name. '
def __init__(self):
self.md: Dict[(str, TrackingMetricData)] = {}
def __getitem__(self, key) -> TrackingMetricData:
return self.md[key]
def __eq__(self, other):
eq = True
for key in self.md.keys():
eq = (eq and (self[key] == other[key]))
return eq
def set(self, tracking_name: str, data: TrackingMetricData):
' Sets the MetricData entry for a certain tracking_name. '
self.md[tracking_name] = data
def serialize(self) -> dict:
return {key: value.serialize() for (key, value) in self.md.items()}
@classmethod
def deserialize(cls, content: dict, metric_data_cls):
mdl = cls()
for (name, md) in content.items():
mdl.set(name, metric_data_cls.deserialize(md))
return mdl
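
# Sketch: TrackingMetricDataList is a thin name -> TrackingMetricData mapping with its own
# (de)serialization helpers (import path assumed as above).
import numpy as np
from nuscenes.eval.tracking.data_classes import TrackingMetricData, TrackingMetricDataList  # assumed

TrackingMetricData.set_nelem(10)
mdl = TrackingMetricDataList()
mdl.set('car', TrackingMetricData.random_md())
mdl_round_trip = TrackingMetricDataList.deserialize(mdl.serialize(), TrackingMetricData)
assert np.allclose(mdl['car'].confidence, mdl_round_trip['car'].confidence)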
|
class TrackingEval():
'\n This is the official nuScenes tracking evaluation code.\n Results are written to the provided output_dir.\n\n Here is an overview of the functions in this class:\n - init: Loads GT annotations and predictions stored in JSON format and filters the boxes.\n - run: Performs evaluation and dumps the metric data to disk.\n - render: Renders various plots and dumps to disk.\n\n We assume that:\n - Every sample_token is given in the results, although there may be no predictions for that sample.\n\n Please see https://www.nuscenes.org/tracking for more details.\n '
def __init__(self, config: TrackingConfig, result_path: str, eval_set: str, output_dir: str, nusc_version: str, nusc_dataroot: str, verbose: bool=True, render_classes: List[str]=None):
'\n Initialize a TrackingEval object.\n :param config: A TrackingConfig object.\n :param result_path: Path of the nuScenes JSON result file.\n :param eval_set: The dataset split to evaluate on, e.g. train, val or test.\n :param output_dir: Folder to save plots and results to.\n :param nusc_version: The version of the NuScenes dataset.\n :param nusc_dataroot: Path of the nuScenes dataset on disk.\n :param verbose: Whether to print to stdout.\n :param render_classes: Classes to render to disk or None.\n '
self.cfg = config
self.result_path = result_path
self.eval_set = eval_set
self.output_dir = output_dir
self.verbose = verbose
self.render_classes = render_classes
assert os.path.exists(result_path), 'Error: The result file does not exist!'
self.plot_dir = os.path.join(self.output_dir, 'plots')
if (not os.path.isdir(self.output_dir)):
os.makedirs(self.output_dir)
if (not os.path.isdir(self.plot_dir)):
os.makedirs(self.plot_dir)
nusc = NuScenes(version=nusc_version, verbose=verbose, dataroot=nusc_dataroot)
if verbose:
print('Initializing nuScenes tracking evaluation')
(pred_boxes, self.meta) = load_prediction(self.result_path, self.cfg.max_boxes_per_sample, TrackingBox, verbose=verbose)
gt_boxes = load_gt(nusc, self.eval_set, TrackingBox, verbose=verbose)
assert (set(pred_boxes.sample_tokens) == set(gt_boxes.sample_tokens)), "Samples in split don't match samples in predicted tracks."
pred_boxes = add_center_dist(nusc, pred_boxes)
gt_boxes = add_center_dist(nusc, gt_boxes)
if verbose:
print('Filtering tracks')
pred_boxes = filter_eval_boxes(nusc, pred_boxes, self.cfg.class_range, verbose=verbose)
if verbose:
print('Filtering ground truth tracks')
gt_boxes = filter_eval_boxes(nusc, gt_boxes, self.cfg.class_range, verbose=verbose)
self.sample_tokens = gt_boxes.sample_tokens
self.tracks_gt = create_tracks(gt_boxes, nusc, self.eval_set, gt=True)
self.tracks_pred = create_tracks(pred_boxes, nusc, self.eval_set, gt=False)
def evaluate(self) -> Tuple[(TrackingMetrics, TrackingMetricDataList)]:
'\n Performs the actual evaluation.\n :return: A tuple of high-level and the raw metric data.\n '
start_time = time.time()
metrics = TrackingMetrics(self.cfg)
if self.verbose:
print('Accumulating metric data...')
metric_data_list = TrackingMetricDataList()
def accumulate_class(curr_class_name):
curr_ev = TrackingEvaluation(self.tracks_gt, self.tracks_pred, curr_class_name, self.cfg.dist_fcn_callable, self.cfg.dist_th_tp, self.cfg.min_recall, num_thresholds=TrackingMetricData.nelem, metric_worst=self.cfg.metric_worst, verbose=self.verbose, output_dir=self.output_dir, render_classes=self.render_classes)
curr_md = curr_ev.accumulate()
metric_data_list.set(curr_class_name, curr_md)
for class_name in self.cfg.class_names:
accumulate_class(class_name)
if self.verbose:
print('Calculating metrics...')
for class_name in self.cfg.class_names:
md = metric_data_list[class_name]
if np.all(np.isnan(md.mota)):
best_thresh_idx = None
else:
best_thresh_idx = np.nanargmax(md.mota)
if (best_thresh_idx is not None):
for metric_name in MOT_METRIC_MAP.values():
if (metric_name == ''):
continue
value = md.get_metric(metric_name)[best_thresh_idx]
metrics.add_label_metric(metric_name, class_name, value)
for metric_name in AVG_METRIC_MAP.keys():
values = np.array(md.get_metric(AVG_METRIC_MAP[metric_name]))
assert (len(values) == TrackingMetricData.nelem)
if np.all(np.isnan(values)):
value = np.nan
else:
assert np.all((values[np.logical_not(np.isnan(values))] >= 0)), 'Error: Metric values must be non-negative.'
values[np.isnan(values)] = self.cfg.metric_worst[metric_name]
value = float(np.nanmean(values))
metrics.add_label_metric(metric_name, class_name, value)
metrics.add_runtime((time.time() - start_time))
return (metrics, metric_data_list)
def render(self, md_list: TrackingMetricDataList) -> None:
'\n Renders a plot for each class and each metric.\n :param md_list: TrackingMetricDataList instance.\n '
if self.verbose:
print('Rendering curves')
def savepath(name):
return os.path.join(self.plot_dir, (name + '.pdf'))
summary_plot(self.cfg, md_list, savepath=savepath('summary'))
for metric_name in LEGACY_METRICS:
recall_metric_curve(self.cfg, md_list, metric_name, savepath=savepath(('%s' % metric_name)))
def main(self, render_curves: bool=True) -> Dict[(str, Any)]:
'\n Main function that loads the evaluation code, visualizes samples, runs the evaluation and renders stat plots.\n :param render_curves: Whether to render PR and TP curves to disk.\n :return: The serialized TrackingMetrics computed during evaluation.\n '
(metrics, metric_data_list) = self.evaluate()
if self.verbose:
print(('Saving metrics to: %s' % self.output_dir))
metrics_summary = metrics.serialize()
metrics_summary['meta'] = self.meta.copy()
with open(os.path.join(self.output_dir, 'metrics_summary.json'), 'w') as f:
json.dump(metrics_summary, f, indent=2)
with open(os.path.join(self.output_dir, 'metrics_details.json'), 'w') as f:
json.dump(metric_data_list.serialize(), f, indent=2)
if self.verbose:
print_final_metrics(metrics)
if render_curves:
self.render(metric_data_list)
return metrics_summary
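
# Minimal end-to-end sketch for running TrackingEval. Paths, the dataset version and the
# 'tracking_nips_2019' config name are placeholders/assumptions; only the constructor and
# main() shown above are used, and 'amota' is assumed to be one of the TRACKING_METRICS keys.
from nuscenes.eval.common.config import config_factory      # assumed helper
from nuscenes.eval.tracking.evaluate import TrackingEval     # assumed module location of this class

cfg = config_factory('tracking_nips_2019')
tracking_eval = TrackingEval(config=cfg,
                             result_path='results.json',            # hypothetical tracking submission
                             eval_set='val',
                             output_dir='./tracking_eval_out',
                             nusc_version='v1.0-trainval',
                             nusc_dataroot='/data/sets/nuscenes',    # hypothetical dataroot
                             verbose=True)
metrics_summary = tracking_eval.main(render_curves=False)           # writes metrics_summary.json and metrics_details.json
print(metrics_summary['amota'])                                      # aggregated metrics are keyed by metric name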
|
def interpolate_tracking_boxes(left_box: TrackingBox, right_box: TrackingBox, right_ratio: float) -> TrackingBox:
'\n Linearly interpolate box parameters between two boxes.\n :param left_box: A Trackingbox.\n :param right_box: Another TrackingBox\n :param right_ratio: Weight given to the right box.\n :return: The interpolated TrackingBox.\n '
def interp_list(left, right, rratio):
return tuple((((1.0 - rratio) * np.array(left, dtype=float)) + (rratio * np.array(right, dtype=float))))
def interp_float(left, right, rratio):
return (((1.0 - rratio) * float(left)) + (rratio * float(right)))
rotation = Quaternion.slerp(q0=Quaternion(left_box.rotation), q1=Quaternion(right_box.rotation), amount=right_ratio).elements
tracking_score = interp_float(left_box.tracking_score, right_box.tracking_score, right_ratio)
return TrackingBox(sample_token=right_box.sample_token, translation=interp_list(left_box.translation, right_box.translation, right_ratio), size=interp_list(left_box.size, right_box.size, right_ratio), rotation=rotation, velocity=interp_list(left_box.velocity, right_box.velocity, right_ratio), ego_translation=interp_list(left_box.ego_translation, right_box.ego_translation, right_ratio), tracking_id=right_box.tracking_id, tracking_name=right_box.tracking_name, tracking_score=tracking_score)
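
# Worked numeric sketch of the interpolation weights used above: with right_ratio = 0.25 every
# linearly interpolated field becomes 0.75 * left + 0.25 * right.
import numpy as np

left_translation, right_translation = (0.0, 0.0, 0.0), (4.0, 8.0, 0.0)
right_ratio = 0.25
interpolated = tuple(((1.0 - right_ratio) * np.array(left_translation)) + (right_ratio * np.array(right_translation)))
print(interpolated)   # one quarter of the way from the left to the right box: (1.0, 2.0, 0.0)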
|
def interpolate_tracks(tracks_by_timestamp: DefaultDict[(int, List[TrackingBox])]) -> DefaultDict[(int, List[TrackingBox])]:
'\n Interpolate the tracks to fill in holes, especially since GT boxes with 0 lidar points are removed.\n This interpolation does not take into account visibility. It interpolates despite occlusion.\n :param tracks_by_timestamp: The tracks.\n :return: The interpolated tracks.\n '
tracks_by_id = defaultdict(list)
track_timestamps_by_id = defaultdict(list)
for (timestamp, tracking_boxes) in tracks_by_timestamp.items():
for tracking_box in tracking_boxes:
tracks_by_id[tracking_box.tracking_id].append(tracking_box)
track_timestamps_by_id[tracking_box.tracking_id].append(timestamp)
timestamps = tracks_by_timestamp.keys()
interpolate_count = 0
for timestamp in timestamps:
for (tracking_id, track) in tracks_by_id.items():
if ((track_timestamps_by_id[tracking_id][0] <= timestamp <= track_timestamps_by_id[tracking_id][(- 1)]) and (timestamp not in track_timestamps_by_id[tracking_id])):
right_ind = bisect(track_timestamps_by_id[tracking_id], timestamp)
left_ind = (right_ind - 1)
right_timestamp = track_timestamps_by_id[tracking_id][right_ind]
left_timestamp = track_timestamps_by_id[tracking_id][left_ind]
right_tracking_box = tracks_by_id[tracking_id][right_ind]
left_tracking_box = tracks_by_id[tracking_id][left_ind]
right_ratio = (float((timestamp - left_timestamp)) / (right_timestamp - left_timestamp))  # Fraction of the way from the left to the right box, i.e. the weight given to the right box.
tracking_box = interpolate_tracking_boxes(left_tracking_box, right_tracking_box, right_ratio)
interpolate_count += 1
tracks_by_timestamp[timestamp].append(tracking_box)
return tracks_by_timestamp
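
# Worked sketch of the right_ratio used when filling a hole in a track: a box exists at the left
# and right keyframe timestamps but not in between (the numbers are arbitrary ticks, standing in
# for nuScenes sample timestamps).
left_timestamp, missing_timestamp, right_timestamp = 100, 175, 200
right_ratio = float(missing_timestamp - left_timestamp) / (right_timestamp - left_timestamp)
print(right_ratio)   # 0.75 -> the interpolated box lies three quarters of the way towards the right box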
|
def create_tracks(all_boxes: EvalBoxes, nusc: NuScenes, eval_split: str, gt: bool) -> Dict[(str, Dict[(int, List[TrackingBox])])]:
'\n Returns all tracks for all scenes. Samples within a track are sorted in chronological order.\n This can be applied either to GT or predictions.\n :param all_boxes: Holds all GT or predicted boxes.\n :param nusc: The NuScenes instance to load the sample information from.\n :param eval_split: The evaluation split for which we create tracks.\n :param gt: Whether we are creating tracks for GT or predictions\n :return: The tracks.\n '
splits = create_splits_scenes()
scene_tokens = set()
for sample_token in all_boxes.sample_tokens:
scene_token = nusc.get('sample', sample_token)['scene_token']
scene = nusc.get('scene', scene_token)
if (scene['name'] in splits[eval_split]):
scene_tokens.add(scene_token)
tracks = defaultdict((lambda : defaultdict(list)))
for scene_token in scene_tokens:
scene = nusc.get('scene', scene_token)
cur_sample_token = scene['first_sample_token']
while True:
cur_sample = nusc.get('sample', cur_sample_token)
tracks[scene_token][cur_sample['timestamp']] = []
if (cur_sample_token == scene['last_sample_token']):
break
cur_sample_token = cur_sample['next']
for sample_token in all_boxes.sample_tokens:
sample_record = nusc.get('sample', sample_token)
scene_token = sample_record['scene_token']
tracks[scene_token][sample_record['timestamp']] = all_boxes.boxes[sample_token]
if (not gt):
for (scene_id, scene_tracks) in tracks.items():
track_id_scores = defaultdict(list)
for (timestamp, boxes) in scene_tracks.items():
for box in boxes:
track_id_scores[box.tracking_id].append(box.tracking_score)
track_id_avg_scores = {}
for (tracking_id, scores) in track_id_scores.items():
track_id_avg_scores[tracking_id] = np.mean(scores)
for (timestamp, boxes) in scene_tracks.items():
for box in boxes:
box.tracking_score = track_id_avg_scores[box.tracking_id]
for scene_token in tracks.keys():
tracks[scene_token] = interpolate_tracks(tracks[scene_token])
if (not gt):
tracks[scene_token] = defaultdict(list, sorted(tracks[scene_token].items(), key=(lambda kv: kv[0])))
return tracks
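
# Toy sketch of the score averaging that create_tracks applies to predicted tracks: every box of a
# track is assigned the mean tracking_score of that track (plain dicts stand in for the real scenes).
import numpy as np

track_id_scores = {'track_a': [0.75, 0.25], 'track_b': [0.2]}
track_id_avg_scores = {tid: float(np.mean(scores)) for (tid, scores) in track_id_scores.items()}
print(track_id_avg_scores)   # {'track_a': 0.5, 'track_b': 0.2}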
|
def track_initialization_duration(df: DataFrame, obj_frequencies: DataFrame) -> float:
"\n Computes the track initialization duration, which is the duration from the first occurrence of an object to\n it's first correct detection (TP).\n Note that this True Positive metric is undefined if there are no matched tracks.\n :param df: Motmetrics dataframe that is required, but not used here.\n :param obj_frequencies: Stores the GT tracking_ids and their frequencies.\n :return: The track initialization time.\n "
tid = 0
missed_tracks = 0
for gt_tracking_id in obj_frequencies.index:
dfo = df.noraw[(df.noraw.OId == gt_tracking_id)]
notmiss = dfo[(dfo.Type != 'MISS')]
if (len(notmiss) == 0):
diff = 0
missed_tracks += 1
else:
diff = (notmiss.index[0][0] - dfo.index[0][0])
assert (diff >= 0), ('Time difference should be larger than or equal to zero: %.2f' % diff)
tid += (diff * 0.5)
matched_tracks = (len(obj_frequencies) - missed_tracks)
if (matched_tracks == 0):
return np.nan
else:
return (tid / matched_tracks)
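
# Worked sketch of the TID bookkeeping above (no motmetrics objects involved): a GT track first
# appears at keyframe index 10 and its first non-MISS event (first match) is at index 14. The 0.5
# factor converts keyframe counts to seconds, assuming the 2 Hz nuScenes keyframe rate.
first_occurrence_frame = 10
first_match_frame = 14
diff = first_match_frame - first_occurrence_frame   # 4 keyframes until the track is initialized
print(diff * 0.5)                                   # 2.0 seconds contributed to the tid sum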
|
def longest_gap_duration(df: DataFrame, obj_frequencies: DataFrame) -> float:
'\n Computes the longest gap duration, which is the longest duration of any gaps in the detection of an object.\n Note that this True Positive metric is undefined if there are no matched tracks.\n :param df: Motmetrics dataframe with the MOT events; the noraw events are used to find the detection gaps of each GT track.\n :param obj_frequencies: Dataframe with all object frequencies.\n :return: The longest gap duration.\n '
if (len(obj_frequencies.index) == 0):
return np.nan
lgd = 0
missed_tracks = 0
for gt_tracking_id in obj_frequencies.index:
dfo = df.noraw[(df.noraw.OId == gt_tracking_id)]
matched = set(dfo[(dfo.Type != 'MISS')].index.get_level_values(0).values)
if (len(matched) == 0):
gap = 0
missed_tracks += 1
else:
gap = 0
cur_gap = 0
first_index = dfo.index[0][0]
last_index = dfo.index[(- 1)][0]
for i in range(first_index, (last_index + 1)):
if (i in matched):
gap = np.maximum(gap, cur_gap)
cur_gap = 0
else:
cur_gap += 1
gap = np.maximum(gap, cur_gap)
assert (gap >= 0), ('Gap should be larger than or equal to zero: %.2f' % gap)
lgd += (gap * 0.5)
matched_tracks = (len(obj_frequencies) - missed_tracks)
if (matched_tracks == 0):
lgd = np.nan
else:
lgd = (lgd / matched_tracks)
return lgd
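
# Worked sketch of the gap search above: a GT track spans keyframes 0..6 and is matched at
# frames {0, 1, 4, 5, 6}, so the longest run of misses covers frames 2-3, i.e. 2 keyframes
# (1.0 s at the assumed 2 Hz keyframe rate).
matched = {0, 1, 4, 5, 6}
gap = cur_gap = 0
for i in range(0, 7):
    if i in matched:
        gap = max(gap, cur_gap)
        cur_gap = 0
    else:
        cur_gap += 1
gap = max(gap, cur_gap)
print(gap * 0.5)   # 1.0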
|
def motar(df: DataFrame, num_matches: int, num_misses: int, num_switches: int, num_false_positives: int, num_objects: int, alpha: float=1.0) -> float:
'\n Computes MOTAR, the modified MOTA metric described at https://www.nuscenes.org/tracking.\n Note that we use the measured recall, which is not identical to the hypothetical recall of the\n AMOTA/AMOTP thresholds.\n :param df: Motmetrics dataframe that is required, but not used here.\n :param num_matches: The number of matches, aka. true positives.\n :param num_misses: The number of misses, aka. false negatives.\n :param num_switches: The number of identity switches.\n :param num_false_positives: The number of false positives.\n :param num_objects: The total number of objects of this class in the GT.\n :param alpha: MOTAR weighting factor (previously 0.2).\n :return: The MOTAR or nan if there are no GT objects.\n '
recall = (num_matches / num_objects)
nominator = (((num_misses + num_switches) + num_false_positives) - ((1 - recall) * num_objects))
denominator = (recall * num_objects)
if (denominator == 0):
motar_val = np.nan
else:
motar_val = (1 - ((alpha * nominator) / denominator))
motar_val = np.maximum(0, motar_val)
return motar_val
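
# Worked numeric sketch of the MOTAR formula above: 100 GT objects, 80 matches (recall 0.8),
# 20 misses, 2 identity switches and 10 false positives, with alpha = 1.
num_objects, num_matches = 100, 80
num_misses, num_switches, num_false_positives = 20, 2, 10
recall = num_matches / num_objects                                                              # 0.8
nominator = (num_misses + num_switches + num_false_positives) - ((1 - recall) * num_objects)    # 32 - 20 = 12
denominator = recall * num_objects                                                              # 80
print(max(0.0, 1 - (nominator / denominator)))                                                  # 0.85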
|
def mota_custom(df: DataFrame, num_misses: int, num_switches: int, num_false_positives: int, num_objects: int) -> float:
"\n Multiple object tracker accuracy.\n Based on py-motmetric's mota function.\n Compared to the original MOTA definition, we clip values below 0.\n :param df: Motmetrics dataframe that is required, but not used here.\n :param num_misses: The number of misses, aka. false negatives.\n :param num_switches: The number of identity switches.\n :param num_false_positives: The number of false positives.\n :param num_objects: The total number of objects of this class in the GT.\n :return: The MOTA or 0 if below 0.\n "
mota = (1.0 - (((num_misses + num_switches) + num_false_positives) / num_objects))
mota = np.maximum(0, mota)
return mota
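
# Worked numeric sketch of the clipped MOTA above: 100 GT objects, 20 misses, 2 identity switches
# and 10 false positives give MOTA = 1 - 32/100 = 0.68; negative values would be clipped to 0.
num_objects, num_misses, num_switches, num_false_positives = 100, 20, 2, 10
print(max(0.0, 1.0 - ((num_misses + num_switches + num_false_positives) / num_objects)))   # 0.68 (up to floating-point rounding)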
|