Spaces:
Sleeping
Sleeping
File size: 3,949 Bytes
a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 a80d6bb c74a070 |
1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 |
import torch
import torch.nn as nn
import numpy as np
class APLoss(nn.Module):
    """Differentiable Average Precision (AP) loss via score quantization.

    Scores are soft-assigned to ``nq`` quantization bins by a fixed
    (non-learnable) 1x1 convolution implementing triangular membership
    functions; per-bin precision/recall are then combined into AP.

    Input:  (N, M) similarity values in [min, max]
    label:  (N, M) values in {0, 1}
    Returns: (N,) tensor with one AP per query.
    Note: typically, you want to minimize ``1 - mean(AP)``.
    """

    def __init__(self, nq=25, min=0, max=1, euc=False):
        """
        nq:       number of quantization bins (2..100)
        min, max: range of the input scores
        euc:      if True, inputs are squared-Euclidean-style distances
                  mapped back to a similarity before quantization
        """
        nn.Module.__init__(self)
        assert isinstance(nq, int) and 2 <= nq <= 100
        self.nq = nq
        self.min = min
        self.max = max
        self.euc = euc
        gap = max - min
        assert gap > 0
        # init quantizer = non-learnable (fixed) convolution; each output
        # channel pair encodes one triangular bin (rising/falling line).
        self.quantizer = q = nn.Conv1d(1, 2 * nq, kernel_size=1, bias=True)
        a = (nq - 1) / gap
        # 1st half = lines passing to (min+x,1) and (min+x+1/a,0) with x = {nq-1..0}*gap/(nq-1)
        q.weight.data[:nq] = -a
        q.bias.data[:nq] = torch.from_numpy(
            a * min + np.arange(nq, 0, -1)
        )  # b = 1 + a*(min+x)
        # 2nd half = lines passing to (min+x,1) and (min+x-1/a,0) with x = {nq-1..0}*gap/(nq-1)
        q.weight.data[nq:] = a
        q.bias.data[nq:] = torch.from_numpy(
            np.arange(2 - nq, 2, 1) - a * min
        )  # b = 1 - a*(min+x)
        # first and last one are special: just horizontal straight line
        q.weight.data[0] = q.weight.data[-1] = 0
        q.bias.data[0] = q.bias.data[-1] = 1

    def compute_AP(self, x, label):
        """Return the per-query AP for scores x (N, M) and labels (N, M)."""
        N, M = x.shape
        if self.euc:  # euclidean distance in same range than similarities
            x = 1 - torch.sqrt(2.001 - 2 * x)
        # quantize all predictions: soft bin weights, N x Q x M
        q = self.quantizer(x.unsqueeze(1))
        q = torch.min(q[:, : self.nq], q[:, self.nq :]).clamp(min=0)
        nbs = q.sum(dim=-1)  # number of samples per bin: N x Q = c
        rec = (q * label.view(N, 1, M).float()).sum(
            dim=-1
        )  # nb of correct samples per bin = c+ : N x Q
        prec = rec.cumsum(dim=-1) / (1e-16 + nbs.cumsum(dim=-1))  # precision
        # FIX: epsilon-guard the recall normalization — a query with no
        # positive labels previously divided 0/0 and yielded NaN APs,
        # which then poisoned the mean loss. With the guard it yields 0.
        rec = rec / (1e-16 + rec.sum(dim=-1, keepdim=True))  # norm in [0,1]
        ap = (prec * rec).sum(dim=-1)  # per-query AP
        return ap

    def forward(self, x, label):
        assert x.shape == label.shape  # N x M
        return self.compute_AP(x, label)
class PixelAPLoss(nn.Module):
    """Computes the pixel-wise AP loss:
    Given two images and ground-truth optical flow, computes the AP per
    pixel and averages ``1 - AP`` over the valid (masked) pixels.

    feat1: (B, C, H, W) pixel-wise features extracted from img1
    feat2: (B, C, H, W) pixel-wise features extracted from img2
    aflow: (B, 2, H, W) absolute flow: aflow[...,y1,x1] = x2,y2
    """

    def __init__(self, sampler, nq=20):
        """sampler: callable producing (scores, gt, msk, qconf) subsamples."""
        nn.Module.__init__(self)
        self.aploss = APLoss(nq, min=0, max=1, euc=False)
        self.name = "pixAP"
        self.sampler = sampler

    def loss_from_ap(self, ap, rel):
        # rel (pixel reliability) is unused here; subclasses may use it.
        return 1 - ap

    def forward(self, feat0, feat1, conf0, conf1, pos0, pos1, B, H, W, N=1200):
        # subsample things
        # FIX: forward the caller-supplied N — the original hard-coded
        # N=1200 in the sampler call, silently ignoring the parameter.
        scores, gt, msk, qconf = self.sampler(
            feat0, feat1, conf0, conf1, pos0, pos1, B, H, W, N=N
        )
        # compute pixel-wise AP
        n = qconf.numel()
        if n == 0:
            # nothing was sampled: no pixels to score
            return 0
        scores, gt = scores.view(n, -1), gt.view(n, -1)
        ap = self.aploss(scores, gt).view(msk.shape)
        pixel_loss = self.loss_from_ap(ap, qconf)
        loss = pixel_loss[msk].mean()
        return loss
class ReliabilityLoss(PixelAPLoss):
    """Same as PixelAPLoss, but additionally trains a pixel-wise
    confidence (reliability) that predicts whether the pixel is going
    to have a good AP.
    """

    def __init__(self, sampler, base=0.5, **kw):
        """base: fallback AP value credited to fully unreliable pixels."""
        PixelAPLoss.__init__(self, sampler, **kw)
        assert 0 <= base < 1
        self.base = base

    def loss_from_ap(self, ap, rel):
        # Reliability `rel` interpolates between the measured AP and the
        # constant fallback `base`; an unreliable pixel (rel=0) is scored
        # as if its AP were `base`.
        fallback = (1 - rel) * self.base
        return 1 - ap * rel - fallback
|