import numpy as np
import torch
import torch.nn as nn

# Reference (in Chinese): https://zhuanlan.zhihu.com/p/112030273


def warp_optical_flow(batch_x, batch_flow):
    """Warp im2 back to im1 according to the optical flow.

    Modified from
    https://github.com/NVlabs/PWC-Net/blob/fc6ebf9a70a7387164df09a3a2070ba16f9c1ede/PyTorch/models/PWCNet.py  # NOQA

    batch_x: [B, L, C, H, W] images (im2)
    batch_flow: [B, L, 2, H, W] flow
    """
    B, L, C, H, W = batch_x.shape
    N = B * L
    x = batch_x.contiguous().view(-1, C, H, W)
    flo = batch_flow.view(-1, 2, H, W)

    # Build a pixel-coordinate mesh grid of shape [N, 2, H, W].
    xx = torch.arange(0, W).view(1, -1).repeat(H, 1)
    yy = torch.arange(0, H).view(-1, 1).repeat(1, W)
    xx = xx.view(1, 1, H, W).repeat(N, 1, 1, 1)
    yy = yy.view(1, 1, H, W).repeat(N, 1, 1, 1)
    grid = torch.cat((xx, yy), 1).float().to(x.device)

    # Displace the grid by the flow.
    vgrid = grid + flo

    # Scale the sampling grid to [-1, 1], as expected by grid_sample.
    vgrid[:, 0, :, :] = 2.0 * vgrid[:, 0, :, :] / max(W - 1, 1) - 1.0
    vgrid[:, 1, :, :] = 2.0 * vgrid[:, 1, :, :] / max(H - 1, 1) - 1.0

    # grid_sample expects the grid as [N, H, W, 2].
    vgrid = vgrid.permute(0, 2, 3, 1)
    # Warp x onto the output tensor according to vgrid.
    output = nn.functional.grid_sample(x, vgrid, align_corners=True)

    # Validity mask: out-of-range samples are zero-padded by grid_sample, so
    # warping an all-ones tensor marks which output pixels are valid.
    mask = torch.ones_like(x)
    mask = nn.functional.grid_sample(mask, vgrid, align_corners=True)
    mask[mask < 0.9999] = 0
    mask[mask > 0] = 1

    result = output * mask
    return result.view(B, L, C, H, W)


UNKNOWN_FLOW_THRESH = 1e7


def flow_to_image(flow):
    """
    Convert flow into Middlebury color code image
    :param flow: optical flow map, [H, W, 2]
    :return: optical flow image in Middlebury color
    """
    u = flow[:, :, 0]
    v = flow[:, :, 1]

    # Zero out unknown (very large) flow vectors.
    idx_unknown = (abs(u) > UNKNOWN_FLOW_THRESH) | (abs(v) > UNKNOWN_FLOW_THRESH)
    u[idx_unknown] = 0
    v[idx_unknown] = 0

    # Normalize by the maximum flow magnitude.
    rad = np.sqrt(u ** 2 + v ** 2)
    maxrad = max(-1, np.max(rad))
    u = u / (maxrad + np.finfo(float).eps)
    v = v / (maxrad + np.finfo(float).eps)

    img = compute_color(u, v)

    # Blank out pixels with unknown flow.
    idx = np.repeat(idx_unknown[:, :, np.newaxis], 3, axis=2)
    img[idx] = 0

    return np.uint8(img)


def compute_color(u, v):
    """
    Compute optical flow color map
    :param u: optical flow horizontal map
    :param v: optical flow vertical map
    :return: optical flow in color code
    """
    [h, w] = u.shape
    img = np.zeros([h, w, 3])
    nan_idx = np.isnan(u) | np.isnan(v)
    u[nan_idx] = 0
    v[nan_idx] = 0

    colorwheel = make_color_wheel()
    ncols = np.size(colorwheel, 0)

    rad = np.sqrt(u ** 2 + v ** 2)

    # Map the flow direction onto a position on the color wheel.
    a = np.arctan2(-v, -u) / np.pi
    fk = (a + 1) / 2 * (ncols - 1) + 1
    k0 = np.floor(fk).astype(int)
    k1 = k0 + 1
    k1[k1 == ncols + 1] = 1
    f = fk - k0

    for i in range(0, np.size(colorwheel, 1)):
        tmp = colorwheel[:, i]
        col0 = tmp[k0 - 1] / 255
        col1 = tmp[k1 - 1] / 255
        # Linearly interpolate between the two nearest wheel colors.
        col = (1 - f) * col0 + f * col1

        idx = rad <= 1
        # Increase saturation with magnitude for in-range flow ...
        col[idx] = 1 - rad[idx] * (1 - col[idx])
        notidx = np.logical_not(idx)
        # ... and dim out-of-range flow.
        col[notidx] *= 0.75
        img[:, :, i] = np.uint8(np.floor(255 * col * (1 - nan_idx)))

    return img


def make_color_wheel():
    """
    Generate color wheel according to the Middlebury color code
    :return: color wheel, [55, 3]
    """
    RY = 15
    YG = 6
    GC = 4
    CB = 11
    BM = 13
    MR = 6

    ncols = RY + YG + GC + CB + BM + MR
    colorwheel = np.zeros([ncols, 3])

    col = 0

    # RY
    colorwheel[0:RY, 0] = 255
    colorwheel[0:RY, 1] = np.transpose(np.floor(255 * np.arange(0, RY) / RY))
    col += RY

    # YG
    colorwheel[col:col + YG, 0] = 255 - np.transpose(np.floor(255 * np.arange(0, YG) / YG))
    colorwheel[col:col + YG, 1] = 255
    col += YG

    # GC
    colorwheel[col:col + GC, 1] = 255
    colorwheel[col:col + GC, 2] = np.transpose(np.floor(255 * np.arange(0, GC) / GC))
    col += GC

    # CB
    colorwheel[col:col + CB, 1] = 255 - np.transpose(np.floor(255 * np.arange(0, CB) / CB))
    colorwheel[col:col + CB, 2] = 255
    col += CB

    # BM
    colorwheel[col:col + BM, 2] = 255
    colorwheel[col:col + BM, 0] = np.transpose(np.floor(255 * np.arange(0, BM) / BM))
    col += BM

    # MR
    colorwheel[col:col + MR, 2] = 255 - np.transpose(np.floor(255 * np.arange(0, MR) / MR))
    colorwheel[col:col + MR, 0] = 255

    return colorwheel
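

# Usage sketch (illustrative only, not part of the original module): warp a
# small batch of frames with an all-zero flow and render one flow map with
# flow_to_image. The tensor shapes follow the [B, L, C, H, W] / [B, L, 2, H, W]
# conventions documented above; the random/zero inputs are placeholders for
# real frames and flow.
if __name__ == "__main__":
    batch_x = torch.rand(2, 3, 3, 64, 64)       # [B, L, C, H, W] frames (im2)
    batch_flow = torch.zeros(2, 3, 2, 64, 64)   # [B, L, 2, H, W] flow
    warped = warp_optical_flow(batch_x, batch_flow)
    print(warped.shape)  # torch.Size([2, 3, 3, 64, 64])

    # Visualize a single [H, W, 2] flow map as a Middlebury color image.
    flow_np = batch_flow[0, 0].permute(1, 2, 0).contiguous().numpy()
    flow_img = flow_to_image(flow_np)
    print(flow_img.shape, flow_img.dtype)  # (64, 64, 3) uint8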