# Virtual-Try-On/detectron2/layers/roi_align_rotated.py
# Copyright (c) Facebook, Inc. and its affiliates.
import torch
from torch import nn
from torch.autograd import Function
from torch.autograd.function import once_differentiable
from torch.nn.modules.utils import _pair


class _ROIAlignRotated(Function):
@staticmethod
def forward(ctx, input, roi, output_size, spatial_scale, sampling_ratio):
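        # Stash the rois and the pooling parameters on ctx; backward() needs
        # them to call the compiled detectron2 backward kernel.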
ctx.save_for_backward(roi)
ctx.output_size = _pair(output_size)
ctx.spatial_scale = spatial_scale
ctx.sampling_ratio = sampling_ratio
ctx.input_shape = input.size()
output = torch.ops.detectron2.roi_align_rotated_forward(
input, roi, spatial_scale, output_size[0], output_size[1], sampling_ratio
)
        return output

    @staticmethod
@once_differentiable
def backward(ctx, grad_output):
(rois,) = ctx.saved_tensors
output_size = ctx.output_size
spatial_scale = ctx.spatial_scale
sampling_ratio = ctx.sampling_ratio
bs, ch, h, w = ctx.input_shape
grad_input = torch.ops.detectron2.roi_align_rotated_backward(
grad_output,
rois,
spatial_scale,
output_size[0],
output_size[1],
bs,
ch,
h,
w,
sampling_ratio,
)
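        # Only the input feature map receives a gradient; rois and the
        # non-tensor arguments of forward() get None.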
        return grad_input, None, None, None, None, None


roi_align_rotated = _ROIAlignRotated.apply


class ROIAlignRotated(nn.Module):
def __init__(self, output_size, spatial_scale, sampling_ratio):
"""
Args:
output_size (tuple): h, w
spatial_scale (float): scale the input boxes by this number
sampling_ratio (int): number of inputs samples to take for each output
sample. 0 to take samples densely.
Note:
ROIAlignRotated supports continuous coordinate by default:
Given a continuous coordinate c, its two neighboring pixel indices (in our
pixel model) are computed by floor(c - 0.5) and ceil(c - 0.5). For example,
c=1.3 has pixel neighbors with discrete indices [0] and [1] (which are sampled
from the underlying signal at continuous coordinates 0.5 and 1.5).
"""
super(ROIAlignRotated, self).__init__()
self.output_size = output_size
self.spatial_scale = spatial_scale
        self.sampling_ratio = sampling_ratio

    def forward(self, input, rois):
"""
Args:
input: NCHW images
rois: Bx6 boxes. First column is the index into N.
The other 5 columns are (x_ctr, y_ctr, width, height, angle_degrees).
"""
assert rois.dim() == 2 and rois.size(1) == 6
orig_dtype = input.dtype
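        # Half-precision inputs are cast to fp32 before calling the kernel;
        # the original dtype is restored on the output below.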
if orig_dtype == torch.float16:
input = input.float()
rois = rois.float()
output_size = _pair(self.output_size)
# Scripting for Autograd is currently unsupported.
# This is a quick fix without having to rewrite code on the C++ side
if torch.jit.is_scripting() or torch.jit.is_tracing():
return torch.ops.detectron2.roi_align_rotated_forward(
input, rois, self.spatial_scale, output_size[0], output_size[1], self.sampling_ratio
).to(dtype=orig_dtype)
return roi_align_rotated(
input, rois, self.output_size, self.spatial_scale, self.sampling_ratio
        ).to(dtype=orig_dtype)

    def __repr__(self):
tmpstr = self.__class__.__name__ + "("
tmpstr += "output_size=" + str(self.output_size)
tmpstr += ", spatial_scale=" + str(self.spatial_scale)
tmpstr += ", sampling_ratio=" + str(self.sampling_ratio)
tmpstr += ")"
return tmpstr
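

# A minimal usage sketch, assuming the detectron2 C++/CUDA extension that
# registers torch.ops.detectron2 has been built and is importable; the feature
# map size and box values below are purely illustrative. It shows the expected
# rois layout of (batch_index, x_ctr, y_ctr, width, height, angle_degrees) and
# the resulting output shape.
if __name__ == "__main__":
    pooler = ROIAlignRotated(output_size=(7, 7), spatial_scale=1.0 / 16, sampling_ratio=0)

    # A single 256-channel feature map, e.g. a 1/16-resolution level of an
    # 800x800 image.
    features = torch.rand(1, 256, 50, 50)

    # Two rotated boxes in image coordinates; the first column indexes into the
    # batch dimension of `features`.
    rois = torch.tensor(
        [
            [0.0, 320.0, 240.0, 128.0, 64.0, 30.0],
            [0.0, 400.0, 100.0, 64.0, 64.0, -15.0],
        ]
    )

    out = pooler(features, rois)
    print(tuple(out.shape))  # expected: (2, 256, 7, 7)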