from collections import OrderedDict
import torch, torchvision

# Demo: multi-scale RoIAlign over a 4-level feature pyramid, pooling each box
# into a fixed 7x7 map (sampling_ratio=2). Every level listed in featmap_names
# participates; each box is assigned to one level by the FPN scale heuristic
# inside MultiScaleRoIAlign.
m = torchvision.ops.MultiScaleRoIAlign(['feat1', 'feat2', 'feat3', 'feat4'], 7, 2)

# Fake feature pyramid: one 256-channel map per level, spatial size roughly
# halving at each level. (Same torch.rand call order as a hand-written dict,
# so RNG consumption is unchanged.)
features = OrderedDict()
pyramid_shapes = [
    ('feat1', 200, 304),
    ('feat2', 100, 153),
    ('feat3', 50, 76),
    ('feat4', 25, 38),
]
for level_name, height, width in pyramid_shapes:
    features[level_name] = torch.rand(1, 256, height, width)

# Random boxes in (x1, y1, x2, y2) form, shape (512, 4); adding the top-left
# corner to the bottom-right offsets keeps every box well-formed
# (x2 >= x1 and y2 >= y1).
proposals = torch.rand(512, 4) * 256
proposals[:, 2:] += proposals[:, :2]
print("proposals:", proposals)

# Size (height, width) of the original image the boxes live in, before the
# feature maps were computed.
image_sizes = [(800, 1088)]
output = m(features, [proposals], image_sizes)
print(output.shape)  # expected: torch.Size([512, 256, 7, 7])
