import numpy as np
from mmseg.ops import resize
import torch.nn as nn
import torch

def upscale_positions(pos, scaling_steps=0):
    """Map coordinates from feature-map resolution up to image resolution.

    Each step undoes one 2x downscaling: p -> 2*p + 0.5.
    Works on plain numbers and on tensors alike.
    """
    result = pos
    step = 0
    while step < scaling_steps:
        result = result * 2 + 0.5
        step += 1
    return result

def downscale_positions(pos, scaling_steps=0):
    """Map coordinates from image resolution down to feature-map resolution.

    Exact inverse of ``upscale_positions``: each step applies p -> (p - 0.5) / 2.
    Works on plain numbers and on tensors alike.
    """
    result = pos
    remaining = scaling_steps
    while remaining > 0:
        result = (result - 0.5) / 2
        remaining -= 1
    return result


class NghSampler2DS(nn.Module):
    """Sample positive/negative descriptor scores on a neighborhood grid.

    Given dense descriptor maps of two images and the ground-truth absolute
    flow mapping image-1 pixels to image-2 positions, this module picks query
    positions in image 1 and scores each against:

      * positives: image-2 descriptors within ``pos_d`` of the GT match,
      * negatives: image-2 descriptors in the ring ``[neg_d, ngh]`` around it,
      * optional distractors taken from a regular grid over image 2.

    All distances are in pixels of the descriptor map.

    Args:
        ngh: neighborhood radius used to collect negatives.
        subq: stride of query positions (negative value => random queries).
        subd: stride of the precomputed neighborhood offsets.
        pos_d: maximum distance for a sample to count as positive.
        neg_d: minimum distance for a sample to count as negative.
        border: queries closer than this to the border are skipped
            (defaults to ``ngh``).
        maxpool_pos: if True, keep only the best positive score per query.
        subd_neg: if non-zero, stride of the extra distractor grid in image 2.
        scaling_step: number of 2x downscalings between the input images
            (and ``aflow``) and the descriptor maps.
    """

    def __init__(self, ngh, subq=1, subd=1, pos_d=0, neg_d=2, border=None,
                 maxpool_pos=True, subd_neg=0, scaling_step=0):
        super(NghSampler2DS, self).__init__()
        assert 0 <= pos_d < neg_d <= (ngh if ngh else 99)
        self.ngh = ngh
        self.pos_d = pos_d
        self.neg_d = neg_d
        assert subd <= ngh or ngh == 0
        assert subq != 0
        self.sub_q = subq
        self.sub_d = subd
        self.sub_d_neg = subd_neg
        if border is None: border = ngh
        assert border >= ngh, 'border has to be larger than ngh'
        self.border = border
        self.maxpool_pos = maxpool_pos
        self.precompute_offsets()
        self.scaling_step = scaling_step

    @staticmethod
    def _upscale_positions(pos, scaling_steps=0):
        # Feature-map -> image coordinates; mirrors the module-level
        # ``upscale_positions`` helper so the sampler is self-contained.
        for _ in range(scaling_steps):
            pos = pos * 2 + 0.5
        return pos

    @staticmethod
    def _downscale_positions(pos, scaling_steps=0):
        # Image -> feature-map coordinates; inverse of ``_upscale_positions``.
        for _ in range(scaling_steps):
            pos = (pos - 0.5) / 2
        return pos

    def precompute_offsets(self):
        """Precompute integer (x, y) offsets for positives and negatives.

        Offsets are enumerated on a grid with step ``sub_d`` inside the
        square ``[-rad, rad]^2`` and split by squared distance: positives
        within ``pos_d``, negatives in the ring ``[neg_d, ngh]``.
        """
        pos_d2 = self.pos_d ** 2
        neg_d2 = self.neg_d ** 2
        rad2 = self.ngh ** 2
        # Round the radius down to an integer multiple of the sampling step,
        # so the grid is symmetric and always contains the (0, 0) offset.
        # BUGFIX: was ``(self.ngh // self.sub_d) * self.ngh``, which for
        # sub_d > 1 could drop (0, 0) (leaving no positive offsets at all)
        # and for sub_d == 1 made the loop O(ngh^4) for identical output.
        rad = (self.ngh // self.sub_d) * self.sub_d
        pos = []
        neg = []
        for j in range(-rad, rad + 1, self.sub_d):
            for i in range(-rad, rad + 1, self.sub_d):
                d2 = i * i + j * j
                if d2 <= pos_d2:
                    pos.append((i, j))
                elif neg_d2 <= d2 <= rad2:
                    neg.append((i, j))

        # Stored as (2, N) buffers so they move with the module across devices.
        self.register_buffer('pos_offsets', torch.LongTensor(pos).view(-1, 2).t())
        self.register_buffer('neg_offsets', torch.LongTensor(neg).view(-1, 2).t())

    def gen_grid(self, step, aflow):
        """Generate query positions over a (B, C, H, W) tensor.

        Only the batch size, spatial size and device of ``aflow`` are used.

        Args:
            step: if > 0, a regular grid with this stride; if < 0, random
                positions at an equivalent density. Must not be 0.
            aflow: any (B, C, H, W) tensor giving the target geometry.

        Returns:
            (b, y, x, shape): flat batch/row/col index tensors and the
            logical grid shape ((B, H1, W1) or (B, n)).
        """
        B, _, H, W = aflow.shape
        dev = aflow.device
        b1 = torch.arange(B, device=dev)
        if step > 0:
            # Regular grid, keeping a margin of ``border`` pixels.
            x1 = torch.arange(self.border, W - self.border, step, device=dev)
            y1 = torch.arange(self.border, H - self.border, step, device=dev)
            H1, W1 = len(y1), len(x1)
            x1 = x1[None, None, :].expand(B, H1, W1).reshape(-1)
            y1 = y1[None, :, None].expand(B, H1, W1).reshape(-1)
            b1 = b1[:, None, None].expand(B, H1, W1).reshape(-1)
            shape = (B, H1, W1)
        else:
            # Randomly spread positions, one set shared across the batch.
            n = (H - 2 * self.border) * (W - 2 * self.border) // step ** 2
            x1 = torch.randint(self.border, W - self.border, (n,), device=dev)
            y1 = torch.randint(self.border, H - self.border, (n,), device=dev)
            x1 = x1[None, :].expand(B, n).reshape(-1)
            y1 = y1[None, :].expand(B, n).reshape(-1)
            b1 = b1[:, None].expand(B, n).reshape(-1)
            shape = (B, n)
        return b1, y1, x1, shape

    def forward(self, outputs, inputs):
        """Score sampled queries against positives, negatives and distractors.

        Args:
            outputs: dict with ``"preds1"``/``"preds2"`` entries whose
                ``"xf"`` values are dense descriptor maps (B, D, H, W).
            inputs: dict with ``"aflow"``, the absolute flow (B, 2, Hi, Wi)
                at input-image resolution, giving for every image-1 pixel its
                (x, y) position in image 2.

        Returns:
            scores: (Q, P + N [+ M]) descriptor dot-products per query:
                positive column(s) first, then negatives (and distractors).
            gt: uint8 tensor like ``scores``, 1 on the positive columns.
            mask: per-query validity (GT match lands inside the map),
                reshaped to the query-grid shape.
        """
        feat1 = outputs["preds1"]["xf"]
        feat2 = outputs["preds2"]["xf"]
        aflow = inputs["aflow"]

        B, D, H, W = feat1.shape
        # Query positions on the descriptor map of the first image.
        b1, y1, x1, shape = self.gen_grid(self.sub_q, feat1)
        # Same positions at input-image resolution, used to index aflow.
        y1_up = self._upscale_positions(y1, self.scaling_step).long()
        x1_up = self._upscale_positions(x1, self.scaling_step).long()
        feat1 = feat1[b1, :, y1, x1]  # (Q, D) query descriptors

        # Ground-truth match positions in the second image, rounded to the
        # nearest pixel, then brought back to descriptor-map resolution.
        b2 = b1
        xy2_up = (aflow[b1, :, y1_up, x1_up] + 0.5).long().t()
        xy2 = self._downscale_positions(xy2_up, self.scaling_step).long()
        # A query is valid only if its GT match lies inside the map.
        mask = (0 <= xy2[0]) * (0 <= xy2[1]) * (xy2[0] < W) * (xy2[1] < H)
        mask = mask.view(shape)

        def clamp(xy):
            # In-place clamp of stacked (x, y) index tensors to map bounds.
            torch.clamp(xy[0], 0, W - 1, out=xy[0])
            torch.clamp(xy[1], 0, H - 1, out=xy[1])
            return xy

        # Positive scores: dot products around the GT match position.
        xy2p = clamp(xy2[:, None, :] + self.pos_offsets[:, :, None])
        pscores = (feat1[None, :, :] * feat2[b2, :, xy2p[1], xy2p[0]]).sum(dim=-1).t()
        if self.maxpool_pos:
            # Keep only the best positive score per query.
            pscores, _ = pscores.max(dim=1, keepdim=True)

        # Negative scores: ring of offsets in [neg_d, ngh] around the match.
        xy2n = clamp(xy2[:, None, :] + self.neg_offsets[:, :, None])
        nscores = (feat1[None, :, :] * feat2[b2, :, xy2n[1], xy2n[0]]).sum(dim=-1).t()

        if self.sub_d_neg:
            # Extra distractors sampled on a regular grid over image 2.
            b3, y3, x3, _ = self.gen_grid(self.sub_d_neg, feat2)
            distractors = feat2[b3, :, y3, x3]
            dscores = torch.matmul(feat1, distractors.t())
            del distractors

            # Zero out distractors that are actually positives: closer than
            # neg_d to the GT match and in the same batch element.
            dis2 = (x3 - xy2[0][:, None]) ** 2 + (y3 - xy2[1][:, None]) ** 2
            dis2 += (b3 != b2[:, None]).long() * self.neg_d ** 2
            dscores[dis2 < self.neg_d ** 2] = 0

            scores = torch.cat((pscores, nscores, dscores), dim=1)
        else:
            # BUGFIX: ``scores`` was previously left undefined on this path
            # (subd_neg == 0), raising NameError below.
            scores = torch.cat((pscores, nscores), dim=1)

        gt = scores.new_zeros(scores.shape, dtype=torch.uint8)
        gt[:, :pscores.shape[1]] = 1  # positive columns come first
        return scores, gt, mask