from mmseg.ops import resize
import torch.nn as nn
import torch.nn.functional as F
import torch

def normalize_coords(coord, h, w):
    '''
    turn the coordinates from pixel indices to the range of [-1, 1]
    :param coord: [..., 2] pixel coordinates, last dim ordered (x, y)
    :param h: the image height
    :param w: the image width
    :return: the normalized coordinates [..., 2]
    '''
    # The image center in pixel space; dividing by it maps [0, w-1] / [0, h-1]
    # onto [-1, 1], matching grid_sample's expected coordinate convention.
    center = torch.tensor(
        [(w - 1) / 2.0, (h - 1) / 2.0],
        dtype=torch.float32,
        device=coord.device,
    )
    return (coord - center) / center

def sample_feat_by_coord(x, coord_n, norm=False):
    '''
    sample from normalized coordinates
    :param x: feature map [batch_size, n_dim, h, w]
    :param coord_n: normalized coordinates in [-1, 1], [batch_size, n_pts, 2]
    :param norm: if l2 normalize features
    :return: the extracted features, [batch_size, n_pts, n_dim]
    '''
    # grid_sample wants a 4-D grid: [batch, n_pts, 1, 2].
    grid = coord_n.unsqueeze(2)
    sampled = F.grid_sample(x, grid, padding_mode='zeros', align_corners=False)
    # [batch, n_dim, n_pts, 1] -> [batch, n_dim, n_pts]
    sampled = sampled.squeeze(-1)
    if norm:
        # L2-normalize each point's descriptor across the channel dim.
        sampled = F.normalize(sampled, p=2, dim=1)
    # Reorder to [batch, n_pts, n_dim] for downstream matrix products.
    return sampled.transpose(1, 2)

class SegMatchloss(nn.Module):
    """Paired-image segmentation loss (with a disabled descriptor-matching term).

    ``forward`` expects:
      - ``outputs``: dict with ``preds1``/``preds2``, each holding ``seg_pre``
        logits of shape [B, 150, h, w]
      - ``inputs``: dict with ``img1_label``/``img2_label``, 1-based class maps
        where label 0 means "ignore"
    and returns ``(loss, accuracy)`` — the summed cross-entropy over both
    images and the mean pixel accuracy over valid (non-ignored) pixels.
    """

    def __init__(self,):
        super(SegMatchloss, self).__init__()

    def dual_softmax(self, PT, scores):
        """Negative mean log-score of the ground-truth matches.

        :param PT: [2, n_matches] indices; row 0 indexes keypoints in image 1,
            row 1 indexes keypoints in image 2
        :param scores: [1, m, n] dual-softmax log-score matrix
        :return: scalar loss tensor
        """
        P_loss = scores[0, PT[0, :], PT[1, :]]
        loss = -P_loss.mean()
        return loss

    @staticmethod
    def _seg_loss_and_acc(seg_logit, seg_label, num_classes=150):
        """Cross-entropy loss and pixel accuracy for one image.

        Labels are 1-based with 0 = ignore; shifting by -1 makes ignored
        pixels -1 so a simple ``>= 0`` mask selects the valid ones.

        :param seg_logit: [B, num_classes, h, w] logits (already resized)
        :param seg_label: [B, h, w] 1-based label map, 0 = ignore
        :return: (scalar loss, scalar accuracy) on seg_logit's device
        """
        pred = seg_logit.permute(0, 2, 3, 1).contiguous().view(-1, num_classes)
        label = (seg_label.long() - 1).view(-1)
        valid = label >= 0
        n_valid = valid.sum()
        # BUG FIX: the original guard was `valid.sum() >= 0`, which is always
        # true, so cross_entropy ran on an empty selection (NaN) when no pixel
        # had a valid label.
        if n_valid > 0:
            loss = F.cross_entropy(pred[valid], label[valid])
        else:
            # Keep the zero fallback on the right device for later arithmetic.
            loss = torch.tensor(0.0, device=pred.device)
        predicted = pred.argmax(dim=1)
        correct = ((predicted == label) & valid).float().sum()
        # BUG FIX: divide by the number of *valid* pixels; the original divided
        # by all pixels, so ignored pixels silently dragged accuracy down.
        accuracy = correct / n_valid.clamp(min=1).float()
        return loss, accuracy

    def forward(self, outputs, inputs):
        """Compute segmentation loss and accuracy for an image pair."""
        preds1 = outputs["preds1"]
        preds2 = outputs["preds2"]
        seg_logit1 = preds1["seg_pre"]
        seg_logit2 = preds2["seg_pre"]

        seg_label1 = inputs["img1_label"]
        seg_label2 = inputs["img2_label"]
        # resize logits to the original image size before comparing to labels
        seg_logit1 = resize(
            input=seg_logit1,
            size=seg_label1.shape[1:3],
            mode='bilinear',
            align_corners=False)
        seg_logit2 = resize(
            input=seg_logit2,
            size=seg_label2.shape[1:3],
            mode='bilinear',
            align_corners=False)

        # Per-image segmentation loss / accuracy (shared helper removes the
        # duplicated branch the original had for each image).
        segloss1, accuracy1 = self._seg_loss_and_acc(seg_logit1, seg_label1)
        segloss2, accuracy2 = self._seg_loss_and_acc(seg_logit2, seg_label2)

        # NOTE(review): the descriptor-matching term below is disabled
        # (work in progress); it is kept verbatim because it documents the
        # intended use of `dual_softmax`.
        # # descriptor loss
        # xf1 = preds1["xf"]
        # xf2 = preds2["xf"]
        # h1i, w1i = inputs['img1'].size()[2:]
        # h2i, w2i = inputs['img2'].size()[2:]
        # b, _, hf, wf = xf1.shape
        #
        # coord1 = inputs['coord1']
        # coord2 = inputs['coord2']
        # coord1_n = normalize_coords(coord1, h1i, w1i)
        # coord2_n = normalize_coords(coord2, h2i, w2i)
        # feat1_fine = sample_feat_by_coord(xf1, coord1_n.reshape(b, -1, 2), True)
        # feat2_fine = sample_feat_by_coord(xf2, coord2_n.reshape(b, -1, 2), True)
        #
        # all_matches = inputs["all_matches"][0]
        # # Segmentation mask
        # seg_mask1 = seg_logit1.argmax(dim=1)
        # seg_mask2 = seg_logit2.argmax(dim=1)
        # kp1_class = sample_feat_by_coord(seg_mask1.unsqueeze(0).float(), coord1_n.reshape(b, -1, 2), False).squeeze(0)
        # kp2_class = sample_feat_by_coord(seg_mask2.unsqueeze(0).float(), coord2_n.reshape(b, -1, 2), False).squeeze(0)
        # match_mask1 = kp1_class[all_matches[0]].int()
        # match_mask2 = kp2_class[all_matches[1]].int()
        # mask = match_mask1==match_mask2
        # cos_sim = (feat1_fine @ feat2_fine.transpose(1, 2)) / 0.1  # bxmxn
        # cos_sim[0,all_matches[0,:], all_matches[1,:]] *= mask[:,0]
        # cos_sim = cos_sim.clamp(min=1e-4)
        # scores0 = F.log_softmax(cos_sim, 2)
        # scores1 = F.log_softmax(cos_sim.transpose(-1, -2).contiguous(), 2).transpose(-1, -2)
        # scores = scores0 + scores1
        # matchloss = self.dual_softmax(all_matches, scores)

        # BUG FIX: `loss` was only assigned in the commented-out line
        # `loss = segloss1+segloss2+matchloss`, so the original raised
        # NameError on every call. With the matching term disabled, the
        # total loss is the sum of the two segmentation losses.
        loss = segloss1 + segloss2
        accuracy = (accuracy1 + accuracy2) / 2
        return loss, accuracy