from model.SegMatch import SegMatch
import yaml
import cv2
import torch
import numpy as np
from mmseg.ops import resize
from loss.Segmatchloss import *
import torch
from tools.plotting import plot_matches


def extract(img1_path, img2_path, model):
    """Load an image pair from disk and run the model on it.

    Args:
        img1_path: path to the first image file.
        img2_path: path to the second image file.
        model: SegMatch network; called with a dict of image tensors.

    Returns:
        Tuple ``(outputs, inputs)``: the model's predictions and the dict
        that was fed to it (GPU tensors plus the raw numpy images kept as
        ``img*_meta`` entries for later keypoint extraction).
    """
    def _to_tensor(img):
        # HWC uint8 image -> normalized (1, C, H, W) float tensor on the GPU.
        t = torch.tensor(img / 255).unsqueeze(0)
        return t.permute(0, 3, 1, 2).to("cuda").float()

    # cv2.imread returns BGR numpy arrays.
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)
    inputs = {
        "img1": _to_tensor(img1),
        "img2": _to_tensor(img2),
        "img1_meta": img1,
        "img2_meta": img2,
    }
    outputs = model(inputs)
    return outputs, inputs

def generate_query_kpts(img, num_pts, h, w, mode='mixed'):
    """Detect query keypoints in an image.

    Args:
        img: HWC numpy image; converted to grayscale before detection.
        num_pts: maximum number of keypoints to keep, or ``None`` for the
            detector's own default (unlimited for SIFT).
        h, w: image height/width. Unused by the 'sift' mode; kept for
            interface compatibility with other (unimplemented) modes.
        mode: keypoint source. Only 'sift' is implemented; note the
            default 'mixed' is itself unsupported and will raise.

    Returns:
        (N, 3) numpy array of homogeneous keypoint coordinates [x, y, 1].

    Raises:
        NotImplementedError: for any mode other than 'sift'.
    """
    if mode != 'sift':
        # Previously any other mode fell through and implicitly returned
        # None, which crashed callers later; fail fast instead.
        raise NotImplementedError(f"keypoint mode {mode!r} is not implemented")
    gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
    # cv2.SIFT_create requires an integer nfeatures; treat None as
    # "use the detector default" (the caller in this file passes None).
    if num_pts is None:
        sift = cv2.SIFT_create()
    else:
        sift = cv2.SIFT_create(nfeatures=num_pts)
    kp1 = sift.detect(gray1)
    # Homogeneous coordinates: one (x, y, 1) row per detected keypoint.
    coord = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1])
    return coord

def mnn_matcher(descriptors_a, descriptors_b):
    """Mutual nearest-neighbor matching between two descriptor sets.

    Args:
        descriptors_a: (N, D) tensor of descriptors.
        descriptors_b: (M, D) tensor of descriptors.

    Returns:
        (K, 2) numpy int array of index pairs ``(i, j)`` such that row i of
        ``descriptors_a`` and row j of ``descriptors_b`` are each other's
        best match under the dot-product similarity ``a @ b.T``.
    """
    similarity = torch.mm(descriptors_a, descriptors_b.t())
    # Best candidate in b for every row of a, and vice versa.
    best_ab = similarity.argmax(dim=1)
    best_ba = similarity.argmax(dim=0)
    row_ids = torch.arange(0, similarity.shape[0], device=descriptors_a.device)
    # Keep only cycle-consistent (mutual) pairs: a->b->a returns to the row.
    mutual = best_ba[best_ab] == row_ids
    pairs = torch.stack([row_ids[mutual], best_ab[mutual]], dim=1)
    return pairs.cpu().numpy()
def get_mactches(inputs, outputs, detector):
    """Compute mutual-NN correspondences between the image pair in `inputs`.

    (The misspelled name is kept for backward compatibility with callers.)

    Args:
        inputs: dict produced by `extract`: 'img1' is a (B, C, H, W) tensor,
            'img1_meta'/'img2_meta' are the raw numpy images.
        outputs: model outputs; dense feature maps are read from
            ``outputs['preds1']['xf']`` and ``outputs['preds2']['xf']``.
        detector: keypoint detector name; only 'sift' is supported.

    Returns:
        (K, 4) numpy array; each row is (x1, y1, x2, y2) for one match.

    Raises:
        ValueError: if `detector` is not 'sift'.
    """
    if detector != 'sift':
        # Previously an unsupported detector fell through to a NameError
        # on the undefined keypoint arrays; reject it explicitly.
        raise ValueError(f"unsupported detector: {detector!r}")
    preds1 = outputs['preds1']
    preds2 = outputs['preds2']
    b, c, h, w = inputs['img1'].shape
    cur_img1 = inputs['img1_meta']
    cur_img2 = inputs['img2_meta']
    # Detect SIFT keypoints on the raw images; num_pts=None keeps all.
    # (The h/w arguments are unused by the 'sift' mode.)
    kps1_np = generate_query_kpts(cur_img1, None, h, w, "sift")
    kps2_np = generate_query_kpts(cur_img2, None, h, w, "sift")
    cur_kps1 = torch.tensor(kps1_np)[:, :2].float().to("cuda")
    cur_kps2 = torch.tensor(kps2_np)[:, :2].float().to("cuda")
    # Presumably maps pixel coords into the feature-sampling coordinate
    # frame (defined in loss.Segmatchloss) — TODO confirm convention.
    cur_kps1_n = normalize_coords(cur_kps1, h, w).unsqueeze(0)
    cur_kps2_n = normalize_coords(cur_kps2, h, w).unsqueeze(0)
    # Sample one descriptor per keypoint from each dense feature map.
    cur_desc1 = sample_feat_by_coord(preds1['xf'], cur_kps1_n, True).squeeze(0)
    cur_desc2 = sample_feat_by_coord(preds2['xf'], cur_kps2_n, True).squeeze(0)
    cur_matches = mnn_matcher(cur_desc1, cur_desc2)
    # Concatenate matched (x, y) pairs side by side: (x1, y1, x2, y2).
    matches = np.concatenate(
        (kps1_np[cur_matches[:, 0]][:, :2], kps2_np[cur_matches[:, 1]][:, :2]),
        axis=1)
    return matches

if __name__ == "__main__":

    # Demo entry point: load a SegMatch model from per-submodule checkpoints,
    # run it on one ADE20K validation image pair, then visualize the
    # predicted segmentations and the feature matches.
    with open("config/inference.yaml", 'r') as f:
        config = yaml.load(f, Loader=yaml.FullLoader)
    model = SegMatch(config)
    # NOTE(review): hard-coded local checkpoint path — parameterize before reuse.
    weight_path = "/home/liyuke/lyk_work/segmatch/ckpts_Segonly/segmentation/020"
    # Each submodule has its own .pth file (backbone.pth, neck.pth, ...).
    for name in ['backbone',"neck","decode_head"]:
        model_path = weight_path +'/{}.pth'.format(name)
        m = getattr(model,name)
        model_param = torch.load(model_path, map_location='cuda')
        m.load_state_dict(model_param)

    # for part in ['backbone',"neck","decode_head","match_head"]:
    #     m = getattr(model,part)
    #     new_param = {}
    #     for name, param in model_param.items():
    #         if name.startswith(part):
    #             new_name = name.split(part+".")[1]
    #             new_param[new_name] = param

        # m.load_state_dict(new_param)
    model.eval()
    # NOTE(review): hard-coded dataset paths — replace for other machines.
    img1_path = "/media/liyuke/dataset/ADEChallengeData2016/images/validation/ADE_val_00000001.jpg"
    img2_path = "/media/liyuke/dataset/ADEChallengeData2016/images/validation/ADE_val_00000002.jpg"
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)
    # Forward pass on the pair; `inputs` keeps the raw images as metadata.
    outputs,inputs = extract(img1_path,img2_path,model)
    # Upsample each segmentation logit map to its image's resolution,
    # then take the per-pixel argmax to get class-id label maps.
    seg_pre1 = outputs["preds1"]["seg_pre"]
    seg_pre2 = outputs["preds2"]["seg_pre"]
    seg_pre1 = resize(
                input=seg_pre1,
                size=img1.shape[0:2],
                mode='bilinear',
                align_corners=False)
    seg_pre1 = seg_pre1.argmax(dim = 1)
    seg_pre2 = resize(
                input=seg_pre2,
                size=img2.shape[0:2],
                mode='bilinear',
                align_corners=False)
    seg_pre2 = seg_pre2.argmax(dim = 1)
    # To HWC uint8 numpy for cv2.imshow (class ids shown as gray levels).
    seg_pre1 = np.array(seg_pre1.cpu(),dtype=np.uint8).transpose(1,2,0)
    seg_pre2 = np.array(seg_pre2.cpu(),dtype=np.uint8).transpose(1,2,0)
    # get correspondences
    pre_matches = get_mactches(inputs,outputs,"sift")
    # Draw only the first 100 matches to keep the plot readable.
    plot_matches(img1,img2,pre_matches[:100,:],lines=True)
    cv2.imshow("seg1",seg_pre1)
    cv2.imshow("seg2",seg_pre2)
    cv2.imshow("image1",img1)
    cv2.imshow("image2",img2)

    cv2.waitKey()


