import cv2
import numpy as np
import torch
from evaluation.utils import *
from PIL import Image
from tqdm import tqdm
import torch.nn.functional as F
from evaluation.tools import *


def extract(img1_path, img2_path, model):
    """Match two images with XFeat and return the paired keypoints.

    Args:
        img1_path: path to the first image (read with cv2, BGR order).
        img2_path: path to the second image.
        model: matcher exposing ``match_xfeat(img1, img2, min_cossim)``.

    Returns:
        A 1 x N x 4 tensor where each row is (x1, y1, x2, y2) for one match.
    """
    img1 = cv2.imread(img1_path)
    img2 = cv2.imread(img2_path)

    # min_cossim=0.5 drops matches whose descriptor cosine similarity is low.
    kp1, kp2 = model.match_xfeat(img1, img2, min_cossim=0.5)
    matches = torch.concatenate((kp1, kp2), axis=1)
    return matches.unsqueeze(0)


def generate_query_kpts(img, mode='mixed'):
    """Detect query keypoints in ``img``.

    Args:
        img: RGB image as a numpy array.
        mode: detector to use; only 'sift' is currently implemented.

    Returns:
        coord: (N, 3) array of homogeneous keypoint coordinates (x, y, 1).
        score: (N,) array of SIFT detector responses.

    Raises:
        ValueError: for an unsupported ``mode``.  The original fell through
        and returned None, which made callers crash on tuple unpacking.
    """
    if mode == 'sift':
        gray1 = cv2.cvtColor(img, cv2.COLOR_RGB2GRAY)
        sift = cv2.SIFT_create(2000)  # keep at most the 2000 strongest keypoints
        kp1 = sift.detect(gray1)
        coord = np.array([[kp.pt[0], kp.pt[1], 1] for kp in kp1])
        score = np.array([kp.response for kp in kp1])
        return coord, score
    raise ValueError(f"unsupported keypoint mode: {mode!r}")


def mnn_matcher(descriptors_a, descriptors_b):
    """Mutual-nearest-neighbour matching under dot-product similarity.

    Args:
        descriptors_a: (M, D) tensor of descriptors.
        descriptors_b: (N, D) tensor of descriptors.

    Returns:
        (K, 2) tensor of index pairs (i, j) such that a[i] and b[j] are
        each other's nearest neighbour.
    """
    similarity = descriptors_a @ descriptors_b.t()
    best_b_for_a = torch.max(similarity, dim=1).indices  # nearest b for each a
    best_a_for_b = torch.max(similarity, dim=0).indices  # nearest a for each b
    a_indices = torch.arange(0, similarity.shape[0], device=descriptors_a.device)
    # A pair is kept only when the nearest-neighbour relation is mutual.
    mutual = best_a_for_b[best_b_for_a] == a_indices
    paired = torch.stack([a_indices[mutual], best_b_for_a[mutual]])
    return paired.t()


def get_desc(inputs, outputs, detector):
    """Match keypoints between an image pair via MNN on sampled descriptors.

    Args:
        inputs: dict with 'img1' (B, C, H, W tensor, used only for H/W) and
            'img1_meta' / 'img2_meta' (raw images fed to the keypoint detector).
        outputs: dict with 'preds1' / 'preds2', each holding a dense feature
            map 'xf' and optionally segmentation logits 'seg_pre'.
        detector: keypoint detector name; only 'sift' is supported.

    Returns:
        (K, 4) tensor of matches in normalized coordinates (x1, y1, x2, y2).

    Raises:
        ValueError: for an unsupported ``detector`` (the original fell
        through and crashed later with a NameError on cur_kps1_n).
    """
    preds1 = outputs['preds1']
    preds2 = outputs['preds2']
    b, c, h, w = inputs['img1'].shape
    cur_img1 = inputs['img1_meta']
    cur_img2 = inputs['img2_meta']
    if detector != 'sift':
        raise ValueError(f"unsupported detector: {detector!r}")
    kps1_np, score1 = generate_query_kpts(cur_img1, "sift")
    kps2_np, score2 = generate_query_kpts(cur_img2, "sift")
    cur_kps1 = torch.tensor(kps1_np)[:, :2].float().to("cuda")
    cur_kps2 = torch.tensor(kps2_np)[:, :2].float().to("cuda")
    cur_kps1_n = normalize_coords(cur_kps1, h, w).unsqueeze(0)
    cur_kps2_n = normalize_coords(cur_kps2, h, w).unsqueeze(0)

    cur_desc1 = sample_feat_by_coord(preds1['xf'], cur_kps1_n, False)
    cur_desc2 = sample_feat_by_coord(preds2['xf'], cur_kps2_n, False)
    cur_matches = mnn_matcher(cur_desc1[0], cur_desc2[0])

    mkp1 = cur_kps1_n[0, cur_matches[:, 0]]
    mkp2 = cur_kps2_n[0, cur_matches[:, 1]]

    # Optional semantic filtering: keep only matches whose two endpoints fall
    # into the same predicted segmentation class.  Guard BEFORE touching
    # 'seg_pre' — the original indexed preds['seg_pre'] unconditionally,
    # which made its membership check dead code and left `mask` undefined
    # whenever the key was absent.
    if "seg_pre" in preds1 and "seg_pre" in preds2:
        seg_cls1 = preds1["seg_pre"].argmax(dim=1)
        seg_cls2 = preds2["seg_pre"].argmax(dim=1)
        kp1_class = sample_feat_by_coord(seg_cls1.unsqueeze(1).float(),
                                         mkp1.reshape(1, -1, 2), False).squeeze(0).int()
        kp2_class = sample_feat_by_coord(seg_cls2.unsqueeze(1).float(),
                                         mkp2.reshape(1, -1, 2), False).squeeze(0).int()
        keep = (kp1_class == kp2_class)[:, 0]
        mkp1, mkp2 = mkp1[keep], mkp2[keep]

    matches = torch.concatenate((mkp1, mkp2), axis=1)
    return matches


def compute_sampson(matches, F1, F2):
    """Symmetric epipolar (point-to-line) distance for a batch of matches.

    NOTE(review): despite the name this computes the symmetric epipolar
    distance, not the Sampson error — confirm the intended metric.

    Args:
        matches: (B, M, 4) tensor; each row is (x1, y1, x2, y2) in pixels.
        F1: (3, 3) fundamental matrix mapping image-1 points to epipolar
            lines in image 2.
        F2: (3, 3) fundamental matrix for the reverse direction.

    Returns:
        (B*M,) float32 numpy array with the averaged distance per match.
    """
    coord1_h = homogenize(matches[:, :, :2]).transpose(1, 2)  # Bx3xM
    coord2_h = homogenize(matches[:, :, 2:]).transpose(1, 2)  # Bx3xM
    fmatrix = torch.tensor(F1[None, ]).float()
    fmatrix2 = torch.tensor(F2[None, ]).float()

    # The per-pair distance is the diagonal of line^T @ coords.  Computing it
    # elementwise avoids building the full MxM matrix that the original
    # materialized only to extract its diagonal with a numpy loop.
    line1 = fmatrix.bmm(coord1_h)
    line1 = line1 / torch.clamp(
        torch.norm(line1[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)
    dist12 = torch.abs((line1 * coord2_h).sum(dim=1))  # BxM

    line2 = fmatrix2.bmm(coord2_h)
    line2 = line2 / torch.clamp(
        torch.norm(line2[:, :2, :], p=2, dim=1, keepdim=True), min=1e-8)
    dist21 = torch.abs((line2 * coord1_h).sum(dim=1))  # BxM

    epipolar_d = (dist12 + dist21) / 2
    return epipolar_d.reshape(-1).cpu().numpy()


class MegaDepthFMNNBenchmark:
    """Epipolar-distance benchmark over MegaDepth-1500 scene pairs.

    Loads the precomputed scene .npz files (pair indices, intrinsics, poses,
    image paths) and, for every pair, matches the two images and accumulates
    the symmetric epipolar distance of the matches against the ground-truth
    fundamental matrices in both directions.
    """

    def __init__(self, data_root="/media/liyuke/share/megadepth1500", scene_names=None) -> None:
        # Default to the five standard MegaDepth-1500 overlap splits.
        if scene_names is None:
            self.scene_names = [
                "0015_0.1_0.3.npz",
                "0015_0.3_0.5.npz",
                "0022_0.1_0.3.npz",
                "0022_0.3_0.5.npz",
                "0022_0.5_0.7.npz",
            ]
        else:
            self.scene_names = scene_names
        self.scenes = [
            np.load(f"{data_root}/{scene}", allow_pickle=True)
            for scene in self.scene_names
        ]
        self.data_root = data_root

    def benchmark(self, descriptor_model, matcher_model, model_name=None, resolution=None, scale_intrinsics=False,
                  calibrated=True):
        """Run the benchmark and return AUC statistics of the epipolar error.

        Args:
            descriptor_model: model exposing ``match_xfeat``; used by ``extract``.
            matcher_model: unused here; kept for interface compatibility.
            model_name: label used in the result printout.
            resolution, scale_intrinsics, calibrated: unused; kept for
                interface compatibility with sibling benchmarks.

        Returns:
            dict with keys 'auc_5', 'auc_10', 'auc_20'.
            NOTE(review): pose_auc is called with thresholds [2, 5, 10] but
            the result keys say 5/10/20 — confirm which set is intended.
        """
        with torch.no_grad():
            data_root = self.data_root
            tot_d_all = []
            for scene_ind in range(len(self.scenes)):
                scene = self.scenes[scene_ind]
                pairs = scene["pair_infos"]
                intrinsics = scene["intrinsics"]
                poses = scene["poses"]
                im_paths = scene["image_paths"]
                for pairind in tqdm(range(len(pairs))):
                    idx1, idx2 = pairs[pairind][0]
                    K1 = intrinsics[idx1].copy()
                    T1 = poses[idx1].copy()
                    R1, t1 = T1[:3, :3], T1[:3, 3]
                    K2 = intrinsics[idx2].copy()
                    T2 = poses[idx2].copy()
                    R2, t2 = T2[:3, :3], T2[:3, 3]
                    # Relative poses in both directions: F_gt maps image-1
                    # points to epipolar lines in image 2, F_gt2 the reverse.
                    R, t = compute_relative_pose(R1, t1, R2, t2)
                    R_, t_ = compute_relative_pose(R2, t2, R1, t1)

                    E_gt = np.dot(skew(t), R)
                    F_gt = np.linalg.inv(K2).T.dot(E_gt).dot(np.linalg.inv(K1))

                    E_gt2 = np.dot(skew(t_), R_)
                    F_gt2 = np.linalg.inv(K1).T.dot(E_gt2).dot(np.linalg.inv(K2))

                    im_A_path = f"{data_root}/{im_paths[idx1]}"
                    im_B_path = f"{data_root}/{im_paths[idx2]}"
                    matches = extract(im_A_path, im_B_path, descriptor_model)

                    d = compute_sampson(matches, F_gt, F_gt2)
                    tot_d_all.extend(d)

            auc = pose_auc(tot_d_all, [2, 5, 10])
            print(f"{model_name} auc: {auc}")
            return {
                "auc_5": auc[0],
                "auc_10": auc[1],
                "auc_20": auc[2],
            }