from evaluation.utils import *
from PIL import Image
import cv2
import numpy as np
from tqdm import tqdm
import os
from evaluation.tools import *
from evaluation.superpoint.mega_pose_est_mnn import get_desc

def extract(img1_path, img2_path, model):
    """Load two images, preprocess them, and run the descriptor model on each.

    Both images are read as grayscale, resized to 640x480, scaled to [0, 1],
    and sent to the GPU as (1, 1, H, W) float tensors.

    Args:
        img1_path: Path to the first image file.
        img2_path: Path to the second image file.
        model: Callable taking ``{'image': tensor}`` and returning the
            per-image descriptor output.

    Returns:
        Tuple ``(output1, output2)`` of the model outputs for each image.

    Raises:
        FileNotFoundError: If either image cannot be read.
    """
    img1 = cv2.imread(img1_path, cv2.IMREAD_GRAYSCALE)
    if img1 is None:
        # cv2.imread returns None (no exception) on a missing/unreadable file;
        # fail loudly here instead of crashing inside cv2.resize below.
        raise FileNotFoundError(f"Could not read image: {img1_path}")
    img2 = cv2.imread(img2_path, cv2.IMREAD_GRAYSCALE)
    if img2 is None:
        raise FileNotFoundError(f"Could not read image: {img2_path}")
    # Trailing channel axis so the HWC -> NCHW permute below works on grayscale.
    img1 = cv2.resize(img1, (640, 480))[:, :, None]
    img2 = cv2.resize(img2, (640, 480))[:, :, None]
    img_tensor1 = torch.tensor(img1 / 255).unsqueeze(0).permute(0, 3, 1, 2).to("cuda").float()
    img_tensor2 = torch.tensor(img2 / 255).unsqueeze(0).permute(0, 3, 1, 2).to("cuda").float()

    output1 = model({'image': img_tensor1})
    output2 = model({'image': img_tensor2})

    return output1, output2



def np_skew_symmetric(v):
    """Build flattened 3x3 skew-symmetric (cross-product) matrices.

    For each row ``[x, y, z]`` of ``v`` (shape (N, 3)), produces the
    row-major flattening of ``[[0, -z, y], [z, 0, -x], [-y, x, 0]]``,
    yielding an array of shape (N, 9).
    """
    x, y, z = v[:, 0], v[:, 1], v[:, 2]
    o = np.zeros_like(x)
    return np.stack((o, -z, y, z, o, -x, -y, x, o), axis=1)

class GL3DPoseMNNBenchmark():
    """Relative-pose benchmark on GL3D scenes using mutual-NN descriptor matching.

    Expects ``data_root`` to contain ``test.txt`` (one scene name per line)
    and a ``data/`` directory with, per scene, ``basenames.txt``,
    ``geolabel/cameras.txt``, ``geolabel/common_track.txt`` and the
    ``undist_images`` folder.
    """

    def __init__(self, data_root=""):
        assert os.path.exists(os.path.join(data_root, "test.txt"))
        with open(os.path.join(data_root, "test.txt"), "r") as f:
            scenes_list = f.readlines()
        self.scenes_list = [line.strip() for line in scenes_list]
        self.data_root = os.path.join(data_root, "data")

    def load_geom(self, root, scene):
        """Load per-image camera geometry for ``scene``.

        Returns a list aligned with ``basenames.txt``: each entry is either
        ``None`` (no camera row for that image) or a dict with intrinsics
        ``K`` (rescaled to a 1000x1000 image), rotation ``R``, translation
        ``T`` and the working image ``size``.
        """
        geom_file = os.path.join(root, scene, 'geolabel', 'cameras.txt')
        basename_list = np.loadtxt(os.path.join(root, scene, 'basenames.txt'), dtype=str)
        geom_dict = []
        cameras = np.loadtxt(geom_file)
        camera_index = 0
        for base_index in range(len(basename_list)):
            # cameras.txt may skip images; column 0 of each row is the image
            # index it belongs to, so earlier images get no geometry.
            if base_index < cameras[camera_index][0]:
                geom_dict.append(None)
                continue
            cur_geom = {}
            ori_img_size = [cameras[camera_index][-2], cameras[camera_index][-1]]
            scale_factor = [1000. / ori_img_size[0], 1000. / ori_img_size[1]]
            K = np.asarray([[cameras[camera_index][1], cameras[camera_index][5], cameras[camera_index][3]],
                            [0, cameras[camera_index][2], cameras[camera_index][4]],
                            [0, 0, 1]])
            # Rescale calibration according to the 1000x1000 working resolution.
            S = np.asarray([[scale_factor[0], 0, 0],
                            [0, scale_factor[1], 0],
                            [0, 0, 1]])
            K = np.dot(S, K)
            cur_geom["K"] = K
            cur_geom['R'] = cameras[camera_index][9:18].reshape([3, 3])
            cur_geom['T'] = cameras[camera_index][6:9]
            cur_geom['size'] = np.asarray([1000, 1000])
            geom_dict.append(cur_geom)
            camera_index += 1
        return geom_dict

    def benchmark(self, desc_model, matcher_model, model_name=None, resolution=None, scale_intrinsics=False, calibrated=True):
        """Run the pose benchmark over every scene in ``self.scenes_list``.

        For each annotated image pair, extracts descriptors with
        ``desc_model``, matches them, estimates the relative pose, and
        records the rotation/translation angular errors (failed estimations
        count as 90 degrees). Returns a dict of AUC and mAP summary metrics
        aggregated over ALL scenes.
        """
        with torch.no_grad():
            tot_e_t, tot_e_R, tot_e_pose = [], [], []
            thresholds = [5, 10, 20]
            for scene in self.scenes_list:
                # Load annotation info for this scene.
                basename_list = np.loadtxt(os.path.join(self.data_root, scene, 'basenames.txt'), dtype=str)
                pair_list = np.loadtxt(os.path.join(self.data_root, scene, 'geolabel', 'common_track.txt'),
                                       dtype=float)[:, :2].astype(int)
                geom_dict = self.load_geom(self.data_root, scene)
                for cur_pair in tqdm(pair_list):
                    pair_index1, pair_index2 = cur_pair[0], cur_pair[1]
                    geo1, geo2 = geom_dict[pair_index1], geom_dict[pair_index2]
                    # Ground-truth relative pose from image 1 to image 2.
                    dR = np.dot(geo2['R'], geo1['R'].T)
                    t1, t2 = geo1["T"].reshape([3, 1]), geo2["T"].reshape([3, 1])
                    dt = t2 - np.dot(dR, t1)
                    K1, K2 = geo1['K'], geo2['K']
                    basename1, basename2 = basename_list[pair_index1], basename_list[pair_index2]
                    img_path1 = os.path.join(self.data_root, scene, 'undist_images', basename1 + '.jpg')
                    img_path2 = os.path.join(self.data_root, scene, 'undist_images', basename2 + '.jpg')
                    output1, output2 = extract(img_path1, img_path2, desc_model)
                    matches = get_desc(output1, output2, min_cossim=0.5)
                    matches_A, matches_B = matches[:, :2], matches[:, 2:]
                    im_A = Image.open(img_path1)
                    w1, h1 = im_A.size
                    im_B = Image.open(img_path2)
                    w2, h2 = im_B.size
                    if scale_intrinsics:
                        # Rescale intrinsics to match a longest-side-840 resize.
                        scale1 = 840 / max(w1, h1)
                        scale2 = 840 / max(w2, h2)
                        w1, h1 = scale1 * w1, scale1 * h1
                        w2, h2 = scale2 * w2, scale2 * h2
                        K1, K2 = K1.copy(), K2.copy()
                        K1[:2] = K1[:2] * scale1
                        K2[:2] = K2[:2] * scale2
                    kpts1, kpts2 = matcher_model.to_pixel_coords(matches_A, matches_B, h1, w1, h2, w2)
                    # Shuffle correspondences so RANSAC sampling is order-independent.
                    shuffling = np.random.permutation(np.arange(len(kpts1)))
                    kpts1 = kpts1[shuffling]
                    kpts2 = kpts2[shuffling]
                    try:
                        threshold = 0.5
                        if not calibrated:
                            # Previously this path hit a NameError on R_est;
                            # make the unsupported mode explicit instead.
                            raise NotImplementedError("uncalibrated pose estimation is not supported")
                        norm_threshold = threshold / (np.mean(np.abs(K1[:2, :2])) + np.mean(np.abs(K2[:2, :2])))
                        R_est, t_est, mask = estimate_pose(
                            kpts1.cpu().numpy(),
                            kpts2.cpu().numpy(),
                            K1,
                            K2,
                            norm_threshold,
                            conf=0.99999,
                        )
                        T1_to_2_est = np.concatenate((R_est, t_est), axis=-1)
                        e_t, e_R = compute_pose_error(T1_to_2_est, dR, dt)
                        e_pose = max(e_t, e_R)
                    except Exception as e:
                        # Failed estimation counts as a maximal 90-degree error.
                        print(repr(e))
                        e_t, e_R = 90, 90
                        e_pose = max(e_t, e_R)
                    tot_e_t.append(e_t)
                    tot_e_R.append(e_R)
                    tot_e_pose.append(e_pose)
            # Aggregate over ALL scenes. (The summary/return used to sit inside
            # the scene loop, so only the first scene was ever evaluated.)
            tot_e_pose = np.array(tot_e_pose)
            auc = pose_auc(tot_e_pose, thresholds)
            acc_5 = (tot_e_pose < 5).mean()
            acc_10 = (tot_e_pose < 10).mean()
            acc_15 = (tot_e_pose < 15).mean()
            acc_20 = (tot_e_pose < 20).mean()
            map_5 = acc_5
            map_10 = np.mean([acc_5, acc_10])
            map_20 = np.mean([acc_5, acc_10, acc_15, acc_20])
            print(f"{model_name} auc: {auc}")
            return {
                "auc_5": auc[0],
                "auc_10": auc[1],
                "auc_20": auc[2],
                "map_5": map_5,
                "map_10": map_10,
                "map_20": map_20,
            }

