import glob
from pathlib import Path
from utils.tools import *
import logging
from Matchers.superglue.superglue import SuperGlue

# from DataLoader.SequenceImageLoader import SequenceImageLoader
# from DataLoader.KITTILoader import KITTILoader
from Detectors.SuperPointDetector import SuperPointDetector

from models.utils import AverageTimer


import cv2


class SuperGlueMatcher(object):
    """Feature matcher wrapping the SuperGlue network.

    Consumes per-image SuperPoint output (keypoints / descriptors / scores
    under the "ref" and "cur" keys) and returns the mutually matched
    keypoints together with their matching confidence.
    """

    default_config = {
        "descriptor_dim": 256,
        "weights": "outdoor",
        "keypoint_encoder": [32, 64, 128, 256],
        "GNN_layers": ["self", "cross"] * 9,
        "sinkhorn_iterations": 100,
        "match_threshold": 0.2,
        "cuda": True
    }

    def __init__(self, config=None):
        """Build the SuperGlue model.

        Args:
            config: optional dict overriding entries of ``default_config``.
                Recognized extras: "device" (explicit torch device string)
                and "path" (explicit checkpoint file path).
        """
        # Never use a mutable {} default argument — it is shared between calls.
        config = {} if config is None else config
        self.config = {**self.default_config, **config}
        logging.info("SuperGlue matcher config: ")
        logging.info(self.config)

        # An explicit "device" wins; otherwise use CUDA when it is available
        # and the "cuda" option is an int/bool (note isinstance(True, int)
        # is True, so the default config selects CUDA).
        if "device" in self.config:
            self.device = self.config["device"]
        else:
            self.device = 'cuda' if torch.cuda.is_available() and isinstance(self.config["cuda"], int) else 'cpu'

        assert self.config['weights'] in ['indoor', 'outdoor']

        # Checkpoint path: honor a caller-supplied "path" (the old code
        # unconditionally overwrote it), else fall back to the historical
        # hard-coded location.
        # TODO(review): make the fallback relative to Path(__file__).parent
        # so the module is portable across machines.
        if "path" not in self.config:
            self.config["path"] = (
                "/home/daybeha/Documents/github/DeepLabV3_ws/src/superglue/Matchers/superglue"
                + '/superglue_{}.pth'.format(self.config['weights'])
            )

        logging.info("creating SuperGlue matcher...")
        self.superglue = SuperGlue(self.config).to(self.device)

    def __call__(self, pred):
        """Match the "ref" and "cur" detections in ``pred`` with SuperGlue.

        Args:
            pred: dict holding detector output for both images in the layout
                the SuperGlue forward pass expects (assumed to contain
                "ref"/"cur" entries with "keypoints" — TODO confirm against
                SuperPointDetector).

        Returns:
            dict with "ref_keypoints", "cur_keypoints" (matched keypoint
            coordinates) and "match_score" (per-match confidence tensor).
        """
        logging.debug("matching keypoints with superglue...")
        pred = {**pred, **self.superglue(pred)}

        kpts0 = pred["ref"]["keypoints"]
        kpts1 = pred["cur"]["keypoints"]

        # matches0[i] is the index in kpts1 matched to kpts0[i], or -1 for
        # "no match"; keep only valid pairs.
        valid = pred['matches0'][0] > -1
        mkpts0 = kpts0[valid]
        mkpts1 = kpts1[pred['matches0'][0][valid]]
        mconf = pred['matching_scores0'][0].cpu().detach()[valid]

        return {
            "ref_keypoints": mkpts0,
            "cur_keypoints": mkpts1,
            "match_score": mconf,
        }

class KITTILoader(object):
    """Iterator over the grayscale (image_0) frames of one KITTI sequence.

    On construction also loads the ground-truth poses for the sequence;
    they are available as ``self.gt_poses``, a list of 3x4 numpy arrays.
    """

    default_config = {
        "root_path": "/home/daybeha/Documents/Dataset/Kitti/sequences",
        "sequence": "00",
        "start": 0,
        "step": 1,
    }

    def __init__(self, config=None):
        """
        Args:
            config: optional dict overriding ``default_config`` entries
                (root_path, sequence, start frame, iteration step).
        """
        # Never use a mutable {} default argument — it is shared between calls.
        config = {} if config is None else config
        self.config = {**self.default_config, **config}
        logging.info("KITTI Dataset config: ")
        logging.info(self.config)

        # Ground-truth poses: one whitespace-separated 3x4 row-major matrix
        # per line. resize([3, 4]) reshapes in place (KITTI lines carry 12
        # values, so no padding occurs for well-formed files).
        self.pose_path = self.config["root_path"] + "/poses/" + self.config["sequence"] + ".txt"
        self.gt_poses = []
        with open(self.pose_path) as f:
            for line in f:
                pose = np.array([float(p) for p in line.strip().split()])
                pose.resize([3, 4])
                self.gt_poses.append(pose)

        # Iteration state and the number of frames found on disk.
        self.img_id = self.config["start"]
        self.step = self.config["step"]
        self.img_N = len(glob.glob(pathname=self.config["root_path"] + "/"
                                            + self.config["sequence"] + "/image_0/*.png"))

    def _image_path(self, idx):
        """Path of frame ``idx`` (6-digit zero-padded file name)."""
        return self.config["root_path"] + "/" + self.config["sequence"] \
               + "/image_0/" + str(idx).zfill(6) + ".png"

    def __getitem__(self, item):
        """Random access to frame ``item``.

        Bug fix: the old code inserted an extra "/sequences/" path segment
        here, inconsistent with __next__ and the glob in __init__ (the
        default root_path already ends in "/sequences").
        """
        return cv2.imread(self._image_path(item))

    def __iter__(self):
        return self

    def __next__(self):
        """Return the next frame and advance by ``step``; StopIteration at the end."""
        if self.img_id < self.img_N:
            img = cv2.imread(self._image_path(self.img_id))
            self.img_id += self.step
            return img
        raise StopIteration()

    def __len__(self):
        """Number of frames from the configured start to the sequence end."""
        return self.img_N - self.config["start"]


def match_score(i, j):
    """Match frames i and j of the sequence and summarize the result.

    Relies on the module-level globals set up in __main__: ``data_path``,
    ``imgs``, ``kptdescs``, ``detector`` and ``matcher``.

    Returns:
        (score, num): mean match confidence and the number of matches.
    """
    # Load and detect the reference frame first, then the current frame,
    # mirroring the order the matcher's inputs are filled elsewhere.
    for slot, frame_id in (("ref", i), ("cur", j)):
        file_name = data_path + "/image_0/" + str(frame_id).zfill(6) + ".png"
        imgs[slot] = cv2.imread(file_name)
        kptdescs[slot] = detector(imgs[slot])

    matches = matcher(kptdescs)

    confidences = matches["match_score"]
    score = np.mean(confidences.cpu().detach().numpy())
    num = confidences.shape[0]
    return score, num

if __name__ == "__main__":
    # Inference only — gradients are never needed.
    torch.set_grad_enabled(False)

    detector = SuperPointDetector({"cuda": 0})
    matcher = SuperGlueMatcher({"cuda": 0, "weights": "outdoor"})

    kptdescs = {'cur': None, 'ref': None}
    imgs = {}

    data_path = "/home/daybeha/Documents/Dataset/Kitti/sequences/05"
    img_num = len(glob.glob(pathname=data_path + "/image_0/*.png"))

    # Scan for loop-closure candidates: hold a reference frame i and compare
    # it against every frame at least 50 frames later (stride ``step``).
    i = 10
    step = 3
    while i < img_num - 51:
        # Detect features on the reference frame once per outer iteration.
        file_name = data_path + "/image_0/" + str(i).zfill(6) + ".png"
        imgs["ref"] = cv2.imread(file_name)
        kptdescs["ref"] = detector(imgs["ref"])

        for j in range(i + 50, img_num, step):
            file_name = data_path + "/image_0/" + str(j).zfill(6) + ".png"
            imgs["cur"] = cv2.imread(file_name)
            kptdescs["cur"] = detector(imgs["cur"])

            matches = matcher(kptdescs)
            confidences = matches["match_score"]
            score = np.mean(confidences.cpu().detach().numpy())
            num = confidences.shape[0]

            print(f"match {i} and {j} ...\tscore: {score}\tnum: {num}")

            # Strong candidate: report it, jump the reference frame forward
            # and restart the scan from there.
            if score > 0.5 and num > 550:
                print(f"match {i} and {j} ...\tscore: {score}\tnum: {num}")
                i += 50
                break

        i += step
        print(f"i: {i}")
