import os
#os.chdir("..")
import torch
import cv2
import argparse
import yaml
import logging 
import numpy as np
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from src.utils.plotting import make_matching_figure
from DataLoader import create_dataloader
from src.loftr import LoFTR, default_cfg

class AbosluteScaleComputer(object):
    """Computes the absolute translation scale between consecutive poses.

    Feed ground-truth poses (3x4 or 4x4 matrices whose fourth column is the
    translation) one at a time via ``update()``; each call returns the
    Euclidean distance travelled since the previous pose.

    NOTE(review): the class name misspells "Absolute"; kept as-is because
    callers reference this exact name.
    """

    def __init__(self):
        self.prev_pose = None  # pose seen on the previous update() call
        self.cur_pose = None   # pose seen on the current update() call
        self.count = 0         # number of poses processed so far

    def update(self, pose):
        """Return the translation distance from the previous pose to ``pose``.

        The very first call has no previous pose and returns the neutral
        scale 1.0.
        """
        self.cur_pose = pose

        scale = 1.0
        if self.count != 0:
            # Euclidean distance between the translation columns; replaces
            # the hand-rolled per-component sqrt-of-squares.
            scale = np.linalg.norm(self.cur_pose[:3, 3] - self.prev_pose[:3, 3])

        self.count += 1
        self.prev_pose = self.cur_pose
        return scale

class TrajPlotter(object):
    """Draws estimated vs. ground-truth XZ trajectories on an OpenCV canvas."""

    def __init__(self):
        self.errors = []      # per-frame XZ position errors (kept for inspection)
        self._err_sum = 0.0   # running sum: avoids re-averaging the whole list every frame
        self.traj = np.zeros((600, 600, 3), dtype=np.uint8)

    def update(self, est_xyz, gt_xyz):
        """Plot one frame's estimated and ground-truth positions.

        Args:
            est_xyz: estimated position; only indices 0 (x) and 2 (z) are used.
            gt_xyz: ground-truth position; same indexing.

        Returns:
            The BGR canvas with the trajectories and average error drawn in.
        """
        x, z = est_xyz[0], est_xyz[2]
        gt_x, gt_z = gt_xyz[0], gt_xyz[2]

        est = np.array([x, z]).reshape(2)
        gt = np.array([gt_x, gt_z]).reshape(2)

        error = np.linalg.norm(est - gt)
        self.errors.append(error)
        self._err_sum += error
        avg_error = self._err_sum / len(self.errors)

        # === drawer ==================================
        # Offsets recenter the trajectory on the 600x600 canvas.
        draw_x, draw_y = int(x) + 290, int(z) + 90
        true_x, true_y = int(gt_x) + 290, int(gt_z) + 90

        # draw trajectory: green = estimate, red = ground truth
        cv2.circle(self.traj, (draw_x, draw_y), 1, (0, 255, 0), 1)
        cv2.circle(self.traj, (true_x, true_y), 1, (0, 0, 255), 2)
        # black band so the text below never overdraws itself
        cv2.rectangle(self.traj, (10, 20), (600, 80), (0, 0, 0), -1)

        # draw text
        text = "[AvgError] %2.4fm" % (avg_error)
        cv2.putText(self.traj, text, (20, 40),
                    cv2.FONT_HERSHEY_PLAIN, 1, (255, 255, 255), 1, 8)

        return self.traj

# -----------------------------------------------------------------------------
def pre_processing(image, size=(640, 480)):
    """Convert a BGR frame into a normalized grayscale CUDA tensor for LoFTR.

    Args:
        image: BGR image array of shape (H, W, 3).
        size: (width, height) to resize to before matching. Defaults to the
            previously hard-coded 640x480, so existing callers are unchanged.

    Returns:
        A (1, 1, H, W) float tensor on the GPU with values scaled to [0, 1].
    """
    image = cv2.resize(image, size)
    image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    # NOTE(review): requires a CUDA device — the matcher is also on .cuda().
    im = torch.from_numpy(image)[None][None].cuda() / 255.
    return im
# -----------------------------------------------------------------------------
# NOTE(review): both globals below appear to be dead code — run() builds its
# own TrajPlotter and computes absolute_scale locally, shadowing these.
# Consider removing them.
absolute_scale = 1.0
traj_plotter = TrajPlotter()
# -----------------------------------------------------------------------------
def run(args):
    """Run LoFTR-based monocular visual odometry over the configured dataset.

    Loads the YAML config, builds the dataloader and the LoFTR matcher, then
    for each consecutive frame pair estimates relative pose from matched
    keypoints and plots estimated vs. ground-truth trajectory. Press ESC in
    the OpenCV window to stop early.
    """
    with open(args.config, 'r') as f:
        # safe_load: no arbitrary object construction, and yaml.load without
        # a Loader argument raises on modern PyYAML.
        config = yaml.safe_load(f)

    # create dataloader
    loader = create_dataloader(config["dataset"])

    # create matcher
    matcher = LoFTR(config=default_cfg)
    matcher.load_state_dict(torch.load("weights/indoor_ds.ckpt")['state_dict'])
    matcher = matcher.eval().cuda()

    absscale = AbosluteScaleComputer()
    traj_plotter = TrajPlotter()

    # log file named after the config; ensure the output directory exists.
    fname = os.path.splitext(os.path.basename(args.config))[0]
    os.makedirs("results", exist_ok=True)

    # accumulated global rotation / translation of the camera
    cur_R = np.identity(3)
    cur_t = np.zeros((3, 1))
    last_img = None
    gt_pose = None

    # NOTE(review): the log file is opened (and thus created/truncating-safe
    # appended) but nothing is ever written to it — presumably results were
    # meant to be logged here; confirm with the original author.
    with open(os.path.join("results", fname + ".txt"), mode='a') as log_fopen:
        for i, img in enumerate(loader):
            print(i)
            gt_pose = loader.get_cur_pose()
            print(gt_pose)
            absolute_scale = absscale.update(gt_pose)

            if i > 0:
                img0 = pre_processing(last_img)
                img1 = pre_processing(img)
                batch = {'image0': img0, 'image1': img1}

                # Inference with LoFTR and get prediction
                with torch.no_grad():
                    matcher(batch)
                    mkpts0 = batch['mkpts0_f'].cpu().numpy()
                    mkpts1 = batch['mkpts1_f'].cpu().numpy()

                E, mask = cv2.findEssentialMat(
                    mkpts1, mkpts0,
                    focal=loader.cam.fx, pp=(loader.cam.cx, loader.cam.cy),
                    method=cv2.RANSAC, prob=0.999, threshold=1.0)
                _, R, t, mask = cv2.recoverPose(
                    E, mkpts1, mkpts0,
                    focal=loader.cam.fx, pp=(loader.cam.cx, loader.cam.cy))

                # Only integrate motion when the scale is significant; tiny
                # scales make the recovered direction unreliable.
                if absolute_scale > 0.1:
                    cur_t = cur_t + absolute_scale * cur_R.dot(t)
                    cur_R = R.dot(cur_R)

            last_img = img

            # === drawer ==================================
            canvas = traj_plotter.update(cur_t, gt_pose[:, 3])
            cv2.imshow("trajectory", canvas)
            if cv2.waitKey(10) == 27:  # ESC
                break

    cv2.destroyAllWindows()


# -----------------------------------------------------------------------------
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='python_vo')
    parser.add_argument('--config', type=str, default='params/kitti_superpoint_supergluematch.yaml',
                        help='config file')
    parser.add_argument('--logging', type=str, default='INFO',
                        help='logging level: NOTSET, DEBUG, INFO, WARNING, ERROR, CRITICAL')

    args = parser.parse_args()

    # basicConfig accepts level names directly; avoids reaching into the
    # private logging._nameToLevel mapping.
    logging.basicConfig(level=args.logging.upper())

    run(args)







