"""
Evaluation utils.
This code is based on https://github.com/mrharicot/monodepth/blob/master/utils/evaluation_utils.py
We would like to thank C. Godard and other authors for sharing their code
"""

import os
import pickle
from collections import Counter

import cv2
import numpy as np
import pandas as pd
from scipy.interpolate import LinearNDInterpolator


def compute_errors(gt, pred):
    """Compute the standard monocular-depth error metrics.

    Args:
        gt: ground-truth depth values (numpy array, strictly positive).
        pred: predicted depth values, same shape as ``gt`` (strictly positive).

    Returns:
        Tuple ``(abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3)``, where the
        ``a*`` entries are the fractions of pixels whose depth ratio
        ``max(gt/pred, pred/gt)`` is below 1.25, 1.25**2 and 1.25**3.
    """
    # Threshold accuracies: per-pixel ratio, symmetric in gt/pred.
    ratio = np.maximum(gt / pred, pred / gt)
    a1, a2, a3 = ((ratio < 1.25 ** k).mean() for k in (1, 2, 3))

    diff = gt - pred
    rmse = np.sqrt(np.mean(diff ** 2))
    rmse_log = np.sqrt(np.mean((np.log(gt) - np.log(pred)) ** 2))

    abs_rel = np.mean(np.abs(diff) / gt)
    sq_rel = np.mean(diff ** 2 / gt)

    return abs_rel, sq_rel, rmse, rmse_log, a1, a2, a3


###############################################################################
#######################  KITTI

# Focal length (in pixels) of the KITTI camera for each known image width;
# used to recover depth from disparity via depth = focal * baseline / disp.
width_to_focal = {
    1242: 721.5377,
    1241: 718.856,
    1224: 707.0493,
    1238: 718.3351,
}


def load_gt_disp_kitti(path, num_samples=200):
    """Load the KITTI 2015 ground-truth (non-occluded) disparity maps.

    Args:
        path: root of the KITTI 2015 stereo dataset (contains ``training/``).
        num_samples: number of frames to load; defaults to 200, the size of
            the full KITTI 2015 training split.

    Returns:
        List of float32 disparity arrays (invalid pixels are 0).

    Raises:
        FileNotFoundError: if a disparity image is missing or unreadable
            (cv2.imread returns None in that case).
    """
    gt_disparities = []
    for idx in range(num_samples):
        disp_path = os.path.join(
            path, "training", "disp_noc_0", "{:06d}_10.png".format(idx)
        )
        # Flag -1 (IMREAD_UNCHANGED) keeps the 16-bit PNG encoding intact.
        disp = cv2.imread(disp_path, -1)
        if disp is None:
            raise FileNotFoundError("{} not found".format(disp_path))
        # KITTI stores disparity scaled by 256 in uint16 PNGs.
        gt_disparities.append(disp.astype(np.float32) / 256)
    return gt_disparities


def convert_disps_to_depths_kitti(gt_disparities, pred_disparities):
    """Convert KITTI stereo disparities (ground truth and predicted) to depths.

    Depth is recovered as ``focal * baseline / disparity`` with the KITTI
    stereo baseline of 0.54 m and a per-image focal length looked up from
    the image width in ``width_to_focal``.

    Invalid ground-truth pixels (disparity == 0) get 1 added to their
    disparity before the division so the result stays finite; the caller is
    expected to mask them out again before computing metrics.

    Returns:
        ``(gt_depths, pred_depths, pred_disparities_resized)`` lists.
    """
    gt_depths = []
    pred_depths = []
    resized_preds = []

    for idx, gt_disp in enumerate(gt_disparities):
        pred_disp = pred_disparities[idx]
        resized_preds.append(pred_disp)

        _, width = gt_disp.shape
        focal_baseline = width_to_focal[width] * 0.54

        # (1.0 - valid) adds exactly 1 where gt_disp == 0, avoiding a
        # division by zero without disturbing valid pixels.
        valid = gt_disp > 0
        gt_depths.append(focal_baseline / (gt_disp + (1.0 - valid)))
        pred_depths.append(focal_baseline / pred_disp)

    return gt_depths, pred_depths, resized_preds


###############################################################################
#######################  EIGEN


def read_text_lines(file_path):
    """Read a text file and return its lines with trailing whitespace
    (including the newline) stripped.

    Args:
        file_path: path of the text file to read.

    Returns:
        List of right-stripped lines.
    """
    # Context manager guarantees the handle is closed even if reading raises;
    # the original open/readlines/close leaked the handle on error.
    with open(file_path, "r") as f:
        return [line.rstrip() for line in f]


def read_file_data(files, data_root):
    """Resolve Eigen-split file-list entries into ground-truth paths.

    Each entry looks like ``date/drive/image_02/data/0000000000.png``.
    For entries whose image exists under ``data_root`` this collects the
    matching velodyne ``.bin`` path, the calibration directory, the image
    size, the image path, and the camera id (always 2, the left color
    camera, in this pipeline). Missing images are reported on stdout and
    skipped.

    Args:
        files: iterable of whitespace-separated list entries (only the
            first token of each entry is used).
        data_root: dataset root, concatenated directly with the entries.

    Returns:
        ``(gt_files, gt_calib, im_sizes, im_files, cams)`` parallel lists.
    """
    gt_files = []
    gt_calib = []
    im_sizes = []
    im_files = []
    cams = []
    for filename in files:
        filename = filename.split()[0]
        splits = filename.split("/")
        date = splits[0]
        im_id = splits[4][:10]

        im = filename
        vel = "{}/{}/velodyne_points/data/{}.bin".format(splits[0], splits[1], im_id)

        if os.path.isfile(data_root + im):
            gt_files.append(data_root + vel)
            gt_calib.append(data_root + date + "/")
            im_sizes.append(cv2.imread(data_root + im).shape[:2])
            im_files.append(data_root + im)
            cams.append(2)  # 2 = left color camera (3 would be the right one)
        else:
            print("{} missing".format(data_root + im))

    return gt_files, gt_calib, im_sizes, im_files, cams


def load_velodyne_points(file_name):
    # adapted from https://github.com/hunse/kitti
    """Load a KITTI velodyne scan as homogeneous points.

    Each row of the raw ``.bin`` file is (forward, left, up, reflectance);
    the reflectance column is overwritten with 1.0 so the result can be fed
    directly into homogeneous-coordinate transforms.

    Returns:
        (N, 4) float32 array of homogeneous points.
    """
    scan = np.fromfile(file_name, dtype=np.float32)
    pts = scan.reshape((-1, 4))
    pts[:, 3] = 1.0  # replace reflectance with the homogeneous 1
    return pts


def lin_interp(shape, xyd):
    # taken from https://github.com/hunse/kitti
    """Densify sparse (x, y, depth) samples by linear interpolation.

    Args:
        shape: (rows, cols) of the output map.
        xyd: (N, 3) array of (x, y, depth) samples in image coordinates.

    Returns:
        Dense (rows, cols) array; pixels outside the convex hull of the
        samples are filled with 0.

    Note:
        Requires ``LinearNDInterpolator`` from ``scipy.interpolate``; the
        import was previously missing from this module, so any call raised
        a NameError.
    """
    m, n = shape
    # The interpolator is queried with (row, col), so swap the (x, y) columns.
    ij, d = xyd[:, 1::-1], xyd[:, 2]
    f = LinearNDInterpolator(ij, d, fill_value=0)
    J, I = np.meshgrid(np.arange(n), np.arange(m))
    IJ = np.vstack([I.flatten(), J.flatten()]).T
    disparity = f(IJ).reshape(shape)
    return disparity


def read_calib_file(path):
    # taken from https://github.com/hunse/kitti
    """Parse a KITTI calibration file into a dict.

    Each line has the form ``key: value``. Values that look numeric
    (containing only digits, '.', 'e', '+', '-' and spaces) are converted
    to float numpy arrays; everything else (e.g. timestamps) is kept as
    the raw stripped string.
    """
    numeric_chars = set("0123456789.e+- ")
    calib = {}
    with open(path, "r") as f:
        for line in f.readlines():
            key, value = line.split(":", 1)
            value = value.strip()
            calib[key] = value
            if numeric_chars.issuperset(value):
                try:
                    # Materialized list (not a lazy map) — the Python 3
                    # pitfall reported in Monodepth issue #224.
                    calib[key] = np.array([float(tok) for tok in value.split(" ")])
                except ValueError:
                    # Not actually numeric (e.g. empty token): the raw
                    # string stored above stays in place.
                    pass
    return calib


def get_focal_length_baseline(calib_dir, cam):
    """Return (focal_length, baseline) for a rectified KITTI color camera.

    Args:
        calib_dir: directory containing ``calib_cam_to_cam.txt`` (it is
            string-concatenated, so it must end with a path separator).
        cam: camera id — 2 (left color) or 3 (right color).

    Returns:
        ``(focal_length in pixels, stereo baseline in meters)``.

    Raises:
        ValueError: if ``cam`` is neither 2 nor 3 (previously this fell
            through to an UnboundLocalError on ``focal_length``).
    """
    cam2cam = read_calib_file(calib_dir + "calib_cam_to_cam.txt")
    P2_rect = cam2cam["P_rect_02"].reshape(3, 4)
    P3_rect = cam2cam["P_rect_03"].reshape(3, 4)

    # cam 2 is left of camera 0  -6cm
    # cam 3 is to the right  +54cm
    # The rectified projection matrices encode the offset as -f*b in P[0, 3].
    b2 = P2_rect[0, 3] / -P2_rect[0, 0]
    b3 = P3_rect[0, 3] / -P3_rect[0, 0]
    baseline = b3 - b2

    if cam == 2:
        focal_length = P2_rect[0, 0]
    elif cam == 3:
        focal_length = P3_rect[0, 0]
    else:
        raise ValueError("cam must be 2 or 3, got {}".format(cam))

    return focal_length, baseline


def sub2ind(matrixSize, rowSub, colSub):
    """Map (row, col) subscripts to a scalar key for duplicate detection.

    NOTE(review): this is not a true row-major linear index — it computes
    ``row * (n_cols - 1) + col - 1``, so distinct pixels can in principle
    collide (e.g. (1, 0) and (0, n_cols - 1)). Kept as-is to stay
    numerically identical to the reference Monodepth/KITTI evaluation code.
    """
    n_cols = matrixSize[1]
    return rowSub * (n_cols - 1) + colSub - 1


def generate_depth_map(
    calib_dir, velo_file_name, im_shape, cam=2, interp=False, vel_depth=False
):
    """Project a KITTI velodyne scan into a sparse per-pixel depth map.

    Args:
        calib_dir: directory with ``calib_cam_to_cam.txt`` and
            ``calib_velo_to_cam.txt`` (string-concatenated, so it must end
            with a path separator).
        velo_file_name: path of the velodyne ``.bin`` scan.
        im_shape: (height, width) of the target image.
        cam: camera id (2 = left color, 3 = right color).
        interp: if True, additionally return a densified (linearly
            interpolated) depth map.
        vel_depth: if True, use the raw velodyne forward distance as depth
            instead of the camera-frame z after projection.

    Returns:
        ``depth``, an (H, W) sparse map (0 where no point projects), or
        ``(depth, depth_interp)`` when ``interp`` is True.
    """
    # load calibration files
    cam2cam = read_calib_file(calib_dir + "calib_cam_to_cam.txt")
    velo2cam = read_calib_file(calib_dir + "calib_velo_to_cam.txt")
    velo2cam = np.hstack((velo2cam["R"].reshape(3, 3), velo2cam["T"][..., np.newaxis]))
    velo2cam = np.vstack((velo2cam, np.array([0, 0, 0, 1.0])))

    # compute projection matrix velodyne->image plane
    R_cam2rect = np.eye(4)
    R_cam2rect[:3, :3] = cam2cam["R_rect_00"].reshape(3, 3)
    P_rect = cam2cam["P_rect_0" + str(cam)].reshape(3, 4)
    P_velo2im = np.dot(np.dot(P_rect, R_cam2rect), velo2cam)

    # load velodyne points and remove all behind image plane (approximation)
    # each row of the velodyne data is forward, left, up, reflectance
    velo = load_velodyne_points(velo_file_name)
    velo = velo[velo[:, 0] >= 0, :]

    # project the points to the camera and normalize x, y by depth
    velo_pts_im = np.dot(P_velo2im, velo.T).T
    velo_pts_im[:, :2] = velo_pts_im[:, :2] / velo_pts_im[:, 2][..., np.newaxis]

    if vel_depth:
        velo_pts_im[:, 2] = velo[:, 0]

    # check if in bounds
    # use minus 1 to get the exact same value as KITTI matlab code
    velo_pts_im[:, 0] = np.round(velo_pts_im[:, 0]) - 1
    velo_pts_im[:, 1] = np.round(velo_pts_im[:, 1]) - 1
    val_inds = (velo_pts_im[:, 0] >= 0) & (velo_pts_im[:, 1] >= 0)
    val_inds = (
        val_inds & (velo_pts_im[:, 0] < im_shape[1]) & (velo_pts_im[:, 1] < im_shape[0])
    )
    velo_pts_im = velo_pts_im[val_inds, :]

    # project to image
    # NOTE: np.int was removed in NumPy 1.24; the builtin int dtype is the
    # documented replacement and behaves identically here.
    depth = np.zeros(im_shape)
    depth[
        velo_pts_im[:, 1].astype(int), velo_pts_im[:, 0].astype(int)
    ] = velo_pts_im[:, 2]

    # find the duplicate points and choose the closest depth
    inds = sub2ind(depth.shape, velo_pts_im[:, 1], velo_pts_im[:, 0])
    dupe_inds = [item for item, count in Counter(inds).items() if count > 1]
    for dd in dupe_inds:
        pts = np.where(inds == dd)[0]
        x_loc = int(velo_pts_im[pts[0], 0])
        y_loc = int(velo_pts_im[pts[0], 1])
        depth[y_loc, x_loc] = velo_pts_im[pts, 2].min()
    depth[depth < 0] = 0

    if interp:
        # interpolate the depth map to fill in holes
        depth_interp = lin_interp(im_shape, velo_pts_im)
        return depth, depth_interp
    else:
        return depth


def load_priors(path, num_samples, split):
    """Load per-frame semantic prior maps as grayscale images.

    Args:
        path: directory containing the prior images.
        num_samples: number of frames to load.
        split: ``"kitti"`` uses KITTI-style names (``000000_10.png``);
            any other value uses plain ``<index>.png`` names.

    Returns:
        List of grayscale images (numpy arrays).

    Raises:
        FileNotFoundError: if an expected prior image is missing or
            unreadable.
    """
    priors = []
    for t_id in range(num_samples):
        name = (
            str(t_id).zfill(6) + "_10.png" if split == "kitti" else str(t_id) + ".png"
        )
        full_name = os.path.join(path, name)
        prior = cv2.imread(full_name, cv2.IMREAD_GRAYSCALE)
        # Raise instead of assert: asserts are stripped under `python -O`,
        # which would let a None image propagate and fail far from here.
        if prior is None:
            raise FileNotFoundError("{} not found".format(full_name))
        priors.append(prior)
    return priors


def load_objects_mask(path, num_samples):
    """Load per-frame object masks from the KITTI dataset.

    Args:
        path: dataset root containing ``training/obj_map/``.
        num_samples: number of frames to load (KITTI-style names,
            ``000000_10.png``).

    Returns:
        List of grayscale mask images (numpy arrays).

    Raises:
        FileNotFoundError: if an expected mask image is missing or
            unreadable.
    """
    priors = []
    for t_id in range(num_samples):
        name = str(t_id).zfill(6) + "_10.png"
        full_name = os.path.join(path, "training", "obj_map", name)
        prior = cv2.imread(full_name, cv2.IMREAD_GRAYSCALE)
        # Raise instead of assert: asserts are stripped under `python -O`.
        if prior is None:
            raise FileNotFoundError("{} not found".format(full_name))
        priors.append(prior)
    return priors
