import open3d
import time
import numpy as np
import math
import torch
import os
from torch.utils.tensorboard import SummaryWriter
import cv2
import random
from scipy.spatial.transform import Rotation
import multiprocessing

import matplotlib
matplotlib.use('TkAgg')

from models.multimodal_classifier import MMClassifer
from data.kitti_pc_img_pose_loader import KittiLoader
from data.augmentation import angles2rotation_matrix
from kitti import options
from util import vis_tools
from data import augmentation

import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.cm as cm

import FrustumRegistration


def transform_pc_np(P, pc_np):
    """Apply a 4x4 homogeneous transform to a 3xN point cloud.

    :param P: 4x4 transformation matrix
    :param pc_np: 3xN array of 3D points
    :return: 3xN array of transformed points
    """
    ones_row = np.ones((1, pc_np.shape[1]), dtype=pc_np.dtype)
    pc_homogeneous = np.vstack((pc_np, ones_row))
    transformed = np.dot(P, pc_homogeneous)
    # Drop the homogeneous row before returning.
    return transformed[:3, :]


def enu2cam(pc_np, P):
    """Re-express an ENU-frame point cloud in camera axes and adjust the pose.

    The fixed conversion maps ENU axes (x-east, y-north, z-up) onto camera
    axes (x-right, y-down-is-flipped-z, z-forward).

    :param pc_np: 3xN array of points
    :param P: 4x4 pose matrix valid for the original point cloud
    :return: (converted 3xN points, pose adjusted to the converted frame)
    """
    conversion = np.asarray([[1, 0, 0, 0],
                             [0, 0, -1, 0],
                             [0, 1, 0, 0],
                             [0, 0, 0, 1]], dtype=P.dtype)
    # Transform the points with the conversion matrix (helper inlined here).
    ones_row = np.ones((1, pc_np.shape[1]), dtype=pc_np.dtype)
    pc_converted = np.dot(conversion, np.vstack((pc_np, ones_row)))[0:3, :]
    # Fold the inverse conversion into P so that P_new maps the converted
    # cloud the same way P mapped the original one.
    P_converted = np.dot(P, np.linalg.inv(conversion))
    return pc_converted, P_converted


def camera_matrix_scaling(K: np.ndarray, s: float):
    """Scale a 3x3 camera intrinsic matrix by factor *s*.

    Focal lengths and principal point are scaled; the homogeneous
    element K[2, 2] is reset to 1 so the matrix stays a valid intrinsic.
    """
    scaled = K * s
    scaled[2, 2] = 1
    return scaled


def get_inside_img_mask(pc_np, P_np, K_np, H, W):
    """Boolean mask of points that project inside an HxW image under pose P.

    :param pc_np: 3xN point cloud
    :param P_np: 4x4 camera pose (world -> camera)
    :param K_np: 3x3 camera intrinsic matrix
    :param H: image height in pixels
    :param W: image width in pixels
    :return: (N,) boolean array, True where the point lands in the image
             and is in front of the camera
    """
    num_points = pc_np.shape[1]
    homogeneous = np.vstack((pc_np, np.ones((1, num_points), dtype=pc_np.dtype)))
    cam_points = np.dot(P_np, homogeneous)[0:3, :]
    projected = np.dot(K_np, cam_points)
    px = projected[0, :] / projected[2, :]
    py = projected[1, :] / projected[2, :]

    in_x = (px >= 0) & (px <= W - 1)
    in_y = (py >= 0) & (py <= H - 1)
    # Points behind (or nearly on) the image plane are never visible.
    in_front = cam_points[2, :] > 0.1
    return in_x & in_y & in_front


def get_P_diff(P_pred_np, P_gt_np):
    """Pose error between a predicted and a ground-truth 4x4 transform.

    :param P_pred_np: predicted 4x4 pose
    :param P_gt_np: ground-truth 4x4 pose
    :return: (translation error as L2 norm,
              rotation error as the sum of absolute 'xzy' Euler angles in degrees)
    """
    delta = np.dot(np.linalg.inv(P_pred_np), P_gt_np)

    translation_error = np.linalg.norm(delta[:3, 3])
    rotation = Rotation.from_matrix(delta[:3, :3])
    rotation_error = np.abs(rotation.as_euler('xzy', degrees=True)).sum()

    return translation_error, rotation_error


def solve_PnP(pc_np, coarse_predictions_np, fine_predictions_np, K_np, H, W, fine_resolution_scale,
              iterationsCount, method):
    """Estimate the camera pose with RANSAC PnP from per-point predictions.

    :param pc_np: 3xN point cloud
    :param coarse_predictions_np: (N,) array, 1 where a point is predicted
                                  to fall inside the image
    :param fine_predictions_np: (N,) flattened fine-grid cell index per point
    :param K_np: 3x3 intrinsic matrix at full image resolution
    :param H: full-resolution image height
    :param W: full-resolution image width
    :param fine_resolution_scale: ratio of the fine prediction grid to the
                                  full image resolution (e.g. 1/32)
    :param iterationsCount: RANSAC iteration budget
    :param method: OpenCV PnP flag (e.g. cv2.SOLVEPNP_EPNP)
    :return: (P, outlier_ratio) — 4x4 pose (identity on failure) and the
             RANSAC outlier ratio (1 on failure)
    """
    # Keep only points predicted to be visible in the image.
    in_image_mask = coarse_predictions_np == 1
    pc_masked_np = pc_np[:, in_image_mask]
    fine_predictions_masked_np = fine_predictions_np[in_image_mask]

    # Work in fine-grid resolution: scale image size and intrinsics.
    H = H * fine_resolution_scale
    W = W * fine_resolution_scale
    K_np_fine = camera_matrix_scaling(K_np, fine_resolution_scale)

    # Build 3D-2D pairs for the PnP solver.
    # NOTE: np.float was removed in NumPy 1.24 — use np.float64 explicitly.
    points = pc_masked_np.astype(np.float64)  # 3xN
    # Unflatten the fine cell index into (x, y) pixel coordinates.
    pixels_y = np.floor(fine_predictions_masked_np / W)
    pixels_x = fine_predictions_masked_np - pixels_y * W
    pixels = np.stack((pixels_x, pixels_y), axis=0).astype(np.float64)  # 2xN

    P = np.identity(4)
    outlier_ratio = 1

    # PnP needs at least 4 correspondences.
    if points.shape[1] >= 4:
        try:
            is_success, rvec, tvec, inliers = cv2.solvePnPRansac(np.transpose(points),
                                                                 np.transpose(pixels),
                                                                 K_np_fine,
                                                                 useExtrinsicGuess=False,
                                                                 iterationsCount=iterationsCount,
                                                                 reprojectionError=0.6,
                                                                 flags=method,
                                                                 distCoeffs=None)
            # Check is_success (and inliers) BEFORE touching tvec: on failure
            # OpenCV may return None for these outputs.
            # The 14.14 (~= sqrt(200)) threshold rejects implausibly large
            # translations — presumably tuned for this dataset; verify if reused.
            if is_success and inliers is not None and np.linalg.norm(tvec) < 14.14:
                R, _ = cv2.Rodrigues(rvec)
                P[0:3, 0:3] = R
                P[0:3, 3] = tvec[:, 0]
                outlier_ratio = 1 - len(inliers) / len(fine_predictions_masked_np)
        except Exception:
            # Best-effort: a solver failure yields the identity pose.
            # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
            # are no longer swallowed.
            pass

    return P, outlier_ratio


if __name__=='__main__':
    # --- configuration ---------------------------------------------------
    root_path = '/ssd/jiaxin/point-img-feature/kitti/save/1.30-noTranslation'
    # root_path = '/ssd/jiaxin/point-img-feature/oxford/save/1.15-fine-wGround-nocrop-0.25x192x320'
    # root_path = '/ssd/jiaxin/point-img-feature/nuscenes_t/save/3.3-160x320-accu'
    visualization_output_folder = 'visualization'
    visualization_output_path = os.path.join(root_path, visualization_output_folder)
    data_output_folder = 'data'
    data_output_path = os.path.join(root_path, data_output_folder)

    is_plot = False
    H = 160  # kitti=160, oxford=288/192/384, nuscenes 160
    W = 512  # kitti=512, oxford=512/320/640, nuscenes 320
    fine_resolution_scale = 1 / 32.0
    # nuScenes poses are stored in ENU coordinates and need conversion.
    is_enu2cam = 'nuscene' in root_path

    t_diff_avg = 0
    r_diff_avg = 0
    counter = 0

    # Frame ids are the first 9 characters of each saved file name.
    filename_list = [f[0:9] for f in os.listdir(data_output_path) if os.path.isfile(os.path.join(data_output_path, f))]
    filename_list = list(set(filename_list))
    filename_list.sort()

    # Accumulators saved to disk at the end.
    P_pred_all_np = np.zeros((len(filename_list), 4, 4))
    P_gt_all_np = np.zeros((len(filename_list), 4, 4))
    cost_all_np = np.zeros((len(filename_list)))
    for i in range(0, len(filename_list), 1):
        filename = filename_list[i]
        counter += 1

        # if filename != '000400_00':
        #     continue

        # Row layout of the saved array: 0-2 xyz, 3 coarse prediction,
        # 4 coarse label, 5 fine prediction, 6 fine label.
        # NOTE: np.int was removed in NumPy 1.24 — use the builtin int,
        # which is what the old alias resolved to.
        point_data_np = np.load(os.path.join(data_output_path, filename+'_pc_label.npy'))
        pc_np = point_data_np[0:3, :].astype(np.float64)
        coarse_predictions_np = point_data_np[3, :].astype(int)
        coarse_labels_np = point_data_np[4, :].astype(int)
        fine_predictions_np = point_data_np[5, :].astype(int)
        fine_labels_np = point_data_np[6, :].astype(int)
        K_np = np.load(os.path.join(data_output_path, filename + '_K.npy')).astype(np.float64)
        P_gt_np = np.load(os.path.join(data_output_path, filename + '_P.npy')).astype(np.float64)
        # Promote a 3x4 pose to homogeneous 4x4.
        if P_gt_np.shape[0] == 3:
            P_gt_np = np.concatenate((P_gt_np, np.identity(4)[3:4, :]), axis=0)
        if is_enu2cam:
            pc_np, P_gt_np = enu2cam(pc_np, P_gt_np)

        # debug code to ensure that the label is correct
        # inside_mask = get_inside_img_mask(points_np, P_gt_np, K_np, H, W)
        # inside_mask = inside_mask.astype(np.int32)
        # assert 0 == np.sum((inside_mask != coarse_labels_np).astype(int))

        R_gt = Rotation.from_matrix(P_gt_np[0:3, 0:3])
        angles_gt = R_gt.as_euler('yxz', degrees=False)
        ry_gt = angles_gt[0]

        # Estimate the pose from the network predictions via RANSAC PnP.
        P_pred_np, final_cost = solve_PnP(pc_np, coarse_predictions_np, fine_predictions_np, K_np, H, W, fine_resolution_scale,
                                          iterationsCount=500, method=cv2.SOLVEPNP_EPNP)

        t_diff, r_diff = get_P_diff(P_pred_np, P_gt_np)
        t_diff_avg += t_diff
        r_diff_avg += r_diff

        print('%s - cost: %.2f, T: %.1f, R:%.1f' % (filename, final_cost, t_diff, r_diff))
        P_pred_all_np[i, :, :] = P_pred_np
        P_gt_all_np[i, :, :] = P_gt_np
        cost_all_np[i] = final_cost

        if is_plot:
            print("P_pred_np")
            print(P_pred_np)
            print("P_gt_np")
            print(P_gt_np)

            img_vis_np = cv2.cvtColor(cv2.imread(os.path.join(visualization_output_path, filename+'_img.png')), cv2.COLOR_BGR2RGB)
            img_vis_fine_np = cv2.cvtColor(cv2.imread(os.path.join(visualization_output_path, filename+'_prediction.png')), cv2.COLOR_BGR2RGB)

            img_reg_np = vis_tools.get_registration_visualization(pc_np,
                                                               P_pred_np,
                                                               K_np,
                                                               coarse_predictions_np,
                                                               img_vis_np)

            plt.figure()
            plt.imshow(img_vis_np)
            plt.figure()
            plt.imshow(img_vis_fine_np)
            plt.figure()
            plt.imshow(img_reg_np)

            # Direct Axes3D(fig) construction is deprecated since matplotlib
            # 3.4 (the axes is not added to the figure) — use add_subplot.
            fig_prediction = plt.figure(figsize=(9, 9))
            ax_prediction = fig_prediction.add_subplot(projection='3d')
            ax_prediction.set_title("coarse label")
            vis_tools.plot_pc(pc_np, color=coarse_predictions_np, size=6, ax=ax_prediction)

            # fig_gt = plt.figure(figsize=(9, 9))
            # ax_gt = Axes3D(fig_gt)
            # ax_gt.set_title("registration")
            # vis_tools.plot_pc(pc_vis_np, color=coarse_label_vis_np, size=6, ax=ax_gt)

            plt.show()

    # --- report and save -------------------------------------------------
    t_diff_avg = t_diff_avg / counter
    r_diff_avg = r_diff_avg / counter
    print("%d frame average translation / rotation error: [%.2f, %.2f]" % (counter, t_diff_avg, r_diff_avg))

    np.save(os.path.join(root_path, 'P_pred_all_np.npy'), P_pred_all_np)
    np.save(os.path.join(root_path, 'P_gt_all_np.npy'), P_gt_all_np)
    np.save(os.path.join(root_path, 'cost_all_np.npy'), cost_all_np)
