import time

import cv2
import numpy as np
import open3d as o3d
import PIL
from geometry_msgs.msg import Pose, PoseStamped, TransformStamped
from transforms3d.axangles import mat2axangle
from transforms3d.quaternions import quat2mat, mat2quat

from semantic_nav.log_utils import get_logger
logger = get_logger()


def rot_dist(rot1, rot2):
    """
    Distance between two rotation matrices.

    Parameters:
    rot1 (3x3 numpy array): The first rotation matrix
    rot2 (3x3 numpy array): The second rotation matrix

    Returns:
    float: the rotation angle of rot1^T @ rot2 (logarithmic-map distance)
    """
    relative_rot = np.matmul(rot1.T, rot2)
    _axis, angle = mat2axangle(relative_rot)
    return angle


def trans_dist(trans1, trans2):
    """
    Euclidean distance between two translation vectors.

    Parameters:
    trans1 (3x1 numpy array): The first translation vector
    trans2 (3x1 numpy array): The second translation vector

    Returns:
    float: the Euclidean (L2) distance between the vectors
    """
    delta = trans1 - trans2
    return np.sqrt(np.sum(delta * delta))


def pose_dist(pose1, pose2):
    """
    Distance between two poses, split into rotation and translation parts.

    Parameters:
    pose1 (tuple): (rot1, trans1), rotation matrix (3x3) + translation (3x1)
    pose2 (tuple): (rot2, trans2), rotation matrix (3x3) + translation (3x1)

    Returns:
    tuple: (rotation distance, translation distance) — the logarithmic-map
        rotation distance followed by the Euclidean translation distance
    """
    rot_a, trans_a = pose1
    rot_b, trans_b = pose2
    angular = rot_dist(rot_a, rot_b)
    linear = trans_dist(trans_a, trans_b)
    return angular, linear


def pose_inv(pose):
    """
    Invert a rigid-body pose: (R, t) -> (R^T, -R^T @ t).
    """
    rot, trans = pose
    rot_inv = rot.T
    trans_inv = -(rot_inv @ trans)
    return rot_inv, trans_inv


def pose_minus(pose1, pose2):
    """
    Relative pose between two poses: pose2^{-1} * pose1.
    """
    rot_a, trans_a = pose1
    rot_b, trans_b = pose2
    rot_b_inv = rot_b.T
    rel_rot = rot_b_inv @ rot_a
    rel_trans = rot_b_inv @ (trans_a - trans_b)
    return rel_rot, rel_trans


def pose_compose(pose1, pose2):
    """
    Compose two poses: pose1 * pose2.
    """
    rot_a, trans_a = pose1
    rot_b, trans_b = pose2
    composed_rot = rot_a @ rot_b
    composed_trans = trans_a + rot_a @ trans_b
    return composed_rot, composed_trans


def transform_to_Rt(transform_stamped: TransformStamped):
    """
    Extract a pose (R, t) from a geometry_msgs TransformStamped.

    Returns:
    tuple: (3x3 rotation matrix, 3-element translation vector).
        Quaternion is read in (w, x, y, z) order, matching transforms3d.
    """
    translation = transform_stamped.transform.translation
    rotation = transform_stamped.transform.rotation
    t = np.array([translation.x, translation.y, translation.z])
    quat = np.array([rotation.w, rotation.x, rotation.y, rotation.z])
    return quat2mat(quat), t


def Rt_to_PoseStamped(R, t, stamp, frame_id):
    """
    Pack a rotation matrix and translation vector into a stamped ROS pose.

    Parameters:
    R -- 3x3 rotation matrix
    t -- 3-element translation vector
    stamp -- ROS timestamp for the message header
    frame_id -- reference frame for the message header

    Returns:
    PoseStamped message (quaternion stored in w, x, y, z order)
    """
    msg = PoseStamped()
    msg.header.stamp = stamp
    msg.header.frame_id = frame_id
    msg.pose.position.x = t[0]
    msg.pose.position.y = t[1]
    msg.pose.position.z = t[2]
    qw, qx, qy, qz = mat2quat(R)
    msg.pose.orientation.w = qw
    msg.pose.orientation.x = qx
    msg.pose.orientation.y = qy
    msg.pose.orientation.z = qz
    return msg


def pose_to_transform(pose_stamped, source_frame):
    """
    Convert a PoseStamped into a TransformStamped with the given child frame.

    Parameters:
    pose_stamped -- PoseStamped message providing header, position, orientation
    source_frame -- frame id recorded as the transform's child_frame_id

    Returns:
    TransformStamped message
    """
    tf_msg = TransformStamped()
    tf_msg.header = pose_stamped.header
    tf_msg.child_frame_id = source_frame
    position = pose_stamped.pose.position
    tf_msg.transform.translation.x = position.x
    tf_msg.transform.translation.y = position.y
    tf_msg.transform.translation.z = position.z
    tf_msg.transform.rotation = pose_stamped.pose.orientation
    return tf_msg


def convert_dict_to_Pose(dict):
    """
    Build a geometry_msgs Pose from a nested dictionary.

    Parameters:
    dict -- mapping with "position" ({"x", "y", "z"}) and "orientation"
        ({"w", "x", "y", "z"}) sub-dicts. (Parameter name kept for backward
        compatibility although it shadows the builtin.)

    Returns:
    Pose populated from the dictionary, or None if any required key is
    missing.
    """
    data = dict
    if "position" not in data or "orientation" not in data:
        return None
    position = data["position"]
    orientation = data["orientation"]
    # The original only checked "w" and then raised UnboundLocalError /
    # KeyError on partial input; validate every key up front instead.
    if not all(axis in position for axis in ("x", "y", "z")):
        return None
    if not all(comp in orientation for comp in ("w", "x", "y", "z")):
        return None
    pose = Pose()
    pose.position.x = float(position["x"])
    pose.position.y = float(position["y"])
    pose.position.z = float(position["z"])
    pose.orientation.w = float(orientation["w"])
    pose.orientation.x = float(orientation["x"])
    pose.orientation.y = float(orientation["y"])
    pose.orientation.z = float(orientation["z"])
    return pose


def pose_np2dict(R, t):
    """Serialize a pose (R, t) into plain lists: quaternion (w, x, y, z) + translation."""
    quat = mat2quat(R)
    return {"rot": quat.tolist(), "trans": t.tolist()}


def pose_dict2np(pose_dict):
    """Deserialize a dict produced by pose_np2dict back into (R, t) numpy arrays."""
    rotation = quat2mat(pose_dict["rot"])
    translation = np.array(pose_dict["trans"])
    return rotation, translation


def depth_to_points(depth, intrinsic):
    """
    Back-project a depth image into a camera-frame point cloud.

    Parameters:
    depth -- depth image (H x W)
    intrinsic -- camera intrinsic matrix (3 x 3)

    Returns:
    points -- (N x 3) array of 3D points; pixels whose depth lies outside
        (0, 20) are dropped (presumably metres — confirm sensor units)
    """
    height, width = depth.shape
    # pixel grid, flattened row-major to match the flattened depth
    cols, rows = np.meshgrid(np.arange(width), np.arange(height))
    cols = cols.reshape(-1).astype(np.float32)
    rows = rows.reshape(-1).astype(np.float32)
    z_all = depth.reshape(-1)

    fx, fy = intrinsic[0, 0], intrinsic[1, 1]
    cx, cy = intrinsic[0, 2], intrinsic[1, 2]

    # keep only depths in the valid range (0, 20)
    keep = (z_all > 0.0) & (z_all < 20.0)
    z = z_all[keep]
    x = (cols[keep] - cx) * z / fx
    y = (rows[keep] - cy) * z / fy

    return np.stack((x, y, z), axis=1)


def color_depth_to_points(color, depth, intrinsic_depth):
    """
    Back-project a registered color/depth pair into a colored point cloud.

    Parameters:
    color -- color image, same pixel layout as depth (H x W x 3)
    depth -- depth image (H x W)
    intrinsic_depth -- intrinsic matrix of the depth camera (3 x 3)

    Returns:
    colored_points -- (N x 6) array [x, y, z, c0, c1, c2]; pixels whose
        depth lies outside (0, 20) are dropped
    """
    height, width = depth.shape
    cols, rows = np.meshgrid(np.arange(width), np.arange(height))
    cols = cols.reshape(-1).astype(np.float32)
    rows = rows.reshape(-1).astype(np.float32)
    z_all = depth.reshape(-1)

    fx, fy = intrinsic_depth[0, 0], intrinsic_depth[1, 1]
    cx, cy = intrinsic_depth[0, 2], intrinsic_depth[1, 2]

    # keep only depths in the valid range (0, 20)
    keep = (z_all > 0.0) & (z_all < 20.0)
    z = z_all[keep]
    x = (cols[keep] - cx) * z / fx
    y = (rows[keep] - cy) * z / fy
    xyz = np.stack((x, y, z), axis=1)

    # pick up the color of each surviving pixel
    channels = color.reshape((-1, 3))[keep]
    return np.hstack((xyz, channels))


def render_color_points(color, points, intrinsic_color):
    """
    Attach colors to 3-D points by projecting them into a color image.

    Parameters:
    color -- color image (H x W x 3, BGR channel order per caller convention)
    points -- point cloud coordinates (N x 3) in the color camera frame
    intrinsic_color -- intrinsic matrix of the color camera (3 x 3)

    Returns:
    colored_points -- (M x 6) array [x, y, z, + sampled channel values];
        points with zero/non-finite depth or projecting outside the image
        are dropped (M <= N)
    """
    fx = intrinsic_color[0, 0]
    fy = intrinsic_color[1, 1]
    cx = intrinsic_color[0, 2]
    cy = intrinsic_color[1, 2]

    xs, ys, zs = points[:, 0], points[:, 1], points[:, 2]
    # drop points with zero or non-finite depth (division below would blow up)
    depth_ok = (zs != 0) & np.isfinite(zs)
    xs, ys, zs = xs[depth_ok], ys[depth_ok], zs[depth_ok]

    # pinhole projection; the tiny epsilon biases exact .5 values upward
    # before rounding (kept from the original implementation)
    cols = np.round(fx * xs / zs + cx + 1e-5).astype(np.int32)
    rows = np.round(fy * ys / zs + cy + 1e-5).astype(np.int32)

    # keep only projections that land inside the image
    in_image = (rows >= 0) & (rows < color.shape[0]) & (cols >= 0) & (cols < color.shape[1])
    xs, ys, zs = xs[in_image], ys[in_image], zs[in_image]
    sampled = color[rows[in_image], cols[in_image]]

    xyz = np.vstack((xs, ys, zs)).T
    return np.hstack((xyz, sampled))


def points_to_depth_image(color, points, intrinsic_color):
    """
    Project a point cloud into a depth image aligned with the color camera.

    Parameters:
    color -- color image (H x W x 3); only its shape is used here
    points -- point cloud coordinates (N x 3) in the color camera frame
    intrinsic_color -- camera intrinsic matrix of the color camera (3 x 3)

    Returns:
    depth_image -- (H x W) numpy depth image (depth_scale=1.0, depth
        truncated at 10.0)
    """
    height_color = color.shape[0]
    width_color = color.shape[1]
    # Work on a copy: the previous version wrote intrinsic_color[2, 2] in
    # place, silently mutating the caller's matrix.
    K = np.array(intrinsic_color, dtype=np.float64, copy=True)
    K[2, 2] = 1.0
    device = o3d.core.Device('cpu:0')
    dtype = o3d.core.float32
    intrinsic = o3d.core.Tensor(K, dtype=dtype, device=device)
    points_tensor = o3d.core.Tensor(points, dtype=dtype, device=device)
    pcd = o3d.t.geometry.PointCloud(device=device)
    pcd.point.positions = points_tensor

    depth_image = pcd.project_to_depth_image(
        width=width_color, height=height_color, intrinsics=intrinsic,
        depth_scale=1.0, depth_max=10.0)
    return np.asarray(depth_image.to_legacy())


def create_point_cloud_from_rgb_depth(rgb_image, depth_image, camera_intrinsic):
    """
    Build an Open3D point cloud from an aligned color/depth image pair.

    Parameters:
    rgb_image -- color image; a 3-channel input is assumed BGR (OpenCV
        convention) and converted to RGB before being handed to Open3D
    depth_image -- depth image registered to rgb_image (depth_scale=1.0,
        truncated at 10.0)
    camera_intrinsic -- 3x3 intrinsic matrix

    Returns:
    open3d.geometry.PointCloud
    """
    if rgb_image.ndim == 3:
        # OpenCV stores channels as BGR; Open3D expects RGB
        rgb_image = cv2.cvtColor(rgb_image, cv2.COLOR_BGR2RGB)

    rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(
        o3d.geometry.Image(rgb_image),
        o3d.geometry.Image(depth_image),
        depth_scale=1.0,
        depth_trunc=10.0,
        convert_rgb_to_intensity=False,
    )

    intrinsic = o3d.camera.PinholeCameraIntrinsic(
        width=rgb_image.shape[1],
        height=rgb_image.shape[0],
        fx=camera_intrinsic[0][0],
        fy=camera_intrinsic[1][1],
        cx=camera_intrinsic[0][2],
        cy=camera_intrinsic[1][2],
    )

    return o3d.geometry.PointCloud.create_from_rgbd_image(rgbd, intrinsic)


def opencv_to_pillow(cv_image):
    """
    Convert an OpenCV image to a Pillow image.

    Parameters:
    cv_image: NumPy array, OpenCV image in BGR channel order

    Returns:
    PIL.Image in RGB channel order
    """
    # OpenCV uses BGR, Pillow uses RGB: reverse the channel axis
    rgb_array = cv_image[:, :, ::-1]
    return PIL.Image.fromarray(rgb_array)


def opencv_to_open3d(opencv_image):
    """
    Convert an OpenCV image (NumPy array) to an open3d.t.geometry.Image.

    A 3-channel input is assumed BGR: non-uint8 data is scaled by 255 and
    cast to uint8 (assumes float color values lie in [0, 1] — confirm),
    then converted to RGB. Other inputs (e.g. depth maps) are wrapped
    unchanged.
    """
    is_color = opencv_image.ndim == 3 and opencv_image.shape[2] == 3
    if is_color:
        if opencv_image.dtype != np.uint8:
            opencv_image = (opencv_image * 255).astype(np.uint8)
        opencv_image = cv2.cvtColor(opencv_image, cv2.COLOR_BGR2RGB)

    return o3d.t.geometry.Image(opencv_image)


def open3d_to_opencv(o3d_image):
    """
    Convert an open3d.t.geometry.Image back to an OpenCV NumPy image.

    A 3-channel image is cast to uint8 (scaling non-uint8 data by 255;
    assumes float color values lie in [0, 1] — confirm) and converted
    RGB -> BGR. Other images are returned as-is.
    """
    image = o3d_image.as_tensor().numpy()
    if image.ndim == 3 and image.shape[2] == 3:
        if image.dtype != np.uint8:
            image = (image * 255).astype(np.uint8)
        image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)

    return image


def draw_geometries(geometries, name="", show_normal=False):
    """
    Show the given geometries in an Open3D window, plus a unit coordinate
    frame at the origin, on a white background. Blocks until the window
    is closed.

    Parameters:
    geometries -- iterable of Open3D geometries to display
    name -- window title
    show_normal -- whether to render per-point normals
    """
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window(window_name=name)
    for geometry in geometries:
        visualizer.add_geometry(geometry)
    origin_frame = o3d.geometry.TriangleMesh.create_coordinate_frame(size=1.0, origin=[0, 0, 0])
    visualizer.add_geometry(origin_frame)

    options = visualizer.get_render_option()
    options.point_show_normal = show_normal
    options.background_color = np.asarray([1, 1, 1])
    options.point_size = 1.0
    options.line_width = 1.0
    options.show_coordinate_frame = False

    visualizer.run()
    visualizer.destroy_window()


def numpy_to_o3d_pcd(pts, xyz_only=True):
    """
    Wrap an Nx3 (xyz) or Nx6 (xyz + 0-255 BGR color) array as an Open3D
    point cloud.

    Parameters:
    pts -- (N, 3) or (N, 6) numpy array
    xyz_only -- when True (default) any color columns are ignored

    Raises:
    ValueError if pts is not 2-D with a column count divisible by 3.
    """
    if pts.ndim != 2 or pts.shape[1] % 3 != 0:
        raise ValueError("Input points should be a Nx3 or Nx6 numpy array")
    cloud = o3d.geometry.PointCloud()
    cloud.points = o3d.utility.Vector3dVector(pts[:, :3])
    if not xyz_only and pts.shape[1] == 6:
        # normalize 0-255 to 0-1 and flip BGR -> RGB for Open3D
        normalized = pts[:, 3:6] / 255.0
        cloud.colors = o3d.utility.Vector3dVector(normalized[:, ::-1])
    return cloud


def recover_pose_from_matches(camera_intrinsic, points1, points2):
    """
    Recover the relative camera pose from 2-D point matches across two images.

    Parameters:
    camera_intrinsic (numpy.ndarray): The camera intrinsic matrix, shape (3, 3).
    points1 (numpy.ndarray): Point coordinates in the first image, shape (N, 2).
    points2 (numpy.ndarray): Point coordinates in the second image, shape (N, 2).

    Returns:
    tuple: (rotation, translation, mask) — the pose (^{2}R_{1}, ^{2}t_{1})
        plus the inlier mask from RANSAC / recoverPose.
    """
    F, inlier_mask = cv2.findFundamentalMat(points1, points2, cv2.FM_RANSAC)
    return get_pose_from_fundamental_matrix(F, camera_intrinsic, points1, points2, inlier_mask)


def get_pose_from_fundamental_matrix(fundamental_matrix, camera_intrinsic, points1, points2, mask):
    """
    Convert a fundamental matrix into a relative pose via the essential matrix.

    Returns (rotation, translation, mask) from cv2.recoverPose.
    """
    K = camera_intrinsic
    essential = K.T @ fundamental_matrix @ K  # E = K^T F K
    _num_inliers, rotation, translation, pose_mask = cv2.recoverPose(
        essential, points1, points2, K, mask=mask)
    return rotation, translation, pose_mask


def registration_ransac_correspondence(src_keypts, tgt_keypts, inlier_threshold=0.2, max_iteration=1000):
    """
    RANSAC registration assuming one-to-one correspondences (point i of
    src_keypts matches point i of tgt_keypts).

    Parameters:
    src_keypts, tgt_keypts -- Open3D point clouds of equal size
    inlier_threshold -- max correspondence distance for an inlier
    max_iteration -- RANSAC iteration budget

    Returns:
    4x4 transformation, or None when the inlier ratio drops below 0.15.
    """
    t_start = time.time()
    num_points = len(src_keypts.points)
    indices = np.arange(num_points)
    # identity correspondences: (i, i) pairs
    corres = o3d.utility.Vector2iVector(np.stack((indices, indices), axis=1))
    result = o3d.pipelines.registration.registration_ransac_based_on_correspondence(
        src_keypts, tgt_keypts, corres,
        max_correspondence_distance=inlier_threshold,
        estimation_method=o3d.pipelines.registration.TransformationEstimationPointToPoint(False),
        ransac_n=4,
        # for ransac_based_on_correspondence max_iteration and max_validation are the same
        criteria=o3d.pipelines.registration.RANSACConvergenceCriteria(max_iteration=max_iteration, confidence=1),
    )
    inlier_set = np.array(result.correspondence_set)
    ratio = float(inlier_set.shape[0]) / float(num_points)
    t_end = time.time()
    logger.info("Ransac running time: {} s".format(t_end - t_start))
    logger.info("ransac inliers: {} / {} = {}".format(inlier_set.shape[0], num_points, ratio))
    if ratio < 0.15:
        return None
    return result.transformation


def preprocess_point_cloud(pcd, radius, covariance=False):
    """
    Downsample a cloud and estimate per-point normals oriented toward the
    origin (camera location); optionally also estimate point covariances.

    Parameters:
    pcd -- Open3D point cloud
    radius -- voxel size; normal search radius is 3x, covariance search 2x
    covariance -- when True, also compute per-point covariances

    Returns:
    the downsampled, processed point cloud
    """
    downsampled = pcd.voxel_down_sample(radius)
    normal_search = o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 3, max_nn=30)
    downsampled.estimate_normals(normal_search)
    downsampled.orient_normals_towards_camera_location(camera_location=np.array([0.0, 0.0, 0.0]))
    if covariance:
        cov_search = o3d.geometry.KDTreeSearchParamHybrid(radius=radius * 2, max_nn=30)
        downsampled.estimate_point_covariances(cov_search)
    return downsampled


def registration_icp(
    source,
    target,
    threshold,
    tf_init=None,
    method="point2point",
    loss="L2"
):
    """
    Align `source` to `target` with Open3D ICP and return the 4x4 transform.

    Parameters:
    source, target -- Open3D point clouds; having normals enables
        point2plane/colored_icp, having covariances enables plane2plane
    threshold -- max correspondence distance
    tf_init -- optional 4x4 initial transform; defaults to the identity
    method -- "point2point" (default), "point2plane", "plane2plane", or
        "colored_icp"; silently falls back to point2point when the clouds
        lack the attributes the requested method needs
    loss -- robust kernel: "L2" (default), "Cauchy", or "Huber"

    Returns:
    the estimated 4x4 transformation matrix
    """
    # Previous version used `tf_init=np.identity(4)` — a shared mutable
    # default argument evaluated once at import time. Build a fresh
    # identity per call instead.
    if tf_init is None:
        tf_init = np.identity(4)

    normal_flag = False
    covariance_flag = False
    if source.has_normals() and target.has_normals():
        normal_flag = True
    if source.has_covariances() and target.has_covariances():
        covariance_flag = True

    kernel_function = o3d.pipelines.registration.L2Loss()
    icp_criteria = o3d.pipelines.registration.ICPConvergenceCriteria(
        relative_fitness=1e-6, relative_rmse=1e-6, max_iteration=50)

    if loss == "Cauchy":
        kernel_function = o3d.pipelines.registration.CauchyLoss(2.3489)
    elif loss == "Huber":
        kernel_function = o3d.pipelines.registration.HuberLoss(1)

    if method == "colored_icp" and normal_flag:
        icp_method = o3d.pipelines.registration.TransformationEstimationForColoredICP(
            kernel=kernel_function, lambda_geometric=0.5)

        start_time = time.time()
        reg = o3d.pipelines.registration.registration_colored_icp(
            source,
            target,
            threshold,
            init=tf_init,
            estimation_method=icp_method,
            criteria=icp_criteria
        )
        end_time = time.time()
    else:
        # pick the best estimator the clouds' attributes allow
        if method == "point2plane" and normal_flag:
            icp_method = o3d.pipelines.registration.TransformationEstimationPointToPlane(kernel=kernel_function)
        elif method == "plane2plane" and normal_flag and covariance_flag:
            icp_method = o3d.pipelines.registration.TransformationEstimationForGeneralizedICP(kernel=kernel_function)
        else:
            icp_method = o3d.pipelines.registration.TransformationEstimationPointToPoint(with_scaling=False)

        start_time = time.time()
        reg = o3d.pipelines.registration.registration_icp(
            source,
            target,
            threshold,
            init=tf_init,
            estimation_method=icp_method,
            criteria=icp_criteria,
        )
        end_time = time.time()
    logger.info("Code running time: {} s".format(end_time - start_time))

    return reg.transformation
