import os
import json
import cv2
import numpy as np
import open3d as o3d
from utils import load_json

# Per-category annotation colors for OpenCV drawing, in BGR channel order
# (cv2 expects BGR, not RGB). Intended hues mirror PCD_COLORS.
# NOTE: entries 6, 8 and 0 were previously RGB-ordered (orange/navy/pink
# copied verbatim from RGB tables), so they rendered as the wrong color.
IMAGE_COLORS = {
    1: (0, 0, 255),      # Red
    2: (0, 255, 0),      # Green
    3: (255, 0, 0),      # Blue
    4: (0, 255, 255),    # Yellow
    5: (255, 0, 255),    # Purple
    6: (0, 165, 255),    # Orange (BGR)
    7: (0, 128, 0),      # Dark Green
    8: (128, 0, 0),      # Navy Blue (BGR)
    9: (128, 0, 128),    # Magenta
    0: (203, 192, 255),  # Pink (BGR)
}

# Per-category bounding-box colors for Open3D rendering, as [R, G, B]
# floats in the 0-1 range (Open3D expects normalized RGB).
PCD_COLORS = {
    1: [1, 0, 0],    # Red
    2: [0, 1, 0],    # Green
    3: [0, 0, 1],    # Blue
    4: [1, 1, 0],    # Yellow
    5: [1, 0, 1],    # Purple (pure magenta in RGB terms)
    6: [1, 0.65, 0],    # Orange
    7: [0, 0.5, 0],     # Dark Green
    8: [0, 0, 0.5],     # Navy Blue
    9: [0.5, 0, 0.5],   # Magenta (renders as mid purple)
    0: [1, 0.75, 0.8] # Pink
}


def parse_annotation(json_path, key):
    """Load a JSON file and return the value stored under *key*.

    Raises KeyError if *key* is absent from the document.
    """
    with open(json_path, 'r') as fp:
        document = json.load(fp)
    return document[key]

def draw_vendor_2d_bbox(image_path, annotations, camera_id, visualized_path, replace_name):
    """Draw vendor 2D bbox annotations for one camera onto an image and save it.

    Args:
        image_path: path to the source .jpg image.
        annotations: mapping of camera_id -> {'annotation': [...]}; each item
            carries 'bbox' ([x1, y1, x2, y2]) and 'category_id'.
        camera_id: key selecting which camera's annotations to draw.
        visualized_path: output directory for the rendered image.
        replace_name: suffix replacing ".jpg" in the saved file name.
    """
    image = cv2.imread(image_path)
    if image is None:
        # Unreadable/missing image: bail out instead of crashing in cv2.rectangle.
        print(f"Error: could not read image {image_path}")
        return
    if camera_id in annotations:
        for annotation in annotations[camera_id]['annotation']:
            x1, y1, x2, y2 = (int(v) for v in annotation['bbox'])
            category_id = annotation['category_id']
            color = IMAGE_COLORS.get(category_id, (0, 0, 0))  # default black
            cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
            label = f'ID: {category_id}'
            (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
            # Filled banner behind the label so white text stays readable.
            cv2.rectangle(image, (x1, y1 - 20), (x1 + w, y1), color, -1)
            cv2.putText(image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    # os.path.basename is portable, unlike splitting on "/".
    save_name = os.path.join(visualized_path, os.path.basename(image_path).replace(".jpg", replace_name))
    cv2.imwrite(save_name, image)

def draw_own_2d_bbox(image_path, annotations, visualized_path, replace_name):
    """Draw our own 2D bbox annotations onto an image and save it.

    Args:
        image_path: path to the source .jpg image.
        annotations: list of dicts with 'points' ([x1, y1, x2, y2]) and a
            'label' holding a stringified integer category id.
        visualized_path: output directory for the rendered image.
        replace_name: suffix replacing ".jpg" in the saved file name.
    """
    image = cv2.imread(image_path)
    if image is None:
        # Unreadable/missing image: bail out instead of crashing in cv2.rectangle.
        print(f"Error: could not read image {image_path}")
        return
    for annotation in annotations:
        x1, y1, x2, y2 = (int(v) for v in annotation['points'])  # xyxy
        label = annotation['label']  # string(int)
        color = IMAGE_COLORS.get(int(label), (0, 0, 0))  # default black
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
        # Filled banner behind the label so white text stays readable.
        cv2.rectangle(image, (x1, y1 - 20), (x1 + w, y1), color, -1)
        cv2.putText(image, label, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    # os.path.basename is portable, unlike splitting on "/".
    save_name = os.path.join(visualized_path, os.path.basename(image_path).replace(".jpg", replace_name))
    cv2.imwrite(save_name, image)

def draw_truth_2d_bbox(image_path, annotations, visualized_path):
    """Draw fused-truth 2D bboxes (label + truth_type) onto an image and save it.

    Args:
        image_path: path to the source .jpg image.
        annotations: list of dicts with 'points' ([x1, y1, x2, y2]), a
            stringified integer 'label', and a 'truth_type' string.
        visualized_path: output directory; the file is saved with the fixed
            "_fused_truth_result.jpg" suffix.
    """
    image = cv2.imread(image_path)
    if image is None:
        # Unreadable/missing image: bail out instead of crashing in cv2.rectangle.
        print(f"Error: could not read image {image_path}")
        return
    for annotation in annotations:
        x1, y1, x2, y2 = (int(v) for v in annotation['points'])  # xyxy
        label = annotation['label']  # string(int)
        truth_type = annotation['truth_type']
        color = IMAGE_COLORS.get(int(label), (0, 0, 0))  # default black
        cv2.rectangle(image, (x1, y1), (x2, y2), color, 2)
        (w, h), _ = cv2.getTextSize(label, cv2.FONT_HERSHEY_SIMPLEX, 0.6, 2)
        # Filled banner behind the label so white text stays readable.
        cv2.rectangle(image, (x1, y1 - 20), (x1 + w, y1), color, -1)
        cv2.putText(image, label + "_" + truth_type, (x1, y1 - 5), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 2)
    # os.path.basename is portable, unlike splitting on "/".
    save_name = os.path.join(visualized_path, os.path.basename(image_path).replace(".jpg", "_fused_truth_result.jpg"))
    cv2.imwrite(save_name, image)

def load_point_cloud(pcd_path):
    """Read a .pcd file, drop rows containing NaN, and return the cleaned cloud.

    Returns None when the file holds no points. Clouds without color data
    get an all-black color array so downstream code can rely on colors
    being present.
    """
    pcd = o3d.io.read_point_cloud(pcd_path)
    if not pcd.has_points():
        print("Error: The point cloud has no points.")
        return None

    points = np.asarray(pcd.points)
    colors = np.asarray(pcd.colors) if pcd.has_colors() else np.zeros_like(points)

    # Filter out any point with a NaN coordinate, keeping colors aligned.
    nan_rows = np.isnan(points).any(axis=1)
    if nan_rows.any():
        keep = ~nan_rows
        points = points[keep]
        colors = colors[keep]

    pcd.colors = o3d.utility.Vector3dVector(colors)
    pcd.points = o3d.utility.Vector3dVector(points)
    return pcd

def get_bbox(center, dimension, rotation, bbox_color):
    """Build a colored wireframe LineSet for an oriented 3D bounding box."""
    obb = o3d.geometry.OrientedBoundingBox(center, rotation, dimension)
    obb.color = bbox_color
    wireframe = o3d.geometry.LineSet.create_from_oriented_bounding_box(obb)
    wireframe.paint_uniform_color(bbox_color)
    return wireframe

def draw_vendor_3d_bbox(pcd, annotations, sensor_id, pcd_path, visualized_path, replace_name):
    """Render a point cloud plus vendor 3D boxes off-screen and save a screenshot."""
    visualizer = o3d.visualization.Visualizer()
    visualizer.create_window(visible=False)  # off-screen rendering
    visualizer.add_geometry(pcd)
    visualizer.get_render_option().point_size = 0.001

    if sensor_id in annotations:
        for entry in annotations[sensor_id]['annotation']:
            yaw = entry['rotation'][2]  # only the z (yaw) component is used
            rot = o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz((0, 0, yaw))
            color = PCD_COLORS.get(entry['type'], [0.5, 0.5, 0.5])  # default gray
            visualizer.add_geometry(get_bbox(entry['center'], entry['dimension'], rot, color))

    visualizer.poll_events()
    visualizer.update_renderer()
    file_name = pcd_path.split("/")[-1].replace(".pcd", replace_name)
    visualizer.capture_screen_image(os.path.join(visualized_path, file_name), do_render=True)
    visualizer.destroy_window()

def draw_own_3d_bbox(pcd, pcd_path, annotations, visualized_path, replace_name):
    """Render a point cloud plus our own 3D box annotations and save a screenshot.

    Args:
        pcd: Open3D point cloud to render.
        pcd_path: source .pcd path; its file name seeds the output name.
        annotations: list of dicts with a 'cube' (position/scale/rotation
            sub-dicts keyed by "x"/"y"/"z") and a stringified integer 'label'.
        visualized_path: output directory.
        replace_name: suffix replacing ".pcd" in the saved file name.
    """
    vis = o3d.visualization.Visualizer()
    # visible=False renders off-screen, matching draw_vendor_3d_bbox and
    # avoiding a window popping up during batch processing.
    vis.create_window(visible=False)
    vis.add_geometry(pcd)
    render_option = vis.get_render_option()
    render_option.point_size = 0.001

    for annotation in annotations:
        cube = annotation["cube"]
        center = [cube['position'][axis] for axis in ("x", "y", "z")]
        dimension = [cube['scale'][axis] for axis in ("x", "y", "z")]
        # Only the z (yaw) rotation is applied.
        rotation_matrix = o3d.geometry.OrientedBoundingBox.get_rotation_matrix_from_xyz(
            (0, 0, cube['rotation']["z"]))
        bbox_color = PCD_COLORS.get(int(annotation['label']), [0.5, 0.5, 0.5])  # default gray
        vis.add_geometry(get_bbox(center, dimension, rotation_matrix, bbox_color))

    vis.poll_events()
    vis.update_renderer()
    # os.path.basename is portable, unlike splitting on "/".
    save_name = os.path.join(visualized_path, os.path.basename(pcd_path).replace(".pcd", replace_name))
    vis.capture_screen_image(save_name, do_render=True)
    vis.destroy_window()

def view_vendor_image_anno(image_path, visualized_path):
    """Find the vendor JSON next to *image_path* and render its 2D boxes, if present."""
    frame_id = image_path.split("/")[-1].split('_')[0]
    json_path = os.path.join(os.path.dirname(image_path), f"{frame_id}.json")
    camera_id = "GMSLCam_2"
    if not os.path.exists(json_path):
        return
    annotations = load_json(json_path, 'object2D')
    draw_vendor_2d_bbox(image_path, annotations, camera_id, visualized_path, "_vendor_image.jpg")

def view_vendor_pcd_anno(pcd_path, visualized_path):
    """Find the vendor JSON next to *pcd_path* and render its 3D boxes, if present."""
    frame_id = pcd_path.split("/")[-1].split('_')[0]
    json_path = os.path.join(os.path.dirname(pcd_path), f"{frame_id}.json")
    sensor_id = "pandarat128_2372"
    if not os.path.exists(json_path):
        return
    annotations = load_json(json_path, 'object3D')
    cloud = load_point_cloud(pcd_path)
    draw_vendor_3d_bbox(cloud, annotations, sensor_id, pcd_path, visualized_path, "_vendor_pcd.jpg")

def view_own_image_anno(image_path, generated_path, visualized_path):
    """Render our own 2D annotations for *image_path* if its JSON exists in *generated_path*."""
    stem = image_path.split("/")[-1].split('.')[0]
    json_path = os.path.join(generated_path, f"{stem}.json")
    if not os.path.exists(json_path):
        return
    shapes = load_json(json_path, 'shapes')
    draw_own_2d_bbox(image_path, shapes, visualized_path, "_own_image.jpg")

def view_own_pcd_anno(pcd_path, generated_path, visualized_path):
    """Render our own 3D annotations for *pcd_path* if its JSON exists in *generated_path*."""
    stem = pcd_path.split("/")[-1].split('.')[0]
    json_path = os.path.join(generated_path, f"{stem}.json")
    if not os.path.exists(json_path):
        return
    boxes = load_json(json_path, 'annotations')
    cloud = load_point_cloud(pcd_path)
    draw_own_3d_bbox(cloud, pcd_path, boxes, visualized_path, "_own_pcd.jpg")

def view_fused_image_anno(image_path, generated_path, visualized_path):
    """Render fused 2D annotations for *image_path* if its JSON exists in *generated_path*."""
    stem = image_path.split("/")[-1].split('.')[0]
    json_path = os.path.join(generated_path, f"{stem}.json")
    if not os.path.exists(json_path):
        return
    shapes = load_json(json_path, 'shapes')
    draw_own_2d_bbox(image_path, shapes, visualized_path, "_fused_image.jpg")

def view_fused_pcd_anno(pcd_path, generated_path, visualized_path):
    """Render fused 3D annotations for *pcd_path* if its JSON exists in *generated_path*."""
    stem = pcd_path.split("/")[-1].split('.')[0]
    json_path = os.path.join(generated_path, f"{stem}.json")
    if not os.path.exists(json_path):
        return
    boxes = load_json(json_path, 'annotations')
    cloud = load_point_cloud(pcd_path)
    draw_own_3d_bbox(cloud, pcd_path, boxes, visualized_path, "_fused_pcd.jpg")

def view_fused_multi_domain_truth_anno(image_path, generated_path, visualized_path):
    """Render fused multi-domain truth 2D boxes for *image_path*, if the JSON exists."""
    frame_id = image_path.split("/")[-1].split('_')[0]
    json_path = os.path.join(generated_path, f"{frame_id}_fused_multi_domain_truth.json")
    if not os.path.exists(json_path):
        return
    shapes = load_json(json_path, 'shapes')
    draw_truth_2d_bbox(image_path, shapes, visualized_path)



