import json
from shapely.geometry import Polygon
from shapely.affinity import rotate
import difflib
import numpy as np
import torch
import os
import yaml

# Label-id -> class-name maps used by the annotation-rewriting functions below.
# NOTE(review): "pedestrain" is a misspelling of "pedestrian", but it is a
# runtime value that becomes a JSON summary key consumed downstream — do not
# "fix" the spelling here without updating every consumer.
own_image_class_map = {"0":"car", "1":"truck", "2":"motorcycle", "3":"bicycle", "4":"tricycle", "5":"bus", "6":"pedestrain"}
# Coarser map for fused images: several fine classes collapse to "vehicle" /
# "motorcycle", so summary counts for collapsed keys must be summed.
fuse_image_class_map = {"0":"vehicle", "1":"vehicle", "2":"motorcycle", "3":"motorcycle", "4":"tricycle", "5":"vehicle", "6":"pedestrain"}
# Vendor point-cloud label ids (note: keys start at "1", not "0").
vendor_pcd_class_map = {"1":"pedestrain", "2":"motorcycle", "3":"vehicle", "4":"tricycle"}



def read_yaml(file_path):
    """Parse the YAML file at *file_path* and return its contents."""
    with open(file_path, 'r') as stream:
        return yaml.safe_load(stream)

def load_json(json_path, key):
    """Load *json_path* and return the value stored under *key*.

    Returns an empty dict when the key is absent from the file.
    """
    with open(json_path, 'r') as fp:
        payload = json.load(fp)
    return payload.get(key, {})
    
def get_3d_box_corners(center, size, yaw):
    """Return the 8 corners of an oriented 3D box as an (8, 3) ndarray.

    Corner order: indices 0-3 form the top face (+height/2 before
    translation), 4-7 the bottom face; top corner i shares its (x, y)
    with bottom corner i + 4.

    Args:
        center: (cx, cy, cz) box center.
        size: (length, width, height) box extents.
        yaw: rotation about the z axis, in radians.
    """
    cx, cy, cz = center
    length, width, height = size

    # Sign patterns reproduce the canonical corner ordering above.
    sx = np.array([1, 1, -1, -1, 1, 1, -1, -1], dtype=float)
    sy = np.array([1, -1, -1, 1, 1, -1, -1, 1], dtype=float)
    sz = np.array([1, 1, 1, 1, -1, -1, -1, -1], dtype=float)
    local = np.stack([sx * (length / 2), sy * (width / 2), sz * (height / 2)])

    # Rotate about z by yaw, then translate to the box center.
    cos_y, sin_y = np.cos(yaw), np.sin(yaw)
    rot = np.array([[cos_y, -sin_y, 0.0],
                    [sin_y, cos_y, 0.0],
                    [0.0, 0.0, 1.0]])
    return (rot @ local).T + np.array([cx, cy, cz])

def get_2d_projection(box_corners):
    """Project 3D box corners onto the xy-plane as a shapely Polygon.

    Only the four top-face corners (indices 0-3) are used: top and bottom
    corners share (x, y), so tracing all 8 — as the original did — produces
    an invalid, doubly-traced ring whose shoelace area is twice the true
    footprint and whose intersections can raise topology errors.

    Args:
        box_corners: (8, 3) array-like from get_3d_box_corners.

    Returns:
        shapely.geometry.Polygon of the box's bird's-eye-view footprint.
    """
    return Polygon([(box_corners[i][0], box_corners[i][1]) for i in range(4)])

def calculate_3d_iou(box1, box2):
    """Compute the 3D IoU of two oriented (z-axis-rotated) boxes.

    Each box is [cx, cy, cz, length, width, height, yaw], yaw in radians.
    The intersection volume is (BEV polygon intersection area) x
    (z-extent overlap), which is exact for upright boxes like these.

    Returns:
        IoU in [0, 1]; 0 when the union volume is 0.
    """
    center1, size1, yaw1 = box1[:3], box1[3:6], box1[6]
    center2, size2, yaw2 = box2[:3], box2[3:6], box2[6]

    box1_corners = get_3d_box_corners(center1, size1, yaw1)
    box2_corners = get_3d_box_corners(center2, size2, yaw2)

    # Bird's-eye-view footprints.
    box1_2d = get_2d_projection(box1_corners)
    box2_2d = get_2d_projection(box2_corners)

    inter_area = box1_2d.intersection(box2_2d).area

    # Vertical overlap: corner 0 lies on the top face, corner 4 on the bottom.
    z_overlap = max(0, min(box1_corners[0][2], box2_corners[0][2]) - max(box1_corners[4][2], box2_corners[4][2]))
    inter_volume = inter_area * z_overlap

    volume1 = size1[0] * size1[1] * size1[2]
    volume2 = size2[0] * size2[1] * size2[2]
    union_volume = volume1 + volume2 - inter_volume

    # (Removed: an unused 2D union-area computation and a no-op
    # `union_volume = union_volume` self-assignment from the original.)
    return inter_volume / union_volume if union_volume != 0 else 0


def calculate_2d_iou(box1, box2):
    """Compute the IoU of two axis-aligned 2D boxes.

    Each box may be given either as a flat [xmin, ymin, xmax, ymax] list
    or as a corner pair [[xmin, ymin], [xmax, ymax]].

    Returns:
        Intersection-over-union in [0, 1]; 0 when the union area is 0.

    Raises:
        ValueError: if a box is in neither supported format (the original
            silently fell through to an UnboundLocalError here).
    """
    def _unpack(box):
        # Normalize either supported format to (xmin, ymin, xmax, ymax).
        if len(box) == 4:  # [x, y, x, y]
            return box[0], box[1], box[2], box[3]
        if len(box) == 2:  # [[x, y], [x, y]]
            (xmin, ymin), (xmax, ymax) = box
            return xmin, ymin, xmax, ymax
        raise ValueError(f"unsupported box format: {box!r}")

    x1_min, y1_min, x1_max, y1_max = _unpack(box1)
    x2_min, y2_min, x2_max, y2_max = _unpack(box2)

    inter_width = max(0, min(x1_max, x2_max) - max(x1_min, x2_min))
    inter_height = max(0, min(y1_max, y2_max) - max(y1_min, y2_min))
    inter_area = inter_width * inter_height

    box1_area = (x1_max - x1_min) * (y1_max - y1_min)
    box2_area = (x2_max - x2_min) * (y2_max - y2_min)
    union_area = box1_area + box2_area - inter_area
    return inter_area / union_area if union_area != 0 else 0

def nms_3d(boxes, scores, iou_threshold):
    """Greedy non-maximum suppression over oriented 3D boxes.

    Args:
        boxes: indexable collection of 7-element boxes
            [cx, cy, cz, length, width, height, yaw] accepted by
            calculate_3d_iou.
        scores: 1-D torch tensor of confidence scores, one per box.
        iou_threshold: candidates with IoU >= this value against an
            already-kept box are suppressed.

    Returns:
        torch tensor of kept box indices, ordered by descending score.
    """
    keep = []
    # Process candidates from highest to lowest score.
    _, idxs = scores.sort(descending=True)

    while idxs.numel() > 0:
        # Highest-scoring remaining candidate is always kept.
        i = idxs[0]
        keep.append(i)
        if idxs.numel() == 1:
            break
        ious = []
        # Pairwise IoU of the kept box against every remaining candidate.
        for c in range(1, len(idxs)):
            ious.append(calculate_3d_iou(boxes[i], boxes[idxs[c]]))
        #ious = calculate_3d_iou(boxes[i].unsqueeze(0), boxes[idxs[1:]])
        ious = np.array(ious)
        # Drop candidates that overlap the kept box too strongly.
        # NOTE(review): indexes a torch tensor with a numpy bool mask —
        # relies on torch/numpy interop; confirm on the torch version in use.
        idxs = idxs[1:][ious < iou_threshold]

    return torch.tensor(keep)

def find_closest_match(target, options):
    """Return the index in *options* of the string most similar to *target*.

    Tries difflib.get_close_matches first (default similarity cutoff 0.6);
    when no option clears the cutoff, falls back to an exhaustive
    SequenceMatcher scan over all options.

    Args:
        target: string to match.
        options: non-empty list of candidate strings.

    Returns:
        Index of the best-matching option.
    """
    closest_match = difflib.get_close_matches(target, options, n=1)
    if closest_match:
        return options.index(closest_match[0])
    # Fallback: pick the option with the *highest* similarity ratio.
    # (Bug fix: the original used min(), which returned the LEAST similar
    # option, contradicting its own "max similarity" comment.)
    return max(range(len(options)), key=lambda i: difflib.SequenceMatcher(None, target, options[i]).ratio())

def get_labelId_from_phrase(phrases, labelsId, prompts):
    """Map each detected phrase to a label id.

    Args:
        phrases: iterable of detected phrase strings.
        labelsId: comma-separated label ids, aligned with the prompt order.
        prompts: period-separated prompt string with a trailing period,
            e.g. "car.truck.".

    Returns:
        List of label-id strings, one per phrase.
    """
    # Drop the empty tail produced by the trailing "." in the prompt string.
    candidates = prompts.split(".")[:-1]
    id_list = labelsId.split(",")
    return [id_list[find_closest_match(phrase, candidates)] for phrase in phrases]

def write_json(ret_dict, save_path):
    """Serialize *ret_dict* as pretty-printed UTF-8 JSON at *save_path*.

    Creates intermediate directories as needed.

    Args:
        ret_dict: JSON-serializable object.
        save_path: destination file path.
    """
    directory_path = os.path.dirname(save_path)
    # Bug fix: os.makedirs("") raises FileNotFoundError, so only create
    # directories when the path actually has a directory component.
    if directory_path:
        os.makedirs(directory_path, exist_ok=True)
    with open(save_path, 'w', encoding='utf-8') as json_file:
        json.dump(ret_dict, json_file, ensure_ascii=False, indent=4)

def change_image_anno_format(abs_file_path):
    """Rewrite a vendor image-annotation JSON file in place.

    - Renames numeric shape labels via own_image_class_map.
    - Converts flat [xmin, ymin, xmax, ymax] points to
      [[xmin, ymin], [xmax, ymax]].
    - Stamps each shape's flags with a sequential "track_id".
    - Renames the numeric keys of the "summary" section likewise.
    """
    with open(abs_file_path, "r") as fp:
        annotation = json.load(fp)

    for track_id, shape in enumerate(annotation["shapes"]):
        # Leave labels that are not numeric ids untouched.
        shape["label"] = own_image_class_map.get(shape["label"], shape["label"])
        xmin, ymin, xmax, ymax = shape["points"]
        shape["points"] = [[xmin, ymin], [xmax, ymax]]
        shape["flags"].update({"track_id": track_id})

    summary = annotation["summary"]
    for old_key in list(summary.keys()):
        if old_key in own_image_class_map:
            summary[own_image_class_map[old_key]] = summary.pop(old_key)

    with open(abs_file_path, "w") as fp:
        json.dump(annotation, fp, indent=4)


def change_pcd_anno_format(abs_file_path):
    """Rewrite a vendor point-cloud annotation JSON file in place.

    - Renames numeric annotation labels via vendor_pcd_class_map.
    - Stamps each annotation's flags with a sequential "track_id".
    - Renames the numeric keys of the "summary" section likewise.
    """
    with open(abs_file_path, "r") as fp:
        data = json.load(fp)

    for track_id, entry in enumerate(data["annotations"]):
        # Leave labels that are not numeric ids untouched.
        entry["label"] = vendor_pcd_class_map.get(entry["label"], entry["label"])
        entry["flags"].update({"track_id": track_id})

    summary = data["summary"]
    for old_key in list(summary.keys()):
        if old_key in vendor_pcd_class_map:
            summary[vendor_pcd_class_map[old_key]] = summary.pop(old_key)

    with open(abs_file_path, "w") as fp:
        json.dump(data, fp, indent=4)

def change_fuse_image_anno_format(abs_file_path):
    """Rewrite a fused-image annotation JSON file in place.

    Like change_image_anno_format but uses the coarser fuse_image_class_map,
    where several numeric ids collapse to the same class (e.g. "vehicle") —
    so summary counts for collapsed keys are summed rather than overwritten.
    """
    with open(abs_file_path, "r") as fp:
        annotation = json.load(fp)

    for track_id, shape in enumerate(annotation["shapes"]):
        # Leave labels that are not numeric ids untouched.
        shape["label"] = fuse_image_class_map.get(shape["label"], shape["label"])
        xmin, ymin, xmax, ymax = shape["points"]
        shape["points"] = [[xmin, ymin], [xmax, ymax]]
        shape["flags"].update({"track_id": track_id})

    summary = annotation["summary"]
    for old_key in list(summary.keys()):
        if old_key in fuse_image_class_map:
            new_key = fuse_image_class_map[old_key]  # multiple keys may map to one class
            count = summary.pop(old_key)
            if new_key in summary:
                summary[new_key] += count
            else:
                summary[new_key] = count

    with open(abs_file_path, "w") as fp:
        json.dump(annotation, fp, indent=4)