from utils import calculate_3d_iou, calculate_2d_iou, load_json, read_yaml, write_json
from view_anno import view_fused_image_anno, view_fused_pcd_anno, view_fused_multi_domain_truth_anno
import json
import numpy as np
import os
import cv2

# Sensor identifiers; used both to build per-frame file names and as keys
# into the vendor annotation JSON ("object2D"/"object3D" sections).
cam_name = "GMSLCam_2"
lidar_name = "pandarat128_2372"

# File-name suffixes: each frame's files are "<frameid>" + one of these.
image_suffix = "_" + cam_name + ".jpg"
pcd_suffix = "_" + lidar_name + ".pcd"
fuse_image_suffix = "_" + lidar_name + "_" + cam_name + ".jpg"

# Final label maps applied when rewriting the fused JSONs (datasetType "3").
# Keys are intermediate string labels; several image keys collapse to "vehicle".
# NOTE(review): "pedestrain" is misspelled but is an emitted runtime label —
# downstream tooling presumably expects this exact string, so it is kept.
fuse_image_class_map = {"0":"vehicle", "1":"vehicle", "2":"motorcycle", "3":"motorcycle", "4":"tricycle", "5":"vehicle", "6":"pedestrain"}
vendor_pcd_class_map = {"1":"pedestrain", "2":"motorcycle", "3":"vehicle", "4":"tricycle"}

class FuseDetection(object):
    """Fuse in-house ("own") detections with vendor annotations and rewrite JSONs.

    Supported datasetType values (strings):
      "1" - fuse 2D image detections only
      "2" - fuse 3D point-cloud detections only
      "3" - multi-domain: fuse both, project fused pcd boxes into the image,
            associate them with image boxes and write track ids into both JSONs.

    Only datasetType "3" loads calibration (lidar->camera projection is needed).
    """

    def __init__(self, downloaded_path, datasetType, width=1920, height=1080) -> None:
        # own pcd labels (str): 1car 2truck 3construction_vehicle 4bus 5trailer
        #   6barrier 7motorcycle 8bicycle 9pedestrain 10traffic_cone
        # vendor pcd types (int): 1pedestrain 2motorcycle 3vehicle 4tricycle
        # Vendor taxonomy is preferred for pcd; 6barrier/10traffic_cone are dropped.
        self.pcd_map_own_to_vendor = {"1": 3, "2": 3, "3": 3, "4": 3, "5": 3, "7": 2, "8": 2, "9": 1}

        # own image labels (str): 0car 1truck 2motorcycle 3bicycle 4tricycle 5bus 6pedestrain
        # vendor image category_id (int): 1pedestrain 2motorcycle 3car 4bus 5tricycle
        #   6traffic_light 7triffic_sign
        # Own taxonomy is preferred for images; 6traffic_light/7triffic_sign are dropped.
        self.image_map_vendor_to_own = {1: "6", 2: "2", 3: "0", 4: "5", 5: "4"}

        # projected-pcd labels (str): 1pedestrain 2motorcycle 3vehicle 4tricycle
        self.image_map_projected_to_own = {"1": "6", "2": "2", "3": "0", "4": "4"}

        self.image_width = width
        self.image_height = height
        # Only the multi-domain flow projects lidar boxes into the camera image.
        if datasetType == "3":
            self.get_transform(os.path.join(downloaded_path, "calibration.json"))

    def get_transform(self, calibration_filepath):
        """Load calibration and cache vehicle->lidar / lidar->camera transforms
        plus camera intrinsics and distortion coefficients."""
        extrinsics = load_json(calibration_filepath, "pandarat128_2372_TO_GMSLCam_2")
        intrinsics = load_json(calibration_filepath, "GMSLCam_2_Intr")
        lidar_to_vehicle = load_json(calibration_filepath, "pandarat128_2372_TO_Vehicle")

        # Stored matrices appear to be column-major, hence the .T after reshape
        # — TODO confirm against the calibration file format.
        lidar_to_camera_mat = np.array(extrinsics['TransformationMatrix']).reshape(4, 4).T
        rotation_matrix_lidar_to_camera = lidar_to_camera_mat[:3, :3]
        translation_vector_lidar_to_camera = lidar_to_camera_mat[:3, 3]

        lidar_to_vehicle_mat = np.array(lidar_to_vehicle['TransformationMatrix']).reshape(4, 4).T
        rotation_matrix_lidar_to_vehicle = lidar_to_vehicle_mat[:3, :3]
        translation_vector_lidar_to_vehicle = lidar_to_vehicle_mat[:3, 3]

        # Invert lidar->vehicle to obtain vehicle->lidar.
        # BUGFIX: the inverse of a rigid transform [R | t] is [R^-1 | -R^-1 @ t];
        # the previous code used plain -t, which is wrong unless R is identity.
        rotation_matrix_vehicle_to_lidar = np.linalg.inv(rotation_matrix_lidar_to_vehicle)
        translation_vector_vehicle_to_lidar = -rotation_matrix_vehicle_to_lidar @ translation_vector_lidar_to_vehicle
        self.trans_matrix_vehicle_to_lidar = self.get_transformation_matrix(
            rotation_matrix_vehicle_to_lidar, translation_vector_vehicle_to_lidar)

        self.trans_matrix_lidar_to_camera = self.get_transformation_matrix(
            rotation_matrix_lidar_to_camera, translation_vector_lidar_to_camera)
        self.intrinsic_matrix = self.load_intrinsics(intrinsics['intrinsic'])
        self.distort_list = self.load_distort(intrinsics['distort'])

    def get_transformation_matrix(self, rotation, translation):
        """Assemble a 4x4 homogeneous transform from a 3x3 rotation and a 3-vector."""
        trans_matrix = np.eye(4)
        trans_matrix[:3, :3] = rotation
        trans_matrix[:3, 3] = translation
        return trans_matrix

    def load_intrinsics(self, intrinsics):
        """Build the 3x3 pinhole camera matrix from fx/fy/cx/cy."""
        fx = intrinsics['fx']
        fy = intrinsics['fy']
        cx = intrinsics['cx']
        cy = intrinsics['cy']
        return np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])

    def load_distort(self, distort):
        """Return distortion coefficients in OpenCV order (k1, k2, p1, p2, k3)."""
        return np.array([distort['k1'], distort['k2'], distort['p1'], distort['p2'], distort['k3']])

    def transform_3d_bbox(self, bbox, transformation_matrix):
        """Apply a 4x4 homogeneous transform to an (N, 3) array of points."""
        bbox_homogeneous = np.hstack((bbox, np.ones((bbox.shape[0], 1))))
        bbox_transformed = transformation_matrix @ bbox_homogeneous.T
        return bbox_transformed[:3, :].T

    def project_to_image(self, bbox_3d_in_camera, intrinsic_matrix, distort_list):
        """Project (N, 3) camera-frame points to (N, 2) pixel coordinates."""
        # Pinhole projection into homogeneous image coordinates.
        projected_points = np.dot(intrinsic_matrix, bbox_3d_in_camera.T).T
        # Perspective divide.
        bbox_2d_in_image = projected_points[:, :2] / projected_points[:, 2].reshape(-1, 1)
        # NOTE(review): undistortPoints *removes* distortion from points that were
        # projected through an ideal pinhole model; to simulate the real distorted
        # image, cv2.projectPoints with dist coeffs may be intended — confirm.
        bbox_2d_in_image = cv2.undistortPoints(
            bbox_2d_in_image.reshape(-1, 1, 2), intrinsic_matrix, distort_list, P=intrinsic_matrix)
        return bbox_2d_in_image.reshape(-1, 2)

    def get_rotation_matrix(self, yaw):
        """Rotation about the +Z axis by `yaw` radians."""
        cos_yaw = np.cos(yaw)
        sin_yaw = np.sin(yaw)
        return np.array([
            [cos_yaw, -sin_yaw, 0],
            [sin_yaw, cos_yaw, 0],
            [0, 0, 1]
        ])

    def project_pcd_to_image(self, pcd_detections):
        """Project each 3D cube (vehicle frame) into the image as a 2D rectangle.

        Boxes entirely outside the image are dropped. Returned dicts mimic the
        image-detection shape format (points/label/conf/shape_type/group_id/flags).
        """
        bbox_2d_list = []
        for obj in pcd_detections:
            center = np.array([obj["cube"]["position"]["x"], obj["cube"]["position"]["y"], obj["cube"]["position"]["z"]])
            dimension = np.array([obj["cube"]["scale"]["x"], obj["cube"]["scale"]["y"], obj["cube"]["scale"]["z"]])
            yaw = obj["cube"]["rotation"]["z"]
            # Eight corners of the axis-aligned box centered at the origin.
            bbox_3d_local = np.array([
                [-dimension[0] / 2, -dimension[1] / 2, -dimension[2] / 2],
                [ dimension[0] / 2, -dimension[1] / 2, -dimension[2] / 2],
                [ dimension[0] / 2,  dimension[1] / 2, -dimension[2] / 2],
                [-dimension[0] / 2,  dimension[1] / 2, -dimension[2] / 2],
                [-dimension[0] / 2, -dimension[1] / 2,  dimension[2] / 2],
                [ dimension[0] / 2, -dimension[1] / 2,  dimension[2] / 2],
                [ dimension[0] / 2,  dimension[1] / 2,  dimension[2] / 2],
                [-dimension[0] / 2,  dimension[1] / 2,  dimension[2] / 2]
            ])

            # local -> vehicle -> lidar -> camera -> image pixels.
            rotation_matrix = self.get_rotation_matrix(yaw)
            bbox_3d_rotated = np.dot(bbox_3d_local, rotation_matrix.T)
            bbox_3d_vehicle = bbox_3d_rotated + center
            bbox_3d_in_lidar = self.transform_3d_bbox(bbox_3d_vehicle, self.trans_matrix_vehicle_to_lidar)
            bbox_3d_in_camera = self.transform_3d_bbox(bbox_3d_in_lidar, self.trans_matrix_lidar_to_camera)
            bbox_2d = self.project_to_image(bbox_3d_in_camera, self.intrinsic_matrix, self.distort_list)

            # Axis-aligned 2D bounding box of the projected corners.
            corners = bbox_2d.tolist()
            xmin = min(p[0] for p in corners)
            ymin = min(p[1] for p in corners)
            xmax = max(p[0] for p in corners)
            ymax = max(p[1] for p in corners)
            if xmax < 0 or ymax < 0 or xmin > self.image_width or ymin > self.image_height:
                continue  # box lies entirely outside the image
            bbox_2d_list.append({
                "points": [xmin, ymin, xmax, ymax],
                "label": obj["label"],
                "conf": 1.0,  # placeholder confidence for projected boxes
                "shape_type": "rectangle",
                "group_id": "null",
                "flags": {},
            })
        return bbox_2d_list

    def map_own_pcd_class(self, own_det, map_info):
        """Remap an own pcd label via map_info; final label is kept as a string."""
        own_det["label"] = str(map_info[own_det["label"]])
        return own_det

    def modify_vendor_image_detection(self, vendor_det, map_info):
        """In-place: convert a vendor image detection to the own-shape format,
        keeping only label/points/conf/shape_type/group_id/flags."""
        vendor_det["label"] = map_info[vendor_det["category_id"]]
        vendor_det["points"] = vendor_det["bbox"]
        vendor_det["conf"] = vendor_det["score"]
        vendor_det["shape_type"] = "rectangle"
        vendor_det["group_id"] = "null"
        vendor_det["flags"] = {}
        keep = ("label", "points", "conf", "shape_type", "group_id", "flags")
        for k in [k for k in vendor_det.keys() if k not in keep]:
            del vendor_det[k]
        return vendor_det

    def modify_vendor_pcd_detection(self, vendor_det):
        """In-place: convert a vendor pcd detection to the own cube format,
        keeping only label/cube/group_id/flags."""
        vendor_det["label"] = str(vendor_det["type"])
        vendor_det["cube"] = {"rotation": {"x": 0.0, "y": 0.0, "z": vendor_det["rotation"][2]},
                              "scale": {"x": vendor_det["dimension"][0], "y": vendor_det["dimension"][1], "z": vendor_det["dimension"][2]},
                              "position": {"x": vendor_det["center"][0], "y": vendor_det["center"][1], "z": vendor_det["center"][2]}}
        vendor_det["group_id"] = "null"
        vendor_det["flags"] = {}
        keep = ("label", "cube", "group_id", "flags")
        for k in [k for k in vendor_det.keys() if k not in keep]:
            del vendor_det[k]
        return vendor_det

    def modify_projected_pcd_detection(self, proj_det):
        """Remap a projected pcd label into the own image taxonomy."""
        proj_det["label"] = self.image_map_projected_to_own[proj_det["label"]]
        return proj_det

    def cube2box(self, cube):
        """Flatten a cube dict to [x, y, z, sx, sy, sz, yaw]."""
        return [cube["position"]["x"], cube["position"]["y"], cube["position"]["z"],
                cube["scale"]["x"], cube["scale"]["y"], cube["scale"]["z"], cube["rotation"]["z"]]

    def fuse_pcd_detections(self, own_detections, vendor_detections, iou_threshold=0.3):
        """Fuse 3D detections. Vendor detections seed the result; own detections
        that match nothing (IoU below threshold/2 against all fused boxes, or no
        fused boxes at all) are appended with their original own labels.

        Note: mutates both input lists (vendor dicts are reformatted; matched
        own dicts are consumed).
        """
        fused_detections = []
        for vendor_det in vendor_detections:
            vendor_det = self.modify_vendor_pcd_detection(vendor_det)
            vendor_box = self.cube2box(vendor_det['cube'])
            all_ious = [calculate_3d_iou(self.cube2box(own_det['cube']), vendor_box)
                        for own_det in own_detections]
            fused_detections.append(vendor_det)  # vendor detection seeds the result
            if not all_ious:
                continue
            max_iou = max(all_ious)
            if max_iou > iou_threshold:
                # BUGFIX: pop by index instead of remove-by-value; remove() deletes
                # the first *equal* element, which can be the wrong one when two
                # detections compare equal.
                own_detections.pop(all_ious.index(max_iou))

        for own_det in own_detections:
            own_box = self.cube2box(own_det['cube'])
            all_ious = [calculate_3d_iou(own_box, self.cube2box(fused_det['cube']))
                        for fused_det in fused_detections]
            if not all_ious:
                fused_detections.append(own_det)  # nothing fused yet: keep own detection
                continue
            if max(all_ious) < iou_threshold / 2:  # matches no fused detection
                fused_detections.append(own_det)
        return fused_detections

    def fuse_image_detections(self, own_detections, vendor_detections, iou_threshold=0.7):
        """Fuse 2D detections. Own detections seed the result; unmatched vendor
        detections are converted to own format and appended (traffic light/sign
        categories 6 and 7 are discarded).

        Note: mutates both input lists (matched vendor dicts are consumed).
        """
        fused_detections = []
        for own_det in own_detections:
            all_ious = [calculate_2d_iou(own_det['points'], vendor_det['bbox'])
                        for vendor_det in vendor_detections]
            fused_detections.append(own_det)  # own detection seeds the result
            if not all_ious:
                continue
            max_iou = max(all_ious)
            if max_iou > iou_threshold:
                # BUGFIX: pop by index instead of remove-by-value (see fuse_pcd_detections).
                vendor_detections.pop(all_ious.index(max_iou))

        for vendor_det in vendor_detections:
            all_ious = [calculate_2d_iou(det['points'], vendor_det['bbox'])
                        for det in fused_detections]
            if not all_ious:
                if vendor_det["category_id"] in (6, 7):  # traffic light / traffic sign
                    continue
                fused_detections.append(self.modify_vendor_image_detection(vendor_det, self.image_map_vendor_to_own))
                continue
            if max(all_ious) < iou_threshold / 2:  # matches no fused detection
                if vendor_det["category_id"] in (6, 7):  # traffic light / traffic sign
                    continue
                fused_detections.append(self.modify_vendor_image_detection(vendor_det, self.image_map_vendor_to_own))
        return fused_detections

    def check_class_matched(self, image_label, pcd_label):
        """Return 1 when an image label and a projected pcd label denote the
        same coarse class, else 0. All labels are strings."""
        if pcd_label == "1" and image_label == "6":  # pedestrian
            return 1
        if pcd_label == "2" and image_label in ["2", "3"]:  # two-wheeler
            return 1
        if pcd_label == "3" and image_label in ["0", "1", "5"]:  # vehicle
            return 1
        if pcd_label == "4" and image_label == "4":  # three-wheeler
            return 1
        return 0

    def classify_image_detections(self, image_detections, projected_detections, iou_threshold=0.7):
        """Greedily associate image detections with projected pcd detections.

        Returns a list parallel to image_detections: element i is the index of
        the matched projected detection, or -1 when nothing matched. Each
        projected detection is consumed by at most one image detection.
        """
        image_matched_pcd_list = []
        for img_det in image_detections:
            all_ious = []
            for proj_seq, proj_det in enumerate(projected_detections):
                if proj_seq in image_matched_pcd_list:
                    all_ious.append(0.0)  # already claimed by an earlier image det
                    continue
                if self.check_class_matched(img_det["label"], proj_det["label"]):
                    all_ious.append(calculate_2d_iou(img_det['points'], proj_det['points']))
                else:
                    all_ious.append(0.0)
            if not all_ious:  # no projected detections at all
                image_matched_pcd_list.append(-1)
                continue
            max_iou = max(all_ious)
            if max_iou > iou_threshold:
                image_matched_pcd_list.append(all_ious.index(max_iou))
            else:
                image_matched_pcd_list.append(-1)
        return image_matched_pcd_list

    def generate_pcd_json_file(self, annotations, summary, file_path):
        """Write a pcd annotation JSON ({annotations, summary})."""
        write_json({"annotations": annotations, "summary": summary}, file_path)

    def generate_image_json_file(self, annotations, summary, image_info, file_path):
        """Write a labelme-style image annotation JSON."""
        data = {
            "version": "4.6.0",
            "flags": {},
            "shapes": annotations,
            "summary": summary,
            "imagePath": image_info["imagePath"],
            "imageHeight": image_info["imageHeight"],
            "imageWidth": image_info["imageWidth"]
        }
        write_json(data, file_path)

    def _load_vendor_annotation(self, vendor_annotation_json, domain_key, sensor_name):
        """Return the vendor annotation list for one sensor, or [] when absent."""
        anno = load_json(vendor_annotation_json, domain_key)
        if sensor_name in anno and "annotation" in anno[sensor_name]:
            return anno[sensor_name]["annotation"]
        return []

    def _summarize(self, detections, count_key):
        """Per-label counts plus the total stored under count_key ('rect'/'cube')."""
        summary = {label: sum(1 for det in detections if det['label'] == label)
                   for label in set(det['label'] for det in detections)}
        summary[count_key] = len(detections)
        return summary

    def _fuse_image_domain(self, download_path, generated_path, visualized_path, IF_DEBUG, logger, frameid, vendor_annotation_json, suffix):
        """Fuse image detections for one frame; overwrite the own-image JSON."""
        own_json_path = os.path.join(generated_path, frameid + suffix.replace("jpg", "json"))
        image_own = load_json(own_json_path, "shapes")
        image_vendor = self._load_vendor_annotation(vendor_annotation_json, "object2D", cam_name)
        fused = self.fuse_image_detections(image_own, image_vendor)
        image_info = {"imagePath": frameid + suffix,
                      "imageHeight": self.image_height,
                      "imageWidth": self.image_width}
        self.generate_image_json_file(fused, self._summarize(fused, 'rect'), image_info, own_json_path)
        logger.info("frameid {} fuse_image_detections done, write json in {}".format(frameid, own_json_path))
        if IF_DEBUG:
            view_fused_image_anno(os.path.join(download_path, frameid + suffix), generated_path, visualized_path)
        return fused

    def _fuse_pcd_domain(self, download_path, generated_path, visualized_path, IF_DEBUG, logger, frameid, vendor_annotation_json):
        """Fuse pcd detections for one frame; overwrite the own-pcd JSON."""
        own_json_path = os.path.join(generated_path, frameid + pcd_suffix.replace("pcd", "json"))
        pcd_own = load_json(own_json_path, "annotations")
        pcd_vendor = self._load_vendor_annotation(vendor_annotation_json, "object3D", lidar_name)
        fused = self.fuse_pcd_detections(pcd_own, pcd_vendor)
        self.generate_pcd_json_file(fused, self._summarize(fused, 'cube'), own_json_path)
        logger.info("frameid {} fuse_pcd_detections done, write json in {}".format(frameid, own_json_path))
        if IF_DEBUG:
            view_fused_pcd_anno(os.path.join(download_path, frameid + pcd_suffix), generated_path, visualized_path)
        return fused

    def _rewrite_image_json(self, json_path):
        """Rewrite the fused image JSON in place: map labels to final class
        names, convert points to [[xmin, ymin], [xmax, ymax]] pairs, attach a
        per-shape track_id, and merge summary keys that map to one class."""
        with open(json_path, "r") as f:
            json_data = json.load(f)
        for seq, shape in enumerate(json_data["shapes"]):
            label = shape["label"]
            if label in fuse_image_class_map:
                shape["label"] = fuse_image_class_map[label]
            xmin, ymin, xmax, ymax = shape["points"]
            shape["points"] = [[xmin, ymin], [xmax, ymax]]
            shape["flags"].update({"track_id": seq})
        for key in list(json_data["summary"].keys()):
            if key in fuse_image_class_map:
                new_key = fuse_image_class_map[key]  # several raw keys map to "vehicle"
                if new_key not in json_data["summary"]:
                    json_data["summary"][new_key] = json_data["summary"].pop(key)
                else:
                    json_data["summary"][new_key] += json_data["summary"].pop(key)
        with open(json_path, "w") as f:
            json.dump(json_data, f, indent=4)

    def _rewrite_pcd_json(self, json_path, image_matched_pcd_list):
        """Rewrite the fused pcd JSON in place: map labels to final class names
        and attach track ids associated with the image result (unmatched pcd
        detections get 200+seq so they never collide with image track ids)."""
        with open(json_path, "r") as f:
            json_data = json.load(f)
        for seq, annotation in enumerate(json_data["annotations"]):
            label = annotation["label"]
            if label in vendor_pcd_class_map:
                annotation["label"] = vendor_pcd_class_map[label]
            if seq in image_matched_pcd_list:
                annotation["flags"].update({"track_id": image_matched_pcd_list.index(seq)})
            else:
                annotation["flags"].update({"track_id": 200 + seq})
        for key in list(json_data["summary"].keys()):
            if key in vendor_pcd_class_map:
                json_data["summary"][vendor_pcd_class_map[key]] = json_data["summary"].pop(key)
        with open(json_path, "w") as f:
            json.dump(json_data, f, indent=4)

    def fuse_detection(self, download_path, generated_path, visualized_path, IF_DEBUG, datasetType, logger):
        """Entry point: find frames with a complete file set in download_path,
        fuse per datasetType, and overwrite the generated JSONs.

        A "complete" frame needs the vendor "<frameid>.json" plus the raw image
        and/or pcd file, depending on datasetType.
        """
        full_info_frameid = []
        file_list = os.listdir(download_path)
        frameid_list = [x[:6] for x in file_list]  # frame ids are the first 6 chars of each file name
        for frameid in frameid_list:
            if datasetType == "1":  # fuse image result
                if frameid_list.count(frameid) >= 2 and frameid + ".json" in file_list and frameid + image_suffix in file_list:
                    full_info_frameid.append(frameid)
            elif datasetType == "2":  # fuse pcd result
                if frameid_list.count(frameid) >= 2 and frameid + ".json" in file_list and frameid + pcd_suffix in file_list:
                    full_info_frameid.append(frameid)
            else:  # "3": fuse multi-domain result — needs all three files
                if frameid_list.count(frameid) == 3 and frameid + ".json" in file_list and frameid + pcd_suffix in file_list and frameid + fuse_image_suffix in file_list:
                    full_info_frameid.append(frameid)
        full_info_frameid = sorted(set(full_info_frameid))

        for frameid in full_info_frameid:
            vendor_annotation_json = os.path.join(download_path, frameid + ".json")

            if datasetType == "1":
                self._fuse_image_domain(download_path, generated_path, visualized_path,
                                        IF_DEBUG, logger, frameid, vendor_annotation_json, image_suffix)

            if datasetType == "2":
                self._fuse_pcd_domain(download_path, generated_path, visualized_path,
                                      IF_DEBUG, logger, frameid, vendor_annotation_json)

            if datasetType == "3":
                fused_image_detections = self._fuse_image_domain(
                    download_path, generated_path, visualized_path,
                    IF_DEBUG, logger, frameid, vendor_annotation_json, fuse_image_suffix)
                fused_pcd_detections = self._fuse_pcd_domain(
                    download_path, generated_path, visualized_path,
                    IF_DEBUG, logger, frameid, vendor_annotation_json)

                # Associate fused pcd boxes with fused image boxes by projecting
                # them into the camera image.
                projected_detections = self.project_pcd_to_image(fused_pcd_detections)
                image_matched_pcd_list = self.classify_image_detections(fused_image_detections, projected_detections)

                # Rewrite both JSONs into the final delivery format with track ids.
                self._rewrite_image_json(os.path.join(generated_path, frameid + fuse_image_suffix.replace("jpg", "json")))
                self._rewrite_pcd_json(os.path.join(generated_path, frameid + pcd_suffix.replace("pcd", "json")),
                                       image_matched_pcd_list)
                logger.info("frameid {} multi domain fusion done, json updated".format(frameid))

        







