from collections import Counter
import json, os, uuid, yaml
from tqdm import tqdm
from multiprocessing import Pool
from scipy.spatial.transform import Rotation as R
import numpy as np
import cv2
from multiprocessing import Pool, cpu_count


def get_token():
    """Return a fresh random 32-character lowercase hex identifier."""
    token = uuid.uuid4()
    return token.hex


def get_visibility_token(visibility_score: float) -> str:
    """Map a fractional visibility score in [0, 1] to a nuScenes visibility token.

    Bin edges (upper bound inclusive): [0, 0.4] -> "1", (0.4, 0.6] -> "2",
    (0.6, 0.8] -> "3", (0.8, 1.0] -> "4".

    Raises:
        ValueError: if ``visibility_score`` lies outside [0, 1] (the original
            printed "error" and issued a bare ``raise`` with no active
            exception, which surfaced as an opaque RuntimeError).
    """
    if 0 <= visibility_score <= 0.4:
        return "1"
    if 0.4 < visibility_score <= 0.6:
        return "2"
    if 0.6 < visibility_score <= 0.8:
        return "3"
    if 0.8 < visibility_score <= 1.0:
        return "4"
    raise ValueError(f"visibility score outside [0, 1]: {visibility_score!r}")


class NuScenceFormat:
    """Convert flattened per-frame annotation JSON into the nuScenes v1.0
    table layout.

    Each public method writes one nuScenes table (``scene.json``,
    ``sample.json``, ...) under ``output_root_path``, which is expected to be
    ``<dataset_root>/v1.0`` (several methods strip the trailing 5 characters
    to reach the dataset root).  Methods must be called in dependency order;
    see ``flatten_nucenes`` for the canonical sequence.
    """

    def __init__(self, flatten_json_path, output_root_path):
        # Root directory containing one sub-directory of flattened frame JSON
        # files per clip.
        self.flatten_json_path = flatten_json_path
        # Directory the nuScenes table JSON files are written to
        # (expected to end with ".../v1.0").
        self.output_root_path = output_root_path

    def map(self, log_token):
        """Write map.json with a single mock "semantic_prior" map bound to
        ``log_token``, and create the matching blank PNG under maps/.

        NOTE(review): uses os.mkdir, so re-running against an existing output
        tree raises FileExistsError — presumably intentional one-shot usage.
        """
        map_token = get_token()
        with open(os.path.join(self.output_root_path, "map.json"), "w") as f:
            json.dump(
                [
                    {
                        "category": "semantic_prior",
                        "token": map_token,
                        "filename": f"maps/{map_token}.png",
                        "log_tokens": [log_token],
                    }
                ],
                f,
            )
        # output_root_path ends in ".../v1.0"; [:-5] strips that suffix to get
        # the dataset root.
        map_path = os.path.join(self.output_root_path[:-5], 'maps')
        os.mkdir(map_path)
        # Black placeholder map; uint8 is the pixel type cv2.imwrite expects
        # (the original passed float64 zeros).
        mock_map_image = np.zeros((500, 500, 3), dtype=np.uint8)
        cv2.imwrite(os.path.join(map_path, f'{map_token}.png'), mock_map_image)

    def attribute(self):
        """Write attribute.json as an empty table (no attributes are used)."""
        with open(os.path.join(self.output_root_path, "attribute.json"), "w") as f:
            json.dump([], f)

    def scene(self, FLATTEN_JSON_ROOT_PATH):
        """Write scene.json: one scene per clip directory.

        ``first_sample_token``/``last_sample_token`` are placeholders filled
        in later by ``sample``.  NOTE(review): ``log_token`` here is a fresh
        random token that does NOT match the token emitted by ``log`` —
        confirm whether downstream consumers need these to agree.
        """
        with open(os.path.join(self.output_root_path, "scene.json"), "w") as f:
            scene_list = [
                {
                    "token": get_token(),
                    "log_token": get_token(),
                    "nbr_samples": len(
                        os.listdir(os.path.join(FLATTEN_JSON_ROOT_PATH, clip))
                    ),
                    "first_sample_token": None,
                    "last_sample_token": None,
                    "name": clip,
                    "description": None,
                }
                for clip in os.listdir(FLATTEN_JSON_ROOT_PATH)
            ]
            json.dump(scene_list, f)

    def log(self, vehicle, date_captured, location):
        """Write log.json with a single log record and return its token."""
        log_token = get_token()
        with open(os.path.join(self.output_root_path, "log.json"), "w") as f:
            json.dump(
                [
                    {
                        "token": log_token,
                        "logfile": vehicle + "-" + date_captured,
                        "vehicle": vehicle,
                        "date_captured": date_captured,
                        "location": location,
                    }
                ],
                f,
            )
        return log_token

    def category(self, category_list, description=None):
        """Write category.json: one record with a fresh token per name."""
        with open(os.path.join(self.output_root_path, "category.json"), "w") as f:
            new_category_list = [
                {"token": get_token(), "name": category, "description": description}
                for category in tqdm(category_list, postfix="category...")
            ]
            json.dump(new_category_list, f)

    def sensor(self, sensor_dict):
        """Write sensor.json from a {channel: modality} mapping."""
        with open(os.path.join(self.output_root_path, "sensor.json"), "w") as f:
            new_sensor_list = [
                {"token": get_token(), "channel": channel, "modality": modality}
                for channel, modality in sensor_dict.items()
            ]
            json.dump(new_sensor_list, f)

    def visibility(self):
        """Write visibility.json with the four standard nuScenes bins."""
        with open(os.path.join(self.output_root_path, "visibility.json"), "w") as f:
            visibility_list = [
                {
                    "description": "visibility of whole object is between 0 and 40%",
                    "token": "1",
                    "level": "v0-40",
                },
                {
                    "description": "visibility of whole object is between 40 and 60%",
                    "token": "2",
                    "level": "v40-60",
                },
                {
                    "description": "visibility of whole object is between 60 and 80%",
                    "token": "3",
                    "level": "v60-80",
                },
                {
                    "description": "visibility of whole object is between 80 and 100%",
                    "token": "4",
                    "level": "v80-100",
                },
            ]
            json.dump(visibility_list, f)

    def calibrated_sensor(self, calibrated_dict):
        """Write calibrated_sensor.json from sensor.json plus the per-channel
        extrinsics/intrinsics in ``calibrated_dict``.

        Raises:
            ValueError: for a channel that is neither LIDAR* nor CAM*/cam*
                (the original printed "error" and bare-``raise``d).
        """
        with open(
            os.path.join(self.output_root_path, "calibrated_sensor.json"), "w"
        ) as f:
            with open(os.path.join(self.output_root_path, "sensor.json")) as s:
                new_sensor_list = json.load(s)
            cali_list = []
            for sensors in tqdm(new_sensor_list, postfix="calibrated_sensor..."):
                name = sensors["channel"]
                # Lidar and camera records share the same layout; only the
                # channel-name validation differs, so a single append suffices.
                if not name.startswith(("LIDAR", "CAM", "cam")):
                    raise ValueError(f"unknown sensor channel: {name}")
                cali_list.append(
                    {
                        "token": get_token(),
                        "sensor_token": sensors["token"],
                        "translation": calibrated_dict[name]["translation"],
                        "rotation": calibrated_dict[name]["quat"],
                        "camera_intrinsic": calibrated_dict[name]["intrinsics"],
                    }
                )
            json.dump(cali_list, f)

    def ego_pose(self, timestamp_dict):
        """Write ego_pose.json: one pose (position + quaternion) per frame,
        read from each clip's flattened per-frame JSON."""
        with open(os.path.join(self.output_root_path, "ego_pose.json"), "w") as f:
            ego_pose_list = []
            for clip, timestamp_list in tqdm(timestamp_dict.items(), postfix="ego_pose..."):
                clip_path = os.path.join(self.flatten_json_path, clip)
                for timestamp in timestamp_list:
                    with open(os.path.join(clip_path, f"{timestamp}.json")) as k:
                        flatten_data = json.load(k)
                    ego_pose_list.append(
                        {
                            "token": get_token(),
                            "timestamp": timestamp,
                            "rotation": flatten_data["ego_pose_orientation"],
                            "translation": flatten_data["ego_pose_position"],
                        }
                    )
            json.dump(ego_pose_list, f)

    def sample(self, timestamp_dict):
        """Write sample.json (one keyframe per timestamp, chained with
        prev/next tokens per scene) and back-fill first/last sample tokens
        into scene.json.

        The original indexed ``token_list[idx + 1]`` unconditionally for the
        first sample, crashing on single-frame scenes; the guards below
        handle scenes with 0 or 1 frames.
        """
        with open(os.path.join(self.output_root_path, "scene.json")) as s:
            scenes = json.load(s)

        with open(os.path.join(self.output_root_path, "sample.json"), "w") as f:
            sample_list = []
            for scene in tqdm(scenes, desc='sample...'):
                scene_id = scene["token"]
                timestamp_list = timestamp_dict[scene["name"]]
                token_list = [get_token() for _ in timestamp_list]
                last = len(token_list) - 1
                for idx, token in enumerate(token_list):
                    sample_list.append(
                        {
                            "token": token,
                            "timestamp": timestamp_list[idx],
                            "prev": token_list[idx - 1] if idx > 0 else "",
                            "next": token_list[idx + 1] if idx < last else "",
                            "scene_token": scene_id,
                        }
                    )
                # Update first and last sample token in scene.json.
                scene["first_sample_token"] = token_list[0] if token_list else ""
                scene["last_sample_token"] = token_list[-1] if token_list else ""

            json.dump(sample_list, f)

        # Rewrite scene.json now that first/last sample tokens are known.
        with open(os.path.join(self.output_root_path, "scene.json"), 'w') as s:
            json.dump(scenes, s)

    def instance(self):
        """Write instance.json and return ``(instance_token, instance)``:
        parallel lists of nuScenes tokens and sorted integer instance ids.

        NOTE(review): one record is appended per *annotation occurrence*,
        not per unique instance, so instance.json contains duplicate tokens;
        preserved as-is because downstream behaviour may depend on it.
        """
        with open(os.path.join(self.output_root_path, "category.json")) as c:
            category = json.load(c)
        # O(1) category-name -> token lookup (replaces a per-annotation scan).
        category_token_by_name = {cate["name"]: cate["token"] for cate in category}

        # First pass: collect every integer instance id with multiplicity.
        instance_ids = []
        for clip in os.listdir(self.flatten_json_path):
            clip_path = os.path.join(self.flatten_json_path, clip)
            for files in os.listdir(clip_path):
                with open(os.path.join(clip_path, files)) as f:
                    flatten_data = json.load(f)
                for anno in flatten_data["objects"]:
                    instance_ids.append(int(anno["instance_token"]))

        instance_count = Counter(instance_ids)
        instance = sorted(set(instance_ids))
        instance_token = [get_token() for _ in instance]
        # O(1) id -> token lookup (replaces repeated O(n) list.index calls).
        token_by_id = dict(zip(instance, instance_token))

        # Second pass: emit one record per annotation occurrence.
        instance_list = []
        for clip in tqdm(os.listdir(self.flatten_json_path), desc="instance..."):
            clip_path = os.path.join(self.flatten_json_path, clip)
            for files in os.listdir(clip_path):
                with open(os.path.join(clip_path, files)) as f:
                    flatten_data = json.load(f)
                for anno in flatten_data["objects"]:
                    instance_id = int(anno["instance_token"])
                    # Category name is "category.subcategory" when a
                    # subcategory is present.
                    instance_category = (
                        anno["category"] + "." + anno["subcategory"]
                        if anno["subcategory"] is not None
                        else anno["category"]
                    )
                    instance_list.append(
                        {
                            "token": token_by_id[instance_id],
                            "category_token": category_token_by_name[instance_category],
                            "nbr_annotations": instance_count[instance_id],
                            "first_annotation_token": "",
                            "last_annotation_token": "",
                        }
                    )

        with open(os.path.join(self.output_root_path, "instance.json"), "w") as f:
            json.dump(instance_list, f)
        return instance_token, instance

    def process_timestamp_annotation(self, argument):
        """Pool worker: build sample_annotation records for one frame.

        ``argument`` is ``(clip, timestamp, clip_path, instance_token,
        instance, sample_list)``.  Returns a list of annotation dicts with
        empty prev/next links (the caller chains them afterwards).
        """
        sample_annotation_sublist = []
        clip, timestamp, clip_path, instance_token, instance, sample_list = argument
        with open(os.path.join(clip_path, f"{timestamp}.json")) as j:
            flatten_data = json.load(j)
        # Loop-invariant sample-token lookup, hoisted out of the per-object
        # loop.  Assumes timestamps are unique across all clips — TODO confirm.
        sample_token = [
            i["token"] for i in sample_list if i["timestamp"] == timestamp
        ][0]

        for anno in flatten_data["objects"]:
            visibility_score = anno["visibility"]['LIDAR_TOP']
            if not visibility_score:
                # Object not visible from the top lidar: skip it.
                continue
            # Orientation: xyz euler -> quaternion, reordered from scipy's
            # [x, y, z, w] to nuScenes' [w, x, y, z].  (The original
            # round-tripped through a rotation matrix; same result.)
            q = R.from_euler("xyz", anno["obj_euler"]).as_quat().tolist()
            obj_to_world_quat = [q[3], q[0], q[1], q[2]]
            # First two size components are swapped for nuScenes ordering —
            # presumably source stores [l, w, h] and nuScenes wants
            # [w, l, h]; verify against the flattening step.
            size = [
                anno["obj_size"][1],
                anno["obj_size"][0],
                anno["obj_size"][2],
            ]
            # Drop degenerate boxes.  BUG FIX: the original tested size[0]
            # twice and never size[2].
            if size[0] < 1e-4 or size[1] < 1e-4 or size[2] < 1e-4:
                continue

            sample_annotation_sublist.append(
                {
                    "token": get_token(),
                    "sample_token": sample_token,
                    "instance_token": instance_token[
                        instance.index(int(anno["instance_token"]))
                    ],
                    "visibility_token": get_visibility_token(visibility_score),
                    "attribute_tokens": "",
                    # Object pose is already in world coordinates; the
                    # original computed an ego-to-world transform here but
                    # never used it, so it has been removed.
                    "translation": anno["obj_position"],
                    "size": size,
                    "rotation": obj_to_world_quat,
                    "prev": "",
                    "next": "",
                    "num_lidar_pts": "",
                    "num_radar_pts": "",
                }
            )
        return sample_annotation_sublist

    def sample_annotation(self, sensor_dict, calibrated_dict, timestamp_dict, instance_token, instance):
        """Write sample_annotation.json: fan frames out to a process pool,
        flatten the results, then chain prev/next tokens.

        ``sensor_dict`` and ``calibrated_dict`` are unused; kept for
        interface compatibility with existing callers.

        NOTE(review): prev/next links chain ALL annotations in file order
        rather than per instance — preserved from the original; confirm
        whether per-instance chaining is intended.
        """
        with open(os.path.join(self.output_root_path, "sample.json")) as s:
            sample_list = json.load(s)
        with open(os.path.join(self.output_root_path, "sample_annotation.json"), "w") as f:
            argument_list = []
            for clip, timestamp_list in tqdm(timestamp_dict.items(), desc='sample_annotation...'):
                clip_path = os.path.join(self.flatten_json_path, clip)
                for timestamp in timestamp_list:
                    argument_list.append(
                        (clip, timestamp, clip_path, instance_token, instance, sample_list)
                    )

            with Pool(processes=cpu_count()) as pool:
                results = pool.map(self.process_timestamp_annotation, argument_list)

            sample_annotation_list = [item for sublist in results for item in sublist]
            # Chain prev/next; guards handle 0- and 1-element lists (the
            # original indexed out of range there).
            last = len(sample_annotation_list) - 1
            for idx, record in enumerate(sample_annotation_list):
                if idx > 0:
                    record["prev"] = sample_annotation_list[idx - 1]["token"]
                if idx < last:
                    record["next"] = sample_annotation_list[idx + 1]["token"]

            json.dump(sample_annotation_list, f)

    def process_timestamp_sample_data(self, argument):
        """Pool worker: build one sample_data record for a (sensor, frame)
        pair.

        Camera frames are symlinked into ``samples/<sensor>/`` instead of
        copied; lidar frames reference a placeholder ``demo.pcd``.

        Raises:
            ValueError: for a sensor that is neither CAM* nor LIDAR*.  BUG
                FIX: the original did ``raise("Wrong Sensor...")``, which
                raises a TypeError (strings are not exceptions).
        """
        (sensor, img_size_dict, calibrated_sensor_token, sample_data,
         ego_pose_data, clip, timestamp, DATA_ROOT_PATH, img_suffix,
         date_captured) = argument
        if not sensor.startswith(("CAM", "LIDAR")):
            raise ValueError("Wrong Sensor...")

        # Per-frame token lookups; assumes timestamps are globally unique —
        # TODO confirm.
        sample_token = [i["token"] for i in sample_data if i["timestamp"] == timestamp][0]
        ego_pose_token = [i["token"] for i in ego_pose_data if i["timestamp"] == timestamp][0]

        record = {
            "token": get_token(),
            "sample_token": sample_token,
            "ego_pose_token": ego_pose_token,
            "calibrated_sensor_token": calibrated_sensor_token,
            "timestamp": timestamp,
            "is_key_frame": True,
            "prev": "",
            "next": "",
        }
        if sensor.startswith("CAM"):
            height, width = img_size_dict[sensor]
            # The clip directory is itself a symlink into the raw data tree.
            clip_path = os.readlink(os.path.join(DATA_ROOT_PATH, clip))
            img_read_path = os.path.join(
                clip_path, "data", sensor, "RGB", f"{timestamp}.{img_suffix}"
            )
            filename = os.path.join(
                "samples",
                sensor,
                f"{date_captured}__{sensor}__{timestamp}.{img_suffix}",
            )
            # Symlink rather than copy to keep the converted set lightweight.
            os.symlink(
                img_read_path,
                os.path.join(f"{self.output_root_path[:-5]}/samples", sensor,
                             f"{date_captured}__{sensor}__{timestamp}.{img_suffix}"),
            )
            record.update(
                {"fileformat": img_suffix, "height": height, "width": width,
                 "filename": filename}
            )
        else:
            # Lidar: mock point-cloud reference, no real dimensions.
            record.update(
                {"fileformat": "pcd", "height": 0, "width": 0,
                 "filename": os.path.join("samples", sensor, "demo.pcd")}
            )
        return [record]

    def sample_data(self, timestamp_dict, DATA_ROOT_PATH, date_captured, img_size_dict, img_suffix):
        """Write sample_data.json: one record per (sensor, frame) pair, built
        in parallel, then chain prev/next per calibrated sensor."""
        # Prepare samples/<sensor>/ output directories.
        os.makedirs(os.path.join(self.output_root_path[:-5], "samples"), exist_ok=True)
        with open(os.path.join(self.output_root_path, "sample_data.json"), "w") as f:
            # Load tables shared across worker processes.
            with open(os.path.join(self.output_root_path, "sensor.json")) as s:
                sensor_data = json.load(s)
            sensor_list = [i["channel"] for i in sensor_data]
            sensor_token_list = [i["token"] for i in sensor_data]

            for sensor in sensor_list:
                os.makedirs(
                    os.path.join(self.output_root_path[:-5], "samples", sensor),
                    exist_ok=True,
                )

            with open(os.path.join(self.output_root_path, "calibrated_sensor.json")) as c:
                calibrated_sensor_data = json.load(c)
            with open(os.path.join(self.output_root_path, "sample.json")) as s:
                sample_records = json.load(s)
            with open(os.path.join(self.output_root_path, "ego_pose.json")) as e:
                ego_pose_data = json.load(e)

            # One worker argument per (sensor, clip, timestamp).
            argument_list = []
            for sensor, sensor_token in tqdm(
                zip(sensor_list, sensor_token_list), postfix="sample_data..."
            ):
                for calibrated_sensor in calibrated_sensor_data:
                    if calibrated_sensor["sensor_token"] == sensor_token:
                        calibrated_sensor_token = calibrated_sensor["token"]
                        for clip, timestamp_list in timestamp_dict.items():
                            for timestamp in timestamp_list:
                                argument_list.append(
                                    (sensor, img_size_dict, calibrated_sensor_token,
                                     sample_records, ego_pose_data, clip, timestamp,
                                     DATA_ROOT_PATH, img_suffix, date_captured)
                                )

            with Pool(processes=cpu_count()) as pool:
                results = pool.map(self.process_timestamp_sample_data, argument_list)
            sample_data_list = [item for sublist in results for item in sublist]

            # Chain prev/next within each calibrated sensor's frame sequence.
            # BUG FIX: the original shadowed the loaded sample.json with its
            # loop variable and rebuilt indices via O(n^2) list.index (which
            # also returns the wrong index for duplicate dicts); it crashed
            # for sensors with fewer than two frames.
            for calibrated_sensor in calibrated_sensor_data:
                calibrated_token = calibrated_sensor["token"]
                idx_list = [
                    idx for idx, record in enumerate(sample_data_list)
                    if record["calibrated_sensor_token"] == calibrated_token
                ]
                last = len(idx_list) - 1
                for pos, list_idx in enumerate(idx_list):
                    if pos > 0:
                        sample_data_list[list_idx]["prev"] = sample_data_list[idx_list[pos - 1]]["token"]
                    if pos < last:
                        sample_data_list[list_idx]["next"] = sample_data_list[idx_list[pos + 1]]["token"]

            json.dump(sample_data_list, f)


def flatten_nucenes(config_path):
    """Drive the full flatten-to-nuScenes conversion described by the YAML
    config at ``config_path``.

    Creates ``<OUTPUT_ROOT_PATH>/v1.0`` and emits every nuScenes table in
    dependency order (scene/log before map, sample before annotations, ...).
    """
    with open(config_path) as cfg_file:
        conf = yaml.safe_load(cfg_file)

    data_root = conf["DATA_ROOT_PATH"]
    output_root = conf["OUTPUT_ROOT_PATH"]
    flatten_json_root = conf["FLATTEN_JSON_ROOT_PATH"]

    # All table JSON files live under <output_root>/v1.0.
    version_dir = os.path.join(output_root, "v1.0")
    os.makedirs(output_root, exist_ok=True)
    os.makedirs(version_dir, exist_ok=True)

    date_captured = str(conf["DATE_CAPTURED"])
    timestamp_dict = conf["TIMESTAMP_DICT"]
    sensor_dict = conf["SENSOR_DICT"]
    calibrated_dict = conf["CALIBRATED_SENSOR"]

    converter = NuScenceFormat(flatten_json_root, version_dir)
    converter.attribute()
    converter.scene(flatten_json_root)
    log_token = converter.log(conf["VEHICLE"], date_captured, conf["LOCATION"])
    converter.map(log_token)
    converter.category(conf["CATEGORY_LIST"])
    converter.sensor(sensor_dict)
    converter.visibility()
    converter.calibrated_sensor(calibrated_dict)
    converter.ego_pose(timestamp_dict)
    converter.sample(timestamp_dict)
    instance_token, instance = converter.instance()
    converter.sample_annotation(sensor_dict, calibrated_dict, timestamp_dict, instance_token, instance)
    converter.sample_data(timestamp_dict, data_root, date_captured, conf["IMG_SIZE"], conf["IMG_SUFFIX"])
