import json
import os, random
from scipy.spatial.transform import Rotation as R
from tqdm import tqdm

def changan_flatten(data_path, clip_num, save_path):
    """Sample ``clip_num`` clips from ``data_path`` and convert their RAW
    label JSONs into per-timestamp "flatten" JSON files.

    Clips are staged via symlinks under ``<save_path>/changan_sample_data``
    (raw data is never copied or modified); converted files are written to
    ``<save_path>/flatten_json/<clip>/<timestamp>.json``.

    Args:
        data_path: Directory containing one sub-directory per clip.
        clip_num: Number of clips to randomly sample. Raises ``ValueError``
            (from ``random.sample``) if it exceeds the number of clips.
        save_path: Output root directory.
    """
    # Stage the sampled clips as symlinks so the source data stays untouched.
    src_path = os.path.join(save_path, "changan_sample_data")
    os.makedirs(src_path, exist_ok=True)

    all_clip = os.listdir(data_path)
    target_clip = random.sample(all_clip, clip_num)
    for clip in target_clip:
        origin_path = os.path.join(data_path, clip)
        dest_path = os.path.join(src_path, clip)
        if not os.path.exists(dest_path):
            os.symlink(origin_path, dest_path)

    sensor_list = [
        "CAM_FRONT",
        "CAM_FRONT_LEFT",
        "CAM_FRONT_RIGHT",
        "CAM_BACK",
        "CAM_BACK_LEFT",
        "CAM_BACK_RIGHT",
        "LIDAR_TOP",
    ]

    # NOTE(review): this iterates everything already in the staging dir, so
    # clips symlinked by previous runs are (re)converted as well — presumably
    # intentional; confirm if only the freshly sampled clips are wanted.
    for clip in tqdm(os.listdir(src_path), desc='Converting to Flatten...'):

        raw_json_path = os.path.join(src_path, clip, "data", "Label", "RAW")
        flatten_clip_path = os.path.join(save_path, "flatten_json", clip)
        os.makedirs(flatten_clip_path, exist_ok=True)

        # One RAW file per timestamp; strip the ".json" extension and sort so
        # output is produced in chronological (lexicographic) order.
        timestamp_list = sorted(
            os.path.splitext(name)[0] for name in os.listdir(raw_json_path)
        )

        for timestamp in timestamp_list:
            with open(os.path.join(raw_json_path, f"{timestamp}.json")) as r:
                raw_data = json.load(r)
            flatten_json_dict = _flatten_raw_record(raw_data, sensor_list)
            # Open the destination for writing only after the record was read
            # and converted successfully, so a parse/convert failure cannot
            # leave behind a truncated or empty output file.
            out_file = os.path.join(flatten_clip_path, f"{timestamp}.json")
            with open(out_file, "w") as f:
                json.dump(flatten_json_dict, f)


def _flatten_raw_record(raw_data, sensor_list):
    """Convert one RAW label record into the flatten-JSON dict.

    ``raw_data`` is a list of annotation dicts; the first entry carries the
    ego pose under ``"ego_to_world"``, and entries containing an
    ``"acceleration"`` key are object annotations.
    """
    # Ego orientation is stored as a quaternion [x, y, z, w];
    # reorder to [w, x, y, z] for the flatten format.
    qx, qy, qz, qw = raw_data[0]["ego_to_world"]["orientation"]
    ego_pose_orientation = [qw, qx, qy, qz]
    ego_pose_position = raw_data[0]["ego_to_world"]["position"]

    obj_list = []
    for anno in raw_data:
        # Only entries with an "acceleration" field are object annotations.
        if "acceleration" not in anno:
            continue

        visibility = {}
        for sensor in sensor_list:
            if sensor.startswith("CAM"):
                # Cast the per-camera boolean visibility flag to 0.0 / 1.0.
                visibility[sensor] = float(
                    anno["sensor_visibility"][sensor]["visible"]
                )
            else:
                # Non-camera sensors (LIDAR_TOP) are assumed fully visible.
                visibility[sensor] = 1.0

        obj_list.append(
            {
                "instance_token": anno["object_id"],
                "category": anno["object_type"],
                "subcategory": None,
                "visibility": visibility,
                "obj_position": anno["center"],
                # Object quaternion ([x, y, z, w], scipy scalar-last
                # convention) converted to intrinsic-free XYZ Euler angles.
                "obj_euler": R.from_quat(anno["orientation"])
                .as_euler("xyz")
                .tolist(),
                "obj_size": anno["size"],
            }
        )

    return {
        "ego_pose_orientation": ego_pose_orientation,
        "ego_pose_position": ego_pose_position,
        "objects": obj_list,
    }
