#! /usr/bin/env python3
import io, os, sys

wp = os.path.abspath(os.path.dirname(__file__))
sys.path.append(wp)

import json
import glob
import argparse
import logging

import numpy as np
import pandas as pd
from matplotlib import colormaps
from matplotlib import font_manager
from matplotlib.gridspec import GridSpec
from mpl_toolkits.axes_grid1 import make_axes_locatable
from matplotlib import pyplot as plt

from frame_data.frame_data_collector import FrameDataCollector, DEPT_BINS
from clip_data_collector import ClipDataCollector, UnrecognizedMapExeption
from xodr_parser.road_network_parser import RoadNetworkParser
from data_report.report_writer import ReportWritor

"""=== CSV Headers ==="""
CLIP_METADATA_HEADER = "clip_id,time,weather,road_scene\n"
EGO_DATA_HEADER = "clip_id,frame_id,curvature,left_roadmark_color,right_roadmark_color,left_roadmark_type,right_roadmark_type,left_roadmark_double,right_roadmark_double,num_pre_lanes,num_suc_lanes\n"
FRAME_DATA_HEADER = "clip_id,frame_id,obj_id,obj_type,label_type,velocity_x,velocity_y,velocity_z,speed,length,width,height,relative_x,relative_y,relative_z,radius,angle,relative_orientation,num_sensors"
SENSOR_HEADER = "sensor_id,sensor_name\n"

WEATHER_TRANSLATION = {
            "clear_sky"     : "晴天",
            "cloudy"        : "多云",
            "partly_cloudy" : "部分多云",
            "overcast"      : "阴",
            "foggy"         : "雾",
            "rain"          : "雨",
            "light_rain"    : "小雨",
            "thunder_storm" : "雷雨",
        }

COLOR_TRANSLATION = {
            'white'  : '白色',
            'blue'   : '蓝色',
            'green'  : '绿色',
            'red'    : '红色',
            'orange' : '橙色',
            'yellow' : '黄色',
        }

ROAD_SCENE_TRANSLATION = {
            'Urban'            : '城区',
            'Suburban'         : '郊区',
            'Highway'          : '高速',
            'Residential Area' : '住宅区',
            'test ground'      : '测试场',
        }

font_files = font_manager.findSystemFonts(os.path.join(wp, "dependencies/fonts/"))
for font_file in font_files:
    font_manager.fontManager.addfont(font_file)
# Set SimHei as the default font
plt.rcParams['font.family']        = 'SimHei'
plt.rcParams['axes.unicode_minus'] = False

# debug only
#os.environ["PYDEVD_DISABLE_FILE_VALIDATION"] = "1"

class DataCollector:
    """
    A class to contain all annotation data of a data set. It can export its data to csv files and json files. It can draw some graphs on the data, as well.
    """
    def __init__(self, data_dir_path, logger:logging.Logger) -> None:
        """
        Create a DataCollector for one data set.

        Args:
            data_dir_path (str): Path to the data set.
            logger (logging.Logger): Parent logger; child loggers are handed
                to the clip and frame collectors.
        """
        self.wp              = os.path.abspath(os.path.dirname(__file__))
        self.data_dir_path   = data_dir_path
        self.logger          = logger
        self.odr_parser      = None  # set per-clip while gathering ego data
        self.data            = {}    # clip_id -> per-frame annotation data
        self.sensors         = []    # sensor names, filled by collect()
        self.frame_collector = FrameDataCollector(
            data_dir_path, logger=self.logger.getChild("FrameCollector_logger"))
        self.clip_collector  = ClipDataCollector(
            data_dir_path, logger=self.logger.getChild("ClipCollector_logger"))
        
    def collect_clip_data(self, debug=False):
        """
        Collect clip-level metadata and build the OpenDRIVE lookup tables.

        Args:
            debug (bool, optional): Forwarded to the clip collector. Defaults to False.

        Returns:
            tuple: (failed_clips, map_to_paths, clip_to_parser) — the clips the
                collector rejected, map name -> .xodr path, and clip id ->
                parsed road network.
        """
        sensors, failed_clips = self.clip_collector.collect(debug)
        self.sensors = sensors
        # Resolve and parse each map's .xodr exactly once, then fan the parsers
        # out to the clips (the original globbed and parsed per reference, and
        # also built an unused map_to_clips grouping).
        xodr_dir = os.path.join(self.wp, "scenario/xodr")
        map_to_paths = {m: glob.glob(os.path.join(xodr_dir, f"*{m}*.xodr"))[0]
                        for m in self.clip_collector.maps}
        map_to_parser = {m: RoadNetworkParser(path).parse()
                         for m, path in map_to_paths.items()}
        clip_to_parser = {clip: map_to_parser[m]
                          for clip, m in self.clip_collector.clip_map.items()}

        return failed_clips, map_to_paths, clip_to_parser

    def collect(self, debug=False, num_threads=128):
        """
        Processes the annotation data and constructs DataFrames.

        Args:
            debug (bool, optional): Enters debug mode, only collects the first ten items. Defaults to False.
            num_threads (int, optional): Worker threads used by the frame collector. Defaults to 128.

        Returns:
            tuple: (failed_clips, empty_clips, failed_images) — clips that failed
                collection, clips without frames, and images that could not be
                processed.
        """
        self.logger.info("Collecting clip data...")
        try:
            sensors, failed_clips = self.clip_collector.collect(debug)
        except UnrecognizedMapExeption as err:
            # BUG FIX: log the raised instance, not the class attribute
            # (`UnrecognizedMapExeption.message` need not exist on the class).
            self.logger.warning(str(err))
            sys.exit(5)
        self.sensors = sensors
        self.logger.info("Collecting frame data...")
        # Resolve and parse each map's .xodr exactly once, then share the
        # parsed network among all clips recorded on that map.
        xodr_dir = os.path.join(self.wp, "scenario/xodr")
        map_to_paths   = {m: glob.glob(os.path.join(xodr_dir, f"*{m}*.xodr"))[0] for m in self.clip_collector.maps}
        map_to_parser  = {m: RoadNetworkParser(path).parse() for m, path in map_to_paths.items()}
        clip_to_parser = {clip: map_to_parser[m] for clip, m in self.clip_collector.clip_map.items()}

        # Group clips by the map they were recorded on.
        map_to_clips = {}
        for clip, m in self.clip_collector.clip_map.items():
            map_to_clips.setdefault(m, []).append(clip)

        empty_clips = self.frame_collector.collect(sensors=self.sensors,
                                                   map_to_clips=map_to_clips,
                                                   map_to_paths=map_to_paths,
                                                   failed_clips=failed_clips,
                                                   debug=debug,
                                                   num_threads=num_threads)

        failed_images, self.pixel_depth = self.frame_collector.process_images()

        # Drop clips that failed collection or contained no frames.
        for clip_id in (*failed_clips, *empty_clips):
            self.clip_collector.data.pop(clip_id, None)
            self.frame_collector.clip_data.pop(clip_id, None)

        # Gather per-frame ego data from the parsed road networks.
        for clip_id, clip_info in self.frame_collector.clip_data.items():
            self.odr_parser = clip_to_parser[clip_id]
            for _, frame_info in clip_info.items():
                road_id, lane_id, s = frame_info["ego_lane_pos"]
                curvature = self._calc_curvature_data(road_id, s)
                left_rm, right_rm = self._get_road_mark_data(road_id, lane_id, s)
                num_pre_lanes, num_suc_lanes = self._get_linkage_data(road_id, lane_id, s)

                # Each side is a (color, type, single/double) triple or None
                # when the road mark could not be determined.
                left_rm_color, left_rm_type, left_rm_double = \
                    left_rm if left_rm is not None else (None, None, None)
                right_rm_color, right_rm_type, right_rm_double = \
                    right_rm if right_rm is not None else (None, None, None)

                frame_info["ego"] = {
                    "curvature": curvature,
                    "rm_color": [left_rm_color, right_rm_color],
                    "rm_type": [left_rm_type, right_rm_type],
                    "rm_double": [left_rm_double, right_rm_double],
                    "num_pre_lanes": num_pre_lanes,
                    "num_suc_lanes": num_suc_lanes,
                }
                del frame_info["ego_lane_pos"]

        # Combine frame data with clip metadata.
        self.data = self.frame_collector.clip_data
        for clip_id, clip_info in self.clip_collector.data.items():
            if clip_id in self.data:
                self.data[clip_id]["clip_meta_data"] = clip_info

        # Form dataframes.
        self._get_dataframes()
        return failed_clips, empty_clips, failed_images

    def _get_dataframes(self) -> None:
        """
        Convert self.data into the clip / ego / frame DataFrames.

        Exits the process with status 1 when there is no data to convert
        (it does not raise — the previous docstring's RuntimeError claim
        was wrong, and the `return` after exit() was unreachable).
        """
        if len(self.data) == 0:
            self.logger.warning("No data collected")
            sys.exit(1)
        clip_metadata_lines, ego_data_lines, frame_data_lines = self.format_data()
        self.clip_df  = pd.read_csv(io.StringIO("".join(clip_metadata_lines)))
        self.ego_df   = pd.read_csv(io.StringIO("".join(ego_data_lines)))
        self.frame_df = pd.read_csv(io.StringIO("".join(frame_data_lines)))

    def format_data(self) -> tuple[list[str], list[str], list[str]]:
        """
        Format data into three csv line lists, including the header lines.

        Returns:
            Tuple[list[str], list[str], list[str]]: A Tuple containing the clip data, ego data, and frame data, respectively.
        """
        ego_data_lines = [EGO_DATA_HEADER]
        clip_metadata_lines = [CLIP_METADATA_HEADER]
        frame_data_lines = [self._format_frame_data_header(self.sensors)]

        for clip_id, clip_info in self.data.items():
            clip_line = f'"{clip_id}"'
            for frame_id, frame_info in clip_info.items():
                # "clip_meta_data" is a pseudo-frame carrying clip-level info.
                if frame_id == "clip_meta_data":
                    clip_metadata_lines.append(
                        f'{clip_line},{frame_info["time"]},{frame_info["weather"]},{frame_info["road_scene"]}\n'
                    )
                    continue
                frame_line = f'{clip_line},"{frame_id}"'
                for obj_id, obj_info in frame_info.items():
                    # "ego" is a pseudo-object carrying the road/lane context.
                    if obj_id == "ego":
                        ego_data_line = f'{frame_line},{obj_info["curvature"]},'
                        ego_data_line += f'"{obj_info["rm_color"][0]}","{obj_info["rm_color"][1]}",'
                        ego_data_line += f'"{obj_info["rm_type"][0]}","{obj_info["rm_type"][1]}",'
                        ego_data_line += f'"{obj_info["rm_double"][0]}","{obj_info["rm_double"][1]}",'
                        ego_data_line += f'{obj_info["num_pre_lanes"]},{obj_info["num_suc_lanes"]}\n'
                        ego_data_lines.append(ego_data_line)
                        continue

                    obj_velocity = obj_info["velocity"]
                    obj_size = obj_info["size"]
                    obj_position = obj_info["relative_carte_position"]
                    obj_polar_pos = obj_info["relative_polar_position"]
                    obj_visibility = obj_info["visibility"]

                    obj_line = f'{frame_line},"{obj_id}",'
                    obj_line += f'"{obj_info["obj_type"]}",{obj_info["label_type"]},{obj_velocity[0]},{obj_velocity[1]},{obj_velocity[2]},'
                    obj_line += f"{obj_info['speed']},{obj_size[0]},{obj_size[1]},{obj_size[2]},"
                    obj_line += f"{obj_position[0]},{obj_position[1]},{obj_position[2]},"
                    obj_line += f"{obj_polar_pos[0]},{obj_polar_pos[1]},{obj_info['relative_orientation']},"
                    obj_line += f"{len(self.sensors)}"

                    # Five columns per sensor: visibility, bbox top-left x/y,
                    # bbox height/width. Missing sensors become False + NaNs.
                    for sensor in self.sensors:
                        sensor_info = obj_visibility.get(sensor)
                        sensor_visibility = False if sensor_info is None else sensor_info["visible"]
                        sensor_position = None if sensor_info is None else sensor_info["bbox_2d_top_left"]
                        sensor_size = None if sensor_info is None else sensor_info["bbox_2d_size"]

                        obj_line += f",{sensor_visibility}"
                        obj_line += (
                            f",NaN,NaN" if sensor_position is None else f",{sensor_position[0]},{sensor_position[1]}"
                        )
                        # BUG FIX: this placeholder was ",NaN,Nan" — pandas does
                        # not treat "Nan" as missing, which forced the whole
                        # *_obj_width column to dtype object downstream.
                        obj_line += f",NaN,NaN" if sensor_size is None else f",{sensor_size[0]},{sensor_size[1]}"

                    frame_data_lines.append(obj_line + "\n")
        return clip_metadata_lines, ego_data_lines, frame_data_lines
    
    @staticmethod
    def _format_frame_data_header(sensors:list[str]) -> str:
        """
        Build the frame-data csv header, appending five columns per sensor.

        Args:
            sensors (list[str]): The list of sensor names, in column order.

        Returns:
            str: The csv header for frame data, newline-terminated.
        """
        per_sensor_cols = "".join(
            f",{s}_visibility,{s}_obj_x,{s}_obj_y,{s}_obj_height,{s}_obj_width"
            for s in sensors
        )
        return FRAME_DATA_HEADER + per_sensor_cols + "\n"

    def load_from_json(self, file_path:str):
        """
        Reads data from a json file and rebuilds the DataFrames.

        Args:
            file_path (str): Path to the json file
        """
        try:
            # `with` guarantees the handle is closed even if json.load raises;
            # the original leaked the file object in that case.
            with open(file_path, "r") as fp:
                self.data = json.load(fp=fp)
        except FileNotFoundError as e:
            # Best-effort, matching the original behavior: report and return.
            print(e)
        else:
            self._get_dataframes()

    def dump_to_json(self, file_path: str):
        """
        Writes data into a json file.

        Args:
            file_path (str): The path to the json file.
        """
        # Mode "w" already creates a missing file, so FileNotFoundError here
        # means the parent directory is missing — the old open(path, "x")
        # fallback could never succeed in that case. Create the directory
        # instead, and let `with` close the handle even if the dump fails.
        try:
            parent = os.path.dirname(file_path)
            if parent and not os.path.exists(parent):
                os.makedirs(parent)
            with open(file_path, "w") as fp:
                json.dump(obj=self.data, fp=fp, indent=2)
        except Exception as e:
            # Best-effort, matching the original: report and give up.
            print(e)

    def dump_csv_files(self, file_path:str, file_name:str):
        """
        Writes the csv files containing the annotation data.

        Args:
            file_path (str): The folder to write the csv files into.
            file_name (str): The prefix of the resulting csv files (anything
                after the first "." is stripped, as before).
        """
        clip_metadata_lines, ego_data_lines, frame_data_lines = self.format_data()
        prefix = file_name.split(sep=".")[0]
        # exist_ok avoids the check-then-create race of the original.
        os.makedirs(file_path, exist_ok=True)
        outputs = (
            ("_clip_metadata.csv", clip_metadata_lines),
            ("_ego_data.csv", ego_data_lines),
            ("_frame_data.csv", frame_data_lines),
        )
        for suffix, lines in outputs:
            with open(os.path.join(file_path, prefix + suffix), "w") as csv_file:
                csv_file.writelines(lines)

    def dump_to_str(self):
        """
        Serialize the collected data to a JSON string.

        Returns:
            str: The string representation of the data
        """
        serialized = json.dumps(self.data, indent=2)
        return serialized

    def _calc_curvature_data(self, road_id: int, s: float) -> float:
        """Look up the road curvature at arc position ``s`` on ``road_id``."""
        parser = self.odr_parser
        return parser.calc_curvature(road_id, s)

    def _get_road_mark_data(self, road_id: int, lane_id: int, s: float):
        """
        Describe the road marks on either side of the ego lane.

        Args:
            road_id (int): OpenDRIVE road id.
            lane_id (int): OpenDRIVE lane id (0 is the reference line).
            s (float): Arc position along the road.

        Returns:
            tuple: (left, right) where each side is None when unknown, or a
                (color, "broken"/"solid"/"other", "single"/"double"/"other")
                triple of strings.
        """
        if lane_id == 0:
            return (None, None)
        road_mark = self.odr_parser.get_road_mark(road_id, lane_id, s)
        if road_mark is None:
            return (None, None)
        left_rm, right_rm = road_mark

        def describe(rm, is_broken, is_solid):
            # Collapse a road-mark object into (color, line type, multiplicity).
            # The broken/solid flags are passed in because they are
            # side-specific (is_left_* vs is_right_*) on the type object.
            if is_broken:
                rm_type = "broken"
            elif is_solid:
                rm_type = "solid"
            else:
                rm_type = "other"

            if rm.type.is_single:
                doubling = "single"
            elif rm.type.is_double:
                doubling = "double"
            else:
                doubling = "other"
            return (rm.color.str, rm_type, doubling)

        if left_rm is not None:
            left_rm = describe(left_rm, left_rm.type.is_left_broken, left_rm.type.is_left_solid)
        if right_rm is not None:
            right_rm = describe(right_rm, right_rm.type.is_right_broken, right_rm.type.is_right_solid)

        return (left_rm, right_rm)

    def _get_linkage_data(self, road_id: int, lane_id: int, s: float):
        """
        Count the predecessor and successor lanes linked to the ego lane.

        Args:
            road_id (int): OpenDRIVE road id.
            lane_id (int): OpenDRIVE lane id (0 is the reference line).
            s (float): Arc position along the road.

        Returns:
            tuple: (num_predecessor_lanes, num_successor_lanes);
                (None, None) for the reference lane.
        """
        if lane_id == 0:
            # BUG FIX: the bare `return` yielded None, which crashed the
            # caller's `a, b = ...` unpack in collect(); mirror
            # _get_road_mark_data and return a 2-tuple instead.
            return (None, None)
        pre_codes, suc_codes = self.odr_parser.get_linkage(road_id, lane_id, s)
        return len(pre_codes), len(suc_codes)

    """=== Frame Data Visualization ==="""

    def draw_moving_ratio(self, path="./static_dynamic_ratio.png", sensor_name=None):
        """
        Save a pie chart of the static vs. dynamic object ratio.

        Args:
            path (str, optional): Output png path.
            sensor_name (str, optional): Restrict to objects visible from this
                sensor; None means all objects.

        Returns:
            Self, for call chaining.
        """
        title = "动静物占比"
        # An object is considered static when its speed is (almost) zero.
        if sensor_name is not None:
            visible = self.frame_df[f"{sensor_name}_visibility"].fillna(False)
            counts = (self.frame_df["speed"].loc[visible] < 1e-3).value_counts()
            title += f" ({sensor_name} 可见)"
        else:
            counts = (self.frame_df["speed"] < 1e-3).value_counts()

        if len(counts) == 0:
            return self
        plt.figure()
        plt.pie(counts,
                labels=counts.index.map({True: "静态物体", False: "动态物体"}),
                autopct="%1.1f%%")
        plt.title(title)
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_speed_distribution(self, path="./speed_distribution.png", bins=20):
        """
        Save a histogram of object speeds.

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Histogram bin count. Defaults to 20.

        Returns:
            Self, for call chaining.
        """
        speeds = self.frame_df["speed"].dropna()
        if speeds.empty:
            return self
        plt.figure()
        plt.hist(speeds, bins=bins)
        plt.title("物体速度分布")
        plt.xlabel("速度 (米/秒)")
        plt.ylabel("频数")
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_position_2d(self, path="./2d_position_distribution.png", bins=50):
        """
        Save a 3-panel figure: a 2D heat map of object positions plus the
        polar radius and angle histograms of the relative positions.

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Histogram bin count. Defaults to 50.

        Returns:
            Self, for call chaining.
        """
        r = self.frame_df["radius"].dropna()
        theta = self.frame_df["angle"].dropna()
        if len(r) == 0 or len(theta) == 0:
            return self

        fig = plt.figure(figsize=(7, 12), layout='constrained')
        grid = GridSpec(3, 1, figure=fig, wspace=0.2, hspace=0.03)

        # 2D heat map of the Cartesian positions recovered from (r, theta).
        heat_ax = fig.add_subplot(grid[0, 0])
        hist_ret = heat_ax.hist2d(r * np.cos(theta), r * np.sin(theta),
                                  cmap=colormaps['cool'], bins=bins, density=True)
        heat_ax.set_xlabel("相对x坐标 (米)")
        heat_ax.set_ylabel("相对y坐标 (米)")
        heat_ax.set_title("2D 位置热度图")

        # Marginal distributions of the polar radius and angle.
        panels = ((r, "半径 (米)", "相对位置极坐标半径分布"),
                  (theta, "角度 (弧度)", "相对位置极坐标角度分布"))
        for row, (series, x_label, sub_title) in enumerate(panels, start=1):
            ax = fig.add_subplot(grid[row, 0])
            ax.hist(series, bins=bins)
            ax.set_xlabel(x_label)
            ax.set_ylabel("频数")
            ax.set_title(sub_title)

        # Attach the colorbar beside the heat map (hist2d returns the
        # QuadMesh as its 4th element).
        cax = make_axes_locatable(heat_ax).append_axes('right', size='5%', pad=0.05)
        fig.colorbar(hist_ret[3], cax=cax, orientation='vertical')
        fig.suptitle("2D相对位置极坐标分布")
        plt.savefig(path, format="png")
        plt.close()
        return self
    
    def draw_position_3d(self, path="./3d_position_distribution.png", sensor_name=None):
        """
        Save 3D scatter plots of relative object positions — one column of
        subplots per object type, one per label type.

        Args:
            path (str, optional): Output png path.
            sensor_name (str, optional): Sensor whose visible objects are
                drawn; None is a no-op.

        Returns:
            Self, for call chaining.
        """
        if sensor_name is None:
            return self
        visibility_level = f"{sensor_name}_visibility"
        columns = ['obj_type', 'label_type', 'relative_x', 'relative_y', 'relative_z']
        coordinates = ['relative_x', 'relative_y', 'relative_z']
        type_level  = 'obj_type'
        label_level = 'label_type'

        condition = self.frame_df[visibility_level].fillna(False)
        types = np.unique(self.frame_df[type_level])
        labels = np.unique(self.frame_df[label_level])
        num_rows = max(len(types), len(labels))

        # data selection: visible objects with full position data only
        data = self.frame_df[columns].loc[condition].dropna(how='any')

        title = f"3D相对位置分布 ({sensor_name}可见)"

        if len(data) == 0:
            return self

        fig = plt.figure(figsize=(6 * 2, 5 * num_rows + 1), layout='constrained')
        gs  = GridSpec(num_rows, 2, left=0.1, right=0.9, bottom=0.1, top=0.9,
                       wspace=0.1, hspace=0.1, figure=fig)

        # Column 0: one scatter per object type. (`obj_type` instead of the
        # original `type`, which shadowed the builtin.)
        for i, obj_type in enumerate(types):
            sub_plot_data = data.loc[data[type_level] == obj_type]
            ax:plt.Axes   = fig.add_subplot(gs[i, 0], projection='3d')
            ax.scatter(*coordinates, marker='.', label=obj_type, data=sub_plot_data)
            ax.set_xlabel("相对x坐标")
            ax.set_ylabel("相对y坐标")
            ax.set_zlabel("相对z坐标")
            ax.set_title(f"{obj_type} 3D 位置分布")

        # Column 1: one scatter per label type.
        for i, label in enumerate(labels):
            sub_plot_data = data.loc[data[label_level] == label]
            ax:plt.Axes   = fig.add_subplot(gs[i, 1], projection='3d')
            # BUG FIX: the original passed `label=type`, reusing the stale loop
            # variable from the type loop, mislabeling every label subplot.
            ax.scatter(*coordinates, marker='.', label=label, data=sub_plot_data)
            ax.set_xlabel("相对x坐标")
            ax.set_ylabel("相对y坐标")
            ax.set_zlabel("相对z坐标")
            ax.set_title(f"{label} 3D 位置分布")

        fig.suptitle(title)
        plt.savefig(path, format="png")
        plt.close()
        return self
    
    def draw_object_type_count(self, path="./object_type_count.png", sensor_name=None):
        """
        Save bar charts counting object types, one subplot per label type.

        Args:
            path (str, optional): Output png path.
            sensor_name (str, optional): Sensor to chart; None is a no-op.

        Returns:
            Self, for call chaining.
        """
        if sensor_name is None:
            return self
        title = f"物体类型数量统计（{sensor_name}可见）"

        sensor_condition = self.frame_df[f"{sensor_name}_visibility"].fillna(False)
        labels = np.unique(self.frame_df['label_type'].dropna())
        fig = plt.figure(figsize=(10, 4 * len(labels)), layout='constrained')
        gs  = GridSpec(len(labels), 1, figure=fig)

        for row, label in enumerate(labels):
            filter_condition = (self.frame_df['label_type'] == label) & sensor_condition
            labeled_data = self.frame_df['obj_type'].loc[filter_condition].value_counts()
            if len(labeled_data) == 0:
                continue
            ax = fig.add_subplot(gs[row, :])
            ax.bar(labeled_data.index, labeled_data)
            # Annotate each bar at half height. BUG FIX: the original used
            # labeled_data[i] with an integer on a string index (deprecated
            # positional fallback in pandas) and shadowed the outer loop index.
            for i, count in enumerate(labeled_data):
                ax.text(i, count // 2, count, ha='center')
            ax.set_ylabel("数量")
            ax.set_title(f"{label}数量分布")
        fig.supxlabel("物体类型")
        fig.suptitle(title)
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_orientation_distribution(self, path="./orientation_distribution.png", bins=20):
        """
        Save a histogram of relative orientation angles, converted to degrees.

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Histogram bin count. Defaults to 20.

        Returns:
            Self, for call chaining.
        """
        # Consistent with the sibling draw_* helpers: drop missing values and
        # skip drawing entirely when nothing remains.
        data = self.frame_df["relative_orientation"].dropna() * (180.0 / np.pi)
        if len(data) == 0:
            return self
        plt.figure()
        plt.hist(data, bins=bins)
        plt.xticks(np.arange(-180, 270, step=90.0), labels=["-180", "-90", "0", "90", "180"])
        plt.title("相对朝向的夹角分布")
        plt.xlabel("夹角角度 (度)")
        plt.ylabel("频数")
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_npc_size_distribution(self, path="object_size_distribution.png", bins=20, sensor_name=None):
        """
        Save stacked histograms of object length, width and height.

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Histogram bin count. Defaults to 20.
            sensor_name (str, optional): Restrict to objects visible from this
                sensor; None means all objects.

        Returns:
            Self, for call chaining.
        """
        level_to_str = {
            "length" : "长度", 
            "width"  : "宽度",
            "height" : "高度",
        }
        levels = ["length", "width", "height"]
        title = "物体大小分布"
        if sensor_name is not None:
            title += f" (Visible from {sensor_name})"
        fig, axs = plt.subplots(3, 1, constrained_layout=True)
        # `idx` instead of the original `id`, which shadowed the builtin.
        for idx, ax in enumerate(axs):
            if sensor_name is None:
                data = self.frame_df[levels[idx]]
            else:
                level = f"{sensor_name}_visibility"
                data = self.frame_df[levels[idx]].loc[self.frame_df[level].fillna(False)]

            if len(data) == 0:
                # BUG FIX: the early return used to leave the figure open,
                # leaking it for every empty sensor selection.
                plt.close(fig)
                return self

            ax.hist(data, bins=bins)
            ax.set_xlabel(level_to_str[levels[idx]] + "（米）")
            ax.set_ylabel("频数")
            ax.set_title(level_to_str[levels[idx]] + "分布")
        fig.set_figheight(13)
        fig.set_figwidth(5)
        fig.align_xlabels(axs)
        fig.suptitle(title)
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_2d_bbox_position(self, path="2d_bbox_position.png", bins=50, sensor_name=None):
        """
        Save a heat map of normalized 2D bounding-box center positions.

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Bin count per axis. Defaults to 50.
            sensor_name (str, optional): Sensor to chart; None is a no-op.

        Returns:
            Self, for call chaining.
        """
        if sensor_name is None:
            return self
        level = f"{sensor_name}_visibility"
        x_level, y_level = f"{sensor_name}_obj_x", f"{sensor_name}_obj_y"
        visible = self.frame_df[level].fillna(False)
        x_coord = self.frame_df[x_level].loc[visible].astype("int")
        y_coord = self.frame_df[y_level].loc[visible].astype("int")
        title = f"2D BBox中心位置热度图({sensor_name} 可见)"

        if len(x_coord) == 0 or len(y_coord) == 0:
            return self

        span_x = x_coord.max() - x_coord.min()
        span_y = y_coord.max() - y_coord.min()
        if span_x == 0 or span_y == 0:
            # BUG FIX: a degenerate range (all centers identical on one axis)
            # made the normalization below divide by zero.
            return self

        plt.figure()
        im = plt.hist2d((x_coord - x_coord.min()) / span_x,
                        (y_coord - y_coord.min()) / span_y,
                        cmap=colormaps['cool'],
                        bins=bins, density=True)
        plt.title(title)
        plt.xlabel("中心x像素位置 (像素点)")
        plt.ylabel("中心y像素位置 (像素点)")
        plt.colorbar(im[3])
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_2d_bbox_size(self, path="2d_bbox_size.png", bins=50, sensor_name=None):
        """
        Save a heat map of normalized 2D bounding-box sizes (width x height).

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Bin count per axis. Defaults to 50.
            sensor_name (str, optional): Sensor to chart; None is a no-op.

        Returns:
            Self, for call chaining.
        """
        if sensor_name is None:
            return self
        level = f"{sensor_name}_visibility"
        height_level, width_level = f"{sensor_name}_obj_height", f"{sensor_name}_obj_width"
        visible = self.frame_df[level].fillna(False)
        widths = self.frame_df[width_level].loc[visible].astype("int")
        heights = self.frame_df[height_level].loc[visible].astype("int")
        title = f"2D BBox 大小热度图 ({sensor_name} 可见)"

        if len(widths) == 0 or len(heights) == 0:
            return self

        span_w = widths.max() - widths.min()
        span_h = heights.max() - heights.min()
        if span_w == 0 or span_h == 0:
            # BUG FIX: a degenerate range (all boxes the same size on one axis)
            # made the normalization below divide by zero.
            return self

        plt.figure()
        im = plt.hist2d((widths - widths.min()) / span_w,
                        (heights - heights.min()) / span_h,
                        cmap=colormaps['cool'],
                        bins=bins, density=True)
        plt.title(title)
        plt.xlabel("2D BBox宽度 (像素数)")
        plt.ylabel("2D BBox高度 (像素数)")
        plt.colorbar(im[3])
        plt.savefig(path, format="png")
        plt.close()
        return self

    """=== Ego Data Visualization ==="""

    def draw_curvature_distribution(self, path="curvature_distribution.png", bins=30):
        """
        Save a histogram of the road curvature observed along the ego track.

        Args:
            path (str, optional): Output png path.
            bins (int, optional): Histogram bin count. Defaults to 30.

        Returns:
            Self, for call chaining.
        """
        curvature = self.ego_df["curvature"]
        plt.figure()
        plt.hist(curvature, bins=bins, density=False)
        plt.xlabel("曲率 (1/米)")
        plt.ylabel("频数")
        plt.title("道路曲率分布")
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_road_mark_color_rat(self, path="road_mark_color_distribution.png", ax=None):
        """
        Draw a pie chart of lane-mark colors, pooling both sides of the lane.

        Args:
            path (str, optional): Output png path, used only when ax is None.
            ax (optional): Target axes; when given, the caller owns the figure
                and nothing is saved here.
        """
        counts = pd.concat([self.ego_df["right_roadmark_color"],
                            self.ego_df["left_roadmark_color"]]).value_counts()
        target = ax if ax is not None else plt.subplots()[1]
        target.pie(counts, labels=counts.index.map(COLOR_TRANSLATION), autopct="%1.1f%%")
        target.set_title("车道线颜色占比")
        if ax is None:
            plt.savefig(path, format="png")
            plt.close()

    def draw_road_mark_type_rat(self, path="road_mark_type_ratio.png", ax=None):
        """
        Draw a pie chart of broken vs. solid lane marks, pooling both sides.

        Args:
            path (str, optional): Output png path, used only when ax is None.
            ax (optional): Target axes; when given, the caller owns the figure
                and nothing is saved here.
        """
        counts = pd.concat([self.ego_df["right_roadmark_type"],
                            self.ego_df["left_roadmark_type"]]).value_counts()
        target = ax if ax is not None else plt.subplots()[1]
        target.pie(counts,
                   labels=counts.index.map({'broken': '虚线', 'solid': '实线', 'other': '其他'}),
                   autopct="%1.1f%%")
        target.set_title("虚实线占比")
        if ax is None:
            plt.savefig(path, format="png")
            plt.close()

    def draw_double_rat(self, path="double_road_mark_ratio.png", ax=None):
        """
        Draw a pie chart of single vs. double lane marks, pooling both sides.

        Args:
            path (str, optional): Output png path, used only when ax is None.
            ax (optional): Target axes; when given, the caller owns the figure
                and nothing is saved here.
        """
        counts = pd.concat([self.ego_df["right_roadmark_double"],
                            self.ego_df["left_roadmark_double"]]).value_counts()
        target = ax if ax is not None else plt.subplots()[1]
        target.pie(counts,
                   labels=counts.index.map({'single': '单线', 'double': '双线', 'other': '其他'}),
                   autopct="%1.1f%%")
        target.set_title("单线双线占比")
        if ax is None:
            plt.savefig(path, format="png")
            plt.close()

    def draw_road_mark_charts(self, path="./road_mark_features.png"):
        """
        Save the three road-mark pie charts (color, broken/solid,
        single/double) stacked in a single figure.

        Args:
            path (str, optional): Output png path.
        """
        fig, axes = plt.subplots(3, 1)
        chart_fns = (self.draw_road_mark_color_rat,
                     self.draw_road_mark_type_rat,
                     self.draw_double_rat)
        for draw_fn, target in zip(chart_fns, axes):
            draw_fn(ax=target)

        fig.set_figheight(10)
        fig.set_figwidth(4)
        fig.suptitle("车道线类型统计")
        plt.savefig(path, format="png")
        plt.close()

    def draw_con_div_ratio(self, path="converge_diverge_ratio.png", ax=None):
        """
        Draw a pie chart of the ratio of frames with lane convergence,
        divergence, or neither.

        Args:
            path (str, optional): Output png path, used only when ax is None.
            ax (optional): Target axes; when given, the caller owns the figure
                and nothing is saved here.
        """
        def frame_counts(series, positive_name):
            # Count frames where the linked-lane count is positive vs. not.
            counts = (series > 0).value_counts().reset_index()
            counts.columns = ["type", "count"]
            counts["type"] = counts["type"].replace([True, False], [positive_name, "none"])
            return counts

        data_con = frame_counts(self.ego_df["num_pre_lanes"], "convergence")
        data_div = frame_counts(self.ego_df["num_suc_lanes"], "divergence")
        merged = pd.merge(data_con, data_div, how="outer", on="type").fillna(0)
        merged["count"] = (merged["count_x"] + merged["count_y"]).astype("int")

        target = ax if ax is not None else plt.subplots()[1]
        slice_labels = merged['type'].map({'convergence': '汇入', 'divergence': '汇出', 'none': '无汇入汇出'})
        target.pie(merged["count"], labels=slice_labels, autopct="%1.1f%%")
        target.set_title("车道汇入汇出占比")

        if ax is None:
            plt.savefig(path, format="png")
            plt.close()

    def draw_con_di_distribution(self, type="converge", path="{}_distribution.png", bins=30, ax:plt.Axes=None):
        """
        Draw a bar chart of the converging or diverging lane-count distribution.

        Args:
            type (str): "converge" or "diverge" (case-insensitive).
            path (str): Output path template; {} receives the lowercase type.
            bins (int): Unused; kept for interface compatibility.
            ax (plt.Axes, optional): Target axes; when given nothing is saved.

        Raises:
            ValueError: when type is neither "converge" nor "diverge".
        """
        # Normalize the selector once. BUG FIX: the original selected data via
        # type.lower() but chose the caption via the raw `type`, so mixed-case
        # input (e.g. "Converge") charted converge data under the 汇出 caption.
        kind = type.lower()
        if kind == "converge":
            data = self.ego_df["num_pre_lanes"].loc[self.ego_df["num_pre_lanes"] > 0]
        elif kind == "diverge":
            data = self.ego_df["num_suc_lanes"].loc[self.ego_df["num_suc_lanes"] > 0]
        else:
            raise ValueError(f"Unknown type: '{type}'")

        if len(data) == 0:
            return

        figure_type = "汇入" if kind == "converge" else "汇出"
        this_ax = ax
        if this_ax is None:
            fig, this_ax = plt.subplots()

        data = data.astype("int")
        x_max = np.max(data)
        data = data.value_counts()

        this_ax.bar(data.index, data, width=0.5)
        this_ax.set_xticks(np.arange(1, x_max + 1, 1))
        this_ax.set_xlabel(f"{figure_type}车道数")
        this_ax.set_ylabel("帧数")
        this_ax.set_title(f"{figure_type}车道数量分布")
        # Annotate each bar with its count, centered at half height.
        for index in data.index:
            this_ax.text(index, data[index]//2, data[index], ha='center')

        if ax is None:
            plt.savefig(path.format(kind), format="png")
            plt.close()

    def draw_converge_diverge_charts(self, path="./road_converge_diverge.png"):
        """
        Save a combined figure: the convergence/divergence ratio pie, plus a
        lane-count distribution chart for whichever of the two has data.

        Args:
            path (str, optional): Output png path.
        """
        fig = plt.figure()
        gs = GridSpec(2, 2, figure=fig, hspace=0.1)

        has_converge = (self.ego_df["num_pre_lanes"] > 0).sum() != 0
        has_diverge  = (self.ego_df["num_suc_lanes"] > 0).sum() != 0

        if not has_converge and not has_diverge:
            # No distribution data: the ratio pie gets the whole canvas.
            self.draw_con_div_ratio(ax=fig.add_subplot(gs[:, :]))
        else:
            # Ratio pie on top; distribution chart(s) underneath.
            self.draw_con_div_ratio(ax=fig.add_subplot(gs[0, :]))
            if has_converge and has_diverge:
                self.draw_con_di_distribution(type="converge", ax=fig.add_subplot(gs[1, 0]))
                self.draw_con_di_distribution(type="diverge", ax=fig.add_subplot(gs[1, 1]))
            elif has_converge:
                self.draw_con_di_distribution(type="converge", ax=fig.add_subplot(gs[1, :]))
            else:
                self.draw_con_di_distribution(type="diverge", ax=fig.add_subplot(gs[1, :]))

        fig.set_figheight(7)
        fig.set_figwidth(7)
        fig.suptitle("道路汇入、汇出分布统计")

        plt.savefig(path, format="png")
        plt.close()

    """=== Clip Data Visualization ==="""

    def draw_weather(self, path="./weather_distribution.png"):
        """Save a pie chart of weather frequencies across clips.

        Returns self for call chaining; does nothing when no weather data.
        """
        counts = self.clip_df["weather"].dropna().value_counts()
        if counts.empty:
            return self

        plt.figure()
        translated = counts.index.map(WEATHER_TRANSLATION)
        plt.pie(counts, labels=translated, autopct="%1.1f%%")
        plt.title("天气占比")
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_time(self, path="./time_distribution.png"):
        """Save a histogram of clip times of day.

        Times are assumed to be integers encoded as HHMM (e.g. 1330 for
        13:30) — TODO confirm against the collector.

        Returns self for call chaining; does nothing when no time data.
        """
        data = self.clip_df["time"].dropna()
        if len(data) == 0:
            return self
        plt.figure()
        # BUG FIX: bin edges previously stopped at 2300
        # (np.arange(0, 2400, 100)), silently dropping any clip recorded
        # between 23:00 and 23:59. Extend the edges through 2400 so the
        # last bin covers the final hour and matches the 24:00 tick below.
        plt.hist(data, bins=np.arange(0, 2401, 100))
        time_range = (00_00, 6_00, 12_00, 18_00, 24_00)
        time_labels = [f"{int(time/100):02d}:{time%100:02d}" for time in time_range]
        plt.xticks(time_range, time_labels)
        plt.xlabel("时间")
        plt.ylabel("场景数")
        plt.title("场景片段时间分布")
        plt.savefig(path, format="png")
        plt.close()
        return self

    def draw_scene(self, path="./road_scene_distribution.png"):
        """Save a pie chart of road-scene type frequencies across clips.

        Returns self for call chaining; does nothing when no scene data.
        """
        counts = self.clip_df["road_scene"].dropna().value_counts()
        if counts.empty:
            return self

        plt.figure()
        translated = counts.index.map(ROAD_SCENE_TRANSLATION)
        plt.pie(counts, labels=translated, autopct="%1.1f%%")
        plt.title("道路场景类型占比")
        plt.savefig(path, format="png")
        plt.close()
        return self

    """=== Other Visualizations ==="""
    
    def draw_pixel_depth_distribution(self, path):
        """Plot per-depth-bin pixel counts (log10 scale) for static vs.
        dynamic objects and save the figure to ``path``. Returns self."""
        fig, ax = plt.subplots()
        depth_axis = np.arange(1, DEPT_BINS + 1, 1)
        # self.pixel_depth[0] holds static-object counts, [1] dynamic —
        # presumably torch tensors (`.data.numpy()`); TODO confirm.
        for idx, series_label in enumerate(('静态目标', '动态目标')):
            counts = self.pixel_depth[idx].data.numpy()
            ax.plot(depth_axis, np.log10(counts), label=series_label)
        # shrink the axes so the legend fits to the right of the plot
        box = ax.get_position()
        ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])
        ax.legend(loc='center left', bbox_to_anchor=(1, 0.5))
        ax.set_xlabel("深度")
        ax.set_ylabel("像素数量（对数， 10的次方）")
        ax.set_title("像素点深度分布")
        fig.savefig(path, format="png")
        plt.close()
        return self
    
    """=== Draw all ==="""

    def draw_all(self, folder_path):
        """Render every chart into ``folder_path`` as PNG files.

        Returns self for call chaining.
        """
        general_charts = (
            # clip charts
            (self.draw_weather,                  "weather_distribution.png"),
            (self.draw_time,                     "time_distribution.png"),
            (self.draw_scene,                    "road_scene_distribution.png"),
            # ego charts
            (self.draw_curvature_distribution,   "curvature_distribution.png"),
            (self.draw_road_mark_charts,         "road_mark_features.png"),
            (self.draw_converge_diverge_charts,  "road_converge_diverge.png"),
            # all npc charts
            (self.draw_moving_ratio,             "static_dynamic_ratio.png"),
            (self.draw_speed_distribution,       "speed_distribution.png"),
            (self.draw_position_2d,              "2d_position_distribution.png"),
            (self.draw_orientation_distribution, "orientation_distribution.png"),
            (self.draw_npc_size_distribution,    "object_size_distribution.png"),
        )
        for draw, filename in general_charts:
            draw(os.path.join(folder_path, filename))

        # visible npc charts: one set of figures per sensor
        sensor_charts = (
            (self.draw_object_type_count,     "object_type_count_{}.png"),
            (self.draw_position_3d,           "3d_position_distribution_{}.png"),
            (self.draw_npc_size_distribution, "object_size_distribution_{}.png"),
            (self.draw_2d_bbox_position,      "2d_bbox_position_{}.png"),
            (self.draw_2d_bbox_size,          "2d_bbox_size_{}.png"),
        )
        for sensor in self.sensors:
            for draw, template in sensor_charts:
                draw(os.path.join(folder_path, template.format(sensor)), sensor_name=sensor)
        return self

    def draw_to_byte_io(self,):
        """Render every chart into in-memory PNG buffers.

        Returns:
            Tuple ``(general_content, other_figures, sensor_content)``.
            The first two map figure names to io.BytesIO buffers; the third
            maps each sensor that has at least one visible frame to its own
            dict of figure buffers.
        """
        general_keys = (
            # clip figures
            'time_figure', 'weather_figure', 'road_scene_figure',
            # road lane figures
            'curvature_figure', 'road_mark_figure', 'convergence_divergence_figure',
            # road_object figures
            'relative_position_figure', 'relative_orientation_figure',
            'object_size_figure', 'speed_figure', 'moving_nonmoving_ratio',
        )
        general_content = {key: io.BytesIO() for key in general_keys}
        sensor_content  = {}
        other_figures   = {'pixel_depth_figure': io.BytesIO()}

        # clip charts
        self.draw_weather(general_content["weather_figure"])
        self.draw_time(general_content["time_figure"])
        self.draw_scene(general_content["road_scene_figure"])
        # ego charts
        self.draw_curvature_distribution(general_content["curvature_figure"])
        self.draw_road_mark_charts(general_content["road_mark_figure"])
        self.draw_converge_diverge_charts(general_content["convergence_divergence_figure"])
        # all npc charts
        self.draw_moving_ratio(general_content["moving_nonmoving_ratio"])
        self.draw_speed_distribution(general_content["speed_figure"])
        self.draw_position_2d(general_content["relative_position_figure"])
        self.draw_orientation_distribution(general_content["relative_orientation_figure"])
        self.draw_npc_size_distribution(general_content["object_size_figure"])
        # other figures
        self.draw_pixel_depth_distribution(other_figures["pixel_depth_figure"])

        # visible npc charts
        sensor_keys = (
            'object_type_figure', 'three_d_position_figure',
            'two_d_bbox_position_figure', 'two_d_bbox_size_figure',
            'object_size_figure',
        )
        for sensor in self.sensors:
            # skip sensors that never saw an object in any frame
            if self.frame_df[f'{sensor}_visibility'].fillna(False).eq(False).all():
                continue
            sensor_figures = {key: io.BytesIO() for key in sensor_keys}
            self.draw_object_type_count(sensor_figures["object_type_figure"], sensor_name=sensor)
            self.draw_position_3d(sensor_figures["three_d_position_figure"], sensor_name=sensor)
            self.draw_npc_size_distribution(sensor_figures["object_size_figure"], sensor_name=sensor)
            self.draw_2d_bbox_position(sensor_figures["two_d_bbox_position_figure"], sensor_name=sensor)
            self.draw_2d_bbox_size(sensor_figures["two_d_bbox_size_figure"], sensor_name=sensor)
            sensor_content[sensor] = sensor_figures
        return general_content, other_figures, sensor_content

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="SimUtil")
    parser.add_argument("-d", "--debug", action='store_true', dest='debug', help="Enters debug mode. The program will collect the first 10 clips and 100 frames.")
    parser.add_argument("--data_path", type=str, required=True, dest='data_path', help="""The path to the root directory of a collection of data or an exported json file""")
    parser.add_argument("-o", "--output_path", type=str, required=False, dest='output_path', default=None, help="The path to the directory to store the report. The report is named \"synthetic_data_distribution_report.html\"")
    parser.add_argument("--export_data", action='store_true', dest='export_data', help="Exports the data files.")
    parser.add_argument("-f", "--figures", action='store_true', dest='figures', help="Draws visual figures of the data")
    parser.add_argument("--no_report", dest='no_report', action='store_true')
    args        = parser.parse_args()
    data_path   = args.data_path
    output_path = wp if args.output_path is None else args.output_path
    perf_path   = os.path.join(output_path, "profile.txt")
    debug_mode  = args.debug
    optimize    = False
    logging.basicConfig(level=logging.INFO,
                        format="[%(levelname)s] %(name)s - %(message)s")
    main_logger = logging.getLogger(name="main_logger")
    collector   = DataCollector(data_path, main_logger.getChild("DataCollector_logger"))

    # BUG FIX: bind the error lists up front. Previously they were only
    # assigned inside the file/dir branches, so a data_path that is neither
    # an existing file nor a directory crashed much later with a NameError
    # in the report/exit-code section.
    failed_clips  = []
    empty_clips   = []
    failed_images = []

    main_logger.info("Loading Data ...")
    if os.path.isfile(data_path):
        if data_path.split(".")[-1] != "json":
            # NOTE(review): raising a Warning subclass aborts the run;
            # presumably intended as a hard input check — confirm.
            raise RuntimeWarning("The input file may not be a json file: {}".format(data_path))
        try:
            collector.load_from_json(data_path)
        except Exception as e:
            main_logger.error(e)
            main_logger.error("Error when loading the json file: {}".format(data_path))
            sys.exit(1)
    if os.path.isdir(data_path):
        failed_clips, empty_clips, failed_images = collector.collect(debug=debug_mode)

        if optimize:
            result = collector.collect_clip_data(debug=debug_mode)
            failed_clips, map_to_paths, clip_to_parser = result

            # distribute work among different

        # record everything that could not be collected
        error_file_path = os.path.join(output_path, "failed_files.txt")
        with open(error_file_path, "w") as error_f:
            for clip in failed_clips:
                error_f.write(f"No meta.json file: {clip}\n")
            for clip in empty_clips:
                error_f.write(f"No annotation file: {clip}\n")
            for image_file in failed_images:
                error_f.write(f"Image file not found : {image_file}\n")
    main_logger.info("Finished loading.")

    if args.export_data:
        main_logger.info("Exporting json and csv files...")
        data_file_path = os.path.join(output_path, "./output_data_files")
        os.makedirs(data_file_path, exist_ok=True)
        json_file_path = os.path.join(data_file_path, "sample_data.json")
        collector.dump_to_json(json_file_path)
        collector.dump_csv_files(data_file_path, "sample_data")

    if args.figures:
        # NOTE(review): figures go next to the script (wp), not to
        # output_path — looks inconsistent with the other outputs; confirm.
        figure_file_path = os.path.join(wp, './output_figures')
        main_logger.info("Drawing graphs...")
        os.makedirs(figure_file_path, exist_ok=True)
        collector.draw_all(figure_file_path)

    if not args.no_report:
        # output_path is never None here (it defaults to wp above), so the
        # old `if output_path is None` branch was dead and has been removed.
        os.makedirs(output_path, exist_ok=True)
        # Resolve relative to the script directory (join is a no-op for
        # absolute paths), and log the SAME path that is written — the old
        # code logged the unresolved path but wrote join(wp, report_path).
        report_path = os.path.join(wp, output_path, "synthetic_data_distribution_report.html")
        main_logger.info(f"Writing html report to {report_path}")
        clips_collected  = len(collector.clip_df)
        clips_missing    = len(failed_clips)
        clips_empty      = len(empty_clips)
        frames_collected = len(collector.frame_df['frame_id'].unique())
        total_clips      = clips_collected + clips_empty + clips_missing

        data_quality = {'total_clips'   : total_clips,
                        'num_collected' : clips_collected,
                        'num_missing'   : clips_missing,
                        'num_empty'     : clips_empty,
                        'num_frames'    : frames_collected,
                        # BUG FIX: guard the average against zero collected clips
                        'avg_frame'     : frames_collected / clips_collected if clips_collected else 0,
                        }

        report_writor   = ReportWritor()
        report_writor.write_report(data_quality, collector, report_path)
    main_logger.info("Done")
    # a non-zero exit code signals partially-missing data to the caller
    if (len(empty_clips) > 0 or
        len(failed_clips) > 0 or
        len(failed_images) > 0):
        sys.exit(10)
    sys.exit(0)
