#!/usr/bin/env python3
# -*- coding:utf-8 -*-
# @Author: renjin@bit.edu.cn
# @Date  : 2024-08-06

"""
【节点名称】：
    NuScenesDatasetLoaderNode
【依赖项安装】：
    pip install nuscenes-devkit
【订阅类型】：
    std_msgs::Boolean （是否输出下一张图像）
    spirecv_msgs::EvaluationResult （算法评价结果）
【发布类型】：
    sensor_msgs::CompressedImage （输出图像）
    std_msgs::Boolean （评价任务结束时，输出关闭所有节点指令）
"""

import threading
import time
import os
import platform
import json
from collections import defaultdict
import cv2
import argparse
from typing import Union
from queue import Queue
from spirems import tensor2sms, sms2tensor, pcl2sms
from spirems import Subscriber, Publisher, cvimg2sms, def_msg, QoS, BaseNode, get_extra_args, Rate
import uuid
import numpy as np
import copy
from scipy.spatial.transform import Rotation
from nuscenes.nuscenes import NuScenes
from nuscenes.utils.data_classes import LidarPointCloud, RadarPointCloud, Box
from nuscenes.utils.data_io import load_bin_file, panoptic_to_lidarseg


# Fixed color palette: one (r, g, b) tuple (0-255) per category index.
# Indexed by the position of a category in the dataset's category list and
# scaled to [0, 1] when filling cube color fields (some entries repeat).
color_dict = {
    0: (23, 184, 155),
    1: (129, 61, 209),
    2: (217, 79, 72),
    3: (76, 175, 80),
    4: (33, 150, 243),
    5: (255, 152, 0),
    6: (156, 39, 176),
    7: (102, 187, 106),
    8: (63, 81, 181),
    9: (255, 87, 34),
    10: (121, 85, 72),
    11: (46, 125, 50),
    12: (21, 101, 192),
    13: (245, 124, 0),
    14: (118, 42, 131),
    15: (3, 169, 244),
    16: (255, 193, 7),
    17: (156, 39, 176),
    18: (0, 188, 212),
    19: (255, 235, 59),
    20: (103, 58, 183),
    21: (0, 150, 136),
    22: (244, 67, 54),
    23: (76, 175, 80),
    24: (33, 33, 33),
    25: (255, 152, 0),
    26: (156, 39, 176),
    27: (66, 165, 245),
    28: (255, 87, 34),
    29: (121, 85, 72),
    30: (46, 125, 50),
    31: (21, 101, 192),
    32: (245, 124, 0),
    33: (118, 42, 131),
    34: (3, 169, 244),
    35: (255, 193, 7),
    36: (156, 39, 176),
    37: (0, 188, 212),
    38: (255, 235, 59),
    39: (103, 58, 183),
    40: (0, 150, 136)
}


def inverse_transform(body_to_world_pos, body_to_world_quat):
    """
    计算世界坐标系到车体坐标系的逆变换
    
    参数:
    body_to_world_pos (list或np.array): 车体到世界的位置变换 [x, y, z]
    body_to_world_quat (list或np.array): 车体到世界的姿态四元数 [x, y, z, w]
    
    返回:
    world_to_body_pos (np.array): 世界到车体的位置变换 [x, y, z]
    world_to_body_quat (np.array): 世界到车体的姿态四元数 [x, y, z, w]
    """
    # 将四元数转换为旋转对象
    rotation_BW = Rotation.from_quat(body_to_world_quat)
    
    # 计算逆旋转（四元数的共轭）
    rotation_WB = rotation_BW.inv()
    world_to_body_quat = rotation_WB.as_quat()  # [x, y, z, w]
    
    # 计算逆位置变换: p_WB = -R_WB * p_BW
    rotation_matrix_WB = rotation_WB.as_matrix()
    world_to_body_pos = -rotation_matrix_WB @ np.array(body_to_world_pos)
    
    return world_to_body_pos, world_to_body_quat


class NuScenesDatasetLoaderNode(threading.Thread, BaseNode):
    """SpireMS node that replays a nuScenes dataset as sensor samples.

    Subscribes:
        /<job_name>/launch_next (std_msgs::Boolean): trigger publishing of
            the next sample.
    Publishes:
        /<job_name>/sensor/sample (spirecv_msgs::NuSample): one bundled
            sample (camera images, lidar/radar point clouds, annotations,
            calibrations, ego poses and lookup tables).
        When ``publish_split_topic`` is enabled, additionally per-channel
        shared-memory image/point-cloud topics, camera calibration topics,
        frame transforms and a SceneUpdate carrying annotation boxes.
    """
    def __init__(
        self,
        job_name: str,
        ip: str = '127.0.0.1',
        port: int = 9094,
        param_dict_or_file: Union[dict, str] = None,
        sms_shutdown: bool = True,
        **kwargs
    ):
        """Create the node, load the dataset and start the worker thread.

        Args:
            job_name: SpireCV job name; used as the topic-name prefix.
            ip: SpireMS core IP address.
            port: SpireMS core port.
            param_dict_or_file: parameter dict or path to a JSON config file.
            sms_shutdown: emit a shutdown message on completion; string
                forms such as 'True'/'1' are accepted and normalized.
        """
        threading.Thread.__init__(self)
        # Normalize string/CLI truthy forms to a real bool before BaseNode sees it.
        sms_shutdown = True if sms_shutdown in ['True', 'true', '1', True] else False
        BaseNode.__init__(
            self,
            self.__class__.__name__,
            job_name,
            ip=ip,
            port=port,
            param_dict_or_file=param_dict_or_file,
            sms_shutdown=sms_shutdown,
            **kwargs
        )
        # Node parameters (all overridable through param_dict_or_file).
        self.remote_ip = self.get_param("remote_ip", "127.0.0.1")
        self.remote_port = self.get_param("remote_port", 9094)
        self.sms_shutdown_emit = self.get_param("sms_shutdown_emit", True)
        self.data_root = self.get_param("data_root", r"/media/jario/T9/nuScenes/v1.0-mini")
        self.version = self.get_param("version", "v1.0-mini")
        self.lidar_fields = self.get_param("lidar_fields", ["x", "y", "z", "intensity"])
        self.radar_fields = self.get_param("radar_fields", [
            "x", "y", "z", "dyn_prop", "id", "rcs", "vx", "vy", "vx_comp", "vy_comp", "is_quality_valid",
            "ambig_state", "x_rms", "y_rms", "invalid_state", "pdh0", "vx_rms", "vy_rms"
        ])
        self.auto_next = self.get_param("auto_next", True)
        self.rate = self.get_param("rate", 2)
        self.repeated = self.get_param("repeated", 1)
        self.modalities = self.get_param("modalities", ["camera", "lidar", "radar"])
        self.nsweeps = self.get_param("nsweeps", 1)
        self.zero_intensity = self.get_param("zero_intensity", False)
        self.publish_split_topic = self.get_param("publish_split_topic", True)
        self.dataset_name = self.get_param("dataset_name", "nuscenes")
        # NOTE(review): leading slash presumably addresses a global/shared
        # parameter namespace — confirm against BaseNode.get_param semantics.
        self.dataset_categories = self.get_param("/dataset_categories", {})
        self.params_help()

        # Category list of the selected dataset; its order indexes color_dict.
        self.cates_list = self.dataset_categories[self.dataset_name]
        self.client_id = str(uuid.uuid4()).replace('-', '_')
        # Queue of sample indices to publish; fed by launch_next(), consumed by run().
        self.token_i_queue = Queue()
        self.queue_pool.append(self.token_i_queue)
        self.token_i = 0
        self.done = False

        self.job_name = job_name
        self.ip = ip
        self.port = port
        self._next_reader = Subscriber(
            '/' + job_name + '/launch_next', 'std_msgs::Boolean', self.launch_next,
            ip=ip, port=port, qos=QoS.Reliability
        )
        self._sample_writer = Publisher(
            '/' + job_name + '/sensor/sample', 'spirecv_msgs::NuSample',
            ip=ip, port=port, qos=QoS.Reliability
        )
        if self.publish_split_topic:
            # Per-sensor publishers are filled in by _load_dataset() once the
            # dataset's sensor channels are known.
            self._split_pubs = {}
            self._split_calib_pubs = {}
            self.sms_trans_front_pub = Publisher('/' + job_name + "/frame_trans", "geometry_msgs::FrameTransforms", ip=ip, port=port)
            self.sms_base_pub = Publisher('/' + job_name + "/base_link", "geometry_msgs::PoseInFrame", ip=ip, port=port)
            self._scene_update_writer = Publisher(
                '/' + job_name + "/scene_update", 'geometry_msgs::SceneUpdate',
                ip=ip, port=port, qos=QoS.Reliability
            )

        self._load_dataset()
        self.start()

    def release(self):
        """Release BaseNode resources and kill this node's pub/sub handles."""
        BaseNode.release(self)
        self._next_reader.kill()
        self._sample_writer.kill()

    def launch_next(self, msg: dict = None):
        """Queue the next sample index for publishing.

        Used both as the /launch_next subscriber callback (requires a truthy
        ``msg['data']``) and called directly with ``msg=None``. When
        ``repeated`` is set the index wraps back to 0 after the last sample;
        otherwise ``done`` is set once the dataset is exhausted.
        """
        if (isinstance(msg, dict) and msg['data']) or msg is None:
            if self.token_i < len(self.sample_tokens):
                self.token_i_queue.put(self.token_i)
                self.token_i += 1
                # print(self.token_i)
                if self.repeated and self.token_i == len(self.sample_tokens):
                    self.token_i = 0
            else:
                self.done = True

    def run(self):
        """Worker loop: pop sample indices and publish assembled NuSamples.

        For each queued index the requested modalities (camera / lidar /
        radar), annotations and lookup tables are gathered into one
        spirecv_msgs::NuSample message. When ``publish_split_topic`` is set,
        each sensor is additionally republished on its own shared-memory
        topic together with calibration, frame transforms and a SceneUpdate
        visualization of the annotation boxes.
        """
        while self.is_running():
            # Block until launch_next() queues an index; a None entry is
            # treated as the shutdown sentinel (presumably injected through
            # queue_pool by BaseNode on shutdown — TODO confirm).
            token_i = self.token_i_queue.get(block=True)
            if token_i is None:
                break

            t1 = time.time()
            found_local_file = False
            sample_token = self.sample_tokens[token_i]
            msg = def_msg("spirecv_msgs::NuSample")
            # Attach the full nuScenes lookup tables to every sample message.
            msg["category"] = self.category
            msg["attribute"] = self.attribute
            msg["visibility"] = self.visibility
            msg["sensor"] = self.sensor
            # msg["log"] = self.log
            # Token sets collected while walking sample_data/annotations,
            # resolved into full records at the end of the loop body.
            ego_pose_set = set()
            calibrated_sensor_set = set()
            instance_set = set()
            lidarseg_set = set()
            sample = self.nusc.get('sample', sample_token)
            scene = self.nusc.get('scene', sample['scene_token'])
            # print("scene:", scene)
            msg["scene"] = scene
            msg["sample"] = sample
            # print("sample:", sample)
            # Record the sample's reference timestamp (microseconds) and the
            # per-modality timestamps for the sync report printed below.
            sample_t0 = sample['timestamp']
            cams_t0 = []
            lidars_t0 = []
            radars_t0 = []
            if self.publish_split_topic:
                trans_msg = def_msg("geometry_msgs::FrameTransforms")
                trans_msg["frame_id"] = "global_link"
                trans_msg["transforms"] = []
            msg["sample_data"] = []
            # sensors = ['CAM_FRONT', 'CAM_FRONT_LEFT', 'CAM_FRONT_RIGHT', 'CAM_BACK_LEFT', 'CAM_BACK_RIGHT', 'CAM_BACK']
            if "camera" in self.modalities:
                for sensor in self.sensor_cams:
                    if sensor not in sample['data']:
                        continue

                    cam_data = self.nusc.get('sample_data', sample['data'][sensor])
                    calibrated_sensor_set.add(cam_data["calibrated_sensor_token"])
                    ego_pose_set.add(cam_data["ego_pose_token"])
                    cams_t0.append(cam_data['timestamp'])

                    # Skip channels whose files are missing from the local copy.
                    img_fn = str(os.path.join(self.data_root, cam_data['filename']))
                    if os.path.isfile(img_fn):
                        found_local_file = True
                    else:
                        continue

                    img = cv2.imread(str(os.path.join(self.data_root, cam_data['filename'])))
                    assert img.shape[0] == cam_data['height'] and img.shape[1] == cam_data['width']
                    cam_data["data"] = tensor2sms(img, self.__class__.__name__ + "_" + sensor)
                    msg["sample_data"].append(cam_data)

                    if self.publish_split_topic:
                        img_msg = self._split_pubs[sensor].cvimg2sms_mem(
                            img,
                            frame_id=sensor.lower(),               # frame_id can be customized
                            timestamp=cam_data["timestamp"] / 1e6  # timestamp can be customized
                        )
                        self._split_pubs[sensor].publish(img_msg)

                        calibrated_sensor = self.nusc.get('calibrated_sensor', cam_data["calibrated_sensor_token"])
                        calib_msg = def_msg('sensor_msgs::CameraCalibration')
                        calib_msg['frame_id'] = sensor.lower()
                        calib_msg['width'] = img.shape[1]
                        calib_msg['height'] = img.shape[0]
                        K = calibrated_sensor["camera_intrinsic"]
                        # Flatten the 3x3 intrinsic matrix row-major, then embed
                        # it into a 3x4 projection matrix with a zero column.
                        K = [*K[0], *K[1], *K[2]]
                        calib_msg['K'] = K
                        calib_msg['D'] = [0, 0, 0, 0, 0]
                        calib_msg['P'] = [K[0], K[1], K[2], 0, K[3], K[4], K[5], 0, K[6], K[7], K[8], 0]
                        self._split_calib_pubs[sensor].publish(calib_msg)
                        # nuScenes stores quaternions as [w, x, y, z]; the
                        # transform message wants separate x/y/z/w fields.
                        trans_msg["transforms"].append(
                            {
                                "timestamp": cam_data["timestamp"] / 1e6,
                                "parent_frame_id": "base_link",
                                "child_frame_id": sensor.lower(),
                                "translation": {
                                    "x": calibrated_sensor["translation"][0],
                                    "y": calibrated_sensor["translation"][1],
                                    "z": calibrated_sensor["translation"][2]
                                },
                                "rotation": {
                                    "x": calibrated_sensor["rotation"][1],
                                    "y": calibrated_sensor["rotation"][2],
                                    "z": calibrated_sensor["rotation"][3],
                                    "w": calibrated_sensor["rotation"][0]
                                }
                            }
                        )

            # sensors = ['LIDAR_TOP']
            if "lidar" in self.modalities:
                for sensor in self.sensor_lidars:
                    if sensor not in sample['data']:
                        # print(sample['data'])
                        continue

                    lidar_data = self.nusc.get('sample_data', sample['data'][sensor])
                    calibrated_sensor_set.add(lidar_data["calibrated_sensor_token"])
                    ego_pose_set.add(lidar_data["ego_pose_token"])
                    ego_pose = self.nusc.get('ego_pose', lidar_data["ego_pose_token"])
                    lidars_t0.append(lidar_data['timestamp'])

                    # Remember this sample_data token for lidarseg/panoptic lookup.
                    lidar_data_token = lidar_data['token']
                    lidarseg_set.add(lidar_data_token)

                    pcd_fn = str(os.path.join(self.data_root, lidar_data['filename']))
                    # if 'MULTI-SLOW-MIT__LIDAR_FRONT__164116360033966592' in pcd_fn:
                    #     print(pcd_fn)
                    if os.path.isfile(pcd_fn):
                        found_local_file = True
                    else:
                        continue

                    # Aggregate multiple sweeps when nsweeps > 1, otherwise
                    # load the single key-frame point cloud.
                    if self.nsweeps > 1:
                        pc, times = LidarPointCloud.from_file_multisweep(self.nusc, sample, lidar_data['channel'], sensor,
                                                                         nsweeps=self.nsweeps)
                    else:
                        pc = LidarPointCloud.from_file(str(os.path.join(self.data_root, lidar_data['filename'])))
                    assert len(self.lidar_fields) == pc.points.T.shape[1]
                    lidar_data["fields"] = self.lidar_fields
                    lidar_data["data"] = tensor2sms(pc.points.T, self.__class__.__name__ + "_" + sensor)
                    msg["sample_data"].append(lidar_data)

                    if self.publish_split_topic:
                        if self.zero_intensity:
                            pc.points.T[:, self.lidar_fields.index("intensity")] = 0
                        pcl_msg = self._split_pubs[sensor].pcl2sms_mem(
                            pc.points.T,
                            fields=self.lidar_fields,                # fields describes each point-cloud column
                            frame_id=sensor.lower(),                 # frame_id can be customized
                            timestamp=lidar_data["timestamp"] / 1e6  # timestamp can be customized
                        )
                        # Publish the shared-memory message
                        self._split_pubs[sensor].publish(pcl_msg)
                        calibrated_sensor = self.nusc.get('calibrated_sensor', lidar_data["calibrated_sensor_token"])
                        trans_msg["transforms"].append(
                            {
                                "timestamp": lidar_data["timestamp"] / 1e6,
                                "parent_frame_id": "base_link",
                                "child_frame_id": sensor.lower(),
                                "translation": {
                                    "x": calibrated_sensor["translation"][0],
                                    "y": calibrated_sensor["translation"][1],
                                    "z": calibrated_sensor["translation"][2]
                                },
                                "rotation": {
                                    "x": calibrated_sensor["rotation"][1],
                                    "y": calibrated_sensor["rotation"][2],
                                    "z": calibrated_sensor["rotation"][3],
                                    "w": calibrated_sensor["rotation"][0]
                                }
                            }
                        )
                        # Publish global_link relative to base_link by inverting
                        # the ego pose (nuScenes quaternion reordered to xyzw).
                        pos_inv, quat_inv = inverse_transform(
                            ego_pose["translation"],
                            [ego_pose["rotation"][1], ego_pose["rotation"][2], ego_pose["rotation"][3], ego_pose["rotation"][0]]
                        )
                        trans_msg["transforms"].append(
                            {
                                "timestamp": lidar_data["timestamp"] / 1e6,
                                "parent_frame_id": "base_link",
                                "child_frame_id": "global_link",
                                "translation": {
                                    "x": pos_inv[0],
                                    "y": pos_inv[1],
                                    "z": pos_inv[2]
                                },
                                "rotation": {
                                    "x": quat_inv[0],
                                    "y": quat_inv[1],
                                    "z": quat_inv[2],
                                    "w": quat_inv[3]
                                }
                            }
                        )

            # sensors = ['RADAR_FRONT', 'RADAR_FRONT_LEFT', 'RADAR_FRONT_RIGHT', 'RADAR_BACK_LEFT', 'RADAR_BACK_RIGHT']
            if "radar" in self.modalities:
                for sensor in self.sensor_radars:
                    if sensor not in sample['data']:
                        continue

                    radar_data = self.nusc.get('sample_data', sample['data'][sensor])
                    calibrated_sensor_set.add(radar_data["calibrated_sensor_token"])
                    ego_pose_set.add(radar_data["ego_pose_token"])
                    radars_t0.append(radar_data['timestamp'])

                    pcd_fn = str(os.path.join(self.data_root, radar_data['filename']))
                    if os.path.isfile(pcd_fn):
                        found_local_file = True
                    else:
                        continue

                    pc = RadarPointCloud.from_file(str(os.path.join(self.data_root, radar_data['filename'])))
                    assert len(self.radar_fields) == pc.points.T.shape[1]
                    radar_data["fields"] = self.radar_fields
                    radar_data["data"] = tensor2sms(pc.points.T, self.__class__.__name__ + "_" + sensor)
                    msg["sample_data"].append(radar_data)

                    if self.publish_split_topic:
                        pcl_msg = self._split_pubs[sensor].pcl2sms_mem(
                            pc.points.T.astype(np.float32),
                            fields=self.radar_fields,                # fields describes each point-cloud column
                            frame_id=sensor.lower(),                 # frame_id can be customized
                            timestamp=radar_data["timestamp"] / 1e6  # timestamp can be customized
                        )
                        # Publish the shared-memory message
                        self._split_pubs[sensor].publish(pcl_msg)
                        calibrated_sensor = self.nusc.get('calibrated_sensor', radar_data["calibrated_sensor_token"])
                        trans_msg["transforms"].append(
                            {
                                "timestamp": radar_data["timestamp"] / 1e6,
                                "parent_frame_id": "base_link",
                                "child_frame_id": sensor.lower(),
                                "translation": {
                                    "x": calibrated_sensor["translation"][0],
                                    "y": calibrated_sensor["translation"][1],
                                    "z": calibrated_sensor["translation"][2]
                                },
                                "rotation": {
                                    "x": calibrated_sensor["rotation"][1],
                                    "y": calibrated_sensor["rotation"][2],
                                    "z": calibrated_sensor["rotation"][3],
                                    "w": calibrated_sensor["rotation"][0]
                                }
                            }
                        )

            # No file of this sample exists locally: skip it and ask for the next one.
            if not found_local_file:
                self.launch_next()
                continue

            if self.publish_split_topic:
                self.sms_trans_front_pub.publish(trans_msg)
                base_msg = def_msg("geometry_msgs::PoseInFrame")
                base_msg["timestamp"] = sample["timestamp"] / 1e6
                base_msg["frame_id"] = "base_link"
                self.sms_base_pub.publish(base_msg)
                # Prepare object visualization for publishing
                entity = def_msg("geometry_msgs::SceneEntity")
                entity["frame_id"] = "global_link"
                entity["frame_locked"] = True
                entity["lifetime"]["sec"] = 1  # boxes expire after 1 s unless refreshed
                entity["cubes"] = []

            msg["sample_annotation"] = []
            for ann_t in sample['anns']:
                anno_data =  self.nusc.get('sample_annotation', ann_t)
                # print(anno_data)
                instance_set.add(anno_data['instance_token'])
                msg["sample_annotation"].append(anno_data)
                if self.publish_split_topic:
                    cube = def_msg("geometry_msgs::CubePrimitive")
                    cube["pose"]["position"]["x"] = anno_data['translation'][0]
                    cube["pose"]["position"]["y"] = anno_data['translation'][1]
                    cube["pose"]["position"]["z"] = anno_data['translation'][2]
                    cube["pose"]["orientation"]["x"] = anno_data['rotation'][1]
                    cube["pose"]["orientation"]["y"] = anno_data['rotation'][2]
                    cube["pose"]["orientation"]["z"] = anno_data['rotation'][3]
                    cube["pose"]["orientation"]["w"] = anno_data['rotation'][0]
                    # NOTE(review): size[0]/size[1] are swapped here — nuScenes
                    # box size is presumably [w, l, h], mapped to x=l, y=w, z=h.
                    cube["size"]["x"] = anno_data['size'][1]
                    cube["size"]["y"] = anno_data['size'][0]
                    cube["size"]["z"] = anno_data['size'][2]
                    if anno_data['category_name'] in self.cates_list:
                        color = color_dict[self.cates_list.index(anno_data['category_name'])]
                    else:
                        print("[WARN]: Unrecognized object category: '{}'".format(anno_data['category_name']))
                        continue
                    cube["color"]["r"] = color[0] / 255
                    cube["color"]["g"] = color[1] / 255
                    cube["color"]["b"] = color[2] / 255
                    cube["color"]["a"] = 0.2
                    entity["cubes"].append(copy.deepcopy(cube))
                    """
                    text = def_msg("geometry_msgs::TextPrimitive")
                    text["pose"]["position"]["x"] = anno_data['translation'][0]
                    text["pose"]["position"]["y"] = anno_data['translation'][1]
                    text["pose"]["position"]["z"] = anno_data['translation'][2]
                    text["pose"]["orientation"]["x"] = anno_data['rotation'][1]
                    text["pose"]["orientation"]["y"] = anno_data['rotation'][2]
                    text["pose"]["orientation"]["z"] = anno_data['rotation'][3]
                    text["pose"]["orientation"]["w"] = anno_data['rotation'][0]
                    text["text"] = anno_data['category_name']
                    text["font_size"] = 10
                    text["billboard"] = True
                    text["scale_invariant"] = True
                    text["color"]["r"] = 0
                    text["color"]["g"] = 0
                    text["color"]["b"] = 0
                    text["color"]["a"] = 0.8
                    entity["texts"].append(copy.deepcopy(text))
                    """

            if self.publish_split_topic:
                vis_msg = def_msg("geometry_msgs::SceneUpdate")
                vis_msg["entities"] = [entity]
                vis_msg["timestamp"] = sample["timestamp"] / 1e6
                self._scene_update_writer.publish(vis_msg)

            # Resolve the collected tokens into full records for the message.
            msg["ego_pose"] = []
            msg["instance"] = []
            msg["lidarseg"] = []
            msg["panoptic"] = []
            msg["calibrated_sensor"] = []
            for t in calibrated_sensor_set:
                calibrated_sensor = self.nusc.get('calibrated_sensor', t)
                msg["calibrated_sensor"].append(calibrated_sensor)
            for t in ego_pose_set:
                ego_pose = self.nusc.get('ego_pose', t)
                msg["ego_pose"].append(ego_pose)
            for t in instance_set:
                instance = self.nusc.get('instance', t)
                msg["instance"].append(instance)
            # lidarseg/panoptic tables only exist in datasets that ship them.
            if hasattr(self.nusc, "lidarseg"):
                for t in lidarseg_set:
                    lidarseg = self.nusc.get('lidarseg', t)
                    lidarseg_label = load_bin_file(str(os.path.join(self.data_root, lidarseg['filename'])), type='lidarseg')
                    lidarseg["label"] = tensor2sms(lidarseg_label, self.__class__.__name__ + "_lidarseg")
                    msg["lidarseg"].append(lidarseg)
                    panoptic = self.nusc.get('panoptic', t)
                    panoptic_label = load_bin_file(str(os.path.join(self.data_root, panoptic['filename'])), type='panoptic')
                    panoptic["label"] = tensor2sms(panoptic_label, self.__class__.__name__ + "_lidarseg")
                    msg["panoptic"].append(panoptic)

            # Bookkeeping so consumers can track progress and source client.
            msg['sample_total'] = len(self.sample_tokens)
            msg['sample_id'] = token_i + 1
            msg['client_id'] = self.client_id
            msg['t1'] = time.time()

            # Per-sample timing/synchronization report.
            print("------------------------------------------")
            # print("category:", len(msg["category"]))
            # print("attribute:", len(msg["attribute"]))
            # print("visibility:", len(msg["visibility"]))
            # print("ego_pose:", len(msg["ego_pose"]))
            # print("calibrated_sensor:", len(msg["calibrated_sensor"]))
            # print("sample_data:", len(msg["sample_data"]))
            # print("sample_annotation:", len(msg["sample_annotation"]))
            # print("instance:", len(msg["instance"]))
            # print("sample_id:", msg["sample_id"])
            print("Sample Timestamp: {}".format(sample_t0))
            print("Camera Timestamps: {}".format(cams_t0))
            print("Lidar Timestamps: {}".format(lidars_t0))
            print("Radar Timestamps: {}".format(radars_t0))
            if len(cams_t0) > 0:
                cams_t0 = np.array(cams_t0)
                print("Camera MaxDiff: {} ms".format(np.abs(cams_t0 - sample_t0).max() / 1000.))
            if len(lidars_t0) > 0:
                lidars_t0 = np.array(lidars_t0)
                print("Lidar MaxDiff: {} ms".format(np.abs(lidars_t0 - sample_t0).max() / 1000.))
            if len(radars_t0) > 0:
                radars_t0 = np.array(radars_t0)
                print("Radar MaxDiff: {} ms".format(np.abs(radars_t0 - sample_t0).max() / 1000.))
            print("------------------------------------------")
            print("Unpacking Time: {}".format(time.time() - t1))
            self._sample_writer.publish(msg)

        self.release()
        print('{} quit!'.format(self.__class__.__name__))

    def _load_dataset(self):
        """Open the NuScenes dataset, cache its tables and index all samples.

        Also creates the per-channel split publishers (raw images for
        cameras, point clouds for lidar/radar, plus camera calibration
        topics) when ``publish_split_topic`` is enabled, and builds
        ``self.sample_tokens`` by walking each scene's sample linked list
        through the ``next`` pointers.
        """
        nusc = NuScenes(version=self.version, dataroot=self.data_root, verbose=True)  # v1.0-mini
        self.nusc = nusc
        # nusc.list_scenes()
        self.category = nusc.category
        self.attribute = nusc.attribute
        self.visibility = nusc.visibility  #
        self.sensor = nusc.sensor  #
        self.calibrated_sensor = nusc.calibrated_sensor
        self.ego_pose = nusc.ego_pose #
        self.instance = nusc.instance
        self.log = nusc.log
        # lidarseg/panoptic tables are optional extensions of the dataset.
        if hasattr(nusc, "lidarseg"):
            self.lidarseg = nusc.lidarseg
        if hasattr(nusc, "panoptic"):
            self.panoptic = nusc.panoptic
        # Partition sensor channels by modality and create split publishers.
        self.sensor_cams = []
        self.sensor_lidars = []
        self.sensor_radars = []
        for s in self.sensor:
            if s['modality'] == 'camera':
                self.sensor_cams.append(s['channel'])
                if self.publish_split_topic:
                    pub = Publisher(
                        '/' + self.job_name + '/' + s['channel'].lower(), 'memory_msgs::RawImage',
                        ip=self.ip, port=self.port
                    )
                    self._split_pubs[s['channel']] = pub
                    calib_pub = Publisher(
                        '/' + self.job_name + '/' + s['channel'].lower() + "/calibration_info",
                        'sensor_msgs::CameraCalibration',
                        ip=self.ip, port=self.port
                    )
                    self._split_calib_pubs[s['channel']] = calib_pub
            elif s['modality'] == 'lidar':
                self.sensor_lidars.append(s['channel'])
                if self.publish_split_topic:
                    pub = Publisher(
                        '/' + self.job_name + '/' + s['channel'].lower(), 'memory_msgs::PointCloud',
                        ip=self.ip, port=self.port
                    )
                    self._split_pubs[s['channel']] = pub
            elif s['modality'] == 'radar':
                self.sensor_radars.append(s['channel'])
                if self.publish_split_topic:
                    pub = Publisher(
                        '/' + self.job_name + '/' + s['channel'].lower(), 'memory_msgs::PointCloud',
                        ip=self.ip, port=self.port
                    )
                    self._split_pubs[s['channel']] = pub
        print(self.sensor_cams)
        print(self.sensor_lidars)
        print(self.sensor_radars)
        # print(json.dumps(self.ego_pose, indent=4))
        # Collect every sample token, scene by scene, in temporal order.
        self.sample_tokens = []
        for j in range(len(nusc.scene)):
            scene = nusc.scene[j]
            sample_token = scene['first_sample_token']
            while len(sample_token) > 0:
                self.sample_tokens.append(sample_token)
                sample = nusc.get('sample', sample_token)
                sample_token = sample['next']


if __name__ == '__main__':
    # Command-line interface: config file, job name, and SpireMS core address.
    parser = argparse.ArgumentParser()
    for flag, arg_type, default, help_text in (
        ('--config', str, 'default_params.json', 'SpireCV2 Config (.json)'),
        ('--job-name', str, 'live', 'SpireCV Job Name'),
        ('--ip', str, '127.0.0.1', 'SpireMS Core IP'),
        ('--port', int, 9094, 'SpireMS Core Port'),
    ):
        parser.add_argument(flag, type=arg_type, default=default, help=help_text)
    args, unknown_args = parser.parse_known_args()

    # Relative config paths are resolved against <repo>/params/spirecv2,
    # where <repo> is the 'spirecv-pro' prefix of this file's absolute path.
    if not os.path.isabs(args.config):
        current_path = os.path.abspath(__file__)
        repo_prefix = current_path[:current_path.find('spirecv-pro') + 11]
        args.config = os.path.join(repo_prefix, 'params', 'spirecv2', args.config)
    print("--config:", args.config)
    print("--job-name:", args.job_name)
    extra = get_extra_args(unknown_args)

    node = NuScenesDatasetLoaderNode(args.job_name, param_dict_or_file=args.config, ip=args.ip, port=args.port, **extra)
    # Kick off the first sample immediately; in auto mode keep pacing
    # further samples at the configured rate until the dataset is done.
    node.launch_next()
    if node.auto_next:
        r = Rate(node.rate)
        while not node.done:
            node.launch_next()
            r.sleep()
        node.shutdown()

