import carla
import numpy as np
import time
import random
from queue import Queue, Empty
import cv2
import os
import copy
import logging
from carla import Transform, Rotation, Location
import shutil
import stat
import json
import open3d as o3d



def points2pcd(PCD_FILE_PATH, points):
    """Write an Nx4 point array (x, y, z, intensity) to an ASCII PCD v0.7 file.

    :param PCD_FILE_PATH: output file path; any existing file is overwritten
    :param points: numpy array of shape (N, 4) — columns are x, y, z, intensity
    """
    # number of points in the cloud
    point_num = points.shape[0]

    # PCD header (order of fields matters for PCD readers)
    header = (
        '# .PCD v0.7 - Point Cloud Data file format\n'
        'VERSION 0.7\n'
        'FIELDS x y z intensity\n'
        'SIZE 4 4 4 4\n'
        'TYPE F F F F\n'
        'COUNT 1 1 1 1\n'
        'WIDTH {0}\n'
        'HEIGHT 1\n'
        'VIEWPOINT 0 0 0 1 0 0 0\n'
        'POINTS {0}\n'
        'DATA ascii'.format(point_num)
    )

    # 'w' truncates any pre-existing file, so the old remove-then-append
    # dance is unnecessary; the context manager guarantees the handle is
    # closed even if a write fails.
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write(header)
        # one "x y z intensity" line per point
        for row in points:
            handle.write('\n' + ' '.join(str(v) for v in row[:4]))



def get_time_stamp(ct):
    """Format a float epoch time as a local "HH-MM-SS-mmm" stamp.

    :param ct: epoch time in seconds (float), e.g. from time.time()
    :return: formatted local clock time with a millisecond suffix
    """
    clock_part = time.strftime("%H-%M-%S", time.localtime(ct))
    # fractional seconds -> whole milliseconds (truncated, not rounded)
    millis = int((ct - int(ct)) * 1000)
    return f"{clock_part}-{millis:03d}"


def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Queue one sensor measurement together with its frame id, timestamp
    and the caller-supplied sensor name.

    Installed as the ``listen()`` callback of every sensor; the main loop
    drains the queue once per world tick.
    """
    item = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(item)


# modify from manual control
def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array


# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    # print(type(points), points.shape)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    # print(type(points), points.shape)
    return points


def _parse_radar_cb(radar_data):
    points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (len(radar_data), 4))
    return points

# modify from world on rail code
def lidar_to_bev(lidar, min_x=-24, max_x=24, min_y=-16, max_y=16, pixels_per_meter=4, hist_max_per_pixel=10):
    """Rasterize lidar points into a top-down (bird's-eye-view) splat image.

    :param lidar: array whose last axis starts with the x/y coordinates
    :param hist_max_per_pixel: point count at which a cell saturates to 255
    :return: 2-D float image, rows flipped so +x (car axis) points upward
    """
    n_x_edges = (max_x - min_x) * pixels_per_meter + 1
    n_y_edges = (max_y - min_y) * pixels_per_meter + 1
    # NOTE: the upper linspace endpoint is max_* + 1, matching the original
    # world-on-rail formulation.
    xbins = np.linspace(min_x, max_x + 1, n_x_edges)
    ybins = np.linspace(min_y, max_y + 1, n_y_edges)
    # 2-D histogram over the x/y coordinates of the points.
    hist = np.histogramdd(lidar[..., :2], bins=(xbins, ybins))[0]
    # Saturate crowded cells, then scale so a full cell maps to 255.
    hist[hist > hist_max_per_pixel] = hist_max_per_pixel
    overhead_splat = hist / hist_max_per_pixel * 255.
    return overhead_splat[::-1, :]


# 
def visualize_data(rgb, lidars, text_args=(cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)):
    """Render every point cloud in *lidars* as a BEV tile and stack the tiles
    side by side.

    :param rgb: RGB image; only its height is used, to size the square tiles
    :param lidars: iterable of point arrays, or None to skip rendering
    :param text_args: kept for interface compatibility (currently unused)
    :return: horizontally concatenated BEV canvas, or None when lidars is None
    """
    rgb_canvas = np.array(rgb[..., ::-1])
    if lidars is None:
        return None
    side = rgb_canvas.shape[0]
    tiles = []
    for cloud in lidars:
        bev = lidar_to_bev(cloud).astype(np.uint8)
        bev = cv2.cvtColor(bev, cv2.COLOR_GRAY2RGB)
        tiles.append(cv2.resize(bev.astype(np.uint8), (side, side)))
    return np.concatenate(tiles, axis=1)


def mkdir_folder(path):
    """Create one sub-directory under *path* for every entry in the global
    ``sensor_type`` list (e.g. ``path/rgb``, ``path/lidar``).

    NOTE(review): this function is defined twice in this file with an
    identical body; the later definition silently shadows this one, so one
    of the two should be removed.

    :param path: parent directory for the per-sensor folders
    :return: True (kept for compatibility with existing callers)
    """
    for s_type in sensor_type:
        # exist_ok avoids the check-then-create race of os.path.isdir()
        os.makedirs(os.path.join(path, s_type), exist_ok=True)
    return True


def draw_waypoints(world, waypoints, road_id=None, life_time=50.0):
    """Highlight every waypoint belonging to *road_id* in the simulator.

    :param world: carla.World whose debug helper draws the markers
    :param waypoints: list of all map waypoints
    :param road_id: target road segment id to match
    :param life_time: seconds the debug markers stay visible
    :return: the waypoints that lie on the requested road, in input order
    """
    matched = [wp for wp in waypoints if wp.road_id == road_id]
    for wp in matched:
        # draw a green 'O' at each matched waypoint location
        world.debug.draw_string(wp.transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0),
                                life_time=life_time,
                                persistent_lines=True)
    return matched


def mkdir_folder(path):
    """Create one sub-directory under *path* for every entry in the global
    ``sensor_type`` list (e.g. ``path/rgb``, ``path/lidar``).

    NOTE(review): duplicate of the identically-named function defined
    earlier in this file; this later definition is the one that wins at
    import time. One of the two should be deleted.

    :param path: parent directory for the per-sensor folders
    :return: True (kept for compatibility with existing callers)
    """
    for s_type in sensor_type:
        # exist_ok avoids the check-then-create race of os.path.isdir()
        os.makedirs(os.path.join(path, s_type), exist_ok=True)
    return True

def main():
    """Replay a recorded traffic log inside CARLA while capturing a stacked
    multi-lidar rig at a fixed roadside pose.

    Connects to a local CARLA server, loads the 'shangdiwr' map, spawns ten
    ray-cast lidars (different channel counts and vertical FOV slices that
    together emulate one multi-beam unit), then steps the world in
    synchronous mode.  Each tick it (1) spawns/destroys vehicles so the
    world mirrors one timestep of the JSON track file ``realdata`` and
    (2) drains all lidar queues, concatenating the clouds (optionally shown
    live via open3d when the module-level ``SHOW`` flag is set).

    Relies on module-level globals defined in the ``__main__`` section:
    ``sensor_queue``, ``sensor_list``, ``actor_list``, ``vehicles_id_list``,
    ``spawed_ids``, ``save_path``, ``realdata``, ``SHOW``,
    ``view_transform`` and ``lidar_trans``.
    """
    #  ====================================== connect to the server =============================================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # client-side connection timeout (seconds)
    # with open(word_path) as od_file:
    #     data = od_file.read()
    # world = client.generate_opendrive_world(data,
    #                                         carla.OpendriveGenerationParameters(
    #                                             wall_height=0))
    world = client.load_world('shangdiwr')   # load the map
    # world = client.get_world()

    # Hide the traffic lights and poles auto-generated along the roads
    env_objs1 = world.get_environment_objects(carla.CityObjectLabel.TrafficLight)
    env_objs2 = world.get_environment_objects(carla.CityObjectLabel.Poles)

    env_list = []
    for env_obj in env_objs1:
        env_list.append(env_obj.id)
    for env_obj in env_objs2:
        env_list.append(env_obj.id)
    world.enable_environment_objects(env_list, False)

    synchronous_master = False

    try:

        # Per-run output folder named after the integer platform timestamp.
        pt = world.get_snapshot().timestamp.platform_timestamp
        ptime = int(pt)
        save_pcd_path = os.path.join(save_path, str(ptime))
        os.makedirs(save_pcd_path)

        # ==================================== spectator view =============================================
        spectator = world.get_spectator()
        world.debug.draw_string(view_transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0), life_time=1500,
                                persistent_lines=True)
        spectator.set_transform(lidar_trans)

        # # let spawned vehicles follow traffic rules in the world
        # ############### world settings: fixed step size and synchronous mode ###############
        traffic_manager = client.get_trafficmanager(8000)
        traffic_manager.set_global_distance_to_leading_vehicle(0.0)
        traffic_manager.global_percentage_speed_difference(0.0)
        if True:
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not settings.synchronous_mode:
                print("开启同步模式")
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                # NOTE(review): in this branch the modified fixed_delta_seconds
                # is never pushed via world.apply_settings() — confirm whether
                # that is intentional.
                synchronous_master = False
                settings.fixed_delta_seconds = 0.1

        # # Weather and time of day (sun position): clear sky, no rain,
        # # low sun altitude.
        weather = carla.WeatherParameters(
            cloudiness=0.0,  # 0-100: 0 is a clear sky, 100 fully overcast
            precipitation=0.0,  # 0 means no rain, 100 heavy rain
            sun_altitude_angle=10.0)  # 90 is noon, -90 is midnight
        world.set_weather(weather)

        # =========================== look up models in the blueprint library
        blueprint_library = world.get_blueprint_library()

        # =========================== instantiate the sensor models ============================
        # ======================================= sensors ==================================
        # cam_bp = blueprint_library.find('sensor.camera.rgb')  # camera
        lidar_bp_2_1 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        lidar_bp_2_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_2_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_1 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_1 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_28 = blueprint_library.find('sensor.lidar.ray_cast')
        # lidar_bp_64 = blueprint_library.find('sensor.lidar.ray_cast')

        # Shared sensor parameters: no atmospheric dropoff or noise so the
        # clouds are deterministic.
        atmosphere_attenuation_rate = '0.004'
        dropoff_general_rate = '0.0'
        dropoff_intensity_limit = '0.0'
        dropoff_zero_intensity = '0.0'
        noise_seed = '0.0'
        noise_stddev = '0.0'


        # cam_bp.set_attribute("image_size_x", "{}".format(IM_WIDTH))
        # cam_bp.set_attribute("image_size_y", "{}".format(IM_HEIGHT))
        # cam_bp.set_attribute("fov", "90")
        # cam_bp.set_attribute('sensor_tick', '0.1')


        # lidar_bp_64.set_attribute('channels', '64')
        # lidar_bp_64.set_attribute('upper_fov', '-1.5')
        # lidar_bp_64.set_attribute('lower_fov', '-42')
        # lidar_bp_64.set_attribute('points_per_second', '1152000')
        # lidar_bp_64.set_attribute('range', '200')
        # lidar_bp_64.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        # lidar_bp_64.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        # lidar_bp_64.set_attribute('dropoff_general_rate', dropoff_general_rate)
        # lidar_bp_64.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        # lidar_bp_64.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        # lidar_bp_64.set_attribute('noise_seed', noise_seed)
        # lidar_bp_64.set_attribute('noise_stddev', noise_stddev)


        # Each blueprint below covers one vertical FOV slice with a matching
        # channel count and point rate; rotation frequency is tied to the
        # fixed world step so each tick yields one full sweep.
        lidar_bp_2_1.set_attribute('channels', '2')
        lidar_bp_2_1.set_attribute('upper_fov', '-1.5')
        lidar_bp_2_1.set_attribute('lower_fov', '-1.7')
        lidar_bp_2_1.set_attribute('points_per_second', '36000')
        lidar_bp_2_1.set_attribute('range', '200')
        lidar_bp_2_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_28.set_attribute('channels', '28')
        lidar_bp_28.set_attribute('upper_fov', '-1.9')
        lidar_bp_28.set_attribute('lower_fov', '-4.6')
        lidar_bp_28.set_attribute('points_per_second', '504000')
        lidar_bp_28.set_attribute('range', '200')
        lidar_bp_28.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_28.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_28.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_28.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_28.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_28.set_attribute('noise_seed', noise_seed)
        lidar_bp_28.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_6_1.set_attribute('channels', '6')
        lidar_bp_6_1.set_attribute('upper_fov', '-4.8')
        lidar_bp_6_1.set_attribute('lower_fov', '-5.8')
        lidar_bp_6_1.set_attribute('points_per_second', '108000')
        lidar_bp_6_1.set_attribute('range', '200')
        lidar_bp_6_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_1.set_attribute('channels', '4')
        lidar_bp_4_1.set_attribute('upper_fov', '-6.1')
        lidar_bp_4_1.set_attribute('lower_fov', '-7')
        lidar_bp_4_1.set_attribute('points_per_second', '72000')
        lidar_bp_4_1.set_attribute('range', '200')
        lidar_bp_4_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_2.set_attribute('channels', '4')
        lidar_bp_4_2.set_attribute('upper_fov', '-7.4')
        lidar_bp_4_2.set_attribute('lower_fov', '-8.6')
        lidar_bp_4_2.set_attribute('points_per_second', '72000')
        lidar_bp_4_2.set_attribute('range', '200')
        lidar_bp_4_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_3.set_attribute('channels', '4')
        lidar_bp_4_3.set_attribute('upper_fov', '-9.2')
        lidar_bp_4_3.set_attribute('lower_fov', '-11')
        lidar_bp_4_3.set_attribute('points_per_second', '72000')
        lidar_bp_4_3.set_attribute('range', '200')
        lidar_bp_4_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_3.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_6_2.set_attribute('channels', '6')
        lidar_bp_6_2.set_attribute('upper_fov', '-12')
        lidar_bp_6_2.set_attribute('lower_fov', '-17')
        lidar_bp_6_2.set_attribute('points_per_second', '108000')
        lidar_bp_6_2.set_attribute('range', '200')
        lidar_bp_6_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_6_3.set_attribute('channels', '6')
        lidar_bp_6_3.set_attribute('upper_fov', '-19')
        lidar_bp_6_3.set_attribute('lower_fov', '-29')
        lidar_bp_6_3.set_attribute('points_per_second', '108000')
        lidar_bp_6_3.set_attribute('range', '200')
        lidar_bp_6_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_3.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_2_2.set_attribute('channels', '2')
        lidar_bp_2_2.set_attribute('upper_fov', '-32')
        lidar_bp_2_2.set_attribute('lower_fov', '-35')
        lidar_bp_2_2.set_attribute('points_per_second', '36000')
        lidar_bp_2_2.set_attribute('range', '200')
        lidar_bp_2_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_2_3.set_attribute('channels', '2')
        lidar_bp_2_3.set_attribute('upper_fov', '-38')
        lidar_bp_2_3.set_attribute('lower_fov', '-42')
        lidar_bp_2_3.set_attribute('points_per_second', '36000')
        lidar_bp_2_3.set_attribute('range', '200')
        lidar_bp_2_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_3.set_attribute('noise_stddev', noise_stddev)

        # All lidars share the same fixed roadside transform (not attached to
        # any vehicle), so their clouds can be concatenated directly.
        # cam01 = world.spawn_actor(cam_bp, camera01_trans,  attach_to=None)
        lidar_2_1 = world.spawn_actor(lidar_bp_2_1, lidar_trans, attach_to=None)
        lidar_2_2 = world.spawn_actor(lidar_bp_2_2, lidar_trans, attach_to=None)
        lidar_2_3 = world.spawn_actor(lidar_bp_2_3, lidar_trans, attach_to=None)
        lidar_4_1 = world.spawn_actor(lidar_bp_4_1, lidar_trans, attach_to=None)
        lidar_4_2 = world.spawn_actor(lidar_bp_4_2, lidar_trans, attach_to=None)
        lidar_4_3 = world.spawn_actor(lidar_bp_4_3, lidar_trans, attach_to=None)
        lidar_6_1 = world.spawn_actor(lidar_bp_6_1, lidar_trans, attach_to=None)
        lidar_6_2 = world.spawn_actor(lidar_bp_6_2, lidar_trans, attach_to=None)
        lidar_6_3 = world.spawn_actor(lidar_bp_6_3, lidar_trans, attach_to=None)
        lidar_28 = world.spawn_actor(lidar_bp_28, lidar_trans, attach_to=None)
        # lidar_64 = world.spawn_actor(lidar_bp_64, lidar_trans, attach_to=None)

        # Every sensor pushes into the shared sensor_queue, tagged by name.
        # cam01.listen(lambda data: sensor_callback(data, sensor_queue, "rgb_camera01"))
        lidar_2_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_1"))
        lidar_2_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_2"))
        lidar_2_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_3"))
        lidar_4_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_1"))
        lidar_4_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_2"))
        lidar_4_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_3"))
        lidar_6_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_1"))
        lidar_6_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_2"))
        lidar_6_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_3"))
        lidar_28.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_28"))
        # lidar_64.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_64"))

        # Track the sensors so the finally block can destroy them.
        # sensor_list.append(cam01)
        sensor_list.append(lidar_2_1)
        sensor_list.append(lidar_2_2)
        sensor_list.append(lidar_2_3)
        sensor_list.append(lidar_4_1)
        sensor_list.append(lidar_4_2)
        sensor_list.append(lidar_4_3)
        sensor_list.append(lidar_6_1)
        sensor_list.append(lidar_6_2)
        sensor_list.append(lidar_6_3)
        sensor_list.append(lidar_28)
        # sensor_list.append(lidar_64)

        # Load the recorded track; each top-level key is one replay timestep
        # mapping recorded actor ids to their pose and blueprint type.
        with open(realdata) as sim_file:
            sim_data_dic = json.load(sim_file)
            print(type(sim_data_dic))
        real_iterator = sim_data_dic.__iter__()

        if SHOW:
            # Live open3d point-cloud window, created once and updated per tick.
            point_cloud = o3d.geometry.PointCloud()
            vis = o3d.visualization.Visualizer()
            vis.create_window(window_name='Wanji 64line Lidar', width=600, height=400, left=300, top=150, visible=True)
            vis.add_geometry(point_cloud)

            render_option = vis.get_render_option()
            render_option.point_size = 1.0
            render_option.background_color = np.asarray([0, 0, 0])  # background color: 0 is black, 1 is white
            to_reset_view_point = True

        while True:

            world.tick()
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            w_timestamp = get_time_stamp(p_timestamp)
            startt = get_time_stamp(time.time())
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))

            try:
                # rgbs = []
                # lidars = []
                splicing = []
                # Advance the replay by one timestep; stop when the log ends.
                try:
                    timestep = real_iterator.__next__()
                except StopIteration:
                    break
                nowids = list(sim_data_dic[timestep].keys())
                print(len(nowids))
                # Recorded ids present last step but absent now must be
                # destroyed.  ("destodyed"/"spawed" are pre-existing typos
                # kept throughout the file.)
                destodyed_ids = [id for id in spawed_ids if id not in nowids]
                for did in destodyed_ids:
                    carlaid = spawed_ids[did]
                    vehicle = world.get_actor(carlaid)

                    if vehicle is not None:
                        vehicle.destroy()
                    del spawed_ids[did]
                vehicles_id_list.clear
                # NOTE(review): the line above is a bare attribute access, not
                # a call, so it does NOT clear the list.  Beware that actually
                # calling .clear() would also break the batch destroy in the
                # finally block, which relies on the accumulated ids; the
                # no-op line can likely just be removed — confirm intent.

                errorids = []
                # num = 0
                # nummax = 1

                # Add the traffic flow recorded for this timestep.
                for k, v in sim_data_dic[timestep].items():
                    # if num > nummax:
                    #     continue
                    # num += 1
                    loc = v['Location']
                    rot = v['Rotation']
                    # Flip the recorded heading by 180 degrees, then wrap.
                    yaw = rot[1] - 180
                    # NOTE(review): if the recorded yaw is within [0, 360),
                    # rot[1] - 180 can never exceed 360, so the first branch
                    # looks unreachable — confirm the expected input range.
                    if yaw > 360:
                        yaw -= 360
                    elif yaw < 0:
                        yaw += 360
                    trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                      carla.Rotation(rot[0], yaw, rot[2]))
                    vehtype = v['type']
                    if k not in spawed_ids.keys():
                        bp1 = world.get_blueprint_library().find(vehtype)
                        # Best-effort: not every blueprint exposes a 'color'
                        # attribute, so failures are ignored.
                        try:
                            bp1.set_attribute('color', '0,0,0')
                        except:
                            pass
                        # else:
                        #     pass

                        batch = [
                                carla.command.SpawnActor(bp1, trans).then(
                                    carla.command.SetSimulatePhysics(carla.command.FutureActor, True))
                            ]
                        response = client.apply_batch_sync(batch, False)[0]
                        if response.error:
                            errorids.append(k)
                        else:
                            # Map the recorded id to the CARLA actor id.
                            spawed_ids[k] = response.actor_id
                            vehicles_id_list.append(response.actor_id)
                        # if vehtype == 'walker.pedestrian.0013':
                        #     batch = [
                        #         carla.command.SpawnActor(bp1, trans).then(
                        #             carla.command.SetSimulatePhysics(carla.command.FutureActor, False))
                        #     ]
                        #     response = client.apply_batch_sync(batch, False)[0]
                        #     if response.error:
                        #         errorids.append(k)
                        #     else:
                        #         spawed_ids[k] = response.actor_id
                        #     # wid = walkresults.actor_id
                        #     # controlbatch = [carla.command.SpawnActor(walker_controller_bp, carla.Transform(), wid)]
                        #     # controlresults = client.apply_batch_sync(controlbatch, False)[0]
                        #     # cid = controlresults.actor_id
                        #     # cactor = world.get_actor(cid)
                        #     # cactor.start()
                        #     # spawed_ids[k] = cid
                        # else:
                        #     batch = [
                        #         carla.command.SpawnActor(bp1, trans).then(
                        #             carla.command.SetSimulatePhysics(carla.command.FutureActor, False))
                        #     ]
                        #     response = client.apply_batch_sync(batch, False)[0]
                        #     # spawed_ids[k] = response.actor_id
                        #     if response.error:
                        #         errorids.append(k)
                        #     else:
                        #         spawed_ids[k] = response.actor_id
                        #     #     logging.error('Spawn carla actor failed. %s', response.error)
                        #     #     return INVALID_ACTOR_ID
                    # else:
                    #     if vehtype == 'walker.pedestrian.0013':
                    #         cid = spawed_ids[k]
                    #         cactor = world.get_actor(cid)
                    #         cactor.set_transform(trans)

                    #         # cactor.go_to_location(trans.location)
                    #         # cactor.set_max_speed(float(walker_speed))
                    #     else:
                    #         carlaid = spawed_ids[k]
                    #         vehicle = world.get_actor(carlaid)
                    #         if vehicle is not None:
                    #             vehicle.set_transform(trans)
                print("已有：", len(spawed_ids))

                # destodyed_ids = [id for id in spawed_ids if id not in nowids]
                # for did in destodyed_ids:
                #     carlaid = spawed_ids[did]
                #     vehicle = world.get_actor(carlaid)

                #     if vehicle is not None:
                #         vehicle.destroy()
                #     del spawed_ids[did]
                print("更新：", len(nowids), "销毁:", len(destodyed_ids))
                print(errorids)

                # Drain one measurement per registered sensor.  A 1 s timeout
                # raises queue.Empty, handled at the bottom of the loop.
                for i in range(0, len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                    splicing.append(_parse_lidar_cb(s_data))
                    # sensor_type = s_name.split('_')[0]
                    # print("    Frame: %d   Sensor: %s" % (s_frame, s_name))
                #
                    # if sensor_type == 'rgb':
                    #     rgbs.append(_parse_image_cb(s_data))
                    # if sensor_type == 'lidar':
                    #     if s_name != 'lidar_64':
                    #         splicing.append(_parse_lidar_cb(s_data))
                    #     else:
                    #         lidars.append(_parse_lidar_cb(s_data))
                            # s_data.save_to_disk(save_path + str(w_frame) + "_" + s_name + ".ply")
                if splicing:
                    # Stack all lidar slices into one combined cloud.
                    concat_points = np.concatenate(splicing, axis=0)
                    pcd_path = save_path + "splice/"+ str(w_timestamp) + "_" + "splice" + ".pcd"
                    # Flip the y axis (presumably to convert between CARLA's
                    # left-handed frame and a right-handed one — TODO confirm);
                    # a vectorized ``concat_points[:, 1] *= -1`` would do the
                    # same without the Python-level list round-trip.
                    concat_points[:, 1] = [-p for p in concat_points[:, 1]]
                    # points2pcd(pcd_path, concat_points)

                    if SHOW:
                        points1 = np.array(concat_points)[:, :3]
                        colors1 = np.array(concat_points)[:, 3]
                        # print(points1)
                        point_cloud.points = o3d.utility.Vector3dVector(points1)
                        points_intensity = np.array(concat_points)[:, 3]  # intensity 
                        # print(points_intensity)
                        # r = 0
                        # b = 0
                        # g = 0
                        # Intensity-based coloring is commented out below; every
                        # point currently gets the same fixed blue color.
                        colors = []
                        for num in points_intensity:
                        #     int1 = num * 255
                        #     if int1 <= 33:
                        #         r = 0
                        #         b = 255
                        #         g = int(7.727 * int1)
                        #     elif int1 >33 and int1 <= 66:
                        #         r = 0
                        #         b = int(255 - 7.727 * (int1 - 34))
                        #         g = 255
                        #     elif int1 > 66 and int1 <= 100:
                        #         r = int(7.727 * (int1 - 67))
                        #         b = 0
                        #         g = 255
                        #     elif  int1 > 100 and int1 <= 255:
                        #         r = 255
                        #         b = 0
                        #         g = int(255 - 7.727 * (int1 - 100)/4.697)
                            points_color = [0, 0, 0.8]
                            colors.append(points_color)
                        # print(colors)

                        point_cloud.colors = o3d.utility.Vector3dVector(colors)  # color the cloud (placeholder for intensity coloring)
                        vis.update_geometry(point_cloud)
                        if to_reset_view_point:
                            vis.reset_view_point(True)
                            to_reset_view_point = False
                        vis.poll_events()
                        vis.update_renderer()
                    ######################

                #
                #     # visualization only, can be commented out
                # rgb = np.concatenate(rgbs, axis=1)[..., :3]
                # # lidar64 = visualize_data(rgb, lidars)
                # lidarsplice = visualize_data(rgb, [concat_points])
                # # lidarsplice = visualize_data(splicing)
                # cv2.imshow('64line', lidarsplice)
                # cv2.waitKey(1)

                ##################   open3d   ################

                    ##########################

            except Empty:
                print("    Some of the sensor information is missed")
            # time.sleep(0.1)

    finally:
        # Restore asynchronous mode if this process enabled sync mode.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)

        # Tear down sensors, tracked actors and all spawned vehicles.
        for sensor in sensor_list:
            sensor.destroy()
        for actor in actor_list:
            if actor.is_alive:
                actor.destroy()
        client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_id_list]) #
        print("All cleaned up!")
        time.sleep(0.1)


if __name__ == "__main__":
    # --- module-level configuration shared with main() via globals ---
    # OpenDRIVE file path (only used by the commented-out generate_opendrive_world path)
    word_path = r"/home/wanji/下载/carla_test/net_files/SDOpenDrive.xodr"
    # sub-folder names created by mkdir_folder()
    sensor_type = ['rgb', 'lidar']
    # shared queue fed by every sensor's listen() callback
    sensor_queue = Queue()
    actor_list, sensor_list = [], []
    # CARLA actor ids of vehicles spawned during the replay (batch-destroyed on exit)
    vehicles_id_list = []
    # output root for point clouds and recorded replay JSON input
    save_path = '/home/wanji/下载/carla_test/output/real_car_simu/'
    realdata = '/home/wanji/下载/carla_test/output/toveiw_track.json'
    # if os.path.exists(save_path):
    #     for fileList in os.walk(save_path):
    #         for name in fileList[2]:
    #             os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)
    #             os.remove(os.path.join(fileList[0], name))
    #     shutil.rmtree(save_path)
    # recorded-id -> CARLA actor id mapping maintained by main() ("spawed" is a pre-existing typo)
    spawed_ids = {}

    # os.mkdir(save_path)
    IM_WIDTH = 256 * 1
    IM_HEIGHT = 256 * 1
    SHOW = False # whether to display the live lidar point cloud with open3d

    # view_transform = Transform(Location(x=-725, y=-310.90056100583038, z=200),
    # debug-marker location drawn at startup
    view_transform = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017), 
                            Rotation(pitch=0.1249, yaw=100.3, roll=0.0))
    # camera01_trans = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017), 
    #                         Rotation(pitch=0.1249, yaw=0.3, roll=0.0))

    # lidar_trans = Transform(Location(x=241.61057899409823, y=-90.90056100583038, z=45.3),
    #                            Rotation(pitch=0, yaw=-13, roll=0.000000))
    # vehicle_trans1 = Transform(Location(x=240.45972426616333, y=-119.58177141749717, z=38.0),
    #                            Rotation(pitch=0.000000, yaw=76, roll=0.000000))
    # fixed roadside pose shared by all ten lidars (and the spectator camera)
    lidar_trans = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017), 
                            Rotation(pitch=0.1249, yaw=260.3, roll=0.0))

    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')