#!/usr/bin/env python

import carla
import numpy as np
import time
import random
from queue import Queue, Empty
import cv2
import os
import copy
import logging
from carla import Transform, Rotation, Location
import open3d as o3d
from matplotlib import cm


# Color lookup table sampled from a matplotlib colormap.
# NOTE(review): despite the name VIRIDIS, the 'plasma' colormap is what is
# actually sampled here.  Also, cm.get_cmap is deprecated since matplotlib
# 3.7 (removed in 3.9); newer matplotlib should use matplotlib.colormaps.
VIRIDIS = np.array(cm.get_cmap('plasma').colors)
# Evenly spaced sample positions in [0, 1], one per colormap entry; used as
# the x-coordinates for np.interp when mapping lidar intensity to RGB.
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])

def points2pcd(PCD_FILE_PATH, points):
    """Write an (N, 4) point array [x, y, z, intensity] to an ASCII .pcd file.

    Any existing file at PCD_FILE_PATH is overwritten.

    :param PCD_FILE_PATH: destination path of the .pcd file
    :param points: numpy array of shape (N, 4); only the first 4 columns
                   of each row are written
    """
    point_num = points.shape[0]
    # PCD header (important): the field layout must match the 4-float rows
    # written below.  Byte-for-byte identical to the previous output.
    header = (
        '# .PCD v0.7 - Point Cloud Data file format\n'
        'VERSION 0.7\n'
        'FIELDS x y z intensity\n'
        'SIZE 4 4 4 4\n'
        'TYPE F F F F\n'
        'COUNT 1 1 1 1\n'
        'WIDTH ' + str(point_num) + '\n'
        'HEIGHT 1\n'
        'VIEWPOINT 0 0 0 1 0 0 0\n'
        'POINTS ' + str(point_num) + '\n'
        'DATA ascii')
    # Mode 'w' truncates any existing file, so the old remove-then-append
    # dance is unnecessary; the context manager guarantees the handle is
    # closed even if a write raises (the old code leaked it on error).
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write(header)
        # One "x y z intensity" line per point.
        for row in points:
            handle.write('\n' + ' '.join(str(v) for v in row[:4]))

def get_time_stamp(ct):
    """Format a float epoch time as a local-time stamp with milliseconds.

    :param ct: time as a float (seconds since the epoch)
    :return: string of the form 'HH-MM-SS-mmm' in local time
    """
    # Clock part only (the old code formatted the full date then split it off).
    clock = time.strftime("%H-%M-%S", time.localtime(ct))
    # Fractional seconds, truncated (not rounded) to whole milliseconds.
    millis = int((ct - int(ct)) * 1000)
    return f"{clock}-{millis:03d}"

def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Enqueue one sensor measurement for the main loop to consume.

    Pushes a (frame, timestamp, sensor_name, raw_data) tuple onto the shared
    queue; all processing/saving happens on the consumer side.
    """
    record = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(record)

# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    return points

def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array

def _apply_lidar_settings(lidar_bp, channels, upper_fov, lower_fov,
                          points_per_second, rotation_frequency):
    """Configure one ray-cast lidar blueprint (one vertical slice of the
    combined multi-lidar sensor).

    All slices share range, rotation frequency and the "ideal sensor"
    attributes; only channel count, vertical FOV window and point rate
    differ per slice.  All values are strings, as CARLA attributes require.
    """
    lidar_bp.set_attribute('channels', channels)
    lidar_bp.set_attribute('upper_fov', upper_fov)
    lidar_bp.set_attribute('lower_fov', lower_fov)
    lidar_bp.set_attribute('points_per_second', points_per_second)
    lidar_bp.set_attribute('range', '200')
    lidar_bp.set_attribute('rotation_frequency', rotation_frequency)
    lidar_bp.set_attribute('atmosphere_attenuation_rate', '0.004')
    # Disable every stochastic effect so each emitted ray yields a point.
    lidar_bp.set_attribute('dropoff_general_rate', '0.0')
    lidar_bp.set_attribute('dropoff_intensity_limit', '0.0')
    lidar_bp.set_attribute('dropoff_zero_intensity', '0.0')
    lidar_bp.set_attribute('noise_seed', '0.0')
    lidar_bp.set_attribute('noise_stddev', '0.0')


def main():
    """Run one synchronous CARLA data-collection session.

    Connects to a local CARLA server, hides the map's auto-generated traffic
    lights/poles, spawns up to 100 autopilot vehicles, then places two RGB
    cameras and ten statically-mounted lidars (vertical slices that together
    emulate one multi-line lidar) at fixed world poses.  Each simulation tick
    drains one measurement per sensor from the shared queue, optionally saves
    camera PNGs and a fused .pcd point cloud, and optionally displays the
    fused cloud via Open3D.  Runs until interrupted; cleanup (restoring async
    mode, destroying actors) happens in the ``finally`` block.

    Reads the module-level globals: sensor_queue, actor_list, sensor_list,
    SAVE, SHOW, IM_WIDTH, IM_HEIGHT, the save_* paths and the *_trans poses.
    """
    # ===================== connect to the server =====================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # client connection timeout (seconds)

    # 'yuxiang02' is a custom map that must be installed on the server.
    world = client.load_world('yuxiang02')
    # world = client.get_world()  # use this instead when running inside UE4
    synchronous_master = False

    try:
        # ============ hide auto-generated map decorations ============
        # Make the road network's traffic lights and poles invisible.
        hidden_ids = [obj.id for obj in
                      world.get_environment_objects(carla.CityObjectLabel.TrafficLight)]
        hidden_ids += [obj.id for obj in
                       world.get_environment_objects(carla.CityObjectLabel.Poles)]
        world.enable_environment_objects(hidden_ids, False)

        # ==================== spectator viewpoint ====================
        spectator = world.get_spectator()
        # Mark the lidar mount point with a red 'X' for orientation.
        world.debug.draw_string(lidar_trans.location, 'X', draw_shadow=False,
                                color=carla.Color(r=255, g=0, b=0),
                                life_time=1500, persistent_lines=True)
        spectator.set_transform(view_transform)

        # ========= traffic manager + synchronous world mode ==========
        # TM server port, default 8000.
        traffic_manager = client.get_trafficmanager(8000)
        # Every TM vehicle keeps at least 2 m from the vehicle ahead.
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        # Every TM vehicle drives at 40% below its speed limit.
        traffic_manager.global_percentage_speed_difference(40.0)

        settings = world.get_settings()
        traffic_manager.set_synchronous_mode(True)
        if not settings.synchronous_mode:
            print("开启同步模式")
            synchronous_master = True
            settings.synchronous_mode = True
            settings.fixed_delta_seconds = 0.1
            world.apply_settings(settings)
        else:
            synchronous_master = False
        # If the world was already synchronous, fixed_delta_seconds may be
        # None; fall back to 0.1 s so the lidar rotation rate stays defined
        # (the old code crashed with a TypeError in that case).
        delta_seconds = settings.fixed_delta_seconds or 0.1
        # One full lidar revolution per simulation step.
        rotation_hz = str(int(1.0 / delta_seconds))

        # Weather: cloudless, no rain, sun directly overhead.
        weather = carla.WeatherParameters(sun_altitude_angle=90.0)
        world.set_weather(weather)

        blueprint_library = world.get_blueprint_library()

        # ==================== background traffic =====================
        blueprints = blueprint_library.filter("vehicle.*")
        # Keep only 4-wheeled vehicles (drops bikes/motorcycles).
        blueprints = [bp for bp in blueprints
                      if int(bp.get_attribute('number_of_wheels')) == 4]
        blueprints = sorted(blueprints, key=lambda bp: bp.id)

        spawn_points = world.get_map().get_spawn_points()
        number_of_spawn_points = len(spawn_points)
        number_of_vehicles = 100

        if number_of_vehicles < number_of_spawn_points:
            random.shuffle(spawn_points)
        elif number_of_vehicles > number_of_spawn_points:
            msg = 'requested %d vehicles, but could only find %d spawn points'
            logging.warning(msg, number_of_vehicles, number_of_spawn_points)
            number_of_vehicles = number_of_spawn_points

        # Spawn vehicles in one batch, each immediately handed to the TM.
        batch = []
        hero = False  # no hero vehicle in this script; all are autopilot
        for n, transform in enumerate(spawn_points):
            if n >= number_of_vehicles:
                break
            blueprint = random.choice(blueprints)
            if blueprint.has_attribute('color'):
                color = random.choice(
                    blueprint.get_attribute('color').recommended_values)
                blueprint.set_attribute('color', color)
            if blueprint.has_attribute('driver_id'):
                driver_id = random.choice(
                    blueprint.get_attribute('driver_id').recommended_values)
                blueprint.set_attribute('driver_id', driver_id)
            if hero:
                blueprint.set_attribute('role_name', 'hero')
                hero = False
            else:
                blueprint.set_attribute('role_name', 'autopilot')

            batch.append(
                carla.command.SpawnActor(blueprint, transform).then(
                    carla.command.SetAutopilot(carla.command.FutureActor, True,
                                               traffic_manager.get_port())))

        for response in client.apply_batch_sync(batch, synchronous_master):
            if response.error:
                logging.error(response.error)
            else:
                actor_list.append(response.actor_id)

        # ========================= cameras ===========================
        for cam_trans, cam_name in ((camera_trans_1, "rgb_1"),
                                    (camera_trans_2, "rgb_2")):
            cam_bp = blueprint_library.find('sensor.camera.rgb')
            cam_bp.set_attribute("image_size_x", "{}".format(IM_WIDTH))
            cam_bp.set_attribute("image_size_y", "{}".format(IM_HEIGHT))
            cam_bp.set_attribute("fov", "90")
            cam_bp.set_attribute('sensor_tick', '0.1')
            cam = world.spawn_actor(cam_bp, cam_trans, attach_to=None)
            # Bind the name as a default arg to avoid late-binding in the loop.
            cam.listen(lambda data, name=cam_name:
                       sensor_callback(data, sensor_queue, name))
            sensor_list.append(cam)

        # ========================= lidars ============================
        # (suffix, channels, upper_fov, lower_fov, points_per_second),
        # listed in the original spawn order.  Together the slices cover
        # the vertical FOV of the emulated multi-line lidar.
        lidar_specs = [
            ('2_1', '2', '-1.5', '-1.7', '36000'),
            ('2_2', '2', '-32', '-35', '36000'),
            ('2_3', '2', '-38', '-42', '36000'),
            ('4_1', '4', '-6.1', '-7', '72000'),
            ('4_2', '4', '-7.4', '-8.6', '72000'),
            ('4_3', '4', '-9.2', '-11', '72000'),
            ('6_1', '6', '-4.8', '-5.8', '108000'),
            ('6_2', '6', '-12', '-17', '108000'),
            ('6_3', '6', '-19', '-29', '108000'),
            ('28', '28', '-1.9', '-4.6', '504000'),
        ]
        for suffix, channels, upper, lower, pps in lidar_specs:
            lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')
            _apply_lidar_settings(lidar_bp, channels, upper, lower, pps,
                                  rotation_hz)
            lidar = world.spawn_actor(lidar_bp, lidar_trans, attach_to=None)
            lidar.listen(lambda data, name='lidar_' + suffix:
                         sensor_callback(data, sensor_queue, name))
            sensor_list.append(lidar)

        # =================== Open3D live viewer ======================
        if SHOW:
            point_cloud = o3d.geometry.PointCloud()
            vis = o3d.visualization.Visualizer()
            vis.create_window(window_name='Wanji 64line Lidar', width=600,
                              height=600, left=100, top=200, visible=True)
            vis.add_geometry(point_cloud)
            render_option = vis.get_render_option()
            render_option.point_size = 1.0
            # Background color: 0 is black, 1 is white.
            render_option.background_color = np.asarray([0, 0, 0])
            to_reset_view_point = True

        # ======================== main loop ==========================
        while True:
            world.tick()
            snapshot = world.get_snapshot()
            w_frame = snapshot.frame
            w_timestamp = get_time_stamp(snapshot.timestamp.platform_timestamp)
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))

            try:
                splicing = []      # per-lidar point arrays of this tick
                lidars_frame = []  # matching frame numbers (for the filename)

                # Expect exactly one measurement per sensor per tick.
                for _ in range(len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = \
                        sensor_queue.get(True, 1.0)
                    if s_name in ("rgb_1", "rgb_2"):
                        out_dir = (save_camera1_path if s_name == "rgb_1"
                                   else save_camera2_path)
                        img_path = os.path.join(out_dir,
                                                '%06d.png' % s_data.frame)
                        if SAVE:
                            s_data.save_to_disk(img_path)
                        if SHOW:
                            # The file only exists when SAVE is on; guard
                            # against imread returning None (the old code
                            # crashed in cv2.imshow in that case).
                            image = cv2.imread(img_path)
                            if image is not None:
                                window = ('Image1' if s_name == "rgb_1"
                                          else 'Image2')
                                cv2.imshow(window, image)
                                cv2.waitKey(1)
                    else:
                        splicing.append(_parse_lidar_cb(s_data))
                        lidars_frame.append(s_data.frame)

                # Fuse the slice clouds into one multi-line lidar cloud.
                concat_points = None
                if splicing:
                    concat_points = np.concatenate(splicing, axis=0)
                    # CARLA's Y axis is mirrored; flip it (vectorized, the
                    # old code used a Python list comprehension).
                    concat_points[:, 1] = -concat_points[:, 1]
                    if SAVE:
                        pcd_path = os.path.join(
                            save_pcd_path, "%06d.pcd" % lidars_frame[0])
                        points2pcd(pcd_path, concat_points)

                # Only render when this tick actually produced points (the
                # old code hit a NameError here when SAVE was off).
                if SHOW and concat_points is not None:
                    point_cloud.points = \
                        o3d.utility.Vector3dVector(concat_points[:, :3])
                    intensity = concat_points[:, -1]
                    # Map intensity through the attenuation model into [0, 1]
                    # and then through the colormap.
                    intensity_col = 1.0 - np.log(intensity) / \
                        np.log(np.exp(-0.004 * 100))
                    int_color = np.c_[
                        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 0]),
                        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 1]),
                        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 2])]
                    point_cloud.colors = o3d.utility.Vector3dVector(int_color)
                    vis.update_geometry(point_cloud)
                    if to_reset_view_point:
                        vis.reset_view_point(True)
                        to_reset_view_point = False
                    vis.poll_events()
                    vis.update_renderer()
                    time.sleep(0.005)
            except Empty:
                print("[Warning] Some of the sensor information is missed")

    finally:
        # Restore asynchronous mode only if this process enabled sync mode.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)
        for sensor in sensor_list:
            sensor.destroy()
        client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        print("All cleaned up!")

if __name__ == "__main__":

    # Output locations for the fused lidar .pcd files and the two camera
    # image streams.  NOTE(review): absolute paths; presumably they must
    # already exist — verify before running on another machine.
    save_pcd_path = "/home/gj/Carla_0.9.13/PythonAPI/work/112"
    save_camera1_path = "/home/gj/Carla_0.9.13/PythonAPI/work/11"
    save_camera2_path = "/home/gj/Carla_0.9.13/PythonAPI/work/12"

    # Shared queue fed by every sensor callback, plus bookkeeping lists that
    # main() fills and uses for cleanup.
    sensor_queue = Queue()
    actor_list, sensor_list = [], []

    # Camera image resolution (pixels).
    IM_WIDTH = 256
    IM_HEIGHT = 256

    SAVE = True  # whether to save the lidar point clouds as .pcd files
    SHOW = True      # whether to display the point-cloud / camera data live

    # Poses (position and orientation) for the spectator view, lidar rig and
    # cameras.  The commented block below is an earlier scene kept for reference.
    # view_transform = Transform(Location(x=350, y=-630, z=130), Rotation(pitch=-90, yaw=-90, roll=0))
    # lidar_trans = Transform(Location(x=379, y=-630.72, z=43.017), Rotation(pitch=0, yaw=0, roll=0))
    # camera_trans_1 = Transform(Location(x=379, y=-624.72, z=43.017), Rotation(pitch=0, yaw=0, roll=0))
    # camera_trans_2 = Transform(Location(x=379, y=-624.72, z=43.017), Rotation(pitch=0, yaw=180, roll=0))

    view_transform = Transform(Location(x=-18843, y=-10296, z=300), Rotation(pitch=-90, yaw=-13, roll=0.000000))
    vehicle_trans = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280),
                              Rotation(pitch=0.0, yaw=0, roll=0.0))
    lidar_trans = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280), Rotation(pitch=0, yaw=0, roll=0))
    camera_trans_1 = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280), Rotation(pitch=0, yaw=0, roll=0))
    camera_trans_2 = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280), Rotation(pitch=0, yaw=180, roll=0))

    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
