import carla
import numpy as np
import time
import random
from queue import Queue, Empty
import cv2
import os
import copy
import logging
from carla import Transform, Rotation, Location
import shutil
import stat
import json
import open3d as o3d
import math



def points2pcd(PCD_FILE_PATH, points):
    """Write an (N, 4) point array to an ASCII ``.pcd`` (Point Cloud Data) file.

    :param PCD_FILE_PATH: destination file path; an existing file is overwritten
    :param points: numpy array of shape (N, 4) holding x, y, z, intensity columns
    """
    point_num = points.shape[0]
    # PCD header (important): field layout must match the 4 float32 columns.
    header = (
        '# .PCD v0.7 - Point Cloud Data file format\n'
        'VERSION 0.7\n'
        'FIELDS x y z intensity\n'
        'SIZE 4 4 4 4\n'
        'TYPE F F F F\n'
        'COUNT 1 1 1 1\n'
        'WIDTH {0}\n'
        'HEIGHT 1\n'
        'VIEWPOINT 0 0 0 1 0 0 0\n'
        'POINTS {0}\n'
        'DATA ascii'.format(point_num)
    )
    # 'w' truncates an existing file, so the previous os.remove() call is not
    # needed; the context manager guarantees the handle is closed even if a
    # write fails (the original left the file open on any exception).
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write(header)
        # One "x y z intensity" line per point, each preceded by a newline so
        # the file ends without a trailing newline (same layout as before).
        for row in points:
            handle.write('\n' + ' '.join(str(v) for v in row[:4]))



def get_time_stamp(ct):
    """Format a float epoch time as ``HH-MM-SS-mmm`` in local time.

    :param ct: epoch time in seconds (float)
    :return: formatted timestamp string with a millisecond suffix
    """
    # Keep only the clock portion of the "%Y-%m-%d %H-%M-%S" rendering.
    clock_part = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime(ct)).split(' ')[-1]
    # Fractional seconds, truncated to whole milliseconds.
    millis = int((ct - int(ct)) * 1000)
    return "%s-%03d" % (clock_part, millis)


def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Generic CARLA sensor listener: enqueue the measurement for the main loop.

    Puts a ``(frame, timestamp, sensor_name, sensor_data)`` tuple on the queue;
    the consumer drains one entry per registered sensor each tick.
    """
    item = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(item)


# modify from manual control
def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array


# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    # print(type(points), points.shape)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    # print(type(points), points.shape)
    return points


def _parse_radar_cb(radar_data):
    points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (len(radar_data), 4))
    return points

# modify from world on rail code
def lidar_to_bev(lidar, min_x=-24, max_x=24, min_y=-16, max_y=16, pixels_per_meter=4, hist_max_per_pixel=10):
    xbins = np.linspace(
        min_x, max_x + 1,
               (max_x - min_x) * pixels_per_meter + 1,
    )
    ybins = np.linspace(
        min_y, max_y + 1,
               (max_y - min_y) * pixels_per_meter + 1,
    )
    # Compute histogram of x and y coordinates of points.
    hist = np.histogramdd(lidar[..., :2], bins=(xbins, ybins))[0]
    # Clip histogram
    hist[hist > hist_max_per_pixel] = hist_max_per_pixel
    # Normalize histogram by the maximum number of points in a bin we care about.
    overhead_splat = hist / hist_max_per_pixel * 255.
    # Return splat in X x Y orientation, with X parallel to car axis, Y perp, both parallel to ground.
    return overhead_splat[::-1, :]



def mkdir_folder(path):
    """Create one sub-directory under *path* per entry of the module-level
    ``sensor_type`` list, skipping any that already exist.

    :param path: parent directory for the sensor output folders
    :return: always True
    """
    for s_name in sensor_type:
        target = os.path.join(path, s_name)
        if not os.path.isdir(target):
            os.makedirs(target)
    return True


def draw_waypoints(world, waypoints, road_id=None, life_time=50.0):
    """Highlight every waypoint belonging to one road segment.

    :param world: CARLA world whose debug helper draws the markers
    :param waypoints: list of all map waypoints
    :param road_id: id of the target road segment
    :param life_time: seconds the debug marker stays visible
    :return: the waypoints whose ``road_id`` matched
    """
    matched = [wp for wp in waypoints if wp.road_id == road_id]
    for wp in matched:
        world.debug.draw_string(wp.transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0), life_time=life_time,
                                persistent_lines=True)
    return matched


def main():
    """Replay real-world vehicle trajectories inside a CARLA world while a
    stack of roadside lidar sensors records the scene each tick.

    Per-timestep actor poses are read from the JSON file ``realdata``; actors
    are spawned/updated/destroyed to mirror that data, and the concatenated
    lidar returns are optionally saved as .pcd (SAVE_PCD) and/or shown live
    with open3d (SHOW). Relies on module-level configuration set in the
    ``__main__`` guard: save_path, realdata, SHOW, SAVE_PCD, Correction_yaw,
    view_transform, lidar_trans, sensor_queue, sensor_list, actor_list,
    vehicles_id_list, spawed_ids.
    """
    #  ====================================== connect to the server =============================================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # timeout for this client connection
    # with open(word_path) as od_file:
    #     data = od_file.read()
    # world = client.generate_opendrive_world(data,
    #                                         carla.OpendriveGenerationParameters(
    #                                             wall_height=0))  # use a custom OpenDRIVE map
    world = client.load_world('shangdiwr')   # load the packaged map
    # world = client.get_world() # use the currently loaded map

    # Hide the traffic lights and poles the map generates automatically, so
    # that actor placement does not fail by colliding with them.
    env_objs1 = world.get_environment_objects(carla.CityObjectLabel.TrafficLight)
    env_objs2 = world.get_environment_objects(carla.CityObjectLabel.Poles)

    env_list = []
    for env_obj in env_objs1:
        env_list.append(env_obj.id)
    for env_obj in env_objs2:
        env_list.append(env_obj.id)
    world.enable_environment_objects(env_list, False)
    
    synchronous_master = False

    #  Collect lane-center waypoints inside a region of interest (debug helper)
    # waypoints = world.get_map().generate_waypoints(0.2)
    # v_waypoints = []
    # for waypoint in waypoints:
    #     if waypoint.transform.location.x < 500 and waypoint.transform.location.x > 200:
    #         if waypoint.transform.location.y < -400 and waypoint.transform.location.y > -800:
    #             v_waypoints.append(waypoint)

    try:
        if SAVE_PCD:
            pt = world.get_snapshot().timestamp.platform_timestamp
            ptime = int(pt)
            save_pcd_path = os.path.join(save_path, str(ptime))
            os.makedirs(save_pcd_path) # new folder that stores this run's lidar point clouds

        # ==================================== spectator viewpoint =============================================
        spectator = world.get_spectator()
        world.debug.draw_string(view_transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0), life_time=1500,
                                persistent_lines=True)
        spectator.set_transform(lidar_trans)

        # # Let vehicles drive around the world obeying traffic rules
        # ##################################### world settings: fixed step size and synchronous mode ##################################
        traffic_manager = client.get_trafficmanager(8000)
        traffic_manager.set_global_distance_to_leading_vehicle(0.0)
        traffic_manager.global_percentage_speed_difference(20.0)
        if True:
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not settings.synchronous_mode: # to switch to asynchronous mode, just remove the 'not'
                print("开启同步模式")
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                synchronous_master = False
                # NOTE(review): this branch mutates the local settings object but
                # never calls world.apply_settings, so the step size is not applied.
                settings.fixed_delta_seconds = 0.1

        # # Weather and sun position: cloudless, no rain, sun overhead
        weather = carla.WeatherParameters(
            cloudiness=0.0,  # 0-100: 0 is a clear sky, 100 is fully overcast
            precipitation=0.0,  # 0 means no rain, 100 is heavy rain
            sun_altitude_angle=90.0)  # 90 is noon, -90 is midnight
        world.set_weather(weather)

        # =========================== blueprint library used to look up actor/sensor models
        blueprint_library = world.get_blueprint_library()

        # =========================== instantiate the sensor models ============================
        # ======================================= sensors ==================================
        # Ten ray-cast lidars mounted at the same pose; their channel counts and
        # vertical FOV bands combine into one multi-ring roadside lidar.
        lidar_bp_2_1 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        lidar_bp_2_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_2_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_1 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_1 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_28 = blueprint_library.find('sensor.lidar.ray_cast')

        # Sensor parameters shared by every lidar (noise and drop-off disabled)
        atmosphere_attenuation_rate = '0.004'
        dropoff_general_rate = '0.0'
        dropoff_intensity_limit = '0.0'
        dropoff_zero_intensity = '0.0'
        noise_seed = '0.0'
        noise_stddev = '0.0'

        lidar_bp_2_1.set_attribute('channels', '2')
        lidar_bp_2_1.set_attribute('upper_fov', '-1.5')
        lidar_bp_2_1.set_attribute('lower_fov', '-1.7')
        lidar_bp_2_1.set_attribute('points_per_second', '36000')
        lidar_bp_2_1.set_attribute('range', '200')
        # One full revolution per simulation step, so every tick yields a complete sweep.
        lidar_bp_2_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_28.set_attribute('channels', '28')
        lidar_bp_28.set_attribute('upper_fov', '-1.9')
        lidar_bp_28.set_attribute('lower_fov', '-4.6')
        lidar_bp_28.set_attribute('points_per_second', '504000')
        lidar_bp_28.set_attribute('range', '200')
        lidar_bp_28.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_28.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_28.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_28.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_28.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_28.set_attribute('noise_seed', noise_seed)
        lidar_bp_28.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_6_1.set_attribute('channels', '6')
        lidar_bp_6_1.set_attribute('upper_fov', '-4.8')
        lidar_bp_6_1.set_attribute('lower_fov', '-5.8')
        lidar_bp_6_1.set_attribute('points_per_second', '108000')
        lidar_bp_6_1.set_attribute('range', '200')
        lidar_bp_6_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_1.set_attribute('channels', '4')
        lidar_bp_4_1.set_attribute('upper_fov', '-6.1')
        lidar_bp_4_1.set_attribute('lower_fov', '-7')
        lidar_bp_4_1.set_attribute('points_per_second', '72000')
        lidar_bp_4_1.set_attribute('range', '200')
        lidar_bp_4_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_1.set_attribute('noise_stddev', noise_stddev)
        
        lidar_bp_4_2.set_attribute('channels', '4')
        lidar_bp_4_2.set_attribute('upper_fov', '-7.4')
        lidar_bp_4_2.set_attribute('lower_fov', '-8.6')
        lidar_bp_4_2.set_attribute('points_per_second', '72000')
        lidar_bp_4_2.set_attribute('range', '200')
        lidar_bp_4_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_3.set_attribute('channels', '4')
        lidar_bp_4_3.set_attribute('upper_fov', '-9.2')
        lidar_bp_4_3.set_attribute('lower_fov', '-11')
        lidar_bp_4_3.set_attribute('points_per_second', '72000')
        lidar_bp_4_3.set_attribute('range', '200')
        lidar_bp_4_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_3.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_6_2.set_attribute('channels', '6')
        lidar_bp_6_2.set_attribute('upper_fov', '-12')
        lidar_bp_6_2.set_attribute('lower_fov', '-17')
        lidar_bp_6_2.set_attribute('points_per_second', '108000')
        lidar_bp_6_2.set_attribute('range', '200')
        lidar_bp_6_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_2.set_attribute('noise_stddev', noise_stddev)
    
        lidar_bp_6_3.set_attribute('channels', '6')
        lidar_bp_6_3.set_attribute('upper_fov', '-19')
        lidar_bp_6_3.set_attribute('lower_fov', '-29')
        lidar_bp_6_3.set_attribute('points_per_second', '108000')
        lidar_bp_6_3.set_attribute('range', '200')
        lidar_bp_6_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_3.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_2_2.set_attribute('channels', '2')
        lidar_bp_2_2.set_attribute('upper_fov', '-32')
        lidar_bp_2_2.set_attribute('lower_fov', '-35')
        lidar_bp_2_2.set_attribute('points_per_second', '36000')
        lidar_bp_2_2.set_attribute('range', '200')
        lidar_bp_2_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_2_3.set_attribute('channels', '2')
        lidar_bp_2_3.set_attribute('upper_fov', '-38')
        lidar_bp_2_3.set_attribute('lower_fov', '-42')
        lidar_bp_2_3.set_attribute('points_per_second', '36000')
        lidar_bp_2_3.set_attribute('range', '200')
        lidar_bp_2_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_3.set_attribute('noise_stddev', noise_stddev)

        # All lidars are spawned at the same static roadside pose (not attached to a vehicle).
        lidar_2_1 = world.spawn_actor(lidar_bp_2_1, lidar_trans, attach_to=None)
        lidar_2_2 = world.spawn_actor(lidar_bp_2_2, lidar_trans, attach_to=None)
        lidar_2_3 = world.spawn_actor(lidar_bp_2_3, lidar_trans, attach_to=None)
        lidar_4_1 = world.spawn_actor(lidar_bp_4_1, lidar_trans, attach_to=None)
        lidar_4_2 = world.spawn_actor(lidar_bp_4_2, lidar_trans, attach_to=None)
        lidar_4_3 = world.spawn_actor(lidar_bp_4_3, lidar_trans, attach_to=None)
        lidar_6_1 = world.spawn_actor(lidar_bp_6_1, lidar_trans, attach_to=None)
        lidar_6_2 = world.spawn_actor(lidar_bp_6_2, lidar_trans, attach_to=None)
        lidar_6_3 = world.spawn_actor(lidar_bp_6_3, lidar_trans, attach_to=None)
        lidar_28 = world.spawn_actor(lidar_bp_28, lidar_trans, attach_to=None)

        lidar_2_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_1"))
        lidar_2_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_2"))
        lidar_2_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_3"))
        lidar_4_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_1"))
        lidar_4_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_2"))
        lidar_4_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_3"))
        lidar_6_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_1"))
        lidar_6_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_2"))
        lidar_6_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_3"))
        lidar_28.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_28"))
        
        sensor_list.append(lidar_2_1)
        sensor_list.append(lidar_2_2)
        sensor_list.append(lidar_2_3)
        sensor_list.append(lidar_4_1)
        sensor_list.append(lidar_4_2)
        sensor_list.append(lidar_4_3)
        sensor_list.append(lidar_6_1)
        sensor_list.append(lidar_6_2)
        sensor_list.append(lidar_6_3)
        sensor_list.append(lidar_28)

        # Per-timestep real-vehicle poses; iterated one key per world tick.
        # Assumes keys iterate in chronological insertion order — TODO confirm
        # against how the JSON was produced.
        with open(realdata) as sim_file:
            sim_data_dic = json.load(sim_file)
            print(type(sim_data_dic))
        real_iterator = sim_data_dic.__iter__()

        if SHOW:
            # Live open3d viewer for the merged point cloud.
            point_cloud = o3d.geometry.PointCloud()
            vis = o3d.visualization.Visualizer()
            vis.create_window(window_name='Wanji 64line Lidar', width=600, height=400, left=300, top=150, visible=True)
            vis.add_geometry(point_cloud)

            render_option = vis.get_render_option()
            render_option.point_size = 1.0
            render_option.background_color = np.asarray([0, 0, 0])  # background color: 0 is black, 1 is white
            to_reset_view_point = True

        while True:
            world.tick()
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            w_timestamp = get_time_stamp(p_timestamp)
            startt = get_time_stamp(time.time())  # wall-clock stamp at loop start (currently unused)
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))

            try:
                splicing = []
                try:
                    timestep = real_iterator.__next__()
                except StopIteration:
                    # All recorded timesteps replayed -> leave the main loop.
                    break
                nowids = list(sim_data_dic[timestep].keys())
                print(len(nowids))
                
                errorids = []

                # Add / update the traffic flow for this timestep
                for k, v in sim_data_dic[timestep].items():
                    loc = v['Location']
                    rot = v['Rotation'] 
                    trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                      carla.Rotation(rot[0], rot[1], rot[2]))
                    vehtype = v['type']
                    if k not in spawed_ids.keys():
                        # First time this actor id appears -> spawn it.
                        bp1 = world.get_blueprint_library().find(vehtype)
                        try:
                            # Not every blueprint exposes 'color' (e.g. walkers); best effort.
                            bp1.set_attribute('color', '0,0,0')
                        except:
                            pass
                        else:
                            pass

                        batch = [
                                carla.command.SpawnActor(bp1, trans).then(
                                    carla.command.SetSimulatePhysics(carla.command.FutureActor, True))
                            ]
                        response = client.apply_batch_sync(batch, False)[0]
                    
                        if response.error:
                            errorids.append(k)
                        else:
                            spawed_ids[k] = response.actor_id
                            vehicles_id_list.append(response.actor_id)
                        # if vehtype == 'walker.pedestrian.0013':
                        #     batch = [
                        #         carla.command.SpawnActor(bp1, trans).then(
                        #             carla.command.SetSimulatePhysics(carla.command.FutureActor, False))
                        #     ]
                        #     response = client.apply_batch_sync(batch, False)[0]
                        #     if response.error:
                        #         errorids.append(k)
                        #     else:
                        #         spawed_ids[k] = response.actor_id
                        #     # wid = walkresults.actor_id
                        #     # controlbatch = [carla.command.SpawnActor(walker_controller_bp, carla.Transform(), wid)]
                        #     # controlresults = client.apply_batch_sync(controlbatch, False)[0]
                        #     # cid = controlresults.actor_id
                        #     # cactor = world.get_actor(cid)
                        #     # cactor.start()
                        #     # spawed_ids[k] = cid
                        # else:
                        #     batch = [
                        #         carla.command.SpawnActor(bp1, trans).then(
                        #             carla.command.SetSimulatePhysics(carla.command.FutureActor, False))
                        #     ]
                        #     response = client.apply_batch_sync(batch, False)[0]
                        #     # spawed_ids[k] = response.actor_id
                        #     if response.error:
                        #         errorids.append(k)
                        #     else:
                        #         spawed_ids[k] = response.actor_id
                        #     #     logging.error('Spawn carla actor failed. %s', response.error)
                        #     #     return INVALID_ACTOR_ID
                    else:
                        # Actor already exists -> update its pose for this frame.
                        if vehtype == 'walker.pedestrian.0013':
                            cid = spawed_ids[k]
                            cactor = world.get_actor(cid)
                            if Correction_yaw:
                                # Snap the heading to the nearest driving-lane waypoint.
                                v_waypoint = world.get_map().get_waypoint(
                                    cactor.get_location(), project_to_road=True, 
                                    lane_type=(carla.LaneType.Driving))
                                yaw = v_waypoint.transform.rotation.yaw
                                inside_junction = v_waypoint.is_junction
                                if inside_junction:
                                    # Inside a junction lane headings are unreliable;
                                    # derive yaw from the frame-to-frame displacement.
                                    dx = trans.location.x - cactor.get_location().x
                                    dy = trans.location.y - cactor.get_location().y
                                    if dx != 0.0:
                                        yaw = math.atan2(dy, dx) * 180.0 / math.pi
                                    else:
                                        yaw = cactor.get_transform().rotation.yaw
                                trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                        carla.Rotation(rot[0], yaw, rot[2]))

                            cactor.set_transform(trans)

                            # cactor.go_to_location(trans.location)
                            # cactor.set_max_speed(float(walker_speed))
                        else:
                            carlaid = spawed_ids[k]
                            vehicle = world.get_actor(carlaid)
                            if Correction_yaw:
                                # Waypoint nearest the vehicle's current location
                                v_waypoint = world.get_map().get_waypoint(
                                    vehicle.get_location(), project_to_road=True, 
                                    lane_type=(carla.LaneType.Driving))
                                yaw = v_waypoint.transform.rotation.yaw
                                inside_junction = v_waypoint.is_junction
                                # inside_junction = False
                                if inside_junction:
                                    # Same displacement-based fallback as for the walker above.
                                    dx = trans.location.x - vehicle.get_location().x
                                    dy = trans.location.y - vehicle.get_location().y
                                    if dx != 0.0:
                                        yaw = math.atan2(dy, dx) * 180.0 / math.pi
                                    else:
                                        yaw = vehicle.get_transform().rotation.yaw
                                trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                        carla.Rotation(rot[0], yaw, rot[2]))

                            vehicle.set_transform(trans)
                            # if vehicle is not None:
                            #     vehicle.set_transform(trans)
                print("已有：", len(spawed_ids))

                # Actors absent from the current timestep are removed from the world.
                destodyed_ids = [id for id in spawed_ids if id not in nowids]
                for did in destodyed_ids:
                    carlaid = spawed_ids[did]
                    vehicle = world.get_actor(carlaid)

                    if vehicle is not None:
                        vehicle.destroy()
                    del spawed_ids[did]
                print("更新：", len(nowids), "销毁:", len(destodyed_ids))
                print(errorids)
                print(timestep)

                # Drain one measurement per registered lidar and merge them.
                for i in range(0, len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                    splicing.append(_parse_lidar_cb(s_data))
                if splicing:
                    concat_points = np.concatenate(splicing, axis=0)
                    # NOTE(review): save_pcd_path is only bound when SAVE_PCD is
                    # True; with SAVE_PCD False this line raises UnboundLocalError.
                    pcd_path = save_pcd_path + "/" + str(w_timestamp) + ".pcd"
                    # Negate the y column — presumably converting out of CARLA's
                    # left-handed frame; TODO confirm against the consumer.
                    concat_points[:, 1] = [-p for p in concat_points[:, 1]]
                    if SAVE_PCD:
                        points2pcd(pcd_path, concat_points)

                    if SHOW:
                        points1 = np.array(concat_points)[:, :3]
                        colors1 = np.array(concat_points)[:, 3]
                        
                        point_cloud.points = o3d.utility.Vector3dVector(points1)
                        points_intensity = np.array(concat_points)[:, 3]  # intensity 
                        # print(points_intensity)
                        # r = 0
                        # b = 0
                        # g = 0
                        colors = []
                        for num in points_intensity:
                        #     intensity = num * 255
                        #     if intensity <= 33:
                        #         r = 0
                        #         b = 255
                        #         g = int(7.727 * intensity)
                        #     elif intensity >33 and intensity <= 66:
                        #         r = 0
                        #         b = int(255 - 7.727 * (intensity - 34))
                        #         g = 255
                        #     elif intensity > 66 and intensity <= 100:
                        #         r = int(7.727 * (intensity - 67))
                        #         b = 0
                        #         g = 255
                        #     elif  intensity > 100 and intensity <= 255:
                        #         r = 255
                        #         b = 0
                        #         g = int(255 - 7.727 * (intensity - 100)/4.697)
                            points_color = [0, 0, 0.8]
                            colors.append(points_color)
                        # print(colors)
                        # Color the cloud by intensity; variation is small, so a
                        # fixed blue [0, 0, 0.8] is used instead of the ramp above.
                        point_cloud.colors = o3d.utility.Vector3dVector(colors)   
                        vis.update_geometry(point_cloud)
                        if to_reset_view_point:
                            vis.reset_view_point(True)
                            to_reset_view_point = False
                        vis.poll_events()
                        vis.update_renderer()
                    ######################
      
            except Empty:
                print("    Some of the sensor information is missed")
            # time.sleep(0.1)

    finally:
        # Restore asynchronous mode only if this run enabled synchronous mode.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)

        # Tear down sensors and any remaining actors.
        for sensor in sensor_list:
            sensor.destroy()
        for actor in actor_list:
            if actor.is_alive:
                actor.destroy()
        client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_id_list]) #
        print("All cleaned up!")
        # time.sleep(0.1)


if __name__ == "__main__":
    word_path = r"/home/wanji/下载/carla_test/net_files/SDOpenDrive.xodr"
    sensor_type = ['rgb', 'lidar']
    sensor_queue = Queue()
    actor_list, sensor_list = [], []
    vehicles_id_list = []
    save_path = '/home/wanji/下载/carla_test/output/real_car_simu/'
    realdata = '/home/wanji/下载/carla_test/output/toveiw_51.json'
    # if os.path.exists(save_path):
    #     for fileList in os.walk(save_path):
    #         for name in fileList[2]:
    #             os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)
    #             os.remove(os.path.join(fileList[0], name))
    #     shutil.rmtree(save_path)
    spawed_ids = {}

    # os.mkdir(save_path)
    IM_WIDTH = 256 * 1
    IM_HEIGHT = 256 * 1
    SHOW = False # 是否使用open3d进行实时显示雷达点云
    SAVE_PCD = False # 是否在save_path路径下新建文件夹保存雷达点云pcd文件
    Correction_yaw = True  # 是否修正跟踪车辆的航向角，根据车辆所在位置最近的waypoint的航向角信息修正

    view_transform = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017), 
                            Rotation(pitch=0.1249, yaw=100.3, roll=0.0))
    
    lidar_trans = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017), 
                            Rotation(pitch=0.1249, yaw=260.3, roll=0.0))

    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
