#!/usr/bin/env python


from __future__ import print_function


# ==============================================================================
# -- find carla module ---------------------------------------------------------
# ==============================================================================





import carla
import numpy as np
import time
from queue import Queue, Empty
import cv2
import os
import copy
from carla import Transform, Rotation, Location
import shutil
import stat
import json
from carla import Map
from carla import Vector3D
import math
from carla import ColorConverter as cc
# import matplotlib.pyplot as plt
import open3d as o3d
# import threading

def points2pcd(PCD_FILE_PATH, points):
    """Write an (N, 4) point cloud (x, y, z, intensity) to an ASCII .pcd file.

    :param PCD_FILE_PATH: destination path; any existing file is overwritten
    :param points: numpy array of shape (N, 4)
    """
    # Number of points in the cloud.
    point_num = points.shape[0]
    # PCD header (important): field layout must match the 4 float columns.
    header = (
        '# .PCD v0.7 - Point Cloud Data file format\n'
        'VERSION 0.7\n'
        'FIELDS x y z intensity\n'
        'SIZE 4 4 4 4\n'
        'TYPE F F F F\n'
        'COUNT 1 1 1 1\n'
        'WIDTH {0}\n'
        'HEIGHT 1\n'
        'VIEWPOINT 0 0 0 1 0 0 0\n'
        'POINTS {0}\n'
        'DATA ascii'.format(point_num)
    )
    # Mode 'w' truncates an existing file, so the old remove-then-append
    # dance is unnecessary; the context manager guarantees the handle is
    # closed even if a write raises.
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write(header)
        # One line per point: "x y z intensity".
        handle.writelines(
            '\n{} {} {} {}'.format(p[0], p[1], p[2], p[3]) for p in points
        )

# 毫米波雷达数据转换并保存为图片
# def points2plt(plt_path, points):
#     l = np.cos(points[:, 2]) * points[:, 3]
#     z = np.sin(points[:, 2]) * points[:, 3]
#     y = np.cos(points[:, 1]) * 1
#     x = np.sin(points[:, 1]) * 1
#     plt.figure("3D Scatter", facecolor="lightgray", figsize=(20, 20), dpi=80)
#     ax3d = plt.gca(projection="3d")
#     ax3d.scatter(x, y, z, s=10, cmap="jet", marker="o")
#     ax3d.view_init(elev=0, azim=-70)
#     plt.savefig(plt_path)


def get_time_stamp(ct):
    """
    :param ct: epoch time as a float (seconds)
    :return: local wall-clock string "HH-MM-SS-mmm" with milliseconds
    """
    # Fractional part of the second, expressed in milliseconds (truncated).
    millis = (ct - int(ct)) * 1000
    # Only the time-of-day portion is kept; the date is deliberately dropped.
    clock = time.strftime("%H-%M-%S", time.localtime(ct))
    return "%s-%03d" % (clock, millis)

def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Generic sensor listener: tag the measurement and enqueue it.

    No processing happens here; a consumer drains the queue later, so the
    raw measurement object rides along in the tuple.
    """
    item = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(item)

# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    return points

# modify from manual control
def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array

# def _parse_radar_cb(radar_data):
#     points = np.frombuffer(radar_data.raw_data, dtype=np.dtype('f4'))
#     points = copy.deepcopy(points)
#     points = np.reshape(points, (len(radar_data), 4))
#     return points

# modify from world on rail code
# modify from world on rail code
def lidar_to_bev(lidar, min_x=-24, max_x=24, min_y=-16, max_y=16, pixels_per_meter=4, hist_max_per_pixel=10):
    """Project a point cloud onto a top-down (bird's-eye-view) grayscale splat.

    :param lidar: (N, 4) point array; only the x/y columns are binned
    :param hist_max_per_pixel: point count mapped to full brightness (255)
    :return: 2D float array, X axis flipped so the car axis points "up"
    """
    n_xbins = (max_x - min_x) * pixels_per_meter + 1
    n_ybins = (max_y - min_y) * pixels_per_meter + 1
    xbins = np.linspace(min_x, max_x + 1, n_xbins)
    ybins = np.linspace(min_y, max_y + 1, n_ybins)
    # 2D occupancy histogram over the x/y coordinates of the points.
    hist = np.histogramdd(lidar[..., :2], bins=(xbins, ybins))[0]
    # Saturate dense cells, then scale so a full cell maps to 255.
    overhead_splat = np.minimum(hist, hist_max_per_pixel) / hist_max_per_pixel * 255.
    # X parallel to the car axis, Y perpendicular, both parallel to ground.
    return overhead_splat[::-1, :]


# 可视化数据
# Visualize sensor data
def visualize_data(rgb, lidars, text_args=(cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)):
    """Render each lidar sweep as a square BEV tile and tile them side by side.

    :param rgb: RGB image; only its height is used, to size the square tiles
    :param lidars: iterable of (N, 4) point clouds, or None
    :param text_args: cv2.putText style (kept for the disabled overlay code)
    :return: horizontally concatenated BEV canvas, or None when lidars is None
    """
    reference = np.array(rgb[..., ::-1])  # BGR copy of the input frame
    if lidars is None:
        return None
    side = reference.shape[0]
    tiles = []
    for cloud in lidars:
        bev = lidar_to_bev(cloud).astype(np.uint8)
        bev = cv2.cvtColor(bev, cv2.COLOR_GRAY2RGB)
        tiles.append(cv2.resize(bev.astype(np.uint8), (side, side)))
    # cv2.putText(canvas, f'yaw angle: {imu_yaw:.3f}', (4, 10), *text_args)
    # cv2.putText(canvas, f'log: {gnss[0]:.3f} alt: {gnss[1]:.3f} brake: {gnss[2]:.3f}', (4, 20), *text_args)
    return np.concatenate(tiles, axis=1)


def draw_waypoints(world, waypoints, road_id=None, life_time=50.0):
    """
    Highlight waypoints in the simulator with a green 'O' debug marker.

    :param world: carla.World whose debug helper draws the markers
    :param waypoints: list of all map waypoints
    :param road_id: target road id (filter currently disabled — every waypoint is drawn)
    :param life_time: seconds the markers stay visible
    :return: the list of waypoints that were drawn
    """
    drawn = []
    for wp in waypoints:
        # NOTE(review): the original road_id filter is commented out, so all
        # waypoints are accepted unconditionally.
        drawn.append(wp)
        world.debug.draw_string(wp.transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0), life_time=life_time,
                                persistent_lines=True)
    return drawn


def main():
    #  ======================================跟服务器实现连接=============================================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # 设置这个客户端连接超时时间
    # with open(word_path) as od_file:
    #     data = od_file.read()
    # 使用自定义地图
    # world = client.generate_opendrive_world(data,
    #                                         carla.OpendriveGenerationParameters(
    #                                             wall_height=0))
    ## 加载carla自带地图，该地图列表可通过client.get_available_maps()查看
    # print(client.get_available_maps())  # 获取可用的地图列表（自带的地图，无自定义地图）
    # world = client.load_world('shangdiwr') 
    world = client.get_world()
    synchronous_master = False
    
    ##  显示车道中心点坐标
    # waypoints = world.get_map().generate_waypoints(0.2)
    # v_waypoints = []
    # for waypoint in waypoints:
    #     if waypoint.transform.location.x < 399 and waypoint.transform.location.x > 300:
    #         if waypoint.transform.location.y < -600 and waypoint.transform.location.y > -699:
    #             v_waypoints.append(waypoint)

    # obj_waypoints = draw_waypoints(world, v_waypoints, life_time=100)


    try:
        # =================================调整路网中的环境可见度======================================
        # 使得道路中自动生成的交通灯为不可见状态
        env_objs1 = world.get_environment_objects(carla.CityObjectLabel.TrafficLight)
        env_objs2 = world.get_environment_objects(carla.CityObjectLabel.Poles)
    
        env_list = []
        for env_obj in env_objs1:
            env_list.append(env_obj.id)
        for env_obj in env_objs2:
            env_list.append(env_obj.id)
        world.enable_environment_objects(env_list, False)


        # ==================================新建pcd存储文件夹==========================================
        
        pt = world.get_snapshot().timestamp.platform_timestamp
        ptime = int(pt)
        if SAVE_PCD:
            save_pcd_path = os.path.join(save_path, str(ptime))
            os.makedirs(save_pcd_path)

        if RECORDER:
            # 存储recorder记录文件
            client.start_recorder(recorder_path + str(ptime) + '.log', True)


        # ====================================获取世界视角=============================================
        spectator = world.get_spectator()
        # world.debug.draw_string(vehicle_trans1.location, 'M', draw_shadow=False,
        #                         color=carla.Color(r=0, g=255, b=0), life_time=1500, persistent_lines=True)
        spectator.set_transform(view_transform)

        # world.debug.draw_string(p.location, 'X', draw_shadow=False,
        #                         color=carla.Color(r=255, g=0, b=0), life_time=1500, persistent_lines=True)


        # ============================== 修改世界设置:更新步长和同步模式===================================

        # # 让车辆按照交通规则在世界中行驶
        #设置TM接入server的端口，默认8000
        traffic_manager = client.get_trafficmanager(8000)
        #TM里的每一辆车至少保持2米的安全距离
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        #TM里每一辆车都是默认速度的40%（默认所有车辆限速30km/h）
        traffic_manager.global_percentage_speed_difference(120.0)
        if True:
            #设置TM同步模式
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not  settings.synchronous_mode:
                print("开启同步模式")
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                print("开启异步模式")
                synchronous_master = False
                traffic_manager.set_synchronous_mode(False)
                settings.synchronous_mode = False
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)

        # =============================天气=================================

        # # 控制世界的天气和时间（太阳的位置） 万里无云，没有降雨，太阳的角度为90
        weather = carla.WeatherParameters(
            cloudiness=0.0,  # 0-100  0 是晴朗的天空，100 是完全阴天
            precipitation=0.0,  # 0 表示没有下雨，100 表示大雨
            precipitation_deposits=0.0, # 0 表示道路上没有水坑，100 表示道路完全被雨水覆盖
            wind_intensity=0.0, # 0 表示平静，100 表示强风，风会影响雨向和树叶
            sun_azimuth_angle=0.0, # 太阳方位角，0～360
            sun_altitude_angle=90.0,  # 太阳高度角，90 是中午，-90 是午夜
            fog_density=0.0, # 0～100 表示雾的浓度或厚度，仅影响RGB相机传感器
            fog_distance=0.0, # 雾开始的距离，单位为米
            wetness=0.0, # 0～100 表示道路湿度百分比，仅影响RGB相机传感器
            fog_falloff=0.0, # 雾的密度，0至无穷大，0 表示雾比空气轻，覆盖整个场景，1表示与空气一样，覆盖正常大小的建筑物
            scattering_intensity=0.0, # 控制光线对雾的穿透程度
            mie_scattering_scale=0.0, # 控制光线与花粉或空气等大颗粒的相互作用，导致天气朦胧，光源周围有光晕，0表示无影响
            rayleigh_scattering_scale=0.0331, # 控制光与空气分子等小粒子的相互作用，取决于光波长，导致白天蓝天或晚上红天
            )
        world.set_weather(weather)

        # ======================通过蓝图库模糊搜索指定的车辆模型===============
        blueprint_library = world.get_blueprint_library()

        # =============================车辆=================================

        # wbp = world.get_blueprint_library().find('walker.pedestrian.0004')
        # wbp.set_attribute('speed', '0.0')
        # walker = world.spawn_actor(wbp, walker_trans)
        # actor_list.append(walker)

        #添加车辆
        # bp1 = world.get_blueprint_library().find('vehicle.lincoln.mkz_2017')
        bp1 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp2 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp3 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp4 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp5 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp6 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp7 = blueprint_library.find('vehicle.lincoln.mkz_2017')
        bp_static = blueprint_library.find('static.prop.streetbarrier')

        bp1.set_attribute('color', '0, 0, 0')
        bp2.set_attribute('color', '0, 0, 0')
        bp3.set_attribute('color', '0, 0, 0')
        bp4.set_attribute('color', '0, 0, 0')
        bp5.set_attribute('color', '0, 0, 0')
        bp6.set_attribute('color', '0, 0, 0')
        bp7.set_attribute('color', '0, 0, 0')

        ego_vehicle1 = world.spawn_actor(bp1, vehicle_trans1)
        ego_vehicle2 = world.spawn_actor(bp2, vehicle_trans2)
        # street = world.spawn_actor(bp_static, vehicle_trans3)
        # ego_vehicle3 = world.spawn_actor(bp3, vehicle_trans3)
        # ego_vehicle4 = world.spawn_actor(bp4, vehicle_trans4)
        # ego_vehicle5 = world.spawn_actor(bp5, vehicle_trans5)
        # ego_vehicle6 = world.spawn_actor(bp6, vehicle_trans6)
        # ego_vehicle7 = world.spawn_actor(bp7, vehicle_trans7)
        print("单独放置车辆")
        actor_list.append(ego_vehicle1)
        actor_list.append(ego_vehicle2)
        # actor_list.append(street)
        # a_list = world.get_actors()
        # for a in a_list:
        #     print (a.id)
        # actor_list.append(ego_vehicle3)
        # actor_list.append(ego_vehicle4)
        # actor_list.append(ego_vehicle5)
        # actor_list.append(ego_vehicle6)
        # actor_list.append(ego_vehicle7)

        # #获取车辆boundingbox的长宽高信息
        # length1 = ego_vehicle1.bounding_box.extent.x * 2.0
        # width1 = ego_vehicle1.bounding_box.extent.y * 2.0
        # higth1 = ego_vehicle1.bounding_box.extent.z * 2.0
        # length2 = ego_vehicle2.bounding_box.extent.x * 2.0
        # width2 = ego_vehicle2.bounding_box.extent.y * 2.0
        # higth2 = ego_vehicle2.bounding_box.extent.z * 2.0
        # print("\n1: l:{0}, w:{1}, h:{2}, 2: l:{3}, w:{4}, h:{5}".format(
        #     length1, width1, higth1, length2, width2, higth2))

        #车辆控制
        # ego_vehicle1.set_autopilot(True)
        ego_vehicle1.apply_control(carla.VehicleControl(hand_brake = True))
        ego_vehicle2.apply_control(carla.VehicleControl(hand_brake = True))
        # ego_vehicle3.apply_control(carla.VehicleControl(hand_brake = True))
        # ego_vehicle4.apply_control(carla.VehicleControl(hand_brake = True))
        # ego_vehicle5.apply_control(carla.VehicleControl(hand_brake = True))
        # ego_vehicle6.apply_control(carla.VehicleControl(hand_brake = True))
        # ego_vehicle7.apply_control(carla.VehicleControl(hand_brake = True))

        # 车辆物理属性设置（转距等）
        pyh_control = ego_vehicle2.get_physics_control()
        pyh_control.mass *= 0.2
        pyh_control.center_of_mass = carla.Vector3D(0.0, 0.0,-1.8)

        ego_vehicle2.apply_physics_control(pyh_control)
        
        pyh_control1 = ego_vehicle1.get_physics_control()
        pyh_control1.mass *= 2
        pyh_control1.center_of_mass = carla.Vector3D(-1.0, 0.0,0.0)

        ego_vehicle1.apply_physics_control(pyh_control1)


        # ===========================实例化传感器模型============================
        # =======================================传感器==================================

        lidar_bp_2_1 = blueprint_library.find('sensor.lidar.ray_cast')  # 雷达
        lidar_bp_2_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_2_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_1 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_4_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_1 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_2 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_6_3 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_28 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_64 = blueprint_library.find('sensor.lidar.ray_cast')
        lidar_bp_321 = blueprint_library.find('sensor.lidar.ray_cast')

        lidar_bp_322 = blueprint_library.find('sensor.lidar.ray_cast')

        cam_bp = blueprint_library.find('sensor.camera.rgb')

        # radar_bp = blueprint_library.find('sensor.other.radar')  # 毫米波

        # 设置传感器的参数
        cam_bp.set_attribute("image_size_x", "{}".format(IM_WIDTH))
        cam_bp.set_attribute("image_size_y", "{}".format(IM_HEIGHT))
        cam_bp.set_attribute("fov", "90")
        cam_bp.set_attribute('sensor_tick', '0.1')

        atmosphere_attenuation_rate = '0.08'
        dropoff_general_rate = '0.0'
        dropoff_intensity_limit = '0.0'
        dropoff_zero_intensity = '0.0'
        noise_seed = '0.0'
        noise_stddev = '0.0'

        lidar_bp_64.set_attribute('channels', '64')
        lidar_bp_64.set_attribute('upper_fov', '15')
        lidar_bp_64.set_attribute('lower_fov', '-15')
        lidar_bp_64.set_attribute('points_per_second', '1152000')
        lidar_bp_64.set_attribute('range', '200')
        lidar_bp_64.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_64.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_64.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_64.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_64.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_64.set_attribute('noise_seed', noise_seed)
        lidar_bp_64.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_321.set_attribute('channels', '32')
        lidar_bp_321.set_attribute('upper_fov', '25')
        lidar_bp_321.set_attribute('lower_fov', '0')
        lidar_bp_321.set_attribute('points_per_second', '576000')
        lidar_bp_321.set_attribute('range', '200')
        lidar_bp_321.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_321.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_321.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_321.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_321.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_321.set_attribute('noise_seed', noise_seed)
        lidar_bp_321.set_attribute('noise_stddev', noise_stddev)


        lidar_bp_322.set_attribute('channels', '32')
        lidar_bp_322.set_attribute('upper_fov', '0')
        lidar_bp_322.set_attribute('lower_fov', '-10')
        lidar_bp_322.set_attribute('points_per_second', '576000')
        lidar_bp_322.set_attribute('range', '200')
        lidar_bp_322.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_322.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_322.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_322.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_322.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_322.set_attribute('noise_seed', noise_seed)
        lidar_bp_322.set_attribute('noise_stddev', noise_stddev)



        lidar_bp_2_1.set_attribute('channels', '2')
        lidar_bp_2_1.set_attribute('upper_fov', '-1.5')
        lidar_bp_2_1.set_attribute('lower_fov', '-1.7')
        lidar_bp_2_1.set_attribute('points_per_second', '36000')
        lidar_bp_2_1.set_attribute('range', '200')
        lidar_bp_2_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_28.set_attribute('channels', '28')
        lidar_bp_28.set_attribute('upper_fov', '-1.9')
        lidar_bp_28.set_attribute('lower_fov', '-4.6')
        lidar_bp_28.set_attribute('points_per_second', '504000')
        lidar_bp_28.set_attribute('range', '200')
        lidar_bp_28.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_28.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_28.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_28.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_28.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_28.set_attribute('noise_seed', noise_seed)
        lidar_bp_28.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_6_1.set_attribute('channels', '6')
        lidar_bp_6_1.set_attribute('upper_fov', '-4.8')
        lidar_bp_6_1.set_attribute('lower_fov', '-5.8')
        lidar_bp_6_1.set_attribute('points_per_second', '108000')
        lidar_bp_6_1.set_attribute('range', '200')
        lidar_bp_6_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_1.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_1.set_attribute('channels', '4')
        lidar_bp_4_1.set_attribute('upper_fov', '-6.1')
        lidar_bp_4_1.set_attribute('lower_fov', '-7')
        lidar_bp_4_1.set_attribute('points_per_second', '72000')
        lidar_bp_4_1.set_attribute('range', '200')
        lidar_bp_4_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_1.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_1.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_1.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_1.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_1.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_1.set_attribute('noise_stddev', noise_stddev)
        
        lidar_bp_4_2.set_attribute('channels', '4')
        lidar_bp_4_2.set_attribute('upper_fov', '-7.4')
        lidar_bp_4_2.set_attribute('lower_fov', '-8.6')
        lidar_bp_4_2.set_attribute('points_per_second', '72000')
        lidar_bp_4_2.set_attribute('range', '200')
        lidar_bp_4_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_4_3.set_attribute('channels', '4')
        lidar_bp_4_3.set_attribute('upper_fov', '-9.2')
        lidar_bp_4_3.set_attribute('lower_fov', '-11')
        lidar_bp_4_3.set_attribute('points_per_second', '72000')
        lidar_bp_4_3.set_attribute('range', '200')
        lidar_bp_4_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_4_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_4_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_4_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_4_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_4_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_4_3.set_attribute('noise_stddev', noise_stddev)  

        lidar_bp_6_2.set_attribute('channels', '6')
        lidar_bp_6_2.set_attribute('upper_fov', '-12')
        lidar_bp_6_2.set_attribute('lower_fov', '-17')
        lidar_bp_6_2.set_attribute('points_per_second', '108000')
        lidar_bp_6_2.set_attribute('range', '200')
        lidar_bp_6_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_2.set_attribute('noise_stddev', noise_stddev)
    
        lidar_bp_6_3.set_attribute('channels', '6')
        lidar_bp_6_3.set_attribute('upper_fov', '-19')
        lidar_bp_6_3.set_attribute('lower_fov', '-29')
        lidar_bp_6_3.set_attribute('points_per_second', '108000')
        lidar_bp_6_3.set_attribute('range', '200')
        lidar_bp_6_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_6_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_6_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_6_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_6_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_6_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_6_3.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_2_2.set_attribute('channels', '2')
        lidar_bp_2_2.set_attribute('upper_fov', '-32')
        lidar_bp_2_2.set_attribute('lower_fov', '-35')
        lidar_bp_2_2.set_attribute('points_per_second', '36000')
        lidar_bp_2_2.set_attribute('range', '200')
        lidar_bp_2_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_2.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_2.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_2.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_2.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_2.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_2.set_attribute('noise_stddev', noise_stddev)

        lidar_bp_2_3.set_attribute('channels', '2')
        lidar_bp_2_3.set_attribute('upper_fov', '-38')
        lidar_bp_2_3.set_attribute('lower_fov', '-42')
        lidar_bp_2_3.set_attribute('points_per_second', '36000')
        lidar_bp_2_3.set_attribute('range', '200')
        lidar_bp_2_3.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        lidar_bp_2_3.set_attribute('atmosphere_attenuation_rate', atmosphere_attenuation_rate)
        lidar_bp_2_3.set_attribute('dropoff_general_rate', dropoff_general_rate)
        lidar_bp_2_3.set_attribute('dropoff_intensity_limit', dropoff_intensity_limit)
        lidar_bp_2_3.set_attribute('dropoff_zero_intensity', dropoff_zero_intensity)
        lidar_bp_2_3.set_attribute('noise_seed', noise_seed)
        lidar_bp_2_3.set_attribute('noise_stddev', noise_stddev)

        # radar_bp.set_attribute('horizontal_fov',str(35))
        # radar_bp.set_attribute('points_per_second','10000')
        # radar_bp.set_attribute('vertical_fov',str(20))
        # cam01 = world.spawn_actor(cam_bp, camera_trans,  attach_to=None)

        # lidar_2_1 = world.spawn_actor(lidar_bp_2_1, lidar_trans, attach_to=None)
        # lidar_2_2 = world.spawn_actor(lidar_bp_2_2, lidar_trans, attach_to=None)
        # lidar_2_3 = world.spawn_actor(lidar_bp_2_3, lidar_trans, attach_to=None)
        # lidar_4_1 = world.spawn_actor(lidar_bp_4_1, lidar_trans, attach_to=None)
        # lidar_4_2 = world.spawn_actor(lidar_bp_4_2, lidar_trans, attach_to=None)
        # lidar_4_3 = world.spawn_actor(lidar_bp_4_3, lidar_trans, attach_to=None)
        # lidar_6_1 = world.spawn_actor(lidar_bp_6_1, lidar_trans, attach_to=None)
        # lidar_6_2 = world.spawn_actor(lidar_bp_6_2, lidar_trans, attach_to=None)
        # lidar_6_3 = world.spawn_actor(lidar_bp_6_3, lidar_trans, attach_to=None)
        # lidar_28 = world.spawn_actor(lidar_bp_28, lidar_trans, attach_to=None)
        # lidar_64 = world.spawn_actor(lidar_bp_64, v_lidar_trans, attach_to=ego_vehicle2)
        lidar_321 = world.spawn_actor(lidar_bp_321, v_lidar_trans, attach_to=ego_vehicle2)
        lidar_322 = world.spawn_actor(lidar_bp_322, v_lidar_trans, attach_to=ego_vehicle2)
        # radar = world.spawn_actor(radar_bp,radar_trans, attach_to=None)

        # cam01.listen(lambda data: sensor_callback(data, sensor_queue, "rgb"))
        # lidar_2_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_1"))
        # lidar_2_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_2"))
        # lidar_2_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_2_3"))
        # lidar_4_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_1"))
        # lidar_4_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_2"))
        # lidar_4_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_3"))
        # lidar_6_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_1"))
        # lidar_6_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_2"))
        # lidar_6_3.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_6_3"))
        # lidar_28.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_28"))
        # lidar_64.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_64"))
        # Register async callbacks: each lidar pushes its measurement into the
        # shared sensor_queue; the main loop below drains it once per tick.
        lidar_321.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_321"))
        lidar_322.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_322"))
        # radar.listen(lambda data:sensor_callback(data, sensor_queue, "radar"))

        # sensor_list.append(cam01)
        # sensor_list.append(lidar_2_1)
        # sensor_list.append(lidar_2_2)
        # sensor_list.append(lidar_2_3)
        # sensor_list.append(lidar_4_1)
        # sensor_list.append(lidar_4_2)
        # sensor_list.append(lidar_4_3)
        # sensor_list.append(lidar_6_1)
        # sensor_list.append(lidar_6_2)
        # sensor_list.append(lidar_6_3)
        # sensor_list.append(lidar_28)
        # sensor_list.append(lidar_64)
        # Only the two active lidars are tracked; len(sensor_list) drives how
        # many queue entries are expected per tick in the loop below.
        sensor_list.append(lidar_321)
        sensor_list.append(lidar_322)

        # sensor_list.append(radar)

        # Latch that flips to True once the two vehicles have closed to within
        # 5 m of each other (see the control logic in the main loop).
        collion_flash = 0
        if SHOW:
            # One-time Open3D visualizer setup: a single PointCloud geometry is
            # created here and its points/colors are mutated in-place each tick.
            point_cloud = o3d.geometry.PointCloud()
            vis = o3d.visualization.Visualizer()
            # vis = o3d.visualization.VisualizerWithKeyCallback()
            vis.create_window(window_name='Wanji 64line Lidar', width=1000, height=500, left=0, top=700, visible=True)

            ctr = vis.get_view_control()
            vis.add_geometry(point_cloud)

            # Initial camera pose: identity extrinsic plus a pinhole intrinsic
            # derived from a horizontal field of view (camera_angle_x, radians).
            extrinsic = np.eye(4)
            W = 960
            H = 512
            camera_angle_x = 0.8457078337669373
            focal = 0.5 * W / np.tan(0.5 * camera_angle_x)
            # cx = (W-1)/2 rather than W/2 !!!!!
            # NOTE(review): o3d.cuda.pybind.* only exists in CUDA builds of
            # Open3D; the portable path is o3d.camera.PinholeCameraIntrinsic —
            # confirm the target environment ships the CUDA wheel.
            intrinsic = o3d.cuda.pybind.camera.PinholeCameraIntrinsic(width=W, height=H, fx=focal, fy=focal, cx=(W-1) / 2,cy=(H-1) / 2)
            param = o3d.cuda.pybind.camera.PinholeCameraParameters()
            param.extrinsic = extrinsic
            param.intrinsic = intrinsic
            # Return value (bool success) is ignored here.
            ctr.convert_from_pinhole_camera_parameters(param)


            # pcd_lock = threading.Lock()

            render_option = vis.get_render_option()
            render_option.point_size = 1.0
            render_option.background_color = np.asarray([0, 0, 0])  # background color: 0 = black, 1 = white
            to_reset_view_point = True

            flag = False
        # Main synchronous-mode loop: tick the world, script a two-vehicle
        # collision, drain one queue entry per registered sensor, optionally
        # save .pcd files and render the merged cloud in Open3D.
        while True:
            # if flag:
            #  time.sleep(100)
            # flag = True
            # print(sensor_queue.qsize())
            world.tick()
            # time.sleep(2.0)
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            w_timestamp = get_time_stamp(p_timestamp)
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))

            # Relative pose of the two staged vehicles, used to decide when the
            # "collision imminent" latch below should flip.
            x_v1 = ego_vehicle1.get_location().x
            x_v2 = ego_vehicle2.get_location().x
            y_v1 = ego_vehicle1.get_location().y
            y_v2 = ego_vehicle2.get_location().y
            yaw_v1 = ego_vehicle1.get_transform().rotation.yaw
            yaw_v2 = ego_vehicle2.get_transform().rotation.yaw

            dis = math.sqrt((x_v1 - x_v2)**2 + (y_v1 - y_v2)**2)
            yawdelta = yaw_v1 - yaw_v2
            print(dis, yawdelta)

            # if abs(x_v2 - vehicle_trans2.get_location().x) > 10 or abs(y_v2 - vehicle_trans2.get_location().y)>10:
            #     ego_vehicle10 = world.spawn_actor(bp1, vehicle_trans2)
            #     actor_list.append(ego_vehicle10)
            # acc_x2 = ego_vehicle2.get_acceleration().x

            # Vehicle 1 is held stationary (full brake + hand brake) while
            # vehicle 2 is driven into it.
            # NOTE(review): CARLA VehicleControl.throttle is documented as
            # [0.0, 1.0]; 80.0/50.0/30.0 below are presumably clamped to 1.0 —
            # confirm the intent.
            if abs(dis) > 5 and collion_flash == False:
                # ego_vehicle1.apply_control(carla.VehicleControl(throttle=0.5, steer=0.0))
                ego_vehicle1.apply_control(carla.VehicleControl(brake = 1.0, hand_brake = True))
                ego_vehicle2.apply_control(carla.VehicleControl(throttle=80.0, steer=0.0))
            # elif abs(dis) <= 15  and collion_flash == False:
            #     pyh_control = ego_vehicle2.get_physics_control()
            #     pyh_control.mass *= 0.5
            #     pyh_control.center_of_mass = carla.Vector3D(1.0, 0.0, -1.0)

            #     ego_vehicle2.apply_physics_control(pyh_control)
            elif abs(dis) <= 5  and collion_flash == False:
                # ego_vehicle1.apply_control(carla.VehicleControl(brake = 1.0))
                ego_vehicle1.apply_control(carla.VehicleControl(brake = 1.0, hand_brake = True))
                ego_vehicle2.apply_control(carla.VehicleControl(throttle=50.0, steer= 0.0))
                # Inner condition is always True here (same guard as the elif);
                # it simply latches collion_flash once.
                if abs(dis) <= 5  and collion_flash == False:
                    collion_flash = True


                    # actor_v2 = world.get_actor(a_list[1])
                    # forward_vector = carla.Vector3D(0.0, 10000.0, 0.0)
                    # actor_v2.add_force(forward_vector)
                    # Lateral-impulse parameters (experimental, disabled)
                    # impulse_magnitude = 500  # impulse magnitude (N)
                    # impulse_duration = 1  # impulse duration (seconds)

                    # # Compute the impulse direction vector
                    # forward_vector = ego_vehicle2.get_transform().get_forward_vector()
                    # right_vector = ego_vehicle2.get_transform().get_right_vector()
                    # impulse_direction = right_vector * impulse_magnitude

                    # # Apply the lateral impulse
                    # ego_vehicle2.apply_force(impulse_direction, carla.Location(), False)

                    # # Wait for the impulse duration
                    # time.sleep(impulse_duration)

                    # ego_vehicle2.apply_force(-impulse_direction, carla.Location(), False)

            # After the latch: keep vehicle 1 parked, keep pushing vehicle 2.
            if collion_flash:
                ego_vehicle1.apply_control(carla.VehicleControl(brake = 1.0, hand_brake = True))
                ego_vehicle2.apply_control(carla.VehicleControl(throttle=30.0, steer= 0.0))

            try:
                rgbs = []
                lidars = []
                splicing = []

                # Save sensor data: expect exactly one queue entry per sensor
                # per tick (blocks up to 1 s each, else Empty is raised).
                for i in range(0, len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                    # NOTE(review): sensor_type is a LIST (['rgb', 'lidar_64'])
                    # at module level, so this comparison is always False and
                    # every payload is parsed as lidar — verify intent.
                    if sensor_type == 'rgb':
                        rgbs.append(_parse_image_cb(s_data))
                    # elif sensor_type == 'lidar_64':
                    #     lidars.append(_parse_lidar_cb(s_data))
                    #     print('lidar')
                    else:
                        splicing.append(_parse_lidar_cb(s_data))

                if splicing:
                    # Merge the per-lidar clouds into one (N, 4) array.
                    concat_points = np.concatenate(splicing, axis=0)
                    # concat_points = lidars
                    concat_points[:, 1] = [-p for p in concat_points[:, 1]] #  flip Y of the generated cloud: CARLA's coordinate system has Y inverted
                    if SAVE_PCD:
                        # NOTE(review): save_pcd_path is not defined in the
                        # visible __main__ block — confirm it exists before
                        # enabling SAVE_PCD (the actual write is commented out).
                        pcd_path = save_pcd_path + "/" + str(w_timestamp) + ".pcd"


                        # points2pcd(pcd_path, concat_points)

                # Sensor data visualization
                # rgb = np.concatenate(rgbs, axis=1)[..., :3]
                # lidarsplice = visualize_data([concat_points])
                # cv2.imshow('rgb_vizs', rgb)
                # cv2.imshow('lidar_splice', lidarsplice)
                # cv2.waitKey(1)
                if SHOW:
                        points1 = np.array(concat_points)[:, :3]
                        colors1 = np.array(concat_points)[:, 3]  # (unused)

                        point_cloud.points = o3d.utility.Vector3dVector(points1)
                        points_intensity = np.array(concat_points)[:, 3]  # intensity
                        # print(points_intensity)
                        # r = 0
                        # b = 0
                        # g = 0
                        # Map intensity (assumed in [0, 1]) onto a blue→green→red
                        # ramp. NOTE(review): if the very first intensity falls
                        # outside every branch (> 255), r/g/b are unbound and a
                        # NameError is raised — confirm the input range.
                        colors = []
                        for num in points_intensity:
                            # print(num)
                            intensity = num * 255
                            if intensity <= 33:
                                r = 0
                                b = 255
                                g = int(7.727 * intensity)
                            elif intensity >33 and intensity <= 66:
                                r = 0
                                b = int(255 - 7.727 * (intensity - 34))
                                g = 255
                            elif intensity > 66 and intensity <= 100:
                                r = int(7.727 * (intensity - 67))
                                b = 0
                                g = 255
                            elif  intensity > 100 and intensity <= 255:
                                r = 255
                                b = 0
                                g = int(255 - 7.727 * (intensity - 100)/4.697)
                            points_color = [r, g, b]
                            # points_color = [0, 0, 0.8]
                            colors.append(points_color)
                        # print(colors)
                        # Color the point cloud by intensity (original note: the
                        # variation is small, a fixed blue [0,0,0.8] also works).
                        # NOTE(review): Open3D expects colors as floats in
                        # [0, 1]; these are 0–255 ints — confirm rendering.
                        point_cloud.colors = o3d.utility.Vector3dVector(colors)
                        render_opt = vis.get_render_option()
                        render_opt.point_size = 1
                        render_opt.background_color = [0,0, 0]







                        # pcd_box is computed but never used; the full
                        # (uncropped) cloud is what gets rendered below.
                        box = o3d.geometry.AxisAlignedBoundingBox([-60,-60,-10],[60,60,20])
                        pcd_box = point_cloud.crop(box)
                        vis.update_geometry(point_cloud)
                        if to_reset_view_point:
                            # Fit the camera to the first frame only.
                            vis.reset_view_point(True)
                            to_reset_view_point = False
                        vis.poll_events()
                        vis.update_renderer()

            except Empty:
                # A sensor failed to deliver within 1 s; skip this tick.
                print("[Warning] Some of the sensor information is missed")
            # time.sleep(2.0)

    finally:
        # Restore asynchronous mode so the server is not left frozen waiting
        # for ticks after this client exits.
        # NOTE(review): both branches are identical — the else can be dropped,
        # or the branches were meant to differ; confirm intent.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)
        else:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)

        # Tear down sensors first (stops callbacks), then remaining actors.
        for sensor in sensor_list:
            sensor.destroy()
        for actor in actor_list:
            if actor.is_alive:
                actor.destroy()
        # client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        print("All cleaned up!")
        if RECORDER:
            # stop the recorder
            client.stop_recorder()

if __name__ == "__main__":
    # Module-level configuration consumed as globals by main(); names and
    # values must stay as-is for main() to keep working.
    # NOTE(review): "word_path" looks like a typo for "world_path" — left
    # unchanged because main() may read this exact global name.
    word_path = r"/home/wanji/下载/carla_test/net_files/SDOpenDrive.xodr"
    # NOTE(review): sensor_type is a list here, but the main loop compares it
    # with == 'rgb' (always False, so all data is treated as lidar) — verify.
    sensor_type = ['rgb', 'lidar_64']
    sensor_queue = Queue()
    actor_list, sensor_list = [], []
    save_path = '/home/wanji/下载/carla_test/output/real_car_simu/'
    save_ply_path = '/home/wanji/carla_output/PLY/'
    recorder_path = '/home/wanji/carla/recorder_files/'
    # realdata = '/home/wanji/下载/carla_test/output/toveiw.json'
    # if os.path.exists(save_path):
    #     for fileList in os.walk(save_path):
    #         for name in fileList[2]:
    #             os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)
    #             os.remove(os.path.join(fileList[0], name))
    #     shutil.rmtree(save_path)

    # os.mkdir(save_path)
    IM_WIDTH = 256 * 1
    IM_HEIGHT = 256 * 1

    RECORDER = False  # whether to record for CARLA playback
    SAVE_PCD = False  # whether to save lidar point clouds as .pcd files
    SHOW = True # live Open3D visualization
    # view_transform = Transform(Location(x=-40.0, y=100.0, z=4.0), Rotation(pitch=-45.0, yaw=180.0, roll=0.0))

    # Compared with 51SimOne, heading: yaw_51 = 360 - yaw_carla   pitch=-0.1249, yaw=260.3, roll=-0.3431
    lidar_trans = Transform(Location(x=-35.0, y=0.0, z=4.0),
                            Rotation(pitch=0.0, yaw=270.0, roll=0.0))
    camera_trans = Transform(Location(x=-52.3, y=87.3, z=4.0),
                            Rotation(pitch=-90.0, yaw=0.0, roll=0.0))
    # view_transform= lidar_trans
    # Vehicle-mounted lidar offset (relative to the vehicle frame).
    v_lidar_trans = Transform(Location(x=1.5, y=0.0, z=2.5),
                            Rotation(pitch=-0.01, yaw=90.0, roll=0.0))


    # Compared with 51SimOne, heading: yaw_51 = 360 - yaw_carla
    # Spawn poses: trans1/trans2 are the two vehicles staged head-on for the
    # scripted collision; trans3..trans7 are alternative/extra spawn points.
    vehicle_trans1 = Transform(Location(x=-54.3, y=19.0, z=0.2),
                               Rotation(pitch=0.000000, yaw = 90, roll=0.000000))
    # vehicle_trans1 = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=36.559),
    #                            Rotation(pitch=0.000000, yaw=189.7, roll=0.000000))
    vehicle_trans2 = Transform(Location(x=-54.3, y=-20.0, z=0.2),
                               Rotation(pitch=0.000000, yaw = 90, roll=0.000000))
    vehicle_trans3 =Transform(Location(x=-55.0, y=100, z=0.2),
                               Rotation(pitch=0.000000, yaw = 75, roll=0.000000))
    vehicle_trans4 = Transform(Location(x=357.92793750425966, y=-677.8377235324622, z=36.68),
                               Rotation(pitch=0.000000, yaw = 256, roll=0.000000))
    vehicle_trans5 = Transform(Location(x=352.49815245895985, y=-696.7494065737143, z=36.68),
                               Rotation(pitch=0.000000, yaw = 256, roll=0.000000))
    vehicle_trans6 = Transform(Location(x=343.8499634794923, y=-733.7018024279948, z=36.68),
                               Rotation(pitch=0.000000, yaw = 254, roll=0.000000))

    vehicle_trans7 = Transform(Location(x=352.2509348040266, y=-669.750163842901, z=36.68),
                               Rotation(pitch=0.000000, yaw = 69, roll=0.000000))

    # p = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=46),
    #                            Rotation(pitch=-45.000000, yaw=0.0, roll=0.000000))
    # Bird's-eye spectator/view pose.
    view_transform = Transform(Location(x=-18.0, y=4.0, z=45.0), Rotation(pitch=-55.0, yaw=180.0, roll=0.0))
    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
