#!/usr/bin/env python
import random

import carla
import numpy as np
import time
from queue import Queue, Empty
import os
import copy
from carla import Transform, Rotation, Location
import matplotlib.pyplot as plt
import open3d as o3d
import imageio
from scipy.spatial.transform import Rotation as R
import cv2

# Colormap used to colorize lidar intensity (one of matplotlib's built-in colormaps)
VIRIDIS = np.array(plt.get_cmap('plasma').colors)
# Evenly spaced values in [0, 1], one per colormap entry, used for interpolation
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])

def points2pcd(PCD_FILE_PATH, points):
    '''
    Save an (N, 4) point cloud as an ASCII PCD v0.7 file.

    :param PCD_FILE_PATH: full path of the .pcd file to write (overwritten if it exists)
    :param points: array of shape (N, 4) holding x, y, z, intensity per point
    :return: None
    '''
    point_num = points.shape[0]
    # PCD header (field order and names matter for downstream readers).
    header = (
        '# .PCD v0.7 - Point Cloud Data file format\n'
        'VERSION 0.7\n'
        'FIELDS x y z intensity\n'
        'SIZE 4 4 4 4\n'
        'TYPE F F F F\n'
        'COUNT 1 1 1 1\n'
        'WIDTH %d\n'
        'HEIGHT 1\n'
        'VIEWPOINT 0 0 0 1 0 0 0\n'
        'POINTS %d\n'
        'DATA ascii' % (point_num, point_num)
    )
    # 'w' mode truncates any existing file, replacing the old
    # remove-then-append dance; the context manager guarantees the
    # handle is closed even if a write fails.
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write(header)
        # One "x y z intensity" line per point.
        for i in range(point_num):
            handle.write('\n%s %s %s %s' % (points[i, 0], points[i, 1], points[i, 2], points[i, 3]))

def points2plt(plt_path, points):
    '''
    Render a point cloud (angle/range encoded) as a 3D scatter image file.

    :param plt_path: full path of the image file to write
    :param points: array of shape (N, >=4); column 1 is treated as azimuth,
                   column 2 as elevation and column 3 as range
                   (radians / meters assumed -- TODO confirm with the producer)
    :return: None
    '''
    # Project polar-style coordinates to cartesian for display.
    z = np.sin(points[:, 2]) * points[:, 3]
    y = np.cos(points[:, 1]) * 1
    x = np.sin(points[:, 1]) * 1
    fig = plt.figure("3D Scatter", facecolor="lightgray", figsize=(20, 20), dpi=80)
    # plt.gca(projection=...) was deprecated in Matplotlib 3.4 and removed in
    # 3.6; add_subplot(projection="3d") is the supported way to get a 3D axes.
    ax3d = fig.add_subplot(projection="3d")
    ax3d.scatter(x, y, z, s=10, cmap="jet", marker="o")
    ax3d.view_init(elev=0, azim=-70)
    plt.savefig(plt_path)
    # Close the figure so repeated calls do not accumulate open figures.
    plt.close(fig)

def get_time_stamp(ct):
    """
    Format a float epoch time as "HH-MM-SS-mmm" (local time with milliseconds).

    :param ct: time as a float (seconds since the epoch)
    :return: formatted timestamp string including milliseconds
    """
    # "%Y-%m-%d %H-%M-%S" yields "<date> <clock>"; keep only the clock part,
    # which is already dash-separated so it is safe in file names.
    formatted = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime(ct))
    clock_part = formatted.split(' ')[-1]
    millis = (ct - int(ct)) * 1000
    return "%s-%03d" % (clock_part, millis)

def sensor_callback(sensor_data, sensor_queue, sensor_name):
    '''
    Sensor listen() callback: tag the measurement and push it onto the shared queue.

    :param sensor_data: measurement object delivered by the sensor
    :param sensor_queue: queue collecting measurements from all sensors
    :param sensor_name: identifier of the sensor that produced the data
    :return: None
    '''
    entry = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(entry)

def _parse_lidar_cb(lidar_data):
    '''
    处理lidar数据
    :param lidar_data: 待处理的lidar数据
    :return: lidar数据队列
    '''
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    return points

def _parse_image_cb(image):
    '''
    处理camera数据
    :param image: 待处理的camera数据
    :return: camera数据队列
    '''
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array

def lidar_to_bev(lidar, min_x=-100, max_x=100, min_y=-100, max_y=100, pixels_per_meter=4, hist_max_per_pixel=10):
    '''
    Rasterize a 3D point cloud into a 2D bird's-eye-view intensity image.

    :param lidar: array whose last axis starts with x, y coordinates
    :param min_x: lower X display bound
    :param max_x: upper X display bound
    :param min_y: lower Y display bound
    :param max_y: upper Y display bound
    :param pixels_per_meter: raster resolution
    :param hist_max_per_pixel: point count at which a cell saturates
    :return: 2D array (0..255 floats), X parallel to the car axis, Y
             perpendicular, both parallel to the ground
    '''
    num_x = (max_x - min_x) * pixels_per_meter + 1
    num_y = (max_y - min_y) * pixels_per_meter + 1
    edges_x = np.linspace(min_x, max_x + 1, num=num_x)
    edges_y = np.linspace(min_y, max_y + 1, num=num_y)
    # 2D histogram over the (x, y) coordinates of every point.
    counts = np.histogramdd(lidar[..., :2], bins=(edges_x, edges_y))[0]
    # Saturate crowded cells, then normalize to the 0..255 range.
    counts = np.minimum(counts, hist_max_per_pixel)
    overhead_splat = counts / hist_max_per_pixel * 255.
    # Flip so that X (vehicle forward axis) increases upward in the image.
    return overhead_splat[::-1, :]

def show_init():
    '''
    Initialize the Open3D visualization and per-lidar display colors.

    :return: tuple (vis, point_cloud, to_reset_view_point, list_lidar_color):
             the Open3D Visualizer, the PointCloud geometry it renders, a flag
             telling the first render to reset the camera, and one random RGB
             color (0-1 floats) per lidar.
    '''
    point_cloud = o3d.geometry.PointCloud()
    vis = o3d.visualization.Visualizer()
    if SHOW_Lidar_3D:
        # Window that will display the live point cloud.
        vis.create_window(window_name='Wanji_64line_Lidar', width=LI_WIDTH, height=LI_HEIGHT,
                          left=50, top=50, visible=True)
        render_option = vis.get_render_option()
        render_option.point_size = 1.0
        # Background color: 0 is black, 1 is white.
        render_option.background_color = np.asarray([0, 0, 0])
        view = vis.get_view_control()
        view.set_lookat(np.array([0.0, 0.0, 0.0]))  # pivot used when rotating the model by dragging
        view.set_up((0, 1, 0))      # vector pointing to the top of the screen
        view.set_front((0, 0, 1))   # the unique vector pointing out of the screen
        view.set_zoom(0.5)          # 0-1 zooms in, >1 zooms out, 1 keeps the original size
        view.rotate(np.radians(45), 0)
        vis.add_geometry(point_cloud)
    to_reset_view_point = True

    # One random RGB color (normalized to 0-1) per lidar.
    list_lidar_color = [
        [random.randint(0, 255) / 255 for _ in range(3)]
        for _ in range(Lidar_Number)
    ]
    return vis, point_cloud, to_reset_view_point, list_lidar_color

def vis_deal(vis, point_cloud, to_reset_view_point, concat_points):
    '''
    Push one frame of lidar points into the live Open3D visualizer.

    :param vis: Open3D Visualizer created by show_init()
    :param point_cloud: Open3D PointCloud geometry registered with ``vis``
    :param to_reset_view_point: True to reset the camera before rendering
    :param concat_points: (N, 4) array of x, y, z, intensity values
    :return: updated reset flag (False once the view has been reset).
             NOTE(review): the original discarded this local update, so the
             view was reset on every call; callers can now persist the flag.
    '''
    data = np.array(concat_points)
    points = data[:, :3]
    intensity = data[:, -1]
    # Map intensity through the colormap; the log scaling appears to invert
    # the exp(-0.004 * range) atmospheric attenuation over ~100 m -- confirm.
    intensity_col = 1.0 - np.log(intensity) / np.log(np.exp(-0.004 * 100))
    int_color = np.c_[
        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 0]),
        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 1]),
        np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 2])]
    # Color the cloud by intensity, then replace the rendered geometry.
    point_cloud.colors = o3d.utility.Vector3dVector(int_color)
    point_cloud.points = o3d.utility.Vector3dVector(points)
    # Cropping example (disabled):
    # box = o3d.geometry.AxisAlignedBoundingBox([-60, -60, -10], [60, 60, 20])
    # pcd_box = point_cloud.crop(box)
    vis.update_geometry(point_cloud)
    if to_reset_view_point:
        vis.reset_view_point(True)
        to_reset_view_point = False
    vis.poll_events()
    vis.update_renderer()
    # Small pause so the GUI event loop keeps up.
    time.sleep(0.005)
    return to_reset_view_point

def multi_lidar_deal(concat_points_list, lidar_trans_list):
    '''
    Express every lidar's points in the frame of the first lidar.

    :param concat_points_list: per-lidar point arrays (Y already negated)
    :param lidar_trans_list: lidar poses (carla.Transform), one per lidar
    :return: list of (N, 3) arrays, all relative to the first lidar's pose
    '''
    def _pose_tuple(transform):
        # Flatten a carla.Transform into (x, y, z, pitch, yaw, roll).
        loc, rot = transform.location, transform.rotation
        return (loc.x, loc.y, loc.z, rot.pitch, rot.yaw, rot.roll)

    # The first lidar is the origin; everything else is shown relative to it.
    reference = _pose_tuple(lidar_trans_list[0])
    list_lidar_data = []
    for i in range(Lidar_Number):
        pose = _pose_tuple(lidar_trans_list[i])
        relative = np.asarray([p - r for p, r in zip(pose, reference)])
        # Build a 4x4 homogeneous transform from the relative pose.
        T0 = np.eye(4)
        T0[:3, :3] = R.from_euler('xyz', relative[3:6], degrees=True).as_matrix()
        T0[:3, 3] = relative[0:3]
        xyz = np.asarray(concat_points_list[i])[:, 0:3]
        homogeneous = np.hstack((xyz, np.ones((xyz.shape[0], 1))))
        list_lidar_data.append(np.dot(T0, homogeneous.T).T[:, :3])

    return list_lidar_data

def ego_control(ego_vehicle, autopilot, control_params=(0.0, 0.0, 0.0), hand_brake=False):
    '''
    Apply a control command to the ego vehicle and report its bounding-box size.

    :param ego_vehicle: ego vehicle actor
    :param autopilot: True to hand the vehicle to CARLA's autopilot
    :param control_params: (throttle, steer, brake) used when autopilot is off
    :param hand_brake: hand-brake state used when autopilot is off
    :return: (length, width, height) of the vehicle bounding box
    '''
    # extent holds half-sizes, so double each axis for the full dimensions.
    extent = ego_vehicle.bounding_box.extent
    length = extent.x * 2.0
    width = extent.y * 2.0
    height = extent.z * 2.0

    if autopilot:
        ego_vehicle.set_autopilot(True)
    else:
        throttle, steer, brake = control_params
        control = carla.VehicleControl()
        control.throttle = throttle
        control.steer = steer
        control.brake = brake
        control.hand_brake = hand_brake
        ego_vehicle.apply_control(control)

    # Physics tweaks (torque / center of mass) left disabled:
    # ego_vehicle.apply_physics_control(
    #     carla.VehiclePhysicsControl(center_of_mass=carla.Vector3D(-1.0, 0.0, 0.0)))
    return length, width, height

def add_camera(world, number, camera_trans, width, height, fov, tick):
    '''
    Spawn an RGB camera sensor and register it with the shared sensor queue.

    :param world: CARLA world
    :param number: camera index (embedded in its queue name "camera_<number>")
    :param camera_trans: spawn transform for the camera
    :param width: image width in pixels
    :param height: image height in pixels
    :param fov: horizontal field of view in degrees
    :param tick: capture period in seconds
    :return: None
    '''
    camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
    # Blueprint attributes are always set as strings.
    for attribute, value in (("image_size_x", width),
                             ("image_size_y", height),
                             ("fov", fov),
                             ('sensor_tick', tick)):
        camera_bp.set_attribute(attribute, str(value))
    camera = world.spawn_actor(camera_bp, camera_trans, attach_to=None)
    camera.listen(lambda data: sensor_callback(data, sensor_queue, "camera_{}".format(number)))
    sensor_list.append(camera)

def lidar_init(blueprint_library, main_params, params_list):
    '''
    Configure a ray-cast lidar blueprint.

    :param blueprint_library: blueprint library of the world
    :param main_params: (channels, upper_fov, lower_fov, points_per_second)
    :param params_list: (range, rotation_frequency, atmosphere_attenuation_rate,
                         dropoff_general_rate, dropoff_intensity_limit,
                         dropoff_zero_intensity, noise_seed, noise_stddev)
    :return: the configured lidar blueprint
    '''
    lidar_bp = blueprint_library.find('sensor.lidar.ray_cast')
    # Attribute names pair positionally with main_params then params_list;
    # blueprint attributes are always set as strings.
    attribute_names = ('channels', 'upper_fov', 'lower_fov', 'points_per_second',
                       'range', 'rotation_frequency', 'atmosphere_attenuation_rate',
                       'dropoff_general_rate', 'dropoff_intensity_limit',
                       'dropoff_zero_intensity', 'noise_seed', 'noise_stddev')
    for attribute, value in zip(attribute_names, tuple(main_params) + tuple(params_list)):
        lidar_bp.set_attribute(attribute, str(value))
    return lidar_bp

def wanji_lidar_deal(world, lidar_bp, lidar_name, lidar_trans, num):
    '''
    Spawn one lidar actor, wire its callback, and track it for cleanup.

    :param world: CARLA world
    :param lidar_bp: configured lidar blueprint
    :param lidar_name: base name for this lidar's queue entries
    :param lidar_trans: spawn transform for the lidar
    :param num: pose index appended to the queue name ("<lidar_name>_<num>")
    :return: None
    '''
    lidar = world.spawn_actor(lidar_bp, lidar_trans, attach_to=None)
    queue_name = "{}_{}".format(lidar_name, num)
    lidar.listen(lambda data: sensor_callback(data, sensor_queue, queue_name))
    sensor_list.append(lidar)

def wanji_64_lines_lidar(world):
    '''
    Assemble the Wanji 64-line lidar as a stack of ray-cast lidar blueprints.

    Each band below models one vertical slice of the device as
    (channels, upper_fov, lower_fov, points_per_second).  Every band is
    spawned at every configured lidar pose; the per-pose clouds are
    concatenated downstream.

    :param world: CARLA world
    :return: None
    '''
    # Parameters shared by every band.
    params_list = [
        200,    # range (m)
        10,     # rotation_frequency (Hz)
        0.004,  # atmosphere_attenuation_rate
        0.0,    # dropoff_general_rate
        0.0,    # dropoff_intensity_limit
        0.0,    # dropoff_zero_intensity
        0.0,    # noise_seed
        0.0,    # noise_stddev
    ]
    band_specs = [
        ("lidar_bp_2_1", (2, -1.5, -1.7, 36000)),
        ("lidar_bp_28_1", (28, -1.9, -4.6, 504000)),
        ("lidar_bp_6_1", (6, -4.8, -5.8, 108000)),
        ("lidar_bp_4_1", (4, -6.1, -7, 72000)),
        ("lidar_bp_4_2", (4, -7.4, -8.6, 72000)),
        ("lidar_bp_4_3", (4, -9.2, -11, 72000)),
        ("lidar_bp_6_2", (6, -12, -17, 108000)),
        ("lidar_bp_6_3", (6, -19, -29, 108000)),
        ("lidar_bp_2_2", (2, -32, -35, 36000)),
        ("lidar_bp_2_3", (2, -38, -42, 36000)),
    ]
    blueprint_library = world.get_blueprint_library()
    bands = [(name, lidar_init(blueprint_library, main_params, params_list))
             for name, main_params in band_specs]
    # Spawn every band at every configured lidar pose.
    for i in range(Lidar_Number):
        lidar_trans = lidar_trans_list[i]
        for lidar_name, lidar_bp in bands:
            wanji_lidar_deal(world, lidar_bp, lidar_name, lidar_trans, i)

def save_data(w_timestamp, concat_points_list, camera_data_list):
    '''
    Persist the current frame's lidar / camera data to disk (best effort).

    :param w_timestamp: timestamp string used as the file-name stem
    :param concat_points_list: per-lidar (N, 4) point arrays to save
    :param camera_data_list: per-camera lists of RGB frames to save
    :return: None
    '''
    try:
        # Save point clouds as ASCII pcd files.
        if SAVE_PCD:
            for x in range(Lidar_Number):
                pcd_path_floder = save_lidar_path + "/" + str(x)
                os.makedirs(pcd_path_floder, exist_ok=True)
                pcd_path = os.path.join(pcd_path_floder, "%s.pcd" % str(w_timestamp))
                points2pcd(pcd_path, concat_points_list[x])
        # Save point clouds as npy files.
        if SAVE_NPY:
            for x in range(Lidar_Number):
                npy_path_floder = save_lidar_path + "/" + str(x)
                os.makedirs(npy_path_floder, exist_ok=True)
                npy_path = os.path.join(npy_path_floder, "%s.npy" % str(w_timestamp))
                np.save(npy_path, concat_points_list[x])
        # Save camera frames as jpg.
        if SAVE_Camera:
            for i in range(Camera_Number):
                img_path_floder = save_img_path + "/" + str(i)
                os.makedirs(img_path_floder, exist_ok=True)
                img_path = os.path.join(img_path_floder, "%s.jpg" % str(w_timestamp))
                # BUG FIX: imageio.save is not the image-writing call (it aliases
                # get_writer, so the array would land in the `format` slot);
                # imageio.imwrite(path, image) is the documented API.
                # NOTE(review): camera_data_list[i] is a *list* of frames --
                # confirm exactly one frame is appended per tick.
                imageio.imwrite(img_path, camera_data_list[i])
    except Exception as e:
        # Deliberate best-effort: a failed save must not stop the sim loop.
        print(e)

def show_data(vis, point_cloud, to_reset_view_point, concat_points_list, camera_data_list):
    '''
    Live visualization of lidar / camera data for the current frame.

    :param vis: Open3D visualizer (3D lidar view)
    :param point_cloud: Open3D PointCloud geometry (3D lidar view)
    :param to_reset_view_point: True to reset the 3D view before rendering
    :param concat_points_list: per-lidar (N, 4) point arrays to display
    :param camera_data_list: per-camera lists of RGB frames to display
    :return: None
    '''
    # 3D point cloud.
    if SHOW_Lidar_3D:
        if Lidar_Number == 1:
            vis_deal(vis, point_cloud, to_reset_view_point, concat_points_list[0])
        else:
            list_lidar_data = multi_lidar_deal(concat_points_list, lidar_trans_list)
            # Fixed palette, one color per lidar.  BUG FIX: this used to index
            # list_lidar_color[i] directly, which raised IndexError whenever
            # Lidar_Number > 2; cycle through the palette instead.
            palette = [(1, 0, 0), (0, 0, 1)]
            list_lidar_colors = [
                np.full((list_lidar_data[i].shape[0], 3), palette[i % len(palette)])
                for i in range(Lidar_Number)
            ]
            point_cloud.colors = o3d.utility.Vector3dVector(np.vstack(list_lidar_colors))
            all_points = np.concatenate(list_lidar_data, axis=0)
            point_cloud.points = o3d.utility.Vector3dVector(all_points)

            vis.update_geometry(point_cloud)
            # NOTE(review): this rebinding is local and not returned, so the
            # view is reset on every call -- confirm that is intended.
            if to_reset_view_point:
                vis.reset_view_point(True)
                to_reset_view_point = False
            vis.poll_events()
            vis.update_renderer()
            time.sleep(0.005)
    # 2D bird's-eye view.
    if SHOW_Lidar_2D:
        if Lidar_Number == 1:
            lidar_data = concat_points_list[0]
        else:
            list_lidar_data = multi_lidar_deal(concat_points_list, lidar_trans_list)
            lidar_data = np.concatenate(list_lidar_data, axis=0)

        lidar_viz = lidar_to_bev(lidar_data).astype(np.uint8)
        lidar_viz = cv2.cvtColor(lidar_viz, cv2.COLOR_GRAY2RGB)
        cv2.imshow('Lidar', lidar_viz)
        cv2.waitKey(1)
    # Camera images.
    if SHOW_Camera:
        camera_data = np.asarray(np.concatenate(camera_data_list[0], axis=1)[..., :3])
        # Black separator stripe drawn between adjacent camera images.
        stripe_width = 10
        black_stripe = np.zeros((IM_HEIGHT, stripe_width, 3), np.uint8)
        for i in range(1, Camera_Number):
            camera_data_i = np.asarray(np.concatenate(camera_data_list[i], axis=1)[..., :3])
            camera_data = np.concatenate((camera_data, black_stripe, camera_data_i), axis=1)
        # Reorder B,G,R -> R,G,B; otherwise the preview looks blue-tinted.
        camera_data = cv2.cvtColor(camera_data, cv2.COLOR_BGR2RGB)
        cv2.imshow('Camera', camera_data)
        cv2.waitKey(1)


def main():
    """
    Connect to the CARLA server, build the scene (environment tweaks, weather,
    ego vehicle, lidar stack, cameras), then run the synchronous simulation
    loop: tick the world, drain the sensor queue, save and visualize each
    frame until interrupted.  Restores world settings and destroys spawned
    actors on exit.
    """
    #  ====================================== connect to the server =============================================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # timeout for this client connection
    # with open(word_path) as od_file:
    #     data = od_file.read()
    # # Use a custom OpenDRIVE map
    # world = client.generate_opendrive_world(data,
    #                                         carla.OpendriveGenerationParameters(
    #                                             wall_height=0))
    # Load a built-in CARLA map; the list is available via client.get_available_maps()
    print(client.get_available_maps())  # available (built-in) maps, no custom ones
    world = client.load_world('yuxiang02')
    # world = client.get_world()
    synchronous_master = False

    try:
        # ================================= adjust visibility of map objects ======================================
        # Hide the traffic lights (and their poles) that the road network generates automatically
        env_objs1 = world.get_environment_objects(carla.CityObjectLabel.TrafficLight)
        env_objs2 = world.get_environment_objects(carla.CityObjectLabel.Poles)
        env_list = []
        for env_obj in env_objs1:
            env_list.append(env_obj.id)
        for env_obj in env_objs2:
            env_list.append(env_obj.id)
        world.enable_environment_objects(env_list, False)

        # ================================== recording for replay ==========================================
        pt = world.get_snapshot().timestamp.platform_timestamp
        ptime = int(pt)
        if RECORDER:
            # Store the recorder log file
            client.start_recorder(recorder_path + str(ptime) + '.log', True)

        # ==================================== spectator (world view) =============================================
        spectator = world.get_spectator()
        spectator.set_transform(view_transform)

        # ============================== world settings: fixed step and synchronous mode ===================================
        # Port the Traffic Manager listens on (default 8000)
        traffic_manager = client.get_trafficmanager(8000)
        # Every TM vehicle keeps at least 2 m distance to the leading vehicle
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        # Every TM vehicle drives 40% below its default speed (all vehicles limited to 30 km/h by default)
        traffic_manager.global_percentage_speed_difference(40.0)
        if True:
            # Put the Traffic Manager in synchronous mode
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not settings.synchronous_mode:
                print("开启同步模式")
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                synchronous_master = False

        # ============================= weather =================================
        # Control the weather and time of day (sun position): clear sky, no rain, sun at 90 degrees
        weather = carla.WeatherParameters(
            cloudiness=0.0,                 # 0-100: 0 is a clear sky, 100 fully overcast
            precipitation=0.0,              # 0 means no rain, 100 heavy rain
            precipitation_deposits=0.0,     # 0 means no puddles, 100 roads fully covered by water
            wind_intensity=0.0,             # 0 calm, 100 strong wind; wind affects rain direction and leaves
            sun_azimuth_angle=0.0,          # sun azimuth, 0-360
            sun_altitude_angle=90.0,        # sun altitude, 90 is noon, -90 is midnight
            fog_density=0.0,                # 0-100 fog density/thickness; affects the RGB camera only
            fog_distance=0.0,               # distance at which fog starts, in meters
            wetness=0.0,                    # 0-100 road wetness percentage; affects the RGB camera only
            fog_falloff=0.0,                # fog density falloff: 0 = lighter than air (covers the scene), 1 = like air (covers building-sized volumes)
            scattering_intensity=0.0,       # how much light penetrates the fog
            mie_scattering_scale=0.0,       # light interaction with large particles (pollen/air): hazy weather with halos around lights; 0 = no effect
            rayleigh_scattering_scale=0.0331,   # light interaction with small particles (air molecules); wavelength dependent: blue sky by day, red at night
            )
        world.set_weather(weather)

        # ====================== look up the vehicle model in the blueprint library =================
        blueprint_library = world.get_blueprint_library()

        # ============================= vehicle =================================
        ego_bp = blueprint_library.find('vehicle.lincoln.mkz_2017')
        ego_bp.set_attribute('color', '0, 0, 0')
        ego_vehicle1 = world.spawn_actor(ego_bp, vehicle_trans)
        print("单独放置车辆")
        actor_list.append(ego_vehicle1)
        ego_length, ego_width, ego_hight = ego_control(ego_vehicle1, autopilot=False,
                                           control_params=(0.0, 0.0, 0.0), hand_brake=False)
        # print("车身长：", ego_length, ", 宽：", ego_width, ", 高：", ego_hight)

        # =========================== instantiate the sensors ==========================
        # lidar
        wanji_64_lines_lidar(world)
        # camera
        for i in range(Camera_Number):
            camera_trans = camera_trans_list[i]
            add_camera(world, i, camera_trans, IM_WIDTH, IM_HEIGHT, fov=90, tick=0.1)
        # initialize the point-cloud display
        vis, point_cloud, to_reset_view_point, list_lidar_color = show_init()

        while True:
            world.tick()
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            w_timestamp = get_time_stamp(p_timestamp)
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))

            try:
                lidar_data_list = []
                concat_points_list = []
                camera_data_list = []
                for n in range(Lidar_Number):
                    lidar_data_list.append([])
                    concat_points_list.append([])
                for p in range(Camera_Number):
                    camera_data_list.append([])
                # Collect one measurement per registered sensor; queue names are
                # "<type>_..._<index>", so the first token routes by sensor type
                # and the last token selects the lidar/camera slot.
                for i in range(0, len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                    s_name_list = s_name.split('_')
                    sensor_type = str(s_name_list[0])
                    sensor_id = int(s_name_list[len(s_name_list)-1])
                    if sensor_type == "lidar":
                        lidar_data_list[sensor_id].append(_parse_lidar_cb(s_data))
                    elif sensor_type == "camera":
                        camera_data_list[sensor_id].append(_parse_image_cb(s_data))
                # Lidar processing: merge the sub-lidar bands of each device and negate Y
                for m in range(Lidar_Number):
                    concat_points_list[m] = np.concatenate(lidar_data_list[m], axis=0)
                    concat_points_list[m][:, 1] = [-p for p in concat_points_list[m][:, 1]]

                # Save the sensor data
                save_data(w_timestamp, concat_points_list, camera_data_list)

                # Visualize the sensor data
                show_data(vis, point_cloud, to_reset_view_point, concat_points_list, camera_data_list)

            except Empty:
                print("[Warning] Some of the sensor information is missed")

    finally:
        # Restore asynchronous mode before leaving, then destroy everything we spawned.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)
        for sensor in sensor_list:
            sensor.destroy()
        for actor in actor_list:
            if actor.is_alive:
                actor.destroy()
        # client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        print("All cleaned up!")
        if RECORDER:
            # Stop the recorder
            client.stop_recorder()


if __name__ == "__main__":

    # Timestamped output folders, one run per directory.
    local_time = time.localtime(time.time())
    timestamp = time.strftime("%Y%m%d_%H%M%S", local_time)

    # word_path = r"/home/wanji/下载/carla_test/net_files/SDOpenDrive.xodr"
    save_lidar_path = '/home/gj/Carla_Output/lidar/' + str(timestamp)
    save_img_path = '/home/gj/Carla_Output/camera/' + str(timestamp)
    recorder_path = '/home/gj/Carla_Output/recorder/'
    # Shared state used by the sensor callbacks and the main loop.
    sensor_queue = Queue()
    actor_list, sensor_list = [], []
    lidar_trans_list, camera_trans_list = [], []

    Lidar_Number = 1      # number of lidars
    LI_WIDTH = 256 * 3
    LI_HEIGHT = 256 * 3

    Camera_Number = 2       # number of cameras
    IM_WIDTH = 356 * 2
    IM_HEIGHT = 256 * 2

    RECORDER = False    # whether to record a replay log
    SAVE_PCD = False    # whether to save lidar point clouds in pcd format
    SAVE_NPY = False    # whether to save lidar point clouds in npy format
    SAVE_Camera = False  # whether to save camera images
    SHOW_Lidar_3D = False    # whether to show the 3D point cloud live
    SHOW_Lidar_2D = False  # whether to show the 2D point cloud live
    SHOW_Camera = True  # whether to show the camera feed live

    # Yuxiang highway location
    # view_transform = Transform(Location(x=-18843, y=-10296, z=300), Rotation(pitch=-90, yaw=-13, roll=0.000000))
    # vehicle_trans = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280),
    #                         Rotation(pitch=0.0, yaw=0, roll=0.0))
    # lidar_trans = Transform(Location(x=-18843, y=-10296, z=300), Rotation(pitch=0.0, yaw=0.0, roll=0.0))
    # camera_trans = Transform(Location(x=-18843, y=-10296, z=300), Rotation(pitch=-10.0, yaw=0.0, roll=0.0))

    # Shangdi West Road location
    view_transform = Transform(Location(x=350, y=-630, z=80), Rotation(pitch=-90, yaw=-13, roll=0.000000))
    vehicle_trans = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017),
                            Rotation(pitch=0.0, yaw=260.3, roll=0.0))
    lidar_trans1 = Transform(Location(x=350.7301467362416, y=-563.1270607010696, z=43.017),
                            Rotation(pitch=0.0, yaw=0, roll=0.0))
    lidar_trans2 = Transform(Location(x=365.7301467362416, y=-563.1270607010696, z=43.017),
                             Rotation(pitch=0.0, yaw=0, roll=0.0))
    camera_trans1 = Transform(Location(x=409.89334490685565, y=-608.4437149592156, z=43.017),
                            Rotation(pitch=0.0, yaw=99.7, roll=0.0))
    camera_trans2 = Transform(Location(x=409.89334490685565, y=-608.4437149592156, z=43.017),
                              Rotation(pitch=0.0, yaw=0, roll=0.0))

    lidar_trans_list.append(lidar_trans1)
    lidar_trans_list.append(lidar_trans2)
    camera_trans_list.append(camera_trans1)
    camera_trans_list.append(camera_trans2)


    # Only run when every configured lidar/camera has a pose to spawn at.
    if Lidar_Number <= len(lidar_trans_list) and Camera_Number <= len(camera_trans_list):
        try:
            main()
        except KeyboardInterrupt:
            print(' - Exited by user.')
    else:
        print("Insufficient number of LiDARs")
