#!/usr/bin/env python

import carla
import numpy as np
import time
import random
from queue import Queue, Empty
import cv2
import os
import copy
import logging
from carla import Transform, Rotation, Location
import open3d as o3d
from matplotlib import cm

# Pick one of matplotlib's colormaps (used for intensity-based point coloring).
# NOTE(review): cm.get_cmap is deprecated since matplotlib 3.7 and removed in
# 3.9 -- consider matplotlib.colormaps['plasma'] when upgrading.
VIRIDIS = np.array(cm.get_cmap('plasma').colors)
# Evenly spaced [0, 1] anchors, one per VIRIDIS row (intended for np.interp).
VID_RANGE = np.linspace(0.0, 1.0, VIRIDIS.shape[0])
# CityScapes-style palette indexed by the CARLA semantic ObjTag value.
LABEL_COLORS = np.array([
    (255, 255, 255), # None
    (70, 70, 70),    # Building
    (100, 40, 40),   # Fences
    (55, 90, 80),    # Other
    (220, 20, 60),   # Pedestrian
    (153, 153, 153), # Pole
    (157, 234, 50),  # RoadLines
    (128, 64, 128),  # Road
    (244, 35, 232),  # Sidewalk
    (107, 142, 35),  # Vegetation
    (0, 0, 142),     # Vehicle
    (102, 102, 156), # Wall
    (220, 220, 0),   # TrafficSign
    (70, 130, 180),  # Sky
    (81, 0, 81),     # Ground
    (150, 100, 100), # Bridge
    (230, 150, 140), # RailTrack
    (180, 165, 180), # GuardRail
    (250, 170, 30),  # TrafficLight
    (110, 190, 160), # Static
    (170, 120, 50),  # Dynamic
    (45, 60, 150),   # Water
    (145, 170, 100), # Terrain
]) / 255.0 # normalize each channel to [0, 1], which is what Open3D expects

def points2pcd(PCD_FILE_PATH, points):
    """Write an (N, 4) array of (x, y, z, intensity) points to an ASCII .pcd file.

    Overwrites PCD_FILE_PATH if it already exists.

    :param PCD_FILE_PATH: destination path for the .pcd file
    :param points: numpy array of shape (N, >=4); only the first four
                   columns (x, y, z, intensity) are written
    """
    point_num = points.shape[0]
    # PCD header (important): v0.7 ASCII format with four float32 fields.
    header_lines = [
        '# .PCD v0.7 - Point Cloud Data file format',
        'VERSION 0.7',
        'FIELDS x y z intensity',
        'SIZE 4 4 4 4',
        'TYPE F F F F',
        'COUNT 1 1 1 1',
        'WIDTH ' + str(point_num),
        'HEIGHT 1',
        'VIEWPOINT 0 0 0 1 0 0 0',
        'POINTS ' + str(point_num),
        'DATA ascii',
    ]
    # Mode 'w' truncates any pre-existing file, so the racy
    # os.path.exists/os.remove dance is unnecessary; the context manager
    # guarantees the handle is closed even if a write fails.
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write('\n'.join(header_lines))
        # One "x y z intensity" line per point, same formatting as str().
        for row in points:
            handle.write('\n' + ' '.join(str(v) for v in row[:4]))

def get_time_stamp(ct):
    """
    :param ct: time as a float (seconds since the epoch)
    :return: formatted local-time stamp "HH-MM-SS-mmm" with milliseconds
    """
    broken_down = time.localtime(ct)
    # strftime yields "YYYY-MM-DD HH-MM-SS"; keep only the clock part.
    clock_part = time.strftime("%Y-%m-%d %H-%M-%S", broken_down).split(' ')[-1]
    millis = (ct - int(ct)) * 1000
    return "%s-%03d" % (clock_part, millis)

def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Enqueue one sensor reading so the main loop can consume it in sync.

    Pushes a (frame, timestamp, sensor name, raw measurement) tuple onto
    sensor_queue; any per-sensor processing happens on the consumer side.
    """
    record = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(record)

# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    return points

def _parse_DVS_cb(dvs_data):
    '''
    # Example of converting the raw_data from a carla.DVSEventArray
    # sensor into a NumPy array and using it as an image
    '''
    dvs_events = np.frombuffer(dvs_data.raw_data, dtype=np.dtype([
        ('x', np.uint16), ('y', np.uint16), ('t', np.int64), ('pol', np.bool_)]))
    dvs_img = np.zeros((dvs_data.height, dvs_data.width, 3), dtype=np.uint8)
    # Blue is positive, red is negative
    dvs_img[dvs_events[:]['y'], dvs_events[:]['x'], dvs_events[:]['pol'] * 2] = 255
    # surface = pygame.surfarray.make_surface(dvs_img.swapaxes(0, 1))
    return  dvs_img


def semantic_lidar_callback(point_cloud):
    """Parse a raw semantic-lidar measurement into a structured NumPy array.

    The old docstring claimed this prepared Open3D colors; that coloring
    code was unreachable dead code after the return and has been removed
    (the consumer in main() does the coloring itself).

    :param point_cloud: measurement whose raw_data buffer holds packed
                        (x, y, z, CosAngle, ObjIdx, ObjTag) records
    :return: read-only structured array with fields x/y/z/CosAngle
             (float32) and ObjIdx/ObjTag (uint32), one element per point
    """
    data = np.frombuffer(point_cloud.raw_data, dtype=np.dtype([
        ('x', np.float32), ('y', np.float32), ('z', np.float32),
        ('CosAngle', np.float32), ('ObjIdx', np.uint32), ('ObjTag', np.uint32)]))
    return data

def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array

def main():
    """Connect to a CARLA server, spawn NPC traffic plus a DVS-equipped ego
    vehicle, and consume sensor data in a synchronous-mode tick loop.

    NOTE(review): this function reads module-level globals defined in the
    ``__main__`` block (``sensor_queue``, ``actor_list``, ``sensor_list``,
    ``vehicle_trans``, ``camera_dvs_trans``); calling it without that setup
    raises ``NameError``. Runs until interrupted, then restores the world
    settings and destroys every spawned actor and sensor.
    """
    #  ====================================== connect to the server =============================================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # timeout for this client's requests to the server

    print(client.get_available_maps())  # list the maps available on the server (built-in maps, no custom ones)
    world = client.load_world('SDWR0525')
    # world = client.get_world()  # use this instead when the scene is already running inside UE4
    synchronous_master = False

    try:
        # ================================= adjust object visibility in the road network ======================================
        # Hide the traffic lights (and poles) that were auto-generated along the roads.
        env_objs1 = world.get_environment_objects(carla.CityObjectLabel.TrafficLight)
        env_objs2 = world.get_environment_objects(carla.CityObjectLabel.Poles)

        env_list = []
        for env_obj in env_objs1:
            env_list.append(env_obj.id)
        for env_obj in env_objs2:
            env_list.append(env_obj.id)
        world.enable_environment_objects(env_list, False)

        # ==================================== grab the world spectator =============================================
        spectator = world.get_spectator()
        # world.debug.draw_string(lidar_trans.location, 'X', draw_shadow=False, color=carla.Color(r=255, g=0, b=0),
        #                         life_time=1500, persistent_lines=True)
        # spectator.set_transform(view_transform)

        # ============================== world settings: fixed time step and synchronous mode ===================================
        # Let vehicles drive through the world obeying traffic rules.
        # Port the Traffic Manager uses to talk to the server, 8000 by default.
        traffic_manager = client.get_trafficmanager(8000)
        # Every TM-managed vehicle keeps at least a 2 m gap to its leader.
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        # Every TM-managed vehicle drives 40% below its default speed
        # (all vehicles default to a 30 km/h limit).
        traffic_manager.global_percentage_speed_difference(40.0)
        if True:
            # Put the Traffic Manager into synchronous mode as well.
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not settings.synchronous_mode:
                print("开启同步模式")
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                # Someone else already owns synchronous mode; just fix the step.
                synchronous_master = False
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)

        # Control weather and time of day (sun position): cloudless, no rain, sun at 90 degrees.
        weather = carla.WeatherParameters(sun_altitude_angle=90.0)
        world.set_weather(weather)

        # ====================== look up the desired vehicle model in the blueprint library ===============
        blueprint_library = world.get_blueprint_library()

        # ============================= spawn vehicles =================================

        ego_bp = blueprint_library.find('vehicle.lincoln.mkz_2017')
        ego_bp.set_attribute('color', '0, 0, 0')
        # vehicle_transform = random.choice(world.get_map().get_spawn_points())
        # vehicle_trans = carla.Transform(carla.Location(x=-18843, y=-10296, z=265),
        #                                  carla.Rotation(pitch=0, yaw=0, roll=0.0))
        # vehicle_trans is a module-level global set up in the __main__ block.
        ego_vehicle1 = world.spawn_actor(ego_bp, vehicle_trans)
        print("单独放置车辆")
        actor_list.append(ego_vehicle1)
        ego_vehicle1.set_autopilot(True)

        blueprints = blueprint_library.filter("vehicle.*")

        # Keep only four-wheeled vehicles (no bikes/motorcycles).
        blueprints = [x for x in blueprints if int(x.get_attribute('number_of_wheels')) == 4]

        blueprints = sorted(blueprints, key=lambda bp: bp.id)

        spawn_points = world.get_map().get_spawn_points()
        # Restrict spawning to one region of the map.
        spawn_points = [x for x in spawn_points if x.location.x < -17700]
        number_of_spawn_points = len(spawn_points)
        number_of_vehicles = 20

        if number_of_vehicles < number_of_spawn_points:
            random.shuffle(spawn_points)
        elif number_of_vehicles > number_of_spawn_points:
            msg = 'requested %d vehicles, but could only find %d spawn points'
            logging.warning(msg, number_of_vehicles, number_of_spawn_points)
            number_of_vehicles = number_of_spawn_points
        # Spawn vehicles
        batch = []
        hero = False
        for n, transform in enumerate(spawn_points):
            if n >= number_of_vehicles:
                break
            blueprint = random.choice(blueprints)
            if blueprint.has_attribute('color'):
                color = random.choice(blueprint.get_attribute('color').recommended_values)
                blueprint.set_attribute('color', color)
            if blueprint.has_attribute('driver_id'):
                driver_id = random.choice(blueprint.get_attribute('driver_id').recommended_values)
                blueprint.set_attribute('driver_id', driver_id)
            if hero:
                blueprint.set_attribute('role_name', 'hero')
                hero = False
            else:
                blueprint.set_attribute('role_name', 'autopilot')

            # Spawn and immediately hand the vehicle over to the Traffic Manager.
            batch.append(carla.command.SpawnActor(blueprint, transform)
                         .then(carla.command.SetAutopilot(carla.command.FutureActor, True, traffic_manager.get_port())))

        for response in client.apply_batch_sync(batch, synchronous_master):
            if response.error:
                logging.error(response.error)
            else:
                actor_list.append(response.actor_id)

        # ======================================= sensors ==================================
        # DVS (dynamic vision sensor / event camera)
        camera_dvs_bp = blueprint_library.find('sensor.camera.dvs')
        camera_dvs = world.spawn_actor(camera_dvs_bp, camera_dvs_trans, attach_to=ego_vehicle1)
        camera_dvs.listen(lambda data: sensor_callback(data, sensor_queue, "camera_dvs"))
        sensor_list.append(camera_dvs)

        # # Lidar
        # lidar_bp = blueprint_library.find('sensor.lidar.ray_cast_semantic')
        #
        # lidar_bp.set_attribute('channels', '128')
        # lidar_bp.set_attribute('upper_fov', '30')
        # lidar_bp.set_attribute('lower_fov', '-15')
        # lidar_bp.set_attribute('points_per_second', '2304000')
        # lidar_bp.set_attribute('range', '200')
        # lidar_bp.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))
        #
        # lidar = world.spawn_actor(lidar_bp, lidar_trans, attach_to=ego_vehicle1)
        # lidar.listen(lambda data: sensor_callback(data, sensor_queue, "semantic_lidar"))
        # sensor_list.append(lidar)


        # if SHOW:
        #     point_cloud = o3d.geometry.PointCloud()
        #     vis = o3d.visualization.Visualizer()
        #     vis.create_window(window_name='Wanji 64line Lidar', width=600, height=600, left=100, top=200, visible=True)
        #     vis.add_geometry(point_cloud)
        #     render_option = vis.get_render_option()
        #     render_option.point_size = 1.0
        #     render_option.background_color = np.asarray([0, 0, 0])  # background color: 0 is black, 1 is white
        #     to_reset_view_point = True

        while True:
            # Advance the simulation by one fixed step (we own synchronous mode).
            world.tick()
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            w_timestamp = get_time_stamp(p_timestamp)
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))
            # Keep the spectator hovering 40 m above the ego vehicle, looking down.
            spectator = world.get_spectator()
            transform = ego_vehicle1.get_transform()
            spectator.set_transform(carla.Transform(transform.location + carla.Location(z=40),
                                                    carla.Rotation(pitch=-90)))

            try:
                # Block for up to 1 s waiting for this tick's sensor data.
                s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                dvs_img = _parse_DVS_cb(s_data)

                cv2.imshow('DVS_Camera', dvs_img)
                cv2.waitKey(1)


                # semantic_lidar = semantic_lidar_callback(s_data)

                # points = np.array([semantic_lidar['x'], -semantic_lidar['y'], semantic_lidar['z']]).T
                # # color the point cloud using the CityScapes palette
                # labels = np.array(semantic_lidar['ObjTag'])
                # int_color = LABEL_COLORS[labels]

                    # pcd_path = os.path.join(save_pcd_path, "%06d.pcd" % lidars_frame[0])
                    # save the point cloud data
                    # points2pcd(pcd_path, concat_points)
                # if SHOW:
                #     # live-update the point cloud display with open3d
                #     # points1 = np.array(concat_points)[:, :3]
                #     point_cloud.points = o3d.utility.Vector3dVector(points)
                #     # intensity = np.array(concat_points)[:, -1]
                #     # intensity_col = 1.0 - np.log(intensity) / np.log(np.exp(-0.004 * 100))
                #     # int_color = np.c_[
                #     #     np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 0]),
                #     #     np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 1]),
                #     #     np.interp(intensity_col, VID_RANGE, VIRIDIS[:, 2])]
                #     point_cloud.colors = o3d.utility.Vector3dVector(int_color)
                #     vis.update_geometry(point_cloud)
                #     if to_reset_view_point:
                #         vis.reset_view_point(True)
                #         to_reset_view_point = False
                #     vis.poll_events()
                #     vis.update_renderer()
                #     time.sleep(0.005)


            except Empty:
                print("[Warning] Some of the sensor information is missed")
            # time.sleep(100)

    finally:
        # Restore asynchronous mode only if we were the one who enabled sync.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)
        for sensor in sensor_list:
            sensor.destroy()
        client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        print("All cleaned up!")


if __name__ == "__main__":

    # Where captured data would be saved (pcd / camera outputs).
    save_pcd_path = "/home/gj/Carla_0.9.13/PythonAPI/work/112"
    save_camera1_path = "/home/gj/Carla_0.9.13/PythonAPI/work/11"
    save_camera2_path = "/home/gj/Carla_0.9.13/PythonAPI/work/12"

    # Shared state consumed by main(): sensor data queue plus bookkeeping
    # lists used for cleanup in its finally block.
    sensor_queue = Queue()
    actor_list, sensor_list = [], []

    # Camera image resolution.
    IM_WIDTH = 256
    IM_HEIGHT = 256

    SAVE = False  # whether to save lidar point clouds in .pcd format
    SHOW = False  # whether to visualize the point cloud data

    # Poses (position + attitude) of the spectator view, lidar and cameras.
    # view_transform = Transform(Location(x=350, y=-630, z=130), Rotation(pitch=-90, yaw=-90, roll=0))
    vehicle_trans = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017),
                              Rotation(pitch=0.0, yaw=0, roll=0.0))
    # lidar_trans = Transform(Location(x=379, y=-630.72, z=43.017), Rotation(pitch=0, yaw=0, roll=0))
    lidar_trans = Transform(Location(x=0, y=0, z=2.0), Rotation(pitch=0, yaw=90, roll=0))
    camera_trans_1 = Transform(Location(x=379, y=-624.72, z=43.017), Rotation(pitch=0, yaw=0, roll=0))
    camera_trans_2 = Transform(Location(x=379, y=-624.72, z=43.017), Rotation(pitch=0, yaw=180, roll=0))
    #
    view_transform = Transform(Location(x=-18843, y=-10296, z=300), Rotation(pitch=-90, yaw=-13, roll=0.000000))
    # DVS camera pose relative to the ego vehicle it is attached to.
    camera_dvs_trans = Transform(Location(x=0, y=0, z=2.0), Rotation(pitch=0, yaw=90, roll=0))
    # vehicle_trans = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280),
    #                           Rotation(pitch=0.0, yaw=0, roll=0.0))
    # lidar_trans = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280), Rotation(pitch=0, yaw=0, roll=0))
    # camera_trans_1 = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280),
    #                            Rotation(pitch=0, yaw=0, roll=0))
    # camera_trans_2 = Transform(Location(x=-18842.24609375, y=-10296.3388671875, z=280),
    #                            Rotation(pitch=0, yaw=180, roll=0))

    # lidar_trans = Transform(Location(x=-18992.3, y=-10408.8, z=265), Rotation(pitch=0, yaw=0, roll=0))
    # camera_trans_1 = Transform(Location(x=-18992.3, y=-10408.8, z=265),
    #                            Rotation(pitch=0, yaw=-152, roll=0))
    # camera_trans_2 = Transform(Location(x=-18992.3, y=-10408.8, z=265),
    #                            Rotation(pitch=0, yaw=49, roll=0))

    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
