import carla
import numpy as np
import time
import random
from queue import Queue, Empty
import os
import copy
from carla import Transform, Rotation, Location
import json
import open3d as o3d
import math


def points2pcd(PCD_FILE_PATH, points):
    """Write an N x 4 point cloud (x, y, z, intensity) to an ASCII .pcd file.

    Any existing file at PCD_FILE_PATH is overwritten.

    :param PCD_FILE_PATH: destination path of the PCD file
    :param points: numpy array of shape (N, 4); columns are x, y, z, intensity
    """
    point_num = points.shape[0]
    # PCD header (field order matters for downstream readers).
    header = (
        '# .PCD v0.7 - Point Cloud Data file format\n'
        'VERSION 0.7\n'
        'FIELDS x y z intensity\n'
        'SIZE 4 4 4 4\n'
        'TYPE F F F F\n'
        'COUNT 1 1 1 1\n'
        'WIDTH {0}\n'
        'HEIGHT 1\n'
        'VIEWPOINT 0 0 0 1 0 0 0\n'
        'POINTS {0}\n'
        'DATA ascii'.format(point_num)
    )
    # 'w' truncates an existing file, so the old os.remove() call is unneeded;
    # the context manager guarantees the handle is closed even on error.
    with open(PCD_FILE_PATH, 'w') as handle:
        handle.write(header)
        # One point per line, whitespace-separated, matching the ASCII DATA spec.
        for i in range(point_num):
            handle.write('\n{} {} {} {}'.format(
                points[i, 0], points[i, 1], points[i, 2], points[i, 3]))

def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Push one sensor reading onto the shared queue.

    Each queue entry is a (frame, timestamp, sensor_name, raw data) tuple so
    the main loop can match readings to simulation ticks.
    """
    entry = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(entry)

# modify from manual control
def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array

# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    return points

def mkdir_folder(path, s_types=None):
    """Create one sub-directory under *path* per sensor type.

    :param path: root directory for sensor output
    :param s_types: iterable of sub-directory names; defaults to the
        module-level ``sensor_type`` list for backward compatibility
    :return: True (kept for compatibility with existing callers)
    """
    if s_types is None:
        s_types = sensor_type
    for s_type in s_types:
        # exist_ok avoids the race between an isdir() check and makedirs().
        os.makedirs(os.path.join(path, s_type), exist_ok=True)
    return True

def add_camera(world, number, camera_trans, width, height, fov, tick):
    """Spawn an RGB camera sensor and register it with the global queue/list.

    :param world: carla world to spawn into
    :param number: camera index used to label this sensor's queue entries
    :param camera_trans: transform at which the camera is placed
    :param width: image width in pixels
    :param height: image height in pixels
    :param fov: horizontal field of view of the camera
    :param tick: capture period of the camera
    """
    camera_bp = world.get_blueprint_library().find('sensor.camera.rgb')
    for attr, value in (('image_size_x', width),
                        ('image_size_y', height),
                        ('fov', fov),
                        ('sensor_tick', tick)):
        camera_bp.set_attribute(attr, str(value))
    camera = world.spawn_actor(camera_bp, camera_trans, attach_to=None)
    label = "camera_{}".format(number)
    camera.listen(lambda data: sensor_callback(data, sensor_queue, label))
    sensor_list.append(camera)

def _make_732L_lidar_bp(blueprint_library, channels, upper_fov, lower_fov,
                        points_per_second):
    """Build one ray-cast lidar blueprint layer of the 732L.

    All layers share the same range, rotation rate, and zeroed noise/dropoff
    settings; only channel count, vertical FOV band, and point rate differ.
    """
    bp = blueprint_library.find('sensor.lidar.ray_cast')
    bp.set_attribute('channels', channels)
    bp.set_attribute('upper_fov', upper_fov)
    bp.set_attribute('lower_fov', lower_fov)
    bp.set_attribute('points_per_second', points_per_second)
    bp.set_attribute('range', '200')
    bp.set_attribute('rotation_frequency', '10')
    bp.set_attribute('atmosphere_attenuation_rate', '0.004')
    bp.set_attribute('dropoff_general_rate', '0.0')
    bp.set_attribute('dropoff_intensity_limit', '0.0')
    bp.set_attribute('dropoff_zero_intensity', '0.0')
    bp.set_attribute('noise_seed', '0.0')
    bp.set_attribute('noise_stddev', '0.0')
    return bp

def add_732L_lidar(world, lidar_trans, number):
    """Add a Wanji 732L lidar, modelled as four stacked ray-cast layers.

    Each layer covers a distinct vertical FOV band with a proportional point
    rate. All four actors are spawned at *lidar_trans*, stream into the
    global sensor_queue tagged "lidar_<layer>_<number>", and are appended to
    the global sensor_list for later cleanup.

    :param world: carla world to spawn into
    :param lidar_trans: transform at which the lidar stack is placed
    :param number: index used to label this lidar's queue entries
    """
    blueprint_library = world.get_blueprint_library()
    # (label, channels, upper_fov, lower_fov, points_per_second)
    layers = [
        ('16_1', '16', '0', '-9', '288000'),
        ('8_1', '8', '-10', '-17', '144000'),
        ('4_1', '4', '-19', '-25', '72000'),
        ('4_2', '4', '-28', '-37', '72000'),
    ]
    for label, channels, upper_fov, lower_fov, pps in layers:
        bp = _make_732L_lidar_bp(blueprint_library, channels, upper_fov,
                                 lower_fov, pps)
        lidar = world.spawn_actor(bp, lidar_trans, attach_to=None)
        name = "lidar_{}_{}".format(label, number)
        # Bind `name` as a default argument so each lambda keeps its own
        # label (late-binding closures would all see the last value).
        lidar.listen(lambda data, name=name:
                     sensor_callback(data, sensor_queue, name))
        sensor_list.append(lidar)

def main():
    """Replay recorded traffic tracks in CARLA and capture fused lidar frames.

    Connects to a local CARLA server, loads the packaged map, switches the
    world to synchronous mode with a fixed 0.1 s step, spawns a fixed camera
    and a four-layer 732L lidar stack, then steps through the JSON track file
    one timestep per tick: spawning new actors, updating the poses of
    existing ones (optionally snapping yaw to the nearest driving waypoint),
    destroying actors absent from the timestep, and concatenating the
    per-tick lidar returns into one point cloud (optionally saved as .pcd
    and/or visualised with open3d).

    Relies on module-level globals set in the __main__ section: sensor_queue,
    sensor_list, vehicles_id_list, spawed_ids, realdata, SHOW, SAVE_PCD,
    Correction_yaw, camera_trans1, lidar_trans1, IM_WIDTH, IM_HEIGHT.

    NOTE(review): save_pcd_path, point_cloud, vis and to_reset_view_point are
    referenced below but are not defined anywhere in this file — once lidar
    points arrive this raises NameError; confirm they are meant to be created
    in the __main__ section (e.g. when SHOW/SAVE_PCD are enabled).
    """
    #  ====================================== connect to the CARLA server ==============================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # client-side connection timeout in seconds
    world = client.load_world('SDWR0525')   # load the packaged custom map
    # world = client.get_world() # use the currently loaded map instead

    # Hide the auto-generated traffic lights and poles so replayed actors
    # cannot fail to spawn by colliding with them.
    env_objs1 = world.get_environment_objects(carla.CityObjectLabel.TrafficLight)
    env_objs2 = world.get_environment_objects(carla.CityObjectLabel.Poles)
    env_list = []
    for env_obj in env_objs1:
        env_list.append(env_obj.id)
    for env_obj in env_objs2:
        env_list.append(env_obj.id)
    world.enable_environment_objects(env_list, False)
    synchronous_master = False

    try:
        # ==================================== world spectator =============================================
        spectator = world.get_spectator()
        # spectator.set_transform(lidar_trans)

        # # let vehicles obey traffic rules while driving in the world
        # ########################## world settings: fixed step and synchronous mode ######################
        traffic_manager = client.get_trafficmanager(8000)
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        # traffic_manager.global_percentage_speed_difference(20.0)
        if True:
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not settings.synchronous_mode:
                print("开启同步模式")
                # This process becomes the synchronous master and must restore
                # asynchronous mode in the finally block below.
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                synchronous_master = False
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)

        # Weather and sun position: clear sky, no rain, sun overhead.
        weather = carla.WeatherParameters(
            cloudiness=0.0,  # 0-100: 0 is a clear sky, 100 is fully overcast
            precipitation=0.0,  # 0 means no rain, 100 means heavy rain
            sun_altitude_angle=90.0)  # 90 is noon, -90 is midnight
        world.set_weather(weather)

        # =========================== blueprint library for vehicle model lookups
        blueprint_library = world.get_blueprint_library()

        # =========================== instantiate the sensor rig ============================
        # ======================================= sensors ==================================

        add_camera(world, 0, camera_trans1, IM_WIDTH, IM_HEIGHT, fov=90, tick=0.1)

        add_732L_lidar(world, lidar_trans1, 0)

        # Track file: assumes {timestep: {object_id: {'Location': [x, y, z],
        # 'Rotation': [pitch, yaw, roll], 'type': blueprint_id}}} — TODO
        # confirm against the producer of `realdata`.
        with open(realdata) as sim_file:
            sim_data_dic = json.load(sim_file)
            print(type(sim_data_dic))
        real_iterator = sim_data_dic.__iter__()

        while True:

            world.tick()
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, time.time()))
            try:
                splicing = []
                try:
                    timestep = real_iterator.__next__()
                except StopIteration:
                    # Track file exhausted: leave the replay loop.
                    break
                nowids = list(sim_data_dic[timestep].keys())
                print(len(nowids))

                errorids = []

                # Add/update the traffic flow for this timestep.
                for k, v in sim_data_dic[timestep].items():
                    loc = v['Location']
                    rot = v['Rotation']
                    trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                      carla.Rotation(rot[0], rot[1], rot[2]))
                    vehtype = v['type']
                    if k not in spawed_ids.keys():
                        bp1 = world.get_blueprint_library().find(vehtype)
                        try:
                            bp1.set_attribute('color', '0,0,0')
                        except:
                            # Best effort: some blueprints (presumably walkers)
                            # have no 'color' attribute — confirm.
                            pass
                        else:
                            pass

                        batch = [
                                carla.command.SpawnActor(bp1, trans).then(
                                    carla.command.SetSimulatePhysics(carla.command.FutureActor, True))
                            ]
                        response = client.apply_batch_sync(batch, False)[0]
                    
                        if response.error:
                            errorids.append(k)
                        else:
                            # Remember the mapping track-id -> carla actor id.
                            spawed_ids[k] = response.actor_id
                            vehicles_id_list.append(response.actor_id)
                        # if vehtype == 'walker.pedestrian.0013':
                        #     batch = [
                        #         carla.command.SpawnActor(bp1, trans).then(
                        #             carla.command.SetSimulatePhysics(carla.command.FutureActor, False))
                        #     ]
                        #     response = client.apply_batch_sync(batch, False)[0]
                        #     if response.error:
                        #         errorids.append(k)
                        #     else:
                        #         spawed_ids[k] = response.actor_id
                        #     # wid = walkresults.actor_id
                        #     # controlbatch = [carla.command.SpawnActor(walker_controller_bp, carla.Transform(), wid)]
                        #     # controlresults = client.apply_batch_sync(controlbatch, False)[0]
                        #     # cid = controlresults.actor_id
                        #     # cactor = world.get_actor(cid)
                        #     # cactor.start()
                        #     # spawed_ids[k] = cid
                        # else:
                        #     batch = [
                        #         carla.command.SpawnActor(bp1, trans).then(
                        #             carla.command.SetSimulatePhysics(carla.command.FutureActor, False))
                        #     ]
                        #     response = client.apply_batch_sync(batch, False)[0]
                        #     # spawed_ids[k] = response.actor_id
                        #     if response.error:
                        #         errorids.append(k)
                        #     else:
                        #         spawed_ids[k] = response.actor_id
                        #     #     logging.error('Spawn carla actor failed. %s', response.error)
                        #     #     return INVALID_ACTOR_ID
                    else:
                        # Update the pose of an already-spawned actor for this frame.
                        if vehtype == 'walker.pedestrian.0013':
                            cid = spawed_ids[k]
                            cactor = world.get_actor(cid)
                            if Correction_yaw:
                                # Snap yaw to the nearest driving-lane waypoint.
                                v_waypoint = world.get_map().get_waypoint(
                                    cactor.get_location(), project_to_road=True, 
                                    lane_type=(carla.LaneType.Driving))
                                yaw = v_waypoint.transform.rotation.yaw
                                inside_junction = v_waypoint.is_junction
                                if inside_junction:
                                    # Inside a junction the lane yaw is unreliable;
                                    # derive heading from the motion direction instead.
                                    dx = trans.location.x - cactor.get_location().x
                                    dy = trans.location.y - cactor.get_location().y
                                    if dx != 0.0:
                                        yaw = math.atan2(dy, dx) * 180.0 / math.pi
                                    else:
                                        yaw = cactor.get_transform().rotation.yaw
                                trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                        carla.Rotation(rot[0], yaw, rot[2]))

                            cactor.set_transform(trans)

                            # cactor.go_to_location(trans.location)
                            # cactor.set_max_speed(float(walker_speed))
                        else:
                            carlaid = spawed_ids[k]
                            vehicle = world.get_actor(carlaid)
                            if Correction_yaw:
                                # Get the road waypoint nearest to the vehicle's position.
                                v_waypoint = world.get_map().get_waypoint(
                                    vehicle.get_location(), project_to_road=True, 
                                    lane_type=(carla.LaneType.Driving))
                                yaw = v_waypoint.transform.rotation.yaw
                                inside_junction = v_waypoint.is_junction
                                # inside_junction = False
                                if inside_junction:
                                    dx = trans.location.x - vehicle.get_location().x
                                    dy = trans.location.y - vehicle.get_location().y
                                    if dx != 0.0:
                                        yaw = math.atan2(dy, dx) * 180.0 / math.pi
                                    else:
                                        yaw = vehicle.get_transform().rotation.yaw
                                trans = Transform(carla.Location(loc[0], loc[1], loc[2]),
                                        carla.Rotation(rot[0], yaw, rot[2]))

                            vehicle.set_transform(trans)
                            # if vehicle is not None:
                            #     vehicle.set_transform(trans)
                print("已有：", len(spawed_ids))

                # Actors spawned earlier but absent from this timestep get destroyed.
                destodyed_ids = [id for id in spawed_ids if id not in nowids]
                for did in destodyed_ids:
                    carlaid = spawed_ids[did]
                    vehicle = world.get_actor(carlaid)

                    if vehicle is not None:
                        vehicle.destroy()
                    del spawed_ids[did]
                print("更新：", len(nowids), "销毁:", len(destodyed_ids))
                print(errorids)
                print(timestep)

                # Drain one reading per registered sensor for this tick;
                # raises queue.Empty (handled below) if a sensor missed the tick.
                for i in range(0, len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                    splicing.append(_parse_lidar_cb(s_data))
                if splicing:
                    concat_points = np.concatenate(splicing, axis=0)
                    pcd_path = save_pcd_path + "/" + str(w_frame) + ".pcd"
                    # Negate Y — presumably converting CARLA's left-handed frame
                    # to a right-handed one; confirm against consumers of the pcd.
                    concat_points[:, 1] = [-p for p in concat_points[:, 1]]
                    if SAVE_PCD:
                        points2pcd(pcd_path, concat_points)

                    if SHOW:
                        points1 = np.array(concat_points)[:, :3]
                        colors1 = np.array(concat_points)[:, 3]

                        point_cloud.points = o3d.utility.Vector3dVector(points1)
                        points_intensity = np.array(concat_points)[:, 3]  # intensity 
                        # print(points_intensity)
                        # r = 0
                        # b = 0
                        # g = 0
                        colors = []
                        for num in points_intensity:
                        #     intensity = num * 255
                        #     if intensity <= 33:
                        #         r = 0
                        #         b = 255
                        #         g = int(7.727 * intensity)
                        #     elif intensity >33 and intensity <= 66:
                        #         r = 0
                        #         b = int(255 - 7.727 * (intensity - 34))
                        #         g = 255
                        #     elif intensity > 66 and intensity <= 100:
                        #         r = int(7.727 * (intensity - 67))
                        #         b = 0
                        #         g = 255
                        #     elif  intensity > 100 and intensity <= 255:
                        #         r = 255
                        #         b = 0
                        #         g = int(255 - 7.727 * (intensity - 100)/4.697)
                            points_color = [0, 0, 0.8]
                            colors.append(points_color)
                        # print(colors)
                        # Color the cloud by intensity; variation is small, so a
                        # uniform blue [0, 0, 0.8] is used instead of the ramp above.
                        point_cloud.colors = o3d.utility.Vector3dVector(colors)   
                        vis.update_geometry(point_cloud)
                        if to_reset_view_point:
                            vis.reset_view_point(True)
                            to_reset_view_point = False
                        vis.poll_events()
                        vis.update_renderer()
                    ######################
      
            except Empty:
                print("    Some of the sensor information is missed")
            # time.sleep(0.1)

    finally:
        # Restore asynchronous mode only if this process enabled sync mode.
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)

        for sensor in sensor_list:
            sensor.destroy()
        for actor in actor_list:
            if actor.is_alive:
                actor.destroy()
        client.apply_batch([carla.command.DestroyActor(x) for x in vehicles_id_list]) #
        print("All cleaned up!")
        # time.sleep(0.1)


if __name__ == "__main__":
    # Path of the OpenDRIVE network file (not read anywhere in this script).
    word_path = r"/home/wanji/下载/carla_test/net_files/SDOpenDrive.xodr"
    # Sub-folders created by mkdir_folder for each sensor modality.
    sensor_type = ['rgb', 'lidar']
    # Shared state consumed by the sensor callbacks and main().
    sensor_queue = Queue()
    actor_list, sensor_list = [], []
    vehicles_id_list = []
    save_path = '/home/wanji/下载/carla_test/output/real_car_simu/'
    # JSON track file replayed by main().
    realdata = '/home/wanji/下载/carla_test/output/toveiw_track.json'
    # if os.path.exists(save_path):
    #     for fileList in os.walk(save_path):
    #         for name in fileList[2]:
    #             os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)
    #             os.remove(os.path.join(fileList[0], name))
    #     shutil.rmtree(save_path)
    # Mapping of track object id -> spawned carla actor id.
    spawed_ids = {}

    # os.mkdir(save_path)
    IM_WIDTH = 256 * 1
    IM_HEIGHT = 256 * 1
    SHOW = False # whether to display the lidar point cloud live with open3d
    SAVE_PCD = False # whether to save lidar point clouds as .pcd files under save_path
    Correction_yaw = True  # correct tracked actors' yaw using the nearest waypoint's heading
    # NOTE(review): main() also references save_pcd_path / point_cloud / vis /
    # to_reset_view_point, which are never defined here — define them before
    # enabling SAVE_PCD or SHOW, or lidar frames will raise NameError.

    view_transform = Transform(Location(x=371.9938132585651, y=-643.7199381087686, z=43.017), 
                            Rotation(pitch=0.1249, yaw=100.3, roll=0.0))

    camera_trans1 = Transform(Location(x=390.8, y=-607.4, z=42.0), Rotation(pitch=-10.0, yaw=280.0, roll=0.0))
    lidar_trans1 = Transform(Location(x=390.8, y=-607.4, z=42.0), Rotation(pitch=0.0, yaw=0, roll=0.0))

    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
