#!/usr/bin/env python

import carla
import numpy as np
import time
import random
from queue import Queue, Empty
import cv2
import os
import copy
import logging
from carla import Transform, Rotation, Location
import shutil
import stat
import json
from carla import Map
from carla import Vector3D
import math
from carla import ColorConverter as cc
import sys
import csv



def points2pcd(PCD_FILE_PATH, points):
    """Write an (N, 4) point cloud to an ASCII PCD v0.7 file.

    :param PCD_FILE_PATH: destination path; any existing file is overwritten
    :param points: numpy array of shape (N, 4) holding x, y, z, intensity
    """
    # Number of points in the cloud.
    point_num = points.shape[0]
    # 'w' truncates an existing file (replacing the remove-then-append dance),
    # and the context manager guarantees the handle closes even if a write fails.
    with open(PCD_FILE_PATH, 'w') as handle:
        # PCD header (important): field layout must match the data lines below.
        handle.write(
            '# .PCD v0.7 - Point Cloud Data file format\nVERSION 0.7\nFIELDS x y z intensity\nSIZE 4 4 4 4\nTYPE F F F F\nCOUNT 1 1 1 1')
        handle.write('\nWIDTH ' + str(point_num))
        handle.write('\nHEIGHT 1\nVIEWPOINT 0 0 0 1 0 0 0')
        handle.write('\nPOINTS ' + str(point_num))
        handle.write('\nDATA ascii')

        # One "x y z intensity" line per point.
        for i in range(point_num):
            handle.write('\n' + str(points[i, 0]) + ' ' + str(points[i, 1]) + ' '
                         + str(points[i, 2]) + ' ' + str(points[i, 3]))

def get_time_stamp(ct):
    """Format a float epoch time as "HH-MM-SS-mmm".

    :param ct: epoch time in seconds (float), e.g. from time.time()
    :return: local time-of-day string with a millisecond suffix
    """
    # Keep only the time-of-day portion of the formatted local time.
    clock_part = time.strftime("%Y-%m-%d %H-%M-%S", time.localtime(ct)).split(' ')[-1]
    # Fractional seconds expressed as milliseconds (truncated by %d).
    millis = (ct - int(ct)) * 1000
    return "%s-%03d" % (clock_part, millis)

def sensor_callback(sensor_data, sensor_queue, sensor_name):
    """Push a (frame, timestamp, name, data) record onto the shared queue.

    Runs on the sensor's callback thread; the main loop drains the queue.
    """
    record = (sensor_data.frame, sensor_data.timestamp, sensor_name, sensor_data)
    sensor_queue.put(record)

# modify from manual control
def _parse_image_cb(image):
    array = np.frombuffer(image.raw_data, dtype=np.dtype("uint8"))
    array = np.reshape(array, (image.height, image.width, 4))
    array = array[:, :, :3]
    array = array[:, :, ::-1]
    return array

# modify from leaderboard
def _parse_lidar_cb(lidar_data):
    points = np.frombuffer(lidar_data.raw_data, dtype=np.dtype('f4'))
    points = copy.deepcopy(points)
    # print(type(points), points.shape)
    points = np.reshape(points, (int(points.shape[0] / 4), 4))
    # print(type(points), points.shape)
    return points

# modify from world on rail code
def lidar_to_bev(lidar, min_x=-24, max_x=24, min_y=-16, max_y=16, pixels_per_meter=4, hist_max_per_pixel=10):
    xbins = np.linspace(
        min_x, max_x + 1,
               (max_x - min_x) * pixels_per_meter + 1,
    )
    ybins = np.linspace(
        min_y, max_y + 1,
               (max_y - min_y) * pixels_per_meter + 1,
    )
    # Compute histogram of x and y coordinates of points.
    hist = np.histogramdd(lidar[..., :2], bins=(xbins, ybins))[0]
    # Clip histogram
    hist[hist > hist_max_per_pixel] = hist_max_per_pixel
    # Normalize histogram by the maximum number of points in a bin we care about.
    overhead_splat = hist / hist_max_per_pixel * 255.
    # Return splat in X x Y orientation, with X parallel to car axis, Y perp, both parallel to ground.
    return overhead_splat[::-1, :]


# 可视化数据
def visualize_data(rgb, lidars, text_args=(cv2.FONT_HERSHEY_SIMPLEX, 0.3, (255, 255, 255), 1)):
    rgb_canvas = np.array(rgb[..., ::-1])
    # print(rgb_canvas.shape, rgb_canvas.size)
    canvas_list = []
    lidar_canvas = None
    if lidars is not None:
        for lidar in lidars:
            lidar_viz = lidar_to_bev(lidar).astype(np.uint8)
            lidar_viz = cv2.cvtColor(lidar_viz, cv2.COLOR_GRAY2RGB)
            canvas = cv2.resize(lidar_viz.astype(np.uint8), (rgb_canvas.shape[0], rgb_canvas.shape[0]))
            canvas_list.append(canvas)
        lidar_canvas = np.concatenate(canvas_list, axis=1)
    # cv2.putText(canvas, f'yaw angle: {imu_yaw:.3f}', (4, 10), *text_args)
    # cv2.putText(canvas, f'log: {gnss[0]:.3f} alt: {gnss[1]:.3f} brake: {gnss[2]:.3f}', (4, 20), *text_args)
    return lidar_canvas


def mkdir_folder(path):
    """Create one sub-directory under *path* for every entry in the
    module-level ``sensor_type`` list (e.g. 'rgb', 'lidar').

    :param path: base output directory
    :return: True (kept for the original calling convention)
    """
    for s_type in sensor_type:
        # exist_ok removes the separate isdir() check and the race between
        # checking for and creating the directory.
        os.makedirs(os.path.join(path, s_type), exist_ok=True)
    return True


def draw_waypoints(world, waypoints, road_id=None, life_time=50.0):
    """Highlight every waypoint on the given road with a debug marker.

    :param world: CARLA world used for debug drawing
    :param waypoints: list of all map waypoints
    :param road_id: id of the target road segment
    :param life_time: seconds the markers stay visible
    :return: the waypoints whose road_id matched
    """
    matched = [wp for wp in waypoints if wp.road_id == road_id]
    for wp in matched:
        world.debug.draw_string(wp.transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0), life_time=life_time,
                                persistent_lines=True)
    return matched

def compute_distance(location_1, location_2):
    """
    Euclidean distance between 3D points

        :param location_1, location_2: 3D points
    """
    x = location_2.x - location_1.x
    y = location_2.y - location_1.y
    z = location_2.z - location_1.z
    norm = np.linalg.norm([x, y, z]) + np.finfo(float).eps
    return norm

def main():
    """Run a staged rear-end collision scenario in CARLA.

    Connects to a local CARLA server, builds the world from the global
    ``word_path`` OpenDRIVE file, switches to a synchronous fixed-step
    simulation, spawns two vehicles plus a static camera/lidar rig at
    fixed roadside transforms, then ticks the world while driving
    vehicle 1 into the (nearly) stationary vehicle 2.  Each tick the
    non-32-beam lidar returns are merged and written out as an ASCII
    .pcd file.  Relies on module-level globals configured in the
    ``__main__`` section (paths, transforms, actor/sensor lists, queue).
    """
    #  ====================================== connect to the CARLA server =============================================
    client = carla.Client("localhost", 2000)
    client.set_timeout(20.0)  # timeout for this client's server connection
    with open(word_path) as od_file:
        data = od_file.read()
    world = client.generate_opendrive_world(data,
                                            carla.OpendriveGenerationParameters(
                                                wall_height=0))
    # world = client.load_world('Town02')   # load a built-in map instead
    world = client.get_world()
    origin_settings = world.get_settings()
    synchronous_master = False

    if RECORDER:
        # store the recorder log, named by the integer platform timestamp
        pt = world.get_snapshot().timestamp.platform_timestamp
        ptime = int(pt)
        client.start_recorder(recorder_path + str(ptime) + '.log', True)


    try:
        # ==================================== set up the spectator view =============================================
        spectator = world.get_spectator()
        world.debug.draw_string(view_transform.location, 'O', draw_shadow=False,
                                color=carla.Color(r=0, g=255, b=0), life_time=1500,
                                persistent_lines=True)
        spectator.set_transform(view_transform)

        # # let vehicles drive through the world obeying traffic rules
        # ##################################### world settings: fixed time step and synchronous mode ##################################
        # port the Traffic Manager (TM) uses on the server, default 8000
        traffic_manager = client.get_trafficmanager(8000)
        # every TM-managed vehicle keeps at least 2 m to the leading vehicle
        traffic_manager.set_global_distance_to_leading_vehicle(2.0)
        # every TM vehicle drives 40% below its default speed (all limited to 30 km/h by default)
        traffic_manager.global_percentage_speed_difference(40.0)
        if True:  # NOTE(review): leftover always-on toggle for synchronous mode
            # enable TM synchronous mode
            settings = world.get_settings()
            traffic_manager.set_synchronous_mode(True)
            if not settings.synchronous_mode:
                print("开启同步模式")
                synchronous_master = True
                settings.synchronous_mode = True
                settings.fixed_delta_seconds = 0.1
                world.apply_settings(settings)
            else:
                synchronous_master = False

        # # control the weather and time of day (sun position): clear sky, no rain, low sun
        weather = carla.WeatherParameters(
            cloudiness=0.0,  # 0-100: 0 is a clear sky, 100 fully overcast
            precipitation=0.0,  # 0 means no rain, 100 heavy rain
            sun_altitude_angle=10.0)  # 90 is noon, -90 is midnight
        world.set_weather(weather)

        # =========================== look up vehicle models in the blueprint library
        blueprint_library = world.get_blueprint_library()

        # ============================= vehicles =================================

        # add the two collision vehicles (identical black Audi TTs)
        bp1 = world.get_blueprint_library().find('vehicle.audi.tt')
        bp1.set_attribute('color', '0, 0, 0')
        bp2 = world.get_blueprint_library().find('vehicle.audi.tt')
        bp2.set_attribute('color','0, 0, 0')

        ego_vehicle1 = world.spawn_actor(bp1, vehicle_trans1)
        ego_vehicle2 = world.spawn_actor(bp2, vehicle_trans2)
        print("单独放置车辆")
        actor_list.append(ego_vehicle1)
        actor_list.append(ego_vehicle2)
        # ego_vehicle1.set_autopilot(True)
        # carla.VehicleControl() parameters:
        # throttle
        # steer
        # brake
        # hand_brake=True   engage the handbrake

        # Rear-end scenario: vehicle 1 is driving, vehicle 2 is stopped, and
        # vehicle 1 rear-ends vehicle 2.  On impact vehicle 1 bounces back while
        # vehicle 2 barely moves (likely because vehicle 1 is slow), then
        # vehicle 1 pushes vehicle 2 forward.
        # ego_vehicle1.apply_control(carla.VehicleControl(throttle=1.0, steer=0.0))
        # ego_vehicle2.apply_control(carla.VehicleControl(throttle=0.1, steer=0.0))
        phy_control = ego_vehicle2.get_physics_control()
        # NOTE(review): halving the mass and lowering the centre of mass
        # presumably makes the struck car react more visibly -- confirm intent.
        phy_control.mass *= 0.5
        phy_control.center_of_mass += carla.Vector3D(0.0, 0.0, -0.5)

        ego_vehicle2.set_physics_control(phy_control)


        # =========================== instantiate the sensor models ============================
        # ======================================= sensors ==================================
        cam_bp = blueprint_library.find('sensor.camera.rgb')  # camera
        lidar_bp_16 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        lidar_bp_8 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        lidar_bp_4_1 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        lidar_bp_4_2 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        lidar_bp_32 = blueprint_library.find('sensor.lidar.ray_cast')  # lidar
        # gnss_bp = world.get_blueprint_library().find('sensor.other.gnss')
        # imu_bp = world.get_blueprint_library().find('sensor.other.imu')


        # set the attributes of the camera
        cam_bp.set_attribute("image_size_x", "{}".format(IM_WIDTH))
        cam_bp.set_attribute("image_size_y", "{}".format(IM_HEIGHT))
        cam_bp.set_attribute("fov", "90")
        cam_bp.set_attribute('sensor_tick', '0.1')

        # The five lidars together emulate one multi-tier unit: each blueprint
        # covers a different vertical FOV band and beam count.  The rotation
        # frequency matches the fixed step, i.e. one full revolution per tick.
        lidar_bp_32.set_attribute('channels', '32')
        lidar_bp_32.set_attribute('upper_fov', '0')
        lidar_bp_32.set_attribute('lower_fov', '-37')
        lidar_bp_32.set_attribute('dropoff_general_rate', '0.0')
        lidar_bp_32.set_attribute('points_per_second', '576000')
        lidar_bp_32.set_attribute('range', '100')
        lidar_bp_32.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))

        lidar_bp_16.set_attribute('channels', '16')
        lidar_bp_16.set_attribute('upper_fov', '0')
        lidar_bp_16.set_attribute('lower_fov', '-9')
        lidar_bp_16.set_attribute('dropoff_general_rate', '0.0')
        lidar_bp_16.set_attribute('points_per_second', '288000')
        lidar_bp_16.set_attribute('range', '100')
        lidar_bp_16.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))

        lidar_bp_8.set_attribute('channels', '8')
        lidar_bp_8.set_attribute('upper_fov', '-10')
        lidar_bp_8.set_attribute('lower_fov', '-17')
        lidar_bp_8.set_attribute('dropoff_general_rate', '0.0')
        lidar_bp_8.set_attribute('points_per_second', '144000')
        lidar_bp_8.set_attribute('range', '100')
        lidar_bp_8.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))

        lidar_bp_4_1.set_attribute('channels', '4')
        lidar_bp_4_1.set_attribute('upper_fov', '-19')
        lidar_bp_4_1.set_attribute('lower_fov', '-25')
        lidar_bp_4_1.set_attribute('dropoff_general_rate', '0.0')
        lidar_bp_4_1.set_attribute('points_per_second', '72000')
        lidar_bp_4_1.set_attribute('range', '100')
        lidar_bp_4_1.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))

        lidar_bp_4_2.set_attribute('channels', '4')
        lidar_bp_4_2.set_attribute('upper_fov', '-28')
        lidar_bp_4_2.set_attribute('lower_fov', '-37')
        lidar_bp_4_2.set_attribute('dropoff_general_rate', '0.0')
        lidar_bp_4_2.set_attribute('points_per_second', '72000')
        lidar_bp_4_2.set_attribute('range', '100')
        lidar_bp_4_2.set_attribute('rotation_frequency', str(int(1 / settings.fixed_delta_seconds)))


        # All sensors are world-fixed (attach_to=None), placed by the global transforms.
        cam01 = world.spawn_actor(cam_bp, camera01_trans,  attach_to=None)
        cam02 = world.spawn_actor(cam_bp, camera02_trans,  attach_to=None)
        lidar_16 = world.spawn_actor(lidar_bp_16, lidar_trans, attach_to=None)
        lidar_8 = world.spawn_actor(lidar_bp_8, lidar_trans, attach_to=None)
        lidar_4_1 = world.spawn_actor(lidar_bp_4_1, lidar_trans, attach_to=None)
        lidar_4_2 = world.spawn_actor(lidar_bp_4_2, lidar_trans, attach_to=None)
        lidar_32 = world.spawn_actor(lidar_bp_32, lidar_trans, attach_to=None)
        # gnss01 = world.spawn_actor(gnss_bp, gnss_trans, attach_to=ego_vehicle1)
        # gnss02 = world.spawn_actor(gnss_bp, gnss_trans, attach_to=ego_vehicle2)
        # imu01 = world.spawn_actor(imu_bp, imu_trans, attach_to=ego_vehicle1)
        # imu02 = world.spawn_actor(imu_bp, imu_trans, attach_to=ego_vehicle2)

        # Each sensor feeds the shared queue; the name encodes "<type>_<id>".
        cam01.listen(lambda data: sensor_callback(data, sensor_queue, "rgb_camera01"))
        cam02.listen(lambda data: sensor_callback(data, sensor_queue, "rgb_camera02"))
        lidar_16.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_16"))
        lidar_8.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_8"))
        lidar_4_1.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_1"))
        lidar_4_2.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_4_2"))
        lidar_32.listen(lambda data: sensor_callback(data, sensor_queue, "lidar_32"))
        # gnss01.listen(lambda data: sensor_callback(data, sensor_queue, "other_gnss01"))
        # gnss02.listen(lambda data: sensor_callback(data, sensor_queue, "other_gnss02"))
        # imu01.listen(lambda data: sensor_callback(data, sensor_queue, "imu_1"))
        # imu02.listen(lambda data: sensor_callback(data, sensor_queue, "imu_2"))

        sensor_list.append(cam01)
        sensor_list.append(cam02)
        sensor_list.append(lidar_16)
        sensor_list.append(lidar_8)
        sensor_list.append(lidar_4_1)
        sensor_list.append(lidar_4_2)
        sensor_list.append(lidar_32)
        # sensor_list.append(gnss01)
        # sensor_list.append(gnss02)
        # sensor_list.append(imu01)
        # sensor_list.append(imu02)


        # Main simulation loop: one world tick per iteration (fixed 0.1 s step).
        while True:
            # print(sensor_queue.qsize())
            world.tick()
            w_frame = world.get_snapshot().frame
            p_timestamp = world.get_snapshot().timestamp.platform_timestamp
            w_timestamp = get_time_stamp(p_timestamp)
            print("\nWorld's frame:{0}, time: {1}".format(w_frame, w_timestamp))

            # ego_vehicle1.apply_control(carla.VehicleControl(throttle=1.0, steer=0.0))


            # speed (m/s): magnitude of the velocity vector, always non-negative
            v1 = ego_vehicle1.get_velocity()
            ego_vehicle1_v = math.sqrt(v1.x**2 + v1.y**2 + v1.z**2)
            print(str(ego_vehicle1_v))
            v2 = ego_vehicle2.get_velocity()
            ego_vehicle2_v = math.sqrt(v2.x**2 + v2.y**2 + v2.z**2)
            print(str(ego_vehicle2_v))

            # Until the struck car reaches 2 m/s keep vehicle 1 accelerating
            # into it; afterwards brake both vehicles hard.
            if ego_vehicle2_v < 2:
                ego_vehicle1.apply_control(carla.VehicleControl(throttle=1.0, steer=0.0))
                ego_vehicle2.apply_control(carla.VehicleControl(throttle=0.2, steer=0.0))
            else:
                ego_vehicle1.apply_control(carla.VehicleControl(throttle=0.0, steer=0.0, brake=1.0, hand_brake=True))
                ego_vehicle2.apply_control(carla.VehicleControl(throttle=0.0, steer=0.0, brake=1.0, hand_brake=True))

            # Inter-vehicle distance, printed for monitoring the approach.
            location1 = ego_vehicle1.get_location()
            location2 = ego_vehicle2.get_location()
            ss = compute_distance(location1, location2)
            print(ss)
            # # acceleration info
            # acc_x1 = ego_vehicle1.get_acceleration.x
            # acc_y1 = ego_vehicle1.get_acceleration.y
            # acc_z1 = ego_vehicle1.get_acceleration.z

            # acc_x2 = ego_vehicle2.get_acceleration.x
            # acc_y2 = ego_vehicle2.get_acceleration.y
            # acc_z2 = ego_vehicle2.get_acceleration.z

            # # angular velocity info
            # ang_v_x1 = ego_vehicle1.get_angular_velocity.x
            # ang_v_y1 = ego_vehicle1.get_angular_velocity.y
            # ang_v_z1 = ego_vehicle1.get_angular_velocity.z

            # ang_v_x2 = ego_vehicle2.get_angular_velocity.x
            # ang_v_y2 = ego_vehicle2.get_angular_velocity.y
            # ang_v_z2 = ego_vehicle2.get_angular_velocity.z

            # # heading (yaw) info
            # angle_yaw1 = ego_vehicle1.get_transform.rotation.yaw
            # angle_yaw2 = ego_vehicle2.get_transform.rotation.yaw



            try:
                rgbs = []
                lidars = []
                splicing = []
                imu1 = []
                imu2 = []

                # save the sensor data: expect exactly one item per sensor per tick
                for i in range(0, len(sensor_list)):
                    s_frame, s_timestamp, s_name, s_data = sensor_queue.get(True, 1.0)
                    # NOTE(review): shadows the module-level ``sensor_type`` list used by mkdir_folder
                    sensor_type = s_name.split('_')[0]
                    # print("    Frame: %d   Sensor: %s" % (s_frame, s_name))
                    if sensor_type == 'rgb':
                        rgbs.append(_parse_image_cb(s_data))
                        # s_data.save_to_disk(
                        #     save_path + "jpg/" + str(w_timestamp) + "_" + s_name + ".png")
                    elif sensor_type == 'lidar':
                        # merge every lidar except the 32-beam reference unit
                        if s_name != 'lidar_32':
                            splicing.append(_parse_lidar_cb(s_data))
                        else:
                            lidars.append(_parse_lidar_cb(s_data))
                            # s_data.save_to_disk(save_path + str(w_frame) + "_" + s_name + ".ply")

                    #  imu data written out in csv format
                    # elif sensor_type == 'imu':
                    #     if s_name == 'imu_1':
                    #         imu1.extend([s_timestamp, s_data.accelerometer.x, s_data.accelerometer.y,
                    #                     s_data.accelerometer.z, s_data.compass, s_data.gyroscope.x,
                    #                     s_data.gyroscope.y, s_data.gyroscope.z])
                    #         with open(imu01_output_file, 'a') as f1:
                    #             writer = csv.writer(f1)
                    #             writer.writerow(imu1)
                    #     else:
                    #         imu2.extend([s_timestamp, s_data.accelerometer.x, s_data.accelerometer.y,
                    #                     s_data.accelerometer.z, s_data.compass, s_data.gyroscope.x,
                    #                     s_data.gyroscope.y, s_data.gyroscope.z])
                    #         with open(imu02_output_file, 'a') as f2:
                    #             writer = csv.writer(f2)
                    #             writer.writerow(imu2)

                if splicing:
                    concat_points = np.concatenate(splicing, axis=0)
                    # NOTE(review): concat_points1 is only consumed by the
                    # commented-out visualization below
                    concat_points1 = np.concatenate(splicing, axis=0)
                    # pcd_path = save_path + str(w_timestamp) + "_" + "splice" + ".pcd"
                    # points2pcd(pcd_path, concat_points)
                    # Flip the Y axis before export -- presumably converting
                    # CARLA's left-handed frame to a right-handed one; confirm.
                    concat_points[:, 1] = [-p for p in concat_points[:, 1]]
                    pcd_path = save_path + str(w_timestamp) + "_" + "460" + ".pcd"
                    points2pcd(pcd_path, concat_points)

                # sensor data visualization
                # rgb = np.concatenate(rgbs, axis=1)[..., :3]
                # lidar32 = visualize_data(rgb, lidars)
                # lidarsplice = visualize_data(rgb, [concat_points1])
                # cv2.imshow('rgb_vizs', rgb)
                # cv2.imshow('lidar_32', lidar32)
                # cv2.imshow('lidar_splice', lidarsplice)
                # cv2.waitKey(1)

            except Empty:
                print("[Warning] Some of the sensor information is missed")
            time.sleep(0.1)

    finally:
        # Restore the server state and destroy everything we spawned, even on error.
        world.apply_settings(origin_settings)
        if synchronous_master:
            settings = world.get_settings()
            settings.synchronous_mode = False
            settings.fixed_delta_seconds = None
            world.apply_settings(settings)

        for sensor in sensor_list:
            sensor.destroy()
        for actor in actor_list:
            if actor.is_alive:
                actor.destroy()
        # client.apply_batch([carla.command.DestroyActor(x) for x in actor_list])
        print("All cleaned up!")
        if RECORDER:
            # stop the recorder
            client.stop_recorder()

if __name__ == "__main__":
    # NOTE(review): "word_path" looks like a typo for "world_path" -- kept as-is.
    word_path = r"/home/wanji/下载/carla_test/wanji_0701.xodr"  # custom OpenDRIVE map
    sensor_type = ['rgb', 'lidar']  # sensor categories; one output sub-folder each (see mkdir_folder)
    sensor_queue = Queue()  # filled by sensor callbacks, drained in main()'s loop
    actor_list, sensor_list = [], []  # spawned actors/sensors; torn down in main()'s finally block
    save_path = '/home/wanji/下载/carla_test/output/real_car_simu/'  # .pcd output directory
    recorder_path = '/home/wanji/carla/recorder_files/'  # CARLA recorder .log directory
    # imu_output_path = '/home/wanji/下载/carla_test/output/csv_output/'
    # imu01_output_file = imu_output_path + 'vehicle01.csv'
    # imu02_output_file = imu_output_path + 'vehicle02.csv'
    # imu_csv_head = ['timestamp', 'acc_x', 'acc_y', 'acc_z', 'compass', 'ang_x', 'ang_y', 'ang_z']
    # with open(imu01_output_file, 'w') as f1:
    #     writer = csv.writer(f1)
    #     writer.writerow(imu_csv_head)
    # with open(imu02_output_file, 'w') as f2:
    #     writer = csv.writer(f2)
    #     writer.writerow(imu_csv_head)

    # realdata = '/home/wanji/下载/carla_test/output/toveiw.json'
    # if os.path.exists(save_path):
    #     for fileList in os.walk(save_path):
    #         for name in fileList[2]:
    #             os.chmod(os.path.join(fileList[0], name), stat.S_IWRITE)
    #             os.remove(os.path.join(fileList[0], name))
    #     shutil.rmtree(save_path)
    # spawed_ids = {}

    # os.mkdir(save_path)
    # camera resolution in pixels
    IM_WIDTH = 256 * 1
    IM_HEIGHT = 256 * 1

    RECORDER = False  # set True to capture a CARLA recorder log in main()

    # Spectator viewpoint above the scene (pitch=-90 looks straight down).
    view_transform = Transform(Location(x=320, y=-106, z=80),
                               Rotation(pitch=-90, yaw=-13, roll=0.000000))
    # Both cameras share the same elevated, downward-tilted pose.
    camera01_trans = Transform(Location(x=241.61057899409823, y=-90.90056100583038, z=45.3),
                               Rotation(pitch=-45, yaw=77.0, roll=0.000000))
    camera02_trans = Transform(Location(x=241.61057899409823, y=-90.90056100583038, z=45.3),
                               Rotation(pitch=-45, yaw=77.0, roll=0.000000))

    # Single mounting pose shared by all five lidar blueprints.
    lidar_trans = Transform(Location(x=325, y=-92, z=42.8),
                               Rotation(pitch=0, yaw=77, roll=0.000000))

    # Vehicle 1 (the striking car) spawns behind vehicle 2 on the same heading.
    vehicle_trans1 = Transform(Location(x=290, y=-95, z=38.0),
                               Rotation(pitch=0.000000, yaw=-15, roll=0.000000))

    # Vehicle 2 (the struck car) spawns ahead, nearly stationary during the run.
    vehicle_trans2 = Transform(Location(x=325, y=-104.5, z=38.0),
                               Rotation(pitch=0.000000, yaw=-15, roll=0.000000))

    try:
        main()
    except KeyboardInterrupt:
        print(' - Exited by user.')
