from driving_gym.environment.env import DrivingGym
from driving_gym.environment.scenario.config import CarlaScenarioConfig, RoutingTask
from driving_gym.misc.wrappers.flatten_space_wrapper import FlattenSpaceWrapper
from driving_gym.misc.wrappers.single_agent_wrapper import SingleAgentWrapper
from driving_gym.simulation.adapter.carla import (
    CarlaActorConfig,
    CarlaConfig,
    CarlaSensorConfig,
)


def _rgb_camera_config(rolename: str, image_x: int, image_y: int, yaw=None):
    """Build a roof-mounted RGB camera sensor config.

    The camera is attached to the actor named *rolename* at a fixed mount
    point (x=1.3, z=2.3). When *yaw* is None the "yaw" key is omitted from
    the spawn point entirely (matching the original front-camera config);
    otherwise it is included to rotate the mount.
    """
    spawn_point = {"x": 1.3, "y": 0.0, "z": 2.3}
    if yaw is not None:
        spawn_point["yaw"] = yaw
    return (
        CarlaSensorConfig()
        .blueprint(
            type="rgb",
            attributes={
                "image_size_x": image_x,
                "image_size_y": image_y,
                "fov": 60,
            },
        )
        .spawning(parent=rolename, spawn_point=spawn_point)
    )


def _hero_actor_config(rolename: str, image_x: int, image_y: int):
    """Build the ego-vehicle actor config with its full sensor suite.

    Sensors: three 60-degree-FOV RGB cameras (front, left at yaw=-60,
    right at yaw=+60) plus GNSS, IMU and speedometer, all parented to
    the hero actor.
    """
    gnss = CarlaSensorConfig().blueprint(type="gnss").spawning(parent=rolename)
    imu = CarlaSensorConfig().blueprint(type="imu").spawning(parent=rolename)
    speedometer = (
        CarlaSensorConfig().blueprint(type="speedometer").spawning(parent=rolename)
    )
    return (
        CarlaActorConfig()
        .blueprint("vehicle.tesla.model3", rolename=rolename)
        .add_sensor("rgb_front", _rgb_camera_config(rolename, image_x, image_y).to_dict())
        .add_sensor(
            "rgb_left", _rgb_camera_config(rolename, image_x, image_y, yaw=-60).to_dict()
        )
        .add_sensor(
            "rgb_right", _rgb_camera_config(rolename, image_x, image_y, yaw=60).to_dict()
        )
        .add_sensor("gnss", gnss.to_dict())
        .add_sensor("imu", imu.to_dict())
        .add_sensor("speedometer", speedometer.to_dict())
    )


def _build_scenarios(
    map_name: str,
    rolename: str,
    hero_config,
    num_vehicles: int,
    num_walkers: int,
    max_steps,
):
    """Build one scenario dict per routing task (short/medium/long routes).

    When *max_steps* is None each scenario's step limit scales with route
    difficulty (500, 1000, 1500); otherwise *max_steps* applies uniformly.
    """
    hero_tasks = [
        RoutingTask().randomize(min_distance=50, max_distance=200),  # Short distance
        RoutingTask().randomize(min_distance=100, max_distance=500),  # Medium distance
        RoutingTask().randomize(min_distance=300, max_distance=800),  # Long distance
    ]
    scenarios = []
    for i, task in enumerate(hero_tasks):
        step_limit = max_steps if max_steps is not None else 500 * (i + 1)
        scenarios.append(
            (
                CarlaScenarioConfig()
                .environment(map_name=map_name, max_steps=step_limit)
                .add_actor(rolename, hero_config.to_dict())
                .add_task(rolename, task.to_dict())
                .traffic(num_vehicles, num_walkers, spawn_near_agents=True)
                .planner(enable=True)
                .default_sensor(attach=True)
            ).to_dict()
        )
    return scenarios


def _agent_config(rolename: str, image_x: int, image_y: int, reward_type, done_type):
    """Build the per-agent observation/action/reward/done/render config.

    Observations: the three camera views, a semantic observation fused from
    GNSS/IMU/speedometer, and GPS-based route waypoints, frame-skipped by 4.
    """

    def image_obs(source):
        # All camera observations share the same size; only the source differs.
        return {
            "type": "image_obs",
            "source": source,
            "width": image_x,
            "height": image_y,
        }

    return {
        "obs": {
            "framestack": 1,
            "frameskip": 4,
            "items": {
                "front_view": image_obs("rgb_front"),
                "left_view": image_obs("rgb_left"),
                "right_view": image_obs("rgb_right"),
                "semantic": {
                    "type": "sensor_semantic_obs",
                    "gnss_source": "gnss",
                    "imu_source": "imu",
                    "speed_source": "speedometer",
                },
                "waypoint": {
                    "type": "waypoint_obs",
                    "gps": True,
                },
            },
        },
        "action": {"type": "carla_vehicle_action", "is_discrete": False},
        "reward": {"type": reward_type},
        "done": {"type": done_type},
        "render": ["rgb_left", "rgb_front", "rgb_right"],
    }


def make_carla_env(
    config: dict = None, single_agent: bool = False, flatten_obs: bool = False
):
    """Create a DrivingGym CARLA environment from a flat options dict.

    Args:
        config: Optional overrides. Recognized keys (with defaults):
            host ("localhost"), port (2000), rolename ("hero"),
            map_name ("Town03"), num_vehicles (20), num_walkers (8),
            image_x (224), image_y (224), max_steps (None -> per-scenario
            scaling), reward_type ("carla_default_reward"),
            done_type ("carla_default_done").
        single_agent: Wrap the env with SingleAgentWrapper.
        flatten_obs: Wrap the env with FlattenSpaceWrapper (applied after
            single-agent wrapping, if both are requested).

    Returns:
        The (possibly wrapped) DrivingGym environment.
    """
    config = config or {}
    host = config.get("host", "localhost")
    port = config.get("port", 2000)
    rolename = config.get("rolename", "hero")
    map_name = config.get("map_name", "Town03")
    num_vehicles = config.get("num_vehicles", 20)
    num_walkers = config.get("num_walkers", 8)
    image_x = config.get("image_x", 224)
    image_y = config.get("image_y", 224)
    max_steps = config.get("max_steps")
    reward_type = config.get("reward_type", "carla_default_reward")
    done_type = config.get("done_type", "carla_default_done")

    carla_config = (
        CarlaConfig()
        .connection(host=host, port=port)
        .synchronization(sync_mode=True, fixed_delta_seconds=0.05)
        .spectator(follow_actor=rolename)
    )

    hero_config = _hero_actor_config(rolename, image_x, image_y)

    env_config = {
        "scenarios": _build_scenarios(
            map_name, rolename, hero_config, num_vehicles, num_walkers, max_steps
        ),
        "adapter": carla_config.to_dict(),
        "agents": {
            rolename: _agent_config(rolename, image_x, image_y, reward_type, done_type)
        },
        "visualizer": {
            "image_size_x": image_x,
            "image_size_y": image_y,
        },
    }

    env = DrivingGym(env_config)
    if single_agent:
        env = SingleAgentWrapper(env)
    if flatten_obs:
        env = FlattenSpaceWrapper(env)
    return env
