from driving_gym.environment.env import DrivingGym
from driving_gym.environment.scenario.config import CarlaScenarioConfig, RoutingTask
from driving_gym.misc.wrappers.flatten_space_wrapper import FlattenSpaceWrapper
from driving_gym.misc.wrappers.single_agent_wrapper import SingleAgentWrapper
from driving_gym.simulation.adapter.carla import CarlaActorConfig, CarlaConfig


def make_carla_env(
    config: dict = None, single_agent: bool = False, flatten_obs: bool = False
):
    """Build a CARLA ``DrivingGym`` environment from a flat config dict.

    Parameters
    ----------
    config:
        Optional overrides. Recognized keys (with defaults):
        ``host`` ("localhost"), ``port`` (2000), ``rolename`` ("hero"),
        ``map_name`` ("Town01"), ``weather`` ([0..4] weather preset ids),
        ``num_vehicles`` / ``num_walkers`` ([80, 160] spawn ranges),
        ``traffic_range`` (inf, spawn radius around agents),
        ``max_steps`` (None -> 1500-step episode limit),
        ``discrete_action`` (False), ``reward_type``
        ("carla_default_reward"), ``done_type`` ("carla_default_done").
    single_agent:
        Wrap the env in ``SingleAgentWrapper`` so it exposes a
        single-agent Gym-style interface.
    flatten_obs:
        Wrap the env in ``FlattenSpaceWrapper`` to flatten observations.

    Returns
    -------
    The constructed (and possibly wrapped) environment instance.
    """
    config = config or {}
    host = config.get("host", "localhost")
    port = config.get("port", 2000)
    rolename = config.get("rolename", "hero")
    map_name = config.get("map_name", "Town01")
    weather = config.get("weather", list(range(5)))
    num_vehicles = config.get("num_vehicles", [80, 160])
    num_walkers = config.get("num_walkers", [80, 160])
    traffic_range = config.get("traffic_range", float("inf"))
    max_steps = config.get("max_steps", None)
    discrete_action = config.get("discrete_action", False)
    reward_type = config.get("reward_type", "carla_default_reward")
    done_type = config.get("done_type", "carla_default_done")

    # Synchronous simulation at a fixed 20 Hz tick; the spectator camera
    # follows the ego vehicle identified by `rolename`.
    carla_config = (
        CarlaConfig()
        .connection(host=host, port=port)
        .synchronization(sync_mode=True, fixed_delta_seconds=0.05)
        .spectator(follow_actor=rolename)
    )

    hero_config = CarlaActorConfig().blueprint(
        "vehicle.tesla.model3", rolename=rolename
    )

    hero_tasks = [RoutingTask(random=True)]

    # Episode step limit: caller-provided value wins, otherwise 1500.
    # Loop-invariant, so computed once rather than per scenario.
    step_limit = 1500 if max_steps is None else max_steps

    # One scenario per hero task; each scenario shares the same map,
    # weather, traffic, and sensor setup.
    scenarios = []
    for task in hero_tasks:
        scenarios.append(
            (
                CarlaScenarioConfig()
                .environment(map_name=map_name, weather=weather, max_steps=step_limit)
                .add_actor(rolename, hero_config.to_dict())
                .add_task(rolename, task.to_dict())
                .traffic(
                    num_vehicles,
                    num_walkers,
                    spawn_near_agents=True,
                    spawn_radius=traffic_range,
                )
                .planner(enable=True, endless=True)
                .default_sensor(attach=True)
            ).to_dict()
        )

    env_config = {
        "scenarios": scenarios,
        "adapter": carla_config.to_dict(),
        "agents": {
            rolename: {
                "obs": {
                    "framestack": 1,
                    "frameskip": 4,
                    "items": {
                        "semantic": {"type": "kinematic_obs"},
                        "waypoint": {
                            "type": "waypoint_obs",
                            "gps": False,
                        },
                    },
                },
                "action": {
                    "type": "carla_vehicle_action",
                    "is_discrete": discrete_action,
                },
                "reward": {"type": reward_type},
                "done": {"type": done_type},
            }
        },
    }

    env = DrivingGym(env_config)
    if single_agent:
        env = SingleAgentWrapper(env)
    if flatten_obs:
        env = FlattenSpaceWrapper(env)
    return env


if __name__ == "__main__":
    # Smoke test: run a few episodes with random actions in a lightly
    # trafficked town, then make sure the simulator is shut down.
    demo_config = {
        "rolename": "hero",
        "discrete_action": True,
        "num_vehicles": [20, 40],
        "num_walkers": [20, 40],
    }
    env = make_carla_env(demo_config, single_agent=True)

    try:
        for _episode in range(10):
            obs, info = env.reset()
            terminated = truncated = False

            # Gymnasium-style 5-tuple step loop: keep acting until the
            # episode terminates or is truncated.
            while not (terminated or truncated):
                action = env.action_space.sample()
                obs, reward, terminated, truncated, info = env.step(action)
                env.render()
    finally:
        # Always release the CARLA connection, even on error/interrupt.
        env.close()
