import numpy as np
import pygame

from driving_gym.environment.env import DrivingGym
from driving_gym.environment.scenario.config import CarlaScenarioConfig, RoutingTask
from driving_gym.misc.roach.roach_birdview_wrapper import RlBirdviewWrapper
from driving_gym.misc.wrappers.flatten_space_wrapper import FlattenSpaceWrapper
from driving_gym.misc.wrappers.single_agent_wrapper import SingleAgentWrapper
from driving_gym.simulation.adapter.carla import CarlaActorConfig, CarlaConfig


def make_carla_env(
    config: dict = None,
    single_agent: bool = False,
    flatten_obs: bool = False,
    roach_wrapper: bool = False,
    input_states: list = None,
    tasks: list = None,
):
    """Create a CARLA ``DrivingGym`` environment with Roach-style observations.

    Args:
        config: Optional overrides for connection/scenario defaults. Recognized
            keys: ``host``, ``port``, ``rolename``, ``autopilot``, ``map_name``,
            ``weather``, ``num_vehicles``, ``num_walkers``, ``max_steps``,
            ``endless``, ``discrete_action``, ``reward_type``, ``done_type``.
        single_agent: Wrap the env in ``SingleAgentWrapper``. Ignored when
            ``roach_wrapper`` is set (the Roach wrapper takes precedence).
        flatten_obs: Wrap the (possibly already wrapped) env in
            ``FlattenSpaceWrapper``.
        roach_wrapper: Wrap the env in ``RlBirdviewWrapper``.
        input_states: State keys passed to the Roach wrapper; defaults to
            ``["control", "vel_xy"]``.
        tasks: Optional routing tasks. Each entry may be a ``RoutingTask``,
            a ``{"start": ..., "end": ...}`` dict, or a ``(start, end)``
            pair. ``None`` means a single random route.

    Returns:
        The configured (and possibly wrapped) environment instance.

    Raises:
        ValueError: If an entry in ``tasks`` has an unsupported format.
    """
    config = config or {}
    host = config.get("host", "localhost")
    port = config.get("port", 2000)
    rolename = config.get("rolename", "hero")
    autopilot = config.get("autopilot", False)
    map_name = config.get("map_name", "Town01")
    # Default: weather preset ids 0-4.
    weather = config.get("weather", list(range(5)))
    num_vehicles = config.get("num_vehicles", [80, 160])
    num_walkers = config.get("num_walkers", [80, 160])
    max_steps = config.get("max_steps", None)
    endless = config.get("endless", False)
    discrete_action = config.get("discrete_action", False)
    reward_type = config.get("reward_type", "carla_roach_reward")
    done_type = config.get("done_type", "carla_roach_done")
    input_states = input_states or ["control", "vel_xy"]

    carla_config = (
        CarlaConfig()
        .connection(host=host, port=port)
        # Synchronous mode at a fixed 0.05 s step (20 Hz simulation).
        .synchronization(sync_mode=True, fixed_delta_seconds=0.05)
        .spectator(follow_actor=rolename)
    )

    hero_config = CarlaActorConfig().blueprint(
        "vehicle.tesla.model3", rolename=rolename, autopilot=autopilot
    )

    if tasks is None:
        hero_tasks = [RoutingTask(random=True)]
    else:
        hero_tasks = []
        for task in tasks:
            # Check RoutingTask first so a tuple/dict-based RoutingTask
            # implementation is not mis-parsed as a raw (start, end) pair
            # (which would drop any extra fields such as `random`).
            if isinstance(task, RoutingTask):
                t = task
            elif isinstance(task, dict):
                t = RoutingTask(start=task["start"], end=task["end"])
            elif isinstance(task, (tuple, list)):
                t = RoutingTask(start=task[0], end=task[1])
            else:
                raise ValueError(f"Invalid task format: {task}")
            hero_tasks.append(t)

    # One scenario per routing task; all share the same hero and traffic
    # settings. The per-episode step limit is loop-invariant, so hoist it.
    step_limit = max_steps if max_steps is not None else 1500
    scenarios = []
    for task in hero_tasks:
        scenarios.append(
            (
                CarlaScenarioConfig()
                .environment(map_name=map_name, weather=weather, max_steps=step_limit)
                .add_actor(rolename, hero_config.to_dict())
                .add_task(rolename, task.to_dict())
                .traffic(
                    num_vehicles,
                    num_walkers,
                    spawn_near_agents=True,
                    spawn_radius=500,
                )
                .planner(enable=False, endless=endless)
                .default_sensor(attach=False)
            ).to_dict()
        )

    env_config = {
        "scenarios": scenarios,
        "adapter": carla_config.to_dict(),
        "agents": {
            rolename: {
                "obs": {
                    "framestack": 1,
                    "frameskip": 1,
                    "items": {
                        "roach": {
                            "type": "carla_roach_obs",
                            # Roach/ChauffeurNet birdview rasterization settings.
                            "birdview": {
                                "module": "birdview.chauffeurnet",
                                "width_in_pixels": 192,
                                "pixels_ev_to_bottom": 40,
                                "pixels_per_meter": 5.0,
                                "history_idx": [-16, -11, -6, -1],
                                "scale_bbox": True,
                                "scale_mask_col": 1.0,
                            },
                            "speed": {"module": "actor_state.speed"},
                            "control": {"module": "actor_state.control"},
                            "velocity": {"module": "actor_state.velocity"},
                        }
                    },
                },
                "action": {
                    "type": "carla_vehicle_action",
                    "is_discrete": discrete_action,
                },
                "reward": {"type": reward_type},
                "done": {"type": done_type},
            }
        },
    }

    env = DrivingGym(env_config)
    # NOTE(review): the Roach wrapper intentionally shadows `single_agent`;
    # presumably it already exposes a single-agent interface — confirm.
    if roach_wrapper:
        env = RlBirdviewWrapper(env, rolename, input_states)
    elif single_agent:
        env = SingleAgentWrapper(env)
    if flatten_obs:
        env = FlattenSpaceWrapper(env)
    return env


if __name__ == "__main__":
    # Smoke test: run a few autopilot-driven episodes and render the Roach
    # birdview observation in a pygame window.
    env = make_carla_env(
        {
            "rolename": "hero",
            "discrete_action": True,
            "num_vehicles": [20, 40],
            "num_walkers": [20, 40],
            "autopilot": True,
        },
        single_agent=True,
        roach_wrapper=False,
    )
    pygame.init()
    pygame.display.set_caption("Driving Gym")
    screen = pygame.display.set_mode((192, 192), pygame.DOUBLEBUF | pygame.HWSURFACE)

    try:
        for _ in range(10):
            obs, info = env.reset()
            done = False

            while not done:
                # The sampled action is a placeholder: with autopilot=True the
                # CARLA traffic manager is presumably driving the hero — confirm.
                action = env.action_space.sample()
                obs, reward, terminated, truncated, info = env.step(action)
                done = terminated or truncated

                # Render the birdview observation, shape (192, 192, 3).
                # pygame surfaces are (width, height), so swap the image axes.
                image = obs["roach"]["birdview"]["rendered"]
                surface = pygame.surfarray.make_surface(image.transpose((1, 0, 2)))
                screen.blit(surface, (0, 0))
                pygame.display.flip()
                pygame.event.pump()
    finally:
        env.close()
        # Tear down the pygame display even when an episode errors out.
        pygame.quit()
