import numpy as np

from driving_gym.examples.carla_env_v2 import CarlaActorConfig, make_carla_env
from driving_gym.simulation.adapter.carla.planner.navigation.rule_agent import RuleAgent
from driving_gym.simulation.adapter.carla.utils import carla_location

def control_to_action(control):
    """Convert a CARLA VehicleControl into a ``[longitudinal, steer]`` action.

    The first component encodes braking as a negative value: when
    ``control.brake > 0`` it takes precedence over throttle, otherwise the
    (non-negative) throttle is used. The second component is the steering.
    """
    longitudinal = -control.brake if control.brake > 0 else control.throttle
    return np.array([longitudinal, control.steer])


def main():
    """Drive the 'hero' vehicle for 10 episodes using a rule-based agent."""
    env = make_carla_env({"rolename": "hero", "reward_type": "carla_roach_reward"})

    try:
        for i in range(10):
            obs, info = env.reset()
            vehicle = env.adapter.get_actor("hero")
            agent = RuleAgent(
                vehicle,
                opt_dict={"ignore_traffic_lights": False, "ignore_stop_signs": True},
            )

            # Plan a global route between the scenario's start/end locations.
            task = env.scenario.get_task("hero")
            start = CarlaActorConfig.parse_location(task["start"], agent._map)
            end = CarlaActorConfig.parse_location(task["end"], agent._map)
            plan = agent._global_planner.trace_route(
                carla_location(start.location), carla_location(end.location)
            )
            # Skip degenerate routes (start and end resolve to the same spot).
            if len(plan) < 2:
                continue

            agent.init_path([(w.transform, opt) for w, opt in plan])

            done = False

            while not done:
                control = agent.run_step()
                action = control_to_action(control)

                obs, reward, te, tr, info = env.step({"hero": action})
                print(f"Step: {env.num_step}, Action: {action}, Reward: {reward}")
                # Episode ends when either termination or truncation fires.
                done = te["__all__"] or tr["__all__"]
                env.render()

            print(f"Episode {i} is done!")
    finally:
        # Always tear down the CARLA connection, even on error or interrupt.
        env.close()


if __name__ == "__main__":
    main()
