"""
Author: Morphlng
Date: 2025-03-18 15:04:06
LastEditTime: 2025-03-18 16:19:05
LastEditors: Morphlng
Description: Provide a stable-baselines3 compatible wrapper for Rule-based Agent, suitable for `imitation` library.
FilePath: /DrivingGym/examples/carla_rule_policy.py
"""

import numpy as np
import torch
from stable_baselines3.common.policies import BasePolicy
from stable_baselines3.common.vec_env.dummy_vec_env import DummyVecEnv

from driving_gym.simulation.adapter.carla import CarlaActorConfig
from driving_gym.simulation.adapter.carla.carla_adapter import CarlaAdapter
from driving_gym.simulation.adapter.carla.planner.navigation.rule_agent import RuleAgent
from driving_gym.simulation.adapter.carla.utils import carla_location


class RulePolicy(BasePolicy):
    """Stable-Baselines3 ``BasePolicy`` wrapper around a CARLA rule-based agent.

    Instead of mapping observations to actions with a network, this policy
    delegates action selection to a :class:`RuleAgent` that follows a
    pre-planned route in the simulator, so it can serve as an expert policy
    for the ``imitation`` library.
    """

    def __init__(self, env, squash_output=False, **kwargs):
        """
        Args:
            env: A DrivingGym CARLA environment, or a ``DummyVecEnv``
                wrapping exactly one such environment.
            squash_output: Forwarded to ``BasePolicy``; whether actions are
                squashed to [-1, 1].
            **kwargs: Accepted for call-site compatibility but ignored.
        """
        super().__init__(
            env.observation_space, env.action_space, squash_output=squash_output
        )

        # The rule agent drives a single simulator instance, so unwrap a
        # vectorized env down to its first (and only expected) sub-env.
        if isinstance(env, DummyVecEnv):
            env = env.envs[0]

        self.env = env
        self.adapter: CarlaAdapter = self.env.adapter
        # Episode id the current agent was built for; -1 forces a build on
        # the first _predict call.
        self.ep_id = -1
        self.agent = None  # lazily (re)built per episode in _predict

    def _predict(self, observation, deterministic=False):
        """Return the rule agent's control as a (1, 2) float32 action tensor.

        ``observation`` and ``deterministic`` are ignored: the action comes
        from the rule agent's internal planner, not from the observation.
        """
        # A new episode spawns fresh actors in the simulator, so the agent
        # must be rebuilt around the new "hero" vehicle.
        if self.env.num_episode != self.ep_id:
            self.ep_id = self.env.num_episode
            self.agent = self._init_agent()

        control = self.agent.run_step()
        # Collapse throttle/brake into one signed channel (negative = brake).
        # float32 matches the dtype gym/SB3 Box action spaces expect.
        action = np.array(
            [
                -control.brake if control.brake > 0 else control.throttle,
                control.steer,
            ],
            dtype=np.float32,
        )
        # [None] adds the batch dimension SB3 expects from _predict.
        return torch.from_numpy(action[None])

    def _init_agent(self):
        """Build a ``RuleAgent`` for the current episode's hero vehicle.

        Reads the scenario task's start/end locations, traces a global route
        between them, and primes the agent with that path.
        """
        vehicle = self.adapter.get_actor("hero")
        agent = RuleAgent(
            vehicle,
            opt_dict={"ignore_traffic_lights": False, "ignore_stop_signs": False},
        )

        task = self.env.scenario.get_task("hero")
        start = CarlaActorConfig.parse_location(task["start"], agent._map)
        end = CarlaActorConfig.parse_location(task["end"], agent._map)
        plan = agent._global_planner.trace_route(
            carla_location(start.location), carla_location(end.location)
        )
        # The agent's path API wants (transform, road_option) pairs rather
        # than raw waypoints.
        agent.init_path([(w.transform, opt) for w, opt in plan])

        return agent


if __name__ == "__main__":
    # Smoke-test the rule policy by rolling it out in a Roach-wrapped CARLA env.
    from stable_baselines3.common.evaluation import evaluate_policy

    from driving_gym.examples.carla_env_roach import make_carla_env

    eval_env = make_carla_env(roach_wrapper=True)
    rule_policy = RulePolicy(eval_env)

    mean_reward, std_reward = evaluate_policy(
        rule_policy, eval_env, n_eval_episodes=10
    )
    print("mean_reward:", mean_reward, "std_reward:", std_reward)
