"""
Author: Morphlng
Date: 2024-04-18 17:00:06
LastEditTime: 2025-01-21 16:38:10
LastEditors: Morphlng
Description: ActionHandler — wraps a dynamically imported ActionInterface implementation for a single agent, caching spaces and per-timestep converted actions.
FilePath: /DrivingGym/src/driving_gym/environment/agent/action/action_handler.py
"""

from __future__ import annotations

import os
from typing import TYPE_CHECKING

import numpy as np

from driving_gym.environment.agent.action.action_interface import (
    AbstractAction,
    ActionInterface,
)
from driving_gym.misc.util import dynamic_import

if TYPE_CHECKING:
    from driving_gym.simulation.adapter_interface import AdapterInterface


class ActionHandler:
    """Per-agent facade over a dynamically imported :class:`ActionInterface`.

    The concrete handler class is resolved from ``action_config["type"]`` at
    construction time; this wrapper adds caching for the action/mask spaces
    and for the per-timestep converted action.
    """

    def __init__(self, action_config: dict, adapter: "AdapterInterface"):
        """Initialize action space for a single agent

        Args:
            action_config (dict): The action configuration. Must contain a
                "type" key naming the concrete action implementation.
            adapter (AdapterInterface): Simulation adapter forwarded to the
                concrete action handler's constructor.
        """
        self.action_config = action_config.copy()
        self.handler = self._create_handler(self.action_config, adapter)
        # Lazily populated caches; see get_action_space / get_mask_space.
        self._action_space = None
        self._mask_space = None
        # Per-timestep cache so repeated conversions within one step reuse
        # the same AbstractAction instance (see convert_single_action).
        self._abstract_action = None
        self._step_cnt = None

    def _create_handler(
        self, config: dict, adapter: "AdapterInterface"
    ) -> ActionInterface:
        """Initialize the action handler based on the action type.

        The concrete class is resolved by ``dynamic_import`` from modules in
        this package directory whose filenames end with ``_action``.
        """
        action_type = config["type"]
        # os.path.join with a single argument was a no-op; dirname suffices.
        root = os.path.dirname(__file__)
        handler = dynamic_import(
            action_type,
            root,
            args=(config, adapter),
            suffix="_action",
        )
        return handler

    def reset(self):
        """Reset per-episode state and delegate the reset to the handler."""
        self._step_cnt = None
        # Drop the stale cached action from the previous episode so it can
        # never leak through a cache hit after reset.
        self._abstract_action = None
        self.handler.reset()

    def convert_single_action(self, action, time_step) -> AbstractAction:
        """Convert a model output action to an AbstractAction

        Args:
            action: The action to be converted
            time_step: Current simulation step; used as the cache key.

        Returns:
            AbstractAction: An action instance

        Note:
            Repeatedly calling this function within the same time_step will
            return the same AbstractAction instance (the ``action`` argument
            is ignored on cache hits).
        """
        if self._step_cnt != time_step:
            self._step_cnt = time_step
            self._abstract_action = self.handler.convert_single_action(action)
        return self._abstract_action

    def get_action_mask(self) -> np.ndarray:
        """Get the action mask for a given actor

        Returns:
            np.ndarray: A numpy array of action mask
        """
        return self.handler.get_action_mask()

    def get_action_space(self):
        """Return the handler's action space, computed once and cached."""
        if self._action_space is None:
            self._action_space = self.handler.get_action_space()
        return self._action_space

    def get_mask_space(self):
        """Return the handler's mask space, computed once and cached."""
        if self._mask_space is None:
            self._mask_space = self.handler.get_mask_space()
        return self._mask_space
