from sb3_contrib.qrdqn.policies import QRDQNPolicy
from gymnasium import spaces
import torch as th
from typing import Dict, Union, Optional, Tuple
import numpy as np
from stable_baselines3.common.buffers import DictReplayBuffer
from stable_baselines3.common.type_aliases import PyTorchObs

class MaskingReplayBuffer(DictReplayBuffer):
    """Dict replay buffer that skips transitions flagged as invalid actions.

    NOTE(review): if ANY env in the vectorized batch reports
    ``is_invalid_action``, the WHOLE batch of transitions is dropped, not
    just the offending env's — confirm this is intended for n_envs > 1.
    """

    def add(self, obs, next_obs, action, reward, done, infos=None):
        """Add a transition batch unless some env flags an invalid action.

        Mirrors ``DictReplayBuffer.add``; ``infos`` is the per-env list of
        info dicts produced by the vectorized environment.
        """
        if infos is not None and isinstance(infos, list):
            # Skip the whole batch if any env reports an invalid action
            # (alternatively, offending envs could be handled individually).
            if any(info.get("is_invalid_action", False) for info in infos):
                return
        super().add(obs, next_obs, action, reward, done, infos)

class MaskableQRDQNMultiInputPolicy(QRDQNPolicy):
    """QR-DQN policy supporting action masking.

    A boolean ``action_mask`` can travel inside the (dict) observation or be
    passed explicitly to :meth:`predict`; invalid actions are excluded from
    the greedy argmax by pushing their Q-values far below any valid one.
    """

    def forward(self, obs: PyTorchObs, deterministic: bool = True) -> th.Tensor:
        """Greedily select actions, suppressing masked-out ones.

        When ``obs`` is a dict containing ``"action_mask"``, that entry is
        stripped from the network input and applied to the Q-values before
        the argmax. Returns action indices of shape ``(batch, 1)``.
        """
        # Split the optional mask away from the features fed to the network.
        mask = None
        net_input = obs
        if isinstance(obs, dict) and "action_mask" in obs:
            mask = obs["action_mask"]
            net_input = {key: val for key, val in obs.items() if key != "action_mask"}

        # Quantile network output is flat: [batch, actions * n_quantiles].
        flat_quantiles = self.quantile_net(net_input)
        n_actions = self.action_space.n
        per_action = flat_quantiles.view(flat_quantiles.shape[0], n_actions, self.n_quantiles)
        # Averaging over the quantile axis yields the per-action Q-value.
        q_values = per_action.mean(dim=2)

        if mask is not None:
            if isinstance(mask, th.Tensor):
                mask = mask.to(q_values.device).bool()
            else:
                mask = th.tensor(mask, device=q_values.device, dtype=th.bool)
            # Drive invalid actions' Q-values far below any attainable value.
            q_values = q_values.masked_fill(~mask, -1e10)

        return q_values.argmax(dim=1).view(-1, 1)

    def _predict(self, obs: PyTorchObs, deterministic: bool = True) -> th.Tensor:
        """Delegate to :meth:`forward` (greedy masked action selection)."""
        return self.forward(obs, deterministic=deterministic)

    def predict(
        self,
        observation: Union[np.ndarray, Dict[str, np.ndarray]],
        state: Optional[Tuple[np.ndarray, ...]] = None,
        episode_start: Optional[np.ndarray] = None,
        deterministic: bool = False,
        action_mask: Optional[np.ndarray] = None,
    ) -> Tuple[np.ndarray, Optional[Tuple[np.ndarray, ...]]]:
        """Numpy-facing prediction entry point with an optional action mask.

        :param observation: raw observation (array or dict of arrays); a bare
            array is wrapped as ``{"features": ...}`` — assumes the
            observation space uses that key (TODO confirm against the env).
        :param action_mask: optional boolean mask of valid actions; injected
            into the observation dict under ``"action_mask"`` so that
            :meth:`forward` can apply it.
        :return: ``(actions, state)`` — ``state`` is passed through unchanged
            (this policy is not recurrent).
        """
        self.set_training_mode(False)

        if not isinstance(observation, dict):
            observation = {"features": observation}
        if action_mask is not None:
            # Copy so the caller's dict is left untouched.
            observation = observation.copy()
            batched_mask = np.array(action_mask)
            if batched_mask.ndim == 1:
                batched_mask = batched_mask[np.newaxis, :]  # add a batch axis
            observation["action_mask"] = batched_mask

        obs_tensor, vectorized_env = self.obs_to_tensor(observation)
        with th.no_grad():
            actions = self._predict(obs_tensor, deterministic=deterministic)
        actions = actions.cpu().numpy().reshape((-1, *self.action_space.shape))

        # Continuous-action post-processing (inactive for Discrete QR-DQN,
        # kept for parity with the base-class implementation).
        if isinstance(self.action_space, spaces.Box):
            if self.squash_output:
                actions = self.unscale_action(actions)
            else:
                actions = np.clip(actions, self.action_space.low, self.action_space.high)

        if not vectorized_env:
            actions = actions.squeeze(axis=0)

        return actions, state
