import torch
import numpy as np
from typing import Any
from tianshou.policy import DQNPolicy
from tianshou.data import ReplayBuffer


class CustomDQNPolicy(DQNPolicy):
    """DQN policy that can switch between vanilla DQN and Double DQN targets.

    Args:
        is_double: if True (default), use the Double DQN target
            (van Hasselt et al., 2016) — actions are selected by the online
            network and evaluated by the target network. If False, fall back
            to the plain DQN target (max over the target network's Q-values).
            Only meaningful when a target network is enabled; without one,
            both settings bootstrap from the online network's max Q-value.
    """

    def __init__(
        self,
        *args: Any,
        is_double: bool = True,
        **kwargs: Any,
    ) -> None:
        # Cooperative super() call instead of hard-coding the base class,
        # keeping the MRO intact under multiple inheritance.
        super().__init__(*args, **kwargs)
        self.is_double = is_double  # whether to use the Double DQN target

    def _target_q(self, buffer: ReplayBuffer, indice: np.ndarray) -> torch.Tensor:
        """Compute the bootstrap target Q-value for the sampled transitions.

        Args:
            buffer: replay buffer the transitions are sampled from.
            indice: indices of the sampled transitions.

        Returns:
            A 1-D tensor with one target Q-value per sampled transition.
        """
        batch = buffer[indice]  # batch.obs_next: s_{t+n}
        if self._target:
            # Nature DQN (2015): evaluate with the frozen target network.
            target_q = self(batch, model="model_old", input="obs_next").logits
            if self.is_double:
                # Double DQN — the only place it differs from DQN:
                # target_Q = Q_old(s', argmax_a Q_new(s', a)).
                # The online-network forward pass is done only in this branch,
                # since its result is unused for the vanilla target.
                act = self(batch, input="obs_next").act
                target_q = target_q[np.arange(len(act)), act]
            else:
                # Vanilla target: max over the target network's own Q-values.
                target_q = target_q.max(dim=1)[0]
        else:
            # DQN (2013): no target network; bootstrap from the online network.
            target_q = self(batch, input="obs_next").logits.max(dim=1)[0]
        return target_q
