from typing import Any, Type, Optional, Union, Dict, List
from tianshou.policy import PPOPolicy
import torch

import torch.nn as nn
from torch.optim.optimizer import Optimizer as Optimizer
from tianshou.data import Batch, ReplayBuffer, to_torch, to_torch_as
import numpy as np

class TexasPPOPolicy(PPOPolicy):
    """PPO policy for Texas Hold'em with action masking and an auxiliary
    win-prediction head.

    The actor is expected to return a triple ``(logits, win_logits, hidden)``
    instead of tianshou's usual ``(logits, hidden)``.  ``win_logits`` are
    trained with a cross-entropy loss against ``obs.win`` labels, weighted
    by 0.25 and added to the standard PPO objective in :meth:`learn`.
    Illegal actions are masked out via ``obs.mask`` before building the
    action distribution.
    """

    def __init__(
            self,
            actor: nn.Module,
            critic: nn.Module,
            optim: Optimizer,
            dist_fn,
            eps_clip: float = 0.2,
            dual_clip: Optional[float] = None,
            value_clip: bool = False,
            advantage_normalization: bool = True,
            recompute_advantage: bool = False,
            **kwargs: Any
        ) -> None:
        super().__init__(
            actor, critic, optim, dist_fn, eps_clip, dual_clip, value_clip,
            advantage_normalization, recompute_advantage, **kwargs
        )
        # Auxiliary loss for the actor's win-prediction head (see learn()).
        self.win_predict_loss = torch.nn.CrossEntropyLoss()

    def forward(
        self,
        batch: Batch,
        state: Optional[Union[dict, Batch, np.ndarray]] = None,
        **kwargs: Any,
    ) -> Batch:
        """Compute action over the given batch data.

        :return: A :class:`~tianshou.data.Batch` which has 5 keys:

            * ``act`` the action.
            * ``logits`` the network's raw (unmasked) output.
            * ``dist`` the action distribution over legal actions.
            * ``state`` the hidden state.
            * ``win_logits`` the win-prediction head's output.

        .. seealso::

            Please refer to :meth:`~tianshou.policy.BasePolicy.forward` for
            more detailed explanation.
        """
        logits, win_logits, hidden = self.actor(batch.obs, state=state, info=batch.info)
        mask = torch.as_tensor(batch.obs.mask.astype(bool), device=logits.device)
        # BUGFIX: the previous implementation masked via
        # `logits.clone().detach()` followed by in-place assignment.  The
        # detach severed the computation graph, so in learn() the ratio
        # dist.log_prob(act) - logp_old carried NO gradient to the actor:
        # the clip and entropy losses silently had no training effect.
        # `masked_fill` is out-of-place and autograd-friendly, so the policy
        # gradient flows through the masked distribution.
        # NOTE(review): the +1e-5 shift is kept from the original; for a
        # logits-parameterized dist_fn a constant shift is a no-op — confirm
        # whether dist_fn ever interprets its input as probabilities.
        masked_logits = (logits + 1e-5).masked_fill(~mask, float('-inf'))
        dist = self.dist_fn(masked_logits)
        if self._deterministic_eval and not self.training:
            if self.action_type == "discrete":
                # Greedy eval must respect the action mask too: argmax over
                # the raw logits could select an illegal action.
                act = masked_logits.argmax(-1)
            elif self.action_type == "continuous":
                act = logits[0]
        else:
            act = dist.sample()
        return Batch(logits=logits, act=act, state=hidden, dist=dist, win_logits=win_logits)

    def learn(  # type: ignore
        self, batch: Batch, batch_size: int, repeat: int, **kwargs: Any
    ) -> Dict[str, List[float]]:
        """Run PPO updates plus the auxiliary win-prediction loss.

        Mirrors :meth:`tianshou.policy.PPOPolicy.learn`, with one addition:
        a cross-entropy loss between the actor's ``win_logits`` and the
        per-observation ``obs.win`` labels, weighted by 0.25.

        :param batch: the pre-collected batch (with ``adv``, ``returns``,
            ``logp_old`` and ``v_s`` already filled in by ``process_fn``).
        :param batch_size: minibatch size for each gradient step.
        :param repeat: number of passes over the whole batch.
        :return: dict of per-minibatch loss histories.
        """
        losses, clip_losses, vf_losses, ent_losses = [], [], [], []
        win_losses = []
        for step in range(repeat):
            # Optionally refresh advantages with the current value network
            # after the first pass (standard PPO "recompute advantage").
            if self._recompute_adv and step > 0:
                batch = self._compute_returns(batch, self._buffer, self._indices)
            for minibatch in batch.split(batch_size, merge_last=True):
                # --- actor (surrogate) loss -------------------------------
                out = self(minibatch)
                dist = out.dist
                win_logits = out.win_logits
                if self._norm_adv:
                    mean, std = minibatch.adv.mean(), minibatch.adv.std()
                    minibatch.adv = (minibatch.adv -
                                     mean) / (std + self._eps)  # per-batch norm
                ratio = (dist.log_prob(minibatch.act) -
                         minibatch.logp_old).exp().float()
                ratio = ratio.reshape(ratio.size(0), -1).transpose(0, 1)
                surr1 = ratio * minibatch.adv
                surr2 = ratio.clamp(
                    1.0 - self._eps_clip, 1.0 + self._eps_clip
                ) * minibatch.adv
                if self._dual_clip:
                    # Dual-clip PPO: bound the loss from below for adv < 0.
                    clip1 = torch.min(surr1, surr2)
                    clip2 = torch.max(clip1, self._dual_clip * minibatch.adv)
                    clip_loss = -torch.where(minibatch.adv < 0, clip2, clip1).mean()
                else:
                    clip_loss = -torch.min(surr1, surr2).mean()
                # --- critic (value) loss ----------------------------------
                value = self.critic(minibatch.obs).flatten()
                if self._value_clip:
                    v_clip = minibatch.v_s + \
                        (value - minibatch.v_s).clamp(-self._eps_clip, self._eps_clip)
                    vf1 = (minibatch.returns - value).pow(2)
                    vf2 = (minibatch.returns - v_clip).pow(2)
                    vf_loss = torch.max(vf1, vf2).mean()
                else:
                    vf_loss = (minibatch.returns - value).pow(2).mean()
                # --- entropy regularizer + auxiliary win loss -------------
                ent_loss = dist.entropy().mean()

                # NOTE(review): CrossEntropyLoss expects integer class
                # targets (or float class probabilities on torch>=1.10);
                # assumes obs.win already has a compatible dtype — verify
                # against the environment's observation builder.
                win_label = torch.tensor(minibatch.obs.win, device=win_logits.device)
                win_loss = self.win_predict_loss(win_logits, win_label)
                loss = clip_loss + self._weight_vf * vf_loss \
                    - self._weight_ent * ent_loss + 0.25 * win_loss
                self.optim.zero_grad()
                loss.backward()
                if self._grad_norm:  # clip large gradient
                    nn.utils.clip_grad_norm_(
                        self._actor_critic.parameters(), max_norm=self._grad_norm
                    )
                self.optim.step()
                clip_losses.append(clip_loss.item())
                vf_losses.append(vf_loss.item())
                ent_losses.append(ent_loss.item())
                win_losses.append(win_loss.item())
                losses.append(loss.item())

        return {
            "loss": losses,
            "loss/clip": clip_losses,
            "loss/vf": vf_losses,
            "loss/ent": ent_losses,
            "loss/win_losses": win_losses
        }
