import torch
import numpy as np
from torch.distributions import Independent, Normal, Categorical
from typing import Any, Union, Optional, Callable

from tianshou.policy import SACPolicy, DiscreteSACPolicy
from tianshou.data import Batch
from ril.policy.rule_policy_base import RulePolicyBase


class SACRulePolicy(SACPolicy, RulePolicyBase):
    """Continuous SAC policy whose rollout actions can be overridden by rules.

    During data collection (``not self.updating``) the sampled action is passed
    through :meth:`process_rule`, and the pre-squash sample ``x`` is recomputed
    from the (possibly rule-modified) action so that ``log_prob`` stays
    consistent with the action actually executed.
    """

    def __init__(
        self,
        rule_fn: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Initialize both parents explicitly (cooperative super() is not
        usable here because SACPolicy and RulePolicyBase take different args).

        :param rule_fn: optional rule function forwarded to RulePolicyBase.
        """
        SACPolicy.__init__(self, *args, **kwargs)
        RulePolicyBase.__init__(self, rule_fn)
        # Small constant keeping log() / atanh() numerically stable.
        self.__eps = np.finfo(np.float32).eps.item()

    def forward(
        self,
        batch: Batch,
        state: Optional[Union[dict, Batch, np.ndarray]] = None,
        input: str = "obs",
        **kwargs: Any,
    ) -> Batch:
        """Compute action distribution, sample an action, and apply rules.

        :param batch: input batch; observations are read from ``batch[input]``.
        :param state: hidden state for a recurrent actor, if any.
        :param input: key under which the observation is stored in ``batch``.
        :return: Batch with ``logits``, ``act``, ``state``, ``dist``,
            ``log_prob``.
        """
        obs = batch[input]
        logits, h = self.actor(obs, state=state, info=batch.info)
        assert isinstance(logits, tuple)
        dist = Independent(Normal(*logits), 1)
        if self._deterministic_eval and not self.training:
            # Deterministic evaluation: use the mean instead of sampling.
            x = logits[0]
        else:
            # rsample() keeps the reparameterization gradient path.
            x = dist.rsample()
        y = torch.tanh(x)
        act = y * self._action_scale + self._action_bias
        # Apply rules only during rollout/evaluation, never inside a learn step.
        if not self.updating:
            act, _mask, _explorable = self.process_rule(batch, act)
            y = (act - self._action_bias) / self._action_scale
            # Fix: a rule action on (or marginally outside) the action bound
            # yields y = +/-1, so atanh(y) = +/-inf and log_prob becomes
            # NaN/inf. Clamp into the open interval (-1, 1) before inverting.
            y = y.clamp(-1.0 + self.__eps, 1.0 - self.__eps)
            x = torch.atanh(y)

        # tanh-squash correction: subtract log|d act / d x| from the Gaussian
        # log-density (change-of-variables for the squashed distribution).
        y = self._action_scale * (1 - y.pow(2)) + self.__eps
        log_prob = dist.log_prob(x).unsqueeze(-1)
        log_prob = log_prob - torch.log(y).sum(-1, keepdim=True)

        return Batch(logits=logits, act=act, state=h, dist=dist, log_prob=log_prob)


class DiscreteSACRulePolicy(DiscreteSACPolicy, RulePolicyBase):
    """Discrete SAC policy whose rollout actions can be overridden by rules.

    During data collection (``not self.updating``) the sampled action is
    post-processed by :meth:`process_rule`; while updating, the raw sample is
    returned untouched.
    """

    def __init__(
        self,
        rule_fn: Optional[Callable] = None,
        *args: Any,
        **kwargs: Any,
    ) -> None:
        """Initialize both parents explicitly (they take different arguments).

        :param rule_fn: optional rule function forwarded to RulePolicyBase.
        """
        DiscreteSACPolicy.__init__(self, *args, **kwargs)
        RulePolicyBase.__init__(self, rule_fn)

    def forward(
        self,
        batch: Batch,
        state: Optional[Union[dict, Batch, np.ndarray]] = None,
        input: str = "obs",
        **kwargs: Any,
    ) -> Batch:
        """Sample a discrete action from the actor and apply rules if rolling
        out.

        :param batch: input batch; observations are read from ``batch[input]``.
        :param state: hidden state for a recurrent actor, if any.
        :param input: key under which the observation is stored in ``batch``.
        :return: Batch with ``logits``, ``act``, ``state``, ``dist``.
        """
        obs = batch[input]
        logits, h = self.actor(obs, state=state, info=batch.info)
        dist = Categorical(logits=logits)
        act = dist.sample()
        # Apply rules only during rollout/evaluation, never inside a learn step.
        if not self.updating:
            act, _mask, _explorable = self.process_rule(batch, act)
        return Batch(logits=logits, act=act, state=h, dist=dist)
