from typing import Dict, Sequence, Tuple, Any, Union, Optional
import numpy as np
import torch
import torch.nn as nn
from torch import Tensor
from tianshou.utils.net.discrete import Actor, Critic
import torch.nn.functional as F
from torch import device
from tianshou.utils.net.common import MLP

class TexasBaseNet(nn.Module):
    """Shared MLP feature extractor for the Texas Hold'em actor/critic nets.

    Maps a flat observation vector to a feature vector through ``n_layer``
    ReLU-activated hidden layers followed by a final linear projection.
    """

    def __init__(
            self,
            input_size: int,
            device: str = 'cpu',
            hidden_sizes: int = 128,
            output_sizes: int = 128,
            n_layer: int = 2,
        ) -> None:
        """
        :param input_size: dimension of the flat observation vector.
        :param device: device that numpy observations are moved to in forward.
        :param hidden_sizes: width of every hidden layer.
        :param output_sizes: dimension of the returned feature vector.
        :param n_layer: number of Linear+ReLU hidden layers (must be >= 1).
        """
        super().__init__()
        self.device = device
        layers = [
            nn.Linear(input_size, hidden_sizes),
            nn.ReLU(),
        ]
        for _ in range(n_layer - 1):
            layers.extend([
                nn.Linear(hidden_sizes, hidden_sizes),
                nn.ReLU(),
            ])
        self.nets = nn.Sequential(*layers)
        self.out = nn.Linear(hidden_sizes, output_sizes)

    def forward(self, obs, state):
        """Return ``(features, state)``; ``state`` is passed through unchanged."""
        # Bug fix: the original converted only np.ndarray inputs and raised
        # NameError when obs was already a Tensor.  as_tensor handles both,
        # and avoids a copy when obs is already a float32 tensor on `device`.
        logits = torch.as_tensor(obs, dtype=torch.float32, device=self.device)
        logits = self.nets(logits)
        logits = self.out(logits)
        return logits, state

class TexasActor(nn.Module):
    """Policy head producing action probabilities plus a win-rate estimate.

    The auxiliary win-rate prediction is detached before being concatenated
    with the features, so the policy loss does not backpropagate into the
    win-rate head through the action branch.
    """

    def __init__(
            self,
            preprocess_net: nn.Module,
            action_shape: Sequence[int],
            hidden_sizes: int = 128,
            softmax_output: bool = True,
            preprocess_net_output_dim: Optional[int] = None,
            device: Union[str, int, torch.device] = "cpu",
        ) -> None:
        """
        :param preprocess_net: feature extractor (e.g. ``TexasBaseNet``).
        :param action_shape: shape of the discrete action space.
        :param hidden_sizes: width of the single hidden layer of the action
            MLP.  (Fixed annotation: the value is used as a scalar in
            ``[hidden_sizes]``; the old ``Sequence[int] = ...`` default was
            unusable.)
        :param softmax_output: apply softmax to the action logits.
        :param preprocess_net_output_dim: feature dimension produced by
            ``preprocess_net``.
        :param device: device passed to the action MLP.
        """
        super().__init__()
        self.device = device
        self.preprocess = preprocess_net
        self.output_dim = int(np.prod(action_shape))
        # Auxiliary head: 2-class (presumably win/lose) distribution.
        self.win_predict = nn.Sequential(
            nn.Linear(preprocess_net_output_dim, 2),
            nn.Softmax(dim=-1),
        )
        # The action head also consumes the detached win-rate prediction,
        # hence the +2 on its input dimension.
        self.last = MLP(
            preprocess_net_output_dim + 2,  # type: ignore
            self.output_dim,
            [hidden_sizes],
            device=self.device,
        )
        self.softmax_output = softmax_output

    def forward(
            self,
            obs: Union[np.ndarray, Tensor],
            state: Any = None,
            info: Optional[Dict[str, Any]] = None,
        ) -> Tuple[Tensor, Tensor, Any]:
        """Map an observation batch to ``(action_probs, win_rate, state)``.

        NOTE(review): the annotation says ndarray/Tensor, but the code
        indexes ``obs['obs']`` — the real input is a dict-like batch;
        confirm against the caller.
        """
        obs = obs['obs']
        logits, hidden = self.preprocess(obs, state)

        # Win-rate estimate (auxiliary output, trained by its own loss).
        win_rate_logits = self.win_predict(logits)

        # Action distribution: detach the win-rate so gradients from the
        # policy loss do not flow into the win-rate head via this path.
        logits = torch.cat([logits, win_rate_logits.detach()], dim=-1)
        act_logits = self.last(logits)

        if self.softmax_output:
            act_logits = F.softmax(act_logits, dim=-1)
        return act_logits, win_rate_logits, hidden

class TexasCritic(nn.Module):
    """Value head: V(s) from the agent's features plus an opponent encoding."""

    def __init__(
        self,
        preprocess_net: nn.Module,
        hidden_sizes: int = 128,
        last_size: int = 1,
        preprocess_net_output_dim: Optional[int] = None,
        device: Union[str, int, torch.device] = "cpu",
    ) -> None:
        """
        :param preprocess_net: feature extractor (e.g. ``TexasBaseNet``).
        :param hidden_sizes: width of the hidden layers.  (Fixed annotation:
            the value is fed to ``nn.Linear`` as a scalar; the old
            ``Sequence[int] = ()`` default could never work.)
        :param last_size: output dimension (1 for a scalar state value).
        :param preprocess_net_output_dim: fallback feature dimension used
            when ``preprocess_net`` has no ``output_dim`` attribute.
        :param device: device passed to the final MLP.
        """
        super().__init__()
        self.device = device
        self.preprocess = preprocess_net
        self.output_dim = last_size
        input_dim = getattr(preprocess_net, "output_dim", preprocess_net_output_dim)
        # Encodes the opponents' observation (52*3 floats — presumably three
        # 52-card planes; confirm against the env) into a 32-dim vector.
        self.oppo_obs_external_encoder = nn.Sequential(
            nn.Linear(52 * 3, hidden_sizes),
            nn.ReLU(),
            nn.Linear(hidden_sizes, 32),
        )
        self.last = MLP(
            input_dim + 32,  # type: ignore
            last_size,
            [hidden_sizes],
            device=self.device,
        )

    def forward(
        self, obs: Union[np.ndarray, torch.Tensor], **kwargs: Any
    ) -> torch.Tensor:
        """Mapping: s -> V(s).

        NOTE(review): despite the annotation, ``obs`` must be a dict-like
        batch with ``'obs'`` and ``'oppo_obs'`` entries.
        """
        oppo_obs = obs['oppo_obs']
        obs = obs['obs']
        # Bug fix: the original tested isinstance(obs, np.ndarray) but
        # converted oppo_obs, leaving `oppo_logits` unbound whenever obs
        # was already a tensor.  as_tensor covers ndarray and Tensor alike.
        oppo_logits = torch.as_tensor(
            oppo_obs, dtype=torch.float32, device=self.device
        )
        logits, _ = self.preprocess(obs, state=kwargs.get("state", None))
        oppo_logits = self.oppo_obs_external_encoder(oppo_logits)
        logits = torch.cat((logits, oppo_logits), dim=-1)
        return self.last(logits)