import torch
import torch.nn as nn
from torch import Tensor
import torch.nn.functional as F
import numpy as np
from collections import OrderedDict
from typing import Optional, Sequence, Union, Tuple, Any, Dict, Type, List
ArgsType = Union[Tuple[Any, ...], Dict[Any, Any], Sequence[Tuple[Any, ...]],
                 Sequence[Dict[Any, Any]]]

def miniblock(
    input_size: int,
    output_size: int = 0,
    norm_layer: Optional[nn.Module] = None,
    norm_args: Optional[Union[Tuple[Any, ...], Dict[Any, Any]]] = None,
    activation: Optional[nn.Module] = None,
    act_args: Optional[Union[Tuple[Any, ...], Dict[Any, Any]]] = None,
    linear_layer: Type[nn.Linear] = nn.Linear,
) -> List[nn.Module]:
    """Build a ``[linear, norm?, activation?]`` stack as a list of modules.

    The norm layer (if given) receives ``output_size`` as its first
    constructor argument, followed by ``norm_args`` expanded positionally
    (tuple) or by keyword (dict). The activation is instantiated the same
    way from ``act_args``.
    """
    block: List[nn.Module] = [linear_layer(input_size, output_size)]
    if norm_layer is not None:
        if isinstance(norm_args, dict):
            block.append(norm_layer(output_size, **norm_args))
        elif isinstance(norm_args, tuple):
            block.append(norm_layer(output_size, *norm_args))
        else:
            block.append(norm_layer(output_size))
    if activation is not None:
        if isinstance(act_args, dict):
            block.append(activation(**act_args))
        elif isinstance(act_args, tuple):
            block.append(activation(*act_args))
        else:
            block.append(activation())
    return block


class MLP(nn.Module):
    """Simple MLP backbone: a stack of ``miniblock`` layers plus an
    optional final linear projection.

    :param input_dim: dimension of the (flattened) input.
    :param output_dim: dimension of the final linear layer; ``0`` skips the
        final projection and ``self.output_dim`` falls back to the last
        hidden size.
    :param hidden_sizes: widths of the hidden layers.
    :param norm_layer: norm-layer class applied after every hidden linear
        layer, or a per-hidden-layer list of such classes.
    :param norm_args: constructor args for ``norm_layer`` (tuple/dict, or a
        per-hidden-layer list thereof).
    :param activation: activation class (or per-hidden-layer list).
    :param act_args: constructor args for ``activation`` (or per-layer list).
    :param device: if set, inputs are converted/moved to this device in
        ``forward``.
    :param linear_layer: class used to build all linear layers.
    :param flatten_input: whether to flatten inputs to ``(batch, -1)``.
    """

    def __init__(
        self,
        input_dim: int,
        output_dim: int = 0,
        hidden_sizes: Sequence[int] = (),
        norm_layer: Optional[Union[Type[nn.Module], List[Type[nn.Module]]]] = None,
        norm_args: Optional[ArgsType] = None,
        activation: Optional[Union[Type[nn.Module], List[Type[nn.Module]]]] = nn.ReLU,
        act_args: Optional[ArgsType] = None,
        device: Optional[Union[str, int, torch.device]] = None,
        linear_layer: Type[nn.Linear] = nn.Linear,
        flatten_input: bool = True,
    ) -> None:
        super().__init__()
        self.device = device
        n_hidden = len(hidden_sizes)
        # Broadcast the (possibly per-layer) norm/activation specs once;
        # the expansion logic is identical for both, so it is shared.
        norm_layer_list, norm_args_list = self._expand(norm_layer, norm_args, n_hidden)
        activation_list, act_args_list = self._expand(activation, act_args, n_hidden)
        dims = [input_dim] + list(hidden_sizes)
        model: List[nn.Module] = []
        # NOTE: loop variables renamed (n_args/a_args) so they no longer
        # shadow the constructor parameters norm_args/act_args.
        for in_dim, out_dim, norm, n_args, activ, a_args in zip(
            dims[:-1], dims[1:], norm_layer_list, norm_args_list,
            activation_list, act_args_list
        ):
            model += miniblock(in_dim, out_dim, norm, n_args, activ, a_args, linear_layer)
        if output_dim > 0:
            model += [linear_layer(dims[-1], output_dim)]
        self.output_dim = output_dim or dims[-1]
        self.model = nn.Sequential(*model)
        self.flatten_input = flatten_input

    @staticmethod
    def _expand(
        layer: Any, args: Optional[ArgsType], n: int
    ) -> Tuple[List[Any], List[Any]]:
        """Broadcast a layer spec (single class or per-layer list) and its
        args to two length-``n`` lists; a falsy ``layer`` yields all-``None``
        lists so ``miniblock`` skips that component entirely."""
        if not layer:
            return [None] * n, [None] * n
        if isinstance(layer, list):
            assert len(layer) == n
            layer_list = list(layer)
            if isinstance(args, list):
                assert len(args) == n
                args_list = list(args)
            else:
                # Single args value shared by every per-layer class.
                args_list = [args] * n
        else:
            layer_list = [layer] * n
            args_list = [args] * n
        return layer_list, args_list

    def forward(self, obs: Union[np.ndarray, torch.Tensor]) -> torch.Tensor:
        if self.device is not None:
            obs = torch.as_tensor(obs, device=self.device, dtype=torch.float32)
        elif isinstance(obs, np.ndarray):
            # Fix: numpy inputs previously crashed here when no device was
            # configured — ndarray has no torch-style flatten(1) and cannot
            # be fed to nn.Linear.
            obs = torch.as_tensor(obs, dtype=torch.float32)
        if self.flatten_input:
            obs = obs.flatten(1)
        return self.model(obs)

class InferBaseNet(nn.Module):
    """MLP feature extractor: ``n_layer`` Linear+ReLU blocks followed by a
    linear output head. The ``state`` argument is passed through untouched
    (kept for API compatibility with recurrent variants).

    :param input_size: dimension of the input features.
    :param device: device used when converting numpy inputs to tensors.
    :param hidden_sizes: width of every hidden layer.
    :param output_sizes: dimension of the output head.
    :param n_layer: number of hidden Linear+ReLU blocks (>= 1).
    """

    def __init__(
            self,
            input_size: int,
            device: str = 'cpu',
            hidden_sizes: int = 128,
            output_sizes: int = 128,
            n_layer: int = 2,
        ) -> None:
        super().__init__()
        self.device = device
        net: List[nn.Module] = [nn.Linear(input_size, hidden_sizes), nn.ReLU()]
        for _ in range(n_layer - 1):
            net.extend([nn.Linear(hidden_sizes, hidden_sizes), nn.ReLU()])
        self.nets = nn.Sequential(*net)
        self.out = nn.Linear(hidden_sizes, output_sizes)

    def forward(
        self, obs: Union[np.ndarray, torch.Tensor], state: Any
    ) -> Tuple[torch.Tensor, Any]:
        """Return ``(features, state)``; ``state`` is returned unchanged."""
        # Fix: tensor inputs previously raised NameError because the
        # converted variable was only assigned in the ndarray branch.
        if isinstance(obs, np.ndarray):
            obs = torch.as_tensor(obs, dtype=torch.float32, device=self.device)
        logits = self.out(self.nets(obs))
        return logits, state

class InferActor(nn.Module):
    """Actor head with an auxiliary win-rate predictor.

    ``preprocess_net`` encodes the observation; a 2-way softmax head
    predicts win/lose probability, which is concatenated (detached) to the
    features before the action head.

    :param preprocess_net: feature extractor returning ``(features, state)``.
    :param action_shape: shape of the action space; flattened to the
        action-logit dimension.
    :param hidden_sizes: hidden width of the action-head MLP. (Fixed: the
        previous default was the ``...`` sentinel, which crashed ``MLP``.)
    :param softmax_output: whether to apply softmax to the action logits.
    :param preprocess_net_output_dim: feature dimension of
        ``preprocess_net``'s output.
    :param device: device forwarded to the action-head MLP.
    """

    def __init__(
            self,
            preprocess_net: nn.Module,
            action_shape: Sequence[int],
            hidden_sizes: int = 128,
            softmax_output: bool = True,
            preprocess_net_output_dim: Optional[int] = None,
            device: str = "cpu",
        ) -> None:
        super().__init__()
        self.device = device
        self.preprocess = preprocess_net
        self.output_dim = int(np.prod(action_shape))
        # Win-rate head: 2-way (win / lose) probability distribution.
        self.win_predict = nn.Sequential(
            nn.Linear(preprocess_net_output_dim, 2),
            nn.Softmax(dim=-1),
        )
        self.last = MLP(
            preprocess_net_output_dim + 2,  # features + detached win-rate
            self.output_dim,
            [hidden_sizes],
            device=self.device,
        )
        self.softmax_output = softmax_output

    def forward(
            self,
            obs: Union[np.ndarray, Tensor],
            state: Any = None,  # fixed: was `torch.Any`, not a valid name
            info: Optional[Dict[str, Any]] = None,  # fixed: default was `...`
        ) -> Tuple[Tensor, Tensor, Any]:
        """Return ``(action logits/probs, win-rate probs, hidden state)``."""
        logits, hidden = self.preprocess(obs, state)

        # Win-rate prediction.
        win_rate_logits = self.win_predict(logits)

        # Action head sees the features plus the *detached* win-rate
        # estimate, so action gradients do not flow into the win-rate head.
        logits = torch.cat([logits, win_rate_logits.detach()], dim=-1)
        act_logits = self.last(logits)

        if self.softmax_output:
            act_logits = F.softmax(act_logits, dim=-1)
        return act_logits, win_rate_logits, hidden

class Agent():
    """Thin inference wrapper: loads weights, tracks recurrent state, and
    picks the greedy (argmax) action from mask-filtered logits."""

    def __init__(self, model: nn.Module, model_path: Optional[str] = None) -> None:
        self._state = None  # recurrent hidden state carried across steps
        self.model = model
        if model_path:
            # map_location makes GPU-saved checkpoints loadable on CPU-only
            # hosts; load_state_dict then copies tensors onto the model's
            # own parameter devices, so placement is unaffected.
            state_dict = torch.load(model_path, map_location='cpu')
            self.model.load_state_dict(state_dict)

    def reset(self, state_dict: Optional[OrderedDict] = None):
        """Clear the recurrent state; optionally load actor weights from a
        checkpoint whose 'model' entries are prefixed with 'actor.'."""
        if state_dict is not None:
            actor_weights = OrderedDict()
            prefix = 'actor.'
            for key, value in state_dict['model'].items():
                if key.startswith(prefix):
                    actor_weights[key[len(prefix):]] = value
            self.model.load_state_dict(actor_weights)
        self._state = None

    def get_action(self, obs):
        """Return the greedy legal action as a one-hot int list (batch of 1).

        ``obs`` is a dict with 'obs' (encoded observation fed to the model)
        and 'mask' (array of legal-action flags).
        """
        encode_obs = obs['obs']
        with torch.no_grad():
            act_logits, _, self._state = self.model(encode_obs, self._state)
            mask = torch.tensor(
                obs['mask'].astype(bool), device=act_logits.device
            ).unsqueeze(0)
            act_logits[~mask] = float('-inf')
            # Softmax is monotonic, so argmax on the raw masked logits picks
            # the same action as softmax-then-argmax, and avoids NaNs when
            # every action happens to be masked out.
            action = act_logits.argmax().item()
        # Derive the one-hot width from the logits instead of hard-coding 6.
        n_actions = act_logits.shape[-1]
        return [np.eye(n_actions)[action].astype(int).tolist()]
