import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.base.rnn import RNNLayer


def init(module, weight_init, bias_init, gain=1):
    """Initialize a module's weight and bias tensors in place.

    Args:
        module: (torch.nn.Module) module exposing ``weight`` (and optionally ``bias``).
        weight_init: callable invoked as ``weight_init(tensor, gain=gain)``;
            pass ``None`` to skip initialization entirely.
        bias_init: callable invoked as ``bias_init(tensor)``; only used when
            the module actually has a bias.
        gain: (float) scaling gain forwarded to ``weight_init``.

    Returns:
        module: the same module, enabling call-site chaining.
    """
    # `is None` rather than `== None`: None is a singleton (PEP 8).
    if weight_init is None:
        return module
    weight_init(module.weight.data, gain=gain)
    if module.bias is not None:
        bias_init(module.bias.data)
    return module


class MLPBase(nn.Module):
    """Feed-forward feature extractor: a stack of [Linear -> ReLU -> LayerNorm].

    Config read from ``args`` (all optional):
        use_feature_normalization: apply LayerNorm to the raw input first (default True).
        hidden_sizes: widths of the hidden layers (default [256, 256]).
        initialization_critic: name of a ``torch.nn.init`` function used for the
            linear weights (default "orthogonal_"); a falsy value disables init.
        gain: gain passed to the weight initializer (default 0.01).
    """

    def __init__(self, args, input_dim):
        super(MLPBase, self).__init__()

        self.use_feature_normalization = getattr(args, "use_feature_normalization", True)
        self.hidden_sizes = getattr(args, "hidden_sizes", [256, 256])
        self.initialization_critic = None
        init_name = getattr(args, "initialization_critic", "orthogonal_")
        if init_name:
            # BUGFIX: the configured value was previously fetched and then
            # ignored (orthogonal_ was always used). Honor a valid initializer
            # name; fall back to orthogonal_ for non-string or unknown values
            # so legacy configs (e.g. a bare True) keep working.
            if isinstance(init_name, str) and init_name in nn.init.__dict__:
                self.initialization_critic = nn.init.__dict__[init_name]
            else:
                self.initialization_critic = nn.init.orthogonal_
        self.gain = getattr(args, "gain", 0.01)

        if self.use_feature_normalization:
            self.feature_norm = nn.LayerNorm(input_dim)

        def init_(m):
            # Zero the bias; scale the weight init by self.gain.
            return init(m, self.initialization_critic, lambda x: nn.init.constant_(x, 0), gain=self.gain)

        layers = [
            init_(nn.Linear(input_dim, self.hidden_sizes[0])),
            nn.ReLU(inplace=True),
            nn.LayerNorm(self.hidden_sizes[0]),
        ]
        for i in range(1, len(self.hidden_sizes)):
            layers += [
                init_(nn.Linear(self.hidden_sizes[i - 1], self.hidden_sizes[i])),
                nn.ReLU(inplace=True),
                nn.LayerNorm(self.hidden_sizes[i]),
            ]
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        """Return MLP features for ``x`` (last dim must equal ``input_dim``)."""
        if self.use_feature_normalization:
            x = self.feature_norm(x)
        return self.mlp(x)


class Q_Critic(nn.Module):
    """Centralized Q critic mapping (agent one-hot id, obs, state) to per-action values."""

    def __init__(self, scheme, args):
        super(Q_Critic, self).__init__()

        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents

        self.gain = getattr(args, "gain", 0.01)
        self.use_feature_normalization = getattr(args, "use_feature_normalization", True)
        self.hidden_sizes = getattr(args, "hidden_sizes", [256, 256])
        self.initialization_critic = None
        init_name = getattr(args, "initialization_critic", "orthogonal_")
        if init_name:
            # BUGFIX: the configured value was previously discarded (the default
            # was even the string "True") and orthogonal_ was always used.
            # Honor a valid torch.nn.init name; fall back to orthogonal_ for
            # non-string or unknown values so legacy configs keep working.
            if isinstance(init_name, str) and init_name in nn.init.__dict__:
                self.initialization_critic = nn.init.__dict__[init_name]
            else:
                self.initialization_critic = nn.init.orthogonal_

        input_shape = self._get_input_shape(scheme)

        def init_(m, gain=1):
            # Zero the bias; weight init per self.initialization_critic.
            return init(m, self.initialization_critic, lambda x: nn.init.constant_(x, 0), gain=gain)

        self.mlp = MLPBase(args, input_shape)
        # NOTE: output head intentionally uses the default gain=1, not self.gain.
        self.value_out = init_(nn.Linear(self.hidden_sizes[-1], self.n_actions))

    def forward(self, batch):
        """Return Q-values of shape [batch_size, max_t, n_agents, n_actions]."""
        inputs = self._build_inputs(batch)
        bs, max_t, na, dim = inputs.size()
        inputs = inputs.reshape(-1, dim)
        critic_features = self.mlp(inputs)
        value = self.value_out(critic_features)
        return value.reshape(bs, max_t, na, self.n_actions)

    def _build_inputs(self, batch):
        """Concatenate agent one-hot ids, per-agent obs, and the broadcast global state."""
        bs, max_t = batch["obs"].shape[0], batch["obs"].shape[1]
        agent_ids = torch.eye(self.n_agents, device=self.args.device).unsqueeze(0).unsqueeze(0).repeat(bs, max_t, 1, 1)
        state = batch["state"].unsqueeze(2).repeat(1, 1, self.n_agents, 1)
        # [batch_size, max_t, n_agents, n_agents + obs_dim + state_dim]
        return torch.cat([agent_ids, batch["obs"], state], dim=-1)

    def _get_input_shape(self, scheme):
        """Flat input width: state_dim + obs_dim + n_agents (for the one-hot id)."""
        return scheme["state"]["vshape"] + scheme["obs"]["vshape"] + self.n_agents

    def init_hidden(self):
        """No recurrent state for this MLP critic."""
        return None


class V_Critic(nn.Module):
    """Centralized V critic mapping (agent one-hot id, state) to a scalar value per agent."""

    def __init__(self, scheme, args, output_dim=1, is_state_action_input=False):
        super(V_Critic, self).__init__()

        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents

        self.gain = getattr(args, "gain", 0.01)
        self.use_feature_normalization = getattr(args, "use_feature_normalization", True)
        self.hidden_sizes = getattr(args, "hidden_sizes", [256, 256])
        self.initialization_critic = None
        init_name = getattr(args, "initialization_critic", "orthogonal_")
        if init_name:
            # BUGFIX: the configured value was previously discarded (the default
            # was even the string "True") and orthogonal_ was always used.
            # Honor a valid torch.nn.init name; fall back to orthogonal_ for
            # non-string or unknown values so legacy configs keep working.
            if isinstance(init_name, str) and init_name in nn.init.__dict__:
                self.initialization_critic = nn.init.__dict__[init_name]
            else:
                self.initialization_critic = nn.init.orthogonal_

        input_shape = self._get_input_shape(scheme)

        def init_(m, gain=1):
            # Zero the bias; weight init per self.initialization_critic.
            return init(m, self.initialization_critic, lambda x: nn.init.constant_(x, 0), gain=gain)

        self.mlp = MLPBase(args, input_shape)
        # NOTE: output head intentionally uses the default gain=1, not self.gain.
        self.value_out = init_(nn.Linear(self.hidden_sizes[-1], 1))

    def forward(self, batch):
        """Return state values of shape [batch_size, max_t, n_agents, 1]."""
        inputs = self._build_inputs(batch)
        bs, max_t, na, dim = inputs.size()
        inputs = inputs.reshape(-1, dim)
        critic_features = self.mlp(inputs)
        value = self.value_out(critic_features)
        return value.reshape(bs, max_t, na, 1)

    def _build_inputs(self, batch):
        """Concatenate agent one-hot ids with the broadcast global state (no obs/actions)."""
        bs, max_t = batch["obs"].shape[0], batch["obs"].shape[1]
        agent_ids = torch.eye(self.n_agents, device=self.args.device).unsqueeze(0).unsqueeze(0).repeat(bs, max_t, 1, 1)
        state = batch["state"].unsqueeze(2).repeat(1, 1, self.n_agents, 1)
        # [batch_size, max_t, n_agents, n_agents + state_dim]
        return torch.cat([agent_ids, state], dim=-1)

    def _get_input_shape(self, scheme):
        """Flat input width: state_dim + n_agents (for the one-hot id)."""
        return scheme["state"]["vshape"] + self.n_agents

    def init_hidden(self):
        """No recurrent state for this MLP critic."""
        return None