import torch
import torch.nn as nn
import torch.nn.functional as F
from modules.base.rnn import RNNLayer


def init(module, weight_init, bias_init, gain=1):
    """Initialize a module's weight and bias tensors in place.

    Args:
        module: (torch.nn.Module) module with a ``weight`` attribute (and
            optionally a ``bias``), e.g. ``nn.Linear``.
        weight_init: (callable or None) initializer invoked as
            ``weight_init(tensor, gain=gain)``; when None the module is
            returned untouched (initialization disabled).
        bias_init: (callable) initializer invoked as ``bias_init(tensor)``;
            only called when the module actually has a bias.
        gain: (float) gain forwarded to ``weight_init``.

    Returns:
        module: (torch.nn.Module) the same module, for inline chaining.
    """
    # BUG FIX: use `is None` rather than `== None` -- identity comparison is
    # the correct (and PEP 8 mandated) check and is robust against objects
    # overriding __eq__.
    if weight_init is None:
        return module
    weight_init(module.weight.data, gain=gain)
    if module.bias is not None:
        bias_init(module.bias.data)
    return module


class MLPBase(nn.Module):
    """MLP feature trunk: optional input LayerNorm followed by one
    Linear -> ReLU -> LayerNorm stack per entry in ``hidden_sizes``.
    """

    def __init__(self, args, input_dim):
        """
        Args:
            args: config namespace; reads the optional keys
                ``use_feature_normalization`` (bool, default True),
                ``hidden_sizes`` (list[int], default [256, 256]),
                ``initialization_critic`` (name of an ``nn.init`` function,
                default "orthogonal_"; falsy disables initialization),
                ``gain`` (float, default 0.01).
            input_dim: (int) width of the input feature vector.
        """
        super(MLPBase, self).__init__()

        self.use_feature_normalization = getattr(args, "use_feature_normalization", True)
        self.hidden_sizes = getattr(args, "hidden_sizes", [256, 256])
        # BUG FIX: the configured initializer name was previously ignored and
        # "orthogonal_" was hard-coded whenever the option was truthy.  Resolve
        # the requested nn.init function by name; any other truthy value keeps
        # the historical orthogonal_ fallback for backward compatibility.
        init_name = getattr(args, "initialization_critic", "orthogonal_")
        self.initialization_critic = None
        if init_name:
            if isinstance(init_name, str) and hasattr(nn.init, init_name):
                self.initialization_critic = getattr(nn.init, init_name)
            else:
                self.initialization_critic = nn.init.orthogonal_
        self.gain = getattr(args, "gain", 0.01)

        if self.use_feature_normalization:
            self.feature_norm = nn.LayerNorm(input_dim)

        def init_(m):
            # Zero bias, gain-scaled weight init (no-op when init disabled).
            return init(m, self.initialization_critic, lambda x: nn.init.constant_(x, 0), gain=self.gain)

        layers = [
            init_(nn.Linear(input_dim, self.hidden_sizes[0])),
            nn.ReLU(inplace=True),
            nn.LayerNorm(self.hidden_sizes[0]),
        ]
        for i in range(1, len(self.hidden_sizes)):
            layers += [
                init_(nn.Linear(self.hidden_sizes[i - 1], self.hidden_sizes[i])),
                nn.ReLU(inplace=True),
                nn.LayerNorm(self.hidden_sizes[i]),
            ]
        self.mlp = nn.Sequential(*layers)

    def forward(self, x):
        """Map ``x`` (..., input_dim) to features (..., hidden_sizes[-1])."""
        if self.use_feature_normalization:
            x = self.feature_norm(x)
        x = self.mlp(x)
        return x


class DiscreteCritic(nn.Module):
    """State(-action) value critic for discrete actions: an MLPBase trunk
    followed by a linear value head.
    """

    def __init__(self, scheme, args, output_dim=1, is_state_action_input=False, agent_id=None, last_layer_bias=False, is_ob_with_id=False, use_obs=False):
        """
        Args:
            scheme: (dict) episode scheme; reads the "state",
                "actions_onehot" and "obs" vshapes to size the input.
            args: config namespace; reads n_actions, n_agents plus the
                optional keys documented on MLPBase, and optionally
                use_recurrent_policy (bool) / recurrent_n (int).
            output_dim: (int) number of value outputs.
            is_state_action_input: (bool) append one-hot actions to the input.
            agent_id: (int or None) id used by the input-building helper.
            last_layer_bias: (bool) whether the value head has a bias.
            is_ob_with_id: (bool) append a one-hot agent id to the input.
            use_obs: (bool) append the agent's observation to the input.
        """
        super(DiscreteCritic, self).__init__()

        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents
        self.agent_id = agent_id

        self.is_state_action_input = is_state_action_input
        self.use_obs = use_obs

        self.gain = getattr(args, "gain", 0.01)
        self.use_feature_normalization = getattr(args, "use_feature_normalization", True)
        self.hidden_sizes = getattr(args, "hidden_sizes", [256, 256])
        # BUG FIX: the configured value was previously ignored (the old code
        # defaulted to the *string* "True" and always picked orthogonal_).
        # Resolve a string nn.init name when given one; any other truthy value
        # (e.g. the legacy "True"/True flag) keeps the orthogonal_ fallback.
        init_name = getattr(args, "initialization_critic", "orthogonal_")
        self.initialization_critic = None
        if init_name:
            if isinstance(init_name, str) and hasattr(nn.init, init_name):
                self.initialization_critic = getattr(nn.init, init_name)
            else:
                self.initialization_critic = nn.init.orthogonal_
        self.use_recurrent_policy = getattr(args, "use_recurrent_policy", False)
        self.recurrent_n = getattr(args, "recurrent_n", 1)

        input_shape = self._get_input_shape(scheme, is_state_action_input, is_ob_with_id, use_obs)

        def init_(m, gain=1):
            # Zero bias, gain-scaled weight init (no-op when init disabled).
            return init(m, self.initialization_critic, lambda x: nn.init.constant_(x, 0), gain=gain)

        self.mlp = MLPBase(args, input_shape)

        # Value head on top of the MLP features.
        self.value_out = init_(nn.Linear(self.hidden_sizes[-1], output_dim, bias=last_layer_bias))

    def forward(self, states, rnn_states=None):
        """Return values for ``states``.

        ``rnn_states`` is accepted for interface compatibility with recurrent
        critics but is unused -- this critic is purely feed-forward.
        """
        critic_features = self.mlp(states)
        value = self.value_out(critic_features)
        return value

    def _build_q_inputs_with_obs(self, batch, all_state_actions_i, agent_id):
        """Append agent ``agent_id``'s observation to every state-action row.

        Args:
            batch: episode batch; reads batch["obs"] of shape
                [batch_size, seq_len + 1, n_agents, obs_dim].
            all_state_actions_i: [batch_size, seq_len + 1, n_actions, state_dim + n_actions]
            agent_id: (int) which agent's observation to append.

        Returns:
            [batch_size, seq_len + 1, n_actions, state_dim + n_actions + obs_dim]
        """
        obs = batch["obs"][:, :, agent_id, :].unsqueeze(dim=2)   # [batch_size, seq_len + 1, 1, obs_dim]
        obs = obs.repeat((1, 1, all_state_actions_i.shape[2], 1)).to(all_state_actions_i.device)    # [batch_size, seq_len + 1, n_actions, obs_dim]
        # (Dead `all_state_actions_i.clone()` removed -- torch.cat already
        # allocates a fresh tensor and never aliases its inputs.)
        q_input = torch.cat((all_state_actions_i, obs), dim=-1)

        return q_input

    def _get_input_shape(self, scheme, is_state_action_input, is_ob_with_id, use_obs):
        """Input width: state, plus optional one-hot actions, one-hot agent
        id, and observation."""
        input_shape = scheme["state"]["vshape"]
        if is_state_action_input:
            input_shape += scheme["actions_onehot"]["vshape"][0]
        if is_ob_with_id:
            input_shape += self.n_agents
        if use_obs:
            input_shape += scheme["obs"]["vshape"]
        return input_shape

    def init_hidden(self):
        # Make hidden states on the same device/dtype as the model.
        # BUG FIX: previously referenced self.fc1, which this class never
        # defines, raising AttributeError whenever use_recurrent_policy is on;
        # use the value head's weight as the device/dtype anchor instead.
        if self.use_recurrent_policy:
            return self.value_out.weight.new(1, self.hidden_sizes[0]).zero_()
        else:
            return None


class MLPDoubleQ(nn.Module):
    """Clipped double-Q network (TD3/SAC style): two independent
    two-hidden-layer MLP heads over the same (state, [obs], one-hot action)
    input; ``forward`` returns the element-wise minimum of the two estimates.
    """

    def __init__(self, scheme, args, output_dim=1, agent_id=None, use_obs=False):
        """
        Args:
            scheme: (dict) episode scheme used to size the input.
            args: config namespace; reads n_actions, n_agents, device and the
                optional q_hidden_dim (default 256).
            output_dim: (int) number of outputs per input row.
            agent_id: (int or None) stored for callers; not used internally.
            use_obs: (bool) whether the agent observation is part of the input.
        """
        super(MLPDoubleQ, self).__init__()

        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents
        self.agent_id = agent_id

        self.use_obs = use_obs

        self.hidden_dim = getattr(args, "q_hidden_dim", 256)

        input_shape = self._get_input_shape(scheme, use_obs)

        # First Q head.
        self.fc1 = nn.Linear(input_shape, self.hidden_dim)
        self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.fc3 = nn.Linear(self.hidden_dim, output_dim)

        # Second (independent) Q head.
        self.fc4 = nn.Linear(input_shape, self.hidden_dim)
        self.fc5 = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.fc6 = nn.Linear(self.hidden_dim, output_dim)

    def both(self, X):
        """
        Inputs:
            X (PyTorch Matrix): Batch of observations
        Outputs:
            (out, out_2): Q estimates from the two independent heads
        """
        h1 = F.relu(self.fc1(X))
        h2 = F.relu(self.fc2(h1))
        out = self.fc3(h2)

        h1_2 = F.relu(self.fc4(X))
        h2_2 = F.relu(self.fc5(h1_2))
        # BUG FIX: the second head previously ended with self.fc3 (shared
        # with the first head), so fc6 was never used and the two Q estimates
        # were not independent -- defeating the purpose of double-Q.
        out_2 = self.fc6(h2_2)

        return out, out_2

    def forward(self, X):
        """Element-wise minimum of the two Q heads (clipped double-Q)."""
        value = torch.min(*self.both(X))
        return value

    def _build_q_inputs_with_obs(self, batch, agent_id):
        """Build one input row per action: state, optional agent obs, and a
        one-hot action id, broadcast over batch and time.

        Returns:
            [batch_size, max_t, n_actions, input_shape]
        """
        bs = batch["state"].shape[0]
        max_t = batch["state"].shape[1]
        inputs = []
        # state, (obs), action
        inputs.append(batch["state"].unsqueeze(2).repeat(1, 1, self.n_actions, 1))
        if self.use_obs:
            inputs.append(batch["obs"][:, :, agent_id, :].unsqueeze(2).repeat(1, 1, self.n_actions, 1))
        inputs.append(torch.eye(self.n_actions, device=self.args.device).unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))
        inputs = torch.cat(inputs, dim=-1)
        return inputs

    def _get_input_shape(self, scheme, use_obs):
        """Input width: state (+ obs when used) + one-hot action."""
        input_shape = scheme["state"]["vshape"]
        if use_obs:
            input_shape += scheme["obs"]["vshape"]
        input_shape += scheme["actions_onehot"]["vshape"][0]
        return input_shape


class MLPQnetork(nn.Module):
    """Single Q-network: a two-hidden-layer MLP mapping a concatenated
    (state, [obs], one-hot action) vector to Q-values.

    NOTE(review): the class name keeps its historical spelling ("Qnetork")
    because external callers reference it by this name.
    """

    def __init__(self, scheme, args, output_dim=1, agent_id=None, use_obs=False):
        """
        Args:
            scheme: (dict) episode scheme used to size the input.
            args: config namespace; reads n_actions, n_agents, device and the
                optional q_hidden_dim (default 256).
            output_dim: (int) number of outputs per input row.
            agent_id: (int or None) stored for callers; not used internally.
            use_obs: (bool) whether the agent observation is part of the input.
        """
        super(MLPQnetork, self).__init__()

        self.args = args
        self.n_actions = args.n_actions
        self.n_agents = args.n_agents
        self.agent_id = agent_id
        self.use_obs = use_obs
        self.hidden_dim = getattr(args, "q_hidden_dim", 256)

        in_dim = self._get_input_shape(scheme, use_obs)
        self.fc1 = nn.Linear(in_dim, self.hidden_dim)
        self.fc2 = nn.Linear(self.hidden_dim, self.hidden_dim)
        self.fc3 = nn.Linear(self.hidden_dim, output_dim)

    def forward(self, X):
        """
        Inputs:
            X (PyTorch Matrix): Batch of observations
        Outputs:
            out (PyTorch Matrix): Output of network (actions, values, etc)
        """
        hidden = F.relu(self.fc1(X))
        hidden = F.relu(self.fc2(hidden))
        return self.fc3(hidden)

    def _build_inputs(self, batch, agent_id):
        """Build one input row per action: state, optional agent obs, and a
        one-hot action id, broadcast over batch and time."""
        state = batch["state"]
        bs, max_t = state.shape[0], state.shape[1]
        pieces = [state.unsqueeze(2).repeat(1, 1, self.n_actions, 1)]
        if self.use_obs:
            agent_obs = batch["obs"][:, :, agent_id, :]
            pieces.append(agent_obs.unsqueeze(2).repeat(1, 1, self.n_actions, 1))
        one_hot = torch.eye(self.n_actions, device=self.args.device)
        pieces.append(one_hot.unsqueeze(0).unsqueeze(0).expand(bs, max_t, -1, -1))
        return torch.cat(pieces, dim=-1)

    def _get_input_shape(self, scheme, use_obs):
        """Input width: state (+ obs when used) + one-hot action."""
        width = scheme["state"]["vshape"] + scheme["actions_onehot"]["vshape"][0]
        if use_obs:
            width += scheme["obs"]["vshape"]
        return width