import random
import torch
import torch.nn as nn
import torch.nn.functional as F

class MPNN(nn.Module):
    """Message-passing network that scores candidate edges of a graph.

    Pipeline: embed the raw node observations, build per-node edge
    embeddings, run ``n_layers`` rounds of message passing, then read out
    one score per candidate edge.
    """

    def __init__(self,
                 n_obs_in=1,
                 n_layers=3,
                 n_features=64):
        """
        Args:
            n_obs_in (int): number of raw observation features per node.
            n_layers (int): number of message-passing rounds.
            n_features (int): embedding width used throughout the network.
        """
        super().__init__()

        self.n_obs_in = n_obs_in
        self.n_layers = n_layers
        self.n_features = n_features

        self.node_init_embedding_layer = nn.Sequential(
            nn.Linear(n_obs_in, n_features, bias=False),
            nn.ReLU()
        )

        self.edge_embedding_layer = EdgeAndNodeEmbeddingLayer(n_obs_in, n_features)

        self.update_node_embedding_layer = nn.ModuleList(
            [UpdateNodeEmbeddingLayer(n_features) for _ in range(self.n_layers)])

        self.readout_layer = ReadoutLayer(n_features)

    @torch.no_grad()
    def get_normalisation(self, adj):
        """Per-node degree, used as the scale of neighbourhood aggregation.

        Args:
            adj (torch.Tensor): [B, N, N] adjacency; non-zero means "edge".

        Returns:
            torch.Tensor: [B, N, 1] float degree of each node, with isolated
            nodes clamped to 1 so later divisions cannot hit zero.
        """
        norm = torch.sum((adj != 0), dim=2).unsqueeze(-1)
        norm[norm == 0] = 1
        return norm.float()

    def forward(self, inputs):
        """Score every candidate edge.

        Args:
            inputs: tuple (adj, node_features, candidates, candi_w, mask):
                adj: [B, N, N] adjacency matrix,
                node_features: [B, N, n_obs_in],
                candidates: [B, 2, C] endpoint indices of candidate edges,
                candi_w: [B, C, 1] candidate edge weights,
                mask: 1 marks a still-selectable candidate.
                Any 2-D tensor is treated as unbatched and gets a leading
                batch dimension.

        Returns:
            torch.Tensor: [B, C] score per candidate edge.
        """
        adj, node_features, candidates, candi_w, mask = inputs
        if adj.dim() == 2:
            adj = adj[None, :, :]
        if node_features.dim() == 2:
            node_features = node_features[None, :, :]
        if candidates.dim() == 2:
            candidates = candidates[None, :, :]
        if candi_w.dim() == 2:
            candi_w = candi_w[None, :, :]
        if mask.dim() == 2:
            mask = mask[None, :, :]

        norm = self.get_normalisation(adj)  # [B, N, 1]

        init_node_embeddings = self.node_init_embedding_layer(node_features)  # [B, N, n_features]
        edge_embeddings = self.edge_embedding_layer(node_features, adj, norm)  # [B, N, n_features]

        # Initialise embeddings, then refine them with message passing.
        current_node_embeddings = init_node_embeddings

        for i in range(self.n_layers):
            current_node_embeddings = self.update_node_embedding_layer[i](current_node_embeddings,
                                                                          edge_embeddings,
                                                                          norm,
                                                                          adj)

        edge_score = self.readout_layer([current_node_embeddings,
                                         candidates,
                                         candi_w,
                                         mask])  # [B, C]

        return edge_score

    def sample_action(self, inputs, epsilon):
        """Epsilon-greedy choice over the candidate edges.

        Args:
            inputs: same tuple as :meth:`forward`.
            epsilon (float): probability of picking a random valid candidate.

        Returns:
            int: index of the chosen candidate edge.  (Fix: the explore
            branch previously returned a 0-d tensor while the greedy branch
            returned an int; both now return a plain Python int.)
        """
        edge_score = self.forward(inputs)
        mask = inputs[4]
        if random.random() < epsilon:
            # The candidate index is the last column of nonzero().  Using it
            # directly (no squeeze() chain) also fixes the crash that the
            # original code hit when exactly one candidate was valid.
            edge_index = torch.nonzero(mask)[:, -1]
            choice = torch.randint(len(edge_index), (1,)).item()
            return edge_index[choice].item()
        return edge_score.argmax().item()


class EdgeAndNodeEmbeddingLayer(nn.Module):
    """Builds an initial per-node edge embedding.

    For every ordered pair (i, j) the edge weight adj[b, i, j] is
    concatenated with node j's raw features, passed through a linear
    embedding, and averaged over j (scaled by ``norm``).  The rescaled
    degree is appended as one extra feature before a final linear layer.
    """

    def __init__(self, n_obs_in, n_features):
        super().__init__()
        self.n_obs_in = n_obs_in
        self.n_features = n_features

        # Per-edge input is (edge weight, neighbour features): n_obs_in + 1 dims.
        self.edge_embedding_NN = nn.Linear(int(n_obs_in + 1), n_features - 1, bias=False)
        self.edge_feature_NN = nn.Linear(n_features, n_features, bias=False)

    def forward(self, node_features, adj, norm):
        """
        Args:
            node_features (torch.Tensor): [B, N, n_obs_in]
            adj (torch.Tensor): [B, N, N]
            norm (torch.Tensor): [B, N, 1] per-node aggregation scale

        Returns:
            torch.Tensor: [B, N, n_features]
        """
        batch, n_nodes = adj.shape[0], adj.shape[1]

        # Neighbour j's features broadcast to every pair: [B, N, N, n_obs_in].
        neighbour_feats = node_features.unsqueeze(1).expand(-1, n_nodes, -1, -1)
        # Edge weight followed by neighbour features: [B, N, N, n_obs_in + 1].
        pair_feats = torch.cat([adj.unsqueeze(-1), neighbour_feats], dim=-1)
        # Zero out entries that do not correspond to a real edge.
        pair_feats = pair_feats * (adj.unsqueeze(-1) != 0).float()

        flat = pair_feats.view(batch, n_nodes * n_nodes, pair_feats.shape[-1])
        embedded = F.relu(self.edge_embedding_NN(flat))
        embedded = embedded.view(batch, n_nodes, n_nodes, self.n_features - 1)

        # Degree-normalised mean over neighbours: [B, N, n_features - 1].
        pooled = embedded.sum(dim=2) / norm

        # Append the globally rescaled degree as the last feature.
        with_degree = torch.cat([pooled, norm / norm.max()], dim=-1)
        return F.relu(self.edge_feature_NN(with_degree))

class UpdateNodeEmbeddingLayer(nn.Module):
    """One round of message passing over the graph."""

    def __init__(self, n_features):
        super().__init__()

        self.message_layer = nn.Linear(2 * n_features, n_features, bias=False)
        self.update_layer = nn.Linear(2 * n_features, n_features, bias=False)

    def forward(self, current_node_embeddings, edge_embeddings, norm, adj):
        """Aggregate neighbour embeddings and update each node.

        Args:
            current_node_embeddings (torch.Tensor): [B, N, n_features]
            edge_embeddings (torch.Tensor): [B, N, n_features]
            norm (torch.Tensor): [B, N, 1] per-node aggregation scale
            adj (torch.Tensor): [B, N, N]

        Returns:
            torch.Tensor: [B, N, n_features] updated node embeddings.
        """
        # Degree-scaled sum of neighbour embeddings.
        aggregated = torch.matmul(adj, current_node_embeddings) / norm

        # Message combines the aggregated neighbourhood with edge features.
        message_input = torch.cat([aggregated, edge_embeddings], dim=-1)
        messages = F.relu(self.message_layer(message_input))

        # New state combines the old embedding with the incoming message.
        update_input = torch.cat([current_node_embeddings, messages], dim=-1)
        return F.relu(self.update_layer(update_input))


class ReadoutLayer(nn.Module):
    """Scores each candidate edge from the final node embeddings.

    Each node's local embedding is combined with a mean-pooled graph
    embedding; each candidate edge is then scored by a small MLP from the
    sum of its endpoint embeddings plus an embedded edge weight.  Candidates
    whose mask entry is 0 get a huge negative score so an argmax never
    selects them.
    """

    def __init__(self, n_features, bias_pool=False, bias_readout=True):
        """
        Args:
            n_features (int): embedding width.
            bias_pool (bool): bias flag for the pooling / node-combination
                linear layers.
            bias_readout (bool): bias flag for the readout MLP.
                (Fix: this flag was previously accepted but never used — the
                readout MLP always fell back to nn.Linear's default
                bias=True.  Since the default here is also True, default
                behaviour is unchanged.)
        """
        super().__init__()

        self.layer_pooled = nn.Linear(int(n_features), int(n_features), bias=bias_pool)
        self.layer_nodes = nn.Linear(int(n_features) * 2, int(n_features), bias=bias_pool)

        # Embeds the scalar candidate-edge weight into n_features dims.
        self.candi_w_emb = nn.Sequential(nn.Linear(1, n_features),
                                         nn.ReLU())

        self.layer_edges = nn.Sequential(nn.Linear(2 * n_features, n_features, bias=bias_readout),
                                         nn.ReLU(),
                                         nn.Linear(n_features, n_features, bias=bias_readout),
                                         nn.ReLU(),
                                         nn.Linear(n_features, 1, bias=bias_readout))

    def forward(self, inputs):
        """Compute selection scores for the candidate edges.

        Args:
            inputs: tuple (node_embeddings, candidates, candi_w, mask):
                node_embeddings: [B, N, F]
                candidates: [B, 2, C] endpoint indices per candidate edge
                candi_w: [B, C, 1] candidate edge weights
                mask: 1 marks a still-selectable edge; 0-entries are forced
                    to a huge negative score.
                    NOTE(review): the per-sample mask slice must be
                    broadcast-compatible with the [1, C] score row — confirm
                    the expected mask shape with callers.

        Returns:
            torch.Tensor: [B, C] candidate-edge scores.
        """
        node_embeddings, candidates, candi_w, mask = inputs

        f_local = node_embeddings

        # Mean-pool over nodes, then broadcast the graph embedding back to
        # every node: [B, F] -> [B, N, F].
        h_pooled = self.layer_pooled(node_embeddings.sum(dim=1) / node_embeddings.shape[1])
        f_pooled = h_pooled.unsqueeze(1).expand_as(f_local)

        # Local + global node features, [B, N, n_features].
        features = F.relu(self.layer_nodes(torch.cat([f_pooled, f_local], dim=-1)))

        batch_edge_score = []
        for node_x, ca, cw, m in zip(features, candidates, candi_w, mask):
            # node_x: [N, F]; ca: [2, C]; cw: [C, 1].
            # Endpoint-sum embedding of each candidate edge: [C, F].
            edge_emb = node_x[ca[0]] + node_x[ca[1]]
            weight_emb = self.candi_w_emb(cw)
            edge_agg_emb = torch.cat([edge_emb, weight_emb], dim=1)
            edge_score = self.layer_edges(edge_agg_emb).reshape(1, -1)  # [1, C]
            # Rule out already-selected / invalid edges.
            edge_score[m == 0] = -10E20
            batch_edge_score.append(edge_score)
        batch_edge_score = torch.cat(batch_edge_score, dim=0)  # [B, C]

        return batch_edge_score
