import torch
import torch.nn as nn
import torch.nn.functional as F

import torch_geometric.nn as pyg_nn
import torch_geometric.utils as pyg_utils

from torch.nn import Sequential as Seq, Linear, ReLU
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops

from torch_geometric.data import DataLoader, Data


class GNNStack(torch.nn.Module):
    """A stack of GNN layers (GCN / GraphSage / GAT) arranged as residual
    pairs, followed by a 2-layer MLP head, for node- or graph-level
    classification. Returns log-probabilities.
    """

    def __init__(self, input_dim, hidden_dim, output_dim, args, task='node'):
        """
        Args:
            input_dim: size of the input node-feature vectors.
            hidden_dim: size of the hidden node embeddings.
            output_dim: number of output classes.
            args: namespace providing ``model_type``, ``num_layers``
                and ``dropout``.
            task: 'node' or 'graph' level prediction.

        Raises:
            RuntimeError: if ``task`` is neither 'node' nor 'graph'.
            ValueError: if ``args.model_type`` is unknown.
        """
        super(GNNStack, self).__init__()
        conv_model = self.build_conv_model(args.model_type)
        self.convs = nn.ModuleList()
        self.convs.append(conv_model(input_dim, hidden_dim))
        assert (args.num_layers >= 1), 'Number of layers is not >=1'
        for l in range(args.num_layers - 1):
            self.convs.append(conv_model(hidden_dim, hidden_dim))

        # post-message-passing classification head
        self.post_mp = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim), nn.Dropout(args.dropout),
            nn.Linear(hidden_dim, output_dim))

        self.task = task
        if not (self.task == 'node' or self.task == 'graph'):
            raise RuntimeError('Unknown task.')

        self.dropout = args.dropout
        self.num_layers = args.num_layers

    def build_conv_model(self, model_type):
        """Return the convolution-layer class for *model_type*."""
        if model_type == 'GCN':
            return pyg_nn.GCNConv
        elif model_type == 'GraphSage':
            return SAGEConv
        elif model_type == 'GAT':
            return GAT
        # BUG FIX: previously fell through and returned None, which only
        # surfaced later as a confusing "'NoneType' is not callable" error.
        raise ValueError('Unknown model type: {}'.format(model_type))

    def forward(self, data):
        x, edge_index, batch = data.x, data.edge_index, data.batch

        x = self.convs[0](x, edge_index)
        # Residual pairs: conv -> conv -> add skip -> ReLU -> dropout.
        # NOTE(review): with an even number of convs the final layer is never
        # used by this pairing scheme — confirm num_layers is intended odd.
        for idx in range(1, len(self.convs) - 1, 2):
            _x = x.clone()
            x = self.convs[idx](x, edge_index)
            x = self.convs[idx + 1](x, edge_index)
            x += _x
            x = F.relu(x)
            # BUG FIX: pass training=self.training so dropout is a no-op in
            # eval mode (the old call applied dropout at inference time too).
            x = F.dropout(x, p=self.dropout, training=self.training)

        # BUG FIX: graph-level tasks need a per-graph readout over node
        # embeddings; the pooling step was missing entirely before.
        if self.task == 'graph':
            x = pyg_nn.global_max_pool(x, batch)

        x = self.post_mp(x)

        return F.log_softmax(x, dim=1)

    def loss(self, pred, label):
        """Negative log-likelihood loss on log-softmax outputs."""
        return F.nll_loss(pred, label)

class SAGEConv(MessagePassing):
    """GraphSAGE-style convolution with element-wise max aggregation.

    Each neighbor message is a ReLU of a linear map of the neighbor feature;
    the max-aggregated result is concatenated with the node's own feature
    and passed through a second, bias-free linear layer plus ReLU.
    """

    def __init__(self, in_channels, out_channels):
        super(SAGEConv, self).__init__(aggr='max')  # "Max" aggregation.
        self.lin = torch.nn.Linear(in_channels, out_channels)
        self.act = torch.nn.ReLU()
        self.update_lin = torch.nn.Linear(
            in_channels + out_channels, out_channels, bias=False)
        self.update_act = torch.nn.ReLU()

    def forward(self, x, edge_index):
        """Run message passing.

        Args:
            x: node features of shape [N, in_channels].
            edge_index: COO edge indices of shape [2, E].
        """
        num_nodes = x.size(0)
        # Normalize the graph to exactly one self-loop per node.
        edge_index, _ = remove_self_loops(edge_index)
        edge_index, _ = add_self_loops(edge_index, num_nodes=num_nodes)
        return self.propagate(edge_index, size=(num_nodes, num_nodes), x=x)

    def message(self, x_j):
        """Transform each neighbor feature x_j ([E, in_channels])."""
        return self.act(self.lin(x_j))

    def update(self, aggr_out, x):
        """Combine aggregated messages ([N, out_channels]) with the node's
        own features via concatenation, then project and activate."""
        combined = torch.cat([aggr_out, x], dim=1)
        return self.update_act(self.update_lin(combined))


# # aggr_out has shape [N, out_channels]
#
# class GraphSage(pyg_nn.MessagePassing):
#     """Non-minibatch version of GraphSage."""
#     def __init__(self, in_channels, out_channels, reducer='mean',
#                  normalize_embedding=True):
#         super(GraphSage, self).__init__(aggr='mean')
#
#         ############################################################################
#         # TODO: Your code here!
#         # Define the layers needed for the forward function.
#         # Our implementation is ~2 lines, but don't worry if you deviate from this.
#
#         self.lin = nn.Linear(in_channels, out_channels)# TODO
#         self.agg_lin = nn.Linear(in_channels + out_channels, out_channels) # TODO
#
#         ############################################################################
#
#         if normalize_embedding:
#             self.normalize_emb = True
#
#     def forward(self, x, edge_index):
#         num_nodes = x.size(0)
#         # x has shape [N, in_channels]
#         # edge_index has shape [2, E]
#
#         ############################################################################
#         # TODO: Your code here!
#         # Given x, perform the aggregation and pass it through a MLP with skip-
#         # connection. Place the result in out.
#         # HINT: It may be useful to read the pyg_nn implementation of GCNConv,
#         # https://pytorch-geometric.readthedocs.io/en/latest/notes/create_gnn.html
#         # Our implementation is ~4 lines, but don't worry if you deviate from this.
#
#         #edge_index, _ = pyg_utils.remove_self_loops(edge_index)
#         #edge_index, _ = pyg_utils.add_self_loops(edge_index, num_nodes=num_nodes)
#         #aggr_out = self.lin(x)
#         #x = torch.cat([aggr_out,x], dim=1)
#         #out = aggr_out # TODO
#
#         edge_index, _ = remove_self_loops(edge_index)
#         edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))
#
#         ############################################################################
#         return self.propagate(edge_index, size=(num_nodes, num_nodes), x=x)
#
#     def message(self, x_j, edge_index, size):
#         # x_j has shape [E, out_channels]
#
#         row, col = edge_index
#         deg = pyg_utils.degree(row, size[0], dtype=x_j.dtype)
#         deg_inv_sqrt = deg.pow(-0.5)
#         norm = deg_inv_sqrt[row] * deg_inv_sqrt[col]
#
#         return norm.view(-1, 1) * x_j
#
#     def update(self, aggr_out):
#         ############################################################################
#         # TODO: Your code here! Perform the update step here.
#         # Our implementation is ~1 line, but don't worry if you deviate from this.
#
#         if self.normalize_emb:
#             print(aggr_out.shape)
#             aggr_out = self.agg_lin(aggr_out) # TODO
#
#         ############################################################################
#
#         return aggr_out


class GAT(pyg_nn.MessagePassing):
    """Graph Attention Network layer (Velickovic et al., ICLR 2018).

    Computes multi-head attention coefficients over each node's incoming
    edges and aggregates neighbor features weighted by those coefficients.
    """

    def __init__(self, in_channels, out_channels, num_heads=1, concat=True,
                 dropout=0, bias=True, **kwargs):
        super(GAT, self).__init__(aggr='add', **kwargs)

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = num_heads
        self.concat = concat
        self.dropout = dropout

        # BUG FIX: was None (a TODO placeholder), making the layer unusable.
        # Shared linear transform W; one out_channels-sized slice per head.
        self.lin = nn.Linear(in_channels, self.heads * out_channels)

        # BUG FIX: was None, so nn.init.xavier_uniform_(self.att) below
        # crashed. Attention weight vector a — one 2*out_channels vector per
        # head: e_ij = LeakyReLU(a^T [W h_i || W h_j]).
        self.att = nn.Parameter(torch.Tensor(1, self.heads, 2 * out_channels))

        if bias and concat:
            self.bias = nn.Parameter(torch.Tensor(self.heads * out_channels))
        elif bias and not concat:
            self.bias = nn.Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        nn.init.xavier_uniform_(self.lin.weight)
        nn.init.xavier_uniform_(self.att)
        # BUG FIX: self.bias is None when bias=False; guard before init.
        if self.bias is not None:
            nn.init.zeros_(self.bias)

    def forward(self, x, edge_index, size=None):
        # Apply the linear transformation before propagating messages;
        # per-head reshaping happens inside message().
        x = self.lin(x)

        # Start propagating messages.
        return self.propagate(edge_index, size=size, x=x)

    def message(self, edge_index_i, x_i, x_j, size_i):
        #  Constructs messages to node i for each edge (j, i).

        # Split the flat features into per-head slices: [E, heads, C_out].
        x_i = x_i.view(-1, self.heads, self.out_channels)
        x_j = x_j.view(-1, self.heads, self.out_channels)

        # Attention logits per edge and head (equation 7), then a softmax
        # normalized over each target node's incoming edges.
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, negative_slope=0.2)
        alpha = pyg_utils.softmax(alpha, edge_index_i, num_nodes=size_i)

        alpha = F.dropout(alpha, p=self.dropout, training=self.training)

        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        # Updates node embeddings: either concatenate the heads or average
        # them, then add the bias if present.
        if self.concat is True:
            aggr_out = aggr_out.view(-1, self.heads * self.out_channels)
        else:
            aggr_out = aggr_out.mean(dim=1)

        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out


class GNN(torch.nn.Module):
    """Deep GCN stack (1 input conv + 12 hidden convs) with residual
    connections between pairs of layers; outputs per-node log-probabilities
    for node classification."""

    def __init__(self, input_dim, hidden_dim, output_dim):
        super(GNN, self).__init__()

        self.dropout = 0.2
        self.num_layers = 12
        self.task = "node"

        conv_model = self.build_conv_model()
        self.convs = nn.ModuleList([conv_model(input_dim, hidden_dim)])
        for _ in range(self.num_layers):
            self.convs.append(conv_model(hidden_dim, hidden_dim))

        # post-message-passing classification head
        self.post_mp = nn.Sequential(
            nn.Linear(hidden_dim, hidden_dim),  # nn.Dropout(0.2),
            nn.Linear(hidden_dim, output_dim))

    def build_conv_model(self):
        """Every layer uses GCN convolution."""
        return pyg_nn.GCNConv
        # return SAGEConv

    def forward(self, data):
        x, edge_index = data.x, data.edge_index

        x = self.convs[0](x, edge_index)
        # Residual pairs: two convs, add the skip, then ReLU.
        for first in range(1, len(self.convs) - 1, 2):
            residual = x.clone()
            x = self.convs[first](x, edge_index)
            x = self.convs[first + 1](x, edge_index)
            # Residual connections mitigate degradation to a point, but with
            # very deep stacks (30+ layers) the network still fails to train.
            x += residual
            x = F.relu(x)
            # x = F.dropout(x, self.dropout)

        x = self.post_mp(x)

        return F.log_softmax(x, dim=1)

    def loss(self, pred, label):
        """Negative log-likelihood loss over log-softmax predictions."""
        return F.nll_loss(pred, label)


class mymodel:
    """Thin train/test wrapper around the GNN node classifier.

    Expects data dicts with keys: num_features, num_class, node_attr,
    edge, label, ID, num_samples (train) / node_attr, edge, ID (test).
    """

    def __init__(self, hidden_dim, epoch):
        # hidden_dim: GNN hidden embedding size; epoch: full-graph passes.
        self.hidden_dim = hidden_dim
        self.epoch = epoch
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

    def train(self, trainData):
        """Fit the model with Adam on full-graph batches; returns self."""
        self.model = GNN(trainData["num_features"], self.hidden_dim,
                         trainData["num_class"])

        opt = torch.optim.Adam(self.model.parameters())
        model = self.model.to(self.device)
        model.train()

        data = Data(x=torch.Tensor(trainData["node_attr"]).float(),
                    edge_index=torch.Tensor(trainData["edge"]).permute([1, 0]).long())
        data_y = trainData["label"].reshape(-1)

        # Loop-invariant work hoisted: the batch and labels never change.
        batch = data.to(self.device)
        label = torch.Tensor(data_y).long().to(self.device)

        for i in range(self.epoch):
            opt.zero_grad()
            pred = model(batch)

            # Only the labelled nodes listed in "ID" contribute to the loss.
            pred = pred[trainData["ID"].reshape(-1)]

            loss = model.loss(pred, label)
            loss.backward()
            opt.step()

            # BUG FIX: the old code kept a cumulative total_loss and divided
            # it by num_samples *every* epoch, so earlier epochs' losses were
            # re-divided repeatedly and the printed value shrank spuriously.
            epoch_loss = loss.item() / trainData["num_samples"]
            print("\r epoch={}/{} loss = {:.12f}".format(i + 1, self.epoch, epoch_loss), end="")
        print(" train done\n")
        return self

    def test(self, testData):
        """Predict class indices for the nodes listed in testData["ID"]."""
        data = Data(x=torch.Tensor(testData["node_attr"]).float(),
                    edge_index=torch.Tensor(testData["edge"]).permute([1, 0]).long())
        data = data.to(self.device)
        # BUG FIX: switch to eval mode and disable autograd for inference;
        # previously predictions ran in training mode with gradients tracked.
        self.model.eval()
        with torch.no_grad():
            pred = self.model(data).max(dim=1)[1]
        return pred[testData["ID"].reshape(-1)]

# Module-level default instance: hidden_dim=256, 20 training epochs.
model = mymodel(256,20)