import torch
from torch import nn
from torch_geometric.nn import HGTConv, Linear
# from torch_geometric.transforms import GCNNorm
# from torch_geometric.nn.conv.gcn_conv import gcn_norm
from model.common_layer import STEncoder


class Readout(nn.Module):
    """
    Learns a single graph-level representation for a molecule given
    GNN-generated node embeddings, and produces multi-label
    classification logits from it.

    Args:
        attr_dim: dimensionality of the raw node attribute vectors.
        embedding_dim: dimensionality of the GNN node embeddings.
        hidden_dim: width of the intermediate hidden layer.
        output_dim: dimensionality of the final graph representation.
        num_cats: number of output categories (logit dimension).
    """

    def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim, num_cats):
        super(Readout, self).__init__()
        self.attr_dim = attr_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim
        self.num_cats = num_cats

        self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, output_dim)
        self.output = nn.Linear(output_dim, num_cats)
        self.act = nn.ReLU()

        nn.init.xavier_normal_(self.layer1.weight, gain=0.2)
        nn.init.xavier_normal_(self.layer2.weight, gain=0.2)
        nn.init.xavier_normal_(self.output.weight, gain=0.2)

    def forward(self, x_dict, node_embeddings_dict):
        """
        Args:
            x_dict: node type -> raw feature tensor [num_nodes, attr_dim].
            node_embeddings_dict: node type -> embedding tensor
                [num_nodes, embedding_dim]. Must iterate in the same
                node-type order as ``x_dict`` so rows line up after
                concatenation (dicts preserve insertion order).

        Returns:
            (logits, graph_rep): classification logits [num_cats] and the
            pooled graph representation [output_dim].
        """
        # Stack the features of all node types into one tensor along the
        # node axis (replaces the original manual accumulation loop).
        node_features = torch.cat(list(x_dict.values()), dim=0)
        node_embeddings = torch.cat(list(node_embeddings_dict.values()), dim=0)

        # Concat initial node attributes with the GNN embeddings, per node.
        combined_rep = torch.cat((node_features, node_embeddings), dim=1)
        hidden_rep = self.act(self.layer1(combined_rep))
        # Mean-pool over nodes to get a single graph-level embedding.
        graph_rep = self.act(torch.mean(self.layer2(hidden_rep), dim=0))

        # Logits for multi-label classification.
        logits = self.output(graph_rep)

        return logits, graph_rep

class GraphEmbLayer(nn.Module):
    """
    Learns a graph-level representation embedding from node attributes
    plus GNN node embeddings (no classification head).

    Args:
        attr_dim: dimensionality of the raw node attribute vectors.
        embedding_dim: dimensionality of the GNN node embeddings.
        hidden_dim: width of the intermediate hidden layer.
        output_dim: dimensionality of the graph representation.
    """

    def __init__(self, attr_dim, embedding_dim, hidden_dim, output_dim):
        super().__init__()
        self.attr_dim = attr_dim
        self.hidden_dim = hidden_dim
        self.output_dim = output_dim

        self.layer1 = nn.Linear(attr_dim + embedding_dim, hidden_dim)
        self.layer2 = nn.Linear(hidden_dim, output_dim)
        self.act = nn.ReLU()

        nn.init.xavier_normal_(self.layer1.weight, gain=0.2)
        nn.init.xavier_normal_(self.layer2.weight, gain=0.2)
        # BUG FIX: the original also ran
        #   nn.init.xavier_normal_(self.output.weight, gain=0.2)
        # but this class has no ``self.output`` layer, so constructing it
        # raised AttributeError. The stray init is removed.

    def forward(self, x_dict, node_embeddings_dict):
        """
        Args:
            x_dict: node type -> raw feature tensor [num_nodes, attr_dim].
            node_embeddings_dict: node type -> embedding tensor
                [num_nodes, embedding_dim]; must iterate in the same
                node-type order as ``x_dict``.

        Returns:
            graph_rep: per-node graph representation [num_nodes, output_dim].
        """
        # Stack the features of all node types along the node axis.
        node_features = torch.cat(list(x_dict.values()), dim=0)
        node_embeddings = torch.cat(list(node_embeddings_dict.values()), dim=0)

        # Concat initial node attributes with the GNN embeddings, per node.
        combined_rep = torch.cat((node_features, node_embeddings), dim=1)
        hidden_rep = self.act(self.layer1(combined_rep))
        graph_rep = self.act(self.layer2(hidden_rep))

        return graph_rep

class Decoder(nn.Module):
    """
    LSTM decoder that unrolls from a single graph embedding for
    ``trg.shape[0]`` steps, feeding each step's output back in as the
    next input, and emits vocabulary logits at every step.

    Args:
        input_dim: kept for interface compatibility (currently unused).
        emb_dim: LSTM input/hidden size; must equal the graph embedding size.
        n_layers: number of stacked LSTM layers.
        dropout: dropout probability passed to the LSTM (applies between
            layers only, so it is a no-op when n_layers == 1).
        vocab_size: size of the output vocabulary (logit dimension).
    """

    def __init__(self, input_dim, emb_dim, n_layers, dropout, vocab_size):
        super().__init__()

        self.input_dim = input_dim
        self.emb_dim = emb_dim
        self.n_layers = n_layers
        self.vocab_size = vocab_size

        self.rnn = nn.LSTM(emb_dim, emb_dim, n_layers, dropout=dropout)
        self.fc_out = nn.Linear(emb_dim, vocab_size)
        # FIX: the original assigned self.dropout twice; once is enough.
        self.dropout = nn.Dropout(dropout)

    def forward(self, input, trg):
        """
        Args:
            input: graph embedding of shape [emb_dim] — the decoder seed.
            trg: target sequence; only its length (trg.shape[0]) is used.

        Returns:
            outputs: tensor [trg_len, vocab_size] of per-step logits.
        """
        device = input.device
        # [emb_dim] -> [seq_len=1, batch=1, emb_dim] as expected by nn.LSTM.
        input = input.unsqueeze(0).unsqueeze(0)

        trg_len = trg.shape[0]
        # Per-step logit buffer; FIX: allocate on the input's device so the
        # module also works on GPU (the original hard-coded CPU tensors).
        outputs = torch.zeros(trg_len, self.vocab_size, device=device)

        # Random initial state. FIX: the original hard-coded the layer dim
        # to 1, which crashes nn.LSTM when n_layers > 1; use self.n_layers
        # as the code's own shape comment ([n layers, batch, hid dim]) says.
        # NOTE(review): the state is resampled every call, so decoding is
        # stochastic even in eval mode — confirm this is intended.
        hidden = torch.randn(self.n_layers, 1, self.emb_dim, device=device)
        cell = torch.randn(self.n_layers, 1, self.emb_dim, device=device)

        for t in range(trg_len):
            # output: [1, 1, emb_dim]; hidden/cell: [n_layers, 1, emb_dim]
            output, (hidden, cell) = self.rnn(input, (hidden, cell))
            # Feed this step's output back in as the next step's input.
            input = output

            # Collapse the length-1 sequence dim, then the batch dim,
            # leaving [emb_dim] before projecting to the vocabulary.
            step = output.mean(0)
            prediction = self.fc_out(step.squeeze(0))  # [vocab_size]

            outputs[t] = prediction

        return outputs

class HGTClassification(nn.Module):
    """
    Network that consolidates the HGT encoder, the Readout classification
    head, and the LSTM Decoder into a single nn.Module.

    The ``*_channels`` arguments act as layer dimensions. The decoder
    hyper-parameters were previously hard-coded (64 / 1 / 0.5 / 200);
    they are now keyword parameters with those same defaults, so existing
    callers are unaffected.
    """

    def __init__(self, config, vocab, vocabulary_size, pad_idx, in_channels,
                 hidden_channels, out_channels, num_heads, num_layers,
                 node_types, metadata, num_categories,
                 decoder_emb_dim=64, decoder_layers=1, decoder_dropout=0.5,
                 decoder_vocab_size=200):
        super(HGTClassification, self).__init__()
        self.hgt = HGT(config, vocab, vocabulary_size, pad_idx, hidden_channels,
                       num_heads=num_heads, num_layers=num_layers,
                       node_types=node_types, metadata=metadata)
        self.readout = Readout(in_channels, hidden_channels, hidden_channels,
                               out_channels, num_categories)
        # self.graph_emb_layer = GraphEmbLayer(attr_dim=in_channels, embedding_dim=hidden_channels, hidden_dim=hidden_channels, output_dim=out_channels)
        # NOTE(review): the decoder consumes the Readout graph embedding, so
        # decoder_emb_dim must equal out_channels — confirm at call sites.
        self.decoder = Decoder(input_dim=decoder_emb_dim, emb_dim=decoder_emb_dim,
                               n_layers=decoder_layers, dropout=decoder_dropout,
                               vocab_size=decoder_vocab_size)

    def forward(self, x_dict, edge_index_dict, trg):
        """
        Args:
            x_dict: node type -> raw node feature tensor.
            edge_index_dict: edge type -> edge index tensor for the HGT encoder.
            trg: target sequence driving the decoder's unroll length.

        Returns:
            (logits, predictions): multi-label classification logits and the
            decoder's per-step vocabulary logits.
        """
        node_embeddings_dict = self.hgt(x_dict, edge_index_dict)
        # graph_rep is the graph-level embedding produced by the readout.
        logits, graph_rep = self.readout(x_dict, node_embeddings_dict)
        # graph_rep = self.graph_emb_layer(x_dict, node_embeddings_dict)
        predictions = self.decoder(graph_rep, trg)

        return logits, predictions