# -*- coding: utf-8 -*-

import torch
import torch.nn as nn
import torch.nn.functional as F
import dgl
from dgl.nn.pytorch import GATConv

class DGL_AttentiveFP(nn.Module):
    """AttentiveFP graph encoder with a small 2-way prediction head.

    Adapted from
    https://github.com/awslabs/dgl-lifesci/blob/2fbf5fd6aca92675b709b6f1c3bc3c6ad5434e96/python/dgllife/model/model_zoo/attentivefp_predictor.py#L17

    Parameters
    ----------
    node_feat_size : int
        Size of the input node features (read from ``bg.ndata['h']``).
    edge_feat_size : int
        Size of the input edge features (read from ``bg.edata['e']``).
    num_layers : int
        Number of AttentiveFP GNN layers.
    num_timesteps : int
        Number of readout time steps.
    graph_feat_size : int
        Size of the learned graph representation.
    predictor_dim : optional
        Accepted for interface compatibility but unused here.
    """

    def __init__(self, node_feat_size, edge_feat_size, num_layers=2, num_timesteps=2, graph_feat_size=200, predictor_dim=None):
        super(DGL_AttentiveFP, self).__init__()
        # Imported lazily so merely importing this module does not require dgllife.
        from dgllife.model.gnn import AttentiveFPGNN
        from dgllife.model.readout import AttentiveFPReadout
        self.gnn = AttentiveFPGNN(node_feat_size=node_feat_size,
                                  edge_feat_size=edge_feat_size,
                                  num_layers=num_layers,
                                  graph_feat_size=graph_feat_size)
        self.readout = AttentiveFPReadout(feat_size=graph_feat_size,
                                          num_timesteps=num_timesteps)
        # Two-layer head mapping the graph embedding to 2 output logits.
        self.transform = nn.Sequential(
            nn.Linear(graph_feat_size, 128),
            nn.Tanh(),
            nn.Linear(128, 2),
        )

    def forward(self, bg):
        """Return ``(graph_feats, logits)`` for a batched DGLGraph.

        ``bg`` must carry node features under ``'h'`` and edge features
        under ``'e'``.
        """
        # BUGFIX: the original referenced an undefined global `device`.
        # Move the input graph to wherever this module's parameters live.
        device = next(self.parameters()).device
        bg = bg.to(device)
        with bg.local_scope():
            node_feats = bg.ndata.pop('h')
            edge_feats = bg.edata.pop('e')
            node_feats = self.gnn(bg, node_feats, edge_feats)
            # Third argument False: do not return per-node attention weights.
            graph_feats = self.readout(bg, node_feats, False)
        return graph_feats, self.transform(graph_feats)

class SemanticAttention(nn.Module):
    """Attention over the metapath axis.

    Given per-metapath embeddings ``z`` of shape (N, M, D), scores each of
    the M embeddings with a small two-layer network, softmax-normalizes the
    scores over the metapath dimension, and returns the weighted sum of
    shape (N, D).
    """

    def __init__(self, in_size, hidden_size=128):
        super(SemanticAttention, self).__init__()
        # Scorer: embedding (D) -> hidden -> single attention logit.
        self.project = nn.Sequential(
            nn.Linear(in_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1, bias=False),
        )

    def forward(self, z):
        # z: (N, M, D) -> logits: (N, M, 1)
        logits = self.project(z)
        # Normalize across the M metapaths.
        weights = F.softmax(logits, dim=1)
        # Broadcasted weighting, then collapse the metapath axis -> (N, D).
        weighted = weights * z
        return weighted.sum(dim=1)


class HANLayer(nn.Module):
    """
    HAN layer.

    For each metapath, applies a GAT stack (1, 2 or 3 layers deep, chosen by
    ``GAT_Layers``) to the metapath-induced homogeneous graph, then fuses the
    per-metapath node embeddings with semantic attention.

    Arguments
    ---------
    meta_paths : list of metapaths, each as a list of edge types
    in_size : input feature dimension
    out_size : output feature dimension of the final GAT layer (per head)
    layer_num_heads : number of attention heads in the final GAT layer
    dropout : feature-dropout probability for every GAT layer
        (attention dropout is fixed at 0)
    GAT_Layers : depth of the per-metapath GAT stack; must be 1, 2 or 3
    W_size : width of the shared linear projection applied before the GATs

    Inputs
    ------
    g : DGLHeteroGraph
        The heterogeneous graph
    h : tensor
        Input node features, shape (N, in_size)

    Outputs
    -------
    tensor
        Fused output features, shape (N, out_size * layer_num_heads)
    """

    def __init__(self, meta_paths, in_size, out_size, layer_num_heads, dropout, GAT_Layers, W_size):
        super(HANLayer, self).__init__()
        # One GAT stack per metapath-based adjacency matrix.
        self.gat_layers = nn.ModuleList()
        self.gat_layers1 = nn.ModuleList()
        self.gat_layers2 = nn.ModuleList()
        # Shared projection applied to the raw features before every stack.
        self.w_h = nn.Linear(in_size, W_size, bias=False)
        self.nums_GAT = GAT_Layers
        # NOTE: intermediate GAT layers use a single head with a hard-coded
        # width of 128; only the last layer uses `layer_num_heads` heads.
        # Layers are appended list-by-list (not interleaved) to preserve the
        # original parameter-initialization order under a fixed RNG seed.
        if GAT_Layers == 3:
            for _ in range(len(meta_paths)):
                self.gat_layers.append(GATConv(W_size, 128, 1, dropout, 0, activation=F.elu))
            for _ in range(len(meta_paths)):
                self.gat_layers1.append(GATConv(128, 128, 1, dropout, 0, activation=F.elu))
            for _ in range(len(meta_paths)):
                self.gat_layers2.append(GATConv(128, out_size, layer_num_heads, dropout, 0, activation=F.elu))
        elif GAT_Layers == 2:
            for _ in range(len(meta_paths)):
                self.gat_layers.append(GATConv(W_size, 128, 1, dropout, 0, activation=F.elu))
            for _ in range(len(meta_paths)):
                self.gat_layers1.append(GATConv(128, out_size, layer_num_heads, dropout, 0, activation=F.elu))
        elif GAT_Layers == 1:
            for _ in range(len(meta_paths)):
                self.gat_layers.append(GATConv(W_size, out_size, layer_num_heads, dropout, 0, activation=F.elu))

        self.semantic_attention = SemanticAttention(in_size=out_size * layer_num_heads)
        self.meta_paths = list(tuple(meta_path) for meta_path in meta_paths)
        # Metapath-reachable graphs are cached per input graph: rebuilding
        # them is expensive and they only change when `g` changes.
        self._cached_graph = None
        self._cached_coalesced_graph = {}

    def forward(self, g, h):
        # (Re)build the per-metapath homogeneous graphs when `g` changes.
        if self._cached_graph is None or self._cached_graph is not g:
            self._cached_graph = g
            self._cached_coalesced_graph.clear()
            for meta_path in self.meta_paths:
                self._cached_coalesced_graph[meta_path] = dgl.metapath_reachable_graph(
                    g, meta_path)

        # The shared projection is loop-invariant; the original recomputed
        # it once per metapath for no benefit.
        w_h = self.w_h(h)

        semantic_embeddings = []
        for i, meta_path in enumerate(self.meta_paths):
            new_g = self._cached_coalesced_graph[meta_path]
            # GATConv outputs (N, heads, D); flatten(1) merges heads -> (N, heads*D).
            if self.nums_GAT == 3:
                out = self.gat_layers[i](new_g, w_h).flatten(1)
                out = self.gat_layers1[i](new_g, out).flatten(1)
                out = self.gat_layers2[i](new_g, out).flatten(1)
                semantic_embeddings.append(out)
            elif self.nums_GAT == 2:
                out = self.gat_layers[i](new_g, w_h).flatten(1)
                out = self.gat_layers1[i](new_g, out).flatten(1)
                semantic_embeddings.append(out)
            elif self.nums_GAT == 1:
                semantic_embeddings.append(self.gat_layers[i](new_g, w_h).flatten(1))

        semantic_embeddings = torch.stack(semantic_embeddings, dim=1)  # (N, M, D * K)
        return self.semantic_attention(semantic_embeddings)  # (N, D * K)


class HAN(nn.Module):
    """Stacked HAN layers followed by a linear prediction head.

    Parameters
    ----------
    meta_paths : list of metapaths, each as a list of edge types
    in_size : input node-feature dimension
    hidden_size : per-head output dimension of each HAN layer
    out_size : final prediction dimension
    num_heads : list of int, attention heads per HAN layer (its length
        determines the number of stacked layers)
    dropout : feature-dropout probability passed to every HAN layer
    GAT_Layers : depth of the per-metapath GAT stack inside each HAN layer
    W_size : width of the pre-GAT linear projection inside each HAN layer
    """

    def __init__(self, meta_paths, in_size, hidden_size, out_size, num_heads, dropout, GAT_Layers, W_size):
        super(HAN, self).__init__()
        self.layers = nn.ModuleList()
        self.layers.append(HANLayer(meta_paths, in_size, hidden_size, num_heads[0], dropout, GAT_Layers, W_size))
        for l in range(1, len(num_heads)):
            # BUGFIX: the original omitted GAT_Layers and W_size here, so
            # HANLayer.__init__ (which requires 7 arguments) raised a
            # TypeError whenever more than one layer was requested.
            self.layers.append(HANLayer(meta_paths, hidden_size * num_heads[l - 1],
                                        hidden_size, num_heads[l], dropout, GAT_Layers, W_size))
        self.predict = nn.Linear(hidden_size * num_heads[-1], out_size, bias=False)

    def forward(self, g, h):
        """Run every HAN layer in sequence, then project to `out_size`."""
        for gnn in self.layers:
            h = gnn(g, h)
        return self.predict(h)


