import torch

import torch.nn.functional as F
from dgl import DGLError
from torch.nn.modules.module import Module
from torch.nn.parameter import Parameter
import torch.nn as nn
import numpy as np
import dgl
import dgl.nn as dglnn
import torch.nn.functional as F
from dgl.nn.functional import edge_softmax
import torchfile
from torch.nn import init
import dgl.function as fn
from dgl.utils import expand_as_pair
from Graph import custom_HeteroGraphConv,custom_GATConv
# Encoder layers for the model (R-GCN / GAT variants).


class GraphConvolution(Module):
    """Single GCN layer (Kipf & Welling, https://arxiv.org/abs/1609.02907).

    Computes ``act(adj @ dropout(x) @ W)`` with a Xavier-initialised weight.
    """

    def __init__(self, in_features, out_features, dropout=0., act=F.relu):
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        self.act = act
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.reset_parameters()

    def reset_parameters(self):
        # Xavier/Glorot keeps the activation scale roughly constant.
        torch.nn.init.xavier_uniform_(self.weight)

    def forward(self, input, adj):
        """One propagation step.

        :param input: dense node-feature matrix, shape (N, in_features)
        :param adj: (sparse) normalised adjacency matrix, shape (N, N)
        :return: activated node embeddings, shape (N, out_features)
        """
        dropped = F.dropout(input, self.dropout, self.training)
        projected = torch.mm(dropped, self.weight)
        aggregated = torch.spmm(adj, projected)
        return self.act(aggregated)

    def __repr__(self):
        return '{} ({} -> {})'.format(
            self.__class__.__name__, self.in_features, self.out_features)


class RelationalGraphConvolution(Module):
    """Relational GCN layer with one shared weight and two per-relation weights.

    ``forward`` receives a list of two (sparse) adjacency matrices, one per
    relation type ("dc" and "dd"), and averages three propagations:

    * the union graph with the shared weight (W0 in the R-GCN paper),
    * the "dc" graph with ``weight_dc``,
    * the "dd" graph with ``weight_dd``.
    """

    def __init__(self, in_features, out_features, dropout=0., act=F.relu):
        super(RelationalGraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.dropout = dropout
        # NOTE(review): ``act`` is stored but never applied in forward();
        # kept as-is for interface compatibility — confirm whether the
        # non-linearity was dropped intentionally.
        self.act = act

        # ``weight`` is shared across relations (W0 in the paper); the other
        # two are relation-specific weights.
        self.weight = Parameter(torch.FloatTensor(in_features, out_features))
        self.weight_dc = Parameter(torch.Tensor(in_features, out_features))
        self.weight_dd = Parameter(torch.Tensor(in_features, out_features))
        self.reset_parameters()

    def reset_parameters(self):
        torch.nn.init.xavier_uniform_(self.weight)
        torch.nn.init.xavier_uniform_(self.weight_dc)
        torch.nn.init.xavier_uniform_(self.weight_dd)

    def forward(self, input, adj):
        """Propagate features over both relation graphs and average.

        :param input: dense node features, shape (N, in_features)
        :param adj: list of two sparse adjacency matrices [adj_dc, adj_dd],
            each of shape (N, N); both are assumed to include self-loops
        :return: averaged embeddings, shape (N, out_features)
        """
        input = F.dropout(input, self.dropout, self.training)

        # Union of both relation graphs. Both adjacencies carry a self-loop,
        # so one identity is subtracted to avoid counting it twice.
        # BUG FIX: build the identity on the same device/dtype as the input —
        # the original hard-coded a CPU float32 identity, which crashed on
        # GPU tensors.
        n = adj[0].shape[0]
        eye = torch.eye(n, dtype=input.dtype, device=input.device).to_sparse()
        all_adj = adj[0].add(adj[1]).sub(eye)

        # Shared-weight propagation over the union graph.
        support = torch.mm(input, self.weight)
        output = torch.spmm(all_adj, support)

        # Relation-specific propagation: dc.
        support_dc = torch.mm(input, self.weight_dc)
        output_dc = torch.spmm(adj[0], support_dc)

        # Relation-specific propagation: dd.
        support_dd = torch.mm(input, self.weight_dd)
        output_dd = torch.spmm(adj[1], support_dd)

        # Plain average of the three channels (the authors noted this variant
        # "works for tf-idf" features).
        final_output = (output + output_dc + output_dd) / 3

        return final_output

    def __repr__(self):
        return self.__class__.__name__ + ' (' \
               + str(self.in_features) + ' -> ' \
               + str(self.out_features) + ')'


class RelationAttention(nn.Module):
    """Relation-level attention that fuses per-relation embeddings.

    Input ``z`` has shape (N, M, D): N nodes, M relations. A small MLP scores
    each relation, the scores are averaged over nodes and softmax-normalised,
    and the relations are combined as a weighted sum, yielding (N, D).
    """

    def __init__(self, in_size, hidden_size=16):
        super(RelationAttention, self).__init__()

        # Two-layer scorer: Linear -> Tanh -> Linear, one scalar per relation.
        self.project = nn.Sequential(
            nn.Linear(in_size, hidden_size),
            nn.Tanh(),
            nn.Linear(hidden_size, 1, bias=False),
        )

    def forward(self, z):
        scores = self.project(z).mean(0)            # (M, 1): node-averaged score
        weights = torch.softmax(scores, dim=0)      # (M, 1): relation weights
        weights = weights.expand((z.shape[0],) + weights.shape)  # (N, M, 1)
        return (weights * z).sum(1)                 # (N, D)


def node_drop(feats, drop_rate, training):
    """Randomly zero entire node-feature rows during training.

    Each row survives with probability ``1 - drop_rate``; surviving rows are
    rescaled by ``1 / (1 - drop_rate)`` so the expected value is unchanged
    (inverted dropout). At inference time features pass through untouched.
    """
    if not training:
        return feats
    num_nodes = feats.shape[0]
    keep_probs = torch.FloatTensor(np.ones(num_nodes) * (1. - drop_rate))
    masks = torch.bernoulli(keep_probs).unsqueeze(1)
    return masks.to(feats.device) * feats / (1. - drop_rate)


class custom_GATConv(nn.Module):
    """GAT convolution with an extra multiplicative message channel.

    Channel 1 is standard GAT: project with ``fc`` (or ``fc_src``/``fc_dst``
    for bipartite input), score edges with ``attn_l``/``attn_r`` + LeakyReLU,
    softmax over incoming edges and aggregate. Channel 2 projects with
    ``fc2`` (``fc_src2``/``fc_dst2``) and aggregates the element-wise product
    of source and destination features. The two channel outputs are summed,
    then residual, bias and activation are applied.

    Parameters mirror ``dgl.nn.GATConv``, plus ``edge_drop``: the fraction of
    edges whose attention weight is forced to zero during training.

    NOTE(review): ``attn_l2``/``attn_r2`` are created and initialised but not
    used in ``forward`` — confirm whether they are vestigial.
    """

    def __init__(self,
                 in_feats,
                 out_feats,
                 num_heads,
                 feat_drop=0.1,
                 attn_drop=0.,
                 negative_slope=0.2,
                 edge_drop=0.1,
                 residual=False,
                 activation=None,
                 allow_zero_in_degree=False,
                 bias=True):
        super(custom_GATConv, self).__init__()
        self._num_heads = num_heads
        self._in_src_feats, self._in_dst_feats = expand_as_pair(in_feats)
        self._out_feats = out_feats
        self._allow_zero_in_degree = allow_zero_in_degree
        if isinstance(in_feats, tuple):
            # Bipartite input: separate source/destination projections,
            # duplicated for the second (multiplicative) channel.
            self.fc_src = nn.Linear(
                self._in_src_feats, out_feats * num_heads, bias=False)
            self.fc_dst = nn.Linear(
                self._in_dst_feats, out_feats * num_heads, bias=False)
            self.fc_src2 = nn.Linear(
                self._in_src_feats, out_feats * num_heads, bias=False)
            self.fc_dst2 = nn.Linear(
                self._in_dst_feats, out_feats * num_heads, bias=False)
        else:
            self.fc = nn.Linear(
                self._in_src_feats, out_feats * num_heads, bias=False)
            self.fc2 = nn.Linear(
                self._in_src_feats, out_feats * num_heads, bias=False)
        self.attn_l = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_r = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_l2 = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.attn_r2 = nn.Parameter(torch.FloatTensor(size=(1, num_heads, out_feats)))
        self.feat_drop = nn.Dropout(feat_drop)
        self.attn_drop = nn.Dropout(attn_drop)
        self.leaky_relu = nn.LeakyReLU(negative_slope)
        if bias:
            self.bias = nn.Parameter(torch.FloatTensor(size=(num_heads * out_feats,)))
        else:
            self.register_buffer('bias', None)
        if residual:
            if self._in_dst_feats != out_feats:
                self.res_fc = nn.Linear(
                    self._in_dst_feats, num_heads * out_feats, bias=False)
            else:
                self.res_fc = nn.Identity()
        else:
            self.register_buffer('res_fc', None)
        self.reset_parameters()
        self.activation = activation

        self.edge_drop = edge_drop

    def reset_parameters(self):
        """Xavier-initialise all projections and attention vectors; zero the bias."""
        gain = nn.init.calculate_gain('relu')
        if hasattr(self, 'fc'):
            nn.init.xavier_normal_(self.fc.weight, gain=gain)
            # BUG FIX: fc2 was previously left at its default initialisation.
            nn.init.xavier_normal_(self.fc2.weight, gain=gain)
        else:
            nn.init.xavier_normal_(self.fc_src.weight, gain=gain)
            nn.init.xavier_normal_(self.fc_dst.weight, gain=gain)
            # BUG FIX: second-channel projections were never initialised.
            nn.init.xavier_normal_(self.fc_src2.weight, gain=gain)
            nn.init.xavier_normal_(self.fc_dst2.weight, gain=gain)
        nn.init.xavier_normal_(self.attn_l, gain=gain)
        nn.init.xavier_normal_(self.attn_r, gain=gain)
        nn.init.xavier_normal_(self.attn_l2, gain=gain)
        nn.init.xavier_normal_(self.attn_r2, gain=gain)
        if self.bias is not None:
            # BUG FIX: guard against bias=False, where self.bias is None and
            # nn.init.constant_ would raise.
            nn.init.constant_(self.bias, 0)
        if isinstance(self.res_fc, nn.Linear):
            nn.init.xavier_normal_(self.res_fc.weight, gain=gain)

    def set_allow_zero_in_degree(self, set_value):
        """Toggle the 0-in-degree check performed at the top of forward()."""
        self._allow_zero_in_degree = set_value

    def forward(self, graph, feat, get_attention=False):
        """Run both message channels on ``graph``.

        :param graph: a DGL graph or block
        :param feat: either a single feature tensor, or a tuple
            ``(src_feat, dst_feat, do_edge_drop)`` for bipartite input
        :param get_attention: also return the per-edge attention tensor
        :return: node embeddings of shape (N_dst, num_heads, out_feats),
            optionally with the attention weights
        """
        with graph.local_scope():
            if not self._allow_zero_in_degree:
                if (graph.in_degrees() == 0).any():
                    raise DGLError('There are 0-in-degree nodes in the graph, '
                                   'output for those nodes will be invalid. '
                                   'This is harmful for some applications, '
                                   'causing silent performance regression. '
                                   'Adding self-loop on the input graph by '
                                   'calling `g = dgl.add_self_loop(g)` will resolve '
                                   'the issue. Setting ``allow_zero_in_degree`` '
                                   'to be `True` when constructing this module will '
                                   'suppress the check and let the code run.')

            if isinstance(feat, tuple):
                # The tuple form carries the per-call edge-drop flag.
                do_edge_drop = feat[2]
                h_src = self.feat_drop(feat[0])
                h_dst = self.feat_drop(feat[1])
                h_src2 = h_src.clone()
                h_dst2 = h_dst.clone()
                if not hasattr(self, 'fc_src'):
                    feat_src = self.fc(h_src).view(-1, self._num_heads, self._out_feats)
                    feat_dst = self.fc(h_dst).view(-1, self._num_heads, self._out_feats)
                    feat_src2 = self.fc2(h_src2).view(-1, self._num_heads, self._out_feats)
                    feat_dst2 = self.fc2(h_dst2).view(-1, self._num_heads, self._out_feats)
                else:
                    feat_src = self.fc_src(h_src).view(-1, self._num_heads, self._out_feats)
                    feat_dst = self.fc_dst(h_dst).view(-1, self._num_heads, self._out_feats)
                    feat_src2 = self.fc_src2(h_src2).view(-1, self._num_heads, self._out_feats)
                    feat_dst2 = self.fc_dst2(h_dst2).view(-1, self._num_heads, self._out_feats)
            else:
                # BUG FIX: do_edge_drop was unbound on this path, raising a
                # NameError during training; default to no edge dropout.
                do_edge_drop = False
                h_src = h_dst = self.feat_drop(feat)
                h_src2 = h_dst2 = h_src.clone()
                feat_src = feat_dst = self.fc(h_src).view(
                    -1, self._num_heads, self._out_feats)
                # BUG FIX: the second channel must use fc2 (was self.fc),
                # matching the bipartite branch above.
                feat_src2 = feat_dst2 = self.fc2(h_src2).view(
                    -1, self._num_heads, self._out_feats)
                if graph.is_block:
                    feat_dst = feat_src[:graph.number_of_dst_nodes()]
                    feat_dst2 = feat_src2[:graph.number_of_dst_nodes()]

            # el/er are a_l·Wh_i and a_r·Wh_j of the GAT attention logit.
            el = (feat_src * self.attn_l).sum(dim=-1).unsqueeze(-1)
            er = (feat_dst * self.attn_r).sum(dim=-1).unsqueeze(-1)

            graph.srcdata.update({'ft': feat_src, 'el': el, 'feat_src2': feat_src2})
            graph.dstdata.update({'er': er, 'feat_dst2': feat_dst2})
            graph.apply_edges(fn.u_add_v('el', 'er', 'e'))
            e = self.leaky_relu(graph.edata.pop('e'))

            # Attention softmax with optional structural edge dropout: a random
            # edge_drop fraction of edges gets attention weight exactly zero.
            if self.training and do_edge_drop and self.edge_drop > 0:
                perm = torch.randperm(graph.number_of_edges(), device=e.device)
                bound = int(graph.number_of_edges() * self.edge_drop)
                eids = perm[bound:]
                graph.edata["a"] = torch.zeros_like(e)
                graph.edata["a"][eids] = self.attn_drop(edge_softmax(graph, e[eids], eids=eids))
            else:
                graph.edata['a'] = self.attn_drop(edge_softmax(graph, e))

            # Channel 1: attention-weighted sum of source features.
            graph.update_all(fn.u_mul_e('ft', 'a', 'm'),
                             fn.sum('m', 'initial_ft'))
            # Channel 2: sum of element-wise src*dst products of fc2 features.
            graph.update_all(fn.u_mul_v('feat_src2', 'feat_dst2', 'm2'),
                             fn.sum('m2', 'add_ft'))
            rst = graph.dstdata['initial_ft'] + graph.dstdata['add_ft']

            # residual
            if self.res_fc is not None:
                resval = self.res_fc(h_dst).view(h_dst.shape[0], self._num_heads, self._out_feats)
                rst = rst + resval
            # bias
            if self.bias is not None:
                rst = rst + self.bias.view(1, self._num_heads, self._out_feats)
            # activation
            if self.activation:
                rst = self.activation(rst)

            if get_attention:
                return rst, graph.edata['a']
            else:
                return rst


class GNN(nn.Module):
    """Two-layer heterogeneous GAT encoder.

    Each layer applies one custom_GATConv per edge type through
    custom_HeteroGraphConv with aggregate='stack', so every node type gets a
    (num_nodes, num_relations, num_heads, feats) output, which is flattened
    and fused across relations by RelationAttention.
    """

    def __init__(self, in_feats, hid_feats, out_feats, rel_names):
        super().__init__()

        self.num_heads = 4 # number of attention heads (manually chosen; 8 was also considered)
        self.hid_feats = int(hid_feats / self.num_heads)  # per-head hidden width
        self.out_feats = int(out_feats / self.num_heads)  # per-head output width
        self.relation_attention = RelationAttention(hid_feats)

        self.gatconv1 = custom_HeteroGraphConv({  # custom heterogeneous conv (in place of dglnn.HeteroGraphConv): one GAT per relation
            'r-s': custom_GATConv(in_feats, self.hid_feats, num_heads=self.num_heads),
            'r-r': custom_GATConv(in_feats, self.hid_feats, num_heads=self.num_heads),
            's-i': custom_GATConv(in_feats, self.hid_feats, num_heads=self.num_heads),
            'i-i': custom_GATConv(in_feats, self.hid_feats, num_heads=self.num_heads),
            's-s': custom_GATConv(in_feats, self.hid_feats, num_heads=self.num_heads),
            'i-s': custom_GATConv(in_feats, self.hid_feats, num_heads=self.num_heads),
        }, aggregate='stack')

        self.gatconv2 = custom_HeteroGraphConv({  # second layer, same relation set

            'r-s': custom_GATConv(self.hid_feats * self.num_heads, self.out_feats, num_heads=self.num_heads),
            'r-r': custom_GATConv(self.hid_feats * self.num_heads, self.out_feats, num_heads=self.num_heads),
            's-i': custom_GATConv(self.hid_feats * self.num_heads, self.out_feats, num_heads=self.num_heads),
            'i-i': custom_GATConv(self.hid_feats * self.num_heads, self.out_feats, num_heads=self.num_heads),
            's-s': custom_GATConv(self.hid_feats * self.num_heads, self.out_feats, num_heads=self.num_heads),
            'i-s': custom_GATConv(self.hid_feats * self.num_heads, self.out_feats, num_heads=self.num_heads),

        }, aggregate='stack')
        self.dropout = nn.Dropout(0.1)
        # Per-relation outputs are fused first; dropout is applied between layers.

    def forward(self, blocks, inputs, do_edge_drop,ingre_step):
        """Encode a 2-hop sampled subgraph.

        :param blocks: two DGL blocks, one per layer (blocks[0] outer hop,
            blocks[1] inner hop — presumably; confirm against the sampler)
        :param inputs: dict of input feature tensors keyed by node type
        :param do_edge_drop: flag forwarded to every custom_GATConv
        :param ingre_step: only referenced by a commented-out branch below
        :return: dict of fused node embeddings keyed by node type
        """
        edge_weight_0 = blocks[0].edata['weight']
        # NOTE(review): confirm exactly which hop blocks[1] corresponds to
        edge_weight_1 = blocks[1].edata['weight']
        #print("edge_weight_0:",edge_weight_0)
        #print("edge_weight_i:",edge_weight_1)
        # per-edge weight tensors passed alongside the features

        num_recipes = blocks[-1].dstdata[dgl.NID]['recipe'].shape[0] # e.g. 116 in the authors' run
        num_instr = blocks[-1].dstdata[dgl.NID]['step'].shape[0]  # e.g. 17
        # newly added — keep an eye on this when debugging
        num_ingre = blocks[-1].dstdata[dgl.NID]['ingredient'].shape[0]
        # NOTE(review): originally only users and recipes were counted here;
        # ingredient was added later

        # each layer applies both the per-edge (adaptive) and the
        # relation-level attention

        # the original model only related users and recipes, so one layer of
        # relation attention sufficed; this may need adjusting here
        h = self.gatconv1(blocks[0], inputs, edge_weight_0, do_edge_drop)# Eq. 4: per-edge attention lives inside custom_GATConv
        #print("gatconv1:",len(h['step']))
        h = {k: F.relu(v).flatten(2) for k, v in h.items()}  # Eq. 4: non-linearity, merge head dims
        # Eq. 3 itself is implemented inside the custom_GATConv class
        h = {k: self.relation_attention(v) for k, v in h.items()}

        # NOTE(review): first_layer_output is built but never returned —
        # presumably kept for a (now commented-out) auxiliary output
        first_layer_output = {}
        #first_layer_output['recipe'] = h['recipe'][:num_recipes]

        first_layer_output['step'] = h['step'][:num_instr]
        #if ingre_step:
           # first_layer_output['step'] = h['ingredient'][:num_ingre]
        #print("first_layer_output['step'] ",first_layer_output['step'] )
        # newly added; note h has no 'ingredient' key at this point
        #first_layer_output['ingredient'] = h['ingredient'][:num_ingre]

        h = {key: self.dropout(value) for key, value in h.items()}

        h = self.gatconv2(blocks[1], h, edge_weight_1, do_edge_drop)


##        last_ingre_and_instr = h['recipe'].flatten(2)

        h = {k: self.relation_attention(v.flatten(2)) for k, v in h.items()}

        return h

