import dgl
import torch

import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import scipy.sparse as sp
from torch.nn.parameter import Parameter
from layers import RelationalGraphConvolution, GNN
from optimizer import norm
from sklearn import preprocessing

from utils import node_drop


class GCNModelVAE_Semi(nn.Module):
    """Variational graph auto-encoder with an extra linear head for
    semi-supervised node classification.

    Encoder: a shared relational GCN layer followed by two heads producing
    the Gaussian parameters (mu, logvar); decoder: inner-product
    reconstruction of the adjacency matrix.  ``W_node`` maps node
    representations to class logits.
    """

    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout, class_dim):
        super(GCNModelVAE_Semi, self).__init__()
        # Shared first layer, then separate heads: gc2 -> mu, gc3 -> logvar.
        self.gc1 = RelationalGraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
        self.gc2 = RelationalGraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
        self.gc3 = RelationalGraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)

        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

        # Extra linear layer that predicts node labels from the encoder output.
        self.W_node = Parameter(torch.Tensor(hidden_dim2, class_dim))
        torch.nn.init.xavier_uniform_(self.W_node)

    def encode(self, x, adj):
        """Return (mu, logvar) of the approximate posterior q(z | x, adj)."""
        hidden1 = self.gc1(x, adj)
        return self.gc2(hidden1, adj), self.gc3(hidden1, adj)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, std^2) via the reparameterization trick.

        NOTE(review): ``std = exp(logvar)`` treats ``logvar`` as a log-std,
        not a log-variance (the textbook form is ``exp(0.5 * logvar)``).
        Kept as-is to preserve training behavior -- confirm intent.
        """
        if self.training:
            std = torch.exp(logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            # Deterministic at evaluation time: return the posterior mean.
            return mu

    def forward(self, x, adj):
        # adj is a list of adjacency tensors (one per relation);
        # mu and logvar have shape (num_nodes, hidden_dim2).
        mu, logvar = self.encode(x, adj)
        z = self.reparameterize(mu, logvar)

        # Node-label logits, shape (num_nodes, class_dim).
        # NOTE(review): logits are computed from ``logvar`` (the gc3 head),
        # not from ``mu`` or ``z`` -- confirm this is intentional.
        pred_nodes = torch.mm(logvar, self.W_node)

        return self.dc(z), mu, logvar, pred_nodes


class GCNModelVAE(nn.Module):
    """Variational graph auto-encoder (unsupervised variant).

    Encoder: a shared relational GCN layer followed by two heads producing
    the Gaussian parameters (mu, logvar); decoder: inner-product
    reconstruction of the adjacency matrix.
    """

    def __init__(self, input_feat_dim, hidden_dim1, hidden_dim2, dropout):
        super(GCNModelVAE, self).__init__()
        # Shared first layer, then separate heads: gc2 -> mu, gc3 -> logvar.
        self.gc1 = RelationalGraphConvolution(input_feat_dim, hidden_dim1, dropout, act=F.relu)
        self.gc2 = RelationalGraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)
        self.gc3 = RelationalGraphConvolution(hidden_dim1, hidden_dim2, dropout, act=lambda x: x)

        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

    def encode(self, x, adj):
        """Return (mu, logvar) of the approximate posterior q(z | x, adj)."""
        hidden1 = self.gc1(x, adj)
        return self.gc2(hidden1, adj), self.gc3(hidden1, adj)

    def reparameterize(self, mu, logvar):
        """Sample z ~ N(mu, std^2) via the reparameterization trick.

        NOTE(review): ``std = exp(logvar)`` treats ``logvar`` as a log-std,
        not a log-variance (the textbook form is ``exp(0.5 * logvar)``).
        Kept as-is to preserve training behavior -- confirm intent.
        """
        if self.training:
            std = torch.exp(logvar)
            eps = torch.randn_like(std)
            return eps.mul(std).add_(mu)
        else:
            # Deterministic at evaluation time: return the posterior mean.
            return mu

    def forward(self, x, adj):
        # adj is a list of adjacency tensors (one per relation).
        mu, logvar = self.encode(x, adj)
        z = self.reparameterize(mu, logvar)
        return self.dc(z), mu, logvar


class NewGCN(nn.Module):
    """Heterogeneous-graph VGAE over recipe / step / ingredient nodes.

    Each node type's 768-d input features are projected to 128-d, ReLU'd
    and normalized, then passed through a relational ``GNN``.  The step
    embeddings are gathered into a dense matrix, min-max scaled, and the
    concatenation of the two views serves as both mu and logvar of the
    latent Gaussian that the inner-product decoder reconstructs from.
    """

    def __init__(self, dropout, graph):
        super().__init__()
        # Per-node-type projections: 768 -> 128, ReLU-activated.
        self.recipe_embedding = nn.Sequential(
            nn.Linear(768, 128),
            nn.ReLU(),
        )
        self.instr_embedding = nn.Sequential(
            nn.Linear(768, 128),
            nn.ReLU(),
        )
        self.ingredient_embedding = nn.Sequential(
            nn.Linear(768, 128),
            nn.ReLU(),
        )

        self.gnn = GNN(128, 128, 128, graph.etypes)

        self.dc = InnerProductDecoder(dropout, act=lambda x: x)

    def encode(self, x1, x2):
        """Concatenate the two feature views column-wise; the result is
        used as both mu and logvar of the latent Gaussian."""
        out = np.hstack([x1, x2])
        return out, out

    def forward(self, blocks, input_features, model):
        # NOTE(review): ``model`` is currently unused (only referenced by
        # previously removed contrastive-learning code) -- kept for
        # interface compatibility with callers.
        recipe, instr, ingredient = input_features

        # Project and normalize each node type before the GNN.
        recipe_major = norm(self.recipe_embedding(recipe))
        instr_major = norm(self.instr_embedding(instr))
        ingredient_major = norm(self.ingredient_embedding(ingredient))
        x_r = self.gnn(
            blocks,
            {'recipe': recipe_major, 'step': instr_major, 'ingredient': ingredient_major},
            torch.Tensor([[0]]), 1)

        # Map block-local positions to the original step node IDs.
        blocks_dic = {}
        for i in range(len(blocks[1].ndata[dgl.NID])):
            blocks_dic[i] = blocks[1].ndata[dgl.NID]['step'][i]

        # Gather the GNN step embeddings into a dense (num_steps, 128) array.
        # BUG FIX: the copy previously read ``x_r['step'][i]`` where ``i`` was
        # the stale index left over from the loop above, so every row received
        # the same (last) embedding.  Use the per-row index ``k`` instead.
        # NOTE(review): iterating ``enumerate(blocks_dic)`` yields the dict
        # KEYS (so v == k here); the NID values stored above are never used
        # as destination indices -- confirm that is intended.
        feat_x_r = np.zeros((len(instr), 128))
        for k, v in enumerate(blocks_dic):
            for j in range(128):
                feat_x_r[v][j] = x_r['step'][k][j]

        # Second "view": per-feature min-max scaling of the embeddings.
        x_s = preprocessing.MinMaxScaler().fit_transform(feat_x_r)

        mu, logvar = self.encode(feat_x_r, x_s)
        # Reparameterization-style sampling.
        # NOTE(review): noise is added unconditionally (also at eval time),
        # and mu/logvar are the same array -- confirm intent.
        gaussian_noise = torch.randn(torch.from_numpy(mu).size(0), torch.from_numpy(mu).size(1))
        z = torch.tensor(mu) + gaussian_noise * torch.exp(torch.tensor(logvar))

        return self.dc(z), mu, logvar


class InnerProductDecoder(nn.Module):
    """Reconstruct an adjacency matrix from latent codes via inner products."""

    def __init__(self, dropout, act=torch.sigmoid):
        super(InnerProductDecoder, self).__init__()
        self.dropout = dropout
        # NOTE(review): ``act`` is stored but forward() applies a hard-coded
        # sigmoid (see below) -- this attribute is currently unused.
        self.act = act

    def forward(self, z):
        # Dropout is active only in training mode.
        dropped = F.dropout(z, self.dropout, training=self.training)
        # Pairwise inner products, squashed to (0, 1) edge probabilities.
        scores = torch.matmul(dropped, dropped.t())
        return torch.sigmoid(scores)
