import torch
import torch.nn as nn
from torch.nn import functional as F
from torch.optim import Adadelta

from models.util import BottledOrthogonalLinear, log


class GraphConvolution(nn.Module):
    """Single-layer gated graph convolution fusing two adjacency views.

    For each gated edge type the layer computes a node-wise sigmoid gate,
    applies it to both the dependency adjacency (``adj``) and the entity
    adjacency (``adj_entity``), blends the two gated matrices with a learned
    softmax weighting, and aggregates neighbour features through a
    per-edge-type linear projection. Contributions of all edge types are
    summed, then optionally batch-normalized, ReLU'd and dropped out.
    """

    # Number of edge types that receive a gate and entity fusion.  The
    # original implementation hard-coded 3 in two places; kept as-is for
    # backward compatibility with existing checkpoints / callers.
    NUM_GATED = 3

    def __init__(self, in_features, out_features, edge_types, dropout=0.5, bias=True, use_bn=False,
                 device=torch.device("cpu")):
        """
        :param in_features: number of input features per node (e.g. 768)
        :param out_features: number of output features per node (e.g. 768)
        :param edge_types: number of edge types in the whole graph (e.g. 4)
        :param dropout: dropout probability applied to the output; ``None`` disables it
        :param bias: if False, the linear sub-layers use no bias weights. Default: True
        :param use_bn: if True, apply BatchNorm1d over the feature dimension
        :param device: device the module and all its parameters are moved to
        """
        super(GraphConvolution, self).__init__()
        self.in_features = in_features
        self.out_features = out_features
        self.edge_types = edge_types
        self.dropout = dropout
        # Node-wise gate networks (one scalar gate per node) for gated edge types.
        self.Gates = nn.ModuleList()
        # One graph-convolution projection per edge type.
        self.GraphConv = nn.ModuleList()
        self.use_bn = use_bn
        if self.use_bn:
            # Normalizes over the feature dimension (out_features channels).
            self.bn = nn.BatchNorm1d(self.out_features)

        for _ in range(edge_types):
            self.GraphConv.append(BottledOrthogonalLinear(in_features=in_features,
                                                          out_features=out_features,
                                                          bias=bias))

        for _ in range(self.NUM_GATED):
            self.Gates.append(BottledOrthogonalLinear(in_features=in_features,
                                                      out_features=1,
                                                      bias=bias))

        # BUGFIX: the original forward() built fresh nn.Linear layers on every
        # call to weight the two adjacency views.  Those layers were randomly
        # re-initialized each forward pass (so the fusion could never be
        # learned), stayed on the CPU regardless of `device`, and their input
        # size depended on batch_size*seq_len*seq_len, crashing on
        # variable-length batches.  Replaced by learned per-edge-type fusion
        # logits; softmax over the last dim yields the two convex blend
        # weights (the original exp(a)/(exp(a)+exp(b)) is exactly a 2-way
        # softmax).  Zero-init makes the initial blend an even 0.5/0.5.
        self.fusion_logits = nn.Parameter(torch.zeros(self.NUM_GATED, 2))

        self.device = device
        self.to(device)

    def forward(self, input, adj, adj_entity):
        """
        :param input: FloatTensor, input feature tensor, (batch_size, seq_len, hidden_size)
        :param adj: FloatTensor (sparse.FloatTensor.to_dense()), adjacency matrices
            for the provided graph of padded sequences,
            (batch_size, edge_types, seq_len, seq_len)
        :param adj_entity: FloatTensor, entity-based adjacency matrices with the
            same shape as ``adj``
        :return: output
            - **output**: FloatTensor, output feature tensor with the same size
              as input, (batch_size, seq_len, hidden_size)
        """
        adj_ = adj.transpose(0, 1)  # (edge_types, batch_size, seq_len, seq_len)
        adj_entity_ = adj_entity.transpose(0, 1)
        ts = []
        for i in range(self.edge_types):
            if i < self.NUM_GATED:
                # Per-node scalar gate in (0, 1), broadcast over adjacency rows.
                gate_status = torch.sigmoid(self.Gates[i](input))  # (batch_size, seq_len, 1)
                adj_hat_i = adj_[i] * gate_status  # (batch_size, seq_len, seq_len)
                adj_entity_i = adj_entity_[i] * gate_status
                # Convex combination of the two gated adjacency views with
                # learned weights (replaces the per-forward random linears).
                w = F.softmax(self.fusion_logits[i], dim=0)
                out = w[0] * adj_hat_i + w[1] * adj_entity_i
            else:
                # BUGFIX: edge types beyond the gated ones previously reused
                # the stale `out` from the last gated iteration.  Use the
                # ungated adjacency directly, matching the plain-GCN variant
                # left commented out in the original.
                # NOTE(review): assumes ungated edge types need no entity
                # fusion — confirm against the graph construction code.
                out = adj_[i]
            ts.append(torch.bmm(out, self.GraphConv[i](input)))
        # Sum the contributions of all edge types into one feature tensor.
        ts = torch.stack(ts).sum(dim=0, keepdim=False).to(self.device)
        if self.use_bn:
            # BatchNorm1d expects (batch, channels, seq_len).
            ts = ts.transpose(1, 2).contiguous()
            ts = self.bn(ts)
            ts = ts.transpose(1, 2).contiguous()
        ts = F.relu(ts)
        if self.dropout is not None:
            ts = F.dropout(ts, p=self.dropout, training=self.training)
        return ts

    def __repr__(self):
        return self.__class__.__name__ + ' (' + str(self.in_features) + ' -> ' + str(self.out_features) + ')'
