import torch
from torch import nn
from torch.nn import Module, Parameter
from torch.nn import functional as F
from torch_geometric.nn import GCNConv
from const import DEVICE


class GraphAttentionLayer(Module):
    """Single graph attention layer (GAT, Velickovic et al., 2018).

    Projects node features with ``W``, scores every node pair with the
    attention vector ``a``, masks non-neighbors via ``adj``, and aggregates
    neighbor features with the softmax-normalized attention weights.
    """

    def __init__(self, input_dim, output_dim, dropout, concat=True, alpha=0.2):
        """
        Args:
            input_dim: size of each input node feature vector.
            output_dim: size of each output node feature vector.
            dropout: dropout probability applied to the attention weights.
            concat: if True, apply ReLU to the output (hidden layers whose
                head outputs get concatenated); if False, return raw output.
            alpha: negative slope of the LeakyReLU on the attention logits.
        """
        super(GraphAttentionLayer, self).__init__()
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.concat = concat
        self.leakyRelu = nn.LeakyReLU(alpha)
        self.dropout = dropout
        # Learnable parameters: W projects features, a scores node pairs.
        # nn.Parameter requires gradients by default, so the explicit
        # requires_grad_(True) calls of the original were dead code.
        self.W = Parameter(torch.empty(size=(input_dim, output_dim), dtype=torch.float32))
        self.a = Parameter(torch.empty(size=(2 * output_dim, 1), dtype=torch.float32))
        self.init_parameter()

    def init_parameter(self):
        """Xavier-initialize W and a (gain 1.414 ~ sqrt(2), suited to ReLU)."""
        # nn.init functions operate in-place under no_grad, so passing the
        # Parameter directly is safe; no need to go through .data.
        nn.init.xavier_uniform_(self.W, gain=1.414)
        nn.init.xavier_uniform_(self.a, gain=1.414)

    def concatenation(self, wh):
        """Compute attention logits e[i, j] = LeakyReLU(a^T [Wh_i || Wh_j]).

        The pairwise concatenation is never materialized: splitting ``a``
        into halves gives a^T [Wh_i || Wh_j] = a1^T Wh_i + a2^T Wh_j, and
        broadcasting (N, 1) + (1, N) yields the full (N, N) logit matrix.

        Args:
            wh: (N, output_dim) projected node features.
        Returns:
            (N, N) raw (pre-softmax) attention logits.
        """
        wh1 = torch.matmul(wh, self.a[:self.output_dim, :])  # (N, 1)
        wh2 = torch.matmul(wh, self.a[self.output_dim:, :])  # (N, 1)
        # Broadcast-add instead of explicit pairwise concatenation.
        e = wh1 + wh2.T
        return self.leakyRelu(e)

    def forward(self, h, adj):
        """
        Args:
            h: (N, input_dim) node features.
            adj: (N, N) adjacency matrix; entries > 0 mark neighbors and are
                also used as multiplicative edge weights on the logits.
        Returns:
            (N, output_dim) aggregated features, ReLU-activated when
            ``concat`` is True.
        """
        wh = torch.mm(h, self.W)  # (N, output_dim)
        e = self.concatenation(wh)  # (N, N) attention logits

        # Mask non-neighbors with a very large negative value so softmax
        # assigns them (near-)zero attention; for real neighbors the edge
        # weight in ``adj`` scales the logit (link-weight experiment).
        zero_inf = -9e15 * torch.ones_like(e)
        Neighbor = torch.where(adj > 0, e * adj, zero_inf)

        # Row-wise softmax over each node's neighborhood.
        attention = F.softmax(Neighbor, dim=1)
        attention = F.dropout(attention, self.dropout, training=self.training)
        # head: (N, output_dim)
        head = torch.matmul(attention, wh)
        if self.concat:
            return F.relu(head)
        else:
            return head


class GAT(Module):
    """Two-layer multi-head graph attention network.

    Layer 1 runs ``num_h`` attention heads in parallel and concatenates
    their outputs; layer 2 is a single head producing ``output_dim``
    features per node, followed by ReLU and log-softmax.
    """

    def __init__(self, input_dim, hid_dim, output_dim, num_h, dropout, alpha):
        """
        Args:
            input_dim: input feature size per node.
            hid_dim: hidden feature size per attention head.
            output_dim: output feature size per node.
            num_h: number of attention heads in the first layer.
            dropout: dropout probability on features and attention weights.
            alpha: LeakyReLU negative slope inside the attention layers.
        """
        super(GAT, self).__init__()
        self.dropout = dropout
        # First layer: nn.ModuleList registers every head with the module,
        # replacing the plain list + manual add_module bookkeeping.
        self.MultiHeadAttention = nn.ModuleList(
            [GraphAttentionLayer(input_dim, hid_dim, dropout, concat=True, alpha=alpha)
             for _ in range(num_h)]
        )
        # Second layer: single head over the concatenated head outputs.
        self.last_layer = GraphAttentionLayer(hid_dim * num_h, output_dim, dropout, concat=False, alpha=alpha)

    def forward(self, feature, adj):
        """Return per-node log-probabilities of shape (N, output_dim)."""
        feature = F.dropout(feature, self.dropout, training=self.training)
        # Concatenate the heads' outputs along the feature dimension.
        output = torch.cat([attention(feature, adj) for attention in self.MultiHeadAttention], dim=1)
        output = F.dropout(output, self.dropout, training=self.training)
        output = self.last_layer(output, adj)

        output = F.relu(output)
        output = F.log_softmax(output, dim=1)
        return output


class GAT_with_fc(Module):
    """Two-layer multi-head GAT followed by a fully connected head.

    Same backbone as ``GAT``; the log-softmax output is transposed to
    (output_dim, N) and mapped through a linear layer of size
    ``fc_in`` -> ``fc_out``.
    """

    def __init__(self, input_dim, hid_dim, output_dim, fc_in, fc_out, num_h, dropout, alpha):
        """
        Args:
            input_dim: input feature size per node.
            hid_dim: hidden feature size per attention head.
            output_dim: per-node output size of the GAT backbone.
            fc_in: input size of the final linear layer (node count N —
                TODO confirm against callers).
            fc_out: output size of the final linear layer.
            num_h: number of attention heads in the first layer.
            dropout: dropout probability on features and attention weights.
            alpha: LeakyReLU negative slope inside the attention layers.
        """
        super(GAT_with_fc, self).__init__()
        self.dropout = dropout
        # First layer: nn.ModuleList registers every head with the module,
        # replacing the plain list + manual add_module bookkeeping.
        self.MultiHeadAttention = nn.ModuleList(
            [GraphAttentionLayer(input_dim, hid_dim, dropout, concat=True, alpha=alpha)
             for _ in range(num_h)]
        )
        # Second layer: single head over the concatenated head outputs.
        self.last_layer = GraphAttentionLayer(hid_dim * num_h, output_dim, dropout, concat=False, alpha=alpha)
        self.fc = nn.Linear(fc_in, fc_out, dtype=torch.float32)

    def forward(self, feature, adj):
        """Run the GAT backbone, transpose, and apply the linear head."""
        feature = F.dropout(feature, self.dropout, training=self.training)
        # Concatenate the heads' outputs along the feature dimension.
        output = torch.cat([attention(feature, adj) for attention in self.MultiHeadAttention], dim=1)
        output = F.dropout(output, self.dropout, training=self.training)
        output = self.last_layer(output, adj)

        output = F.relu(output)
        output = F.log_softmax(output, dim=1)
        # (N, output_dim) -> (output_dim, N) so fc maps over the nodes.
        output = output.transpose(1, 0)
        out = self.fc(output)
        return out


class actor_gat(Module):
    """Actor network: GAT backbone + linear head + scalar confidence head.

    Produces the action output ``out`` and a sigmoid confidence score in
    (0, 1) derived from it.
    """

    def __init__(self, input_dim, hid_dim, output_dim, fc_in, fc_out, num_h, dropout, alpha):
        """
        Args:
            input_dim: input feature size per node.
            hid_dim: hidden feature size per attention head.
            output_dim: per-node output size of the GAT backbone.
            fc_in: input size of the linear head (node count N —
                TODO confirm against callers).
            fc_out: output size of the linear head.
            num_h: number of attention heads in the first layer.
            dropout: dropout probability on features and attention weights.
            alpha: LeakyReLU negative slope inside the attention layers.
        """
        super(actor_gat, self).__init__()
        self.dropout = dropout
        # First layer: nn.ModuleList registers every head with the module,
        # replacing the plain list + manual add_module bookkeeping.
        self.MultiHeadAttention = nn.ModuleList(
            [GraphAttentionLayer(input_dim, hid_dim, dropout, concat=True, alpha=alpha)
             for _ in range(num_h)]
        )
        # Second layer: single head over the concatenated head outputs.
        self.last_layer = GraphAttentionLayer(hid_dim * num_h, output_dim, dropout, concat=False, alpha=alpha)
        self.fc = nn.Linear(fc_in, fc_out, dtype=torch.float32)
        self.confidence_layer = nn.Linear(fc_out, 1, dtype=torch.float32)

    def forward(self, feature, adj):
        """Return (action output, confidence in (0, 1))."""
        # NOTE: unlike GAT/GAT_with_fc, the input features are intentionally
        # not dropped out here (the original had that line commented out).
        output = torch.cat([attention(feature, adj) for attention in self.MultiHeadAttention], dim=1)
        output = F.dropout(output, self.dropout, training=self.training)
        output = self.last_layer(output, adj)

        output = F.relu(output)
        output = F.log_softmax(output, dim=1)
        # (N, output_dim) -> (output_dim, N) so fc maps over the nodes.
        output = output.transpose(1, 0)
        out = self.fc(output)
        confidence_out = self.confidence_layer(out)
        confidence_out = torch.sigmoid(confidence_out)  # map to (0, 1)
        return out, confidence_out


'''
The GAT ran into back-propagation problems, so a weighted GCN is tried instead.
The GCN network below is built from PyG's built-in GCN layer.

GCNLayer Shapes:
    - **input:**
        node features :math:`(vertex, in_feature)`,
        edge indices :math:`(2, edge)`,
        edge weights :math:`(edge)` *(optional)*
    - **output:** node features :math:`(vertex, out_feature)`

'''


def Norm_1d(x):
    """Standardize ``x`` to zero mean and (approximately) unit variance.

    Fix: the original divided by the *variance*; a z-score divides by the
    standard deviation. A small epsilon guards against division by zero
    for constant inputs. Uses torch.var's default unbiased estimator,
    matching the original.

    Args:
        x: 1-D tensor of values.
    Returns:
        Tensor of the same shape, standardized.
    """
    mean = torch.mean(x)
    std = torch.sqrt(torch.var(x))
    return (x - mean) / (std + 1e-8)


class GCN(Module):
    """Three weighted GCN layers (PyG ``GCNConv``) plus a linear head.

    Shapes:
        tasks:       (vertex, input_num) node features
        edge_index:  (2, edge) edge indices
        edge_weight: (edge,) edge weights
        output:      (1, linear_output)
    """

    def __init__(self, input_num, hid_num, vertex_num, linear_output, dropout=0.0, device=DEVICE):
        """
        Args:
            input_num: input feature size per node.
            hid_num: hidden feature size.
            vertex_num: number of nodes (input size of the linear head).
            linear_output: output size of the linear head.
            dropout: dropout probability. The old default ``False`` behaved
                numerically as 0.0; the explicit float states that intent.
            device: device the layers are moved to.
        """
        super(GCN, self).__init__()

        self.GCN_layer1 = GCNConv(input_num, hid_num, dtype=torch.float32).to(device)
        self.GCN_layer2 = GCNConv(hid_num, hid_num, dtype=torch.float32).to(device)
        self.GCN_layer3 = GCNConv(hid_num, 1, dtype=torch.float32).to(device)
        self.linear = torch.nn.Linear(vertex_num, linear_output, dtype=torch.float32).to(device)
        self.dropout = dropout

    def forward(self, tasks, edge_index, edge_weight):
        """Run three weighted graph convolutions, then the linear head."""
        x = self.GCN_layer1(tasks, edge_index, edge_weight=edge_weight)
        x = F.leaky_relu(x)
        x = F.dropout(x, self.dropout, training=self.training)

        x = self.GCN_layer2(x, edge_index, edge_weight=edge_weight)
        x = F.leaky_relu(x)
        x = F.dropout(x, self.dropout, training=self.training)

        x = self.GCN_layer3(x, edge_index, edge_weight=edge_weight)
        x = F.leaky_relu(x)
        x = F.dropout(x, self.dropout, training=self.training)
        # x is (vertex, 1); transpose to (1, vertex) so the linear head
        # maps across the vertices.
        x = x.transpose(1, 0)

        x = self.linear(x)

        return x
