#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.nn.functional as F
from torch.nn import Parameter
from torch_geometric.nn import MessagePassing
from torch_geometric.utils import remove_self_loops, add_self_loops  # , softmax
from torch_scatter import scatter

import math


def xavier_init(tensor):
    """Glorot/Xavier-uniform initialization, in place.

    Draws values from U(-b, b) with b = sqrt(6 / (fan_in + fan_out)),
    where the fans are taken from the tensor's last two dimensions.
    A ``None`` tensor is silently ignored (e.g. a disabled bias).
    """
    if tensor is None:
        return
    fan_in, fan_out = tensor.size(-2), tensor.size(-1)
    bound = math.sqrt(6.0 / (fan_in + fan_out))
    tensor.data.uniform_(-bound, bound)


def zeros(tensor):
    """Fill the tensor with zeros in place; a ``None`` tensor is a no-op."""
    if tensor is None:
        return
    tensor.data.fill_(0)


def softmax(src, index, num_nodes):
    """Sparsely-grouped softmax.

    Groups the rows of ``src`` along dimension 0 according to ``index``
    and computes a numerically stable softmax independently within each
    group, so that the entries of every group sum to 1.

    Args:
        src: Value tensor, e.g. shape ``(E, H)`` — one row per edge.
        index: LongTensor of group ids (target node per edge), one per
            row of ``src``.
        num_nodes: Number of groups. If ``None``, inferred as
            ``index.max() + 1``.

    Returns:
        Tensor with the same shape as ``src``, normalized per group.
    """
    # Infer the group count when the caller did not supply it.
    N = int(index.max()) + 1 if num_nodes is None else num_nodes
    # Subtract each group's max before exponentiating (numerical stability);
    # [index] broadcasts the per-group value back onto the rows.
    out = src - scatter(src, index, dim=0, dim_size=N, reduce='max')[index]
    out = out.exp()
    out_sum = scatter(out, index, dim=0, dim_size=N, reduce='sum')[index]
    # Epsilon guards against division by zero for empty groups.
    return out / (out_sum + 1e-16)


class GATConv(MessagePassing):
    """Single graph-attention (GAT) convolution layer.

    For each edge (j -> i), an attention coefficient is computed from the
    concatenated transformed features of source and target, normalized with
    a per-target softmax, and used to weight the messages that are summed
    into each target node. Multi-head outputs are averaged in ``update``.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 heads=1,
                 negative_slope=0.2,
                 dropout=0.,
                 bias=True):
        """
        Args:
            in_channels: Size of each input node feature vector.
            out_channels: Size of each output node feature vector (per head).
            heads: Number of attention heads (averaged at the end).
            negative_slope: LeakyReLU slope for the attention logits.
            dropout: Dropout probability applied to the attention weights
                during training.
            bias: Whether to add a learnable bias to the output.
        """
        super(GATConv, self).__init__(node_dim=0, aggr='add')  # sum incoming messages

        self.in_channels = in_channels
        self.out_channels = out_channels
        self.heads = heads
        self.negative_slope = negative_slope
        self.dropout = dropout

        # Linear transform theta: in_channels -> heads * out_channels.
        self.weight = Parameter(torch.Tensor(in_channels, heads * out_channels))
        # Attention vector a, stored as a single tensor over [x_i || x_j]
        # rather than two separate halves.
        self.att = Parameter(torch.Tensor(1, heads, 2 * out_channels))

        if bias:
            self.bias = Parameter(torch.Tensor(out_channels))
        else:
            self.register_parameter('bias', None)

        self.reset_parameters()

    def reset_parameters(self):
        """Re-initialize weights (Glorot uniform) and zero the bias."""
        xavier_init(self.weight)
        xavier_init(self.att)
        zeros(self.bias)

    def forward(self, x, edge_index, size=None):
        """Run one round of attention-weighted message passing.

        Args:
            x: Node feature matrix of shape ``(N, in_channels)``.
            edge_index: LongTensor of shape ``(2, E)``; row 0 holds the
                source nodes j, row 1 the target nodes i.
            size: Optional (sparse) size hint forwarded to ``propagate``.

        Returns:
            Node embeddings of shape ``(N, out_channels)``.
        """
        # 1. Linear transform: (N, in) @ (in, H*out) -> (N, H, out).
        x = torch.mm(x, self.weight).view(-1, self.heads, self.out_channels)

        # 2. Normalize self-loops so each node attends to itself exactly
        # once: A' = A + I.
        if size is None and torch.is_tensor(x):
            edge_index, _ = remove_self_loops(edge_index)              # 2 x E
            edge_index, _ = add_self_loops(edge_index, num_nodes=x.size(0))  # 2 x (E+N)

        # 3. Propagate — runs message(), aggregation ('add') and update().
        return self.propagate(edge_index, size=size, x=x)

    def message(self, x_i, x_j, size_i, edge_index_i):
        """Compute attention-weighted messages for each edge.

        Args:
            x_i: Transformed target-node features per edge, ``(E', H, out)``.
            x_j: Transformed source-node features per edge, ``(E', H, out)``.
            size_i: Number of target nodes (softmax group count).
            edge_index_i: Target index of each edge (softmax group ids).

        Returns:
            Messages ``x_j`` scaled by the attention weights, ``(E', H, out)``.
        """
        # Attention logits: a^T [x_i || x_j], one scalar per edge and head.
        alpha = (torch.cat([x_i, x_j], dim=-1) * self.att).sum(dim=-1)
        alpha = F.leaky_relu(alpha, self.negative_slope)
        # Normalize over all edges sharing the same target node, so the
        # weights into each node sum to 1 (direction stays j -> i).
        alpha = softmax(alpha, edge_index_i, num_nodes=size_i)
        if self.training and self.dropout > 0:
            alpha = F.dropout(alpha, p=self.dropout, training=True)
        return x_j * alpha.view(-1, self.heads, 1)

    def update(self, aggr_out):
        """Average the heads and add the bias to the aggregated messages."""
        aggr_out = aggr_out.mean(dim=1)  # (N, H, out) -> (N, out)
        if self.bias is not None:
            aggr_out = aggr_out + self.bias
        return aggr_out


def _demo():
    """Run a single GATConv forward pass on a 5-node toy graph and print the result shape."""
    torch.manual_seed(0)

    # 2 x E: row 0 = source nodes j, row 1 = target nodes i.
    edge_index = torch.tensor([[1, 2, 1, 2, 3, 3, 4], [2, 1, 0, 0, 0, 4, 3]], dtype=torch.long)
    x = torch.tensor([[11], [22], [33], [44], [55]], dtype=torch.float)  # N x emb(in)
    print('x', x)
    print('edge_index', edge_index)

    conv = GATConv(1, 3)  # emb(in), emb(out)
    x = conv(x, edge_index)
    print(x.shape)  # N x emb(out)


# Guard so importing this module does not trigger the demo run.
if __name__ == '__main__':
    _demo()