import torch

# Demo 1: three-hop propagation on a small directed graph.
# COO edge list: edge k goes a_edge[0][k] -> a_edge[1][k] with weight a_value[k].
a_edge = torch.LongTensor([
    [0, 0, 1, 2, 3, 0],
    [1, 2, 2, 3, 0, 3],
])
a_value = torch.FloatTensor([0.1, 0.2, 0.3, 0.4, 0.5, 0.6])

# Sparse form of the global adjacency matrix.
# (The original chained `.to(a_edge.device)`, which is a no-op: the tensor is
# created on a_edge's device already.)
mat_a = torch.sparse_coo_tensor(a_edge, a_value, (5, 5))
print(mat_a)
# print(a_edge, a_value)

# B is built from the same (edge, value) pair, so A and B are equal matrices;
# A @ B therefore shows the two-hop (squared) propagation of A.
mat_b = torch.sparse_coo_tensor(a_edge, a_value, (5, 5))
mat_ab = torch.sparse.mm(mat_a, mat_b).coalesce()  # A @ A, duplicates merged
edges, values = mat_ab.indices(), mat_ab.values()
print(mat_ab)
# print(edges, values)

# Third hop: (A @ A) @ A.
mat_c = torch.sparse_coo_tensor(a_edge, a_value, (5, 5))
mat_abc = torch.sparse.mm(mat_ab, mat_c).coalesce()
edges, values = mat_abc.indices(), mat_abc.values()
print(mat_abc)
# print(edges, values)


# Demo 2: same three-hop propagation on a chain graph 0->1->2->3->4 with a
# back edge 4->1.  (`torch` is already imported at the top of the file; the
# duplicate import that was here has been removed.)
a_edge = torch.LongTensor([
    [0, 1, 2, 3, 4],
    [1, 2, 3, 4, 1],
])
a_value = torch.FloatTensor([0.1, 0.2, 0.3, 0.4, 0.5])

# Sparse form of the global adjacency matrix.  Size (6, 6) keeps one spare
# (isolated) node beyond the largest index used (4).
mat_a = torch.sparse_coo_tensor(a_edge, a_value, (6, 6))
print(mat_a)
# print(a_edge, a_value)

# Two-hop paths: A @ A (B equals A by construction).
mat_b = torch.sparse_coo_tensor(a_edge, a_value, (6, 6))
mat_ab = torch.sparse.mm(mat_a, mat_b).coalesce()
edges, values = mat_ab.indices(), mat_ab.values()
print(mat_ab)
# print(edges, values)

# Three-hop paths: (A @ A) @ A.
mat_c = torch.sparse_coo_tensor(a_edge, a_value, (6, 6))
mat_abc = torch.sparse.mm(mat_ab, mat_c).coalesce()
edges, values = mat_abc.indices(), mat_abc.values()
print(mat_abc)
# print(edges, values)


"""
1、attention的本质是节点关系的信息传递。
2、传递信息是边权重乘积的方式。
1、每一次attention过程都去掉了了。
"""
print("################################")
import torch

"""
我    不     爱    吃    米饭
0     1     2    3     4
"""
a_edge, a_value = (torch.LongTensor(
    [
        [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
        [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]]
), torch.FloatTensor(([0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4, 0.1, 0.2, 0.3, 0.4])))

# 定义了全局邻接矩阵的稀疏形式。
mat_a = torch.sparse_coo_tensor(a_edge, a_value, (5, 5)).to(a_edge.device)
print(mat_a)
# print(a_edge,a_value)

mat_b = torch.sparse_coo_tensor(a_edge, a_value, (5, 5)).to(a_edge.device)
mat_ab = torch.sparse.mm(mat_a, mat_b).coalesce()
edges, values = mat_ab.indices(), mat_ab.values()
print(mat_ab)
# print(edges, values)
mat_c = torch.sparse_coo_tensor(a_edge, a_value, (5, 5)).to(a_edge.device)

mat_abc = torch.sparse.mm(mat_ab, mat_c).coalesce()
edges, values = mat_abc.indices(), mat_abc.values()
print(mat_abc)
# print(edges, values)

"""
tensor(indices=tensor([[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
                       [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]]),
       values=tensor([0.1000, 0.2000, 0.3000, 0.4000, 0.1000, 0.2000, 0.3000,
                      0.4000, 0.1000, 0.2000, 0.3000, 0.4000, 0.1000, 0.2000,
                      0.3000, 0.4000]),
       size=(5, 5), nnz=16, layout=torch.sparse_coo)
tensor(indices=tensor([[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
                       [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]]),
       values=tensor([0.0600, 0.1200, 0.1800, 0.2400, 0.0600, 0.1200, 0.1800,
                      0.2400, 0.0600, 0.1200, 0.1800, 0.2400, 0.0600, 0.1200,
                      0.1800, 0.2400]),
       size=(5, 5), nnz=16, layout=torch.sparse_coo)
tensor(indices=tensor([[0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 3, 3, 3, 3],
                       [1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4, 1, 2, 3, 4]]),
       values=tensor([0.0360, 0.0720, 0.1080, 0.1440, 0.0360, 0.0720, 0.1080,
                      0.1440, 0.0360, 0.0720, 0.1080, 0.1440, 0.0360, 0.0720,
                      0.1080, 0.1440]),
       size=(5, 5), nnz=16, layout=torch.sparse_coo)
"""