import numpy as np
import torch

# Attention coefficient between two node feature vectors.
def attention_coefficient(h_u, h_v, theta):
    """Return the raw (unnormalized) attention score for a node pair.

    The concatenated features are projected by the trainable matrix
    ``theta`` and passed through LeakyReLU with negative slope 0.2.
    Softmax normalization over the neighbour set is the caller's job.
    """
    features = torch.cat((h_u, h_v), dim=-1)
    projected = features @ theta
    # LeakyReLU(0.2): identity for positive inputs, 0.2 * x otherwise —
    # identical to relu(x) + 0.2 * min(0, x).
    return torch.nn.functional.leaky_relu(projected, negative_slope=0.2)

# Per-sample attention coefficient matrix over n-th order neighbourhoods.
def sample_n_attention_coefficient(W, H, theta, n):
    """Compute softmax-normalized attention weights for every node's
    n-th order neighbours, independently per batch sample.

    Args:
        W: (batch, node, node) weighted adjacency tensor.
        H: (batch, node, feat) node feature tensor.
        theta: trainable projection used by ``attention_coefficient``.
        n: neighbourhood order (power of the binarized adjacency).

    Returns:
        Tensor shaped like ``W``: entry (b, i, j) holds node i's
        normalized attention toward its n-th order neighbour j, and is
        zero for non-neighbours.
    """
    attention = torch.zeros_like(W)
    batch_size, node_num, _ = H.shape

    # Collect each node's n-th order neighbour indices (and features,
    # kept for parity with the original structure; only the indices are
    # consumed below).
    n_neighbor_features = {}
    for b in range(batch_size):
        for node in range(node_num):
            n_neighbor_indices = find_n_node_neighbors(W[b], n, node)
            n_neighbor_feats = H[b, n_neighbor_indices]
            n_neighbor_features[(b, node)] = (n_neighbor_indices, n_neighbor_feats)

    for (b, node), (n_neighbor_indices, _) in n_neighbor_features.items():
        # BUG FIX: a node with no n-th order neighbours made the original
        # call torch.stack() on an empty list, which raises; skip instead.
        if len(n_neighbor_indices) == 0:
            continue
        raw_scores = torch.stack(
            [attention_coefficient(H[b, node], H[b, neighbor], theta)
             for neighbor in n_neighbor_indices]
        )
        # Softmax over the whole neighbour set; shifting by the max is
        # mathematically neutral but avoids overflow in exp().
        exp_scores = torch.exp(raw_scores - raw_scores.max())
        normalized = exp_scores / exp_scores.sum()
        for idx, neighbor in enumerate(n_neighbor_indices):
            attention[b, node, neighbor] = normalized[idx]
    return attention

# Indices of a node's n-th order neighbours.
def find_n_node_neighbors(W, n, node):
    """Return the indices of all nodes reachable from ``node`` by at
    least one walk of exactly ``n`` edges in the binarized graph of ``W``.
    """
    binary_adjacency = torch.where(W != 0, 1, 0)
    # Entry (i, j) of A^n counts walks of length n from i to j.
    reach_counts = torch.matrix_power(binary_adjacency, n)
    return torch.nonzero(reach_counts[node]).squeeze(dim=-1)

# n-th order node representations (GCN-style propagation with attention).
def find_n_node(W, H, theta, n):
    """Propagate node features ``n`` times through the symmetric
    normalized adjacency, weighted by per-order attention.

    NOTE(review): ``A`` is a batched (3-D) torch tensor, yet it is fed to
    ``np.diag`` / ``np.linalg.inv``, which expect 2-D arrays — this can
    only behave as intended for a single sample at most; confirm.
    NOTE(review): an isolated node yields a zero degree, making
    ``np.sqrt(D)`` singular so ``np.linalg.inv`` raises; confirm inputs
    never contain isolated nodes.
    """
    # batch_size is extracted but unused below.
    batch_size, _, _ = W.shape
    node_W = W.clone()
    node_H = H.clone()
    A = torch.where(W != 0, 1, 0)
    # Inverse square root of the degree matrix, D^{-1/2}.
    D = np.diag(np.sum(A, axis=0))
    D_inv_sqrt = np.linalg.inv(np.sqrt(D))
    for i in range(n):
        # NOTE(review): i starts at 0 and matrix_power(A, 0) is the
        # identity, so the first pass attends each node to itself only —
        # TODO confirm that is intended.
        attention = sample_n_attention_coefficient(node_W, node_H, theta, i)
        node_H = D_inv_sqrt @ A @ D_inv_sqrt @ attention
    return node_H

# Neighbours of a path.
def find_path_neighbors(W, path):
    """Return the first-order neighbours of every node on ``path``,
    excluding nodes already on the path. Duplicates (a neighbour shared
    by several path nodes) are kept, matching the original behaviour.
    """
    candidates = []
    for path_node in path:
        candidates.extend(find_n_node_neighbors(W, 1, path_node))
    return [candidate for candidate in candidates if candidate not in path]

# Discovery of n-th order paths from the (n-1)-th order ones.
def find_n_path_(W, path_pre, path_pre_feature, H, theta, path_threshold):
    """Extend every (n-1)-th order path by one hop.

    A neighbour extends a path when the attention score between the
    path's feature and the neighbour's feature exceeds
    ``path_threshold``; the new path's feature is the element-wise mean
    of the two.

    Args:
        W: node adjacency/weight matrix.
        path_pre: list of paths (each a list of node indices).
        path_pre_feature: feature vector of each path, same order.
        H: node feature matrix.
        theta: trainable projection for the attention score.
        path_threshold: minimum attention score to extend a path.

    Returns:
        (new paths, new path features) — two parallel lists.
    """
    path_new = []
    path_new_feature = []
    # BUG FIX: pair each path with its feature via zip. The original
    # ``path_pre_feature[path_pre.index(path)]`` re-scanned the list on
    # every access (O(n) per path) and returned the WRONG feature when
    # duplicate paths were present (index() finds the first occurrence).
    for path, path_feature in zip(path_pre, path_pre_feature):
        # Candidate next hops: neighbours of the path not already on it.
        for node_neighbor in find_path_neighbors(W, path):
            attention_score = attention_coefficient(path_feature, H[node_neighbor], theta)
            if attention_score > path_threshold:
                # New path: the old one plus the accepted next hop.
                path_new.append(path + [node_neighbor])
                # New path feature: element-wise average of the two.
                summed = [x + y for x, y in zip(path_feature, H[node_neighbor])]
                path_new_feature.append([x / 2 for x in summed])
    return path_new, path_new_feature

# Discovery of n-th order paths.
def find_n_path(n, W, H, theta, path_threshold, start_node=116):
    """Grow paths over ``n`` extension rounds from single-node seeds.

    Args:
        n: number of extension rounds.
        W: node adjacency/weight matrix.
        H: node feature matrix.
        theta: trainable projection for the attention score.
        path_threshold: minimum attention score to extend a path.
        start_node: first node index used for the seed paths; seeds are
            ``start_node .. W.shape[0]-1``. Default 116 preserves the
            original hard-coded behaviour.

    Returns:
        (paths, path features) after ``n`` rounds. For ``n == 0`` the
        seed paths are returned (the original raised NameError via
        undefined globals in that case).
    """
    path_pre = [[i] for i in range(start_node, W.shape[0])]
    path_pre_feature = [H[i] for i in range(start_node, W.shape[0])]
    # Start from the seeds so the n == 0 case is well-defined; the
    # original leaked results through module-level globals instead.
    path_new, path_new_feature = path_pre, path_pre_feature
    for _ in range(n):
        path_new, path_new_feature = find_n_path_(W, path_pre, path_pre_feature, H, theta, path_threshold)
        path_pre, path_pre_feature = path_new, path_new_feature
    return path_new, path_new_feature

# Discovery of (n+1)-th order sub-networks from the n-th order ones.
def find_n_network_(net_all, net_all_feature, net_adjacency, theta, adjacency_matrix):
    """Merge each sub-network with the neighbouring sub-network whose
    attention difference is minimal, then rebuild the sub-network
    adjacency matrix.

    Args:
        net_all: list of sub-networks, each a list of node indices.
        net_all_feature: per-sub-network feature rows, same order.
        net_adjacency: adjacency between sub-networks.
        theta: trainable projection for the attention score.
        adjacency_matrix: node-level adjacency used to reconnect merges.

    Returns:
        (merged sub-networks, their features, their adjacency matrix).
    """
    net_all_new = []
    net_all_feature_new = []
    # BUG FIX: the original iterated ``range(len(net_all_new))`` over the
    # still-empty RESULT list, so the merge loop never executed at all.
    for i in range(len(net_all)):
        # First-order neighbours of sub-network i in the sub-network graph
        # (matrix_power(net_adjacency, 1) in the original is just
        # net_adjacency itself).
        net_neighbors = torch.nonzero(net_adjacency[i]).squeeze(dim=-1)
        net_attention = []
        for net_ in net_neighbors:
            # Asymmetry of the attention between the two sub-networks.
            net_attention.append(
                torch.abs(
                    attention_coefficient(net_all_feature[i], net_all_feature[net_], theta) -
                    attention_coefficient(net_all_feature[net_], net_all_feature[i], theta)
                )
            )
        if not net_attention:
            # Isolated sub-network: min() over an empty list would raise;
            # carry it over unmerged instead.
            net_all_new.append(list(net_all[i]))
            net_all_feature_new.append(net_all_feature[i])
            continue
        # BUG FIX: the minimizer's position indexes net_neighbors, not
        # net_all directly — the original used the position as a
        # sub-network id. Also ``net_all[i]`` is a plain list from the
        # caller, which has no ``.clone()``; use list() to copy.
        partner = int(net_neighbors[net_attention.index(min(net_attention))])
        merged = list(net_all[i])
        merged.extend(net_all[partner])
        net_all_new.append(merged)
        summed = [x + y for x, y in zip(net_all_feature[i], net_all_feature[partner])]
        # Feature of the merged sub-network: element-wise average.
        net_all_feature_new.append([x / 2 for x in summed])
    net_adjacency_new = torch.zeros(size=(len(net_all_new), len(net_all_new)))
    for i in range(len(net_all_new)):
        for j in range(len(net_all_new)):
            # Sub-networks are adjacent when any pair of their nodes is.
            net_adjacency_new[i][j] = judge_net_connect(net_all_new[i], net_all_new[j], adjacency_matrix)
    return net_all_new, net_all_feature_new, net_adjacency_new

# Discovery of n-th order sub-networks.
def find_n_network(n, W, H, theta):
    """Run ``n`` rounds of sub-network merging.

    Args:
        n: number of merge rounds (an integer).
        W: (batch, node, node) weighted adjacency tensor.
        H: (batch, node, feat) node feature tensor.
        theta: trainable projection for the attention score.

    Returns:
        (sub-networks, their features, their adjacency) after n rounds;
        for ``n == 0`` the first-order initialization is returned as-is.
    """
    # First-order sub-networks: one per sample, covering every node.
    batch_size, _, _ = W.shape
    net_all = [list(range(W.shape[1])) for _ in range(batch_size)]
    net_all_feature = [H[i] for i in range(batch_size)]
    net_adjacency = torch.where(W != 0, 1, 0)
    adjacency_matrix = torch.where(W != 0, 1, 0)
    # BUG FIX: the original wrote ``for i in n``, which raises TypeError
    # for an integer n; iterate range(n) instead.
    for _ in range(n):
        net_all, net_all_feature, net_adjacency = find_n_network_(
            net_all, net_all_feature, net_adjacency, theta, adjacency_matrix)
    return net_all, net_all_feature, net_adjacency

# Whether sub-network 1 is connected to sub-network 2.
def judge_net_connect(net_1, net_2, adjacency_matrix):
    """Return 1 if any node of ``net_1`` is adjacent to any node of
    ``net_2`` according to ``adjacency_matrix``, else 0.

    NOTE(review): indices are shifted by -1, which implies 1-based node
    ids, yet callers in this file build 0-based ids — confirm.
    """
    return int(any(
        adjacency_matrix[u - 1][v - 1] == 1
        for u in net_1
        for v in net_2
    ))
