
import pickle
import os
import random as rd
import numpy as np
import copy
import copy as cp
import time
import logging, sys, json
from datetime import datetime
import scipy.sparse as sp
from scipy.io import loadmat
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
from torch_geometric.utils import to_dense_adj, subgraph
from sklearn.preprocessing import MinMaxScaler,LabelEncoder, label_binarize
from sklearn.model_selection import train_test_split
from sklearn.metrics import f1_score, accuracy_score, recall_score, roc_auc_score, average_precision_score, confusion_matrix


# Relation name -> adjacency-list pickle filename (one file per graph relation;
# 'amz_*' are Amazon user relations, 'yelp_*' are YelpChi review relations).
filelist = {
    'amz_upu': 'amz_upu_adjlists.pickle',
    'amz_usu': 'amz_usu_adjlists.pickle',
    'amz_uvu': 'amz_uvu_adjlists.pickle',
    'yelp_rsr': 'yelp_rsr_adjlists.pickle',
    'yelp_rtr': 'yelp_rtr_adjlists.pickle',
    'yelp_rur': 'yelp_rur_adjlists.pickle'
}

# Relation name -> filename prefix of the precomputed tree-decomposition
# matrices; level i is stored as '<prefix><i>.pkl' for i = 1..k (see load_data).
file_matrix_prefix = {
    'amz_upu': 'amazon_upu_matrix_',
    'amz_usu': 'amazon_usu_matrix_',
    'amz_uvu': 'amazon_uvu_matrix_',
    'yelp_rsr': 'yelpnet_rsr_matrix_decompision_',
    'yelp_rtr': 'yelpnet_rtr_matrix_decompision_',
    'yelp_rur': 'yelpnet_rur_matrix_decompision_'
}


def create_node_subgraph(node_idx, feat_data, edge_indexs, device, sample_size=399):
    """Build the 1-hop neighbourhood subgraph of a single centre node.

    Args:
        node_idx: global index of the centre node.
        feat_data: [N, F] feature tensor for the whole graph.
        edge_indexs: list of relations; each entry's element 0 is a 2 x E
            global edge-index tensor (element 1, the tree list, is unused here).
        device: device the per-relation local edge tensors are moved to.
        sample_size: cap on the number of neighbours kept; larger
            neighbourhoods are randomly subsampled (previously hard-coded 399).

    Returns:
        dict with:
            'features'  - [n_sub, F] tensor, centre node always first;
            'edges'     - per-relation local 2 x E LongTensors; a single
                          self-loop [[0], [0]] when a relation has no edge
                          inside the subgraph, so the graph is never empty;
            'global_idx' - local-to-global node-index list.
    """
    # Collect out-neighbours of the centre node across all relations.
    neighbors = set()
    for rel_idx in range(len(edge_indexs)):
        edge_index = edge_indexs[rel_idx][0].cpu().numpy()
        neighbors.update(edge_index[1][edge_index[0] == node_idx].tolist())

    # Drop the centre node; it is prepended separately below.
    neighbors.discard(node_idx)
    neighbors = list(neighbors)

    # Randomly subsample when the neighbourhood is too large.
    if len(neighbors) > sample_size:
        neighbors = np.random.choice(neighbors, size=sample_size, replace=False).tolist()

    # Centre node is always local index 0 (node_idx was already discarded,
    # so no further filtering of `neighbors` is needed).
    sub_nodes = [node_idx] + neighbors

    # Global -> local index map. Loop-invariant: build once, not per relation.
    node_map = {n: i for i, n in enumerate(sub_nodes)}
    sub_nodes_arr = np.asarray(sub_nodes)

    sub_edge_index = []
    for rel_idx in range(len(edge_indexs)):
        edge_index = edge_indexs[rel_idx][0].cpu().numpy()
        # Keep only edges whose endpoints are both inside the subgraph.
        mask = np.isin(edge_index[0], sub_nodes_arr) & np.isin(edge_index[1], sub_nodes_arr)
        local_edges = edge_index[:, mask]

        # `local_edges` has shape (2, E); only .size distinguishes empty from
        # non-empty (the old `len(local_edges) > 0` check was always true).
        if local_edges.size > 0:
            src_nodes = [node_map[src] for src in local_edges[0]]
            dst_nodes = [node_map[dst] for dst in local_edges[1]]
            edge_tensor = torch.tensor([src_nodes, dst_nodes], dtype=torch.long)
        else:
            # Self-loop on the centre node keeps the relation graph non-empty.
            edge_tensor = torch.tensor([[0], [0]], dtype=torch.long)

        sub_edge_index.append(edge_tensor.to(device))

    return {
        'features': feat_data[sub_nodes].clone(),
        'edges': sub_edge_index,
        'global_idx': sub_nodes
    }


def dict_to_edge_index(edge_dict):
    """Flatten an adjacency-list dict {src: iterable-of-dst} into a 2 x E LongTensor."""
    pairs = [(src, dst) for src, dsts in edge_dict.items() for dst in dsts]
    sources = [src for src, _ in pairs]
    targets = [dst for _, dst in pairs]
    return torch.LongTensor([sources, targets])


def numpy_array_to_edge_index(np_array):
    """Convert a square (adjacency) matrix into a 2 x E edge-index LongTensor."""
    assert np_array.ndim == 2 and np_array.shape[0] == np_array.shape[1], "Input must be a square matrix."
    # np.nonzero returns (rows, cols); stacking them gives the edge index.
    stacked = np.vstack(np.nonzero(np_array))
    return torch.from_numpy(stacked).long()

def load_cora_or_citeser(dataset_name, data_dir):
    """Load a Planetoid-style citation dataset (cora / citeseer).

    Expects `<data_dir>/<name>/<name>.content` (rows: id feat... label) and
    `<data_dir>/<name>/<name>.cites` (rows: src dst, by string node id).

    Returns:
        features: [N, F] float32 array.
        labels:   [N] int array, classes encoded to 0..C-1 in sorted order.
        edges:    [2, E] int array of edges whose endpoints both appear in
                  the content file (others are silently dropped).
    """
    prefix = os.path.join(data_dir, dataset_name)
    content_file = os.path.join(prefix, f"{dataset_name}.content")
    cites_file = os.path.join(prefix, f"{dataset_name}.cites")

    # Node features and string labels.
    idx_features_labels = np.genfromtxt(content_file, dtype=np.dtype(str))
    features = idx_features_labels[:, 1:-1].astype(np.float32)
    # np.unique(..., return_inverse=True) encodes labels to 0..C-1 by sorted
    # class name — identical to sklearn's LabelEncoder.fit_transform.
    _, labels = np.unique(idx_features_labels[:, -1], return_inverse=True)

    # Map string node ids to contiguous indices.
    nodes = idx_features_labels[:, 0]
    node_map = {node_id: idx for idx, node_id in enumerate(nodes)}

    # Read edges. genfromtxt returns a 1-D array when the file holds a single
    # line; atleast_2d restores the expected (E, 2) shape in that case.
    edges_unordered = np.atleast_2d(np.genfromtxt(cites_file, dtype=np.dtype(str)))
    edge_list = [[node_map[u], node_map[v]]
                 for u, v in edges_unordered
                 if u in node_map and v in node_map]
    if edge_list:
        edges = np.array(edge_list).T
    else:
        # No valid edge: keep the [2, 0] shape so downstream code still works.
        edges = np.empty((2, 0), dtype=np.int64)

    return features, labels, edges

def load_data(data, k=2, prefix=''):
    """Load one of the supported datasets.

    Args:
        data: 'yelp', 'amazon', 'cora' or 'citeseer'.
        k: number of tree-decomposition levels to load per relation
           (files '<matrix_prefix><1..k>.pkl'; yelp/amazon only).
        prefix: directory containing all data files.

    Returns:
        (relations, feat_data, labels) where relations is a list of
        [edge_index, tree_edge_index_list] pairs — one per relation for
        yelp/amazon, a single pair with an empty tree list for cora/citeseer.
        Returns None implicitly for an unknown dataset name.

    NOTE(review): the cora/citeseer branches read the module-level `device`
    (defined later in this script, but before load_data is called) — the
    returned edge_index already lives on that device.
    """
    pickle_file = {key: os.path.join(prefix, filelist[key]) for key in filelist}
    matrix_prefix = {key: os.path.join(prefix, file_matrix_prefix[key]) for key in filelist}

    def _load_relation(key):
        # Adjacency-list pickle -> 2 x E edge-index tensor.
        with open(pickle_file[key], 'rb') as fh:
            adjlist = pickle.load(fh)
        return dict_to_edge_index(adjlist)

    def _load_trees(key):
        # k tree-decomposition matrices -> list of edge-index tensors.
        trees = []
        for i in range(1, k + 1):
            file_name = '{}{}.pkl'.format(matrix_prefix[key], i)
            with open(file_name, 'rb') as fh:
                tree = pickle.load(fh)
            trees.append(numpy_array_to_edge_index(tree))
        return trees

    if data == 'yelp':
        data_file = loadmat(os.path.join(prefix, 'YelpChi.mat'))
        labels = data_file['label'].flatten()
        feat_data = data_file['features'].todense().A
        relations = [[_load_relation(key), _load_trees(key)]
                     for key in ('yelp_rur', 'yelp_rtr', 'yelp_rsr')]
        return relations, feat_data, labels
    elif data == 'amazon':
        data_file = loadmat(os.path.join(prefix, 'Amazon.mat'))
        labels = data_file['label'].flatten()
        feat_data = data_file['features'].todense().A
        relations = [[_load_relation(key), _load_trees(key)]
                     for key in ('amz_upu', 'amz_usu', 'amz_uvu')]
        return relations, feat_data, labels
    elif data in ('cora', 'citeseer'):
        features, labels, edges = load_cora_or_citeser(data, prefix)
        edge_index = torch.tensor(edges, dtype=torch.long).to(device)
        # Single relation, no tree decomposition for the citation graphs.
        return [[edge_index, []]], features, labels

def normalize(mx):
    """Row-normalize a (sparse or dense) matrix, smoothing row sums by +0.01."""
    row_sum = np.array(mx.sum(1)) + 0.01
    inv = (1.0 / row_sum).flatten()
    inv[np.isinf(inv)] = 0.
    # Left-multiplying by diag(1/rowsum) scales every row to (roughly) unit sum.
    return sp.diags(inv).dot(mx)


def pos_neg_split(nodes, labels):
    """Split `nodes` into (positive, negative) lists by their parallel `labels`.

    `nodes` and `labels` are parallel sequences: nodes[i] is positive when
    labels[i] == 1, negative otherwise. Original order is preserved in both
    outputs. Single O(n) pass — the previous version deep-copied the node
    list and called list.remove per positive node (accidental O(n^2)).
    """
    pos_nodes = []
    neg_nodes = []
    for node, label in zip(nodes, labels):
        if label == 1:
            pos_nodes.append(node)
        else:
            neg_nodes.append(node)

    return pos_nodes, neg_nodes


def undersample(pos_nodes, neg_nodes, scale=1):
    """Balance the batch: keep all positives and sample len(pos)*scale negatives."""
    k = int(len(pos_nodes) * scale)
    # rd.sample does not mutate its input, so a shallow copy is enough here.
    sampled_negatives = rd.sample(list(neg_nodes), k=k)
    return pos_nodes + sampled_negatives


def calculate_g_mean(y_true, y_pred):
    """Geometric mean of the per-class recalls (sensitivities)."""
    cm = confusion_matrix(y_true, y_pred)
    recalls = []
    for class_idx, row in enumerate(cm):
        support = row.sum()  # TP + FN for this class
        recall = cm[class_idx, class_idx] / support if support != 0 else 0
        recalls.append(recall)
    return np.prod(recalls) ** (1 / len(recalls))


def iterate_batches(indices, batch_size, shuffle=True):
    """Yield `indices` in consecutive batches of at most `batch_size`.

    :param indices: sequence of sample indices.
    :param batch_size: size of each yielded batch (last one may be shorter).
    :param shuffle: randomize the order before batching.
    :return: generator of index lists.

    Fix: shuffling now operates on a copy, so the caller's list is no longer
    mutated in place (same RNG consumption, so shuffled output is unchanged).
    """
    order = list(indices)
    if shuffle:
        rd.shuffle(order)
    for start in range(0, len(order), batch_size):
        yield order[start:start + batch_size]


def setup_logger(log_dir='./logs', log_name=None):
    """Return the shared 'BSNE' logger writing UTF-8 to a file and to stdout."""
    os.makedirs(log_dir, exist_ok=True)
    if log_name is None:
        log_name = datetime.now().strftime('%Y%m%d-%H%M%S') + '.log'
    log_path = os.path.join(log_dir, log_name)

    logger = logging.getLogger('BSNE')
    logger.setLevel(logging.INFO)

    # Already configured on a previous call: reuse as-is to avoid duplicates.
    if logger.handlers:
        return logger

    formatter = logging.Formatter('%(asctime)s | %(levelname)s | %(message)s',
                                  datefmt='%Y-%m-%d %H:%M:%S')
    file_handler = logging.FileHandler(log_path, encoding='utf-8')
    console_handler = logging.StreamHandler(sys.stdout)
    for handler in (file_handler, console_handler):
        handler.setLevel(logging.INFO)
        handler.setFormatter(formatter)
        logger.addHandler(handler)
    return logger

# def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):
#     model.eval()
#     all_probs = []
#     all_labels = []
#
#     # 分批处理
#     for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):
#         subgraph_data = []
#
#         # 为每个中心节点构建子图
#         for xi in batch_centers:
#             subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)
#             subgraph_data.append(subgraph)
#
#         # 获取中心节点预测
#         with torch.no_grad():
#             center_logits, _ = model(subgraph_data)  # [B, 2]
#             probs = torch.softmax(center_logits, dim=-1)[:, 1]  # 正类概率
#             all_probs.extend(probs.cpu().numpy())
#             all_labels.extend([labels[xi] for xi in batch_centers])
#
#     # 计算指标
#     auc_score = roc_auc_score(all_labels, all_probs)
#     ap_score = average_precision_score(all_labels, all_probs)
#     pred_labels = (np.array(all_probs) >= 0.5).astype(int)
#     f1 = f1_score(all_labels, pred_labels, average='macro')
#     g_mean = calculate_g_mean(all_labels, pred_labels)
#
#     return auc_score, ap_score, f1, g_mean

# Revised after adding the cora dataset (multi-class evaluation).
def test(idx_eval, y_eval, model, feat_data, edge_indexs, device, batch_size=64):
    """Evaluate `model` on the nodes in `idx_eval`.

    Args:
        idx_eval: global node indices to evaluate.
        y_eval: label array indexable by global node index.
        model: returns (logits, features) for a list of subgraph dicts.
        feat_data / edge_indexs / device: passed to create_node_subgraph.
        batch_size: evaluation batch size.

    Returns:
        (auc, average_precision, macro_f1, g_mean, accuracy).
    """
    model.eval()
    all_probs = []   # list of [B, C] arrays
    all_labels = []  # list of ints

    # Batched inference over fixed-order batches.
    for batch_centers in iterate_batches(idx_eval, batch_size, shuffle=False):
        subgraph_data = [create_node_subgraph(xi, feat_data, edge_indexs, device)
                         for xi in batch_centers]

        with torch.no_grad():
            center_logits, _ = model(subgraph_data)          # [B, C]
            # NOTE(review): the model already outputs log_softmax; applying
            # softmax again is monotonic, so argmax/rank metrics are unchanged.
            probs = torch.softmax(center_logits, dim=-1)     # [B, C]
            all_probs.append(probs.cpu().numpy())
            all_labels.extend(y_eval[xi] for xi in batch_centers)

    all_probs = np.vstack(all_probs)        # [N, C]
    all_labels = np.array(all_labels)       # [N]
    n_classes = all_probs.shape[1]

    # ========= metrics =========
    if n_classes == 2:
        # Binary fix: label_binarize yields shape (N, 1) for two classes,
        # which breaks roc_auc_score(multi_class="ovr") against (N, 2) probs.
        # Score directly on the positive-class probability instead.
        pos_scores = all_probs[:, 1]
        auc_score = roc_auc_score(all_labels, pos_scores)
        ap_score = average_precision_score(all_labels, pos_scores)
    else:
        # Multi-class: one-hot encode and macro-average one-vs-rest scores.
        y_true_bin = label_binarize(all_labels, classes=list(range(n_classes)))
        auc_score = roc_auc_score(y_true_bin, all_probs, multi_class="ovr", average="macro")
        ap_score = average_precision_score(y_true_bin, all_probs, average="macro")

    pred_labels = np.argmax(all_probs, axis=1)
    f1 = f1_score(all_labels, pred_labels, average="macro")
    acc = accuracy_score(all_labels, pred_labels)
    g_mean = calculate_g_mean(all_labels, pred_labels)

    return auc_score, ap_score, f1, g_mean, acc
class BSNE_Transformer(nn.Module):
    """Multi-relation subgraph Transformer classifier.

    Each input subgraph is encoded once per relation by a dedicated
    TransformerEncoder; the relation's adjacency (plus self-loops) is turned
    into an additive attention mask so every node only attends to its
    neighbours. The per-relation embeddings of the centre node (local index 0)
    are concatenated and classified; `forward` returns log-probabilities
    (log_softmax), so callers should use F.nll_loss.
    """

    def __init__(self, in_feat, out_feat, relation_nums=3, d_model=256,
                 nhead=8, num_layers=3, dim_feedforward=256,
                 drop_rate=0.5):
        """Args:
            in_feat: raw node-feature dimension.
            out_feat: number of output classes.
            relation_nums: number of relation types (one encoder each).
            d_model / nhead / num_layers / dim_feedforward / drop_rate:
                standard Transformer hyper-parameters.
        """
        super().__init__()
        self.relation_nums = relation_nums
        self.d_model = d_model
        self.nhead = nhead

        # Project raw node features into the model dimension.
        self.feature_proj = nn.Sequential(
            nn.Linear(in_feat, d_model),
            nn.LayerNorm(d_model),
            nn.ReLU()
        )
        self.norm = nn.LayerNorm(d_model)

        # One independent Transformer encoder per relation type.
        self.relation_encoders = nn.ModuleList([
            TransformerEncoder(
                TransformerEncoderLayer(
                    d_model=d_model,
                    nhead=nhead,
                    dim_feedforward=dim_feedforward,
                    dropout=drop_rate,
                    batch_first=True
                ),
                num_layers=num_layers
            ) for _ in range(relation_nums)
        ])

        # Classifier over the concatenated per-relation centre embeddings.
        self.classifier = nn.Sequential(
            nn.Linear(relation_nums * d_model, 512),
            nn.ReLU(),
            nn.Dropout(drop_rate),
            nn.Linear(512, out_feat)
        )
        # Xavier init for all weight matrices; dim > 1 skips biases/LayerNorm.
        for p in self.parameters():
            if p.dim() > 1:
                nn.init.xavier_uniform_(p)

    def forward(self, subgraph_batch):
        """Encode a list of subgraph dicts (see create_node_subgraph).

        Subgraphs have varying node counts, so each is processed separately
        with batch dimension 1.

        Returns:
            center_logits: [B, out_feat] log-probabilities of the centre nodes.
            center_features: [B, relation_nums * d_model] centre embeddings.
        """
        # Process each subgraph individually.
        center_logits_list = []
        center_features_list = []

        for sg in subgraph_batch:
            # Project the subgraph's node features.
            features = self.feature_proj(sg['features'].unsqueeze(0))  # [1, num_nodes, d_model]
            features = self.norm(features)

            # Encode once per relation type.
            rel_outputs = []
            num_nodes = features.size(1)

            for rel_idx in range(self.relation_nums):
                # Build the dense adjacency for this relation.
                edge_index = sg['edges'][rel_idx]
                adj = torch.zeros(num_nodes, num_nodes,
                                  dtype=torch.float, device=features.device)

                if edge_index.size(1) > 0:
                    src, dst = edge_index
                    adj[src, dst] = 1.0

                # Self-loops so every node attends to itself (no all -inf rows).
                adj[range(num_nodes), range(num_nodes)] = 1.0

                # Additive attention mask: 0 where an edge exists, -inf elsewhere.
                adj_mask = adj.masked_fill(adj == 0.0, float('-inf'))
                adj_mask = adj_mask.masked_fill(adj == 1.0, 0.0)

                # Expand to the (batch * nhead, N, N) shape expected by
                # nn.TransformerEncoder's per-head attention mask.
                adj_mask = adj_mask.unsqueeze(0).unsqueeze(0)  # [1, 1, num_nodes, num_nodes]
                adj_mask = adj_mask.expand(1, self.nhead, num_nodes, num_nodes)
                adj_mask = adj_mask.reshape(-1, num_nodes, num_nodes)

                # Relation-specific encoding.
                encoder_output = self.relation_encoders[rel_idx](
                    src=features,
                    mask=adj_mask
                )
                rel_outputs.append(encoder_output)

            # Concatenate the per-relation representations.
            combined = torch.cat(rel_outputs, dim=-1)  # [1, num_nodes, rel*d_model]

            # Centre node is always local index 0 (see create_node_subgraph).
            center_features = combined[:, 0, :]  # [1, rel*d_model]
            center_logits = self.classifier(center_features)  # [1, out_feat]
            center_logits = F.log_softmax(center_logits, dim=-1)

            center_logits_list.append(center_logits)
            center_features_list.append(center_features)

        # Stack the per-subgraph results into one batch.
        center_logits = torch.cat(center_logits_list, dim=0)
        center_features = torch.cat(center_features_list, dim=0)

        return center_logits, center_features



# Hyper-parameter settings
args = {
    # "dataset": "amazon",
    # "out_feat": 2,
    "dataset": "cora",
    "out_feat": 7,
    "weight_decay": 0.00005,
    "seed": 76,
    # pretraining parameters (used only by the disabled pretraining stage below)
    "pretrain_epochs": 10,
    "max_steps": 100,
    "sample_size": 100,
    "loss_threshold": 0.3,
    "pretrain_lr": 0.0005,

    # classification (fine-tuning) parameters
    "batch_size": 64,
    "num_epochs": 1500,
    "patience": 30,
    "test_size": 0.3,
    "val_size": 0.5,
    "finetune_lr": 0.0005,
    # model architecture parameters
    "layers_tree": 7,
    "num_heads": 4,
    "num_layers": 4,
    "drop_rate": 0.5
}

# Create the shared logger (file + stdout) and record the configuration.
logger = setup_logger()
logger.info('============  BSNE Training  ============')
logger.info('Args:\n' + json.dumps(args, indent=2, ensure_ascii=False))

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
logger.info(device)
logger.info('loading data...')
prefix = "../../data/"

# NOTE(review): load_data's cora/citeseer branches read the module-level
# `device`, so it must be defined before this call (it is, just above).
edge_indexs, feat_data, labels = load_data(args['dataset'], args['layers_tree'], prefix)

# Seed numpy/random for the sampling and splits below.
# NOTE(review): torch's RNG is never seeded, so model init and dropout are
# not reproducible across runs — confirm whether that is intentional.
np.random.seed(args['seed'])
rd.seed(args['seed'])

# Per-dataset train/val/test split. Every branch performs the same stratified
# two-stage split with fixed random_state=2, and selects the path of the
# precomputed shortest-distance pickle (used by the disabled pretraining stage).
if args['dataset'] == 'yelp':
    index = list(range(len(labels)))
    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,
                                                                    test_size=args['test_size'], random_state=2,
                                                                    shuffle=True)
    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,
                                                          stratify=y_train_val, test_size=args['val_size'],
                                                          random_state=2, shuffle=True)
    dist_path = os.path.join(prefix, "YelpChi_shortest_distance.pkl")
elif args['dataset'] == 'amazon':
    # The first 3305 Amazon nodes are excluded from the split —
    # presumably the dataset's unlabeled-node prefix; confirm against the data.
    index = list(range(3305, len(labels)))
    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels[3305:],
                                                                    stratify=labels[3305:],
                                                                    test_size=args['test_size'],
                                                                    random_state=2, shuffle=True)
    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,
                                                          stratify=y_train_val, test_size=args['val_size'],
                                                          random_state=2, shuffle=True)
    dist_path = os.path.join(prefix, "Amazon_shortest_distance.pkl")
elif args['dataset'] == 'cora':
    index = list(range(len(labels)))
    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,
                                                                    test_size=args['test_size'], random_state=2,
                                                                    shuffle=True)
    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,
                                                          stratify=y_train_val, test_size=args['val_size'],
                                                          random_state=2, shuffle=True)
    dist_path = os.path.join(prefix, "cora_shortest_distance.pkl")
elif args['dataset'] == 'citeseer':
    index = list(range(len(labels)))
    idx_train_val, idx_test, y_train_val, y_test = train_test_split(index, labels, stratify=labels,
                                                                    test_size=args['test_size'], random_state=2,
                                                                    shuffle=True)
    idx_train, idx_val, y_train, y_val = train_test_split(idx_train_val, y_train_val,
                                                          stratify=y_train_val, test_size=args['val_size'],
                                                          random_state=2, shuffle=True)
    dist_path = os.path.join(prefix, "citeseer_shortest_distance.pkl")

# Shortest-distance matrix: only needed by the disabled pretraining stage,
# but loaded unconditionally — the file must exist for every dataset.
with open(dist_path, 'rb') as f:
    dist_data = pickle.load(f)
    dist_matrix = torch.tensor(dist_data['dist_matrix']).to(device)

# Prepare node features.
feat_data = torch.tensor(feat_data).float()
# Min-max normalization, fitted on the full graph (train + val + test nodes).
scaler = MinMaxScaler()
feat_data = torch.tensor(scaler.fit_transform(feat_data)).float().to(device)

# Initialize the model.
bsne_model = BSNE_Transformer(
    in_feat=feat_data.shape[1],
    out_feat=args['out_feat'],
    relation_nums=len(edge_indexs),
    d_model=64,  # NOTE(review): hard-coded here, overriding the class default of 256
    nhead=args['num_heads'],
    num_layers=args['num_layers'],
    dim_feedforward=256,
    drop_rate=args['drop_rate']
).to(device)

# Move the edge indices to the device: element 0 is the relation's edge-index
# tensor, element 1 the list of tree-decomposition edge-index tensors.
for edge_index in edge_indexs:
    edge_index[0] = edge_index[0].to(device)
    edge_index[1] = [tensor.to(device) for tensor in edge_index[1]]

# NOTE(review): the entire pretraining stage below is disabled by being wrapped
# in a module-level string literal (a no-op expression). It is kept verbatim —
# re-enable or delete it deliberately rather than editing it in place.
'''

logger.info("\n=== Starting Pretraining ===")

bsne_model.classifier.requires_grad_(False)
optimizer = torch.optim.AdamW(
    filter(lambda p: p.requires_grad, bsne_model.parameters()),
    lr=args['pretrain_lr'],
    weight_decay=args["weight_decay"]
)
pretrain_best_loss = float('inf')
pretrain_no_improve = 0
pretrain_early_stop = False

temperature = 1  # 越小区分性越强

loss_threshold = args['loss_threshold']
sample_size = args['sample_size']
max_steps = args['max_steps']
max_epochs = args['pretrain_epochs']
center_indices = list(range(feat_data.shape[0]))
# 在每轮epoch前随机打乱中心点顺序
rd.shuffle(center_indices)

# 限制训练的中心点数量
center_indices = center_indices[:max_epochs]

for epoch, center_idx in enumerate(center_indices):
    logger.info(f"\n=== Pretraining Epoch {epoch} (Center Node: {center_idx}) ===")
    step = 0

    dist_row = dist_matrix[center_idx].cpu().numpy()
    probs = np.exp(-dist_row / temperature)

    probs[center_idx] = 0
    probs = probs / (probs.sum() + 1e-10)

    available_nodes = len(dist_row) - 1

    # 构建Bp子图
    actual_sample_size = min(sample_size, available_nodes)
    if actual_sample_size > 0:
        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, p=probs, replace=False)
        bp_nodes = neighbors.tolist()

    # 构建Bu子图
    if actual_sample_size > 0:
        neighbors = np.random.choice(len(dist_row), size=actual_sample_size, replace=False)
        bu_nodes = neighbors.tolist()

    while True:
        bsne_model.train()
        optimizer.zero_grad()

        total_loss = 0.0  # 每个step都要重置
        eps = 1e-10

        # 计算Bp子图中所有节点的特征
        bp_node_features = []
        for node_idx in bp_nodes:
            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)
            _, node_feature = bsne_model([node_subgraph])
            bp_node_features.append(node_feature.squeeze(0))
        bp_features = torch.stack(bp_node_features)

        center_node_subgraph = create_node_subgraph(center_idx, feat_data, edge_indexs, device)
        _, center_feature = bsne_model([center_node_subgraph])
        center_feature = center_feature.squeeze(0)



        P = torch.tensor(probs[bp_nodes], device=device, dtype=torch.float32)
        P = P/ P.sum()
        # print(P)
        # p_entropy = -torch.sum(P * torch.log(P)).item()
        # print(f"p_entropy: {p_entropy:.4f}")

        # 计算Q向量（欧式距离）
        feat_dists_bp = torch.cdist(center_feature.unsqueeze(0), bp_features).squeeze(0)
        Q = torch.softmax(-feat_dists_bp, dim=0)
        log_ratio = (torch.log(P / Q)) ** 2
        # log_ratio = torch.log(P / Q)
        loss_local = log_ratio.mean()

        # 全局loss计算
        # 计算Bu子图中所有节点的特征
        bu_node_features = []
        for node_idx in bu_nodes:
            node_subgraph = create_node_subgraph(node_idx, feat_data, edge_indexs, device)
            _, node_feature = bsne_model([node_subgraph])
            bu_node_features.append(node_feature.squeeze(0))
        bu_features = torch.stack(bu_node_features)

        feat_dists_bu = torch.cdist(center_feature.unsqueeze(0), bu_features).squeeze(0)
        sum_e_bp = torch.exp(-feat_dists_bp).sum()
        sum_e_bu = torch.exp(-feat_dists_bu).sum()

        N = len(probs)
        k_Bp = probs[bp_nodes].sum() * (N / len(bp_nodes))
        loss_global = (torch.log(k_Bp*sum_e_bu/sum_e_bp))**2
        #         #加上平方项
        #         loss_global = (torch.log(global_ratio.clamp(min=eps, max=1e10)))**2
        total_loss = loss_local + loss_global
        # total_loss = loss_local
        total_loss.backward()
        optimizer.step()

        # logger.info(f"Step {step}: local_loss: {loss_local.item()}")
        logger.info(f"Step {step}: local_loss: {loss_local.item()} | global_loss: {loss_global.item()} | total_loss: {total_loss.item()}")
        step += 1

        if total_loss.item() < loss_threshold or step >= max_steps:
            logger.info(f"Center node {center_idx} finished at step {step} with loss {total_loss.item():.4f}")
            break

'''


logger.info("\n=== Starting Fine-tuning ===")
# Re-enable the classifier head (frozen by the pretraining stage when active).
bsne_model.classifier.requires_grad_(True)
optimizer = torch.optim.AdamW(
    bsne_model.parameters(),
    lr=args['finetune_lr'],
    weight_decay=args["weight_decay"]
)
# Halve the LR when validation AUC plateaus.
# NOTE(review): `verbose=True` is deprecated/removed in newer torch releases.
scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(
    optimizer, mode='max', factor=0.5, patience=10, verbose=True
)

best_val_auc = 0.0
best_model_state = None
train_pos, train_neg = pos_neg_split(idx_train, y_train)

no_improve_epochs = 0
early_stop = False

for epoch in range(args['num_epochs']):
    if early_stop:
        break

    bsne_model.train()
    total_loss = 0.0

    # Sample one batch of centre nodes; each "epoch" is a single gradient step.
    batch_centers = rd.sample(train_pos + train_neg, args['batch_size'])
    subgraph_data = []

    for xi in batch_centers:
        subgraph = create_node_subgraph(xi, feat_data, edge_indexs, device)
        subgraph_data.append(subgraph)

    optimizer.zero_grad()

    center_logits, _ = bsne_model(subgraph_data)  # [B, out_feat] log-probabilities

    # Labels of the centre nodes. xi are global node ids, so indexing the full
    # `labels` array is equivalent to looking them up in y_train.
    labels_center = torch.tensor([labels[xi] for xi in batch_centers]).to(device).long()

    # Classification loss (model already returns log_softmax outputs).
    cls_loss = F.nll_loss(center_logits, labels_center)

    cls_loss.backward()
    optimizer.step()

    total_loss += cls_loss.item()

    # NOTE(review): nll_loss already averages over the batch, so dividing by
    # batch_size again yields a scaled value used for logging only.
    avg_loss = total_loss / args['batch_size']

    if epoch % 5 == 0:
        # Validate every 5 epochs; `patience` therefore counts eval rounds.
        val_auc, val_ap, val_f1, val_g_mean, val_acc  = test(idx_val, labels, bsne_model, feat_data, edge_indexs, device)

        logger.info(f'Epoch: {epoch:03d} | Loss: {avg_loss:.4f} | Val ACC: {val_acc:.4f} | Val AUC: {val_auc:.4f} | Val F1: {val_f1:.4f} | Val GMean: {val_g_mean:.4f}')

        scheduler.step(val_auc)

        if val_auc > best_val_auc:
            best_val_auc = val_auc
            no_improve_epochs = 0
            best_model_state = copy.deepcopy(bsne_model.state_dict())
        else:
            no_improve_epochs += 1

        if no_improve_epochs >= args['patience']:
            logger.info(f"Early stopping at epoch {epoch}")
            early_stop = True

# Restore the best validation checkpoint before the final test.
# NOTE(review): best_model_state stays None only if num_epochs == 0, in which
# case load_state_dict would fail.
bsne_model.load_state_dict(best_model_state)

# Final evaluation on the held-out test split.
test_auc, test_ap, test_f1, test_g_mean, test_acc = test(idx_test, labels, bsne_model, feat_data, edge_indexs, device)
logger.info(f'\n=== Final Test Results ===')
logger.info(f'Test ACC: {test_acc:.4f} | Test AUC: {test_auc:.4f} | Test AP: {test_ap:.4f} | Test F1: {test_f1:.4f} | G-mean: {test_g_mean:.4f}')
