import numpy as np
import torch
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler

from plot_picture import visualize_pca_features_bias


# Homophily bias: for each meta-path, measure the label agreement between a
# target node and its meta-path neighbors to quantify its homophily level.
def compute_homophily_level(meta_path_adjs, labels, target_nodes):
    """
    Compute a per-node homophily score under every meta-path.

    For each meta-path (assumed to start and end at the target node type,
    pre-filtered by the caller), a node's score is the fraction of its
    meta-path neighbors that share its label; isolated nodes score 0.

    Args:
        meta_path_adjs: dict mapping meta-path name -> sparse adjacency
            exposing .coo() -> (row, col, value) tensors.
        labels: label tensor indexed by node id.
        target_nodes: iterable of target node ids.

    Returns:
        dict mapping meta-path name -> np.ndarray of scores aligned with
        target_nodes.
    """
    homophily_scores = {}

    for meta_path, adj in meta_path_adjs.items():
        row, col, _ = adj.coo()  # COO edge list of this meta-path graph
        # Sort edges by source node once so each node's neighbor slice can be
        # located with two binary searches instead of re-scanning every edge
        # (the previous per-node `row == node` mask was O(N * E)).
        order = torch.argsort(row)
        row_sorted = row[order]
        col_sorted = col[order]

        consistency_scores = []
        for node in target_nodes:
            lo = torch.searchsorted(row_sorted, node).item()
            hi = torch.searchsorted(row_sorted, node, right=True).item()
            neighbors = col_sorted[lo:hi]
            if len(neighbors) > 0:
                # Fraction of neighbors sharing this node's label.
                consistency = (labels[neighbors] == labels[node]).float().mean()
                consistency_scores.append(consistency.item())
            else:
                # Isolated node: 0 = lowest homophily (0.5 would be neutral).
                consistency_scores.append(0)

        homophily_scores[meta_path] = np.array(consistency_scores)

    return homophily_scores


def compute_homophily_features_for_clustering(meta_path_adjs, labels, labeled_nodes):
    """
    Build the feature matrix used for homophily-bias K-means clustering.

    Args:
        meta_path_adjs: dict mapping meta-path name -> adjacency matrix.
        labels: node labels.
        labeled_nodes: indices of the labeled nodes.

    Returns:
        Tuple of (standardized feature matrix of shape
        [num_labeled_nodes, num_meta_paths], raw per-meta-path homophily
        score dict).
    """
    homophily_scores = compute_homophily_level(meta_path_adjs, labels, labeled_nodes)
    if homophily_scores:
        # One column per meta-path: reshape each 1-D score vector into a
        # column and stack the columns side by side.
        homophily_features = np.concatenate(
            [scores.reshape(-1, 1) for scores in homophily_scores.values()], axis=1
        )
    else:
        # No usable meta-path: fall back to a single zero column.
        homophily_features = np.zeros((len(labeled_nodes), 1))

    # Standardize each column. StandardScaler is imported at module level;
    # the previous function-local re-import was redundant.
    scaler = StandardScaler()
    homophily_features_scaled = scaler.fit_transform(homophily_features)

    return homophily_features_scaled, homophily_scores


# Degree bias: compute each node's degree distribution under the different relation types to expose degree bias.
def compute_multi_relation_degrees(meta_path_adjs, target_nodes):
    """
    Compute the degree of every target node under each meta-path relation.

    Args:
        meta_path_adjs: dict mapping meta-path name -> sparse adjacency
            exposing .storage.rowcount() (non-zeros per row).
        target_nodes: iterable of target node ids.

    Returns:
        dict mapping meta-path name -> np.ndarray of degrees aligned with
        target_nodes.
    """
    degree_info = {}

    for meta_path, adj in meta_path_adjs.items():
        # rowcount() gives the stored-entry count of every row, i.e. each
        # node's degree; fetch it once and gather, instead of calling it
        # again for every single node as before.
        rowcount = adj.storage.rowcount()
        idx = torch.as_tensor(target_nodes, dtype=torch.long, device=rowcount.device)
        degree_info[meta_path] = rowcount[idx].cpu().numpy()

    return degree_info


def compute_degree_features_for_clustering(meta_path_adjs, labeled_nodes):
    """
    Build the feature matrix used for degree-bias K-means clustering.

    Args:
        meta_path_adjs: dict mapping meta-path name -> adjacency matrix.
        labeled_nodes: indices of the labeled nodes.

    Returns:
        Tuple of (standardized degree matrix of shape
        [num_labeled_nodes, num_relation_types], raw per-relation degree
        dict).
    """
    degree_info = compute_multi_relation_degrees(meta_path_adjs, labeled_nodes)
    # One column per relation type.
    degree_features = np.column_stack(list(degree_info.values()))

    # Standardize each column. StandardScaler is imported at module level;
    # the previous function-local re-import was redundant.
    scaler = StandardScaler()
    degree_features_scaled = scaler.fit_transform(degree_features)

    return degree_features_scaled, degree_info


def compute_pca_by_train(target_features, labeled_nodes, train_ids, n_components=128):
    """
    Fit StandardScaler + PCA on the training subset only, then project every
    labeled node into that PCA space.

    Returns:
        Array of PCA scores with shape [len(labeled_nodes), n_components].
    """
    # Pull tensor features over to numpy when necessary.
    if hasattr(target_features, 'cpu'):
        X = target_features[labeled_nodes].cpu().numpy()
    else:
        X = target_features[labeled_nodes]

    # Relative row positions of the training ids inside labeled_nodes.
    position_of = {node_id: pos for pos, node_id in enumerate(labeled_nodes)}
    train_rows = [position_of[node_id] for node_id in train_ids if node_id in position_of]
    train_block = X[train_rows]

    # Fit both transforms on the training rows only (no test leakage).
    scaler = StandardScaler().fit(train_block)
    pca = PCA(n_components=n_components).fit(scaler.transform(train_block))

    # Project the full labeled set with the train-fitted transforms.
    return pca.transform(scaler.transform(X))


def compute_feature_features_for_clustering(target_features, labeled_nodes, n_components=128):
    """
    Reduce the labeled nodes' features with PCA for feature-bias clustering.

    Args:
        target_features: feature matrix of the target nodes (torch tensor),
            or None when no features are available.
        labeled_nodes: indices of the labeled nodes.
        n_components: target dimensionality of the PCA projection.

    Returns:
        Tuple (features, pca_model):
        - (torch.FloatTensor, {'pca', 'scaler'}) when the input dimension
          exceeds n_components;
        - (raw labeled features as numpy, None) when no reduction is needed;
        - (None, None) when target_features is None.
    """
    if target_features is None:
        # BUG FIX: this branch previously returned a 3-tuple while every
        # other path (and both call sites) unpacks exactly 2 values.
        return None, None

    labeled_target_features = target_features[labeled_nodes].cpu().numpy()
    pre_dimension = labeled_target_features.shape[1]
    if pre_dimension <= n_components:
        # Already low-dimensional: return the features unchanged.
        return labeled_target_features, None

    # Standardize so that every input dimension contributes on equal scale.
    scaler = StandardScaler()
    features_scaled = scaler.fit_transform(labeled_target_features)

    # PCA dimensionality reduction.
    pca = PCA(n_components=n_components)
    features_pca = pca.fit_transform(features_scaled)

    pca_features = torch.FloatTensor(features_pca)
    # Keep the fitted transforms so callers can project new data identically.
    pca_model = {'pca': pca, 'scaler': scaler}

    print(f"PCA feature dimension reduction completed, {pre_dimension} -> {pca_features.shape[1]}")
    return pca_features, pca_model


def create_ood_data_split(bias_type, features, labeled_nodes, train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) split of the labeled nodes with
    2-cluster K-means.

    The larger cluster is shuffled and cut into train/val at train_val_ratio;
    the smaller (minority) cluster becomes the OOD test set.

    Args:
        bias_type: bias type name, used only in log messages.
        features: clustering features for the labeled nodes.
        labeled_nodes: indices of the labeled nodes.
        train_val_ratio: fraction of the large cluster used for training.

    Returns:
        new_train_nid, new_val_nid, new_test_nid: new split node indices.
    """
    from sklearn.cluster import KMeans

    # Partition the labeled nodes into two clusters in feature space.
    assignments = KMeans(n_clusters=2, random_state=42, n_init=10).fit_predict(features)

    cluster_0_nodes = labeled_nodes[assignments == 0]
    cluster_1_nodes = labeled_nodes[assignments == 1]

    print(f"Cluster 0 size: {len(cluster_0_nodes)}")
    print(f"Cluster 1 size: {len(cluster_1_nodes)}")

    # Pick the majority cluster; ties go to cluster 0, as before.
    if len(cluster_1_nodes) > len(cluster_0_nodes):
        large_cluster, small_cluster = cluster_1_nodes, cluster_0_nodes
    else:
        large_cluster, small_cluster = cluster_0_nodes, cluster_1_nodes

    # Shuffle the large cluster in place, then cut train/val at the ratio.
    np.random.shuffle(large_cluster)
    split_point = int(len(large_cluster) * train_val_ratio)
    new_train_nid = large_cluster[:split_point]
    new_val_nid = large_cluster[split_point:]

    # The minority cluster serves as the OOD test set.
    new_test_nid = small_cluster

    print(
        f"New {bias_type} OOD split - Train: {len(new_train_nid)}, Val: {len(new_val_nid)}, Test: {len(new_test_nid)}")

    return new_train_nid, new_val_nid, new_test_nid


def create_ood_data_split_by_homophily(bias_type, meta_path_adjs, labels, labeled_nodes, train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) data split driven by homophily bias.

    Homophily features are clustered with K-means (via create_ood_data_split),
    then per-meta-path homophily statistics of the resulting split are printed.

    Args:
        bias_type: bias type label used in log messages (expected 'homophily').
        meta_path_adjs: dict mapping meta-path name -> adjacency matrix.
        labels: node labels.
        labeled_nodes: indices of the labeled nodes.
        train_val_ratio: fraction of the large cluster assigned to training.

    Returns:
        new_train_nid, new_val_nid, new_test_nid: new train/val/test node ids.
    """

    features, homophily_scores = compute_homophily_features_for_clustering(meta_path_adjs, labels, labeled_nodes)
    print(f"Using homophily features with {features.shape[1]} meta-paths")

    new_train_nid, new_val_nid, new_test_nid = create_ood_data_split(bias_type, features, labeled_nodes,
                                                                     train_val_ratio)

    # Analyze how the train/val/test sets differ under this bias type.
    print(f"\n=== {bias_type.upper()} Bias Analysis for Data Splits ===")

    # Boolean masks locating each split inside labeled_nodes.
    train_indices = np.isin(labeled_nodes, new_train_nid)
    val_indices = np.isin(labeled_nodes, new_val_nid)
    test_indices = np.isin(labeled_nodes, new_test_nid)

    # Per-meta-path homophily statistics.
    if homophily_scores:
        for meta_path, scores in homophily_scores.items():
            print(f"\nHomophily analysis for meta-path '{meta_path}':")

            train_homo = scores[train_indices]
            val_homo = scores[val_indices]
            test_homo = scores[test_indices]

            print(f"  All samples mean homophily: {np.mean(scores):.3f} ± {np.std(scores):.3f}")
            print(f"  Train set mean homophily: {np.mean(train_homo):.3f} ± {np.std(train_homo):.3f}")
            print(f"  Val set mean homophily: {np.mean(val_homo):.3f} ± {np.std(val_homo):.3f}")
            print(f"  Test set mean homophily: {np.mean(test_homo):.3f} ± {np.std(test_homo):.3f}")

            # Gap between train and test homophily distributions.
            train_test_diff = np.mean(train_homo) - np.mean(test_homo)
            print(f"  Train-Test difference: {train_test_diff:.3f}")
    else:
        print("No valid meta-paths found for homophily analysis")

    return new_train_nid, new_val_nid, new_test_nid


def create_ood_data_split_by_degree(bias_type, meta_path_adjs, labeled_nodes, train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) data split driven by degree bias.

    Per-relation degree features are clustered with K-means (via
    create_ood_data_split), then degree statistics of the resulting split are
    printed for each relation and for the total degree.

    Args:
        bias_type: bias type label used in log messages (expected 'degree').
        meta_path_adjs: dict mapping meta-path name -> adjacency matrix.
        labeled_nodes: indices of the labeled nodes.
        train_val_ratio: fraction of the large cluster assigned to training.

    Returns:
        new_train_nid, new_val_nid, new_test_nid: new train/val/test node ids.
    """

    features, degree_info = compute_degree_features_for_clustering(meta_path_adjs, labeled_nodes)
    print(f"Using degree features with {features.shape[1]} relation types")

    new_train_nid, new_val_nid, new_test_nid = create_ood_data_split(bias_type, features, labeled_nodes,
                                                                     train_val_ratio)

    # Analyze how the train/val/test sets differ under this bias type.

    print(f"\n=== {bias_type.upper()} Bias Analysis for Data Splits ===")

    # Boolean masks locating each split inside labeled_nodes.
    train_indices = np.isin(labeled_nodes, new_train_nid)
    val_indices = np.isin(labeled_nodes, new_val_nid)
    test_indices = np.isin(labeled_nodes, new_test_nid)

    # Per-relation degree statistics.
    print(f"\nDegree analysis:")
    for relation_type, degrees in degree_info.items():
        train_degrees = degrees[train_indices]
        val_degrees = degrees[val_indices]
        test_degrees = degrees[test_indices]

        print(f"\n  Relation '{relation_type}':")
        print(f"    All samples mean degree: {np.mean(degrees):.3f} ± {np.std(degrees):.3f}")
        print(f"    Train set mean degree: {np.mean(train_degrees):.3f} ± {np.std(train_degrees):.3f}")
        print(f"    Val set mean degree: {np.mean(val_degrees):.3f} ± {np.std(val_degrees):.3f}")
        print(f"    Test set mean degree: {np.mean(test_degrees):.3f} ± {np.std(test_degrees):.3f}")

        # Gap between train and test degree distributions.
        train_test_diff = np.mean(train_degrees) - np.mean(test_degrees)
        print(f"    Train-Test difference: {train_test_diff:.3f}")

    # Total degree (summed over all relations) statistics.
    total_degrees = np.sum(list(degree_info.values()), axis=0)
    train_total_degrees = total_degrees[train_indices]
    val_total_degrees = total_degrees[val_indices]
    test_total_degrees = total_degrees[test_indices]

    print(f"\n  Total degree analysis:")
    print(f"    All samples mean total degree: {np.mean(total_degrees):.3f} ± {np.std(total_degrees):.3f}")
    print(f"    Train set mean total degree: {np.mean(train_total_degrees):.3f} ± {np.std(train_total_degrees):.3f}")
    print(f"    Val set mean total degree: {np.mean(val_total_degrees):.3f} ± {np.std(val_total_degrees):.3f}")
    print(f"    Test set mean total degree: {np.mean(test_total_degrees):.3f} ± {np.std(test_total_degrees):.3f}")

    train_test_diff = np.mean(train_total_degrees) - np.mean(test_total_degrees)
    print(f"    Train-Test total degree difference: {train_test_diff:.3f}")

    return new_train_nid, new_val_nid, new_test_nid


def create_ood_data_split_by_feature(bias_type, checkpt_folder, target_features, labeled_nodes, train_nid,
                                     train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) data split driven by feature bias.

    PCA-reduced node features are clustered with K-means (via
    create_ood_data_split), feature mean/std statistics of the resulting split
    are printed, and a PCA visualization is saved to checkpt_folder.

    Args:
        bias_type: bias type label used in log messages (expected 'feature').
        checkpt_folder: output folder for the PCA visualization.
        target_features: feature matrix of the target nodes.
        labeled_nodes: indices of the labeled nodes.
        train_nid: original training node ids (used to fit the plotting PCA).
        train_val_ratio: fraction of the large cluster assigned to training.

    Returns:
        new_train_nid, new_val_nid, new_test_nid: new train/val/test node ids.
    """

    # NOTE(review): if target_features is None this helper returns None
    # features, so features.shape[1] below would fail — confirm callers
    # never pass None here.
    features, pca_model = compute_feature_features_for_clustering(target_features, labeled_nodes)
    print(f"Using node features with {features.shape[1]} dimensions")

    new_train_nid, new_val_nid, new_test_nid = create_ood_data_split(bias_type, features, labeled_nodes,
                                                                     train_val_ratio)

    # Analyze how the train/val/test sets differ under this bias type.
    print(f"\n=== {bias_type.upper()} Bias Analysis for Data Splits ===")

    # Boolean masks locating each split inside labeled_nodes.
    train_indices = np.isin(labeled_nodes, new_train_nid)
    val_indices = np.isin(labeled_nodes, new_val_nid)
    test_indices = np.isin(labeled_nodes, new_test_nid)

    # Per-node feature mean/std statistics.
    if target_features is not None:
        labeled_features = target_features[labeled_nodes].cpu().numpy()
        feature_means = np.mean(labeled_features, axis=1)  # mean feature value per node
        feature_stds = np.std(labeled_features, axis=1)  # feature std per node

        print(f"\nFeature analysis:")
        train_feature_means = feature_means[train_indices]
        val_feature_means = feature_means[val_indices]
        test_feature_means = feature_means[test_indices]

        train_feature_stds = feature_stds[train_indices]
        val_feature_stds = feature_stds[val_indices]
        test_feature_stds = feature_stds[test_indices]

        print(f"  All samples mean feature value: {np.mean(feature_means):.3f} ± {np.std(feature_means):.3f}")
        print(f"  Train set mean feature value: {np.mean(train_feature_means):.3f} ± {np.std(train_feature_means):.3f}")
        print(f"  Val set mean feature value: {np.mean(val_feature_means):.3f} ± {np.std(val_feature_means):.3f}")
        print(f"  Test set mean feature value: {np.mean(test_feature_means):.3f} ± {np.std(test_feature_means):.3f}")

        print(f"\n  Feature variance analysis:")
        print(f"  All samples mean feature std: {np.mean(feature_stds):.3f} ± {np.std(feature_stds):.3f}")
        print(f"  Train set mean feature std: {np.mean(train_feature_stds):.3f} ± {np.std(train_feature_stds):.3f}")
        print(f"  Val set mean feature std: {np.mean(val_feature_stds):.3f} ± {np.std(val_feature_stds):.3f}")
        print(f"  Test set mean feature std: {np.mean(test_feature_stds):.3f} ± {np.std(test_feature_stds):.3f}")

        # Gap between train and test feature distributions.
        mean_diff = np.mean(train_feature_means) - np.mean(test_feature_means)
        std_diff = np.mean(train_feature_stds) - np.mean(test_feature_stds)
        print(f"  Train-Test mean difference: {mean_diff:.3f}")
        print(f"  Train-Test std difference: {std_diff:.3f}")
    else:
        print("No node features available for feature bias analysis")

    # Fit PCA on the original train ids and plot the new splits.
    # NOTE(review): the original comment said "use first 5 components" but
    # n_components=128 is passed — confirm which is intended.
    scores = compute_pca_by_train(target_features, labeled_nodes, train_nid, n_components=128)

    visualize_pca_features_bias(scores, labeled_nodes, new_train_nid, new_val_nid, new_test_nid, checkpt_folder)

    return new_train_nid, new_val_nid, new_test_nid


def create_ood_data_split_by_bias_type(bias_type, meta_path_adjs, labels, target_features, labeled_nodes,
                                       train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) data split with K-means, driven by a
    selectable bias type, and print a distribution analysis of the new split.

    Args:
        bias_type: one of 'homophily', 'degree', or 'feature'.
        meta_path_adjs: dict mapping meta-path name -> adjacency matrix.
        labels: node labels (used only for homophily bias).
        target_features: target-node feature matrix (used only for feature bias).
        labeled_nodes: indices of the labeled nodes.
        train_val_ratio: fraction of the large cluster assigned to training.

    Returns:
        (new_train_nid, new_val_nid, new_test_nid,
         homophily_scores, degree_info, pca_features, pca_model) — the bias
        artifacts that do not belong to the selected bias type are None.

    Raises:
        ValueError: if bias_type is not one of the supported values.
    """
    # BUG FIX: these were previously declared `global` and only the one
    # matching bias_type was assigned, so the final return raised NameError
    # for the remaining names unless those globals happened to pre-exist.
    homophily_scores = None
    degree_info = None
    pca_features = None
    pca_model = None

    print(f"Creating OOD split based on {bias_type.upper()} bias...")

    # Compute the clustering features for the requested bias type.
    if bias_type == 'homophily':
        features, homophily_scores = compute_homophily_features_for_clustering(meta_path_adjs, labels, labeled_nodes)
        print(f"Using homophily features with {features.shape[1]} meta-paths")
    elif bias_type == 'degree':
        features, degree_info = compute_degree_features_for_clustering(meta_path_adjs, labeled_nodes)
        print(f"Using degree features with {features.shape[1]} relation types")
    elif bias_type == 'feature':
        features, pca_model = compute_feature_features_for_clustering(target_features, labeled_nodes)
        pca_features = features
        print(f"Using node features with {features.shape[1]} dimensions")
    else:
        raise ValueError(f"Unknown bias type: {bias_type}. Must be 'homophily', 'degree', or 'feature'")

    # The clustering + large/small-cluster split logic was an exact inline
    # copy of create_ood_data_split (same KMeans seed, same shuffle, same
    # log output), so reuse that function instead of duplicating it.
    new_train_nid, new_val_nid, new_test_nid = create_ood_data_split(bias_type, features, labeled_nodes,
                                                                     train_val_ratio)

    # ---- Distribution analysis of the new split under the chosen bias ----
    print(f"\n=== {bias_type.upper()} Bias Analysis for Data Splits ===")

    # Boolean masks locating each split inside labeled_nodes.
    train_indices = np.isin(labeled_nodes, new_train_nid)
    val_indices = np.isin(labeled_nodes, new_val_nid)
    test_indices = np.isin(labeled_nodes, new_test_nid)

    if bias_type == 'homophily':
        # Per-meta-path homophily statistics.
        if homophily_scores:
            for meta_path, scores in homophily_scores.items():
                print(f"\nHomophily analysis for meta-path '{meta_path}':")

                train_homo = scores[train_indices]
                val_homo = scores[val_indices]
                test_homo = scores[test_indices]

                print(f"  All samples mean homophily: {np.mean(scores):.3f} ± {np.std(scores):.3f}")
                print(f"  Train set mean homophily: {np.mean(train_homo):.3f} ± {np.std(train_homo):.3f}")
                print(f"  Val set mean homophily: {np.mean(val_homo):.3f} ± {np.std(val_homo):.3f}")
                print(f"  Test set mean homophily: {np.mean(test_homo):.3f} ± {np.std(test_homo):.3f}")

                # Gap between train and test homophily distributions.
                train_test_diff = np.mean(train_homo) - np.mean(test_homo)
                print(f"  Train-Test difference: {train_test_diff:.3f}")
        else:
            print("No valid meta-paths found for homophily analysis")

    elif bias_type == 'degree':
        # Per-relation degree statistics.
        print(f"\nDegree analysis:")
        for relation_type, degrees in degree_info.items():
            train_degrees = degrees[train_indices]
            val_degrees = degrees[val_indices]
            test_degrees = degrees[test_indices]

            print(f"\n  Relation '{relation_type}':")
            print(f"    All samples mean degree: {np.mean(degrees):.3f} ± {np.std(degrees):.3f}")
            print(f"    Train set mean degree: {np.mean(train_degrees):.3f} ± {np.std(train_degrees):.3f}")
            print(f"    Val set mean degree: {np.mean(val_degrees):.3f} ± {np.std(val_degrees):.3f}")
            print(f"    Test set mean degree: {np.mean(test_degrees):.3f} ± {np.std(test_degrees):.3f}")

            # Gap between train and test degree distributions.
            train_test_diff = np.mean(train_degrees) - np.mean(test_degrees)
            print(f"    Train-Test difference: {train_test_diff:.3f}")

        # Total degree (summed over all relations) statistics.
        total_degrees = np.sum(list(degree_info.values()), axis=0)
        train_total_degrees = total_degrees[train_indices]
        val_total_degrees = total_degrees[val_indices]
        test_total_degrees = total_degrees[test_indices]

        print(f"\n  Total degree analysis:")
        print(f"    All samples mean total degree: {np.mean(total_degrees):.3f} ± {np.std(total_degrees):.3f}")
        print(f"    Train set mean total degree: {np.mean(train_total_degrees):.3f} ± {np.std(train_total_degrees):.3f}")
        print(f"    Val set mean total degree: {np.mean(val_total_degrees):.3f} ± {np.std(val_total_degrees):.3f}")
        print(f"    Test set mean total degree: {np.mean(test_total_degrees):.3f} ± {np.std(test_total_degrees):.3f}")

        train_test_diff = np.mean(train_total_degrees) - np.mean(test_total_degrees)
        print(f"    Train-Test total degree difference: {train_test_diff:.3f}")

    elif bias_type == 'feature':
        # Per-node feature mean/std statistics.
        if target_features is not None:
            labeled_features = target_features[labeled_nodes].cpu().numpy()
            feature_means = np.mean(labeled_features, axis=1)  # mean feature value per node
            feature_stds = np.std(labeled_features, axis=1)  # feature std per node

            print(f"\nFeature analysis:")
            train_feature_means = feature_means[train_indices]
            val_feature_means = feature_means[val_indices]
            test_feature_means = feature_means[test_indices]

            train_feature_stds = feature_stds[train_indices]
            val_feature_stds = feature_stds[val_indices]
            test_feature_stds = feature_stds[test_indices]

            print(f"  All samples mean feature value: {np.mean(feature_means):.3f} ± {np.std(feature_means):.3f}")
            print(f"  Train set mean feature value: {np.mean(train_feature_means):.3f} ± {np.std(train_feature_means):.3f}")
            print(f"  Val set mean feature value: {np.mean(val_feature_means):.3f} ± {np.std(val_feature_means):.3f}")
            print(f"  Test set mean feature value: {np.mean(test_feature_means):.3f} ± {np.std(test_feature_means):.3f}")

            print(f"\n  Feature variance analysis:")
            print(f"  All samples mean feature std: {np.mean(feature_stds):.3f} ± {np.std(feature_stds):.3f}")
            print(f"  Train set mean feature std: {np.mean(train_feature_stds):.3f} ± {np.std(train_feature_stds):.3f}")
            print(f"  Val set mean feature std: {np.mean(val_feature_stds):.3f} ± {np.std(val_feature_stds):.3f}")
            print(f"  Test set mean feature std: {np.mean(test_feature_stds):.3f} ± {np.std(test_feature_stds):.3f}")

            # Gap between train and test feature distributions.
            mean_diff = np.mean(train_feature_means) - np.mean(test_feature_means)
            std_diff = np.mean(train_feature_stds) - np.mean(test_feature_stds)
            print(f"  Train-Test mean difference: {mean_diff:.3f}")
            print(f"  Train-Test std difference: {std_diff:.3f}")
        else:
            print("No node features available for feature bias analysis")

    return new_train_nid, new_val_nid, new_test_nid, homophily_scores, degree_info, pca_features, pca_model