import numpy as np
import torch
from sklearn.decomposition import PCA
from sklearn.preprocessing import StandardScaler


# 计算每条元路径中每个节点的同质性分数
# 同质性偏差：计算目标节点与其不同元路径邻居间的标签一致性，量化同质性水平。
def compute_homophily_level(meta_path_adjs, labels, target_nodes):
    """
    Compute the homophily level of target nodes under each meta-path.

    Args:
        meta_path_adjs: dict mapping meta-path name -> sparse adjacency
            object exposing a ``coo()`` method returning (row, col, value).
        labels: node label tensor.
        target_nodes: iterable of target node indices.

    Returns:
        dict mapping meta-path name -> np.ndarray of per-node label
        consistency scores (one entry per target node, in input order).
    """
    homophily_scores = {}

    for meta_path, adj in meta_path_adjs.items():
        # Only homogeneous meta-paths (same start/end node type, e.g. "APA")
        # are considered: label consistency is only meaningful between
        # nodes of the same type.
        if meta_path[0] == meta_path[-1]:
            # Hoist the COO decomposition out of the per-node loop; the
            # original re-extracted row/col for every single node.
            row, col, _ = adj.coo()
            consistency_scores = []
            for node in target_nodes:
                # Neighbors reached from `node` via this meta-path.
                neighbors = col[row == node]
                if len(neighbors) > 0:
                    # Fraction of neighbors sharing the node's label.
                    consistency = (labels[neighbors] == labels[node]).float().mean()
                    consistency_scores.append(consistency.item())
                else:
                    # Isolated nodes get a neutral score of 0.5.
                    consistency_scores.append(0.5)

            homophily_scores[meta_path] = np.array(consistency_scores)

    return homophily_scores


def categorize_nodes_by_homophily(homophily_scores, thresholds=(0.3, 0.7)):
    """
    Categorize nodes into low / medium / high homophily groups.

    Args:
        homophily_scores: dict mapping meta-path -> per-node score array.
        thresholds: (low, high) cutoffs; composite scores strictly below
            the first are "low", strictly above the second are "high".
            A tuple default replaces the original mutable list default.

    Returns:
        dict with keys 'low' / 'medium' / 'high', each mapping to an array
        of node positions (indices into the score arrays).
    """
    # Composite score: plain average of each node's scores across meta-paths.
    composite_scores = np.mean(list(homophily_scores.values()), axis=0)

    low_homo = composite_scores < thresholds[0]
    high_homo = composite_scores > thresholds[1]
    medium_homo = ~(low_homo | high_homo)

    return {
        'low': np.where(low_homo)[0],
        'medium': np.where(medium_homo)[0],
        'high': np.where(high_homo)[0]
    }


def compute_homophily_features_for_clustering(meta_path_adjs, labels, labeled_nodes):
    """
    Build the feature matrix used for homophily-bias K-means clustering.

    Args:
        meta_path_adjs: dict of meta-path adjacency matrices.
        labels: node label tensor.
        labeled_nodes: indices of labeled nodes.

    Returns:
        Standardized homophily feature matrix of shape
        [num_labeled_nodes, num_meta_paths] (a single zero column when no
        homogeneous meta-path exists).
    """
    homophily_scores = compute_homophily_level(meta_path_adjs, labels, labeled_nodes)
    if homophily_scores:
        # One column per homogeneous meta-path: reshape each 1-D score
        # vector into a column and concatenate along axis 1.
        features_list = [scores.reshape(-1, 1) for scores in homophily_scores.values()]
        homophily_features = np.concatenate(features_list, axis=1)
    else:
        # No suitable meta-path: fall back to a single zero column.
        homophily_features = np.zeros((len(labeled_nodes), 1))

    # Standardize features. StandardScaler is already imported at module
    # level, so the original's redundant local import was removed.
    scaler = StandardScaler()
    homophily_features_scaled = scaler.fit_transform(homophily_features)

    return homophily_features_scaled


# 度偏差：计算节点在不同关系类型下的度分布，识别度偏置。
def compute_multi_relation_degrees(meta_path_adjs, target_nodes):
    """
    Compute each target node's degree under every relation / meta-path.

    Args:
        meta_path_adjs: dict mapping relation name -> sparse adjacency
            object exposing ``storage.rowcount()`` (per-row nnz counts).
        target_nodes: iterable of target node indices.

    Returns:
        dict mapping relation name -> np.ndarray of degrees, one per
        target node, in input order.
    """
    degree_info = {}

    for meta_path, adj in meta_path_adjs.items():
        # Hoist the rowcount vector out of the per-node loop; the original
        # recomputed it once for every node.
        rowcount = adj.storage.rowcount()
        degrees = [rowcount[node].item() for node in target_nodes]
        degree_info[meta_path] = np.array(degrees)

    return degree_info


def compute_degree_features_for_clustering(meta_path_adjs, labeled_nodes):
    """
    Build the feature matrix used for degree-bias K-means clustering.

    Args:
        meta_path_adjs: dict of meta-path adjacency matrices.
        labeled_nodes: indices of labeled nodes.

    Returns:
        Standardized degree feature matrix of shape
        [num_labeled_nodes, num_relation_types].
    """
    degree_info = compute_multi_relation_degrees(meta_path_adjs, labeled_nodes)
    # One column per relation type.
    degree_features = np.column_stack(list(degree_info.values()))

    # Standardize features. StandardScaler is already imported at module
    # level, so the original's redundant local import was removed.
    scaler = StandardScaler()
    degree_features_scaled = scaler.fit_transform(degree_features)

    return degree_features_scaled


def compute_feature_features_for_clustering(target_features, labeled_nodes, n_components=50):
    """
    Build the (optionally PCA-reduced) matrix for feature-bias K-means clustering.

    Args:
        target_features: target-node feature tensor, or None.
        labeled_nodes: indices of labeled nodes.
        n_components: target dimensionality for PCA.

    Returns:
        Standardized feature matrix of shape [num_labeled_nodes, n_components]
        (the raw width is kept when it is already <= n_components).
    """
    if target_features is not None:
        labeled_target_features = target_features[labeled_nodes].cpu().numpy()
        # Apply PCA only when the raw dimensionality exceeds the target.
        if labeled_target_features.shape[1] > n_components:
            pca = PCA(n_components=n_components)
            labeled_target_features = pca.fit_transform(labeled_target_features)
    else:
        # No node features available: fall back to zeros.
        labeled_target_features = np.zeros((len(labeled_nodes), n_components))

    # Standardize features. PCA and StandardScaler are already imported at
    # module level, so the original's redundant local imports were removed.
    scaler = StandardScaler()
    feature_features_scaled = scaler.fit_transform(labeled_target_features)

    return feature_features_scaled


def create_ood_data_split_by_bias_type(bias_type, meta_path_adjs, labels, target_features,
                                       labeled_nodes, train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) data split via K-means on one bias feature.

    The labeled nodes are clustered into two groups on the chosen bias
    feature; the larger cluster is split into train/val and the smaller
    cluster is held out as the OOD test set.

    Args:
        bias_type: 'homophily', 'degree', or 'feature'.
        meta_path_adjs: dict of meta-path adjacency matrices.
        labels: node label tensor (used for 'homophily' bias).
        target_features: target-node feature tensor (used for 'feature' bias).
        labeled_nodes: indices of labeled nodes.
        train_val_ratio: fraction of the larger cluster used for training.

    Returns:
        new_train_nid, new_val_nid, new_test_nid: new train/val/test node indices.

    Raises:
        ValueError: if bias_type is not one of the supported values.
    """
    from sklearn.cluster import KMeans

    print(f"Creating OOD split based on {bias_type.upper()} bias...")

    # Compute the clustering features for the requested bias dimension.
    if bias_type == 'homophily':
        features = compute_homophily_features_for_clustering(meta_path_adjs, labels, labeled_nodes)
        print(f"Using homophily features with {features.shape[1]} meta-paths")
    elif bias_type == 'degree':
        features = compute_degree_features_for_clustering(meta_path_adjs, labeled_nodes)
        print(f"Using degree features with {features.shape[1]} relation types")
    elif bias_type == 'feature':
        features = compute_feature_features_for_clustering(target_features, labeled_nodes)
        print(f"Using node features with {features.shape[1]} dimensions")
    else:
        raise ValueError(f"Unknown bias type: {bias_type}. Must be 'homophily', 'degree', or 'feature'")

    # Partition the labeled nodes into two clusters.
    kmeans = KMeans(n_clusters=2, random_state=42, n_init=10)
    cluster_labels = kmeans.fit_predict(features)

    cluster_0_nodes = labeled_nodes[cluster_labels == 0]
    cluster_1_nodes = labeled_nodes[cluster_labels == 1]

    print(f"Cluster 0 size: {len(cluster_0_nodes)}")
    print(f"Cluster 1 size: {len(cluster_1_nodes)}")

    # The larger cluster becomes train/val; the smaller one is the OOD test set.
    if len(cluster_0_nodes) >= len(cluster_1_nodes):
        large_cluster = cluster_0_nodes
        small_cluster = cluster_1_nodes
    else:
        large_cluster = cluster_1_nodes
        small_cluster = cluster_0_nodes

    # Shuffle with a seeded RNG so the train/val split is reproducible,
    # matching the fixed random_state used for K-means above. The original
    # used the global np.random state, making the split non-deterministic.
    rng = np.random.RandomState(42)
    rng.shuffle(large_cluster)
    split_point = int(len(large_cluster) * train_val_ratio)

    new_train_nid = large_cluster[:split_point]
    new_val_nid = large_cluster[split_point:]

    # The smaller cluster is held out as the test set.
    new_test_nid = small_cluster

    print(
        f"New {bias_type} OOD split - Train: {len(new_train_nid)}, Val: {len(new_val_nid)}, Test: {len(new_test_nid)}")

    return new_train_nid, new_val_nid, new_test_nid


def create_ood_data_split(comprehensive_features, labeled_nodes, train_val_ratio=0.6):
    """
    Create an out-of-distribution (OOD) split via K-means on precomputed
    features (kept for backward compatibility).

    Args:
        comprehensive_features: feature matrix used for clustering.
        labeled_nodes: indices of labeled nodes.
        train_val_ratio: fraction of the larger cluster used for training.

    Returns:
        new_train_nid, new_val_nid, new_test_nid: new train/val/test node indices.
    """
    from sklearn.cluster import KMeans

    # Partition the labeled nodes into two clusters.
    kmeans = KMeans(n_clusters=2, random_state=42, n_init=10)
    cluster_labels = kmeans.fit_predict(comprehensive_features)

    cluster_0_nodes = labeled_nodes[cluster_labels == 0]
    cluster_1_nodes = labeled_nodes[cluster_labels == 1]

    print(f"Cluster 0 size: {len(cluster_0_nodes)}")
    print(f"Cluster 1 size: {len(cluster_1_nodes)}")

    # The larger cluster becomes train/val; the smaller one is the OOD test set.
    if len(cluster_0_nodes) >= len(cluster_1_nodes):
        large_cluster = cluster_0_nodes
        small_cluster = cluster_1_nodes
    else:
        large_cluster = cluster_1_nodes
        small_cluster = cluster_0_nodes

    # Shuffle with a seeded RNG so the train/val split is reproducible,
    # matching the fixed random_state used for K-means above. The original
    # used the global np.random state, making the split non-deterministic.
    rng = np.random.RandomState(42)
    rng.shuffle(large_cluster)
    split_point = int(len(large_cluster) * train_val_ratio)

    new_train_nid = large_cluster[:split_point]
    new_val_nid = large_cluster[split_point:]

    # The smaller cluster is held out as the test set.
    new_test_nid = small_cluster

    print(f"New split - Train: {len(new_train_nid)}, Val: {len(new_val_nid)}, Test: {len(new_test_nid)}")

    return new_train_nid, new_val_nid, new_test_nid


def analyze_split_bias_by_type(bias_type, meta_path_adjs, labels, target_features, all_labeled_nodes, train_nid,
                               val_nid, test_nid):
    """
    Analyze how the train/val/test splits differ along one bias dimension.

    Prints summary statistics (mean ± std) of the chosen bias metric for the
    whole labeled set and for each split, plus the train-test gap.

    Args:
        bias_type: 'homophily', 'degree', or 'feature'.
        meta_path_adjs: dict of meta-path adjacency matrices.
        labels: node label tensor.
        target_features: target-node feature tensor (may be None).
        all_labeled_nodes: all labeled node indices.
        train_nid, val_nid, test_nid: train/val/test node indices.

    Raises:
        ValueError: if bias_type is not one of the supported values.
    """

    print(f"\n=== {bias_type.upper()} Bias Analysis for Data Splits ===")

    # Boolean masks locating each split's members within all_labeled_nodes.
    train_indices = np.isin(all_labeled_nodes, train_nid)
    val_indices = np.isin(all_labeled_nodes, val_nid)
    test_indices = np.isin(all_labeled_nodes, test_nid)

    if bias_type == 'homophily':
        # Homophily analysis: label consistency per homogeneous meta-path.
        homophily_scores = compute_homophily_level(meta_path_adjs, labels, all_labeled_nodes)
        if homophily_scores:
            for meta_path, scores in homophily_scores.items():
                print(f"\nHomophily analysis for meta-path '{meta_path}':")

                train_homo = scores[train_indices]
                val_homo = scores[val_indices]
                test_homo = scores[test_indices]

                print(f"  All samples mean homophily: {np.mean(scores):.3f} ± {np.std(scores):.3f}")
                print(f"  Train set mean homophily: {np.mean(train_homo):.3f} ± {np.std(train_homo):.3f}")
                print(f"  Val set mean homophily: {np.mean(val_homo):.3f} ± {np.std(val_homo):.3f}")
                print(f"  Test set mean homophily: {np.mean(test_homo):.3f} ± {np.std(test_homo):.3f}")

                # Distribution gap between train and test.
                train_test_diff = np.mean(train_homo) - np.mean(test_homo)
                print(f"  Train-Test difference: {train_test_diff:.3f}")
        else:
            print("No valid meta-paths found for homophily analysis")

    elif bias_type == 'degree':
        # Degree analysis: node degree under each relation / meta-path.
        degree_info = compute_multi_relation_degrees(meta_path_adjs, all_labeled_nodes)

        print(f"\nDegree analysis:")
        for relation_type, degrees in degree_info.items():
            train_degrees = degrees[train_indices]
            val_degrees = degrees[val_indices]
            test_degrees = degrees[test_indices]

            print(f"\n  Relation '{relation_type}':")
            print(f"    All samples mean degree: {np.mean(degrees):.3f} ± {np.std(degrees):.3f}")
            print(f"    Train set mean degree: {np.mean(train_degrees):.3f} ± {np.std(train_degrees):.3f}")
            print(f"    Val set mean degree: {np.mean(val_degrees):.3f} ± {np.std(val_degrees):.3f}")
            print(f"    Test set mean degree: {np.mean(test_degrees):.3f} ± {np.std(test_degrees):.3f}")

            # Distribution gap between train and test.
            train_test_diff = np.mean(train_degrees) - np.mean(test_degrees)
            print(f"    Train-Test difference: {train_test_diff:.3f}")

        # Total degree summed over all relation types.
        total_degrees = np.sum(list(degree_info.values()), axis=0)
        train_total_degrees = total_degrees[train_indices]
        val_total_degrees = total_degrees[val_indices]
        test_total_degrees = total_degrees[test_indices]

        print(f"\n  Total degree analysis:")
        print(f"    All samples mean total degree: {np.mean(total_degrees):.3f} ± {np.std(total_degrees):.3f}")
        print(
            f"    Train set mean total degree: {np.mean(train_total_degrees):.3f} ± {np.std(train_total_degrees):.3f}")
        print(f"    Val set mean total degree: {np.mean(val_total_degrees):.3f} ± {np.std(val_total_degrees):.3f}")
        print(f"    Test set mean total degree: {np.mean(test_total_degrees):.3f} ± {np.std(test_total_degrees):.3f}")

        train_test_diff = np.mean(train_total_degrees) - np.mean(test_total_degrees)
        print(f"    Train-Test total degree difference: {train_test_diff:.3f}")

    elif bias_type == 'feature':
        # Feature analysis: per-node mean/std over the raw feature vectors.
        if target_features is not None:
            labeled_features = target_features[all_labeled_nodes].cpu().numpy()
            feature_means = np.mean(labeled_features, axis=1)  # per-node mean feature value
            feature_stds = np.std(labeled_features, axis=1)  # per-node feature std

            print(f"\nFeature analysis:")
            train_feature_means = feature_means[train_indices]
            val_feature_means = feature_means[val_indices]
            test_feature_means = feature_means[test_indices]

            train_feature_stds = feature_stds[train_indices]
            val_feature_stds = feature_stds[val_indices]
            test_feature_stds = feature_stds[test_indices]

            print(f"  All samples mean feature value: {np.mean(feature_means):.3f} ± {np.std(feature_means):.3f}")
            print(
                f"  Train set mean feature value: {np.mean(train_feature_means):.3f} ± {np.std(train_feature_means):.3f}")
            print(f"  Val set mean feature value: {np.mean(val_feature_means):.3f} ± {np.std(val_feature_means):.3f}")
            print(
                f"  Test set mean feature value: {np.mean(test_feature_means):.3f} ± {np.std(test_feature_means):.3f}")

            print(f"\n  Feature variance analysis:")
            print(f"  All samples mean feature std: {np.mean(feature_stds):.3f} ± {np.std(feature_stds):.3f}")
            print(f"  Train set mean feature std: {np.mean(train_feature_stds):.3f} ± {np.std(train_feature_stds):.3f}")
            print(f"  Val set mean feature std: {np.mean(val_feature_stds):.3f} ± {np.std(val_feature_stds):.3f}")
            print(f"  Test set mean feature std: {np.mean(test_feature_stds):.3f} ± {np.std(test_feature_stds):.3f}")

            # Distribution gaps between train and test.
            mean_diff = np.mean(train_feature_means) - np.mean(test_feature_means)
            std_diff = np.mean(train_feature_stds) - np.mean(test_feature_stds)
            print(f"  Train-Test mean difference: {mean_diff:.3f}")
            print(f"  Train-Test std difference: {std_diff:.3f}")
        else:
            print("No node features available for feature bias analysis")

    else:
        raise ValueError(f"Unknown bias type: {bias_type}")


def analyze_split_bias(meta_path_adjs, adjs, init_labels, target_features,
                       all_labeled_nodes, train_nid, val_nid, test_nid):
    """
    Analyze train/val/test distribution differences across all bias dimensions
    (kept for backward compatibility; see analyze_split_bias_by_type).

    Args:
        meta_path_adjs: dict of meta-path adjacency matrices.
        adjs: dict of raw relation adjacency matrices (used for the degree
            analysis, unlike analyze_split_bias_by_type which uses meta-paths).
        init_labels: initial node labels.
        target_features: target-node feature tensor (may be None).
        all_labeled_nodes: all labeled node indices.
        train_nid, val_nid, test_nid: train/val/test node indices.
    """

    print("\n=== Comprehensive Bias Analysis for Data Splits ===")

    # 1. Homophily analysis per homogeneous meta-path.
    homophily_scores = compute_homophily_level(meta_path_adjs, init_labels, all_labeled_nodes)
    if homophily_scores:
        for meta_path, scores in homophily_scores.items():
            print(f"\nHomophily analysis for meta-path '{meta_path}':")

            # Boolean masks locating each split within all_labeled_nodes.
            train_indices = np.isin(all_labeled_nodes, train_nid)
            val_indices = np.isin(all_labeled_nodes, val_nid)
            test_indices = np.isin(all_labeled_nodes, test_nid)

            train_homo = scores[train_indices]
            val_homo = scores[val_indices]
            test_homo = scores[test_indices]

            print(f"  All samples mean homophily: {np.mean(scores):.3f}")
            print(f"  Train set mean homophily: {np.mean(train_homo):.3f}")
            print(f"  Val set mean homophily: {np.mean(val_homo):.3f}")
            print(f"  Test set mean homophily: {np.mean(test_homo):.3f}")

    # 2. Degree analysis over the raw relation adjacencies.
    degree_info = compute_multi_relation_degrees(adjs, all_labeled_nodes)
    total_degrees = np.sum(list(degree_info.values()), axis=0)

    print(f"\nDegree analysis:")
    train_indices = np.isin(all_labeled_nodes, train_nid)
    val_indices = np.isin(all_labeled_nodes, val_nid)
    test_indices = np.isin(all_labeled_nodes, test_nid)

    train_degrees = total_degrees[train_indices]
    val_degrees = total_degrees[val_indices]
    test_degrees = total_degrees[test_indices]

    print(f"  All samples mean degree: {np.mean(total_degrees):.3f}")
    print(f"  Train set mean degree: {np.mean(train_degrees):.3f}")
    print(f"  Val set mean degree: {np.mean(val_degrees):.3f}")
    print(f"  Test set mean degree: {np.mean(test_degrees):.3f}")

    # 3. Feature analysis (only when node features are available).
    if target_features is not None:
        labeled_features = target_features[all_labeled_nodes].cpu().numpy()
        feature_means = np.mean(labeled_features, axis=1)  # per-node mean feature value

        print(f"\nFeature analysis:")
        train_feature_means = feature_means[train_indices]
        val_feature_means = feature_means[val_indices]
        test_feature_means = feature_means[test_indices]

        print(f"  All samples mean feature value: {np.mean(feature_means):.3f}")
        print(f"  Train set mean feature value: {np.mean(train_feature_means):.3f}")
        print(f"  Val set mean feature value: {np.mean(val_feature_means):.3f}")
        print(f"  Test set mean feature value: {np.mean(test_feature_means):.3f}")


def categorize_nodes_by_degree(degree_info, percentiles=(25, 75)):
    """
    Categorize nodes into low / medium / high degree groups.

    Args:
        degree_info: dict mapping relation type -> per-node degree array.
        percentiles: (low, high) percentile cutoffs on the total degree.
            A tuple default replaces the original mutable list default.

    Returns:
        dict with keys 'low' / 'medium' / 'high', each mapping to an array
        of node positions (indices into the degree arrays).
    """
    # Total degree: sum of the node's degrees across all relation types.
    total_degrees = np.sum(list(degree_info.values()), axis=0)

    low_threshold = np.percentile(total_degrees, percentiles[0])
    high_threshold = np.percentile(total_degrees, percentiles[1])

    # Boundaries are inclusive on both the low and high side.
    low_degree = total_degrees <= low_threshold
    high_degree = total_degrees >= high_threshold
    medium_degree = ~(low_degree | high_degree)

    return {
        'low': np.where(low_degree)[0],
        'medium': np.where(medium_degree)[0],
        'high': np.where(high_degree)[0]
    }


# 特征偏差：使用PCA降维并分析特征分布，识别特征偏差
def apply_pca_to_features(features_dict, n_components=128):
    """
    Standardize and PCA-reduce each feature matrix to at most ``n_components``
    dimensions.

    Feature types already at or below ``n_components`` columns are passed
    through untouched (with a None model entry).

    Args:
        features_dict: dict mapping feature type -> feature tensor.
        n_components: target dimensionality for the PCA reduction.

    Returns:
        (pca_features, pca_models): reduced feature tensors per type, and the
        fitted {'pca': ..., 'scaler': ...} models (None when not reduced).
    """
    pca_features = {}
    pca_models = {}

    for feat_type, features in features_dict.items():
        if features.shape[1] <= n_components:
            # Already small enough: keep the tensor as-is, no fitted model.
            pca_features[feat_type] = features
            pca_models[feat_type] = None
            continue

        # Standardize before PCA so each dimension contributes equally.
        scaler = StandardScaler()
        standardized = scaler.fit_transform(features.cpu().numpy())

        # Fit and apply the PCA reduction.
        pca = PCA(n_components=n_components)
        reduced = pca.fit_transform(standardized)

        pca_features[feat_type] = torch.FloatTensor(reduced)
        pca_models[feat_type] = {'pca': pca, 'scaler': scaler}

    return pca_features, pca_models


def categorize_nodes_by_features(pca_features, target_nodes, n_clusters=3):
    """
    Group target nodes by K-means clustering on their (PCA-reduced) features.

    Args:
        pca_features: dict mapping feature type -> feature tensor; all types
            are concatenated along the feature dimension before clustering.
        target_nodes: indices of the nodes to cluster.
        n_clusters: number of K-means clusters.

    Returns:
        dict mapping 'cluster_0' ... 'cluster_{n_clusters-1}' to the subset
        of target_nodes assigned to each cluster.
    """
    from sklearn.cluster import KMeans

    # Concatenate every feature type into one matrix, then select targets.
    combined = torch.cat(list(pca_features.values()), dim=1)
    selected = combined[target_nodes].cpu().numpy()

    # Cluster with a fixed seed for reproducibility.
    assignments = KMeans(n_clusters=n_clusters, random_state=42).fit_predict(selected)

    # Map each cluster id to the target nodes it contains.
    return {
        f'cluster_{i}': target_nodes[assignments == i]
        for i in range(n_clusters)
    }


def print_bias_analysis_results(bias_results):
    """
    Pretty-print per-group evaluation metrics for each bias dimension.

    Args:
        bias_results: dict mapping bias type -> {group name -> metrics dict
            with 'accuracy', 'micro_f1', 'macro_f1', 'count' keys}.
    """
    print("\n=== Bias Analysis Results ===")

    for bias_type in bias_results:
        print(f"\n{bias_type.upper()} BIAS:")
        for group_name, metrics in bias_results[bias_type].items():
            line = (
                f"  {group_name}: Acc={metrics['accuracy']:.3f}, "
                f"Micro-F1={metrics['micro_f1']:.3f}, "
                f"Macro-F1={metrics['macro_f1']:.3f}, "
                f"Count={metrics['count']}"
            )
            print(line)