import numpy as np
from sklearn.neighbors import NearestNeighbors
from sklearn.preprocessing import StandardScaler  # 导入 StandardScaler

def mlsmote(X, y, k_neighbors=5):
    """
    Multi-label SMOTE (MLSMOTE) oversampling with feature standardization.

    Args:
        X: Feature matrix (numpy array), shape (n_samples, n_features).
        y: Multi-label binary matrix (numpy array), shape (n_samples, n_labels),
           entries are 0/1.
        k_neighbors: Number of neighbors (int) used in the k-NN search.

    Returns:
        X_resampled: Oversampled feature matrix (numpy array).
        y_resampled: Oversampled multi-label matrix (numpy array).
    """
    n_samples, n_features = X.shape
    n_labels = y.shape[1]

    # 1. Identify minority samples. Simplified per-label scheme: a sample is
    #    "minority" if it is positive for any label whose positive count is
    #    smaller than its negative count.
    minority_samples_indices = []
    for label_index in range(n_labels):
        positive_label_indices = np.where(y[:, label_index] == 1)[0]
        negative_label_indices = np.where(y[:, label_index] == 0)[0]

        if len(positive_label_indices) < len(negative_label_indices):
            minority_samples_indices.extend(positive_label_indices)

    minority_samples_indices = np.unique(minority_samples_indices)

    # Nothing qualifies as minority: return the data unchanged.
    if not minority_samples_indices.size:
        return X, y

    y_minority = y[minority_samples_indices]
    n_minority_samples = len(minority_samples_indices)

    # 2. Standardize features so that neighbor distances are not dominated
    #    by large-scale columns.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    X_minority_scaled = X_scaled[minority_samples_indices]

    # 3. Find the k nearest neighbors of each minority sample in the scaled
    #    space. The search runs over the whole dataset, so neighbor index 0
    #    is the query sample itself.
    knn = NearestNeighbors(n_neighbors=k_neighbors)
    knn.fit(X_scaled)
    neighbors_indices = knn.kneighbors(X_minority_scaled, return_distance=False)

    X_synthetic = []
    y_synthetic = []

    # 4. Generate one synthetic sample per minority sample by interpolating
    #    in the scaled feature space.
    for i in range(n_minority_samples):
        sample_scaled = X_minority_scaled[i]
        label = y_minority[i]
        neighbor_index_list = neighbors_indices[i]

        # Randomly pick a neighbor, excluding index 0 (the sample itself).
        neighbor_index = np.random.choice(neighbor_index_list[1:])

        lambda_val = np.random.random()

        # Linear interpolation between the sample and its chosen neighbor.
        synthetic_feature_scaled = sample_scaled + lambda_val * (
            X_scaled[neighbor_index] - sample_scaled
        )

        # Map the synthetic point back to the original feature space.
        # inverse_transform requires a 2-D array, hence the reshape.
        synthetic_feature = scaler.inverse_transform(
            synthetic_feature_scaled.reshape(1, -1)
        )[0]

        X_synthetic.append(synthetic_feature)
        # The synthetic sample inherits the minority sample's label vector.
        y_synthetic.append(label.copy())

    X_resampled = np.vstack((X, np.array(X_synthetic)))
    y_resampled = np.vstack((y, np.array(y_synthetic)))

    return X_resampled, y_resampled


def mlsmote_constrained_label(X, y, k_neighbors=5):
    """
    Multi-label SMOTE (MLSMOTE) oversampling with feature standardization and
    label-correlation handling (label-wise OR), enforcing mutual exclusion
    between the "normal" label and the disease labels.

    Args:
        X: Feature matrix (numpy array), shape (n_samples, n_features).
        y: Multi-label binary matrix (numpy array), shape (n_samples, n_labels),
           entries are 0/1. Column 0 is the "normal" label; the remaining
           columns are disease labels.
        k_neighbors: Number of neighbors (int) used in the k-NN search.

    Returns:
        X_resampled: Oversampled feature matrix (numpy array).
        y_resampled: Oversampled multi-label matrix (numpy array).

    Raises:
        ValueError: If y does not have exactly 8 columns (the code assumes a
            fixed layout of 1 "normal" column + 7 disease columns).
    """
    n_samples, n_features = X.shape
    n_labels = y.shape[1]

    # 0. Validate the label layout (8 columns, column 0 = "normal").
    if n_labels != 8:
        raise ValueError("代码假设标签长度为 8，且第一列为 '正常' 标签，请检查数据维度!")

    # 1. Identify minority samples. Simplified per-label scheme: a sample is
    #    "minority" if it is positive for any label whose positive count is
    #    smaller than its negative count.
    minority_samples_indices = []
    for label_index in range(n_labels):
        positive_label_indices = np.where(y[:, label_index] == 1)[0]
        negative_label_indices = np.where(y[:, label_index] == 0)[0]

        if len(positive_label_indices) < len(negative_label_indices):
            minority_samples_indices.extend(positive_label_indices)

    minority_samples_indices = np.unique(minority_samples_indices)

    # Nothing qualifies as minority: return the data unchanged.
    if not minority_samples_indices.size:
        return X, y

    y_minority = y[minority_samples_indices]
    n_minority_samples = len(minority_samples_indices)

    # 2. Standardize features so that neighbor distances are not dominated
    #    by large-scale columns.
    scaler = StandardScaler()
    X_scaled = scaler.fit_transform(X)
    X_minority_scaled = X_scaled[minority_samples_indices]

    # 3. Find the k nearest neighbors of each minority sample in the scaled
    #    space. The search runs over the whole dataset, so neighbor index 0
    #    is the query sample itself.
    knn = NearestNeighbors(n_neighbors=k_neighbors)
    knn.fit(X_scaled)
    neighbors_indices = knn.kneighbors(X_minority_scaled, return_distance=False)

    X_synthetic = []
    y_synthetic = []

    # 4. Generate one synthetic sample per minority sample: interpolate
    #    features in the scaled space, OR the labels, then enforce the
    #    normal/disease exclusion constraint.
    for i in range(n_minority_samples):
        sample_scaled = X_minority_scaled[i]
        label = y_minority[i]
        neighbor_index_list = neighbors_indices[i]

        # Randomly pick a neighbor, excluding index 0 (the sample itself).
        neighbor_index = np.random.choice(neighbor_index_list[1:])

        lambda_val = np.random.random()

        # Linear interpolation between the sample and its chosen neighbor.
        synthetic_feature_scaled = sample_scaled + lambda_val * (
            X_scaled[neighbor_index] - sample_scaled
        )

        # Label synthesis: element-wise OR of the two parents' labels.
        neighbor_label = y[neighbor_index]
        synthetic_label_or = np.logical_or(label, neighbor_label).astype(int)

        # Enforce mutual exclusion: "normal" (column 0) is 1 only when every
        # disease label (columns 1..7) is 0.
        disease_labels = synthetic_label_or[1:]
        normal_flag = 0 if np.any(disease_labels == 1) else 1
        synthetic_label = np.concatenate(([normal_flag], disease_labels))

        # Map the synthetic point back to the original feature space.
        # inverse_transform requires a 2-D array, hence the reshape.
        synthetic_feature = scaler.inverse_transform(
            synthetic_feature_scaled.reshape(1, -1)
        )
        X_synthetic.append(synthetic_feature[0])
        y_synthetic.append(synthetic_label)

    X_resampled = np.vstack((X, np.array(X_synthetic)))
    y_resampled = np.vstack((y, np.array(y_synthetic)))

    return X_resampled, y_resampled