import numpy as np
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import k_means
import pandas as pd

def calculate_center_and_radius(gb):
    """Return (centroid, radius) of a granular ball.

    The centroid is the per-feature mean of the ball's samples; the radius
    is the largest Euclidean distance from any sample to that centroid.
    """
    centroid = np.mean(gb, axis=0)
    distances_to_centroid = np.linalg.norm(gb - centroid, axis=1)
    return centroid, distances_to_centroid.max()

def splits_ball(gb, k):
    """Partition a granular ball into at most ``k`` sub-balls via k-means.

    ``k`` is capped at the number of distinct rows so k-means never asks for
    more clusters than unique points; the fixed random_state keeps the
    clustering reproducible across runs.
    """
    n_distinct = np.unique(gb, axis=0).shape[0]
    k = min(k, n_distinct)
    _, labels, _ = k_means(X=gb, n_clusters=k, n_init=1, random_state=8)
    return [gb[labels == lab] for lab in range(k)]

def splits(gb_list, num, k=2):
    """Return a refined ball list: every ball holding at least ``num`` samples
    is replaced by its k-means sub-balls; smaller balls pass through unchanged.
    """
    refined = []
    for ball in gb_list:
        if ball.shape[0] >= num:
            refined.extend(splits_ball(ball, k))
        else:
            refined.append(ball)
    return refined

def assign_points_to_closest_gb(data, gb_centers):
    """Assign each sample to the index of its nearest granular-ball center.

    Parameters
    ----------
    data : (n_samples, n_features) array of samples.
    gb_centers : (n_balls, n_features) array of ball centroids.

    Returns
    -------
    (n_samples,) int array: entry i is the row index in ``gb_centers`` closest
    (Euclidean) to ``data[i]``. Ties resolve to the lowest index, matching the
    np.argmin semantics of the original per-sample loop.
    """
    # One vectorized pairwise-distance call replaces the O(n) Python loop of
    # per-sample norm computations; results are identical.
    return cdist(data, gb_centers).argmin(axis=1).astype(int)

def fuzzy_similarity(t_data, sigma=0, k=2):
    """Build a fuzzy similarity matrix between samples via granular balls.

    The data is recursively split into granular balls with k-means until no
    ball changes; each sample is then represented by the center of its
    nearest ball. Similarity between two samples is 1 minus their center
    distance scaled by twice the mean non-zero pairwise distance, with
    values below ``sigma`` hard-thresholded to 0.

    Parameters
    ----------
    t_data : (n_samples, n_features) array of (sub-)attribute columns.
    sigma : similarity cut-off; entries below it become 0.
    k : number of sub-balls each split attempts to produce.

    Returns
    -------
    (n_samples, n_samples) similarity matrix.
    """
    n_samples, n_features = t_data.shape
    gb_list = [t_data]
    # Stop splitting a ball once it holds fewer than sqrt(n) samples.
    num = np.ceil(n_samples ** 0.5)

    # Split repeatedly until a fixed point is reached (no ball was split).
    while True:
        prev = len(gb_list)
        gb_list = splits(gb_list, num=num, k=k)
        if len(gb_list) == prev:
            break

    gb_centers = np.zeros((len(gb_list), n_features))
    for i, gb in enumerate(gb_list):
        gb_centers[i], _ = calculate_center_and_radius(gb)

    # Replace every sample by the center of its closest granular ball.
    point_to_gb = assign_points_to_closest_gb(t_data, gb_centers)
    pt_centers = gb_centers[point_to_gb]

    distances = cdist(pt_centers, pt_centers)
    # Scale distances by the mean of all non-zero pairwise distances.
    # The factor 2 here is a tunable parameter.
    non_zero_distances = distances[distances > 0]
    if len(non_zero_distances) > 0:
        avg_dist = non_zero_distances.mean()
        sim = 1 - distances / (2 * avg_dist)
    else:
        # All representative centers coincide: everything is fully similar.
        sim = np.ones_like(distances)

    # Hard-threshold weak similarities to zero.
    sim[sim < sigma] = 0
    return sim


# --- MODIFIED FUNCTION START ---

def _positive_region(rel, D):
    """Positive-region size POS for a fuzzy dissimilarity relation.

    For decision class j, the fuzzy lower approximation of sample i is
    min over samples l of max(rel[i, l], D[j, l]). Each sample contributes
    its best (max) lower-approximation value across all classes; POS is the
    sum of those contributions over all samples.
    """
    num_classes = D.shape[0]
    n = rel.shape[0]
    all_lows = np.zeros((num_classes, n))
    for j in range(num_classes):
        all_lows[j, :] = np.maximum(rel, D[j]).min(axis=1)
    return all_lows.max(axis=0).sum()


def GBFRD(data, sigma=0, target_R=0.95):
    """Greedy forward feature selection via granular-ball fuzzy rough sets.

    Parameters
    ----------
    data : (n, m) array whose last column holds the class labels.
    sigma : similarity threshold forwarded to fuzzy_similarity.
    target_R : stop once the dependency degree R reaches this value.

    Returns
    -------
    (selected_indices, R): list of 0-based attribute indices and the
    dependency degree of that combination.
    """
    n, m = data.shape
    labels = data[:, -1]
    unique_labels = np.unique(labels)
    # One-hot decision matrix: D[j, i] == 1 iff sample i belongs to class j.
    D = np.zeros((unique_labels.shape[0], n), dtype=int)
    for j, lv in enumerate(unique_labels):
        D[j] = (labels == lv).astype(int)

    LA = np.arange(m - 1)
    # Dependency degree of every single attribute on its own.
    single_R = []
    for idx in LA:
        sim = fuzzy_similarity(data[:, [idx]], sigma, k=2)
        rel = 1 - sim  # fuzzy dissimilarity relation
        R = _positive_region(rel, D) / n
        print(f"a{idx+1} 的 R = {R:.4f}")
        single_R.append((idx, R))

    # Seed the combination with the best single attribute.
    base_idx, base_R = max(single_R, key=lambda x: x[1])
    current_combo = [base_idx]
    current_R = base_R
    print(f"\n初始基底: a{base_idx+1} (R = {base_R:.4f})")

    remaining = set(LA) - set(current_combo)
    while remaining and current_R < target_R:
        best_candidate, best_R = None, -1.0

        # Evaluate each remaining attribute appended to the current combo.
        for idx in remaining:
            combo = current_combo + [idx]
            sim_c = fuzzy_similarity(data[:, combo], sigma, k=2)
            R_c = _positive_region(1 - sim_c, D) / n
            print(f"尝试组合 {''.join('a'+str(i+1) for i in combo)} 的 R = {R_c:.4f}")
            if R_c > best_R:
                best_candidate, best_R = idx, R_c

        # No candidate improves the dependency degree: stop early.
        if best_R <= current_R:
            print("无法进一步提升 R，提前终止。")
            break

        current_combo.append(best_candidate)
        remaining.remove(best_candidate)
        current_R = best_R
        combo_name = ''.join('a'+str(i+1) for i in current_combo)
        print(f"加入 a{best_candidate+1}，新组合 {combo_name} 的 R = {current_R:.4f}")

        if current_R >= target_R:
            print(f"依赖度已达到目标 R ≥ {target_R}，终止搜索。")
            break

    final_combo = ''.join('a'+str(i+1) for i in current_combo)
    print(f"\n最终组合: {final_combo}，依赖度 R = {current_R:.4f}")
    return current_combo, current_R

# --- MODIFIED FUNCTION END ---

from sklearn.preprocessing import MinMaxScaler

if __name__ == "__main__":
    # Load the raw dataset; alternative datasets are kept commented out so
    # they can be swapped in quickly.
    #df = pd.read_csv(r"Code\shuxueyuejian__DataSet\process_wine_1.csv",index_col=0)
    df = pd.read_csv(r"Code\shuxueyuejian__DataSet\lymphography_clasfiy.csv")
    #df = pd.read_csv(r"Code\shuxueyuejian__DataSet\WDBC.csv")
    # df = pd.read_csv(r"Code\shuxueyuejian__DataSet\leaf.csv")
    #df = pd.read_csv(r"Code\shuxueyuejian__DataSet\Sonar.csv")

    # Split into feature matrix and label vector (last column = label).
    features = df.iloc[:, :-1].values
    targets = df.iloc[:, -1].values

    # Min-max normalize the features only; labels stay untouched.
    normalized = MinMaxScaler().fit_transform(features)

    # Re-attach the unscaled label column to form the working data matrix.
    data = np.hstack([normalized, targets.reshape(-1, 1)])

    # Run granular-ball fuzzy rough feature selection.
    combo, R = GBFRD(data, sigma=0.8, target_R=0.99)

    print("选中的属性索引（0-base）:", combo)
    print("最终依赖度 R:", R)