import numpy as np
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import k_means
from sklearn.preprocessing import MinMaxScaler
import pandas as pd

# Global registry mapping "GB[attr,i]" keys to the set of point labels
# ("x1", "x2", ...) contained in granular ball i of attribute `attr`.
GBi_dict = {}

def GBi_number(idx1, point_to_gb, attr_name=None):
    """Record, per granular ball, which sample points it contains.

    idx1        -- zero-based attribute index, used only to derive the
                   default attribute name "a{idx1+1}"
    point_to_gb -- array assigning each sample point to a ball label
    attr_name   -- explicit attribute name; overrides the default
    """
    global GBi_dict
    attr_name = attr_name if attr_name is not None else f"a{idx1 + 1}"
    for ball_no, lab in enumerate(np.unique(point_to_gb), start=1):
        members = np.flatnonzero(point_to_gb == lab)
        GBi_dict[f"GB[{attr_name},{ball_no}]"] = {f"x{p + 1}" for p in members}
    # Only print the partition result.
    print(f"属性 {attr_name} 的 point_to_gb 已存储: {point_to_gb}")

def calculate_center_and_radius(gb):
    """Return the centroid of a granular ball and its radius.

    The radius is the largest Euclidean distance from any sample
    (row of `gb`) to the centroid.
    """
    centroid = np.mean(gb, axis=0)
    distances = np.linalg.norm(gb - centroid, axis=1)
    return centroid, distances.max()

def splits(gb_list, num, k=2):
    """One refinement pass over a list of granular balls.

    Every ball holding at least `num` samples is split into (up to) `k`
    sub-balls; smaller balls are kept untouched.
    """
    refined = []
    for ball in gb_list:
        if ball.shape[0] >= num:
            refined.extend(splits_ball(ball, k))
        else:
            refined.append(ball)
    return refined

def splits_ball(gb, k):
    """Partition one ball into at most `k` clusters via k-means.

    `k` is capped at the number of distinct rows so k-means is never
    asked for more clusters than there are unique samples. Fixed
    random_state keeps the split deterministic.
    """
    n_unique = np.unique(gb, axis=0).shape[0]
    k = min(k, n_unique)
    _, labels, _ = k_means(X=gb, n_clusters=k, n_init=1, random_state=8)
    return [gb[labels == c] for c in range(k)]

def assign_points_to_closest_gb(data, gb_centers):
    """Assign every sample to the index of its nearest granular-ball center.

    data       -- (n, m) array of samples
    gb_centers -- (g, m) array of ball centers
    Returns an integer array of length n. Ties resolve to the lowest
    center index (np.argmin semantics, matching the original per-row loop).
    """
    # Vectorized pairwise Euclidean distances replace the per-sample
    # Python loop; the result is identical, just computed in C.
    return np.argmin(cdist(data, gb_centers), axis=1)

def fuzzy_similarity(t_data, sigma=0, k=2):
    """Build granular balls over `t_data` and derive a fuzzy similarity matrix.

    Balls are split repeatedly (k-means into `k` sub-balls) until every
    ball holds fewer than sqrt(n) samples. Each point then inherits the
    center of its closest ball; similarity between two points is
    1 - dist(center_i, center_j) / m, with values below `sigma` zeroed.

    Returns (point_to_gb, sim): the ball index of each point and the
    n-by-n similarity matrix.
    """
    n_samples, n_attrs = t_data.shape
    min_split_size = np.ceil(n_samples ** 0.5)

    # Refine until a pass produces no new balls.
    balls = [t_data]
    while True:
        count_before = len(balls)
        balls = splits(balls, num=min_split_size, k=k)
        if len(balls) == count_before:
            break

    centers = np.zeros((len(balls), n_attrs))
    for j, ball in enumerate(balls):
        centers[j] = calculate_center_and_radius(ball)[0]

    point_to_gb = assign_points_to_closest_gb(t_data, centers)
    center_of_point = centers[point_to_gb]
    # Normalize center distances by the attribute count, then threshold.
    sim = 1 - cdist(center_of_point, center_of_point) / n_attrs
    sim[sim < sigma] = 0
    return point_to_gb, sim

def compute_membership_matrix(GBi_dict, attr):
    """Build a binary (balls x points) membership matrix for attribute `attr`.

    Rows are ordered by the ball number embedded in each "GB[attr,i]"
    key; columns cover points x1..x_max. Returns an empty (0, 0) array
    when the attribute has no recorded balls.
    """
    prefix = f"GB[{attr},"
    relevant = [key for key in GBi_dict if key.startswith(prefix)]
    if not relevant:
        return np.empty((0, 0))

    def ball_no(key):
        # "GB[a1,3]" -> 3
        return int(key.split(",")[1].rstrip("]"))

    relevant.sort(key=ball_no)
    n_cols = max(int(p[1:]) for key in relevant for p in GBi_dict[key])
    membership = np.zeros((len(relevant), n_cols), dtype=int)
    for row, key in enumerate(relevant):
        cols = [int(p[1:]) - 1 for p in GBi_dict[key]]
        membership[row, cols] = 1
    return membership

def _dependency_degree(sim, membership_all, n):
    """Fuzzy-rough dependency degree of a similarity matrix.

    sim            -- n x n fuzzy similarity matrix of the candidate attributes
    membership_all -- binary (balls x n) membership matrix of the
                      full-attribute partition
    n              -- sample count, normalizes POS into a degree

    Extracted helper: this computation was previously duplicated for the
    single-attribute scan and the forward-selection candidate loop.
    """
    rel = 1 - sim
    POS = 0.0
    for b in range(membership_all.shape[0]):
        # Lower approximation: per point, min over columns of
        # max(1 - sim, membership).
        low = np.maximum(rel, membership_all[b]).min(axis=1)
        POS += low.sum()
    return POS / n


def GBFRD(data, sigma=0, target_R=0.95):
    """Forward-selection approximate dependency computation.

    data     -- n x m numpy array of conditional attributes
    sigma    -- similarity threshold passed to fuzzy_similarity
    target_R -- stop once the dependency degree reaches this value
                (default 0.95)

    Returns (selected_attribute_indices, final_dependency_degree).
    Side effect: populates the module-level GBi_dict via GBi_number.
    """
    n, m = data.shape
    LA = np.arange(m)

    # 1. Precompute granular balls / membership matrix of the full attribute set.
    all_attr = "".join(f"a{j+1}" for j in LA)
    pt2gb_all, _ = fuzzy_similarity(data[:, LA], sigma, k=2)
    GBi_number(0, pt2gb_all, all_attr)
    membership_all = compute_membership_matrix(GBi_dict, all_attr)

    # 2. Dependency degree of each single attribute; the strongest seeds the search.
    single_R = []
    for idx in LA:
        attr = f"a{idx+1}"
        pt2gb, sim = fuzzy_similarity(data[:, [idx]], sigma, k=2)
        GBi_number(idx, pt2gb, attr)

        R = _dependency_degree(sim, membership_all, n)
        print(f"{attr} 的 R = {R:.4f}")
        single_R.append((idx, R))

    base_idx, base_R = max(single_R, key=lambda x: x[1])
    current_combo = [base_idx]
    current_R = base_R
    print(f"\n初始基底: a{base_idx+1} (R = {base_R:.4f})")

    # 3. Forward selection: greedily add the attribute that raises R the most.
    remaining = set(LA) - set(current_combo)
    while remaining and current_R < target_R:
        best_candidate, best_R = None, -1.0

        # Try each candidate attribute and keep the best resulting R.
        for idx in remaining:
            combo = current_combo + [idx]
            combo_name = "".join(f"a{i+1}" for i in combo)

            pt2gb_c, sim_c = fuzzy_similarity(data[:, combo], sigma, k=2)
            GBi_number(combo[0], pt2gb_c, combo_name)

            R_c = _dependency_degree(sim_c, membership_all, n)

            print(f"尝试组合 {combo_name} 的 R = {R_c:.4f}")
            if R_c > best_R:
                best_candidate, best_R = idx, R_c

        # Stop when even the best candidate does not improve R.
        if best_R <= current_R:
            print("无法进一步提升 R，提前终止。")
            break

        # Otherwise commit the best candidate to the current combination.
        current_combo.append(best_candidate)
        remaining.remove(best_candidate)
        current_R = best_R
        print(f"加入 a{best_candidate+1}，新组合 {' '.join('a'+str(i+1) for i in current_combo)} 的 R = {current_R:.4f}")

        # Stop as soon as the target dependency degree is reached.
        if current_R >= target_R:
            print(f"依赖度已达到目标 R ≥ {target_R}，终止搜索。")
            break

    final_name = "".join(f"a{i+1}" for i in current_combo)
    print(f"\n最终组合: {final_name}，依赖度 R = {current_R:.4f}")
    return current_combo, current_R


if __name__ == "__main__":
    #df = pd.read_csv(r"Code\processed_wine.csv",index_col=0)
    df = pd.read_csv(r"Code\Example3wrong.csv")
    data = df.iloc[:, :-1].values
    #data = df.values
    # mask = (data >= 1).all(axis=0) & (data.max(axis=0) != data.min(axis=0))
    # data[:, mask] = MinMaxScaler().fit_transform(data[:, mask])

    # sigma = 0.5，目标依赖度 0.9
    combo, R = GBFRD(data, sigma=0.5, target_R=0.7)