import numpy as np
from scipy.spatial.distance import cdist
from sklearn.cluster import k_means
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import warnings
warnings.filterwarnings("ignore")



#依赖度逐渐递减趋于0，先选最大的单个属性依赖度为基底，然后组合属性依赖度也找最大的（整体趋势还是递减）

'''
单属性依赖度:
a1: 0.080347
a2: 0.145174
a3: 0.357167

基底 a3  δ=0.357167
  + a2  δ: 0.357167 → 0.267162  
  + a1  δ: 0.267162 → 0.258455  

========== 结果 ==========
最终特征子集: a1 a2 a3
组合依赖度  : 0.258455
'''

# -------------------------------------------------- fuzzy-similarity
def splits_ball(gb, k):
    """Split one granular ball into at most *k* sub-balls with k-means.

    The cluster count is capped by the number of distinct rows so k-means
    never receives more clusters than unique points.
    """
    distinct = np.unique(gb, axis=0)
    n_clusters = min(k, len(distinct))
    _, assignment, _ = k_means(gb, n_clusters=n_clusters, n_init=1,
                               random_state=8)
    return [gb[assignment == c] for c in range(n_clusters)]

def splits(gb_list, num, k=2):
    """One refinement pass: split every ball holding at least *num* points.

    Balls smaller than *num* are carried over unchanged; larger ones are
    replaced by their k-means sub-balls.
    """
    refined = []
    for ball in gb_list:
        if ball.shape[0] >= num:
            refined.extend(splits_ball(ball, k))
        else:
            refined.append(ball)
    return refined

def assign_points(data, centers):
    """Return, for every sample, the index of its nearest center.

    Nearness is squared Euclidean distance; ties go to the lowest index.
    """
    sq_dist = ((data[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    return sq_dist.argmin(axis=1)

def fuzzy_similarity(data, sigma=0, k=2):
    """Build granular balls, then a fuzzy similarity matrix over samples.

    Returns ``(labels, sim)`` where ``labels[i]`` is the ball index of
    sample *i* and ``sim`` is the n x n similarity between samples'
    ball centroids, with entries below *sigma* zeroed out.
    """
    n, m = data.shape
    min_size = np.ceil(n ** 0.5)  # balls smaller than sqrt(n) stop splitting
    balls = [data]
    # Refine repeatedly; stop once a full pass adds no new balls.
    while True:
        before = len(balls)
        balls = splits(balls, min_size, k)
        if len(balls) == before:
            break
    ball_centers = np.vstack([b.mean(axis=0) for b in balls])
    labels = assign_points(data, ball_centers)
    representatives = ball_centers[labels]  # each sample -> its ball centroid
    sim = 1 - cdist(representatives, representatives) / m
    sim[sim < sigma] = 0  # threshold away weak similarities
    return labels, sim

# -------------------------------------------------- 依赖度
def membership_from_labels(labels):
    """Return a 0/1 membership matrix: one row per distinct label value.

    Row *r* marks (with 1) the positions in *labels* equal to the r-th
    smallest distinct label.
    """
    labels = np.asarray(labels)
    classes = np.unique(labels)
    return (labels[None, :] == classes[:, None]).astype(int)

def dependency(data, cols, rel_PN, sigma=0):
    """Fuzzy dependency degree Dep(S) of the attribute subset *cols*.

    An empty subset yields 0.0. *rel_PN* is the (1 - similarity) relation
    matrix computed under the full attribute set.
    """
    if not len(cols):
        return 0.0
    n = len(data)
    labels, _ = fuzzy_similarity(data[:, cols], sigma, k=2)
    total = 0.0
    # Accumulate the lower-approximation contribution of every class row.
    for indicator in membership_from_labels(labels):
        total += np.maximum(rel_PN, indicator).min(axis=1).sum()
    return total / n

# -------------------------------------------------- “非递增-最大化”循环
def nonincreasing_max_search(data, sigma=0,
                             epsilon_same=1e-6, patience=3):
    """
    Greedy forward selection under a non-increasing dependency constraint.

    Each round adds the attribute whose combined dependency is largest among
    those not exceeding the previous value (i.e. the smallest drop); stops
    after *patience* consecutive rounds with no measurable change, or when
    no attribute keeps the dependency non-increasing.

    Returns ``(selected_column_index_set, final_dependency)``.
    """
    n_attrs = data.shape[1]
    # Relation matrix from the similarity under ALL attributes.
    _, sim_full = fuzzy_similarity(data, sigma)
    rel_PN = 1 - sim_full

    def delta(cols):
        return dependency(data, cols, rel_PN, sigma)

    # -------- 1) single-attribute stage --------
    singles = [delta([j]) for j in range(n_attrs)]
    base = int(np.argmax(singles))
    selected = {base}
    dep_prev = singles[base]

    print("单属性依赖度:")
    for idx, val in enumerate(singles, 1):
        print(f"a{idx}: {val:.6f}")
    print(f"\n基底 a{base+1}  δ={dep_prev:.6f}")

    # -------- 2) incremental stage --------
    remaining = set(range(n_attrs)) - selected
    stagnate = 0

    while remaining and stagnate < patience:
        # 2.1 Evaluate every remaining attribute; among the non-increasing
        #     candidates keep the first one with the highest dependency
        #     (same tie-break as max() over insertion order).
        best_j, best_dep = None, None
        for j in remaining:
            dep_j = delta(sorted(selected | {j}))
            if dep_j <= dep_prev + 1e-12 and (best_dep is None or dep_j > best_dep):
                best_j, best_dep = j, dep_j

        if best_j is None:  # nothing satisfies "dependency non-increasing"
            print("\n无更多满足“依赖度非增”的属性，算法终止。")
            break

        diff = abs(best_dep - dep_prev)
        msg = "(无变化)" if diff < epsilon_same else ""
        print(f"  + a{best_j+1}  δ: {dep_prev:.6f} → {best_dep:.6f}  {msg}")

        # Commit the winner and update the stagnation counter.
        selected.add(best_j)
        remaining.remove(best_j)
        stagnate = stagnate + 1 if diff < epsilon_same else 0
        dep_prev = best_dep

    return selected, dep_prev

# -------------------------------------------------- main
if __name__ == "__main__":
    # Load the dataset; the raw string fixes the invalid "\g" escape in the
    # original Windows-style path literal (same bytes at runtime, no
    # SyntaxWarning on modern CPython).
    data = pd.read_csv(r"Code\guiyihua_test.csv").values
    # Min-max rescale only the columns whose values are all >= 1 and that
    # are not constant (constant columns would divide by zero in the scaler).
    idn = (data >= 1).all(0) & (data.max(0) != data.min(0))
    if idn.any():
        data[:, idn] = MinMaxScaler().fit_transform(data[:, idn])

    sigma = 0.5
    reduct, dep_final = nonincreasing_max_search(
        data, sigma, epsilon_same=1e-6, patience=3
    )

    print("\n========== 结果 ==========")
    feats = " ".join(f"a{i+1}" for i in sorted(reduct))
    print(f"最终特征子集: {feats}")
    print(f"组合依赖度  : {dep_final:.6f}")
