import numpy as np
from scipy.spatial.distance import cdist
from sklearn.cluster import k_means
from sklearn.preprocessing import MinMaxScaler
import pandas as pd
import warnings
warnings.filterwarnings("ignore")

# 单个属性的模糊关系和对立属性的隶属矩阵（最终进阶版），连续n次无明显变化，就停止找寻元素
"""
基底 = a6  依赖度 0.002150

起始基底 a6  δ=0.002150
 + a10 -> δ=0.002777   组合: a6 a10
 + a4 -> δ=0.004215   组合: a6 a10 a4
依赖度不再显著提升，停止扩展。

=== 搜索结束 ===
a6                   δ=0.002150
a6 a10               δ=0.002777
a6 a10 a4            δ=0.004215
"""

# -------------------------------------------------- global cache
# GBi_dict maps keys of the form "GB[<attr_tag>,<i>]" to the set of point
# names {"x1", "x5", ...} that fall in cluster i of the partition computed
# for attribute combination <attr_tag> (e.g. "a2a5").  It is filled by
# GBi_number and read by compute_membership_matrix / dependency_subset,
# acting as a memo so each complement partition is computed only once.
GBi_dict = {}

# -------------------------------------------------- 1) 分球 + 相似度
def splits_ball(gb, k):
    """Split the granular ball *gb* into at most *k* sub-balls via k-means.

    The cluster count is clamped to the number of distinct rows so k-means
    never receives more clusters than unique samples.
    """
    n_distinct = len(np.unique(gb, axis=0))
    n_clusters = min(k, n_distinct)
    _, labels, _ = k_means(gb, n_clusters=n_clusters, n_init=1, random_state=8)
    return [gb[labels == cluster] for cluster in range(n_clusters)]

def splits(gb_list, num, k=2):
    """One refinement pass: split every ball holding at least *num* points.

    Balls smaller than *num* are carried over unchanged; larger ones are
    replaced by their k-means sub-balls.
    """
    refined = []
    for ball in gb_list:
        if ball.shape[0] >= num:
            refined.extend(splits_ball(ball, k))
        else:
            refined.append(ball)
    return refined

def assign_points(data, centers):
    """Return, for each sample, the index of its nearest center (squared Euclidean)."""
    sq_dist = ((data[:, None, :] - centers[None, :, :]) ** 2).sum(axis=2)
    return sq_dist.argmin(axis=1)

def fuzzy_similarity(data, sigma=0, k=2):
    """Granular-ball partition of *data*; return (labels, similarity matrix).

    Balls are recursively split until a full pass produces no new ball
    (balls with fewer than sqrt(n) points are never split).  Each sample is
    then represented by its ball's center, and pairwise similarity is
    1 - dist/n_attrs, with entries below *sigma* zeroed out.
    """
    n_samples, n_attrs = data.shape
    min_size = np.ceil(n_samples ** 0.5)
    balls = [data]
    while True:
        n_before = len(balls)
        balls = splits(balls, min_size, k)
        if len(balls) == n_before:  # stable: no ball was divided this pass
            break
    centers = np.vstack([ball.mean(0) for ball in balls])
    labels = assign_points(data, centers)
    ball_centers = centers[labels]  # each sample replaced by its ball center
    sim = 1 - cdist(ball_centers, ball_centers) / n_attrs
    sim[sim < sigma] = 0
    return labels, sim



# -------------------------------------------------- 通用前向搜索：最多 k 元组
def forward_up_to_k(data, sigma=0, epsilon=1e-6, max_size=5):
    """Greedy forward selection of an attribute subset, at most *max_size* wide.

    1) Seed with the single attribute of highest dependency degree.
    2) Each round, tentatively add every remaining attribute and keep the
       best one — accepted only if it lifts the dependency by more than
       *epsilon*.
    3) Stop at *max_size* attributes or when no candidate helps.

    Returns (S, history): S is the selected column-index list and history
    holds [(subset_copy, delta), ...] for each accepted step.
    """
    n_attrs = data.shape[1]

    # -------- seed: best single attribute --------
    singles = [dependency_subset(data, [col], sigma) for col in range(n_attrs)]
    S = [int(np.argmax(singles))]
    dep_now = singles[S[0]]
    history = [(S.copy(), dep_now)]
    print(f"\n起始基底 a{S[0]+1}  δ={dep_now:.6f}")

    candidates = set(range(n_attrs)) - set(S)

    # -------- greedy growth --------
    while len(S) < max_size and candidates:
        best_attr, best_dep = None, -np.inf
        for col in candidates:
            trial_dep = dependency_subset(data, S + [col], sigma)
            if trial_dep > best_dep:
                best_attr, best_dep = col, trial_dep

        if best_dep - dep_now <= epsilon:
            print("依赖度不再显著提升，停止扩展。")
            break

        # accept the winning attribute
        S.append(best_attr)
        dep_now = best_dep
        history.append((S.copy(), dep_now))
        candidates.remove(best_attr)
        tags = " ".join(f"a{x+1}" for x in S)
        print(f" + a{best_attr+1} -> δ={dep_now:.6f}   组合: {tags}")

    return S, history

# -------------------------------------------------- 2) GBi_number ——完全字符串化
def GBi_number(labels, attr_tag):
    """Record the partition induced by *labels* into the global GBi_dict.

    Cluster number *rank* (1-based) is stored under key
    "GB[<attr_tag>,<rank>]" as the set of point names {"x1", ...}, where
    points are named by their 1-based row index.
    """
    for rank, cluster in enumerate(np.unique(labels), start=1):
        members = np.where(labels == cluster)[0]
        GBi_dict[f"GB[{attr_tag},{rank}]"] = {f"x{idx+1}" for idx in members}

# -------------------------------------------------- 3) 生成包含矩阵（通用）
def compute_membership_matrix(attr_tag):
    """Binary (clusters x points) matrix for the partition stored under *attr_tag*.

    Row r has a 1 in column p-1 iff point "x<p>" belongs to cluster r+1 of
    the cached partition.  Returns an empty (0, 0) int array when nothing
    is stored for *attr_tag*.
    """
    prefix = f"GB[{attr_tag},"
    cluster_keys = [key for key in GBi_dict if key.startswith(prefix)]
    if not cluster_keys:
        return np.empty((0, 0), int)
    # order rows by the trailing cluster index "...,<idx>]"
    cluster_keys.sort(key=lambda key: int(key.split(',')[1][:-1]))
    n_points = max(int(name[1:]) for key in cluster_keys for name in GBi_dict[key])
    matrix = np.zeros((len(cluster_keys), n_points), int)
    for row, key in enumerate(cluster_keys):
        for name in GBi_dict[key]:
            matrix[row, int(name[1:]) - 1] = 1
    return matrix

def print_membership(attr_tag):
    """Print the membership matrix for *attr_tag*, one cluster per line."""
    matrix = compute_membership_matrix(attr_tag)
    print(f"\n包含矩阵 {attr_tag}:")
    if matrix.size == 0:
        print("无数据")
        return
    for row in matrix:
        print(" ".join(str(v) for v in row))

# -------------------------------------------------- 4) 通用依赖度 δ(S)
def dependency_subset(data, subset_cols, sigma=0):
    """Fuzzy dependency degree δ(S) for the attribute subset *subset_cols*.

    rel = 1 - sim(S) is the fuzzy dissimilarity relation on S; the
    membership matrix comes from the granular-ball partition of the
    complement attributes (cached in GBi_dict so each complement is
    partitioned only once).  When S is the full attribute set, a single
    all-zero membership row is used.
    """
    n = data.shape[0]
    complement = sorted(set(range(data.shape[1])) - set(subset_cols))

    # ---- dissimilarity relation on S
    _, sim = fuzzy_similarity(data[:, subset_cols], sigma)
    rel = 1 - sim

    # ---- membership matrix of the complement's partition
    if complement:
        tag = "".join(f"a{c+1}" for c in complement)
        if f"GB[{tag},1]" not in GBi_dict:  # not cached yet — partition now
            comp_labels, _ = fuzzy_similarity(data[:, complement], sigma)
            GBi_number(comp_labels, tag)
        membership = compute_membership_matrix(tag)
    else:  # S is already the full attribute set
        membership = np.zeros((1, n), int)

    # lower approximation: row-wise min of max(rel, membership-row), summed
    total = 0.0
    for row in membership:
        total += np.maximum(rel, row).min(axis=1).sum()
    return total / n

# -------------------------------------------------- main demo
if __name__ == "__main__":
    # ---------- load & normalise ----------
    # Forward slash instead of "Datasets\horse...": "\h" is an invalid
    # escape sequence (SyntaxWarning on Python 3.12+) and a forward slash
    # works on every OS.
    df = pd.read_csv("Datasets/horse_1_12_variant1.csv")
    df = df.iloc[:, :-1]  # drop the trailing anomaly-label column

    # Cast to float: with df.values an all-integer CSV yields an int array,
    # and the in-place assignment below would silently truncate the scaled
    # values to 0/1.
    data = df.to_numpy(dtype=float)

    # Scale only non-constant columns whose values are all >= 1
    # (presumably the columns still on their raw scale — TODO confirm).
    idn = (data >= 1).all(0) & (data.max(0) != data.min(0))
    if idn.any():
        data[:, idn] = MinMaxScaler().fit_transform(data[:, idn])

    sigma = 0.5
    m = data.shape[1]

    # ---------- dependency degree of each single attribute ----------
    single_dep = [dependency_subset(data, [j], sigma) for j in range(m)]
    for j, d in enumerate(single_dep, 1):
        print(f"δ(a{j}) = {d:.6f}")

    # ---------- best single-attribute base ----------
    base = int(np.argmax(single_dep))
    print(f"\n基底 = a{base+1}  依赖度 {single_dep[base]:.6f}")

    # ---------- greedy forward search (epsilon=0: accept any strict gain) ----------
    S_final, hist = forward_up_to_k(data, sigma, epsilon=0, max_size=10)

    print("\n=== 搜索结束 ===")
    for combo, dep in hist:
        tag = " ".join(f"a{i+1}" for i in combo)
        print(f"{tag:<20} δ={dep:.6f}")