import numpy as np
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import k_means
from sklearn.preprocessing import MinMaxScaler
import pandas as pd

# --------------------------------------------------
# Global utilities
# --------------------------------------------------
GBi_dict = {}                      # maps ball key "GB[a<attr>,<ball#>]" -> set of sample names {"x<j+1>", ...}

def calculate_center_and_radius(gb):
    """Return (centroid, radius) of a granular ball.

    The radius is the largest Euclidean distance from any sample
    in ``gb`` (rows = samples) to the ball's centroid.
    """
    centroid = np.mean(gb, axis=0)
    distances = np.linalg.norm(gb - centroid, axis=1)
    return centroid, distances.max()

def splits_ball(gb, k):
    """Split one ball into (at most) k sub-balls using K-means."""
    # K-means cannot form more clusters than there are distinct points.
    n_clusters = min(k, np.unique(gb, axis=0).shape[0])
    _, labels, _ = k_means(X=gb, n_clusters=n_clusters, n_init=1, random_state=8)
    return [gb[labels == c] for c in range(n_clusters)]

def splits(gb_list, num, k=2):
    """One refinement pass: re-split every ball holding >= num samples.

    Balls smaller than ``num`` are carried over unchanged; larger ones
    are replaced by the sub-balls returned by splits_ball.
    """
    refined = []
    for ball in gb_list:
        if ball.shape[0] >= num:
            refined.extend(splits_ball(ball, k))
        else:
            refined.append(ball)
    return refined

def assign_points_to_closest_gb(data, centers):
    """Assign every sample to the index of its nearest ball center.

    Parameters:
        data:    (n, m) array of samples.
        centers: (g, m) array of ball centers.

    Returns:
        (n,) int array; entry i is the row index in ``centers`` closest
        (Euclidean) to data[i]. Ties resolve to the lowest index, same
        as the original per-point argmin loop.
    """
    # Vectorized replacement for the per-point Python loop: one pairwise
    # distance matrix, then a row-wise argmin — same result, C speed.
    return np.argmin(cdist(data, centers), axis=1)

def fuzzy_similarity(data, sigma=0, k=2):
    """Build granular balls over ``data`` and a fuzzy similarity matrix.

    Returns:
        point_to_gb: (n,) int array, ball index of every sample.
        sim:         (n, n) similarity matrix; entries below ``sigma``
                     are zeroed out.
    """
    n_samples, n_features = data.shape
    threshold = np.ceil(n_samples ** 0.5)   # minimum ball size that still splits
    balls = [data]
    # Refine repeatedly until a full pass leaves the ball count unchanged.
    while True:
        refined = splits(balls, threshold, k)
        stable = len(refined) == len(balls)
        balls = refined
        if stable:
            break
    centers = np.vstack([calculate_center_and_radius(ball)[0] for ball in balls])
    point_to_gb = assign_points_to_closest_gb(data, centers)
    representatives = centers[point_to_gb]
    sim = 1 - cdist(representatives, representatives) / n_features
    sim[sim < sigma] = 0
    return point_to_gb, sim

# --------------------------------------------------
# Build / print inclusion matrices
# --------------------------------------------------
def GBi_number(attr_idx, labels):
    """Record one attribute's ball -> sample-name mapping in the global GBi_dict."""
    attr_name = f"a{attr_idx+1}"
    for ball_no, lab in enumerate(np.unique(labels), start=1):
        members = np.where(labels == lab)[0]
        GBi_dict[f"GB[{attr_name},{ball_no}]"] = {f"x{idx+1}" for idx in members}

def compute_membership_matrix(attr):
    """Build the 0/1 inclusion matrix of attribute ``attr`` from GBi_dict.

    Row r is ball r+1 of the attribute; column j is sample "x{j+1}".
    Returns an empty (0, 0) array when no balls are recorded for ``attr``.
    """
    prefix = f"GB[{attr},"
    ball_keys = [key for key in GBi_dict if key.startswith(prefix)]
    # Sort by the ball number embedded in the key, e.g. "GB[a1,3]" -> 3.
    ball_keys.sort(key=lambda key: int(key.split(',')[1][:-1]))
    if not ball_keys:
        return np.empty((0, 0), int)
    n_points = max(int(pt[1:]) for key in ball_keys for pt in GBi_dict[key])
    matrix = np.zeros((len(ball_keys), n_points), int)
    for row, key in enumerate(ball_keys):
        for pt in GBi_dict[key]:
            matrix[row, int(pt[1:]) - 1] = 1
    return matrix

# --------------------------------------------------
# Dependency-degree utilities
# --------------------------------------------------
def build_membership_from_labels(labels):
    """Turn a label vector into a one-hot (ball x sample) inclusion matrix.

    Row r marks with 1 the samples belonging to the r-th distinct label
    (distinct labels taken in np.unique order).
    """
    classes = np.unique(labels)
    # Broadcast compare: (n_classes, 1) against (1, n_samples).
    return (np.asarray(labels)[None, :] == classes[:, None]).astype(int)

def dependency_of_subset(data, cols, rel_PN, sigma=0):
    """Dependency degree of the attribute subset ``cols``.

    For every granule row A: lower approximation per sample x is
    min over y of max(rel_PN[x, y], A[y]); the degrees are summed over
    all granules and divided by the sample count (per the problem spec).
    """
    n_samples = data.shape[0]
    labels, _ = fuzzy_similarity(data[:, cols], sigma, k=2)
    membership = build_membership_from_labels(labels)
    total = sum(
        np.maximum(rel_PN, granule).min(axis=1).sum()
        for granule in membership
    )
    return total / n_samples

# --------------------------------------------------
# Greedy two-level search: single attributes first, then pairs
# --------------------------------------------------
def greedy_two_level(data, sigma=0):
    """Greedy two-level attribute search.

    Level 1 picks the single attribute with the highest dependency
    degree; level 2 pairs that winner with every remaining attribute
    and reports the best pair. Results are printed, not returned.
    """
    n_attr = data.shape[1]
    _, sim_all = fuzzy_similarity(data, sigma, k=2)
    rel_PN = 1 - sim_all   # dissimilarity w.r.t. the full attribute set

    # ---- level 1: dependency degree of each single attribute ----
    dep_single = {j: dependency_of_subset(data, [j], rel_PN, sigma)
                  for j in range(n_attr)}
    best_attr = max(dep_single, key=dep_single.get)

    print("\n单属性依赖度:")
    for j in range(n_attr):
        print(f"a{j+1}: {dep_single[j]}")
    print(f"\n选中最大属性  a{best_attr+1}  依赖度 = {dep_single[best_attr]}")

    # ---- level 2: pair the winner with each remaining attribute ----
    dep_pair = {}
    for other in range(n_attr):
        if other != best_attr:
            pair = tuple(sorted((best_attr, other)))
            dep_pair[pair] = dependency_of_subset(data, list(pair), rel_PN, sigma)

    best_pair = max(dep_pair, key=dep_pair.get)
    a, b = best_pair
    print(f"\n最佳二元组  a{a+1}a{b+1}  依赖度 = {dep_pair[best_pair]}")

# --------------------------------------------------
# (Optional) print the inclusion matrix of every GB_i
# --------------------------------------------------
def print_memberships():
    """Print the inclusion matrix of every attribute recorded in GBi_dict."""
    # Key format is "GB[a<attr>,<ball#>]"; [3:] strips the leading "GB[".
    attr_names = sorted({key.split(',')[0][3:] for key in GBi_dict})
    for name in attr_names:
        matrix = compute_membership_matrix(name)
        print(f"\n属性 {name} 的包含矩阵:")
        if matrix.size == 0:
            print("无数据")
            continue
        for row in matrix:
            print(" ".join(str(cell) for cell in row))

# --------------------------------------------------
# main
# --------------------------------------------------
if __name__ == "__main__":
    # 1) Load the data set and min-max normalise eligible columns.
    # NOTE(review): backslash path is Windows-only; consider pathlib — confirm run environment.
    data = pd.read_csv("Code\guiyihua_test.csv").values
    # Select non-constant columns whose values are all >= 1 — presumably
    # the columns still on their raw scale; TODO confirm this selection rule.
    ID = (data >= 1).all(axis=0) & (data.max(axis=0) != data.min(axis=0))
    if any(ID):
        data[:, ID] = MinMaxScaler().fit_transform(data[:, ID])

    sigma = 0.5  # similarity cut-off used throughout

    # 2) Record per-attribute granular balls in GBi_dict
    #    (only needed for later printing / debugging; may be commented out).
    for j in range(data.shape[1]):
        labs, _ = fuzzy_similarity(data[:, [j]], sigma, k=2)
        GBi_number(j, labs)

    # 3) Greedy two-level attribute search.
    print("\n========== 贪婪两级搜索 ==========")
    greedy_two_level(data, sigma)

    # 4) (Optional) print all inclusion matrices.
    # print_memberships()
