import numpy as np
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
from sklearn.cluster import k_means
from sklearn.preprocessing import MinMaxScaler
import pandas as pd

def check():
    """Trivial health-check hook; unconditionally reports success."""
    return True

# Global registry of granular-ball memberships, filled by GBi_number():
# maps keys like "GB[a1,2]" (attribute name, ball number) to the set of
# 1-based point names (e.g. {"x1", "x5"}) assigned to that ball.
GBi_dict = {}

def GBi_number(idx1, point_to_gb, attr_name=None):
    """Record which points belong to each granular ball of one attribute.

    Writes entries "GB[<attr>,<ball#>]" -> {"x<i>", ...} into the global
    GBi_dict (point names are 1-based). When attr_name is omitted it is
    derived from idx1 as "a<idx1+1>".
    """
    global GBi_dict
    name = f"a{idx1 + 1}" if attr_name is None else attr_name
    for ball_no, label in enumerate(np.unique(point_to_gb), start=1):
        members = np.flatnonzero(point_to_gb == label)
        GBi_dict[f"GB[{name},{ball_no}]"] = {f"x{j + 1}" for j in members}
    print(f"属性 {name} 的 point_to_gb 已存储: {point_to_gb}")

def calculate_center_and_radius(gb):
    """Return (center, radius) of a granular ball.

    center is the column-wise mean of the samples; radius is the largest
    Euclidean distance from any sample to that center.
    """
    center = gb.mean(axis=0)
    radius = np.linalg.norm(gb - center, axis=1).max()
    return center, radius

def splits(gb_list, num, k=2):
    """Perform one splitting pass over a list of granular balls.

    Balls holding fewer than `num` samples pass through unchanged; every
    other ball is divided into up to `k` sub-balls via splits_ball().
    """
    result = []
    for ball in gb_list:
        pieces = [ball] if ball.shape[0] < num else splits_ball(ball, k)
        result.extend(pieces)
    return result

def splits_ball(gb, k):
    """Split one granular ball into k clusters with k-means.

    k is capped at the number of distinct rows so k-means stays valid;
    random_state is fixed for reproducible partitions. Returns one
    sub-ball (array of rows) per cluster label.
    """
    distinct = np.unique(gb, axis=0).shape[0]
    n_clusters = min(k, distinct)
    _, labels, _ = k_means(X=gb, n_clusters=n_clusters, n_init=1, random_state=8)
    return [gb[labels == c] for c in range(n_clusters)]

def assign_points_to_closest_gb(data, gb_centers):
    """Assign every sample to the index of its nearest granular-ball center.

    Parameters
    ----------
    data : (n, m) ndarray of samples.
    gb_centers : (g, m) ndarray of ball centers.

    Returns
    -------
    (n,) int ndarray where entry i is argmin_j ||data[i] - gb_centers[j]||;
    ties go to the lower index, exactly as np.argmin does.
    """
    # Vectorized replacement for the original per-sample Python loop:
    # broadcasting yields an (n, g) distance matrix in one numpy call.
    dists = np.linalg.norm(data[:, None, :] - gb_centers[None, :, :], axis=2)
    return np.argmin(dists, axis=1).astype(int)

def fuzzy_similarity(t_data, sigma=0, k=2):
    """Partition t_data into granular balls and build a fuzzy similarity matrix.

    Parameters
    ----------
    t_data : (n, m) ndarray of samples (one or more attribute columns).
    sigma : similarity cutoff; entries below it are zeroed.
    k : split factor passed to the k-means-based ball splitter.

    Returns
    -------
    (point_to_gb, tp): per-sample ball index array of shape (n,), and an
    (n, n) similarity matrix computed from the centers of the balls each
    pair of samples belongs to.
    """
    t_n, t_m = t_data.shape
    gb_list = [t_data]
    num = np.ceil(t_n ** 0.5)  # minimum ball size before splitting stops

    # Keep splitting until a full pass creates no new balls.
    while True:
        prev = len(gb_list)
        gb_list = splits(gb_list, num=num, k=k)
        if prev == len(gb_list):
            break

    gb_center = np.zeros((len(gb_list), t_m))
    for i, gb in enumerate(gb_list):
        # BUG FIX: original read `gbcenter[i], = calculate_center_and_radius(gb)`
        # — a misspelled name plus a 1-tuple unpack of a 2-tuple (NameError /
        # ValueError). Keep the center, discard the radius.
        gb_center[i], _ = calculate_center_and_radius(gb)

    point_to_gb = assign_points_to_closest_gb(t_data, gb_center)
    point_center = gb_center[point_to_gb]  # each sample mapped to its ball center

    tp = 1 - cdist(point_center, point_center) / t_m
    tp[tp < sigma] = 0

    return point_to_gb, tp

def compute_membership_matrix(GBi_dict, attr):
    """Build the 0/1 inclusion matrix for one attribute's granular balls.

    Rows correspond to the attribute's balls sorted by ball number; columns
    to points x1..xN (N = highest point index seen). Entry (i, j) is 1 iff
    point x(j+1) belongs to ball i+1. Returns np.array([[]]) when the
    attribute has no stored balls or no points.
    """
    prefix = f"GB[{attr},"
    selected = {key: val for key, val in GBi_dict.items() if key.startswith(prefix)}
    if not selected:
        return np.array([[]])

    def ball_index(key):
        # "GB[a1,3]" -> 3
        return int(key.split(",")[1].rstrip("]"))

    ordered = sorted(selected, key=ball_index)

    point_ids = {
        int(name[1:])
        for key in ordered
        for name in GBi_dict[key]
        if name.startswith("x")
    }
    if not point_ids:
        return np.array([[]])

    width = max(point_ids)
    matrix = np.zeros((len(ordered), width), dtype=int)
    for row, key in enumerate(ordered):
        for name in GBi_dict[key]:
            matrix[row, int(name[1:]) - 1] = 1
    return matrix

def print_membership_matrices(GBi_dict, attrs=("a1", "a2", "a3")):
    """Print the inclusion (membership) matrix for each attribute in `attrs`.

    Parameters
    ----------
    GBi_dict : registry of "GB[attr,ball#]" -> point-name sets.
    attrs : iterable of attribute names. The default is now an immutable
        tuple — the original used a mutable list default, a classic Python
        pitfall — with identical behavior for all callers.
    """
    for attr in attrs:
        mat = compute_membership_matrix(GBi_dict, attr)
        print(f"\n属性 {attr} 的包含矩阵:")
        if mat.size == 0:
            # compute_membership_matrix returns np.array([[]]) when the
            # attribute has no stored balls or points.
            print("无数据")
        else:
            for row in mat:
                print(" ".join(str(x) for x in row))

def GBFRD(data, sigma=0):
    """Granular-ball fuzzy rough dependency over every single attribute.

    Partitions the data once over all attributes, then for each attribute
    computes its POS (sum of lower-approximation values over all-attribute
    balls) and dependency degree R = POS / n, reporting both via print.
    Returns 0 (results are delivered through side effects on GBi_dict and
    stdout).
    """
    n, m = data.shape
    LA = np.arange(m)

    # -- Partition once over ALL attributes and store its GBi entries --
    all_attr_name = "".join(f"a{j+1}" for j in LA)        # e.g. "a1a2a3"
    # BUG FIX: original read `pt2gball, = fuzzy_similarity(...)` (bad
    # 1-tuple unpack of the 2-tuple result) and then referenced the
    # undefined name `pt2gb_all` on the next call.
    pt2gb_all, _ = fuzzy_similarity(data[:, LA], sigma, k=2)
    GBi_number(0, pt2gb_all, all_attr_name)

    # The all-attribute membership matrix is computed exactly once.
    membership_matrix_all = compute_membership_matrix(GBi_dict, all_attr_name)

    for idx1, l1 in enumerate(LA):
        attr_name = f"a{l1+1}"
        print(f"\n处理属性 {attr_name}:")

        # 1) Single-attribute partition; store its GBi entries.
        pt2gb_single, sim_single = fuzzy_similarity(data[:, [l1]], sigma, k=2)
        GBi_number(idx1, pt2gb_single, attr_name)

        # 2) Dissimilarity (fuzzy negation) from the single-attribute similarity.
        rel_mat_P_N = 1 - sim_single

        # 3) Lower approximation per all-attribute ball; accumulate POS.
        POS = 0.0
        for ball_idx in range(membership_matrix_all.shape[0]):
            low_appr = np.maximum(
                rel_mat_P_N,
                membership_matrix_all[ball_idx, :]
            ).min(axis=1)
            print(f"全属性 {all_attr_name}，球 {ball_idx+1} 的 low_appr: {low_appr}")
            POS += low_appr.sum()

        # 4) Dependency degree R = POS / sample count n.
        R = POS / n
        print(f"{attr_name} 的 POS = {POS:.4f}")
        print(f"{attr_name} 的依赖度 R = {R:.4f}")

    return 0

# BUG FIX: the guard originally read `if name == 'main':`, which raises
# NameError at import time; the dunder form is required.
if __name__ == '__main__':
    data = pd.read_csv("Code/guiyihua_test.csv").values

    # Columns whose every value is >= 1 and that are not constant are
    # min-max scaled into [0, 1]; the remaining columns stay untouched.
    ID = (data >= 1).all(axis=0) & (data.max(axis=0) != data.min(axis=0))
    if ID.any():
        data[:, ID] = MinMaxScaler().fit_transform(data[:, ID])

    sigma = 0.5
    GBFRD(data, sigma)

    print("\n最终存储的 GBi_dict:")
    for key, members in GBi_dict.items():
        print(f"{key} = {members}")

    print_membership_matrices(GBi_dict, attrs=["a1", "a2", "a3"])