import numpy as np
from scipy.spatial.distance import cdist
import warnings
warnings.filterwarnings("ignore")
import numpy as np
from sklearn.cluster import k_means
from scipy.spatial.distance import cdist
from sklearn.preprocessing import MinMaxScaler
import pandas as pd



def GBi_number():
    """Placeholder: class label of the current granular ball (always 0 for now)."""
    return 0

def calculate_center_and_radius(gb):
    """Return (center, radius) of a granular ball.

    The center is the per-feature mean of the points; the radius is the
    largest Euclidean distance from any point to that center.
    """
    points = gb[:, :]
    print("在calculate_center_and_radius函数中，原始数据（副本）:")
    print(points)
    center = points.mean(axis=0)
    print("数据的中心位置（center）:")
    print(center)
    # Max Euclidean distance from the center to any member point.
    offsets = points - center
    radius = np.max(np.sqrt((offsets ** 2).sum(axis=1)))
    print("数据的半径（radius）:")
    print(radius)
    return center, radius


def splits(gb_list, num, k=2):
    """One splitting pass: balls with at least `num` points are divided into
    up to `k` sub-balls via k-means; smaller balls are kept unchanged.
    """
    refined = []
    print("在splits函数中，输入的数据集列表（gb_list）:")
    print(gb_list)
    for ball in gb_list:
        count = ball.shape[0]
        print(f"当前数据集（gb）的形状，数据点数量: {count}")
        if count >= num:
            refined.extend(splits_ball(ball, k))
        else:
            refined.append(ball)
            print(f"数据点数量小于num，直接添加数据集: {ball}")
    print("经过处理后，返回的数据集列表（gb_list_new）:")
    print(refined)
    return refined


def splits_ball(gb, k):
    """Partition granular ball `gb` into at most `k` clusters with k-means.

    `k` is clamped to the number of distinct points so k-means never receives
    more clusters than unique samples. Returns one array per cluster.
    """
    distinct = np.unique(gb, axis=0)
    print("在splits_ball函数中，原始数据集（gb）:")
    print(gb)
    print("数据集中不重复的数据点（len_no_label）:")
    print(distinct)
    if distinct.shape[0] < k:
        k = distinct.shape[0]
        print(f"不重复数据点数量小于k，调整k的值为: {k}")
    # Fixed random_state keeps the split deterministic across runs.
    labels = k_means(X=gb, n_clusters=k, n_init=1, random_state=8)[1]
    print("每个数据点所属的聚类标签（label）:")
    print(labels)
    pieces = []
    for cluster in range(k):
        pieces.append(gb[labels == cluster, :])
        print(f"添加属于聚类标签 {cluster} 的数据点组成的数组到ball_list")
    print("经过划分后，返回的数据集部分组成的列表（ball_list）:")
    print(pieces)
    return pieces


def assign_points_to_closest_gb(data, gb_centers):
    """For each row of `data`, return the index of the nearest center in
    `gb_centers` (Euclidean distance), as an int array of length len(data).
    """
    nearest = np.zeros(data.shape[0])
    print("在assign_points_to_closest_gb函数中，输入的数据（data）:")
    print(data)
    print("输入的中心（gb_centers）:")
    print(gb_centers)
    for row, point in enumerate(data):
        dists = np.sqrt(((point - gb_centers) ** 2).sum(axis=1))
        winner = np.argmin(dists)
        print(f"数据点 {point} 距离最近的中心索引为: {winner}")
        nearest[row] = winner
    print("分配完成后，返回的每个数据点所分配到的组的索引（assigned_gb_indices）:")
    print(nearest)
    return nearest.astype('int')


def fuzzy_similarity(t_data, sigma=0, k=2):
    """Build the granular-ball fuzzy similarity matrix of `t_data`.

    Balls are repeatedly split (k-means) until the ball count stops growing;
    each sample is then replaced by its nearest ball center, similarity is
    1 - dist/m between those centers, and entries below `sigma` are zeroed.
    """
    n_samples, n_feats = t_data.shape
    gb_list = [t_data]
    print("在fuzzy_similarity函数中，初始数据集列表（gb_list）:")
    print(gb_list)
    # Minimum ball size threshold: sqrt of the sample count (rounded up).
    num = np.ceil(n_samples ** 0.5)
    print(f"计算得到的阈值（num）: {num}")
    while True:
        before = len(gb_list)
        print(f"当前循环开始时，gb_list的长度（ball_number_1）: {before}")
        gb_list = splits(gb_list, num=num, k=k)
        after = len(gb_list)
        print(f"当前循环结束后，gb_list的长度（ball_number_2）: {after}")
        if before == after:
            break
    gb_center = np.zeros((len(gb_list), n_feats))
    for pos, ball in enumerate(gb_list):
        gb_center[pos], _ = calculate_center_and_radius(ball)
        print(f"计算得到数据集 {ball} 的中心位置并存入gb_center")
    point_to_gb = assign_points_to_closest_gb(t_data, gb_center)
    print("每个数据点分配到的组的索引（point_to_gb）:")
    print(point_to_gb)
    # Replace every sample by the center of the ball it was assigned to.
    point_center = np.zeros((n_samples, n_feats))
    for row in range(n_samples):
        point_center[row] = gb_center[point_to_gb[row]]
        print(f"计算得到数据点 {row} 对应的中心位置并存入point_center")
    tp = 1 - cdist(point_center, point_center) / n_feats
    print("未处理前的模糊相似性矩阵（tp）:")
    print(tp)
    tp[tp < sigma] = 0
    print("根据sigma处理后的模糊相似性矩阵（tp）:")
    print(tp)
    return tp


def GBFRD(data, sigma=0):
    """Granular-ball fuzzy rough computation over each feature of `data`.

    For every feature l1 it builds the single-feature fuzzy similarity matrix
    Rb and the complementary-feature matrix Rs, then computes the fuzzy lower
    approximation per row. Results are currently only printed; the function
    returns 0 (work-in-progress — upper-approximation code is commented out).
    """
    n, m = data.shape
    LA = np.arange(m)

    print("在GBFRD函数中，输入的数据（data）:")
    print(data)
    print("特征索引数组（LA）:")
    print(LA)
    for idx1, l1 in enumerate(LA):  # idx1 is the index, l1 the feature column
        # Rb(xi,xj): compute once and reuse. The original called
        # fuzzy_similarity twice with identical arguments (once for the
        # deduplicated matrix, once for the raw one), doubling an expensive
        # clustering pipeline for the exact same result.
        xiugai_juzhen = fuzzy_similarity(data[:, [l1]], sigma, k=2)  # raw (no dedup)
        rel_mat_k_l = np.unique(xiugai_juzhen, axis=0)  # deduplicated rows, for inspection
        print("与特征l1相关的模糊相似性矩阵（rel_mat_k_l）:")
        print(rel_mat_k_l)
        n_items = xiugai_juzhen.shape[0]
        A_d1 = np.setdiff1d(LA, l1)  # all features except l1

        rel_mat_P_1 = fuzzy_similarity(data[:, A_d1], sigma, k=2)  # Rs(xi,xj)
        print("与其他特征相关的模糊相似性矩阵（rel_mat_P_1）:")
        print(rel_mat_P_1)
        rel_mat_P = rel_mat_P_1      # used for the upper approximation
        rel_mat_P_N = 1 - rel_mat_P  # used for the lower approximation
        for i in range(n_items):
            rel_mat_B = xiugai_juzhen[i]

            # Fuzzy lower approximation: min over j of max(1-Rs, Rb_i).
            low_appr = np.maximum(rel_mat_P_N, rel_mat_B).min(axis=1)
            # up_appr = np.minimum(rel_mat_P, rel_mat_B).max(axis=1)
            print(f"对于rel_mat_k_l中的元素 {i}，下近似相关计算结果（low_appr）: {low_appr}")
            # print(f"对于rel_mat_k_l中的元素 {i}，上近似相关计算结果（up_appr）: {up_appr}")

    return 0

if __name__ == '__main__':
    # Load the dataset as a plain numeric array.
    data = pd.read_csv("Code\guiyihua_test.csv").values

    # Columns eligible for min-max scaling: every value >= 1 and not constant.
    ID = (data >= 1).all(axis=0) & (data.max(axis=0) != data.min(axis=0))
    scaler = MinMaxScaler()
    if ID.any():
        data[:, ID] = scaler.fit_transform(data[:, ID])
    sigma = 0.5
    print(data)
    out_factors = GBFRD(data, sigma)