import numpy as np
import pickle
from scipy.stats import gaussian_kde
from sklearn.preprocessing import StandardScaler
from sklearn.cluster import KMeans
from scipy.spatial.distance import cdist
import matplotlib.pyplot as plt


def fit_kde_and_compute_cdf(distances):
    """Fit a Gaussian KDE to 1-D distance samples.

    Returns a `(cdf, kde)` pair, where `cdf(x)` evaluates the cumulative
    distribution of the fitted density at `x`.
    """
    kde = gaussian_kde(distances)

    def cdf(x):
        # CDF(x) = integral of the fitted density over (-inf, x].
        return kde.integrate_box_1d(-np.inf, x)

    return cdf, kde

def compute_cdf(kde):
    """Wrap an already-fitted KDE in a closure that evaluates its CDF."""
    return lambda x: kde.integrate_box_1d(-np.inf, x)


def save_kernels(kernels, filename):
    """Serialize the KDE models to `filename` with pickle."""
    with open(filename, 'wb') as out:
        pickle.dump(kernels, out)


def load_kernels(filename):
    """Deserialize KDE models from `filename`.

    NOTE: `pickle.load` can execute arbitrary code — only load trusted files.
    """
    with open(filename, 'rb') as src:
        return pickle.load(src)


def compute_distance_cdfs(class_distances):
    """Fit one KDE per class and build the matching CDF closures.

    NOTE(review): with exactly one class, `cdfs` is returned as a bare
    callable rather than a one-element list — callers appear to rely on
    this asymmetry, so it is preserved here.

    Returns `(cdfs, kernels)`.
    """
    kernels = []

    if len(class_distances) == 1:
        data = np.nan_to_num(np.array(class_distances, dtype=np.float32))
        cdfs, kernel = fit_kde_and_compute_cdf(data)
        kernels.append(kernel)
    else:
        cdfs = []
        for dists in class_distances:
            clean = np.nan_to_num(np.array(dists, dtype=np.float32))
            cdf_func, kernel = fit_kde_and_compute_cdf(clean)
            cdfs.append(cdf_func)
            kernels.append(kernel)

    return cdfs, kernels


def compute_class_centers_and_distances(pca_data, labels):
    """Compute each class's mean center and every sample's distance to its own center.

    Returns `(centers, distances)`: one center vector and one flat array of
    Euclidean distances per unique label (ordered by `np.unique(labels)`).
    """
    labels = np.array(labels)
    pca_data = np.array(pca_data)

    centers = []
    distances = []

    for label in np.unique(labels):
        # Samples belonging to the current class.
        class_data = pca_data[labels == label]

        center = class_data.mean(axis=0)
        dists = cdist(class_data, [center], 'euclidean').flatten()

        centers.append(center)
        distances.append(dists)

    return centers, distances


def adjust_softmax_scores_with_cdfs_kernel(test_distances, kernels, softmax_scores, alpha=1, shift_distance=0):
    """Adjust softmax scores using per-class distance survival probabilities.

    Parameters
    ----------
    test_distances : (n_samples, n_classes) array of distances to class centers.
    kernels : list of fitted `gaussian_kde` objects, one per known class.
    softmax_scores : (n_samples, n_classes) array-like of classifier scores.
    alpha : weight applied to the survival probabilities.
    shift_distance : constant subtracted from every distance before evaluation.

    Returns
    -------
    (n_samples, n_classes + 1) array whose last column is the unknown-class
    probability; each row is normalized to sum to 1.
    """
    # Coerce up front: callers pass plain lists, and `list.copy()` followed by
    # `*=` would fail on them (list has no element-wise multiply).
    test_distances = np.asarray(test_distances)
    softmax_scores = np.asarray(softmax_scores, dtype=float)
    test_distances_shifted = test_distances - shift_distance
    adjusted_scores = softmax_scores.copy()

    def _survival(kernel, points):
        # Survival function 1 - CDF(x), where CDF integrates the KDE density
        # over (-inf, x].
        return np.array([1 - kernel.integrate_box_1d(-np.inf, p) for p in points])

    if test_distances.shape[1] == 1:
        # BUG FIX: the original called the *list* of CDFs directly
        # (`class_cdfs(test_distances_shifted)`), which raised TypeError.
        # Evaluate the single class's survival probability per sample instead.
        survival_probs = _survival(kernels[0], test_distances_shifted[:, 0])
        adjusted_scores *= survival_probs[:, np.newaxis] * alpha

        # Probability mass removed from the known class becomes "unknown".
        unknown_class_prob = np.sum(softmax_scores - adjusted_scores, axis=1)
        adjusted_scores = np.hstack((adjusted_scores, unknown_class_prob[:, np.newaxis]))

        # Normalize rows to a proper distribution.
        adjusted_scores /= adjusted_scores.sum(axis=1)[:, np.newaxis]
    else:
        for i, kernel in enumerate(kernels):
            # Use the survival function of the shifted distances per class.
            survival_probs = _survival(kernel, test_distances_shifted[:, i])
            adjusted_scores[:, i] *= (survival_probs + alpha) / 2

        # Mass removed from the known classes becomes the unknown class.
        unknown_class_prob = softmax_scores.sum(axis=1) - adjusted_scores.sum(axis=1)
        adjusted_scores = np.hstack((adjusted_scores, unknown_class_prob[:, np.newaxis]))

        # Normalize rows to a proper distribution.
        adjusted_scores /= adjusted_scores.sum(axis=1)[:, np.newaxis]
    return adjusted_scores


def process_scores_renew(predict_labels, X_pca, softmax_scores, kwn, kernels, centers, alpha=1, dis_shifted=0, shift_ratio=1):
    """Re-score samples currently predicted as known and relabel them.

    Samples labeled '-1' are treated as unknown and left untouched; the rest
    are re-scored via `adjust_softmax_scores_with_cdfs_kernel` and may be
    reassigned to any known class or to '-1'.

    Parameters
    ----------
    predict_labels : sequence of string labels; '-1' marks unknown samples.
    X_pca : per-sample PCA feature vectors, aligned with `predict_labels`.
    softmax_scores : per-sample classifier scores, aligned with `predict_labels`.
    kwn : ordered list of known-class labels matching the score columns.
    kernels, centers : per-class KDEs and centers for the known classes.
    alpha, dis_shifted : passed through to the score-adjustment routine.
    shift_ratio : accepted for interface compatibility; unused here.

    Returns
    -------
    (adjust_scores, predict_update) — adjusted scores for the known-labeled
    samples, and a full-length updated label list.
    """
    # Indices of samples currently considered known.
    known_idx = [i for i, label in enumerate(predict_labels) if label != '-1']
    known_pca = [X_pca[i] for i in known_idx]
    known_scores = [softmax_scores[i] for i in known_idx]

    # Distance of each known sample to every class center.
    new_distances = cdist(known_pca, centers, 'euclidean')
    adjust_scores = adjust_softmax_scores_with_cdfs_kernel(
        new_distances, kernels, known_scores, alpha, dis_shifted)

    predict_labels_updated = np.argmax(adjust_scores, axis=1)

    # Map the winning column index back to a label; the appended last column
    # corresponds to the unknown class '-1'.
    label_mapping_end = {int(k): label for k, label in enumerate(kwn + ['-1'])}
    kwn_predict_labels = [label_mapping_end[int(class_id)] for class_id in predict_labels_updated]

    # BUG FIX: copy instead of aliasing so the caller's list is not mutated
    # in place (the original wrote through `predict_update = predict_labels`).
    predict_update = list(predict_labels)
    for i, new_label in zip(known_idx, kwn_predict_labels):
        predict_update[i] = new_label

    return adjust_scores, predict_update