# 标准库
import os
from tqdm import tqdm  # 第三方进度条库
import numpy as np  # 数值计算库

# PyTorch 相关
import torch

# 自定义模块
from DDPM.Evaluation.Control import get_control_params
from DDPM.Evaluation.UMap import (
    load_feature_vectors_from_dead,
    load_feature_vectors,
    load_umap,
    reduce_feature_vectors,
    show_u_map_clusterd
)
from DDPM.Config import decode_result_save_path
from DEAD.AutoDecoder.Config import (
    hidden_size, 
    depth, 
    data_num, 
    latent_size, 
    model_load_path
)
from DDPM.Evaluation.InternalBallistic import convert_av1_to_value
from DEAD.AutoDecoder.Evaluation.Decoding import setup_dead_model
import DEAD.AutoDecoder.Evaluation.InternalBallistic as IB

# 聚类与性能评估相关
from sklearn.cluster import KMeans  # 聚类算法
from OPT.Objective import (  # 性能评估指标
    performance_matching_degree,
    decodeToApproximateBurningSurface,
    calculate_loading_fraction
)

def cal_r2_and_eta_in_samples(dead, index,npz_file, t_array_target,p_array_target,cstar,rho,n,At):
    """Score one generated sample against the target pressure curve.

    Decodes sample ``index`` from ``npz_file`` with the DEAD model, runs the
    internal-ballistic simulation, and compares the designed pressure curve
    with the target one.

    Returns:
        (r2, loading_fraction): performance-matching R^2 score and the
        loading fraction of the decoded grain.
    """
    # Move the sample's latent / global / auxiliary vectors onto the GPU
    # with a leading batch dimension; no gradients needed for loading.
    with torch.no_grad():
        latent_vec = torch.from_numpy(npz_file["lv"][index]).cuda().unsqueeze(0)
        global_vec = torch.from_numpy(npz_file["gv"][index]).cuda().unsqueeze(0)
        aux_vec = torch.from_numpy(npz_file["av1"][index]).cuda().unsqueeze(0)

    # Decode to burning-surface / web curves. NOTE: this call intentionally
    # runs OUTSIDE torch.no_grad(), matching the original control flow.
    decoded = decodeToApproximateBurningSurface(
        dead, latent_vec, global_vec, need_the_gradient_of_gradient=False)
    Ab_array_norm, w_array_norm, w_pred = decoded[0], decoded[1], decoded[2]

    with torch.no_grad():
        r_ref, R = convert_av1_to_value(aux_vec[0])
        t_designed, p_designed = IB.internal_ballistic_calculation_pytorch(
            w_array_norm=w_array_norm,
            Ab_array_norm=Ab_array_norm,
            cstar_rho=cstar*rho,
            r_ref=r_ref,
            n=n,
            R=R,
            At=At,
            verify_range=False)
        # Only the overall r2 is used; the per-segment score is discarded.
        r2, _, _, _ = performance_matching_degree(
            t_designed, p_designed, t_array_target, p_array_target)
        loading_fraction, _ = calculate_loading_fraction(w_pred, 0)
        return r2, loading_fraction

def cluster(feature_vectors, num_clusters):
    """Cluster feature vectors with KMeans and summarize each cluster.

    Args:
        feature_vectors: (n_samples, n_features) array to cluster.
        num_clusters: number of KMeans clusters.

    Returns:
        closest_indices_array: (num_clusters,) int array; for each cluster,
            the index into ``feature_vectors`` of the sample nearest the
            cluster centroid.
        cluster_centers: (num_clusters, n_features) centroid array.
        cluster_samples_index_array: list of index arrays, one per cluster,
            holding every member sample's index into ``feature_vectors``.
    """
    # Fixed random_state keeps the clustering reproducible between runs.
    kmeans = KMeans(n_clusters=num_clusters, random_state=123, n_init='auto')
    kmeans.fit(feature_vectors)

    cluster_centers = kmeans.cluster_centers_
    labels = kmeans.labels_

    closest_indices = []              # per cluster: member nearest the centroid
    cluster_samples_index_array = []  # per cluster: indices of all members

    for i in range(num_clusters):
        # Membership indices computed ONCE and reused (the original
        # recomputed the labels == i mask three times per cluster).
        member_indices = np.where(labels == i)[0]
        cluster_samples_index_array.append(member_indices)

        # Euclidean distance of each member to its centroid; keep the index
        # of the closest sample in the ORIGINAL feature_vectors array.
        # (k-means++ initialization makes empty clusters effectively
        # impossible for n_samples >= num_clusters.)
        distances = np.linalg.norm(
            feature_vectors[member_indices] - cluster_centers[i], axis=1)
        closest_indices.append(member_indices[np.argmin(distances)])

        # Report the cluster size.
        print(f"聚类 {i} 的样本数量: {len(member_indices)}")

    closest_indices_array = np.array(closest_indices)
    return closest_indices_array, cluster_centers, cluster_samples_index_array

def start_post_processing(control_params, dead_model_load_path,need_performance_evaluation=True):
    """Post-process generated samples for one project.

    Pipeline: embed the generated samples with a fitted UMAP reducer, cluster
    them with KMeans, optionally score every sample's internal-ballistic r2
    per cluster, then save a cluster-reduced sample set plus analysis files.

    Args:
        control_params: dict with at least "project_name", "cluster_num" and
            "user_defined" (and "i_from_psfd" when user_defined is False).
        dead_model_load_path: directory holding the DEAD model weights and
            the fitted umap_model.joblib.
        need_performance_evaluation: when True, keep each cluster's best-r2
            sample; when False, keep the sample nearest each cluster center.

    Side effects: writes Samples_clustered.npz, analysis/all_r2_list.npz and
    analysis/all_r2_list.csv under the project folder, and renders the UMAP
    cluster plot via show_u_map_clusterd.
    """
    print(f"##### Start post processing of {control_params['project_name']} #####")
    project_name = control_params["project_name"]
    cluster_num=control_params["cluster_num"]
    user_defined=control_params["user_defined"]

    #need_performance_evaluation=True # debug override: force performance evaluation on
    project_path=f"{decode_result_save_path}/{project_name}"
    #===================== Embed the generated Samples =====================
    print("正在计算Samples的嵌入向量...")
    sample_feature_vectors = load_feature_vectors(project_path,"Samples")
    umap_save_path = f"{dead_model_load_path}/umap_model.joblib"
    reducer, std = load_umap(umap_save_path)
    sample_embedding= reduce_feature_vectors(sample_feature_vectors, std, reducer)
    print("对Samples的嵌入向量开展聚类分析...")
    # NOTE(review): the original comment claimed clustering runs on the UMAP
    # embedding, but the active line below clusters the RAW feature vectors;
    # the embedding variant is kept commented out — confirm which is intended.
    closest_indices,cluster_centers, cluster_samples_index_array=cluster(sample_feature_vectors, cluster_num)
    #closest_indices,cluster_centers, cluster_samples_index_array=cluster(sample_embedding, cluster_num)
    selected_indices=closest_indices

    #===================== Embed the DEAD model's training data =====================
    print("正在计算DEAD模型的嵌入向量...")
    #dead_model_load_path = model_load_path
    dead = setup_dead_model(hidden_size, depth, data_num,
                            latent_size, dead_model_load_path)
    # NOTE(review): this overwrites the dead_model_load_path parameter with
    # whatever load_feature_vectors_from_dead() returns — confirm intended.
    feature_vectors, labels, dead_model_load_path, std=load_feature_vectors_from_dead()
    dead_embedding = reduce_feature_vectors(feature_vectors, std, reducer)

    best_r2_list=[]
    all_r2_list=[]
    loading_fraction_list = []
    #========================== Performance evaluation ==========================
    if need_performance_evaluation:
        npz_file = np.load(f"{project_path}/Samples.npz")
        t_array_target = npz_file["t_array_target"]
        p_array_target = npz_file["p_array_target"]
        cstar = npz_file["cstar"]
        rho = npz_file["rho"]
        n = npz_file["n"]
        At = npz_file["At"]

        # Move target curves and propellant constants to the GPU as float32.
        t_array_target = torch.tensor(
            t_array_target, device="cuda", dtype=torch.float32)
        p_array_target = torch.tensor(
            p_array_target, device="cuda", dtype=torch.float32)
        cstar = torch.tensor(cstar, device="cuda", dtype=torch.float32)
        rho = torch.tensor(rho, device="cuda", dtype=torch.float32)
        n = torch.tensor(n, device="cuda", dtype=torch.float32)
        At = torch.tensor(At, device="cuda", dtype=torch.float32)

        best_indices = []

        # For every cluster, score each member sample and keep the one with
        # the highest r2 against the target pressure curve.
        for cluster_idx, indices in enumerate(cluster_samples_index_array):
            print(f"正在处理聚类 {cluster_idx}")
            best_i = -1
            best_r2 = -float('inf')
            loading_fraction =-float('inf')
            
            # tqdm shows per-cluster evaluation progress.
            for i in tqdm(indices, desc=f'聚类 {cluster_idx} 样本处理进度', leave=True):
                r2 , eta = cal_r2_and_eta_in_samples(dead, i, npz_file, t_array_target, p_array_target, cstar, rho, n, At)
                all_r2_list.append(r2.item())
                if r2 > best_r2:
                    best_r2 = r2
                    best_i = i
                    loading_fraction = eta

            best_indices.append(best_i)
            best_r2_list.append(best_r2.item())
            loading_fraction_list.append(loading_fraction.item())
            print(f"聚类 {cluster_idx} 中 r² 最高的样本序号: {best_i}, r² 得分: {best_r2:.4f}")
        best_indices = np.array(best_indices)
        best_r2_list = np.array(best_r2_list)
        loading_fraction_list = np.array(loading_fraction_list)
        
        # Replace the centroid-nearest picks with the per-cluster best-r2 picks.
        selected_indices=best_indices

    # Reload the sample file and slice out the selected samples.
    npz_file = np.load(f"{project_path}/Samples.npz")
    lv_selected = npz_file["lv"][selected_indices]
    gv_selected = npz_file["gv"][selected_indices]
    av1_selected = npz_file["av1"][selected_indices]
    r_ref_list_selected = npz_file["r_ref"][selected_indices]
    R_list_selected= npz_file["R"][selected_indices]
    n= npz_file["n"]
    At= npz_file["At"]
    if user_defined == True:
        cstar= npz_file["cstar"]
        rho= npz_file["rho"]
        t_array_target= npz_file["t_array_target"]
        p_array_target= npz_file["p_array_target"]
    else:
        cstar_rho=npz_file["cstar_rho"]

    # Feature vector of a sample = concatenated latent + global vectors.
    selected_feature_vectors = np.concatenate((lv_selected, gv_selected), axis=1)
    selected_sample_embedding= reduce_feature_vectors(selected_feature_vectors, std, reducer)

    show_u_map_clusterd(project_path, dead_embedding, sample_embedding, selected_sample_embedding)

    # Save the cluster-reduced sample set to a new npz file.
    save_path = f"{project_path}/Samples_clustered.npz"
    if user_defined == True:
        np.savez_compressed(save_path,
                            lv=lv_selected,
                            gv=gv_selected,
                            av1=av1_selected,
                            r_ref=r_ref_list_selected,
                            R=R_list_selected,
                            cstar=cstar,
                            rho=rho,
                            n=n,
                            At=At,
                            t_array_target=t_array_target,
                            p_array_target=p_array_target,
                            umap_embedding=selected_sample_embedding,
                            best_r2_list=best_r2_list,
                            loading_fraction_list=loading_fraction_list)
    else:
        i_from_psfd=control_params["i_from_psfd"]
        np.savez_compressed(save_path,
                    lv=lv_selected,
                    gv=gv_selected,
                    av1=av1_selected,
                    r_ref=r_ref_list_selected,
                    R=R_list_selected,
                    cstar_rho=cstar_rho,
                    n=n,
                    At=At,
                    i_from_psfd=i_from_psfd,
                    umap_embedding=selected_sample_embedding,
                    best_r2_list=best_r2_list,
                    loading_fraction_list=loading_fraction_list)
    print(f"聚类精简后的样本已保存至 {save_path}")

    # Persist every sample's r2 score (npz + csv) for later analysis.
    os.makedirs(f"{project_path}/analysis", exist_ok=True)
    np.savez_compressed(f"{project_path}/analysis/all_r2_list.npz",all_r2_list=all_r2_list)
    csv_file_path = f"{project_path}/analysis/all_r2_list.csv"
    with open(csv_file_path, 'w') as csv_file:
        csv_file.write("r2\n")
        for r2 in all_r2_list:
            csv_file.write(f"{r2}\n")

    print(f"生成结果的所有r2已保存至 {csv_file_path}")

if __name__ == "__main__":
    # Script entry point: build the control parameters for this question and
    # run the full post-processing pipeline with the default DEAD model path.
    params = get_control_params(
        question_name="A恒面装药",
        postfix="_epoch50000",
        cluster_num=5,
        guid_w=0.5,
    )
    start_post_processing(control_params=params, dead_model_load_path=model_load_path)