import concurrent.futures
import os

import numpy as np
import torch

from DEAD.AutoDecoder.ParaviewMacro.ShowBS import show_all_BS
from DEAD.AutoDecoder.ParaviewMacro.ShowSR import show_all_SR
from DEAD.AutoDecoder.ParaviewMacro.ShowBSSR import show_all_BSSR
from DEAD.AutoDecoder.Evaluation.Decoding import *
from DDPM.Evaluation.Control import *

def decode_all(save_directory, npz_name, less_middle_results=False, need_show_all=True):
    """Decode latent/global vectors from an .npz sample file into VTK field files.

    Loads the DEAD auto-decoder model, decodes each (lv, gv) vector pair back
    into a field, and exports each result as a VTK file under *save_directory*.
    VTK export is farmed out to a process pool for concurrency.

    Args:
        save_directory: Directory containing the .npz file; VTK output is
            written here too (created if missing).
        npz_name: Base name (without extension) of the .npz file; it must
            contain "lv" and "gv" arrays of equal length.
        less_middle_results: If True, decode only 10 evenly spaced samples
            instead of all of them, to cut down output volume.
        need_show_all: If True, launch the Paraview visualization macro
            over the output directory afterwards.
    """
    from DEAD.AutoDecoder.Config import hidden_size, depth, data_num, latent_size, model_load_path
    dead_model_load_path = model_load_path
    os.makedirs(save_directory, exist_ok=True)
    dead = setup_dead_model(hidden_size, depth, data_num,
                            latent_size, dead_model_load_path)

    # Use a context manager so the NpzFile's underlying file handle is
    # released once the arrays have been copied onto the GPU.
    with np.load(f"{save_directory}/{npz_name}.npz") as npz_file:
        lvs = torch.from_numpy(npz_file["lv"]).cuda()
        gvs = torch.from_numpy(npz_file["gv"]).cuda()
    sample_num = len(lvs)

    # Create a process pool: the VTK export is CPU-bound, so it is
    # submitted to worker processes while the main process keeps decoding.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = []
        if less_middle_results:
            # Keep only n evenly spaced samples across the whole range.
            n = 10
            index_range = np.linspace(0, sample_num - 1, n, dtype=int).tolist()
        else:
            index_range = range(0, sample_num)

        for grain_index in index_range:
            lv = lvs[grain_index].unsqueeze(0)
            gv = gvs[grain_index].unsqueeze(0)
            gds, field_dict = decode_from_vector_to_field(dead, lv, gv)
            # Renamed from `id` to avoid shadowing the builtin.
            sample_id = f"{grain_index}#N{gds.n_slots}L{round(gds.lz*1000)}M{round(gds.m*10)}"
            vts_path = f"{save_directory}/{sample_id}"
            # Submit the VTK-export step to the pool for concurrent speed-up.
            futures.append(executor.submit(
                gds.save_as_vtk_in_cartesian, vts_path, field_dict))

    # NOTE: exiting the `with` block above already waits for all submitted
    # tasks (shutdown(wait=True)); this explicit wait just makes the intent
    # obvious and is a no-op at this point.
    concurrent.futures.wait(futures)

    # Run Paraview for visualization.
    if need_show_all:
        show_all_BSSR(save_directory)

def start_decoding(control_params):
    """Decode all samples for one project, then extract its burning surfaces.

    Args:
        control_params: Mapping that must contain "project_name"; the project
            directory is resolved under the configured decode result path.
    """
    print(f"##### Start decoding of {control_params['project_name']} #####")
    from DDPM.Config import decode_result_save_path
    from DEAD.AutoDecoder.Evaluation.BurningSurface import burning_surface_all
    # Resolve the per-project output directory once and reuse it.
    project_dir = f"{decode_result_save_path}/{control_params['project_name']}"
    decode_all(project_dir, "Samples_clustered")
    burning_surface_all(project_dir)

if __name__ == '__main__':
    # Assemble the run configuration, then kick off decoding for it.
    run_config = {
        "question_name": "A恒面装药",
        "postfix": "_epoch50000",
        "cluster_num": 5,
        "guid_w": 0.5,
        "seed": 123,
    }
    start_decoding(get_control_params(**run_config))