import os
import torch
import torch.multiprocessing as mp
import numpy as np
from DEAD.AutoDecoder.Network import AutoDecoderNet
from DEAD.AutoDecoder.Generator.GDS import GDS
import concurrent.futures
from DEAD.AutoDecoder.ParaviewMacro.ShowBS import show_all_BS
from DEAD.AutoDecoder.ParaviewMacro.ShowSR import show_all_SR
from DEAD.AutoDecoder.ParaviewMacro.ShowBSSR import show_all_BSSR


def get_u_field(u_field):
    """Convert a decoded PyTorch tensor (possibly on GPU) to a CPU numpy array.

    Strips the batch dimension and the trailing channel dimension, returning
    a contiguous 3-D array suitable for VTK export.
    """
    # Detach from autograd, move to host memory, then drop batch/channel dims.
    host_array = u_field.detach().cpu().numpy()[0, ..., 0]
    return np.ascontiguousarray(host_array)


def get_define_marker_field(xyz_field, lz, n_slots, m):
    """Build the radial and circumferential domain-marker fields.

    A marker value > 0 flags a point inside the propellant definition
    domain; values <= 0 lie outside it.  ``m`` is the head (dome)
    ellipsoid coefficient, ``lz`` the grain length, ``n_slots`` the
    number of slots around the circumference.
    Returns two numpy arrays (batch dimension removed).
    """
    # Split the last axis into the three coordinate fields.
    x, y, z = (xyz_field[..., i].detach() for i in range(3))

    # Head (dome) thickness: nonzero only within 1/m of either end.
    cap = 1.0 / m
    h = torch.where(z < cap, cap - z, 0)
    h = torch.where(z > lz - cap, z - (lz - cap), h)

    # Boundary slope for n_slots evenly spaced slots.
    slope = np.tan(np.pi / 2 - np.pi / n_slots)

    mh = m * h
    radial_marker = 1.0 - (mh * mh + x * x + y * y)            # radial direction
    circumferential_marker = x - slope * y                     # circumferential direction
    return (radial_marker.squeeze(0).cpu().numpy(),
            circumferential_marker.squeeze(0).cpu().numpy())


def setup_dead_model(hidden_size, depth, data_num, latent_size, model_load_path):
    """Construct the auto-decoder network, restore trained weights, set eval mode.

    Loads ``DEAD.pth`` from ``model_load_path`` and places the model on the GPU.
    """
    net = AutoDecoderNet(coordinate_size=3,
                         hidden_size=hidden_size,
                         output_size=1,
                         depth=depth,
                         latent_vectors_num=data_num,
                         latent_size=latent_size).cuda()
    state = torch.load(f"{model_load_path}/DEAD.pth", weights_only=True)
    net.load_state_dict(state)
    net.eval()  # inference mode: freezes dropout / normalization behavior
    return net


def get_items_from_geometry_vector(dead, gv):
    """Unpack a geometry vector into plain Python scalars.

    ``gv`` has shape [1, 3]: gv[0][0] is lz, gv[0][1] is n_slots,
    gv[0][2] is m.  Each entry is de-scaled by the model's
    ``geometry_vectors_scale``; n_slots is rounded to an int.
    """
    scale = dead.geometry_vectors_scale
    lz = gv[0][0].item() / scale
    n_slots = round(gv[0][1].item() / scale)
    m = gv[0][2].item() / scale
    return lz, n_slots, m

def get_pytorch_items_from_geometry_vector(dead, gv):
    """Unpack a geometry vector, keeping lz as a (differentiable) tensor.

    ``gv`` has shape [1, 3]: gv[0][0] is lz, gv[0][1] is n_slots,
    gv[0][2] is m.  Unlike get_items_from_geometry_vector, lz stays a
    torch tensor; n_slots and m are returned as Python scalars.
    """
    scale = dead.geometry_vectors_scale
    lz_tensor = gv[0][0] / scale
    n_slots = round(gv[0][1].item() / scale)
    m = gv[0][2].item() / scale
    return lz_tensor, n_slots, m

def get_vector_from_index(dead, grain_index):
    """Fetch one grain's latent and geometry vectors, each with a batch dim.

    Both vectors are unsqueezed at dim 0 so downstream code can treat them
    as a batch of size one.
    """
    latent = dead.get_latent_vectors(grain_index).unsqueeze(0)      # add batch dim
    geometry = dead.get_geometry_vectors(grain_index).unsqueeze(0)  # add batch dim
    return latent, geometry


def decode_from_index_to_field(dead, grain_index):
    """Decode the grain at ``grain_index`` into its sampled fields.

    Looks up the grain's latent and geometry vectors, then delegates to
    decode_from_vector_to_field.
    """
    latent, geometry = get_vector_from_index(dead, grain_index)
    print(f"decoding {grain_index}")
    return decode_from_vector_to_field(dead, latent, geometry)


def decode_from_vector_to_field(dead, lv, gv):
    """Decode one grain from its vectors into sampled fields on a GDS grid.

    lv: latent vector, shape [1, latent_size].
    gv: geometry vector, shape [1, 3] holding (lz, n_slots, m); m is the
    head ellipsoid coefficient (typically 2.0 ellipsoidal, 1.0 spherical).
    Returns the GDS grid and a dict with the decoded "u" field plus the
    radial/circumferential domain-marker fields.
    """
    with torch.no_grad():
        lz, n_slots, m = get_items_from_geometry_vector(dead, gv)
        n_slots = np.round(np.clip(n_slots, 2, 15))  # round to a valid slot count
        m = np.clip(m, 1.0, 2.0)                     # keep head coefficient in [1, 2]

        gds = GDS(lx=1, ly=1, lz=lz,
                  nx_points=50, ny_points=50, nz_points=200,
                  n_slots=n_slots, m=m,
                  rho_refine=1, theta_refine=1, zeta_refine=1)

        # Sample coordinates on the grid and add a batch dimension.
        coords = torch.from_numpy(gds.get_xyz_field()).cuda().unsqueeze(0)
        u_field = dead.forward(coords, lv, gv)
        marker_rho, marker_theta = get_define_marker_field(coords, lz, n_slots, m)
        return gds, {"u": get_u_field(u_field),
                     "define_marker_rho": marker_rho,
                     "define_marker_theta": marker_theta}

def decode_and_output(save_directory, dead, grain_index):
    """Decode one grain and write it out as a VTK file in Cartesian coordinates."""
    gds, fields = decode_from_index_to_field(dead, grain_index)
    # Encode slot count, length (mm) and head coefficient into the file name.
    grain_id = f"{grain_index}#N{gds.n_slots}L{round(gds.lz*1000)}M{round(gds.m*10)}"
    gds.save_as_vtk_in_cartesian(f"{save_directory}/{grain_id}", fields)

def decode_all(save_directory, need_show_all=True):
    """Decode every grain in the dataset and export VTK files in parallel.

    Args:
        save_directory: directory the VTK files are written to (created
            if missing).
        need_show_all: when True, launch the ParaView visualization of
            all results afterwards.

    Raises:
        Any exception raised inside a worker process (re-raised via
        ``Future.result()``).
    """
    from DEAD.AutoDecoder.Config import hidden_size, depth, data_num, latent_size, model_load_path
    os.makedirs(save_directory, exist_ok=True)
    dead = setup_dead_model(hidden_size, depth, data_num,
                            latent_size, model_load_path)

    # Fan the decode/export work out over a small process pool for speed.
    with concurrent.futures.ProcessPoolExecutor(max_workers=3) as executor:
        futures = [
            executor.submit(decode_and_output, save_directory, dead, grain_index)
            for grain_index in range(data_num)
        ]
        # The executor's __exit__ already joins all workers; the original
        # concurrent.futures.wait() after the `with` block was therefore a
        # no-op AND silently discarded worker exceptions.  Calling result()
        # here surfaces any failure instead of dropping it.
        for future in concurrent.futures.as_completed(futures):
            future.result()

    # Run ParaView to visualize the results.
    if need_show_all:
        show_all_BSSR(save_directory)


if __name__ == '__main__':
    # IMPORTANT: make sure there is enough GPU memory available,
    # otherwise writing the vts files will fail.
    from DEAD.AutoDecoder.Config import decode_result_save_path

    # 'spawn' is required so CUDA state is not inherited by fork.
    mp.set_start_method('spawn')
    decode_all(f"{decode_result_save_path}")