import os
from DEAD.AutoDecoder.Evaluation.Decoding import setup_dead_model, decode_from_vector_to_field, get_vector_from_index
import concurrent.futures
from DEAD.AutoDecoder.ParaviewMacro.ShowBS import show_all_BS
from DEAD.AutoDecoder.ParaviewMacro.ShowSR import show_all_SR
from DEAD.AutoDecoder.ParaviewMacro.ShowBSSR import show_all_BSSR
import numpy as np
import torch
import matplotlib.pyplot as plt
from DEAD.Gradiant.AutoGrad import get_vector_field_component as gvc
from DEAD.Gradiant.AutoGrad import auto_grad
from PIL import Image
import re
import matplotlib.patheffects as path_effects
import csv


def generate_point_cloud(num_points, lz, n_slots, m, device="cuda", seed=42, max_batch_size=10000):
    """
    Generate a point cloud satisfying the slot-geometry constraints (optionally CUDA-accelerated).

    Candidates are sampled uniformly in [0,1] x [0,1] x [0,lz] and kept only when
    both the radial marker 1 - ((m*h)^2 + x^2 + y^2) and the circumferential
    marker x - slope*y are non-negative; sampling repeats until enough points pass.

    Args:
        num_points (int): target number of points
        lz (float): Z range [0, lz]
        n_slots (int): number of slots (sets the circumferential slope)
        m (float): parameter controlling the end-cap height h
        device (str): "cuda" or "cpu"
        seed (int): RNG seed for reproducibility
        max_batch_size (int): maximum candidates generated per iteration

    Returns:
        torch.Tensor: (num_points, 3) point cloud on `device`
    """
    # Fix all random seeds so the cloud is reproducible
    torch.manual_seed(seed)
    if device == "cuda":
        torch.cuda.manual_seed_all(seed)

    # The circumferential slope depends only on n_slots — hoist it out of the loop.
    slope = np.tan(np.pi / 2 - np.pi / n_slots)

    # Accumulate accepted batches in a list and concatenate once at the end:
    # repeated torch.cat inside the loop copies all prior points each pass (O(n^2) total).
    accepted = []
    n_accepted = 0

    while n_accepted < num_points:
        # Batch of candidate points; batch sizing matches the original so RNG
        # consumption — and therefore the returned points — are unchanged.
        batch_size = min(max_batch_size, num_points - n_accepted)
        xyz = torch.rand((batch_size, 3), device=device)
        xyz[:, 2] *= lz  # Z ∈ [0, lz]

        # Vectorized marker computation
        x, y, z = xyz[:, 0], xyz[:, 1], xyz[:, 2]
        # h is nonzero only inside the two end caps of thickness 1/m
        h = torch.zeros_like(z)
        h = torch.where(z < 1.0 / m, 1.0 / m - z, h)
        h = torch.where(z > lz - 1.0 / m, z - (lz - 1.0 / m), h)

        define_marker_rho = 1.0 - ((m * h) ** 2 + x ** 2 + y ** 2)  # radial
        define_marker_theta = x - slope * y                          # circumferential

        # Keep only the points satisfying both constraints
        mask = (define_marker_theta >= 0) & (define_marker_rho >= 0)
        kept = xyz[mask]
        accepted.append(kept)
        n_accepted += len(kept)

    return torch.cat(accepted, dim=0)[:num_points]  # exactly num_points rows

def evaluate_eikonal_mse(dead, lv, gv, lz, n_slots, m, num_points=5000):
    """Sample a random point cloud and return the mean squared eikonal residual (1 - |∇u|²)²."""
    cloud = generate_point_cloud(num_points, lz, n_slots, m).unsqueeze(0)
    # Gradients w.r.t. the input coordinates are required for the eikonal residual.
    cloud.requires_grad = True
    prediction = dead.forward(cloud, lv, gv)
    gradient = auto_grad(prediction, cloud, 0, False)
    # |∇u|² summed over the three spatial components
    grad_sq_norm = sum(gvc(gradient, axis) ** 2 for axis in range(3))
    return torch.mean((1.0 - grad_sq_norm) ** 2).item()

def plot_eikonal_mse_matrix(eikonal_mse_matrix, save_dir=None):
    """
    Render the eikonal-MSE matrix as an annotated heatmap and dump it to CSV.

    Writes `<save_dir>/eikonal_mse_matrix.png` (cells annotated in units of 1e-3)
    and `<save_dir>/eikonal_mse_matrix.csv`.

    Args:
        eikonal_mse_matrix (np.ndarray): 2-D matrix of eikonal MSE values.
        save_dir (str | None): output directory; when None, falls back to the
            module-level `save_directory` global for backward compatibility.
    """
    if save_dir is None:
        save_dir = save_directory  # legacy behavior: global set in __main__

    # Use a dedicated figure so repeated calls don't stack images/colorbars
    # on the implicit current axes.
    fig = plt.figure()
    plt.imshow(eikonal_mse_matrix, cmap='RdBu_r')
    plt.colorbar()
    plt.title("Eikonal Matrix Visualization")
    for i in range(eikonal_mse_matrix.shape[0]):
        for j in range(eikonal_mse_matrix.shape[1]):
            cell_value = eikonal_mse_matrix[i, j]

            # White bold text with a black outline stays readable on any cell color.
            text_kwargs = {
                'ha': "center",
                'va': "center",
                'fontsize': 8,  # small enough to avoid overlap
                'fontweight': 'bold',
                'color': 'white',
                'path_effects': [
                    path_effects.withStroke(linewidth=1,
                                        foreground='black')
                ]
            }

            # Annotate scaled by 1000 for readability of small residuals.
            plt.text(j, i, f"{cell_value*1000.0:.2f}", **text_kwargs)
    plt.savefig(f"{save_dir}/eikonal_mse_matrix.png", dpi=300, transparent=True)
    plt.close(fig)  # release the figure to avoid memory buildup over many calls

    # Persist the raw values alongside the rendered image.
    with open(f"{save_dir}/eikonal_mse_matrix.csv", 'w', newline='') as file:
        writer = csv.writer(file)
        for row in eikonal_mse_matrix:
            writer.writerow(row)



def stitch_images(input_dir, output_path, n_rows, n_cols, quality=85, optimize=True):
    """
    Stitch multiple PNG tiles into one grid image, preserving transparency.

    Tiles are located by filename pattern "<row>#<col>#...BSSR.png" and pasted
    onto a fully transparent RGBA canvas. The output format follows the
    extension of `output_path` (JPEG drops the alpha channel; anything else
    is saved as PNG).

    Args:
        input_dir: directory containing the tile images
        output_path: path of the stitched output image
        n_rows: number of grid rows
        n_cols: number of grid columns
        quality: save quality (1-100), JPEG only
        optimize: whether to optimize storage on save

    Raises:
        FileNotFoundError: when no '*BSSR.png' tiles exist in `input_dir`.
    """
    print("开始拼接图片...")
    # Collect candidate tiles; sort so the sample image and duplicate-match
    # resolution are deterministic (os.listdir order is arbitrary).
    image_files = sorted(f for f in os.listdir(input_dir) if f.endswith('BSSR.png'))

    # Fail fast with a clear message instead of an IndexError on image_files[0].
    if not image_files:
        raise FileNotFoundError(f"no '*BSSR.png' images found in {input_dir}")

    # Warn when the tile count does not match the requested grid.
    expected_count = n_rows * n_cols
    if len(image_files) != expected_count:
        print(f"警告: 找到{len(image_files)}张图片，但期望{expected_count}张(行数×列数)")

    # All tiles are assumed to share the size of the first one.
    sample_img = Image.open(os.path.join(input_dir, image_files[0]))
    img_width, img_height = sample_img.size
    sample_img.close()

    # Fully transparent RGBA canvas sized for the whole grid.
    canvas_width = img_width * n_cols
    canvas_height = img_height * n_rows
    canvas = Image.new('RGBA', (canvas_width, canvas_height), (0, 0, 0, 0))

    # Paste tiles row by row, column by column.
    for i in range(n_rows):
        for j in range(n_cols):
            # Match "<i>#<j>#...BSSR.png" for this grid cell.
            pattern = re.compile(rf"{i}#{j}#.*BSSR\.png")
            matched_files = [f for f in image_files if pattern.match(f)]

            if not matched_files:
                print(f"警告: 未找到第{i}行第{j}列的图片")
                continue

            if len(matched_files) > 1:
                print(f"警告: 找到多个第{i}行第{j}列的图片，使用第一个")

            img_path = os.path.join(input_dir, matched_files[0])
            try:
                img = Image.open(img_path)
                # Ensure RGBA so the alpha channel survives pasting.
                if img.mode != 'RGBA':
                    img = img.convert('RGBA')

                # Pixel position of this tile on the canvas.
                x = j * img_width
                y = i * img_height
                canvas.paste(img, (x, y), img)  # tile doubles as mask to keep transparency
                img.close()
            except Exception as e:
                # Best-effort: report and continue so one bad tile doesn't abort the grid.
                print(f"无法处理图片 {img_path}: {str(e)}")

    # Save according to the output extension.
    ext = os.path.splitext(output_path)[1].lower()

    if ext in ('.jpg', '.jpeg'):
        # JPEG has no alpha channel — flatten to RGB first.
        canvas = canvas.convert('RGB')
        canvas.save(output_path, 'JPEG', quality=quality, optimize=optimize)
    else:
        # PNG (the default) preserves transparency.
        canvas.save(output_path, 'PNG', optimize=optimize, compress_level=6)

    print(f"拼接完成，已保存到 {output_path} (大小: {os.path.getsize(output_path)/1024:.2f}KB)")

def interpolation1D(index1, index2, alphas, save_directory):
    """
    Linearly interpolate between two trained sample indices and export each step.

    For every alpha the latent/global vectors of the two endpoints are blended,
    decoded to a field, and written as a VTK file (writes run in a process
    pool); the results are then visualized with Paraview.
    """
    from DEAD.AutoDecoder.Config import hidden_size, depth, data_num, latent_size, model_load_path
    os.makedirs(save_directory, exist_ok=True)
    dead = setup_dead_model(hidden_size, depth, data_num,
                            latent_size, model_load_path)
    # Latent/global vectors of the two interpolation endpoints
    lv1, gv1 = get_vector_from_index(dead, index1)
    lv2, gv2 = get_vector_from_index(dead, index2)

    # Parallelize the (slow) VTK export across worker processes.
    with concurrent.futures.ProcessPoolExecutor() as executor:
        pending = []

        for step, alpha in enumerate(alphas):
            # Blend both vector sets between the endpoints.
            lv = lv1 * (1 - alpha) + lv2 * alpha
            gv = gv1 * (1 - alpha) + gv2 * alpha

            gds, field_dict = decode_from_vector_to_field(dead, lv, gv)
            id = f"{step}#N{gds.n_slots}L{round(gds.lz*1000)}M{round(gds.m*10)}"
            vts_path = f"{save_directory}/{id}"
            # Submit the VTK write so decoding of the next step can proceed concurrently.
            pending.append(executor.submit(
                gds.save_as_vtk_in_cartesian, vts_path, field_dict))

        # Block until every export has finished.
        concurrent.futures.wait(pending)

        # Visualize the exported files with Paraview.
        #show_all_SR(save_directory)
        show_all_BS(save_directory)

def interpolation2D(index1, index2, index3, index4, alphas, save_directory, need_eikonal_mse=False):
    """
    Bilinearly interpolate between four trained sample indices and export a grid.

    The four indices are the corners of a square (index1 top-left, index2
    top-right, index3 bottom-left, index4 bottom-right). For every
    (alpha1, alpha2) pair the latent/global vectors are bilinearly blended,
    decoded to a field, and written as a VTK file (writes run in a process
    pool). Optionally the eikonal residual is evaluated per grid cell and the
    resulting matrix is plotted/saved. Results are visualized with Paraview.

    Args:
        index1, index2, index3, index4 (int): corner sample indices
        alphas: interpolation coefficients, used for both axes
        save_directory (str): output directory for VTK files
        need_eikonal_mse (bool): when True, also compute and plot the
            eikonal-MSE matrix over the interpolation grid
    """
    from DEAD.AutoDecoder.Config import hidden_size, depth, data_num, latent_size, model_load_path
    # Create the output directory if it does not exist
    os.makedirs(save_directory, exist_ok=True)
    # Initialize the DEAD model
    dead = setup_dead_model(hidden_size, depth, data_num,
                            latent_size, model_load_path)

    # Latent and global vectors of the four corners
    lv1, gv1 = get_vector_from_index(dead, index1)  # top-left
    lv2, gv2 = get_vector_from_index(dead, index2)  # top-right
    lv3, gv3 = get_vector_from_index(dead, index3)  # bottom-left
    lv4, gv4 = get_vector_from_index(dead, index4)  # bottom-right

    # Residual per grid cell, indexed [row, column] = [j, i]
    eikonal_mse_matrix = np.zeros((len(alphas), len(alphas)))

    # Process pool parallelizes the (slow) VTK export
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = []

        # Bilinear interpolation over the grid
        for i, alpha1 in enumerate(alphas):  # alpha1: horizontal coefficient
            # Interpolate along the top edge (index1 -> index2)
            lv_top = lv1 * (1 - alpha1) + lv2 * alpha1
            gv_top = gv1 * (1 - alpha1) + gv2 * alpha1

            # Interpolate along the bottom edge (index3 -> index4)
            lv_bottom = lv3 * (1 - alpha1) + lv4 * alpha1
            gv_bottom = gv3 * (1 - alpha1) + gv4 * alpha1

            for j, alpha2 in enumerate(alphas):  # alpha2: vertical coefficient
                # Interpolate vertically between the two edge points
                lv = lv_top * (1 - alpha2) + lv_bottom * alpha2
                gv = gv_top * (1 - alpha2) + gv_bottom * alpha2

                print(f"正在处理第{j}行第{i}列的插值")
                # Decode the blended vectors into a field
                gds, field_dict = decode_from_vector_to_field(dead, lv, gv)
                if need_eikonal_mse:
                    # Evaluate the eikonal equation residual for this cell
                    eikonal_mse = evaluate_eikonal_mse(dead, lv, gv, gds.lz, gds.n_slots, gds.m, num_points=10000)
                    eikonal_mse_matrix[j, i] = eikonal_mse
                # Unique filename encoding grid position and decoded geometry
                id = f"{j}#{i}#N{gds.n_slots}L{round(gds.lz*1000)}M{round(gds.m*10)}A{alpha1:.2f}B{alpha2:.2f}"
                vts_path = f"{save_directory}/{id}"
                # Submit the VTK write to the process pool
                future = executor.submit(
                    gds.save_as_vtk_in_cartesian, vts_path, field_dict)
                futures.append(future)

        # Wait for all exports to finish
        concurrent.futures.wait(futures)

        # Only report/plot the matrix when it was actually computed —
        # otherwise an all-zero heatmap and CSV would be written.
        if need_eikonal_mse:
            print(f"eikonal_mse: {eikonal_mse_matrix}")
            plot_eikonal_mse_matrix(eikonal_mse_matrix)

        # Visualize results with Paraview
        show_all_BSSR(save_directory)
        #show_all_SR(save_directory)  # optional: show SR results
        #show_all_BS(save_directory)  # optional: show BS results

if __name__ == "__main__":
    from DEAD.AutoDecoder.Config import model_load_path

    # Example: 1-D interpolation between two indices
    # index1, index2 = 2350, 7210
    # interpolation1D(index1=index1,
    #                 index2=index2,
    #                 alphas=np.linspace(0, 1, 31),
    #                 save_directory=f"{model_load_path}/{index1}-{index2}")

    # Four corner indices of the 2-D interpolation square
    index1 = 2053  # top-left
    index2 = 2061  # top-right
    #index3=1281
    #index4=1280
    index3 = 443   # bottom-left
    index4 = 453   # bottom-right
    save_directory = f"{model_load_path}/{index1}-{index2}-{index3}-{index4}"
    inter_num = 6  # interpolation steps per axis
    interpolation2D(index1=index1,
                    index2=index2,
                    index3=index3,
                    index4=index4,
                    alphas=np.linspace(0, 1, inter_num),  # same steps on both axes
                    save_directory=save_directory,  # reuse the variable instead of re-building the f-string
                    need_eikonal_mse=True)
    # Stitch the per-cell renders into one grid image
    stitch_images(save_directory, f"{save_directory}/join.png", inter_num, inter_num, quality=50, optimize=True)