from volprims_linear import *
import matplotlib.pyplot as plt
from PIL import Image
import numpy as np
import torch.optim as optim
import os

# Enable autograd anomaly detection to help diagnose gradient-computation issues
# torch.autograd.set_detect_anomaly(True)

def load_target_image(image_path, image_size=128):
    """
    Load the target image and convert it to a torch.Tensor.

    The image is forced to RGBA before any processing, which also handles
    grayscale ("L"), palette ("P"), and plain RGB files — previously a
    grayscale file crashed on ``image_np.shape[2]`` (2-D array).

    Args:
        image_path: Path to the image file.
        image_size: Side length of the square output image.

    Returns:
        Float32 tensor of shape (image_size, image_size, 4) on
        ``default_device``, values normalized to [0, 1]. If the file does
        not exist, a fallback image (white circle, alpha 0.8) is returned.
    """
    if os.path.exists(image_path):
        # convert("RGBA") gives an opaque alpha channel (255) for RGB/L/P
        # inputs, matching the old manual alpha concatenation for RGB.
        image = Image.open(image_path).convert("RGBA")
        image = image.resize((image_size, image_size), Image.Resampling.LANCZOS)
        image_np = np.array(image) / 255.0  # normalize to [0, 1]
        return torch.tensor(image_np, dtype=torch.float32, device=default_device)

    print(f"目标图像 {image_path} 不存在，将使用默认图像")
    # Fallback: white circle (alpha 0.8) centered on a transparent background.
    image_tensor = torch.zeros((image_size, image_size, 4), dtype=torch.float32, device=default_device)
    center = image_size // 2
    radius = image_size // 4
    # Vectorized mask instead of the former O(n^2) Python double loop.
    coords = torch.arange(image_size, device=default_device)
    yy, xx = torch.meshgrid(coords, coords, indexing='ij')
    inside = ((yy - center) ** 2 + (xx - center) ** 2) < radius ** 2
    image_tensor[inside] = torch.tensor([1.0, 1.0, 1.0, 0.8], device=default_device)
    return image_tensor

def compute_loss(rendered_image, target_image):
    """
    L2 loss between a rendered RGBA image and the target RGBA image.

    The render's RGB channels are un-premultiplied (divided by alpha,
    clamped away from zero) before comparison; the alpha channel is
    compared as-is. Every operation is out-of-place so autograd through
    the renderer stays valid.

    Returns:
        Scalar tensor: rgb_loss + 0.5 * alpha_loss.
    """
    # Clone slices so downstream math never aliases the rendered tensor.
    rgb = rendered_image[:, :, :3].clone()
    alpha = rendered_image[:, :, 3:4].clone()
    demultiplied = rgb / torch.clamp(alpha, min=1e-6)

    # Squared residuals via multiplication (keeps the graph simple).
    rgb_residual = demultiplied - target_image[:, :, :3]
    alpha_residual = rendered_image[:, :, 3:4] - target_image[:, :, 3:4]

    rgb_loss = (rgb_residual * rgb_residual).mean()
    alpha_loss = (alpha_residual * alpha_residual).mean()

    # Weighted sum: alpha mismatch counts half as much as color mismatch.
    return rgb_loss + 0.5 * alpha_loss

def setup_camera():
    """
    Build the same pinhole camera used in forward_test_linear.py.

    The camera sits behind and above the origin (negative y, positive z,
    both at 1.5 * world_radius) and looks at the world origin with a
    60-degree field of view.
    """
    eye = torch.tensor(
        [0.0, -world_radius * 1.5, world_radius * 1.5],
        device=default_device,
    )
    target = torch.tensor([0.0, 0.0, 0.0], device=default_device)
    # NOTE(review): this up vector is not unit length — presumably
    # PinHoleCamera normalizes it internally; confirm.
    up = torch.tensor([0.0, 1.0, 1.0], device=default_device)

    return PinHoleCamera(
        position=eye,
        look_at=target,
        up=up,
        fov=60.0,
        device=default_device,
    )

def save_image(image_tensor, filename):
    """
    Save an image tensor to a file via matplotlib.

    The tensor is detached from the graph and moved to CPU first; the
    figure is written without axes or padding and then closed so no
    figure handles leak across repeated calls.
    """
    pixels = image_tensor.detach().cpu().numpy()
    figure = plt.figure(figsize=(6, 6))
    plt.imshow(pixels)
    plt.axis('off')
    plt.savefig(filename, bbox_inches='tight', pad_inches=0)
    plt.close(figure)

def _unpremultiply(image):
    """Divide RGB by (clamped) alpha, out-of-place; return an RGBA tensor."""
    rgb = image[:, :, :3].clone()
    alpha = image[:, :, 3:4].clone()
    return torch.cat([rgb / torch.clamp(alpha, min=1e-6), alpha], dim=2)


def _create_live_figure(target_image, image_width, image_height):
    """Create the interactive 3-panel figure: render, target, loss curve."""
    plt.ion()  # interactive mode so the window refreshes mid-optimization
    fig, axes = plt.subplots(1, 3, figsize=(15, 5))

    display_img = axes[0].imshow(np.zeros((image_height, image_width, 4)))
    axes[0].set_title("Current Rendered Image")
    axes[0].axis('off')

    axes[1].imshow(target_image.cpu().numpy())
    axes[1].set_title("Target Image")
    axes[1].axis('off')

    loss_plot, = axes[2].plot([], [])
    axes[2].set_title("Loss Curve")
    axes[2].set_xlabel("Iteration")
    axes[2].set_ylabel("Loss")
    axes[2].grid(True)

    plt.tight_layout()
    return fig, axes, display_img, loss_plot


def main():
    """
    Entry point: load the target image from sample.png and optimize the
    volume primitives until the rendered image matches it, with a live
    3-panel visualization and periodic PNG snapshots.
    """
    # Hyper-parameters.
    image_width = 64
    image_height = 64
    num_primitives = 128   # number of volume primitives to fit
    num_iterations = 1000  # optimization steps
    learning_rate = 0.02

    # Target image and camera (same camera as forward_test_linear.py).
    target_image = load_target_image('sample.png', image_height)
    camera = setup_camera()

    # Create and randomly initialize the primitives, then enable gradients
    # on every learnable parameter.
    primitives = UniformVolumePrimitives(num_spheres=num_primitives)
    primitives.random_initialization(device=default_device)
    params = [
        primitives.centers,
        primitives.scales,
        primitives.rotations,
        primitives.opacities,
        primitives.colors,
    ]
    for p in params:
        p.requires_grad_(True)

    optimizer = optim.Adam(params, lr=learning_rate)

    fig, axes, display_img, loss_plot = _create_live_figure(
        target_image, image_width, image_height)

    print(f"开始优化 {num_primitives} 个 volume primitives...")
    losses = []

    for iteration in range(num_iterations):
        optimizer.zero_grad()

        rendered_image = render_scene(camera, primitives, image_width, image_height)
        loss = compute_loss(rendered_image, target_image)
        losses.append(loss.item())

        loss.backward()
        optimizer.step()

        # Refresh the live view every 2 iterations (more often hurts speed).
        if iteration % 2 == 0:
            with torch.no_grad():
                display_image = _unpremultiply(rendered_image).cpu().numpy()
                # Matplotlib rejects float RGBA data outside [0, 1], so clip.
                display_img.set_data(np.clip(display_image, 0.0, 1.0))

                loss_plot.set_data(range(len(losses)), losses)
                axes[2].relim()
                axes[2].autoscale_view()

                plt.suptitle(f"Iter {iteration + 1}/{num_iterations}, Loss: {loss.item():.6f}")
                fig.canvas.draw_idle()
                plt.pause(0.01)

        # Progress printout and periodic snapshot to disk.
        if (iteration + 1) % 10 == 0:
            print(f"Iter {iteration + 1}/{num_iterations}, Loss: {loss.item():.6f}")
            snapshot = rendered_image.detach().cpu().numpy()
            # imsave raises on float RGBA values outside [0, 1]; clip first.
            plt.imsave(f"iter_{iteration + 1}.png", np.clip(snapshot, 0.0, 1.0))

    plt.ioff()  # leave interactive mode

    # Final loss curve.
    plt.figure(figsize=(10, 5))
    plt.plot(losses)
    plt.title("Loss Curve")
    plt.xlabel("Iteration")
    plt.ylabel("Loss")
    plt.grid(True)
    plt.savefig("optimization_loss.png")
    plt.show()

    # Final render (no gradients needed) and side-by-side comparison.
    with torch.no_grad():
        final_rendered_image = render_scene(camera, primitives, image_width, image_height)
        normalized_final_rendered = _unpremultiply(final_rendered_image)

    plt.figure(figsize=(12, 6))

    plt.subplot(1, 2, 1)
    plt.imshow(target_image.cpu().numpy())
    plt.title("Target Image")
    plt.axis('off')

    plt.subplot(1, 2, 2)
    plt.imshow(normalized_final_rendered.cpu().numpy())
    plt.title("Optimized Rendered Image")
    plt.axis('off')

    plt.tight_layout()
    plt.savefig("comparison.png")
    plt.show()

    print("优化完成！最终损失:", losses[-1])

# Run the optimization only when executed as a script, not on import.
if __name__ == "__main__":
    main()