import torch
import torch.distributed as dist
from PIL import Image
from torchvision import transforms
from models import VQVAE

def init_distributed(backend='nccl', init_method='tcp://127.0.0.1:23456',
                     world_size=1, rank=0):
    """Initialize the torch.distributed process group if not already done.

    The defaults reproduce the original single-process setup (rank 0 of a
    world of 1, rendezvous over local TCP); callers may override them to
    reuse this helper in a real multi-process launch.

    Args:
        backend: distributed backend name (default 'nccl', requires GPUs).
        init_method: rendezvous URL for the process group.
        world_size: total number of participating processes.
        rank: this process's rank.
    """
    # Guard so repeated calls (e.g. from several test functions) are no-ops.
    if not dist.is_initialized():
        dist.init_process_group(
            backend=backend,
            init_method=init_method,
            world_size=world_size,
            rank=rank,
        )

def test_reconstruction(image_path, model_path):
    """Reconstruct one image through a pretrained VQVAE and report the MSE.

    Saves the 256x256-resized original to 'original_256.png' and the
    reconstruction to 'recon.png' in the working directory, then prints the
    mean squared error between input and reconstruction.

    Args:
        image_path: path to the input image file.
        model_path: path to the pretrained VQVAE state-dict checkpoint.
    """
    # Set up the (single-process) distributed environment; some model code
    # presumably requires an initialized process group.
    init_distributed()

    # 1. Build the model on GPU (test_mode=True: inference configuration).
    vae = VQVAE(
        vocab_size=4096,
        z_channels=32,
        ch=160,
        share_quant_resi=4,
        v_patch_nums=(1, 2, 3, 4, 5, 6, 8, 10, 13, 16),
        test_mode=True,
    ).cuda()

    # Load pretrained weights. map_location='cpu' makes loading robust when
    # the checkpoint was saved on a different device; .cuda() above means the
    # parameters end up on GPU regardless.
    checkpoint = torch.load(model_path, map_location='cpu')
    # If the weights were saved from a DDP-wrapped model, strip the leading
    # 'module.' prefix only — str.replace would also mangle any key that
    # happens to contain 'module.' elsewhere in its name.
    if any(key.startswith('module.') for key in checkpoint):
        checkpoint = {
            (k[len('module.'):] if k.startswith('module.') else k): v
            for k, v in checkpoint.items()
        }
    vae.load_state_dict(checkpoint)
    vae.eval()

    # 2. Image preprocessing: resize to the model's 256x256 input and map to
    # a [0, 1] float tensor.
    transform = transforms.Compose([
        transforms.Resize((256, 256)),
        transforms.ToTensor(),
    ])

    # Force RGB: JPEGs can be grayscale or CMYK, which would yield the wrong
    # channel count for a 3-channel model.
    img = Image.open(image_path).convert('RGB')
    img_tensor = transform(img).unsqueeze(0).cuda()

    # Save the resized original for side-by-side comparison.
    resized_img = transforms.Resize((256, 256))(img)
    resized_img.save('original_256.png')

    # 3. Reconstruct (no gradients needed at inference time).
    with torch.no_grad():
        recon, _, _ = vae(img_tensor)

    # Clamp to the valid [0, 1] range, then convert CHW float -> HWC uint8.
    recon = torch.clamp(recon, 0, 1)
    recon_img = recon.squeeze(0).cpu().permute(1, 2, 0).numpy()
    recon_img = (recon_img * 255).astype('uint8')
    Image.fromarray(recon_img).save('recon.png')

    # Report the reconstruction error against the (clamped) output.
    mse = torch.mean((img_tensor - recon) ** 2)
    print(f'重建均方误差: {mse.item():.6f}')

if __name__ == '__main__':
    # Sample image and pretrained checkpoint locations — edit to your paths.
    sample_image = '/ifs/root/ipa01/101/user_101002/Project/CAR/images/ILSVRC2012_val_00003014.JPEG'
    vae_checkpoint = '/ifs/root/ipa01/101/user_101002/Download_Files/CAR_Ckpt/vae_ch160v4096z32.pth'
    test_reconstruction(sample_image, vae_checkpoint)
