import os
import glob
import cv2
import numpy as np
import torch
import math
from basicsr.archs.dualCodeBook_arch import DualCodeBookNet
from basicsr.utils import img2tensor, tensor2img, imwrite

def load_model(model_path, device, codebook_params, gt_resolution, use_semantic_loss=False):
    """Build a DualCodeBookNet restorer and load weights from a checkpoint.

    Args:
        model_path: Path to a ``.pth`` checkpoint (either a raw state dict or
            a dict with a ``'params'`` key, as saved by basicsr).
        device: Target ``torch.device`` for inference.
        codebook_params: Codebook configuration passed through to the arch.
        gt_resolution: Ground-truth resolution the network was trained at.
        use_semantic_loss: Whether the checkpoint was trained with the
            semantic loss head — TODO confirm against training config.

    Returns:
        The network in eval mode, moved to ``device``.
    """
    net = DualCodeBookNet(
        in_channel=3,
        codebook_params=codebook_params,
        gt_resolution=gt_resolution,
        LQ_stage=True,
        use_quantize=True,
        use_semantic_loss=use_semantic_loss,
        use_residual=True,
        scale_factor=1,
        short_cut=True,
        quantize_way='min',
        former=True,
    )
    checkpoint = torch.load(model_path, map_location=device)
    # basicsr checkpoints wrap the weights under 'params'; otherwise assume
    # the file is the state dict itself.
    is_wrapped = isinstance(checkpoint, dict) and 'params' in checkpoint
    state_dict = checkpoint['params'] if is_wrapped else checkpoint
    # strict=False tolerates missing/extra keys (e.g. loss-only modules).
    net.load_state_dict(state_dict, strict=False)
    net.to(device)
    net.eval()
    return net

def pad_to_multiple(img, multiple=32):
    """Pad the spatial dims (H, W) of ``img`` up to a multiple of ``multiple``.

    Prevents size-mismatch errors when the network downsamples: padding is
    applied only on the bottom/right so the original content stays at the
    top-left and can be cropped back out with ``out[:h, :w]``.

    Args:
        img: ``np.ndarray`` of shape (H, W) or (H, W, C[, ...]); only the
            first two axes are padded.
        multiple: The divisor both spatial dims are rounded up to.

    Returns:
        Tuple ``(img_padded, (h, w))`` where ``(h, w)`` is the original
        spatial size. The input is returned unchanged when already aligned.
    """
    h, w = img.shape[:2]
    # -x % m is the amount needed to round x up to the next multiple of m.
    pad_h = -h % multiple
    pad_w = -w % multiple

    if pad_h == 0 and pad_w == 0:
        return img, (h, w)

    # Build the pad spec dynamically so 2-D (grayscale) and N-D inputs both
    # work; the original hard-coded a 3-axis spec and crashed on 2-D arrays.
    pad_width = [(0, pad_h), (0, pad_w)] + [(0, 0)] * (img.ndim - 2)
    # Reflect padding avoids an abrupt edge at the border. NOTE: numpy's
    # 'reflect' cannot pad a size-1 axis (nothing to reflect).
    img_padded = np.pad(img, pad_width, mode='reflect')
    return img_padded, (h, w)

def main():
    """Run low-light restoration inference over every image in ``hazy_root``.

    Loads the DualCodeBookNet checkpoint once, then for each input image:
    reads it, pads H/W to a multiple of 32, runs the model, crops the output
    back to the original size, and writes a PNG to ``output_dir``.
    """
    # Path configuration
    model_path = '/root/code/fe-ma-sr/experiments/LQ_LLIE_LRFormer_PSFM_skip/models/net_g_best_.pth'
    hazy_root = '/root/data/data/NPE/'
    output_dir = '/root/code/fe-ma-sr/result-lowlight-restored-dualbook-min/lrformer_sdfm_NPE/'

    os.makedirs(output_dir, exist_ok=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Model hyper-parameters (must match the training config of the checkpoint)
    gt_resolution = 256
    codebook_params = [[32, 1024, 512]]
    use_semantic_loss = False

    # Load the network once, outside the per-image loop.
    model = load_model(model_path, device, codebook_params, gt_resolution, use_semantic_loss)
    print(f"Model loaded from {model_path}")

    hazy_list = sorted(glob.glob(os.path.join(hazy_root, '*')))
    print(f"Found {len(hazy_list)} images.")

    for hazy_path in hazy_list:
        hazy_name = os.path.basename(hazy_path)

        # 1. Read the image; skip unreadable files (and directories matched
        #    by the glob, for which imread also returns None).
        img_hazy = cv2.imread(hazy_path, cv2.IMREAD_UNCHANGED)
        if img_hazy is None:
            continue

        # Promote grayscale to 3-channel BGR so the 3-channel network accepts it.
        if len(img_hazy.shape) == 2:
            img_hazy = cv2.cvtColor(img_hazy, cv2.COLOR_GRAY2BGR)

        # Normalize to [0, 1] float.
        img_hazy = img_hazy.astype(np.float32) / 255.0

        # 2. Pad H/W to a multiple of 32 — non-aligned sizes (e.g. 367x500)
        #    otherwise break tensor concatenation in the down/up-sampling path.
        img_hazy_padded, (orig_h, orig_w) = pad_to_multiple(img_hazy, multiple=32)

        # 3. Convert to a (1, C, H, W) tensor and run inference.
        hazy_tensor = img2tensor(img_hazy_padded, bgr2rgb=True, float32=True).unsqueeze(0).to(device)

        with torch.no_grad():
            # The model may return multiple outputs; only out_clear is needed.
            ret = model(hazy_tensor, None)
            if isinstance(ret, (list, tuple)):
                # Expected order: (_, out_clear, ...). Fall back to the sole
                # element for a 1-tuple — the original ret[1] raised IndexError.
                out_clear = ret[1] if len(ret) > 1 else ret[0]
            else:
                out_clear = ret  # defensive: model returned a bare tensor

        # 4. Convert back to a uint8 BGR numpy image.
        out_clear_np = tensor2img(out_clear, rgb2bgr=True, min_max=(0, 1))

        # 5. Crop away the padding to restore the original size.
        out_clear_np = out_clear_np[:orig_h, :orig_w, :]

        # Save as PNG regardless of the input extension.
        base_name = os.path.splitext(hazy_name)[0]
        save_path = os.path.join(output_dir, f"{base_name}.png")
        imwrite(out_clear_np, save_path)
        print(f"Processed: {save_path}")

# Script entry point: run inference only when executed directly, not on import.
if __name__ == '__main__':
    main()