import argparse
import os
import glob
import cv2
import numpy as np
import torch
from basicsr.archs.femasr_arch import FeMaSRNet

def load_model(model_path, device, codebook_params, gt_resolution, use_semantic_loss=True):
    """Build a FeMaSRNet and load pretrained weights onto *device*.

    Args:
        model_path: path to a ``.pth`` checkpoint — either a bare state dict
            or a BasicSR-style dict wrapping the weights under ``'params'``.
        device: torch device the weights are mapped to and the model moved to.
        codebook_params: codebook configuration forwarded to FeMaSRNet.
        gt_resolution: GT resolution the network was configured with.
        use_semantic_loss: whether to build the semantic-loss branch.
            (Previously this argument was silently ignored and ``False`` was
            hard-coded, contradicting both the signature and the caller.)

    Returns:
        The model in eval mode on *device*.
    """
    model = FeMaSRNet(
        in_channel=3,
        codebook_params=codebook_params,
        gt_resolution=gt_resolution,
        LQ_stage=False,        # HQ pretrain stage per the training config
        use_quantize=True,
        use_semantic_loss=use_semantic_loss,  # fix: forward the argument instead of hard-coding False
        use_residual=False,    # NOTE(review): training config reportedly uses True — confirm
        scale_factor=1,        # scale_factor is 1 in the training config
    )
    ckpt = torch.load(model_path, map_location=device)
    # BasicSR checkpoints wrap the weights under a 'params' key.
    if isinstance(ckpt, dict) and 'params' in ckpt:
        state = ckpt['params']
    else:
        state = ckpt
    # strict=False tolerates missing/unexpected keys (e.g. loss-only submodules).
    model.load_state_dict(state, strict=False)
    model.eval().to(device)
    return model

def pad_to_multiple_of_8(img, multiple=8):
    """Reflect-pad the bottom/right of *img* so H and W are multiples of *multiple*.

    Args:
        img: ``[H, W, C]`` array.
        multiple: alignment required by the network's downsampling factor.
            Defaults to 8, preserving the original behavior.

    Returns:
        ``(padded_img, (orig_h, orig_w))`` — the original size is returned so
        the network output can be cropped back after inference.
    """
    h, w = img.shape[:2]
    # (-x) % m is the smallest non-negative amount that rounds x up to a multiple of m.
    pad_h = (-h) % multiple
    pad_w = (-w) % multiple
    if pad_h == 0 and pad_w == 0:
        return img, (h, w)
    # Reflect padding avoids the hard black border that zero padding would create.
    img_padded = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), mode='reflect')
    return img_padded, (h, w)

def to_tensor(img_bgr):
    """Convert a [H,W,3] BGR float32 image in [0,1] to a [1,3,H,W] RGB torch tensor."""
    rgb = img_bgr[..., ::-1]                      # reverse channel axis: BGR -> RGB
    chw = np.transpose(rgb, (2, 0, 1))            # HWC -> CHW
    contiguous = np.ascontiguousarray(chw)        # from_numpy needs non-negative strides
    return torch.from_numpy(contiguous).float().unsqueeze(0)

def find_gt(hazy_name, gt_root):
    """Locate the clean (GT) image that pairs with a hazy image filename.

    SOTS-outdoor naming convention: hazy '0001_0.8_0.2.png' pairs with GT
    '0001.<ext>'. Extensions are probed in a fixed order; returns the first
    existing path, or None when no candidate file is found.
    """
    stem = os.path.splitext(hazy_name)[0].split('_')[0]
    for ext in ('.png', '.jpg', '.JPG', '.PNG'):
        candidate = os.path.join(gt_root, stem + ext)
        if os.path.isfile(candidate):
            return candidate
    return None

def main() -> None:
    """Batch-run FeMaSR reconstruction over SOTS outdoor hazy/GT image pairs.

    For each hazy image that has a matching GT image, pads both to the
    network-friendly size, runs inference, crops the output back to the
    original size, and writes it as '<name>_restored.png' under --output.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--model_path', type=str,
                        default='/root/code/fe-ma-sr/FeMaSR_HRP_model_g.pth',
                        help='预训练模型权重路径')
    parser.add_argument('--hazy_root', type=str, default='/root/data/Outdoor/SOTS/outdoor/hazy/', help='有雾图像输入文件夹')
    parser.add_argument('--gt_root', type=str, default='/root/data/Outdoor/SOTS/outdoor/gt/', help='清晰图像(GT)输入文件夹')
    parser.add_argument('--output', type=str, default='/root/code/fe-ma-sr/result-restored', help='结果输出文件夹')
    args = parser.parse_args()

    os.makedirs(args.output, exist_ok=True)
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Model hyper-parameters taken from the training config
    # (train_DualCodeBook_HQ_pretrain_stage.yml).
    gt_resolution = 256
    codebook_params = [[32, 1024, 512]]
    use_semantic_loss = True

    # Load the pretrained network.
    model = load_model(args.model_path, device, codebook_params, gt_resolution, use_semantic_loss)
    print(f'模型已加载: {args.model_path}')

    hazy_list = sorted(glob.glob(os.path.join(args.hazy_root, '*')))
    if not hazy_list:
        print(f'未在文件夹中找到任何有雾图像: {args.hazy_root}')
        return

    print(f'开始处理 {len(hazy_list)} 张图像...')
    codebook_usage = 0  # NOTE(review): never updated or reported below — dead variable?
    for idx, hazy_path in enumerate(hazy_list):
        hazy_name = os.path.basename(hazy_path)
        gt_path = find_gt(hazy_name, args.gt_root)
        if gt_path is None:
            print(f'[{idx+1}/{len(hazy_list)}] 跳过 {hazy_name}: 未找到对应的GT图像')
            continue

        print(f'[{idx+1}/{len(hazy_list)}] 正在处理: {hazy_name}')

        img_hazy = cv2.imread(hazy_path, cv2.IMREAD_COLOR)
        img_gt = cv2.imread(gt_path, cv2.IMREAD_COLOR)
        if img_hazy is None or img_gt is None:
            print(f'  -> 读取图像失败: {hazy_name} 或 {gt_path}')
            continue

        # Normalize pixel values to [0, 1].
        img_hazy = img_hazy.astype(np.float32) / 255.0
        img_gt = img_gt.astype(np.float32) / 255.0

        # Pad H/W up to multiples of 8 and keep the original size so the
        # network output can be cropped back afterwards.
        img_hazy_pad, (orig_h, orig_w) = pad_to_multiple_of_8(img_hazy)
        img_gt_pad, _ = pad_to_multiple_of_8(img_gt)

        # If the padded pair still differs in size, pad both up to the common
        # maximum so the two tensors share one shape.
        if img_hazy_pad.shape[:2] != img_gt_pad.shape[:2]:
            H = max(img_hazy_pad.shape[0], img_gt_pad.shape[0])
            W = max(img_hazy_pad.shape[1], img_gt_pad.shape[1])

            def pad_to_target_size(img, target_h, target_w):
                # Reflect-pad bottom/right so img reaches (target_h, target_w).
                h, w = img.shape[:2]
                pad_h = target_h - h
                pad_w = target_w - w
                if pad_h > 0 or pad_w > 0:
                    img = np.pad(img, ((0, pad_h), (0, pad_w), (0, 0)), mode='reflect')
                return img

            img_hazy_pad = pad_to_target_size(img_hazy_pad, H, W)
            img_gt_pad = pad_to_target_size(img_gt_pad, H, W)

        # Convert to [1,3,H,W] tensors on the target device.
        t_hazy = to_tensor(img_hazy_pad).to(device)
        t_gt = to_tensor(img_gt_pad).to(device)

        # Model inference.
        # NOTE(review): the GT tensor is fed to the network while t_hazy is
        # prepared but never used — presumably this evaluates HQ-stage
        # reconstruction of the clean image; confirm it is not meant to be
        # model(t_hazy). The unpacking assumes a 4-tuple return
        # (image, codebook_loss, semantic_loss, indices) — verify against
        # FeMaSRNet.forward.
        with torch.no_grad():
            out_img, codebook_loss, semantic_loss, indices_list = model(t_gt)

        # Post-process and save: drop batch dim, CHW->HWC, RGB->BGR, crop the
        # padding, scale [0,1] float to uint8.
        out_clear_np = out_img.float().cpu().clamp_(0, 1).numpy()[0]
        out_clear_np = np.transpose(out_clear_np, (1, 2, 0))[:, :, ::-1]  # RGB->BGR
        out_clear_np = out_clear_np[:orig_h, :orig_w]
        out_clear_np = (out_clear_np * 255.0).round().astype(np.uint8)
        save_name = os.path.splitext(hazy_name)[0] + '_restored.png'
        cv2.imwrite(os.path.join(args.output, save_name), out_clear_np)

# Script entry point: run only when executed directly, not on import.
if __name__ == '__main__':
    main()