import os
import cv2
import torch
import numpy as np
from basicsr.archs.dualCodeBook_arch import DualCodeBookNet
from basicsr.utils.img_util import img2tensor, tensor2img

# Script for loading a trained model and running dehazing inference on a test set.

def main():
    """Run DualCodeBookNet dehazing inference over the SOTS outdoor test set.

    Loads the pretrained generator, pairs each hazy image with its ground-truth
    counterpart (same basename, ``.png`` extension in the GT folder), runs the
    model, and writes the restored (clear) output to ``results/outdoor_test``.
    """
    # Model configuration from the YAML file
    model_config = {
        'type': 'DualCodeBookNet',
        'gt_resolution': 256,
        'in_channel': 3,
        'norm_type': 'gn',
        'act_type': 'silu',
        'use_semantic_loss': True,
        'codebook_params': [[32, 1024, 512]],
        'LQ_stage': True,
        'use_quantize': True,
        'scale_factor': 1
    }

    # Device
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Define the model
    model = DualCodeBookNet(**model_config)
    model.to(device)
    model.eval()

    # Load the pretrained weights.
    # map_location=device is required so a GPU-saved checkpoint still loads on
    # a CPU-only machine; strict=False tolerates keys absent at inference time.
    model_path = '/root/code/fe-ma-sr/experiments/train_dualcodebook_dehazing_Outdoor/models/net_g_best_.pth'
    state = torch.load(model_path, map_location=device)
    model.load_state_dict(state['params'], strict=False)

    # Input and output directories
    hazy_folder = '/root/data/Outdoor/SOTS_restoration/SOTS/outdoor/hazy'
    gt_folder = '/root/data/Outdoor/SOTS_restoration/SOTS/outdoor/gt'
    output_folder = 'results/outdoor_test'
    os.makedirs(output_folder, exist_ok=True)

    # Process each image
    for img_name in os.listdir(hazy_folder):
        hazy_img_path = os.path.join(hazy_folder, img_name)

        # Ground truth shares the hazy image's basename but is stored as .png.
        # splitext is used instead of naive string splitting so names that do
        # not contain 'jpg' (e.g. already-.png inputs) are handled correctly.
        # NOTE(review): if hazy names carry a haze-level suffix (e.g.
        # "1_0.8.png" -> "1.png"), extra stripping would be needed here.
        base_name, _ = os.path.splitext(img_name)
        gt_img_path = os.path.join(gt_folder, base_name + '.png')

        if not os.path.exists(gt_img_path):
            print(f"Warning: Ground truth for {img_name} not found at {gt_img_path}, skipping.")
            continue

        print(f"Processing {img_name}...")

        # Read images; cv2.imread returns None on failure (corrupt file,
        # non-image entry in the folder), so skip those instead of crashing.
        img_hazy = cv2.imread(hazy_img_path, cv2.IMREAD_COLOR)
        img_gt = cv2.imread(gt_img_path, cv2.IMREAD_COLOR)
        if img_hazy is None or img_gt is None:
            print(f"Warning: Failed to read {img_name} or its ground truth, skipping.")
            continue

        # Pre-process images: scale to [0, 1], BGR->RGB, NCHW batch of 1
        img_hazy_tensor = img2tensor(img_hazy / 255., bgr2rgb=True, float32=True).unsqueeze(0).to(device)
        img_gt_tensor = img2tensor(img_gt / 255., bgr2rgb=True, float32=True).unsqueeze(0).to(device)

        # Inference
        with torch.no_grad():
            # The model's forward pass takes both the hazy and clear inputs
            output_hazy, output_clear, _, _, _ = model(img_hazy_tensor, img_gt_tensor)

        # Post-process the output image we are interested in (the clear one)
        output_img = tensor2img(output_clear, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1))

        # Save the result
        save_path = os.path.join(output_folder, img_name)
        cv2.imwrite(save_path, output_img)

    print(f"Testing finished. Results are saved in {output_folder}")

# Run inference only when executed as a script, not on import.
if __name__ == '__main__':
    main()
