import os
import sys

# 添加项目根目录到Python路径，确保能找到basicsr模块
# Best-effort bootstrap: make the project root importable so the `basicsr`
# package resolves even when this script is launched from its own folder.
# Any failure here (e.g. no __file__ in an embedded interpreter) is ignored;
# the import below then relies on the environment's normal search path.
try:
    this_dir = os.path.dirname(os.path.abspath(__file__))
    # The project root is expected to be the parent of the script directory.
    root_dir = os.path.dirname(this_dir)
    # Only prepend the candidate root when it actually contains basicsr/
    # and is not already on the path.
    if os.path.exists(os.path.join(root_dir, 'basicsr')) and root_dir not in sys.path:
        sys.path.insert(0, root_dir)
except Exception:
    # Deliberate best-effort: never let path fix-up abort the script.
    pass

import argparse
import cv2
import glob
import numpy as np
import torch
from tqdm import tqdm

from basicsr.archs.srcnn_arch import SRCNNArch
from basicsr.utils.img_util import img2tensor, tensor2img

def main():
    """Run SRCNN super-resolution inference over a folder of images.

    Command-line arguments:
        --scale: upscale factor (2 or 4); also selects the scale-dependent
            default model checkpoint and input folder.
        --model_path: path to the pretrained SRCNN checkpoint (a .pth file
            whose state dict is stored under the 'params' key).
        --input: folder of low-resolution test images (.jpg / .png).
        --output: folder where the upscaled images are written as PNG.
    """
    # A minimal pre-parser extracts --scale first, so the scale-dependent
    # defaults below exist before the full parser is built.
    base_parser = argparse.ArgumentParser(add_help=False)
    base_parser.add_argument('--scale', type=int, default=2, choices=[2, 4], help='upscale factor')
    base_args, _ = base_parser.parse_known_args()

    # Defaults that depend on the chosen scale.
    default_model_path = f'experiments/pretrained_models/srcnn/srcnn_x{base_args.scale}.pth'
    default_input_path = f'datasets/Set14/LRbicx{base_args.scale}'

    # Full parser; --scale is inherited from the base parser via parents=.
    parser = argparse.ArgumentParser(description='SRCNN inference script', parents=[base_parser])
    parser.add_argument(
        '--model_path',
        type=str,
        default=default_model_path,
        help='path to the SRCNN model')
    parser.add_argument('--input', type=str, default=default_input_path, help='input test image folder')
    parser.add_argument('--output', type=str, default='results/SRCNN', help='output folder')
    args = parser.parse_args()

    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Set up model: fixed 3-channel input/output, 64 feature channels,
    # upsampling factor taken from the command line.
    model = SRCNNArch(
        num_in_ch=3,
        num_out_ch=3,
        num_feat=64,
        upscale=args.scale)

    # Load model weights; checkpoints store the state dict under 'params'.
    model.load_state_dict(torch.load(args.model_path, map_location=device)['params'], strict=True)
    model.eval()
    model = model.to(device)

    # Create output directory
    os.makedirs(args.output, exist_ok=True)

    # Collect .jpg/.png inputs. NOTE: the previous character-class pattern
    # '*.[jp][pn]g' also matched bogus '.jng'/'.ppg' names; enumerate the
    # real extensions explicitly instead.
    img_list = sorted(
        glob.glob(os.path.join(args.input, '*.jpg'))
        + glob.glob(os.path.join(args.input, '*.png')))

    # Process each image
    pbar = tqdm(total=len(img_list), desc='Processing images')
    for idx, img_path in enumerate(img_list):
        img_name = os.path.splitext(os.path.basename(img_path))[0]
        pbar.update(1)
        pbar.set_description(f'{idx+1}/{len(img_list)}: {img_name}')

        # cv2.imread returns None on failure; skip unreadable files instead
        # of crashing on the subsequent .astype call.
        img = cv2.imread(img_path, cv2.IMREAD_COLOR)
        if img is None:
            print(f'Warning: failed to read {img_path}, skipping.')
            continue
        img = img.astype(np.float32) / 255.0

        # HWC BGR float image -> 1xCxHxW RGB tensor on the target device.
        img_tensor = img2tensor(img, bgr2rgb=True, float32=True).unsqueeze(0).to(device)

        # Inference without gradient tracking.
        with torch.no_grad():
            output_tensor = model(img_tensor)

        # Tensor back to a uint8 BGR image, clamped to [0, 1] before scaling.
        output = tensor2img(output_tensor, rgb2bgr=True, out_type=np.uint8, min_max=(0, 1))

        # Save output image
        save_path = os.path.join(args.output, f'{img_name}_SRCNN_x{args.scale}.png')
        cv2.imwrite(save_path, output)

    pbar.close()
    print(f'All images processed and saved to {args.output}')


# Standard script entry guard: run inference only when executed directly,
# not when this module is imported.
if __name__ == '__main__':
    main()