"""
统一推理脚本
对所有baseline模型进行推理并输出结果
"""
import os
import sys
import argparse
import importlib
import torch
from configs.kfold_config import ALL_MODELS, MODEL_CONFIGS
from utils.infer_utils import inference_single_image, load_model_for_inference


def main():
    """Run single-image inference with every requested baseline model.

    Parses CLI arguments, then for each model name (default: all models in
    ``ALL_MODELS``) dynamically imports the model class and its config,
    loads the checkpoint for the chosen fold, runs inference on the input
    image, and finally prints a summary table with inference time,
    foreground-pixel count, and output path per model.

    Raises:
        FileNotFoundError: if the input image path does not exist.
    """
    parser = argparse.ArgumentParser(description='对所有模型进行推理')
    parser.add_argument('--image', type=str, required=True,
                        help='输入图像路径')
    parser.add_argument('--models', type=str, nargs='+', default=None,
                        help='指定要推理的模型（默认所有）')
    parser.add_argument('--fold', type=int, default=0,
                        help='使用哪一折的模型（默认0）')
    parser.add_argument('--checkpoint', type=str, default='best.pth',
                        help='检查点文件名（best.pth或last.pth）')
    parser.add_argument('--threshold', type=float, default=0.5,
                        help='二值化阈值')
    parser.add_argument('--device', type=str, default='cuda',
                        help='设备 (cuda/cpu)')

    args = parser.parse_args()

    # Validate the input image path early, before touching any model.
    if not os.path.exists(args.image):
        raise FileNotFoundError(f"输入图像不存在: {args.image}")

    # Decide which models to run (explicit --models list, or all of them).
    models_to_infer = args.models if args.models is not None else ALL_MODELS

    # Fall back to CPU when CUDA is unavailable, regardless of --device.
    device = torch.device(args.device if torch.cuda.is_available() else 'cpu')
    print(f'[INFO] 使用设备: {device}')
    print(f'[INFO] 输入图像: {args.image}')
    print(f'[INFO] 将对以下模型进行推理: {models_to_infer}')
    print(f'{"=" * 80}\n')

    # Collected per-model results for the final summary table.
    results = {}

    for model_name in models_to_infer:
        if model_name not in MODEL_CONFIGS:
            print(f'[WARNING] 未知模型: {model_name}，跳过')
            continue

        print(f'\n{"-" * 80}')
        print(f'推理模型: {model_name.upper()}')
        print(f'{"-" * 80}')

        try:
            # Look up module/class names for dynamic import.
            model_config = MODEL_CONFIGS[model_name]
            module_name = model_config['module']
            class_name = model_config['class']
            to_rgb = model_config['to_rgb']

            # Import the model class from <module>.model.
            model_module = importlib.import_module(f'{module_name}.model')
            model_class = getattr(model_module, class_name)

            # Import the per-model construction kwargs from <module>.config.
            config_module = importlib.import_module(f'{module_name}.config')
            MODEL_CONFIG = config_module.MODEL_CONFIG

            # Instantiate the model with its config kwargs.
            model = model_class(**MODEL_CONFIG)

            # Resolve the checkpoint path for the requested fold.
            checkpoint_path = os.path.join('checkpoints', model_name, f'fold_{args.fold}', args.checkpoint)

            if not os.path.exists(checkpoint_path):
                print(f'[WARNING] 模型权重不存在: {checkpoint_path}，跳过')
                continue

            model = load_model_for_inference(model, checkpoint_path, device)

            # Build the output path and make sure the directory exists —
            # otherwise saving the prediction fails on a fresh workspace.
            image_name = os.path.basename(args.image)
            image_name_no_ext = os.path.splitext(image_name)[0]
            output_dir = os.path.join('outputs', model_name)
            os.makedirs(output_dir, exist_ok=True)
            output_path = os.path.join(output_dir, f'{image_name_no_ext}_pred.png')

            # Run inference on the single input image.
            inference_time, foreground_pixels = inference_single_image(
                model=model,
                image_path=args.image,
                output_path=output_path,
                device=device,
                to_rgb=to_rgb,
                threshold=args.threshold
            )

            results[model_name] = {
                'inference_time': inference_time,
                'foreground_pixels': foreground_pixels,
                'output_path': output_path
            }

            print(f'[INFO] 推理用时: {inference_time:.4f} 秒')
            print(f'[INFO] 前景像素数: {foreground_pixels}')
            print(f'[INFO] 输出路径: {output_path}')

        except Exception as e:
            # Best-effort: report the failure and continue with other models.
            print(f'[ERROR] 推理 {model_name} 时出错: {str(e)}')
            continue

    # Print the summary table for all successfully inferred models.
    print(f'\n{"=" * 80}')
    print('所有模型推理完成！汇总：')
    print(f'{"=" * 80}\n')

    print(f'{"Model":<15} {"Time (s)":<12} {"Foreground Pixels":<20} {"Output Path"}')
    print(f'{"-" * 80}')

    for model_name, result in results.items():
        print(f'{model_name:<15} '
              f'{result["inference_time"]:<12.4f} '
              f'{result["foreground_pixels"]:<20} '
              f'{result["output_path"]}')

    print(f'{"=" * 80}\n')


# Script entry point: run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
