#!/usr/bin/env python3

import torch
import os
import argparse
import time
from model.pln import Yolov3
from eval.pln_evaluator import PLNEvaluator
import config.yolov3_config_voc as cfg

def eval_model(weight_path, save_images=True):
    """Evaluate a trained PLN/YOLOv3 checkpoint on the VOC validation set.

    Args:
        weight_path: Path to the checkpoint file. Accepts either a bare
            state_dict or a dict checkpoint with a 'model' key (and an
            optional 'best_mAP' entry).
        save_images: If True, the evaluator writes annotated detection
            images (presumably under data/results/ — see final message).

    Returns:
        The overall mAP as a float, or None if the weight file is missing.
    """
    print("开始评估模型...")
    
    # Fail fast if the checkpoint file does not exist.
    if not os.path.exists(weight_path):
        print(f"错误: 权重文件 {weight_path} 不存在")
        return None
    
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(f"使用设备: {device}")
    
    print("加载模型...")
    model = Yolov3().to(device)
    
    # weights_only=False: the checkpoint may contain arbitrary pickled
    # objects (trusted local file). NOTE: this is unsafe on untrusted files.
    checkpoint = torch.load(weight_path, map_location=device, weights_only=False)
    if isinstance(checkpoint, dict) and 'model' in checkpoint:
        # Full training checkpoint: extract the model weights.
        model.load_state_dict(checkpoint['model'])
        print(f"加载模型权重: {weight_path}")
        if 'best_mAP' in checkpoint:
            print(f"训练时的最佳mAP: {checkpoint['best_mAP']:.4f}")
    else:
        # Bare state_dict checkpoint.
        model.load_state_dict(checkpoint)
        print(f"加载模型权重: {weight_path}")
    
    print("创建评估器...")
    # 'visiual' is the evaluator's own (misspelled) keyword — keep as-is.
    evaluator = PLNEvaluator(model, visiual=save_images)
    
    print("开始评估...")
    start_time = time.time()
    
    with torch.no_grad():
        APs = evaluator.APs_voc()
        
        print("\n" + "="*60)
        print("各类别AP值:")
        print("="*60)
        
        total_ap = 0
        for class_name, ap in APs.items():
            print(f"{class_name:15s} --> AP: {ap:.4f}")
            total_ap += ap
        
        # Guard against an empty AP dict to avoid ZeroDivisionError.
        mAP = total_ap / len(APs) if APs else 0.0
        print("-"*60)
        print(f"{'Overall mAP':15s} --> {mAP:.4f}")
        print("="*60)
        
        if mAP >= 0.7:
            print("优秀! mAP达到目标 (≥0.7)")
        elif mAP >= 0.5:
            print("良好! mAP表现不错 (≥0.5)")
        elif mAP >= 0.3:
            print("一般! mAP需要改进 (≥0.3)")
        else:
            print("较差! mAP需要大幅改进 (<0.3)")
    
    # BUG FIX: the original reported estimate_evaluation_duration() (a rough
    # estimate imported mid-function) while start_time was captured but never
    # used. Report the actual measured wall-clock duration instead.
    eval_time = time.time() - start_time
    print(f"\n评估完成! 最终mAP: {mAP:.4f}")
    print(f"评估耗时: {eval_time:.2f}秒")
    
    if save_images:
        print("识别结果图片已保存到 data/results/ 目录")
    
    return mAP

if __name__ == "__main__":
    # Command-line entry point: parse arguments and run the evaluation.
    parser = argparse.ArgumentParser(description='评估PLN模型')
    parser.add_argument('--weight_path', type=str, default='weight/best.pt', 
                       help='模型权重文件路径')
    # BUG FIX: the original used action='store_true' with default=True, which
    # made --save_images a no-op and offered no way to disable saving. Keep
    # --save_images for CLI compatibility and add --no_save_images to opt out.
    parser.add_argument('--save_images', dest='save_images',
                        action='store_true', default=True,
                        help='是否保存识别结果图片')
    parser.add_argument('--no_save_images', dest='save_images',
                        action='store_false',
                        help='不保存识别结果图片')
    
    args = parser.parse_args()
    
    eval_model(args.weight_path, args.save_images)