#!/usr/bin/env python
"""
CN-CLIP 微调模型测试脚本
支持加载指定实验的检查点，在测试集上进行完整评估

Usage:
    # 测试指定实验的最佳模型
    python test.py --experiment cnclip_L14_freeze_visual_0911_1423
    
    # 测试指定检查点
    python test.py --checkpoint ./checkpoints/cnclip_L14_freeze_visual_0911_1423/epoch=05-val_mean_r1=0.6234.ckpt
    
    # 使用不同的测试集规模
    python test.py --experiment cnclip_L14_freeze_visual_0911_1423 --test-scale 0.1
    
    # 保存详细的预测结果
    python test.py --experiment cnclip_L14_freeze_visual_0911_1423 --save-predictions
"""

import os
import sys
import json
import argparse
from pathlib import Path
from datetime import datetime
from typing import Dict, List, Tuple, Optional

import torch
import torch.nn.functional as F
import pytorch_lightning as pl
from torch.utils.data import DataLoader
from tqdm import tqdm
import numpy as np

# 添加项目路径
project_root = Path(__file__).parent.parent.parent
sys.path.insert(0, str(project_root / "codebase"))
sys.path.insert(0, str(project_root / "codebase" / "cnclip_finetune"))

from dataset.dataset import get_test_set
from common.eval import compute_clip_retrieval_metrics, print_metrics
from model import CNClipLightning


def find_experiment_checkpoint(experiment_name: str, checkpoint_type: str = "best") -> Optional[Path]:
    """Locate a checkpoint file for the given experiment.

    Args:
        experiment_name: name of the experiment directory under
            ./codebase/cnclip_finetune/checkpoints/.
        checkpoint_type: "best" (highest val_mean_r1 in the filename),
            "last", or an explicit checkpoint filename.

    Returns:
        Path to the checkpoint file, or None if it cannot be found.
    """
    checkpoint_dir = Path(f"./codebase/cnclip_finetune/checkpoints/{experiment_name}")

    if not checkpoint_dir.exists():
        print(f"❌ 实验目录不存在: {checkpoint_dir}")
        return None

    if checkpoint_type == "last":
        last_ckpt = checkpoint_dir / "last.ckpt"
        if last_ckpt.exists():
            return last_ckpt
        print(f"❌ 未找到last.ckpt: {last_ckpt}")
        return None

    if checkpoint_type == "best":
        # Pick the validation checkpoint with the highest val_mean_r1.
        ckpt_files = list(checkpoint_dir.glob("epoch=*-val_mean_r1=*.ckpt"))
        if not ckpt_files:
            print(f"❌ 未找到任何验证检查点: {checkpoint_dir}")
            return None

        def _val_mean_r1(path: Path) -> float:
            # Filenames look like "epoch=05-val_mean_r1=0.6234.ckpt".
            # Lightning may append a version suffix ("...=0.6234-v1.ckpt"),
            # which the glob above also matches — strip everything after the
            # metric value so float() does not raise on such files.
            raw = path.stem.split('val_mean_r1=')[1]
            return float(raw.split('-')[0])

        return max(ckpt_files, key=_val_mean_r1)

    # Anything else is treated as an explicit filename inside the directory.
    ckpt_path = checkpoint_dir / checkpoint_type
    if ckpt_path.exists():
        return ckpt_path
    print(f"❌ 检查点文件不存在: {ckpt_path}")
    return None


def load_model_from_checkpoint(checkpoint_path: Path) -> Optional[CNClipLightning]:
    """Load a CNClipLightning model from a checkpoint file.

    Args:
        checkpoint_path: path to the .ckpt file.

    Returns:
        The model in eval mode with all parameters frozen, or None when
        loading fails (the original annotation claimed a non-optional
        return, but the error path returns None).
    """
    print(f"📂 加载检查点: {checkpoint_path}")
    
    try:
        model = CNClipLightning.load_from_checkpoint(str(checkpoint_path))
        model.eval()
        model.freeze()  # freeze every parameter to guarantee pure inference
        
        print(f"✅ 模型加载成功")
        print(f"   模型规模: {model.hparams.model_name}")
        print(f"   冻结策略: {model.hparams.freeze_strategy}")
        print(f"   学习率: {model.hparams.learning_rate}")
        
        return model
        
    except Exception as e:
        # Deliberately broad: any load failure is reported and signalled to
        # the caller via None (caller aborts on None).
        print(f"❌ 加载模型失败: {e}")
        return None


def extract_all_features(
    model: CNClipLightning,
    test_loader: DataLoader,
    device: torch.device
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
    """Run the model over the whole test loader and gather features on CPU.

    Returns:
        (image_features, text_features, img_ids) as concatenated tensors.
    """
    model.to(device)
    model.eval()

    feats_img: List[torch.Tensor] = []
    feats_txt: List[torch.Tensor] = []
    ids: List[torch.Tensor] = []

    print("🔍 提取测试集特征...")

    with torch.no_grad():
        for images, text_tokens, batch_ids in tqdm(test_loader, desc="特征提取"):
            # Forward pass on the target device, stash results on CPU.
            outputs = model(images.to(device), text_tokens.to(device))
            feats_img.append(outputs['image_features'].cpu())
            feats_txt.append(outputs['text_features'].cpu())
            ids.append(batch_ids.cpu())

    image_features = torch.cat(feats_img)
    text_features = torch.cat(feats_txt)
    img_ids = torch.cat(ids)

    print(f"✅ 特征提取完成: {len(image_features):,} 个图文对")

    return image_features, text_features, img_ids


def compute_detailed_metrics(
    image_features: torch.Tensor,
    text_features: torch.Tensor,
    img_ids: torch.Tensor,
    save_predictions: bool = False,
    output_dir: Optional[Path] = None
) -> Dict:
    """Compute retrieval metrics on the extracted features.

    When *save_predictions* is set and *output_dir* is given, also dumps
    the top-10 predictions (JSON) and the raw features (NPZ) for offline
    analysis.
    """
    print("\n📊 计算检索指标...")

    # Core image<->text retrieval metrics.
    metrics = compute_clip_retrieval_metrics(image_features, text_features)

    metric_names = (
        'i2t_r1', 'i2t_r5', 'i2t_r10',
        't2i_r1', 't2i_r5', 't2i_r10',
        'mean_r1', 'mean_r5', 'mean_r10',
    )
    results = {
        'test_samples': len(image_features),
        'feature_dim': image_features.shape[1],
        'retrieval_metrics': {name: float(getattr(metrics, name)) for name in metric_names},
    }

    print_metrics(metrics, "测试集性能")

    if save_predictions and output_dir:
        print(f"\n💾 保存预测结果到: {output_dir}")

        # Similarity matrix: rows index images, columns index texts.
        similarity = image_features @ text_features.T

        top_i2t = similarity.topk(10, dim=1)
        top_t2i = similarity.T.topk(10, dim=1)

        predictions = {
            'img_ids': img_ids.tolist(),
            'i2t_top10_indices': top_i2t.indices.tolist(),
            'i2t_top10_scores': top_i2t.values.tolist(),
            't2i_top10_indices': top_t2i.indices.tolist(),
            't2i_top10_scores': top_t2i.values.tolist(),
        }

        pred_file = output_dir / "detailed_predictions.json"
        with open(pred_file, 'w', encoding='utf-8') as f:
            json.dump(predictions, f, indent=2, ensure_ascii=False)

        print(f"   预测结果: {pred_file}")

        # Keep the raw features so analyses need not rerun the model.
        features_file = output_dir / "test_features.npz"
        np.savez_compressed(
            features_file,
            image_features=image_features.numpy(),
            text_features=text_features.numpy(),
            img_ids=img_ids.numpy()
        )

        print(f"   特征文件: {features_file}")

    return results


def create_test_report(
    results: Dict,
    experiment_name: str,
    checkpoint_path: Path,
    model: CNClipLightning,
    output_dir: Path
):
    """Write a JSON and a Markdown test report into *output_dir*.

    Args:
        results: output of compute_detailed_metrics; must contain the
            'retrieval_metrics', 'test_samples' and 'feature_dim' keys.
        experiment_name: experiment identifier shown in the report header.
        checkpoint_path: checkpoint the model was loaded from.
        model: loaded model; its hparams are recorded in the report.
        output_dir: existing directory the two report files are written to.
    """
    
    report = {
        'test_info': {
            'experiment_name': experiment_name,
            'checkpoint_path': str(checkpoint_path),
            'test_time': datetime.now().strftime('%Y-%m-%d %H:%M:%S'),
            # Device of the first parameter — assumes all params share one device.
            'device': str(next(model.parameters()).device),
        },
        'model_config': {
            'model_name': model.hparams.model_name,
            'freeze_strategy': model.hparams.freeze_strategy,
            'learning_rate': float(model.hparams.learning_rate),
            'weight_decay': float(model.hparams.weight_decay),
            'data_scale': float(model.hparams.data_scale),
        },
        'test_results': results,
        'performance_summary': {
            'Mean R@1': f"{results['retrieval_metrics']['mean_r1']:.4f}",
            'Mean R@5': f"{results['retrieval_metrics']['mean_r5']:.4f}",
            'Mean R@10': f"{results['retrieval_metrics']['mean_r10']:.4f}",
            'I2T R@1': f"{results['retrieval_metrics']['i2t_r1']:.4f}",
            'T2I R@1': f"{results['retrieval_metrics']['t2i_r1']:.4f}",
        }
    }
    
    # Persist the machine-readable JSON report.
    report_file = output_dir / "test_report.json"
    with open(report_file, 'w', encoding='utf-8') as f:
        json.dump(report, f, indent=2, ensure_ascii=False)
    
    # Render the human-readable Markdown report (template text is user-facing
    # output and intentionally kept as-is).
    md_report = f"""# CN-CLIP 微调模型测试报告

## 基本信息
- **实验名称**: {experiment_name}
- **检查点**: {checkpoint_path.name}
- **测试时间**: {report['test_info']['test_time']}
- **设备**: {report['test_info']['device']}

## 模型配置
- **模型规模**: {model.hparams.model_name}
- **冻结策略**: {model.hparams.freeze_strategy}
- **学习率**: {model.hparams.learning_rate}
- **数据规模**: {model.hparams.data_scale}

## 测试结果

### 关键指标
- **Mean R@1**: {results['retrieval_metrics']['mean_r1']:.4f}
- **Mean R@5**: {results['retrieval_metrics']['mean_r5']:.4f}
- **Mean R@10**: {results['retrieval_metrics']['mean_r10']:.4f}

### 详细指标

#### Image-to-Text 检索
- R@1: {results['retrieval_metrics']['i2t_r1']:.4f}
- R@5: {results['retrieval_metrics']['i2t_r5']:.4f}
- R@10: {results['retrieval_metrics']['i2t_r10']:.4f}

#### Text-to-Image 检索  
- R@1: {results['retrieval_metrics']['t2i_r1']:.4f}
- R@5: {results['retrieval_metrics']['t2i_r5']:.4f}
- R@10: {results['retrieval_metrics']['t2i_r10']:.4f}

### 测试集统计
- **样本数量**: {results['test_samples']:,}
- **特征维度**: {results['feature_dim']}

---
*报告生成时间: {datetime.now().strftime('%Y-%m-%d %H:%M:%S')}*
"""
    
    md_file = output_dir / "test_report.md"
    with open(md_file, 'w', encoding='utf-8') as f:
        f.write(md_report)
    
    print(f"\n📄 测试报告已保存:")
    print(f"   JSON: {report_file}")
    print(f"   Markdown: {md_file}")


def main() -> int:
    """CLI entry point: load a checkpoint, evaluate on the test set, write reports.

    Returns:
        Process exit code: 0 on success, 1 when the checkpoint or model
        cannot be loaded.
    """
    parser = argparse.ArgumentParser(description="CN-CLIP微调模型测试")
    
    # Model selection: exactly one of --experiment / --checkpoint is required.
    model_group = parser.add_mutually_exclusive_group(required=True)
    model_group.add_argument("--experiment", type=str, help="实验名称")
    model_group.add_argument("--checkpoint", type=str, help="检查点路径")
    
    # Test-run parameters.
    parser.add_argument("--checkpoint-type", type=str, default="best",
                       choices=["best", "last"],
                       help="检查点类型 (当使用--experiment时)")
    parser.add_argument("--test-scale", type=float, default=1.0, 
                       help="测试集比例")
    parser.add_argument("--batch-size", type=int, default=64, 
                       help="批次大小")
    parser.add_argument("--num-workers", type=int, default=4, 
                       help="数据加载器进程数")
    
    # Output options.
    parser.add_argument("--save-predictions", action="store_true", 
                       help="保存详细预测结果")
    parser.add_argument("--output-dir", type=str, 
                       help="输出目录 (默认为实验目录)")
    
    # Device selection.
    parser.add_argument("--device", type=str, default="auto",
                       help="设备选择 (auto, cpu, cuda)")
    
    args = parser.parse_args()
    
    # Resolve the checkpoint path from either --experiment or --checkpoint.
    if args.experiment:
        checkpoint_path = find_experiment_checkpoint(args.experiment, args.checkpoint_type)
        if checkpoint_path is None:
            return 1
        experiment_name = args.experiment
    else:
        checkpoint_path = Path(args.checkpoint)
        if not checkpoint_path.exists():
            print(f"❌ 检查点文件不存在: {checkpoint_path}")
            return 1
        # Derive the experiment name from the checkpoint's parent directory.
        experiment_name = checkpoint_path.parent.name
    
    # Pick the device ("auto" prefers CUDA when available).
    if args.device == "auto":
        device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    else:
        device = torch.device(args.device)
    
    print(f"\n{'='*60}")
    print(f"🧪 CN-CLIP 微调模型测试")
    print(f"{'='*60}")
    print(f"📦 实验名称: {experiment_name}")
    print(f"📂 检查点: {checkpoint_path.name}")
    print(f"🎯 测试规模: {args.test_scale}")
    print(f"📱 设备: {device}")
    print(f"{'='*60}\n")
    
    # Load the model (returns None on failure).
    model = load_model_from_checkpoint(checkpoint_path)
    if model is None:
        return 1
    
    # Build the test DataLoader (no shuffling — order must match img_ids).
    print(f"📚 加载测试集 (规模: {args.test_scale})...")
    test_set = get_test_set(args.test_scale)
    test_loader = DataLoader(
        test_set,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=args.num_workers,
        pin_memory=torch.cuda.is_available()
    )
    
    print(f"✅ 测试集加载完成: {len(test_set):,} 个样本")
    
    # Extract image/text features for the whole test set.
    image_features, text_features, img_ids = extract_all_features(
        model, test_loader, device
    )
    
    # Resolve the output directory (defaults to the experiment's results dir).
    if args.output_dir:
        output_dir = Path(args.output_dir)
    else:
        output_dir = Path(f"research/experiments/{experiment_name}/test_results")
    
    output_dir.mkdir(parents=True, exist_ok=True)
    
    # Compute retrieval metrics (and optionally dump predictions/features).
    results = compute_detailed_metrics(
        image_features, text_features, img_ids,
        save_predictions=args.save_predictions,
        output_dir=output_dir if args.save_predictions else None
    )
    
    # Write the JSON/Markdown test reports.
    create_test_report(results, experiment_name, checkpoint_path, model, output_dir)
    
    # Print a short summary.
    mean_r1 = results['retrieval_metrics']['mean_r1']
    mean_r5 = results['retrieval_metrics']['mean_r5']
    
    print(f"\n🎉 测试完成!")
    print(f"📈 关键指标: Mean R@1={mean_r1:.4f}, Mean R@5={mean_r5:.4f}")
    print(f"📁 结果目录: {output_dir}")
    
    # Coarse performance rating based on Mean R@1.
    if mean_r1 >= 0.7:
        print("🌟 性能评级: 优秀")
    elif mean_r1 >= 0.5:
        print("✅ 性能评级: 良好") 
    elif mean_r1 >= 0.3:
        print("⚠️ 性能评级: 一般")
    else:
        print("❌ 性能评级: 需要改进")
    
    return 0


if __name__ == "__main__":
    # Use sys.exit rather than the site-injected exit() builtin: exit() is
    # only guaranteed in interactive sessions (it comes from the site module),
    # while sys.exit is always available and propagates the return code.
    sys.exit(main())