"""
模型checkpoint管理
提供模型保存和加载功能
"""
import torch
from pathlib import Path
from collections import OrderedDict


def save_checkpoint(save_path, epoch, model, optimizer=None, scheduler=None, 
                   metrics=None, config=None, **kwargs):
    """
    Save a training checkpoint to disk.

    Args:
        save_path: destination file path
        epoch: current epoch number
        model: model whose state_dict is stored
        optimizer: optimizer (optional)
        scheduler: learning-rate scheduler (optional)
        metrics: evaluation metrics dict (optional)
        config: configuration object (optional); a fixed subset of its
            attributes is snapshotted into the checkpoint
        **kwargs: any extra entries to store alongside the state
    """
    # Make sure the parent directory exists before writing.
    target = Path(save_path)
    target.parent.mkdir(parents=True, exist_ok=True)

    # Mandatory payload: epoch counter plus model weights.
    state = {
        'epoch': epoch,
        'model_state_dict': model.state_dict(),
    }

    # Optional stateful training components.
    for key, component in (('optimizer_state_dict', optimizer),
                           ('scheduler_state_dict', scheduler)):
        if component is not None:
            state[key] = component.state_dict()

    if metrics is not None:
        state['metrics'] = metrics

    # Snapshot a whitelisted subset of config attributes, when present.
    if config is not None:
        wanted = ('model_type', 'num_images', 'num_features', 'image_size',
                  'batch_size', 'learning_rate', 'optimizer')
        state['config'] = {name: getattr(config, name)
                           for name in wanted if hasattr(config, name)}

    # Caller-supplied extras go in last (may override nothing by default).
    state.update(kwargs)

    torch.save(state, target)

    print(f"Checkpoint saved to: {target}")
    print(f"  Epoch: {epoch}")
    if metrics:
        print(f"  R²: {metrics.get('R²', 'N/A')}")


def load_checkpoint(checkpoint_path, model, optimizer=None, scheduler=None, 
                   device='cpu', strict=True):
    """
    Load a checkpoint and restore model (and optionally optimizer /
    scheduler) state.

    Args:
        checkpoint_path: path of the checkpoint file
        model: model to restore weights into
        optimizer: optimizer to restore (optional)
        scheduler: learning-rate scheduler to restore (optional)
        device: map_location passed to torch.load
        strict: whether state_dict keys must match exactly; when False,
            mismatching layers are skipped and the rest are loaded

    Returns:
        checkpoint: the full checkpoint dict

    Raises:
        FileNotFoundError: if checkpoint_path does not exist
        RuntimeError: if strict=True and the weights do not fit the model
    """
    checkpoint_path = Path(checkpoint_path)
    
    if not checkpoint_path.exists():
        raise FileNotFoundError(f"Checkpoint not found: {checkpoint_path}")
    
    # Load checkpoint onto the requested device.
    checkpoint = torch.load(checkpoint_path, map_location=device)
    
    # Restore model weights.
    try:
        model.load_state_dict(checkpoint['model_state_dict'], strict=strict)
        print(f"Model weights loaded successfully")
    except Exception as e:
        # BUGFIX: the original swallowed this error even when strict=True,
        # silently returning a model with unloaded (random) weights.
        # With strict loading requested, propagate the failure instead.
        if strict:
            raise
        print(f"Warning: Error loading model weights - {e}")
        # Best-effort partial load: keep only tensors whose name and
        # shape both match the current model.
        model_dict = model.state_dict()
        pretrained_dict = {k: v for k, v in checkpoint['model_state_dict'].items() 
                         if k in model_dict and v.shape == model_dict[k].shape}
        model_dict.update(pretrained_dict)
        model.load_state_dict(model_dict)
        print(f"Loaded {len(pretrained_dict)}/{len(checkpoint['model_state_dict'])} layers")
    
    # Optimizer/scheduler restoration stays best-effort: a failure here
    # should not prevent inference with the restored weights.
    if optimizer is not None and 'optimizer_state_dict' in checkpoint:
        try:
            optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
            print(f"Optimizer state loaded")
        except Exception as e:
            print(f"Warning: Could not load optimizer state - {e}")
    
    if scheduler is not None and 'scheduler_state_dict' in checkpoint:
        try:
            scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
            print(f"Scheduler state loaded")
        except Exception as e:
            print(f"Warning: Could not load scheduler state - {e}")
    
    # Summarize what was loaded.
    print(f"\nCheckpoint loaded from: {checkpoint_path}")
    if 'epoch' in checkpoint:
        print(f"  Epoch: {checkpoint['epoch']}")
    if 'metrics' in checkpoint:
        print(f"  Metrics: {checkpoint['metrics']}")
    
    return checkpoint


def resume_training(checkpoint_path, model, optimizer, scheduler, device='cpu'):
    """
    Restore full training state from a checkpoint.

    Args:
        checkpoint_path: path of the checkpoint file
        model: model to restore
        optimizer: optimizer to restore
        scheduler: learning-rate scheduler to restore
        device: map_location used when loading

    Returns:
        start_epoch: epoch recorded in the checkpoint (0 if absent)
        best_metric: best R² recorded so far (-inf if absent)
    """
    state = load_checkpoint(checkpoint_path, model, optimizer, scheduler, device)

    # Fall back to sensible defaults when the checkpoint lacks these keys.
    start_epoch = state.get('epoch', 0)
    recorded_metrics = state.get('metrics', {})
    best_metric = recorded_metrics.get('R²', -float('inf'))

    print(f"\nResuming training from epoch {start_epoch}")
    print(f"Best R² so far: {best_metric:.4f}")

    return start_epoch, best_metric


def convert_state_dict(state_dict):
    """
    Normalize a state_dict saved from a DataParallel-wrapped model.

    Strips the leading 'module.' prefix from every key that carries it,
    leaving other keys untouched. Key order is preserved.

    Args:
        state_dict: original state_dict

    Returns:
        new_state_dict: OrderedDict with cleaned key names
    """
    prefix = 'module.'
    return OrderedDict(
        (key[len(prefix):] if key.startswith(prefix) else key, value)
        for key, value in state_dict.items()
    )


def get_model_size(model):
    """
    Compute the in-memory footprint of a model.

    Counts the bytes occupied by all parameters and registered buffers.

    Args:
        model: the model to measure

    Returns:
        size_mb: total size in megabytes
    """
    total_bytes = 0
    for tensor in model.parameters():
        total_bytes += tensor.numel() * tensor.element_size()
    for tensor in model.buffers():
        total_bytes += tensor.numel() * tensor.element_size()
    return total_bytes / 1024 / 1024


if __name__ == "__main__":
    # 测试checkpoint功能
    print("Testing checkpoint functions...")
    
    import tempfile
    import shutil
    import torch.nn as nn
    
    # 创建临时目录
    temp_dir = tempfile.mkdtemp()
    
    try:
        # 创建简单模型
        class SimpleModel(nn.Module):
            def __init__(self):
                super().__init__()
                self.fc = nn.Linear(10, 5)
            
            def forward(self, x):
                return self.fc(x)
        
        model = SimpleModel()
        optimizer = torch.optim.Adam(model.parameters(), lr=0.001)
        
        # 保存checkpoint
        save_path = Path(temp_dir) / 'test_checkpoint.pth'
        metrics = {'MAE': 1.5, 'RMSE': 2.0, 'R²': 0.85}
        save_checkpoint(save_path, epoch=10, model=model, optimizer=optimizer, metrics=metrics)
        
        # 加载checkpoint
        model2 = SimpleModel()
        optimizer2 = torch.optim.Adam(model2.parameters(), lr=0.001)
        checkpoint = load_checkpoint(save_path, model2, optimizer2)
        
        print(f"\nLoaded epoch: {checkpoint['epoch']}")
        print(f"Loaded metrics: {checkpoint['metrics']}")
        
        # 计算模型大小
        size = get_model_size(model)
        print(f"\nModel size: {size:.2f} MB")
        
        print("\n✓ Checkpoint test passed!")
        
    finally:
        # 清理临时目录
        shutil.rmtree(temp_dir)