#!/usr/bin/env python3
"""
VAE模型训练脚本 - 学习Julia的训练策略

训练流程（参考Julia实现）：
1. 加载生成的数据集（.npz文件）
2. 提取任务相关观测作为VAE输入（60维，只比较位置）
3. 划分训练集和验证集（80/20）
4. 数据标准化（使用训练集的均值和标准差）
5. 构建VAE模型
6. 训练模型（ELBO损失 = 重建损失 + KL散度）
7. 定期保存模型和训练曲线

观测设计（参考Julia）：
- Julia: ego观测1维×15步 + opponent观测3维×15步 = 60维
- 我们: ego位置3维×10步 + opponent位置3维×10步 = 60维（只比较位置）
- 理念: 不需要完整状态，只需任务相关的观测信息

Julia关键配置：
- Batch size: 32
- Epochs: 1000
- Optimizer: Adam(lr=0.0002) + Gradient Clipping(max_norm=50)
- Latent dim: 16
- Hidden dim: 128
- Train/Val split: 80/20
"""

import os
# Work around duplicate OpenMP runtimes (common with Intel MKL + PyTorch on Windows).
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

import sys
# Make repo root, src/ and games/ importable when the script is run directly.
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, repo_root)
sys.path.insert(0, os.path.join(repo_root, 'src'))
sys.path.insert(0, os.path.join(repo_root, 'games'))
# NOTE(review): Julia is initialized here, before torch is imported — presumably
# to avoid library-loading conflicts between Julia/PATH and torch; confirm the
# ordering requirement before reordering these imports.
from julia.api import Julia
Julia(compiled_modules=False)
import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import Dataset, DataLoader, TensorDataset
import numpy as np
import matplotlib.pyplot as plt
import multiprocessing as mp
from typing import Dict, Tuple, List, Optional, Any
from datetime import datetime
import json
import time

# Try to import swanlab (optional experiment tracker); training proceeds without it.
try:
    import swanlab
    SWANLAB_AVAILABLE = True
except ImportError:
    SWANLAB_AVAILABLE = False
    print("⚠️ swanlab未安装，将跳过实验跟踪。安装命令: pip install swanlab")

# Import VAE components
from src.vae import (
    GaussianEncoder,
    Decoder,
    compute_elbo_loss,
    DroneMCPDecoder,
    DroneVAE,
)
from src.vae.training import (
    train_epoch,
    validate,
    log_solver_stats_to_file,
    sample_and_visualize_latent_space,
)
from src.mcp.mcp_solver import MCPGameSolver, solve_mcp_game_differentiable
from games.drone_game import DroneGame
def plot_training_history(
    train_losses: List[Dict[str, float]],
    val_losses: List[Dict[str, float]],
    goal_errors: List[Dict[str, float]],
    save_path: str
):
    """
    Plot the training-history curves to a 2x2 figure and save it.

    Panels: total ELBO loss, reconstruction loss, KL divergence, and (when
    available) the goal-distribution matching metrics.

    Args:
        train_losses: per-epoch training loss dicts ('total', 'reconstruction', 'kl_divergence').
        val_losses: per-epoch validation loss dicts (same keys).
        goal_errors: sparse goal-prediction error dicts; entries may lack an
            'epoch' key (old format), in which case their list position is used.
        save_path: output image path.
    """
    epoch_axis = range(1, len(train_losses) + 1)
    fig, axes = plt.subplots(2, 2, figsize=(12, 10))

    # The three loss panels share identical layout — drive them from a table.
    loss_panels = (
        (axes[0, 0], 'total', 'Total Loss', 'Total ELBO Loss'),
        (axes[0, 1], 'reconstruction', 'Reconstruction Loss', 'Reconstruction Loss (MSE)'),
        (axes[1, 0], 'kl_divergence', 'KL Divergence', 'KL Divergence'),
    )
    for ax, key, ylabel, title in loss_panels:
        ax.plot(epoch_axis, [entry[key] for entry in train_losses], 'b-', label='Train')
        ax.plot(epoch_axis, [entry[key] for entry in val_losses], 'r-', label='Validation')
        ax.set_xlabel('Epoch')
        ax.set_ylabel(ylabel)
        ax.set_title(title)
        ax.legend()
        ax.grid(True)

    # Distribution-matching panel (Julia-style metric), only when data exists.
    goal_ax = axes[1, 1]
    cleaned: List[Dict[str, float]] = []
    for position, record in enumerate(goal_errors):
        if not isinstance(record, dict):
            continue
        # Old-format entries have no 'epoch' key: fall back to list order.
        if 'epoch' not in record:
            record = record.copy()
            record['epoch'] = position + 1
        # Only keep entries carrying both required metrics.
        if 'mean_diff' in record and 'std_diff' in record:
            cleaned.append(record)

    if cleaned:
        xs = [record['epoch'] for record in cleaned]
        goal_ax.plot(xs, [record['mean_diff'] for record in cleaned], 'g-', label='Mean Diff', linewidth=2)
        goal_ax.plot(xs, [record['std_diff'] for record in cleaned], 'b--', label='Std Diff', linewidth=2)
        goal_ax.legend()
        goal_ax.grid(True)
    elif goal_errors:
        goal_ax.text(0.5, 0.5, 'No valid goal evaluation data',
                     ha='center', va='center', transform=goal_ax.transAxes)
    else:
        goal_ax.text(0.5, 0.5, 'No goal evaluation data\n(evaluated every 50 epochs)',
                     ha='center', va='center', transform=goal_ax.transAxes)
    goal_ax.set_xlabel('Epoch')
    goal_ax.set_ylabel('Distribution Difference (m)')
    goal_ax.set_title('Goal Distribution Matching')

    plt.tight_layout()
    plt.savefig(save_path, dpi=150, bbox_inches='tight')
    print(f"训练曲线已保存到: {save_path}")
    plt.close()


def prepare_data_loaders(
    config: Dict[str, Any],
    dataset_path: str,
    verbose: bool = True,
) -> Tuple[DataLoader, DataLoader, np.ndarray, int, Dict[str, Any]]:
    """
    Load the dataset and build DataLoaders (standalone so worker subprocesses can reuse it).

    Args:
        config: training configuration; reads 'horizon', 'train_split',
            'normalize_data', 'batch_size' and 'pin_memory'.
        dataset_path: path to the generated .npz dataset.
        verbose: whether to print dataset statistics.

    Returns:
        train_loader, val_loader, opponent_goals (all samples, un-normalized),
        observation_dim, normalization_params
    """
    horizon = config['horizon']
    # np.load on an .npz returns a lazy NpzFile backed by an open file handle;
    # access the arrays inside a context manager so the handle is released
    # promptly (the original code leaked it).
    with np.load(dataset_path) as data:
        ego_trajectories = data['ego_trajectories']  # [n_samples, 6*horizon]
        opponent_trajectories = data['opponent_trajectories']  # [n_samples, 6*horizon]
        initial_states = data['initial_states']  # [n_samples, 12]
        opponent_goals = data['opponent_goals']  # [n_samples, 3]

    # Keep only positions (first 3 of 6 state dims per step): 3*horizon each,
    # concatenated ego-then-opponent into the task-relevant observation.
    ego_positions = ego_trajectories.reshape(-1, horizon, 6)[:, :, 0:3].reshape(-1, 3*horizon)
    opponent_positions = opponent_trajectories.reshape(-1, horizon, 6)[:, :, 0:3].reshape(-1, 3*horizon)
    combined_observations = np.concatenate([ego_positions, opponent_positions], axis=1)  # [n_samples, 60]

    if verbose:
        print(f"  数据集大小: {combined_observations.shape[0]} 条样本, 输入维度 {combined_observations.shape[1]}")
        print(f"    Ego位置维度: {ego_positions.shape[1]}  Opponent位置维度: {opponent_positions.shape[1]}")
        print(f"    初始状态维度: {initial_states.shape[1]}  目标维度: {opponent_goals.shape[1]}")

    # Sequential (non-shuffled) train/val split.
    n_samples = combined_observations.shape[0]
    n_train = int(n_samples * config['train_split'])

    train_obs_raw = combined_observations[:n_train]
    val_obs_raw = combined_observations[n_train:]
    train_initial_states = initial_states[:n_train]
    val_initial_states = initial_states[n_train:]
    train_opponent_goals = opponent_goals[:n_train]
    val_opponent_goals = opponent_goals[n_train:]

    if config.get('normalize_data', False):
        # Standardize with TRAIN-set statistics only (no leakage into val).
        obs_mean = train_obs_raw.mean(axis=0)
        obs_std = train_obs_raw.std(axis=0) + 1e-5  # epsilon guards constant features
        train_obs = (train_obs_raw - obs_mean) / obs_std
        val_obs = (val_obs_raw - obs_mean) / obs_std
        normalization_params = {
            'obs_mean': obs_mean.tolist(),
            'obs_std': obs_std.tolist(),
            'enabled': True
        }
    else:
        train_obs = train_obs_raw
        val_obs = val_obs_raw
        normalization_params = {'enabled': False}

    # float32 tensors (torch.as_tensor replaces the legacy FloatTensor ctor).
    train_obs_tensor = torch.as_tensor(train_obs, dtype=torch.float32)
    val_obs_tensor = torch.as_tensor(val_obs, dtype=torch.float32)
    train_initial_states_tensor = torch.as_tensor(train_initial_states, dtype=torch.float32)
    val_initial_states_tensor = torch.as_tensor(val_initial_states, dtype=torch.float32)
    train_opponent_goals_tensor = torch.as_tensor(train_opponent_goals, dtype=torch.float32)
    val_opponent_goals_tensor = torch.as_tensor(val_opponent_goals, dtype=torch.float32)

    train_dataset = TensorDataset(train_obs_tensor, train_initial_states_tensor, train_opponent_goals_tensor)
    val_dataset = TensorDataset(val_obs_tensor, val_initial_states_tensor, val_opponent_goals_tensor)

    # num_workers=0: data is small and the caller already runs in a subprocess.
    train_loader = DataLoader(
        train_dataset,
        batch_size=config['batch_size'],
        shuffle=True,
        drop_last=False,
        pin_memory=config['pin_memory'],
        num_workers=0
    )
    val_loader = DataLoader(
        val_dataset,
        batch_size=config['batch_size'],
        shuffle=False,
        pin_memory=config['pin_memory']
    )

    observation_dim = combined_observations.shape[1]
    return train_loader, val_loader, opponent_goals, observation_dim, normalization_params


def _train_epoch_worker(queue: mp.Queue, payload: Dict[str, Any]) -> None:
    """
    子进程执行单个epoch训练，结束即释放Julia/PATH状态，避免性能劣化�?
    """
    epoch = payload['epoch']
    config = payload['config']
    dataset_path = payload['dataset_path']
    save_dir = payload['save_dir']
    results_save_dir = payload['results_save_dir']
    normalization_params = payload['normalization_params']
    global_batch_start = payload.get('global_batch_start', 0)
    checkpoint_path = payload.get('checkpoint_path')
    solver_log_file = payload.get('solver_log_file')
    horizon = config['horizon']
    try:
        device = torch.device(config['device'])
        if torch.cuda.is_available():
            torch.backends.cudnn.benchmark = True

        # 重新加载数据和DataLoader（进程隔离，避免跨epoch缓存�?
        train_loader, val_loader, opponent_goals, observation_dim, normalization_params = prepare_data_loaders(
            config, dataset_path, verbose=False
        )
        norm_params = None
        observations_are_normalized = bool(normalization_params.get('enabled', False))
        if observations_are_normalized:
            norm_params = (
                torch.as_tensor(normalization_params['obs_mean'], device=device).view(1, -1),
                torch.as_tensor(normalization_params['obs_std'], device=device).view(1, -1),
            )

        # 初始化游戏和求解器（新的Julia/PATH实例，仅当前epoch使�?
        game = DroneGame(
            dt=0.1,
            collision_radius=2.0,
            control_limits={
                'theta': 0.5,
                'phi': 0.5,
                'tau': 20.0
            },
            velocity_limit=6.0,
            altitude_limits=(15.0, 55.0),
            g=9.81
        )
        mcp_solver = MCPGameSolver(
            game=game,
            horizon=horizon,
            solver_params={
                'time_limit': 60.0,
                'tolerance': 1e-2,
                'major_iteration_limit': 200,
                'minor_iteration_limit': 2000
            }
        )
        mcp_solver.set_warm_start_enabled(config.get('solver_use_warm_start', True))
        ego_goal = torch.tensor([7.0, 0.0, 35.0]).to(device)

        # 构建VAE
        encoder = GaussianEncoder(
            input_dim=observation_dim,
            hidden_dims=[config['hidden_dim'], config['hidden_dim']],
            latent_dim=config['latent_dim']
        )
        min_bounds = torch.tensor([0.0, -5.0, 15.0], device=device)
        max_bounds = torch.tensor([10.0, 5.0, 55.0], device=device)
        decoder = DroneMCPDecoder(
            latent_dim=config['latent_dim'],
            hidden_dims=[5 * config['latent_dim'], 5 * config['latent_dim']],
            output_dim=3,
            mcp_solver=mcp_solver,
            observation_dim=observation_dim,
            output_bounds=(min_bounds, max_bounds)
        )
        vae = DroneVAE(encoder, decoder).to(device)

        optimizer = optim.Adam([
            {'params': vae.encoder.parameters(), 'lr': config['learning_rate']},
            {'params': vae.decoder.parameters(), 'lr': config['learning_rate']}
        ])

        # 若存在断点，加载参数与优化器（兼容 latest_checkpoint 与纯 state_dict 文件）
        if checkpoint_path and os.path.exists(checkpoint_path):
            checkpoint = torch.load(checkpoint_path, map_location=device, weights_only=False)
            if isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint:
                vae.load_state_dict(checkpoint['model_state_dict'])
                if 'optimizer_state_dict' in checkpoint:
                    optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
                global_batch_start = int(checkpoint.get('global_batch_counter', global_batch_start))
            elif isinstance(checkpoint, dict):
                # 纯 state_dict：只恢复模型，优化器重新初始化
                vae.load_state_dict(checkpoint)
            else:
                raise ValueError(f"不支持的检查点格式: {checkpoint_path}")

        scaler = torch.amp.GradScaler('cuda') if config['use_amp'] else None
        global_batch_counter = {'count': global_batch_start}

        # 计算beta
        if epoch <= config['beta_anneal_epochs']:
            beta = config['beta_start'] + (config['beta_end'] - config['beta_start']) * epoch / config['beta_anneal_epochs']
        else:
            beta = config['beta_end']

        epoch_start_time = time.time()
        t0 = time.time()
        train_loss = train_epoch(
            vae, train_loader, optimizer,
            ego_goal, beta, device,
            use_amp=config['use_amp'],
            scaler=scaler,
            epoch=epoch,
            gradient_clip=config['gradient_clip'],
            diagnostic_interval=config.get('diagnostic_interval', 0),
            global_batch_counter=global_batch_counter,
            swanlab_enabled=False,  # 由主进程统一记录
            solver_log_file=solver_log_file,
            solver_log_phase="train",
            normalization_params=norm_params,
            observations_normalized=observations_are_normalized,
            normalize_reconstruction=observations_are_normalized,
        )
        train_time = time.time() - t0

        t1 = time.time()
        val_loss = validate(
            vae, val_loader,
            ego_goal, beta, device,
            solver_log_file=solver_log_file,
            solver_log_phase="val",
            epoch=epoch,
            normalization_params=norm_params,
            observations_normalized=observations_are_normalized,
            normalize_reconstruction=observations_are_normalized,
        )
        val_time = time.time() - t1
        val_goal_error = val_loss.get('goal_error', 0.0)

        goal_eval_time = 0.0
        distribution_stats = None
        distribution_path = None
        if epoch % config['goal_eval_interval'] == 0 or epoch == config['n_epochs']:
            t2 = time.time()
            distribution_path = os.path.join(results_save_dir, f"goal_distribution_epoch{epoch}.png")
            distribution_stats = sample_and_visualize_latent_space(
                vae, opponent_goals, device,
                num_samples=10000,
                save_path=distribution_path,
            )
            goal_eval_time = time.time() - t2

        epoch_time = time.time() - epoch_start_time
        updated_batch_count = global_batch_counter['count']

        latest_checkpoint = os.path.join(save_dir, "latest_checkpoint.pth")
        torch.save({
            'epoch': epoch,
            'model_state_dict': vae.state_dict(),
            'optimizer_state_dict': optimizer.state_dict(),
            'train_losses': [train_loss],
            'val_losses': [val_loss],
            'goal_errors': [distribution_stats] if distribution_stats else [],
            'config': config,
            'global_batch_counter': updated_batch_count,
        }, latest_checkpoint)

        if epoch % config['save_interval'] == 0 or epoch == config['n_epochs']:
            torch.save(vae.encoder.state_dict(), os.path.join(save_dir, f"encoder_epoch_{epoch}.pth"))
            torch.save(vae.decoder.state_dict(), os.path.join(save_dir, f"decoder_epoch_{epoch}.pth"))
            torch.save(vae.state_dict(), os.path.join(save_dir, f"vae_epoch_{epoch}.pth"))

        queue.put({
            'status': 'ok',
            'epoch': epoch,
            'train_loss': train_loss,
            'val_loss': val_loss,
            'val_goal_error': val_goal_error,
            'distribution_stats': distribution_stats,
            'distribution_path': distribution_path,
            'train_time': train_time,
            'val_time': val_time,
            'goal_eval_time': goal_eval_time,
            'epoch_time': epoch_time,
            'global_batch_counter': updated_batch_count,
            'checkpoint_path': latest_checkpoint,
        })
    except Exception as exc:
        import traceback
        queue.put({
            'status': 'error',
            'error': str(exc),
            'traceback': traceback.format_exc(),
            'epoch': epoch,
        })


def run_epoch_in_subprocess(
    epoch: int,
    config: Dict[str, Any],
    dataset_path: str,
    save_dir: str,
    results_save_dir: str,
    normalization_params: Dict[str, Any],
    global_batch_start: int,
    checkpoint_path: Optional[str],
    solver_log_file: str,
) -> Dict[str, Any]:
    """
    Run a single epoch in an isolated subprocess; return its train/val metrics
    and the path of the latest checkpoint.

    The subprocess is spawned (not forked) so the Julia/PATH solver state is
    fully torn down when the epoch finishes.

    Args:
        epoch: 1-based epoch index to run.
        config: full (picklable) training configuration dict.
        dataset_path: path of the .npz dataset the worker reloads.
        save_dir: directory for model checkpoints.
        results_save_dir: directory for evaluation figures.
        normalization_params: observation-normalization settings.
        global_batch_start: global batch counter carried across epochs.
        checkpoint_path: optional checkpoint the worker resumes from.
        solver_log_file: CSV file receiving solver statistics.

    Returns:
        The result dict produced by the worker (losses, timings, paths).

    Raises:
        RuntimeError: if the subprocess crashes or reports an error status.
    """
    from queue import Empty  # stdlib; lets us poll the mp queue with a timeout

    ctx = mp.get_context("spawn")
    queue: mp.Queue = ctx.Queue()
    payload = {
        'epoch': epoch,
        'config': config,
        'dataset_path': dataset_path,
        'save_dir': save_dir,
        'results_save_dir': results_save_dir,
        'normalization_params': normalization_params,
        'global_batch_start': global_batch_start,
        'checkpoint_path': checkpoint_path,
        'solver_log_file': solver_log_file,
    }

    process = ctx.Process(target=_train_epoch_worker, args=(queue, payload))
    process.start()

    # BUG FIX: the original blocking queue.get() hangs forever if the child
    # dies (OOM, native crash in the solver, ...) before putting a result.
    # Poll with a timeout and bail out once the child is gone.
    result: Optional[Dict[str, Any]] = None
    while result is None:
        try:
            result = queue.get(timeout=5.0)
        except Empty:
            if not process.is_alive():
                # Last chance: the result may have arrived between the
                # timeout expiry and the liveness check.
                try:
                    result = queue.get(timeout=1.0)
                except Empty:
                    break
    process.join()

    if result is None:
        raise RuntimeError(
            f"Epoch {epoch} 子进程异常退出（exitcode={process.exitcode}），未返回任何结果"
        )
    if process.exitcode != 0 or result.get('status') != 'ok':
        raise RuntimeError(
            f"Epoch {epoch} 子进程失败: {result.get('error', 'unknown error')}\n"
            f"{result.get('traceback', '')}"
        )
    return result


def main():
    """Main training entry point.

    Parses CLI arguments, selects a dataset, prepares output directories,
    optionally resumes from a checkpoint, then runs every epoch in an isolated
    subprocess (so Julia/PATH state is recycled each epoch) while logging the
    aggregated metrics to swanlab from this single parent process.
    """

    print("="*70)
    print("VAE模型训练 - 学习Julia策略")
    print("="*70)

    # ==================== Dataset selection ====================
    DATASET_NAME = "vae_dataset_20251120_155419.npz"  # e.g. 'vae_dataset_20251125_142430.npz', or None (auto-pick newest)

    # ==================== 1. Training configuration (following Julia) ====================
    print("\n[1] 配置训练参数...")
    import argparse

    def _parse_bool(value):
        """Parse a CLI boolean flag value.

        BUG FIX: argparse's `type=bool` applies bool() to the raw string, so
        any non-empty value — including "False" — parsed as True. Accept the
        common textual spellings explicitly instead.
        """
        if isinstance(value, bool):
            return value
        lowered = str(value).strip().lower()
        if lowered in ('true', '1', 'yes', 'y', 'on'):
            return True
        if lowered in ('false', '0', 'no', 'n', 'off'):
            return False
        raise argparse.ArgumentTypeError(f"expected a boolean value, got {value!r}")

    parser = argparse.ArgumentParser()
    parser.add_argument('--horizon', type=int, default=10)
    parser.add_argument('--latent_dim', type=int, default=16)
    parser.add_argument('--hidden_dim', type=int, default=128)
    parser.add_argument('--batch_size', type=int, default=128)
    parser.add_argument('--n_epochs', type=int, default=120)
    parser.add_argument('--learning_rate', type=float, default=0.0002)
    parser.add_argument('--gradient_clip', type=float, default=30.0)
    parser.add_argument('--train_split', type=float, default=0.8)
    parser.add_argument('--beta_start', type=float, default=0.5)
    parser.add_argument('--beta_end', type=float, default=0.5)
    parser.add_argument('--beta_anneal_epochs', type=int, default=0)
    parser.add_argument('--normalize_data', type=_parse_bool, default=False)
    parser.add_argument('--mcp_restart_interval', type=int, default=0)
    parser.add_argument('--diagnostic_interval', type=int, default=0)
    parser.add_argument('--save_interval', type=int, default=5)
    parser.add_argument('--goal_eval_interval', type=int, default=5)
    parser.add_argument('--device', type=str, default='cuda' if torch.cuda.is_available() else 'cpu')
    parser.add_argument('--use_amp', type=_parse_bool, default=False)
    parser.add_argument('--pin_memory', type=_parse_bool, default=torch.cuda.is_available())
    parser.add_argument('--use_swanlab', type=_parse_bool, default=SWANLAB_AVAILABLE)
    parser.add_argument('--swanlab_project', type=str, default='drone-vae-training')
    parser.add_argument('--enable_gradient_logging', type=_parse_bool, default=False)
    parser.add_argument('--solver_use_warm_start', type=_parse_bool, default=False)
    parser.add_argument('--resume', type=str, default=None, 
                       help='从检查点恢复训练，指定检查点路径（如: results/demo_vae/training/models/vae_xxx/latest_checkpoint.pth）')
    parser.add_argument('--resume_epoch', type=int, default=None,
                       help='从指定epoch的检查点恢复（如果指定了--resume目录，会自动查找该目录下的检查点）')
    args = parser.parse_args()
    config = vars(args)

    if torch.cuda.is_available():
        torch.backends.cudnn.benchmark = True

    print("  配置:")
    for key, val in config.items():
        print(f"    {key}: {val}")

    # Validates the device string early; the tensors themselves live in the workers.
    device = torch.device(config['device'])

    # ==================== 2. Dataset selection on disk ====================
    print("\n[2] 加载数据集..")
    script_dir = os.path.dirname(os.path.abspath(__file__))
    data_dir = os.path.join(script_dir, "vaedataset")
    dataset_files = [f for f in os.listdir(data_dir) if f.startswith('vae_dataset_') and f.endswith('.npz')]
    if not dataset_files:
        raise FileNotFoundError(f"在{data_dir}目录下未找到数据集文件")

    if DATASET_NAME is not None:
        if DATASET_NAME in dataset_files:
            selected_dataset = DATASET_NAME
            print(f"  ✅ 使用手动指定数据集: {DATASET_NAME}")
        else:
            print(f"  ⚠️ 指定的数据集不存在: {DATASET_NAME}")
            print("  可用数据集:")
            for i, f in enumerate(sorted(dataset_files), 1):
                print(f"      {i}. {f}")
            raise FileNotFoundError(f"数据集'{DATASET_NAME}' 不存在")
    else:
        # Filenames embed a timestamp, so lexicographic max is the newest.
        selected_dataset = sorted(dataset_files)[-1]
        print(f"  ✅ 自动使用最新数据集: {selected_dataset}")
        if len(dataset_files) > 1:
            print(f"  ℹ️  找到 {len(dataset_files)} 个数据集，其他可选:")
            for f in sorted(dataset_files)[:-1]:
                print(f"      - {f}")
            print("  💡 提示: 在代码开头设置DATASET_NAME 可手动选择")

    dataset_path = os.path.join(data_dir, selected_dataset)
    print(f"  数据集完整路径: {dataset_path}")

    # Preview pass: only for statistics/normalization params; the loaders are
    # discarded immediately so no PATH/solver instance is held by the parent.
    print("\n[3] 数据概览（仅统计，不持有PATH实例）")
    train_loader_preview, val_loader_preview, opponent_goals, observation_dim, normalization_params = prepare_data_loaders(
        config, dataset_path, verbose=True
    )
    n_train_samples = len(getattr(train_loader_preview, 'dataset', []))
    n_val_samples = len(getattr(val_loader_preview, 'dataset', []))
    print(f"  预览: train_batches={len(train_loader_preview)}, val_batches={len(val_loader_preview)}")
    del train_loader_preview, val_loader_preview

    # ==================== Output directories and config persistence ====================
    # NOTE(review): these timestamped directories are created even when --resume
    # later redirects save_dir, leaving an empty vae_<timestamp> dir behind.
    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    models_root = os.path.join(repo_root, "results", "demo_vae", "training", "models")
    os.makedirs(models_root, exist_ok=True)
    save_dir = os.path.join(models_root, f"vae_{timestamp}")
    os.makedirs(save_dir, exist_ok=True)
    results_save_dir = save_dir
    solver_log_file = os.path.join(save_dir, "solver_stats.csv")

    with open(os.path.join(save_dir, "config.json"), 'w') as f:
        json.dump(config, f, indent=2)
    with open(os.path.join(save_dir, "normalization.json"), 'w') as f:
        json.dump(normalization_params, f, indent=2)

    print(f"\n[4] 训练输出目录: {save_dir}")
    print("  每个epoch将在独立子进程中运行，Julia/PATH状态会随进程退出被回收。")
    start_epoch = 1
    train_losses: List[Dict[str, float]] = []
    val_losses: List[Dict[str, float]] = []
    goal_errors: List[Dict[str, float]] = []
    checkpoint_path: Optional[str] = None
    global_batch_counter_val = 0

    if args.resume:
        print(f"\n[4] 从检查点恢复训练...")
        checkpoint_file = args.resume

        # If a directory was given, locate the checkpoint file inside it.
        if os.path.isdir(checkpoint_file):
            checkpoint_dir = checkpoint_file
            if args.resume_epoch is not None:
                # Look for the snapshot of the requested epoch first.
                epoch_checkpoint = os.path.join(checkpoint_dir, f"vae_epoch_{args.resume_epoch}.pth")
                if os.path.exists(epoch_checkpoint):
                    checkpoint_file = epoch_checkpoint
                    print(f"  ✓ 找到epoch {args.resume_epoch}的检查点: {checkpoint_file}")
                else:
                    print(f"  ⚠️ 未找到epoch {args.resume_epoch}的检查点，使用latest_checkpoint.pth")
                    checkpoint_file = os.path.join(checkpoint_dir, "latest_checkpoint.pth")
            else:
                checkpoint_file = os.path.join(checkpoint_dir, "latest_checkpoint.pth")
        else:
            checkpoint_dir = os.path.dirname(checkpoint_file)

        if not os.path.exists(checkpoint_file):
            raise FileNotFoundError(f"检查点文件不存在: {checkpoint_file}")

        # Reuse the original directory and checkpoint so the subprocess loads
        # the existing weights and appends to the same solver log.
        save_dir = checkpoint_dir
        results_save_dir = save_dir
        solver_log_file = os.path.join(save_dir, "solver_stats.csv")
        checkpoint_path = checkpoint_file

        print(f"  ✓ 加载检查点: {checkpoint_file}")

        # Load the checkpoint (supports full latest_checkpoint dicts and bare state_dict files).
        checkpoint_raw = torch.load(checkpoint_file, map_location='cpu', weights_only=False)
        checkpoint = checkpoint_raw if isinstance(checkpoint_raw, dict) else {}
        checkpoint_has_model_state = isinstance(checkpoint, dict) and 'model_state_dict' in checkpoint
        checkpoint_is_state_dict_only = isinstance(checkpoint_raw, dict) and not checkpoint_has_model_state

        # Restore training history (only available in full checkpoints).
        if checkpoint_has_model_state and 'train_losses' in checkpoint and checkpoint['train_losses']:
            train_losses = checkpoint['train_losses']
            print(f"  ✓ 恢复训练损失历史: {len(train_losses)} 个epoch")
        if checkpoint_has_model_state and 'val_losses' in checkpoint and checkpoint['val_losses']:
            val_losses = checkpoint['val_losses']
            print(f"  ✓ 恢复验证损失历史: {len(val_losses)} 个epoch")
        if checkpoint_has_model_state and 'goal_errors' in checkpoint and checkpoint['goal_errors']:
            goal_errors_raw = checkpoint['goal_errors']
            # Make sure every goal_errors entry has an 'epoch' key (old-format compatibility).
            goal_errors = []
            for idx, e in enumerate(goal_errors_raw):
                if isinstance(e, dict):
                    if 'epoch' not in e:
                        # Old format: no epoch key; use index+1 (assumes records start at epoch 1).
                        e = e.copy()
                        e['epoch'] = idx + 1
                    goal_errors.append(e)
            print(f"  ✓ 恢复目标误差历史: {len(goal_errors)} 个记录")

        # Restore the epoch counter.
        if 'epoch' in checkpoint:
            start_epoch = checkpoint['epoch'] + 1
            print(f"  ✓ 从epoch {start_epoch} 继续训练（检查点保存到epoch {checkpoint['epoch']}）")
        elif args.resume_epoch is not None:
            start_epoch = args.resume_epoch + 1
            print(f"  ℹ️ 检测到纯state_dict，按 --resume_epoch={args.resume_epoch} 继续，从 epoch {start_epoch} 开始")
        elif checkpoint_is_state_dict_only:
            print("  ℹ️ 检测到纯state_dict检查点，未提供epoch信息，将从epoch 1开始计数（可用 --resume_epoch 调整）")

        # Restore the global batch counter.
        if 'global_batch_counter' in checkpoint:
            global_batch_counter_val = checkpoint['global_batch_counter']
            print(f"  ✓ 恢复全局batch计数: {global_batch_counter_val}")

        # Check that the normalization parameters match the current dataset.
        norm_file = os.path.join(save_dir, "normalization.json")
        if os.path.exists(norm_file):
            with open(norm_file, 'r') as f:
                saved_norm_params = json.load(f)
            if saved_norm_params != normalization_params:
                print(f"  ⚠️ 警告: 检查点的normalization参数与当前数据集不一致")
                print(f"    检查点: {saved_norm_params}")
                print(f"    当前: {normalization_params}")
                print(f"    继续使用检查点的normalization参数")
                normalization_params = saved_norm_params

    # Single swanlab run in the parent records the whole training (workers log nothing).
    swanlab_run = None
    if config['use_swanlab']:
        if not SWANLAB_AVAILABLE:
            print("⚠️  启用了use_swanlab但未安装swanlab，自动跳过日志记录。")
            config['use_swanlab'] = False
        else:
            import swanlab
            swanlab_run = swanlab.init(
                project=config['swanlab_project'],
                name=f"vae_{timestamp}",
                config={
                    'latent_dim': config['latent_dim'],
                    'hidden_dim': config['hidden_dim'],
                    'batch_size': config['batch_size'],
                    'n_epochs': config['n_epochs'],
                    'learning_rate': config['learning_rate'],
                    'gradient_clip': config['gradient_clip'],
                    'beta_start': config['beta_start'],
                    'beta_end': config['beta_end'],
                    'beta_anneal_epochs': config['beta_anneal_epochs'],
                    'horizon': config['horizon'],
                    'observation_dim': observation_dim,
                    'n_train_samples': n_train_samples,
                    'n_val_samples': n_val_samples,
                    'resumed': args.resume is not None,
                    'start_epoch': start_epoch,
                },
                tags=['drone-vae', 'mcp-decoder', 'nash-equilibrium'] + (['resumed'] if args.resume else [])
            )
            print("  ✅ swanlab 已初始化（主进程单个 run 记录全程）")
            if args.resume:
                print(f"  ℹ️  恢复训练模式，将从epoch {start_epoch}开始记录")

    for epoch in range(start_epoch, config['n_epochs'] + 1):
        print(f"\n{'='*70}")
        print(f"Epoch {epoch}/{config['n_epochs']} - 启动子进程")
        print(f"{'='*70}")

        result = run_epoch_in_subprocess(
            epoch=epoch,
            config=config,
            dataset_path=dataset_path,
            save_dir=save_dir,
            results_save_dir=results_save_dir,
            normalization_params=normalization_params,
            global_batch_start=global_batch_counter_val,
            checkpoint_path=checkpoint_path,
            solver_log_file=solver_log_file,
        )

        # Thread the checkpoint and batch counter into the next epoch's worker.
        checkpoint_path = result['checkpoint_path']
        global_batch_counter_val = result['global_batch_counter']
        train_losses.append(result['train_loss'])
        val_losses.append(result['val_loss'])
        if result['distribution_stats'] is not None:
            goal_errors.append({'epoch': epoch, **result['distribution_stats']})

        print(f"  训练: Loss={result['train_loss']['total']:.4f}, "
              f"Recon={result['train_loss']['reconstruction']:.4f}, "
              f"KL={result['train_loss']['kl_divergence']:.4f}, "
              f"GradNorm={result['train_loss']['grad_norm']:.4f}")
        print(f"  验证: Loss={result['val_loss']['total']:.4f}, "
              f"Recon={result['val_loss']['reconstruction']:.4f}, "
              f"KL={result['val_loss']['kl_divergence']:.4f}, "
              f"GoalErr={result['val_goal_error']:.4f} m")
        print(f"  ⏱️  用时: total={result['epoch_time']:.2f}s, "
              f"train={result['train_time']:.2f}s, val={result['val_time']:.2f}s, "
              f"goal_eval={result['goal_eval_time']:.2f}s")
        if result['distribution_stats'] is not None:
            stats = result['distribution_stats']
            print(f"  📊 分布匹配: 均值差={stats['mean_diff']:.3f}m, "
                  f"标准差差={stats['std_diff']:.3f}m")
            if result['distribution_path']:
                print(f"  🎨 分布可视化已保存: {result['distribution_path']}")
        else:
            print(f"  ℹ️  跳过目标分布评估（每{config['goal_eval_interval']}个epoch）")

        if swanlab_run is not None:
            log_payload = {
                'epoch': epoch,
                'train/loss': result['train_loss']['total'],
                'train/reconstruction_loss': result['train_loss']['reconstruction'],
                'train/kl_divergence': result['train_loss']['kl_divergence'],
                'train/grad_norm': result['train_loss']['grad_norm'],
                'val/loss': result['val_loss']['total'],
                'val/reconstruction_loss': result['val_loss']['reconstruction'],
                'val/kl_divergence': result['val_loss']['kl_divergence'],
                'val/goal_error': result['val_goal_error'],
                'time/train_time': result['train_time'],
                'time/val_time': result['val_time'],
                'time/goal_eval_time': result['goal_eval_time'],
                'time/epoch_time': result['epoch_time'],
            }
            if result['distribution_stats'] is not None:
                log_payload.update({
                    'distribution/mean_diff': result['distribution_stats']['mean_diff'],
                    'distribution/std_diff': result['distribution_stats']['std_diff'],
                    'distribution/pred_mean_x': result['distribution_stats']['pred_mean_x'],
                    'distribution/pred_mean_y': result['distribution_stats']['pred_mean_y'],
                    'distribution/pred_mean_z': result['distribution_stats']['pred_mean_z'],
                })
            swanlab.log(log_payload, step=epoch)
            if result['distribution_path']:
                swanlab.log({"goal_distribution": swanlab.Image(result['distribution_path'])}, step=epoch)

        if (epoch % config['save_interval'] == 0 or epoch == config['n_epochs']) and goal_errors:
            plot_training_history(
                train_losses, val_losses, goal_errors,
                os.path.join(results_save_dir, f"training_history_epoch_{epoch}.png")
            )

    print(f"\n{'='*70}")
    print("训练完成！")
    print(f"{'='*70}")
    print(f"  最终训练损失: {train_losses[-1]['total']:.4f}")
    print(f"  最终验证损失: {val_losses[-1]['total']:.4f}")
    if goal_errors:
        print(f"  最终分布匹配: 均值差={goal_errors[-1]['mean_diff']:.3f}m, "
              f"标准差差={goal_errors[-1]['std_diff']:.3f}m")
    print(f"  模型保存目录: {save_dir}")

    if swanlab_run is not None:
        swanlab.run.summary['final_train_loss'] = train_losses[-1]['total']
        swanlab.run.summary['final_val_loss'] = val_losses[-1]['total']
        if goal_errors:
            swanlab.run.summary['final_mean_diff'] = goal_errors[-1]['mean_diff']
            swanlab.run.summary['final_std_diff'] = goal_errors[-1]['std_diff']
        swanlab.run.summary['save_dir'] = save_dir
        swanlab.finish()
        print("  ✅ swanlab 日志已关闭")


# Script entry point: run the full training pipeline when executed directly.
if __name__ == "__main__":
    main()
