"""
VAE Training Module

实现MCP-VAE的训练流程，包括数据加载、损失计算、优化等
"""

import torch
import torch.nn as nn
import torch.optim as optim
from torch.utils.data import DataLoader, Dataset
from torch.utils.tensorboard import SummaryWriter

import numpy as np

# 解决Qt平台插件问题 - 使用Agg后端（非交互式）
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt

from tqdm import tqdm
import os
import json
from typing import Dict, Any, Optional, List, Tuple
import time
from datetime import datetime

from .vae_model import MCP_VAE
from .vae_model import compute_elbo_loss


class TrajectoryDataset(Dataset):
    """
    Dataset of trajectory observations with optional standardization.

    Mirrors the data-handling logic of the Julia implementation.
    """

    def __init__(
        self,
        observations: np.ndarray,
        initial_states: Optional[np.ndarray] = None,
        gt_goals: Optional[np.ndarray] = None,
        normalize: bool = True,
        obs_mean: Optional[torch.Tensor] = None,
        obs_std: Optional[torch.Tensor] = None
    ):
        """
        Build the dataset.

        Args:
            observations: observation array [N, observation_dim]
            initial_states: initial states [N, state_dim] (optional)
            gt_goals: ground-truth goals [N, goal_dim] (optional)
            normalize: whether to standardize the observations
            obs_mean: precomputed mean (usually from the training split)
            obs_std: precomputed std (usually from the training split)
        """
        def _as_float32(arr):
            # FloatTensor always copies and casts to float32.
            return None if arr is None else torch.FloatTensor(arr)

        self.observations = torch.FloatTensor(observations)
        self.initial_states = _as_float32(initial_states)
        self.gt_goals = _as_float32(gt_goals)

        # Remember whether standardization was applied (needed later to
        # align decoded outputs with the data scale).
        self.is_normalized = normalize

        # Normalization statistics: prefer externally supplied (training-set)
        # values; otherwise compute them from this split.
        if obs_mean is not None and obs_std is not None:
            dtype = self.observations.dtype
            self.obs_mean = torch.as_tensor(obs_mean, dtype=dtype).view(1, -1)
            self.obs_std = torch.as_tensor(obs_std, dtype=dtype).view(1, -1)
        else:
            self.obs_mean = self.observations.mean(dim=0, keepdim=True)
            # Epsilon guards against division by zero for constant features.
            self.obs_std = self.observations.std(dim=0, keepdim=True) + 1e-8

        # Apply standardization in place when requested.
        if normalize:
            self.observations = (self.observations - self.obs_mean) / self.obs_std

    def __len__(self):
        return self.observations.shape[0]

    def __getitem__(self, idx):
        sample = {'observations': self.observations[idx]}
        if self.initial_states is not None:
            sample['initial_states'] = self.initial_states[idx]
        if self.gt_goals is not None:
            sample['gt_goals'] = self.gt_goals[idx]
        return sample

    def get_normalization_params(self):
        """Return the (mean, std) tensors used for standardization."""
        return self.obs_mean, self.obs_std


class VAETrainer:
    """
    MCP-VAE trainer.

    Implements the complete training pipeline (data loading, ELBO
    optimization, validation, checkpointing, TensorBoard logging),
    mirroring the training logic of the Julia version.
    """

    def __init__(
        self,
        model: MCP_VAE,
        train_dataset: TrajectoryDataset,
        val_dataset: Optional[TrajectoryDataset] = None,
        config: Optional[Dict[str, Any]] = None
    ):
        """
        Args:
            model: the MCP-VAE model to train.
            train_dataset: training split.
            val_dataset: optional validation split (enables LR scheduling
                and best-model checkpointing).
            config: overrides for the default configuration below.
        """
        self.model = model
        self.train_dataset = train_dataset
        self.val_dataset = val_dataset

        # Default configuration; any key can be overridden via `config`.
        default_config = {
            'batch_size': 128,
            'learning_rate': 0.0002,
            'num_epochs': 1000,
            'beta_start': 0.0,
            'beta_end': 1.0,
            'beta_warmup_epochs': 100,
            'clip_grad_norm': 50.0,
            'save_interval': 50,
            'log_interval': 10,
            'device': 'cuda' if torch.cuda.is_available() else 'cpu',
            'num_workers': 4,
            'normalize_data': True,  # aligned with Julia: optional input/output standardization
        }

        self.config = {**default_config, **(config or {})}

        # Device setup
        self.device = torch.device(self.config['device'])
        self.model.to(self.device)

        # Normalization switch and statistics (same toggle as the Julia version).
        self.normalize_data = bool(self.config.get('normalize_data', True))
        self.observations_normalized = getattr(self.train_dataset, 'is_normalized', False)
        if self.normalize_data != self.observations_normalized:
            print(f"⚠️ normalize_data={self.normalize_data} 与数据集标准化标记({self.observations_normalized})不一致，将按数据集标记对齐解码输出尺度。")
        self.obs_mean, self.obs_std = self.train_dataset.get_normalization_params()
        self.obs_mean_device = self.obs_mean.to(self.device)
        self.obs_std_device = self.obs_std.to(self.device)
        # Normalization of reconstructions is applied whenever either the
        # config or the dataset says the data is standardized.
        self.apply_normalization = self.normalize_data or self.observations_normalized
        self.norm_params_device = (self.obs_mean_device, self.obs_std_device)

        # Data loaders
        self.train_loader = DataLoader(
            train_dataset,
            batch_size=self.config['batch_size'],
            shuffle=True,
            num_workers=self.config['num_workers'],
            pin_memory=True if self.device.type == 'cuda' else False
        )

        if val_dataset is not None:
            self.val_loader = DataLoader(
                val_dataset,
                batch_size=self.config['batch_size'],
                shuffle=False,
                num_workers=self.config['num_workers'],
                pin_memory=True if self.device.type == 'cuda' else False
            )
        else:
            self.val_loader = None

        # Optimizer
        self.optimizer = optim.Adam(
            self.model.parameters(),
            lr=self.config['learning_rate'],
            betas=(0.9, 0.999)
        )

        # LR scheduler (stepped on validation loss).
        # NOTE(review): `verbose` is deprecated (and removed in recent torch
        # releases) for ReduceLROnPlateau — confirm against target torch version.
        self.scheduler = optim.lr_scheduler.ReduceLROnPlateau(
            self.optimizer, mode='min', factor=0.5, patience=20, verbose=True
        )

        # Training history (lists appended per epoch; val_* stay empty
        # when no validation set is provided).
        self.train_history = {
            'epoch': [],
            'train_loss': [],
            'train_recon_loss': [],
            'train_kl_loss': [],
            'val_loss': [],
            'val_recon_loss': [],
            'val_kl_loss': [],
            'beta': []
        }

        # TensorBoard writer; created lazily by setup_tensorboard().
        self.writer = None

    def setup_tensorboard(self, log_dir: str = 'runs'):
        """Create a timestamped TensorBoard SummaryWriter under `log_dir`."""
        timestamp = datetime.now().strftime('%Y%m%d_%H%M%S')
        log_dir = os.path.join(log_dir, f'vae_training_{timestamp}')
        self.writer = SummaryWriter(log_dir)

    def compute_beta(self, epoch: int) -> float:
        """Compute the KL-divergence weight beta (linear warm-up annealing)."""
        if epoch < self.config['beta_warmup_epochs']:
            # Linear ramp from beta_start to beta_end over the warm-up epochs.
            beta = self.config['beta_start'] + (
                self.config['beta_end'] - self.config['beta_start']
            ) * epoch / self.config['beta_warmup_epochs']
        else:
            beta = self.config['beta_end']

        return beta

    def train_epoch(self, epoch: int) -> Dict[str, float]:
        """Run one training epoch and return averaged loss metrics."""
        self.model.train()

        total_loss = 0
        total_recon_loss = 0
        total_kl_loss = 0
        num_batches = 0

        beta = self.compute_beta(epoch)

        pbar = tqdm(self.train_loader, desc=f'Epoch {epoch}')
        for batch in pbar:
            # Move tensors to the training device.
            observations = batch['observations'].to(self.device)
            initial_states = batch.get('initial_states')
            gt_goals = batch.get('gt_goals')

            if initial_states is not None:
                initial_states = initial_states.to(self.device)
            if gt_goals is not None:
                gt_goals = gt_goals.to(self.device)

            # Forward pass
            self.optimizer.zero_grad()

            try:
                loss_dict = self.model.compute_elbo_loss(
                    observations,
                    initial_states,
                    gt_goals,
                    beta=beta,
                    normalization_params=self.norm_params_device if self.apply_normalization else None,
                    normalize_reconstruction=self.apply_normalization,
                    observations_normalized=self.observations_normalized
                )

                loss = loss_dict['elbo_loss']
                recon_loss = loss_dict['recon_loss']
                kl_loss = loss_dict['kl_loss']

                # Backward pass
                loss.backward()

                # Gradient clipping (disabled when clip_grad_norm <= 0).
                if self.config['clip_grad_norm'] > 0:
                    torch.nn.utils.clip_grad_norm_(
                        self.model.parameters(),
                        self.config['clip_grad_norm']
                    )

                self.optimizer.step()

                # Accumulate losses
                total_loss += loss.item()
                total_recon_loss += recon_loss.item()
                total_kl_loss += kl_loss.item()
                num_batches += 1

                # Update progress bar
                pbar.set_postfix({
                    'Loss': f'{loss.item():.4f}',
                    'Recon': f'{recon_loss.item():.4f}',
                    'KL': f'{kl_loss.item():.4f}',
                    'Beta': f'{beta:.4f}'
                })

            # NOTE(review): broad catch keeps training alive when a batch
            # fails (e.g. solver failure); the error is only printed.
            except Exception as e:
                print(f"训练批次出错: {e}")
                continue

        # Average over successful batches (0 if every batch failed).
        avg_loss = total_loss / num_batches if num_batches > 0 else 0
        avg_recon_loss = total_recon_loss / num_batches if num_batches > 0 else 0
        avg_kl_loss = total_kl_loss / num_batches if num_batches > 0 else 0

        return {
            'loss': avg_loss,
            'recon_loss': avg_recon_loss,
            'kl_loss': avg_kl_loss,
            'beta': beta
        }

    def validate_epoch(self, epoch: int) -> Dict[str, float]:
        """Run one validation epoch; returns {} when no validation loader exists."""
        if self.val_loader is None:
            return {}

        self.model.eval()

        total_loss = 0
        total_recon_loss = 0
        total_kl_loss = 0
        num_batches = 0

        beta = self.compute_beta(epoch)

        with torch.no_grad():
            for batch in self.val_loader:
                observations = batch['observations'].to(self.device)
                initial_states = batch.get('initial_states')
                gt_goals = batch.get('gt_goals')

                if initial_states is not None:
                    initial_states = initial_states.to(self.device)
                if gt_goals is not None:
                    gt_goals = gt_goals.to(self.device)

                try:
                    loss_dict = self.model.compute_elbo_loss(
                        observations,
                        initial_states,
                        gt_goals,
                        beta=beta,
                        normalization_params=self.norm_params_device if self.apply_normalization else None,
                        normalize_reconstruction=self.apply_normalization,
                        observations_normalized=self.observations_normalized
                    )

                    loss = loss_dict['elbo_loss']
                    recon_loss = loss_dict['recon_loss']
                    kl_loss = loss_dict['kl_loss']

                    total_loss += loss.item()
                    total_recon_loss += recon_loss.item()
                    total_kl_loss += kl_loss.item()
                    num_batches += 1

                except Exception as e:
                    print(f"验证批次出错: {e}")
                    continue

        avg_loss = total_loss / num_batches if num_batches > 0 else 0
        avg_recon_loss = total_recon_loss / num_batches if num_batches > 0 else 0
        avg_kl_loss = total_kl_loss / num_batches if num_batches > 0 else 0

        return {
            'loss': avg_loss,
            'recon_loss': avg_recon_loss,
            'kl_loss': avg_kl_loss
        }

    def train(self, save_dir: str = 'checkpoints'):
        """Full training loop: train, validate, schedule LR, log, checkpoint."""
        print(f"开始训练MCP-VAE，设备: {self.device}")
        print(f"训练样本数: {len(self.train_dataset)}")
        if self.val_dataset:
            print(f"验证样本数: {len(self.val_dataset)}")

        # Create the checkpoint directory.
        os.makedirs(save_dir, exist_ok=True)

        # Set up TensorBoard logging.
        self.setup_tensorboard()

        best_val_loss = float('inf')
        start_time = time.time()

        for epoch in range(1, self.config['num_epochs'] + 1):
            # Train
            train_metrics = self.train_epoch(epoch)

            # Validate
            val_metrics = self.validate_epoch(epoch)

            # Update history
            self.train_history['epoch'].append(epoch)
            self.train_history['train_loss'].append(train_metrics['loss'])
            self.train_history['train_recon_loss'].append(train_metrics['recon_loss'])
            self.train_history['train_kl_loss'].append(train_metrics['kl_loss'])
            self.train_history['beta'].append(train_metrics['beta'])

            if val_metrics:
                self.train_history['val_loss'].append(val_metrics['loss'])
                self.train_history['val_recon_loss'].append(val_metrics['recon_loss'])
                self.train_history['val_kl_loss'].append(val_metrics['kl_loss'])

                # Learning-rate scheduling on validation loss.
                self.scheduler.step(val_metrics['loss'])

                # Save the best model so far.
                if val_metrics['loss'] < best_val_loss:
                    best_val_loss = val_metrics['loss']
                    self.save_checkpoint(
                        os.path.join(save_dir, 'best_model.pth'),
                        epoch, best_val_loss
                    )

            # Log to TensorBoard.
            if self.writer:
                self.writer.add_scalar('Loss/Train', train_metrics['loss'], epoch)
                self.writer.add_scalar('Loss/Recon_Train', train_metrics['recon_loss'], epoch)
                self.writer.add_scalar('Loss/KL_Train', train_metrics['kl_loss'], epoch)
                self.writer.add_scalar('Training/Beta', train_metrics['beta'], epoch)

                if val_metrics:
                    self.writer.add_scalar('Loss/Val', val_metrics['loss'], epoch)
                    self.writer.add_scalar('Loss/Recon_Val', val_metrics['recon_loss'], epoch)
                    self.writer.add_scalar('Loss/KL_Val', val_metrics['kl_loss'], epoch)

            # Print progress every log_interval epochs.
            if epoch % self.config['log_interval'] == 0:
                elapsed = time.time() - start_time
                print(f"Epoch {epoch}/{self.config['num_epochs']} "
                      f"Train Loss: {train_metrics['loss']:.4f} "
                      f"Val Loss: {val_metrics.get('loss', 0):.4f} "
                      f"Beta: {train_metrics['beta']:.4f} "
                      f"Time: {elapsed:.1f}s")

            # Periodic checkpoint.
            if epoch % self.config['save_interval'] == 0:
                self.save_checkpoint(
                    os.path.join(save_dir, f'checkpoint_epoch_{epoch}.pth'),
                    epoch
                )

        # Training finished.
        print(f"训练完成! 总时间: {time.time() - start_time:.1f}s")
        if self.writer:
            self.writer.close()

        # Save the final model.
        self.save_checkpoint(
            os.path.join(save_dir, 'final_model.pth'),
            self.config['num_epochs']
        )

    def save_checkpoint(self, filepath: str, epoch: int, val_loss: Optional[float] = None):
        """Save model/optimizer/scheduler state plus history and configs to `filepath`."""
        checkpoint = {
            'epoch': epoch,
            'model_state_dict': self.model.state_dict(),
            'optimizer_state_dict': self.optimizer.state_dict(),
            'scheduler_state_dict': self.scheduler.state_dict(),
            'train_history': self.train_history,
            'config': self.config,
            # Model hyperparameters needed to rebuild the architecture.
            'model_config': {
                'observation_dim': self.model.observation_dim,
                'latent_dim': self.model.latent_dim,
                'game_param_dim': self.model.game_param_dim,
                'lane_width': self.model.lane_width,
                'collision_radius': self.model.collision_radius
            }
        }

        if val_loss is not None:
            checkpoint['val_loss'] = val_loss

        torch.save(checkpoint, filepath)
        print(f"模型保存到: {filepath}")

    def load_checkpoint(self, filepath: str):
        """Load a checkpoint saved by save_checkpoint() and restore all state."""
        # NOTE(review): torch.load without weights_only unpickles arbitrary
        # objects — only load checkpoints from trusted sources.
        checkpoint = torch.load(filepath, map_location=self.device)

        self.model.load_state_dict(checkpoint['model_state_dict'])
        self.optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
        self.scheduler.load_state_dict(checkpoint['scheduler_state_dict'])
        self.train_history = checkpoint['train_history']

        print(f"模型加载自: {filepath}")
        print(f"Epoch: {checkpoint['epoch']}")

        return checkpoint

    def plot_training_history(self, save_path: Optional[str] = None):
        """Plot loss curves and the beta schedule; optionally save to `save_path`."""
        fig, axes = plt.subplots(2, 2, figsize=(12, 8))

        # Total loss
        axes[0, 0].plot(self.train_history['epoch'], self.train_history['train_loss'], label='Train')
        if self.train_history['val_loss']:
            axes[0, 0].plot(self.train_history['epoch'], self.train_history['val_loss'], label='Val')
        axes[0, 0].set_title('Total Loss')
        axes[0, 0].set_xlabel('Epoch')
        axes[0, 0].set_ylabel('Loss')
        axes[0, 0].legend()

        # Reconstruction loss
        axes[0, 1].plot(self.train_history['epoch'], self.train_history['train_recon_loss'], label='Train')
        if self.train_history['val_recon_loss']:
            axes[0, 1].plot(self.train_history['epoch'], self.train_history['val_recon_loss'], label='Val')
        axes[0, 1].set_title('Reconstruction Loss')
        axes[0, 1].set_xlabel('Epoch')
        axes[0, 1].set_ylabel('Loss')
        axes[0, 1].legend()

        # KL-divergence loss
        axes[1, 0].plot(self.train_history['epoch'], self.train_history['train_kl_loss'], label='Train')
        if self.train_history['val_kl_loss']:
            axes[1, 0].plot(self.train_history['epoch'], self.train_history['val_kl_loss'], label='Val')
        axes[1, 0].set_title('KL Divergence Loss')
        axes[1, 0].set_xlabel('Epoch')
        axes[1, 0].set_ylabel('Loss')
        axes[1, 0].legend()

        # Beta weight schedule
        axes[1, 1].plot(self.train_history['epoch'], self.train_history['beta'])
        axes[1, 1].set_title('Beta Weight')
        axes[1, 1].set_xlabel('Epoch')
        axes[1, 1].set_ylabel('Beta')

        plt.tight_layout()

        if save_path:
            plt.savefig(save_path, dpi=300, bbox_inches='tight')
        plt.close()  # close the figure to free memory


def create_data_loaders(
    observations: np.ndarray,
    initial_states: Optional[np.ndarray] = None,
    gt_goals: Optional[np.ndarray] = None,
    train_ratio: float = 0.8,
    batch_size: int = 128,
    num_workers: int = 4,
    normalize: bool = True
) -> Tuple[DataLoader, DataLoader]:
    """
    Build training and validation data loaders from raw arrays.

    Args:
        observations: observation array
        initial_states: initial states (optional)
        gt_goals: ground-truth goals (optional)
        train_ratio: fraction of samples assigned to the training split
        batch_size: mini-batch size
        num_workers: number of loader worker processes
        normalize: standardize inputs using training-split statistics

    Returns:
        (train_loader, val_loader)
    """
    # Random train/validation split.
    n_total = len(observations)
    split = int(n_total * train_ratio)
    perm = np.random.permutation(n_total)
    train_idx, val_idx = perm[:split], perm[split:]

    def _take(arr, idx):
        # Index an optional array, propagating None.
        return arr[idx] if arr is not None else None

    train_dataset = TrajectoryDataset(
        observations[train_idx],
        _take(initial_states, train_idx),
        _take(gt_goals, train_idx),
        normalize=normalize
    )

    # The validation split is standardized with the training-split
    # statistics so both splits live on the same scale.
    obs_mean, obs_std = train_dataset.get_normalization_params()
    val_dataset = TrajectoryDataset(
        observations[val_idx],
        _take(initial_states, val_idx),
        _take(gt_goals, val_idx),
        normalize=normalize,
        obs_mean=obs_mean,
        obs_std=obs_std
    )

    train_loader = DataLoader(
        train_dataset, batch_size=batch_size, shuffle=True, num_workers=num_workers
    )
    val_loader = DataLoader(
        val_dataset, batch_size=batch_size, shuffle=False, num_workers=num_workers
    )

    return train_loader, val_loader


# ========== Script-style training utilities (aligned with 3demo/vae_demo/train_vae.py) ==========

def log_solver_stats_to_file(
    log_path: Optional[str],
    phase: str,
    epoch: int,
    batch_idx: int,
    stats: Optional[Dict[str, Any]]
) -> None:
    """Append one row of solver statistics to a CSV file for offline analysis.

    Args:
        log_path: target CSV path; the call is a no-op when falsy.
        phase: phase label for the row (e.g. "train" / "val").
        epoch: current epoch number.
        batch_idx: current batch index.
        stats: solver statistics dict; the call is a no-op when falsy.
    """
    if not log_path or not stats:
        return
    # Fix: os.makedirs('') raises FileNotFoundError, so only create a
    # directory when the path actually has a directory component.
    parent = os.path.dirname(log_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    header = "phase,epoch,batch,calls,success,failures,time_limit,solve_time_total,solve_time_avg,status_counts\n"
    need_header = not os.path.exists(log_path)
    status_counts = stats.get('status_counts', {})
    status_str = json.dumps(status_counts, ensure_ascii=False)
    # Fix: double quotes inside a quoted CSV field must be doubled
    # (RFC 4180); json.dumps emits embedded '"' characters.
    status_field = status_str.replace('"', '""')
    calls = max(int(stats.get('calls', 0)), 0)
    solve_time_total = float(stats.get('solve_time', 0.0))
    solve_time_avg = solve_time_total / calls if calls > 0 else 0.0
    line = (
        f"{phase},{epoch},{batch_idx},"
        f"{calls},{int(stats.get('success', 0))},{int(stats.get('failures', 0))},"
        f"{int(stats.get('time_limit', 0))},"
        f"{solve_time_total:.6f},{solve_time_avg:.6f},\"{status_field}\"\n"
    )
    with open(log_path, 'a', encoding='utf-8') as f:
        if need_header:
            f.write(header)
        f.write(line)


def _maybe_import_swanlab(enabled: bool):
    if not enabled:
        return False, None
    try:
        import swanlab  # type: ignore
    except ImportError:
        return False, None
    return True, swanlab


def train_epoch(
    model: torch.nn.Module,
    dataloader: DataLoader,
    optimizer: optim.Optimizer,
    ego_goal: torch.Tensor,
    beta: float,
    device: torch.device,
    use_amp: bool = False,
    scaler=None,
    epoch: int = 0,
    gradient_clip: float = 1.0,
    diagnostic_interval: int = 0,
    global_batch_counter: Optional[Dict[str, int]] = None,
    swanlab_enabled: bool = False,
    solver_log_file: Optional[str] = None,
    solver_log_phase: str = "train",
    normalization_params: Optional[Any] = None,
    observations_normalized: bool = False,
    normalize_reconstruction: bool = False,
) -> Dict[str, float]:
    """
    Run one training epoch.

    Args:
        model: VAE model whose forward returns a dict with 'observations',
            'mu' and 'logvar'.
        dataloader: yields (observations, initial_states, true_opponent_goals).
        optimizer: optimizer stepping the model parameters.
        ego_goal: ego goal tensor forwarded to the model.
        beta: KL weight of the ELBO.
        device: target device for the batch tensors.
        use_amp: enable CUDA mixed precision (requires `scaler` and a CUDA device).
        scaler: GradScaler instance used when `use_amp` is set.
        epoch: epoch index, used only for logging.
        gradient_clip: max norm passed to clip_grad_norm_.
        diagnostic_interval: unused here; kept for interface compatibility.
        global_batch_counter: mutable {'count': int} shared across epochs
            (used as the swanlab step).
        swanlab_enabled: log per-batch metrics to swanlab when available.
        solver_log_file: optional CSV path for MCP solver statistics.
        solver_log_phase: phase label written to the solver CSV.
        normalization_params: either a ready (mean, std) pair or a dict with
            'enabled' / 'obs_mean' / 'obs_std' entries.
        observations_normalized: whether inputs are already standardized.
        normalize_reconstruction: standardize reconstructions before the loss.

    Returns:
        Dict with averaged 'total', 'reconstruction', 'kl_divergence' losses
        and the mean 'grad_norm'.
    """
    model.train()

    total_losses = {'total': 0.0, 'reconstruction': 0.0, 'kl_divergence': 0.0}
    grad_norms: List[float] = []
    n_batches = 0
    # The MCP solver (if present) hangs off the decoder and exposes per-batch stats.
    decoder = getattr(model, 'decoder', None)
    mcp_solver = getattr(decoder, 'mcp_solver', None) if decoder is not None else None
    if global_batch_counter is None:
        global_batch_counter = {'count': 0}

    swanlab_enabled, swanlab = _maybe_import_swanlab(swanlab_enabled)

    # Accept either a ready (mean, std) tuple or a config-style dict.
    norm_params = None
    if normalization_params is not None:
        if isinstance(normalization_params, dict):
            if normalization_params.get('enabled') and 'obs_mean' in normalization_params and 'obs_std' in normalization_params:
                norm_params = (
                    torch.as_tensor(normalization_params['obs_mean'], device=device).view(1, -1),
                    torch.as_tensor(normalization_params['obs_std'], device=device).view(1, -1),
                )
        else:
            norm_params = normalization_params

    for batch_idx, (observations_batch, initial_states_batch, true_opponent_goals_batch) in enumerate(dataloader):
        batch_start = time.time()
        observations_batch = observations_batch.to(device, non_blocking=True)
        initial_states_batch = initial_states_batch.to(device, non_blocking=True)
        true_opponent_goals_batch = true_opponent_goals_batch.to(device, non_blocking=True)

        optimizer.zero_grad(set_to_none=True)
        if mcp_solver is not None and hasattr(mcp_solver, 'reset_batch_stats'):
            mcp_solver.reset_batch_stats()

        if use_amp and scaler is not None and device.type == 'cuda':
            with torch.cuda.amp.autocast():
                output = model(observations_batch, initial_states_batch, ego_goal)
                loss, loss_dict = compute_elbo_loss(
                    observations_batch,
                    output['observations'],
                    output['mu'],
                    output['logvar'],
                    beta,
                    normalization_params=norm_params,
                    observations_normalized=observations_normalized,
                    normalize_reconstruction=normalize_reconstruction,
                )
            scaler.scale(loss).backward()
            # Fix: gradients must be unscaled before clipping, otherwise the
            # clip threshold is applied to the loss-scaled gradients and the
            # reported grad_norm is meaningless (see PyTorch AMP docs,
            # "Working with Unscaled Gradients / Gradient clipping").
            scaler.unscale_(optimizer)
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
            scaler.step(optimizer)
            scaler.update()
        else:
            output = model(observations_batch, initial_states_batch, ego_goal)
            loss, loss_dict = compute_elbo_loss(
                observations_batch,
                output['observations'],
                output['mu'],
                output['logvar'],
                beta,
                normalization_params=norm_params,
                observations_normalized=observations_normalized,
                normalize_reconstruction=normalize_reconstruction,
            )
            loss.backward()
            grad_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), gradient_clip)
            optimizer.step()

        grad_norms.append(float(grad_norm))
        batch_timeout_count = 0
        batch_stats = None
        if mcp_solver is not None and hasattr(mcp_solver, 'get_batch_stats'):
            batch_stats = mcp_solver.get_batch_stats()
            batch_timeout_count = batch_stats.get('time_limit', 0)
            log_solver_stats_to_file(
                solver_log_file,
                solver_log_phase,
                epoch,
                batch_idx + 1,
                batch_stats
            )

        # Accumulate losses as plain floats.
        for key in total_losses:
            total_losses[key] += float(loss_dict[key])
        n_batches += 1

        if swanlab_enabled and swanlab is not None:
            swanlab.log(
                {
                    'train/batch_loss': float(loss_dict['total']),
                    'train/batch_recon': float(loss_dict['reconstruction']),
                    'train/batch_kl': float(loss_dict['kl_divergence']),
                    'train/batch_grad_norm': float(grad_norm),
                    'train/batch_solver_timeouts': float(batch_timeout_count),
                },
                step=global_batch_counter['count'],
            )
        global_batch_counter['count'] += 1

        # Lightweight progress heartbeat every 50 batches.
        if (batch_idx + 1) % 50 == 0:
            batch_time = time.time() - batch_start
            print(f"  ⏱️  Batch {batch_idx+1}/{len(dataloader)}: loss={loss_dict['total']:.4f}, grad_norm={grad_norm:.2f}, time={batch_time:.2f}s")

        # Release references early to keep peak memory down.
        del output, loss, loss_dict

    avg_losses = {key: val / max(n_batches, 1) for key, val in total_losses.items()}
    avg_losses['grad_norm'] = float(np.mean(grad_norms)) if grad_norms else 0.0
    return avg_losses


def validate(
    model: torch.nn.Module,
    dataloader: DataLoader,
    ego_goal: torch.Tensor,
    beta: float,
    device: torch.device,
    solver_log_file: Optional[str] = None,
    solver_log_phase: str = "val",
    epoch: int = 0,
    normalization_params: Optional[Any] = None,
    observations_normalized: bool = False,
    normalize_reconstruction: bool = False,
) -> Dict[str, float]:
    """
    Validate the model (runs the full forward pass, including the MCP decoder).

    Args:
        model: VAE model whose forward returns a dict with 'observations',
            'mu', 'logvar' and optionally 'opponent_goals'.
        dataloader: yields (observations, initial_states, true_opponent_goals).
        ego_goal: ego goal tensor forwarded to the model.
        beta: KL weight of the ELBO.
        device: target device for the batch tensors.
        solver_log_file: optional CSV path for MCP solver statistics.
        solver_log_phase: phase label written to the solver CSV.
        epoch: epoch index, used only for logging.
        normalization_params: either a ready (mean, std) pair or a dict with
            'enabled' / 'obs_mean' / 'obs_std' entries.
        observations_normalized: whether inputs are already standardized.
        normalize_reconstruction: standardize reconstructions before the loss.

    Returns:
        Dict with averaged 'total', 'reconstruction', 'kl_divergence' losses
        and the mean Euclidean 'goal_error' of predicted opponent goals.
    """
    was_training = model.training
    model.eval()

    total_losses = {'total': 0.0, 'reconstruction': 0.0, 'kl_divergence': 0.0}
    n_batches = 0
    goal_error_sum = 0.0
    n_goal_samples = 0

    decoder = getattr(model, 'decoder', None)
    mcp_solver = getattr(decoder, 'mcp_solver', None) if decoder is not None else None

    # Accept either a ready (mean, std) tuple or a config-style dict.
    norm_params = None
    if normalization_params is not None:
        if isinstance(normalization_params, dict):
            if normalization_params.get('enabled') and 'obs_mean' in normalization_params and 'obs_std' in normalization_params:
                norm_params = (
                    torch.as_tensor(normalization_params['obs_mean'], device=device).view(1, -1),
                    torch.as_tensor(normalization_params['obs_std'], device=device).view(1, -1),
                )
        else:
            norm_params = normalization_params

    with torch.no_grad():
        for observations_batch, initial_states_batch, true_opponent_goals_batch in dataloader:
            observations_batch = observations_batch.to(device)
            initial_states_batch = initial_states_batch.to(device)
            true_opponent_goals_batch = true_opponent_goals_batch.to(device)

            if mcp_solver is not None and hasattr(mcp_solver, 'reset_batch_stats'):
                mcp_solver.reset_batch_stats()

            output = model(observations_batch, initial_states_batch, ego_goal)

            _, loss_dict = compute_elbo_loss(
                observations_batch,
                output['observations'],
                output['mu'],
                output['logvar'],
                beta,
                normalization_params=norm_params,
                observations_normalized=observations_normalized,
                normalize_reconstruction=normalize_reconstruction,
            )

            # Fix: cast to float so the returned averages are plain Python
            # floats even when the loss dict holds 0-d tensors (train_epoch
            # already accumulates this way).
            for key in total_losses:
                total_losses[key] += float(loss_dict[key])
            n_batches += 1

            # Goal-prediction accuracy: per-sample Euclidean error vs. ground truth.
            predicted_goals = output.get('opponent_goals')
            if predicted_goals is not None:
                error = torch.norm(predicted_goals.detach() - true_opponent_goals_batch, dim=1)
                goal_error_sum += error.sum().item()
                n_goal_samples += error.numel()

            if mcp_solver is not None and hasattr(mcp_solver, 'get_batch_stats'):
                batch_stats = mcp_solver.get_batch_stats()
                log_solver_stats_to_file(
                    solver_log_file,
                    solver_log_phase,
                    epoch,
                    n_batches,
                    batch_stats
                )

    avg_losses = {key: val / max(n_batches, 1) for key, val in total_losses.items()}
    avg_losses['goal_error'] = goal_error_sum / max(n_goal_samples, 1) if n_goal_samples > 0 else 0.0

    # Restore the caller's train/eval mode.
    if was_training:
        model.train()

    return avg_losses


def sample_and_visualize_latent_space(
    model: torch.nn.Module,
    true_goals: np.ndarray,
    device: torch.device,
    num_samples: int = 10000,
    save_path: Optional[str] = None
) -> Dict[str, float]:
    """
    Sample the latent prior, decode to goal parameters, and compare the
    decoded distribution with the ground-truth one (no MCP solve required).

    Returns summary statistics; optionally saves a 3-panel scatter figure.
    """
    was_training = model.training
    model.eval()

    # Draw latent samples from the standard-normal prior and decode them.
    with torch.no_grad():
        latents = torch.randn(num_samples, model.encoder.latent_dim, device=device)
        predicted_goals = model.decoder.decode_to_game_params(latents).cpu().numpy()

    pred_mean = predicted_goals.mean(axis=0)
    pred_std = predicted_goals.std(axis=0)
    true_mean = true_goals.mean(axis=0)
    true_std = true_goals.std(axis=0)

    stats = {
        'mean_diff': np.linalg.norm(pred_mean - true_mean),
        'std_diff': np.linalg.norm(pred_std - true_std),
        'pred_mean_x': pred_mean[0],
        'pred_mean_y': pred_mean[1],
        'pred_mean_z': pred_mean[2],
        'true_mean_x': true_mean[0],
        'true_mean_y': true_mean[1],
        'true_mean_z': true_mean[2],
    }

    if save_path:
        fig = plt.figure(figsize=(15, 5))
        # (subplot position, column i, column j, x label, y label, title)
        panels = (
            (131, 0, 1, 'X (m)', 'Y (m)', 'Goal Distribution (XY plane)'),
            (132, 0, 2, 'X (m)', 'Z (m)', 'Goal Distribution (XZ plane)'),
            (133, 1, 2, 'Y (m)', 'Z (m)', 'Goal Distribution (YZ plane)'),
        )
        for pos, i, j, xlabel, ylabel, title in panels:
            ax = fig.add_subplot(pos)
            ax.scatter(true_goals[:, i], true_goals[:, j], alpha=0.3, s=10, label='True', c='blue')
            ax.scatter(predicted_goals[:, i], predicted_goals[:, j], alpha=0.3, s=10, label='Predicted', c='red')
            ax.set_xlabel(xlabel)
            ax.set_ylabel(ylabel)
            ax.set_title(title)
            ax.legend()
            ax.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.savefig(save_path, dpi=150, bbox_inches='tight')
        plt.close()

    # Restore the caller's train/eval mode.
    if was_training:
        model.train()

    return stats
