#!/usr/bin/env python3
"""
VAE训练数据集生成脚本 (Julia风格对齐)

MPC策略（与MPGP一致）：
1. 预测horizon: 10步
2. 执行步数: 1步 (control_horizon)
3. MPC滚动求解生成50步轨迹

数据生成策略（Julia风格）：
1. 生成100个episodes（直行50 + 右拐50，等量采样）
2. 每个episode使用MPC滚动求解生成50步轨迹（共51个状态点）
3. 对每条轨迹进行切片（滑动窗口，长度10步）
4. 每个样本包含：
   - Ego轨迹(60维): Ego完整状态(6维×10步) - 观测窗口
   - Opponent轨迹(60维): Opponent完整状态(6维×10步) - 观测窗口
   - Opponent目标(3维): opponent_goal - 同episode共享
   - 初始状态(12维): 观测窗口第一帧的状态 (t-1) - Julia风格

双峰分布设计（Julia MixtureModel对齐）：
- Julia: MixtureModel([峰1, 峰2], weights=[0.5, 0.5])
- Python: 每个episode随机选择峰1或峰2 (p=0.5)，然后高斯采样
- 峰1: [0, -7, 25] (向左下避让)
- 峰2: [-7, 0, 25] (向左避让)
- 结果: 两种避让策略的样本随机交错分布

Julia风格关键设计：
- initial_states_indices = trajectory_slices_indices .- 1
- 初始状态等于观测窗口的第一帧（t-1）
- 观测窗口覆盖[t-1, t+8]（包含初始状态）
- 51步轨迹 -> 42个切片（窗口包含初始状态）

数据结构：
- Ego轨迹: 每步6维 [px, py, pz, vx, vy, vz] × 10步 = 60维
- Opponent轨迹: 每步6维 [px, py, pz, vx, vy, vz] × 10步 = 60维
- VAE输入: Ego轨迹(60维) + Opponent轨迹(60维) = 120维

最终数据集：
- 100 episodes × 42 slices = 4200 个样本
- 每个样本：60维Ego + 60维Opponent + 3维目标 + 12维初始状态(t-1)
- 峰1和峰2的样本随机混合（Julia风格）
"""

# 解决OpenMP冲突问题
import os
os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'

import sys
import numpy as np
import torch
from typing import Tuple, List, Optional
from datetime import datetime
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# 添加路径
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', '..'))
sys.path.insert(0, repo_root)  # allow importing games package
sys.path.insert(0, os.path.join(repo_root, 'src'))
sys.path.insert(0, os.path.join(repo_root, 'games'))

from games.drone_game import DroneGame
from src.mcp.mcp_solver import MCPGameSolver


def _to_numpy(value):
    """Convert tensors or lists to numpy arrays."""
    if isinstance(value, torch.Tensor):
        return value.detach().cpu().numpy()
    return np.asarray(value)


def generate_episode_with_mpc(
    game: DroneGame,
    mcp_solver: MCPGameSolver,
    ego_goal: torch.Tensor,
    opponent_goal: torch.Tensor,
    n_sim_steps: int = 50,
    control_horizon: int = 5
) -> torch.Tensor:
    """
    Generate one episode via receding-horizon MPC (aligned with MPGP).

    Strategy:
    1. Solve the full-horizon Nash equilibrium at the current state.
    2. Execute the first ``control_horizon`` control steps.
    3. Update the state and repeat until ``n_sim_steps`` steps are done.

    If the solver reports failure or raises, the episode advances with
    zero control for the current MPC cycle, so the returned trajectory
    always reaches the requested length.

    Args:
        game: Drone game providing dynamics via ``step_func``; its
            ``target_positions`` are temporarily overwritten and restored.
        mcp_solver: MCP Nash-equilibrium solver.
        ego_goal: Ego target position, shape [3].
        opponent_goal: Opponent target position, shape [3].
        n_sim_steps: Total number of simulation steps.
        control_horizon: Steps executed per MPC solve.

    Returns:
        trajectory: [n_sim_steps+1, 12] full trajectory including the
        initial state.
    """
    # Remember the game's goals; target_positions is mutated in place below.
    original_goals = [game.target_positions[i].clone() for i in range(2)]
    game.target_positions[0] = ego_goal
    game.target_positions[1] = opponent_goal

    current_state = game.get_initial_state()
    trajectory = [current_state.clone()]

    step = 0

    def _advance(control_for, n_steps):
        """Step the dynamics n_steps times, recording each new state."""
        nonlocal current_state, step
        for k in range(n_steps):
            next_state = game.step_func(current_state, control_for(k))
            trajectory.append(next_state.clone())
            current_state = next_state
            step += 1

    # Receding-horizon loop: solve once, execute control_horizon steps.
    while step < n_sim_steps:
        steps_to_execute = min(control_horizon, n_sim_steps - step)
        try:
            with torch.no_grad():
                result = mcp_solver.solve_game(current_state, warm_start=None)

            if result['success']:
                controls = result['controls']  # [horizon, 6]
                last = controls.shape[0] - 1
                # Clamp the index so we never read past the solver horizon.
                _advance(lambda k: controls[min(k, last)], steps_to_execute)
            else:
                if step == 0:  # print full diagnostics on the very first failure
                    print(f"  [警告] Step {step}: Nash求解失败")
                    print(f"    状态: {result.get('status', 'unknown')}")
                    if 'residual' in result:
                        print(f"    残差: {result['residual']:.6e}")
                    else:
                        print("    残差: N/A")
                    print(f"    当前状态: ego_pos={current_state[0:3].tolist()}, opp_pos={current_state[6:9].tolist()}")
                    print(f"    目标: ego={ego_goal.tolist()}, opp={opponent_goal.tolist()}")
                else:
                    print(f"  [警告] Step {step}: Nash求解失败，使用零控制")
                zero_control = torch.zeros(6)
                _advance(lambda k: zero_control, steps_to_execute)

        except Exception as e:
            print(f"  [错误] Step {step}: {e}")
            # A partially completed _advance may already have consumed some
            # steps before raising, so recompute the remaining count.
            steps_to_execute = min(control_horizon, n_sim_steps - step)
            zero_control = torch.zeros(6)
            _advance(lambda k: zero_control, steps_to_execute)

    # Restore the goals saved on entry.
    game.target_positions[0] = original_goals[0]
    game.target_positions[1] = original_goals[1]

    return torch.stack(trajectory, dim=0)


def plot_episode_trajectory_3d(
    trajectory: torch.Tensor,
    ego_goal: torch.Tensor,
    opponent_goal: torch.Tensor,
    episode_id: int = 0,
    save_path: str = None
):
    """
    Plot a single episode: 3D trajectory, XY top view, and altitude profile.

    Args:
        trajectory: [n_steps, 12] full state trajectory.
        ego_goal: [3] ego target position.
        opponent_goal: [3] opponent target position.
        episode_id: episode index shown in the figure title.
        save_path: optional path; when given the figure is also saved to disk.
    """
    # Accept torch tensors or numpy arrays transparently.
    trajectory, ego_goal, opponent_goal = (
        arr.numpy() if isinstance(arr, torch.Tensor) else arr
        for arr in (trajectory, ego_goal, opponent_goal)
    )

    # Position columns: ego occupies state[0:3], opponent state[6:9].
    ego_xyz = trajectory[:, 0:3]
    opp_xyz = trajectory[:, 6:9]

    fig = plt.figure(figsize=(15, 5))

    # --- Panel 1: full 3D trajectory ---------------------------------
    ax1 = fig.add_subplot(131, projection='3d')
    ax1.plot(ego_xyz[:, 0], ego_xyz[:, 1], ego_xyz[:, 2],
             'b-', linewidth=2, label='Ego', alpha=0.8)
    ax1.plot(opp_xyz[:, 0], opp_xyz[:, 1], opp_xyz[:, 2],
             'r-', linewidth=2, label='Opponent', alpha=0.8)
    # Start markers.
    ax1.scatter(ego_xyz[0, 0], ego_xyz[0, 1], ego_xyz[0, 2],
                c='blue', marker='o', s=100, label='Ego Start')
    ax1.scatter(opp_xyz[0, 0], opp_xyz[0, 1], opp_xyz[0, 2],
                c='red', marker='o', s=100, label='Opp Start')
    # Goal markers.
    ax1.scatter(ego_goal[0], ego_goal[1], ego_goal[2],
                c='blue', marker='*', s=300, label='Ego Goal', edgecolors='black')
    ax1.scatter(opponent_goal[0], opponent_goal[1], opponent_goal[2],
                c='red', marker='*', s=300, label='Opp Goal', edgecolors='black')
    ax1.set_xlabel('X (m)')
    ax1.set_ylabel('Y (m)')
    ax1.set_zlabel('Z (m)')
    ax1.set_title(f'Episode {episode_id}: 3D Trajectory')
    ax1.legend()
    ax1.grid(True, alpha=0.3)

    # --- Panel 2: top-down XY view -----------------------------------
    ax2 = fig.add_subplot(132)
    ax2.plot(ego_xyz[:, 0], ego_xyz[:, 1], 'b-', linewidth=2, label='Ego', alpha=0.8)
    ax2.plot(opp_xyz[:, 0], opp_xyz[:, 1], 'r-', linewidth=2, label='Opponent', alpha=0.8)
    ax2.scatter(ego_xyz[0, 0], ego_xyz[0, 1], c='blue', marker='o', s=100)
    ax2.scatter(opp_xyz[0, 0], opp_xyz[0, 1], c='red', marker='o', s=100)
    ax2.scatter(ego_goal[0], ego_goal[1], c='blue', marker='*', s=300, edgecolors='black')
    ax2.scatter(opponent_goal[0], opponent_goal[1], c='red', marker='*', s=300, edgecolors='black')
    ax2.set_xlabel('X (m)')
    ax2.set_ylabel('Y (m)')
    ax2.set_title('XY Plane View')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    ax2.axis('equal')

    # --- Panel 3: altitude vs. time ----------------------------------
    ax3 = fig.add_subplot(133)
    time_axis = np.arange(len(trajectory)) * 0.1  # simulation uses dt = 0.1 s
    ax3.plot(time_axis, ego_xyz[:, 2], 'b-', linewidth=2, label='Ego', alpha=0.8)
    ax3.plot(time_axis, opp_xyz[:, 2], 'r-', linewidth=2, label='Opponent', alpha=0.8)
    ax3.axhline(y=ego_goal[2], color='b', linestyle='--', alpha=0.5, label='Ego Goal Z')
    ax3.axhline(y=opponent_goal[2], color='r', linestyle='--', alpha=0.5, label='Opp Goal Z')
    ax3.set_xlabel('Time (s)')
    ax3.set_ylabel('Altitude (m)')
    ax3.set_title('Altitude Profile')
    ax3.legend()
    ax3.grid(True, alpha=0.3)

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=150, bbox_inches='tight')
        print(f"  📊 轨迹图已保存: {save_path}")

    plt.show()


def plot_examples_from_full_dataset(full_dataset_path: str, save_dir: str):
    """
    Pick two representative episodes (one per opponent-goal peak) from the
    full-trajectory dataset and render them with plot_episode_trajectory_3d.

    Args:
        full_dataset_path: path to the .npz written by save_full_episode_dataset.
        save_dir: directory where the example figures are saved.
    """
    # np.load returns an NpzFile that keeps the archive open; use a
    # context manager so the file handle is closed deterministically.
    # (The extracted arrays stay valid after close.)
    with np.load(full_dataset_path) as data:
        trajectories = data["trajectories"]
        ego_goals = data["ego_goals"]
        opponent_goals = data["opponent_goals"]

    def find_closest(target):
        """Index of the episode whose opponent goal is nearest to *target*."""
        target = np.asarray(target)
        diff = opponent_goals - target
        return np.argmin(np.linalg.norm(diff, axis=1))

    # Peak centres of the bimodal goal distribution (see module docstring).
    straight_idx = find_closest([0.0, -7.0, 25.0])
    turn_idx = find_closest([-7.0, 0.0, 25.0])
    if straight_idx == turn_idx:
        # Degenerate case (e.g. one-episode dataset): pick a distinct
        # neighbour when one exists.
        turn_idx = min(turn_idx + 1, len(trajectories) - 1)

    os.makedirs(save_dir, exist_ok=True)
    straight_path = os.path.join(save_dir, "plotdataset_peak1.png")
    turn_path = os.path.join(save_dir, "plotdataset_peak2.png")

    plot_episode_trajectory_3d(
        trajectories[straight_idx],
        ego_goals[straight_idx],
        opponent_goals[straight_idx],
        episode_id=straight_idx,
        save_path=straight_path,
    )

    plot_episode_trajectory_3d(
        trajectories[turn_idx],
        ego_goals[turn_idx],
        opponent_goals[turn_idx],
        episode_id=turn_idx,
        save_path=turn_path,
    )


def generate_dataset_with_mpc_slicing(
    game: DroneGame,
    mcp_solver: MCPGameSolver,
    n_episodes: int = 100,
    n_sim_steps: int = 50,
    observation_window: int = 10,
    goal_perturbation: float = 0.5
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, np.ndarray, List[torch.Tensor], List[torch.Tensor]]:
    """
    Generate the full VAE dataset via MPC rollout + episode slicing (Julia style).

    Strategy:
    1. Sample each episode's opponent goal from a two-peak Gaussian mixture
       (sigma = goal_perturbation / 2, so ~95% of samples fall within
       +/- goal_perturbation metres of the chosen peak centre).
    2. Roll out each episode with receding-horizon MPC
       (prediction horizon 10, control_horizon=1, n_sim_steps steps).
    3. Slice every trajectory with a sliding window of observation_window
       steps. Julia style: initial_states_indices = slice_indices .- 1,
       i.e. a slice's initial state equals the window's own first frame (t-1).
    4. Each sample = ego window + opponent window + opponent goal +
       initial state.

    Args:
        game: Drone game (its target_positions are temporarily overwritten
            inside generate_episode_with_mpc).
        mcp_solver: MCP Nash-equilibrium solver.
        n_episodes: Number of episodes to generate.
        n_sim_steps: Simulation steps per episode.
        observation_window: Sliding-window length in steps.
        goal_perturbation: Goal perturbation range (+/- metres).

    Returns:
        ego_trajectories:      [N, 60] flattened ego windows (6 dims x 10 steps)
        opponent_trajectories: [N, 60] flattened opponent windows
        initial_states:        [N, 12] first frame (t-1) of each window
        opponent_goals:        [N, 3]  opponent goal per sample
        all_trajectories:      list of [n_sim_steps+1, 12] full trajectories
        all_episode_goals:     list of (ego_goal, opponent_goal) per episode

    Data augmentation:
        n_episodes long trajectories ->
        n_episodes x (n_sim_steps + 2 - observation_window) samples,
        e.g. 1 episode x 42 slices = 42 samples (quick check),
        100 episodes x 42 slices = 4200 samples (production).
    """
    print(f"\n{'='*70}")
    print(f"🚀 VAE数据集生成 (Julia风格)")
    print(f"{'='*70}")
    print(f"\n配置参数:")
    print(f"  Episodes数量:        {n_episodes}")
    print(f"  每个episode模拟步数: {n_sim_steps}")
    print(f"  观测窗口长度:        {observation_window}步")
    print(f"  目标扰动范围:        ±{goal_perturbation}米")
    
    # Fixed ego goal, identical for every episode.
    original_ego_goal = torch.tensor([7.0, 0.0, 35.0])
    
    # 🎯 Bimodal Gaussian goal distribution (Julia MixtureModel analogue):
    # peak 1: [0, -7, 25] — opponent evades down-left (50% probability)
    # peak 2: [-7, 0, 25] — opponent evades left (50% probability)
    peak1_center = torch.tensor([0.0, -7.0, 25.0])
    peak2_center = torch.tensor([-7.0, 0.0, 25.0])
    
    # Std dev = goal_perturbation/2 so ~95% of samples stay within ±goal_perturbation.
    gaussian_std = goal_perturbation / 2
    
    print(f"\n目标设置 (双峰高斯混合，Julia风格):")
    print(f"  Ego目标 (固定):      {original_ego_goal.tolist()}")
    print(f"  Opponent目标分布:    MixtureModel (随机混合)")
    print(f"    峰1: μ={peak1_center.tolist()}, σ={gaussian_std:.1f}, p=0.5")
    print(f"      → Opponent向左下避让 (Y方向)")
    print(f"    峰2: μ={peak2_center.tolist()}, σ={gaussian_std:.1f}, p=0.5")
    print(f"      → Opponent向左避让 (X方向)")
    print(f"  采样方式: 每个episode随机选择峰1或峰2，然后高斯采样")
    print(f"  物理意义: 两种不同的避让策略，随机交错分布")
    
    all_ego_trajectories = []
    all_opponent_trajectories = []
    all_initial_states = []
    all_opponent_goals = []
    
    # Full trajectories kept for later visualization.
    all_trajectories = []
    all_episode_goals = []
    
    print(f"\n{'='*70}")
    print(f"开始生成Episodes (Julia风格随机混合)...")
    print(f"{'='*70}\n")
    
    for episode_id in range(n_episodes):
        print(f"[Episode {episode_id+1}/{n_episodes}]")
        
        # 🎯 Mixture sampling (Julia style): first pick a peak with
        # probability 0.5 each, then draw a Gaussian sample around it.
        if np.random.rand() < 0.5:
            # Peak 1: evade down-left, centre [0, -7, 25].
            center = peak1_center
            peak_name = "峰1(左下)"
        else:
            # Peak 2: evade left, centre [-7, 0, 25].
            center = peak2_center
            peak_name = "峰2(左)"
        
        # Gaussian perturbation around the chosen peak centre.
        opponent_goal = center + torch.tensor([
            np.random.normal(0, gaussian_std),  # X
            np.random.normal(0, gaussian_std),  # Y
            np.random.normal(0, gaussian_std)   # Z
        ])
        
        print(f"  {peak_name} - Opponent目标: [{opponent_goal[0]:.2f}, {opponent_goal[1]:.2f}, {opponent_goal[2]:.2f}]")
        
        ego_goal = original_ego_goal  # ego goal is fixed
        
        # Roll out the full episode with MPC (predict 10, execute 1 per cycle).
        print(f"  🔄 MPC滚动求解 (预测10步，执行1步，共{n_sim_steps}步)...", end=' ')
        trajectory = generate_episode_with_mpc(
            game, mcp_solver, ego_goal, opponent_goal, n_sim_steps, control_horizon=1
        )  # [n_sim_steps+1, 12]
        print(f"✓ 完成")
        
        # Keep the full trajectory and its goals for visualization/saving.
        all_trajectories.append(trajectory)
        all_episode_goals.append((ego_goal, opponent_goal))
        
        # Episode slicing (sliding window, Julia style):
        # Julia: initial_states_indices = trajectory_slices_indices .- 1,
        # i.e. the initial state is the window's first frame (t-1) and the
        # window covers [t-1, t+window-2].
        num_slices = trajectory.shape[0] - observation_window + 1  # 51 states -> 42 slices (window includes the initial state)
        print(f"  ✂️  切片数 {num_slices}（窗口长度={observation_window}，窗口现包含初始状态t-1）")
        
        for slice_start in range(1, num_slices + 1):  # starts at 1: the previous step serves as the initial state
            # Julia style: the initial state is the window's first frame (t-1).
            initial_state_idx = slice_start - 1  # t-1
            observation_start_idx = initial_state_idx  # window includes the initial-state frame (t-1)
            observation_end_idx = observation_start_idx + observation_window
            
            # Extract the observation_window-step slice (includes the initial state).
            slice_trajectory = trajectory[observation_start_idx:observation_end_idx]  # [10, 12]
            
            # Split into ego and opponent full-state observations (6 dims each).
            ego_obs = slice_trajectory[:, 0:6].flatten().numpy()      # [60] - full ego state
            opponent_obs = slice_trajectory[:, 6:12].flatten().numpy()  # [60] - full opponent state
            
            # Initial state = first frame of this window (Julia style).
            initial_state = trajectory[initial_state_idx].numpy()  # [12] - from t-1
            
            # The opponent goal is shared by every slice of the episode.
            all_ego_trajectories.append(ego_obs)
            all_opponent_trajectories.append(opponent_obs)  # opponent state observation
            all_initial_states.append(initial_state)
            all_opponent_goals.append(opponent_goal.numpy())
        
        if (episode_id + 1) % 5 == 0:
            total_samples = len(all_ego_trajectories)
            print(f"  ✅ 累计样本数: {total_samples}\n")
    
    # Stack the accumulated python lists into numpy arrays.
    ego_trajectories = np.array(all_ego_trajectories)
    opponent_trajectories = np.array(all_opponent_trajectories)
    initial_states = np.array(all_initial_states)
    opponent_goals = np.array(all_opponent_goals)
    
    print(f"\n{'='*70}")
    print(f"✅ 数据生成完成!")
    print(f"{'='*70}")
    print(f"\n数据集统计:")
    print(f"  总样本数:            {len(ego_trajectories)}")
    print(f"  数据增强倍率:        {len(ego_trajectories) / n_episodes:.1f}x")
    print(f"\n数据维度:")
    print(f"  Ego轨迹:             {ego_trajectories.shape}  (Ego完整状态6维×10步 = 60维)")
    print(f"  Opponent轨迹:        {opponent_trajectories.shape}  (Opponent完整状态6维×10步 = 60维)")
    print(f"  初始状态:            {initial_states.shape}")
    print(f"  Opponent目标:        {opponent_goals.shape}")
    print(f"\n单个样本构成:")
    print(f"  Ego轨迹维度:         {ego_trajectories.shape[1]} (每步6维: pos[3] + vel[3], 共10步)")
    print(f"  Opponent轨迹维度:    {opponent_trajectories.shape[1]} (每步6维: pos[3] + vel[3], 共10步)")
    print(f"  标签维度:            {opponent_goals.shape[1]}")
    
    return ego_trajectories, opponent_trajectories, initial_states, opponent_goals, all_trajectories, all_episode_goals


def save_dataset(
    ego_trajectories: np.ndarray,
    opponent_trajectories: np.ndarray,
    initial_states: np.ndarray,
    opponent_goals: np.ndarray,
    save_dir: str = None,
    n_episodes: int = 100,
    observation_window: int = 10,
    n_sim_steps: int = 50
):
    """
    Persist the sliced dataset as a compressed .npz file.

    Args:
        ego_trajectories: [N, 60] ego observation windows.
        opponent_trajectories: [N, 60] opponent observation windows.
        initial_states: [N, 12] initial state of each slice.
        opponent_goals: [N, 3] opponent goal per sample.
        save_dir: output directory (defaults to <project_root>/data).
        n_episodes: episode count, stored as metadata.
        observation_window: window length, stored as metadata.
        n_sim_steps: per-episode step count, stored as metadata.

    Returns:
        Path of the written .npz file.
    """
    # Default output directory sits next to the examples folder.
    if save_dir is None:
        project_root = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        save_dir = os.path.join(project_root, "data")
    os.makedirs(save_dir, exist_ok=True)

    # Timestamped filename so repeated runs never overwrite each other.
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_path = os.path.join(save_dir, f"vae_dataset_{stamp}.npz")

    print(f"\n{'='*70}")
    print(f"💾 保存数据集...")
    print(f"{'='*70}")
    print(f"\n保存路径: {save_path}")

    # Arrays plus scalar metadata, written as one compressed archive.
    payload = {
        "ego_trajectories": ego_trajectories,
        "opponent_trajectories": opponent_trajectories,
        "initial_states": initial_states,
        "opponent_goals": opponent_goals,
        # metadata
        "n_samples": len(ego_trajectories),
        "observation_window": observation_window,
        "n_episodes": n_episodes,
        "n_sim_steps": n_sim_steps,
    }
    np.savez_compressed(save_path, **payload)

    size_mb = os.path.getsize(save_path) / (1024 * 1024)
    print(f"文件大小: {size_mb:.2f} MB")

    print(f"\n✅ 数据集保存成功!")
    print(f"\n加载方式:")
    print(f"  data = np.load('{save_path}')")
    print(f"  ego_traj = data['ego_trajectories']")
    print(f"  opp_traj = data['opponent_trajectories']")
    print(f"  init_states = data['initial_states']")
    print(f"  opp_goals = data['opponent_goals']")

    return save_path


def save_full_episode_dataset(
    all_trajectories,
    all_episode_goals,
    save_dir: str,
    max_episodes: Optional[int] = None
):
    """
    Save complete episode trajectories (plus goals) for later visualization.

    Args:
        all_trajectories: per-episode [T, 12] trajectories (tensor or array).
        all_episode_goals: per-episode (ego_goal, opponent_goal) pairs.
        save_dir: output directory.
        max_episodes: optional cap on how many episodes are stored.

    Returns:
        Path of the written .npz file.
    """
    os.makedirs(save_dir, exist_ok=True)

    n_total = len(all_trajectories)
    n_save = n_total if max_episodes is None else min(max_episodes, n_total)

    def _as_array(value):
        """Tensor/list -> numpy array (same contract as module-level _to_numpy)."""
        if isinstance(value, torch.Tensor):
            return value.detach().cpu().numpy()
        return np.asarray(value)

    trajectories = np.stack(
        [_as_array(all_trajectories[i]) for i in range(n_save)], axis=0)
    ego_goals = np.stack(
        [_as_array(all_episode_goals[i][0]) for i in range(n_save)], axis=0)
    opponent_goals = np.stack(
        [_as_array(all_episode_goals[i][1]) for i in range(n_save)], axis=0)

    timestamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    save_path = os.path.join(save_dir, f"vae_full_trajectories_{timestamp}.npz")

    np.savez_compressed(
        save_path,
        trajectories=trajectories,
        ego_goals=ego_goals,
        opponent_goals=opponent_goals,
        n_episodes=n_save,
        trajectory_length=trajectories.shape[1],
        state_dim=trajectories.shape[2]
    )

    print(f"\n  ✓ 完整轨迹数据保存成功: {save_path}")
    print(f"    episodes: {n_save}/{n_total}, trajectories shape: {trajectories.shape}")
    return save_path


def main():
    """
    Entry point: generate the VAE training dataset, save it, dump the full
    episode trajectories, plot two representative episodes, and sanity-check
    the saved file.
    """
    print(f"\n{'='*70}")
    print(f"  VAE训练数据集生成器 (Julia风格)")
    print(f"{'='*70}\n")
    
    # 1. Build the drone game and the MCP Nash solver.
    print("[1] 初始化无人机博弈和MCP求解器...")
    
    game = DroneGame(
        dt=0.1,
        collision_radius=2.0,  # consistent with MPGP
        control_limits={
            'theta': 0.5,
            'phi': 0.5,
            'tau': 20.0
        },
        velocity_limit=6.0,  # lowered from 15.0 to match MPGP/simple_demo
        altitude_limits=(15.0, 55.0),
        g=9.81
    )
    
    horizon = 10
    solver_params = {
        # Keep PATH quiet so the terminal isn't flooded with iterations.
        "tolerance": 1e-4,
        "verbose": False,
        "max_iterations": 100000,
        "major_iteration_limit": 1000,
        "minor_iteration_limit": 50000,
        "time_limit": 1200.0,
    }
    mcp_solver = MCPGameSolver(
        game=game,
        horizon=horizon,
        solver_params=solver_params,
    )
    
    print(f"  ✓ 游戏初始化完成")
    print(f"  ✓ MCP求解器初始化完成 (horizon={horizon})")
    
    # 2. Generate the sliced dataset.
    print(f"\n[2] 生成数据集...")
    n_episodes = 100
    n_sim_steps = 50
    observation_window = 10
    goal_perturbation = 0.5
    
    ego_trajectories, opponent_trajectories, initial_states, opponent_goals, \
    all_trajectories, all_episode_goals = generate_dataset_with_mpc_slicing(
            game=game,
            mcp_solver=mcp_solver,
            n_episodes=n_episodes,
            n_sim_steps=n_sim_steps,
            observation_window=observation_window,
            goal_perturbation=goal_perturbation
        )
    
    # 3. Save the dataset under examples/vaedataset — the same location
    #    train_vae.py reads from.
    print(f"\n[3] 保存数据集...")
    data_dir = os.path.join(os.path.dirname(__file__), "vaedataset")
    os.makedirs(data_dir, exist_ok=True)
    
    save_path = save_dataset(
        ego_trajectories,
        opponent_trajectories,
        initial_states,
        opponent_goals,
        save_dir=data_dir,
        n_episodes=n_episodes,
        observation_window=observation_window,
        n_sim_steps=n_sim_steps
    )

    print(f"\n[4.1] 保存完整Episode轨迹（全部{n_episodes}条）...")
    full_traj_path = save_full_episode_dataset(
        all_trajectories=all_trajectories,
        all_episode_goals=all_episode_goals,
        save_dir=data_dir,
        max_episodes=n_episodes
    )

    print(f"\n[4.2] 使用完整轨迹绘制代表性示例...")
    plot_examples_from_full_dataset(full_traj_path, data_dir)
    
    # 5. Verify the saved dataset by loading it back.
    print(f"\n[5] 验证数据集...")
    print(f"\n{'='*70}")
    print(f"🔍 数据集检查")
    print(f"{'='*70}\n")
    
    # Close the NpzFile deterministically once the check is done.
    with np.load(save_path) as data:
        print("数据集内容:")
        for key in data.files:
            # Scalar metadata is printed by value, arrays by shape.
            if key.startswith('n_') or key.endswith('_window') or key.endswith('_steps'):
                print(f"  {key}: {data[key]}")
            else:
                print(f"  {key}: {data[key].shape}")
    
    # Range check on the sampled opponent goals.
    print(f"\n数据范围检查:")
    print(f"  Opponent目标 X: [{opponent_goals[:, 0].min():.2f}, {opponent_goals[:, 0].max():.2f}]")
    print(f"  Opponent目标 Y: [{opponent_goals[:, 1].min():.2f}, {opponent_goals[:, 1].max():.2f}]")
    print(f"  Opponent目标 Z: [{opponent_goals[:, 2].min():.2f}, {opponent_goals[:, 2].max():.2f}]")
    
    print(f"\n{'='*70}")
    print(f"✅ 数据集生成完成！")
    print(f"{'='*70}\n")
    print(f"下一步:")
    print(f"  1. 使用此数据集训练VAE")
    print(f"  2. 评估VAE学习到的目标分布")
    print(f"  3. 与真实分布对比")
    print(f"  4. 使用 {full_traj_path} 可视化完整轨迹")
    print(f"\n")


# Script entry point: run the full generate/save/verify pipeline.
if __name__ == "__main__":
    main()

