#!/usr/bin/env python3
"""
Testing dataset generator for the VAE demo.
仅生成完整轨迹的npz文件，并按固定顺序分配对手目标（前25条直行、后25条右转，剩余保持混合采样逻辑）。
"""

import os
import sys
from datetime import datetime
from typing import List, Sequence, Tuple

import numpy as np
import torch

# OpenMP conflict workaround
os.environ["KMP_DUPLICATE_LIB_OK"] = "TRUE"

# Repository paths
REPO_ROOT = os.path.abspath(os.path.join(os.path.dirname(__file__), "..", ".."))
sys.path.insert(0, REPO_ROOT)
sys.path.insert(0, os.path.join(REPO_ROOT, "src"))
sys.path.insert(0, os.path.join(REPO_ROOT, "games"))

from games.drone_game import DroneGame
from src.mcp.mcp_solver import MCPGameSolver


def _to_numpy(value: torch.Tensor) -> np.ndarray:
    return value.detach().cpu().numpy()


def _zero_control_rollout(
    game: DroneGame,
    current_state: torch.Tensor,
    trajectory: List[torch.Tensor],
    step: int,
    n_steps: int,
) -> Tuple[torch.Tensor, int]:
    """Advance the game `n_steps` times with zero control.

    Appends each new state to `trajectory` in place and returns the final
    state together with the updated step counter.
    """
    zero_control = torch.zeros(6)
    for _ in range(n_steps):
        current_state = game.step_func(current_state, zero_control)
        trajectory.append(current_state.clone())
        step += 1
    return current_state, step


def generate_episode_with_mpc(
    game: DroneGame,
    mcp_solver: MCPGameSolver,
    ego_goal: torch.Tensor,
    opponent_goal: torch.Tensor,
    n_sim_steps: int,
    control_horizon: int = 1,
) -> torch.Tensor:
    """
    Roll out one episode with MPC (same settings as the training generator).

    Temporarily overwrites the two target positions on `game`, simulates
    `n_sim_steps` steps (re-solving every `control_horizon` steps), and
    restores the original goals afterwards — even if the rollout is aborted
    by an unexpected exception.

    Returns the stacked trajectory including the initial state, i.e. a
    tensor with `n_sim_steps + 1` rows.
    """
    original_goals = [game.target_positions[i].clone() for i in range(2)]
    game.target_positions[0] = ego_goal
    game.target_positions[1] = opponent_goal

    current_state = game.get_initial_state()
    trajectory = [current_state.clone()]
    step = 0

    try:
        while step < n_sim_steps:
            try:
                with torch.no_grad():
                    result = mcp_solver.solve_game(current_state, warm_start=None)

                if result["success"]:
                    controls = result["controls"]
                    # Execute up to `control_horizon` solved controls, repeating
                    # the last one if the plan is shorter than the horizon.
                    for k in range(min(control_horizon, n_sim_steps - step)):
                        control_k = controls[min(k, controls.shape[0] - 1)]
                        current_state = game.step_func(current_state, control_k)
                        trajectory.append(current_state.clone())
                        step += 1
                else:
                    # Solver reported failure: coast with zero control instead.
                    current_state, step = _zero_control_rollout(
                        game, current_state, trajectory, step,
                        min(control_horizon, n_sim_steps - step),
                    )
            except Exception as exc:  # pragma: no cover - safety net
                print(f"[警告] MPC异常，使用零控制: {exc}")
                current_state, step = _zero_control_rollout(
                    game, current_state, trajectory, step,
                    min(control_horizon, n_sim_steps - step),
                )
    finally:
        # Restore the goals even on unexpected exits (e.g. KeyboardInterrupt)
        # so the shared `game` instance never keeps episode-specific goals.
        game.target_positions[0] = original_goals[0]
        game.target_positions[1] = original_goals[1]

    return torch.stack(trajectory, dim=0)


def _sample_goal_noise(std: float) -> torch.Tensor:
    """Draw one isotropic Gaussian perturbation (3 components, zero mean)."""
    return torch.tensor(np.random.normal(0.0, std, size=3))


def build_opponent_goal_sequence(
    n_episodes: int,
    goal_perturbation: float,
) -> Tuple[List[torch.Tensor], List[str], float]:
    """
    Construct the opponent-goal list:
    - first 25 episodes: straight (peak1),
    - next 25 episodes: right turn (peak2),
    - rest: original mixed sampling (peak chosen uniformly at random).

    Each goal is a peak center plus Gaussian noise with std
    `goal_perturbation / 2`.

    Returns (goals, per-episode mode labels, gaussian std actually used).
    """
    peak1_center = torch.tensor([0.0, -7.0, 25.0])  # "straight" goal
    peak2_center = torch.tensor([-7.0, 0.0, 25.0])  # "right turn" goal
    gaussian_std = goal_perturbation / 2

    # Quotas degrade gracefully when fewer than 50 episodes are requested.
    straight_quota = min(25, n_episodes)
    turn_quota = min(25, n_episodes - straight_quota)
    remaining = n_episodes - straight_quota - turn_quota

    goals: List[torch.Tensor] = []
    labels: List[str] = []

    for _ in range(straight_quota):
        goals.append(peak1_center + _sample_goal_noise(gaussian_std))
        labels.append("straight")

    for _ in range(turn_quota):
        goals.append(peak2_center + _sample_goal_noise(gaussian_std))
        labels.append("right_turn")

    for _ in range(remaining):
        center = peak1_center if np.random.rand() < 0.5 else peak2_center
        goals.append(center + _sample_goal_noise(gaussian_std))
        labels.append("mixed")

    return goals, labels, gaussian_std


def generate_full_trajectory_dataset(
    game: DroneGame,
    mcp_solver: MCPGameSolver,
    n_episodes: int,
    n_sim_steps: int,
    goal_perturbation: float,
) -> Tuple[np.ndarray, np.ndarray, np.ndarray, List[str]]:
    """
    Generate the full-trajectory dataset used for testing.

    Rolls out one MPC episode per opponent goal and stacks the results into
    numpy arrays: (trajectories, ego goals, opponent goals, mode labels).
    """
    ego_goal = torch.tensor([7.0, 0.0, 35.0])
    opponent_goals, labels, gaussian_std = build_opponent_goal_sequence(
        n_episodes, goal_perturbation
    )

    banner = "=" * 64
    print(f"\n{banner}")
    print("生成测试集（仅完整轨迹）")
    print(f"总集数: {n_episodes}, 模拟步数: {n_sim_steps}, σ={gaussian_std:.2f}")
    print("前25条: 对手直行；后25条: 对手右转；其余: 随机混合")
    print(banner)

    episode_states: List[np.ndarray] = []
    ego_goal_rows: List[np.ndarray] = []
    opponent_goal_rows: List[np.ndarray] = []

    for episode_no, (opp_goal, mode) in enumerate(zip(opponent_goals, labels), start=1):
        print(f"[Episode {episode_no:03d}] 模式: {mode}, 目标: {opp_goal.tolist()}")
        rollout = generate_episode_with_mpc(
            game=game,
            mcp_solver=mcp_solver,
            ego_goal=ego_goal,
            opponent_goal=opp_goal,
            n_sim_steps=n_sim_steps,
            control_horizon=1,
        )
        episode_states.append(_to_numpy(rollout))
        ego_goal_rows.append(_to_numpy(ego_goal))
        opponent_goal_rows.append(_to_numpy(opp_goal))

    return (
        np.stack(episode_states, axis=0),
        np.stack(ego_goal_rows, axis=0),
        np.stack(opponent_goal_rows, axis=0),
        labels,
    )


def save_full_dataset(
    trajectories: np.ndarray,
    ego_goals: np.ndarray,
    opponent_goals: np.ndarray,
    labels: Sequence[str],
    save_dir: str,
) -> str:
    """Persist the full-trajectory test set as a timestamped compressed .npz.

    Creates `save_dir` if needed and returns the path of the written file.
    """
    os.makedirs(save_dir, exist_ok=True)
    stamp = datetime.now().strftime("%Y%m%d_%H%M%S")
    out_path = os.path.join(save_dir, f"vae_full_testing_{stamp}.npz")

    # Store arrays plus a few scalar metadata fields derived from the shapes.
    payload = {
        "trajectories": trajectories,
        "ego_goals": ego_goals,
        "opponent_goals": opponent_goals,
        "labels": np.array(labels),
        "n_episodes": trajectories.shape[0],
        "trajectory_length": trajectories.shape[1],
        "state_dim": trajectories.shape[2],
    }
    np.savez_compressed(out_path, **payload)

    print(f"\n数据保存完毕: {out_path}")
    return out_path


def main() -> None:
    """Entry point: build the game and solver, then generate and save the test set."""
    game = DroneGame(
        dt=0.1,
        collision_radius=2.0,
        control_limits={"theta": 0.5, "phi": 0.5, "tau": 20.0},
        velocity_limit=6.0,
        altitude_limits=(15.0, 55.0),
        g=9.81,
    )

    solver_settings = {
        "tolerance": 1e-4,
        "verbose": False,
        "max_iterations": 100000,
        "major_iteration_limit": 1000,
        "minor_iteration_limit": 50000,
        "time_limit": 1200.0,
    }
    solver = MCPGameSolver(game=game, horizon=10, solver_params=solver_settings)

    trajectories, ego_goals, opponent_goals, labels = generate_full_trajectory_dataset(
        game=game,
        mcp_solver=solver,
        n_episodes=100,
        n_sim_steps=50,
        goal_perturbation=0.5,
    )

    save_full_dataset(
        trajectories=trajectories,
        ego_goals=ego_goals,
        opponent_goals=opponent_goals,
        labels=labels,
        save_dir=os.path.join(REPO_ROOT, "results", "demo_vae", "testing"),
    )


if __name__ == "__main__":
    main()
