#!/usr/bin/env python3
"""
恒定速度模型下的 MPC 对手轨迹预测（使用 MCPGameSolver 的 PATH 求解器）
"""

import os
import sys
from typing import Dict, List, Optional
from glob import glob

import numpy as np
import torch

# OpenMP runtime clash guard
import config  # noqa: F401

# UTF-8 stdout/stderr
try:
    sys.stdout.reconfigure(encoding="utf-8", errors="replace")
    sys.stderr.reconfigure(encoding="utf-8", errors="replace")
except Exception:
    pass

# 路径
repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
for p in [repo_root, os.path.join(repo_root, "src"), os.path.join(repo_root, "games")]:
    if p not in sys.path:
        sys.path.insert(0, p)

from games.drone_game import DroneGame  # noqa: E402
from src.mcp.mcp_solver import MCPGameSolver  # noqa: E402


def find_latest_full_trajectory(dataset_dir: str) -> Optional[str]:
    """Return the most recently modified ``vae_full_trajectories_*.npz``
    file under *dataset_dir*, or ``None`` when no such file exists."""
    candidates = glob(os.path.join(dataset_dir, "vae_full_trajectories_*.npz"))
    if not candidates:
        return None
    # Newest by modification time; ties resolve to the earliest glob entry,
    # matching a stable reverse sort followed by taking the first element.
    return max(candidates, key=os.path.getmtime)


def compute_min_distance(
    ego_traj: np.ndarray, opp_traj: np.ndarray, dt: float, collision_radius: float
) -> Dict[str, object]:
    """Find the point of closest approach between two trajectories.

    Only the overlapping prefix of the two trajectories is compared; the
    first three columns of each state row are treated as (x, y, z) position.

    Args:
        ego_traj: Ego states, shape [T1, >=3]; columns 0:3 are position.
        opp_traj: Opponent states, shape [T2, >=3]; columns 0:3 are position.
        dt: Timestep in seconds, used to convert the closest index to a time.
        collision_radius: Distance below which the pair counts as colliding.

    Returns:
        Dict with keys ``min_dist`` (float), ``index`` (int), ``time``
        (float), ``ego_pos`` / ``opp_pos`` (position copies, np.ndarray),
        and ``collide`` (bool).

    Raises:
        ValueError: If either trajectory is empty.
    """
    k = min(len(ego_traj), len(opp_traj))
    if k == 0:
        # Fail fast with a clear message instead of np.argmin's opaque error.
        raise ValueError("compute_min_distance: empty trajectory")
    dists = np.linalg.norm(ego_traj[:k, 0:3] - opp_traj[:k, 0:3], axis=1)
    idx = int(np.argmin(dists))
    min_dist = float(dists[idx])
    return {
        "min_dist": min_dist,
        "index": idx,
        "time": idx * dt,
        "ego_pos": ego_traj[idx, 0:3].copy(),
        "opp_pos": opp_traj[idx, 0:3].copy(),
        "collide": min_dist < collision_radius,
    }


def main_cv_eval() -> None:
    """
    Aligned with test_vae: a 10-step observation window and 41 MPC
    prediction cycles per episode; saves prediction errors / costs /
    states / controls to an .npz file.
    Uses MCPGameSolver's PATH solver (requires Julia + PATHSolver.jl).
    """
    print(f"\n{'='*70}")
    print("恒定速度模型MPC对手轨迹预测（PATH求解）")
    print(f"{'='*70}\n")

    n_initial_obs = 10  # dataset steps consumed before the first MPC solve
    n_prediction_cycles = 41  # MPC solve/step cycles per episode

    # Dataset: prefer the fixed file when it exists, otherwise the newest
    # matching file in dataset_dir.
    # NOTE(review): fixed_path is a machine-specific Windows path — consider
    # moving it to configuration.
    script_dir = os.path.dirname(os.path.abspath(__file__))
    dataset_dir = os.path.join(script_dir, "vae_demo", "vaedataset")
    fixed_path = r"D:\test8\multi-uav-trajectory-planning\results\demo_vae\testing\vae_full_testing_20251127_155202.npz"
    dataset_path = fixed_path if os.path.isfile(fixed_path) else find_latest_full_trajectory(dataset_dir)
    if dataset_path is None or not os.path.isfile(dataset_path):
        raise FileNotFoundError(f"未找到轨迹数据集：{fixed_path} 或 {dataset_dir}")

    data = np.load(dataset_path)
    trajectories = data["trajectories"]  # [n_episodes, n_steps, 12]; cols 0:6 ego, 6:12 opponent
    ego_goals = data["ego_goals"]  # [n_episodes, 3]
    opponent_goals = data["opponent_goals"]  # [n_episodes, 3]

    n_episodes = len(trajectories)
    if trajectories.shape[1] < n_initial_obs + n_prediction_cycles:
        raise ValueError(
            f"轨迹长度不足：需要>={n_initial_obs + n_prediction_cycles}，当前 {trajectories.shape[1]}"
        )
    if n_episodes < 50:
        raise ValueError(f"数据集条目不足 50 条，当前 {n_episodes}")

    # Game and solver (PATH)
    game = DroneGame(
        dt=0.1,
        collision_radius=2.0,
        control_limits={"theta": 0.5, "phi": 0.5, "tau": 20.0},
        velocity_limit=6.0,
        altitude_limits=(15.0, 55.0),
        g=9.81,
    )
    solver_params = {
        "tolerance": 1e-4,
        "verbose": False,
        "max_iterations": 100000,
        "major_iteration_limit": 1000,
        "minor_iteration_limit": 50000,
        "time_limit": 1200.0,
    }
    game_solver = MCPGameSolver(
        game=game,
        horizon=10,
        solver_type="path",
        solver_params=solver_params,
    )
    game_solver.set_warm_start_enabled(True)

    # Result buffers (first 50 episodes only); costs default to NaN so
    # failed solves are distinguishable from zero cost.
    use_episodes = 50
    output_dir = os.path.join(repo_root, "results", "demo_mpc")
    os.makedirs(output_dir, exist_ok=True)
    error_norms = np.zeros((use_episodes, n_prediction_cycles), dtype=np.float32)
    min_distances = np.zeros((use_episodes,), dtype=np.float32)
    costs = np.full((use_episodes, n_prediction_cycles), np.nan, dtype=np.float32)
    controls = np.zeros((use_episodes, n_prediction_cycles, 3), dtype=np.float32)
    states = np.zeros((use_episodes, n_prediction_cycles, 12), dtype=np.float32)
    collision_count = 0

    for episode_id in range(use_episodes):
        traj = trajectories[episode_id]
        ego_goal = ego_goals[episode_id]
        opp_goal_true = opponent_goals[episode_id]

        # Seed the rollout with the first n_initial_obs dataset states.
        state_history: List[np.ndarray] = [traj[k].copy() for k in range(n_initial_obs)]
        current_state = state_history[-1].copy()
        success_count = 0

        for mpc_idx in range(n_prediction_cycles):
            dataset_idx = n_initial_obs - 1 + mpc_idx
            opp_state_obs = traj[dataset_idx, 6:12].copy()

            # Constant-velocity extrapolation of the opponent goal over the
            # remaining MPC cycles (initial observation window excluded).
            remaining_steps = max(0, n_prediction_cycles - (mpc_idx + 1))
            opp_goal_pred = opp_state_obs[0:3] + opp_state_obs[3:6] * (remaining_steps * game.dt)
            error_norms[episode_id, mpc_idx] = np.linalg.norm(opp_goal_pred - opp_goal_true)

            # Current state: ego from the simulated rollout, opponent from observation.
            current_state_sim = current_state.copy()
            current_state_sim[6:12] = opp_state_obs

            # Set both target positions, then call the PATH solver.
            game.target_positions[0] = torch.tensor(ego_goal, dtype=torch.float32)
            game.target_positions[1] = torch.tensor(opp_goal_pred, dtype=torch.float32)
            mcp_result = game_solver.solve_game(
                initial_state=torch.tensor(current_state_sim, dtype=torch.float32),
                warm_start=None,
            )

            if mcp_result["success"]:
                success_count += 1
                u_first = mcp_result["controls"][0, 0:3].detach().cpu().numpy()
                costs[episode_id, mpc_idx] = float(mcp_result.get("cost", np.nan))
            else:
                # Solver failure: fall back to zero control; cost stays NaN.
                u_first = np.zeros(3, dtype=np.float32)
                costs[episode_id, mpc_idx] = np.nan
            controls[episode_id, mpc_idx] = u_first

            # Advance one step: ego uses the solved control; the opponent's
            # control is zero here but its state is overwritten just below.
            full_control = np.concatenate([u_first, np.zeros(3, dtype=np.float32)])
            next_full_state = game_solver.game.step_func(
                torch.tensor(current_state_sim, dtype=torch.float32),
                torch.tensor(full_control, dtype=torch.float32),
            ).cpu().numpy()
            next_state = next_full_state.copy()
            # Opponent state comes from the dataset's next step to stay aligned.
            next_state[6:12] = traj[dataset_idx + 1, 6:12]

            current_state = next_state
            state_history.append(current_state.copy())
            states[episode_id, mpc_idx] = current_state

        # Minimum ego-opponent distance over the episode (seed window included).
        dists = [np.linalg.norm(s[0:3] - s[6:9]) for s in state_history]
        min_d = float(np.min(dists))
        min_distances[episode_id] = min_d
        collision_count += int(min_d < game.collision_radius)
        print(f"[episode {episode_id}] success {success_count}/{n_prediction_cycles}, min_dist {min_d:.3f} m")

    result_path = os.path.join(output_dir, "mpc_result.npz")
    # NOTE(review): `meta` is a dict, so np.savez stores it as a pickled
    # object array — reading it back will need np.load(..., allow_pickle=True).
    np.savez(
        result_path,
        error_norms=error_norms,
        min_distances=min_distances,
        costs=costs,
        states=states,
        controls=controls,
        meta=dict(n_episodes=use_episodes, n_prediction_cycles=n_prediction_cycles),
    )
    print(f"\n[save] 结果已保存: {result_path}")
    print(f"总碰撞次数: {collision_count} / {use_episodes}")


# Script entry point: run the constant-velocity MPC evaluation.
if __name__ == "__main__":
    main_cv_eval()
