#!/usr/bin/env python3
"""
Adaptive Multi-Player Game Planning (MPGP) Algorithm

实现自适应MPGP在线学习算法，支持多个玩家同时估计对方目标参数
每个玩家独立维护观察缓冲区和参数估计，实现完全分布式的互相预测
"""

import sys
import os
import argparse
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'src'))
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', 'games'))

# 导入配置（必须在其他库之前）
import config
from julia.api import Julia
Julia(compiled_modules=False)
import torch
import numpy as np
import json
from typing import Tuple, List, Optional, Dict, Any
from collections import deque

DEFAULT_DATASET_PATH = os.path.abspath(
    os.path.join(
        os.path.dirname(__file__),
        "..",
        "results",
        "demo_vae",
        "testing",
        "vae_full_trajectories_20251120_155419.npz",
    )
)

# 解决Qt平台插件问题 - 使用Agg后端（非交互式）
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# 导入可微分MCP求解器
from mcp.mcp_solver import MCPGameSolver, solve_mcp_game_differentiable


class AdaptiveMPGP:
    """
    自适应MPGP算法 - Julia风格（非对称信息）
    
    基于：Auto-Encoding Bayesian Inverse Games
    
    核心思想：
    - Ego (player 0): 需要推断opponent目标
      max_θ p(Y | X(z*(θ)), U(z*(θ)))
    - Opponent (player 1): 知道ego目标（假设）
    
    信息假设（Julia风格）：
    - Ego: 知道自己目标 + 需要推断opponent目标
    - Opponent: 知道自己目标 + 知道ego目标
    
    实现细节：
    
    1. 非对称推理：
       - 只有ego (player 0)进行逆博弈优化
       - Opponent (player 1)直接使用双方真实目标
       
    2. 梯度计算（仅针对ego）：
       ∇_θ L = (∇_X p)(∇_U X)(∇_θ U*)
       
       其中：
       - ∇_X p: 观测损失对状态的梯度（PyTorch自动微分）
       - ∇_U X: 动力学雅可比 dx/du（PyTorch自动微分）
       - ∇_θ U*: IFT梯度（DifferentiableMCPGameSolver.backward）
       
    3. 算法流程（MPC循环）：
       a) Ego收集观测: Y_ego ← observe(opponent_trajectory)
       b) Ego逆博弈优化:
          - 求解参数化MCP: u*(θ) ← solve_mcp_differentiable(θ)
          - 重建状态: x*(θ) ← reconstruct_states(u*(θ))
          - 计算损失: L = -log p(Y_ego | x*(θ))
          - 梯度更新: θ ← θ - lr·∇_θ L（使用IFT+自动微分）
       c) 双方规划: Ego用估计目标，Opponent用真实目标
       d) 执行控制: 应用MPC控制
    """
    
    def __init__(self,
                 mcp_game_solver: MCPGameSolver,
                 n_players: int = 2,
                 learning_rate: float = 0.01,
                 stop_tolerance: float = 1e-4,
                 max_inverse_steps: int = 20,
                 buffer_size: int = 20,
                 observation_weight: float = 1.0,
                 regularization_weight: float = 0.01,
                 observation_noise_pos: float = 0.5,
                 observation_noise_vel: float = 1.0,
                 verbose: bool = True,
                 use_momentum: bool = True,
                 momentum_beta: float = 0.9):
        """
        Set up the adaptive MPGP algorithm (mutual prediction + MLE).

        The opponent-goal parameter theta is fitted by maximum likelihood:
        theta* = argmax_theta p(Y | X(z*(theta)), U(z*(theta)))

        Gradient chain:
        grad_theta log p(Y | X, U) = (grad_X log p)(grad_z* X)(grad_theta z*)
                                   + (grad_U log p)(grad_z* U)(grad_theta z*)

        Args:
            mcp_game_solver: MCPGameSolver instance used for forward solves.
            n_players: number of players in the game.
            learning_rate: step size for the parameter update.
            stop_tolerance: stopping tolerance for inverse-game optimization.
            max_inverse_steps: max iterations of inverse-game optimization.
            buffer_size: capacity of the observation buffer.
            observation_weight: overall scaling applied to the MLE term.
            regularization_weight: weight of the parameter regularizer.
            observation_noise_pos: position-observation std sigma_pos
                (used by the likelihood).
            observation_noise_vel: velocity-observation std sigma_vel
                (used by the likelihood).
            verbose: whether to print detailed progress.
            use_momentum: whether to apply momentum to the gradient step.
            momentum_beta: momentum coefficient.
        """
        # Core components.
        self.solver = mcp_game_solver
        self.game = mcp_game_solver.game
        self.n_players = n_players

        # Hyperparameters.
        self.lr = learning_rate
        self.stop_tol = stop_tolerance
        self.max_inverse_steps = max_inverse_steps
        self.buffer_size = buffer_size
        self.obs_weight = observation_weight
        self.reg_weight = regularization_weight
        self.verbose = verbose

        # Observation-noise standard deviations for the Gaussian likelihood.
        self.sigma_pos = observation_noise_pos
        self.sigma_vel = observation_noise_vel

        # Julia-style asymmetric information: only the ego (player 0)
        # estimates the opponent's (player 1) goal.
        self.ego_id = 0
        self.opponent_id = 1

        # Estimation state kept for the ego only: current estimate, prior,
        # and the buffer of opponent observations.
        self.theta_hat = {}           # {0: tensor[3]} - ego's estimate of opponent goal
        self.theta_prior = {}         # {0: tensor[3]}
        self.observation_buffer = {}  # {0: deque} - ego's observations of the opponent
        self.observation_buffer[self.ego_id] = deque(maxlen=buffer_size)

        # Momentum settings and state (ego only).
        self.use_momentum = use_momentum
        self.momentum_beta = momentum_beta
        self.momentum = {}  # {0: momentum_vector}

        # Bookkeeping (ego only).
        self.iteration_count = 0
        self.parameter_history = {self.ego_id: []}
        self.loss_history = {self.ego_id: []}

        # States keyed by absolute timestamp, so inverse optimization can
        # start its prediction from the observation window's first state.
        self.state_history = {}  # {timestamp: state_tensor}

        # Ground-truth targets (optional, for debugging).
        self.true_targets = None  # {player_id: true_target_position}
        
    def initialize_parameters(self, 
                             theta_init: Dict[int, torch.Tensor], 
                             theta_prior: Optional[Dict[int, torch.Tensor]] = None,
                             true_targets: Optional[Dict[int, torch.Tensor]] = None):
        """
        Initialize the parameter estimate (Julia style: ego only).

        Args:
            theta_init: {0: tensor[3]} - ego's initial guess of the opponent goal.
            theta_prior: {0: tensor[3]} - ego's prior parameters (optional;
                defaults to the initial guess).
            true_targets: {0: tensor[3], 1: tensor[3]} - both players' true
                goals (required: the opponent is assumed to know the ego goal).

        Raises:
            ValueError: when true_targets is not provided.
        """
        ego = self.ego_id

        # The ego's estimate is the only leaf tensor that needs gradients.
        self.theta_hat[ego] = theta_init[ego].clone().detach().requires_grad_(True)

        # The prior falls back to the initial guess when none is supplied.
        prior_source = theta_prior if theta_prior is not None else theta_init
        self.theta_prior[ego] = prior_source[ego].clone().detach()

        # Zero-initialize momentum when enabled.
        if self.use_momentum:
            self.momentum[ego] = torch.zeros_like(self.theta_hat[ego])

        # Log the starting estimate.
        self.parameter_history[ego].append(self.theta_hat[ego].detach().clone())

        # Julia-style information assumption: the opponent knows the ego goal,
        # so the ground-truth targets are mandatory.
        if true_targets is None:
            raise ValueError("Julia风格需要提供true_targets（opponent需要知道ego目标）")
        self.true_targets = {pid: tgt.clone().detach() for pid, tgt in true_targets.items()}
        
    def update_observation_buffer(self, 
                                 player_id: int,
                                 observation: Dict[str, torch.Tensor]):
        """
        Append one observation to the given player's buffer.

        Args:
            player_id: player whose buffer receives the observation.
            observation: dict with 'opponent_state' and 'timestamp' entries.
        """
        buffer = self.observation_buffer[player_id]
        buffer.append(observation)
      
    
    def compute_observation_loss(self,
                                player_id: int,
                                predicted_states: torch.Tensor,
                                current_time: int = 0) -> torch.Tensor:
        """
        Compute the player's observation loss (maximum-likelihood estimate).

        Uses the negative log-likelihood of a Gaussian observation model:
        L_NLL = -log p(Y | r(U)) = ||Y - r(U)||^2 / (2 sigma^2)

        where:
        - Y: observed opponent position/velocity sequence
        - r(U): map from states X and controls U to expected observations
        - sigma: observation noise standard deviation

        As in the reference paper, ||Y - r(U)||_2^2 is equivalent to the
        negative log-likelihood under a Gaussian observation model.

        Args:
            player_id: observing player's ID.
            predicted_states: predicted state sequence [horizon+1, n_states];
                predicted_states[0] corresponds to current_time and
                predicted_states[k] to current_time + k.
            current_time: absolute timestamp of the prediction start.

        Returns:
            Scalar negative log-likelihood loss, averaged over matched
            observations; a zero tensor (requires_grad) when nothing matches.
        """
        # Only the ego has an observation buffer; others contribute zero loss.
        if player_id not in self.observation_buffer:
            return torch.tensor(0.0, requires_grad=True)
        
        if len(self.observation_buffer[player_id]) == 0:
            if self.verbose:
                print(f"    [观测匹配] 玩家{player_id}: 缓冲区为空！")
            return torch.tensor(0.0, requires_grad=True)
        
        if self.verbose:
            print(f"    [观测匹配调试] 玩家{player_id}: 缓冲区={len(self.observation_buffer[player_id])}个观测, current_time={current_time}, pred_shape={predicted_states.shape}")
        
        # Opponent's player ID (two-player game).
        opponent_id = 1 - player_id
        
        # Slice of the full state vector holding the opponent's state.
        opponent_state_start = opponent_id * self.solver.n_states_per_player
        opponent_state_end = (opponent_id + 1) * self.solver.n_states_per_player
        
        # Collect per-observation losses in a list and sum at the end;
        # in-place accumulation can break the autograd graph.
        loss_list = []
        n_obs = 0
        n_skipped = 0  # observations falling outside the prediction window
        
        # Debug bookkeeping of matched timestamps.
        if self.verbose:
            match_details = []
        
        # Only the most recent `horizon` observations are used: the
        # prediction horizon is fixed while the MPC executes one step per
        # re-plan, so the two windows align and every recent observation can
        # be matched directly by position (no history-consistency loss needed).
        horizon = predicted_states.shape[0] - 1  # prediction_horizon
        
        # Keep at most the newest `horizon` observations.
        all_obs = list(self.observation_buffer[player_id])
        if len(all_obs) > horizon:
            recent_obs = all_obs[-horizon:]
        else:
            recent_obs = all_obs
        
        # Record the observation window bounds (for debug output).
        if len(recent_obs) > 0:
            observation_start_time = recent_obs[0]['timestamp']
            observation_end_time = recent_obs[-1]['timestamp']
        else:
            observation_start_time = current_time
            observation_end_time = current_time
        
        for obs in recent_obs:
            abs_timestamp = obs['timestamp']  # absolute timestamp of this observation
            observed_state = obs['opponent_state']  # [6]: [px, py, pz, vx, vy, vz]
            observed_pos = observed_state[0:3]
            observed_vel = observed_state[3:6]
            
            # Relative index into predicted_states: index 0 is current_time,
            # index i is current_time + i.
            relative_index = abs_timestamp - current_time
            
            # Direct position matching; relative_index may be 0 because,
            # when the prediction starts at the observation window's start,
            # predicted_states[0] is exactly that timestamp's state.
            if 0 <= relative_index < predicted_states.shape[0]:
                predicted_opponent_state = predicted_states[relative_index, opponent_state_start:opponent_state_end]
                predicted_pos = predicted_opponent_state[0:3]
                predicted_vel = predicted_opponent_state[3:6]
                
                # Position loss (XY weighted 10x higher than Z).
                pos_error = predicted_pos - observed_pos
                pos_loss_xy = torch.sum(pos_error[0:2] ** 2) * 10.0 / (2 * self.sigma_pos ** 2)
                pos_loss_z = torch.sum(pos_error[2:3] ** 2) / (2 * self.sigma_pos ** 2)
                pos_loss = pos_loss_xy + pos_loss_z
                
                # Velocity loss.
                vel_error = predicted_vel - observed_vel
                vel_loss = torch.sum(vel_error ** 2) / (2 * self.sigma_vel ** 2)
                
                loss_list.append(pos_loss + vel_loss)
                n_obs += 1
                
                if self.verbose:
                    match_details.append(f"t={abs_timestamp}")
            else:
                # Should not happen: every recent observation ought to fall
                # inside the prediction window.
                n_skipped += 1
                if self.verbose:
                    print(f"      [警告] 观测t={abs_timestamp}超出范围，relative_index={relative_index}")
        
        # Debug summary.
        if self.verbose:
            prediction_window_end = current_time + predicted_states.shape[0] - 1
            match_rate = (n_obs / len(recent_obs) * 100) if len(recent_obs) > 0 else 0
            print(f"    [观测匹配] 玩家{player_id}: 使用{n_obs}/{len(recent_obs)}个观测 ({match_rate:.0f}%匹配), 跳过{n_skipped}个")
            print(f"      观测区间=[t{observation_start_time}, t{observation_end_time}], 预测窗口=[t{current_time}, t{prediction_window_end}]")
            if n_obs > 0:
                print(f"      匹配时刻: {match_details[:10]}")
            if n_obs == 0:
                print(f"      [警告] 没有任何观测被使用！缓冲区大小={len(self.observation_buffer[player_id])}")
                if len(self.observation_buffer[player_id]) > 0:
                    timestamps = [obs['timestamp'] for obs in self.observation_buffer[player_id]]
                    print(f"      缓冲区时间戳: {timestamps}")
                    # Bug fix: `window_end` was undefined here (NameError on
                    # this diagnostic path); reuse prediction_window_end.
                    print(f"      预测窗口: [{current_time}, {prediction_window_end}]")
                    # Bug fix: the matching condition accepts relative_index 0,
                    # so the valid range starts at 0, not 1.
                    print(f"      有效范围: relative_index ∈ [0, {predicted_states.shape[0]-1}]")
        
        # Average over the matched observations.
        if n_obs > 0:
            # torch.stack + sum keeps the gradient chain intact.
            total_loss = torch.stack(loss_list).sum()
            avg_loss = total_loss / n_obs
            
            if self.verbose:
                print(f"      总损失: {total_loss.item():.6f}, 平均损失: {avg_loss.item():.6f}")
            
            return avg_loss
        else:
            return torch.tensor(0.0, requires_grad=True)
    
    def _reconstruct_states_differentiable(self, 
                                          initial_state: torch.Tensor,
                                          controls: torch.Tensor) -> torch.Tensor:
        """
        Rebuild the state trajectory from a control sequence, differentiably.

        Rolls the game dynamics forward so the gradient path
        controls -> states stays intact.

        Args:
            initial_state: initial state [n_states].
            controls: control sequence [horizon, n_controls].

        Returns:
            states: state sequence [horizon+1, n_states].
        """
        horizon = controls.shape[0]

        # Tie the initial state into the autograd graph when it is a plain
        # tensor: adding a zero-valued function of `controls` gives it a
        # grad_fn without changing its value, so the whole state sequence
        # keeps its gradient flow.
        if initial_state.requires_grad:
            x0 = initial_state
        else:
            x0 = initial_state + controls.sum() * 0.0

        # Forward-simulate the dynamics x_{k+1} = f(x_k, u_k); DroneGame
        # exposes the transition as step_func. States are collected in a
        # list to preserve gradient connections.
        trajectory = [x0]
        for step in range(horizon):
            trajectory.append(self.game.step_func(trajectory[-1], controls[step]))

        # Stack into a [horizon+1, n_states] tensor, gradients preserved.
        return torch.stack(trajectory, dim=0)
    
    def solve_parametrized_game(self, 
                               initial_state: torch.Tensor,
                               theta_estimates: Dict[int, torch.Tensor],
                               warm_start: Optional[torch.Tensor] = None,
                               use_differentiable: bool = True,
                               current_time: int = 0) -> Dict[str, Any]:
        """
        Solve the parametrized game (paper method).

        Paper equations (11)-(12):
        max_theta p(Y | X(z*(theta)), U(z*(theta)))

        Gradient chain (shooting formulation):
        grad_theta L = (grad_X L)(grad_U X)(grad_theta U*)

        where:
        - grad_X L: observation loss w.r.t. states (PyTorch autograd)
        - grad_U X: dynamics Jacobian (PyTorch autograd)
        - grad_theta U*: IFT gradient (DifferentiableMCPGameSolver.backward)

        Args:
            initial_state: initial state [n_states].
            theta_estimates: {player_id: tensor[3]} goal-parameter estimates.
            warm_start: optional warm start for the solver.
            use_differentiable: use the differentiable MCP solver when True.
            current_time: absolute timestamp used to match observations.

        Returns:
            Dict with 'controls', 'states', 'losses', 'success', 'residual'.
        """
        if use_differentiable:
            # Paper method: differentiable MCP solver + IFT gradients.
            from mcp.mcp_solver import solve_mcp_game_differentiable

            theta_mcp = dict(theta_estimates)

            # Solve the parametrized MCP u*(theta); IFT supplies du*/dtheta.
            controls = solve_mcp_game_differentiable(
                theta_estimates=theta_mcp,
                initial_state=initial_state,
                mcp_game_solver=self.solver
            )

            # Rebuild states through autograd so PyTorch provides
            # d(states)/d(controls). Full gradient chain:
            #   loss -> states -> controls -> theta
            #   (PyTorch)  (PyTorch)   (IFT)
            states = self._reconstruct_states_differentiable(initial_state, controls)

            success = True
            residual = 0.0

        else:
            # Standard solver: temporarily install the estimated targets,
            # solve without gradients, then restore the originals.
            saved_targets = {
                pid: self.game.target_positions[pid].clone()
                for pid in range(self.n_players)
            }
            for pid, theta in theta_estimates.items():
                self.game.target_positions[pid] = theta.detach()

            with torch.no_grad():
                solution = self.solver.solve_game(initial_state, warm_start)
                controls = solution['controls']
                states = solution['states']
                success = solution['success']
                residual = solution['residual']

            for pid, target in saved_targets.items():
                self.game.target_positions[pid] = target

        # Observation loss per player (zero for players without buffers).
        losses = {
            pid: self.compute_observation_loss(pid, states, current_time)
            for pid in range(self.n_players)
        }

        return {
            'controls': controls,
            'states': states,
            'losses': losses,
            'success': success,
            'residual': residual
        }
       
    
    def inverse_game_optimization(self, 
                                 initial_state: torch.Tensor,
                                 warm_start: Optional[torch.Tensor] = None,
                                 current_time: int = 0) -> Dict[int, torch.Tensor]:
        """
        执行逆博弈优化 - Julia风格：只优化ego对opponent目标的估计
        
        对ego(player 0)求解：
        min_{θ̂₀→₁} L_obs_0(x₁*(g₀, θ̂₀→₁), Y₀) + λ·||θ̂₀→₁ - θ_prior||²
        
        其中：
        - g₀: ego真实目标（已知）
        - θ̂₀→₁: ego对opponent目标的估计（待优化）
        - x₁*: Nash均衡中opponent的轨迹
        - Y₀: ego观测到的opponent历史轨迹
        
        对手(player 1)：直接使用已知的ego目标，无需优化
        
        Args:
            initial_state: 当前状态 [n_states]
            warm_start: 热启动（可选）
            current_time: 当前绝对时间戳（用于匹配观测）
            
        Returns:
            {0: updated_theta} - ego更新后的参数估计
        """
        # 收集调试信息
        debug_info = {
            'buffer_info': [],
            'gradient_info': [],
            'iteration_info': [],
            'inverse_prediction_windows': []
        }
        
        # 只收集ego的缓冲区信息
        player_id = self.ego_id
        buffer = self.observation_buffer[player_id]
        if len(buffer) > 0:
            timestamps = [obs['timestamp'] for obs in buffer]
            valid_obs = [t for t in timestamps if current_time < t <= current_time + self.solver.horizon]
            debug_info['buffer_info'].append({
                'player_id': player_id,
                'total': len(buffer),
                'valid': len(valid_obs),
                'timestamps_range': (min(timestamps), max(timestamps))
            })
            
            # 计算逆优化预测窗口（从观测起点开始）
            all_obs = list(buffer)
            horizon = self.solver.horizon
            recent_obs = all_obs[-horizon:] if len(all_obs) > horizon else all_obs
            obs_start = recent_obs[0]['timestamp']
            obs_end = recent_obs[-1]['timestamp']
            
            debug_info['inverse_prediction_windows'].append({
                'player_id': player_id,
                'obs_start': obs_start,
                'obs_end': obs_end,
                'obs_count': len(recent_obs),
                'has_state': obs_start in self.state_history
            })
        else:
            debug_info['buffer_info'].append({
                'player_id': player_id,
                'total': 0,
                'valid': 0,
                'timestamps_range': None
            })
        
        # 检查是否有可用的未来观测（relative_index >= 1）
        has_usable_obs = False
        for obs in self.observation_buffer[player_id]:
            relative_index = obs['timestamp'] - current_time
            if relative_index >= 1:  # 需要未来观测
                has_usable_obs = True
                break
        
        if not has_usable_obs:
            debug_info['skip_reason'] = '没有可用的未来观测'
            print(f"\n⚠️  跳过逆博弈优化：没有可用的未来观测")
            print(f"  当前时间={current_time}, ego缓冲区大小={len(self.observation_buffer[player_id])}")
            if len(self.observation_buffer[player_id]) > 0:
                timestamps_0 = [obs['timestamp'] for obs in self.observation_buffer[player_id]]
                print(f"  ego观测时间戳: {timestamps_0[:5]}...{timestamps_0[-5:]}")
            # 返回当前估计（不更新）
            return {self.ego_id: self.theta_hat[self.ego_id].clone()}
        
        # Julia风格：只优化ego对opponent的估计
        player_i = self.ego_id
        opponent_j = self.opponent_id
        
        # 确定观测起点，从观测起点开始预测（确保100%观测匹配）
        all_obs = list(self.observation_buffer[player_i])
        if len(all_obs) > 0:
            horizon = self.solver.horizon
            recent_obs = all_obs[-horizon:] if len(all_obs) > horizon else all_obs
            
            observation_start_time = recent_obs[0]['timestamp']
            observation_end_time = recent_obs[-1]['timestamp']
            observation_length = observation_end_time - observation_start_time + 1
            
            # 🔧 关键修复：始终从观测起点开始预测，即使没有历史状态
            # 获取观测起点的状态
            if observation_start_time in self.state_history:
                inverse_initial_state = self.state_history[observation_start_time]
                inverse_current_time = observation_start_time
            else:
                # 即使没有历史状态，也用观测起点时间来匹配观测
                # 这样可以利用100%的观测数据
                inverse_initial_state = initial_state
                inverse_current_time = observation_start_time  # 改为observation_start_time而非current_time
                print(f"      [警告] ego: 观测起点t={observation_start_time}的状态未找到，使用当前状态t={current_time}作为初始状态")
        else:
            inverse_initial_state = initial_state
            inverse_current_time = current_time
            observation_start_time = current_time
            observation_length = horizon
        
        # 准备ego的可优化参数
        theta_opt_i = self.theta_hat[player_i].clone().detach().requires_grad_(True)
        
        best_loss_i = float('inf')
        best_theta_i = theta_opt_i.clone()
        
        # 自适应学习率
        current_lr = self.lr
        
        # 收集迭代信息
        iteration_details = []
        
        for iteration in range(self.max_inverse_steps):
            # 清零梯度
            if theta_opt_i.grad is not None:
                theta_opt_i.grad.zero_()
            
            # 构造theta_estimates（Julia风格）
            # - ego使用真实目标（无梯度）
            # - opponent使用ego估计的目标（待优化）
            true_target_no_grad = self.true_targets[player_i].clone().detach()
            
            theta_estimates_for_solve = {
                player_i: true_target_no_grad,  # ego用真实目标（无梯度）
                opponent_j: theta_opt_i          # opponent用ego的估计（待优化）
            }
            
            # 求解参数化MCP得到Nash均衡
            result = self.solve_parametrized_game(
                initial_state=inverse_initial_state,
                theta_estimates=theta_estimates_for_solve,
                warm_start=warm_start,
                use_differentiable=True,
                current_time=inverse_current_time
            )
            
            states = result['states']
            losses = result['losses']
            
            # 计算ego的观测损失
            obs_loss_i = losses[player_i]
            
            # 第一次迭代时打印观测vs预测的对比
            if iteration == 0 and self.verbose:
                opponent_start = opponent_j * self.solver.n_states_per_player
                opponent_end = (opponent_j + 1) * self.solver.n_states_per_player
                print(f"      [观测vs预测] ego观测opponent轨迹（前5步）:")
                print(f"        当前θ估计: {theta_opt_i.detach().numpy()}")
                print(f"        真实目标: {self.true_targets[opponent_j].numpy()}")
                recent_obs_for_debug = list(self.observation_buffer[player_i])[-horizon:] if len(self.observation_buffer[player_i]) > 0 else []
                for obs_idx, obs in enumerate(recent_obs_for_debug[:5]):
                    rel_idx = obs['timestamp'] - inverse_current_time
                    if 0 <= rel_idx < states.shape[0]:
                        pred_state = states[rel_idx, opponent_start:opponent_end]
                        obs_state = obs['opponent_state']
                        pos_diff = pred_state[0:3] - obs_state[0:3]
                        pred_pos = pred_state[0:3].detach().numpy()
                        obs_pos = obs_state[0:3].numpy()
                        diff_np = pos_diff.detach().numpy()
                        print(f"        t={obs['timestamp']}: 观测={obs_pos}, 预测={pred_pos}")
                        print(f"          差值(预测-观测)=[{diff_np[0]:+.2f}, {diff_np[1]:+.2f}, {diff_np[2]:+.2f}]")
            
            # 正则化损失
            reg_loss_i = torch.sum((theta_opt_i - self.theta_prior[player_i]) ** 2)
            
            # 总损失
            total_loss_i = self.obs_weight * obs_loss_i + self.reg_weight * reg_loss_i
            
            # 调试：检查loss是否有梯度
            if iteration == 0:
                print(f"      [Loss调试] ego:")
                print(f"        obs_loss: {obs_loss_i.item():.6f}, requires_grad={obs_loss_i.requires_grad}, grad_fn={obs_loss_i.grad_fn}")
                print(f"        total_loss: {total_loss_i.item():.6f}, requires_grad={total_loss_i.requires_grad}, grad_fn={total_loss_i.grad_fn}")
                
                true_opponent_target = self.true_targets[opponent_j]
                print(f"        ego估计opponent目标: {theta_opt_i.detach().numpy()}")
                print(f"        opponent真实目标: {true_opponent_target.numpy()}")
                print(f"        误差向量: {(theta_opt_i.detach() - true_opponent_target).numpy()}")
            
            # 记录损失历史
            loss_dict_i = {
                'total': total_loss_i.item(),
                'observation': obs_loss_i.item(),
                'regularization': (self.reg_weight * reg_loss_i).item()
            }
            if len(self.loss_history[player_i]) == 0 or iteration == 0:
                self.loss_history[player_i].append(loss_dict_i)
            else:
                self.loss_history[player_i][-1] = loss_dict_i
        
            # 记录迭代详情（前3次）
            if iteration < 3:
                iteration_details.append({
                    'iter': iteration,
                    'theta': theta_opt_i.detach().numpy().copy(),
                    'loss': total_loss_i.item(),
                    'obs_loss': obs_loss_i.item(),
                    'is_best': total_loss_i.item() < best_loss_i
                })
            
            # 检查收敛
            if total_loss_i.item() < best_loss_i:
                best_loss_i = total_loss_i.item()
            
            if total_loss_i.item() < self.stop_tol:
                break
            
            # 反向传播计算梯度
            total_loss_i.backward()
            
            # 收集梯度信息（第一次迭代）
            if iteration == 0:
                # 🔍 梯度方向验证：手动扰动θ检查损失变化
                if theta_opt_i.grad is not None and self.verbose:
                    print(f"\n      [梯度验证] 手动扰动θ检查损失变化:")
                    grad_components = theta_opt_i.grad.detach().numpy().copy()
                    
                    # 保存当前loss
                    current_loss = total_loss_i.item()
                    
                    # 沿梯度正方向扰动（应该增加loss）
                    with torch.no_grad():
                        theta_perturb = theta_opt_i.detach().clone()
                        epsilon = 0.1
                        theta_perturb += epsilon * theta_opt_i.grad / torch.norm(theta_opt_i.grad)
                        
                        # 重新求解
                        theta_estimates_test = {
                            player_i: true_target_no_grad,
                            opponent_j: theta_perturb
                        }
                        result_test = self.solve_parametrized_game(
                            initial_state=inverse_initial_state,
                            theta_estimates=theta_estimates_test,
                            warm_start=warm_start,
                            use_differentiable=False,
                            current_time=inverse_current_time
                        )
                        states_test = result_test['states']
                        
                        # 计算新loss
                        opponent_start_idx = opponent_j * self.solver.n_states_per_player
                        opponent_end_idx = (opponent_j + 1) * self.solver.n_states_per_player
                        loss_test = 0.0
                        for obs in recent_obs:
                            rel_idx = obs['timestamp'] - inverse_current_time
                            if 0 <= rel_idx < states_test.shape[0]:
                                pred_pos = states_test[rel_idx, opponent_start_idx:opponent_start_idx+3]
                                obs_pos = obs['opponent_state'][0:3]
                                loss_test += torch.sum((pred_pos - obs_pos) ** 2).item()
                        
                        delta_loss = loss_test - current_loss
                        print(f"        沿梯度方向扰动ε={epsilon}: Δloss={delta_loss:+.6f}")
                        print(f"        预期: Δloss>0 (沿梯度上升) | 实际: {'✓' if delta_loss > 0 else '✗ 梯度方向错误！'}")
                
                if theta_opt_i.grad is not None:
                    grad_norm_i = torch.norm(theta_opt_i.grad).item()
                    grad_components = theta_opt_i.grad.detach().numpy().copy()
                    debug_info['gradient_info'].append({
                        'player_id': player_i,
                        'gradient': grad_components,
                        'grad_norm': grad_norm_i,
                        'obs_loss': obs_loss_i.item(),
                        'reg_loss': (self.reg_weight * reg_loss_i).item(),
                        'theta_current': theta_opt_i.detach().numpy().copy()
                    })
                    
                    # 检查梯度方向
                    error_vector = theta_opt_i.detach() - self.true_targets[opponent_j]
                    grad_dot_error = torch.dot(torch.tensor(grad_components), error_vector).item()
                    print(f"      [梯度方向检查] ego:")
                    print(f"        梯度: [{grad_components[0]:.4f}, {grad_components[1]:.4f}, {grad_components[2]:.4f}]")
                    print(f"        误差: [{error_vector[0]:.4f}, {error_vector[1]:.4f}, {error_vector[2]:.4f}]")
                    print(f"        梯度·误差: {grad_dot_error:.4f} ({'正确✅' if grad_dot_error > 0 else '错误❌反向'})")
                    
                    if grad_norm_i < 1e-6:
                        print(f"      [警告] ego梯度接近0！")
                        print(f"        观测损失={obs_loss_i.item():.6f}")
                        print(f"        正则损失={reg_loss_i.item():.6f}")
                        print(f"        theta_opt={theta_opt_i.detach().numpy()}")
                        print(f"        梯度分量: X={grad_components[0]:.6e}, Y={grad_components[1]:.6e}, Z={grad_components[2]:.6e}")
                else:
                    debug_info['gradient_info'].append({
                        'player_id': player_i,
                        'gradient': None,
                        'grad_norm': 0.0,
                        'obs_loss': 0.0,
                        'reg_loss': 0.0,
                        'theta_current': theta_opt_i.detach().numpy().copy()
                    })
        
            # 使用原地更新保持计算图
            if theta_opt_i.grad is not None:
                grad_i = theta_opt_i.grad.clone()
                
                # 梯度裁剪（防止爆炸）
                grad_norm_i = torch.norm(grad_i)
                max_grad_norm = 10.0
                if grad_norm_i > max_grad_norm:
                    grad_i = grad_i * (max_grad_norm / grad_norm_i)
                    print(f"      [梯度裁剪] ego: {grad_norm_i:.2f} → {max_grad_norm}")
                
                # 动量更新
                if self.use_momentum:
                    self.momentum[player_i] = self.momentum_beta * self.momentum[player_i] + (1 - self.momentum_beta) * grad_i
                    update_i = current_lr * self.momentum[player_i]
                else:
                    update_i = current_lr * grad_i
                
                # 记录更新步长
                if iteration == 0:
                    update_norm = torch.norm(update_i).item()
                    print(f"      [更新] ego: 步长={update_norm:.6f}, lr={current_lr}")
                
                # 原地更新
                with torch.no_grad():
                    theta_opt_i.sub_(update_i)
        
        # 保存最终优化后的theta
        best_theta_i = theta_opt_i.detach().clone()
        
        # 保存迭代详情
        debug_info['iteration_info'].append({
            'player_id': player_i,
            'iterations': iteration_details
        })
        
        # 更新ego的参数估计
        theta_before = self.theta_hat[player_i].detach().clone()
        self.theta_hat[player_i] = best_theta_i.clone().detach().requires_grad_(True)
        theta_after = self.theta_hat[player_i].detach().clone()
        change = torch.norm(theta_after - theta_before).item()
        
        update_summary = [{
            'player_id': player_i,
            'before': theta_before.numpy().copy(),
            'after': theta_after.numpy().copy(),
            'change': change
        }]
        
        self.parameter_history[player_i].append(theta_after)
        
        # 构造返回的theta字典（只包含ego的估计）
        updated_thetas = {player_i: theta_after}
        
        # 输出调试信息
        print(f"\n{'='*60}")
        print(f"逆博弈优化调试报告（Julia风格 - MPC时间t={current_time}）")
        print(f"{'='*60}")
        
        # 观测缓冲区状态
        print(f"\n[1] 观测缓冲区 (MPC窗口=[{current_time}, {current_time + self.solver.horizon}]):")
        for info in debug_info['buffer_info']:
            if info['timestamps_range']:
                print(f"  ego: {info['total']}个观测, 可用{info['valid']}个, 时间戳{info['timestamps_range']}")
                for obs in self.observation_buffer[player_i]:
                    if current_time < obs['timestamp'] <= current_time + self.solver.horizon:
                        print(f"    首个可用观测(t={obs['timestamp']}): pos={obs['opponent_state'][0:3].numpy()}")
                        break
            else:
                print(f"  ego: 空缓冲区")
        
        # 逆优化预测窗口
        if debug_info['inverse_prediction_windows']:
            print(f"\n[1.5] 🎯 逆优化预测窗口（100%观测匹配策略）:")
            for window in debug_info['inverse_prediction_windows']:
                obs_start = window['obs_start']
                obs_end = window['obs_end']
                obs_count = window['obs_count']
                has_state = window['has_state']
                
                state_status = "✓ 可用" if has_state else "✗ 缺失(回退到current_time)"
                print(f"  ego: 从t={obs_start}预测到t={obs_end} ({obs_count}步)")
                print(f"    → 初始状态[t={obs_start}]: {state_status}")
                print(f"    → 预测窗口=[t{obs_start}, t{obs_end}], 观测数={obs_count}")
                print(f"    → 匹配率: {obs_count}/{obs_count} = 100% ✓")
        
        # 梯度信息
        
            print(f"\n[2] 第一次迭代梯度(Julia风格-只优化ego):")
            for info in debug_info['gradient_info']:
                if info['gradient'] is not None:
                    grad = info['gradient']
                    theta_cur = info['theta_current']
                    
                    true_opponent_target = self.true_targets[opponent_j].numpy()
                    error_vector = theta_cur - true_opponent_target
                    
                    print(f"  ego→opponent: 梯度={grad} | 范数={info['grad_norm']:.6e}")
                    print(f"    当前估计: {theta_cur}")
                    print(f"    真实目标: {true_opponent_target}")
                    print(f"    误差向量: {error_vector}")
                    print(f"    梯度分量: X={grad[0]:.6e}, Y={grad[1]:.6e}, Z={grad[2]:.6e}")
                    
                    print(f"    梯度方向诊断:")
                    for dim, name in enumerate(['X', 'Y', 'Z']):
                        should_dir = "减小" if error_vector[dim] > 0 else "增大" if error_vector[dim] < 0 else "保持"
                        actual_dir = "减小" if grad[dim] > 0 else "增大" if grad[dim] < 0 else "不变"
                        match = "OK" if (should_dir == actual_dir or should_dir == "保持") else "X"
                        print(f"      {name}: 应{should_dir}(误差={error_vector[dim]:+.2f}) | 实际{actual_dir}(梯度={grad[dim]:+.6e}) {match}")
                else:
                    print(f"  ego: 梯度=None [失败]")
        
        # 迭代演变详情（继续读取原有代码）
        if debug_info['iteration_info']:
            print(f"\n[2.5] 优化迭代演变（前3步）:")
            for player_info in debug_info['iteration_info']:
                player_id = player_info['player_id']
                print(f"  玩家{player_id}:")
                for iter_detail in player_info['iterations']:
                    best_mark = " *BEST*" if iter_detail['is_best'] else ""  # 🔧 修复：移除emoji避免编码错误
                    print(f"    迭代{iter_detail['iter']}: theta={iter_detail['theta']}, loss={iter_detail['loss']:.4f}{best_mark}")
        
        # 3. 参数更新总结
        print(f"\n[3] 参数更新总结:")
        for info in update_summary:
            opponent_id = 1 - info['player_id']
            if self.true_targets is not None:
                true_opponent_target = self.true_targets[opponent_id].numpy()
                error_before = np.linalg.norm(info['before'] - true_opponent_target)
                error_after = np.linalg.norm(info['after'] - true_opponent_target)
                print(f"  玩家{info['player_id']}→玩家{opponent_id}:")
                print(f"    更新前: {info['before']} | 误差={error_before:.4f}")
                print(f"    更新后: {info['after']} | 误差={error_after:.4f}")
                print(f"    真实值: {true_opponent_target}")
                print(f"    变化量: {info['change']:.6f}")
            else:
                print(f"  玩家{info['player_id']}:")
                print(f"    更新前: {info['before']}")
                print(f"    更新后: {info['after']}")
                print(f"    变化量: {info['change']:.6f}")
        
        print(f"{'='*60}\n")
        
        return updated_thetas
    
    def forward_game_solution(self,
                             initial_state: torch.Tensor,
                             warm_start: Optional[torch.Tensor] = None,
                             anchor_player: Optional[int] = None) -> Dict[str, torch.Tensor]:
        """
        Solve the forward Nash-equilibrium game under asymmetric information.

        The game's target positions are temporarily overwritten according to
        the chosen perspective, the MCP game is solved, and the targets are
        restored afterwards.

        Perspective selection via ``anchor_player``:
        - ``anchor_player == self.ego_id``: ego uses its true target, the
          opponent slot is filled with ego's current estimate ``theta_hat``
          (ego's subjective game; produces ego's executed control).
        - ``anchor_player == self.opponent_id``: both players use their true
          targets (the opponent is assumed to know ego's goal; produces the
          opponent's executed control).
        - ``anchor_player is None``: both players use their true targets
          (offline simulation / testing mode).

        When no true targets are available at all, each player's target slot
        is filled with the other player's estimate of it.

        Args:
            initial_state: Current joint state [n_states].
            warm_start: Optional warm start for the solver.
            anchor_player: Player whose information set defines the game setup.

        Returns:
            Dict with 'controls', 'states', 'success', 'residual', 'cost'.
        """
        # Snapshot the targets so the shared game object can be restored.
        original_targets = {
            player_id: self.game.target_positions[player_id].clone()
            for player_id in range(self.n_players)
        }

        try:
            # Julia-style target setup for the requested perspective.
            if self.true_targets is not None:
                if anchor_player is None:
                    # Simulation mode: every player uses its true target.
                    for player_id in range(self.n_players):
                        self.game.target_positions[player_id] = self.true_targets[player_id].detach()
                elif anchor_player == self.ego_id:
                    # Ego view: own true target + estimated opponent target.
                    self.game.target_positions[self.ego_id] = self.true_targets[self.ego_id].detach()
                    self.game.target_positions[self.opponent_id] = self.theta_hat[self.ego_id].detach()
                elif anchor_player == self.opponent_id:
                    # Opponent view: full information, both true targets.
                    self.game.target_positions[self.ego_id] = self.true_targets[self.ego_id].detach()
                    self.game.target_positions[self.opponent_id] = self.true_targets[self.opponent_id].detach()
            else:
                # No ground truth available: fall back to mutual estimates.
                for player_id in range(self.n_players):
                    opponent_id = 1 - player_id
                    self.game.target_positions[opponent_id] = self.theta_hat[player_id].detach()

            # Solve the game; gradients are not needed for execution.
            with torch.no_grad():
                result = self.solver.solve_game(initial_state, warm_start)
        finally:
            # Restore the original targets even if the solver raises, so the
            # shared game object is never left in a perspective-specific state.
            for player_id, target in original_targets.items():
                self.game.target_positions[player_id] = target

        return {
            'controls': result['controls'],
            'states': result['states'],
            'success': result['success'],
            'residual': result.get('residual', 0.0),
            'cost': result.get('cost', float('nan'))
        }
        
    
    def step(self, 
             current_state: torch.Tensor,
             observations: Dict[int, Dict[str, torch.Tensor]],
             warm_start: Optional[torch.Tensor] = None,
             current_time: int = 0,
             bidirectional: bool = True) -> Tuple[Dict[int, torch.Tensor], Dict[int, torch.Tensor], Dict[str, Any]]:
        """
        Execute one full adaptive-MPGP time step (Julia style: asymmetric-information game).

        Information assumptions:
        - Ego (player 0): does not know the opponent's target; learns an estimate online.
        - Opponent (player 1): knows ego's target (assumption for cooperative/adversarial settings).

        Flow (bidirectional=True):
        1. Ego observes the opponent and updates its observation buffer.
        2. Inverse-game optimization: ego refines its estimate of the opponent's target.
        3. Forward Nash-equilibrium solves:
           - Ego view: true ego target + estimated opponent target -> Nash equilibrium.
           - Opponent view: both true targets -> Nash equilibrium (the opponent's real policy).
        4. Control execution:
           - Ego executes the controls from the first solve (game based on its estimate).
           - Opponent executes the controls from the second solve (the true game).

        Note: this is not two independent MPCs, but a combination of execution
        policies under asymmetric information.

        Args:
            current_state: Current joint state [n_states].
            observations: {player_id: observation} - observation data (only ego's entry is used).
            warm_start: Warm start for the solver.
            current_time: Current absolute timestamp.
            bidirectional: True = asymmetric-information mode, False = simulation mode.

        Returns:
            controls: {player_id: control} per-player first-step control inputs.
            updated_thetas: {0: ego's estimate, 1: opponent's true target}.
            info: auxiliary information (full trajectories, both solve results, losses).
        """
        self.iteration_count += 1
        
        # Record the current state so later inverse optimizations can look it up by time.
        self.state_history[current_time] = current_state.clone()
        
        # 1. Only ego's observation buffer is updated (asymmetric information).
        if self.ego_id in observations:
            self.update_observation_buffer(self.ego_id, observations[self.ego_id])
        
        # 2. Inverse-game optimization: only ego estimates the opponent's target.
        ego_theta_result = self.inverse_game_optimization(current_state, warm_start, current_time)
        
        # Assemble the complete theta dict (ego's estimate + opponent's true target).
        updated_thetas = {
            self.ego_id: ego_theta_result[self.ego_id],
            self.opponent_id: self.true_targets[self.opponent_id].clone()
        }
        
        # 3. Forward game solves (asymmetric execution).
        if bidirectional and self.true_targets is not None:
            # Asymmetric-information mode: two Nash-equilibrium solves.
            
            # First solve: ego's view of the game (ego knows its own target and
            # uses its estimate of the opponent's). Ego executes these controls.
            result_anchor0 = self.forward_game_solution(current_state, warm_start, anchor_player=self.ego_id)
            
            # Second solve: opponent's view (both true targets known).
            # The opponent executes these controls (its real policy).
            result_anchor1 = self.forward_game_solution(current_state, warm_start, anchor_player=self.opponent_id)
            
            # Combine controls: each player executes the equilibrium from its own view.
            # Start from ego's solve, then overwrite the opponent's slice below.
            combined_controls = result_anchor0['controls'].clone()
            
            # Replace the opponent's control columns with the second solve's result.
            opp_start = self.opponent_id * self.solver.n_controls_per_player
            opp_end = (self.opponent_id + 1) * self.solver.n_controls_per_player
            combined_controls[:, opp_start:opp_end] = result_anchor1['controls'][:, opp_start:opp_end]
            
            # Re-roll the state sequence under the combined controls and the true dynamics.
            combined_states = [current_state]
            temp_state = current_state.clone()
            for t in range(combined_controls.shape[0]):
                temp_state = self.game.step_func(temp_state, combined_controls[t])
                combined_states.append(temp_state)
            combined_states = torch.stack(combined_states)
            
            # NOTE(review): result_anchor0 cannot be None on this branch, so the
            # conditional on the 'cost' line is redundant (kept as-is).
            result = {
                'controls': combined_controls,
                'states': combined_states,
                'success': result_anchor0['success'] and result_anchor1['success'],
                'residual': max(result_anchor0['residual'], result_anchor1['residual']),
                'cost': result_anchor0.get('cost', float('nan')) if result_anchor0 is not None else float('nan')
            }
        
        else:
            # Unidirectional / no-ground-truth mode: single solve with true targets.
            result = self.forward_game_solution(current_state, warm_start, anchor_player=None)
            result_anchor0 = result
            result_anchor1 = None
        
        # 4. Extract each player's first-step control input from the combined plan.
        controls = {}
        for player_id in range(self.n_players):
            controls[player_id] = result['controls'][0,
                player_id * self.solver.n_controls_per_player:
                (player_id + 1) * self.solver.n_controls_per_player]
        
        info = {
            'full_controls': result['controls'],
            'full_states': result['states'],
            'result_anchor0': result_anchor0,
            'result_anchor1': result_anchor1,
            'parameter_estimates': {pid: theta.detach().clone() for pid, theta in updated_thetas.items()},
            'loss_history': {self.ego_id: self.loss_history[self.ego_id][-1] if self.loss_history[self.ego_id] else None},
            'success': result['success'],
            'residual': result.get('residual', 0.0),
            'cost': float(result.get('cost', float('nan')))
        }
        
        return controls, updated_thetas, info
    
    def get_current_estimates(self) -> Dict[int, torch.Tensor]:
        """Return a detached, cloned snapshot of every player's current parameter estimate."""
        snapshot: Dict[int, torch.Tensor] = {}
        for player_id, estimate in self.theta_hat.items():
            snapshot[player_id] = estimate.detach().clone()
        return snapshot

# 工具函数
def create_observation(opponent_state: torch.Tensor,
                      timestamp: int) -> Dict[str, torch.Tensor]:
    """
    Package a single opponent observation.

    Args:
        opponent_state: Opponent state vector [n_states_per_player].
        timestamp: Discrete time index of the observation.

    Returns:
        Dict with a cloned 'opponent_state' tensor (so later mutation of the
        source does not corrupt the buffer) and its 'timestamp'.
    """
    state_snapshot = opponent_state.clone()
    return {'opponent_state': state_snapshot, 'timestamp': timestamp}


def visualize_trajectories(trajectory_history: List[torch.Tensor],
                           true_target_player0: torch.Tensor,
                           true_target_player1: torch.Tensor,
                           estimated_targets_player0: List[torch.Tensor] = None,
                           estimated_targets_player1: List[torch.Tensor] = None,
                           collision_radius: float = 2.0,
                           save_path: str = None):
    """
    Visualize 3D flight trajectories of two drones.

    Produces a three-panel figure: a 3D trajectory view, an XY-plane (top-down)
    projection with collision-radius circles, and an altitude-vs-time plot.

    NOTE(review): ``estimated_targets_player0`` and ``estimated_targets_player1``
    are accepted but never used anywhere in the body — either plot them or
    remove the parameters (kept for interface compatibility).

    Args:
        trajectory_history: Trajectory history, each element is a [12] state vector
            (positions are read from indices 0:3 for drone 0 and 6:9 for drone 1)
        true_target_player0: True target position for player 0 [3]
        true_target_player1: True target position for player 1 [3]
        estimated_targets_player0: Estimated target history by player 1 for player 0 (unused)
        estimated_targets_player1: Estimated target history by player 0 for player 1 (unused)
        collision_radius: Collision radius drawn around sampled trajectory points
        save_path: Path to save the figure (optional)
    """
    # Extract per-drone XYZ positions from the stacked joint state.
    positions_0 = []
    positions_1 = []
    
    for state in trajectory_history:
        positions_0.append(state[0:3].numpy())
        positions_1.append(state[6:9].numpy())
    
    positions_0 = np.array(positions_0)
    positions_1 = np.array(positions_1)
    
    # Create figure
    fig = plt.figure(figsize=(16, 6))
    
    # 3D trajectory plot
    ax1 = fig.add_subplot(131, projection='3d')
    
    # Plot trajectories
    ax1.plot(positions_0[:, 0], positions_0[:, 1], positions_0[:, 2], 
             'b-', linewidth=2, label='Drone 0 Trajectory', alpha=0.7)
    ax1.plot(positions_1[:, 0], positions_1[:, 1], positions_1[:, 2], 
             'r-', linewidth=2, label='Drone 1 Trajectory', alpha=0.7)
    
    # Start points
    ax1.scatter(positions_0[0, 0], positions_0[0, 1], positions_0[0, 2], 
                c='blue', marker='o', s=150, label='Drone 0 Start', edgecolors='black', linewidths=2)
    ax1.scatter(positions_1[0, 0], positions_1[0, 1], positions_1[0, 2], 
                c='red', marker='o', s=150, label='Drone 1 Start', edgecolors='black', linewidths=2)
    
    # End points
    ax1.scatter(positions_0[-1, 0], positions_0[-1, 1], positions_0[-1, 2], 
                c='blue', marker='*', s=300, label='Drone 0 End', edgecolors='black', linewidths=2)
    ax1.scatter(positions_1[-1, 0], positions_1[-1, 1], positions_1[-1, 2], 
                c='red', marker='*', s=300, label='Drone 1 End', edgecolors='black', linewidths=2)
    
    # True targets
    ax1.scatter(true_target_player0[0], true_target_player0[1], true_target_player0[2], 
                c='cyan', marker='^', s=200, label='Player 0 True Target', edgecolors='black', linewidths=2)
    ax1.scatter(true_target_player1[0], true_target_player1[1], true_target_player1[2], 
                c='orange', marker='^', s=200, label='Player 1 True Target', edgecolors='black', linewidths=2)
    
    ax1.set_xlabel('X (m)', fontsize=10)
    ax1.set_ylabel('Y (m)', fontsize=10)
    ax1.set_zlabel('Z (m)', fontsize=10)
    ax1.set_title('3D Drone Flight Trajectories', fontsize=12, fontweight='bold')
    ax1.legend(loc='upper left', fontsize=8)
    ax1.grid(True, alpha=0.3)
    
    # XY plane projection
    ax2 = fig.add_subplot(132)
    ax2.plot(positions_0[:, 0], positions_0[:, 1], 'b-', linewidth=2, label='Drone 0', alpha=0.7)
    ax2.plot(positions_1[:, 0], positions_1[:, 1], 'r-', linewidth=2, label='Drone 1', alpha=0.7)
    ax2.scatter(positions_0[0, 0], positions_0[0, 1], c='blue', marker='o', s=100, edgecolors='black', linewidths=2)
    ax2.scatter(positions_1[0, 0], positions_1[0, 1], c='red', marker='o', s=100, edgecolors='black', linewidths=2)
    ax2.scatter(positions_0[-1, 0], positions_0[-1, 1], c='blue', marker='*', s=200, edgecolors='black', linewidths=2)
    ax2.scatter(positions_1[-1, 0], positions_1[-1, 1], c='red', marker='*', s=200, edgecolors='black', linewidths=2)
    ax2.scatter(true_target_player0[0], true_target_player0[1], c='cyan', marker='^', s=150, 
                label='Target 0', edgecolors='black', linewidths=2)
    ax2.scatter(true_target_player1[0], true_target_player1[1], c='orange', marker='^', s=150, 
                label='Target 1', edgecolors='black', linewidths=2)
    
    # Draw collision-radius circles at ~5 evenly spaced points along each path
    for i in range(0, len(positions_0), max(1, len(positions_0)//5)):
        circle = plt.Circle((positions_0[i, 0], positions_0[i, 1]), collision_radius, 
                           color='blue', fill=False, linestyle='--', alpha=0.3)
        ax2.add_patch(circle)
        circle = plt.Circle((positions_1[i, 0], positions_1[i, 1]), collision_radius, 
                           color='red', fill=False, linestyle='--', alpha=0.3)
        ax2.add_patch(circle)
    
    ax2.set_xlabel('X (m)', fontsize=10)
    ax2.set_ylabel('Y (m)', fontsize=10)
    ax2.set_title('XY Plane Projection (Top View)', fontsize=12, fontweight='bold')
    ax2.legend(fontsize=9)
    ax2.grid(True, alpha=0.3)
    ax2.axis('equal')
    
    # Altitude vs time
    ax3 = fig.add_subplot(133)
    time_steps = np.arange(len(positions_0))
    ax3.plot(time_steps, positions_0[:, 2], 'b-', linewidth=2, marker='o', label='Drone 0 Altitude', markersize=4)
    ax3.plot(time_steps, positions_1[:, 2], 'r-', linewidth=2, marker='s', label='Drone 1 Altitude', markersize=4)
    ax3.axhline(y=true_target_player0[2], color='cyan', linestyle='--', linewidth=2, label='Target 0 Altitude')
    ax3.axhline(y=true_target_player1[2], color='orange', linestyle='--', linewidth=2, label='Target 1 Altitude')
    ax3.set_xlabel('Time Step', fontsize=10)
    ax3.set_ylabel('Altitude Z (m)', fontsize=10)
    ax3.set_title('Altitude vs Time', fontsize=12, fontweight='bold')
    ax3.legend(fontsize=9)
    ax3.grid(True, alpha=0.3)
    
    plt.tight_layout()
    
    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"\nTrajectory plot saved to: {save_path}")
    
    plt.close()  # Close the figure to free memory (Agg backend, no display)


def visualize_parameter_estimation(parameter_history: Dict[int, List[torch.Tensor]],
                                   true_targets: Dict[int, torch.Tensor],
                                   save_path: str = None):
    """
    Plot the convergence of the ego -> opponent target estimate.

    Left panel: per-coordinate estimates over time against the true target
    coordinates. Right panel: Euclidean estimation error over time.
    """
    if not parameter_history:
        print("No parameter history available for visualization.")
        return

    # Prefer player 0 (ego); otherwise fall back to the smallest player id.
    player_id = 0 if 0 in parameter_history else sorted(parameter_history.keys())[0]

    history = parameter_history.get(player_id, [])
    if not history:
        print(f"No parameter history for player {player_id}.")
        return

    opponent_id = 1 - player_id
    true_target = true_targets[opponent_id]

    est = np.array([theta.numpy() for theta in history])
    steps = np.arange(len(est))
    err = np.linalg.norm(est - true_target.numpy(), axis=1)

    fig, (ax_est, ax_err) = plt.subplots(1, 2, figsize=(12, 4.5))

    # One (color, marker, axis-label) triple per coordinate; the two loops keep
    # the original plot/legend ordering (all estimates first, then all truths).
    coord_styles = (('r', 'o', 'X'), ('g', 's', 'Y'), ('b', '^', 'Z'))
    for dim, (color, marker, label) in enumerate(coord_styles):
        ax_est.plot(steps, est[:, dim], f'{color}-', linewidth=2, marker=marker,
                    label=f'Est. {label}', markersize=4)
    for dim, (color, _, label) in enumerate(coord_styles):
        ax_est.axhline(y=true_target[dim].item(), color=color, linestyle='--',
                       alpha=0.7, label=f'True {label}')
    ax_est.set_xlabel('Time Step', fontsize=10)
    ax_est.set_ylabel('Coordinate (m)', fontsize=10)
    ax_est.set_title(f'Player {player_id} Estimating Player {opponent_id}', fontsize=11, fontweight='bold')
    ax_est.legend(fontsize=8)
    ax_est.grid(True, alpha=0.3)

    ax_err.plot(steps, err, 'k-', linewidth=2, marker='o', markersize=5)
    ax_err.set_xlabel('Time Step', fontsize=10)
    ax_err.set_ylabel('Estimation Error (m)', fontsize=10)
    ax_err.set_title('Estimation Error Convergence', fontsize=11, fontweight='bold')
    ax_err.grid(True, alpha=0.3)
    # Annotate the first and last error values next to the curve.
    ax_err.text(0, err[0], f'{err[0]:.2f}', fontsize=9, ha='right')
    ax_err.text(len(err)-1, err[-1], f'{err[-1]:.2f}', fontsize=9, ha='left')

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"Parameter estimation plot saved to: {save_path}")
    
    plt.close()  # Close the figure to free memory (Agg backend, no display)


def _load_npz_dict(path: str) -> Dict[str, np.ndarray]:
    """安全加载npz文件为字典，不存在则返回空字典。"""
    if not os.path.exists(path):
        return {}
    with np.load(path, allow_pickle=True) as data:
        return {key: data[key] for key in data.files}


def _concat_entries(existing: Optional[np.ndarray], new_entry: np.ndarray) -> np.ndarray:
    """在episode维度拼接结果，保持历史数据。"""
    if existing is None or existing.size == 0:
        return new_entry
    return np.concatenate([existing, new_entry], axis=0)


def _update_summary_files(goal_errors: np.ndarray,
                          min_distances: np.ndarray,
                          costs: np.ndarray,
                          summary_json_path: str,
                          summary_png_path: str,
                          dataset_path: str) -> None:
    """Refresh the summary JSON and plot from the accumulated episode results.

    Args:
        goal_errors: [n_episodes, n_mpc_steps] terminal errors per step.
        min_distances: Per-episode minimum inter-player distances.
        costs: Per-episode costs.
        summary_json_path: Output path for the JSON summary.
        summary_png_path: Output path for the per-step error plot.
        dataset_path: Dataset path recorded in the summary for traceability.
    """
    if goal_errors.size == 0:
        return

    # Create parent directories only when the paths actually have one:
    # os.makedirs('') raises FileNotFoundError for bare filenames. The PNG's
    # parent is also ensured (previously only the JSON's was).
    for out_path in (summary_json_path, summary_png_path):
        parent_dir = os.path.dirname(out_path)
        if parent_dir:
            os.makedirs(parent_dir, exist_ok=True)

    # nan-aware statistics (np.nanmean / np.nanvar) so NaN entries are skipped.
    per_step_mean = np.nanmean(goal_errors, axis=0)
    per_step_var = np.nanvar(goal_errors, axis=0)
    overall_goal_error_mean = float(np.nanmean(goal_errors))
    min_distance_mean = float(np.nanmean(min_distances))
    min_distance_var = float(np.nanvar(min_distances))
    cost_mean = float(np.nanmean(costs))
    cost_var = float(np.nanvar(costs))

    steps = np.arange(1, per_step_mean.shape[0] + 1)
    # Clip guards against tiny negative variances from floating-point error.
    std = np.sqrt(np.clip(per_step_var, a_min=0.0, a_max=None))

    fig, ax = plt.subplots(figsize=(8, 4.5))
    ax.plot(steps, per_step_mean, marker="o", color="tab:blue", linewidth=2,
            label="Mean terminal error")
    ax.fill_between(steps, per_step_mean - std, per_step_mean + std,
                    color="tab:blue", alpha=0.2, label="+/- 1 std")
    ax.set_xlabel("MPC step")
    ax.set_ylabel("Terminal error")
    ax.set_title("Per-step mean terminal error (MPGP)")
    ax.grid(True, linestyle="--", alpha=0.6)
    ax.legend()
    fig.tight_layout()
    fig.savefig(summary_png_path, dpi=300)
    plt.close(fig)

    summary_payload = {
        "n_episodes": int(goal_errors.shape[0]),
        "n_mpc_steps": int(goal_errors.shape[1]),
        "overall_goal_error_mean": overall_goal_error_mean,
        "min_distance_mean": min_distance_mean,
        "min_distance_var": min_distance_var,
        "cost_mean": cost_mean,
        "cost_var": cost_var,
        "per_step_mean": per_step_mean.tolist(),
        "per_step_var": per_step_var.tolist(),
        "dataset_path": dataset_path,
    }
    with open(summary_json_path, "w", encoding="utf-8") as f:
        json.dump(summary_payload, f, indent=2)


def _append_episode_results(result_path: str,
                            summary_json_path: str,
                            summary_png_path: str,
                            episode_result: Dict[str, np.ndarray]) -> None:
    """Append one episode's results to the accumulator .npz and refresh the summary.

    Loads any previously stored results, concatenates the new episode along the
    episode axis, rewrites the archive, and regenerates the summary JSON/plot.

    Args:
        result_path: Path of the cumulative results .npz archive.
        summary_json_path: Path of the summary JSON to refresh.
        summary_png_path: Path of the summary plot to refresh.
        episode_result: Arrays and metadata for the episode being appended.
    """
    # os.makedirs('') raises FileNotFoundError, so only create a parent
    # directory when the path actually contains one.
    parent_dir = os.path.dirname(result_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)

    existing = _load_npz_dict(result_path)

    goal_errors = _concat_entries(existing.get("goal_errors"), episode_result["goal_errors"])
    min_distances = _concat_entries(existing.get("min_distances"), episode_result["min_distances"])
    costs = _concat_entries(existing.get("costs"), episode_result["costs"])
    states = _concat_entries(existing.get("states"), episode_result["states"])
    controls = _concat_entries(existing.get("controls"), episode_result["controls"])
    trajectory_indices = _concat_entries(
        existing.get("trajectory_indices"), episode_result["trajectory_indices"]
    )

    np.savez_compressed(
        result_path,
        goal_errors=goal_errors,
        min_distances=min_distances,
        costs=costs,
        states=states,
        controls=controls,
        trajectory_indices=trajectory_indices,
        n_episodes=goal_errors.shape[0],
        n_mpc_steps=episode_result["n_mpc_steps"],
        n_initial_obs=episode_result["n_initial_obs"],
        dataset_path=episode_result["dataset_path"],
    )

    _update_summary_files(
        goal_errors=goal_errors,
        min_distances=min_distances,
        costs=costs,
        summary_json_path=summary_json_path,
        summary_png_path=summary_png_path,
        dataset_path=episode_result["dataset_path"],
    )


def _append_collision_results(collision_path: str,
                              collision_payload: Optional[Dict[str, np.ndarray]]) -> None:
    """Append collision episodes to the collision .npz (no-op when payload is None).

    Args:
        collision_path: Path of the cumulative collision .npz archive.
        collision_payload: Collision arrays/metadata, or None when this run had
            no collisions (in which case nothing is written).
    """
    if collision_payload is None:
        return

    # os.makedirs('') raises FileNotFoundError, so only create a parent
    # directory when the path actually contains one.
    parent_dir = os.path.dirname(collision_path)
    if parent_dir:
        os.makedirs(parent_dir, exist_ok=True)
    existing = _load_npz_dict(collision_path)

    trajectories = _concat_entries(existing.get("trajectories"), collision_payload["trajectories"])
    indices = _concat_entries(existing.get("indices"), collision_payload["indices"])
    min_distances = _concat_entries(existing.get("min_distances"), collision_payload["min_distances"])
    dataset_starts = _concat_entries(existing.get("dataset_starts"), collision_payload["dataset_starts"])
    ego_goals = _concat_entries(existing.get("ego_goals"), collision_payload["ego_goals"])
    opponent_goals = _concat_entries(existing.get("opponent_goals"), collision_payload["opponent_goals"])

    np.savez_compressed(
        collision_path,
        trajectories=trajectories,
        indices=indices,
        min_distances=min_distances,
        dataset_starts=dataset_starts,
        ego_goals=ego_goals,
        opponent_goals=opponent_goals,
        collision_radius=collision_payload["collision_radius"],
        dataset_path=collision_payload["dataset_path"],
    )


# ===== Complete usage example (bidirectional-prediction version) =====
def example_usage():
    """Complete usage example: bidirectional-prediction drone game scenario.

    Pipeline:
      1. Build a two-drone ``DroneGame`` and an MCP Nash solver over a
         10-step prediction horizon.
      2. Solve the game once with the true goals and seed ego's observation
         buffer from the first steps of that ground-truth trajectory.
      3. Run an MPC loop (41 iterations, 1 executed step each) in which ego
         re-estimates the opponent's goal online via inverse-game gradient
         steps. Julia-style asymmetric information: only ego estimates; the
         opponent is assumed to know ego's true goal.
      4. Analyse collisions over the executed trajectory and save the
         trajectory / parameter-estimation figures under results/demo_mpgp.
    """
    config.print_config()

    print("="*60)
    print("自适应MPGP算法 - 双向预测无人机博弈示例")
    print("两架无人机互相不知道对方的目标，需要互相预测")
    print("="*60)

    # 1. Import the drone game (local import keeps module import light)
    from drone_game import DroneGame

    # 2. Create the game instance
    dt = 0.1
    game = DroneGame(
        dt=dt,
        collision_radius=2.0,
        control_limits={
            'theta': 0.5,
            'phi': 0.5,
            'tau': 20.0
        },
        velocity_limit=6.0,
        altitude_limits=(15.0, 55.0),
        g=9.81
    )

    # Store the true targets (both players have one, but neither knows the other's)
    true_target_player0 = game.target_positions[0].detach().clone()
    true_target_player1 = game.target_positions[1].detach().clone()

    print(f"\n博弈设置（真实目标）:")
    print(f"  玩家0真实目标: {true_target_player0.numpy()}")
    print(f"  玩家1真实目标: {true_target_player1.numpy()}")
    print(f"  碰撞半径: {game.collision_radius} m")

    # 3. Create the MCP solver (MPC configuration)
    control_horizon = 1      # executed steps per MPC iteration (receding horizon)
    prediction_horizon = 10  # planned steps per solve; keeps observations aligned with the prediction window

    solver = MCPGameSolver(
        game=game,
        horizon=prediction_horizon,
        solver_type="path",
        solver_params={
            'tolerance': 1e-4,
            'verbose': False,  # silence the PATH solver's detailed output
            'max_iterations': 100000,
            'major_iteration_limit': 1000,
            'minor_iteration_limit': 50000,
            'time_limit': 1200.0
        }
    )

    print(f"\nMPC求解器设置:")
    print(f"  预测步数 (Prediction Horizon): {prediction_horizon} 步 ({prediction_horizon * dt:.1f} 秒)")
    print(f"  控制步数 (Control Horizon): {control_horizon} 步 (MPC滚动执行)")
    print(f"  时间步长 (dt): {dt} 秒")
    print(f"  决策变量数: {solver.n_vars}")
    print(f"  MPC原理: 每次优化{prediction_horizon}步，应用前{control_horizon}步，然后滚动到下一时刻")

    # MPC loop configuration
    n_mpc_steps = 41  # with a 1-step control horizon, 41 iterations align the trajectory with the 51-point dataset
    n_initial_obs = 10  # first 10 timesteps act as initial observations (including the start point)

    # 4. Create the adaptive MPGP algorithm (speed/accuracy balanced settings)
    # buffer_size rationale:
    # - meaning: how many historical observation points are retained
    # - requirement: hold every observation produced across the whole MPC run
    # - count: n_initial_obs + n_mpc_steps * control_horizon = 10 + 41 * 1 = 51
    # - setting: 80 leaves headroom
    adaptive_mpgp = AdaptiveMPGP(
        mcp_game_solver=solver,
        n_players=2,
        learning_rate=0.005,  # smaller step to avoid oscillation with 1-step execution
        stop_tolerance=1e-3,
        max_inverse_steps=20,  # more iterations (10 -> 20) for higher accuracy
        buffer_size=80,  # fits all 51 observations plus headroom
        observation_weight=10.0,  # stronger observation weight (5.0 -> 10.0) to sharpen the gradient signal
        regularization_weight=0.001,  # weaker regularization (0.01 -> 0.001), less pull toward the prior
        observation_noise_pos=0.1,  # position noise
        observation_noise_vel=0.5,  # velocity noise
        use_momentum=True,  # momentum stabilizes the optimization
        momentum_beta=0.9,
        verbose=True  # verbose output to monitor the optimization
    )

    total_expected_obs = n_initial_obs + n_mpc_steps * control_horizon
    print(f"\n优化器设置（观测-预测窗口对齐版本）:")
    print(f"  学习率: {adaptive_mpgp.lr} (更小步长提升稳定性)")
    print(f"  观测缓冲区大小: {adaptive_mpgp.observation_buffer[0].maxlen} (容纳全部{total_expected_obs}个观测)")
    print(f"  预测horizon: {prediction_horizon}步 (固定10步，执行1步)")
    print(f"  每次逆优化使用: 最近{prediction_horizon}步观测 (与预测窗口对齐)")
    print(f"  观测权重: {adaptive_mpgp.obs_weight} (增强观测信号)")
    print(f"  正则化权重: {adaptive_mpgp.reg_weight} (降低约束，更信任观测)")
    print(f"  动量系数: {adaptive_mpgp.momentum_beta} (平滑优化轨迹)")
    print(f"  最大迭代次数: {adaptive_mpgp.max_inverse_steps} (平衡速度与精度)")
    print(f"  MPC循环次数: {n_mpc_steps} (轨迹点数对齐数据集={total_expected_obs})")

    # 5. Generate initial observation data (first steps of the true game trajectory)
    print(f"\n{'='*60}")
    print("生成初始观测数据（用于MLE估计）")
    print(f"{'='*60}")

    # Step 1: solve the game once with the true targets to obtain the
    # ground-truth trajectory (analogous to running simple_demo).
    initial_state = game.get_initial_state()
    print(f"步骤1: 使用真实目标求解博弈（模拟simple_demo运行）...")
    print(f"  初始状态: {initial_state.numpy()}")
    print(f"  玩家0真实目标: {true_target_player0.numpy()}")
    print(f"  玩家1真实目标: {true_target_player1.numpy()}")

    with torch.no_grad():
        true_result = solver.solve_game(initial_state, warm_start=None)

    if not true_result['success']:
        print(f"警告: 真实博弈求解未收敛，残差={true_result['residual']:.6e}")
        print(f"继续使用当前解...")
    else:
        print(f"[OK] 真实博弈求解成功，残差={true_result['residual']:.6e}")

    # Step 2: take the first n_initial_obs steps of the true trajectory as
    # initial observations.
    print(f"\n步骤2: 提取前 {n_initial_obs} 步作为初始观测数据...")
    true_states = true_result['states']

    # Julia style: only ego's observation buffer is seeded.
    # BUGFIX: the previous version created and pushed each observation twice
    # per timestep (copy-paste leftover), doubling the seeded buffer content.
    for t in range(n_initial_obs):
        # ego observes the opponent's state [px, py, pz, vx, vy, vz]
        obs_ego = create_observation(
            opponent_state=true_states[t, 6:12],
            timestamp=t
        )
        adaptive_mpgp.update_observation_buffer(player_id=0, observation=obs_ego)

    print(f"[OK] 初始观测数据已填充（Julia风格）:")
    print(f"  ego观测缓冲区: {len(adaptive_mpgp.observation_buffer[0])} 个观测")

    # [Key fix] add the observation for the MPC start state (t = n_initial_obs)
    print(f"\n步骤2.5: 添加MPC起始状态(t={n_initial_obs})的观测...")
    obs_ego_initial = create_observation(
        opponent_state=true_states[n_initial_obs, 6:12],
        timestamp=n_initial_obs
    )
    adaptive_mpgp.update_observation_buffer(player_id=0, observation=obs_ego_initial)

    print(f"[OK] MPC起始观测已添加:")
    print(f"  ego观测缓冲区: {len(adaptive_mpgp.observation_buffer[0])} 个观测（时间戳0-{n_initial_obs}）")

    # Step 3: initialize the parameter estimate (neutral initial guess)
    print(f"\n步骤3: 初始化参数估计（中性初始猜测）...")
    # True targets: ego=[0,-7,25], opponent=[7,0,35].
    # Use a mid-altitude, lightly offset point as the initial guess.
    # Julia style: only ego's guess about the opponent is needed.
    theta_init = {
        0: torch.tensor([0.0, 0.0, 30.0])  # ego's guess of the opponent's target (neutral)
    }
    adaptive_mpgp.initialize_parameters(
        theta_init=theta_init,
        theta_prior=theta_init,
        true_targets={
            0: true_target_player0,  # ego's true target
            1: true_target_player1   # opponent's true target (opponent knows ego's target)
        }
    )

    print(f"  ego(0)初始猜测opponent(1)目标: {theta_init[0].numpy()} (真实: {true_target_player1.numpy()}, 误差: {torch.norm(theta_init[0] - true_target_player1).item():.2f})")
    print(f"  opponent(1)知道ego(0)真实目标: {true_target_player0.numpy()} (Julia风格假设)")

    # Step 4: set the current state to the state at step n_initial_obs
    # (the MPGP loop starts from there).
    current_state = true_states[n_initial_obs].clone()
    print(f"\n步骤4: 设置MPGP起始状态（时间步 {n_initial_obs}）")
    print(f"  当前状态: {current_state.numpy()}")

    # 6. MPC receding-horizon simulation
    # Loop configuration:
    # - initial observations: first n_initial_obs steps (from the true Nash equilibrium)
    # - each MPC iteration: plan prediction_horizon steps, execute control_horizon steps
    # - n_mpc_steps MPC iterations in total (41 here)
    # - total simulated time = (n_initial_obs + n_mpc_steps * control_horizon) * dt

    total_steps_simulated = n_initial_obs + n_mpc_steps * control_horizon  # total simulated steps
    total_simulation_time = total_steps_simulated * dt

    print(f"\n{'='*60}")
    print("MPC滚动窗口配置")
    print(f"{'='*60}")
    print(f"  初始观测: {n_initial_obs} 步 ({n_initial_obs * dt:.2f} 秒) - 来自真实博弈轨迹")
    print(f"  预测步数: {prediction_horizon} 步 ({prediction_horizon * dt:.2f} 秒) - 每次优化窗口")
    print(f"  控制步数: {control_horizon} 步 ({control_horizon * dt:.2f} 秒) - 每次执行步数")
    print(f"  MPC循环次数: {n_mpc_steps} 次")
    print(f"  总仿真步数: {total_steps_simulated} 步")
    print(f"  总仿真时间: {total_simulation_time:.2f} 秒")
    print(f"  执行流程: 初始观测(t=0-{n_initial_obs-1}) → MPC循环×{n_mpc_steps}(t={n_initial_obs}-{total_steps_simulated-1})")
    print(f"  MPC策略: 每次优化{prediction_horizon}步 → 执行前{control_horizon}步 → 向前滚动 → 重复")

    # Trajectory log seeded with the initial-observation phase only: this is
    # the "history" that exists before MPGP starts executing.
    trajectory_history = [true_states[i].clone() for i in range(n_initial_obs)]
    print(f"\n轨迹初始化:")
    print(f"  已记录初始 {len(trajectory_history)} 个状态点（时间步 0-{n_initial_obs-1}）")
    print(f"  这是MPGP启动前的历史轨迹（基于真实Nash均衡）")
    print(f"  从时间步 {n_initial_obs} 开始，将记录MPGP的真实执行轨迹")

    print(f"\n开始MPC滚动执行...")
    print(f"初始轨迹包含: {len(trajectory_history)} 个状态点")
    print(f"初始观测缓冲区: ego={len(adaptive_mpgp.observation_buffer[0])}个")
    print(f"当前theta_hat[0]（ego对opponent的估计）: {adaptive_mpgp.theta_hat[0].detach().numpy()}")
    print(f"真实目标: ego={true_target_player0.numpy()}, opponent={true_target_player1.numpy()}")

    for t in range(n_mpc_steps):
        # Absolute timestamp: MPC starts right after the initial observations.
        # MPC #k covers t = n_initial_obs+k-1 -> n_initial_obs+k (1-step roll).
        current_absolute_time = n_initial_obs + t * control_horizon
        next_time = current_absolute_time + control_horizon

        print(f"\n{'='*60}")
        print(f"MPC循环 {t+1}/{n_mpc_steps} (时间步 {current_absolute_time} → {next_time})")
        print(f"  即将执行 {control_horizon} 步控制")
        print(f"{'='*60}")

        # ====================================================================
        # Julia-style bidirectional prediction flow (correct ordering):
        # 1. run both forward solves and collect future observations first
        #    (only ego needs observations)
        # 2. run the inverse-game optimization on those observations
        #    (only ego's parameters are optimized)
        # 3. execute the control (true dynamics)
        # 4. update the current state
        # ====================================================================

        # Step 1: run both forward solves and collect ego's observations
        print(f"\n[步骤1: Julia风格双向正向求解] 时间步{current_absolute_time}")

        # First solve: ego's view (ego's true target + estimated opponent target)
        result_anchor0_prefetch = adaptive_mpgp.forward_game_solution(
            current_state, warm_start=None, anchor_player=0
        )

        # Second solve: opponent's view (both true targets; opponent knows ego's target)
        result_anchor1_prefetch = adaptive_mpgp.forward_game_solution(
            current_state, warm_start=None, anchor_player=1
        )

        # Extract ego's observations from the opponent-view solve
        # (ego observes the opponent's trajectory).
        states_anchor1_prefetch = result_anchor1_prefetch['states']
        horizon_to_collect = min(control_horizon, states_anchor1_prefetch.shape[0] - 1)

        print(f"  收集ego观测时间戳 {current_absolute_time+1} 到 {current_absolute_time+horizon_to_collect}")
        for k in range(1, horizon_to_collect + 1):
            observation_timestamp = current_absolute_time + k

            # Only ego observes the opponent (Julia style)
            obs_ego = create_observation(
                opponent_state=states_anchor1_prefetch[k, 6:12],  # opponent state
                timestamp=observation_timestamp
            )
            adaptive_mpgp.update_observation_buffer(player_id=0, observation=obs_ego)
            # the opponent needs no observations (knows ego's target)

        print(f"  观测收集完成，ego缓冲区={len(adaptive_mpgp.observation_buffer[0])}个")

        # Diagnostics: show the content of the just-collected observation
        if horizon_to_collect > 0:
            first_obs_timestamp = current_absolute_time + 1
            if first_obs_timestamp < len(true_states):
                print(f"  首个观测(t={first_obs_timestamp}):")
                print(f"    ego观测opponent位置: {states_anchor1_prefetch[1, 6:9].numpy()}")

        # Step 2: run the MPGP step (inverse optimization on buffered observations)
        print(f"\n[步骤2: Julia风格MPGP逆优化] 时间步{current_absolute_time}")

        # Diagnostics: show the most recent buffered observations
        if t == 0 or t == n_mpc_steps - 1:  # first and last MPC iteration
            print(f"\n  📊 观测缓冲区诊断（MPC#{t+1}）:")
            print(f"  ego观测opponent（真实目标[7, 0, 35]）:")
            if len(adaptive_mpgp.observation_buffer[0]) > 0:
                recent_obs = list(adaptive_mpgp.observation_buffer[0])[-5:]
                for obs in recent_obs:
                    t_stamp = obs['timestamp']
                    pos = obs['opponent_state'][0:3].numpy()
                    vel = obs['opponent_state'][3:6].numpy()
                    print(f"    t={t_stamp}: pos={pos}, vel={vel}")
                print(f"    💡 应该看到X→7, Y→0, Z→35")

        controls, updated_thetas, info = adaptive_mpgp.step(
            current_state,
            observations={},  # observations already live in the buffer
            warm_start=None,
            current_time=current_absolute_time,
            bidirectional=True
        )

        # Concise parameter-estimation summary (Julia style)
        error_0 = torch.norm(updated_thetas[0].detach() - true_target_player1).item()
        print(f"\n[参数更新摘要 MPC#{t+1}] (Julia风格)")
        print(f"  ego→opponent: {updated_thetas[0].detach().numpy()} | 真值: {true_target_player1.numpy()} | 误差: {error_0:.4f}")
        print(f"  opponent已知ego目标: {updated_thetas[1].detach().numpy()} (真值, 无需估计)")
        print(f"  参数历史: ego={len(adaptive_mpgp.parameter_history[0])}次")
        print(f"  观测缓冲: ego={len(adaptive_mpgp.observation_buffer[0])}个")

        # Step 3: actually execute the control, using the state sequence from
        # MPGP's bidirectional prediction.
        if info['full_states'] is not None and len(info['full_states']) > 0:
            # Execute the first control_horizon steps (receding-horizon policy)
            exec_steps = min(control_horizon, len(info['full_states']) - 1)

            for step_idx in range(1, exec_steps + 1):  # index 0 is the current state
                # Log MPGP's actually executed trajectory
                trajectory_history.append(info['full_states'][step_idx].detach().clone())

            # Advance the current state past the executed steps
            current_state = info['full_states'][exec_steps].clone()

            print(f"  执行 {exec_steps} 步MPGP控制后状态:")
            print(f"    ego: 位置={current_state[0:3].numpy()}, 速度={current_state[3:6].numpy()}")
            print(f"    opponent: 位置={current_state[6:9].numpy()}, 速度={current_state[9:12].numpy()}")
        else:
            print(f"  警告: 无法获取状态序列，跳过本步")
            continue

        # Collision check on the new state
        pos_0 = current_state[0:3]
        pos_1 = current_state[6:9]
        distance = torch.norm(pos_0 - pos_1).item()
        status = "⚠️碰撞" if distance < game.collision_radius else "✓安全"
        print(f"  [{status}] 距离: {distance:.2f}m")

    # 8. Collision analysis (full trajectory)
    print(f"\n{'='*60}")
    print("碰撞安全分析（完整轨迹）")
    print(f"{'='*60}")

    min_distance = float('inf')
    min_distance_time = 0
    collision_count = 0
    collision_times = []

    for i, state in enumerate(trajectory_history):
        pos_0 = state[0:3]
        pos_1 = state[6:9]
        distance = torch.norm(pos_0 - pos_1).item()

        if distance < min_distance:
            min_distance = distance
            min_distance_time = i

        if distance < game.collision_radius:
            collision_count += 1
            collision_times.append(i)

    print(f"轨迹总长度: {len(trajectory_history)} 个状态点 ({len(trajectory_history) * dt:.2f} 秒)")
    print(f"碰撞半径: {game.collision_radius} m")
    print(f"\n最小距离: {min_distance:.4f} m (发生在时间步 {min_distance_time}, t={min_distance_time * dt:.2f}s)")

    if collision_count > 0:
        print(f"\n[FAIL] 发现碰撞: {collision_count} 次")
        print(f"碰撞时间步: {collision_times[:10]}{'...' if len(collision_times) > 10 else ''}")
        print(f"首次碰撞: 时间步 {collision_times[0]} (t={collision_times[0] * dt:.2f}s)")
    else:
        print(f"\n[OK] 无碰撞 - 安全余量: {min_distance - game.collision_radius:.4f} m")

    # 9. Show the convergence result (Julia style) - only ego's estimate
    print(f"\n{'='*60}")
    print("参数估计演变摘要（Julia风格 - 只ego估计）")
    print(f"{'='*60}")

    # Target setup
    print(f"\n[目标设置]")
    print(f"  ego(0)真实目标: {true_target_player0.numpy()}")
    print(f"  opponent(1)真实目标: {true_target_player1.numpy()}")

    # Only ego's estimation history is displayed
    player_id = 0
    true_opponent_target = true_target_player1

    param_history = adaptive_mpgp.parameter_history[player_id]
    if param_history:
        print(f"\n[ego估计opponent目标的演变]")
        print(f"  opponent真实目标: {true_opponent_target.numpy()}")
        print(f"  总共{len(param_history)}次更新:")

        # Show every update
        for i, theta in enumerate(param_history):
            error = torch.norm(theta - true_opponent_target).item()
            delta = (theta - true_opponent_target).numpy()

            print(f"    步骤{i+1}: {theta.numpy()} | 误差: {error:.4f}")
            if i > 0:
                prev_theta = param_history[i-1]
                change = (theta - prev_theta).numpy()
                print(f"            变化: Δ={change} | 各维误差: X={delta[0]:.2f}, Y={delta[1]:.2f}, Z={delta[2]:.2f}")

    print(f"\n[opponent信息假设]")
    print(f"  opponent已知ego真实目标: {true_target_player0.numpy()} (Julia风格假设)")

    # 10. Visualize trajectories
    print(f"\n{'='*60}")
    print("生成可视化图表")
    print(f"{'='*60}")

    # Prepare absolute output path for the demo PNG
    demo_output_dir = os.path.abspath(os.path.join(os.path.dirname(__file__), '..', 'results', 'demo_mpgp'))
    os.makedirs(demo_output_dir, exist_ok=True)
    trajectory_output_path = os.path.join(demo_output_dir, 'mpgp_demo.png')

    # Visualize flight trajectories
    visualize_trajectories(
        trajectory_history=trajectory_history,
        true_target_player0=true_target_player0,
        true_target_player1=true_target_player1,
        collision_radius=game.collision_radius,
        save_path=trajectory_output_path
    )

    parameter_plot_path = os.path.join(demo_output_dir, 'parameter_estimation.png')
    print(f"[可视化] 参数估计图将保存至: {parameter_plot_path}")

    # Visualize parameter estimation convergence
    visualize_parameter_estimation(
        parameter_history=adaptive_mpgp.parameter_history,
        true_targets={0: true_target_player0, 1: true_target_player1},
        save_path=parameter_plot_path
    )

    print("\nVisualization completed!")




def run_dataset_mpgp(trajectory_idx: int,
                     dataset_path: str = DEFAULT_DATASET_PATH,
                     obs_window: int = 10,
                     n_mpc_steps: int = 40) -> None:
    """
    Run one dataset-driven MPGP episode: processes a single trajectory index
    and appends the results to the shared output files.

    Args:
        trajectory_idx: Trajectory index inside the npz file.
        dataset_path: Path to the dataset.
        obs_window: Observation-window length used by the inverse optimization.
        n_mpc_steps: Number of MPC steps executed for this episode (>= 1).

    Raises:
        ValueError: If ``n_mpc_steps`` is not positive, ``trajectory_idx`` is
            out of range, or the selected trajectory is too short.
    """
    from drone_game import DroneGame  # local import to avoid unused overhead

    # Guard: the step-0 bookkeeping below indexes element 0 of the per-step
    # buffers, which would raise IndexError for n_mpc_steps < 1.
    if n_mpc_steps < 1:
        raise ValueError(f"n_mpc_steps={n_mpc_steps} must be >= 1")

    dataset_path = os.path.abspath(dataset_path)
    repo_root = os.path.abspath(os.path.join(os.path.dirname(__file__), ".."))
    # Use the context manager so the npz file handle is closed; indexing the
    # NpzFile eagerly loads each array into memory, so they stay valid after.
    with np.load(dataset_path) as data:
        trajectories_np: np.ndarray = data["trajectories"]
        ego_goals_np: np.ndarray = data["ego_goals"]
        opponent_goals_np: np.ndarray = data["opponent_goals"]

    n_available = trajectories_np.shape[0]
    if trajectory_idx < 0 or trajectory_idx >= n_available:
        raise ValueError(f"trajectory_idx={trajectory_idx} 超出范围 0~{n_available - 1}")

    traj_np = trajectories_np[trajectory_idx]
    traj_len = traj_np.shape[0]
    if traj_len <= 1:
        raise ValueError(f"轨迹{trajectory_idx} 长度过短，无法运行MPGP（长度={traj_len})")

    # Basic config
    dt = 0.1
    control_horizon = 1
    prediction_horizon = 10

    # Environment and solver
    game = DroneGame(
        dt=dt,
        collision_radius=2.0,
        control_limits={"theta": 0.5, "phi": 0.5, "tau": 20.0},
        velocity_limit=6.0,
        altitude_limits=(15.0, 55.0),
        g=9.81,
    )

    solver_params = {
        "tolerance": 1e-4,
        "verbose": False,
        "max_iterations": 100000,
        "major_iteration_limit": 1000,
        "minor_iteration_limit": 50000,
        "time_limit": 1200.0,
    }
    solver = MCPGameSolver(
        game=game,
        horizon=prediction_horizon,
        solver_type="path",
        solver_params=solver_params,
    )

    # Dimensions and slices for the stacked two-player state/control vectors
    n_controls_per_player = solver.n_controls_per_player
    n_states_per_player = solver.n_states_per_player
    n_players = 2
    opponent_slice = slice(n_states_per_player, 2 * n_states_per_player)
    state_dim = n_states_per_player * n_players
    control_dim = n_controls_per_player * n_players

    # Shared output files (appended to across episodes)
    output_dir = os.path.join(repo_root, "results", "demo_mpgp")
    os.makedirs(output_dir, exist_ok=True)
    result_path = os.path.join(output_dir, "mpgp_result.npz")
    collision_path = os.path.join(output_dir, "mpgp_cll.npz")
    summary_json_path = os.path.join(output_dir, "mpgp_metrics_summary.json")
    summary_png_path = os.path.join(output_dir, "mpgp_metrics_summary.png")

    # Per-step metric buffers, NaN-padded for early termination
    goal_errors_episode = np.full((n_mpc_steps,), np.nan, dtype=np.float32)
    cost_per_step = np.full((n_mpc_steps,), np.nan, dtype=np.float32)

    executed_states: List[torch.Tensor] = []
    executed_controls: List[np.ndarray] = []

    # Install the dataset goals as the game's true targets
    true_target_player0 = torch.tensor(ego_goals_np[trajectory_idx], dtype=torch.float32)
    true_target_player1 = torch.tensor(opponent_goals_np[trajectory_idx], dtype=torch.float32)
    game.target_positions[0] = true_target_player0.clone()
    game.target_positions[1] = true_target_player1.clone()

    adaptive_mpgp = AdaptiveMPGP(
        mcp_game_solver=solver,
        n_players=2,
        learning_rate=0.005,
        stop_tolerance=1e-3,
        max_inverse_steps=20,
        buffer_size=80,
        observation_weight=10.0,
        regularization_weight=0.001,
        observation_noise_pos=0.1,
        observation_noise_vel=0.5,
        use_momentum=True,
        momentum_beta=0.9,
        verbose=False,
    )

    # Neutral initial guess for ego's estimate of the opponent's target
    theta_init = {0: torch.tensor([0.0, 0.0, 30.0], dtype=torch.float32)}
    adaptive_mpgp.initialize_parameters(
        theta_init=theta_init,
        theta_prior=theta_init,
        true_targets={0: true_target_player0, 1: true_target_player1},
    )

    adaptive_mpgp.observation_buffer[0].clear()

    current_state = torch.tensor(traj_np[0], dtype=torch.float32)
    executed_states.append(current_state.clone())

    print("\n" + "=" * 60)
    print(f"单次MPGP数据集运行 | 轨迹索引 = {trajectory_idx} / {n_available - 1}")
    print("=" * 60)

    # Step 0: no observations; ego forward with init theta; opponent uses dataset t=1
    _, updated_thetas, info = adaptive_mpgp.step(
        current_state=current_state,
        observations={},
        warm_start=None,
        current_time=0,
        bidirectional=True,
    )
    err0 = torch.norm(updated_thetas[0].detach() - true_target_player1).item()
    goal_errors_episode[0] = err0
    cost_per_step[0] = float(info.get("cost", np.nan))

    # Extract ego's first control; fall back to zeros if the solve yielded none
    full_ctrls = info.get("full_controls", None)
    if full_ctrls is not None and full_ctrls.shape[0] > 0:
        ego_ctrl = full_ctrls[0, 0:n_controls_per_player]
    else:
        ego_ctrl = torch.zeros(n_controls_per_player)

    ctrl_vec = torch.zeros(control_dim)
    ctrl_vec[0:n_controls_per_player] = ego_ctrl
    executed_controls.append(ctrl_vec.detach().cpu().numpy())

    # Propagate ego through the dynamics; overwrite the opponent's half of the
    # state with the dataset's recorded trajectory.
    next_state = game.step_func(current_state, ctrl_vec)
    next_state[opponent_slice] = torch.tensor(traj_np[1, 6:12], dtype=torch.float32)

    current_state = next_state
    executed_states.append(current_state.clone())

    # Seed first inverse window t=1..obs_window (truncated if shorter)
    adaptive_mpgp.observation_buffer[0].clear()
    obs_start = 1
    obs_end = min(obs_start + obs_window - 1, traj_len - 1)
    for t_obs in range(obs_start, obs_end + 1):
        obs_state = torch.tensor(traj_np[t_obs, 6:12], dtype=torch.float32)
        adaptive_mpgp.update_observation_buffer(
            player_id=0,
            observation=create_observation(obs_state, timestamp=t_obs),
        )

    # Rolling MPC: window t=k..k+obs_window-1 for inverse
    for step_idx in range(1, n_mpc_steps):
        current_abs_time = step_idx
        next_idx = current_abs_time + control_horizon
        if next_idx >= traj_len:
            print(f"提前结束：数据长度不足，next_idx={next_idx}, traj_len={traj_len}")
            break

        # Refill the observation buffer with the current sliding window
        adaptive_mpgp.observation_buffer[0].clear()
        obs_start = current_abs_time
        obs_end = min(current_abs_time + obs_window - 1, traj_len - 1)
        for t_obs in range(obs_start, obs_end + 1):
            obs_state = torch.tensor(traj_np[t_obs, 6:12], dtype=torch.float32)
            adaptive_mpgp.update_observation_buffer(
                player_id=0,
                observation=create_observation(obs_state, timestamp=t_obs),
            )

        _, updated_thetas, info = adaptive_mpgp.step(
            current_state=current_state,
            observations={},
            warm_start=None,
            current_time=current_abs_time,
            bidirectional=True,
        )

        err = torch.norm(
            updated_thetas[0].detach() - true_target_player1
        ).item()
        goal_errors_episode[step_idx] = err
        cost_per_step[step_idx] = float(info.get("cost", np.nan))

        full_ctrls = info.get("full_controls", None)
        if full_ctrls is not None and full_ctrls.shape[0] > 0:
            ego_ctrl = full_ctrls[0, 0:n_controls_per_player]
        else:
            ego_ctrl = torch.zeros(n_controls_per_player)

        ctrl_vec = torch.zeros(control_dim)
        ctrl_vec[0:n_controls_per_player] = ego_ctrl
        executed_controls.append(ctrl_vec.detach().cpu().numpy())

        # Ego follows its own dynamics; opponent is replayed from the dataset
        next_state = game.step_func(current_state, ctrl_vec)
        next_state[opponent_slice] = torch.tensor(traj_np[next_idx, 6:12], dtype=torch.float32)

        current_state = next_state
        executed_states.append(current_state.clone())

    # Minimum inter-drone distance over the executed trajectory
    exec_np = torch.stack(executed_states).numpy()
    diff_exec = exec_np[:, 0:3] - exec_np[:, 6:9]
    min_dist = float(np.sqrt((diff_exec ** 2).sum(axis=1)).min())

    # Fixed-size, NaN-padded buffers so episodes of differing length stack
    state_buffer = np.full((n_mpc_steps + 1, state_dim), np.nan, dtype=np.float32)
    state_len = min(exec_np.shape[0], n_mpc_steps + 1)
    state_buffer[:state_len, :] = exec_np[:state_len]

    control_buffer = np.full((n_mpc_steps, control_dim), np.nan, dtype=np.float32)
    if executed_controls:
        ctrl_np = np.stack(executed_controls, axis=0)
        ctrl_len = min(ctrl_np.shape[0], n_mpc_steps)
        control_buffer[:ctrl_len, :] = ctrl_np[:ctrl_len]

    episode_result = {
        "goal_errors": goal_errors_episode[np.newaxis, :],
        "min_distances": np.array([min_dist], dtype=np.float32),
        "costs": cost_per_step[np.newaxis, :],
        "states": state_buffer[np.newaxis, :, :],
        "controls": control_buffer[np.newaxis, :, :],
        "trajectory_indices": np.array([trajectory_idx], dtype=np.int64),
        "n_mpc_steps": n_mpc_steps,
        "n_initial_obs": obs_window,
        "dataset_path": dataset_path,
    }

    _append_episode_results(
        result_path=result_path,
        summary_json_path=summary_json_path,
        summary_png_path=summary_png_path,
        episode_result=episode_result,
    )

    # Only write a collision record if the episode actually collided
    collision_payload: Optional[Dict[str, np.ndarray]] = None
    if min_dist < game.collision_radius:
        collision_payload = {
            "trajectories": exec_np[np.newaxis, :, :],
            "indices": np.array([trajectory_idx], dtype=np.int64),
            "min_distances": np.array([min_dist], dtype=np.float32),
            "dataset_starts": np.array([traj_np[0]], dtype=np.float32),
            "ego_goals": np.array([ego_goals_np[trajectory_idx]], dtype=np.float32),
            "opponent_goals": np.array([opponent_goals_np[trajectory_idx]], dtype=np.float32),
            "collision_radius": game.collision_radius,
            "dataset_path": dataset_path,
        }

    _append_collision_results(collision_path, collision_payload)

    print(f"\n[Result] 轨迹{trajectory_idx} 终端误差均值: {np.nanmean(goal_errors_episode):.4f}")
    print(f"[Result] 最小距离: {min_dist:.4f} m | 剩余缓冲: {min_dist - game.collision_radius:.4f} m")
    print(f"[Result] 数据已写入: {result_path}")


if __name__ == "__main__":
    # CLI entry point: dispatch between the dataset runner and the demo.
    parser = argparse.ArgumentParser(description="运行单次MPGP数据集episode")

    # Declarative option table, registered in one pass below.
    cli_options = (
        (("--mode",), dict(
            choices=["dataset", "example"],
            default="dataset",
            help="dataset=单轨迹运行, example=运行demo示例",
        )),
        (("--trajectory-idx",), dict(
            type=int,
            default=None,
            help="npz数据集中要运行的轨迹索引（dataset模式必填）",
        )),
        (("--dataset-path",), dict(
            type=str,
            default=DEFAULT_DATASET_PATH,
            help="数据集npz路径（默认指向demo_vae测试集）",
        )),
        (("--obs-window",), dict(
            type=int,
            default=10,
            help="逆优化观测窗口长度",
        )),
        (("--n-mpc-steps",), dict(
            type=int,
            default=40,
            help="单次执行的MPC步数",
        )),
    )
    for flags, options in cli_options:
        parser.add_argument(*flags, **options)

    args = parser.parse_args()

    if args.mode != "dataset":
        example_usage()
    else:
        # The dataset runner requires an explicit trajectory index.
        if args.trajectory_idx is None:
            parser.error("--trajectory-idx 在dataset模式下必须提供")
        run_dataset_mpgp(
            trajectory_idx=args.trajectory_idx,
            dataset_path=args.dataset_path,
            obs_window=args.obs_window,
            n_mpc_steps=args.n_mpc_steps,
        )
