"""
MCP Solver Implementation

实现混合互补问题求解器，支持博弈论中的纳什均衡计算
基于PATHMCP.py的实现思路
"""

import torch
import torch.nn as nn
import numpy as np
from typing import Dict, Any, Optional, List, Callable, Tuple
import casadi as ca
import warnings
import copy
import time
from collections import OrderedDict

try:
    import jax
    import jax.numpy as jnp
    from jax import grad, jacfwd, jacrev
    JAX_AVAILABLE = True
except ImportError:
    JAX_AVAILABLE = False
    print("[WARNING] JAX not available, falling back to manual IFT implementation")

from .path_solver import PATHMCPWrapper, solve_mcp_differentiable


class MCPSolver:
    """Base class for mixed complementarity problem (MCP) solvers.

    Concrete subclasses implement :meth:`solve`, which searches for a point
    satisfying the KKT-style complementarity conditions used when computing
    Nash equilibria.
    """

    def __init__(
        self,
        tolerance: float = 1e-6,
        max_iterations: int = 1000,
        verbose: bool = False
    ):
        # Convergence threshold on the residual norm.
        self.tolerance = tolerance
        # Hard cap on the number of solver iterations.
        self.max_iterations = max_iterations
        # Emit progress information when True.
        self.verbose = verbose

    def solve(
        self,
        F: Callable,
        z0: np.ndarray,
        lb: Optional[np.ndarray] = None,
        ub: Optional[np.ndarray] = None
    ) -> Dict[str, Any]:
        """Solve the MCP: find z such that F(z) >= 0, lb <= z <= ub,
        F(z)^T (z - lb) = 0 and F(z)^T (ub - z) = 0.

        Args:
            F: residual function F.
            z0: initial iterate.
            lb: lower bounds (unbounded below when None).
            ub: upper bounds (unbounded above when None).

        Returns:
            A result dictionary describing the solve.
        """
        raise NotImplementedError("子类必须实现solve方法")


class SimpleIterativeMCPSolver(MCPSolver):
    """Projected-gradient MCP solver.

    Repeatedly steps opposite to F and projects the iterate back into the
    box [lb, ub]; convergence is declared once ||F(z)|| falls below the
    tolerance inherited from :class:`MCPSolver`.
    """

    def __init__(self, learning_rate: float = 0.01, **kwargs):
        super().__init__(**kwargs)
        # Step size of the projected-gradient update.
        self.learning_rate = learning_rate

    def solve(
        self,
        F: Callable,
        z0: np.ndarray,
        lb: Optional[np.ndarray] = None,
        ub: Optional[np.ndarray] = None
    ) -> Dict[str, Any]:
        """Run projected gradient descent on F starting from z0."""
        dim = len(z0)
        # Missing bounds default to an unbounded box.
        lower = -np.inf * np.ones(dim) if lb is None else lb
        upper = np.inf * np.ones(dim) if ub is None else ub

        z = z0.copy()
        for step in range(self.max_iterations):
            residual_vec = F(z)

            # Converged: the residual norm is below tolerance.
            res_norm = np.linalg.norm(residual_vec)
            if res_norm < self.tolerance:
                return {
                    'solution': z,
                    'success': True,
                    'residual': res_norm,
                    'iterations': step,
                    'status': 'converged'
                }

            # Gradient step followed by projection onto the box.
            z = np.clip(z - self.learning_rate * residual_vec, lower, upper)

        # Iteration budget exhausted without meeting the tolerance.
        return {
            'solution': z,
            'success': False,
            'residual': np.linalg.norm(F(z)),
            'iterations': self.max_iterations,
            'status': 'max_iterations'
        }


class MCPGameSolver:
    """
    Game MCP solver.

    Converts a multi-agent game into a mixed complementarity problem (MCP)
    and solves it. Follows the implementation approach of PATHMCP.py, using
    CasADi to build the symbolic KKT system.
    """

    def __init__(
        self,
        game,
        horizon: int,
        block_sizes: Optional[List[int]] = None,
        solver_type: str = "path",
        solver_params: Optional[Dict] = None
    ):
        """
        Initialize the game MCP solver.

        Args:
            game: game object (supplies dynamics, costs, limits and bounds).
            horizon: number of time steps.
            block_sizes: block sizes (stored; not otherwise used here).
            solver_type: solver backend ("path" or "simple").
            solver_params: backend-specific solver parameters; sensible
                defaults are filled in per backend when None.
        """
        self.game = game
        self.horizon = horizon
        self.N = horizon
        self.block_sizes = block_sizes or []
        self.solver_type = solver_type
        if solver_params is None:
            if solver_type == "path":
                # PATH parameters kept consistent with 3demo/pathsolver_demo.py
                solver_params = {
                    'tolerance': 1e-4,
                    'verbose': True,
                    'max_iterations': 100000,
                    'major_iteration_limit': 1000,
                    'minor_iteration_limit': 50000,
                    'time_limit': 1200.0
                }
            elif solver_type == "simple":
                solver_params = {
                    'learning_rate': 0.01,
                    'max_iterations': 10000,
                    'tolerance': 1e-2,
                    'verbose': True
                }
            else:
                solver_params = {}

        self.solver_params = solver_params

        # Instantiate the requested solver backend.
        if solver_type == "path":
            self.mcp_solver = PATHMCPWrapper(**self.solver_params)
        elif solver_type == "simple":
            self.mcp_solver = SimpleIterativeMCPSolver(**self.solver_params)
        else:
            raise ValueError(f"未知求解器类型: {solver_type}")

        # Record the solver type (used by cache management below).
        self._solver_type = solver_type
        self._warm_start_enabled = True
        self._last_control_warm_start = None
        self._warm_start_cache = OrderedDict()
        self._warm_start_cache_size = 2048

        # Runtime monitoring: count solver calls and time-limit hits.
        self._batch_stats = self._create_empty_stats()
        self._total_stats = self._create_empty_stats()

        # Cache the parameters of the current MCP evaluation so the F/J
        # closures below can be reused without rebuilding lambdas per solve.
        self._problem_cache = {
            'x0': None,
            'u_prev': None,
            'target_vec': None,
        }

        def _cached_F_numpy(z):
            # Evaluate the CasADi MCP residual F at z using cached parameters.
            cache = self._problem_cache
            return np.array(self.F(
                z,
                cache['x0'],
                cache['u_prev'],
                cache['target_vec']
            )).squeeze()

        def _cached_J_numpy(z):
            # Evaluate the CasADi Jacobian of F at z using cached parameters.
            cache = self._problem_cache
            return np.array(self.J(
                z,
                cache['x0'],
                cache['u_prev'],
                cache['target_vec']
            ))

        self._cached_F_numpy = _cached_F_numpy
        self._cached_J_numpy = _cached_J_numpy

        # Read game dimensions (defaults correspond to a 2-drone setup).
        self.M = getattr(self.game, 'n_players', 2)  # number of players
        self.n_states = getattr(self.game, 'n_states', 12)  # total state dim
        self.n_controls = getattr(self.game, 'n_controls', 6)  # total control dim

        # Per-player control/state dimensions, taken from the game object
        # when available, otherwise split evenly among players.
        if hasattr(self.game, 'control_dim_per_player'):
            self.n_controls_per_player = self.game.control_dim_per_player
        else:
            self.n_controls_per_player = self.n_controls // self.M

        if hasattr(self.game, 'state_dim_per_player'):
            self.n_states_per_player = self.game.state_dim_per_player
        else:
            self.n_states_per_player = self.n_states // self.M

        # Build the MCP problem (CasADi symbolic expressions).
        self._build_mcp_problem()

    def _build_mcp_problem(self):
        """Build the symbolic sizing of the MCP problem (cf. PATHMCP.py)."""
        # print(f"\n=== Building MCP problem ===")
        # print(f"players: {self.M}")
        # print(f"time steps: {self.N}")
        # print(f"state dim: {self.n_states}")
        # print(f"control dim: {self.n_controls}")
        # print(f"controls per player: {self.n_controls_per_player}")

        # Decision-variable dimensions.
        self.n_u = self.n_controls  # total control dimension
        self.n_q = self.n_states    # total state dimension

        # Total number of control variables = horizon * n_controls.
        self.n_vars = self.N * self.n_u

        # Constraint count bookkeeping.
        self.n_c = [0 for _ in range(self.N + 1)]  # constraints per time step

        # Dynamics constraints (N * n_states) are handled implicitly by
        # substituting the rolled-out state sequence, not as explicit
        # constraints. Per-time-step constraints:
        # - collision: 1
        # - velocity: 12 (upper/lower bound on 3 velocity components x 2 drones)
        # - altitude: 4 (upper/lower altitude bound x 2 drones)
        # NOTE(review): this 17-constraint layout hard-codes exactly two
        # drones with 6 states each; confirm before using with M != 2.
        for k in range(self.N + 1):
            self.n_c[k] = 1 + 12 + 4  # collision + velocity + altitude = 17

        # Total number of constraints.
        total_constraints = sum(self.n_c)
        # print(f"total constraints: {total_constraints} (17 per step: collision 1 + velocity 12 + altitude 4)")

        # Total MCP variables = controls + Lagrange multipliers.
        self.n_mcp_vars = self.n_vars + total_constraints
        self.n_constraints = total_constraints

        # print(f"decision variables: {self.n_vars}")
        # print(f"total MCP variables: {self.n_mcp_vars}")

        # Build CasADi symbolic expressions.
        self._build_casadi_functions()

    def _build_casadi_functions(self):
        """Build the CasADi symbolic functions (cf. PATHMCP.py)."""
        # print("\nBuilding CasADi symbolic functions...")

        # Control placeholders: u_0, ..., u_{N-1}.
        u_ph = []
        for k in range(self.N):
            u_ph.append(ca.MX.sym(f'u_{k}', self.n_u))

        # Initial-state placeholder.
        x0_ph = ca.MX.sym('x0', self.n_q)

        # Previous-control placeholder (used in the cost evaluation).
        u_prev_ph = ca.MX.sym('u_prev', self.n_u)

        # [Key fix] Target-position placeholders — the target is a parameter
        # of the symbolic problem rather than a baked-in constant.
        target_ph = []
        for player in range(self.M):
            target_ph.append(ca.MX.sym(f'target_{player}', 3))  # 3-D target per player

        # Roll out the state sequence using the game's dynamics.
        x_ph = [x0_ph]
        for k in range(self.N):
            # One symbolic dynamics step; _casadi_dynamics bridges the game's
            # step function into CasADi form.
            x_next = self._casadi_dynamics(x_ph[k], u_ph[k])
            x_ph.append(x_next)

        # Build each player's total cost.
        J = [ca.MX.zeros(1) for _ in range(self.M)]  # per-player total cost
        for player in range(self.M):
            cost_terms = []
            for k in range(self.N):
                # Stage cost; the "previous control" at k=0 comes from the
                # u_prev parameter, otherwise from the preceding stage.
                if k == 0:
                    uk_prev = u_prev_ph[player*self.n_controls_per_player:(player+1)*self.n_controls_per_player]
                else:
                    uk_prev = u_ph[k-1][player*self.n_controls_per_player:(player+1)*self.n_controls_per_player]

                # [Fix] pass the parameterized target through.
                cost_k = self._casadi_cost(x_ph[k], u_ph[k], player, uk_prev, target_ph[player], is_terminal=False)
                cost_terms.append(cost_k)

            # Terminal cost (zero control at the terminal stage).
            cost_N = self._casadi_cost(x_ph[-1], ca.MX.zeros(self.n_u), player, u_ph[-1][player*self.n_controls_per_player:(player+1)*self.n_controls_per_player], target_ph[player], is_terminal=True)
            cost_terms.append(cost_N)

            J[player] = ca.sum1(ca.vertcat(*cost_terms))

        # Build the constraint stack, 17 constraints per time step.
        C = []
        for k in range(self.N + 1):
            # Collision constraint (1).
            c_collision = self._casadi_collision_constraint(x_ph[k])

            # Velocity constraints (12).
            c_velocity = self._casadi_velocity_constraints(x_ph[k])

            # Altitude constraints (4).
            c_altitude = self._casadi_altitude_constraints(x_ph[k])

            # Combined per-step constraints (17 total).
            c_all = ca.vertcat(c_collision, c_velocity, c_altitude)
            C.append(c_all)

        # Stack the sequences into flat vectors.
        u_vec = ca.vertcat(*u_ph)
        x_vec = ca.vertcat(*x_ph)
        c_vec = ca.vertcat(*C)

        # [Fix] define target_vec_ph before it is used below.
        target_vec_ph = ca.vertcat(*target_ph)  # all targets stacked into one vector

        # Create the CasADi functions.
        # [Fix] every function takes the target parameter as an input.
        self.f_dynamics = ca.Function('dynamics', [u_vec, x0_ph, u_prev_ph, target_vec_ph], [x_vec])
        self.f_cost = ca.Function('cost', [u_vec, x0_ph, u_prev_ph, target_vec_ph], J)
        self.f_constraints = ca.Function('constraints', [u_vec, x0_ph, u_prev_ph, target_vec_ph], [c_vec])

        # Lagrangian and KKT conditions.
        l_ph = ca.MX.sym('lambda', sum(self.n_c))

        # Cost gradient (used for the multiplier warm start).
        J_total = ca.sum1(ca.vertcat(*J))
        grad_J = ca.gradient(J_total, u_vec)
        self.f_grad_cost = ca.Function('grad_cost', [u_vec, x0_ph, u_prev_ph, target_vec_ph], [grad_J])

        # Constraint Jacobian (used for the multiplier warm start).
        jac_C = ca.jacobian(c_vec, u_vec)
        self.f_jac_constraints = ca.Function('jac_constraints', [u_vec, x0_ph, u_prev_ph, target_vec_ph], [jac_C])

        # Lagrangian L = sum(J) - lambda^T * g(u).
        # Constraints are in the form g(u) >= 0, hence the minus sign.
        L_total = J_total - ca.dot(l_ph, c_vec)

        # KKT stationarity: grad_u L = 0.
        grad_L = ca.gradient(L_total, u_vec)

        # MCP residual F = [grad_u L; g(u)].
        # With g(u) >= 0 and multipliers lambda >= 0, complementarity reads
        # lambda^T g(u) = 0, i.e. lambda_i > 0 => g_i(u) = 0 and vice versa.
        F_mcp = ca.vertcat(grad_L, c_vec)

        # Create the F and J (Jacobian) functions over z = [u; lambda].
        z_ph = ca.vertcat(u_vec, l_ph)

        # [Fix] the target is an explicit input parameter.
        self.F = ca.Function('F', [z_ph, x0_ph, u_prev_ph, target_vec_ph], [F_mcp])

        # Jacobian of the MCP residual.
        J_mcp = ca.jacobian(F_mcp, z_ph)
        self.J = ca.Function('J', [z_ph, x0_ph, u_prev_ph, target_vec_ph], [J_mcp])

        # print("CasADi symbolic functions built")
        # print(f"  - F input dim: {self.n_mcp_vars}")
        # print(f"  - F output dim: {self.n_mcp_vars}")
        # print(f"  - J output dim: {self.n_mcp_vars} x {self.n_mcp_vars}")

        # Enable JIT-style optimization for performance.
        # NOTE(review): jit_options is currently unused — expand() below is
        # the only optimization actually applied; confirm whether JIT
        # compilation was intended to be wired in.
        jit_options = {
            'flags': ['-O3'],  # highest optimization level
            'verbose': False
        }
        try:
            self.F = self.F.expand()  # expand MX to SX to allow JIT
            self.J = self.J.expand()
            # print("  - MX expressions expanded to SX")
        except Exception as e:
            # print(f"  - warning: could not expand expressions ({e})")
            pass

        # print("CasADi function construction and optimization complete")

    def _casadi_dynamics(self, x: ca.MX, u: ca.MX) -> ca.MX:
        """CasADi version of the dynamics function (one time step)."""
        # Use the game object's step_func_casadi when available.
        if hasattr(self.game, 'step_func_casadi'):
            # Propagate each player's sub-state independently.
            x_next = ca.MX.zeros(self.n_q)
            for player in range(self.M):
                x_start = player * self.n_states_per_player
                x_end = (player + 1) * self.n_states_per_player
                u_start = player * self.n_controls_per_player
                u_end = (player + 1) * self.n_controls_per_player

                x_player = x[x_start:x_end]
                u_player = u[u_start:u_end]

                x_next_player = self.game.step_func_casadi(x_player, u_player)
                x_next[x_start:x_end] = x_next_player

            return x_next
        else:
            # Simple linear approximation of the dynamics.
            # NOTE(review): this fallback hard-codes a two-player control
            # layout; confirm it is only reached in the 2-player case.
            return x + 0.1 * ca.vertcat(u[0:self.n_states_per_player], u[self.n_controls_per_player:self.n_controls_per_player+self.n_states_per_player])

    def _casadi_cost(self, x: ca.MX, u: ca.MX, player: int, u_prev: ca.MX, target: ca.MX, is_terminal: bool) -> ca.MX:
        """CasADi version of the cost function — collision cost removed,
        relying solely on hard constraints.

        Args:
            x: full state vector (CasADi symbol).
            u: full control vector (CasADi symbol).
            player: player index whose cost is being built.
            u_prev: previous control of this player (currently unused in the
                cost terms below).
            target: parameterized target position [3] (CasADi symbol).
            is_terminal: whether this is the terminal-stage cost (currently
                does not change the expression).
        """
        u_start = player * self.n_controls_per_player
        u_end = (player + 1) * self.n_controls_per_player
        x_start = player * self.n_states_per_player
        x_end = (player + 1) * self.n_states_per_player

        x_player = x[x_start:x_end]
        u_player = u[u_start:u_end]

        # [Fix] target is now an input parameter, no longer read from the
        # game — this makes the target an optimizable parameter.

        # 1. Waypoint-tracking cost, with a heavier weight on Z.
        pos = x_player[0:3]
        # Per-axis weights: Z is weighted 5x relative to X,Y (Z responds slowly).
        pos_cost = 0.01 * (ca.sumsqr(pos[0:2] - target[0:2])  # X,Y weight 0.01
                          + 5.0 * ca.sumsqr(pos[2] - target[2]))  # Z weight 0.05

        # 2. Control cost: directly penalize control magnitude.
        control_cost = 0.0001 * ca.sumsqr(u_player)

        # 3. Velocity limits: enforced via hard constraints, not penalized here.

        # 4. Altitude limits: enforced via hard constraints, not penalized here.

        # 5. Collision avoidance: enforced via hard constraints, not penalized here.

        return pos_cost + control_cost

    def _casadi_collision_constraint(self, x: ca.MX) -> ca.MX:
        """CasADi version of the collision constraint.

        Constraint: distance(pos1, pos2) - collision_radius >= 0,
        i.e. the center-to-center distance between the two drones must be at
        least the collision radius.

        Key fix: a previous version used 2*radius, which made the constraint
        overly strict.
        """
        # Positions of the two drones (assumes the 2-drone 12-state layout).
        pos1 = x[0:3]
        pos2 = x[6:9]

        # Center-to-center distance.
        distance = ca.norm_2(pos1 - pos2)

        # Constraint: distance - collision_radius >= 0.
        # Fix: use 1x radius rather than 2x (the previous bug).
        collision_radius = getattr(self.game, 'collision_radius', 1.0)
        return distance - collision_radius

    def _casadi_velocity_constraints(self, x: ca.MX) -> ca.MX:
        """CasADi version of the velocity constraints (12 total)."""
        velocity_limit = getattr(self.game, 'velocity_limit', 15.0)

        constraints = []

        # Drone 1 velocity bounds.
        vel1 = x[3:6]  # [vx1, vy1, vz1]
        for i in range(3):
            constraints.append(velocity_limit - vel1[i])  # v_max - v >= 0
            constraints.append(velocity_limit + vel1[i])  # v_max + v >= 0

        # Drone 2 velocity bounds.
        vel2 = x[9:12]  # [vx2, vy2, vz2]
        for i in range(3):
            constraints.append(velocity_limit - vel2[i])
            constraints.append(velocity_limit + vel2[i])

        return ca.vertcat(*constraints)  # 12 constraints

    def _casadi_altitude_constraints(self, x: ca.MX) -> ca.MX:
        """CasADi version of the altitude constraints (4 total)."""
        altitude_limits = getattr(self.game, 'altitude_limits', (20.0, 40.0))
        z_min, z_max = altitude_limits

        z1 = x[2]   # drone 1 altitude
        z2 = x[8]   # drone 2 altitude

        constraints = ca.vertcat(
            z1 - z_min,        # z1 >= z_min
            z_max - z1,        # z1 <= z_max
            z2 - z_min,        # z2 >= z_min
            z_max - z2         # z2 <= z_max
        )

        return constraints  # 4 constraints


    def solve_game(
        self,
        initial_state: torch.Tensor,
        warm_start: Optional[torch.Tensor] = None,
        warm_start_key: Optional[bytes] = None
    ) -> Dict[str, Any]:
        """
        Solve the game (cf. PATHMCP.py's solve method).

        Args:
            initial_state: initial state [n_states].
            warm_start: warm-start point [n_vars] (control variables only).
            warm_start_key: optional key into the internal warm-start cache.

        Returns:
            Game solution dictionary (controls, states, multipliers, stats).
        """
        # print(f"\n=== Solving game ===")

        # Convert the initial state to numpy.
        x0 = initial_state.detach().cpu().numpy()
        u_prev = np.zeros(self.n_u)

        # [Fix] read the current target values from the game, falling back
        # to fixed defaults when target_positions is absent.
        target_vec = []
        for player in range(self.M):
            if hasattr(self.game, 'target_positions'):
                target = self.game.target_positions[player].detach().cpu().numpy()
            else:
                target = np.array([5.0 if player == 0 else 0.0, 0.0 if player == 0 else -5.0, 30.0])
            target_vec.extend(target)
        target_vec = np.array(target_vec)  # [M*3]

        # print(f"  target parameters: {target_vec}")

        # Initialize the control sequence: explicit warm start > keyed cache
        # > last successful solution > zeros.
        cached_from_key = None
        if warm_start is not None:
            u = warm_start.detach().cpu().numpy()
        else:
            if warm_start_key is not None:
                cached_from_key = self._get_cached_warm_start(warm_start_key)
            if cached_from_key is not None:
                u = cached_from_key
            elif self._warm_start_enabled and self._last_control_warm_start is not None:
                u = self._last_control_warm_start.copy()
            else:
                u = np.zeros(self.n_vars)

        # Initialize the Lagrange multipliers (warm start via CasADi AD).
        # print("initializing Lagrange multipliers...")
        # Use the CasADi gradient function (much faster than finite differences).
        # [Fix] pass the target_vec parameter through.
        q = np.array(self.f_grad_cost(u, x0, u_prev, target_vec)).squeeze()
        # Constraint Jacobian via CasADi.
        G = np.array(self.f_jac_constraints(u, x0, u_prev, target_vec))

        # Least-squares multiplier estimate: min ||G^T @ lambda + q||^2 s.t. lambda >= 0,
        # equivalent to solving (G @ G^T) @ lambda = -G @ q, lambda >= 0.
        try:
            import scipy.sparse.linalg as spla
            l = np.maximum(0, -spla.lsqr(G @ G.T, G @ q)[0])
        except Exception as e:
            # print(f"  warning: least-squares solve failed ({e}), using zero init")
            l = np.zeros(self.n_constraints)

        # Initial point z0 = [u; lambda].
        z0 = np.concatenate([u, l])

        # print(f"initial point dim: {len(z0)}")
        # print(f"  - controls: {len(u)}")
        # print(f"  - multipliers: {len(l)}")

        # Update the problem cache so the F/J closures reuse this structure.
        self._problem_cache['x0'] = x0
        self._problem_cache['u_prev'] = u_prev
        self._problem_cache['target_vec'] = target_vec

        # Variable bounds.
        lb = np.full(self.n_mcp_vars, -np.inf)
        ub = np.full(self.n_mcp_vars, np.inf)

        # Control bounds (from the game, when provided).
        if hasattr(self.game, 'get_control_bounds'):
            ctrl_lb, ctrl_ub = self.game.get_control_bounds()
            ctrl_lb_np = ctrl_lb.cpu().numpy()
            ctrl_ub_np = ctrl_ub.cpu().numpy()

            # Tile the per-step bounds across all time steps.
            for k in range(self.N):
                start_idx = k * self.n_u
                end_idx = (k + 1) * self.n_u
                lb[start_idx:end_idx] = ctrl_lb_np
                ub[start_idx:end_idx] = ctrl_ub_np

        # Lagrange multipliers are nonnegative.
        lb[self.n_vars:] = 0.0

        # print(f"\ncalling MCP solver...")
        # print(f"solver type: {self.solver_type}")

        # Solve the MCP (PATH backend also takes the Jacobian and warm-start flag).
        solve_start = time.perf_counter()
        if self.solver_type == "path":
            result = self.mcp_solver.solve(
                self._cached_F_numpy, self._cached_J_numpy, z0, lb, ub,
                use_warm_start=self._warm_start_enabled
            )
        else:
            result = self.mcp_solver.solve(
                self._cached_F_numpy, z0, lb, ub
            )
        solve_duration = time.perf_counter() - solve_start

        # Split the solution into controls and multipliers.
        solution = result['solution']
        u_sol = solution[:self.n_vars]
        l_sol = solution[self.n_vars:]

        # print(f"\nsolve complete:")
        # print(f"  success: {result['success']}")
        # print(f"  status: {result.get('status', 'unknown')}")
        if 'residual' in result:
            # print(f"  residual: {result['residual']:.6e}")
            pass
        if 'stationarity' in result:
            # print(f"  stationarity: {result['stationarity']:.6e}")
            # print(f"  feasibility: {result['feasibility']:.6e}")
            # print(f"  complementarity: {result['complementarity']:.6e}")
            pass

        # Roll out the state sequence from the solved controls.
        # [Fix] pass the target_vec parameter through.
        x_seq = np.array(self.f_dynamics(u_sol, x0, u_prev, target_vec)).squeeze()

        # Reshape the control sequence to [N, n_u].
        u_seq = u_sol.reshape((self.N, self.n_u))

        # Reshape the state sequence to [N+1, n_q].
        x_seq_reshaped = x_seq.reshape((self.N + 1, self.n_q))

        # Convert to torch tensors.
        controls_torch = torch.from_numpy(u_seq).float()
        states_torch = torch.from_numpy(x_seq_reshaped).float()
        multipliers_torch = torch.from_numpy(l_sol).float()
        # Total cost summed over all players.
        cost_np = np.array(self.f_cost(u_sol, x0, u_prev, target_vec)).squeeze()
        total_cost = float(np.sum(cost_np)) if cost_np.ndim > 0 else float(cost_np)

        # Record solve statistics (for diagnosing performance issues).
        self._record_solver_stats(result, solve_duration)

        payload = {
            'controls': controls_torch,
            'states': states_torch,
            'multipliers': multipliers_torch,
            'success': result['success'],
            'residual': result.get('residual', 0.0),
            'status': result.get('status', 'unknown'),
            'stationarity': result.get('stationarity', 0.0),
            'feasibility': result.get('feasibility', 0.0),
            'complementarity': result.get('complementarity', 0.0),
            'cost': total_cost,
        }

        # Update warm-start state: only successful solves are cached.
        if self._warm_start_enabled:
            if result['success']:
                self._last_control_warm_start = u_sol.reshape(-1).copy()
            else:
                self._last_control_warm_start = None
        if result['success']:
            self._save_warm_start(warm_start_key, u_sol.reshape(-1))

        return payload

    def clear_solver_cache(self):
        """
        Clear the solver caches.

        Used to:
        - reset solver state between training epochs,
        - avoid performance decay from warm-start cache accumulation,
        - prevent state pollution across batches.

        Only meaningful for the PATH backend; a no-op otherwise.
        """
        if self._solver_type != "path":
            return

        solver_backend = getattr(self, 'mcp_solver', None)
        if solver_backend is None:
            return

        # Delegate to whichever cache-clearing hook the backend exposes.
        if hasattr(solver_backend, 'clear_solver_cache'):
            solver_backend.clear_solver_cache()
        elif hasattr(solver_backend, 'clear_cache'):
            solver_backend.clear_cache()
        self._last_control_warm_start = None
        self._warm_start_cache.clear()

    def _create_empty_stats(self) -> Dict[str, Any]:
        # Fresh zeroed statistics record for solver-call monitoring.
        return {
            'calls': 0,
            'success': 0,
            'failures': 0,
            'time_limit': 0,
            'solve_time': 0.0,
            'status_counts': {}
        }

    def reset_batch_stats(self):
        """Reset the statistics for a single DataLoader batch."""
        self._batch_stats = self._create_empty_stats()

    def get_batch_stats(self) -> Dict[str, int]:
        """Return the solve statistics for the current batch (shallow copy)."""
        stats_copy = dict(self._batch_stats)
        stats_copy['status_counts'] = dict(self._batch_stats.get('status_counts', {}))
        return stats_copy

    def _record_solver_stats(self, result: Dict[str, Any], duration: float = 0.0) -> None:
        """Record solver-call statistics and track time-limit hits."""
        # Defensive re-initialization in case the attributes were not set.
        if not hasattr(self, '_batch_stats'):
            self.reset_batch_stats()
        if not hasattr(self, '_total_stats'):
            self._total_stats = self._create_empty_stats()

        status = str(result.get('status', '') or '').lower()
        # Common PATH statuses are "time_limit" or "Time limit"; also accept
        # other spellings.
        time_limit_hit = 'time_limit' in status or 'time limit' in status or status.strip() == 'timelimit'
        success = bool(result.get('success', False))

        self._batch_stats['calls'] += 1
        self._total_stats['calls'] += 1
        self._batch_stats['solve_time'] += float(duration)
        self._total_stats['solve_time'] += float(duration)
        if not success:
            self._batch_stats['failures'] += 1
            self._total_stats['failures'] += 1
        else:
            self._batch_stats['success'] += 1
            self._total_stats['success'] += 1
        if time_limit_hit:
            self._batch_stats['time_limit'] += 1
            self._total_stats['time_limit'] += 1
        if status:
            # Per-status frequency tallies for both batch and total scopes.
            status_counts = self._batch_stats.setdefault('status_counts', {})
            total_status_counts = self._total_stats.setdefault('status_counts', {})
            status_counts[status] = status_counts.get(status, 0) + 1
            total_status_counts[status] = total_status_counts.get(status, 0) + 1

    def _get_cached_warm_start(self, key: Optional[bytes]) -> Optional[np.ndarray]:
        # LRU lookup: a hit is moved to the most-recently-used position and
        # returned as a defensive copy.
        if key is None:
            return None
        cached = self._warm_start_cache.get(key)
        if cached is None:
            return None
        self._warm_start_cache.move_to_end(key)
        return cached.copy()

    def _save_warm_start(self, key: Optional[bytes], controls_flat: np.ndarray) -> None:
        # LRU insert: store a copy, mark it most-recently-used, and evict the
        # oldest entry when the cache exceeds its size bound.
        if key is None:
            return
        self._warm_start_cache[key] = controls_flat.copy()
        self._warm_start_cache.move_to_end(key)
        if len(self._warm_start_cache) > self._warm_start_cache_size:
            self._warm_start_cache.popitem(last=False)

    def set_warm_start_enabled(self, enabled: bool):
        """Enable or disable the internal warm-start caching."""
        self._warm_start_enabled = bool(enabled)
        if not self._warm_start_enabled:
            # Disabling also drops any previously cached warm starts.
            self._last_control_warm_start = None
            self._warm_start_cache.clear()


class DifferentiableMCPGameSolver(torch.autograd.Function):
    """
    Differentiable game MCP solver based on the Implicit Function Theorem (IFT).

    Theory (see paper Section B): for a parameterized MCP problem
    Psi(theta) := (F(.; theta), l, u) with solution z* in R^n, we want the
    Jacobian grad_theta z* = (dz*_i/dtheta_k) in R^{n x p}.

    Key idea:
    1. Split z* into an active boundary set I and an inactive set I-bar
       (assuming strict complementarity holds).
    2. For the inactive part z-bar* := [z*]_{I-bar}, differentiating
       0 = F-bar(z-bar*(theta), theta) via the IFT gives
       0 = grad_theta F-bar + (grad_{z-bar*} F-bar)(grad_theta z-bar*)
       (the active set stays fixed, so its contribution vanishes).
    3. If the Jacobian grad_{z-bar*} F-bar is invertible:
       grad_theta z-bar* = -(grad_{z-bar*} F-bar)^{-1} (grad_theta F-bar)
       [paper Eq. 10]

    Implementation:
    - forward: solve the MCP to obtain z* = [u*, lambda*]
    - backward: compute grad_theta z* via the IFT and chain-rule the
      incoming gradient through it.

    NOTE(review): forward takes 5 inputs, so every backward path must
    return a 5-tuple of gradients (previously some paths returned only 3,
    which makes autograd raise at runtime).
    """

    @staticmethod
    def forward(ctx, theta_stacked, initial_state, mcp_game_solver, warm_start=None, warm_start_key=None):
        """
        Forward pass: solve the parameterized MCP.

        Only ``controls`` is returned, NOT states! States should be rebuilt
        outside through PyTorch autograd from the controls, so the gradient
        chain d(states)/d(controls) is handled by automatic differentiation.

        Args:
            theta_stacked: tensor[n_players, 3] - stacked parameter estimates (theta)
            initial_state: tensor[n_states] - initial state
            mcp_game_solver: MCPGameSolver instance
            warm_start: optional warm-start controls passed through to the solver
            warm_start_key: optional key for the solver's warm-start cache

        Returns:
            controls: tensor[horizon, n_controls] - control sequence
                (differentiable w.r.t. theta through this Function)
        """
        n_players = theta_stacked.shape[0]
        # theta_estimates[i] is player i's target parameter (direct mapping).
        theta_estimates = {pid: theta_stacked[pid] for pid in range(n_players)}

        # Temporarily overwrite the game targets with the parameterized ones.
        game = mcp_game_solver.game
        original_targets = {}
        for player_id in range(mcp_game_solver.M):
            original_targets[player_id] = game.target_positions[player_id].clone()
            game.target_positions[player_id] = theta_estimates[player_id].detach()

        # Solve the MCP without tracking gradients (backward uses the IFT).
        with torch.no_grad():
            result = mcp_game_solver.solve_game(
                initial_state,
                warm_start=warm_start,
                warm_start_key=warm_start_key
            )
            controls_np = result['controls'].cpu().numpy()
            multipliers = result['multipliers']

            # Full MCP solution as numpy, used later to evaluate Jacobians.
            z_star = np.concatenate([
                controls_np.reshape(-1),
                multipliers.cpu().numpy()
            ])

        # Restore the original targets so the game object is left unchanged.
        for player_id, target in original_targets.items():
            game.target_positions[player_id] = target

        # Create the output WITHOUT requires_grad: PyTorch wraps the return
        # value with a grad_fn pointing at this Function automatically.
        controls = torch.from_numpy(controls_np).float().to(theta_stacked.device)

        # Save only what backward needs; states are rebuilt externally.
        ctx.save_for_backward(theta_stacked, initial_state, controls, multipliers)
        ctx.mcp_game_solver = mcp_game_solver
        ctx.z_star_np = z_star

        return controls

    @staticmethod
    def backward_jax(ctx, grad_controls):
        """
        IFT backward pass using JAX's linear solver.

        Uses ``jnp.linalg.solve`` instead of a manual least-squares solve
        (closer to the Julia reference implementation) while keeping the
        manual Jacobian evaluation of the MCP function.

        Args:
            grad_controls: tensor[horizon, n_controls] - dL/d(controls)

        Returns:
            5-tuple matching forward's inputs:
            (grad_theta_stacked, None, None, None, None)
        """
        if not JAX_AVAILABLE:
            raise RuntimeError("JAX not available. Install with: pip install jax jaxlib")

        theta_stacked, initial_state, controls, multipliers = ctx.saved_tensors
        mcp_game_solver = ctx.mcp_game_solver
        z_star_np = ctx.z_star_np

        n_players = theta_stacked.shape[0]
        n_theta = n_players * 3

        # 1. Loss gradient w.r.t. the full MCP variable z = [u, lambda].
        #    The loss does not depend on the multipliers, so dL/dlambda = 0.
        n_controls_total = controls.shape[0] * controls.shape[1]
        if grad_controls is None or torch.allclose(grad_controls, torch.zeros_like(grad_controls)):
            dL_du = np.zeros(n_controls_total)
        else:
            dL_du = grad_controls.reshape(-1).detach().cpu().numpy()
        n_multipliers = multipliers.shape[0]
        dL_dz = np.concatenate([dL_du, np.zeros(n_multipliers)])

        # 2. Parameters for the Jacobian evaluations.
        x0_np = initial_state.detach().cpu().numpy()
        u_prev_np = np.zeros(mcp_game_solver.n_u)
        target_vec = np.concatenate([
            theta_stacked[pid].detach().cpu().numpy() for pid in range(n_players)
        ])

        try:
            # 3. grad_z F at the solution.
            J_F_z = np.array(mcp_game_solver.J(z_star_np, x0_np, u_prev_np, target_vec))

            # grad_theta F via forward finite differences. F at the
            # unperturbed point does not depend on the perturbed index,
            # so it is evaluated once outside the loop.
            dF_dtheta = np.zeros((len(z_star_np), n_theta))
            eps = 1e-4
            target_vec_np = np.array(target_vec, dtype=np.float64)
            F_orig = np.array(
                mcp_game_solver.F(z_star_np, x0_np, u_prev_np, target_vec_np)
            ).squeeze()
            for param_idx in range(n_theta):
                target_vec_plus = target_vec_np.copy()
                target_vec_plus[param_idx] = target_vec_np[param_idx] + eps
                F_plus = np.array(
                    mcp_game_solver.F(z_star_np, x0_np, u_prev_np, target_vec_plus)
                ).squeeze()
                dF_dtheta[:, param_idx] = (F_plus - F_orig) / eps

            # 4. Active-set identification (simplified): control variables
            #    are always treated as inactive; a Lagrange multiplier near
            #    zero marks its constraint as inactive.
            tol_active = 1e-4
            n_u = n_controls_total
            inactive_mask = np.ones(len(z_star_np), dtype=bool)
            lambda_vals = z_star_np[n_u:]
            inactive_mask[n_u:][np.abs(lambda_vals) < tol_active] = False

            inactive_idx = np.where(inactive_mask)[0]
            if len(inactive_idx) == 0:
                # Everything sits on an active boundary: zero gradient.
                return torch.zeros_like(theta_stacked), None, None, None, None

            # 5./6. Reduce to the inactive set and solve paper Eq. 10 with
            # JAX's linear solver.
            J_bar = jnp.array(J_F_z[np.ix_(inactive_idx, inactive_idx)])
            dF_bar = jnp.array(dF_dtheta[inactive_idx, :])
            dz_bar_dtheta = np.array(-jnp.linalg.solve(J_bar, dF_bar))

            # 7. Expand back to the full space (active entries get zero).
            dz_dtheta = np.zeros((len(z_star_np), n_theta))
            dz_dtheta[inactive_idx, :] = dz_bar_dtheta

            # 8. Chain rule: dL/dtheta = dL/dz @ dz/dtheta.
            dL_dtheta = dL_dz @ dz_dtheta

            # 9. Back to torch, shaped [n_players, 3].
            grad_theta_stacked = (
                torch.from_numpy(dL_dtheta)
                .float()
                .to(theta_stacked.device)
                .reshape(n_players, 3)
            )
            if torch.isnan(grad_theta_stacked).any():
                grad_theta_stacked = torch.zeros_like(grad_theta_stacked)
            return grad_theta_stacked, None, None, None, None

        except Exception:
            # Fall back to a zero gradient on any numerical failure.
            return torch.zeros_like(theta_stacked), None, None, None, None

    @staticmethod
    def backward(ctx, grad_controls):
        """
        Backward pass: compute gradients via the Implicit Function Theorem.

        Two implementations are supported:
        1. JAX-backed linear solve (preferred, similar to the Julia code)
        2. Manual IFT with scipy least squares (fallback, numerically
           less stable)

        Paper Eq. 12 (simplified):
            grad_theta L = (grad_u L) . (grad_theta u*)
        where grad_u L arrives in ``grad_controls`` (PyTorch autograd has
        already folded in dL/dstates * dstates/dcontrols externally) and
        grad_theta u* is computed here via the IFT.

        Args:
            grad_controls: tensor[horizon, n_controls]

        Returns:
            5-tuple, one entry per forward input; only theta receives a
            gradient: (grad_theta_stacked, None, None, None, None)
        """
        # Prefer the JAX implementation when available.
        if JAX_AVAILABLE:
            try:
                return DifferentiableMCPGameSolver.backward_jax(ctx, grad_controls)
            except Exception:
                pass  # fall through to the manual IFT implementation

        theta_stacked, initial_state, controls, multipliers = ctx.saved_tensors
        mcp_game_solver = ctx.mcp_game_solver
        z_star_np = ctx.z_star_np

        # 1. dL/du* from PyTorch (complete chain rule up to the controls).
        n_controls_total = controls.shape[0] * controls.shape[1]
        if grad_controls is None or torch.allclose(grad_controls, torch.zeros_like(grad_controls)):
            dL_du = np.zeros(n_controls_total)
        else:
            dL_du = grad_controls.reshape(-1).detach().cpu().numpy()

        # The loss does not depend on the Lagrange multipliers directly,
        # so dL/dlambda = 0.
        n_multipliers = multipliers.shape[0]
        dL_dz = np.concatenate([dL_du, np.zeros(n_multipliers)])

        n_players = theta_stacked.shape[0]

        # Only compute dF/dtheta columns for players whose theta actually
        # requires a gradient; the others keep zero columns.
        theta_needs_grad = [theta_stacked[pid].requires_grad for pid in range(n_players)]

        x0_np = initial_state.detach().cpu().numpy()
        u_prev_np = np.zeros(mcp_game_solver.n_u)
        target_vec = np.concatenate([
            theta_stacked[pid].detach().cpu().numpy() for pid in range(n_players)
        ])

        try:
            # 2a. grad_z F at the solution.
            J_F_z = np.array(mcp_game_solver.J(z_star_np, x0_np, u_prev_np, target_vec))

            # 2b. grad_theta F via forward finite differences. eps = 1e-4
            # (smaller perturbations proved too noisy in practice). F at
            # the unperturbed point is hoisted out of the loop.
            n_theta = n_players * 3
            dF_dtheta = np.zeros((len(z_star_np), n_theta))
            eps = 1e-4
            target_vec_np = np.array(target_vec, dtype=np.float64)
            F_orig = np.array(
                mcp_game_solver.F(z_star_np, x0_np, u_prev_np, target_vec_np)
            ).squeeze()
            for player_id in range(n_players):
                if not theta_needs_grad[player_id]:
                    continue  # requires_grad=False: leave columns at zero
                for dim in range(3):  # x, y, z
                    param_idx = player_id * 3 + dim
                    target_vec_plus = target_vec_np.copy()
                    # Explicit assignment rather than += (avoids accumulating
                    # perturbations across iterations on a shared buffer).
                    target_vec_plus[param_idx] = target_vec_np[param_idx] + eps
                    F_plus = np.array(
                        mcp_game_solver.F(z_star_np, x0_np, u_prev_np, target_vec_plus)
                    ).squeeze()
                    dF_dtheta[:, param_idx] = (F_plus - F_orig) / eps

            # 2c. Active-set identification (paper Section B.1).
            # Control variables are always treated as inactive: even when a
            # control saturates, parameter changes still influence the
            # trajectory through other time steps, and a strict active-set
            # test would zero the gradient entirely. Only the Lagrange
            # multipliers decide activity (complementary slackness:
            # lambda ~ 0 means the constraint is inactive).
            tol_active = 1e-4  # relaxed from 1e-6 to avoid spurious saturation
            n_u = n_controls_total
            inactive_mask = np.ones(len(z_star_np), dtype=bool)
            lambda_vals = z_star_np[n_u:]
            for i in range(len(lambda_vals)):
                if np.abs(lambda_vals[i]) < tol_active:
                    inactive_mask[n_u + i] = False

            # Collision-constraint diagnostic: per time step, the collision
            # constraint is the first of the n_c[k] constraints. When any
            # collision multiplier is active, the active-set split becomes
            # unstable and produces discontinuous gradients, so we force
            # ALL multipliers to be treated as inactive in that case.
            collision_multipliers = []
            for k in range(mcp_game_solver.N + 1):
                constraint_base = sum(mcp_game_solver.n_c[:k])
                collision_lambda = (
                    lambda_vals[constraint_base]
                    if constraint_base < len(lambda_vals)
                    else 0.0
                )
                collision_multipliers.append(collision_lambda)
            max_collision_lambda = max(np.abs(c) for c in collision_multipliers)
            if max_collision_lambda > tol_active:
                # Force-inactive all constraint multipliers to keep the
                # gradient continuous (full-system IFT).
                inactive_mask[n_controls_total:] = True

            # Extract the inactive set (paper Eq. 8).
            inactive_idx = np.where(inactive_mask)[0]
            if len(inactive_idx) == 0:
                # Everything sits on an active boundary: zero gradient.
                return torch.zeros_like(theta_stacked), None, None, None, None

            # Reduce the system to the inactive set and solve paper Eq. 10:
            # dz_bar/dtheta = -(J_F_z_bar)^{-1} @ dF_dtheta_bar
            J_F_z_bar = J_F_z[np.ix_(inactive_idx, inactive_idx)]
            dF_dtheta_bar = dF_dtheta[inactive_idx, :]
            import scipy.linalg as la
            dz_bar_dtheta = -la.lstsq(J_F_z_bar, dF_dtheta_bar)[0]

            # Expand back to the full space (active entries get zero).
            dz_dtheta = np.zeros((len(z_star_np), n_theta))
            dz_dtheta[inactive_idx, :] = dz_bar_dtheta

            # 3. Chain rule: grad_theta L = (grad_z L)^T @ (grad_theta z).
            dL_dtheta = dL_dz @ dz_dtheta

            # 4. Back to torch, shaped [n_players, 3], on the input device.
            grad_theta_stacked = (
                torch.from_numpy(dL_dtheta)
                .float()
                .to(theta_stacked.device)
                .reshape(n_players, 3)
            )
            if torch.isnan(grad_theta_stacked).any():
                grad_theta_stacked = torch.zeros_like(grad_theta_stacked)
            return grad_theta_stacked, None, None, None, None

        except Exception:
            import traceback
            traceback.print_exc()
            # Return a zero gradient on the correct device.
            return torch.zeros_like(theta_stacked), None, None, None, None


def solve_mcp_game_differentiable(
    theta_estimates: Dict[int, torch.Tensor],
    initial_state: torch.Tensor,
    mcp_game_solver: MCPGameSolver,
    warm_start: Optional[torch.Tensor] = None,
    warm_start_key: Optional[bytes] = None
) -> torch.Tensor:
    """Convenience wrapper for the IFT-based differentiable game solve.

    Forward: solves the MCP for the optimal control sequence.
    Backward: computes grad_theta u* via the implicit function theorem.

    Important: only controls are returned, never states. States must be
    rebuilt externally from the controls through the dynamics so PyTorch
    can differentiate d(states)/d(controls) automatically.

    Args:
        theta_estimates: {player_id: tensor[3]} - per-player target estimates (theta)
        initial_state: tensor[n_states] - initial state
        mcp_game_solver: MCPGameSolver instance
        warm_start: optional warm-start controls forwarded to the solver
        warm_start_key: optional warm-start cache key

    Returns:
        controls: tensor[horizon, n_controls] - control sequence
            (differentiable, supports IFT gradients)
    """
    # autograd.Function only accepts tensors, so stack the per-player
    # parameters (ordered by player id) into one [n_players, 3] tensor.
    theta_stacked = torch.stack(
        [theta_estimates[pid] for pid in sorted(theta_estimates)]
    )
    return DifferentiableMCPGameSolver.apply(
        theta_stacked, initial_state, mcp_game_solver, warm_start, warm_start_key
    )
