import numpy as np
import gymnasium as gym
import highway_env
from scipy.linalg import solve_continuous_are
import jax
import jax.numpy as jnp

# ======================
# Vehicle parameter configuration
# ======================
L = 4.5        # wheelbase in meters (slip angle below assumes CoG at L/2)
DT = 0.1       # integration time step (s)
HORIZON = 25   # prediction horizon length (steps)
MAX_ITER = 50  # iLQR iteration cap

# ======================
# Dynamics model (JAX, so Jacobians come from autodiff)
# ======================
@jax.jit
def bicycle_model(state, u):
    """One forward-Euler step of the kinematic bicycle model.

    state: [x, y, v, psi]; u: [acceleration, steering angle].
    Returns the state advanced by DT seconds as a jnp array.

    NOTE(review): the position update uses a CoG slip angle
    (arctan(tan(delta)/2)) while the yaw rate uses the rear-axle form
    v*tan(delta)/L — confirm the mixed formulation is intentional.
    """
    pos_x, pos_y, speed, heading = state
    accel, steer = u

    slip = jnp.arctan(jnp.tan(steer) / 2)
    next_x = pos_x + speed * jnp.cos(heading + slip) * DT
    next_y = pos_y + speed * jnp.sin(heading + slip) * DT
    next_v = speed + accel * DT
    next_psi = heading + speed * jnp.tan(steer) / L * DT

    return jnp.array([next_x, next_y, next_v, next_psi])

# ======================
# Cost function and its derivatives
# ======================
@jax.jit
def cost_function(state, u, target_lane, target_speed, obstacles):
    """Stage cost: lane tracking + speed tracking + control effort + obstacles.

    state: [x, y, v, psi]; u: [acceleration, steering angle];
    obstacles: iterable of (x, y) positions. The Python loop over
    obstacles unrolls at JAX trace time, so the obstacle count is fixed
    per compiled version.
    """
    pos_x, pos_y, speed, _heading = state
    accel, steer = u

    # Quadratic tracking terms: lateral offset from the lane center-line
    # and deviation from the target speed.
    tracking = 10.0 * (pos_y - target_lane)**2 + 2.0 * (speed - target_speed)**2

    # Penalize large control inputs.
    effort = 0.5 * accel**2 + 0.2 * steer**2

    # Soft repulsion: exponentially decaying penalty on distance to
    # each surrounding vehicle.
    proximity = 0.0
    for ox, oy in obstacles:
        gap = jnp.sqrt((pos_x - ox)**2 + (pos_y - oy)**2)
        proximity += 100.0 * jnp.exp(-0.5 * gap)

    return tracking + effort + proximity

# Cost derivatives via JAX autodiff, jit-compiled: gradient and full
# Hessian of cost_function w.r.t. (state, u) (argnums 0 and 1).
# grad_cost returns (lx, lu); hess_cost returns ((lxx, lxu), (lux, luu)).
grad_cost = jax.jit(jax.grad(cost_function, argnums=(0,1)))
hess_cost = jax.jit(jax.hessian(cost_function, argnums=(0,1)))

# ======================
# iLQR solver
# ======================
class iLQRSolver:
    """Iterative LQR optimizer for the kinematic bicycle model.

    Stage-cost derivatives come from `grad_cost`/`hess_cost` (JAX
    autodiff); the dynamics `bicycle_model` are linearized around the
    current trajectory in each backward pass.
    """

    def __init__(self):
        self.alpha = 0.5  # step size applied to the feedforward correction
        self.reg = 1e-4   # regularization added to Quu before inversion
        # Build (and jit) the dynamics Jacobians once instead of
        # re-deriving them at every timestep of every backward pass.
        self._fx = jax.jit(jax.jacobian(bicycle_model, argnums=0))
        self._fu = jax.jit(jax.jacobian(bicycle_model, argnums=1))

    def backward_pass(self, states, actions, target_lane, target_speed, obstacles):
        """Riccati-style backward sweep along the current trajectory.

        Returns (K_gains, k_offsets): per-step feedback matrices (2,4)
        and feedforward vectors (2,), ordered t = 0..HORIZON-1.
        """
        n = 4  # state dimension [x, y, v, psi]
        m = 2  # control dimension [a, delta]

        # Terminal value-function expansion (no terminal cost defined).
        Vx = np.zeros(n)
        Vxx = np.zeros((n, n))

        K_list, k_list = [], []

        # The obstacle list does not change along the trajectory; convert once.
        obstacles_jax = [jnp.array(o) for o in obstacles]

        for t in reversed(range(HORIZON)):
            x_jax = jnp.array(states[t])
            u_jax = jnp.array(actions[t])

            # First and second derivatives of the stage cost.
            lx, lu = grad_cost(x_jax, u_jax, target_lane, target_speed, obstacles_jax)
            (lxx, _lxu), (lux, luu) = hess_cost(
                x_jax, u_jax, target_lane, target_speed, obstacles_jax
            )

            lx, lu = np.array(lx), np.array(lu)
            lxx, lux, luu = np.array(lxx), np.array(lux), np.array(luu)

            # Dynamics linearization: A (4,4), B (4,2).
            A = np.array(self._fx(x_jax, u_jax))
            B = np.array(self._fu(x_jax, u_jax))

            # Q-function (local action-value) expansion.
            Qx = lx + A.T @ Vx
            Qu = lu + B.T @ Vx
            Qxx = lxx + A.T @ Vxx @ A
            Quu = luu + B.T @ Vxx @ B
            Qux = lux + B.T @ Vxx @ A  # cross term, shape (2,4)

            # Regularize so Quu is safely invertible.
            Quu = Quu + self.reg * np.eye(m)

            try:
                Quu_inv = np.linalg.inv(Quu)
            except np.linalg.LinAlgError:
                Quu_inv = np.linalg.pinv(Quu)

            K = -Quu_inv @ Qux  # feedback gain (2,4)
            k = -Quu_inv @ Qu   # feedforward (2,)

            # Value-function recursion for the previous timestep.
            Vx = Qx + K.T @ Quu @ k + K.T @ Qu + Qux.T @ k
            Vxx = Qxx + K.T @ Quu @ K + Qux.T @ K + K.T @ Qux
            Vxx = 0.5 * (Vxx + Vxx.T)  # keep symmetric despite round-off

            K_list.append(K)
            k_list.append(k)

        return list(reversed(K_list)), list(reversed(k_list))

    def forward_pass(self, initial_state, K_gains, k_offsets, actions, ref_states=None):
        """Roll the dynamics forward under the updated control policy.

        ref_states: the trajectory the gains were linearized around.
        Bug fix: the old code measured the feedback term against the
        state it had just appended, so K @ (x - new_states[t]) was
        identically zero and the rollout was effectively open loop.
        Passing the previous trajectory restores the feedback; when
        ref_states is None the feedback term is zero (legacy behavior).
        The feedforward is scaled by self.alpha (previously unused).
        """
        new_states = [initial_state]
        new_actions = []

        x = initial_state.copy()
        for t in range(HORIZON):
            ref = x if ref_states is None else np.asarray(ref_states[t])
            u = actions[t] + K_gains[t] @ (x - ref) + self.alpha * k_offsets[t]
            x = np.array(bicycle_model(x, u))
            new_states.append(x)
            new_actions.append(u)

        # Drop the terminal state so lengths match the action sequence.
        return new_states[:-1], new_actions

    def optimize(self, initial_state, target_lane, target_speed, obstacles):
        """Run iLQR from initial_state and return the first optimized control."""
        # Zero-control initial guess.
        actions = [np.zeros(2) for _ in range(HORIZON)]
        states = [initial_state.copy()]

        # Initial rollout under the zero control sequence.
        x = initial_state.copy()
        for t in range(HORIZON - 1):
            x = np.array(bicycle_model(x, actions[t]))
            states.append(x)

        # Fixed number of backward/forward sweeps (no convergence test
        # or cost-based line search — candidates are always accepted).
        for _ in range(MAX_ITER):
            K_gains, k_offsets = self.backward_pass(
                states, actions, target_lane, target_speed, obstacles
            )
            states, actions = self.forward_pass(
                initial_state, K_gains, k_offsets, actions, ref_states=states
            )

        return actions[0]

# ======================
# Gym environment interface
# ======================
class HighwayILQRWrapper:
    """Closed-loop glue between the highway-env environment and iLQR."""

    def __init__(self):
        self.env = gym.make("highway-fast-v0", render_mode="rgb_array")
        # NOTE(review): the solver outputs continuous [accel, steer]
        # commands, but highway-fast-v0's default action space is discrete.
        # The config below would switch to ContinuousAction and to the
        # feature order obs_to_state assumes — confirm leaving it disabled
        # is intentional.
        # self.env.configure({
        #     "action": {"type": "ContinuousAction"},
        #     "observation": {
        #         "type": "Kinematics",
        #         "vehicles_count": 10,
        #         "features": ["x", "y", "vx", "vy", "heading"],
        #         "absolute": True
        #     },
        #     "simulation_frequency": 15,
        #     "policy_frequency": 15,
        # })
        self.solver = iLQRSolver()

    def obs_to_state(self, obs):
        """Map the ego row of a Kinematics observation to [x, y, speed, heading].

        Assumes the per-vehicle feature order [x, y, vx, vy, heading]
        (matching the commented-out config above) — TODO confirm against
        the environment's actual observation configuration.
        """
        ego = obs[0]
        return np.array([
            ego[0],                    # x
            ego[1],                    # y
            np.linalg.norm(ego[2:4]),  # speed = |(vx, vy)|
            ego[4],                    # heading
        ])

    def get_obstacles(self, obs):
        """Return (x, y) of surrounding vehicles within 50 m longitudinally."""
        return [(vehicle[0], vehicle[1]) for vehicle in obs[1:] if abs(vehicle[0] - obs[0][0]) < 50]

    def run(self):
        """Run one episode, replanning with iLQR at every step."""
        obs, info = self.env.reset()
        done = False

        while not done:
            # Current ego state and planning targets.
            ego_state = self.obs_to_state(obs)
            target_lane = 3.0    # target lane center-line y (m)
            target_speed = 25.0  # m/s (~90 km/h)
            obstacles = self.get_obstacles(obs)

            # Solve for the first control of the optimized sequence.
            action = self.solver.optimize(
                ego_state,
                target_lane,
                target_speed,
                obstacles
            )

            # Bug fix: Gymnasium's step() returns
            # (obs, reward, terminated, truncated, info). The old unpacking
            # bound `done` to `terminated` only and mislabeled the rest, so
            # a time-limit truncation never ended the loop.
            obs, reward, terminated, truncated, info = self.env.step(action)
            done = terminated or truncated
            self.env.render()

        self.env.close()

# ======================
# Main program
# ======================
if __name__ == "__main__":
    # Entry point: constructing the wrapper creates the Gym environment,
    # then run() executes one closed-loop episode.
    controller = HighwayILQRWrapper()
    controller.run()