import numpy as np 
import math
import pdb
from scipy.optimize import fmin_cobyla

from bins.PlannerLogger import PlannerLogger
from src.motionplanner.Constrained_ILQR.scripts.ilqr.obstacles import Obstacle

# Module-level planner logger instance shared by this module.
Logger = PlannerLogger()

class Constraints:
    """Cost and constraint model for the Constrained-ILQR planner.

    Provides first/second derivatives of the trajectory cost — reference
    tracking, control-limit barriers, obstacle avoidance, an optional
    target-vehicle offset term and state-boundary penalties — plus scalar
    total-cost evaluation helpers.
    """

    def __init__(self, args, obstacle_bb):
        """Build the quadratic weight matrices and per-NPC obstacle models.

        Args:
            args: configuration namespace (weights, limits, horizon, ...).
            obstacle_bb: obstacle bounding-box description forwarded to each
                ``Obstacle`` helper.
        """
        self.args = args
        # Quadratic control weights for [acceleration, yawrate].
        self.control_cost = np.array([[self.args.w_acc,                   0],
                                      [              0, self.args.w_yawrate]])

        # Quadratic state weights for [x, y, v, theta]; heading carries no cost.
        self.state_cost = np.array([[self.args.w_pos, 0, 0, 0],
                                     [0, self.args.w_pos, 0, 0],
                                     [0, 0, self.args.w_vel, 0],
                                     [0, 0, 0,               0]])
        self.coeffs = None  # desired-path polynomial coefficients (np.polyval order)

        self.number_of_npc = 1  # hardcoded; TODO: derive from the scenario

        # One Obstacle helper per NPC vehicle, keyed by NPC index.
        self.obs_constraints = {}
        for i in range(self.number_of_npc):
            self.obs_constraints[i] = Obstacle(args, i, obstacle_bb)

    def get_state_cost_derivatives(self, state, poly_coeffs, ref_traj, npc_traj, target_traj=None):
        """Return (l_x, l_xx): first and second derivatives of the state cost.

        Per horizon step this sums: reference-trajectory tracking, obstacle
        avoidance, an optional offset term against ``target_traj``, and
        state-boundary penalties.

        Args:
            state: (num_states, horizon) predicted state trajectory.
            poly_coeffs: unused here; kept for interface compatibility.
            ref_traj: (N, 3) reference points with columns [x, y, v_ref].
            npc_traj: sequence of NPC trajectories (entries may be None).
            target_traj: optional target-vehicle trajectory; rows 0/1 are x/y.
        """
        l_x = np.zeros((self.args.num_states, self.args.horizon))
        l_xx = np.zeros((self.args.num_states, self.args.num_states, self.args.horizon))
        for i in range(self.args.horizon):
            # Reference-trajectory tracking term.
            x_r, y_r, v_r = self.find_closest_point(state[:, i], ref_traj)
            diff = np.array([state[0, i] - x_r,
                             state[1, i] - y_r,
                             state[2, i] - v_r,
                             0])
            l_x_i = 2 * self.state_cost @ diff
            l_xx_i = 2 * self.state_cost

            # Obstacle-avoidance term for every NPC trajectory.
            for j in range(len(npc_traj)):
                if npc_traj[j] is None:
                    continue
                # Fix: use the obstacle model matching this NPC instead of
                # always index 0; fall back to 0 when j has no model
                # (identical behavior while number_of_npc == 1).
                obstacle = self.obs_constraints.get(j, self.obs_constraints[0])
                b_dot_obs, b_ddot_obs = obstacle.get_obstacle_cost_derivatives(
                    npc_traj[j], i, state[:, i])
                l_x_i += b_dot_obs.squeeze()
                l_xx_i += b_ddot_obs

            # Target-vehicle offset term: penalize deviation of the distance d
            # to the target from the desired offset, cost = w_offset*(d-offset)^2.
            if target_traj is not None:
                dx = state[0, i] - target_traj[0, i]
                dy = state[1, i] - target_traj[1, i]
                d = np.sqrt(dx ** 2 + dy ** 2)
                d = max(d, 1e-3)  # avoid division by zero when on top of the target
                scale = 2 * self.args.w_offset * (d - self.args.offset)
                l_x_i += np.array([scale * dx / d, scale * dy / d, 0, 0])

                # Analytic Hessian of w_offset*(d - offset)^2 w.r.t. (x, y).
                H11 = 2 * self.args.w_offset * (
                        (dx ** 2 / d ** 2) + (d - self.args.offset) / d
                        - (d - self.args.offset) * (dx ** 2) / (d ** 3))
                H22 = 2 * self.args.w_offset * (
                        (dy ** 2 / d ** 2) + (d - self.args.offset) / d
                        - (d - self.args.offset) * (dy ** 2) / (d ** 3))
                H12 = 2 * self.args.w_offset * (
                        (dx * dy) / d ** 2 - (d - self.args.offset) * dx * dy / (d ** 3))
                H_offset = np.zeros((4, 4))
                H_offset[0, 0] = H11
                H_offset[0, 1] = H12
                H_offset[1, 0] = H12
                H_offset[1, 1] = H22
                l_xx_i += H_offset

            # State-boundary penalty (x, y, v limits).
            _, grad_bound, hess_bound = self.get_state_boundary_penalty(state[:, i])
            l_x_i += grad_bound
            l_xx_i += hess_bound

            l_x[:, i] = l_x_i
            l_xx[:, :, i] = l_xx_i
        return l_x, l_xx

    def get_control_cost_derivatives(self, state, control):
        """Return (l_u, l_uu): derivatives of the control cost.

        Combines quadratic control effort with exponential barriers on the
        acceleration limits and the velocity-dependent yawrate limits.
        """
        P1 = np.array([[1], [0]])  # selects the acceleration component
        P2 = np.array([[0], [1]])  # selects the yawrate component

        l_u = np.zeros((self.args.num_ctrls, self.args.horizon))
        l_uu = np.zeros((self.args.num_ctrls, self.args.num_ctrls, self.args.horizon))
        for i in range(self.args.horizon):
            # Acceleration barrier (upper limit).
            c = (np.matmul(control[:, i].T, P1) - self.args.acc_limits[1])
            b_1, b_dot_1, b_ddot_1 = self.barrier_function(self.args.q1_acc, self.args.q2_acc, c, P1)

            # Acceleration barrier (lower limit); note the negated direction.
            c = (self.args.acc_limits[0] - np.matmul(control[:, i].T, P1))
            b_2, b_dot_2, b_ddot_2 = self.barrier_function(self.args.q1_acc, self.args.q2_acc, c, -P1)

            velocity = state[2, i]

            # Yawrate barrier (upper limit): kinematic bicycle model gives
            # max yawrate = v * tan(max steer) / wheelbase.
            c = (np.matmul(control[:, i].T, P2) - velocity * math.tan(self.args.steer_angle_limits[1]) / self.args.wheelbase)
            b_3, b_dot_3, b_ddot_3 = self.barrier_function(self.args.q1_yawrate, self.args.q2_yawrate, c, P2)

            # Yawrate barrier (lower limit).
            c = (velocity * math.tan(self.args.steer_angle_limits[0]) / self.args.wheelbase - np.matmul(control[:, i].T, P2))
            b_4, b_dot_4, b_ddot_4 = self.barrier_function(self.args.q1_yawrate, self.args.q2_yawrate, c, -P2)

            l_u_i = b_dot_1 + b_dot_2 + b_dot_3 + b_dot_4 + (2 * control[:, i].T @ self.control_cost).reshape(-1, 1)
            l_uu_i = b_ddot_1 + b_ddot_2 + b_ddot_3 + b_ddot_4 + 2 * self.control_cost

            l_u[:, i] = l_u_i.squeeze()
            l_uu[:, :, i] = l_uu_i.squeeze()

        return l_u, l_uu

    def barrier_function(self, q1, q2, c, c_dot):
        """Exponential barrier b = q1 * exp(q2 * c) and its derivatives.

        Args:
            q1: barrier magnitude.
            q2: barrier sharpness (exponent gain).
            c: constraint value; c > 0 means the constraint is violated.
            c_dot: derivative of c w.r.t. the decision variable (column vector).

        Returns:
            (b, b_dot, b_ddot) where b_ddot is the Gauss-Newton-style
            approximation q1*q2^2*exp(q2*c) * c_dot @ c_dot.T.
        """
        b = q1 * np.exp(q2 * c)
        b_dot = q1 * q2 * np.exp(q2 * c) * c_dot
        b_ddot = q1 * (q2 ** 2) * np.exp(q2 * c) * np.matmul(c_dot, c_dot.T)
        return b, b_dot, b_ddot

    def get_cost_derivatives(self, state, control, poly_coeffs, ref_traj, npc_traj, target_traj=None):
        """Return all cost derivatives (l_x, l_xx, l_u, l_uu, l_ux).

        ``ref_traj`` carries both position and velocity references. l_ux is
        identically zero because the state and control costs are separable.
        """
        # Cached for the offset helpers (those expect a single state vector;
        # NOTE(review): here the full trajectory matrix is stored — confirm
        # get_offset_cost callers set self.state appropriately).
        self.state = state
        l_u, l_uu = self.get_control_cost_derivatives(state, control)
        l_x, l_xx = self.get_state_cost_derivatives(state, poly_coeffs, ref_traj, npc_traj, target_traj)
        l_ux = np.zeros((self.args.num_ctrls, self.args.num_states, self.args.horizon))
        return l_x, l_xx, l_u, l_uu, l_ux

    def get_total_cost(self, state, control_seq, poly_coeffs, ref_traj, npc_traj, target_traj=None):
        """Return the scalar trajectory cost for a state/control sequence.

        Sums tracking and control-effort costs plus, when applicable, the
        target-offset and state-boundary penalties. ``poly_coeffs`` and
        ``npc_traj`` are currently unused here (obstacle cost enters only
        through the derivatives).
        """
        J = 0
        for i in range(self.args.horizon):
            x_r, y_r, v_r = self.find_closest_point(state[:, i], ref_traj)
            state_diff = state[:, i] - np.array([x_r, y_r, v_r, 0])
            J += state_diff.T @ self.state_cost @ state_diff
            J += control_seq[:, i].T @ self.control_cost @ control_seq[:, i]

            # Target-vehicle offset cost.
            if target_traj is not None:
                dx = state[0, i] - target_traj[0, i]
                dy = state[1, i] - target_traj[1, i]
                d = np.sqrt(dx ** 2 + dy ** 2)
                J += self.args.w_offset * (d - self.args.offset) ** 2

            # State-boundary penalty cost.
            cost_bound, _, _ = self.get_state_boundary_penalty(state[:, i])
            J += cost_bound

        return J

    def find_closest_point(self, state, ref_traj):
        """Return the reference row [x, y, v_ref] nearest in (x, y) to state.

        Args:
            state: state vector whose first two entries are (x, y).
            ref_traj: (N, 3) array with columns [x, y, v_ref]; the distance
                uses only the position columns.
        """
        diff = ref_traj[:, :2] - np.array([state[0], state[1]])
        distances = np.sum(diff ** 2, axis=1)
        return ref_traj[np.argmin(distances), :]

    def desired_pose_function(self, x):
        """Evaluate the desired-path polynomial y = f(x)."""
        return np.polyval(self.coeffs, x)

    def offset_obj(self, X):
        """Euclidean distance from candidate point X = (x, y) to the cached state."""
        x, y = X
        return np.sqrt((x - self.state[0]) ** 2 + (y - self.state[1]) ** 2)

    def c1(self, X):
        """COBYLA inequality constraint: f(x) - y >= 0 keeps the point at/below the path."""
        x, y = X
        return self.desired_pose_function(x) - y

    def get_offset_cost(self):
        """Return the squared position error from the desired path.

        Projects the cached state onto the path polynomial via COBYLA and
        returns the weighted squared (x, y) error.

        Fix: the original ``np.matmul(np.matmul(state_diff.T * Qk, state_diff))``
        raised a TypeError (outer matmul got one operand) and mixed a 3x3
        weight with a 2-vector. Position weights stay at 1 as in the original
        matrix; the velocity weight is dropped because no velocity difference
        is available here.
        """
        X = fmin_cobyla(self.offset_obj, x0=[self.state[0], self.state[1]], cons=[self.c1])
        x_r, y_r = X
        state_diff = np.array([self.state[0] - x_r, self.state[1] - y_r])
        Qk = np.eye(2)  # unit weights on x and y
        return float(state_diff @ Qk @ state_diff)

    def get_velocity_cost(self, current_speed):
        """Return the weighted absolute deviation from the desired speed."""
        return self.args.w_vel * abs(current_speed - self.args.desired_speed)

    def get_state_boundary_penalty(self, state):
        """Return (cost, grad, hess) of the boundary penalty for one state.

        state = [x, y, v, theta]; exponential penalties w*exp(q*c) apply
        outside [x_min, x_max], [y_min, y_max] and [v_min, v_max]. Heading is
        periodic and left unpenalized.

        Fixes over the original: the gradient now has the correct (negative)
        sign for lower-bound violations, and the y and v components receive
        nonzero derivatives — previously they read the x entry of a selector
        vector and were identically zero.
        """
        cost = 0.0
        grad = np.zeros(len(state))
        hess = np.zeros((len(state), len(state)))

        w = self.args.w_state_bound  # barrier magnitude
        q = self.args.q1_acc         # exponent gain, reused from the acc-barrier config

        # (state index, lower bound, upper bound) for each penalized component.
        bounds = ((0, self.args.x_min, self.args.x_max),
                  (1, self.args.y_min, self.args.y_max),
                  (2, self.args.v_min, self.args.v_max))

        for idx, lo, hi in bounds:
            if state[idx] < lo:
                c = lo - state[idx]
                dc = -1.0  # c shrinks as the state component grows
            elif state[idx] > hi:
                c = state[idx] - hi
                dc = 1.0
            else:
                continue  # inside the bounds: no penalty for this component
            e = w * np.exp(q * c)
            cost += e
            grad[idx] = q * e * dc
            hess[idx, idx] = (q ** 2) * e  # dc^2 == 1 either way

        return cost, grad, hess