import time
from dataclasses import dataclass
from typing import Any, Dict, Optional, Tuple

import numpy as np
import taichi as ti

@dataclass
class QPResult:
    """
    Result of solving a quadratic program.

    :param x: optimal solution vector
    :param obj_val: optimal objective function value
    :param iterations: number of solver iterations performed
    :param status: solver status ('solved', 'max_iter_reached', 'infeasible', ...)
    :param residuals: residual information (e.g. primal/dual residual norms)
    :param solve_time: wall-clock solve time in seconds
    """
    x: np.ndarray
    obj_val: float
    iterations: int
    status: str
    residuals: Dict[str, float]
    solve_time: float


@ti.data_oriented
class QPSolver:
    """
    Taichi-accelerated QP solver based on the ADMM algorithm.

    Solves problems of the form::

        min  0.5 * x^T P x + q^T x
        s.t. G x <= h
             A x == b
    """

    def __init__(self, arch=ti.gpu):
        """
        Initialize the solver and the Taichi runtime.

        :param arch: compute backend, ti.gpu (default) or ti.cpu; falls back
            to CPU if GPU initialization raises.
        """
        try:
            if arch == ti.gpu:
                ti.init(arch=ti.gpu)
                print("使用GPU计算:", ti.gpu)
            else:
                ti.init(arch=ti.cpu)
                print("使用CPU计算")
        except Exception as e:
            # GPU backend unavailable (no driver, unsupported device, ...):
            # degrade gracefully to CPU instead of crashing.
            print(f"GPU初始化失败: {e}")
            print("回退到CPU计算")
            ti.init(arch=ti.cpu)

        self.initialized = False
        self.max_iter = 2000
        self.abs_tol = 1e-4
        self.rel_tol = 1e-3
        self.rho = 1.0  # ADMM penalty parameter (may adapt during solve)
        self.adaptive_rho = True
        self.verbose = False

    def _initialize_fields(self, n, m_ineq, m_eq):
        """
        Allocate the Taichi fields for a problem of the given dimensions.

        :param n: number of decision variables
        :param m_ineq: number of inequality constraints
        :param m_eq: number of equality constraints
        """
        # Drop fields left over from a previous problem of different shape.
        # The ti.static(hasattr(self, ...)) guards inside the kernels key off
        # the *existence* of these attributes, so stale ones must not survive.
        for name in ('z', 'u', 'y'):
            if hasattr(self, name):
                delattr(self, name)

        # Primal variable
        self.x = ti.field(ti.f32, shape=n)

        # ADMM auxiliary/dual variables (only when constraints exist)
        if m_ineq > 0:
            self.z = ti.field(ti.f32, shape=m_ineq)  # slack for Gx <= h
            self.u = ti.field(ti.f32, shape=m_ineq)  # inequality dual
        if m_eq > 0:
            self.y = ti.field(ti.f32, shape=m_eq)    # equality dual

        # Work buffers
        self.x_new = ti.field(ti.f32, shape=n)
        self.Px = ti.field(ti.f32, shape=n)  # scratch (RHS of x-update)

        # Residual outputs (written by _compute_residuals)
        self.primal_res = ti.field(ti.f32, shape=1)
        self.dual_res = ti.field(ti.f32, shape=1)

        # Remember the constraint counts so solve() can detect shape changes.
        self._m_ineq = m_ineq
        self._m_eq = m_eq
        self.initialized = True

    @ti.kernel
    def _compute_Px(self, P: ti.types.ndarray()):
        """
        Compute P @ x into self.Px.  (Utility kernel; not used by solve().)

        :param P: (n, n) quadratic cost matrix
        """
        n = self.x.shape[0]
        for i in range(n):
            acc = 0.0
            for j in range(n):
                acc += P[i, j] * self.x[j]
            self.Px[i] = acc

    @ti.kernel
    def _update_x(self, P_factored: ti.types.ndarray(),
                  q: ti.types.ndarray(),
                  G: ti.types.ndarray(),
                  A: ti.types.ndarray(),
                  b: ti.types.ndarray(),
                  rho: ti.f32):
        """
        ADMM x-update: solve (P + rho*G^T G + rho*A^T A) x = rhs where
        rhs = -q + G^T(rho*z - u) + A^T(rho*b - y), using the precomputed
        inverse P_factored.

        :param P_factored: (n, n) inverse of the regularized system matrix
        :param q: linear cost vector
        :param G: inequality constraint matrix (possibly 0 rows)
        :param A: equality constraint matrix (possibly 0 rows)
        :param b: equality constraint right-hand side (possibly empty)
        :param rho: current ADMM penalty parameter
        """
        n = self.x.shape[0]

        # Pass 1: assemble the full RHS vector into the scratch field self.Px.
        # (The original code multiplied each rhs entry by a row *sum* of
        # P_factored instead of doing a proper mat-vec — fixed here.)
        for i in range(n):
            r_i = -q[i]
            if ti.static(hasattr(self, 'z')):
                m_ineq = self.z.shape[0]
                for j in range(m_ineq):
                    r_i += G[j, i] * (rho * self.z[j] - self.u[j])
            if ti.static(hasattr(self, 'y')):
                # A^T(rho*b - y): the rho*b term was missing and the sign of
                # y was wrong in the original kernel.
                m_eq = self.y.shape[0]
                for j in range(m_eq):
                    r_i += A[j, i] * (rho * b[j] - self.y[j])
            self.Px[i] = r_i

        # Pass 2: x_new = P_factored @ rhs (true matrix-vector product).
        for i in range(n):
            acc = 0.0
            for j in range(n):
                acc += P_factored[i, j] * self.Px[j]
            self.x_new[i] = acc

    @ti.kernel
    def _update_z(self, G: ti.types.ndarray(), h: ti.types.ndarray(), rho: ti.f32):
        """
        ADMM z-update: project onto the feasible set of Gx <= h,
        z = min(Gx + u/rho, h).

        :param G: inequality constraint matrix
        :param h: inequality right-hand side
        :param rho: current ADMM penalty parameter
        """
        n = self.x.shape[0]
        m_ineq = self.z.shape[0]
        for i in range(m_ineq):
            Gx_i = 0.0
            for j in range(n):
                Gx_i += G[i, j] * self.x_new[j]
            self.z[i] = min(Gx_i + self.u[i] / rho, h[i])

    @ti.kernel
    def _update_dual_variables(self, G: ti.types.ndarray(), h: ti.types.ndarray(),
                               A: ti.types.ndarray(), b: ti.types.ndarray(),
                               rho: ti.f32):
        """
        ADMM dual ascent: u += rho*(Gx - z), y += rho*(Ax - b).

        :param G: inequality constraint matrix
        :param h: inequality right-hand side (unused, kept for signature parity)
        :param A: equality constraint matrix
        :param b: equality right-hand side
        :param rho: current ADMM penalty parameter
        """
        n = self.x.shape[0]

        if ti.static(hasattr(self, 'z')):
            m_ineq = self.z.shape[0]
            for i in range(m_ineq):
                Gx_i = 0.0
                for j in range(n):
                    Gx_i += G[i, j] * self.x_new[j]
                self.u[i] = self.u[i] + rho * (Gx_i - self.z[i])

        if ti.static(hasattr(self, 'y')):
            m_eq = self.y.shape[0]
            for i in range(m_eq):
                Ax_i = 0.0
                for j in range(n):
                    Ax_i += A[i, j] * self.x_new[j]
                self.y[i] = self.y[i] + rho * (Ax_i - b[i])

    @ti.kernel
    def _compute_residuals(self, G: ti.types.ndarray(), h: ti.types.ndarray(),
                           A: ti.types.ndarray(), b: ti.types.ndarray()):
        """
        Compute infinity-norm residuals into self.primal_res / self.dual_res.

        Primal: max(|Gx - z|_inf, |Ax - b|_inf).
        Dual (proxy): |x_new - x|_inf, i.e. the iterate change.

        :param G: inequality constraint matrix
        :param h: inequality right-hand side (unused, signature parity)
        :param A: equality constraint matrix
        :param b: equality right-hand side
        """
        n = self.x.shape[0]

        self.primal_res[0] = 0.0
        self.dual_res[0] = 0.0

        # Taichi parallelizes each top-level loop; a plain `m = max(m, v)`
        # on a kernel-scope local would race, so reduce with ti.atomic_max.
        if ti.static(hasattr(self, 'z')):
            m_ineq = self.z.shape[0]
            for i in range(m_ineq):
                Gx_i = 0.0
                for j in range(n):
                    Gx_i += G[i, j] * self.x_new[j]
                ti.atomic_max(self.primal_res[0], abs(Gx_i - self.z[i]))

        if ti.static(hasattr(self, 'y')):
            m_eq = self.y.shape[0]
            for i in range(m_eq):
                Ax_i = 0.0
                for j in range(n):
                    Ax_i += A[i, j] * self.x_new[j]
                ti.atomic_max(self.primal_res[0], abs(Ax_i - b[i]))

        for i in range(n):
            ti.atomic_max(self.dual_res[0], abs(self.x_new[i] - self.x[i]))

    @ti.kernel
    def _update_x_final(self):
        """Commit the iterate: x <- x_new."""
        for i in range(self.x.shape[0]):
            self.x[i] = self.x_new[i]

    @ti.kernel
    def _compute_objective(self, P: ti.types.ndarray(), q: ti.types.ndarray()) -> ti.f32:
        """
        Evaluate 0.5 * x^T P x + q^T x at the current x.

        :param P: quadratic cost matrix
        :param q: linear cost vector
        :return: objective value
        """
        n = self.x.shape[0]
        obj = 0.0
        # `obj +=` in a parallel loop is auto-atomic in Taichi.
        for i in range(n):
            for j in range(n):
                obj += 0.5 * self.x[i] * P[i, j] * self.x[j]
        for i in range(n):
            obj += q[i] * self.x[i]
        return obj

    def _factorize(self, P, G, A, reg):
        """
        Assemble and invert the x-update system matrix
        M = P + reg*I + rho*G^T G + rho*A^T A.

        Must be re-run whenever self.rho or reg changes, because rho is baked
        into M.  An explicit inverse is used for simplicity; a cached Cholesky
        factorization would be preferable for large problems.

        :param P: (n, n) quadratic cost matrix, float32
        :param G: (m_ineq, n) inequality matrix (possibly 0 rows)
        :param A: (m_eq, n) equality matrix (possibly 0 rows)
        :param reg: current Tikhonov regularization weight
        :return: (M^{-1} as float32 ndarray, possibly increased reg)
        """
        n = P.shape[0]

        def assemble(r):
            M = P + np.float32(r) * np.eye(n, dtype=np.float32)
            if G.shape[0] > 0:
                M = M + self.rho * (G.T @ G)
            if A.shape[0] > 0:
                M = M + self.rho * (A.T @ A)
            return M.astype(np.float32)

        M = assemble(reg)
        try:
            condition_number = np.linalg.cond(M)
            if condition_number > 1e10:
                # Ill-conditioned system: strengthen regularization before
                # inverting to keep the solve numerically stable.
                extra = 1e-6 * max(1.0, condition_number / 1e10)
                print(f"矩阵条件数较大 ({condition_number:.2e})，增加正则化 ({extra:.2e})")
                reg += extra
                M = assemble(reg)
            return np.linalg.inv(M).astype(np.float32), reg
        except np.linalg.LinAlgError:
            print("矩阵求逆失败，添加更强的正则化")
            reg += 1e-4
            M = assemble(reg)
            try:
                return np.linalg.inv(M).astype(np.float32), reg
            except np.linalg.LinAlgError:
                # Last resort: pseudo-inverse always exists.
                print("使用伪逆代替直接求逆")
                return np.linalg.pinv(M).astype(np.float32), reg

    def solve(self, P: np.ndarray, q: np.ndarray,
              G: Optional[np.ndarray] = None,
              h: Optional[np.ndarray] = None,
              A: Optional[np.ndarray] = None,
              b: Optional[np.ndarray] = None,
              max_iter: int = None,
              abs_tol: float = None,
              rel_tol: float = None,
              rho: float = None,
              adaptive_rho: bool = None,
              verbose: bool = None) -> QPResult:
        """
        Solve the quadratic program

            min  0.5 * x^T P x + q^T x
            s.t. Gx <= h
                 Ax = b

        :param P: (n, n) quadratic cost matrix; must be positive semidefinite
        :param q: (n,) linear cost vector
        :param G: optional (m_ineq, n) inequality constraint matrix
        :param h: inequality right-hand side (required iff G is given)
        :param A: optional (m_eq, n) equality constraint matrix
        :param b: equality right-hand side (required iff A is given)
        :param max_iter: maximum iteration count (overrides instance default)
        :param abs_tol: absolute convergence tolerance
        :param rel_tol: relative convergence tolerance
        :param rho: initial ADMM penalty parameter
        :param adaptive_rho: whether to adapt rho during iterations
        :param verbose: whether to print progress every 100 iterations
        :raises ValueError: if G is given without h, or A without b
        :return: QPResult with the solution and solve statistics
        """
        start_time = time.time()

        # Per-call overrides of the solver configuration.
        if max_iter is not None:
            self.max_iter = max_iter
        if abs_tol is not None:
            self.abs_tol = abs_tol
        if rel_tol is not None:
            self.rel_tol = rel_tol
        if rho is not None:
            self.rho = rho
        if adaptive_rho is not None:
            self.adaptive_rho = adaptive_rho
        if verbose is not None:
            self.verbose = verbose

        # Fail fast on mismatched constraint pairs (previously an opaque
        # AttributeError on h.astype / b.astype).
        if (G is None) != (h is None):
            raise ValueError("G and h must be provided together")
        if (A is None) != (b is None):
            raise ValueError("A and b must be provided together")

        # Taichi kernels expect float32 ndarrays; asarray also accepts lists.
        P = np.asarray(P, dtype=np.float32)
        q = np.asarray(q, dtype=np.float32)

        n = q.shape[0]
        m_ineq = 0 if G is None else G.shape[0]
        m_eq = 0 if A is None else A.shape[0]

        if G is None:
            G = np.zeros((0, n), dtype=np.float32)
            h = np.zeros(0, dtype=np.float32)
        else:
            G = np.asarray(G, dtype=np.float32)
            h = np.asarray(h, dtype=np.float32)

        if A is None:
            A = np.zeros((0, n), dtype=np.float32)
            b = np.zeros(0, dtype=np.float32)
        else:
            A = np.asarray(A, dtype=np.float32)
            b = np.asarray(b, dtype=np.float32)

        # Reallocate fields when ANY dimension changed (the original only
        # checked n, so changing constraint counts reused wrong-shaped or
        # stale fields).
        if (not self.initialized or self.x.shape[0] != n
                or getattr(self, '_m_ineq', -1) != m_ineq
                or getattr(self, '_m_eq', -1) != m_eq):
            self._initialize_fields(n, m_ineq, m_eq)

        # Factorize the x-update system.  reg accumulates any Tikhonov
        # regularization added for numerical stability.
        reg = 1e-8
        P_factored, reg = self._factorize(P, G, A, reg)

        # Cold-start all iterates at zero.
        self.x.fill(0.0)
        self.x_new.fill(0.0)
        if m_ineq > 0:
            self.z.fill(0.0)
            self.u.fill(0.0)
        if m_eq > 0:
            self.y.fill(0.0)

        converged = False
        iterations = 0
        # Defined before the loop so the result is well-formed even when
        # max_iter <= 0 (previously a NameError).
        primal_res = float('inf')
        dual_res = float('inf')

        # Divergence/stagnation detection state.
        best_residual = float('inf')
        no_progress_count = 0

        for it in range(self.max_iter):
            iterations = it + 1

            # 1. x-update (linear solve via precomputed inverse)
            self._update_x(P_factored, q, G, A, b, self.rho)

            # 2. z-update (projection onto Gx <= h)
            if m_ineq > 0:
                self._update_z(G, h, self.rho)

            # 3. dual updates for u and y
            self._update_dual_variables(G, h, A, b, self.rho)

            # 4. residuals
            self._compute_residuals(G, h, A, b)
            primal_res = self.primal_res[0]
            dual_res = self.dual_res[0]

            # Guard against numerical blow-up: shrink rho and retry.
            if not (np.isfinite(primal_res) and np.isfinite(dual_res)):
                print(f"警告：迭代 {it} 中检测到NaN或Inf值")
                self.rho = max(1e-6, self.rho * 0.1)
                # rho changed -> the factorized system is stale; rebuild.
                P_factored, reg = self._factorize(P, G, A, reg)
                continue

            # Convergence check (absolute tolerance on both residuals).
            if primal_res < self.abs_tol and dual_res < self.abs_tol:
                converged = True
                break

            # Track progress of the worst residual.
            current_residual = max(primal_res, dual_res)
            if current_residual < best_residual:
                best_residual = current_residual
                no_progress_count = 0
            else:
                no_progress_count += 1

            rho_changed = False

            # No progress for a long stretch: adjust strategy.
            if no_progress_count > 50:
                no_progress_count = 0
                if current_residual > 1e6:
                    # Likely diverging: shrink rho and add regularization.
                    print(f"迭代 {it}: 可能发散，调整rho和增加正则化")
                    self.rho = max(1e-6, self.rho * 0.1)
                    reg += 1e-4
                else:
                    # Merely slow: nudge rho.
                    print(f"迭代 {it}: 收敛较慢，调整rho")
                    if self.rho > 0.1:
                        self.rho *= 0.5
                    else:
                        self.rho *= 2.0
                rho_changed = True

            # Standard residual-balancing rho adaptation.
            if self.adaptive_rho and it > 0 and it % 10 == 0 and no_progress_count < 10:
                if primal_res > 10 * dual_res:
                    self.rho *= 2.0
                    rho_changed = True
                elif dual_res > 10 * primal_res:
                    self.rho /= 2.0
                    rho_changed = True
                self.rho = max(1e-6, min(1e6, self.rho))

            if rho_changed:
                # rho is baked into the x-update matrix; the original kept
                # iterating with the stale factorization after adapting rho.
                P_factored, reg = self._factorize(P, G, A, reg)

            # Commit x <- x_new.
            self._update_x_final()

            if self.verbose and it % 100 == 0:
                obj_val = self._compute_objective(P, q)
                print(f"迭代 {it}: 目标值 = {obj_val}, 原问题残差 = {primal_res}, 对偶残差 = {dual_res}")

        # Commit the final iterate so the returned x and the reported
        # objective agree (the original evaluated the objective on a
        # one-step-stale self.x).
        self._update_x_final()
        x_result = self.x.to_numpy().astype(np.float32)
        obj_val = self._compute_objective(P, q)

        status = "solved" if converged else "max_iter_reached"

        return QPResult(
            x=x_result,
            obj_val=float(obj_val),
            iterations=iterations,
            status=status,
            residuals={"primal": float(primal_res), "dual": float(dual_res)},
            solve_time=time.time() - start_time,
        )