"""
DUCA (Dual Consensus Algorithm) 算法实现

基于论文实现的双共识算法
"""

import numpy as np
from typing import Dict, Optional, Callable
from .base_algorithm import BaseDistributedAlgorithm
from .solvers import solve_local_subproblem
import cvxpy as cp


class DUCA(BaseDistributedAlgorithm):
    """DUCA (Dual Consensus Algorithm) implementation.

    Each node i keeps a primal iterate x_i, a local estimate y_i of the
    global dual variable, and (in double-exchange mode) an auxiliary
    consensus variable v_i.  The dual variables are averaged over the
    network via the weight matrix W and pushed along the constraint
    values with a (possibly adaptive) step size.
    """

    def __init__(self, problem, network, **kwargs):
        """
        Initialize the DUCA algorithm.

        Args:
            problem: distributed optimization problem instance
            network: network topology instance
            rho: penalty parameter (default 1.0)
            mode: communication mode ('single-exchange' or 'double-exchange')
            step_size: dual step size (default: adaptive, derived from the
                network weight-matrix spectrum)
        """
        super().__init__(problem, network, **kwargs)

        # DUCA-specific parameters
        self.rho = kwargs.get('rho', 1.0)
        self.mode = kwargs.get('mode', 'single-exchange')
        self.step_size = kwargs.get('step_size', None)

        # Consensus weight matrix of the communication graph
        self.W = network.weight_matrix

        # If no step size was supplied, derive an adaptive one from the
        # second eigenvalue of the weight matrix (spectral-gap heuristic:
        # slower-mixing networks get a smaller dual step).
        # NOTE(review): assumes get_weight_eigenvalues() returns at least
        # two eigenvalues ordered by magnitude -- confirm in network class.
        if self.step_size is None:
            eigenvalues = network.get_weight_eigenvalues()
            self.step_size = 1.0 / (1 + abs(eigenvalues[1]))

    def _initialize_auxiliary_variables(self):
        """Initialize DUCA's auxiliary variables.

        z_i is node i's estimate of the global dual variable; its length
        m + p stacks the inequality (m) and equality (p) multipliers.
        """
        for i in range(self.num_nodes):
            self.z[i] = np.zeros(self.m + self.p)

        # Extra consensus variables only used in double-exchange mode
        if self.mode == 'double-exchange':
            self.v = {}  # auxiliary consensus variables
            for i in range(self.num_nodes):
                self.v[i] = np.zeros(self.m + self.p)

    def _update_primal_variables(self):
        """Update the primal variables x_i.

        For each node, solve the local subproblem
            minimize   f_i(x_i) + <y_i, G_i(x_i)>
            subject to x_i in X_i
        where G_i stacks the local inequality and equality constraints.
        """
        for i in range(self.num_nodes):
            # Build the local subproblem in cvxpy
            x_i = cp.Variable(self.problem.dimensions[i])

            # Local objective term f_i(x_i)
            obj = self.problem.objectives[i](x_i)

            # Split the dual vector into inequality / equality multipliers
            lambda_i = self.y[i][:self.m]  # inequality multipliers
            nu_i = self.y[i][self.m:]      # equality multipliers

            # Lagrangian term <y_i, G_i(x_i)> = <lambda_i, g_i> + <nu_i, h_i>
            dual_term = 0

            # Inequality-constraint contribution (only the first m entries
            # of y_i have matching constraints)
            for j, g_ij in enumerate(self.problem.inequalities[i]):
                if j < self.m:
                    dual_term += lambda_i[j] * g_ij(x_i)

            # Equality-constraint contribution
            for j, h_ij in enumerate(self.problem.equalities[i]):
                if j < self.p:
                    dual_term += nu_i[j] * h_ij(x_i)

            # Full subproblem objective
            objective = obj + dual_term

            # Local feasible set X_i, encoded by type
            constraints = []
            if self.problem.local_sets[i] is not None:
                set_type = self.problem.local_sets[i]['type']
                if set_type == 'box':
                    lower = self.problem.local_sets[i]['lower']
                    upper = self.problem.local_sets[i]['upper']
                    constraints.append(x_i >= lower)
                    constraints.append(x_i <= upper)
                elif set_type == 'ball':
                    center = self.problem.local_sets[i]['center']
                    radius = self.problem.local_sets[i]['radius']
                    constraints.append(cp.norm(x_i - center) <= radius)
                elif set_type == 'simplex':
                    constraints.append(x_i >= 0)
                    constraints.append(cp.sum(x_i) <= 1)

            # Solve the subproblem
            try:
                prob = cp.Problem(cp.Minimize(objective), constraints)
                prob.solve(solver=cp.OSQP, verbose=False)

                # Accept inaccurate solutions too: OSQP frequently reports
                # 'optimal_inaccurate' on loosely-toleranced subproblems,
                # and rejecting those would needlessly discard a usable
                # iterate.  Also guard against a None value, which cvxpy
                # returns for infeasible/unbounded statuses.
                if (prob.status in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE)
                        and x_i.value is not None):
                    self.x[i] = x_i.value
                else:
                    # Solve failed: keep the previous iterate, projected
                    # back onto the local set when one exists
                    if self.problem.local_sets[i] is not None:
                        self.x[i] = self.problem.project_to_local_set(self.x[i], i)
            except Exception:
                # A bare `except:` would also swallow KeyboardInterrupt and
                # SystemExit; catch Exception only, and fall back to a
                # projected gradient step.
                self._gradient_step_primal(i)

    def _gradient_step_primal(self, i: int):
        """Fallback primal update for node i via one projected gradient step."""
        # Gradient of the local objective at the current iterate
        grad = self.problem.objectives[i].gradient(self.x[i])

        # Add the gradient of the Lagrangian dual term
        lambda_i = self.y[i][:self.m]
        nu_i = self.y[i][self.m:]

        # Inequality-constraint gradients (skipped when a constraint
        # object exposes no gradient)
        for j, g_ij in enumerate(self.problem.inequalities[i]):
            if j < self.m and hasattr(g_ij, 'gradient'):
                grad += lambda_i[j] * g_ij.gradient(self.x[i])

        # Equality-constraint gradients
        for j, h_ij in enumerate(self.problem.equalities[i]):
            if j < self.p and hasattr(h_ij, 'gradient'):
                grad += nu_i[j] * h_ij.gradient(self.x[i])

        # Normalized step: shrinks with the gradient norm so a huge
        # gradient cannot throw the iterate far away
        step_size = 0.01 / (1 + np.linalg.norm(grad))
        x_new = self.x[i] - step_size * grad

        # Project back onto the local feasible set
        if self.problem.local_sets[i] is not None:
            self.x[i] = self.problem.project_to_local_set(x_new, i)
        else:
            self.x[i] = x_new

    def _update_dual_variables(self):
        """Update the dual variables y_i according to the configured mode."""
        if self.mode == 'single-exchange':
            self._single_exchange_dual_update()
        else:
            self._double_exchange_dual_update()

    def _single_exchange_dual_update(self):
        """Dual update with a single communication round per iteration."""
        # Evaluate all constraints at the current primal iterates.
        # NOTE(review): c is the GLOBAL constraint vector, shared by all
        # nodes -- this centralized evaluation is a simulation shortcut.
        g, h = self.problem.evaluate_constraints(self.x)
        c = np.concatenate([g, h])

        # Update each node's dual estimate
        y_new = {}
        for i in range(self.num_nodes):
            # Consensus step: y_i^{k+1} = sum_j W_ij * y_j^k
            y_consensus = np.zeros(self.m + self.p)
            for j in range(self.num_nodes):
                y_consensus += self.W[i, j] * self.y[j]

            # Gradient-ascent step on the dual (1/N scaling matches the
            # averaged constraint contribution per node)
            y_new[i] = y_consensus + self.step_size * c / self.num_nodes

            # Project inequality multipliers onto the nonnegative orthant
            y_new[i][:self.m] = np.maximum(y_new[i][:self.m], 0)

        # Commit all nodes at once (synchronous update)
        self.y = y_new

    def _double_exchange_dual_update(self):
        """Dual update with two communication rounds per iteration."""
        # First exchange: mix the auxiliary variables v over the network
        v_new = {}
        for i in range(self.num_nodes):
            v_new[i] = np.zeros(self.m + self.p)
            for j in range(self.num_nodes):
                v_new[i] += self.W[i, j] * self.v[j]

        self.v = v_new

        # Evaluate the global constraint vector (see note in the
        # single-exchange update)
        g, h = self.problem.evaluate_constraints(self.x)
        c = np.concatenate([g, h])

        # Dual gradient-ascent step from the mixed auxiliary variable
        y_new = {}
        for i in range(self.num_nodes):
            y_new[i] = self.v[i] + self.step_size * c / self.num_nodes

            # Project inequality multipliers onto the nonnegative orthant
            y_new[i][:self.m] = np.maximum(y_new[i][:self.m], 0)

        self.y = y_new

        # Second exchange: refresh v by mixing the NEW dual variables
        v_new = {}
        for i in range(self.num_nodes):
            v_new[i] = np.zeros(self.m + self.p)
            for j in range(self.num_nodes):
                v_new[i] += self.W[i, j] * self.y[j]

        self.v = v_new

    def _update_auxiliary_variables(self):
        """Update auxiliary variables (base DUCA has no extra dynamics).

        The global dual estimate z_i simply tracks y_i.
        """
        for i in range(self.num_nodes):
            self.z[i] = self.y[i].copy()

    def set_penalty_parameter(self, rho: float):
        """Dynamically adjust the penalty parameter.

        Args:
            rho: new penalty parameter; must be strictly positive.

        Raises:
            ValueError: if rho is not strictly positive.
        """
        if rho <= 0:
            raise ValueError(f"Penalty parameter rho must be positive, got {rho}")
        self.rho = rho

    def get_primal_objective(self) -> float:
        """Return the current primal objective value."""
        return self.problem.evaluate_objective(self.x)

    def get_dual_objective(self) -> float:
        """Return an estimate of the dual (Lagrangian) objective value.

        Uses the consensus average of the node dual variables; this is an
        estimate, not the exact Lagrange dual function.
        """
        primal_obj = self.get_primal_objective()
        g, h = self.problem.evaluate_constraints(self.x)

        y_avg = self.get_consensus_dual()
        lambda_avg = y_avg[:self.m]
        nu_avg = y_avg[self.m:]

        # L(x, lambda, nu) = f(x) + <lambda, g(x)> + <nu, h(x)>
        dual_obj = primal_obj + np.dot(lambda_avg, g) + np.dot(nu_avg, h)
        return dual_obj