#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
论文《A Unified Dual Consensus Approach to Distributed Optimization 
with Globally-Coupled Constraints》实验结果复现

作者：Zixuan Liu, Xuyang Wu, Dandan Wang, and Jie Lu
复现目标：Figure 1 - 收敛性能对比图

实验设置：
- 20个节点，40条边的随机连通图
- 问题(34)：二次规划问题
- 1000次迭代
- 多种算法对比：DUCA变体、ALT、DPMM、IPLUX、分布式次梯度方法
"""

import numpy as np
import matplotlib.pyplot as plt
import networkx as nx
import cvxpy as cp
from scipy.linalg import null_space
from scipy.sparse import diags
import warnings
# Silence solver/NumPy deprecation chatter so the per-iteration logs stay readable.
warnings.filterwarnings('ignore')

# Configure matplotlib for CJK labels: try SimHei first, fall back to DejaVu Sans,
# and keep the minus sign renderable under a CJK font.
plt.rcParams['font.sans-serif'] = ['SimHei', 'DejaVu Sans']
plt.rcParams['axes.unicode_minus'] = False

class ProblemGenerator:
    """
    Generate an instance of problem (34) from the paper:

        minimize    sum_i (1/2 x_i^T P_i x_i + Q_i^T x_i)
        subject to  ||x_i - a_i||^2 <= c_i                  for all i
                    sum_i ||x_i - a'_i||^2 <= sum_i c'_i
                    sum_i B_i x_i = 0
    """

    def __init__(self, N=20, d=3, m=1, p=2, seed=42):
        """
        Parameters:
        N: number of nodes
        d: decision-variable dimension per node
        m: number of coupled inequality constraints
        p: number of coupled equality constraints
        seed: RNG seed so the generated instance is reproducible
        """
        np.random.seed(seed)
        self.N = N
        self.d = d
        self.m = m
        self.p = p

        # Draw all problem data up front.
        self._generate_problem_data()

    def _generate_problem_data(self):
        """Draw random problem data satisfying the paper's assumptions."""
        # Objective parameters: P_i strictly positive definite, Q_i arbitrary.
        self.P = []
        self.Q = []
        for i in range(self.N):
            A = np.random.randn(self.d, self.d)
            # A^T A is PSD; the small ridge makes it strictly positive definite.
            self.P.append(A.T @ A + 0.1 * np.eye(self.d))
            self.Q.append(np.random.randn(self.d))

        # Local ball constraints X_i = {x_i : ||x_i - a_i||^2 <= c_i}.
        self.a = []
        self.c = []
        for i in range(self.N):
            a_i = np.random.randn(self.d)
            # c_i > ||a_i||^2 guarantees x_i = 0 is strictly feasible locally.
            self.a.append(a_i)
            self.c.append(np.linalg.norm(a_i)**2 + 1.0)

        # Coupled inequality: sum_i ||x_i - a'_i||^2 <= sum_i c'_i.
        self.a_prime = []
        self.c_prime = []
        for i in range(self.N):
            a_prime_i = np.random.randn(self.d)
            self.a_prime.append(a_prime_i)
            self.c_prime.append(np.linalg.norm(a_prime_i)**2 + 0.5)

        # Coupled equality: sum_i B_i x_i = 0.
        self.B = [np.random.randn(self.p, self.d) for _ in range(self.N)]

    def solve_centralized(self):
        """Solve the centralized problem with CVXPY to obtain the reference optimum.

        On success stores `optimal_value` and `optimal_x` and returns True;
        otherwise prints the solver status and returns False.
        """
        x = [cp.Variable(self.d) for _ in range(self.N)]

        # Objective: sum of local quadratics.
        objective = 0
        for i in range(self.N):
            objective += 0.5 * cp.quad_form(x[i], self.P[i]) + self.Q[i].T @ x[i]

        constraints = []

        # Local ball constraints (sum_squares is the DCP-friendly ||.||^2).
        for i in range(self.N):
            constraints.append(cp.sum_squares(x[i] - self.a[i]) <= self.c[i])

        # Coupled inequality constraint.
        ineq_constraint = 0
        for i in range(self.N):
            ineq_constraint += cp.sum_squares(x[i] - self.a_prime[i])
        constraints.append(ineq_constraint <= sum(self.c_prime))

        # Coupled equality constraint.
        eq_constraint = 0
        for i in range(self.N):
            eq_constraint += self.B[i] @ x[i]
        constraints.append(eq_constraint == 0)

        prob = cp.Problem(cp.Minimize(objective), constraints)
        # Let CVXPY pick an installed solver instead of hard-coding MOSEK
        # (a commercial solver that is frequently not installed); fall back
        # to SCS explicitly if the default choice fails.
        try:
            prob.solve(verbose=False)
        except cp.error.SolverError:
            prob.solve(solver=cp.SCS, verbose=False)

        # OPTIMAL_INACCURATE is still a usable reference value; the original
        # check rejected it and reported failure unnecessarily.
        if prob.status in (cp.OPTIMAL, cp.OPTIMAL_INACCURATE):
            self.optimal_value = prob.value
            self.optimal_x = [x[i].value for i in range(self.N)]
            return True
        print(f"求解失败，状态: {prob.status}")
        return False

class NetworkGenerator:
    """Builds random connected undirected graphs."""

    @staticmethod
    def generate_connected_graph(N=20, num_edges=40, seed=42):
        """Return a random connected graph with N nodes and num_edges edges.

        Connectivity is guaranteed by first growing a random spanning tree,
        then adding uniformly random extra edges until the edge budget
        (capped at the complete-graph maximum) is reached.
        """
        np.random.seed(seed)

        graph = nx.Graph()
        graph.add_nodes_from(range(N))

        # Grow a spanning tree: repeatedly attach a not-yet-connected node
        # to a uniformly chosen already-connected one.
        attached = [0]
        pending = list(range(1, N))
        tree_edges = []
        while pending:
            u = np.random.choice(attached)
            v = np.random.choice(pending)
            tree_edges.append((u, v))
            attached.append(v)
            pending.remove(v)

        graph.add_edges_from(tree_edges)

        # A simple graph on N nodes can hold at most N(N-1)/2 edges.
        edge_budget = min(num_edges, N * (N - 1) // 2)

        # Add random extra edges, rejecting self-loops and duplicates.
        edge_count = len(tree_edges)
        while edge_count < edge_budget:
            u = np.random.randint(0, N)
            v = np.random.randint(0, N)
            if u != v and not graph.has_edge(u, v):
                graph.add_edge(u, v)
                edge_count += 1

        return graph

class DUCAAlgorithm:
    """
    DUCA algorithm implementation.

    Supports several parameter-matrix designs: DUCA-I, DUCA-PEXTRA,
    DUCA-PGC, DUCA-DPGA and DUCA-dist.ADMM. Setting alpha > 0 adds the
    proximal term used by the Pro-DUCA variants.
    """

    def __init__(self, problem, graph, algorithm_type='DUCA-I', rho=1.0, alpha=0.0):
        """
        Parameters:
        problem: ProblemGenerator instance holding the problem data
        graph: NetworkX graph describing the communication topology
        algorithm_type: which parameter design to use (see class docstring)
        rho: step-size parameter
        alpha: proximal parameter (0 disables the proximal term)
        """
        self.problem = problem
        self.graph = graph
        self.N = problem.N
        self.d = problem.d
        self.m = problem.m
        self.p = problem.p
        self.algorithm_type = algorithm_type
        self.rho = rho
        self.alpha = alpha

        # Build graph matrices, pick the variant's parameter matrices,
        # then allocate iterate storage.
        self._build_laplacian()
        self._set_algorithm_parameters()
        self._initialize_variables()

    def _build_laplacian(self):
        """Build the Laplacian, adjacency and degree matrices of the graph."""
        A = nx.adjacency_matrix(self.graph).toarray()
        D = np.diag(np.sum(A, axis=1))
        self.L = D - A   # Laplacian
        self.W = A       # adjacency
        self.Degree = D  # degree matrix

    def _set_algorithm_parameters(self):
        """Set the parameter matrices P_H, P_H~, P_D per algorithm type."""
        if self.algorithm_type == 'DUCA-I':
            # Newly designed parameters.
            self.P_H = self.L
            self.P_H_tilde = self.L
            self.P_D = 2 * self.rho * self.Degree

        elif self.algorithm_type == 'DUCA-PEXTRA':
            # P-EXTRA parameter setting.
            self.P_H = 0.5 * self.L
            self.P_H_tilde = 0.5 * self.L
            self.P_D = self.rho * np.eye(self.N)

        elif self.algorithm_type == 'DUCA-PGC':
            # PGC parameter setting (rho fixed to 1).
            self.P_H = 0.5 * self.L
            self.P_H_tilde = 0.5 * self.L
            self.P_D = self.Degree
            self.rho = 1.0

        elif self.algorithm_type == 'DUCA-DPGA':
            # DPGA parameter setting with a degree-clamped diagonal matrix.
            self.P_H = self.L
            self.P_H_tilde = self.L
            degrees = np.diag(self.Degree)
            self.P_D = np.diag([max(1.0, degrees[i]) for i in range(self.N)])
            self.rho = 1.0

        elif self.algorithm_type == 'DUCA-dist.ADMM':
            # Distributed-ADMM parameter setting.
            self.P_H = self.L
            self.P_H_tilde = self.L
            self.P_D = self.rho * np.eye(self.N)

        else:
            raise ValueError(f"未知算法类型: {self.algorithm_type}")

        # Composite matrix; convergence requires it to be PSD.
        self.P_A = self.P_D - self.rho * self.P_H

        # P_A is symmetric, so use eigvalsh: it returns real ascending
        # eigenvalues, whereas eigvals may return complex values with tiny
        # imaginary noise that makes the sign test below unreliable.
        min_eig = np.linalg.eigvalsh(self.P_A)[0]
        if min_eig < -1e-10:
            print(f"警告: P_A不是正半定的，最小特征值: {min_eig}")
            # Shift the spectrum just enough to restore positive
            # semidefiniteness.
            self.P_A = self.P_A + (abs(min_eig) + 0.1) * np.eye(self.N)

    def _initialize_variables(self):
        """Allocate and zero-initialize all algorithm state."""
        # Dual variables y_i = [mu_i, lambda_i], stacked per node.
        self.y = np.zeros((self.N, self.m + self.p))

        # Primal variables: one d-vector per node.
        self.x = [np.zeros(self.d) for _ in range(self.N)]

        # Auxiliary (consensus-residual) variables.
        self.v = np.zeros((self.N, self.m + self.p))

        # Per-iteration performance history.
        self.history = {
            'objective_error': [],
            'constraint_violation': [],
            'feasibility_error': []
        }

    def _compute_objective_error(self):
        """Return |f(x^k) - f*| for the current iterate."""
        current_obj = 0
        for i in range(self.N):
            current_obj += 0.5 * self.x[i].T @ self.problem.P[i] @ self.x[i]
            current_obj += self.problem.Q[i].T @ self.x[i]

        return abs(self.problem.optimal_value - current_obj)

    def _compute_constraint_violation(self):
        """Return the summed violation of local and coupled constraints."""
        violation = 0

        # Local ball-constraint violations.
        for i in range(self.N):
            local_viol = max(0, np.linalg.norm(self.x[i] - self.problem.a[i])**2 - self.problem.c[i])
            violation += local_viol

        # Coupled inequality violation.
        global_ineq = 0
        for i in range(self.N):
            global_ineq += np.linalg.norm(self.x[i] - self.problem.a_prime[i])**2
        violation += max(0, global_ineq - sum(self.problem.c_prime))

        # Coupled equality violation (norm of the residual).
        global_eq = np.zeros(self.p)
        for i in range(self.N):
            global_eq += self.problem.B[i] @ self.x[i]
        violation += np.linalg.norm(global_eq)

        return violation

    def _compute_y_tilde(self, i):
        """Return y~_i = d'_i * y_i - rho * (L y)_i - v_i for node i.

        Factored out because this expression was previously duplicated in
        run_iteration and _update_dual_variables, risking divergence.
        """
        d_prime_i = self.P_D[i, i]
        # self.L[i] @ self.y == sum_j L[i, j] * y[j]
        return d_prime_i * self.y[i] - self.rho * (self.L[i] @ self.y) - self.v[i]

    def _solve_local_subproblem(self, i, y_tilde_i):
        """Approximately solve node i's local subproblem.

        Minimizes f_i(x_i) + (1/2d'_i)||[mu~_i + g_i(x_i)]_+||^2
                + (1/2d'_i)||lambda~_i + h_i(x_i)||^2
                + (alpha/2)||x_i - x_i^k||^2
        over the local ball X_i, via projected gradient descent
        (simplified fixed-step inner solver).
        """
        mu_tilde_i = y_tilde_i[:self.m]
        lambda_tilde_i = y_tilde_i[self.m:]

        d_prime_i = self.P_D[i, i]

        x_new = self.x[i].copy()
        step_size = 0.01

        for _ in range(50):  # inner iterations
            # Gradient of the local objective f_i.
            grad = self.problem.P[i] @ x_new + self.problem.Q[i]

            # Proximal-term gradient (Pro-DUCA only).
            if self.alpha > 0:
                grad += self.alpha * (x_new - self.x[i])

            # Penalty gradient for g_i(x_i) = ||x_i - a'_i||^2 - c'_i.
            g_grad = 2 * (x_new - self.problem.a_prime[i])
            mu_plus_g = mu_tilde_i + np.linalg.norm(x_new - self.problem.a_prime[i])**2 - self.problem.c_prime[i]
            # Sum the positive parts so the activity test is well-defined for
            # any m (the original `if array > 0` truthiness only worked for m=1).
            active = np.maximum(mu_plus_g, 0.0).sum()
            if active > 0:
                grad += (1 / d_prime_i) * active * g_grad

            # Penalty gradient for h_i(x_i) = B_i x_i.
            h_i = self.problem.B[i] @ x_new
            grad += (1 / d_prime_i) * self.problem.B[i].T @ (lambda_tilde_i + h_i)

            # Gradient step.
            x_new = x_new - step_size * grad

            # Project back onto the local ball X_i if the step left it.
            if np.linalg.norm(x_new - self.problem.a[i])**2 > self.problem.c[i]:
                direction = x_new - self.problem.a[i]
                x_new = self.problem.a[i] + np.sqrt(self.problem.c[i]) * direction / np.linalg.norm(direction)

        return x_new

    def _update_dual_variables(self, i):
        """Dual update for node i: project mu onto R_+, scale by d'_i."""
        d_prime_i = self.P_D[i, i]
        y_tilde_i = self._compute_y_tilde(i)

        mu_tilde_i = y_tilde_i[:self.m]
        lambda_tilde_i = y_tilde_i[self.m:]

        # Constraint values at the freshly updated primal iterate.
        g_i = np.array([np.linalg.norm(self.x[i] - self.problem.a_prime[i])**2 - self.problem.c_prime[i]])
        h_i = self.problem.B[i] @ self.x[i]

        # mu is projected onto the nonnegative orthant; lambda is free.
        new_mu = np.maximum(0, mu_tilde_i + g_i)
        new_lambda = lambda_tilde_i + h_i

        self.y[i] = np.concatenate([new_mu, new_lambda]) / d_prime_i

    def _update_auxiliary_variables(self, i):
        """Auxiliary update: accumulate the weighted dual disagreement."""
        self.v[i] = self.v[i] + self.rho * (self.L[i] @ self.y)

    def run_iteration(self):
        """Run one DUCA iteration: primal, dual, then auxiliary updates."""
        # Step 1: primal updates.
        for i in range(self.N):
            y_tilde_i = self._compute_y_tilde(i)
            self.x[i] = self._solve_local_subproblem(i, y_tilde_i)

        # Step 2: dual updates.
        for i in range(self.N):
            self._update_dual_variables(i)

        # Step 3: auxiliary updates.
        for i in range(self.N):
            self._update_auxiliary_variables(i)

        # Record performance metrics for this iterate.
        self.history['objective_error'].append(self._compute_objective_error())
        self.history['constraint_violation'].append(self._compute_constraint_violation())

    def run(self, max_iterations=1000):
        """Run the full algorithm and return the recorded history."""
        print(f"运行 {self.algorithm_type}，最大迭代次数: {max_iterations}")

        for k in range(max_iterations):
            self.run_iteration()

            if (k + 1) % 100 == 0:
                obj_err = self.history['objective_error'][-1]
                const_viol = self.history['constraint_violation'][-1]
                print(f"  迭代 {k+1}: 目标误差 = {obj_err:.6f}, 约束违反 = {const_viol:.6f}")

        return self.history

class BaselineAlgorithms:
    """Baseline algorithm implementations used for comparison."""

    @staticmethod
    def augmented_lagrangian_tracking(problem, graph, max_iterations=1000):
        """Augmented Lagrangian Tracking (ALT), simplified implementation.

        Parameters:
        problem: problem-data object (ProblemGenerator-compatible)
        graph: graph object exposing neighbors(i)
        max_iterations: number of outer iterations

        Returns:
        history dict with per-iteration 'objective_error' and
        'constraint_violation' lists.
        """
        print("运行 Augmented Lagrangian Tracking (ALT)")

        N = problem.N
        d = problem.d
        m = problem.m

        # Primal and dual iterates (y_i = [mu_i, lambda_i]).
        # NOTE: the original also built an unused Laplacian here; removed.
        x = [np.zeros(d) for _ in range(N)]
        y = np.zeros((N, m + problem.p))

        history = {'objective_error': [], 'constraint_violation': []}

        rho = 1.0
        step_size = 0.01

        for k in range(max_iterations):
            # Primal update: gradient descent on the augmented Lagrangian.
            for i in range(N):
                grad = problem.P[i] @ x[i] + problem.Q[i]

                # Coupled-constraint values at the current iterate.
                g_i = np.array([np.linalg.norm(x[i] - problem.a_prime[i])**2 - problem.c_prime[i]])
                h_i = problem.B[i] @ x[i]

                mu_i = y[i][:m]
                lambda_i = y[i][m:]

                # Constraint gradients: dg/dx = 2(x - a').
                grad_g = 2 * (x[i] - problem.a_prime[i])

                grad += mu_i[0] * grad_g + rho * max(0, g_i[0]) * grad_g
                # B^T lambda + rho * B^T (B x). The original computed
                # `lambda_i.T @ grad_h` with grad_h = B.T, a (p,) @ (d,p)
                # product that raises ValueError whenever p != d.
                grad += problem.B[i].T @ lambda_i + rho * (problem.B[i].T @ h_i)

                x[i] = x[i] - step_size * grad

                # Project back onto the local ball constraint.
                if np.linalg.norm(x[i] - problem.a[i])**2 > problem.c[i]:
                    direction = x[i] - problem.a[i]
                    x[i] = problem.a[i] + np.sqrt(problem.c[i]) * direction / np.linalg.norm(direction)

            # Dual update with neighbor averaging (consensus step).
            for i in range(N):
                g_i = np.array([np.linalg.norm(x[i] - problem.a_prime[i])**2 - problem.c_prime[i]])
                h_i = problem.B[i] @ x[i]

                neighbor_avg = np.mean([y[j] for j in graph.neighbors(i)] + [y[i]], axis=0)
                y[i] = neighbor_avg

                # Gradient-ascent dual step; mu stays in the nonnegative orthant.
                y[i][:m] = np.maximum(0, y[i][:m] + rho * g_i)
                y[i][m:] = y[i][m:] + rho * h_i

            # Performance metrics.
            obj_error = abs(problem.optimal_value - sum([
                0.5 * x[i].T @ problem.P[i] @ x[i] + problem.Q[i].T @ x[i]
                for i in range(N)
            ]))

            const_viol = 0
            for i in range(N):
                const_viol += max(0, np.linalg.norm(x[i] - problem.a[i])**2 - problem.c[i])

            global_ineq = sum([np.linalg.norm(x[i] - problem.a_prime[i])**2 for i in range(N)])
            const_viol += max(0, global_ineq - sum(problem.c_prime))

            global_eq = sum([problem.B[i] @ x[i] for i in range(N)])
            const_viol += np.linalg.norm(global_eq)

            history['objective_error'].append(obj_error)
            history['constraint_violation'].append(const_viol)

            if (k + 1) % 100 == 0:
                print(f"  迭代 {k+1}: 目标误差 = {obj_error:.6f}, 约束违反 = {const_viol:.6f}")

        return history

    @staticmethod
    def distributed_subgradient(problem, graph, max_iterations=1000):
        """Distributed subgradient method with diminishing step size.

        Parameters and return value match augmented_lagrangian_tracking.
        """
        print("运行 Distributed Subgradient Method")

        N = problem.N
        d = problem.d

        x = [np.zeros(d) for _ in range(N)]
        history = {'objective_error': [], 'constraint_violation': []}

        step_size = 0.1

        for k in range(max_iterations):
            # Subgradient step per node.
            for i in range(N):
                subgrad = problem.P[i] @ x[i] + problem.Q[i]

                # Consensus term pulling toward the neighborhood average.
                neighbor_avg = np.mean([x[j] for j in graph.neighbors(i)] + [x[i]], axis=0)
                subgrad += 0.1 * (x[i] - neighbor_avg)

                # Diminishing 1/sqrt(k+1) step.
                x[i] = x[i] - step_size / np.sqrt(k + 1) * subgrad

                # Project back onto the local ball constraint.
                if np.linalg.norm(x[i] - problem.a[i])**2 > problem.c[i]:
                    direction = x[i] - problem.a[i]
                    x[i] = problem.a[i] + np.sqrt(problem.c[i]) * direction / np.linalg.norm(direction)

            # Performance metrics.
            obj_error = abs(problem.optimal_value - sum([
                0.5 * x[i].T @ problem.P[i] @ x[i] + problem.Q[i].T @ x[i]
                for i in range(N)
            ]))

            const_viol = 0
            for i in range(N):
                const_viol += max(0, np.linalg.norm(x[i] - problem.a[i])**2 - problem.c[i])

            global_ineq = sum([np.linalg.norm(x[i] - problem.a_prime[i])**2 for i in range(N)])
            const_viol += max(0, global_ineq - sum(problem.c_prime))

            global_eq = sum([problem.B[i] @ x[i] for i in range(N)])
            const_viol += np.linalg.norm(global_eq)

            history['objective_error'].append(obj_error)
            history['constraint_violation'].append(const_viol)

            if (k + 1) % 100 == 0:
                print(f"  迭代 {k+1}: 目标误差 = {obj_error:.6f}, 约束违反 = {const_viol:.6f}")

        return history

def plot_results(results, save_path=None):
    """Plot the experiment results, reproducing Figure 1 of the paper.

    Parameters:
    results: dict mapping algorithm name -> history dict containing
             'objective_error' and 'constraint_violation' lists
    save_path: optional file path; when given the figure is also saved there
    """
    fig, ((ax1, ax2), (ax3, ax4)) = plt.subplots(2, 2, figsize=(15, 12))

    def _curve(ax, name, metric, style, **kwargs):
        # Derive the x-axis from the recorded history length instead of
        # hard-coding range(1, 1001): the original broke for any run whose
        # max_iterations differed from 1000.
        series = results[name][metric]
        ax.semilogy(range(1, len(series) + 1), series, style, **kwargs)

    # Figure 1(a): objective error comparison (top-left).
    _curve(ax1, 'DUCA-I', 'objective_error', 'r-', linewidth=2, label='DUCA-I')
    _curve(ax1, 'DUCA-PEXTRA', 'objective_error', 'g--', linewidth=2, label='DUCA-PEXTRA')
    _curve(ax1, 'DUCA-PGC', 'objective_error', 'b-.', linewidth=2, label='DUCA-PGC')
    _curve(ax1, 'DUCA-DPGA', 'objective_error', 'm:', linewidth=2, label='DUCA-DPGA')
    _curve(ax1, 'DUCA-dist.ADMM', 'objective_error', 'c-', linewidth=2, label='DUCA-dist.ADMM')
    _curve(ax1, 'ALT', 'objective_error', 'k--', linewidth=2, label='ALT')
    _curve(ax1, 'Distributed_Subgradient', 'objective_error', 'orange', linewidth=2, label='Dist. Subgradient')

    ax1.set_xlabel('迭代次数')
    ax1.set_ylabel('目标函数误差')
    ax1.set_title('(a) 目标函数误差收敛性')
    ax1.legend()
    ax1.grid(True, alpha=0.3)

    # Figure 1(b): constraint violation comparison (top-right).
    _curve(ax2, 'DUCA-I', 'constraint_violation', 'r-', linewidth=2, label='DUCA-I')
    _curve(ax2, 'DUCA-PEXTRA', 'constraint_violation', 'g--', linewidth=2, label='DUCA-PEXTRA')
    _curve(ax2, 'DUCA-PGC', 'constraint_violation', 'b-.', linewidth=2, label='DUCA-PGC')
    _curve(ax2, 'DUCA-DPGA', 'constraint_violation', 'm:', linewidth=2, label='DUCA-DPGA')
    _curve(ax2, 'DUCA-dist.ADMM', 'constraint_violation', 'c-', linewidth=2, label='DUCA-dist.ADMM')
    _curve(ax2, 'ALT', 'constraint_violation', 'k--', linewidth=2, label='ALT')
    _curve(ax2, 'Distributed_Subgradient', 'constraint_violation', 'orange', linewidth=2, label='Dist. Subgradient')

    ax2.set_xlabel('迭代次数')
    ax2.set_ylabel('约束违反度')
    ax2.set_title('(b) 约束违反度收敛性')
    ax2.legend()
    ax2.grid(True, alpha=0.3)

    # Figure 1(c): Pro-DUCA objective error (bottom-left).
    _curve(ax3, 'DUCA-I', 'objective_error', 'r--', linewidth=2, alpha=0.7, label='DUCA-I')
    _curve(ax3, 'Pro-DUCA-I-0.1', 'objective_error', 'r-', linewidth=2, label='Pro-DUCA-I (α=0.1)')
    _curve(ax3, 'Pro-DUCA-I-0.5', 'objective_error', 'r-', linewidth=2, alpha=0.5, label='Pro-DUCA-I (α=0.5)')

    _curve(ax3, 'DUCA-PGC', 'objective_error', 'b--', linewidth=2, alpha=0.7, label='DUCA-PGC')
    _curve(ax3, 'Pro-DUCA-PGC-0.1', 'objective_error', 'b-', linewidth=2, label='Pro-DUCA-PGC (α=0.1)')
    _curve(ax3, 'Pro-DUCA-PGC-0.5', 'objective_error', 'b-', linewidth=2, alpha=0.5, label='Pro-DUCA-PGC (α=0.5)')

    ax3.set_xlabel('迭代次数')
    ax3.set_ylabel('目标函数误差')
    ax3.set_title('(c) Pro-DUCA目标函数误差（不同α值）')
    ax3.legend()
    ax3.grid(True, alpha=0.3)

    # Figure 1(d): Pro-DUCA constraint violation (bottom-right).
    _curve(ax4, 'DUCA-I', 'constraint_violation', 'r--', linewidth=2, alpha=0.7, label='DUCA-I')
    _curve(ax4, 'Pro-DUCA-I-0.1', 'constraint_violation', 'r-', linewidth=2, label='Pro-DUCA-I (α=0.1)')
    _curve(ax4, 'Pro-DUCA-I-0.5', 'constraint_violation', 'r-', linewidth=2, alpha=0.5, label='Pro-DUCA-I (α=0.5)')

    _curve(ax4, 'DUCA-PGC', 'constraint_violation', 'b--', linewidth=2, alpha=0.7, label='DUCA-PGC')
    _curve(ax4, 'Pro-DUCA-PGC-0.1', 'constraint_violation', 'b-', linewidth=2, label='Pro-DUCA-PGC (α=0.1)')
    _curve(ax4, 'Pro-DUCA-PGC-0.5', 'constraint_violation', 'b-', linewidth=2, alpha=0.5, label='Pro-DUCA-PGC (α=0.5)')

    ax4.set_xlabel('迭代次数')
    ax4.set_ylabel('约束违反度')
    ax4.set_title('(d) Pro-DUCA约束违反度（不同α值）')
    ax4.legend()
    ax4.grid(True, alpha=0.3)

    plt.tight_layout()

    if save_path:
        plt.savefig(save_path, dpi=300, bbox_inches='tight')
        print(f"图表已保存到: {save_path}")

    plt.show()

def main():
    """Entry point: reproduce the Figure 1 experiments of the paper."""
    banner = "=" * 60
    print(banner)
    print("论文实验结果复现")
    print("《A Unified Dual Consensus Approach to Distributed Optimization")
    print("with Globally-Coupled Constraints》")
    print("目标：复现 Figure 1")
    print(banner)

    # Step 1: build the problem instance and its centralized reference optimum.
    print("\n1. 生成问题实例...")
    prob = ProblemGenerator(N=20, d=3, m=1, p=2, seed=42)

    print("   求解集中式问题获得最优值...")
    if not prob.solve_centralized():
        print("   集中式求解失败！")
        return
    print(f"   最优值: {prob.optimal_value:.6f}")

    # Step 2: communication topology.
    print("\n2. 生成网络拓扑...")
    net = NetworkGenerator.generate_connected_graph(N=20, num_edges=40, seed=42)
    print(f"   生成 {net.number_of_nodes()} 个节点，{net.number_of_edges()} 条边的连通图")

    # Step 3: run every algorithm on the same instance and topology.
    print("\n3. 运行算法对比实验...")
    results = {}

    # Plain DUCA variants.
    for variant in ('DUCA-I', 'DUCA-PEXTRA', 'DUCA-PGC', 'DUCA-DPGA', 'DUCA-dist.ADMM'):
        print(f"\n运行 {variant}...")
        solver = DUCAAlgorithm(prob, net, algorithm_type=variant, rho=1.0, alpha=0.0)
        results[variant] = solver.run(max_iterations=1000)

    # Proximal (Pro-DUCA) variants with different alpha values.
    for label, base_type, alpha in (
        ('Pro-DUCA-I-0.1', 'DUCA-I', 0.1),
        ('Pro-DUCA-I-0.5', 'DUCA-I', 0.5),
        ('Pro-DUCA-PGC-0.1', 'DUCA-PGC', 0.1),
        ('Pro-DUCA-PGC-0.5', 'DUCA-PGC', 0.5),
    ):
        print(f"\n运行 {label}...")
        solver = DUCAAlgorithm(prob, net, algorithm_type=base_type, rho=1.0, alpha=alpha)
        results[label] = solver.run(max_iterations=1000)

    # Baseline algorithms.
    print("\n运行基线算法...")
    results['ALT'] = BaselineAlgorithms.augmented_lagrangian_tracking(prob, net, max_iterations=1000)
    results['Distributed_Subgradient'] = BaselineAlgorithms.distributed_subgradient(prob, net, max_iterations=1000)

    # Step 4: plot.
    print("\n4. 绘制实验结果...")
    plot_results(results, save_path='paper_reproduction_figure1.png')

    # Step 5: numeric summary table.
    print("\n5. 实验结果总结:")
    divider = "-" * 60
    print(divider)
    print(f"{'算法':<20} {'最终目标误差':<15} {'最终约束违反':<15}")
    print(divider)

    for name, history in results.items():
        # Show only the main results: skip Pro-DUCA rows except alpha=0.1.
        if 'Pro-DUCA' in name and '0.1' not in name:
            continue
        print(f"{name:<20} {history['objective_error'][-1]:<15.6f} "
              f"{history['constraint_violation'][-1]:<15.6f}")

    print(divider)
    print("\n实验完成！图表已生成并保存。")

# Script entry guard: run the full reproduction experiment only when this
# file is executed directly, not when it is imported as a module.
if __name__ == "__main__":
    main()