"""
分布式算法基类

定义了分布式优化算法的通用接口和基本功能
"""

import numpy as np
from abc import ABC, abstractmethod
from typing import Dict, List, Optional, Tuple, Any
from tqdm import tqdm
import time


class BaseDistributedAlgorithm(ABC):
    """Abstract base class for distributed optimization algorithms.

    Holds the shared state (primal/dual/auxiliary variables, per-iteration
    history, convergence settings) and drives the main iteration loop in
    :meth:`solve`.  Concrete algorithms implement the four abstract update
    hooks (``_initialize_auxiliary_variables``, ``_update_primal_variables``,
    ``_update_dual_variables``, ``_update_auxiliary_variables``).
    """

    def __init__(self, problem, network, **kwargs):
        """Set up shared state for a distributed algorithm.

        Args:
            problem: distributed optimization problem instance.  Must expose
                ``dimensions`` (per-node variable sizes), ``m`` (number of
                inequality constraints), ``p`` (number of equality
                constraints), ``evaluate_objective(x)`` and
                ``evaluate_constraints(x)``.
            network: network topology instance exposing ``num_nodes``.
            **kwargs: algorithm options —
                ``max_iterations`` (int, default 1000),
                ``tolerance`` (float, default 1e-6),
                ``verbose`` (bool, default True; enables tqdm bar and prints).
        """
        self.problem = problem
        self.network = network
        self.num_nodes = network.num_nodes

        # Loop-control options.
        self.max_iterations = kwargs.get('max_iterations', 1000)
        self.tolerance = kwargs.get('tolerance', 1e-6)
        self.verbose = kwargs.get('verbose', True)

        # Per-node variables, keyed by node index (filled by initialize()).
        self.x = {}  # primal variables
        self.y = {}  # dual variables
        self.z = {}  # auxiliary variables (algorithm-specific)

        # Per-iteration history, appended to by solve().
        self.history = {
            'objective_values': [],
            'primal_residuals': [],
            'dual_residuals': [],
            'constraint_violations': [],
            'consensus_errors': [],
            'iteration_times': []
        }

        # Problem constraint counts (dual variables have length m + p).
        self.m = problem.m  # number of inequality constraints
        self.p = problem.p  # number of equality constraints

    def initialize(self, x0: Optional[Dict[int, np.ndarray]] = None,
                  y0: Optional[Dict[int, np.ndarray]] = None):
        """Initialize algorithm variables.

        Args:
            x0: optional initial primal variables (node index -> array);
                defaults to zeros of each node's dimension.
            y0: optional initial dual variables (node index -> array);
                defaults to zeros of length ``m + p`` per node.

        Provided arrays are copied so subsequent in-place updates never
        mutate the caller's data (a shallow ``dict.copy`` would alias them).
        """
        # Primal variables.
        if x0 is None:
            self.x = {i: np.zeros(self.problem.dimensions[i])
                      for i in range(self.num_nodes)}
        else:
            self.x = {i: np.array(v, copy=True) for i, v in x0.items()}

        # Dual variables.
        if y0 is None:
            self.y = {i: np.zeros(self.m + self.p)
                      for i in range(self.num_nodes)}
        else:
            self.y = {i: np.array(v, copy=True) for i, v in y0.items()}

        # Algorithm-specific auxiliary state (subclass hook).
        self._initialize_auxiliary_variables()

    @abstractmethod
    def _initialize_auxiliary_variables(self):
        """Initialize algorithm-specific auxiliary variables."""
        pass

    @abstractmethod
    def _update_primal_variables(self):
        """Update the primal variables for one iteration."""
        pass

    @abstractmethod
    def _update_dual_variables(self):
        """Update the dual variables for one iteration."""
        pass

    @abstractmethod
    def _update_auxiliary_variables(self):
        """Update the auxiliary variables for one iteration."""
        pass

    def solve(self, x0: Optional[Dict[int, np.ndarray]] = None,
             y0: Optional[Dict[int, np.ndarray]] = None) -> Dict[str, Any]:
        """Run the algorithm until convergence or ``max_iterations``.

        Args:
            x0: optional initial primal variables.
            y0: optional initial dual variables.

        Returns:
            Result dict with keys:
                - ``solution``: final primal variables (node index -> array)
                - ``dual_solution``: final dual variables
                - ``objective_value``: objective at the final iterate
                - ``iterations``: number of iterations actually performed
                - ``converged``: whether the tolerance test was met
                - ``history``: per-iteration metric history
        """
        self.initialize(x0, y0)

        # Progress bar only in verbose mode; closed in the finally block so
        # an early break on convergence does not leave a dangling bar.
        pbar = tqdm(range(self.max_iterations), desc="DUCA算法") if self.verbose else None

        converged = False
        actual_iterations = 0
        metrics = None  # last computed metrics; stays None if the loop never runs

        try:
            for k in (pbar if pbar is not None else range(self.max_iterations)):
                start_time = time.time()

                # Snapshot the iterate for the convergence test.
                x_old = {i: self.x[i].copy() for i in range(self.num_nodes)}
                y_old = {i: self.y[i].copy() for i in range(self.num_nodes)}

                # One full round of updates (subclass hooks).
                self._update_primal_variables()
                self._update_dual_variables()
                self._update_auxiliary_variables()

                metrics = self._compute_metrics()

                # Record history.
                self.history['objective_values'].append(metrics['objective_value'])
                self.history['primal_residuals'].append(metrics['primal_residual'])
                self.history['dual_residuals'].append(metrics['dual_residual'])
                self.history['constraint_violations'].append(metrics['constraint_violation'])
                self.history['consensus_errors'].append(metrics['consensus_error'])
                self.history['iteration_times'].append(time.time() - start_time)

                if pbar is not None:
                    pbar.set_postfix({
                        'obj': f"{metrics['objective_value']:.4e}",
                        'cons_viol': f"{metrics['constraint_violation']:.4e}",
                        'consensus': f"{metrics['consensus_error']:.4e}"
                    })

                if self._check_convergence(x_old, y_old, metrics):
                    converged = True
                    actual_iterations = k + 1
                    if self.verbose:
                        print(f"\n算法在 {actual_iterations} 次迭代后收敛")
                    break
        finally:
            if pbar is not None:
                pbar.close()

        if not converged:
            actual_iterations = self.max_iterations
            if self.verbose:
                print(f"\n算法达到最大迭代次数 {self.max_iterations}")

        # Guard against max_iterations <= 0: the original referenced
        # ``metrics`` unconditionally here, raising UnboundLocalError when
        # the loop body never executed.
        if metrics is None:
            metrics = self._compute_metrics()

        return {
            'solution': self.x,
            'dual_solution': self.y,
            'objective_value': metrics['objective_value'],
            'iterations': actual_iterations,
            'converged': converged,
            'history': self.history
        }

    def _compute_metrics(self) -> Dict[str, float]:
        """Compute performance metrics at the current iterate.

        Returns a dict with the objective value, the total constraint
        violation (positive parts of inequalities plus absolute equality
        residuals, summed), and the consensus error of the dual variables
        (root of summed squared deviations from the network average).

        NOTE: ``primal_residual`` and ``dual_residual`` are placeholders
        (always 0.0); subclasses that track them should override this.
        """
        obj_value = self.problem.evaluate_objective(self.x)

        # Constraint violation: g(x) <= 0 and h(x) == 0.
        g, h = self.problem.evaluate_constraints(self.x)
        constraint_violation = np.maximum(g, 0).sum() + np.abs(h).sum()

        # Consensus error: how far each node's dual variable is from the
        # network average.
        y_avg = self.get_consensus_dual()
        consensus_error = np.sqrt(sum(
            np.linalg.norm(self.y[i] - y_avg) ** 2
            for i in range(self.num_nodes)
        ))

        return {
            'objective_value': obj_value,
            'constraint_violation': constraint_violation,
            'primal_residual': 0.0,
            'dual_residual': 0.0,
            'consensus_error': consensus_error
        }

    def _check_convergence(self, x_old: Dict[int, np.ndarray],
                          y_old: Dict[int, np.ndarray],
                          metrics: Dict[str, float]) -> bool:
        """Return True when the iterate changes, constraint violation and
        consensus error are all below ``tolerance``.

        Args:
            x_old: primal variables from the previous iteration.
            y_old: dual variables from the previous iteration.
            metrics: metrics dict produced by :meth:`_compute_metrics`.
        """
        # Aggregate (Euclidean) change of the primal iterate across nodes.
        primal_change = np.sqrt(sum(
            np.linalg.norm(self.x[i] - x_old[i]) ** 2
            for i in range(self.num_nodes)
        ))

        # Aggregate change of the dual iterate across nodes.
        dual_change = np.sqrt(sum(
            np.linalg.norm(self.y[i] - y_old[i]) ** 2
            for i in range(self.num_nodes)
        ))

        return (primal_change < self.tolerance
                and dual_change < self.tolerance
                and metrics['constraint_violation'] < self.tolerance
                and metrics['consensus_error'] < self.tolerance)

    def get_average_solution(self) -> np.ndarray:
        """Return the global solution as one stacked vector.

        NOTE(review): despite the name, this concatenates each node's block
        into a single vector of length ``sum(problem.dimensions)`` — no
        averaging takes place.  Name kept for interface compatibility.
        """
        total_dim = sum(self.problem.dimensions)
        stacked = np.zeros(total_dim)

        offset = 0
        for i in range(self.num_nodes):
            dim_i = self.problem.dimensions[i]
            stacked[offset:offset + dim_i] = self.x[i]
            offset += dim_i

        return stacked

    def get_consensus_dual(self) -> np.ndarray:
        """Return the network-average dual variable (length ``m + p``)."""
        y_avg = np.zeros(self.m + self.p)
        for i in range(self.num_nodes):
            y_avg += self.y[i]
        return y_avg / self.num_nodes