"""
Week 7: 二阶优化方法实现
Second-Order Optimization Methods Implementation
"""

import numpy as np
import matplotlib.pyplot as plt
from typing import Callable
import matplotlib
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class NewtonOptimizer:
    """Newton's method optimizer.

    Iterates x <- x - H^{-1} g while the gradient norm exceeds the
    tolerance, falling back to a small gradient-descent step whenever
    the Hessian is not positive definite.
    """

    def __init__(self, max_iterations: int = 100, tolerance: float = 1e-6):
        self.max_iterations = max_iterations  # hard cap on iterations
        self.tolerance = tolerance            # gradient-norm stopping threshold
        self.history = []                     # iterates visited by the last run

    def optimize(self, objective_func: Callable, gradient_func: Callable,
                hessian_func: Callable, x0: np.ndarray, verbose: bool = False):
        """Run Newton's method from x0.

        Args:
            objective_func: callable returning f(x).
            gradient_func: callable returning the gradient of f at x.
            hessian_func: callable returning the Hessian of f at x.
            x0: starting point (not modified).
            verbose: print progress when True.

        Returns:
            Tuple of (final iterate, list of all iterates including x0).
        """
        current = x0.copy()
        self.history = [current.copy()]

        for iteration in range(self.max_iterations):
            grad = gradient_func(current)
            hess = hessian_func(current)

            # Converged once the gradient is numerically zero.
            if np.linalg.norm(grad) < self.tolerance:
                if verbose:
                    print(f"牛顿法在第{iteration+1}次迭代后收敛")
                break

            try:
                # Cholesky succeeds iff the Hessian is positive definite,
                # in which case the Newton step H^{-1} g is well-defined.
                np.linalg.cholesky(hess)
                step = np.linalg.solve(hess, grad)
            except np.linalg.LinAlgError:
                if verbose:
                    print(f"Hessian不正定，使用梯度下降步")
                # Indefinite Hessian: take a small gradient step instead.
                step = 0.01 * grad

            current = current - step
            self.history.append(current.copy())

            if verbose and (iteration % 10 == 0 or iteration < 5):
                obj_val = objective_func(current)
                grad_norm = np.linalg.norm(grad)
                print(f"迭代 {iteration+1}: f(x) = {obj_val:.6f}, ||∇f|| = {grad_norm:.6f}")

        return current, self.history

class BFGSOptimizer:
    """BFGS quasi-Newton optimizer.

    Maintains an approximation B_inv of the inverse Hessian, updated from
    step/gradient differences, so only first-order information is needed.
    """

    def __init__(self, max_iterations: int = 100, tolerance: float = 1e-6):
        self.max_iterations = max_iterations  # hard cap on iterations
        self.tolerance = tolerance            # gradient-norm stopping threshold
        self.history = []                     # iterates visited by the last run

    def optimize(self, objective_func: Callable, gradient_func: Callable, 
                x0: np.ndarray, verbose: bool = False):
        """Minimize objective_func starting from x0 using BFGS.

        Args:
            objective_func: callable returning f(x).
            gradient_func: callable returning the gradient of f at x.
            x0: starting point (not modified).
            verbose: print progress when True.

        Returns:
            Tuple of (final iterate, list of all iterates including x0).
        """
        x = x0.copy()
        n = len(x)

        # Start with the identity as the inverse-Hessian approximation.
        B_inv = np.eye(n)

        self.history = [x.copy()]
        grad_old = gradient_func(x)

        for i in range(self.max_iterations):
            # Converged once the gradient is numerically zero.
            if np.linalg.norm(grad_old) < self.tolerance:
                if verbose:
                    print(f"BFGS在第{i+1}次迭代后收敛")
                break

            # Quasi-Newton search direction.
            p = -B_inv @ grad_old

            # Backtracking line search; pass the already-computed gradient
            # so it is not redundantly re-evaluated inside the search.
            alpha = self.line_search(objective_func, gradient_func, x, p,
                                     grad_x=grad_old)

            x_new = x + alpha * p
            grad_new = gradient_func(x_new)

            # BFGS update quantities: step and gradient change.
            s = x_new - x
            y = grad_new - grad_old

            # Skip the update when the curvature condition s·y > 0 fails
            # (it would destroy positive definiteness) or s·y is ~0.
            if np.dot(s, y) > 1e-10:
                # Standard BFGS inverse-Hessian update:
                # B_inv <- (I - rho s yᵀ) B_inv (I - rho y sᵀ) + rho s sᵀ
                rho = 1.0 / np.dot(y, s)
                A1 = np.eye(n) - rho * np.outer(s, y)
                A2 = np.eye(n) - rho * np.outer(y, s)
                B_inv = A1 @ B_inv @ A2 + rho * np.outer(s, s)

            x = x_new
            grad_old = grad_new
            self.history.append(x.copy())

            if verbose and (i % 10 == 0 or i < 5):
                obj_val = objective_func(x)
                grad_norm = np.linalg.norm(grad_old)
                print(f"迭代 {i+1}: f(x) = {obj_val:.6f}, ||∇f|| = {grad_norm:.6f}")

        return x, self.history

    def line_search(self, objective_func: Callable, gradient_func: Callable, 
                   x: np.ndarray, p: np.ndarray, grad_x=None) -> float:
        """Simplified backtracking line search (Armijo condition).

        Args:
            objective_func: callable returning f(x).
            gradient_func: callable returning the gradient of f at x;
                only called when grad_x is not supplied.
            x: current point.
            p: search direction.
            grad_x: optional precomputed gradient at x — avoids a redundant
                gradient evaluation when the caller already has it.

        Returns:
            Step size alpha, found by halving from 1.0 (at most 20 times).
        """
        alpha = 1.0
        c1 = 1e-4  # Armijo sufficient-decrease parameter

        f_x = objective_func(x)
        if grad_x is None:
            grad_x = gradient_func(x)
        grad_p = np.dot(grad_x, p)  # directional derivative along p

        # Halve alpha until the Armijo condition holds.
        for _ in range(20):
            if objective_func(x + alpha * p) <= f_x + c1 * alpha * grad_p:
                break
            alpha *= 0.5

        return alpha

class QuadraticFunction:
    """Quadratic test problem f(x) = 0.5 * xᵀ A x + bᵀ x + c."""

    def __init__(self, A: np.ndarray, b: np.ndarray, c: float = 0):
        """Store the quadratic's coefficients.

        Args:
            A: matrix of the quadratic term (the Hessian).
            b: linear-term vector.
            c: constant offset.
        """
        self.A = A
        self.b = b
        self.c = c

    def objective(self, x: np.ndarray) -> float:
        """Evaluate f(x)."""
        quadratic_term = 0.5 * x.T @ self.A @ x
        linear_term = self.b.T @ x
        return quadratic_term + linear_term + self.c

    def gradient(self, x: np.ndarray) -> np.ndarray:
        """Gradient of f: A x + b."""
        return self.A @ x + self.b

    def hessian(self, x: np.ndarray) -> np.ndarray:
        """Hessian of f, which is the constant matrix A."""
        return self.A

    def analytical_solution(self) -> np.ndarray:
        """Closed-form stationary point: the solution of A x = -b."""
        return -np.linalg.solve(self.A, self.b)

def demonstrate_newton_methods():
    """Build a random 2-D positive-definite quadratic and compare Newton's
    method, BFGS, and gradient descent on it, printing progress and
    plotting the trajectories."""
    print("=== 二阶优化方法演示 ===\n")

    # 1. Build the test problem (seeded for reproducibility).
    print("1. 创建二次函数测试问题")
    np.random.seed(42)
    dim = 2

    # MᵀM is PSD; adding the identity makes it strictly positive definite.
    rand_mat = np.random.randn(dim, dim)
    A = rand_mat.T @ rand_mat + np.eye(dim)
    b = np.random.randn(dim)

    problem = QuadraticFunction(A, b)

    print(f"   A矩阵条件数: {np.linalg.cond(A):.2f}")
    print(f"   解析解: {problem.analytical_solution()}")

    # Shared starting point for all three methods.
    start = np.array([3.0, 3.0])

    # 2. Newton's method.
    print("\n2. 牛顿法优化")
    newton = NewtonOptimizer(max_iterations=50)
    x_newton, trace_newton = newton.optimize(
        problem.objective, problem.gradient, problem.hessian, start, verbose=True
    )

    # 3. BFGS.
    print("\n3. BFGS优化")
    bfgs = BFGSOptimizer(max_iterations=50)
    x_bfgs, trace_bfgs = bfgs.optimize(
        problem.objective, problem.gradient, start, verbose=True
    )

    # 4. Plain gradient descent as a first-order baseline.
    print("\n4. 梯度下降优化")
    x_gd, trace_gd = gradient_descent_comparison(problem, start)

    # Side-by-side visualization of all three runs.
    visualize_optimization_comparison(
        problem, [trace_newton, trace_bfgs, trace_gd],
        ['牛顿法', 'BFGS', '梯度下降']
    )

def gradient_descent_comparison(quad_func: QuadraticFunction, x0: np.ndarray, 
                              learning_rate: float = 0.1, max_iterations: int = 100):
    """梯度下降对比实现"""
    x = x0.copy()
    history = [x.copy()]
    
    for i in range(max_iterations):
        grad = quad_func.gradient(x)
        
        if np.linalg.norm(grad) < 1e-6:
            print(f"梯度下降在第{i+1}次迭代后收敛")
            break
            
        x = x - learning_rate * grad
        history.append(x.copy())
        
        if i < 5 or i % 20 == 0:
            obj_val = quad_func.objective(x)
            grad_norm = np.linalg.norm(grad)
            print(f"迭代 {i+1}: f(x) = {obj_val:.6f}, ||∇f|| = {grad_norm:.6f}")
    
    return x, history

def visualize_optimization_comparison(quad_func: QuadraticFunction, histories: list[list], 
                                    method_names: list[str]):
    """Plot a 2x2 comparison of optimization runs.

    Panels: (1) iterate trajectories over the objective's contour lines,
    (2) objective value per iteration, (3) gradient norm per iteration,
    (4) distance to the analytical optimum per iteration — panels 2-4 on a
    log y-axis.

    Args:
        quad_func: the quadratic problem (provides objective, gradient,
            and the closed-form solution).
        histories: one list of iterates per method, in plotting order.
        method_names: display label for each history, same order.
    """
    fig, axes = plt.subplots(2, 2, figsize=(15, 12))
    
    # 1. Optimization trajectories.
    ax1 = axes[0, 0]
    
    # Evaluate the objective on a 100x100 grid for the contour plot.
    x_range = np.linspace(-1, 4, 100)
    y_range = np.linspace(-1, 4, 100)
    X, Y = np.meshgrid(x_range, y_range)
    Z = np.zeros_like(X)
    
    for i in range(X.shape[0]):
        for j in range(X.shape[1]):
            point = np.array([X[i, j], Y[i, j]])
            Z[i, j] = quad_func.objective(point)
    
    contour = ax1.contour(X, Y, Z, levels=20, alpha=0.6)
    ax1.clabel(contour, inline=True, fontsize=8)
    
    # One fixed color/marker per method (supports up to three histories).
    colors = ['red', 'blue', 'green']
    markers = ['o', 's', '^']
    
    for i, (history, name, color, marker) in enumerate(zip(histories, method_names, colors, markers)):
        history_array = np.array(history)
        ax1.plot(history_array[:, 0], history_array[:, 1], 
                color=color, marker=marker, markersize=4, 
                linewidth=2, label=name, alpha=0.8)
        
        # Mark start (legend label only once) and end points of each path.
        ax1.plot(history_array[0, 0], history_array[0, 1], 
                marker='*', markersize=15, color='black', label='起始点' if i == 0 else "")
        ax1.plot(history_array[-1, 0], history_array[-1, 1], 
                marker='*', markersize=12, color=color)
    
    # Mark the true (closed-form) optimum.
    true_opt = quad_func.analytical_solution()
    ax1.plot(true_opt[0], true_opt[1], marker='*', markersize=15, 
            color='gold', label='真实最优解')
    
    ax1.set_xlabel('x₁')
    ax1.set_ylabel('x₂')
    ax1.set_title('优化轨迹比较')
    ax1.legend()
    ax1.grid(True, alpha=0.3)
    
    # 2. Objective value per iteration (log scale).
    ax2 = axes[0, 1]
    
    for history, name, color in zip(histories, method_names, colors):
        obj_values = [quad_func.objective(x) for x in history]
        ax2.plot(obj_values, color=color, linewidth=2, label=name)
    
    ax2.set_xlabel('迭代次数')
    ax2.set_ylabel('目标函数值')
    ax2.set_title('收敛曲线')
    ax2.legend()
    ax2.grid(True, alpha=0.3)
    ax2.set_yscale('log')
    
    # 3. Gradient norm per iteration (log scale).
    ax3 = axes[1, 0]
    
    for history, name, color in zip(histories, method_names, colors):
        grad_norms = [np.linalg.norm(quad_func.gradient(x)) for x in history]
        ax3.plot(grad_norms, color=color, linewidth=2, label=name)
    
    ax3.set_xlabel('迭代次数')
    ax3.set_ylabel('梯度范数')
    ax3.set_title('梯度范数变化')
    ax3.legend()
    ax3.grid(True, alpha=0.3)
    ax3.set_yscale('log')
    
    # 4. Distance to the optimum per iteration (log scale).
    ax4 = axes[1, 1]
    
    # Measure every iterate against the closed-form solution.
    true_opt = quad_func.analytical_solution()
    
    for history, name, color in zip(histories, method_names, colors):
        distances = [np.linalg.norm(x - true_opt) for x in history]
        ax4.plot(distances, color=color, linewidth=2, label=name)
    
    ax4.set_xlabel('迭代次数')
    ax4.set_ylabel('到最优解的距离')
    ax4.set_title('收敛速度比较')
    ax4.legend()
    ax4.grid(True, alpha=0.3)
    ax4.set_yscale('log')
    
    plt.tight_layout()
    plt.show()

def analyze_condition_number_effect():
    """Compare how the Hessian's condition number affects the number of
    iterations Newton's method, BFGS, and gradient descent need on a
    2-D quadratic, then show the counts as a grouped bar chart."""
    print("\n=== 条件数影响分析 ===\n")
    
    condition_numbers = [1, 10, 100, 1000]
    results = {}
    
    for cond_num in condition_numbers:
        print(f"分析条件数 = {cond_num}")
        
        # Build A = Q diag(cond_num, 1) Qᵀ so that cond(A) == cond_num.
        eigenvals = np.array([cond_num, 1.0])
        Q = np.array([[1/np.sqrt(2), 1/np.sqrt(2)], 
                     [1/np.sqrt(2), -1/np.sqrt(2)]])  # orthogonal (Hadamard-like) matrix
        A = Q @ np.diag(eigenvals) @ Q.T
        b = np.array([1.0, 1.0])
        
        quad_func = QuadraticFunction(A, b)
        x0 = np.array([3.0, 3.0])
        
        # Newton's method
        newton_opt = NewtonOptimizer(max_iterations=20)
        _, history_newton = newton_opt.optimize(
            quad_func.objective, quad_func.gradient, quad_func.hessian, x0
        )
        
        # BFGS
        bfgs_opt = BFGSOptimizer(max_iterations=50)
        _, history_bfgs = bfgs_opt.optimize(
            quad_func.objective, quad_func.gradient, x0
        )
        
        # Gradient descent with a small fixed step (stays stable at high cond numbers)
        _, history_gd = gradient_descent_comparison(quad_func, x0, learning_rate=0.01)
        
        # NOTE(review): each history includes the starting point, so these
        # counts are iterations + 1, and they are capped by each method's
        # max_iterations (a method that hit the cap did not necessarily converge).
        results[cond_num] = {
            'newton_iterations': len(history_newton),
            'bfgs_iterations': len(history_bfgs),
            'gd_iterations': len(history_gd)
        }
        
        print(f"  牛顿法迭代次数: {len(history_newton)}")
        print(f"  BFGS迭代次数: {len(history_bfgs)}")
        print(f"  梯度下降迭代次数: {len(history_gd)}\n")
    
    # Grouped bar chart of iteration counts vs condition number (log y-axis).
    plt.figure(figsize=(12, 4))
    
    cond_nums = list(results.keys())
    newton_iters = [results[c]['newton_iterations'] for c in cond_nums]
    bfgs_iters = [results[c]['bfgs_iterations'] for c in cond_nums]
    gd_iters = [results[c]['gd_iterations'] for c in cond_nums]
    
    x_pos = np.arange(len(cond_nums))
    width = 0.25  # bar width; three bars per condition-number group
    
    plt.bar(x_pos - width, newton_iters, width, label='牛顿法', alpha=0.8)
    plt.bar(x_pos, bfgs_iters, width, label='BFGS', alpha=0.8)
    plt.bar(x_pos + width, gd_iters, width, label='梯度下降', alpha=0.8)
    
    plt.xlabel('条件数')
    plt.ylabel('收敛迭代次数')
    plt.title('条件数对收敛速度的影响')
    plt.xticks(x_pos, [str(c) for c in cond_nums])
    plt.legend()
    plt.grid(True, alpha=0.3)
    plt.yscale('log')
    
    plt.tight_layout()
    plt.show()

# Run both demos (interactive plots) when executed as a script.
if __name__ == "__main__":
    demonstrate_newton_methods()
    analyze_condition_number_effect()