"""
Week 1: 优化方法导论
Introduction to Optimization Methods
"""

from typing import Callable

import matplotlib
import matplotlib.pyplot as plt
import numpy as np
matplotlib.rcParams['font.sans-serif'] = ['SimHei']
matplotlib.rcParams['axes.unicode_minus'] = False

class OptimizationIntro:
    """Introductory optimization demos.

    Provides two example objectives (a 1-D quadratic and a 2-D bowl),
    their analytic derivatives/gradients, and visualizations of the
    functions, their gradient fields, and a basic gradient-descent run.
    """

    def __init__(self):
        # Display name of this lesson module.
        self.name = "Optimization Introduction"

    def quadratic_function(self, x):
        """Example quadratic objective f(x) = (x - 2)^2 + 1.

        Vectorized: ``x`` may be a scalar or a NumPy array.
        The unique minimum is f(2) = 1.
        """
        return (x - 2) ** 2 + 1

    def quadratic_derivative(self, x):
        """Derivative f'(x) = 2(x - 2) of :meth:`quadratic_function`.

        Vectorized: ``x`` may be a scalar or a NumPy array.
        Zero exactly at the minimizer x = 2.
        """
        return 2 * (x - 2)

    def multivariate_function(self, x):
        """Bowl-shaped objective f(x, y) = (x - 1)^2 + (y - 2)^2 + 3.

        ``x`` is any object indexable as ``x[0]``, ``x[1]`` — a length-2
        array for a single point, or a stacked pair of coordinate grids
        for vectorized evaluation. The unique minimum is f(1, 2) = 3.
        """
        return (x[0] - 1) ** 2 + (x[1] - 2) ** 2 + 3

    def multivariate_gradient(self, x):
        """Gradient ∇f = [2(x - 1), 2(y - 2)] of :meth:`multivariate_function`.

        Accepts the same inputs as :meth:`multivariate_function`; returns
        an array whose first axis holds the two partial derivatives.
        """
        return np.array([2 * (x[0] - 1), 2 * (x[1] - 2)])

    def visualize_univariate_optimization(self):
        """Plot the 1-D objective and its derivative side by side."""
        x = np.linspace(-1, 5, 1000)
        y = self.quadratic_function(x)  # vectorized, no Python loop

        plt.figure(figsize=(12, 4))

        # Left panel: the objective with its minimizer marked.
        plt.subplot(1, 2, 1)
        plt.plot(x, y, 'b-', linewidth=2, label='f(x) = (x-2)² + 1')
        plt.plot(2, 1, 'ro', markersize=10, label='最优点 (2, 1)')
        plt.xlabel('x')
        plt.ylabel('f(x)')
        plt.title('一元函数优化')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Right panel: the derivative crosses zero at the minimizer x = 2.
        plt.subplot(1, 2, 2)
        dy = self.quadratic_derivative(x)  # vectorized, no Python loop
        plt.plot(x, dy, 'g-', linewidth=2, label="f'(x) = 2(x-2)")
        plt.axhline(y=0, color='k', linestyle='--', alpha=0.5)
        plt.axvline(x=2, color='r', linestyle='--', alpha=0.5, label='x = 2 (导数为0)')
        plt.xlabel('x')
        plt.ylabel("f'(x)")
        plt.title('函数导数')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

    def visualize_multivariate_optimization(self):
        """Show the 2-D objective as a 3-D surface, a contour plot, and
        a negative-gradient (steepest-descent) vector field."""
        # Dense grid for the surface and contours.
        x1 = np.linspace(-2, 4, 100)
        x2 = np.linspace(-1, 5, 100)
        X1, X2 = np.meshgrid(x1, x2)

        # Vectorized evaluation over the whole grid: stacking makes
        # x[0] == X1 and x[1] == X2, so no per-cell Python loop is needed.
        Z = self.multivariate_function(np.stack([X1, X2]))

        fig = plt.figure(figsize=(15, 5))

        # 3-D surface with the minimizer marked.
        ax1 = fig.add_subplot(131, projection='3d')
        ax1.plot_surface(X1, X2, Z, alpha=0.8, cmap='viridis')
        ax1.plot([1], [2], [3], 'ro', markersize=10, label='最优点 (1, 2, 3)')
        ax1.set_xlabel('x₁')
        ax1.set_ylabel('x₂')
        ax1.set_zlabel('f(x₁, x₂)')
        ax1.set_title('多元函数3D图')
        # Fix: the marker's label was set but legend() was never called,
        # so the label silently never appeared.
        ax1.legend()

        # Labeled contour plot.
        ax2 = fig.add_subplot(132)
        contour = ax2.contour(X1, X2, Z, levels=20)
        ax2.clabel(contour, inline=True, fontsize=8)
        ax2.plot(1, 2, 'ro', markersize=10, label='最优点 (1, 2)')
        ax2.set_xlabel('x₁')
        ax2.set_ylabel('x₂')
        ax2.set_title('等高线图')
        ax2.legend()

        # Gradient field on a sparser grid so arrows stay readable.
        ax3 = fig.add_subplot(133)
        x1_sparse = np.linspace(-2, 4, 20)
        x2_sparse = np.linspace(-1, 5, 20)
        X1_sparse, X2_sparse = np.meshgrid(x1_sparse, x2_sparse)

        # Descent direction is the negative gradient; one vectorized call
        # replaces the per-cell double loop.
        U, V = -self.multivariate_gradient(np.stack([X1_sparse, X2_sparse]))

        ax3.quiver(X1_sparse, X2_sparse, U, V, alpha=0.7)
        ax3.contour(X1, X2, Z, levels=10, alpha=0.3)
        ax3.plot(1, 2, 'ro', markersize=10, label='最优点 (1, 2)')
        ax3.set_xlabel('x₁')
        ax3.set_ylabel('x₂')
        ax3.set_title('梯度场 (负梯度方向)')
        ax3.legend()

        plt.tight_layout()
        plt.show()

    def simple_gradient_descent_demo(self):
        """Run fixed-step gradient descent on the 1-D quadratic, print a
        trace of the iterates, and plot the path and convergence curve."""
        learning_rate = 0.1   # fixed step size
        max_iterations = 20
        x_current = 5.0       # starting point, far from the minimizer

        x_history = [x_current]
        f_history = [self.quadratic_function(x_current)]

        print("=== 一元函数梯度下降演示 ===")
        print(f"起始点: x = {x_current:.4f}, f(x) = {f_history[0]:.4f}")

        for i in range(max_iterations):
            # Update rule: x <- x - lr * f'(x).
            gradient = self.quadratic_derivative(x_current)
            x_current = x_current - learning_rate * gradient

            x_history.append(x_current)
            f_history.append(self.quadratic_function(x_current))

            # Print the first few steps and every 5th step to keep the
            # trace short.
            if i < 5 or i % 5 == 0:
                print(f"第{i+1}步: x = {x_current:.4f}, f(x) = {f_history[-1]:.4f}, 梯度 = {gradient:.4f}")

        print(f"最终结果: x = {x_current:.4f}, f(x) = {f_history[-1]:.4f}")
        print(f"理论最优解: x = 2.0000, f(x) = 1.0000\n")

        plt.figure(figsize=(12, 4))

        # Left panel: the objective with the optimization path overlaid.
        plt.subplot(1, 2, 1)
        x_plot = np.linspace(0, 6, 1000)
        y_plot = self.quadratic_function(x_plot)  # vectorized
        plt.plot(x_plot, y_plot, 'b-', linewidth=2, label='f(x) = (x-2)² + 1')
        plt.plot(x_history, f_history, 'ro-', markersize=6, label='梯度下降路径')
        plt.plot(2, 1, 'g*', markersize=15, label='真实最优点')
        plt.xlabel('x')
        plt.ylabel('f(x)')
        plt.title('梯度下降优化过程')
        plt.legend()
        plt.grid(True, alpha=0.3)

        # Right panel: objective value per iteration against the optimum.
        plt.subplot(1, 2, 2)
        plt.plot(range(len(f_history)), f_history, 'ro-', markersize=4)
        plt.axhline(y=1, color='g', linestyle='--', label='最优值 f* = 1')
        plt.xlabel('迭代次数')
        plt.ylabel('函数值')
        plt.title('收敛曲线')
        plt.legend()
        plt.grid(True, alpha=0.3)

        plt.tight_layout()
        plt.show()

def demonstrate_optimization_intro():
    """Run every demo of the optimization-intro lesson, in order."""
    opt = OptimizationIntro()

    print("=== 优化方法导论演示 ===\n")

    # Captioned demo steps, executed in lesson order.
    demos = (
        ("1. 一元函数优化可视化...", opt.visualize_univariate_optimization),
        ("2. 多元函数优化可视化...", opt.visualize_multivariate_optimization),
        ("3. 简单梯度下降演示...", opt.simple_gradient_descent_demo),
    )
    for caption, run in demos:
        print(caption)
        run()

    # Closing summary of the basic concepts covered above.
    print("4. 优化基本概念总结:")
    for line in (
        "   - 目标函数: 需要最小化或最大化的函数",
        "   - 梯度: 函数变化最快的方向",
        "   - 最优解: 使目标函数达到极值的参数",
        "   - 学习率: 控制参数更新步长的超参数",
        "   - 收敛: 算法逐渐接近最优解的过程",
    ):
        print(line)

# Run the full demo only when executed as a script, not on import.
if __name__ == "__main__":
    demonstrate_optimization_intro()