import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

class GradientDescentOptimizer:
    """Fixed-step gradient descent on f(x, y) = x^2 + y^2 + 2*sin(x) + 2*cos(y),
    with 2D/3D visualization of the optimization path.
    """

    def __init__(self, learning_rate=0.01, max_iters=1000, tolerance=1e-6):
        self.learning_rate = learning_rate  # fixed step size
        self.max_iters = max_iters          # iteration budget
        self.tolerance = tolerance          # convergence threshold on the step length
        self.history = []                   # iterates visited, including the start point

    def objective_function(self, x, y):
        """Objective: f(x, y) = x^2 + y^2 + 2*sin(x) + 2*cos(y).

        Works element-wise on NumPy arrays, so it can also evaluate a meshgrid.
        """
        return x**2 + y**2 + 2*np.sin(x) + 2*np.cos(y)

    def gradient(self, x, y):
        """Analytic gradient: (df/dx, df/dy) = (2x + 2*cos(x), 2y - 2*sin(y))."""
        dx = 2*x + 2*np.cos(x)
        dy = 2*y - 2*np.sin(y)
        return np.array([dx, dy])

    def optimize(self, start_point):
        """Run gradient descent from ``start_point``.

        Parameters
        ----------
        start_point : sequence of two floats
            Initial (x, y) iterate.

        Returns
        -------
        (ndarray, float)
            Final iterate and its objective value.  The full path is kept in
            ``self.history``.
        """
        # Force float dtype so integer start points don't truncate the updates.
        point = np.asarray(start_point, dtype=float).copy()
        self.history = [point.copy()]

        for i in range(self.max_iters):
            grad = self.gradient(point[0], point[1])
            new_point = point - self.learning_rate * grad
            step = np.linalg.norm(new_point - point)

            # Accept the step BEFORE the convergence test so the converged
            # iterate is both recorded in the history and returned to the
            # caller (previously the final step was silently discarded).
            point = new_point
            self.history.append(point.copy())

            if step < self.tolerance:
                print(f"算法在第 {i+1} 次迭代后收敛")
                break

        return point, self.objective_function(point[0], point[1])

    def visualize_optimization(self, start_point):
        """Optimize from ``start_point`` and plot the run.

        Produces a contour plot with the descent path, a 3D surface with the
        same path, and the objective value per iteration; prints a summary.
        """
        # Evaluation grid for the contour / surface plots.
        x = np.linspace(-4, 4, 100)
        y = np.linspace(-4, 4, 100)
        X, Y = np.meshgrid(x, y)
        Z = self.objective_function(X, Y)

        # Run the optimization.
        optimal_point, optimal_value = self.optimize(start_point)

        fig = plt.figure(figsize=(15, 5))

        # 2D contour view.
        ax1 = plt.subplot(131)
        contour = ax1.contour(X, Y, Z, levels=20)
        ax1.clabel(contour, inline=True, fontsize=8)

        # Descent path on top of the contours.
        history = np.array(self.history)
        ax1.plot(history[:, 0], history[:, 1], 'ro-', markersize=3, linewidth=1)
        ax1.plot(start_point[0], start_point[1], 'go', markersize=8, label='起始点')
        ax1.plot(optimal_point[0], optimal_point[1], 'r*', markersize=10, label='最优点')

        ax1.set_xlabel('x')
        ax1.set_ylabel('y')
        ax1.set_title('梯度下降路径 (2D视图)')
        ax1.legend()
        ax1.grid(True)

        # 3D surface view.
        ax2 = plt.subplot(132, projection='3d')
        ax2.plot_surface(X, Y, Z, alpha=0.6, cmap='viridis')

        # Same descent path, lifted onto the surface.
        history_z = [self.objective_function(h[0], h[1]) for h in self.history]
        ax2.plot(history[:, 0], history[:, 1], history_z, 'ro-', markersize=3)
        ax2.scatter([start_point[0]], [start_point[1]],
                   [self.objective_function(start_point[0], start_point[1])],
                   color='green', s=100, label='起始点')
        ax2.scatter([optimal_point[0]], [optimal_point[1]], [optimal_value],
                   color='red', s=100, marker='*', label='最优点')

        ax2.set_xlabel('x')
        ax2.set_ylabel('y')
        ax2.set_zlabel('f(x,y)')
        ax2.set_title('梯度下降路径 (3D视图)')

        # Objective value per iteration.
        ax3 = plt.subplot(133)
        function_values = [self.objective_function(h[0], h[1]) for h in self.history]
        ax3.plot(function_values, 'b-', linewidth=2)
        ax3.set_xlabel('迭代次数')
        ax3.set_ylabel('目标函数值')
        ax3.set_title('收敛过程')
        ax3.grid(True)

        plt.tight_layout()
        plt.show()

        # Summary.  history includes the start point, so the number of
        # iterations actually performed is len(history) - 1 (was off by one).
        print(f"起始点: ({start_point[0]:.3f}, {start_point[1]:.3f})")
        print(f"最优点: ({optimal_point[0]:.3f}, {optimal_point[1]:.3f})")
        print(f"最优值: {optimal_value:.6f}")
        print(f"迭代次数: {len(self.history) - 1}")
        print(f"学习率: {self.learning_rate}")

# Implementations of different gradient-descent variants.
class AdaptiveGradientDescent:
    """Gradient-descent variants (heavy-ball momentum, backtracking line
    search) for f(x, y) = x^2 + y^2 + 2*sin(x) + 2*cos(y).
    """

    def __init__(self, initial_lr=0.1, max_iters=1000):
        self.initial_lr = initial_lr  # starting step size for all variants
        self.max_iters = max_iters    # iteration budget
        self.history = []             # iterates visited, including the start point

    def objective_function(self, x, y):
        """Objective: f(x, y) = x^2 + y^2 + 2*sin(x) + 2*cos(y)."""
        return x**2 + y**2 + 2*np.sin(x) + 2*np.cos(y)

    def gradient(self, x, y):
        """Analytic gradient: (2x + 2*cos(x), 2y - 2*sin(y))."""
        dx = 2*x + 2*np.cos(x)
        dy = 2*y - 2*np.sin(y)
        return np.array([dx, dy])

    def line_search(self, point, direction):
        """Backtracking search for a step size along ``-direction``.

        Halves the step up to 10 times starting from ``initial_lr`` and
        returns the first step that strictly decreases the objective, or the
        last (smallest) candidate if none improves.
        """
        alpha = self.initial_lr
        # The objective at the current point is loop-invariant: evaluate it
        # once instead of on every backtracking trial.
        current_value = self.objective_function(point[0], point[1])
        for _ in range(10):
            new_point = point - alpha * direction
            if self.objective_function(new_point[0], new_point[1]) < current_value:
                return alpha
            alpha *= 0.5
        return alpha

    def optimize_with_momentum(self, start_point, momentum=0.9):
        """Gradient descent with classical (heavy-ball) momentum.

        Returns the final iterate; the path is stored in ``self.history``.
        """
        # Force float dtype so integer start points don't truncate the updates.
        point = np.asarray(start_point, dtype=float).copy()
        velocity = np.zeros_like(point)
        self.history = [point.copy()]

        for i in range(self.max_iters):
            grad = self.gradient(point[0], point[1])

            # Accumulate velocity, then move against it.
            velocity = momentum * velocity + self.initial_lr * grad
            point = point - velocity

            self.history.append(point.copy())

            # Stop once the gradient is numerically zero.
            if np.linalg.norm(grad) < 1e-6:
                break

        return point

    def optimize_with_adaptive_lr(self, start_point):
        """Gradient descent with a per-step backtracking line search.

        Returns the final iterate; the path is stored in ``self.history``.
        """
        point = np.asarray(start_point, dtype=float).copy()
        self.history = [point.copy()]

        for i in range(self.max_iters):
            grad = self.gradient(point[0], point[1])

            # Choose the step size along the negative gradient by line search.
            lr = self.line_search(point, grad)
            point = point - lr * grad

            self.history.append(point.copy())

            if np.linalg.norm(grad) < 1e-6:
                break

        return point

# Demo script: exercise the gradient descent optimizers.
if __name__ == "__main__":
    # --- Demo 1: plain gradient descent with full visualization ---
    print("=== 基础梯度下降 ===")
    optimizer = GradientDescentOptimizer(learning_rate=0.1, max_iters=100)
    optimizer.visualize_optimization([3.0, 3.0])

    # --- Demo 2: compare several learning rates on one contour plot ---
    print("\n=== 不同学习率比较 ===")
    plt.figure(figsize=(10, 8))

    # Contour background over the same square region as the demo above.
    axis = np.linspace(-4, 4, 100)
    X, Y = np.meshgrid(axis, axis)
    plt.contour(X, Y, optimizer.objective_function(X, Y), levels=15, alpha=0.6)

    # One descent run per (learning rate, path color) pair.
    for lr, color in [(0.01, 'blue'), (0.1, 'red'), (0.3, 'green')]:
        run = GradientDescentOptimizer(learning_rate=lr, max_iters=50)
        run.optimize([3.0, 3.0])

        history = np.array(run.history)
        plt.plot(history[:, 0], history[:, 1],
                color=color, marker='o', markersize=2,
                label=f'学习率={lr}, 迭代{len(history)}次')

    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('不同学习率的收敛对比')
    plt.legend()
    plt.grid(True)
    plt.show()

    # --- Demo 3: adaptive variants ---
    print("\n=== 自适应梯度下降 ===")
    adaptive_opt = AdaptiveGradientDescent()

    # Momentum run.
    momentum_result = adaptive_opt.optimize_with_momentum([3.0, 3.0])
    print(f"动量法结果: {momentum_result}, 迭代次数: {len(adaptive_opt.history)}")

    # Line-search (adaptive learning rate) run.
    adaptive_opt_2 = AdaptiveGradientDescent()
    adaptive_result = adaptive_opt_2.optimize_with_adaptive_lr([3.0, 3.0])
    print(f"自适应学习率结果: {adaptive_result}, 迭代次数: {len(adaptive_opt_2.history)}")

    