import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D

# 定义目标函数
def f(x, y):
    """Objective function: the paraboloid f(x, y) = x^2 + y^2 (minimum at the origin)."""
    return x * x + y * y

# 计算梯度
def gradient(f, x, y, h=1e-5):
    """Numerically approximate the gradient of f at (x, y).

    Uses the central-difference formula (f(p+h) - f(p-h)) / (2h) along each
    axis, which is second-order accurate in h.

    Args:
        f: callable taking (x, y) and returning a scalar.
        x, y: point at which to evaluate the gradient.
        h: finite-difference step size.

    Returns:
        np.ndarray of shape (2,): [df/dx, df/dy].
    """
    diffs = [
        f(x + h, y) - f(x - h, y),
        f(x, y + h) - f(x, y - h),
    ]
    return np.array(diffs) / (2 * h)

# 梯度下降算法
def gradient_descent(f, start, learning_rate=0.1, max_iter=100, tol=None):
    """Minimize f with fixed-step gradient descent, recording the path.

    Args:
        f: callable taking (x, y) and returning a scalar to minimize.
        start: (x, y) starting point.
        learning_rate: step size multiplying the gradient each iteration.
        max_iter: maximum number of descent steps.
        tol: optional convergence threshold; when given, iteration stops
            early once the gradient norm drops below it. Default None keeps
            the original fixed-iteration behavior.

    Returns:
        List of (x, y) tuples: the start point followed by each iterate.
    """
    x, y = start
    history = [(x, y)]
    for _ in range(max_iter):
        grad = gradient(f, x, y)
        # Early stop when the gradient is already negligible (only if tol set).
        if tol is not None and np.hypot(grad[0], grad[1]) < tol:
            break
        x -= learning_rate * grad[0]
        y -= learning_rate * grad[1]
        history.append((x, y))
    return history

# 设置起始点和学习率
# Starting point and step size for the descent.
start = (4, 4)
learning_rate = 0.1

# Run gradient descent; `path` is the list of (x, y) iterates.
path = gradient_descent(f, start, learning_rate)

# Plot the surface of f over [-5, 5] x [-5, 5].
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
X = np.linspace(-5, 5, 100)
Y = np.linspace(-5, 5, 100)
X, Y = np.meshgrid(X, Y)
Z = f(X, Y)
ax.plot_surface(X, Y, Z, cmap='viridis', alpha=0.6)

# Overlay the gradient-descent path on the surface (red line with markers).
x_path, y_path = zip(*path)
z_path = f(np.array(x_path), np.array(y_path))
ax.plot(x_path, y_path, z_path, color='r', marker='o')

ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
plt.show()
