import matplotlib
matplotlib.use('TkAgg')  # 设置后端为 TkAgg
import matplotlib.pyplot as plt
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
def f2(x, y):
    """Rosenbrock function: (1 - x)^2 + 100 * (y - x^2)^2.

    Works elementwise on numpy arrays as well as on scalars.
    The global minimum is 0 at (x, y) = (1, 1).
    """
    linear_term = (1 - x) ** 2
    valley_term = 100 * (y - x ** 2) ** 2
    return linear_term + valley_term

def grad2(x, y):
    """Analytic gradient (df/dx, df/dy) of the Rosenbrock function f2."""
    inner = y - x ** 2               # shared sub-expression of both partials
    dx = -2 * (1 - x) - 400 * x * inner
    dy = 200 * inner
    return dx, dy

# Sample the Rosenbrock function on a square grid and draw its 3D surface.
x = np.linspace(-5, 5, 500)
y = np.linspace(-5, 5, 500)
X, Y = np.meshgrid(x, y)
Z = f2(X, Y)

fig = plt.figure()
# Bug fix: `fig.add_axes(Axes3D(fig))` constructs Axes3D directly, which has
# been deprecated since matplotlib 3.4 and fails on 3.7+.  Request the 3D
# projection through add_subplot instead.  The stray plt.cla() (which created
# and immediately cleared an unrelated 2D axes) is removed, and the deprecated
# plt.get_cmap() call is replaced by the equivalent colormap-name string.
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, Z,
                rstride=1, cstride=1,  # mesh sampling stride; tweak to taste
                cmap='rainbow')

# ---- Gradient descent toward the Rosenbrock minimum ----
x_initial = -1.5        # starting x
y_initial = 2.0         # starting y
learning_rate = 0.002   # small step size: Rosenbrock gradients grow quickly
tolerance = 1e-7        # stop once the gradient norm falls below this
max_iterations = 10000  # hard cap on the number of steps

# Record the full trajectory so it can be drawn over the surface later.
x_history = [x_initial]
y_history = [y_initial]
f_history = [f2(x_initial, y_initial)]

for i in range(max_iterations):
    df_dx, df_dy = grad2(x_initial, y_initial)
    # Converged: the gradient is numerically zero.
    if np.sqrt(df_dx ** 2 + df_dy ** 2) < tolerance:
        break
    x_initial -= learning_rate * df_dx
    y_initial -= learning_rate * df_dy
    x_history.append(x_initial)
    y_history.append(y_initial)
    f_history.append(f2(x_initial, y_initial))

    # Progress report every 500 iterations.
    if i % 500 == 0:
        print(f"Iteration {i}: x = {x_initial:.3f}, y = {y_initial:.3f}, f(x, y) = {f2(x_initial, y_initial):.6f}, grad = [{df_dx},{df_dy}]")

print(f"Total iterations: {i+1}")
print(f"Final x = {x_initial:.6f}, y = {y_initial:.6f}, f(x, y) = {f2(x_initial, y_initial):.6f}")

# Overlay the recorded descent trajectory on the 3D surface.
ax.set_title("3D Surface Plot with Gradient Descent Path")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.scatter(x_history, y_history, f_history, color='black', s=11, label='Gradient Descent Path')
ax.legend(loc="upper right")  # legend must come after the labeled scatter
plt.show()