import numpy as np


def f(x):
    """Objective function: f(x) = x0^2 + 25 * x1^2 (an elongated quadratic bowl)."""
    quad_term = x[0] ** 2
    scaled_term = 25 * x[1] ** 2
    return quad_term + scaled_term
def grad_f(x):
    """Gradient of the objective: [df/dx0, df/dx1] = [2*x0, 50*x1]."""
    d_dx0 = 2 * x[0]
    d_dx1 = 50 * x[1]
    return np.array([d_dx0, d_dx1])

def steepest_descent(x0, alpha, epsilon, max_iter=1000, grad_fn=None):
    """Minimize a function with fixed-step steepest (gradient) descent.

    Starting from ``x0``, repeatedly steps in the negative-gradient
    direction with constant step size ``alpha`` until the gradient's
    Euclidean norm falls below ``epsilon`` or ``max_iter`` iterations
    have run.

    Args:
        x0: Initial point (array-like).
        alpha: Fixed step size (learning rate).
        epsilon: Convergence tolerance on the gradient norm.
        max_iter: Maximum number of descent iterations (default 1000).
        grad_fn: Optional gradient callable taking the current point and
            returning the gradient array; defaults to the module-level
            ``grad_f`` when None (preserves original behavior).

    Returns:
        numpy.ndarray: The approximate minimizer reached.
    """
    gradient = grad_f if grad_fn is None else grad_fn
    # Float copy so integer-valued starting points don't surprise anyone.
    x = np.array(x0, dtype=float)
    for _ in range(max_iter):
        g = gradient(x)
        # Stop at an (approximately) stationary point.
        if np.linalg.norm(g) < epsilon:
            break
        x = x - alpha * g
    return x

# Starting point for the descent.
x0 = np.array([2, 2])
# Step size (learning rate); may need tuning for other problems.
alpha = 0.01
# Convergence tolerance on the gradient norm.
epsilon = 0.001

# Run steepest descent from x0 and report the result.
x_min = steepest_descent(x0, alpha, epsilon)
print("Minimum found at:", x_min)
print("Function value at minimum:", f(x_min))