import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
import sympy
import math

# Predefined values for the optimization runs below.
x0 = np.array([29,44,57])  # initial iterate (3-D vector, near the optimum at (30, 40, 50))
E = 10**(-4)  # convergence tolerance: stop once the gradient norm drops below this

def f(x):
    """Objective function: a concave paraboloid peaking at (30, 40, 50).

    Accepts any 3-element sequence; returns the scalar value, whose
    maximum is 2500 at the peak.
    """
    point = np.asarray(x)  # tolerate lists/tuples as well as arrays
    return -(point[0] - 30) ** 2 - (point[1] - 40) ** 2 - (point[2] - 50) ** 2 + 2500

f_d = lambda x: np.array([2 * (x[0] - 30), 2 * (x[1] - 40), 2 * (x[2] - 50)])  # 导数

# ---- Approximate the objective with a small BP neural network ----

# One hidden ReLU layer feeding a single linear output neuron.
model = tf.keras.Sequential([
    tf.keras.layers.Dense(12, activation='relu', input_shape=(3,)),
    tf.keras.layers.Dense(1, activation='linear')
])

# Mean-squared-error regression trained with Adam.
model.compile(optimizer='adam', loss='mean_squared_error')

# Training data: a 7x7x7 lattice of sample points surrounding the optimum.
x1_values = np.array([15, 20, 25, 30, 35, 40, 45])
x2_values = np.array([25, 30, 35, 40, 45, 50, 55])
x3_values = np.array([35, 40, 45, 50, 55, 60, 65])
g1, g2, g3 = np.meshgrid(x1_values, x2_values, x3_values)
targets = -(g1 - 30) ** 2 - (g2 - 40) ** 2 - (g3 - 50) ** 2 + 2500

# Scale targets into [0, 1] so the network trains on normalized values.
scaler = MinMaxScaler()
targets_scaled = scaler.fit_transform(targets.reshape(-1, 1)).flatten()

# The same (N, 3) feature matrix is used for both fitting and prediction.
grid_points = np.column_stack((g1.ravel(), g2.ravel(), g3.ravel()))

# Fit the surrogate model.
model.fit(grid_points, targets_scaled, epochs=1000, batch_size=32, verbose=0)

# Predict on the training lattice and undo the normalization.
preds = model.predict(grid_points)
preds = scaler.inverse_transform(preds.reshape(-1, 1)).flatten()

# Locate the lattice point with the highest predicted objective value.
best = np.argmax(preds)
best_x1 = g1.ravel()[best]
best_x2 = g2.ravel()[best]
best_x3 = g3.ravel()[best]
best_f = preds[best]

# Report the surrogate's estimated maximum and its location.
print("Max F(X) value:", best_f)
print("x1, x2, x3 at max F(X):", best_x1, best_x2, best_x3)

def steepest_descent(f, f_d, x0, E, max_iter=10000):
    """Maximize ``f`` by gradient ascent with an exact line search.

    ``f_d`` must return the gradient of the NEGATED objective (as ``f_d``
    above does), so stepping ``x - alpha * f_d(x)`` moves uphill on ``f``.

    Args:
        f: scalar objective to maximize.
        f_d: callable returning the gradient of ``-f`` at a point.
        x0: starting point (array-like).
        E: convergence tolerance on the gradient norm.
        max_iter: safety cap so a non-converging run cannot loop forever
            (backward-compatible addition; the original looped unbounded).

    Returns:
        The final iterate as a float ndarray.

    Note: the original implementation called ``sympy.solve`` with a numeric
    ``alpha`` (a float, not a Symbol) on a purely numeric expression, which
    raises on the first iteration. Replaced with a proper one-dimensional
    line search.
    """
    from scipy.optimize import minimize_scalar

    x = np.asarray(x0, dtype=float)
    for _ in range(max_iter):
        gradient = np.asarray(f_d(x), dtype=float)
        if np.linalg.norm(gradient) < E:
            break
        # Exact line search: choose the step length in (0, 1] that
        # maximizes f along the search direction (minimize -f).
        result = minimize_scalar(lambda a: -f(x - a * gradient),
                                 bounds=(0.0, 1.0), method='bounded')
        alpha = result.x if result.success else 0.1  # fixed-step fallback
        x = x - alpha * gradient
    return x

def newton_method(f_d, f_dd, x0, E, max_iter=100):
    """Newton's method for a stationary point of the (negated) objective.

    Args:
        f_d: callable returning the gradient at a point.
        f_dd: callable returning the Hessian matrix at a point.
        x0: starting point (array-like).
        E: convergence tolerance on the gradient norm.
        max_iter: safety cap so a non-converging run cannot loop forever
            (backward-compatible addition; the original looped unbounded).

    Returns:
        The final iterate as a float ndarray.

    Raises:
        numpy.linalg.LinAlgError: if the Hessian is singular at an iterate.
    """
    x = np.asarray(x0, dtype=float)
    for _ in range(max_iter):
        gradient = f_d(x)
        if np.linalg.norm(gradient) < E:
            break
        # Solve H @ step = gradient directly instead of forming the
        # inverse: cheaper and numerically more stable than
        # np.linalg.inv(H).dot(gradient).
        x = x - np.linalg.solve(f_dd(x), gradient)
    return x

# ---- Run both optimizers from the predefined starting point ----

# Steepest descent with exact line search.
optimal_x = steepest_descent(f, f_d, x0, E)
print("Optimal x using Steepest Descent:", optimal_x)

# Evaluate the objective at the steepest-descent solution.
max_f_value = f(optimal_x)
print("Max F(X) value using Steepest Descent:", max_f_value)

# Newton's method: the objective is quadratic, so the Hessian of the
# negated objective is the constant matrix 2*I.
optimal_x = newton_method(f_d, lambda x: np.array([[2, 0, 0], [0, 2, 0], [0, 0, 2]]), x0, E)
print("Optimal x using Newton's Method:", optimal_x)
max_f_value = f(optimal_x)

# BUG FIX: this value comes from Newton's method, not steepest descent;
# the original label misattributed the result.
print("Max F(X) value using Newton's Method:", max_f_value)
