import numpy as np
import tensorflow as tf
from sklearn.preprocessing import MinMaxScaler
import sympy
import math

# Convergence tolerance: steepest descent stops once the gradient norm drops below E.
E = 10 ** (-4)  
# Pre-trained parameters of a 4-input, 15-hidden-unit, 1-output network,
# stored as proper NumPy arrays (presumably exported from a trained model —
# TODO confirm provenance of these values).
# wij: (4, 15) input-to-hidden weight matrix.
wij = np.array([[-1.6310, 1.3209, -0.0335, 1.7913, 1.0476, -0.5819, -1.7725, 2.0742, -1.0378, 2.4707, 1.8822, -0.1239, -1.3504, -0.9531, 1.2606],
                [1.1011, -1.4885, 1.1340, -1.9753, 1.2616, -0.6451, -0.2343, 0.7114, -0.8157, 0.6839, -0.4079, -0.4302, -0.6905, -1.7629, -1.6664],
                [0.5969, -1.6763, -2.2739, 0.0904, -0.2882, 1.9866, 1.9658, -0.3371, 1.8880, -1.0109, -1.5593, 0.2370, -1.9294, -1.6484, 1.7724],
                [1.8985, -0.6473, 1.1957, -1.2257, 2.1977, 1.6458, 0.6343, -1.5799, 1.4529, 0.1995, -1.1304, -2.6744, 1.0509, 0.9978, 0.0967]])
# wjp: (15,) hidden-to-output weight vector.
wjp = np.array([0.2661, -0.3078, 0.4609, 0.8309, -0.2396, 0.4325, 0.0535, 0.3316, -0.0641, -0.4618, 0.0115, -0.7142, 0.3105, -0.4724, -0.0900])
# b1: hidden-layer biases stored as a (15, 1) COLUMN vector.
# NOTE(review): downstream code adds b1 to a (15,) pre-activation, which
# broadcasts the sum up to (15, 15) — b1 likely needs flattening at the
# point of use; verify against output_function / gradient_y_wrt_x.
b1 = np.array([[2.7248], [2.4582], [1.8240], [1.7894], [-1.2047], [0.9728], [0.2339], [0.2339], [-0.3614], [0.8517], [0.9173], [-2.0029], [-2.0102], [-2.2651], [2.7889]])
# b2: scalar output-layer bias.
b2 = -1.0444

# Define the BP (backpropagation) neural-network model.
# NOTE(review): `model` is never referenced again in this chunk, and its
# input_shape of 3 does not match the 4-feature vectors used below — confirm intent.
model = tf.keras.Sequential()
model.add(tf.keras.layers.Dense(12, activation='relu', input_shape=(3,)))
model.add(tf.keras.layers.Dense(1, activation='linear'))


def steepest_descent(f, f_d, x0, E, learning_rate, max_iter=None):
    """Minimize a function by fixed-step steepest (gradient) descent.

    Iterates x <- x - learning_rate * f_d(x) until the Euclidean norm of
    the gradient drops below the tolerance ``E``.

    Args:
        f: Objective function. Accepted for API symmetry but never
           evaluated — only the gradient drives the iteration.
        f_d: Gradient of the objective, called as ``f_d(x)``.
        x0: Starting point (array-like). A copy is taken, so the caller's
            array is NOT mutated (the original ``x -= ...`` updated the
            caller's ndarray in place, corrupting the initial population).
        E: Convergence tolerance on the gradient norm.
        learning_rate: Fixed step size.
        max_iter: Optional iteration cap; ``None`` (the default) keeps the
            original unbounded behavior.

    Returns:
        The final iterate as a float ndarray.
    """
    # Copy defensively: in-place `-=` on the original mutated x0.
    x = np.array(x0, dtype=float)
    iterations = 0
    while max_iter is None or iterations < max_iter:
        gradient = f_d(x)
        if np.linalg.norm(gradient) < E:
            break
        x = x - learning_rate * gradient
        iterations += 1
    return x
wij = np.array(wij).reshape(4, 15)

# Assumed activation function F (logistic sigmoid as a placeholder —
# replace with the real activation if it differs).
def F(z):
    """Logistic sigmoid: maps z elementwise into (0, 1)."""
    exp_neg = np.exp(np.negative(z))
    return 1.0 / (1.0 + exp_neg)

# Forward pass: network output Y for one input vector.
def output_function(x, wij, wjp, b1, b2):
    """Forward pass of the 1-hidden-layer net: Y = F(x @ wij + b1) @ wjp + b2.

    Args:
        x: Input vector, shape (n_in,) — 4 for the module-level weights.
        wij: Input-to-hidden weights, shape (n_in, n_hidden).
        wjp: Hidden-to-output weights, shape (n_hidden,).
        b1: Hidden biases; either (n_hidden,) or an (n_hidden, 1) column
            (the module-level ``b1`` is a column).
        b2: Scalar output bias.

    Returns:
        Scalar network output.
    """
    # ravel(b1): the module-level b1 is a (15, 1) column; adding it directly
    # to the (15,) pre-activation broadcast the sum up to (15, 15), so the
    # function returned a (15,) vector instead of a scalar. Flattening fixes it.
    linear_result = np.dot(x, wij) + np.ravel(b1)
    activated_result = F(linear_result)
    return np.dot(activated_result, wjp) + b2


# Gradient of the output Y with respect to x (wij, wjp, b1, b2 treated as constants).
def gradient_y_wrt_x(x, wij, wjp, b1, b2):
    """Gradient of Y = F(x @ wij + b1) @ wjp + b2 with respect to x.

    Assumes F is the logistic sigmoid (matching ``F`` above), so
    F'(z) = F(z) * (1 - F(z)).

    Args:
        x: Input vector, shape (n_in,).
        wij: Input-to-hidden weights, shape (n_in, n_hidden).
        wjp: Hidden-to-output weights, shape (n_hidden,).
        b1: Hidden biases; (n_hidden,) or an (n_hidden, 1) column.
        b2: Scalar output bias (does not affect the gradient; kept so the
            signature parallels ``output_function``).

    Returns:
        Gradient vector, shape (n_in,).
    """
    # Flatten b1 (stored as a column) so the pre-activation stays (n_hidden,).
    linear_result = np.dot(x, wij) + np.ravel(b1)
    activated_result = F(linear_result)
    dF_dz = activated_result * (1 - activated_result)  # sigmoid derivative

    # Chain rule: dY/dx_i = sum_j wij[i, j] * wjp[j] * F'(z_j).
    # The original `np.dot(dF_dz * wij.T, wjp)` could not even broadcast
    # ((n_hidden,) against (n_hidden, n_in)) and raised at runtime; this
    # contraction yields the correct (n_in,) gradient.
    return np.dot(wij, wjp * dF_dz)


# Draw 100 random starting points, shape (100, 4) so each row matches the
# 4 input features expected by wij.
x_init_group = np.random.rand(100, 4)
# Fixed step size for the descent (tune as needed).
learning_rate = 0.01

# Run steepest descent from every starting point and collect the optima.
optimal_x_group = np.array([
    steepest_descent(lambda v: output_function(v, wij, wjp, b1, b2),
                     lambda v: gradient_y_wrt_x(v, wij, wjp, b1, b2),
                     start, E, learning_rate)
    for start in x_init_group
])
# Evaluate the network output at each optimum.
optimal_y_group = [output_function(v, wij, wjp, b1, b2) for v in optimal_x_group]

print("Optimal x solutions using Steepest Descent:", optimal_x_group)
print("Optimal y solutions using Steepest Descent:", optimal_y_group)