import matplotlib.pyplot as plt
import numpy as np


# Original (un-normalized) range of wij.
# NOTE(review): placeholder values per the original comment -- replace with
# the true range measured from the data.
original_wij_min = -2.0
original_wij_max = 2.0

# Original (un-normalized) range of x.
# NOTE(review): placeholder values per the original comment -- replace with
# the true range measured from the data.
original_x_min = -2.0
original_x_max = 130.0

# Convergence threshold: optimization stops once the update norm drops below E.
E = 1e-4

# Trained (normalized) network parameters.  Shapes:
#   wij: (4, 15)  input-to-hidden weights
#   wjp: (15, 1)  hidden-to-output weights
#   b1:  (15, 1)  hidden-layer biases
#   b2:  scalar   output bias
wij = np.array([[-1.6310, 1.3209, -0.0335, 1.7913, 1.0476, -0.5819, -1.7725, 2.0742, -1.0378, 2.4707, 1.8822, -0.1239, -1.3504, -0.9531, 1.2606],
                [1.1011, -1.4885, 1.1340, -1.9753, 1.2616, -0.6451, -0.2343, 0.7114, -0.8157, 0.6839, -0.4079, -0.4302, -0.6905, -1.7629, -1.6664],
                [0.5969, -1.6763, -2.2739, 0.0904, -0.2882, 1.9866, 1.9658, -0.3371, 1.8880, -1.0109, -1.5593, 0.2370, -1.9294, -1.6484, 1.7724],
                [1.8985, -0.6473, 1.1957, -1.2257, 2.1977, 1.6458, 0.6343, -1.5799, 1.4529, 0.1995, -1.1304, -2.6744, 1.0509, 0.9978, 0.0967]])
wjp = np.array([[0.2661], [-0.3078], [0.4609], [0.8309], [-0.2396], [0.4325], [0.2339], [0.3316], [-0.0641], [-0.4618], [0.0115], [-0.7142], [0.3105], [-0.4724], [-0.0900]])
b1 = np.array([[2.7248], [2.4582], [1.8240], [1.7894], [-1.2047], [0.9728], [0.2339], [0.424], [-0.3614], [0.8517], [0.9173], [-2.0029], [-2.0102], [-2.2651], [2.7889]])
b2 = -1.0444
# Observed target values.
# NOTE(review): the original literal was a syntax error -- "2653 31" (missing
# decimal point, read here as 2653.31) and a doubled comma after 3084.97
# (apparently a missing entry, removed here).  Confirm against the source data.
Y = [2778.56, 2696.64, 2653.31, 3114.71, 3617.25, 3151.42, 2732, 2625.24, 3786.3,
     3239.86, 2455.36, 2657.93, 2804.02, 2758.28, 2907.33, 2674.54, 3446.03,
     3775.17, 2725.88, 2556.83, 2451.47, 2952.7, 2143.1, 1697.75, 2929.57,
     2853.82, 3051.21, 3084.97, 2834.29, 3027.52, 3072.38, 3038.83, 3114.91,
     2860.76, 2951.2, 2994.69]

def normalize_data(data, min_val, max_val):
    """Linearly map *data* from [min_val, max_val] onto the [0, 1] interval."""
    span = max_val - min_val
    return (data - min_val) / span


def denormalize_data(data, min_val, max_val):
    """Inverse of normalize_data: map [0, 1] data back to [min_val, max_val]."""
    span = max_val - min_val
    return min_val + data * span


def target_function(x):
    """
    Evaluate the network output for input x.

    Accepts either a 1-D feature vector of length 4, or a 2-D array laid out
    as (n_features, n_samples); returns a flat 1-D array of outputs, one per
    sample.  NOTE(review): no activation function is applied, so the model is
    purely affine -- confirm this matches the trained network.
    """
    # Arrange samples as ROWS so they can multiply wij (4x15).
    # Bug fix: the original 1-D branch used x[:, np.newaxis], producing a
    # (4, 1) column that cannot be dotted with the (4, 15) weight matrix;
    # a 1-D input must become a (1, 4) row vector.
    if x.ndim == 1:
        x = x[np.newaxis, :]
    else:
        x = x.T

    # Map inputs into [0, 1] using the recorded original range of x.
    x = normalize_data(x, original_x_min, original_x_max)

    hidden_output = np.dot(x, wij) + b1.T      # (n_samples, 15)
    output = np.dot(hidden_output, wjp) + b2   # (n_samples, 1)
    return output.flatten()


def gradient_target_function(x):
    """
    Gradient of target_function with respect to its (original-scale) input x.

    The network is affine, so the gradient is constant in x:

        d(out)/dx = wij @ wjp * d(x_norm)/dx = wij @ wjp / (x_max - x_min)

    The x argument is kept for interface compatibility but does not affect
    the result.  Returns an array of shape (n_features, 1) so it can be
    subtracted from a column-vector iterate without broadcasting surprises.
    """
    # Chain-rule factor contributed by normalize_data: d(x_norm)/dx = 1/span.
    # (The original omitted this factor, over-scaling the gradient.)
    scale = 1.0 / (original_x_max - original_x_min)
    # Bug fix: the original returned shape (1, 4) via an extra transpose;
    # subtracting that from a (4, 1) x broadcast the iterate into a (4, 4)
    # matrix and silently corrupted the descent.
    return np.dot(wij, wjp) * scale


# 最速下降法，改进初始化和步长策略等
def steepest_descent():
    # 使用均匀分布初始化x（可根据实际情况调整范围等参数）
    x = np.random.uniform(low=0, high=1, size=(wij.shape[0], 1))
    # 初始步长，可根据实际情况调整
    step_size = 0.001
    gradient_norm = np.inf
    max_iterations = 20000  # 设置最大迭代次数，防止陷入死循环
    iteration = 0
    while gradient_norm > E and iteration < max_iterations:
        gradient = gradient_target_function(x)
        # 根据梯度大小自适应调整步长（简单示例，可进一步优化系数等参数）
        step_size = step_size / (1 + np.linalg.norm(gradient))
        new_x = x - step_size * gradient
        gradient_norm = np.linalg.norm(new_x - x)
        x = new_x
        iteration += 1
        # 打印调试信息，方便查看迭代过程
        print("当前迭代次数:", iteration, "梯度:", gradient, "当前步长:", step_size)
        print("更新前 x:", x.T, "更新后 x:", new_x.T)

    # 使用准确的原始范围对最终的x进行反归一化
    x = denormalize_data(x, original_x_min, original_x_max)
    return x


# ---- Script entry: run the optimization and visualize the result ----
result_x = steepest_descent()
result_y = target_function(result_x)
print("x的取值:", result_x)
# NOTE(review): this label claims a "maximum", but steepest_descent performs
# plain gradient descent (a minimizer) -- confirm the intended direction.
print("目标函数的最大值:", result_y)

# Flatten x to 1-D so both series share the same index axis in the plot.
result_x_flatten = result_x.flatten()

plt.plot(result_x_flatten, label='x values')
plt.plot(result_y, label='Target function values')
plt.xlabel('Index')
plt.ylabel('Values')
plt.title('Comparison of x and target function values')
plt.legend()
plt.show()