import numpy as np
import matplotlib.pyplot as plt
from scipy.optimize import approx_fprime
from concurrent.futures import ThreadPoolExecutor

def f(x):
    """
    Objective function: sum over the elements of x of sin(x**2) + 4*x + 4.

    NOTE: despite what the original (Chinese) docstring claimed, this is NOT
    a quadratic; the sin(x**2) term makes it non-convex with many local
    minima, which is why the solver below uses multiple random restarts.

    Parameters
    ----------
    x : array-like of float
        Point at which to evaluate the objective.

    Returns
    -------
    float
        Scalar objective value.
    """
    return np.sum(np.sin(x ** 2) + 4 * x + 4)

def clip_to_bounds(x, bounds):
    """
    Project the vector x onto the axis-aligned box described by bounds.

    Parameters
    ----------
    x : ndarray
        Point to project.
    bounds : sequence of (low, high) pairs
        One (min, max) pair per coordinate of x.

    Returns
    -------
    ndarray
        x with each coordinate clamped into its [low, high] interval.
    """
    box = np.asarray(bounds, dtype=float)
    lows, highs = box[:, 0], box[:, 1]
    return np.clip(x, lows, highs)

def run_gradient_descent(f, initial_x, learning_rate, num_iterations, epsilon, bounds):
    """
    Run plain gradient descent from a single starting point.

    The gradient is estimated by forward finite differences
    (scipy.optimize.approx_fprime). Note that `epsilon` does double duty:
    it is both the finite-difference step and the convergence tolerance on
    the gradient norm — this preserves the original interface.

    Parameters
    ----------
    f : callable
        Objective mapping an ndarray to a scalar.
    initial_x : array-like
        Starting point.
    learning_rate : float
        Step size multiplier for each update.
    num_iterations : int
        Maximum number of descent steps.
    epsilon : float
        Finite-difference step AND gradient-norm stopping tolerance.
    bounds : sequence of (low, high) pairs or None
        If given, each iterate is clipped back into the box.

    Returns
    -------
    (ndarray, list of (ndarray, float))
        The final iterate and the history of (point, objective) pairs.
        history[-1] corresponds to the returned point.
    """
    x = np.array(initial_x, dtype=float)
    history = []
    for _ in range(num_iterations):
        history.append((x.copy(), f(x)))
        # Pass f directly — the original `lambda x: f(x)` was a redundant
        # wrapper that also shadowed the loop variable x.
        grad = approx_fprime(x, f, epsilon)
        x = x - learning_rate * grad
        if bounds is not None:
            x = clip_to_bounds(x, bounds)
        if np.linalg.norm(grad) < epsilon:
            break
    # Bug fix: record the final iterate so that history[-1] reflects the
    # returned x. Previously the last update's objective value was never
    # evaluated, so callers ranking runs by history[-1][1] used a stale point.
    history.append((x.copy(), f(x)))
    return x, history

def gradient_descent(f, dim, bounds=None, initial_guess=None, learning_rate=0.1, num_iterations=100, epsilon=1e-8,
                     verbose=True):
    """
    Solve min f(x) by multi-start gradient descent.

    Several starting points are generated (random, optionally seeded around
    a user-supplied guess), gradient descent is run from each in parallel,
    and the run ending at the lowest objective value wins.

    Parameters
    ----------
    f : callable
        Objective mapping an ndarray of shape (dim,) to a scalar.
    dim : int
        Problem dimensionality.
    bounds : sequence of (low, high) pairs, optional
        Box constraints; iterates are clipped into the box.
    initial_guess : array-like, optional
        If given, 5 starts are Gaussian perturbations of it (std 0.5) plus
        6 random starts; otherwise 12 random starts uniform in [-10, 10).
    learning_rate, num_iterations, epsilon : see run_gradient_descent.
    verbose : bool
        If True, plot the objective trace of the winning run.

    Returns
    -------
    (ndarray, list of (ndarray, float))
        Final point and iteration history of the best run.
    """
    if initial_guess is None:
        # 12 random starting points, uniform in [-10, 10)^dim.
        initial_guesses = [np.random.rand(dim) * 20 - 10 for _ in range(12)]
    else:
        # 5 perturbations of the supplied guess plus 6 fresh random starts.
        initial_guesses = [initial_guess + np.random.randn(dim) * 0.5 for _ in range(5)]
        initial_guesses += [np.random.rand(dim) * 20 - 10 for _ in range(6)]

    # Threads are adequate here: numpy/scipy release the GIL for the
    # numerical work, so the restarts overlap.
    with ThreadPoolExecutor() as executor:
        results = list(executor.map(
            lambda x0: run_gradient_descent(f, x0, learning_rate, num_iterations, epsilon, bounds),
            initial_guesses))

    # Bug fix: rank runs by the objective at the *returned* point f(res[0]).
    # The original key, res[1][-1][1], read the last history entry, which is
    # the iterate recorded *before* the final update — not the returned x.
    best_result = min(results, key=lambda res: f(res[0]))

    def plot_gradient_descent(best_history):
        # Plot objective value per iteration for the winning run.
        func_values = [rec[1] for rec in best_history]
        plt.figure(figsize=(10, 5))
        plt.plot(range(len(func_values)), func_values, marker='o', linestyle='-', color='b')
        plt.xlabel('Iteration')
        plt.ylabel('Function Value')
        plt.title('Gradient Descent Progress (Best of Parallel Runs)')
        plt.grid(True)
        plt.show()

    # Show the winning run's convergence curve only when asked.
    if verbose:
        plot_gradient_descent(best_result[1])

    return best_result

# --- Problem configuration ------------------------------------------------
dim = 2
bounds = ((-10, 10), (-10, 10))  # search box, one (low, high) pair per axis
initial_guess = None  # None -> fully random restarts; or pass a start vector
learning_rate = 0.1
num_iterations = 100
epsilon = 1e-8
verbose = True

# --- Solve with multi-start gradient descent ------------------------------
final_x, history = gradient_descent(
    f,
    dim,
    bounds,
    initial_guess,
    learning_rate,
    num_iterations,
    epsilon,
    verbose,
)

# --- Report the best point found ------------------------------------------
print(f"Final result: x = {final_x}, f(x) = {f(final_x)}")