def objective_function(x):
    """Return the unconstrained objective value (x - 1)^2."""
    diff = x - 1
    return diff * diff


def penalty_term(x, u):
    """Return the linear exterior penalty u * max(0, 2 - x) for the constraint x >= 2."""
    # Violation amount is (2 - x) when x is below 2, otherwise zero.
    return u * (2 - x if x < 2 else 0)


def penalized_objective(x, u):
    """Return the objective augmented by the constraint penalty."""
    base = objective_function(x)
    penalty = penalty_term(x, u)
    return base + penalty


def update_x(x, u, learning_rate=0.1):
    """Take one gradient-descent step on the penalized objective.

    The penalized objective is (x - 1)^2 + u * max(0, 2 - x).

    Args:
        x: current iterate.
        u: penalty factor for the constraint x >= 2.
        learning_rate: step size for the gradient step.

    Returns:
        The updated iterate x - learning_rate * gradient.
    """
    if x < 2:
        # BUG FIX: d/dx [u * max(0, 2 - x)] = -u for x < 2, not +u.
        # The original `+ u` pushed x *away* from the feasible region x >= 2.
        grad = 2 * (x - 1) - u
    else:
        # For x >= 2 the penalty term is zero, so only the objective contributes.
        grad = 2 * (x - 1)
    return x - learning_rate * grad


# --- Initialize parameters ---
x = 4                # starting point
u = 1                # penalty factor for the constraint x >= 2
c = 10               # multiplier used to grow the penalty factor
epsilon = 1e-5       # convergence tolerance on the step size
max_iterations = 10  # cap on iterations to prevent an infinite loop

# --- Iteration: gradient steps on the penalized objective ---
for iteration in range(max_iterations):
    # Current value of the penalized objective at x.
    f_penalized = penalized_objective(x, u)
    print(f"Iteration {iteration + 1}: x = {x}, u = {u}, penalized objective = {f_penalized}")

    # Take one gradient-descent step.
    x_new = update_x(x, u)

    # Stop once the step size falls below the tolerance.
    if abs(x_new - x) < epsilon:
        break

    x = x_new

    # BUG FIX: the original guard `iteration > 10` could never be true, because
    # range(max_iterations) with max_iterations = 10 only yields 0..9 — so the
    # penalty factor was never increased.  Per the penalty method, grow u
    # whenever the iterate is still infeasible (x < 2).
    if x < 2:
        u *= c

# Report the final iterate and penalty factor.
print(f"Final solution: x = {x}, with penalty factor u = {u}")