import math

import matplotlib.pyplot as plt


def my_func(ipt):
    """Objective function: f(x) = x**4 + 2*x**3 - 3*x**2 - 2*x."""
    quartic = ipt ** 4
    cubic = 2 * (ipt ** 3)
    quadratic = 3 * (ipt ** 2)
    linear = 2 * ipt
    # Summed in the same left-to-right order as the original expression
    # so floating-point results are bit-identical.
    return quartic + cubic - quadratic - linear


def grad_func(ipt):
    """Analytic gradient of my_func: f'(x) = 4*x**3 + 6*x**2 - 6*x - 2."""
    # Accumulated term by term in the original left-to-right order to keep
    # floating-point behaviour identical.
    slope = 4 * (ipt ** 3)
    slope += 6 * (ipt ** 2)
    slope -= 6 * ipt
    return slope - 2


def func_dealer(ipt_x, gradient, learning_rate):
    """Log the current state, take one gradient-descent step, and return
    the new position together with the objective value there.

    Returns:
        (new_x, my_func(new_x))
    """
    print(f'current ipt: {ipt_x}')
    print(f'current gradient: {gradient}')
    print(f'current learning_rate: {learning_rate}\n\n')
    next_x = ipt_x - learning_rate * gradient
    return next_x, my_func(next_x)


# Read the starting position, learning rate, iteration count, and the number
# of extra iterations granted after a local minimum is found.
x = float(input('start point: \n'))
eta = float(input('learning rate:\n'))  # prompt typo fixed: was 'learning ate'
iteration_number = int(input('number of iteration:\n'))
extra_move = int(input('extra move after finding the local minimum:\n'))

# Remember the initial step size so it can be restored after each restart kick.
start_eta = eta

# Best (lowest) point seen so far.
local_x = x
local_y = my_func(x)

# Trajectory history for plotting.
x_record = []
y_record = []

# Hard cap on total steps: the extra_move bonus keeps extending
# iteration_number, so without a cap the loop could run indefinitely.
# BUG FIX: the original guard compared i against the *current*
# iteration_number * 2, which can never be true while i < iteration_number,
# so it was dead code.  Snapshot the cap from the initial request instead.
max_total_steps = iteration_number * 2

i = 0
while i < iteration_number:
    if i > max_total_steps:
        break

    # Record the trajectory for plotting.
    x_record.append(x)
    y_record.append(my_func(x))

    # Gradient at the current position.
    grad = grad_func(x)

    # Near-zero gradient: treat this as a local optimum, then kick the point
    # one unit sideways (away from the gradient's sign) to escape and keep
    # searching for a better minimum.
    if math.fabs(grad) < 0.005:
        local_x = x
        local_y = my_func(x)
        iteration_number += extra_move  # reward: allow extra exploration steps
        if grad > 0:
            grad += 1
            x -= 1
        else:
            grad -= 1
            x += 1
        eta = start_eta  # restore the original step size after the kick

    # Take one gradient-descent step from the (possibly kicked) position.
    x, current_y = func_dealer(x, grad, eta)

    if current_y < local_y:
        # New best point: record it and shrink the step size to settle in.
        local_x = x
        local_y = current_y
        eta *= 0.95
    else:
        # No improvement: grow the step size to help escape flat regions.
        eta *= 1.05

    i += 1

print(f'the minimum found is {local_y}')

# Plot the recorded search trajectory (x vs. f(x)).
plt.plot(x_record, y_record)
plt.show()


# Example run: start at x=1.6, learning rate 0.07, 20 iterations, 4 bonus
# steps per local minimum — usually finds the global minimum.