import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Objective value at *x*: sum of three exponentials of affine terms.

    f(x) = exp(x0 + 3*x1 - 0.1) + exp(x0 - 3*x1 - 0.1) + exp(-x0 - 0.1)
    """
    u, v = x[0], x[1]
    term_a = np.exp(u + 3 * v - 0.1)
    term_b = np.exp(u - 3 * v - 0.1)
    term_c = np.exp(-u - 0.1)
    return term_a + term_b + term_c

def gradient(x):
    """Analytic gradient of f at *x*, returned as a float32 array of shape (2,)."""
    # The three exponential terms appear in several partial derivatives,
    # so evaluate each exactly once.
    ea = np.exp(x[0] + 3 * x[1] - 0.1)
    eb = np.exp(x[0] - 3 * x[1] - 0.1)
    ec = np.exp(-x[0] - 0.1)
    df_dx0 = ea + eb - ec
    df_dx1 = 3 * ea - 3 * eb
    return np.array([df_dx0, df_dx1], dtype=np.float32)

def hessian(x):
    """Analytic Hessian of f at *x*, returned as a float32 array of shape (2, 2)."""
    # Shared exponential terms, each evaluated once.
    ea = np.exp(x[0] + 3 * x[1] - 0.1)
    eb = np.exp(x[0] - 3 * x[1] - 0.1)
    ec = np.exp(-x[0] - 0.1)
    # The Hessian is symmetric: both off-diagonal entries are d2f/dx0dx1.
    diag0 = ea + eb + ec
    off = 3 * ea - 3 * eb
    diag1 = 9 * ea + 9 * eb
    return np.array([[diag0, off],
                     [off, diag1]], dtype=np.float32)

def direction(x):
    """Newton step at *x*: the solution d of H(x) d = -grad f(x).

    Solves the 2x2 linear system with np.linalg.solve instead of forming
    the explicit inverse (`inv(H) @ g`), which is both cheaper and
    numerically more stable.
    """
    return -np.linalg.solve(hessian(x), gradient(x))

# --- Newton's method with a backtracking (Armijo) line search ---
epsilon = 1e-7  # stop when half the squared Newton decrement falls below this
x0 = [1,1]  # starting point (becomes an ndarray after the first update x0 = x1)
alpha = 0.4  # Armijo sufficient-decrease fraction used in the line search
beta = 0.5  # multiplicative shrink factor for the step size t
plt.figure(figsize=(9,4))
plt.subplot(121)  # left panel: path of the iterates in the (x, y) plane
plt.xlabel("x")
plt.ylabel("y")
pointls = []  # iterate history (NOTE(review): collected but never used below)
loss = []  # f(x) recorded at each iterate, plotted in the right panel
while True:
    loss.append(f(x0))
    pointls.append(x0)
    d = direction(x0)  # Newton step computed from the gradient and Hessian
    # lamda = grad(x)·d is the negative of the squared Newton decrement,
    # so -0.5 * lamda estimates f(x) - min f; stop once it is below epsilon.
    lamda = gradient(x0) @ d
    if -0.5 * lamda < epsilon:
        break
    t = 1  # begin the line search with a full Newton step
    x1 = x0 + t * d
    # Backtrack: shrink t until the Armijo condition
    # f(x + t*d) <= f(x) + alpha * t * grad·d is satisfied.
    while f(x1) > f(x0) + alpha * t * lamda:
        t = beta * t
        x1 = x0 + t * d
    plt.plot([x0[0], x1[0]], [x0[1], x1[1]])  # draw this step's segment
    x0 = x1
    print(f"f(x) = {f(x0)}")

print(f"x={x0}")
plt.subplot(122)  # right panel: convergence of f(x) against iteration k
plt.plot(range(0,len(loss)),loss)
plt.xlabel("k")
plt.ylabel("f(x)")
plt.show()