import matplotlib.pyplot as plt
import numpy as np
def f(x):
    """Objective: sum of three exponentials of affine functions of x = (x[0], x[1])."""
    u, v = x[0], x[1]
    return np.exp(u + 3 * v - 0.1) + np.exp(u - 3 * v - 0.1) + np.exp(-u - 0.1)
def gradient(x):
    """Analytic gradient of f at x = (x[0], x[1]).

    Returns a float64 array [df/dx0, df/dx1].

    Fixes: the original cast the result to float32, silently discarding
    ~9 significant digits — risky given the caller's 1e-7 stopping
    tolerance on the gradient norm.  The three exponentials are also
    computed once and shared between both components.
    """
    a = np.exp(x[0] + 3 * x[1] - 0.1)   # first term of f
    b = np.exp(x[0] - 3 * x[1] - 0.1)   # second term of f
    c = np.exp(-x[0] - 0.1)             # third term of f
    return np.array([a + b - c, 3 * a - 3 * b])

# --- Gradient descent with backtracking (Armijo) line search ----------------
epsilon = 1e-7  # stop when ||grad f(x)|| drops below this tolerance
alpha = 0.2     # Armijo sufficient-decrease parameter
beta = 0.5      # backtracking shrink factor for the step length t

# Start point as an ndarray so x0 + t*d is well-defined ndarray arithmetic
# (the original used a plain list, which only worked via ndarray.__radd__
# and left pointls holding a mix of list and ndarray entries).
x0 = np.array([1.0, 1.0])

plt.figure(figsize=(9, 4))
plt.subplot(121)
plt.xlabel("x")
plt.ylabel("y")

pointls = []  # iterates visited
loss = []     # f value at each iterate

# Evaluate the gradient once per iteration and reuse it: the original
# called gradient(x0) both in the loop condition and for d.
g = gradient(x0)
while np.linalg.norm(g) > epsilon:
    loss.append(f(x0))
    pointls.append(x0)
    d = -g  # steepest-descent direction
    # Backtracking line search: shrink t until the Armijo condition
    #   f(x0 + t*d) <= f(x0) + alpha * t * grad(x0)^T d
    # holds.  Since d = -grad(x0), np.dot(d, -d) == -||d||^2 == grad^T d.
    t = 1
    x1 = x0 + t * d
    while f(x1) > f(x0) + alpha * t * np.dot(d, -d):
        t = beta * t
        x1 = x0 + t * d
    plt.plot([x0[0], x1[0]], [x0[1], x1[1]])  # segment of the descent path
    x0 = x1
    g = gradient(x0)
    print(f"f(x) = {f(x0)}")

print(len(pointls), pointls[-1])
plt.subplot(122)
plt.plot(range(len(loss)), loss)  # convergence curve: f(x_k) vs iteration k
plt.xlabel("k")
plt.ylabel("f(x)")
plt.show()