import os
import numpy as np
from scipy.optimize import linesearch, line_search
from matplotlib import pyplot as plt
m = 4  # problem dimension (extended Rosenbrock in R^4; must be even for the pairwise terms)


# Step-size selection by backtracking under the strong Wolfe conditions
rho=0.1    # c1: sufficient-decrease (Armijo) constant in the Wolfe conditions
sigma=0.9  # c2: curvature constant for the strong Wolfe condition (rho < sigma < 1)
gamma=0.1  # backtracking shrink factor applied to alpha on each rejection
def Alpha(x, alpha, d):
    """Backtracking line search satisfying the strong Wolfe conditions.

    Starting from the trial step ``alpha``, repeatedly shrinks it by the
    module-level factor ``gamma`` until both the sufficient-decrease
    (Armijo, constant ``rho``) and the strong curvature (constant
    ``sigma``) conditions hold, or 20 shrinks have been tried.

    Args:
        x: current iterate (1-D ndarray).
        alpha: initial (largest) trial step size.
        d: search direction (1-D ndarray); assumed to be a descent
           direction, i.e. nablaf(x)·d < 0.

    Returns:
        float: an accepted step size, or ``gamma * alpha`` (one more
        shrink of the last rejected step) after 20 failed trials.
    """
    # Quantities that do not depend on alpha — hoist out of the loop.
    fx = fun(x)
    gd = np.dot(nablaf(x).T, d)  # directional derivative at x
    cnt = 0
    while True:
        step = x + alpha * d
        # Armijo decrease AND strong curvature |g(x+ad)·d| <= -sigma*g(x)·d.
        if (fun(step) <= fx + rho * alpha * gd
                and abs(np.dot(nablaf(step).T, d)) <= -sigma * gd):
            return alpha
        cnt += 1
        if cnt > 20:
            # Give up: return the next (smallest) trial rather than looping forever.
            return gamma * alpha
        alpha = gamma * alpha

def fun(x):
    """Extended Rosenbrock objective.

    f(x) = sum over even i of (10*(x[i+1] - x[i]**2))**2
         + sum over odd i  of (1 - x[i-1])**2

    Generalized from the hard-coded module dimension ``m`` to any
    even-length input (behavior is unchanged for length-4 vectors).
    The global minimum f = 0 is at x = (1, 1, ..., 1).

    Args:
        x: 1-D array-like of even length.

    Returns:
        float: the objective value (sum of squared residuals).
    """
    loss = 0.0
    for i in range(len(x)):
        if i % 2 == 0:
            r = 10 * (x[i + 1] - x[i] ** 2)  # coupling residual; needs x[i+1], hence even length
        else:
            r = 1 - x[i - 1]
        loss += r ** 2
    return loss

def nablaf(x):
    """Gradient of the extended Rosenbrock objective ``fun``.

    For even i:  d f / d x[i] = 400*x[i]**3 - 400*x[i]*x[i+1] + 2*x[i] - 2
    For odd i:   d f / d x[i] = 200*x[i] - 200*x[i-1]**2

    Generalized from the hard-coded module dimension ``m`` to any
    even-length input (behavior unchanged for length-4 vectors).

    Args:
        x: 1-D array-like of even length.

    Returns:
        np.ndarray: gradient vector, same length as ``x``.
    """
    nabla = np.zeros(len(x))
    for i in range(len(x)):
        if i % 2 == 0:
            # -400*x[i]*(x[i+1]-x[i]**2) from the coupling term, -2*(1-x[i]) from the linear term.
            nabla[i] = 400 * x[i] ** 3 - 400 * x[i] * x[i + 1] + 2 * x[i] - 2
        else:
            # 200*(x[i]-x[i-1]**2) from the coupling term of the preceding even index.
            nabla[i] = 200 * x[i] - 200 * x[i - 1] ** 2
    return nabla

def plot_objs_figure(ls_xs, ls_objs, const_xs=None,
                     const_objs=None, armijo_xs=None,
                     armijo_objs=None):
    """Plot objective-value curves of the three quasi-Newton runs.

    Args:
        ls_xs: iterate trajectory of the SR1 run (unused here, kept for
            interface compatibility with existing callers).
        ls_objs: per-iteration objective values of the SR1 run.
        const_xs: unused, kept for interface compatibility.
        const_objs: per-iteration objective values of the DFP run, or None.
        armijo_xs: unused, kept for interface compatibility.
        armijo_objs: per-iteration objective values of the BFGS run, or None.
    """
    print(ls_objs)
    plt.xlabel('Number of steps')
    plt.ylabel('Objective Function')
    # plt.yscale("log")  # uncomment for a log-scale objective axis
    plt.plot(range(len(ls_objs)), ls_objs, 'r', label='SR1')
    # Guard the optional curves: the defaults are None, and the original
    # unconditionally called len(None) which raised TypeError.
    if const_objs is not None:
        plt.plot(range(len(const_objs)), const_objs, 'b', label='DFP')
    if armijo_objs is not None:
        plt.plot(range(len(armijo_objs)), armijo_objs, 'g', label='BFGS')
    plt.legend(loc='upper right')
    plt.show()
def SR1(H_k, s, y):
    """Symmetric-rank-1 update of the inverse Hessian approximation.

    H+ = H + (s - H y)(s - H y)^T / ((s - H y)^T y)

    Args:
        H_k: current inverse-Hessian approximation (n x n ndarray).
        s: step vector x_{k+1} - x_k.
        y: gradient difference g_{k+1} - g_k.

    Returns:
        np.ndarray: updated inverse-Hessian approximation.

    Note: no safeguard is applied when (s - H y)^T y is near zero, which
    can make the update numerically unstable (same as the original code).
    """
    # Hoist the repeated subexpression (originally computed three times).
    v = s - H_k @ y
    return H_k + np.outer(v, v) / np.dot(v, y)

def DFP(H_k, s_k, y_k):
    """DFP update of the *inverse* Hessian approximation.

    H+ = H + s s^T / (s^T y) - (H y)(H y)^T / (y^T H y)

    Bug fix: the original computed H @ s and divided by s^T H s, i.e. it
    used the step where the gradient difference belongs. That variant does
    not satisfy the secant equation H+ y = s required for the quasi-Newton
    direction d = -H g used in wolfe_search.

    Args:
        H_k: current inverse-Hessian approximation (n x n ndarray).
        s_k: step vector x_{k+1} - x_k.
        y_k: gradient difference g_{k+1} - g_k.

    Returns:
        np.ndarray: updated inverse-Hessian approximation.
    """
    Hy = np.dot(H_k, y_k)
    return H_k + np.outer(s_k, s_k) / np.dot(s_k, y_k) \
        - np.outer(Hy, Hy) / np.dot(y_k, Hy)
        
def BFGS(H_k, s_k, y_k):
    """BFGS update of the *inverse* Hessian approximation.

    H+ = (I - rho s y^T) H (I - rho y s^T) + rho s s^T,   rho = 1 / (y^T s)

    Bug fix: the original applied the *direct*-Hessian BFGS update
    (B+ = B - B s s^T B / (s^T B s) + y y^T / (s^T y)) to H, but
    wolfe_search uses H_k as the inverse Hessian (d = -H g), so the
    result violated the secant equation H+ y = s.

    Args:
        H_k: current inverse-Hessian approximation (n x n ndarray).
        s_k: step vector x_{k+1} - x_k.
        y_k: gradient difference g_{k+1} - g_k.

    Returns:
        np.ndarray: updated inverse-Hessian approximation.
    """
    rho_k = 1.0 / np.dot(y_k, s_k)
    V = np.eye(len(s_k)) - rho_k * np.outer(s_k, y_k)
    return V @ H_k @ V.T + rho_k * np.outer(s_k, s_k)
        
def wolfe_search(initial_x, H_fun):
    """Quasi-Newton minimization of ``fun`` with a strong-Wolfe line search.

    Iterates x+ = x + alpha * d with d = -H g, where H (the inverse-Hessian
    approximation) is maintained by the supplied update rule.

    Args:
        initial_x: starting point (1-D ndarray).
        H_fun: inverse-Hessian update, called as H_fun(H_k, s, y) — one of
            SR1, DFP, BFGS.

    Returns:
        tuple (objectives, xs): per-iteration objective values and the
        iterate trajectory (starting with ``initial_x``).
    """
    xs = [initial_x]
    objectives = []
    x = initial_x
    # Generalized from the hard-coded np.eye(4) to the problem dimension.
    H_k = np.eye(len(initial_x))
    for n_iter in range(100):
        g_k = nablaf(x)
        d_k = -np.dot(H_k, g_k)
        if fun(x) < 1e-8:  # close enough to the minimum f* = 0
            break
        alpha_k = Alpha(x, 50., d_k)
        # alpha_k = line_search(fun, nablaf, x, d_k)[0]
        if alpha_k is None:  # line search failed (scipy line_search variant)
            break
        if not isinstance(alpha_k, float):
            # The scipy variant may return a 0-d/1-element array.
            alpha_k = alpha_k.squeeze()
        x_next = x + alpha_k * d_k
        s = x_next - x
        # Reuse g_k instead of recomputing nablaf(x).
        y = nablaf(x_next) - g_k
        H_k = H_fun(H_k, s, y)
        x = x_next

        objectives.append(fun(x))
        # Bug fix: the original appended x both at the top and the bottom of
        # the loop, duplicating every iterate in the returned trajectory.
        xs.append(x)
        print("Gradient Descent({bi}/{ti}): objective={l}".format(
              bi=n_iter+1, ti=100 - 1, l=fun(x)))

    return objectives, xs

# Run the three quasi-Newton variants from the same starting point and
# compare their objective curves.
start = np.array([1.2, 1, 1, 1])
objs_sr1, xs_sr1 = wolfe_search(start, SR1)
objs_dfp, xs_dfp = wolfe_search(start, DFP)
objs_bfgs, xs_bfgs = wolfe_search(start, BFGS)
plot_objs_figure(ls_xs=np.array(xs_sr1),
                 ls_objs=np.array(objs_sr1),
                 const_objs=np.array(objs_dfp),
                 armijo_objs=np.array(objs_bfgs))