import numpy as np
from numpy import matlib as mb
from numpy.linalg import norm, inv
from abc import ABC, abstractmethod
from scipy.optimize import brent

# Iteration cap for the bracketing loop in Front_Back_Method.
M = 100

class Function(ABC):
    """Abstract objective function: subclasses implement f(vec) -> scalar.

    All routines in this module evaluate the objective as ``Function.f(x)``
    on an instance, where ``x`` is an (n, 1) numpy column vector.
    """

    @abstractmethod
    def f(self, vec):
        """Evaluate the objective at column vector *vec*.

        The original abstract signature omitted ``self``; since every call
        site uses it as a bound method, ``self`` is required here so that
        concrete subclasses can override it with the usual signature.
        """
        raise NotImplementedError

class One_dimen_func(object):
    """Restriction of a multivariate objective to the ray X + a*p.

    Wraps an objective so that 1-D minimizers (e.g. scipy's ``brent``)
    can search over the scalar step length ``a``.
    """

    def __init__(self, Function, X, p):
        # Store the objective, the base point and the search direction.
        self.Function = Function
        self.X = X
        self.p = p

    def f(self, a):
        """Evaluate phi(a) = f(X + a*p); negative steps get a large penalty."""
        if a < 0:
            # Penalty value steers 1-D minimizers back to a >= 0.
            return 1e6
        return self.Function.f(self.X + a * self.p)
    

def grad(Function, position):
    """Central-difference gradient of Function.f at *position*.

    Args:
        Function: object exposing ``f(x) -> scalar`` for (n, 1) vectors.
        position: (n, 1) numpy column vector.

    Returns:
        (n, 1) numpy array of partial derivatives.

    Raises:
        ValueError: if *position* is not a column vector.
    """
    epsilon = 5e-5
    if position.shape[1] != 1:
        # Was a print-and-return-None; an exception is explicit and keeps
        # callers from crashing later on a None result.
        raise ValueError("position must be an (n, 1) column vector")
    ans = np.zeros(position.shape)
    for i in range(position.shape[0]):
        x1 = np.array(position)
        x2 = np.array(position)
        x1[i, 0] += epsilon
        x2[i, 0] -= epsilon
        # Symmetric difference: O(epsilon^2) truncation error.
        ans[i, 0] = (Function.f(x1) - Function.f(x2)) / (2 * epsilon)

    return ans

def Hessian(Function, position):
    """Central-difference Hessian of Function.f at *position*.

    Each entry H[i, j] is a second-order symmetric difference of
    first-order symmetric differences (four function evaluations).

    Args:
        Function: object exposing ``f(x) -> scalar`` for (n, 1) vectors.
        position: (n, 1) numpy column vector.

    Returns:
        (n, n) numpy array approximating the Hessian.

    Raises:
        ValueError: if *position* is not a column vector (consistent
        with ``grad``).
    """
    epsilon = 5e-5
    if position.shape[1] != 1:
        raise ValueError("position must be an (n, 1) column vector")
    ans = np.zeros((position.shape[0], position.shape[0]))
    for i in range(position.shape[0]):
        for j in range(position.shape[0]):
            x1 = np.array(position)
            x2 = np.array(position)
            x3 = np.array(position)
            x4 = np.array(position)
            # Four perturbed points: (+i,+j), (+i,-j), (-i,+j), (-i,-j).
            x1[i, 0] += epsilon
            x1[j, 0] += epsilon
            x2[i, 0] += epsilon
            x2[j, 0] -= epsilon
            x3[i, 0] -= epsilon
            x3[j, 0] += epsilon
            x4[i, 0] -= epsilon
            x4[j, 0] -= epsilon
            ans[i, j] = ((Function.f(x1) - Function.f(x3)) / (2 * epsilon)
                         - (Function.f(x2) - Function.f(x4)) / (2 * epsilon)) / (2 * epsilon)
    return ans


def deriv(Function, position, target, alpha):
    """Central-difference derivative of phi(a) = f(position + a*target) at a = alpha."""
    epsilon = 1e-6
    ahead = Function.f(position + (alpha + epsilon) * target)
    behind = Function.f(position + (alpha - epsilon) * target)
    return (ahead - behind) / (2 * epsilon)

def Front_Back_Method(Function, position, target, _h = 1.0, _t = 2):
    """Forward-backward bracketing along *target*.

    Grows the trial step ``a`` by ``h`` each round (with ``h *= t``) until
    the objective starts increasing, then returns that ``a``. Returns -1 if
    no increase is detected within M iterations.
    """
    a = 0.0
    h = _h
    t = _t
    X = np.array(position)
    for i in range(M):
        Y = X+a*target
        # NOTE(review): X is advanced to Y every round, so `a` is measured
        # from a moving base point rather than from `position`, while the
        # caller (Linear_Search_0618) treats the result as a bracket
        # endpoint relative to `position`. This looks inconsistent —
        # TODO confirm the intended bracketing semantics.
        if Function.f(X) < Function.f(Y):
            return a
        a = a+h
        h *= t
        X = np.array(Y)
    return -1

def Linear_Search_0618(Function, position, target):
    """Golden-section (0.618) line search for min_a f(position + a*target).

    The bracket [0, end] comes from Front_Back_Method; the interval is
    shrunk via the 0.382/0.618 interior points until the directional
    derivative at the left endpoint is numerically zero.

    Returns the point position + begin*target.
    """
    begin = 0
    end = Front_Back_Method(Function, position, target, 1e-3)
    X = np.array(position)
    p = np.array(target)
    # NOTE(review): Front_Back_Method returns -1 on failure; that case is
    # not handled here — TODO confirm a bracket is always found.
    # (Debug prints of the shrinking interval were removed.)
    while abs(deriv(Function, X, p, begin)) > 1e-6:
        lbd = begin + 0.382 * (end - begin)
        mu = begin + 0.618 * (end - begin)
        if Function.f(X + lbd * p) > Function.f(X + mu * p):
            begin = lbd
        else:
            end = mu
    return X + begin * p

def Linear_Search_Newton(Function, position, target):
    """1-D Newton line search on phi'(a), phi(a) = f(position + a*target).

    Starts from a = 0 and iterates a <- a - phi'(a)/phi''(a) (both
    derivatives estimated numerically) until |phi'(a)| <= 1e-6.
    Returns the point position + a*target.
    """
    alpha = 0
    eps = 1e-3
    X = np.array(position)
    p = np.array(target)
    slope = deriv(Function, X, p, alpha)
    curvature = (deriv(Function, X, p, alpha + eps)
                 - deriv(Function, X, p, alpha - eps)) / (2 * eps)
    while abs(slope) > 1e-6:
        # Newton update on the derivative of phi.
        alpha -= slope / curvature
        slope = deriv(Function, X, p, alpha)
        curvature = (deriv(Function, X, p, alpha + eps)
                     - deriv(Function, X, p, alpha - eps)) / (2 * eps)
    return X + alpha * p

def Steepest_Descent(Function, position, epsilon=1e-3):
    """Steepest-descent minimization of Function.f starting at *position*.

    Each step follows the negative gradient with a step length from
    Brent's 1-D minimization of phi(a) = f(X + a*p). Stops when the
    gradient norm drops below *epsilon* and returns the final point.
    """
    X = np.array(position)
    p = -grad(Function, X)
    iterations = 0  # renamed from `iter`, which shadowed the builtin
    while norm(p) > epsilon:
        phi = One_dimen_func(Function, X, p)
        X = X + brent(phi.f) * p
        p = -grad(Function, X)
        iterations += 1
    print("Iteration times:", iterations, "(Steepest Descent Method)")
    return X

def Barzilai_Borwein(Function, position, epsilon=1e-3):
    """Gradient descent with Barzilai-Borwein step lengths.

    The first step uses a Brent line search; subsequent step lengths come
    from the BB formula using the previous iterate/direction pair. Stops
    when the gradient norm drops below *epsilon* and returns the point.
    """
    s = np.array(position)   # previous iterate
    y = -grad(Function, s)   # previous descent direction (negative gradient)
    iterations = 1           # renamed from `iter`, which shadowed the builtin
    phi = One_dimen_func(Function, s, y)
    X = s + brent(phi.f)*y
    p = -grad(Function,X)
    while norm(p) > epsilon:
        t = np.array(X)
        # BB step length (X-s)^T (y-p) / ||y-p||^2; the 1x1 numerator is
        # wrapped in norm(), i.e. its absolute value is taken.
        # NOTE(review): this discards the sign of the BB step — confirm
        # intended, since the textbook quotient can be negative.
        X = X + norm(np.matmul(np.transpose(X-s),y-p))/norm(y-p)**2 * p
        s = np.array(t)
        y = np.array(p)
        p = -grad(Function, X)
        iterations += 1
    print("Iteration times:", iterations, "(Barzilai_Borwein Method)")
    return X

def Newton_Method(Function, position):
    """Damped Newton's method starting at *position*.

    Direction is -H^{-1} g (numerical Hessian/gradient); the step length
    along it comes from Brent's 1-D minimization. Stops when the Newton
    step's norm drops below 1e-3 and returns the final point.
    """
    X = np.array(position)
    p = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
    iterations = 0  # renamed from `iter`, which shadowed the builtin
    while norm(p) > 1e-3:
        phi = One_dimen_func(Function, X, p)
        X = X + brent(phi.f) * p
        p = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
        iterations += 1
    print("Iteration times:", iterations, "(Newton Method)")
    return X

def pure_Newton_Method(Function, position):
    """Pure Newton's method (unit step length) starting at *position*.

    Takes full Newton steps -H^{-1} g with no line search. Stops when the
    step's norm drops below 1e-3 and returns the final point.
    """
    X = np.array(position)
    p = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
    iterations = 0  # renamed from `iter`, which shadowed the builtin
    while norm(p) > 1e-3:
        X = X + p
        p = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
        iterations += 1
    print("Iteration times:", iterations, "(Pure Newton Method)")
    return X

    




        
    
