import numpy as np
from numpy import matlib as mb
from numpy.linalg import norm, inv
from abc import ABC, abstractmethod
from scipy.optimize import brent

M = 100

class Function(ABC):
    """Abstract interface for a scalar objective f : R^n -> R.

    Concrete subclasses override :meth:`f`; every optimizer in this module
    calls ``instance.f(x)`` with an (n, 1) numpy column vector and expects a
    scalar back.
    """

    @abstractmethod
    def f(self, vec):
        """Evaluate the objective at column vector *vec* and return a scalar."""
        # Abstract: subclasses must provide the actual objective.
        raise NotImplementedError

class One_dimen_func(object):
    """Restriction of a multivariate objective to the ray X + a*p, a >= 0.

    Wraps *Function* so that ``f`` becomes a scalar function of the step
    length ``a``, suitable for one-dimensional minimizers such as Brent.
    """

    def __init__(self, Function, X, p):
        # Objective, base point and search direction defining the ray.
        self.Function = Function
        self.X = X
        self.p = p

    def f(self, a):
        """Evaluate the objective at X + a*p; penalize negative steps."""
        # Large penalty keeps scalar minimizers out of the a < 0 region.
        if a < 0:
            return 1e6
        return self.Function.f(self.X + a * self.p)
    

def grad(Function, position):
    """Central-difference gradient of Function.f at *position*.

    *position* must be an (n, 1) column vector; returns an (n, 1) array of
    partial derivatives, or None (after a message) on a bad shape.
    """
    eps = 5e-5
    # Only column vectors are accepted; anything else is reported and dropped.
    if position.shape[1] != 1:
        print("Error! Input is incorrect position!")
        return
    n = position.shape[0]
    g = np.zeros(position.shape)
    for k in range(n):
        forward = np.array(position)
        backward = np.array(position)
        forward[k, 0] += eps
        backward[k, 0] -= eps
        # (f(x + eps*e_k) - f(x - eps*e_k)) / (2*eps)
        g[k, 0] = (Function.f(forward) - Function.f(backward)) / (2 * eps)

    return g

def Hessian(Function, position):
    """Finite-difference Hessian of Function.f at *position*.

    *position* must be an (n, 1) column vector; returns an (n, n) matrix of
    second partials via nested central differences, or None on a bad shape.
    """
    eps = 5e-5
    if position.shape[1] != 1:
        print("Error! Input is incorrect position!")
        return
    n = position.shape[0]
    H = np.zeros((n, n))
    for i in range(n):
        for j in range(n):
            # Four stencil points x +/- eps*e_i +/- eps*e_j.
            pp = np.array(position)
            pm = np.array(position)
            mp = np.array(position)
            mm = np.array(position)
            pp[i, 0] += eps
            pp[j, 0] += eps
            pm[i, 0] += eps
            pm[j, 0] -= eps
            mp[i, 0] -= eps
            mp[j, 0] += eps
            mm[i, 0] -= eps
            mm[j, 0] -= eps
            # Outer central difference of the inner central difference.
            H[i, j] = ((Function.f(pp) - Function.f(mp)) / (2 * eps) - (Function.f(pm) - Function.f(mm)) / (2 * eps)) / (2 * eps)
    return H


def deriv(Function, position, target, alpha):
    """Central-difference derivative of a |-> f(position + a*target) at a = alpha."""
    eps = 1e-6
    hi = Function.f(position + (alpha + eps) * target)
    lo = Function.f(position + (alpha - eps) * target)
    return (hi - lo) / (2 * eps)

def Front_Back_Method(Function, position, target, _h = 1.0, _t = 2, max_iter = 100):
    """Bracket a step length along *target* by forward stepping with growth.

    Starting from step a = 0 with increment *_h* (multiplied by *_t* each
    iteration), the current point advances while the objective keeps
    decreasing.  Returns the first step length a at which the objective at
    the trial point exceeds the objective at the current point, or -1 if no
    such step is found within *max_iter* iterations.

    ``max_iter`` replaces the previous hidden dependence on the module-global
    ``M`` (which was 100), making the bound explicit and the routine
    self-contained; default behavior is unchanged.
    """
    a = 0.0
    h = _h
    t = _t
    X = np.array(position)
    for _ in range(max_iter):
        Y = X + a * target
        # Objective started increasing: a brackets the minimizer.
        if Function.f(X) < Function.f(Y):
            return a
        a = a + h
        h *= t
        X = np.array(Y)
    return -1

def Linear_Search_0618(Function, position, target):
    """Golden-section (0.618) line search along *target* from *position*.

    The interval [0, end] is first bracketed with Front_Back_Method (initial
    step 1e-3), then repeatedly shrunk at the golden-ratio points until the
    directional derivative at the lower endpoint is numerically zero.

    Returns the minimizing point ``position + begin*target``.

    Fix: removed leftover debug ``print`` statements that spammed stdout on
    every iteration; the search logic is unchanged.
    """
    begin = 0
    # Bracket the minimizer; may return -1 if bracketing fails within its budget.
    end = Front_Back_Method(Function, position, target, 1e-3)
    X = np.array(position)
    p = np.array(target)
    while abs(deriv(Function, X, p, begin)) > 1e-6:
        lbd = begin + 0.382 * (end - begin)
        mu = begin + 0.618 * (end - begin)
        # Keep the sub-interval that still contains the minimizer.
        if Function.f(X + lbd * p) > Function.f(X + mu * p):
            begin = lbd
        else:
            end = mu
    return X + begin * p

def Linear_Search_Newton(Function, position, target):
    """One-dimensional Newton iteration for the step length along *target*.

    Iterates a <- a - phi'(a)/phi''(a) on phi(a) = f(position + a*target),
    with both derivatives taken by finite differences, until phi'(a)
    vanishes; returns the corresponding point.
    """
    eps = 1e-3
    X = np.array(position)
    p = np.array(target)
    a = 0

    def _d1(s):
        # First derivative of the restricted objective at step s.
        return deriv(Function, X, p, s)

    d1 = _d1(a)
    # Second derivative by central difference of the first derivative.
    d2 = (_d1(a + eps) - _d1(a - eps)) / (2 * eps)
    while abs(d1) > 1e-6:
        a = a - d1 / d2
        d1 = _d1(a)
        d2 = (_d1(a + eps) - _d1(a - eps)) / (2 * eps)
    return X + a * p

def Steepest_Descent(Function, position, epsilon=1e-3):
    """Steepest descent with an exact (Brent) line search.

    Moves along -grad f, choosing each step length with Brent's scalar
    minimizer, until the gradient norm drops below *epsilon*.
    """
    X = np.array(position)
    direction = -grad(Function, X)
    steps = 0
    while norm(direction) > epsilon:
        line = One_dimen_func(Function, X, direction)
        X = X + brent(line.f) * direction
        direction = -grad(Function, X)
        steps += 1
    print("Iteration times:", steps, "(Steepest Descent Method)")
    return X

def Barzilai_Borwein(Function, position, epsilon =1e-3):
    """Gradient descent with a Barzilai-Borwein step size.

    The first step uses a Brent line search; afterwards the step length is
    |s^T y| / ||y||^2, where s is the previous displacement and y the
    previous change in gradient (tracked with a one-iteration lag).

    NOTE(review): ``norm(np.matmul(...))`` of the 1x1 product is the
    absolute value of s^T y, so a negative curvature estimate is silently
    flipped positive — confirm this is intended.
    """
    # s: previous iterate; y: negated gradient at s.
    s = np.array(position)
    y = -grad(Function, s)
    iter = 1
    # First step: exact line search along the steepest-descent direction.
    phi = One_dimen_func(Function, s, y)
    X = s + brent(phi.f)*y
    p = -grad(Function,X)
    while(norm(p)>epsilon):
        t = np.array(X)
        # BB step: |(X - s)^T (y - p)| / ||y - p||^2 along current -grad.
        # Here X - s is the last displacement and y - p = grad_new - grad_old.
        X = X + norm(np.matmul(np.transpose(X-s),y-p))/norm(y-p)**2 * p
        s = np.array(t)
        y = np.array(p)
        p = -grad(Function, X)
        iter += 1
    print("Iteration times:", iter, "(Barzilai_Borwein Method)")
    return X

def Newton_Method(Function, position):
    """Damped Newton's method: direction -H^{-1} g, step length via Brent.

    Iterates until the Newton step's norm falls below 1e-3.
    """
    X = np.array(position)
    step = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
    count = 0
    while norm(step) > 1e-3:
        line = One_dimen_func(Function, X, step)
        X = X + brent(line.f) * step
        step = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
        count += 1
    print("Iteration times:", count, "(Newton Method)")
    return X

def pure_Newton_Method(Function, position):
    """Undamped Newton's method: full step X <- X - H^{-1} g each iteration.

    Iterates until the Newton step's norm falls below 1e-3.
    """
    X = np.array(position)
    count = 0
    step = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
    while norm(step) > 1e-3:
        X = X + step
        step = -np.matmul(inv(Hessian(Function, X)), grad(Function, X))
        count += 1
    print("Iteration times:", count, "(Pure Newton Method)")
    return X

def CG_alpha(A, r, p):
    """Return (r^T p) / (p^T A p) for column vectors r, p and matrix A."""
    numerator = np.matmul(np.transpose(r), p)[0, 0]
    denominator = np.matmul(np.transpose(p), np.matmul(A, p))[0, 0]
    return numerator / denominator

def pre_CG_alpha(A, r, p, y):
    """Return (r^T y) / (p^T A p) — the preconditioned-CG step length."""
    num = np.matmul(np.transpose(r), y)[0, 0]
    den = np.matmul(np.transpose(p), np.matmul(A, p))[0, 0]
    return num / den

def CG_Method(position, A, b, shutup=False, list_out=False):
    """Conjugate-gradient solver for the linear system A x = b.

    Starts from *position* and iterates until the residual norm falls below
    1e-6.  With ``list_out`` the per-iteration iterates are returned instead
    of the solution; ``shutup`` suppresses the iteration-count report.
    """
    trace = []
    X = np.array(position)
    r = np.matmul(A, X) - b  # residual A x - b
    p = -r                   # first search direction
    iterations = 0
    while norm(r) > 1e-6:
        if list_out:
            trace.append(np.array(X))
        # Step length -(r^T p)/(p^T A p); helper inlined for self-containment.
        rTp = np.matmul(np.transpose(r), p)[0, 0]
        pAp = np.matmul(np.transpose(p), np.matmul(A, p))[0, 0]
        alpha = -(rTp / pAp)
        X = X + alpha * p
        prev = np.array(r)
        r = r + alpha * np.matmul(A, p)
        # Fletcher-Reeves beta: ||r_new||^2 / ||r_old||^2.
        beta = norm(r) ** 2 / norm(prev) ** 2
        p = -r + beta * p
        iterations += 1
    if not shutup:
        print("Iteration times:", iterations, "(CG Method)")
    if list_out:
        return trace
    return X

def PRE_CG_Method(position, A, b, C, list_out=False):
    """Preconditioned conjugate gradient for A x = b with preconditioner C^T C.

    Each iteration solves (C^T C) y = r with the plain CG solver and uses y
    to steer the search direction.  Returns the solution, or the list of
    iterates when *list_out* is set.
    """
    trace = []
    # Renamed from `M` to avoid shadowing the module-level constant.
    precond = np.matmul(np.transpose(C), C)
    X = np.array(position)
    r = np.matmul(A, X) - b
    y = CG_Method(np.array(X), precond, r, True)
    p = -y
    iterations = 0
    while norm(r) > 1e-6:
        if list_out:
            trace.append(np.array(X))
        alpha = pre_CG_alpha(A, r, p, y)
        X = X + alpha * p
        r_old = np.array(r)
        r = r + alpha * np.matmul(A, p)
        y_old = np.array(y)
        # Fresh preconditioner solve for the new residual.
        y = CG_Method(np.array(X), precond, r, True)
        beta = np.matmul(np.transpose(r), y)[0, 0] / np.matmul(np.transpose(r_old), y_old)[0, 0]
        p = -y + beta * p
        iterations += 1
    print("Iteration times:", iterations, "(CG Method)")
    if list_out:
        return trace
    return X

def BFGS_next(H, y, s):
    """BFGS inverse-Hessian update.

    Computes H' = (I - s y^T / rho) H (I - y s^T / rho) + s s^T / rho with
    rho = s^T y.  Tiny (y, s) pairs are jointly rescaled for numerical
    conditioning; the update itself is invariant under that joint scaling.
    """
    if norm(y) * norm(s) < 1:
        y = 1e3 * y
        s = 1e3 * s
    I = np.eye(H.shape[0])
    rho = np.matmul(np.transpose(s), y)[0, 0]
    left = I - np.matmul(s, np.transpose(y)) / rho
    right = I - np.matmul(y, np.transpose(s)) / rho
    return np.matmul(left, np.matmul(H, right)) + np.matmul(s, np.transpose(s)) / rho
def BFGS_Method(Function, position, epsilon = 1e-5):
    """Quasi-Newton (BFGS) minimization of Function.f starting at *position*.

    Maintains an inverse-Hessian approximation H (seeded from the true
    inverse Hessian at the start), takes Brent-line-searched steps along
    -H*grad, and refreshes H with BFGS_next until the gradient norm drops
    below *epsilon*.
    """
    X = np.array(position)
    # Seed with the true inverse Hessian at the starting point.
    H = inv(Hessian(Function,X))
    iter = 0
    p = grad(Function, X)
    while(norm(p)>epsilon):
        t = np.array(X)  # previous iterate
        q = np.array(p)  # previous gradient
        # p is reused: gradient above, search direction -H*grad below.
        p = -np.matmul(H,p)
        phi = One_dimen_func(Function, X, p)
        X = X + brent(phi.f)*p
        s = X-t  # displacement s_k
        p = grad(Function, X)  # new gradient
        y = p-q  # gradient difference y_k
        H = BFGS_next(H,y,s)
        iter += 1
    print("Iteration times:", iter, "(BFGS Method)")
    return X
        
    

    




        
    
