import numpy as np
import matplotlib.pyplot as plt
import random
import math

from Goldstein import *
from GriewankFunction import *
from Wolfe import *
from Backtracking import *

def steepestDescent(x0, criterionFunction,Func,dFunc,maxSteps):
    """
    Steepest (gradient) descent with a selectable line-search rule.

    Parameters
    ----------
    x0 : np.ndarray
        Starting point (1-D array).
    criterionFunction : callable
        Step-size rule, compared by identity against Goldstein, Wolfe,
        or Backtracking. Any other callable leaves the step size at its
        previous value (initially 1), i.e. a fixed-step descent.
    Func : callable
        Objective function f(x).
    dFunc : callable
        Gradient of the objective, dFunc(x) -> np.ndarray.
        Assumed to be pure (same x -> same gradient) — TODO confirm.
    maxSteps : int
        Maximum number of iterations.

    Returns
    -------
    np.ndarray
        Array of shape (len(x0), k), k <= maxSteps, holding the
        sequence of iterates (column 0 is x0).
    """
    alpha = 1
    preAlpha = 0  # previous step size; used only by the Backtracking rule
    W = np.zeros((x0.shape[0], maxSteps))
    W[:, 0] = x0
    x = x0
    grad = dFunc(x)
    delta = sum(grad**2)  # squared gradient norm: convergence measure

    i = 1
    while i < maxSteps and delta > 1e-5:
        # Descent direction is the negative gradient. Reuse the gradient
        # already computed for `delta` instead of calling dFunc(x) again
        # (the original evaluated the gradient twice per iteration).
        p = -grad
        if criterionFunction == Goldstein:
            alpha = criterionFunction(Func, dFunc, 1, p, x, 1.5, 0.1)
        elif criterionFunction == Wolfe:
            alpha = criterionFunction(Func, dFunc, 1, p, x, 1.5, 0.001, 0.1)
        elif criterionFunction == Backtracking:
            # Backtracking is seeded with the change in step size from the
            # previous iteration (preAlpha - alpha); initially 0 - 1 = -1.
            temp = criterionFunction(Func, dFunc, preAlpha - alpha, p, x, 0.9, 0.001)
            preAlpha = alpha
            alpha = temp
        x = x + alpha * p
        W[:, i] = x
        grad = dFunc(x)
        delta = sum(grad**2)
        i = i + 1

    return W[:, 0:i]  # trim the unused, still-zero columns

if __name__ == "__main__":
    # X1=np.arange(-10,10+0.05,0.05)
    # X2=np.arange(-10,10+0.05,0.05)
    # [x1,x2]=np.meshgrid(X1,X2)
    # f = (x1**2+x2**2)/4000 - np.cos(x1/np.sqrt(1))*np.cos(x2/np.sqrt(2)) + 1
    # plt.contour(x1,x2,f,20) # 画出函数的20条轮廓线
    failure = 0
    # x0 = np.random.uniform(-10,10,10)
    for i in range(1,100):
        x0 = np.random.uniform(-10,10,20)
        # W=steepestDescent(x0,Goldstein,Griewank,dGriewank,2000)
        # W = steepestDescent(x0,Wolfe,Griewank,dGriewank,2000)
        W = steepestDescent(x0,Backtracking,Griewank,dGriewank,5000)
        if(W.shape[1]==5000):
            failure = failure + 1
            
    # W=steepestDescent(x0,Goldstein,Griewank,dGriewank,2000)
    # W = steepestDescent(x0,Wolfe,Griewank,dGriewank,2000)
    # W = steepestDescent(x0,Backtracking,Griewank,dGriewank,2000)
    print("收敛率:")
    print((100-failure)/100)
    # plt.plot(W[0,:],W[1,:],'g*',W[0,:],W[1,:]) # 画出迭代点收敛的轨迹
    # plt.show()
    
    