package scu.maqiang.numeric;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.function.Function;

import scu.maqiang.mesh.MatrixFunc;
import scu.maqiang.mesh.ScalarFunc;
import scu.maqiang.mesh.Tecplot;
import scu.maqiang.mesh.VectorFunc;



/**
 * 非线性最优化类，其中的方法都是稳态方法，实现一些基本的最优化算法, 包括一维黄金分割法, 
 * @author 马强
 *
 */
public class NLOPT {
	
	public static InexactSearchStep[] inexactStep;
	static {
		inexactStep = new InexactSearchStep[]{NLOPT::ArmijoStep, NLOPT::WolfeStep};
	}
	/**
	 * Inexact line-search strategy selector; {@code getValue()} is the index
	 * of the corresponding entry in the {@code inexactStep} table.
	 */
	public enum SearchStep{
		/** Armijo (sufficient decrease) backtracking criterion. */
		Armijo(0),
		/** Strong Wolfe criterion. */
		Wolfe(1);

		// Index into the strategy table; fixed per constant, hence final.
		private final int value;

		private SearchStep(int value) {
			this.value = value;
		}

		/** @return the index of this strategy in the {@code inexactStep} table */
		public int getValue() {
			return value;
		}
	}
	
	/** Convex quadratic test function f(x) = 3*x0^2 + 2*x1^2 - 4*x0 - 6*x1. */
	public static ScalarFunc func_1 = (x, label, param) -> {
		double quad0 = 3 * x[0] * x[0];
		double quad1 = 2 * x[1] * x[1];
		return quad0 + quad1 - 4 * x[0] - 6 * x[1];
	};
	/** Analytic gradient of {@code func_1}. */
	public static VectorFunc dfunc_1 = (x, label, param) -> new double[] {6 * x[0] - 4, 4 * x[1] - 6};
	/** Analytic (constant) Hessian of {@code func_1}. */
	public static MatrixFunc hessfunc_1 = (x, label, param) -> {
		double[][] hessian = {{6.0, 0.0}, {0.0, 4.0}};
		return hessian;
	};
	
	/** Himmelblau test function f(x) = (x0^2 + x1 - 11)^2 + (x0 + x1^2 - 7)^2. */
	public static ScalarFunc HimmelblauFunc = (x, label, param) -> (x[0] * x[0] + x[1] - 11) * (x[0] * x[0] + x[1] - 11)
			                                            + (x[0] + x[1] * x[1] - 7) * (x[0] + x[1] * x[1] - 7);
	/** Analytic gradient of {@code HimmelblauFunc}. */
	public static VectorFunc dhimme = (x, label, param) -> {
		double[] grad = new double[2];
		// BUGFIX: the chain rule gives 2*(x0^2+x1-11)*2*x0 = 4*x0*(...), not 2*x0*(...);
		// likewise 4*x1 on the second squared term (the old factors of 2 were wrong).
		grad[0] = 4 * x[0] * (x[0] * x[0] + x[1] - 11) + 2 * (x[0] + x[1] * x[1] - 7);
		grad[1] = 2 * (x[0] * x[0] + x[1] - 11) + 4 * x[1] * (x[0] + x[1] * x[1] - 7);
		return grad;
	};

	/** Rosenbrock test function f(x) = 100*(x0^2 - x1)^2 + (x0 - 1)^2. */
	public static ScalarFunc RosenbrockFunc = (x, label, param) -> {
		double valley = x[0] * x[0] - x[1];
		double shift = x[0] - 1;
		return 100 * valley * valley + shift * shift;
	};
	/** Analytic gradient of {@code RosenbrockFunc}. */
	public static VectorFunc d_RosenbrockFunc = (x, label, param) -> {
		double valley = x[0] * x[0] - x[1];
		return new double[] {400 * x[0] * valley + 2 * (x[0] - 1), -200 * valley};
	};
	/** Analytic Hessian of {@code RosenbrockFunc} (symmetric 2x2). */
	public static MatrixFunc hess_RosenbrockFunc = (x, label, param) -> {
		double offDiag = -400 * x[0];
		return new double[][] {
				{1200 * x[0] * x[0] - 400 * x[1] + 2.0, offDiag},
				{offDiag, 200}
		};
	};
	
	/**
	 * Inexact step-length search by Armijo backtracking: alpha is shrunk by
	 * beta until f(xk + alpha*dk) < f(xk) + sigma*alpha*g(xk)·dk, for at most
	 * 21 trials.  Assumes func/dfunc are evaluation-only (no side effects),
	 * which holds for every caller in this file.
	 * @param func objective function
	 * @param dfunc gradient of the objective
	 * @param beta backtracking shrink factor (0 &lt; beta &lt; 1)
	 * @param sigma sufficient-decrease parameter (0 &lt; sigma &lt; 1)
	 * @param xk current iterate
	 * @param dk search (descent) direction
	 * @return a step length satisfying the Armijo criterion (or the last trial)
	 */
	public static double ArmijoStep(Function<double[], Double> func, Function<double[], double[]> dfunc, 
			                 double beta, double sigma, double[] xk, double[] dk) {
		final int mmax = 20;
		// f(xk) and the directional derivative g(xk)·dk are loop invariants:
		// evaluate them once instead of on every backtracking trial.
		final double f0 = func.apply(xk);
		final double slope = MVO.dot_product(dfunc.apply(xk), dk);
		double alpha = 1.0;
		for (int m = 0; m <= mmax; m++) {
			double[] newk = MVO.add(xk, alpha, dk);
			if (func.apply(newk) < f0 + sigma * alpha * slope) {
				break;
			}
			alpha *= beta;
		}
		return alpha;
	}

	/**
	 * Inexact step-length search by Armijo backtracking (ScalarFunc/VectorFunc
	 * variant).  Assumes func/dfunc are evaluation-only (no side effects).
	 * @param func objective function
	 * @param dfunc gradient of the objective
	 * @param xk current iterate
	 * @param dk search (descent) direction
	 * @param param Armijo parameters {beta, sigma}; when null the defaults
	 *              {0.5, 0.4} are used.  Also forwarded to {@code func}.
	 * @param param2 parameters forwarded to {@code dfunc}
	 * @return a step length satisfying the Armijo criterion (or the last trial)
	 */
	public static double ArmijoStep(ScalarFunc func, VectorFunc dfunc, double[] xk, double[] dk, double[] param, double[][] param2) {
		double beta;
		double sigma;
		if (param == null) {
			beta = 0.5;
			sigma = 0.4;
		} else {
			beta = param[0];
			sigma = param[1];
		}
		final int mmax = 20;
		// f(xk) and the directional derivative g(xk)·dk are loop invariants:
		// evaluate them once instead of on every backtracking trial.
		final double f0 = func.action(xk, 0, param);
		final double slope = MVO.dot_product(dfunc.action(xk, 0, param2), dk);
		double alpha = 1.0;
		for (int m = 0; m <= mmax; m++) {
			double[] newk = MVO.add(xk, alpha, dk);
			if (func.action(newk, 0, param) < f0 + sigma * alpha * slope) {
				break;
			}
			alpha *= beta;
		}
		return alpha;
	}
	
	/**
	 * Inexact line search satisfying the strong Wolfe conditions, via a
	 * bracketing/sectioning scheme (the "equation 7.65" note below suggests the
	 * algorithm follows Antoniou &amp; Lu, "Practical Optimization" — TODO confirm).
	 * @param F objective function
	 * @param G gradient of the objective
	 * @param xk current iterate
	 * @param s search direction (assumed to be a descent direction)
	 * @param param parameters forwarded to {@code F}
	 * @param param2 parameters forwarded to {@code G}
	 * @return a step length satisfying the strong Wolfe conditions, clamped below at 1e-5
	 */
	public static double WolfeStep(ScalarFunc F, VectorFunc G, double[] xk, double[] s, double[] param, double[][] param2) {
		// m counts function/gradient evaluations; the search gives up once m reaches mhat.
		int m = 0;
		// Alternative (looser) parameter set, kept for reference:
//		double tau = 0.1;
//		double chi = 0.75;
//		double rho = 0.1;
//		double sigma = 0.1;
		double tau = 0.1;       // sectioning guard: keeps trial steps away from bracket ends
		double chi = 0.9;       // extrapolation guard fraction
		double rho = 1.0e-4;    // sufficient-decrease (Armijo) parameter
		double sigma = 0.1;		// curvature parameter of the Wolfe test
		int mhat = 400;         // evaluation budget
		double epsilon = 1e-10; // tolerance for "no further progress" tests
		// Step 1: evaluate f and the gradient at xk once.
		double f0 = F.action(xk, 0, param);
		double[] gk = G.action(xk, 0, param2);
		m = m+2;
		double deltaf0 = f0;
		// Step 2: initialize the line search bracket [aL, aU] and the state at aL.
		double[] dk = s.clone();
		double aL = 0;
		double aU = 1e99;
		double fL = f0;
		// gk is the gradient, dk the descent direction; dfL = g(xk)·dk is the
		// directional derivative at the lower bracket end (negative for descent).
		double dfL = MVO.dot_product(gk, dk);
		double a0 = 0;
		double a0hat = 0;
		double a0Lhat = 0.0;
		double a0Uhat = 0.0;
		// Initial trial step from the expected decrease; fall back to 1 when the
		// slope is (numerically) zero or the estimate leaves (1e-9, 1].
		if (Math.abs(dfL) > epsilon) {
			a0 = -2 * deltaf0 / dfL;
		}else {
			a0 = 1;
		}
		if ((a0 <= 1e-9)||(a0 > 1)) {
			a0 = 1;
		}
		//step 3: main bracketing/sectioning loop
		while(true) {
			// Evaluate f at the trial point xk + a0*dk.
			double[] deltak = new double[dk.length];
			double aa0 = a0;
			Arrays.setAll(deltak, i -> xk[i] + aa0 * dk[i]);
			f0 = F.action(deltak, 0, param);
			m = m + 1;
			//step 4: sufficient decrease violated -> shrink the bracket from above
			// and interpolate a new trial step inside [aL, aU]
		    if ((f0 > (fL + rho*(a0 - aL)*dfL)) && (Math.abs(fL - f0) > epsilon) && (m < mhat)) {
		    	if (a0 < aU) {
			    	  aU = a0;
			    }
				//compute a0hat using equation 7.65 (quadratic interpolation minimizer)
				a0hat = aL + ((a0 - aL) * (a0 - aL) * dfL)/(2*(fL - f0 + (a0 - aL)*dfL));
				// Clamp the interpolated step to the guarded interior of [aL, aU].
				a0Lhat = aL + tau*(aU - aL);
				if (a0hat < a0Lhat) {
					a0hat = a0Lhat;
				}
				a0Uhat = aU - tau*(aU - aL);
				if (a0hat > a0Uhat) {
					a0hat = a0Uhat;
				}
				a0 = a0hat;
		    } else {
		    	// Sufficient decrease holds (or budget exhausted): check curvature.
		    	double[] gtemp = G.action(deltak, 0, param2);
		    	double df0 = MVO.dot_product(gtemp, dk);
		    	m = m + 1;
		    	//step 6: curvature condition violated -> extrapolate a larger step,
		    	// move the lower bracket end to a0 and continue; otherwise accept a0
		    	if (((df0 < sigma*dfL) && (Math.abs(fL - f0) > epsilon) && (m < mhat) && (dfL != df0))) {
		      		double deltaa0 = (a0 - aL)*df0/(dfL - df0);
			        if (deltaa0 <= 0) {
			        	a0hat = 2*a0;
			        } else {
			        	a0hat = a0 + deltaa0;
			        }
			        // Guard the extrapolation against overshooting the upper bracket.
			        a0Uhat = a0 + chi * (aU - a0);
			        if (a0hat > a0Uhat) {
			        	a0hat = a0Uhat;
			        }
			        aL = a0;
			        a0 = a0hat;
			        fL = f0;
			        dfL = df0;
		    	}else {
		    		break;
		    	}
		    }
		}
		// Clamp the accepted step from below so callers never get a zero step.
		double z = 0.0;
		if (a0 < 1e-5) {
			z = 1e-5;
		}
		else {
			z = a0;
		}
		return z;
	}
	
	/**
	 * Steepest-descent minimization with an inexact line search, writing a
	 * per-iteration trace (point, gradient norm, objective) to a Tecplot file.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param param1 parameters forwarded to {@code func}
	 * @param gparam parameters forwarded to {@code gfunc}
	 * @param x0 starting point; updated in place and returned
	 * @param searchStep which inexact search to use (Armijo or Wolfe)
	 * @param param2 reserved search parameters (currently unused)
	 * @param reportFile output file for the iteration history
	 * @return the (local) minimizer
	 */
	public static double[] SteepestDescent(ScalarFunc func, VectorFunc gfunc, double[] param1, double[][] gparam, double[] x0,
			                               NLOPT.SearchStep searchStep, double[]param2, String reportFile) {
		final int maxk = 5000;
		final double epsilon = 1e-5;
		final int n = x0.length;
		double[] d = new double[n];
		InexactSearchStep ss = inexactStep[searchStep.getValue()];
		ArrayList<double[]> resultData = new ArrayList<>();
		int k = 0;
		while (k < maxk) {
			// Record the current point, the gradient norm and the objective value.
			double[] stepData = new double[n + 2];
			System.arraycopy(x0, 0, stepData, 0, n);
			double[] g = gfunc.action(x0, 0, gparam);
			for (int i = 0; i < n; i++) {
				d[i] = -g[i];   // steepest-descent direction
			}
			stepData[n] = MVO.L2Norm(d);
			stepData[n + 1] = func.action(x0, 0, param1);
			resultData.add(stepData);
			if (stepData[n] < epsilon) {
				break;
			}
			double alpha = ss.apply(func, gfunc, x0, d, param1, gparam);
			for (int i = 0; i < n; i++) {
				x0[i] += alpha * d[i];
			}
			k++;
		}
		Tecplot.LineXY(reportFile, resultData);
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.action(x0, 0, param1));
		return x0;
	}
	
	/**
	 * Steepest descent with the exact step length for a locally quadratic
	 * model: alpha = (g·g) / (g·H·g), using the supplied Hessian.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param hessFunc Hessian of the objective
	 * @param param1 parameters forwarded to {@code func}
	 * @param param2 parameters forwarded to {@code gfunc}
	 * @param param3 parameters forwarded to {@code hessFunc}
	 * @param x0 starting point; updated in place and returned
	 * @return the (local) minimizer
	 */
	public static double[] SteepestDescent(ScalarFunc func, VectorFunc gfunc, MatrixFunc hessFunc, double[] param1, double[][] param2, double[][][] param3,
			                               double[] x0) {
		final int maxk = 5000;
		final double epsilon = 1e-10;
		int k = 0;
		while (k < maxk) {
			double[] g = gfunc.action(x0, 0, param2);
			if (MVO.L2Norm(g) < epsilon) {
				break;
			}
			// Optimal step for a quadratic model along -g.
			double[][] mat = hessFunc.action(x0, 0, param3);
			double alpha = MVO.dot_product(g, g) / MVO.yAx(g, mat, g);
			for (int i = 0; i < x0.length; i++) {
				x0[i] -= alpha * g[i];
			}
			k++;
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.action(x0, 0, param1));
		return x0;		
	}
	
	
	/**
	 * Damped (line-searched) Newton method for unconstrained minimization.
	 * The Newton direction solves Gk*dk = -gk by Gaussian elimination and the
	 * step length comes from the selected inexact search.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param hessfunc Hessian of the objective
	 * @param param1 parameters forwarded to {@code func}
	 * @param param2 parameters forwarded to {@code gfunc}
	 * @param param3 parameters forwarded to {@code hessfunc}
	 * @param x0 initial guess; updated in place and returned
	 * @param searchStep inexact line-search selection
	 * @param paramS reserved search parameters (currently unused)
	 * @return the (local) minimizer
	 */
	public static double[] DampedNewtonMethod(ScalarFunc func, VectorFunc gfunc, 
			                                  MatrixFunc hessfunc, double[] param1, double[][] param2, double[][][] param3,
			                                  double[] x0, NLOPT.SearchStep searchStep, double[] paramS) {
		int maxk = 100;
		int k = 0;
		double epsilon = 1e-5;
		int n = x0.length;
		double[] dk = new double[n];
		DirectDSolver solver = new DirectDSolver();
		InexactSearchStep ss = inexactStep[searchStep.getValue()];
		while (k < maxk) {
			double[] gk = gfunc.action(x0, 0, param2);
			// Test convergence BEFORE evaluating/factoring the Hessian; the old
			// order performed one full linear solve that was then thrown away.
			if (MVO.L2Norm(gk) < epsilon) {
				break;
			}
			double[][] Gk = hessfunc.action(x0, 0, param3);
			solver.setMatrix(Gk);
			solver.PGaussSolve(gk, dk);      // solve Gk * dk = gk
			for (int i = 0; i < n; i++) {
				dk[i] = -dk[i];              // Newton direction dk = -Gk^{-1} gk
			}
			double alpha = ss.apply(func, gfunc, x0, dk, param1, param2);
			for (int i = 0; i < n; i++) {
				x0[i] += alpha * dk[i];
			}
			k++;
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.action(x0, 0, param1));
		return x0;
	}
	
	/**
	 * Symmetric rank-one (SR1) quasi-Newton method for unconstrained
	 * minimization; Hk approximates the inverse Hessian.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param x0 initial guess; updated in place and returned
	 * @return the (local) minimizer
	 */
	public static double[] QuasiNewton_SR1(Function<double[], Double> func, Function<double[], double[]> gfunc, double[] x0) {
		int maxk=500;
		double rho=0.55;     // Armijo backtracking factor
		double sigma=0.4;    // Armijo sufficient-decrease parameter
		double epsilon=1e-10; 
		int k = 0;
		int n = x0.length;
		double[][] Hk = MVO.eye(n);   // inverse-Hessian approximation, start at identity
		double[] gk = null;
		double[] dk = new double[n];
		double[] sk = new double[n];
		double[] yk = new double[n];
		while(k < maxk) {
			gk = gfunc.apply(x0);
		    MVO.matmul(Hk, gk, dk);
		    Arrays.setAll(dk, i -> -dk[i]);   // search direction dk = -Hk * gk
		    if(MVO.L2Norm(gk) < epsilon) {
		    	break;
		    }
		    double alpha = ArmijoStep(func, gfunc, rho, sigma, x0, dk);
		    Arrays.setAll(sk, i -> alpha * dk[i]);       // sk = x_{k+1} - x_k
		    Arrays.setAll(x0, i -> x0[i] + sk[i]);
		    MVO.add(gfunc.apply(x0), -1.0, gk, yk);      // yk = g_{k+1} - g_k
		    double[] Hkyk = MVO.matmul(Hk, yk);
		    // gk is reused as scratch for (sk - Hk*yk); it is recomputed at the
		    // top of the next iteration.
		    Arrays.setAll(gk, i -> sk[i] - Hkyk[i]);
		    double temp = MVO.dot_product(gk, yk);
		    // BUGFIX/safeguard: skip the rank-one update when the denominator is
		    // numerically zero — the old unconditional division could fill Hk
		    // with NaN/Inf and destroy the iteration.
		    if (Math.abs(temp) > 1e-12) {
		    	for(int i = 0; i < n; i++) {
		    		for(int j = 0; j < n; j++) {
		    			Hk[i][j] += gk[i] * gk[j] / temp;
		    		}
		    	}
		    }
		    k++;
		    System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.apply(x0));
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.apply(x0));
		return x0;
	}
	
	/**
	 * BFGS quasi-Newton method (direct Hessian-approximation form) for
	 * unconstrained minimization.  Bk approximates the Hessian; the search
	 * direction is obtained by solving Bk*dk = -gk with an iterative solver
	 * and the step length comes from the strong-Wolfe line search.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param param parameters forwarded to {@code func} (and the line search)
	 * @param param2 parameters forwarded to {@code gfunc}
	 * @param x0 initial guess; updated in place and returned
	 * @return the (local) minimizer
	 */
	public static double[] QuasiNewton_BFGS(ScalarFunc func, VectorFunc gfunc, double[] param, double[][] param2, double[] x0) {
		int maxk = 500;
		double epsilon1 = 1e-10;
		int k = 0;
		int n = x0.length;
		// Hessian approximation, initialized to the identity.
		double[][] Bk = MVO.eye(n);
		double[] gk = null;
		double[] dk = new double[n];
		double[] sk = new double[n];
		double[] yk = new double[n];
		IterDSolver solver = new IterDSolver();
		while(k < maxk) {
		    gk = gfunc.action(x0, 0, param2);
		    if(MVO.L2Norm(gk) < epsilon1) {
		    	break;
		    }
		    // Search direction: solve Bk * dk = gk, then negate.
		    solver.CGNE(Bk, gk, dk, 0);
		    Arrays.setAll(dk, i -> -dk[i]);
		    //double alpha = ArmijoStep(func, gfunc, param, rho, sigma1, x0, dk);
		    double alpha = WolfeStep(func, gfunc, x0, dk, param, param2);
		    // BFGS update
		    Arrays.setAll(sk, i -> alpha * dk[i]);               // sk = x_{k+1} - x_k
		    Arrays.setAll(x0, i -> x0[i] + sk[i]);
		    MVO.add(gfunc.action(x0, 0, param2), -1.0, gk, yk);  // yk = g_{k+1} - g_k
		    double temp1 = MVO.dot_product(yk, sk);
		    
		    // Curvature condition yk·sk > 0 keeps Bk positive definite; when it
		    // fails the update is skipped entirely.
		    if(temp1 > 0) {
		    	double temp2 = MVO.yAx(sk, Bk, sk);
		    	// NOTE: gk is reused here as scratch for Bk*sk; it is recomputed
		    	// at the top of the next iteration.
		    	MVO.matmul(Bk, sk, gk);
		    	// Bk+ = Bk - (Bk sk)(Bk sk)'/(sk'Bk sk) + yk yk'/(yk'sk)
		    	for(int i = 0; i < n; i++) {
		    		for(int j = 0; j < n; j++) {
		    			Bk[i][j] = Bk[i][j] - gk[i] * gk[j] / temp2 + yk[i] * yk[j] / temp1;
		    		}
		    	}
		    }
		    k=k+1;
		    //System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.apply(x0));
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.action(x0, 0, param));
		return x0;
	}
	
	/**
	 * BFGS quasi-Newton method in inverse-Hessian form: Hk approximates the
	 * inverse Hessian, so the search direction is a plain matrix-vector
	 * product -Hk*gk (no linear solve per iteration).
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param param parameters forwarded to {@code func} (and the line search)
	 * @param param2 parameters forwarded to {@code gfunc}
	 * @param x0 initial guess; updated in place and returned
	 * @return the (local) minimizer
	 */
	public static double[] BFGS_H(ScalarFunc func, VectorFunc gfunc, double[] param, double[][] param2, double[] x0) {
		int maxk = 500;
		double epsilon1 = 1e-10;
		int k = 0;
		int n = x0.length;
		// Inverse-Hessian approximation, initialized to the identity.
		double[][] Hk = MVO.eye(n);
		double[] gk = gfunc.action(x0, 0, param2);
		double[] dk = new double[n];
		double[] sk = new double[n];
		double[] yk = new double[n];
		while(k < maxk) {
		    if(MVO.L2Norm(gk) < epsilon1) {
		    	break;
		    }
		    // Search direction dk = -Hk * gk.
		    MVO.matmul(Hk, gk, dk);
		    Arrays.setAll(dk, i -> -dk[i]);
		    //double alpha = ArmijoStep(func, gfunc, param, rho, sigma1, x0, dk);
		    double alpha = WolfeStep(func, gfunc, x0, dk, param, param2);
		    // Inverse-BFGS update:
		    // H+ = H - rho*(Hy s' + s y'H) + rho*(rho*y'Hy + 1)*s s',  rho = 1/(y's)
		    Arrays.setAll(sk, i -> alpha * dk[i]);   // sk = x_{k+1} - x_k
		    Arrays.setAll(x0, i -> x0[i] + sk[i]);
		    double[] newgk = gfunc.action(x0, 0, param2);
		    MVO.add(newgk, -1.0, gk, yk);            // yk = g_{k+1} - g_k
		    // NOTE(review): no safeguard when yk·sk ≈ 0 — rho can blow up; presumably
		    // the Wolfe search provides curvature for the intended inputs — confirm.
		    double rho = 1.0 / MVO.dot_product(yk, sk);
		    // gk is reused as scratch for Hk*yk; the real gradient is newgk,
		    // assigned back to gk at the bottom of the loop.
		    MVO.matmul(Hk, yk, gk);
		    double ykHkyk = MVO.dot_product(yk, gk);
		    double temp = rho * (rho * ykHkyk + 1);
		    for(int i = 0; i < n; i++) {
		    	for(int j = 0; j < n; j++) {
		    		Hk[i][j] = Hk[i][j] - rho * (gk[i] * sk[j]+ gk[j] * sk[i]) + temp * sk[i] * sk[j];
		    	}
		    }
		    gk = newgk;
		    k = k + 1;
		    //System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.apply(x0));
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.action(x0, 0, param));
		return x0;
	}
	
	
	/**
	 * Fletcher–Reeves conjugate-gradient method for unconstrained
	 * minimization: min f(x), with a steepest-descent restart every (n+1)
	 * iterations and whenever the CG direction fails to be a descent direction.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param x0 initial point; updated in place
	 * @return (minimizer, minimum value, iteration count)
	 */
	public static Triple<double[], Double, Integer> CG_FR(Function<double[], Double> func, Function<double[], double[]> gfunc, double[] x0) {
		int maxk = 20000;
		double rho = 0.6;    // Armijo backtracking factor
		double sigma = 0.4;  // Armijo sufficient-decrease parameter
		int k = 0;
		double epsilon = 1e-8;
		int n = x0.length;
		double[] g = null;
		double[] g0 = new double[n];
		double[] d = new double[n];
		double[] d0 = new double[n];
		double beta = 0.0;
		while(k < maxk) {
		    g = gfunc.apply(x0);   // current gradient
		    double[] gg = g;
		    // Restart counter: equals 1 every (n+1) iterations, forcing a
		    // steepest-descent restart.  (Was an equivalent but convoluted
		    // floor()-based modulo; k % (n+1) is identical for k >= 0.)
		    int itern = k % (n + 1) + 1;
		    // Search direction
		    if(itern == 1) {
		    	Arrays.setAll(d, i -> -gg[i]);
		    }
		    else {
		    	double temp1 = MVO.dot_product(g, g);
		    	double temp2 = MVO.dot_product(g0, g0);
		    	beta = temp1 / temp2;   // FR beta = ||g_k||^2 / ||g_{k-1}||^2
		    	double bbeta = beta;
		    	Arrays.setAll(d, i -> -gg[i] + bbeta * d0[i]);
		        double gd = MVO.dot_product(g, d);
		        if(gd >= 0.0) {
		        	// Not a descent direction: fall back to steepest descent.
		        	Arrays.setAll(d, i -> -gg[i]);
		        }
		    }
		    // Termination test
		    if(MVO.L2Norm(g) < epsilon) {
		    	break;
		    }
		    // Armijo line search
		    double alpha = ArmijoStep(func, gfunc, rho, sigma, x0, d);
		    Arrays.setAll(x0, i-> x0[i] + alpha * d[i]);
		    Arrays.setAll(g0, i-> gg[i]);
		    Arrays.setAll(d0, i -> d[i]);
		    k++;
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.apply(x0));
		return new Triple<double[], Double, Integer>(x0, func.apply(x0), k);
	}

	/**
	 * PR+ (Polak–Ribière with beta clipped at zero) conjugate-gradient method
	 * for unconstrained minimization: min f(x), with a steepest-descent
	 * restart every (n+1) iterations and whenever the CG direction fails to
	 * be a descent direction.
	 * @param func objective function
	 * @param gfunc gradient of the objective
	 * @param x0 initial point; updated in place
	 * @return (minimizer, minimum value, iteration count)
	 */
	public static Triple<double[], Double, Integer> CG_PRPlus(Function<double[], Double> func, Function<double[], double[]> gfunc, double[] x0) {
		int maxk = 5000;
		double rho = 0.6;    // Armijo backtracking factor
		double sigma = 0.4;  // Armijo sufficient-decrease parameter
		int k = 0;
		double epsilon = 1e-10;
		int n = x0.length;
		double[] g = null;
		double[] g0 = new double[n];
		double[] d = new double[n];
		double[] d0 = new double[n];
		double beta = 0.0;
		while(k < maxk) {
		    g = gfunc.apply(x0);   // current gradient
		    double[] gg = g;
		    // Restart counter: equals 1 every (n+1) iterations, forcing a
		    // steepest-descent restart.  (Was an equivalent but convoluted
		    // floor()-based modulo; k % (n+1) is identical for k >= 0.)
		    int itern = k % (n + 1) + 1;
		    // Search direction
		    if(itern == 1) {
		    	Arrays.setAll(d, i -> -gg[i]);
		    }
		    else {
		    	double temp1 = MVO.dot_product(g, g);
		    	double temp12 = MVO.dot_product(g, g0);
		    	double temp2 = MVO.dot_product(g0, g0);
		    	// PR beta = g_k·(g_k - g_{k-1}) / ||g_{k-1}||^2, clipped at 0 ("PR+").
		    	beta = (temp1 - temp12) / temp2;
		    	double bbeta = Double.max(beta, 0.0);
		    	Arrays.setAll(d, i -> -gg[i] + bbeta * d0[i]);
		        double gd = MVO.dot_product(g, d);
		        if(gd >= 0.0) {
		        	// Not a descent direction: fall back to steepest descent.
		        	Arrays.setAll(d, i -> -gg[i]);
		        }
		    }
		    // Termination test
		    if(MVO.L2Norm(g) < epsilon) {
		    	break;
		    }
		    // Armijo line search
		    double alpha = ArmijoStep(func, gfunc, rho, sigma, x0, d);
		    Arrays.setAll(x0, i-> x0[i] + alpha * d[i]);
		    Arrays.setAll(g0, i-> gg[i]);
		    Arrays.setAll(d0, i -> d[i]);
		    k++;
		}
		System.out.println("Iteration: k = " + k + ", x = " + Arrays.toString(x0) + ", v = " + func.apply(x0));
		return new Triple<double[], Double, Integer>(x0, func.apply(x0), k);
	}
	
	/**
	 * Quadratic penalty method for equality-constrained minimization:
	 *   min f(x)  s.t.  c(x) = 0.
	 * Repeatedly minimizes f(x) + 0.5*mu*||c(x)||^2 with BFGS, increasing mu
	 * tenfold until the constraint violation drops below tolerance.
	 * @param objFunc objective f
	 * @param gFunc gradient of f
	 * @param conFunc vector of equality constraints c
	 * @param gc gradients of the individual constraints
	 * @param x0 initial point; updated in place
	 * @return the converged point (same array as {@code x0})
	 */
	public static double[] PenaltyMethod_Equality(ScalarFunc objFunc, VectorFunc gFunc, 
			                                      VectorFunc conFunc, VectorFunc[] gc, double[] x0) {
		double[] penaltyParam = {1.0};
		// Penalty objective f(x) + 0.5 * mu * sum_i c_i(x)^2, with mu = param[0].
		ScalarFunc penaltyFunc = (x, label, param) -> {
			double result = objFunc.action(x, label, param);
			double[] cc = conFunc.action(x, label, null);
			for(int i = 0, n = cc.length; i < n; i++) {
				result += 0.5 * param[0] * cc[i] * cc[i];
			}
			return result;
		};
		
		// Penalty gradient: grad f + mu * sum_i c_i(x) * grad c_i(x), mu = param[0][0].
		VectorFunc gradFunc = (x, label, param) -> {
			int n = gc.length;   // number of constraints
			double[] grad = gFunc.action(x, label, param);
			double[] constraintValue = conFunc.action(x, label, param);
			for(int i = 0; i < n; i++) {
				double[] gradconstraint = gc[i].action(x, label, param);
				for(int j = 0, nGrad = grad.length; j < nGrad; j++) {
					grad[j] += constraintValue[i] * gradconstraint[j] * param[0][0];
				}
			}
			return grad;
		};
		double epsilon = 1.0e-6;
		while (true) {
			// BUGFIX: gradFunc dereferences param[0][0], so the vector parameter
			// must wrap penaltyParam — the old call passed null and was
			// guaranteed to throw a NullPointerException inside BFGS.
			QuasiNewton_BFGS(penaltyFunc, gradFunc, penaltyParam, new double[][]{penaltyParam}, x0);
			double[] cValue = conFunc.action(x0, 0, new double[][]{penaltyParam});
			if(MVO.L2Norm(cValue) < epsilon) {
				break;
			}
			penaltyParam[0] *= 10;
		}
		// BUGFIX: return the computed solution instead of null.
		return x0;
	}
	
	/**
	 * Quadratic penalty method for generally constrained minimization:
	 *   min f(x)  s.t.  ec(x) = 0,  ic(x) >= 0.
	 * Equality constraints are penalized by 0.5*mu*ec_i^2; inequality
	 * constraints by 0.5*mu*min(ic_i, 0)^2 — i.e. only when violated.
	 * @param f objective
	 * @param g gradient of f
	 * @param ec equality constraints
	 * @param gec gradients of the equality constraints
	 * @param ic inequality constraints
	 * @param gic gradients of the inequality constraints
	 * @param x0 initial point; updated in place
	 * @return the converged point (same array as {@code x0})
	 */
	public static double[] PenaltyMethod_General(ScalarFunc f, VectorFunc g, 
			                                     VectorFunc ec, VectorFunc[] gec, 
			                                     VectorFunc ic, VectorFunc[] gic, 
			                                     double[] x0) {
		double[] penaltyParam = {1.0};
		// Penalty objective, mu = param[0].
		ScalarFunc penaltyFunc = (x, label, param) -> {
			double result = f.action(x, label, param);
			double[] ecc = ec.action(x, label, null);
			for(int i = 0, n = ecc.length; i < n; i++) {
				result += 0.5 * param[0] * ecc[i] * ecc[i];
			}
			double[] icc = ic.action(x, label, null);
			for(int i = 0, n = icc.length; i < n; i++) {
				// BUGFIX: penalize constraint i, not constraint 0 (old code
				// always read icc[0]).
				double min = Math.min(icc[i], 0);
				result += 0.5 * param[0] * min * min;
			}			
			return result;
		};
		
		// Penalty gradient, mu = param[0][0].
		VectorFunc gradFunc = (x, label, param) -> {
			double[] grad = g.action(x, label, param);
			
			double[] eConstraintValue = ec.action(x, label, param);
			for(int i = 0, n = gec.length; i < n; i++) {
				double[] gradconstraint = gec[i].action(x, label, param);
				for(int j = 0, nGrad = grad.length; j < nGrad; j++) {
					grad[j] += eConstraintValue[i] * gradconstraint[j] * param[0][0];
				}
			}
			
			// BUGFIX: evaluate the *inequality* constraints (old code reused ec),
			// loop over gic (not gec), and include the min(ic_i, 0) factor so
			// satisfied constraints contribute nothing to the gradient.
			double[] iConstraintValue = ic.action(x, label, param);
			for(int i = 0, n = gic.length; i < n; i++) {
				double violation = Math.min(iConstraintValue[i], 0);
				if (violation == 0.0) {
					continue;
				}
				double[] gradconstraint = gic[i].action(x, label, param);
				for(int j = 0, nGrad = grad.length; j < nGrad; j++) {
					grad[j] += violation * gradconstraint[j] * param[0][0];
				}
			}
			return grad;
		};
		double epsilon = 1.0e-6;
		while (true) {
			// BUGFIX: wrap penaltyParam for gradFunc (it reads param[0][0]);
			// the old null argument threw a NullPointerException.
			QuasiNewton_BFGS(penaltyFunc, gradFunc, penaltyParam, new double[][]{penaltyParam}, x0);
			double[] ecValue = ec.action(x0, 0, new double[][]{penaltyParam});
			// BUGFIX: measure inequality violation with ic (old code reused ec)
			// and count only the violated part min(ic_i, 0); requiring ic(x)=0
			// would wrongly reject strictly feasible points.
			double[] icValue = ic.action(x0, 0, new double[][]{penaltyParam});
			double eL2 = MVO.L2Norm(ecValue);
			double iViolationSq = 0.0;
			for (double v : icValue) {
				double m = Math.min(v, 0);
				iViolationSq += m * m;
			}
			if(Math.sqrt(eL2 * eL2 + iViolationSq) < epsilon) {
				break;
			}
			penaltyParam[0] *= 10;
		}
		// BUGFIX: return the computed solution instead of null.
		return x0;
	}
	
	
	/**
	 * Augmented-Lagrangian (method of multipliers) for equality constraints:
	 *   min f(x)  s.t.  c(x) = 0.
	 * Minimizes L_A(x) = f(x) - sum_i lambda_i*c_i(x) + 0.5*mu*sum_i c_i(x)^2
	 * with BFGS; after each subproblem the multipliers are updated as
	 * lambda_i -= mu*c_i(x) and mu is increased until ||c(x)|| is small.
	 * @param objFunc objective f
	 * @param gFunc gradient of f
	 * @param paramObj optional objective parameters (may be null)
	 * @param conFunc vector of equality constraints c
	 * @param gradCon gradients of the individual constraints
	 * @param paramCon constraint parameters
	 * @param x0 initial point; updated in place
	 * @return the converged point (same array as {@code x0})
	 */
	public static double[] AugmentedLagrangianMethod_Equality(ScalarFunc objFunc, VectorFunc gFunc, double[][] paramObj,
			                                                  VectorFunc conFunc, VectorFunc[] gradCon, double[][] paramCon,
			                                                  double[] x0) {
		int nCon = conFunc.action(x0, 0, paramCon).length;
		// ALParams[0..nCon-1] = Lagrange multipliers, ALParams[nCon] = penalty mu.
		double[] ALParams = new double[nCon + 1];
		ALParams[nCon] = 1.0;
		
		// Augmented Lagrangian; multipliers/penalty arrive through param
		// (BFGS forwards ALParams as the scalar parameter vector).
		ScalarFunc penaltyFunc = (x, label, param) -> {
			// BUGFIX: guard against a null paramObj (callers pass null); the old
			// unconditional paramObj[0] threw a NullPointerException.
			double result = objFunc.action(x, label, paramObj == null ? null : paramObj[0]);
			double[] cc = conFunc.action(x, label, paramCon);
			for(int i = 0, n = cc.length; i < n; i++) {
				result -= param[i] * cc[i];
				result += 0.5 * param[n] * cc[i] * cc[i];
			}
			return result;
		};
		
		// Gradient of the augmented Lagrangian.
		// BUGFIX: read multipliers/penalty from the captured ALParams array; the
		// old code dereferenced param[i][0] while BFGS was invoked with a null
		// vector parameter, guaranteeing a NullPointerException.
		VectorFunc gradFunc = (x, label, param) -> {
			int n = gradCon.length;   // number of constraints
			double[] grad = gFunc.action(x, label, paramObj);
			double[] constraintValue = conFunc.action(x, label, paramCon);
			for(int i = 0; i < n; i++) {
				double[] gradconstraint = gradCon[i].action(x, label, paramCon);
				for(int j = 0, nGrad = grad.length; j < nGrad; j++) {
					grad[j] -= ALParams[i] * gradconstraint[j];
					grad[j] += constraintValue[i] * gradconstraint[j] * ALParams[nCon];
				}
			}
			return grad;
		};
		double epsilon = 1.0e-6;
		while (true) {
			QuasiNewton_BFGS(penaltyFunc, gradFunc, ALParams, null, x0);
			// Single constraint evaluation per outer iteration (the old code
			// evaluated conFunc twice with identical arguments).
			double[] constraintValue = conFunc.action(x0, 0, paramCon);
			if(MVO.L2Norm(constraintValue) < epsilon) {
				break;
			}
			// Multiplier update: lambda_i <- lambda_i - mu * c_i(x).
			for(int i = 0; i < nCon; i++) {
				ALParams[i] -= ALParams[nCon] * constraintValue[i];
			}
			ALParams[nCon] *= 5;
		}
		// BUGFIX: return the computed solution instead of null.
		return x0;
	}
	
	
	/**
	 * Demo entry point: runs the inverse-Hessian BFGS test case on the
	 * Rosenbrock function.  (A large body of commented-out scratch
	 * invocations of the other solvers was removed; see the *Case methods
	 * below for runnable examples of each algorithm.)
	 */
	public static void main(String[] args) {
		BFGSCase1();
	}

	/**
	 * Demo: minimize the Rosenbrock function with steepest descent (Wolfe,
	 * Armijo and exact-quadratic step variants), then a 3x3 positive-definite
	 * quadratic form 0.5*x'Ax - b'x whose minimizer solves Ax = b.
	 */
	public static void SteepestDescentCase1() {
		double[] start = new double[] {0.5, 1.0};
		SteepestDescent(RosenbrockFunc, d_RosenbrockFunc, null, null, start, NLOPT.SearchStep.Wolfe, null, "SD_Wolfe.dat");
		start = new double[] {0.5, 1.0};
		SteepestDescent(RosenbrockFunc, d_RosenbrockFunc, null, null, start, NLOPT.SearchStep.Armijo, null, "SD_Armijo.dat");
		start = new double[] {0.5, 1.0};
		SteepestDescent(RosenbrockFunc, d_RosenbrockFunc, hess_RosenbrockFunc, null, null, null, start);
		
		double[][] A = {{3, 2, 0}, {2, 3, 2}, {0, 2, 3}};
		double[] b = {5, 7, 5};
		ScalarFunc quadForm = (xx, label, param) -> 0.5 * MVO.yAx(xx, A, xx) - MVO.dot_product(b, xx);
		VectorFunc gradFunc = (xx, label, param) -> MVO.add(MVO.matmul(A, xx), -1.0, b);
		MatrixFunc hessFunc = (xx, label, param) -> A;
		SteepestDescent(quadForm, gradFunc, hessFunc, null, null, null, new double[3]);
	}
	
	
	
	/** Demo: minimize the Rosenbrock function with inverse-Hessian BFGS from (0, 1.5). */
	public static void BFGSCase1() {
		double[] start = {0, 1.5};
		BFGS_H(RosenbrockFunc, d_RosenbrockFunc, null, null, start);
	}
	
	/** Demo: minimize the Himmelblau function with the SR1 method from (100, 100). */
	public static void HimmelblauFunc() {
		Function<double[], Double> himmelblau = x -> (x[0] * x[0] + x[1] - 11) * (x[0] * x[0] + x[1] - 11)
				                             	   + (x[0] + x[1] * x[1] - 7) * (x[0] + x[1] * x[1] - 7);
		
		// Analytic gradient.
		// BUGFIX: the chain rule gives 4*x0 (resp. 4*x1) on the squared-term
		// factors; the old factors of 2 made the gradient wrong.
		Function<double[], double[]> gradient = x -> {
			double[] grad = new double[2];
			grad[0] = 4 * x[0] * (x[0] * x[0] + x[1] - 11) + 2 * (x[0] + x[1] * x[1] - 7);
			grad[1] = 2 * (x[0] * x[0] + x[1] - 11) + 4 * x[1] * (x[0] + x[1] * x[1] - 7);
			return grad;
		};
		double[] x0 = new double[] {100.0, 100.0};
		QuasiNewton_SR1(himmelblau, gradient, x0);
	}
	
	/** Demo: compare FR and PR+ conjugate gradient on two quadratic test problems. */
	public static void CGCase() {
		Function<double[], Double> f1 = x -> 4 * x[0] * x[0] + 4 * x[1] * x[1] - 4 * x[0] * x[1] - 12 * x[1];
		Function<double[], double[]> gf1 = x -> {
			double[] grad = new double[2];
			grad[0] = 8 * x[0] - 4 * x[1];
			grad[1] = 8 * x[1] - 4 * x[0] - 12;
			return grad;
		};
		printCGResult(CG_FR(f1, gf1, new double[] {-0.5, 1}));
		printCGResult(CG_PRPlus(f1, gf1, new double[] {-0.5, 1}));
		
		Function<double[], Double> f2 = x -> x[0] * x[0] - 2 * x[0] * x[1] + 2 * x[1] * x[1] + x[2] * x[2] - x[1] * x[2] + x[0] + 3 * x[1] - x[2];
		Function<double[], double[]> gf2 = x -> {
			double[] grad = new double[3];
			grad[0] = 2 * x[0] - 2 * x[1] + 1;
			grad[1] = -2 * x[0] + 4 * x[1] - x[2] + 3;
			grad[2] = 2 * x[2] - x[1] - 1;
			return grad;
		};
		printCGResult(CG_FR(f2, gf2, new double[] {0, 0, 0}));
		printCGResult(CG_PRPlus(f2, gf2, new double[] {0, 0, 0}));
	}
	
	/** Prints a conjugate-gradient result triple in the standard report format. */
	private static void printCGResult(Triple<double[], Double, Integer> result) {
		System.out.println("Minimizer: " + Arrays.toString(result.getFirst()) + "\n"
		          +"Minimized Value: " + result.getSecond() + "\n"
		          +"Iteration number: " + result.getThird());
	}
		
	/**
	 * Demo: hand-rolled quadratic penalty for min x0+x1 s.t. x0^2+x1^2 = 2,
	 * solving two penalized subproblems with decreasing penalty parameter.
	 * (Here the penalty term is 0.5/mu * c(x)^2, so a *smaller* mu penalizes
	 * the constraint harder.)
	 */
	public static void PenaltyMathod() {
		final double mu1 = 1;
		ScalarFunc penalized1 = (x, label, param) -> x[0] + x[1] + 0.5 / mu1 * Math.pow(x[0] * x[0] + x[1] * x[1] -2, 2);
		VectorFunc gradPenalized1 = (x, label, param) -> {
			double c = x[0] * x[0] + x[1] * x[1] -2;
			return new double[] {1 + 2 * x[0] * c / mu1, 1 + 2 * x[1] * c / mu1};
		};
		double[] x0 = new double[] {0.1, 0.1};
		QuasiNewton_BFGS(penalized1, gradPenalized1, null, null, x0);
		
		// Tighter penalty, warm-started from the previous solution
		// (x0 was updated in place by the first BFGS run).
		final double mu2 = 0.1 * mu1;
		ScalarFunc penalized2 = (xx, label, param) -> xx[0] + xx[1] + 0.5 / mu2 * Math.pow(xx[0] * xx[0] + xx[1] * xx[1] -2, 2);
		VectorFunc gradPenalized2 = (xx, label, param) -> {
			double c = xx[0] * xx[0] + xx[1] * xx[1] -2;
			return new double[] {1 + 2 * xx[0] * c / mu2, 1 + 2 * xx[1] * c / mu2};
		};
		QuasiNewton_BFGS(penalized2, gradPenalized2, null, null, x0);
	}
}

