package net.yegong.mva.pca;

import net.yegong.matrix.ColumnVector;
import net.yegong.matrix.DiagonalMatrix;
import net.yegong.matrix.Matrix;
import net.yegong.matrix.RowVector;
import net.yegong.matrix.SymmetricMatrix;
import net.yegong.matrix.Transformer;
import net.yegong.mva.ComputingException;

/**
 * The PCACalc implementation using the CCIPCA algorithm. This algorithm is
 * slow and has large errors, but it can sometimes save a little memory.
 * 
 * @author cooper
 */
public class CCIPCACalc extends AbstractPCACalc {

	/*
	 * Before modifying this algorithm, read
	 * <i>Largest-Eigenvalue-Theory for Incremental Principal Component Analysis</i>
	 * by Shuicheng Yan and Xiaoou Tang.
	 * 
	 * The variable names in this class (u, v, n, ...) are the classic names
	 * used in the CCIPCA literature, so please don't rename them.
	 */

	/**
	 * Estimates the leading {@code factor} principal components of {@code mat}
	 * with the CCIPCA (candid covariance-free incremental PCA) algorithm: the
	 * rows of {@code mat} are streamed repeatedly through the incremental
	 * update until the component vectors converge (RMS change per epoch below
	 * the inherited {@code errors} threshold) or an iteration budget is
	 * exhausted. On return the inherited fields {@code eigenvalue} and
	 * {@code eigenvector} hold the results, and contributions are filled in
	 * via {@code calcContribution}.
	 * <p>
	 * CCIPCA is slow and comparatively inaccurate, but it avoids building the
	 * full covariance matrix (unless {@code factor <= 0}), so it can save a
	 * little memory.
	 * 
	 * @param mat    the data matrix; one observation per row
	 * @param factor the number of components to extract; when {@code <= 0} it
	 *               is derived from the covariance via {@code calcFactor}
	 * @throws ComputingException
	 */
	@Override
	protected void calc(Matrix mat, int factor) throws ComputingException {
		// The mean is computed once up front rather than incrementally,
		// which gives a nicer (more stable) result.
		calcMean(mat);

		if (factor <= 0) {
			// Auto-select the component count from the full covariance.
			// Note: this materializes the covariance matrix, forfeiting
			// the memory advantage of CCIPCA.
			SymmetricMatrix cov = mat.rightMultiplyMyTranspose();
			factor = calcFactor(cov);
		}
		int nCol = mat.getColumnsCount();
		int nRow = mat.getRowsCount();

		// u: the current sample vector being absorbed; it is progressively
		// deflated inside the inner loop so each later component sees only
		// the residual left by the earlier ones.
		RowVector u = new RowVector(nCol);
		float tmp1, tmp2;
		// v: one unnormalized component estimate per column.
		Matrix v = new Matrix(nCol, factor);
		// Snapshot of v from the previous epoch, used by the convergence
		// test. The NaN sentinel makes the first residual check fail
		// (sqrt(NaN) < errors is false), so at least one epoch always runs.
		Matrix last = new Matrix(nCol, factor);
		last.set(0, 0, Float.NaN);
		// Budget of full passes over the data, chosen so the total number
		// of incremental updates stays around 10000.
		int maxIteration = 10000 / nRow;
		if (maxIteration < 2)
			maxIteration = 1;
		for (int n = 1; n <= maxIteration * nRow; ++n) {
			// Cycle through the rows of mat; n counts total updates across
			// epochs, so the (1 - 1/n) vs 1/n weights below keep shrinking
			// the learning rate as more samples are absorbed.
			int offset = (n - 1) % nRow;
			u = mat.getRow(offset);
			// After n samples only the first min(factor, n) components exist.
			for (int i = 0; i < factor && i < n; ++i) {
				if (i + 1 == n) {
					// First sample seen by component i: initialize v_i with
					// the current residual vector.
					v.setColumn(i, u.transpose());
				}
				else {
					// Incremental update of v_i:
					//   v_i <- (1 - 1/n) * v_i + (1/n) * (u . v_i / |v_i|) * u
					tmp1 = tmp2 = 0.0f;
					ColumnVector vec = v.getColumn(i);
					tmp1 = vec.innerProduct(u);
					tmp2 = vec.sumSquare();
					tmp1 = tmp1 / n / (float) Math.sqrt(tmp2);
					tmp2 = 1 - 1.0f / n;
					vec = vec.add(u.transpose(), tmp2, tmp1).toColumnVector();
					v.setColumn(i, vec);

					// Deflate: remove u's projection onto the updated v_i so
					// the next component works on the residual,
					//   u <- u - (u . v_i / |v_i|^2) * v_i
					tmp1 = tmp2 = 0.0f;
					tmp1 = vec.innerProduct(u);
					tmp2 = vec.sumSquare();
					vec.scale(tmp1 / tmp2);
					u = u.minus(vec.transpose()).toRowVector();
				}
			}
			if (n % nRow == 0) {
				// End of an epoch: stop once the RMS change of v since the
				// previous epoch drops below the error threshold.
				Matrix residuals = last.minus(v);
				float e = residuals.sumSquare() / residuals.getElementsCount();
				if (Math.sqrt(e) < errors) {
					break;
				}
				last = v.clone();
			}
		}

		// The eigenvalue estimate for component i is |v_i| (the column norm);
		// vals holds |v_i|^2, and the loop below replaces each stored
		// eigenvalue with its square root.
		RowVector vals = v.sumSquareOfColumn();
		eigenvalue = new DiagonalMatrix(vals.getColumnMajorArray(), factor, factor);
		for (int i = 0; i < factor; ++i) {
			eigenvalue.set(i, (float) Math.sqrt(eigenvalue.get(i)));
		}
		sum_var = Math.min(sum_var, vals.sum());
		// NOTE(review): at this point vals still holds |v_i|^2, not |v_i|.
		// If divideEachRow divides column i by vals[i] directly, the
		// resulting "unit" vectors have length 1/|v_i| rather than 1 —
		// verify Transformer.divideEachRow's semantics (it may take the
		// square root internally).
		Transformer.divideEachRow(v, vals);
		eigenvector = v;

		// Pass a clone so calcContribution cannot mutate the stored
		// eigenvalue diagonal.
		DiagonalMatrix d = eigenvalue.clone();
		calcContribution(d);
	}
}
