package com.stanford.ml.svm;

/**
 * Support vector machine constructs a hyperplane in a high dimensional space,
 * which can be used for classification.
 * 
 * @author Fatih Sunor
 * 
 */
public class SVM {

	// NOTE(review): 10e-3 == 1e-2 and 10e-5 == 1e-4. If the intent was the
	// customary SMO defaults 1e-3 / 1e-5, these literals should be 1e-3 and
	// 1e-5 — confirm before changing, since it alters convergence behavior.
	private static final double TOLERANCE_HIGH = 10e-3;
	private static final double TOLERANCE_LOW = 10e-5;
	/** Regularization parameter: upper bound on every Lagrange multiplier. */
	private static final double C = 1.0;
	/** Consecutive full passes with no alpha change required to terminate. */
	private static final int MAX_LOOP = 10;

	// Prediction errors E_i = f(x_i) - y_i and E_j for the current working pair.
	private double error1, error2;
	// Values of alpha[i] and alpha[j] before the current joint optimization step.
	private double a1Previous, a2Previous;
	// Clipping bounds for the freshly computed alpha[j].
	private double L, H;
	private Model _model;

	/**
	 * Trains the model on the given problem using SMO with a linear kernel.
	 * 
	 * @param train training data set
	 */
	public void train(Problem train) {
		Kernel kernel = new Kernel();
		SMO(train, kernel);
	}

	/**
	 * Predicts the class labels based on an optimization problem built by the
	 * test data
	 * 
	 * @param problem test data set
	 * @return predicted label (-1 or 1) for each instance, in input order
	 */
	public int[] test(Problem problem) {
		int[] predictions = new int[problem.getL()];
		for (int i = 0; i < problem.getL(); i++) {
			// Sign of the decision function determines the class.
			predictions[i] = (classify(problem.getX()[i]) < 0 ? -1 : 1);
		}
		return predictions;
	}

	/**
	 * Sequential Minimal Optimization Function. Repeatedly selects a pair of
	 * Lagrange multipliers (alpha[i] violating the KKT conditions, alpha[j]
	 * random) and jointly optimizes them until no multiplier changes for
	 * MAX_LOOP consecutive passes.
	 * 
	 * @param train training data set
	 * @param kernel kernel stored on the resulting model
	 */
	private void SMO(Problem train, Kernel kernel) {
		int pass = 0;
		int alphaShift = 0;
		int i, j;
		double eta;
		_model = new Model();
		_model.setAlpha(new double[train.getL()]);
		_model.setB(0);
		_model.setKernel(kernel);
		_model.setX(train.getX());
		_model.setY(train.getY());
		_model.setL(train.getL());
		_model.setN(train.getN());
		if (train.getL() < 2)
			return; // cannot form a working pair; j selection below needs >= 2 points
		while (pass < MAX_LOOP) {
			alphaShift = 0;
			for (i = 0; i < train.getL(); i++) {
				// E_i = f(x_i) - y_i
				error1 = classify(train.getX()[i]) - train.getY()[i];
				// Optimize only if alpha[i] violates the KKT conditions
				// (within TOLERANCE_LOW).
				if ((train.getY()[i] * error1 < -TOLERANCE_LOW && _model
						.getAlpha()[i] < C)
						|| (train.getY()[i] * error1 > TOLERANCE_LOW && _model
								.getAlpha()[i] > 0)) {
					// Pick j uniformly at random from {0..L-1} \ {i}.
					j = (int) Math.floor(Math.random() * (train.getL() - 1));
					j = (j < i) ? j : (j + 1);
					error2 = classify(train.getX()[j]) - train.getY()[j];
					a1Previous = _model.getAlpha()[i];
					// BUG FIX: the original assigned alpha[j] to a1Previous a
					// second time, leaving a2Previous stale for the bound,
					// update, and bias computations below.
					a2Previous = _model.getAlpha()[j];
					L = calculateL(train.getY()[i], train.getY()[j]);
					H = calculateH(train.getY()[i], train.getY()[j]);
					if (L == H) // no room to move alpha[j]; next i
						continue;
					// eta = 2*K(i,j) - K(i,i) - K(j,j); second derivative of
					// the objective along the constraint line (negative for a
					// maximum).
					eta = 2
							* calculateKernelValue(train.getX()[i],
									train.getX()[j])
							- calculateKernelValue(train.getX()[i],
									train.getX()[i])
							- calculateKernelValue(train.getX()[j],
									train.getX()[j]);
					if (eta >= 0) // degenerate direction; next i
						continue;
					// New alpha[j] along the unconstrained optimum, then clip
					// to [L, H].
					_model.getAlpha()[j] = a2Previous - (train.getY()[j] * (error1 - error2)) / eta;
					if (_model.getAlpha()[j] > H)
						_model.getAlpha()[j] = H;
					else if (_model.getAlpha()[j] < L)
						_model.getAlpha()[j] = L;
					// Skip if alpha[j] barely moved; not a productive step.
					if (Math.abs(_model.getAlpha()[j] - a2Previous) < TOLERANCE_HIGH)
						continue;
					// alpha[i] moves the opposite way to keep
					// sum(alpha_k * y_k) constant.
					_model.getAlpha()[i] = a1Previous + train.getY()[i]
							* train.getY()[j]
							* (a2Previous - _model.getAlpha()[j]);
					calculateBias(
							_model.getAlpha()[i],
							_model.getAlpha()[j],
							train.getY()[i],
							train.getY()[j],
							calculateKernelValue(train.getX()[i], train.getX()[i]),
							calculateKernelValue(train.getX()[j], train.getX()[j]),
							calculateKernelValue(train.getX()[i], train.getX()[j]));
					alphaShift++;
				}
			}
			// Count only full passes in which nothing changed; any update
			// resets the convergence counter.
			if (alphaShift == 0)
				pass++;
			else
				pass = 0;
		}
	}

	/**
	 * Calculates the lower clipping bound L for the new alpha[j], derived from
	 * the box constraint [0, C] and the equality constraint on the pair.
	 * Reads a1Previous/a2Previous set by the caller.
	 * 
	 * @param yi label of point i
	 * @param yj label of point j
	 * @return Returns L
	 */
	private double calculateL(int yi, int yj) {
		double L = 0;
		if (yi != yj) {
			L = Math.max(0, -a1Previous + a2Previous);
		} else {
			L = Math.max(0, a1Previous + a2Previous - C);
		}
		return L;
	}

	/**
	 * Calculates the upper clipping bound H for the new alpha[j], derived from
	 * the box constraint [0, C] and the equality constraint on the pair.
	 * Reads a1Previous/a2Previous set by the caller.
	 * 
	 * @param yi label of point i
	 * @param yj label of point j
	 * @return Returns H
	 */
	private double calculateH(int yi, int yj) {
		double H = 0;
		if (yi != yj) {
			H = Math.min(C, -a1Previous + a2Previous + C);
		} else {
			H = Math.min(C, a1Previous + a2Previous);
		}
		return H;
	}

	/**
	 * Calculates the bias and stores in the model. The bias term is calculated
	 * by using unbounded Lagrange multipliers: b1/b2 each make the KKT
	 * condition hold at point i/j; whichever multiplier is strictly inside
	 * (0, C) gives an exact bias, otherwise the average is used.
	 * 
	 * @param ai new alpha[i]
	 * @param aj new alpha[j]
	 * @param yi label of point i
	 * @param yj label of point j
	 * @param kii kernel value K(x_i, x_i)
	 * @param kjj kernel value K(x_j, x_j)
	 * @param kij kernel value K(x_i, x_j)
	 */
	private void calculateBias(double ai, double aj, int yi, int yj,
			double kii, double kjj, double kij) {
		double b1 = _model.getB() - error1 - yi * (ai - a1Previous) * kii - yj
				* (aj - a2Previous) * kij;
		double b2 = _model.getB() - error2 - yi * (ai - a1Previous) * kij - yj
				* (aj - a2Previous) * kjj;
		if (0 < ai && ai < C)
			_model.setB(b1);
		else if (0 < aj && aj < C)
			_model.setB(b2);
		else
			_model.setB((b1 + b2) / 2);
	}

	/**
	 * Based on the kernel of the model it calculates the kernel value between
	 * two points. Currently always a linear kernel, regardless of the kernel
	 * object stored on the model.
	 * 
	 * @param x1 First point/vector
	 * @param x2 Second point/vector
	 * @return Kernel value between x1 and x2
	 */
	private double calculateKernelValue(Feature[] x1, Feature[] x2) {
		return SVMUtil.linearKernel(x1, x2);
	}

	/**
	 * Classify given a feature vector: evaluates the decision function
	 * f(x) = sum_i alpha_i * y_i * K(x, x_i) + b over the stored training set.
	 * 
	 * @param features is the feature vector
	 * @return signed decision value; callers threshold at 0 for the -1/1 label
	 */
	public double classify(Feature[] features) {
		double f = 0;
		for (int i = 0; i < _model.getL(); i++) {
			f += _model.getAlpha()[i] * _model.getY()[i]
					* calculateKernelValue(features, _model.getX()[i]);
		}
		return f + _model.getB();
	}

	/**
	 * Returns the model
	 */
	public Model getModel() {
		return _model;
	}

	/**
	 * Sets the model
	 */
	public void setModel(Model model) {
		_model = model;
	}
}
