/**
 * Copyright 2011 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.ml;

import java.util.Collections;
import java.util.Random;

import org.apache.commons.math3.optimization.ConvergenceChecker;
import org.apache.commons.math3.optimization.PointValuePair;
import org.apache.commons.math3.random.RandomVectorGenerator;

import com.google.common.base.Preconditions;

import edu.byu.nlp.data.Dataset;
import edu.byu.nlp.data.Instance;
import edu.byu.nlp.util.DoubleArrays;

/**
 * An implementation of stochastic gradient descent for learning weight vectors. The basic algorithm is as follows:
 * {@code
 *   while not converged:
 *     shuffle data
 *     for each instance in data:
 *         gradient = gradient at instance and weights
 *         weights = weights + learningRate * gradient // (vector add)
 * }
 * 
 * @author rah67
 *
 */
// TODO(rah67): implement optimization interface.
// TODO(rah67): add max iterations parameter.
public class StochasticGradientDescent<I extends Instance> {

	private final DifferentiableObjectiveFunctionFactory<I> objectiveFunction;
	private final double learningRate;
	// Generates initial weights on demand; preferred to holding a potentially large array in memory.
	private final RandomVectorGenerator weightInitializer;
	private final Random rnd;
	private final ConvergenceChecker<PointValuePair> convergenceChecker;
	
	/**
	 * @param objectiveFunction the function to optimize
	 * @param learningRate the step size applied to each per-instance gradient; must be positive
	 * @param weightInitializer provides the initial weights
	 * @param rnd source of randomness used to shuffle the data before each pass
	 * @param convergenceChecker criteria for convergence, evaluated once per pass over the data
	 * 
	 * @throws NullPointerException if any of the arguments are null
	 * @throws IllegalArgumentException if learningRate <= 0.0
	 * 
	 * @see edu.byu.nlp.ml.RandomVectorGenerators
	 */
	public StochasticGradientDescent(DifferentiableObjectiveFunctionFactory<I> objectiveFunction, double learningRate,
			RandomVectorGenerator weightInitializer, Random rnd, ConvergenceChecker<PointValuePair> convergenceChecker)
	{
		Preconditions.checkNotNull(objectiveFunction);
		Preconditions.checkNotNull(weightInitializer);
		Preconditions.checkNotNull(rnd);
		Preconditions.checkNotNull(convergenceChecker);
		Preconditions.checkArgument(learningRate > 0.0);
		
		this.learningRate = learningRate;
		this.objectiveFunction = objectiveFunction;
		this.rnd = rnd;
		this.weightInitializer = weightInitializer;
		this.convergenceChecker = convergenceChecker;
	}

	/**
	 * Learns a weight vector by repeated passes over {@code data}, applying the per-instance
	 * update {@code weights += learningRate * gradient} after each instance. The data is
	 * reshuffled before every pass, and the full-data objective value is computed once per pass
	 * to feed the convergence check.
	 * 
	 * <p>NOTE(review): since the gradient is <em>added</em> with a positive learning rate, this
	 * performs ascent on the objective (e.g. maximizing a log-likelihood) — confirm that the
	 * objective's sign convention matches.
	 * 
	 * @param data the training data; shuffled in place between passes
	 * @return the learned weight vector (the live internal array, not a copy)
	 * @throws NullPointerException if data is null
	 */
	public double[] learnWeights(Dataset<I> data) throws IllegalArgumentException {
		Preconditions.checkNotNull(data);
		
		double[] weights = weightInitializer.nextVector();
		PointValuePair prevPair;
		// The two-argument constructor copies the point array. Sharing it instead
		// (copyArray=false) would alias the mutable weight vector: after the first pass,
		// prevPair would silently reflect the *current* weights, so a point-based
		// ConvergenceChecker would see prev == cur and report convergence immediately.
		PointValuePair curPair = new PointValuePair(weights, 0.0);
		
		int macroIterations = 0;
		do {
			data.shuffle(rnd);
			for (I instance : data) {
				// TODO(rah67): this method needs to be sparse
				double[] gradient =
					objectiveFunction.over(Collections.singleton(instance)).gradient().value(weights);
				DoubleArrays.addToSelfWeighted(weights, learningRate, gradient);
			}
			// Full-data objective value, used only for the convergence test.
			double value = objectiveFunction.over(data).value(weights);
			prevPair = curPair;
			curPair = new PointValuePair(weights, value); // copies weights, so prevPair stays frozen
			++macroIterations;
		} while (!convergenceChecker.converged(macroIterations, curPair, prevPair));
		return weights;
	}
}
