/**
 * Copyright 2011 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.ml;

import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.math3.analysis.DifferentiableMultivariateFunction;
import org.apache.commons.math3.analysis.MultivariateFunction;

import com.google.common.base.Preconditions;

import edu.byu.nlp.data.FeatureMatrix;
import edu.byu.nlp.data.LabeledInstance;
import edu.byu.nlp.math.CachedDifferentiableMultivariateFunction;
import edu.byu.nlp.math.CachedDifferentiableMultivariateFunction.ValueAndGradient;
import edu.byu.nlp.math.SparseIntegralUnivariateVectorialFunction;
import edu.byu.nlp.math.SparselyDifferentiableIntegralEnclosedUnivariateRealFunction;
import edu.byu.nlp.math.ValueVisitor;
import edu.byu.nlp.stats.CategoricalLogDistribution;
import edu.byu.nlp.stats.ConditionalCategoricalLogDistribution;
import edu.byu.nlp.stats.ConditionalCategoricalLogDistributionFactory;
import edu.byu.nlp.util.DoubleArrays;
import edu.byu.nlp.util.ValueSupplier;

/**
 * Common objective functions.
 * 
 * @author rah67
 *
 */
public class ObjectiveFunctions {

	private static final Logger logger = Logger.getLogger(ObjectiveFunctions.class.getName());
	
	// Uninstantiable (utility methods)
	private ObjectiveFunctions() {}
	
	/**
	 * Evaluates the (optionally regularized) log loss
	 * 
	 * f(w) = \sum_i log p(y_i|x_i; w) - alpha * R(w)
	 * 
	 * over a fixed collection of labeled instances. The conditional distribution log p(y|x; w) is
	 * instantiated from the weight vector via the provided factory on every evaluation. A null
	 * regularizer performs no regularization (alpha is then ignored).
	 */
	private static class LogLoss implements MultivariateFunction {

		private final Iterable<LabeledInstance> instances;
		private final ConditionalCategoricalLogDistributionFactory<FeatureMatrix> factory;
		private final double alpha;
		private final MultivariateFunction regularizer;
		
		public LogLoss(Iterable<LabeledInstance> instances,
				ConditionalCategoricalLogDistributionFactory<FeatureMatrix> factory,
				double alpha,
				MultivariateFunction regularizer) {
			this.instances = instances;
			this.factory = factory;
			this.alpha = alpha;
			this.regularizer = regularizer;
		}

		/**
		 * Computes \sum_i log p(y_i|x_i; w) - alpha * R(w) for the given weight vector.
		 * 
		 * @throws IllegalStateException if the factory produces a null distribution
		 */
		@Override
		public double value(double[] weights) {
			ConditionalCategoricalLogDistribution<FeatureMatrix> dist = factory.newInstance(weights);
			if (dist == null) {
				throw new IllegalStateException("Factory returned null distribution");
			}
			
			double sum = 0.0;
			for (LabeledInstance instance : instances) {
				sum += dist.given(instance.getFeatures()).logProbabilityOf(instance.getLabel()); 
			}
			if (regularizer != null) {
				sum -= alpha * regularizer.value(weights);
			}
			return sum;
		}
	}

	/**
	 * Equivalent to {@code logLoss(factory, 0.0, null)}: unregularized log loss.
	 */
	public static ObjectiveFunctionFactory<LabeledInstance> logLoss(
			ConditionalCategoricalLogDistributionFactory<FeatureMatrix> factory) {
		return logLoss(factory, 0.0, null);
	}
	
	/**
	 * Creates a function of a weight vector that evaluates to the log loss of the provided instances. That is, returns
	 * the function:
	 * 
	 * f(w) = \sum_i log p(y_i|x_i; w) - alpha * R(w).
	 * 
	 * The distribution log p(y|x; w) is created through the provided factory and evaluated for each labeled
	 * instance in the dataset.
	 * 
	 * A null regularizer performs no regularization.
	 * 
	 * @throws NullPointerException if either instances or distFactory are null
	 */
	public static ObjectiveFunctionFactory<LabeledInstance> logLoss(
			final ConditionalCategoricalLogDistributionFactory<FeatureMatrix> factory,
			final double alpha,
			final MultivariateFunction regularizer) {
		Preconditions.checkNotNull(factory);
		
		return new ObjectiveFunctionFactory<LabeledInstance>() {
			@Override
			public MultivariateFunction over(Iterable<LabeledInstance> instances) {
				Preconditions.checkNotNull(instances);
				return new LogLoss(instances, factory, alpha, regularizer);
			}
		};
	}
	
	/**
	 * A categorical distribution that is differentiable w.r.t to the parameters at a given instance and label,
	 * but at said instance and label, the derivative is potentially sparse. The derivative of interest is
	 * d/dw_f p(y|x;w ). Although the derivative is w.r.t to w, ConditionalDistribution.given(x) effectively
	 * encapsulates x and w. Therefore, we can treat the derivative as a function of the remaining variable y, namely:
	 * g_{x,w}(y) = d/dw_f log p(y|x;w ). This is precisely the function that the method derivative() should return.
	 * Please note that the derivative function MUST be the derivative of the log of distribution.
	 */
	public static interface SparselyDifferentiableCategoricalLogDistribution extends CategoricalLogDistribution,
			SparselyDifferentiableIntegralEnclosedUnivariateRealFunction { }
	
	/**
	 * A {@code ConditionalCategoricalDistribution} that is differentiable w.r.t to the weights w.
	 */
	public static interface SparselyDifferentiableConditionalCategoricalLogDistribution
			extends ConditionalCategoricalLogDistribution<FeatureMatrix> {
		@Override
		SparselyDifferentiableCategoricalLogDistribution given(FeatureMatrix instance);
	}
	
	/**
	 * A factory for a {@code ProbabilisticClassifier} that is differentiable w.r.t to the weights w.
	 */
	public static interface SparselyDifferentiableConditionalCategoricalLogDistributionFactory
			extends ConditionalCategoricalLogDistributionFactory<FeatureMatrix> {
		@Override
		SparselyDifferentiableConditionalCategoricalLogDistribution newInstance(double[] weights);
	}
	
	/**
	 * A visitor that accumulates the values being visited into a gradient vector.
	 * 
	 * A single visitor instance is reused across every instance in the dataset, so each visited
	 * value is ADDED to the existing entry; the final array therefore holds
	 * \sum_i d/dw_f log p(y_i|x_i; w). Each walk must visit a given feature index at most once,
	 * or that feature's contribution would be double-counted.
	 */
	private static class GradientVisitor implements ValueVisitor {
		private final double[] gradient;
		
		public GradientVisitor(int dimension) {
			gradient = new double[dimension];
		}
		
		@Override
		public void visit(int index, double value) {
			// Guard the format call; this runs once per non-zero feature per instance.
			if (logger.isLoggable(Level.FINEST)) {
				logger.finest(String.format("Visiting index %d with value %f", index, value));
			}
			// Accumulate rather than overwrite: contributions from all instances must sum.
			gradient[index] += value;
		}
		
		public double[] getGradient() { return gradient; }
	}
	
	/**
	 * A supplier (to be used with a cache) that computes log loss and its gradient.
	 * Provided a differentiable (w.r.t to w) distribution, log p(y|x; w), this class supplies the value and the
	 * gradient given a weight vector. The distribution, log p(y|x; w), is created through the provided factory. Note
	 * that the derivative should be for the LOG of the distribution.
	 */
	private static class LogLossSupplier implements ValueSupplier<double[], ValueAndGradient> {

		private final Iterable<LabeledInstance> instances;
		private final SparselyDifferentiableConditionalCategoricalLogDistributionFactory factory;
		private final double alpha;
		private final DifferentiableMultivariateFunction regularizer;
		
		/**
		 * If regularizer is null, alpha is ignored. 
		 */
		public LogLossSupplier(Iterable<LabeledInstance> instances,
				SparselyDifferentiableConditionalCategoricalLogDistributionFactory factory,
				double alpha,
				DifferentiableMultivariateFunction regularizer) {
			this.instances = instances;
			this.factory = factory;
			this.alpha = alpha;
			this.regularizer = regularizer;
		}
		
		// TODO(rah67): weighted instances
		/**
		 * Computes the regularized log loss and its gradient in a single pass over the data.
		 * 
		 * @throws IllegalStateException if the factory yields a null conditional distribution or
		 *         a null derivative for some instance
		 */
		@Override
		public ValueAndGradient get(double[] weights) {
			SparselyDifferentiableConditionalCategoricalLogDistribution conditionalDist = factory.newInstance(weights);
			// value = \sum_i log p(y_i|x_i; w)
			// d/dw_f \sum_i log p(y_i|x_i; w) = \sum_i d/dw_f log p(y_i|x_i; w)
			double value = 0.0;
			GradientVisitor visitor = new GradientVisitor(weights.length);
			for (LabeledInstance instance : instances) {
				SparselyDifferentiableCategoricalLogDistribution dist = conditionalDist.given(instance.getFeatures());
				if (dist == null) {
					throw new IllegalStateException("Conditional distribution was null");
				}
				value += dist.logProbabilityOf(instance.getLabel());
				SparseIntegralUnivariateVectorialFunction derivative = dist.sparseGradient();
				if (derivative == null) {
					throw new IllegalStateException("Derivative was null");
				}
				derivative.walkValuesSparsely(instance.getLabel(), visitor);
			}
			
			double[] gradient = visitor.getGradient();
			
			if (regularizer != null) {
				try {
					value -= alpha * regularizer.value(weights);
					double[] regularizerGradient = regularizer.gradient().value(weights);
					DoubleArrays.subtractToSelfWeighted(gradient, alpha, regularizerGradient);
				} catch (Exception e) {
					// Boundary wrap: preserve the cause so regularizer failures stay diagnosable.
					throw new RuntimeException(e);
				}
			}
			return new ValueAndGradient(value, gradient);
		}
	}
	
	/**
	 * Equivalent to {@code differentiableLogLoss(classifierFactory, 0.0, null)}: unregularized log loss.
	 */
	public static DifferentiableObjectiveFunctionFactory<LabeledInstance> differentiableLogLoss(
			SparselyDifferentiableConditionalCategoricalLogDistributionFactory classifierFactory) {
		return differentiableLogLoss(classifierFactory, 0.0, null); 
	}
	
	/**
	 * Creates a differentiable function of a weight vector that evaluates to the regularized log loss of the provided
	 * instances. That is, for the regularizer R(w), returns the function:
	 * 
	 * f(w) = \sum_i log p(y_i|x_i; w) - alpha R(w).
	 * 
	 * whose derivative is
	 * 
	 * d/dw_f f(w) = \sum_i d\dw_f log p(y_i|x_i; w) - alpha d/dw_f R(w).
	 * 
	 * The distribution log p(y|x; w) is created through the provided factory and evaluated for each labeled instance
	 * in the dataset. The distribution log p(y|x; w) must be differentiable.
	 * 
	 * The derivative (an instance of {@code SparseIntegralUnivariateVectorialFunction}), should only invoke visit
	 * on the visitor at most once per feature, or the results may be undefined.
	 * 
	 * If regularizer is null, then alpha is ignored and no regularization is performed.
	 * 
	 * @throws NullPointerException if instances or distFactory are null
	 */
	public static DifferentiableObjectiveFunctionFactory<LabeledInstance> differentiableLogLoss(
			final SparselyDifferentiableConditionalCategoricalLogDistributionFactory classifierFactory,
			final double alpha,
			final DifferentiableMultivariateFunction regularizer) {
		Preconditions.checkNotNull(classifierFactory);
		
		// The differentiable function interface is inefficient for optimization since it separately calls value() and
		// derivative which require renormalizing distributions (the most expensive part of the evaluation). To avoid
		// duplicate computation, we simultaneously compute the value and the gradient via the LogLossSupplier and
		// cache the values using CachedDifferentiableMultivariateRealFunction.
		return new DifferentiableObjectiveFunctionFactory<LabeledInstance>() {
			
			@Override
			public DifferentiableMultivariateFunction over(
					Iterable<LabeledInstance> instances) {
				Preconditions.checkNotNull(instances);
				
				return new CachedDifferentiableMultivariateFunction(
						new LogLossSupplier(instances, classifierFactory, alpha, regularizer));
			}
		};
	}
	
}
