/**
 * Copyright 2011 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.classify;

import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.math3.analysis.function.Exp;
import org.apache.commons.math3.linear.RealMatrixPreservingVisitor;
import org.apache.commons.math3.linear.RealVector;

import edu.byu.nlp.data.FeatureMatrix;
import edu.byu.nlp.math.RealVectors;
import edu.byu.nlp.math.SparseIntegralUnivariateVectorialFunction;
import edu.byu.nlp.math.ValueVisitor;
import edu.byu.nlp.ml.ObjectiveFunctions.SparselyDifferentiableCategoricalLogDistribution;
import edu.byu.nlp.ml.ObjectiveFunctions.SparselyDifferentiableConditionalCategoricalLogDistribution;

/**
 * A multi-variate log-linear model (aka logistic regression). 
 * 
 * @author rah67
 *
 */
// TODO(rah67): consider storing k-1 weights (probably applicable to linear model as well).
public class LogLinearModel extends LinearModel implements SparselyDifferentiableConditionalCategoricalLogDistribution {

	private static final Logger logger = Logger.getLogger(LogLinearModel.class.getName());
	
	/**
	 * The actual (log) conditional distribution p(y|x; w) for a single input. The raw scores
	 * (x \cdot w) are computed eagerly; normalization into log probabilities happens lazily in
	 * place on first use. NOTE: the lazy normalization mutates shared state without
	 * synchronization, so instances are not thread-safe.
	 */
	private final class LogConditionalDistribution implements SparselyDifferentiableCategoricalLogDistribution {

		private final FeatureMatrix input;
		
		// Either the product of the input and the weights OR the log probs, according to the value of normalized.
		private final RealVector scores;
		private boolean normalized;

		public LogConditionalDistribution(FeatureMatrix input) {
			this.input = input;
			// Raw per-class scores x \cdot w, provided by the LinearModel superclass.
			this.scores = scores(input);
			this.normalized = false;
		}

		/**
		 * Visitor of the FeatureMatrix for computing the derivative. Note that this class assumes
		 * that the matrix is walked in column order, i.e. that all entries for a given feature
		 * arrive consecutively; the per-feature partial sum is flushed when the feature changes.
		 */
		private class Visitor implements RealMatrixPreservingVisitor {
			
			private final RealVector probs;
			private final int trueLabel;
			private final ValueVisitor visitor;
			
			// Feature whose partial derivative is currently being accumulated; -1 before any entry.
			private int curFeature;
			private double featureSum;
			
			public Visitor(RealVector probs, int trueLabel, ValueVisitor visitor) {
				this.probs = probs;
				this.trueLabel = trueLabel;
				this.visitor = visitor;
			}
			
			/** Flushes the accumulated partial derivative for the most recently seen feature. */
			private void visitLastFeature() {
				// TODO(rah67): consider adding a tolerance here
				if (curFeature > -1 && featureSum != 0.0) {
					visitor.visit(curFeature, featureSum);
				}
			}

			@Override
			public double end() {
				// The final feature has no successor to trigger its flush, so flush it here.
				visitLastFeature();
				return 0.0;
			}

			@Override
			public void start(int rows, int columns, int startRow, int endRow, int startColumn, int endColumn) {
				curFeature = -1;
				featureSum = 0.0;
			}

			@Override
			public void visit(int label, int feature, double value) {
				// Guard so the format string is only built when FINEST logging is actually enabled.
				if (logger.isLoggable(Level.FINEST)) {
					logger.finest(String.format("Visiting: %d, %d, %f", label, feature, value));
				}
				// d/dw_f log p(y|x; w) = d/dw_f [(sum_f w_f * f_f(y, x)) - log(sum_y' exp(sum_f w_f * f_f(y', x)))]
				//                      = f_f(y, x) - d/dw_f log(sum_y' exp(sum_f w_f * f_f(y', x)))
				//                      = f_f(y, x) - 1/z(x) * (sum_y' d/dw_f exp(sum_f w_f * f_f(y', x)))
				//                      = f_f(y, x) - 1/z(x) * (sum_y' exp(sum_f w_f * f_f(y', x)) * f_f(y',x))
				//                      = f_f(y, x) - sum_y' p(y'|x) * f_f(y',x))

				// p(y'|x) * f_f(y', x) fires for all labels
				double gradientUpdate = -probs.getEntry(label) * value;
				if (label == trueLabel) {
					// f_f(y, x) fires for the true label
					gradientUpdate += value;
				}

				// Don't visit the feature until we've received all of the values for this feature.
				if (feature == curFeature) {
					featureSum += gradientUpdate;
				} else {
					visitLastFeature();
					curFeature = feature;
					featureSum = gradientUpdate;
				}
			}
		}
		
		/**
		 * Computes the parameters to a LogCategorical distribution for the input based on the formula for logistic
		 * regression. Namely, \theta_y = log p(y|x, w) = x \cdot w - log Z(x). Normalizes in place
		 * at most once; subsequent calls return the cached log probabilities.
		 */
		private RealVector logProbs() {
			if (!normalized) {
				// scores is a vector where each element is the unnormalized log probability of each class, i.e.
				// x \cdot w. So we simply normalize in log space (compute and subtract Z(x)) to get the log
				// distribution.
				RealVectors.logNormalizeToSelf(scores);
				normalized = true;
			}
			return scores;
		}
		
		@Override
		public SparseIntegralUnivariateVectorialFunction sparseGradient() {
			// Exponentiate once up front; the visitor needs probabilities, not log probabilities.
			final RealVector probs = logProbs().map(new Exp());
			return new SparseIntegralUnivariateVectorialFunction() {
				@Override
				public void walkValuesSparsely(int trueLabel, ValueVisitor visitor) {
					input.walkSparselyInColumnOrder(new Visitor(probs, trueLabel, visitor));
				}
			};
		}

		@Override
		public double value(int x) {
			// Identical by contract to logProbabilityOf; delegate to keep the two in sync.
			return logProbabilityOf(x);
		}

		@Override
		public double logProbabilityOf(int event) {
			return logProbs().getEntry(event);
		}

		@Override
		public int argMax() {
			// Normalization does not affect the argMax, so we needn't check.
			return scores.getMaxIndex();
		}
	}

	/**
	 * Constructs a linear model for the given weights and number of classes. This class assumes ownership of the
	 * weights, which generally shouldn't be changed after this instance is instantiated.
	 * 
	 * @throws NullPointerException if weights is null
	 * @throws IllegalArgumentException if numClasses &lt; 0
	 */
	public LogLinearModel(RealVector weights, int numClasses) {
		super(weights, numClasses);
	}
	
	/**
	 * Returns the (log) conditional distribution over labels for the given input, i.e.
	 * p(y | input; w). The returned distribution is lazily normalized and not thread-safe.
	 */
	public SparselyDifferentiableCategoricalLogDistribution given(FeatureMatrix input) {
		return new LogConditionalDistribution(input);
	}
}
