/**
 * Copyright 2012 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.cluster.mom;

import edu.byu.nlp.cluster.Dataset;
import edu.byu.nlp.cluster.em.Expectable;
import edu.byu.nlp.cluster.em.Maximizable;
import edu.byu.nlp.cluster.em.PartialCounts;
import edu.byu.nlp.data.SparseFeatureVector;
import edu.byu.nlp.math.Math2;
import edu.byu.nlp.util.DoubleArrays;
import edu.byu.nlp.util.Matrices;

/**
 * @author rah67
 *
 */
public class VariationalBayesExpectable implements Expectable<MoMParameters> {

	/** Tolerance used when testing whether the temperature is effectively 1.0 (no annealing). */
	private static final double TEMP_TOLERANCE = 1e-10;

	private final HyperParams priors;
	private final boolean isHard;
	private final double temp;

	/**
	 * @param priors hyper-parameters providing the priors folded into the sufficient statistics
	 * @param isHard when true, each instance's full count mass goes to its single most probable cluster
	 * @param temp   annealing temperature; a value of 1.0 (within tolerance) disables annealing
	 */
	public VariationalBayesExpectable(HyperParams priors, boolean isHard, double temp) {
		this.priors = priors;
		this.isHard = isHard;
		this.temp = temp;
	}

	/** {@inheritDoc} */
	@Override
	public Maximizable<MoMParameters> expect(Dataset data, MoMParameters curParams, MoMParameters nextParams) {
		// Anneal once, up front: dividing the log parameters by temp here saves a division
		// inside every per-instance posterior computation of p(y|x). The trade-off is that
		// the model is no longer normalized afterwards.
		if (!Math2.doubleEquals(temp, 1.0, TEMP_TOLERANCE)) {
			DoubleArrays.divideToSelf(curParams.getLogPOfY(), temp);
			Matrices.divideToSelf(curParams.getLogPOfXGivenY(), temp);
		}

		// Sufficient-statistics accumulator; nextParams' storage is recycled for memory efficiency.
		PartialCounts stats = resetNextParams(nextParams);

		// Entropy term accumulated across instances, required later for the lower bound.
		double thetaEntropy = 0.0;

		for (SparseFeatureVector instance : data.unlabeledData()) {
			// logQ begins as log p(y, x); subtracting the normalizer c yields log p(y|x).
			double[] logQ = curParams.logJoint(instance);
			DoubleArrays.logNormalizeToSelf(logQ);

			// Converts logQ to probability space in place while returning its entropy.
			thetaEntropy += toProbsReturningEntropy(logQ);
			// logQ now holds p(y|x)

			if (isHard) {
				stats.incrementCounts(instance, DoubleArrays.argMax(logQ));
			} else {
				stats.incrementCounts(instance, logQ);
			}
		}

		return new VariationalBayesMaximizable(priors.getAlpha(), priors.getBeta(), stats, thetaEntropy, temp);
	}

	/** Builds a fresh accumulator seeded with the priors, reusing nextParams' arrays as backing storage. */
	private PartialCounts resetNextParams(MoMParameters nextParams) {
		return PartialCounts.newInstanceFromInitialCounts(nextParams.getLogPOfY(), priors.getAlphaPlusObservedCounts(),
				priors.getBetaPlusObservedCounts(), nextParams.getLogPOfXGivenY());
	}

	/**
	 * Replaces each entry of {@code logDist} (log-probabilities) with its exponential, and returns
	 * the entropy -sum_k p_k log p_k of the resulting distribution. Zero-probability entries
	 * contribute nothing to the entropy (avoiding 0 * -inf = NaN).
	 */
	private double toProbsReturningEntropy(double[] logDist) {
		double entropy = 0.0;
		final int size = logDist.length;
		for (int i = 0; i < size; i++) {
			double p = Math.exp(logDist[i]);
			if (p > 0.0) {
				entropy -= p * logDist[i];
			}
			logDist[i] = p;
		}
		return entropy;
	}

}
