/**
 * Copyright 2012 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.cluster.mom;

import edu.byu.nlp.cluster.Dataset;
import edu.byu.nlp.cluster.em.Expectable;
import edu.byu.nlp.cluster.em.Maximizable;
import edu.byu.nlp.cluster.em.PartialCounts;
import edu.byu.nlp.data.SparseFeatureVector;
import edu.byu.nlp.math.Math2;
import edu.byu.nlp.util.DoubleArrays;
import edu.byu.nlp.util.Matrices;

/**
 * @author rah67
 *
 */
public class EMExpectable implements Expectable<MoMParameters> {

	// Dirichlet hyperparameter on the class prior p(y).
	private final double alpha;
	// Dirichlet hyperparameter on the per-class feature distributions p(x|y).
	private final double beta;
	// Prior pseudo-counts (alpha - 1) plus any observed counts for y; used to seed the E-step counts.
	private final double[] alphaMinusOneAndObservedCounts;
	// Prior pseudo-counts (beta - 1) plus any observed counts for (y, x); used to seed the E-step counts.
	private final double[][] betaMinusOneAndObservedCounts;
	// Annealing temperature; 1.0 means no annealing.
	private final double temp;

	/**
	 * Creates an E-step with no annealing (temperature fixed at 1.0).
	 */
	public EMExpectable(double alpha, double beta, double[] alphaMinusOneAndObservedCounts,
			double[][] betaMinusOneAndObservedCounts) {
		this(alpha, beta, alphaMinusOneAndObservedCounts, betaMinusOneAndObservedCounts, 1.0);
	}

	/**
	 * Creates an E-step with the given annealing temperature.
	 *
	 * @param alpha Dirichlet hyperparameter on p(y)
	 * @param beta Dirichlet hyperparameter on p(x|y)
	 * @param alphaMinusOneAndObservedCounts initial counts (prior minus one plus observed) for y
	 * @param betaMinusOneAndObservedCounts initial counts (prior minus one plus observed) for (y, x)
	 * @param temp annealing temperature; 1.0 disables annealing
	 */
	public EMExpectable(double alpha, double beta, double[] alphaMinusOneAndObservedCounts,
			double[][] betaMinusOneAndObservedCounts, double temp) {
		this.alpha = alpha;
		this.beta = beta;
		this.alphaMinusOneAndObservedCounts = alphaMinusOneAndObservedCounts;
		this.betaMinusOneAndObservedCounts = betaMinusOneAndObservedCounts;
		this.temp = temp;
	}

	/** {@inheritDoc} */
	@Override
	public Maximizable<MoMParameters> expect(Dataset data, MoMParameters curParams, MoMParameters nextParams) {
		annealInPlace(curParams);

		// Sufficient statistics for the M-step. For memory efficiency the storage
		// inside nextParams is reset and re-used rather than freshly allocated.
		PartialCounts suffStats = resetNextParams(nextParams);

		// Accumulates sum_i log p(x_i), needed for computing the lower bound.
		double logLikelihood = 0.0;

		for (SparseFeatureVector instance : data.unlabeledData()) {
			// Start from the log joint: log p(y, x).
			double[] pOfYGivenX = curParams.logJoint(instance);
			// Subtracting the normalizer c = log p(x) yields log p(y|x) = log p(y, x) - c.
			double logMarginal = DoubleArrays.logNormalizeToSelf(pOfYGivenX);
			// Exponentiate in place so the array now holds the posterior p(y|x).
			DoubleArrays.expToSelf(pOfYGivenX);

			// Fold the posterior-weighted counts for this instance into the statistics.
			suffStats.incrementCounts(instance, pOfYGivenX);

			// This is the \sum_y (log p(y | lambda) + log p(x | y; theta) part of the lower bound.
			logLikelihood += logMarginal;
		}

		return new EMMaximizable(alpha, beta, curParams, suffStats, logLikelihood);
	}

	/**
	 * Anneals the current parameters in place by dividing the log-probabilities by temp.
	 * Doing it once here (rather than inside every posterior computation of p(y|x)) saves
	 * significant work, at the cost of the model no longer being normalized. A no-op when
	 * temp is (numerically) 1.0.
	 */
	private void annealInPlace(MoMParameters curParams) {
		if (!Math2.doubleEquals(temp, 1.0, 1e-10)) {
			DoubleArrays.divideToSelf(curParams.getLogPOfY(), temp);
			Matrices.divideToSelf(curParams.getLogPOfXGivenY(), temp);
		}
	}

	// Seeds nextParams' storage with the prior-plus-observed counts, returning it as PartialCounts.
	private PartialCounts resetNextParams(MoMParameters nextParams) {
		return PartialCounts.newInstanceFromInitialCounts(nextParams.getLogPOfY(), alphaMinusOneAndObservedCounts,
				betaMinusOneAndObservedCounts, nextParams.getLogPOfXGivenY());
	}

}
