/**
 * Copyright 2012 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.cluster.mom;

import com.google.common.annotations.VisibleForTesting;

import edu.byu.nlp.cluster.em.Maximizable;
import edu.byu.nlp.cluster.em.PartialCounts;
import edu.byu.nlp.math.GammaFunctions;
import edu.byu.nlp.math.optimize.ValueAndObject;

/**
 * @author rah67
 *
 */
/**
 * M-step for variational Bayes in a mixture-of-multinomials model: evaluates the
 * variational lower bound from accumulated partial counts and produces the updated
 * variational parameters.
 *
 * <p>Note: the count arrays obtained from {@link PartialCounts} are captured by
 * reference (not copied) and are destructively updated by {@link #maximize()}.
 *
 * @author rah67
 */
public class VariationalBayesMaximizable implements Maximizable<MoMParameters> {

	private final double alpha;
	private final double beta;
	private final double[] a;
	private final double[][] b;
	private final double entropyOfTheta;
	private final double temp;

	/**
	 * @param alpha symmetric Dirichlet hyperparameter over cluster assignments
	 * @param beta symmetric Dirichlet hyperparameter over per-cluster multinomials
	 * @param counts partial counts; the underlying arrays are aliased, not copied,
	 *        and will be mutated in place by {@link #maximize()}
	 * @param entropyOfTheta entropy term folded into the lower bound
	 * @param temp annealing temperature forwarded to the parameter update
	 */
	public VariationalBayesMaximizable(double alpha, double beta, PartialCounts counts, double entropyOfTheta, double temp) {
		this.alpha = alpha;
		this.beta = beta;
		this.a = counts.getYCounts();
		this.b = counts.getXGivenYCounts();
		this.entropyOfTheta = entropyOfTheta;
		this.temp = temp;
	}

	/** {@inheritDoc} */
	@Override
	public ValueAndObject<MoMParameters> maximize() {
		// FIXME : lower bound does not take into account observed data
		// The bound MUST be evaluated before the parameter update below, because
		// newVariationalParameters destructively overwrites a and b with
		// differences of logarithms of digammas.
		final double bound = lowerBound();
		final MoMParameters params = MixtureOfMultinomialsUtil.newVariationalParameters(a, b, temp);
		return new ValueAndObject<MoMParameters>(bound, params);
	}

	/** Computes the lower bound.
	 * <pre>
	 *   L(lambda, theta, y) = E[log p(lambda, theta, y, x)] - E[log q(lambda, theta, y)]
	 *     = E[log p(lambda)] + E[log p(theta)] + E[log p(y | lambda)] + E[log p(x | y, theta, lambda)]
	 *       - E[log q(lambda)] - E[log q(theta)] - E[log q(y)]
	 *     = E[log p(lambda, y)] + E[log p(theta, x, y)] - E[log q(lambda)] - E[log q(theta)] - E[log q(y)]
	 * </pre>
	 * where the expectations are taken with respect to the q distribution.
	 */
	@VisibleForTesting
	double lowerBound() {
		// TODO : could cache the logBeta over alpha and beta
		// Contribution of the cluster-assignment Dirichlet.
		double bound = GammaFunctions.logBeta(a) - GammaFunctions.logBetaSymmetric(alpha, a.length);
		// Per-cluster contributions of the emission Dirichlets.
		for (double[] row : b) {
			bound += GammaFunctions.logBeta(row);
		}
		bound -= b.length * GammaFunctions.logBetaSymmetric(beta, b[0].length);
		bound += entropyOfTheta;

		return bound;
	}

	/** Exposes the cluster counts for tests; returns the live (aliased) array. */
	@VisibleForTesting
	double[] getA() {
		return a;
	}

	/** Exposes the per-cluster emission counts for tests; returns the live (aliased) array. */
	@VisibleForTesting
	double[][] getB() {
		return b;
	}

	/** Exposes the entropy term for tests. */
	@VisibleForTesting
	double getEntropyOfTheta() {
		return entropyOfTheta;
	}
}
