/**
 * Copyright 2012 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.cluster.mom;

import java.util.Arrays;
import java.util.logging.Level;
import java.util.logging.Logger;

import org.apache.commons.math3.analysis.DifferentiableMultivariateFunction;
import org.apache.commons.math3.optimization.GoalType;
import org.apache.commons.math3.optimization.general.ConjugateGradientFormula;
import org.apache.commons.math3.optimization.general.NonLinearConjugateGradientOptimizer;

import com.google.common.base.Preconditions;

import edu.byu.nlp.cluster.Dataset;
import edu.byu.nlp.cluster.ProbabilisticModel;
import edu.byu.nlp.data.SparseFeatureVector;
import edu.byu.nlp.math.optimize.IterativeOptimizer;
import edu.byu.nlp.math.optimize.IterativeOptimizer.Optimizable;
import edu.byu.nlp.math.optimize.IterativeOptimizer.ReturnType;
import edu.byu.nlp.stats.DirichletDistribution;
import edu.byu.nlp.stats.DirichletMLEOptimizable;
import edu.byu.nlp.stats.SymmetricDirichletMLEDifferentiableFunction;
import edu.byu.nlp.stats.SymmetricDirichletMLEFPOptimizable;
import edu.byu.nlp.stats.SymmetricDirichletMLENROptimizable;
import edu.byu.nlp.util.DoubleArrays;
import edu.byu.nlp.util.Matrices;
import edu.byu.nlp.util.Nullable;

/**
 * @author rah67
 *
 */
/**
 * Uncollapsed parameters for a mixture-of-multinomials clustering model.
 * Holds the Dirichlet priors (alpha over classes, symmetric beta over
 * features), the current model parameters, and the current hard cluster
 * assignments {@code y}, and supports alternating parameter/assignment
 * updates plus empirical-Bayes optimization of the hyperparameters.
 *
 * @author rah67
 */
public class UncollapsedParameters {

	private static final Logger logger = Logger.getLogger(UncollapsedParameters.class.getName());

	// Evaluation budget for the conjugate-gradient beta optimizers.
	private static final int MAX_CG_EVALUATIONS = 250;

	// Reference to data
	private final Dataset data;

	// Priors
	private final double[] alpha;
	private double beta;

	// Parameters
	private final MoMParameters params;

	// Assignments to y
	private final int[] y;

	// Optimizer for hyperparameters; may be null when optimizeAlpha()/optimizeBeta()
	// are never invoked (both methods check for null explicitly).
	private final IterativeOptimizer io;

	private UncollapsedParameters(Dataset data, double[] alpha, double beta, MoMParameters params, int[] y,
			@Nullable IterativeOptimizer io) {
		this.data = data;
		this.alpha = alpha;
		this.beta = beta;
		this.params = params;
		this.y = y;
		this.io = io;
	}

	/**
	 * Adds, into the given accumulators, the counts implied by the current
	 * assignments {@code y} over the unlabeled data: one count per class in
	 * {@code classCounts}, and the instance's feature values into that class's
	 * row of {@code featureCounts}. Callers pre-initialize the accumulators
	 * with the prior pseudo-counts. Extracted so logJoint() and
	 * assignPhiAndTheta() share one implementation (was a FIXME/TODO).
	 */
	private void accumulateAssignmentCounts(double[] classCounts, double[][] featureCounts) {
		int i = 0;
		for (SparseFeatureVector instance : data.unlabeledData()) {
			++classCounts[y[i]];
			instance.addTo(featureCounts[y[i]]);
			++i;
		}
	}

	/**
	 * Computes the (unnormalized) log joint of the current assignments and
	 * parameters: sum over classes of posterior pseudo-counts times the
	 * corresponding log-probabilities.
	 */
	public double logJoint() {
		double[] alphaStar = alpha.clone();
		double[][] betaStar = Matrices.constant(beta, params.getNumClasses(), params.getNumFeatures());
		accumulateAssignmentCounts(alphaStar, betaStar);

		double[] theta = params.getLogPOfY();
		double[][] phi = params.getLogPOfXGivenY();
		double logJoint = 0.0;
		for (int k = 0; k < alphaStar.length; k++) {
			logJoint += alphaStar[k] * theta[k];
			for (int v = 0; v < betaStar[k].length; v++) {
				logJoint += betaStar[k][v] * phi[k][v];
			}
		}

		return logJoint;
	}

	/**
	 * Performs one round of updates: first theta/phi given the current y,
	 * then y given the new theta/phi. The Assigner decides the update rule
	 * (e.g., sampling vs. maximization).
	 */
	public void nextAssignments(final Assigner assigner) {
		assignPhiAndTheta(assigner);
		assignY(assigner);
	}

	/**
	 * Rebuilds theta and phi in place from prior pseudo-counts plus assignment
	 * counts, then lets the assigner turn the counts into parameters.
	 */
	private void assignPhiAndTheta(final Assigner assigner) {
		double[] theta = params.getLogPOfY();
		double[][] phi = params.getLogPOfXGivenY();

		// Seed with the prior pseudo-counts, then add assignment counts.
		System.arraycopy(alpha, 0, theta, 0, alpha.length);
		Matrices.fill(phi, beta);
		accumulateAssignmentCounts(theta, phi);

		assigner.assignThetaInPlace(theta);
		assigner.assignPhiInPlace(phi);
	}

	/**
	 * Reassigns each unlabeled instance's cluster from its complete
	 * conditional under the current parameters.
	 */
	private void assignY(final Assigner assigner) {
		int i = 0;
		for (SparseFeatureVector instance : data.unlabeledData()) {
			double[] completeConditional = params.logJoint(instance);
			y[i] = assigner.assignY(completeConditional);
			++i;
		}
	}

	/**
	 * Optimizes alpha in place via MLE, initialized by method of moments.
	 * Requires an IterativeOptimizer to have been supplied at construction.
	 */
	public void optimizeAlpha() {
		Preconditions.checkNotNull(io);

		// Single-row "dataset" of log p(y); renamed from "data" to avoid
		// shadowing the Dataset field of the same name.
		double[][] logTheta = new double[][]{ params.getLogPOfY() };
		DirichletDistribution.methodOfMoments(logTheta, alpha);
		io.optimize(DirichletMLEOptimizable.newOptimizable(logTheta, true), ReturnType.LAST, true, alpha);
		logger.info("Alpha = " + Arrays.toString(alpha));
	}

	/**
	 * Optimizes the symmetric beta hyperparameter, trying Newton-Raphson,
	 * fixed-point, and two conjugate-gradient formulas in turn; each attempt
	 * is best-effort and the last successful result wins. Falls back to the
	 * current beta if every attempt fails. Requires an IterativeOptimizer
	 * to have been supplied at construction.
	 */
	public void optimizeBeta() {
		Preconditions.checkNotNull(io);

		double b = beta;
		double initial = beta;
		try {
			initial = DirichletDistribution.methodOfMomentsSymmetric(params.getLogPOfXGivenY());
		} catch (Exception e) {
			// Best-effort initialization; keep the current beta as the initial guess.
			logger.log(Level.WARNING, "Method-of-moments initialization of beta failed", e);
		}
		logger.info("Initial guess = " + initial);
		try {
			Optimizable<Double> optimizable =
					SymmetricDirichletMLENROptimizable.newOptimizable(params.getLogPOfXGivenY());
			b = io.optimize(optimizable, ReturnType.LAST, true, initial).getObject();
			logger.info("Beta (Newton Raphson) = " + b);
		} catch (Exception e) {
			logger.log(Level.WARNING, "Newton-Raphson optimization of beta failed", e);
		}
		try {
			Optimizable<Double> optimizable =
					SymmetricDirichletMLEFPOptimizable.newOptimizable(params.getLogPOfXGivenY());
			b = io.optimize(optimizable, ReturnType.LAST, true, initial).getObject();
			logger.info("Beta (Fixed Point) = " + b);
		} catch (Exception e) {
			logger.log(Level.WARNING, "Fixed-point optimization of beta failed", e);
		}
		b = optimizeBetaCG(ConjugateGradientFormula.POLAK_RIBIERE, "Polak-Ribiere", initial, b);
		b = optimizeBetaCG(ConjugateGradientFormula.FLETCHER_REEVES, "Fletcher-Reeves", initial, b);
		beta = b;
		logger.info("Beta = " + beta);
	}

	/**
	 * One best-effort conjugate-gradient attempt at maximizing the symmetric
	 * Dirichlet likelihood in beta. Returns the optimized value on success
	 * and {@code fallback} on any failure. Extracted to de-duplicate the two
	 * formula variants in optimizeBeta().
	 */
	private double optimizeBetaCG(ConjugateGradientFormula formula, String name, double initial, double fallback) {
		try {
			NonLinearConjugateGradientOptimizer o = new NonLinearConjugateGradientOptimizer(formula);
			DifferentiableMultivariateFunction f =
					SymmetricDirichletMLEDifferentiableFunction.newDifferentiableFunction(params.getLogPOfXGivenY());
			double b = o.optimize(MAX_CG_EVALUATIONS, f, GoalType.MAXIMIZE, new double[]{initial}).getPointRef()[0];
			logger.info("Beta (CG, " + name + ") = " + b);
			return b;
		} catch (Exception e) {
			logger.log(Level.WARNING, "Conjugate-gradient (" + name + ") optimization of beta failed", e);
			return fallback;
		}
	}

	/** Returns the (live, mutable) cluster-assignment array. */
	public int[] getY() {
		return y;
	}

	/** Wraps the current parameters in a mixture-of-multinomials model. */
	public ProbabilisticModel getModel() {
		return MixtureOfMultinomialsModel.newWithParameters(params, false);
	}

	/**
	 * Creates an instance with a symmetric alpha prior.
	 *
	 * @throws IllegalArgumentException if alpha or beta is not positive, or
	 *     numClusters is not at least 2
	 */
	public static UncollapsedParameters newInstance(Dataset data, int[] initialY, int numClusters, double alpha,
			double beta, @Nullable IterativeOptimizer io) {
		Preconditions.checkArgument(alpha > 0.0, "Alpha must be > 0.0; was %s", alpha);
		return newInstance(data, initialY, numClusters, DoubleArrays.constant(alpha, numClusters), beta, io);
	}

	/**
	 * Creates an instance with a per-class alpha prior.
	 *
	 * @throws NullPointerException if data, initialY, or alpha is null
	 * @throws IllegalArgumentException if any hyperparameter is not positive,
	 *     numClusters is not at least 2, or alpha.length != numClusters
	 */
	public static UncollapsedParameters newInstance(Dataset data, int[] initialY, int numClusters, double[] alpha,
			double beta, @Nullable IterativeOptimizer io) {
		Preconditions.checkNotNull(data);
		Preconditions.checkNotNull(initialY);
		Preconditions.checkNotNull(alpha);
		Preconditions.checkArgument(numClusters > 1, "numClusters must be > 1; was %s", numClusters);
		// A mismatched alpha would silently under-fill theta in assignPhiAndTheta().
		Preconditions.checkArgument(alpha.length == numClusters,
				"alpha.length (%s) must equal numClusters (%s)", alpha.length, numClusters);
		for (int k = 0; k < alpha.length; k++) {
			Preconditions.checkArgument(alpha[k] > 0.0, "alpha[%s] must be > 0.0; was %s", k, alpha[k]);
		}
		Preconditions.checkArgument(beta > 0.0, "Beta must be > 0.0; was %s", beta);

		double[] theta = new double[numClusters];
		double[][] phi = new double[numClusters][data.getNumFeatures()];
		MoMParameters params = MoMParameters.fromLogProbabilities(theta, phi, false, false);

		return new UncollapsedParameters(data, alpha, beta, params, initialY, io);
	}

}
