/**
 * 
 */
package edu.byu.nlp.cluster.mom;

import static edu.byu.nlp.math.optimize.ConvergenceCheckers.maxIterations;
import static edu.byu.nlp.math.optimize.ConvergenceCheckers.or;
import static edu.byu.nlp.math.optimize.ConvergenceCheckers.relativePercentChange;

import java.util.Collection;
import java.util.logging.Logger;

import org.apache.commons.math3.random.MersenneTwister;
import org.apache.commons.math3.random.RandomGenerator;

import com.google.common.collect.Lists;

import edu.byu.nlp.cluster.AlternatingEMClusterer;
import edu.byu.nlp.cluster.AnnealedEMClusterer;
import edu.byu.nlp.cluster.Clusterer;
import edu.byu.nlp.cluster.Clustering;
import edu.byu.nlp.cluster.ClusteringMetrics;
import edu.byu.nlp.cluster.Dataset;
import edu.byu.nlp.cluster.EMClusterer;
import edu.byu.nlp.cluster.em.AlternatingEM;
import edu.byu.nlp.cluster.em.ExpectationMaximization;
import edu.byu.nlp.cluster.em.ParameterInitializer;
import edu.byu.nlp.math.optimize.IterativeOptimizer;
import edu.byu.nlp.pipes.docs.TopNPerDocumentFeatureSelectorFactory;
import edu.byu.nlp.pipes.docs.TwentyNewsgroups;
import edu.byu.nlp.util.DoubleArrays;
import edu.byu.nlp.util.joptparse.OptionParser;
import edu.byu.nlp.util.joptparse.annotations.Option;

/**
 * Command-line driver that clusters a 20-newsgroups dataset using one of
 * several mixture-of-multinomials inference algorithms and reports standard
 * clustering metrics (confusion matrix, F-measure, adjusted Rand index).
 *
 * @author rah67
 */
public class ClustererEvaluator {

	private static final Logger logger = Logger.getLogger(ClustererEvaluator.class.getName());

	// NOTE: the @Option fields below are intentionally non-final and mutable;
	// OptionParser populates them reflectively from the command line.

	@Option(help="base directory of the documents")
	private static String basedir = "20_newsgroups";
	
	@Option
	private static String dataset = "reduced_set";
	
	@Option
	private static String split = "all";

	@Option
	private static int minFeaturesToKeepPerDocument = 10;
	
	// Number of clusters; may be raised in main() to match the number of
	// gold labels when running semi-supervised (percentUnlabeled < 1.0).
	@Option
	private static int K = 10;
	
	@Option
	private static int numSamples = 200;
	
	@Option
	private static int maxIterations = 50;
	
	@Option
	private static double tolerance = 1e-6;
	
	// Dirichlet hyperparameters for p(y) and p(x|y), respectively.
	@Option
	private static double alpha = 1.0;
	
	@Option
	private static double beta = 0.1;
	
	@Option
	private static double lambda = 0.1;
	
	@Option(optStrings={"--annealing-schedule", "-s"},
			action = Option.APPEND)
	private static Collection<Double> annealingSchedule = Lists.newArrayList();
	
	@Option
	private static double effectiveSampleSize = 2;
	
	// Fraction of instances whose labels are hidden before clustering;
	// 1.0 means fully unsupervised.
	@Option
	private static double percentUnlabeled = 1.0;
	
	/** Supported inference algorithms; lowercase names match command-line values. */
	private enum ClustererType { em, vb, collapsed, uncollapsed, gradient_inductive }
	
	@Option
	private static boolean isHard = false;
	
	@Option
	private static ClustererType clustererType = ClustererType.collapsed;
	
	public static void main(String[] args) {
		// Parse options (reflectively assigns the static @Option fields above).
		new OptionParser(ClustererEvaluator.class).parseArgs(args);
		RandomGenerator rnd = new MersenneTwister();
		
		Clusterer clusterer = newClusterer(rnd);

		Dataset data = readData(rnd);
		// In the semi-supervised setting, ensure at least one cluster per
		// observed gold label.
		if (percentUnlabeled < 1.0 && K < data.getNumLabels()) {
			K = data.getNumLabels();
		}
		Clustering clustering = clusterer.cluster(data, K);
		evaluate(clustering);
	}

	/**
	 * Reads the configured 20-newsgroups split, logs its basic statistics,
	 * shuffles it, and hides the labels of the first {@code percentUnlabeled}
	 * fraction of instances.
	 *
	 * @param rnd source of randomness for the shuffle
	 * @return a defensive copy of the prepared dataset
	 */
	private static Dataset readData(RandomGenerator rnd) {
		TwentyNewsgroups newsgroups =
				new TwentyNewsgroups(basedir, dataset, split,
						new TopNPerDocumentFeatureSelectorFactory<String>(minFeaturesToKeepPerDocument));
		Dataset data = newsgroups.dataset();

		logger.info("Number of instances = " + data.labeledData().size());
		logger.info("Number of tokens = " + data.getNumTokens());
		logger.info("Number of features = " + data.getNumFeatures());
		logger.info("Number of classes = " + data.getNumLabels());

		data.shuffle(rnd);
		data.hideLabels((int)(data.labeledData().size() * percentUnlabeled));
		data = data.copy();
		return data;
	}

	/**
	 * Builds the clusterer selected by {@code clustererType}, wiring in the
	 * shared parameter initializer and convergence-checked optimizer.
	 * Exits the process for unsupported option combinations (hard EM,
	 * annealing with the samplers).
	 *
	 * @param rnd source of randomness for parameter initialization/sampling
	 * @return the configured clusterer
	 * @throws IllegalArgumentException if {@code clustererType} is unhandled
	 */
	private static Clusterer newClusterer(RandomGenerator rnd) {
		ParameterInitializer<MoMParameters> initializer = new MoMParameterInitializer(
					MoMParameterInitializer.uniformPOfY(),
					MoMParameterInitializer.samplePOfXGivenYFromUniformDirichlet(rnd));
		// Stop when relative change falls below tolerance OR after maxIterations.
		IterativeOptimizer optimizer = new IterativeOptimizer(or(relativePercentChange(tolerance),
				maxIterations(maxIterations)));
		ExpectationMaximization em = new ExpectationMaximization(optimizer);
		switch (clustererType) {
		case em:
			if (isHard) {
				logger.warning("Hard EM not yet implemented");
				System.exit(-1);
			}
			if (annealingSchedule.size() > 0) {
				return new AnnealedEMClusterer<MoMParameters>(em, initializer, new EMEMAble(alpha, beta),
						DoubleArrays.fromDoubleCollection(annealingSchedule));
			}
			return new EMClusterer<MoMParameters>(em, initializer, new EMEMAble(alpha, beta));
		case collapsed:
			if (annealingSchedule.size() > 0) {
				logger.warning("Annealing schedule specified, but not implemented for collapsed sampler.");
				System.exit(-1);
			}
			return new CollapsedClusterer(alpha, beta, optimizer, initializer, isHard, rnd);
		case vb:
			if (annealingSchedule.size() > 0) {
				return new AnnealedEMClusterer<MoMParameters>(em, initializer, new VariationalBayesEMAble(alpha, beta,
						isHard), DoubleArrays.fromDoubleCollection(annealingSchedule));
			}
			// Alternate between variational E/M steps and hyperparameter updates.
			AlternatingEM aem = new AlternatingEM(em, optimizer);
			VariationalBayesAltEMAble altEMAble = new VariationalBayesAltEMAble(isHard, 1.0);
			VariationalBayesHyperparameterOptimizer o2 = new VariationalBayesHyperparameterOptimizer();
			ParameterInitializer<HyperParams> hpInitializer = new MoMHyperParamsInitializer(alpha, beta);
			return new AlternatingEMClusterer<HyperParams, MoMParameters>(aem, hpInitializer, initializer, altEMAble,
					o2);
		case uncollapsed:
			if (annealingSchedule.size() > 0) {
				// BUGFIX: message previously said "collapsed sampler" (copy-paste).
				logger.warning("Annealing schedule specified, but not implemented for uncollapsed sampler.");
				System.exit(-1);
			}
			return new UncollapsedClusterer(alpha, beta, optimizer, initializer, isHard, rnd);
		case gradient_inductive:
			return new GradientInductive(alpha, beta, maxIterations, initializer);
		default:
			throw new IllegalArgumentException(String.format("Clusterer type %s not implemented", clustererType));
		}
	}
	
	/**
	 * Prints the confusion matrix, F-measure, and adjusted Rand index for the
	 * given clustering to standard output.
	 */
    private static void evaluate(Clustering clustering) {
        System.out.println(ClusteringMetrics.buildConfusionMatrix(clustering));
        
        System.out.println();
        System.out.println("F-Measure: " + ClusteringMetrics.calculateFMeasure(clustering));
        System.out.println("Adjusted Rand Index: " + 
        		ClusteringMetrics.calculateAdjustedRandIndexInLogSpace(clustering));
    }
}
