/**
 * Copyright 2012 Brigham Young University
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *    http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
package edu.byu.nlp.cluster;

import org.apache.commons.math3.util.ArithmeticUtils;

import com.google.common.base.Preconditions;

import edu.byu.nlp.data.SparseFeatureVector;
import edu.byu.nlp.pipes.Instance;
import edu.byu.nlp.util.DoubleArrays;
import edu.byu.nlp.util.Indexer;
import edu.byu.nlp.util.IntArrays;
import edu.byu.nlp.util.Pair;

/**
 * @author rah67
 *
 */
/**
 * Static evaluation metrics for clusterings against gold-standard labels:
 * class/cluster contingency counts, clustering F-measure, adjusted Rand index,
 * and confusion-matrix construction.
 *
 * @author rah67
 */
public class ClusteringMetrics {

	// Utility class; not instantiable.
	private ClusteringMetrics() { }
	
	/**
	 * Computes the number of times each class (label) (first index) occurs with each
	 * cluster (second index).
	 */
	// TODO : consider creating an IntMatrix data type.
	public static int[][] classClusterCounts(Clustering clustering) {
		int numLabels = clustering.getData().getNumLabels();
		int numClusters = clustering.getNumClusters();
		int[][] classClusterCounts = new int[numLabels][numClusters];
		for (Pair<Instance<Integer, SparseFeatureVector>, Integer> pair :
				Clusterers.pairUp(clustering.getData().unlabledInstances(), clustering.getAssignments())) {
			++classClusterCounts[pair.getFirst().getLabel()][pair.getSecond()];
		}
		return classClusterCounts;
	}
	
	/**
	 * Row sums of the class/cluster contingency table: the total number of instances
	 * bearing each class (label).
	 *
	 * @param classClusterCounts table indexed [label][cluster]
	 * @return an array of length {@code classClusterCounts.length} of per-class totals
	 */
	public static int[] classCounts(int[][] classClusterCounts) {
		int[] classCounts = new int[classClusterCounts.length];
		for (int i = 0; i < classClusterCounts.length; i++) {
			for (int j = 0; j < classClusterCounts[i].length; j++) {
				classCounts[i] += classClusterCounts[i][j];
			}
		}
		return classCounts;
	}
	
	/**
	 * Column sums of the class/cluster contingency table: the total number of instances
	 * assigned to each cluster.
	 *
	 * @param classClusterCounts table indexed [label][cluster]
	 * @return an array of length {@code classClusterCounts[0].length} of per-cluster totals
	 */
	public static int[] clusterCounts(int[][] classClusterCounts) {
		// BUG FIX: the result is indexed by cluster (column), so it must be sized by the
		// number of columns. The previous version sized it by the number of rows (labels),
		// which threw ArrayIndexOutOfBoundsException whenever numClusters > numLabels and
		// returned a too-long, zero-padded array whenever numClusters < numLabels (which in
		// turn broke calculateFMeasure's iteration over clusters).
		int numClusters = (classClusterCounts.length == 0) ? 0 : classClusterCounts[0].length;
		int[] clusterCounts = new int[numClusters];
		for (int i = 0; i < classClusterCounts.length; i++) {
			for (int j = 0; j < classClusterCounts[i].length; j++) {
				clusterCounts[j] += classClusterCounts[i][j];
			}
		}
		return clusterCounts;
	}
	
	/**
	 * Computes the clustering F-measure: for each class, the best F1 score achieved by any
	 * single cluster (treating membership in that cluster as retrieval of the class), then
	 * averaged over classes weighted by class frequency.
	 */
	public static double calculateFMeasure(Clustering clustering) {
		int numLabels = clustering.getData().getNumLabels();
		int numClusters = clustering.getNumClusters();
		int numDocs = clustering.getAssignments().length;
		
		int[][] classClusterCounts = classClusterCounts(clustering);
		int[] classCounts = classCounts(classClusterCounts);
		int[] clusterCounts = clusterCounts(classClusterCounts);

		// F1 of each (class, cluster) pair. Entries stay 0.0 where the score is undefined
		// (0/0 for an empty class or cluster), so such pairs never contribute.
		double[][] fmeasureClassCluster = new double[numLabels][numClusters];
		
		for (int cluster = 0; cluster < clusterCounts.length; cluster++) {
			for (int label = 0; label < classCounts.length; label++) {
				double currentRecall = (double) classClusterCounts[label][cluster] / (double) classCounts[label];
				double currentPrecision = (double) classClusterCounts[label][cluster] / (double) clusterCounts[cluster];
				double currentFScore = (2.0 * currentRecall * currentPrecision) / (currentPrecision + currentRecall);
				
				if (!Double.isNaN(currentFScore)) {
					fmeasureClassCluster[label][cluster] = currentFScore;
				}
			}
		}
		
		// Frequency-weighted average over classes of the best per-class F1.
		double fmeasure = 0;
		for (int label = 0; label < classCounts.length; label++) {
			double scalingFactor = (double) classCounts[label] / (double) numDocs;
			fmeasure += scalingFactor * DoubleArrays.max(fmeasureClassCluster[label]);
		}
		return fmeasure;
	}
	
	/**
	 * This function calculates the adjusted rand index of the clustering provided by clusterer. This is a measure of
	 * how well the location of each pair of documents matches the gold standard classification. For example, if two
	 * documents were in the same class in the gold standard, putting them in the same cluster counts for you and
	 * putting them in separate clusters counts against you. Likewise, if two documents were in separate classes in the
	 * gold standard, then putting them in the same cluster counts against you and putting them in separate clusters
	 * counts for you.
	 * 
	 * This implementation matches that described in "Details of the Adjusted Rand index and Clustering algorithms"
	 * Supplement to the paper "An empirical study on Principal Component Analysis for clustering gene expression data",
	 * by Ka Yee Yeung and Walter L. Ruzzo, May 3, 2001
	 * 
	 * See http://en.wikipedia.org/wiki/Rand_index#Adjusted_Rand_index (19 May 2011).
	 * 
	 * NOTE: despite its name, this method does not compute in log space; it simply derives
	 * the contingency table and marginals and delegates to
	 * {@link #calculateAdjustedRandIndex(int[][], int[], int[])}. The name is kept for
	 * backward compatibility with existing callers.
	 */
	public static double calculateAdjustedRandIndexInLogSpace(Clustering clustering) {
		Preconditions.checkNotNull(clustering);
		int[][] classClusterCounts = classClusterCounts(clustering);
		int[] classCounts = classCounts(classClusterCounts);
		int[] clusterCounts = clusterCounts(classClusterCounts);
		return calculateAdjustedRandIndex(classClusterCounts, classCounts, clusterCounts);
	}

	/**
	 * Computes the adjusted Rand index from a class/cluster contingency table and its
	 * marginals: ARI = (Index - ExpectedIndex) / (MaxIndex - ExpectedIndex), where
	 * Index = sum_ij C(n_ij, 2).
	 *
	 * @param classClusterCounts contingency table indexed [label][cluster]
	 * @param classCounts row marginals (see {@link #classCounts(int[][])})
	 * @param clusterCounts column marginals (see {@link #clusterCounts(int[][])})
	 * @return the adjusted Rand index, or {@code Double.NaN} for an empty table
	 * @throws IllegalArgumentException if the rows do not have exactly the same number of columns
	 */
	public static double calculateAdjustedRandIndex(int[][] classClusterCounts, int[] classCounts, int[] clusterCounts)
	{
		Preconditions.checkNotNull(classClusterCounts);

		// An empty table has no pairs to agree or disagree on; the index is undefined.
		if (classClusterCounts.length == 0 || classClusterCounts[0].length == 0) {
			return Double.NaN;
		}
		
		double sum_n_ij_choose_2 = 0.0;

		// Calculate Index = sum_ij (n_ij choose 2). C(n, 2) is 0 for n < 2, and
		// binomialCoefficientDouble rejects n < k, hence the guard.
		for (int i = 0; i < classClusterCounts.length; i++) {
			for (int j = 0; j < classClusterCounts[i].length; j++) {
				int n_ij = classClusterCounts[i][j];
				if (n_ij >= 2) {
					sum_n_ij_choose_2 += ArithmeticUtils.binomialCoefficientDouble(n_ij, 2);
				}
			}
		}
		
		// N = total number of instances (sum of the row marginals).
		double N_choose_2 = ArithmeticUtils.binomialCoefficientDouble((int) IntArrays.sum(classCounts), 2);
		
		// Calculate sum_i (a_i choose 2) over the class (row) marginals.
		double sum_a_i_choose_2 = 0.0;
		for (int i = 0; i < classCounts.length; i++) {
			if (classCounts[i] >= 2) {
				sum_a_i_choose_2 += ArithmeticUtils.binomialCoefficientDouble(classCounts[i], 2);
			}
		}
		
		// Calculate sum_j (b_j choose 2) over the cluster (column) marginals.
		double sum_b_j_choose_2 = 0.0;
		for (int j = 0; j < clusterCounts.length; j++) {
			if (clusterCounts[j] >= 2) {
				sum_b_j_choose_2 += ArithmeticUtils.binomialCoefficientDouble(clusterCounts[j], 2);
			}
		}
		
		double expectedIndex = sum_a_i_choose_2 * sum_b_j_choose_2 / N_choose_2;
		double maxIndex = (sum_a_i_choose_2 + sum_b_j_choose_2) / 2.0;
		
		return (sum_n_ij_choose_2 - expectedIndex) / (maxIndex - expectedIndex);
	}

	/** Builds a labels-by-clusters confusion matrix for the given clustering. */
	public static ConfusionMatrix buildConfusionMatrix(Clustering clustering) {
		return buildConfusionMatrix(classClusterCounts(clustering), clustering.getData().getLabelIndex());
	}

	/**
	 * Builds a confusion matrix whose (i, j) entry is {@code classClusterCounts[i][j]}.
	 * An empty table yields an empty (0 x 0) matrix.
	 */
	public static ConfusionMatrix buildConfusionMatrix(int[][] classClusterCounts, Indexer<String> labelIndex) {
		Preconditions.checkNotNull(classClusterCounts);
		
		if (classClusterCounts.length == 0) {
			return new ConfusionMatrix(0, 0, labelIndex);
		}
		
		ConfusionMatrix confusionMatrix = new ConfusionMatrix(classClusterCounts.length,
				classClusterCounts[0].length, labelIndex);
		for (int i = 0; i < classClusterCounts.length; i++) {
			for (int j = 0; j < classClusterCounts[i].length; j++) {
				confusionMatrix.addToEntry(i, j, classClusterCounts[i][j]);
			}
		}
		return confusionMatrix;
	}
}
