package weeny.util;

import java.util.Collection;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

// Histogram-based information-theory utilities: entropy(),
// mutualEntropy(), and conditionalEntropy(). Probabilities are
// estimated from fixed-width histogram bucket counts.
public class Entropy {

    /** Natural logarithm of 2, cached for base conversion in {@link #log2(double)}. */
    public static final double LOG2 = Math.log(2);

    /** Non-instantiable utility class. */
    private Entropy() {
    }

    /**
     * Base-2 logarithm of {@code x}.
     *
     * @param x the value; should be positive (0 yields -Infinity, negatives NaN,
     *          inherited from {@link Math#log(double)})
     * @return log2(x)
     */
    public static double log2(double x) {
        return Math.log(x) / LOG2;
    }

    /**
     * How much exclusive information info1 carries given info2:
     * H(X|Y) = H(X) - I(X;Y). Note that the order of the parameters matters,
     * unlike {@link #mutualEntropy(Map, Map, int, int)}, whose arguments may
     * be swapped while yielding the same result.
     *
     * <p>The value is non-negative (up to histogram discretization error). It
     * is zero when info1 and info2 carry the same information, and reaches its
     * maximum — the original entropy of info1 — when the two are independent
     * (mutual entropy 0).
     *
     * @param info1    first distribution, mapping items to scores
     * @param entropy1 precomputed entropy of info1
     *                 (see {@link #entropy(Collection, int)})
     * @param info2    second distribution, mapping items to scores
     * @param info1Bin number of histogram buckets for info1 values; must be positive
     * @param info2Bin number of histogram buckets for info2 values; must be positive
     * @return entropy1 minus the mutual entropy of info1 and info2
     * @throws IllegalArgumentException if a bin count is not positive
     * @throws java.util.NoSuchElementException if either map is empty
     */
    public static double conditionalEntropy(
            Map<? extends Object, ? extends Number> info1,
            double entropy1,
            Map<? extends Object, ? extends Number> info2,
            int info1Bin, int info2Bin) {
        double mutual = mutualEntropy(info1, info2, info1Bin, info2Bin);
        return entropy1 - mutual;
    }

    /**
     * Mutual information between two object-to-number maps, estimated from a
     * 2-D histogram over the union of their key sets:
     * I(X;Y) = sum_{i,j} p_ij * log2(p_ij / (p_i * p_j)).
     *
     * <p>A key present in only one map is treated as holding the other map's
     * minimum value. The result is 0 when the two inputs are independent and
     * grows with their similarity; when the maps are identical it equals the
     * histogram entropy of the shared distribution. Its upper bound is the
     * smaller of the two individual entropies (it is NOT normalized to 1).
     * Swapping info1/info2 (together with their bin counts) yields the same
     * result.
     *
     * @param info1    first distribution, mapping items to scores
     * @param info2    second distribution, mapping items to scores
     * @param info1Bin number of histogram buckets for info1 values; must be positive
     * @param info2Bin number of histogram buckets for info2 values; must be positive
     * @return the estimated mutual information in bits
     * @throws IllegalArgumentException if a bin count is not positive
     * @throws java.util.NoSuchElementException if either map is empty
     */
    public static double mutualEntropy(
            Map<? extends Object, ? extends Number> info1,
            Map<? extends Object, ? extends Number> info2,
            int info1Bin, int info2Bin) {
        if (info1Bin <= 0 || info2Bin <= 0) {
            throw new IllegalArgumentException(
                    "bin counts must be positive: " + info1Bin + ", " + info2Bin);
        }
        double info1Max = Collections.max(info1.values(), COMP).doubleValue();
        double info1Min = Collections.min(info1.values(), COMP).doubleValue();
        double info2Max = Collections.max(info2.values(), COMP).doubleValue();
        double info2Min = Collections.min(info2.values(), COMP).doubleValue();
        double info1Delta = (info1Max - info1Min) / info1Bin;
        double info2Delta = (info2Max - info2Min) / info2Bin;

        // Joint histogram over the union of keys; a key missing from one map
        // contributes that map's minimum value on the missing side.
        int[][] binCounts = new int[info1Bin][info2Bin];
        Set<Object> totalWordSet = new HashSet<Object>(info1.keySet());
        totalWordSet.addAll(info2.keySet());
        for (Object word : totalWordSet) {
            double value1 = info1.containsKey(word)
                    ? info1.get(word).doubleValue() : info1Min;
            double value2 = info2.containsKey(word)
                    ? info2.get(word).doubleValue() : info2Min;
            binCounts[binIndex(value1, info1Min, info1Delta, info1Bin)]
                     [binIndex(value2, info2Min, info2Delta, info2Bin)]++;
        }

        // Marginal counts: row and column sums of the joint histogram.
        int[] bin1Counts = new int[info1Bin];
        int[] bin2Counts = new int[info2Bin];
        for (int i = 0; i < info1Bin; i++) {
            for (int j = 0; j < info2Bin; j++) {
                bin1Counts[i] += binCounts[i][j];
                bin2Counts[j] += binCounts[i][j];
            }
        }

        double totalWeight = totalWordSet.size();
        double result = 0;
        for (int i = 0; i < info1Bin; i++) {
            for (int j = 0; j < info2Bin; j++) {
                double pij = binCounts[i][j] / totalWeight;
                if (pij == 0) {
                    // Empty cell contributes nothing (0 * log 0 := 0); if
                    // pij > 0 then both marginals are positive too.
                    continue;
                }
                double pi = bin1Counts[i] / totalWeight;
                double pj = bin2Counts[j] / totalWeight;
                result += pij * log2(pij / pi / pj);
            }
        }
        return result;
    }

    /**
     * Histogram entropy of a collection of numbers:
     * H = -sum_i p_i * log2(p_i), where p_i is the fraction of values landing
     * in bucket i of a {@code bins}-bucket histogram over [min, max].
     *
     * <p>The more evenly the numbers spread across the buckets, the larger the
     * result (at most log2(bins)); if all the numbers are equal the result is
     * the minimum, 0. In information-entropy terms: messier data carries more
     * information (high entropy), highly organized data less (low entropy).
     *
     * @param values the numbers to histogram; must be non-empty
     * @param bins   number of histogram buckets; must be positive
     * @return the entropy in bits
     * @throws IllegalArgumentException if bins is not positive
     * @throws java.util.NoSuchElementException if values is empty
     */
    public static double entropy(Collection<? extends Number> values, int bins) {
        if (bins <= 0) {
            throw new IllegalArgumentException("bins must be positive: " + bins);
        }
        double max = Collections.max(values, COMP).doubleValue();
        double min = Collections.min(values, COMP).doubleValue();
        double gapSize = (max - min) / bins;
        int[] buckets = new int[bins];
        for (Number d : values) {
            buckets[binIndex(d.doubleValue(), min, gapSize, bins)]++;
        }
        double totalWeight = values.size();
        double result = 0;
        for (int i = 0; i < bins; i++) {
            double pi = buckets[i] / totalWeight;
            if (pi > 0) {
                result -= pi * log2(pi);
            }
        }
        return result;
    }

    /**
     * Maps a value to its histogram bucket, clamping the maximum value into
     * the last bucket. A non-positive delta means every value is identical,
     * so everything falls into bucket 0 (made explicit here rather than
     * relying on {@code (int) (0.0 / 0.0) == 0}).
     */
    private static int binIndex(double value, double min, double delta, int bins) {
        if (delta <= 0) {
            return 0;
        }
        int idx = (int) ((value - min) / delta);
        return Math.min(idx, bins - 1);
    }

    /**
     * Orders Numbers by their double value. Accepts any {@link Number}
     * subclass (Long, Float, BigDecimal, ...), since {@code doubleValue()}
     * is defined for all of them.
     */
    private static final Comparator<Number> COMP = new Comparator<Number>() {
        @Override
        public int compare(Number o1, Number o2) {
            return Double.compare(o1.doubleValue(), o2.doubleValue());
        }
    };
}