/**
 * 
 */
package edu.umd.clip.lm.model.training;

import java.util.Arrays;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Map;

import edu.berkeley.nlp.util.Pair;
import edu.umd.clip.lm.factors.FactorTuple;
import edu.umd.clip.lm.factors.FactorTupleDescription;
import edu.umd.clip.lm.model.Experiment;
import edu.umd.clip.lm.model.data.Context;
import edu.umd.clip.lm.questions.Question;
import edu.umd.clip.lm.util.Long2IntMap;
import edu.umd.clip.lm.util.Sampler;

/**
 * Immutable identifier for one conditioning variable of a language-model
 * context: either the tuple's hidden factors as a group, or a single
 * observed factor ({@code factorIdx}), taken at time {@code offset}
 * relative to the predicted position.
 *
 * <p>The {@code mask} selects this variable's bits out of a packed
 * factor-tuple {@code long}, using the bit layout published by
 * {@link FactorTupleDescription}.
 */
public final class ContextVariable {
	/** True if this variable refers to the tuple's hidden factors. */
	final boolean isHidden;
	/** Time offset into Context.data, applied as data[data.length + offset]
	 *  — presumably non-positive (0 = last position); TODO confirm with callers. */
	final byte offset;
	/** Index of the observed factor; unused when {@code isHidden} is true. */
	final byte factorIdx;
	/** Bit mask selecting this variable's bits inside a packed tuple long. */
	final long mask;

	/**
	 * Builds the variable described by a {@link Question}: its hidden flag,
	 * time offset, and factor index.
	 */
	public ContextVariable(Question question) {
		this(question.isAboutHidden(), question.getTimeOffset(), question.getIndex());
	}

	/**
	 * @param isHidden  whether this variable covers the hidden factors
	 * @param offset    time offset relative to the predicted position
	 * @param factorIdx observed-factor index (ignored when {@code isHidden})
	 */
	public ContextVariable(boolean isHidden, byte offset, byte factorIdx) {
		this.isHidden = isHidden;
		this.offset = offset;
		this.factorIdx = factorIdx;

		FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
		if (isHidden) {
			this.mask = desc.getHiddenFactorsMask();
		} else {
			byte start = desc.getFactorBitStart()[factorIdx];
			byte length = desc.getFactorBitLength()[factorIdx];
			// Must use long arithmetic here: with int, (1 << length) wraps for
			// length >= 32 (shift counts are taken mod 32), and the shifted
			// mask is truncated whenever start + length > 32 — before the
			// result is widened to this 64-bit field.
			this.mask = ((1L << length) - 1L) << start;
		}
	}

	/**
	 * Returns this variable's value from a packed tuple as an int: the
	 * hidden factors re-packed via the tuple description, or the observed
	 * factor's value extracted by {@link FactorTuple#getValue}.
	 */
	public int getIntValue(long tupleBits) {
		if (isHidden) {
			FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
			return desc.packHiddenFactorsToInt(getValue(tupleBits));
		}
		return FactorTuple.getValue(tupleBits, factorIdx);
	}

	/** Int value of this variable at position data.length + offset of the context. */
	public int getIntValue(Context context) {
		long data[] = context.data;
		return getIntValue(data[data.length + offset]);
	}

	/** Raw masked bits of this variable within the packed tuple (not shifted down). */
	public long getValue(long tupleBits) {
		return tupleBits & mask;
	}

	/** Raw masked bits of this variable at position data.length + offset of the context. */
	public long getValue(Context context) {
		long data[] = context.data;
		return getValue(data[data.length + offset]);
	}

	@Override
	public int hashCode() {
		final int prime = 31;
		int result = 1;
		result = prime * result + factorIdx;
		result = prime * result + (isHidden ? 1231 : 1237);
		result = prime * result + offset;
		return result;
	}

	@Override
	public boolean equals(Object obj) {
		if (this == obj)
			return true;
		if (obj == null)
			return false;
		if (!(obj instanceof ContextVariable))
			return false;
		ContextVariable other = (ContextVariable) obj;
		if (factorIdx != other.factorIdx)
			return false;
		if (isHidden != other.isHidden)
			return false;
		if (offset != other.offset)
			return false;
		return true;
	}

	@Override
	public String toString() {
		FactorTupleDescription desc = Experiment.getInstance().getTupleDescription();
		return (isHidden ? "hidden" : desc.getDescription(factorIdx).getId()) + "@" + Byte.toString(offset);
	}

	public boolean isHidden() {
		return isHidden;
	}

	public byte getOffset() {
		return offset;
	}
}