/**
 * 
 */
package com.gragra.contextClusterer;
import java.io.BufferedWriter;
import java.io.IOException;
import java.util.Map;
import it.unimi.dsi.fastutil.doubles.DoubleArrayList;
import it.unimi.dsi.fastutil.ints.Int2DoubleOpenHashMap;
import it.unimi.dsi.fastutil.ints.Int2IntOpenHashMap;
import it.unimi.dsi.fastutil.ints.Int2ObjectOpenHashMap;
import it.unimi.dsi.fastutil.ints.IntCollection;
import it.unimi.dsi.fastutil.ints.IntHeapPriorityQueue;
import it.unimi.dsi.fastutil.ints.IntIterator;
import it.unimi.dsi.fastutil.ints.IntPriorityQueue;
import it.unimi.dsi.fastutil.objects.ObjectArrayList;
import com.google.common.collect.HashMultimap;
import com.google.common.collect.Multimap;
import com.gragra.clustering.ClusteringAlgorithm;
import com.gragra.data.Corpus;
import com.gragra.data.MixingCorpus;
import com.gragra.data.MixingCorpusBaseImplementation;
import com.gragra.frameStatistics.LeftAndRightStatistics;
import com.gragra.sampling.ThreadedUniformAccess;
import com.gragra.sampling.sequentialStructure.ExtensionInformationHolder;
import com.gragra.sampling.sequentialStructure.ExtensionLevels;
import com.gragra.sampling.sequentialStructure.WordAndTagHolder;
import com.gragra.sampling.vector.VectorStructure;
import com.gragra.util.StringToInt;
/**
 * This class is used to cluster frames into classes depending on the words they co-occur with.
 * @author Christoph Teichmann
 * created Mar 24, 2013 8:04:58 PM
 * @version 0.1
 */
public class LeftRightClustering
{
	/**
	 * if true, features are built from tag co-occurrence statistics instead of
	 * word co-occurrence statistics
	 */
	private final boolean useLabels;
	/**
	 * contains the mappings from frames to cluster numbers, filled by {@link #cluster()}
	 */
	private final Int2IntOpenHashMap assignments = new Int2IntOpenHashMap();
	/**
	 * contains the data that is used to generate the vectors that will be used for clustering
	 */
	private final Corpus<WordAndTagHolder> corp;
	/**
	 * the number of threads handed to every generated VectorStructure
	 */
	private final int threads;
	/**
	 * rank threshold for co-occurring items: in makeLeftFrames/makeRightFrames only items
	 * whose rank is below this value contribute features (item 0 is always kept).
	 * NOTE(review): the original constructor javadoc described this as a minimum absolute
	 * frequency, but the code applies it as a rank cut-off — confirm the intended semantics
	 */
	private final int limit;
	/**
	 * the algorithm used for clustering
	 */
	private final ClusteringAlgorithm ca;
	/**
	 * the statistics generator used to generate the properties of the frames that are being used
	 */
	private final LeftAndRightStatistics sg;
	/**
	 * this contains the number that is used to indicate the vector normalization unit that is used
	 * for the words occurring within the frames, note that the number of the ExtensionLevels is used
	 * as an offset, so they are ignored, in case they are to be used as additional features
	 */
	private final static int wordUnit = ExtensionLevels.values().length;
	/**
	 * used to hold all the values for the vector that is currently in construction;
	 * reused (cleared) between addFrame calls
	 */
	private final DoubleArrayList valuesList = new DoubleArrayList();
	/**
	 * holds the list of unit-element codes for the vector that is currently in construction;
	 * reused (cleared) between addFrame calls
	 */
	private final ObjectArrayList<int[]> codeList = new ObjectArrayList<int[]>();
	/**
	 * creates a new instance
	 * @param ca the vector clustering algorithm that will be used to cluster the data
	 * @param c the corpus from which the frame information is generated
	 * @param limit rank threshold below which co-occurring items are used as features
	 * @param look the number of positions inspected on each side of a word
	 * @param useLabels if true, tag statistics are used in place of word statistics
	 * @param threads the number of threads handed to each generated VectorStructure
	 */
	public LeftRightClustering(ClusteringAlgorithm ca, Corpus<WordAndTagHolder> c, int limit, int look,
																								boolean useLabels, int threads)
	{
		this.ca = ca;
		this.sg = new LeftAndRightStatistics(c, look);
		this.limit = limit;
		this.corp = c;
		this.useLabels = useLabels;
		this.threads = threads;
	}
	/**
	 * calling this method leads to the clustering of the frames with the given clustering algorithm,
	 * afterwards the mappings can be used to assign context tags in a corpus
	 * @throws InterruptedException 
	 * @throws IOException 
	 */
	public void cluster() throws IOException, InterruptedException
	{
		IntCollection ic = sg.getAllWords();
		MixingCorpus<VectorStructure> corpus = new MixingCorpusBaseImplementation<VectorStructure>();
		IntIterator ii = ic.iterator();
		while(ii.hasNext())
		{
			addFrame(ii.nextInt(), ic, corpus);
		}
		// NOTE(review): clustering always runs with a single-threaded access even though a
		// thread count was configured on this instance — confirm whether this.threads was
		// intended here instead of the literal 1
		ca.cluster(corpus, new ThreadedUniformAccess(1));
		// read back the winning cluster for every generated vector
		for(int i = 0; i < corpus.size(); ++i)
		{
			VectorStructure vs = corpus.getEntry(i);
			this.assignments.put(vs.getName(), ca.getBest(vs, i));
		}
	}
	/**
	 * gets the statistics for the word and creates a vector structure from them, which is then
	 * added to the corpus of VectorStructures
	 * @param word the word whose context vector is built
	 * @param ic the collection of all words (not used by this implementation)
	 * @param corpus the corpus the finished vector is added to
	 */
	private void addFrame(int word, IntCollection ic, Corpus<VectorStructure> corpus)
	{
		// the scratch lists are shared across calls, so they must be reset first
		valuesList.clear();
		codeList.clear();
		int look = this.sg.getPositions();
		makeLeftFrames(word, look);
		makeRightFrames(word, look);
		makeFeaturesCounts(word);
		this.makeAdditional();
		corpus.addData(new VectorStructure(word, valuesList.toArray(new double[valuesList.size()]),
											codeList.toArray(new int[codeList.size()][]), this.threads));
	}
	/**
	 * adds one feature per (unit, element) pair from the per-word feature counts; only the
	 * CaseTypeLevel unit contributes its actual count, all other units contribute 1.0
	 * @param word the word whose features are added
	 */
	private void makeFeaturesCounts(int word)
	{
		Int2ObjectOpenHashMap<Int2DoubleOpenHashMap> featCounts = this.sg.getFeatureCounts(word);
		if(featCounts == null)
		{return;}
		IntIterator iit = featCounts.keySet().iterator();
		while(iit.hasNext())
		{
			int unit = iit.nextInt();
			// only the CaseTypeLevel keeps its raw count, everything else is binary
			boolean useCount = ExtensionLevels.values()[unit] == ExtensionLevels.CaseTypeLevel;
			Int2DoubleOpenHashMap elements = featCounts.get(unit);
			IntIterator el = elements.keySet().iterator();
			while(el.hasNext())
			{
				int element = el.nextInt();
				this.codeList.add(new int[] {unit, element});
				this.valuesList.add(useCount ? elements.get(element) : 1.0);
			}
		}
	}
	/**
	 * hook for subclasses that want to contribute further features before the vector is
	 * built; does nothing by default
	 */
	protected void makeAdditional()
	{}
	/**
	 * adds one feature per left-context item within the given number of positions; items at
	 * or above the rank limit are skipped, except item 0 which is always kept
	 * @param word the word whose left context is inspected
	 * @param look the number of left positions to use
	 */
	protected void makeLeftFrames(int word, int look)
	{
		for(int i = 0; i < look; ++i)
		{
			IntCollection coocs = this.useLabels ? this.sg.getLeftTagForWord(word, i)
												: this.sg.getLeftForWord(word, i);
			if(coocs == null)
			{continue;}
			IntIterator iit = coocs.iterator();
			while(iit.hasNext())
			{
				// nextInt() avoids boxing the primitive value
				int fram = iit.nextInt();
				int rank = this.useLabels ? this.sg.getTagRank(fram) : this.sg.getRank(fram);
				// item 0 is always kept; everything else must rank below the limit
				if(rank >= this.limit && fram > 0)
				{continue;}
				double val = this.useLabels ? this.sg.getLeftCoTagCount(word, fram, i)
											: this.sg.getLeftCoCount(word, fram, i);
				codeList.add(new int[] {wordUnit + i, fram});
				valuesList.add(val);
			}
		}
	}
	/**
	 * adds one feature per right-context item within the given number of positions; mirrors
	 * makeLeftFrames with the unit codes offset by look
	 * @param word the word whose right context is inspected
	 * @param look the number of right positions to use
	 */
	protected void makeRightFrames(int word, int look)
	{
		for(int i = 0; i < look; ++i)
		{
			IntCollection coocs = this.useLabels ? this.sg.getRightTagForWord(word, i)
												: this.sg.getRightForWord(word, i);
			if(coocs == null)
			{continue;}
			IntIterator iit = coocs.iterator();
			while(iit.hasNext())
			{
				int fram = iit.nextInt();
				int rank = this.useLabels ? this.sg.getTagRank(fram) : this.sg.getRank(fram);
				// FIX: was 'rank > this.limit', which disagreed with the '>=' test used for
				// the left context; both sides now apply the same rank cut-off
				if(rank >= this.limit && fram > 0)
				{continue;}
				double val = this.useLabels ? this.sg.getRightCoTagCount(word, fram, i)
											: this.sg.getRightCoCount(word, fram, i);
				codeList.add(new int[] {wordUnit + i + look, fram});
				valuesList.add(val);
			}
		}
	}
	/**
	 * once a clustering has been generated, this method can be used to annotate the given corpus with
	 * tags in the ContextTagLevel of the ExtensionLevels enumerable. note that this overwrites all
	 * previous information for the level. if a word does not have a cluster assigned to it, then no
	 * annotation will be added
	 * @param to the corpus to annotate
	 */
	public void addContextPOSLayer(Corpus<WordAndTagHolder> to)
	{
		for(int i = 0; i < to.size(); ++i)
		{
			WordAndTagHolder wth = to.getEntry(i);
			ExtensionInformationHolder eih = wth.getExtensionInformation();
			for(int j = 0; j < wth.getLength(); ++j)
			{
				int word = wth.getWord(j);
				if(this.assignments.containsKey(word))
				{eih.addInformation(j, ExtensionLevels.ContextTagLevel, this.assignments.get(word));}
			}
		}
	}
	/**
	 * annotates the corpus that was used for inference as in the addContextPOSLayer method
	 */
	public void addContextPOSLayerToSelf()
	{this.addContextPOSLayer(corp);}
	/**
	 * writes out the cluster assignments that have been generated, one line per cluster in
	 * ascending cluster order, in the format "cluster : word word ..."
	 * @param to the writer the assignments are written to; flushed but not closed
	 * @throws IOException if writing fails
	 */
	public void writeOutClusters(BufferedWriter to) throws IOException
	{
		// invert the word -> cluster map so every cluster lists its members
		Multimap<Integer, Integer> mm = HashMultimap.create();
		for(Map.Entry<Integer, Integer> ent : this.assignments.entrySet())
		{mm.put(ent.getValue(), ent.getKey());}
		// emit the clusters in ascending numeric order
		IntPriorityQueue ipq = new IntHeapPriorityQueue();
		for(Integer i : mm.keySet())
		{ipq.enqueue(i);}
		boolean first = true;
		while(!ipq.isEmpty())
		{
			if(first)
			{first = false;}
			else
			{to.newLine();}
			// dequeueInt() avoids boxing the cluster number
			int i = ipq.dequeueInt();
			to.write(Integer.toString(i));
			to.write(" :");
			for(Integer k : mm.get(i))
			{
				to.write(" ");
				// map the word code back to its surface string
				to.write(StringToInt.reverse(k));
			}
		}
		to.flush();
	}
}