/**
 * 
 */
package com.gragra.nounIdentification;
import java.io.BufferedWriter;
import java.io.IOException;
import it.unimi.dsi.fastutil.ints.IntIterator;
import it.unimi.dsi.fastutil.ints.IntList;
import it.unimi.dsi.fastutil.ints.IntOpenHashSet;
import it.unimi.dsi.fastutil.ints.IntSet;
import com.gragra.data.Corpus;
import com.gragra.frameStatistics.StatisticsGenerator;
import com.gragra.sampling.sequentialStructure.ExtensionInformationHolder;
import com.gragra.sampling.sequentialStructure.ExtensionInformationValue;
import com.gragra.sampling.sequentialStructure.ExtensionLevels;
import com.gragra.sampling.sequentialStructure.WordAndTagHolder;
import com.gragra.sampling.sequentialStructure.WordType;
import com.gragra.util.StringToInt;
/**
 * this class is used to implement general functionality that can be used to identify nouns and noun-frames
 * by frame count statistics
 * @author Christoph Teichmann
 * created Mar 24, 2013 4:44:49 PM
 * @version 0.1
 */
public abstract class BaseNounIdentifier
{
	/**
	 * the set of frames that are currently considered noun candidates
	 */
	protected final IntOpenHashSet frames = new IntOpenHashSet();
	/**
	 * the set of word codes that are currently considered nouns
	 */
	protected final IntOpenHashSet nouns = new IntOpenHashSet();
	/**
	 * the statistics used; this is also the origin of the frame numbers
	 */
	private final StatisticsGenerator sg;
	/**
	 * the corpus the model was created from; kept so that {@link #evaluateOnSelf(boolean)} can
	 * evaluate against it
	 */
	private final Corpus<WordAndTagHolder> data;
	/**
	 * constructs a new instance from the given data and with the given restrictions, induction of noun
	 * annotation tools is NOT started
	 * @param data the corpus the frame statistics are generated from
	 * @param frameSize the frame size handed to the StatisticsGenerator
	 * @param preMarkers the number of pre-markers handed to the StatisticsGenerator
	 * @param postMarkers the number of post-markers handed to the StatisticsGenerator
	 */
	public BaseNounIdentifier(Corpus<WordAndTagHolder> data, int frameSize, int preMarkers, int postMarkers)
	{
		this.data = data;
		this.sg = createStatistics(data, frameSize, preMarkers, postMarkers);
	}
	/**
	 * used to generate the statistics system that will be used as the base for inference
	 * <p>
	 * NOTE: this method is invoked from the constructor, so overriding implementations must not
	 * depend on subclass state that is only initialized after the super constructor has run
	 * @param data
	 * @param frameSize
	 * @param preMarkers
	 * @param postMarkers
	 * @return the StatisticsGenerator used as the basis for all inference
	 */
	protected StatisticsGenerator createStatistics(
			Corpus<WordAndTagHolder> data, int frameSize, int preMarkers,
			int postMarkers)
	{return new StatisticsGenerator(data, frameSize, preMarkers, postMarkers);}
	/**
	 * this method can be used to start the induction of the annotation tools; iterates until
	 * {@link #hasConverged(int)} reports convergence, then prints the induced nouns
	 */
	public void identify()
	{
		int iterations = 0;
		while(!this.hasConverged(iterations))
		{this.iterate(iterations++);}
		// iterate with the primitive iterator to avoid boxing every entry of the fastutil set
		for(IntIterator iit = this.nouns.iterator(); iit.hasNext();)
		{System.out.println(StringToInt.reverse(iit.nextInt()));}
	}
	/**
	 * implements a single re-estimation step of the noun and frame classes, with i being the number of the
	 * current iteration: frames are re-scored against the current nouns, then nouns are re-scored
	 * against the newly selected frames
	 * @param i the number of the current iteration
	 */
	private void iterate(int i)
	{
		this.initializeNouns();
		IntList il = this.sortFramesByScore(this.nouns,i);
		this.frames.clear();
		this.frames.addAll(this.selectSublist(il,this.maxFrames(i)));
		il = this.sortWordsByScore(this.frames,i);
		this.nouns.clear();
		this.nouns.addAll(this.selectSublist(il, this.maxNouns(i)));
	}
	/**
	 * returns the maximum number of nouns induced in the iteration number i
	 * @param i the number of the current iteration
	 * @return the maximum number of nouns to keep
	 */
	protected abstract int maxNouns(int i);
	/**
	 * returns a list of all the words sorted by some score for the given iteration
	 * @param frames2 the frames that were identified in the current iteration
	 * @param iteration the number of the current iteration
	 * @return the words sorted best-first according to the implementation's score
	 */
	protected abstract IntList sortWordsByScore(IntSet frames2,int iteration);
	/**
	 * get the header of the given list up to the number maxFrames or the length of the list,
	 * whichever is smaller
	 * @param il the scored, sorted list
	 * @param maxFrames the maximum number of entries to keep
	 * @return a sublist view containing at most maxFrames entries
	 */
	private IntList selectSublist(IntList il, int maxFrames)
	{return il.subList(0, Math.min(maxFrames, il.size()));}
	/**
	 * the maximum number of frames that are used in the given iteration
	 * @param iteration the number of the current iteration
	 * @return the maximum number of frames to keep
	 */
	protected abstract int maxFrames(int iteration);
	/**
	 * sorts the frames in the returned list according to some score for the current iteration
	 * @param restriction the set of nouns found in the last iteration
	 * @param iteration the iteration number
	 * @return the frames sorted best-first according to the implementation's score
	 */
	protected abstract IntList sortFramesByScore(IntSet restriction, int iteration);
	/**
	 * used to fill the nouns for the first iteration; if no nouns have been induced yet, every
	 * word known to the statistics generator is treated as a noun candidate
	 */
	private void initializeNouns()
	{
		if(this.nouns.isEmpty())
		{this.nouns.addAll(getSg().getAllWords());}
	}
	/**
	 * returns true if the process has converged
	 * @param iterations the number of iterations completed so far
	 * @return true once no further re-estimation steps should be run
	 */
	protected abstract boolean hasConverged(int iterations);
	/**
	 * adds WordType.Noun annotation to the given Corpus; based on the discovered frames
	 * @param data the corpus to annotate
	 */
	public void annotateCorpusFrames(Corpus<WordAndTagHolder> data)
	{
		for(int i=0;i<data.size();++i)
		{annotateEntry(data,i);}
	}
	/**
	 * this is used to annotate the given entry in the corpus with WordType.Noun annotation based on the
	 * discovered frames
	 * @param data2 the corpus containing the entry
	 * @param i the index of the entry to annotate
	 */
	protected void annotateEntry(Corpus<WordAndTagHolder> data2, int i)
	{
		WordAndTagHolder wth = data2.getEntry(i);
		this.getSg().fillMappedSequence(wth);
		ExtensionInformationHolder eih = wth.getExtensionInformation();
		for(int j=0;j<wth.getLength();++j)
		{
			if(this.getSg().hasFrameFor(j,wth))
			{
				int num = this.getSg().fillFrameExternal(j);
				if(this.frames.contains(num))
				{eih.addInformation(j, ExtensionLevels.WordTypeLevel, this.getAssociatedType());}
			}
		}
	}
	/**
	 * returns a double array that first contains precision, then recall, then f-score, given the
	 * data as a gold standard, note that the evaluation is based on the number of proposed nouns and
	 * nouns present in the corpus
	 *
	 * @param data the gold-standard corpus
	 * @param microAverage set to true to activate micro-averaging
	 * @return {precision, recall, f-score}
	 */
	public double[] evaluate(Corpus<WordAndTagHolder> data, boolean microAverage)
	{
		double[] correctPresentProposed = new double[] {0.0,0.0,0.0};
		for(int i=0;i<data.size();++i)
		{processEntry(data, microAverage, correctPresentProposed, i);}
		double precision = calcPrecision(data, microAverage, correctPresentProposed);
		double recall = calcRecall(data, microAverage, correctPresentProposed);
		// guard the harmonic mean: when precision and recall are both 0.0 the original
		// expression 2*p*r/(p+r) would produce NaN; report 0.0 instead
		double fScore = (precision + recall) > 0.0
				? 2.0*(precision*recall)/(precision+recall) : 0.0;
		return new double[] {precision,recall,fScore};
	}
	/**
	 * used to calculate recall dependent on the microAverage value and the data in correctPresentProposed;
	 * by convention an empty corpus / zero present instances yields a recall of 1.0
	 * @param data2 the corpus that was evaluated
	 * @param microAverage whether micro-averaging is active
	 * @param correctPresentProposed the accumulated {correct, present, proposed} counts
	 * @return the recall value
	 */
	private double calcRecall(Corpus<WordAndTagHolder> data2, boolean microAverage,
			double[] correctPresentProposed)
	{
		if(microAverage)
		{
			if(correctPresentProposed[1] > 0.0)
			{return correctPresentProposed[0]/correctPresentProposed[1];}
			else
			{return 1.0;}
		}
		else
		{
			// BUG FIX: previously this checked the field 'data' instead of the parameter
			// 'data2' whose size is the divisor, so an empty evaluation corpus combined
			// with a non-empty training corpus divided by zero
			if(data2.size() > 0)
			{return correctPresentProposed[1]/((double) data2.size());}
			else
			{return 1.0;}
		}
	}
	/**
	 * used to calculate precision dependent on the microAverage value and the data in correctPresentProposed;
	 * by convention an empty corpus / zero proposed instances yields a precision of 1.0
	 * @param data2 the corpus that was evaluated
	 * @param microAverage whether micro-averaging is active
	 * @param correctPresentProposed the accumulated {correct, present, proposed} counts
	 * @return the precision value
	 */
	protected double calcPrecision(Corpus<WordAndTagHolder> data2, boolean microAverage,
			double[] correctPresentProposed)
	{
		if(microAverage)
		{
			if(correctPresentProposed[2] > 0.0)
			{return correctPresentProposed[0]/correctPresentProposed[2];}
			else
			{return 1.0;}
		}
		else
		{
			// BUG FIX: previously this checked the field 'data' instead of the parameter
			// 'data2' whose size is the divisor, so an empty evaluation corpus combined
			// with a non-empty training corpus divided by zero
			if(data2.size() > 0)
			{return correctPresentProposed[0]/((double) data2.size());}
			else
			{return 1.0;}
		}
	}
	/**
	 * used to extend the given correctPresentProposed array with the statistics from the given entry,
	 * if microAverage is set to true, then values in their absolute counts will be added, otherwise the
	 * values will be averaged to the number of proposals/present instances accordingly (entries with
	 * zero proposals/present instances contribute 1.0 by convention)
	 * @param data the corpus containing the entry
	 * @param microAverage whether micro-averaging is active
	 * @param correctPresentProposed the accumulator for {correct, present, proposed}
	 * @param i the number of the structure to be processed
	 */
	protected void processEntry(Corpus<WordAndTagHolder> data, boolean microAverage,
			double[] correctPresentProposed, int i)
	{
		// getEntry already returns WordAndTagHolder, the previous explicit cast was redundant
		WordAndTagHolder wth = data.getEntry(i);
		this.getSg().fillMappedSequence(wth);
		ExtensionInformationHolder eih = wth.getExtensionInformation();
		double localCorrect = 0.0;
		double localPresent = 0.0;
		double localProposed = 0.0;
		for(int j=0;j<wth.getLength();++j)
		{
			boolean isNoun = decideTypePresent(wth, j);
			ExtensionInformationValue eiv = eih.getFiniteInformation(j, ExtensionLevels.WordTypeLevel);
			if(isNoun)
			{++localProposed;}
			if(eiv == this.getAssociatedType())
			{
				++localPresent;
				if(isNoun)
				{++localCorrect;}
			}
		}
		if(microAverage)
		{
			correctPresentProposed[0] += localCorrect;
			correctPresentProposed[1] += localPresent;
			correctPresentProposed[2] += localProposed;
		}
		else
		{
			correctPresentProposed[0] += localProposed > 0 ?  localCorrect / localProposed : 1.0;
			correctPresentProposed[1] += localPresent > 0 ? localCorrect / localPresent : 1.0;
		}
	}
	/**
	 * only used in evaluation
	 * can be used to decide if the requested type should be predicted in the given position of the given
	 * WordAndTagHolder, deriving types can overwrite this to change how evaluation is done
	 * @param wth the entry being evaluated
	 * @param j the position within the entry
	 * @return true if a noun should be predicted at position j
	 */
	protected boolean decideTypePresent(WordAndTagHolder wth, int j)
	{
		if(this.getSg().hasFrameFor(j,wth))
		{
			int num = this.getSg().fillFrameExternal(j);
			return this.frames.contains(num);
		}
		return false;
	}
	/**
	 * does the same as the standard evaluate method, but uses the annotation of the corpus the model was
	 * created from
	 * @param microAverage set to true to activate micro-averaging
	 * @return {precision, recall, f-score}
	 */
	public double[] evaluateOnSelf(boolean microAverage)
	{return this.evaluate(data, microAverage);}
	/**
	 * used to write all the frames currently judged to be noun frames out to the given buffer with one frame
	 * per line and the format defined by the StatisticsGenerator class
	 * @param to the buffer written to; flushed but not closed by this method
	 * @throws IOException if writing to the buffer fails
	 */
	public void writeOutFrames(BufferedWriter to) throws IOException
	{
		IntIterator iit = this.frames.iterator();
		boolean first = true;
		while (iit.hasNext())
		{
			// separator before every entry except the first, so no trailing newline is written
			if(first)
			{first = false;}
			else
			{to.newLine();}
			// nextInt() avoids the boxing of the generic next()
			to.write(this.getSg().frameToString(iit.nextInt()));
		}
		to.flush();
	}
	/**
	 * writes out all the words currently judged to be nouns to the given buffer, with one word per line
	 * @param to the buffer written to; flushed but not closed by this method
	 * @throws IOException if writing to the buffer fails
	 */
	public void writeOutWords(BufferedWriter to) throws IOException
	{
		IntIterator iit = this.nouns.iterator();
		boolean first = true;
		while (iit.hasNext())
		{
			// separator before every entry except the first, so no trailing newline is written
			if(first)
			{first = false;}
			else
			{to.newLine();}
			// nextInt() avoids the boxing of the generic next()
			to.write(StringToInt.reverse(iit.nextInt()));
		}
		to.flush();
	}
	/**
	 * returns the statistics generator that is being used
	 * @return the sg
	 */
	public StatisticsGenerator getSg()
	{return sg;}
	/**
	 * adds WordType.Noun annotation to every word that is currently judged to be a noun candidate in the
	 * given corpus
	 * @param data the corpus to annotate
	 */
	public void annotateCorpusWords(Corpus<WordAndTagHolder> data)
	{
		for(int i=0;i<data.size();++i)
		{annotateEntryWord(data,i);}
	}
	/**
	 * annotates the given entry from the given corpus based on the currently induced words
	 * @param data2 the corpus containing the entry
	 * @param i the index of the entry to annotate
	 */
	protected void annotateEntryWord(Corpus<WordAndTagHolder> data2, int i)
	{
		WordAndTagHolder wth = data2.getEntry(i);
		ExtensionInformationHolder eih = wth.getExtensionInformation();
		for(int j=0;j<wth.getLength();++j)
		{
			int word = wth.getWord(j);
			if(this.nouns.contains(word))
			{eih.addInformation(j, ExtensionLevels.WordTypeLevel, getAssociatedType());}
		}
	}
	/**
	 * returns the type that is used to annotate in the respective methods, if this method is changed, then
	 * annotation can be changed to create a different type of annotation
	 * @return the ExtensionInformationValue used for annotation, WordType.Noun by default
	 */
	protected ExtensionInformationValue getAssociatedType()
	{return WordType.Noun;}
}