package CorpusConverter;

import TrainingData.TrainingInstanceCollection;
import TrainingData.TrainingInstance;
import java.util.StringTokenizer;
import java.util.NoSuchElementException; 
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Iterator;
import java.util.Map;
import java.util.TreeMap;
import java.util.Vector;
import java.lang.Integer;

/**
 * @author Robert Pethes
 * 
 * Class for converting a corpus into a TrainingInstanceCollection. The corpus needs to be in a tweaked Penn Treebank format: words and tags are
 * separated by a whitespace character, and each sentence starts in a new line.
 *
 */
public class TweakedPennTreebankConverter extends CorpusConverter {
	
	/** Tag used to pad the NGram window before the first real tags of a sentence. */
	private final String _missingTag;
	/** Maps each tag NGram seen so far to its state index; index 0 is reserved for the sentence-end tag ".". */
	private final Map<NGram,Integer> _ngramIndexMap = new TreeMap<NGram,Integer>();
	/** Maps each word seen so far to its emission index; index 0 is reserved for ".", index 1 for the missing tag. */
	private final Map<String,Integer> _wordIndexMap = new TreeMap<String,Integer>();
	private int _nextWordIndex = 0;
	private int _nextNGramIndex = 0;
	/** The N parameter of the NGram model. */
	private final int _n;
	
	
	
	/**
	 * Initializes the converter.
	 * 
	 * @param corpus the corpus to convert.
	 * @param missingTag the tag used for missing tags.
	 * @param n the N parameter of the NGram model to use.
	 */
	public TweakedPennTreebankConverter(TextCorpus corpus, String missingTag, int n) {
		super(corpus);
		_n = n;
		_missingTag = missingTag;
		// Reserve the two fixed emission indices up front:
		// 0 for the sentence-end marker "." and 1 for the missing tag.
		_wordIndexMap.put("." , Integer.valueOf(0));
		_wordIndexMap.put(missingTag , Integer.valueOf(1));
		// NGram state index 0 is reserved for sentence end, so fresh ngram
		// indices start at 1; word indices start after the two reserved ones.
		_nextNGramIndex = 1;
		_nextWordIndex = 2;
	}

	/**
	 * Reads the corpus sentence by sentence and turns every parsable
	 * sentence into a TrainingInstance. Sentences with an odd token count
	 * (a word missing its tag) are reported and skipped.
	 * 
	 * @return the collection of all training instances built from the corpus.
	 * @throws IOException if reading from the corpus fails.
	 * @throws NoSuchElementException propagated if corpus iteration fails unexpectedly.
	 */
	@Override
	public TrainingInstanceCollection convert() throws IOException, NoSuchElementException {
		
		Vector<TrainingInstance> instanceArray = new Vector<TrainingInstance>();
		
		while (true) {
			
			String line = _corpus.getNextSentence();
			
			// null signals the end of the corpus.
			if (line == null)
				break;
			
			if (line.isEmpty())
				continue;
			
			StringTokenizer tokenizer = new StringTokenizer(line); 
			
			// Tokens come in word/tag pairs, so half the token count is the word count.
			int wordNr = tokenizer.countTokens() / 2;
			
			if (wordNr < 1)
				continue;
			
			ArrayList<String> wordArray = new ArrayList<String>(wordNr);
			ArrayList<String> tagArray = new ArrayList<String>(wordNr);
			
			boolean parseError = false;
			
			while (tokenizer.hasMoreTokens() && !parseError) {
				
				try {
					String word = tokenizer.nextToken();
					String tag = tokenizer.nextToken();
					
					// Drop in-sentence "." tags (and their words); a single
					// sentence-end marker pair is appended after the loop.
					if (tag.equals("."))
						continue;
					
					wordArray.add(word);
					tagArray.add(tag);
				} catch (NoSuchElementException e) {
					// Odd token count: a word without its tag. Report and skip the sentence.
					System.out.println("Parse error at:");
					System.out.println(line);
					
					parseError = true;
				}
			}
			
			if (parseError)
				continue;
			
			// Every sentence ends with the "."/"." marker pair.
			wordArray.add(".");
			tagArray.add(".");
			
			TrainingInstance instance = createTrainingInstance(wordArray, tagArray);
			
			if (instance != null) {
				instanceArray.add(instance);
			}
		}
		
		return new TrainingInstanceCollection(_nextNGramIndex, _nextWordIndex, instanceArray);
	}
	
	/**
	 * Creates a training instance from a sentence.
	 * 
	 * @param wordArray the words in the sentence.
	 * @param tagArray the respective tags of the words in the sentence.
	 * @return the training instance corresponding to the sentence, or null
	 *         if the sentence is empty or the word and tag counts differ.
	 */
	private TrainingInstance createTrainingInstance(ArrayList<String> wordArray, ArrayList<String> tagArray) {
		
		int size = wordArray.size();
		
		if (size == 0)
			return null;
		
		// wordArray.size() and tagArray.size() must be equal.
		if (tagArray.size() != size)
			return null;
		
		int[] states = new int[size];
		int[] emissions = new int[size];
		
		// Seed the sliding NGram with _n copies of the missing tag so the
		// first real tags of the sentence have a well-defined left context.
		ArrayList<String> tags = new ArrayList<String>(_n); 
		
		for (int i = 0; i < _n; i++) {
			tags.add(_missingTag);
		}
		
		NGram shiftingNGram = new NGram(tags);
		
		Iterator<String> iWords = wordArray.iterator();
		Iterator<String> iTags  = tagArray.iterator();
		
		int cnt = 0;
		
		while (iWords.hasNext() && iTags.hasNext()) {
			
			String currentWord = iWords.next();
			String currentTag = iTags.next();
			
			// Shift the current tag into the NGram window.
			shiftingNGram.push_back(currentTag);
			
			Integer ngramIndex = _ngramIndexMap.get(shiftingNGram);
			
			if (ngramIndex == null) {
				// Clone before storing as a key: shiftingNGram keeps mutating.
				ngramIndex = Integer.valueOf(_nextNGramIndex);
				_ngramIndexMap.put(shiftingNGram.clone(), ngramIndex);
				++_nextNGramIndex;
			}
			
			if (currentTag.equals(".")) {
				// Sentence end always maps to the reserved state 0; the first
				// end-of-sentence NGram is also recorded under 0 in the map.
				// NOTE(review): a fresh index was still allocated above for
				// this NGram, so one state index per distinct end-NGram goes
				// unused — left as-is to keep index numbering unchanged.
				states[cnt] = 0;
				if (!_ngramIndexMap.containsValue(0))
					_ngramIndexMap.put(shiftingNGram.clone(), 0);
			}
			else {
				states[cnt] = ngramIndex.intValue();
			}
			
			Integer wordIndex = _wordIndexMap.get(currentWord);
			
			if (wordIndex == null) {
				wordIndex = Integer.valueOf(_nextWordIndex);
				_wordIndexMap.put(currentWord, wordIndex);
				++_nextWordIndex;
			}
			
			emissions[cnt] = wordIndex.intValue();
			++cnt;
		}
		
		return new TrainingInstance(emissions, states);
	}	
}
