package edu.cornell.cs4740.qa.parsing;

import java.util.ArrayList;
import java.util.List;

import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.LowerCaseTokenizerFactory;
import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

import edu.cornell.cs4740.qa.WordContainer;

public class TokenFactory {
	
	/** Shared LingPipe tokenizer using Indo-European punctuation rules. */
	static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
	/** Sentence-boundary model matched to the tokenizer above. */
	static final SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel();
	
	/**
	 * Splits raw text into sentences using the Indo-European sentence model.
	 * Each returned sentence preserves the original inter-token whitespace,
	 * including whatever whitespace follows the sentence's final token.
	 *
	 * @param s raw input text (may contain multiple sentences)
	 * @return list of sentence strings, in document order; empty if no
	 *         sentence boundary is detected
	 */
	public List<String> getSentences(String s){
		List<String> sentences = new ArrayList<String>();
		
		List<String> tokenList = new ArrayList<String>();
		List<String> whiteList = new ArrayList<String>();
		Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(s.toCharArray(), 0, s.length());
		tokenizer.tokenize(tokenList, whiteList);
		
		// Idiomatic single-call toArray instead of pre-allocating and filling.
		String[] tokens = tokenList.toArray(new String[tokenList.size()]);
		String[] whites = whiteList.toArray(new String[whiteList.size()]);
		
		// NOTE: per the LingPipe contract, whites has one more entry than
		// tokens (whites[0] precedes the first token, whites[j+1] follows
		// token j), which is why whites[j+1] below is in bounds.
		int[] sentenceBoundaries = SENTENCE_MODEL.boundaryIndices(tokens, whites);
		
		int sentStartTok = 0;
		for (int i = 0; i < sentenceBoundaries.length; ++i) {
			int sentEndTok = sentenceBoundaries[i];
			// StringBuilder: String += in a loop is accidental O(n^2).
			StringBuilder sentence = new StringBuilder();
			for (int j = sentStartTok; j <= sentEndTok; j++) {
				sentence.append(tokens[j]).append(whites[j + 1]);
			}
			sentences.add(sentence.toString());
			sentStartTok = sentEndTok + 1;
		}
		return sentences;
	}
	
	/**
	 * Tokenizes a single sentence into {@link WordContainer}s holding both
	 * the lower-cased surface form and its Porter-stemmed form.
	 *
	 * @param sentence the sentence text to tokenize
	 * @return one WordContainer per token, in sentence order
	 */
	public List<WordContainer> sentenceToTokens(String sentence) {
		// Decorator chain: base tokenizer -> lower-casing -> Porter stemming.
		// The stemmed factory wraps the lower-case one, so both runs see the
		// same underlying token stream.
		TokenizerFactory lowerCaseFactory = new LowerCaseTokenizerFactory(TOKENIZER_FACTORY);
		TokenizerFactory stemmedFactory = new PorterStemmerTokenizerFactory(lowerCaseFactory);
		
		String[] lowerCaseTokens =
				lowerCaseFactory.tokenizer(sentence.toCharArray(), 0, sentence.length()).tokenize();
		String[] stemTokens =
				stemmedFactory.tokenizer(sentence.toCharArray(), 0, sentence.length()).tokenize();
		
		// Stemming only rewrites tokens, never adds or drops them, so the
		// two arrays stay index-aligned.
		assert lowerCaseTokens.length == stemTokens.length;
		
		List<WordContainer> tokenList = new ArrayList<WordContainer>(lowerCaseTokens.length);
		for (int x = 0; x < lowerCaseTokens.length; x++) {
			WordContainer wc = new WordContainer(lowerCaseTokens[x]);
			wc.setStemmedWord(stemTokens[x]);
			tokenList.add(wc);
		}
		return tokenList;
	}
}
