package edu.cornell.cs4740.wsd.parsing;

import java.util.ArrayList;
import java.util.List;

import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.LowerCaseTokenizerFactory;
import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

import edu.cornell.cs4740.wsd.Bigram;
import edu.cornell.cs4740.wsd.Word;

public class TrainingData {
	/** The ambiguous target word (first field of the header line). */
	Word word;
	/** Surface form of the target word exactly as it appeared in the context. */
	String wordWithStem;
	/** One flag per sense: TRUE where the header marked that sense with "1". */
	List<Boolean> senseUsage;
	
	/** Context words before/after the target, each tagged with its distance from it. */
	List<WordDistanceContainer> preceedingWords;
	List<WordDistanceContainer> succeedingWords;
	
	/** Context bigrams before/after the target, each tagged with its distance. */
	List<BigramDistanceContainer> preceedingBigrams;
	List<BigramDistanceContainer> succeedingBigrams;
	
	
	static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
	// NOTE(review): SENTENCE_MODEL is never referenced in this class — confirm
	// whether it is used elsewhere before removing.
	static final SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel();
	
	
	/**
	 * Parses one training instance of the form
	 * {@code "<word> <0|1> <0|1> ...@<preceding context>@<target word>@<succeeding context>"}
	 * and builds the word/bigram context features with distance information.
	 *
	 * <p>Two tokenizations of each context are kept in parallel: a
	 * Porter-stemmed, lower-cased one and an unstemmed, lower-cased one
	 * ("with stems" here means the stem endings are still present, i.e. the
	 * word is NOT stemmed). The two tokenizers split identically, so the
	 * token arrays line up index-for-index.
	 *
	 * @param s         one raw training line, with exactly three {@code @} separators
	 * @param keepStems NOTE(review): currently unused — TODO confirm intent
	 * @throws Exception if a sense flag in the header is neither "0" nor "1"
	 */
	public TrainingData(String s, boolean keepStems) throws Exception {
		String[] firstSplit = s.split("@");
		String wordAndUsage = firstSplit[0];
		String preceedingString = firstSplit[1];
		String actualWord = firstSplit[2];
		String succeedingString = firstSplit[3];
		
		// Header: target word followed by one 0/1 flag per sense.
		String[] wordAndUsageArr = wordAndUsage.split(" ");
		word = new Word(wordAndUsageArr[0]);
		senseUsage = new ArrayList<Boolean>();
		for(int x = 1; x < wordAndUsageArr.length; x++) {
			if(wordAndUsageArr[x].equals("0")) {
				senseUsage.add(Boolean.FALSE);
			} else if(wordAndUsageArr[x].equals("1")) {
				senseUsage.add(Boolean.TRUE);
			} else {
				throw new Exception(wordAndUsageArr[x] + " is not 0 or 1");
			}
		}
		
		wordWithStem = actualWord;
		
    	// Stemming + lower-casing tokenizer vs. lower-casing-only tokenizer.
    	TokenizerFactory tokenizerFactory = new PorterStemmerTokenizerFactory(TOKENIZER_FACTORY);
    	TokenizerFactory tokenizerFactoryWithStems = TOKENIZER_FACTORY;
    	// makes all output lower case
    	tokenizerFactory = new LowerCaseTokenizerFactory(tokenizerFactory);
    	tokenizerFactoryWithStems = new LowerCaseTokenizerFactory(tokenizerFactoryWithStems);
    	
		////////////////////////////////
		//  deal with preceeding words
		////////////////////////////////
		
    	// Tokenizers are one-shot, so a separate instance is needed for each pass.
    	Tokenizer preceedingWordsTokenizer = tokenizerFactory.tokenizer(preceedingString.toCharArray(), 0, preceedingString.length());
    	Tokenizer preceedingWordsBigramTokenizer = tokenizerFactory.tokenizer(preceedingString.toCharArray(), 0, preceedingString.length());
    	Tokenizer preceedingWordsTokenizerWithStems = tokenizerFactoryWithStems.tokenizer(preceedingString.toCharArray(), 0, preceedingString.length());
    	String[] tokensWithStems = preceedingWordsTokenizerWithStems.tokenize();
    	
    	int preNumTokens = 0;
    	preceedingWords = new ArrayList<WordDistanceContainer>();
    	for(String token : preceedingWordsTokenizer) {
    		WordDistanceContainer wdc = new WordDistanceContainer(token, 0);
    		wdc.setStemmedWord(tokensWithStems[preNumTokens]);
    		preceedingWords.add(wdc);
    		preNumTokens++;
    	}
    	
    	// now add the distance information: the token nearest the target
    	// gets distance 1, the farthest gets preNumTokens.
    	for(int x = 0; x < preNumTokens; x++) {
    		preceedingWords.get(x).setDistance(preNumTokens - x);
    	}
    	
    	
    	// Build preceding bigrams, seeding with the sentence-start marker.
    	String pastWord = "__S__";
    	String pastStemmedWord = "__S__";
    	preceedingBigrams = new ArrayList<BigramDistanceContainer>();
    	
    	int currentDistance = preNumTokens;
    	for(String token : preceedingWordsBigramTokenizer) {
    		Bigram b = new Bigram(pastWord, token);
    		BigramDistanceContainer bdc = new BigramDistanceContainer(b, currentDistance);
    		bdc.setStemmedWord1(pastStemmedWord);
    		// Parallel unstemmed token: same position counted back from the end.
    		String stemmedToken = tokensWithStems[tokensWithStems.length - currentDistance];
    		bdc.setStemmedWord2(stemmedToken);
    		preceedingBigrams.add(bdc);
    		currentDistance--;
    		pastWord = token;
    		pastStemmedWord = stemmedToken;
    	}
    	// Final bigram joins the last context word to the target itself
    	// (currentDistance has counted down to 0 here).
    	BigramDistanceContainer bdcEnd = new BigramDistanceContainer(new Bigram(pastWord, actualWord), currentDistance);
    	bdcEnd.setStemmedWord1(pastStemmedWord);
    	bdcEnd.setStemmedWord2(wordWithStem);
    	preceedingBigrams.add(bdcEnd);
    	
		////////////////////////////////
		//  deal with succeeding words
		////////////////////////////////
    	
    	
    	Tokenizer succeedingWordsTokenizer = tokenizerFactory.tokenizer(succeedingString.toCharArray(), 0, succeedingString.length());
    	Tokenizer succeedingWordsBigramTokenizer = tokenizerFactory.tokenizer(succeedingString.toCharArray(), 0, succeedingString.length());
    	Tokenizer succeedingWordsTokenizerWithStems = tokenizerFactoryWithStems.tokenizer(succeedingString.toCharArray(), 0, succeedingString.length());
    	String[] succeedingTokensWithStems = succeedingWordsTokenizerWithStems.tokenize();
    	
    	// Succeeding distances count up from the target: nearest token is 1.
    	int sucNumTokens = 0;
    	succeedingWords = new ArrayList<WordDistanceContainer>();
    	for(String token : succeedingWordsTokenizer) {
    		WordDistanceContainer wdc = new WordDistanceContainer(token, sucNumTokens + 1);
    		wdc.setStemmedWord(succeedingTokensWithStems[sucNumTokens]);
    		succeedingWords.add(wdc);
    		sucNumTokens++;
    	}
    	
    	// Build succeeding bigrams, seeding with the target word and closing
    	// with the sentence-end marker.
    	succeedingBigrams = new ArrayList<BigramDistanceContainer>();
    	String sucPastWord=actualWord;
    	String sucPastStemmedWord = wordWithStem;
    	int sucCurrentDistance = 0;
    	for(String token : succeedingWordsBigramTokenizer) {
    		Bigram b = new Bigram(sucPastWord, token);
    		BigramDistanceContainer bdc = new BigramDistanceContainer(b, sucCurrentDistance);
    		bdc.setStemmedWord1(sucPastStemmedWord);
    		String stemmedToken = succeedingTokensWithStems[sucCurrentDistance];
    		bdc.setStemmedWord2(stemmedToken);
    		succeedingBigrams.add(bdc);
    		sucPastStemmedWord = stemmedToken;
    		sucCurrentDistance++;
    		sucPastWord = token;
    		
    	}
    	BigramDistanceContainer bdcEnd2 = new BigramDistanceContainer(new Bigram(sucPastWord, "__E__"), sucCurrentDistance);
    	bdcEnd2.setStemmedWord1(sucPastStemmedWord);
    	bdcEnd2.setStemmedWord2("__E__");
    	succeedingBigrams.add(bdcEnd2);
    	
    	
	}


	/** @return the target word (ie. begin) */
	public Word getWord() {
		return word;
	}


	/** @return the target word's surface form (ie. beginning) */
	public String getWordWithStem() {
		return wordWithStem;
	}


	public List<Boolean> getSenseUsage() {
		return senseUsage;
	}


	public List<WordDistanceContainer> getPreceedingWords() {
		return preceedingWords;
	}


	public List<WordDistanceContainer> getSucceedingWords() {
		return succeedingWords;
	}


	public List<BigramDistanceContainer> getPreceedingBigrams() {
		return preceedingBigrams;
	}


	public List<BigramDistanceContainer> getSucceedingBigrams() {
		return succeedingBigrams;
	}


	public void setSenseUsage(List<Boolean> senseUsage) {
		this.senseUsage = senseUsage;
	}
	
	/**
	 * Serializes the header portion of this instance back to the training-file
	 * format: the word followed by one " 0" or " 1" per sense flag.
	 *
	 * FIX: previously built the string but returned {@code null}; now returns
	 * the built string, accumulated with a StringBuilder instead of repeated
	 * String.concat in the loop.
	 *
	 * @return e.g. {@code "begin 0 1 0"}
	 */
	public String toTrainingDataString() {
		StringBuilder out = new StringBuilder(word.toString());
		for(Boolean b : senseUsage) {
			out.append(Boolean.TRUE.equals(b) ? " 1" : " 0");
		}
		return out.toString();
	}
}
