package edu.cornell.cs4740.sentencegenerator;

import java.io.FileInputStream;
import java.io.FileNotFoundException;
import java.util.ArrayList;
import java.util.List;
import java.util.Scanner;

import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.LowerCaseTokenizerFactory;
import com.aliasi.tokenizer.PorterStemmerTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

public class Utilities {
	/** Marker token inserted after each sentence. */
	public static final String BREAK = "__B__";
	/** Placeholder token for out-of-vocabulary words. */
	public static final String UNKNOWN = "__UNK__";

	static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;
	static final SentenceModel SENTENCE_MODEL = new IndoEuropeanSentenceModel();

	/** Types of smoothing that can be used. **/
	public static enum Smoothing {
		NONE, LAPLACIAN, GOODTURING
	};

	/**
	 * Reads an entire UTF-8 text file and tokenizes its contents.
	 *
	 * @param filename  path of the file to read
	 * @param keepStems if false, tokens are reduced to their Porter stems
	 * @return lower-cased tokens with a {@link #BREAK} marker after each
	 *         sentence, or {@code null} if the file could not be found
	 *         (callers are expected to check for null)
	 */
	public static List<String> tokenizeFile(String filename, boolean keepStems) {
		StringBuilder text = new StringBuilder();
		String newLineChar = System.getProperty("line.separator");

		// try-with-resources guarantees the scanner (and its underlying
		// stream) is closed on every path, including read failures
		try (Scanner scanner = new Scanner(new FileInputStream(filename), "UTF-8")) {
			while (scanner.hasNextLine()) {
				text.append(scanner.nextLine()).append(newLineChar);
			}
		} catch (FileNotFoundException e) {
			System.err.println("Could not find file: " + filename);
			return null;
		}

		return sentencesToTokens(text.toString(), keepStems);
	}

	/**
	 * Splits text into sentences and tokenizes each one, appending a
	 * {@link #BREAK} marker after every sentence.
	 *
	 * @param textToTokenize raw text to process
	 * @param keepStems      if false, a Porter stemming filter is applied
	 * @return lower-cased tokens, one BREAK after each sentence
	 */
	public static List<String> sentencesToTokens(String textToTokenize, boolean keepStems) {
		// Add a stemming filter unless the caller wants the raw word forms,
		// then lower-case all output. The factory chain is stateless, so
		// build it once rather than once per sentence.
		TokenizerFactory tokenizerFactory =
				keepStems ? TOKENIZER_FACTORY : new PorterStemmerTokenizerFactory(TOKENIZER_FACTORY);
		tokenizerFactory = new LowerCaseTokenizerFactory(tokenizerFactory);

		List<String> tokenList = new ArrayList<String>();
		for (String sentence : getSentences(textToTokenize)) {
			Tokenizer tokenizer =
					tokenizerFactory.tokenizer(sentence.toCharArray(), 0, sentence.length());
			for (String token : tokenizer) {
				tokenList.add(token);
			}
			tokenList.add(BREAK); // mark the sentence boundary
		}
		return tokenList;
	}

	/**
	 * Segments raw text into sentences using the Indo-European sentence model.
	 *
	 * @param s text to segment
	 * @return one String per detected sentence, each token followed by the
	 *         whitespace that trailed it in the original text
	 */
	protected static List<String> getSentences(String s) {
		List<String> tokenList = new ArrayList<String>();
		List<String> whiteList = new ArrayList<String>();
		Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(s.toCharArray(), 0, s.length());
		tokenizer.tokenize(tokenList, whiteList);

		String[] tokens = tokenList.toArray(new String[tokenList.size()]);
		String[] whites = whiteList.toArray(new String[whiteList.size()]);
		// boundaryIndices returns the index of the final token of each sentence
		int[] sentenceBoundaries = SENTENCE_MODEL.boundaryIndices(tokens, whites);

		List<String> sentences = new ArrayList<String>();
		int sentStartTok = 0;
		for (int i = 0; i < sentenceBoundaries.length; ++i) {
			int sentEndTok = sentenceBoundaries[i];
			// whiteList has one more element than tokenList, so whites[j + 1]
			// is the whitespace that follows token j
			StringBuilder sentence = new StringBuilder();
			for (int j = sentStartTok; j <= sentEndTok; j++) {
				sentence.append(tokens[j]).append(whites[j + 1]);
			}
			sentences.add(sentence.toString());
			sentStartTok = sentEndTok + 1;
		}
		return sentences;
	}

}
