package ner;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedList;
import java.util.List;
import java.util.Set;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.sentences.HeuristicSentenceModel;
import com.aliasi.sentences.IndoEuropeanSentenceModel;
import com.aliasi.sentences.SentenceChunker;
import com.aliasi.sentences.SentenceModel;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

/**
 * Splits raw text into sentences and word tokens using LingPipe's
 * heuristic sentence model and Indo-European tokenizer, with custom
 * stop/penultimate/start token lists (includes Polish abbreviations
 * such as "mgr", "ks", "prof").
 */
public class Segmenter {

	// Shared segmentation machinery. Initialized once in the static block
	// below so that the static splitByWords(...) works even when no
	// Segmenter instance has been created (previously these were assigned
	// only in the constructor, which made splitByWords throw a
	// NullPointerException if called first, and rebuilt the model on every
	// new instance).
	static TokenizerFactory TOKENIZER_FACTORY;
	static SentenceModel SENTENCE_MODEL;
	static SentenceChunker SENTENCE_CHUNKER;

	/** Tokens that may terminate a sentence. */
	static final String[] POSSIBLE_STOPS={".","..","!","?","\"","'",").","...","\n-",":"};
	/** Tokens that must not directly precede a sentence stop (single letters, punctuation, Polish title abbreviations). */
	static final String[] IMPOSSIBLE_PENULTIMATES={".","..","!","?","'",").","...",
		"a","b","c","d","e","f","g","h","i","j","k","l","m","n","o","p","q","r","s","t","u","v","w","x","y","z",
		"mgr","ks","dyr","med","lek","dr","prof","hab","doc","bp",",",";",
		"al","os"};
	/** Tokens that must not start a sentence; all POSSIBLE_STOPS are added to this set as well. */
	static final String[] IMPOSSIBLE_STARTS={")","]",",",";","%"};

	static {
		TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;

		// Sentences may not start with closing punctuation or with any stop token.
		Set<String> impossibleStarts = new HashSet<String>(Arrays.asList(IMPOSSIBLE_STARTS));
		impossibleStarts.addAll(Arrays.asList(POSSIBLE_STOPS));

		SENTENCE_MODEL = new HeuristicSentenceModel(
				new HashSet<String>(Arrays.asList(POSSIBLE_STOPS)),
				new HashSet<String>(Arrays.asList(IMPOSSIBLE_PENULTIMATES)),
				impossibleStarts);
		SENTENCE_CHUNKER = new SentenceChunker(TOKENIZER_FACTORY, SENTENCE_MODEL);
	}

	/**
	 * Kept for backward compatibility with existing callers; all shared
	 * state is now initialized in the static initializer above.
	 */
	public Segmenter() {
	}

	/**
	 * Splits the given text into sentences.
	 *
	 * @param content text to segment; must not be null
	 * @return sentences in document order, as substrings of {@code content}
	 */
	public List<String> splitBySentences(String content) {
		Chunking chunking = SENTENCE_CHUNKER.chunk(content.toCharArray(), 0, content.length());
		Set<Chunk> sentences = chunking.chunkSet();

		String text = chunking.charSequence().toString();
		List<String> sentenceList = new ArrayList<String>(sentences.size());
		for (Chunk sentence : sentences) {
			sentenceList.add(text.substring(sentence.start(), sentence.end()));
		}
		return sentenceList;
	}

	/**
	 * Splits the given text into word tokens (whitespace is discarded).
	 *
	 * @param content text to tokenize; must not be null
	 * @return tokens in document order
	 */
	static public List<String> splitByWords(String content) {
		List<String> tokenList = new ArrayList<String>();
		List<String> whiteList = new ArrayList<String>();
		Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(content.toCharArray(), 0, content.length());
		tokenizer.tokenize(tokenList, whiteList);

		return tokenList;
	}

}
