package pjn;

import java.util.ArrayList;
import java.util.Arrays;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.sentences.HeuristicSentenceModel;
import com.aliasi.sentences.SentenceChunker;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;
import com.aliasi.tokenizer.Tokenizer;
import com.aliasi.tokenizer.TokenizerFactory;

/**
 * Splits raw text into sentences and sentences into word tokens using
 * LingPipe's {@link SentenceChunker} with a {@link HeuristicSentenceModel}
 * configured for Indo-European punctuation conventions.
 */
public class Splitter {

    /** Tokens that are allowed to terminate a sentence. */
    private static final Set<String> POSSIBLE_STOPS = new HashSet<String>(Arrays.asList(
            ".", "...", "!", "?", "\"", "'", ").", ":", "\n-"));

    /**
     * Tokens that must not appear directly before a sentence-final stop
     * (single letters cover one-letter abbreviations such as initials).
     */
    private static final Set<String> IMPOSSIBLE_PENULTIMATES = new HashSet<String>(Arrays.asList(
            "a", "b", "c", "d", "e", "f", "g", "h", "i", "j", "k", "l", "m", "n", "o",
            "p", "q", "r", "s", "t", "u", "v", "w", "x", "y", "z",
            ",", ";", ".", "...", "\"", "'", ").", ":", "\n-"));

    /** Tokens that must not begin a sentence. (Duplicate ":" from the original list removed.) */
    private static final Set<String> IMPOSSIBLE_STARTS = new HashSet<String>(Arrays.asList(
            ")", "]", ",", ";", ":", ".", "...", "!", "?", "\"", "'", ").", "\n-"));

    /** Shared, stateless tokenizer factory; safe to hold as a constant. */
    private static final TokenizerFactory TOKENIZER_FACTORY = IndoEuropeanTokenizerFactory.INSTANCE;

    // Built once for all Splitter instances: the heuristic model is pure
    // configuration, so there is no reason to reconstruct it per instance.
    private static final SentenceChunker SENTENCE_CHUNKER = new SentenceChunker(
            TOKENIZER_FACTORY,
            new HeuristicSentenceModel(POSSIBLE_STOPS, IMPOSSIBLE_PENULTIMATES, IMPOSSIBLE_STARTS));

    /**
     * Splits the given text into sentences.
     *
     * @param text the text to split; must not be {@code null}
     * @return the sentences as substrings of {@code text}
     */
    public List<String> splitIntoSentences(String text) {
        Chunking chunking = SENTENCE_CHUNKER.chunk(text.toCharArray(), 0, text.length());
        Set<Chunk> chunkSet = chunking.chunkSet();

        List<String> sentences = new ArrayList<String>();
        // NOTE(review): chunkSet() is assumed to yield chunks in text order,
        // as in the standard LingPipe sentence-detection idiom — confirm if
        // callers depend on ordering.
        for (Chunk sentence : chunkSet) {
            sentences.add(text.substring(sentence.start(), sentence.end()));
        }

        return sentences;
    }

    /**
     * Tokenizes a sentence into words and punctuation tokens.
     *
     * @param sentence the sentence to tokenize; must not be {@code null}
     * @return the tokens as a fixed-size list (backed by the token array;
     *         does not support {@code add}/{@code remove})
     */
    public List<String> splitIntoWords(String sentence) {
        Tokenizer tokenizer = TOKENIZER_FACTORY.tokenizer(sentence.toCharArray(), 0, sentence.length());

        return Arrays.asList(tokenizer.tokenize());
    }
}
