package com.rizzo.back.helper;

import com.aliasi.lm.TokenizedLM;
import com.aliasi.tokenizer.TokenizerFactory;
import com.aliasi.util.ScoredObject;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
import java.util.SortedSet;

/**
 * Extracts statistically interesting phrases (collocations) from free text by
 * training a LingPipe {@link TokenizedLM} n-gram language model on the input.
 *
 * <p>Not thread-safe: {@link #setTokenizerFactory(TokenizerFactory)} must be
 * called before {@link #extractImportantPhrases}.
 */
public class PhraseHelper {

    private static final Logger LOGGER = LoggerFactory.getLogger(PhraseHelper.class);

    private TokenizerFactory tokenizerFactory;

    /**
     * Trains an n-gram language model on {@code information} and returns its
     * top-scoring collocations as space-separated phrases.
     *
     * @param information    raw text to train the model on
     * @param tokenizerNgram maximum n-gram length tracked by the model
     * @param pruneFactor    sequences occurring fewer than this many times are
     *                       pruned to bound the model's size
     * @param reportNgram    length of the n-grams reported as collocations
     * @param minCount       minimum occurrence count for a reported collocation
     * @param maxReturned    maximum number of phrases to return
     * @return phrases ordered from highest to lowest collocation score
     * @throws IOException           declared for caller compatibility
     * @throws IllegalStateException if no tokenizer factory has been configured
     */
    public List<String> extractImportantPhrases(String information, int tokenizerNgram, int pruneFactor, int reportNgram, int minCount, int maxReturned) throws IOException {
        // Fail fast with a clear message instead of an opaque NPE inside LingPipe.
        if (tokenizerFactory == null) {
            throw new IllegalStateException("tokenizerFactory must be set before extracting phrases");
        }
        List<String> importantPhrases = new ArrayList<>();
        // Train the model on the supplied text.
        TokenizedLM model = new TokenizedLM(tokenizerFactory, tokenizerNgram);
        model.train(information);
        // Limit model size by removing all sequences seen fewer than pruneFactor times.
        model.sequenceCounter().prune(pruneFactor);
        // Collect the highest-scoring collocations of the requested length.
        SortedSet<ScoredObject<String[]>> collocations = model.collocationSet(reportNgram, minCount, maxReturned);
        for (ScoredObject<String[]> collocation : collocations) {
            // Join tokens with single spaces; the old StringBuffer loop left a
            // trailing blank on every phrase.
            String phrase = String.join(" ", collocation.getObject());
            // Parameterized logging avoids concatenation cost when DEBUG is off.
            LOGGER.debug("Score: {} - Phrase: {}", collocation.score(), phrase);
            importantPhrases.add(phrase);
        }
        return importantPhrases;
    }

    /**
     * Sets the tokenizer factory used to build language models.
     *
     * @param tokenizerFactory factory producing tokenizers for model training
     */
    public void setTokenizerFactory(TokenizerFactory tokenizerFactory) {
        this.tokenizerFactory = tokenizerFactory;
    }
}
