/*
 * To change this template, choose Tools | Templates
 * and open the template in the editor.
 */
package normalizer;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.Arrays;
import java.util.Collection;
import java.util.regex.Matcher;
import java.util.regex.Pattern;
import normalizer.constant.NormalizerPropertiesLoader;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.namefind.TokenNameFinderModel;
import opennlp.tools.postag.POSModel;
import opennlp.tools.postag.POSTaggerME;
import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.Span;

/**
 *
 * @author Ricardo
 */
public class TextProcessing {

    /**
     * Splits raw text into sentences by applying the regex rule configured
     * under the {@code regex.sentence.detector.rule} property.
     *
     * @param data raw input text
     * @return the text with sentence boundaries applied per the configured rule
     */
    public String sentenceDetector(String data) {
        return this.applyRegexExpression(data, "regex.sentence.detector.rule");
    }

    /**
     * Tokenizes the given text with the OpenNLP maximum-entropy tokenizer.
     * The English token model is loaded from {@code opennlp_models/en-token.bin}
     * on every call. NOTE(review): loading the model per call is expensive;
     * consider caching the TokenizerModel as a static field if this is hot.
     *
     * @param data text to tokenize
     * @return the token array, or {@code null} if the model could not be loaded
     */
    public String[] tokenize(String data) {
        String[] tokenizedColl = null;
        // try-with-resources guarantees the model stream is closed exactly once,
        // even if the TokenizerModel constructor throws (the old code closed it
        // twice on success and swallowed close failures in an empty catch).
        try (InputStream modelIn = new FileInputStream("opennlp_models/en-token.bin")) {
            final TokenizerModel tokenModel = new TokenizerModel(modelIn);
            final Tokenizer tokenizer = new TokenizerME(tokenModel);
            tokenizedColl = tokenizer.tokenize(data);
            // Removed stray findName() call: it re-read a Spanish NER model from
            // disk and printed debug spans for a hard-coded sentence on every
            // tokenize() invocation — an unrelated side effect, not tokenization.
        } catch (final IOException ioe) {
            // Best-effort behavior preserved: log and return null on model failure.
            ioe.printStackTrace();
        }
        return tokenizedColl;
    }

    /**
     * Demo/debug routine: runs OpenNLP person-name finding over a hard-coded
     * Spanish sentence and prints each detected {@link Span} to stdout.
     * The Spanish NER model is loaded from {@code opennlp_models/es-ner-person.bin}.
     *
     * @throws IOException if the model file cannot be read or parsed
     */
    public static void findName() throws IOException {
        // try-with-resources fixes the leak: the old code never closed the
        // stream if the TokenNameFinderModel constructor threw.
        try (InputStream is = new FileInputStream("opennlp_models/es-ner-person.bin")) {
            TokenNameFinderModel model = new TokenNameFinderModel(is);
            NameFinderME nameFinder = new NameFinderME(model);

            String[] sentence = new String[]{
                "Mike",
                "Smith",
                "es",
                "una",
                "buena",
                "persona"
            };

            for (Span s : nameFinder.find(sentence)) {
                System.out.println(s.toString());
            }
        }
    }

    /**
     * Applies a sequence of regex rewrite rules to {@code data}.
     * The rules are read from the given property; rules are separated by
     * {@code ¥} and each rule is {@code pattern;replacement}.
     * NOTE(review): replacement strings are passed to Matcher.replaceAll, so
     * {@code $} and {@code \} in them are interpreted specially — confirm the
     * property values account for that.
     *
     * @param data          text to transform
     * @param regexProperty property key holding the ¥-separated rule list
     * @return the text after all rules have been applied in order
     */
    public String applyRegexExpression(String data, String regexProperty) {
        NormalizerPropertiesLoader properties = new NormalizerPropertiesLoader();
        String regexRules = properties.getProperty(regexProperty);

        // Plain String accumulation replaces the old StringBuilder that was
        // rebuilt from scratch on every iteration anyway.
        String current = data;
        for (String rule : regexRules.split("¥")) {
            String[] regexExpression = rule.split(";");
            Pattern pattern = Pattern.compile(regexExpression[0]);
            Matcher matcher = pattern.matcher(current);
            current = matcher.replaceAll(regexExpression[1]);
        }
        return current;
    }

}
