package nlp;

import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.ArrayList;
import java.util.List;

import opennlp.tools.tokenize.Tokenizer;
import opennlp.tools.tokenize.TokenizerME;
import opennlp.tools.tokenize.TokenizerModel;
import opennlp.tools.util.Span;
//JVM profiling imports
import com.bea.jvm.MethodProfileEntry;
import com.bea.jvm.NotAvailableException;
import com.bea.jvm.ProfilingSystem;
import com.bea.jvm.UnapplicableMethodException;
import java.lang.reflect.Method;
/**
 * @author Sebastian Suarez Benjumea ssuarezbe@unal.edu.co
 *
 */
public class NlpTokenizer {
	/** Path to the OpenNLP tokenizer model file, supplied in the constructor. */
	private String pathmodelFile;
	/** Tokenizer built by {@link #init()}; {@code null} until init() is called. */
	private Tokenizer tokenizer;
	// Attributes used for the JRockit (JMAPI) profiling
	private ProfilingSystem profiler;
	public MethodProfileEntry methodProfileEntry;
	public Method method;
	/** Parameter signature of tokenizeLine(String), used for the reflective lookup in init(). */
	Class<?> params[] = { String.class };
	// Scratch state reused by tokenizeLine() to avoid re-allocating on every call.
	// NOTE(review): reusing fields like this makes the class non-thread-safe —
	// confirm instances are only ever used from a single thread.
	String tokens[];
	String validTokens[];
	private List<String> tokensList;
	int i;
	// Scratch state reused by tokenPositionExtraction() for the same reason
	Span tokenSpans[];

	/**
	 * @param pathConfigFile path to the OpenNLP tokenizer model file
	 * @param profiler JRockit profiling system used to instrument {@code tokenizeLine}
	 */
	public NlpTokenizer(String pathConfigFile, ProfilingSystem profiler) {
		this.pathmodelFile = pathConfigFile;
		// keep the profiling system so init() can register the method entry
		this.profiler = profiler;
	}

	/**
	 * Loads the tokenizer model (path given in the constructor) to create a
	 * {@link TokenizerME}, then registers {@code tokenizeLine(String)} with the
	 * profiler so its invocation count and timing are recorded.
	 * Should be called exactly once, before any tokenization.
	 */
	public void init() {
		this.tokensList = new ArrayList<String>();
		InputStream modelIn = null;
		try {
			modelIn = new FileInputStream(this.pathmodelFile);
			TokenizerModel model = new TokenizerModel(modelIn);
			this.tokenizer = new TokenizerME(model);
		} catch (IOException e) {
			// model could not be read; tokenizer stays null
			e.printStackTrace();
		} finally {
			// fix: the original never closed the model stream (resource leak)
			if (modelIn != null) {
				try {
					modelIn.close();
				} catch (IOException e) {
					e.printStackTrace();
				}
			}
		}
		/*
		 * Definition of the JRockit profiling code lines.
		 * For more info check:
		 * http://www.rgagnon.com/javadetails/java-0031.html
		 * http://www.tomecode.com/2010/10/02/profiling-java-ap-with-jmapi-jrockit/
		 */
		try {
			// look up tokenizeLine(String) reflectively and enable its counters
			this.method = NlpTokenizer.class.getDeclaredMethod("tokenizeLine", this.params);
			this.methodProfileEntry = profiler.newMethodProfileEntry(method);
			this.methodProfileEntry.setInvocationCountEnabled(true);
			this.methodProfileEntry.setTimingEnabled(true);
		} catch (SecurityException e) {
			e.printStackTrace();
		} catch (NoSuchMethodException e) {
			e.printStackTrace();
		} catch (NotAvailableException e) {
			e.printStackTrace();
		} catch (UnapplicableMethodException e) {
			e.printStackTrace();
		}

	}

	/**
	 * Tokenizes one line of text and keeps only the tokens made entirely of
	 * word characters (regex {@code \w+}), discarding punctuation tokens.
	 * Regular-expression summary:
	 * http://www.leepoint.net/notes-java/data/strings/40regular_expressions/25sum-regex.html
	 *
	 * @param line the line to tokenize; {@link #init()} must have run first
	 * @return the word tokens of the line, in order of appearance
	 */
	public String[] tokenizeLine(String line) {
		this.tokens = tokenizer.tokenize(line);
		this.tokensList.clear();
		for (this.i = 0; this.i < tokens.length; this.i++) {
			// keep only purely alphanumeric/underscore tokens
			if (this.tokens[this.i].matches("[\\w]+")) {
				this.tokensList.add(this.tokens[this.i]);
			}
		}
		this.validTokens = new String[tokensList.size()];
		return this.tokensList.toArray(this.validTokens);
	}

	/**
	 * @param fileLine the line to tokenize; {@link #init()} must have run first
	 * @return the character {@link Span} of every token in the line
	 */
	public Span[] tokenPositionExtraction(String fileLine) {
		this.tokenSpans = tokenizer.tokenizePos(fileLine);
		return this.tokenSpans;
	}
}
