/**
 *  Copyright 2006 Juan Manuel Caicedo 
 *  
 *  Licensed under the Apache License, Version 2.0 (the "License"); 
 *  you may not use this file except in compliance with the License. 
 *  You may obtain a copy of the License at 
 *  
 *  http://www.apache.org/licenses/LICENSE-2.0 
 *  Unless required by applicable law or agreed to in writing, software 
 *  distributed under the License is distributed on an "AS IS" BASIS, 
 *  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
 *  See the License for the specific language governing permissions and 
 *  limitations under the License.
 * 
 */

package gate.creole.opennlp;

import gate.AnnotationSet;
import gate.DocumentContent;
import gate.FeatureMap;
import gate.Resource;
import gate.creole.ANNIEConstants;
import gate.creole.AbstractLanguageAnalyser;
import gate.creole.ExecutionException;
import gate.creole.ResourceInstantiationException;
import gate.util.GateRuntimeException;
import gate.util.SimpleFeatureMapImpl;

import java.io.File;
import java.net.URI;
import java.net.URL;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;

import opennlp.tools.lang.spanish.SentenceDetector;
import opennlp.tools.lang.spanish.TokenChunker;
import opennlp.tools.namefind.NameFinderME;
import opennlp.tools.util.Span;

/**
 * GATE processing resource that tokenizes a document with the OpenNLP
 * Spanish models. For each sentence found by the OpenNLP sentence
 * detector it creates token annotations (merging multi-word tokens
 * reported by the token chunker into a single annotation joined with
 * {@code "_"}) and one sentence annotation.
 * <p>
 * Only {@code language == "spanish"} is supported; {@link #init()} fails
 * fast for any other value.
 */
public class Tokenizer extends AbstractLanguageAnalyser {

	public static final String TAG_DOCUMENT_PARAMETER_NAME = "document";

	public static final String TAG_INPUT_AS_PARAMETER_NAME = "inputASName";

	public static final String TAG_LEXICON_URL_PARAMETER_NAME = "lexiconURL";

	public static final String TAG_RULES_URL_PARAMETER_NAME = "rulesURL";

	public static final String TAG_ENCODING_PARAMETER_NAME = "encoding";

	public static final String BASE_TOKEN_ANNOTATION_TYPE_PARAMETER_NAME = "baseTokenAnnotationType";

	public static final String OUTPUT_ANNOTATION_TYPE_PARAMETER_NAME = "outputAnnotationType";

	public static final String BASE_SENTENCE_ANNOTATION_TYPE_PARAMETER_NAME = "baseSentenceAnnotationType";

	public static final String TAG_OUTPUT_AS_PARAMETER_NAME = "outputASName";

	/**
	 * Validates the mandatory parameters and loads the OpenNLP sentence,
	 * chunker and tokenizer models for the configured language.
	 *
	 * @return this resource, initialised
	 * @throws ResourceInstantiationException if a model URL or the language
	 *           is missing, the language is not supported, or a model fails
	 *           to load
	 * @throws GateRuntimeException if either annotation type parameter is
	 *           missing or blank
	 */
	public Resource init() throws ResourceInstantiationException {

		if (baseTokenAnnotationType == null
				|| baseTokenAnnotationType.trim().length() == 0) {
			throw new GateRuntimeException(
					"No base Token Annotation Type provided!");
		}

		if (baseSentenceAnnotationType == null
				|| baseSentenceAnnotationType.trim().length() == 0) {
			throw new GateRuntimeException(
					"No base Sentence Annotation Type provided!");
		}

		if (tokenizerModelURL == null) {
			throw new ResourceInstantiationException(
					"No URL provided for the tokenizer model!");
		}

		if (chunkerModelURL == null) {
			throw new ResourceInstantiationException(
					"No URL provided for the chunker model!");
		}

		if (sentenceModelURL == null) {
			throw new ResourceInstantiationException(
					"No URL provided for the sentence detector model!");
		}

		if (language == null)
			throw new ResourceInstantiationException("No Language provided!");

		if (language.equals("spanish")) {
			try {
				sentenceDetector = new SentenceDetector(
						Util.absoluteFilePath(sentenceModelURL));
				chunker = new TokenChunker(
						Util.absoluteFilePath(chunkerModelURL));
				tokenizer = new opennlp.tools.lang.spanish.Tokenizer(
						Util.absoluteFilePath(tokenizerModelURL));
			} catch (Exception e) {
				throw new ResourceInstantiationException(e);
			}
		} else {
			// Fail fast: previously an unsupported language left the models
			// null and execute() died later with a NullPointerException.
			throw new ResourceInstantiationException(
					"Unsupported language: " + language);
		}
		return super.init();
	}

	/**
	 * Splits the document into sentences, tokenizes each sentence, merges
	 * multi-word tokens flagged by the chunker, and adds the resulting
	 * token and sentence annotations to the document's default annotation
	 * set.
	 *
	 * @throws ExecutionException wrapping any failure during processing
	 * @throws GateRuntimeException if no document has been set
	 */
	public void execute() throws ExecutionException {
		try {
			if (document == null)
				throw new GateRuntimeException("No document to process!");

			DocumentContent docContent = document.getContent();
			String content = docContent.toString();
			long contLen = docContent.size();

			int sentPosTmp[] = sentenceDetector.sentPosDetect(content);

			// Sentence start offsets; the document start (0) is an implicit
			// first sentence boundary.
			List<Integer> sentPos = new ArrayList<Integer>(
					sentPosTmp.length + 1);
			sentPos.add(0);
			for (int i = 0; i < sentPosTmp.length; i++) {
				sentPos.add(sentPosTmp[i]);
			}

			AnnotationSet annotSet = document.getAnnotations();
			for (int spi = 0, spn = sentPos.size(); spi < spn; spi++) {

				long sentStart = sentPos.get(spi);
				// The sentence ends where the next one starts, or at the end
				// of the document for the last sentence.
				long sentEnd = (spi + 1 < spn) ? sentPos.get(spi + 1) : contLen;
				String sentence = docContent.getContent(sentStart, sentEnd)
						.toString();

				Span[] tokensSpan = tokenizer.tokenizePos(sentence);
				String[] tokens = tokenizer.tokenize(sentence);
				// The chunker tags each token; a CONTINUE tag means the token
				// belongs to the same multi-word unit as the previous one.
				String[] chunks = chunker.find(tokens, Collections.emptyMap());

				List<TokenAnnotation> annotSpans = new ArrayList<TokenAnnotation>();

				for (int ti = 0, tn = tokens.length; ti < tn; ti++) {
					Span span = tokensSpan[ti];

					// Token offsets are sentence-relative; sentStart rebases
					// them to document offsets below.
					long tokenStart = span.getStart();
					long tokenEnd = span.getEnd();

					// Guard against CONTINUE on the first token of a sentence:
					// there is no previous annotation to extend.
					if (chunks[ti].equals(NameFinderME.CONTINUE)
							&& !annotSpans.isEmpty()) {
						// Extend the previous annotation over this token and
						// join the surface strings with an underscore.
						TokenAnnotation last = annotSpans
								.get(annotSpans.size() - 1);
						last.end = tokenEnd + sentStart;
						last.string += "_" + tokens[ti];
					} else {
						TokenAnnotation tuple = new TokenAnnotation();
						tuple.start = tokenStart + sentStart;
						tuple.end = tokenEnd + sentStart;
						tuple.string = tokens[ti];
						annotSpans.add(tuple);
					}
				}

				for (TokenAnnotation span : annotSpans) {
					FeatureMap tokenFeats = new SimpleFeatureMapImpl();
					tokenFeats.put(ANNIEConstants.TOKEN_STRING_FEATURE_NAME,
							span.string);
					// Category is not computed by this PR; left empty for a
					// downstream POS tagger to fill in.
					tokenFeats.put(ANNIEConstants.TOKEN_CATEGORY_FEATURE_NAME,
							"");
					annotSet.add(span.start, span.end, baseTokenAnnotationType,
							tokenFeats);
				}

				// Skip zero-length sentences (GATE rejects empty spans).
				if (sentEnd > sentStart) {
					FeatureMap sentFeats = new SimpleFeatureMapImpl();
					annotSet.add(sentStart, sentEnd,
							baseSentenceAnnotationType, sentFeats);
				}
			}

		} catch (Exception e) {
			throw new ExecutionException(e);
		}
	}

	public void setChunkerModelURL(URL chunkerURL) {
		this.chunkerModelURL = chunkerURL;
	}

	public void setTokenizerModelURL(URL tokenizerURL) {
		this.tokenizerModelURL = tokenizerURL;
	}

	public void setSentenceModelURL(URL sentenceURL) {
		this.sentenceModelURL = sentenceURL;
	}

	public void setLanguage(String language) {
		this.language = language;
	}

	public void setBaseTokenAnnotationType(String baseTokenAnnotationType) {
		this.baseTokenAnnotationType = baseTokenAnnotationType;
	}

	public void setBaseSentenceAnnotationType(String baseSentenceAnnotationType) {
		this.baseSentenceAnnotationType = baseSentenceAnnotationType;
	}

	public String getBaseSentenceAnnotationType() {
		return baseSentenceAnnotationType;
	}

	public String getBaseTokenAnnotationType() {
		return baseTokenAnnotationType;
	}

	public URL getChunkerModelURL() {
		return chunkerModelURL;
	}

	public String getLanguage() {
		return language;
	}

	public URL getSentenceModelURL() {
		return sentenceModelURL;
	}

	public URL getTokenizerModelURL() {
		return tokenizerModelURL;
	}

	public void setInputASName(String newInputASName) {
		inputASName = newInputASName;
	}

	public String getInputASName() {
		return inputASName;
	}

	public String getOutputAnnotationType() {
		return outputAnnotationType;
	}

	public void setOutputAnnotationType(String outputAnnotationType) {
		this.outputAnnotationType = outputAnnotationType;
	}

	public String getOutputASName() {
		return outputASName;
	}

	public void setOutputASName(String outputASName) {
		this.outputASName = outputASName;
	}

	/**
	 * Mutable holder for one pending token annotation: document-level start
	 * and end offsets plus the token's surface string. Static nested class —
	 * it does not need a reference to the enclosing Tokenizer.
	 */
	static class TokenAnnotation {
		long start;

		long end;

		String string;
	}

	// --- CREOLE parameters -------------------------------------------------

	private String baseSentenceAnnotationType;

	private String baseTokenAnnotationType;

	// NOTE(review): inputASName/outputASName/outputAnnotationType are exposed
	// as parameters but never read by execute(), which always uses the
	// default annotation set — confirm whether that is intentional.
	private String inputASName;

	private String outputAnnotationType;

	private String outputASName;

	private URL sentenceModelURL;

	private URL tokenizerModelURL;

	private URL chunkerModelURL;

	private String language;

	// --- OpenNLP components, created in init() -----------------------------

	private opennlp.tools.sentdetect.SentenceDetector sentenceDetector;

	private opennlp.tools.namefind.NameFinder chunker;

	private opennlp.tools.tokenize.Tokenizer tokenizer;

}
