package code.gateplugins;

import gate.Annotation;
import gate.AnnotationSet;
import gate.Factory;
import gate.FeatureMap;
import gate.Resource;
import gate.creole.AbstractLanguageAnalyser;
import gate.creole.ExecutionException;
import gate.creole.ResourceInstantiationException;
import gate.creole.metadata.CreoleParameter;
import gate.creole.metadata.CreoleResource;
import gate.creole.metadata.Optional;
import gate.creole.metadata.RunTime;
import gate.util.InvalidOffsetException;

import java.io.IOException;
import java.net.URL;
import java.util.Locale;

import org.apache.commons.lang.StringEscapeUtils;

import cmu.arktweetnlp.RunTagger.Decoder;
import cmu.arktweetnlp.Tagger;
import cmu.arktweetnlp.Twokenize;
import cmu.arktweetnlp.impl.ModelSentence;
import cmu.arktweetnlp.impl.Sentence;

@CreoleResource(name = "Twitter Tagger PR", comment = "Wrapper for ark-twitter-nlp tagger. " +
		"Specialized part of speech tagger and tokenizer for Twitter.")
public class TwitterTaggerPr extends AbstractLanguageAnalyser {
	private static final long serialVersionUID = 1L;
	
	String modelFilePath;
	URL modelFileUrl;
	String outputAS;
	String inputAS;
	private Tagger tagger;
	private boolean justTokenize;
	private Decoder decoder;

	public Resource init() throws ResourceInstantiationException {
		tagger = new Tagger();
		try {
			if(modelFilePath != null && modelFilePath.length() > 0) {
				tagger.loadModel(modelFilePath);
			} else {
				tagger.loadModel(modelFileUrl.toString());
			}
		} catch (IOException e) {
			throw new ResourceInstantiationException(e);
		}
		return this;
	}

	public void execute() throws ExecutionException {
		AnnotationSet outAS = getAS(outputAS);
		AnnotationSet inAS = getAS(inputAS);
		try {
			for (Annotation sentAnn : inAS.get("Sentence")) {
				long start = sentAnn.getStartNode().getOffset();
				long end = sentAnn.getEndNode().getOffset();
				Sentence sentence = new Sentence();
				String text = document.getContent().getContent(start, end).toString();
				sentence.tokens = Twokenize.tokenizeRawTweetText(text);
				ModelSentence mSentence = null;
				if (sentence.T() > 0 && !justTokenize) {
					mSentence = new ModelSentence(sentence.T());
					tagger.featureExtractor.computeFeatures(sentence, mSentence);
					if(decoder.equals(Decoder.GREEDY)) {
						tagger.model.greedyDecode(mSentence, true);	
					} else {
						tagger.model.viterbiDecode(mSentence);
					}
				}
				addTokens(outAS, start, sentence, mSentence, text);
			}
		} catch (InvalidOffsetException e) {
			throw new ExecutionException(e);
		}
	}

	private AnnotationSet getAS(String name) {
		AnnotationSet as = null;
		if (name != null) {
			as = document.getAnnotations(name);
		} else {
			as = document.getAnnotations();
		}
		return as;
	}

	private void addTokens(AnnotationSet out, long sentStart, 
			Sentence sentence, ModelSentence mSentence,
			String text) throws InvalidOffsetException {
		int inStart = 0;
		int inEnd = 0;
		for (int i = 0; i < sentence.tokens.size(); ++i) {
			FeatureMap fm = Factory.newFeatureMap();
			if(mSentence != null) {
				String label = tagger.model.labelVocab.name(mSentence.labels[i]);
				fm.put("category", label);
				// TODO: add a showConfidence property.
				if(!decoder.equals(Decoder.VITERBI)) {
					String conf = String.format("%.4f", mSentence.confidences[i]);
					fm.put("conf", conf);
				}
			}
			String tok = sentence.tokens.get(i);
			
			// A hack to find the right start position and fix multiple whitespaces.
			inStart = text.indexOf(tok.charAt(0), inEnd);
			int len = tok.length();
			if(inStart < 0) {
				// a hack to handle character escaping (sometimes the html chars are not escaped)
				String escapedTok = StringEscapeUtils.escapeHtml(tok);
				inStart = text.indexOf(escapedTok, inEnd);
				len = escapedTok.length();
			}
			if(inStart >= 0) {
				inEnd = inStart + len;
				out.add(sentStart + inStart, sentStart + inEnd, "Token", fm);
			}
		}
	}
	
	@RunTime
	@CreoleParameter(comment = "If true, just tokenize the input sentences.",
			defaultValue = "false")
	public void setJustTokenize(Boolean justTokenize) {
		this.justTokenize = justTokenize;
	}
	
	@RunTime
	@CreoleParameter(comment = "Algorithm type. " +
			"Confidence won't be displayed with Viterbi.", 
	defaultValue = "GREEDY")
	public void setDecoder(Decoder decoder) {
		this.decoder = decoder;
	}
	
	@Optional
	@RunTime
	@CreoleParameter(comment = "name of the annotationSet used for output")
	public void setOutputAnnotationSetName(String setName) {
		this.outputAS = setName;
	}
	
	@Optional
	@RunTime
	@CreoleParameter(comment = "name of the annotationSet used for input")
	public void setInputAnnotationSetName(String setName) {
		this.inputAS = setName;
	}
	
	@CreoleParameter(comment = "Url to the model file.",
			disjunction = "modelPath")
	public void setModelFileUrl(URL modelFileUrl) {
		this.modelFileUrl = modelFileUrl;
	}
	
	@CreoleParameter(comment = "Path to the model file. Default value points"
			+ " to a resource in the ark-tweet-nlp jar", 
			defaultValue = "/cmu/arktweetnlp/model.20120919",
			disjunction = "modelPath")
	public void setModelFilePath(String modelFilePath) {
		this.modelFilePath = modelFilePath;
	}
	
	public String getModelFilePath() {
		return modelFilePath;
	}

	public void reInit() throws ResourceInstantiationException {
		init();
	}
	
	public Boolean getJustTokenize() {
		return justTokenize;
	}

	public URL getModelFileUrl() {
		return modelFileUrl;
	}

	public String getOutputAnnotationSetName() {
		return outputAS;
	}
	
	public String getInputAnnotationSetName() {
		return inputAS;
	}

	public Decoder getDecoder() {
		return decoder;
	}
}
