package etxt2db.annotators;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.text.ParseException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Locale;
import java.util.Map;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.dict.DictionaryEntry;
import com.aliasi.dict.ExactDictionaryChunker;
import com.aliasi.dict.MapDictionary;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;

import edu.cmu.minorthird.text.BasicTextLabels;
import edu.cmu.minorthird.text.MonotonicTextLabels;
import edu.cmu.minorthird.text.Span;
import edu.cmu.minorthird.text.TextBaseLoader;
import edu.cmu.minorthird.text.TextLabels;
import etxt2db.api.ClassificationModel;
import etxt2db.corpus.CompleteTextLabels;
import etxt2db.corpus.NoTokenSpan;


/**
 * Dictionary-based classification model: loads a labeled dictionary file and
 * annotates documents by tagging every token whose text exactly matches a
 * dictionary entry with that entry's attribute (label type).
 *
 * <p>Matching is single-token exact equality, optionally case-insensitive
 * (lower-casing uses {@link Locale#ROOT} so behavior does not depend on the
 * JVM's default locale).
 */
public class MyDictionaryClassificationModel extends ClassificationModel implements Serializable {
	private static final long serialVersionUID = -782912453005188658L;
	/** Maps entry text (lower-cased when case-insensitive) to its attribute/type. */
	private Map<String,String> dictionary;
	/** When false, both dictionary entries and tokens are lower-cased before matching. */
	private boolean caseSensitive;

	/**
	 * Builds a case-sensitive dictionary model from the given file.
	 *
	 * @param dictionaryFile labeled dictionary file loadable by {@link TextBaseLoader}
	 * @throws IOException if the file cannot be read
	 * @throws ParseException if the file cannot be parsed
	 */
	public MyDictionaryClassificationModel(File dictionaryFile) throws IOException, ParseException {
		this(dictionaryFile, true);
    }

	/**
	 * Builds a dictionary model from the given file.
	 *
	 * @param dictionaryFile labeled dictionary file loadable by {@link TextBaseLoader}
	 * @param caseSensitive whether token matching is case-sensitive
	 * @throws IOException if the file cannot be read
	 * @throws ParseException if the file cannot be parsed
	 */
	public MyDictionaryClassificationModel(File dictionaryFile, boolean caseSensitive) throws IOException, ParseException {
		this.caseSensitive = caseSensitive;
		TextBaseLoader baseLoader = new TextBaseLoader();
		baseLoader.load(dictionaryFile);
		TextLabels labels = baseLoader.getLabels();

		dictionary = new HashMap<String,String>();

		// Index every labeled span's text by its attribute. Locale.ROOT keeps
		// case-folding stable across JVM default locales (e.g. Turkish 'I').
		for(String attribute : labels.getTypes()){
			for(Span text : labels.getTypeSet(attribute, dictionaryFile.getName())){
				if(caseSensitive){
					dictionary.put(text.asString(), attribute);
				}else{
					dictionary.put(text.asString().toLowerCase(Locale.ROOT), attribute);
				}
			}
		}
    }

	/**
	 * Annotates every document in the given labels: each token whose text
	 * matches a dictionary entry is added (as a character span) under the
	 * entry's attribute.
	 *
	 * @param arg0 labels to annotate; must be a {@link CompleteTextLabels}
	 *     (NOTE(review): the unchecked cast below throws ClassCastException
	 *     otherwise — confirm all callers pass a CompleteTextLabels)
	 */
	@SuppressWarnings("deprecation")
	@Override
	public void annotate(MonotonicTextLabels arg0) {
		Iterator<Span> iter = arg0.getTextBase().documentSpanIterator();
		while(iter.hasNext()){
			Span span = iter.next();
			//The chunking interface of Lingpipe uses strings, so we need to
			//convert our documents to strings.
			for(int i=0; i<span.size(); i++){
				String text = span.getToken(i).getValue();
				if(!caseSensitive){
					text = text.toLowerCase(Locale.ROOT);
				}
				// Single O(1) hash lookup instead of scanning every
				// dictionary key with equals() for each token.
				String attribute = dictionary.get(text);
				if(attribute != null){
					int start = span.getTextToken(i).getLo();
					int end = span.getTextToken(i).getHi();

					//Luckily it is what we want in the Span interface
					NoTokenSpan newSpan = new NoTokenSpan(span, start, end);

					//We add a new Span to the TextLabels
					((CompleteTextLabels)arg0).addNoTokenToType(newSpan, attribute);
				}
			}
		}
	}

	/**
	 * Returns an annotated copy of the given labels, leaving the input untouched.
	 *
	 * @param arg0 labels to copy and annotate
	 * @return a new {@link CompleteTextLabels} over the same text base, annotated
	 */
	@Override
	public TextLabels annotatedCopy(TextLabels arg0) {
		//Just create a new TextLabels (copy) before annotate
		CompleteTextLabels copy = new CompleteTextLabels(arg0.getTextBase());
		annotate(copy);
		//Return the new TextLabels
		return copy;
	}

	/**
	 * No explanation is provided for dictionary matches.
	 *
	 * @return the empty string
	 */
	@Override
	public String explainAnnotation(TextLabels arg0, Span arg1) {
		return "";
	}
}
