package etxt2db.annotators;
import java.io.File;
import java.io.IOException;
import java.io.Serializable;
import java.nio.charset.StandardCharsets;
import java.text.ParseException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;

import org.arabidopsis.ahocorasick.AhoCorasick;
import org.arabidopsis.ahocorasick.SearchResult;

import com.aliasi.chunk.Chunk;
import com.aliasi.chunk.Chunking;
import com.aliasi.dict.DictionaryEntry;
import com.aliasi.dict.ExactDictionaryChunker;
import com.aliasi.dict.MapDictionary;
import com.aliasi.tokenizer.IndoEuropeanTokenizerFactory;

import edu.cmu.minorthird.text.BasicTextLabels;
import edu.cmu.minorthird.text.MonotonicTextLabels;
import edu.cmu.minorthird.text.Span;
import edu.cmu.minorthird.text.TextBaseLoader;
import edu.cmu.minorthird.text.TextLabels;
import etxt2db.api.ClassificationModel;
import etxt2db.corpus.CompleteTextLabels;
import etxt2db.corpus.NoTokenSpan;


/**
 * A classification model that annotates text by exact dictionary lookup.
 *
 * <p>Dictionary surface forms are loaded into an Aho-Corasick byte trie; at
 * annotation time every document is scanned once and each match is added to
 * the labels as a {@link NoTokenSpan} of the entry's type. Matching may be
 * case-sensitive or case-insensitive (entries and documents are then both
 * lower-cased with {@link Locale#ROOT} so the behavior is locale-independent).
 */
public class NaiveDictionaryClassificationModel extends ClassificationModel implements Serializable {
	private static final long serialVersionUID = -782912453005188658L;

	// Whether lookup distinguishes case; when false, all keys are lower-cased.
	private boolean caseSensitive;
	// Aho-Corasick automaton over the UTF-8 bytes of the dictionary entries.
	private AhoCorasick trie;
	// Maps each (normalized) dictionary surface form to its annotation type.
	private Map<String,String> types = new HashMap<String,String>();

	/**
	 * Builds a case-sensitive model from a Minorthird-labeled dictionary file.
	 *
	 * @param dictionaryFile file loadable by {@link TextBaseLoader}
	 * @throws IOException if the file cannot be read
	 * @throws ParseException if the file cannot be parsed
	 */
	public NaiveDictionaryClassificationModel(File dictionaryFile) throws IOException, ParseException {
		this(dictionaryFile, true);
	}

	/**
	 * Builds a model from a Minorthird-labeled dictionary file.
	 *
	 * @param dictionaryFile file loadable by {@link TextBaseLoader}; each labeled
	 *        span becomes a dictionary entry of its label's type
	 * @param caseSensitive whether lookup distinguishes case
	 * @throws IOException if the file cannot be read
	 * @throws ParseException if the file cannot be parsed
	 */
	public NaiveDictionaryClassificationModel(File dictionaryFile, boolean caseSensitive) throws IOException, ParseException {
		this.caseSensitive = caseSensitive;
		TextBaseLoader baseLoader = new TextBaseLoader();
		baseLoader.load(dictionaryFile);
		TextLabels labels = baseLoader.getLabels();

		trie = new AhoCorasick();
		for (String attribute : labels.getTypes()) {
			for (Span text : labels.getTypeSet(attribute, dictionaryFile.getName())) {
				addEntry(text.asString(), attribute);
			}
		}
		trie.prepare();
	}

	/**
	 * Builds a model from an in-memory list of dictionary entries, all of one type.
	 *
	 * @param dic dictionary surface forms
	 * @param type the annotation type assigned to every entry
	 * @param caseSensitive whether lookup distinguishes case
	 */
	public NaiveDictionaryClassificationModel(List<String> dic, String type, boolean caseSensitive) {
		this.caseSensitive = caseSensitive;

		trie = new AhoCorasick();
		for (String entry : dic) {
			addEntry(entry, type);
		}
		trie.prepare();
	}

	/** Normalizes one dictionary entry and registers it in the trie and type map. */
	private void addEntry(String surface, String type) {
		// Locale.ROOT avoids locale-dependent lower-casing (e.g. the Turkish
		// dotless i), which would silently change matching on some systems.
		String key = caseSensitive ? surface : surface.toLowerCase(Locale.ROOT);
		// BUG FIX: use an explicit charset instead of the platform default so the
		// byte trie and later match offsets are deterministic across JVMs.
		trie.add(key.getBytes(StandardCharsets.UTF_8), key);
		types.put(key, type);
	}

	/**
	 * Scans every document in {@code arg0} for dictionary matches and records
	 * each one as a {@link NoTokenSpan} of the matching entry's type.
	 *
	 * <p>The trie works on UTF-8 bytes, so for non-ASCII documents the byte
	 * offsets reported by the search do not equal character offsets; such
	 * matches are re-aligned against the character string before the span is
	 * created. {@code arg0} must actually be a {@link CompleteTextLabels}.
	 *
	 * @param arg0 the labels to annotate (cast to {@link CompleteTextLabels})
	 */
	@SuppressWarnings("deprecation")
	@Override
	public void annotate(MonotonicTextLabels arg0) {
		System.out.println("Starting now!");
		long time = System.currentTimeMillis();
		Iterator<Span> iter = arg0.getTextBase().documentSpanIterator();
		while (iter.hasNext()) {
			Span span = iter.next();
			String document = span.asString();
			if (!caseSensitive) {
				// Lower-case once; the original lower-cased the document twice.
				document = document.toLowerCase(Locale.ROOT);
			}
			byte[] byteRepresentation = document.getBytes(StandardCharsets.UTF_8);

			// Byte offsets equal char offsets only for pure-ASCII documents;
			// otherwise every match window must be re-aligned below.
			boolean needsConfirmation = document.length() != byteRepresentation.length;

			Iterator<SearchResult> i = trie.search(byteRepresentation);
			while (i.hasNext()) {
				SearchResult result = i.next();
				int end = result.getLastIndex();
				// Several entries can end at the same position; keep the longest.
				Set<Object> out = result.getOutputs();
				String bestOutput = null;
				for (Object o : out) {
					String str = (String) o;
					if (bestOutput == null || bestOutput.length() < str.length()) {
						bestOutput = str;
					}
				}
				int start = end - bestOutput.length();

				if (needsConfirmation) {
					// Clamp the window inside the char string, then slide it left
					// until the characters actually match the dictionary entry.
					if (end > document.length()) {
						end = document.length();
						start = end - bestOutput.length();
					}
					while (start >= 0 && !lowMemorySubstringComparison(document, bestOutput, start, end)) {
						start--;
						end--;
					}
					if (start < 0) {
						// BUG FIX: the original loop had no lower bound and threw
						// StringIndexOutOfBoundsException when alignment failed;
						// skip the match instead.
						continue;
					}
				}

				// Char offsets relative to the document, shifted into the
				// coordinates the Span interface expects.
				NoTokenSpan newSpan = new NoTokenSpan(span, span.getLoChar() + start, span.getLoChar() + end);

				// Record the new span in the TextLabels.
				((CompleteTextLabels) arg0).addNoTokenToType(newSpan, types.get(bestOutput));
			}
		}
		System.out.println((System.currentTimeMillis() - time) + " milliseconds passed");
	}

	/**
	 * Compares {@code document[start,end)} with {@code output} character by
	 * character, avoiding the allocation a {@code substring(...).equals(...)}
	 * would cause.
	 *
	 * @return true iff the window is in bounds and matches {@code output}
	 */
	private boolean lowMemorySubstringComparison(String document, String output, int start, int end) {
		// Defensive bounds check so callers can probe windows safely.
		if (start < 0 || end > document.length()) {
			return false;
		}
		for (int i = start; i < end; i++) {
			if (document.charAt(i) != output.charAt(i - start)) {
				return false;
			}
		}
		return true;
	}

	@Override
	public TextLabels annotatedCopy(TextLabels arg0) {
		// Create a fresh copy of the labels, annotate it, and return it;
		// the input labels are left untouched.
		CompleteTextLabels copy = new CompleteTextLabels(arg0.getTextBase());
		annotate(copy);
		return copy;
	}

	/** No explanation is available for this naive model; always returns "". */
	@Override
	public String explainAnnotation(TextLabels arg0, Span arg1) {
		return "";
	}
}
