package etxt2db.tokenizer;

import java.util.Arrays;
import java.util.HashSet;
import java.util.Map;
import java.util.Set;

import etxt2db.corpus.MyAnnotation;
import etxt2db.api.Document;

/**
 * An implementation of Tokenizer in which text is split into tokens
 * according to UCompare types as specified in the type mappings configuration file.
 * 
 * @author Gonçalo Simões
 * @author Rui Lageira
 */
public class UCompareTokenizer implements Tokenizer {

	private static final long serialVersionUID = 9006586818246894946L;

	/** The UCompare annotation type whose spans delimit the tokens. */
	private final String type;

	/**
	 * Constructor receiving the type serving as the splitting context.
	 * 
	 * @param type the String representing the UCompare type
	 */
	public UCompareTokenizer(String type) {
		this.type = type;
	}

	/**
	 * Splits the given section of the document into tokens, using the spans
	 * annotated with the configured UCompare type.
	 * 
	 * @param text the document whose annotations are consulted
	 * @param section the index of the section to tokenize
	 * @return the token spans contained in the section, rebased to be
	 *         relative to the section start and sorted in natural order;
	 *         empty if the document has no annotations for the configured type
	 */
	@Override
	public Span[] tokenize(Document text, int section) {
		
		Map<Span, MyAnnotation> tokens = text.getAnnotations().get(type);
		
		if(tokens==null){
			System.err.println("No annotation for type " + type + "! The behaviour of the component may not be what you expect! Please check your workflow!");
			return new Span[]{};
		}
	
		Set<Span> tokenSpans = getSectionSpans(tokens.keySet(), text.getSectionSpan(section));
		
		// toArray replaces the manual index-copy loop; sorting restores a
		// deterministic order since the HashSet built above has none.
		Span[] spans = tokenSpans.toArray(new Span[0]);
		
		Arrays.sort(spans);
		
		return spans;
	}
	
	/**
	 * Keeps only the spans fully contained in the given section and rebases
	 * them so their offsets are relative to the section start.
	 * 
	 * @param original the candidate spans, in document coordinates
	 * @param section the span delimiting the section of interest
	 * @return the contained spans translated to section-relative coordinates
	 */
	private Set<Span> getSectionSpans(Set<Span> original, Span section) {
		Set<Span> result = new HashSet<Span>();
		
		int sectionStart = section.getStart();
		
		for(Span s : original){
			if(section.contains(s)){
				result.add(new Span(s.getStart()-sectionStart,s.getEnd()-sectionStart));
			}
		}
		
		return result;
	}

}
