package edu.unika.aifb.graphindex.searcher.keyword.analyzer;

import java.io.IOException;
import java.util.LinkedList;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.TermAttribute;

/**
 * A {@link TokenFilter} that splits camel-case tokens at interior uppercase
 * letters, e.g. {@code "fooBarBaz"} is emitted as {@code "fooBar"}... more
 * precisely: a split happens before every uppercase character strictly inside
 * the token (positions 1 .. length-2), so {@code "fooBarBaz"} yields
 * {@code "foo"}, {@code "Bar"}, {@code "Baz"}. The first character and the
 * last character are never split points.
 *
 * <p>NOTE(review): this uses the legacy {@code TermAttribute} API (Lucene
 * 2.9/3.x); newer Lucene versions replace it with {@code CharTermAttribute}.
 * Only the term attribute is rewritten for the buffered sub-tokens — offset
 * and position-increment attributes keep the values of the source token.
 */
public class CapitalizationSplitterTokenFilter extends TokenFilter {

	/** Sub-tokens produced by {@link #decompose} that still await emission. */
	protected final LinkedList<String> tokens;

	/** Term attribute of the underlying stream; rewritten for each sub-token. */
	private final TermAttribute termAttr;

	/**
	 * Wraps the given stream.
	 *
	 * @param input the token stream whose tokens are split at capitals
	 */
	protected CapitalizationSplitterTokenFilter(TokenStream input) {
		super(input);
		this.tokens = new LinkedList<String>();
		this.termAttr = (TermAttribute) addAttribute(TermAttribute.class);
	}

	/**
	 * Emits buffered sub-tokens first; when the buffer is empty, pulls the
	 * next token from the wrapped stream and decomposes it.
	 *
	 * @return {@code true} while a token is available, {@code false} at end of stream
	 * @throws IOException propagated from the underlying stream
	 */
	@Override
	public boolean incrementToken() throws IOException {
		while (true) {
			if (!tokens.isEmpty()) {
				// Clear attributes only when emitting a buffered sub-token;
				// doing it before input.incrementToken() would corrupt the stream.
				clearAttributes();
				String term = tokens.removeFirst();
				termAttr.setTermBuffer(term);
				termAttr.setTermLength(term.length());
				// Offsets and other attributes are intentionally left as-is.
				return true;
			}
			if (!input.incrementToken()) {
				return false; // underlying stream exhausted
			}
			decompose(termAttr.termBuffer(), termAttr.termLength(), termAttr.term());
		}
	}

	/**
	 * Splits {@code term} before each interior uppercase character and queues
	 * the pieces onto {@link #tokens}.
	 *
	 * @param termBuffer   raw character buffer of the current token
	 * @param bufferLength number of valid characters in {@code termBuffer}
	 * @param term         the token text as a String ({@code termBuffer} rendered)
	 */
	protected void decompose(final char[] termBuffer, final int bufferLength, String term) {
		// Guard: an empty term would otherwise enqueue an empty token.
		if (bufferLength == 0) {
			return;
		}
		int start = 0;                           // start of the current piece
		int j = Math.min(1, bufferLength - 1);   // never split at position 0
		while (j < bufferLength - 1) {
			if (Character.isUpperCase(termBuffer[j])) {
				tokens.add(term.substring(start, j));
				start = j;
			}
			// BUGFIX: j must advance unconditionally. The original only
			// incremented it in the else branch, so an interior uppercase
			// character was re-tested forever — an infinite loop that kept
			// appending empty substrings.
			j++;
		}
		tokens.add(term.substring(start)); // trailing piece (or whole term)
	}
}
