package org.zz.lucene_anlyzer;

import java.io.IOException;
import java.util.HashSet;
import java.util.Set;

import org.apache.lucene.analysis.TokenFilter;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

/**
 * A {@link TokenFilter} that passes tokens through until {@code tokenCount}
 * <em>distinct</em> token texts have been emitted, then ends the stream.
 * Duplicate token texts do not consume the budget.
 *
 * <p>Not thread-safe, like all Lucene token streams.
 */
public class RestrictedTokenCountFilter extends TokenFilter {

	/** Term text of the current token; used to identify distinct tokens. */
	private final CharTermAttribute termAtt = input.addAttribute(CharTermAttribute.class);
	/** Maximum number of distinct token texts to emit. */
	private final int tokenCount;
	/** Distinct token texts seen so far in the current stream run. */
	private final Set<String> tokenSet;

	/**
	 * Creates a filter limited to {@code tokenCount} distinct token texts.
	 *
	 * @param input      the upstream token stream
	 * @param tokenCount maximum number of distinct tokens to emit; values
	 *                   {@code <= 0} yield an empty stream
	 */
	public RestrictedTokenCountFilter(TokenStream input, int tokenCount) {
		super(input);
		this.tokenCount = tokenCount;
		this.tokenSet = new HashSet<String>();
	}

	@Override
	public void reset() throws IOException {
		super.reset();
		// Token streams may be reused across documents; forget the
		// distinct tokens counted during the previous run.
		tokenSet.clear();
	}

	@Override
	public boolean incrementToken() throws IOException {
		// Stop once the distinct-token budget is exhausted.
		if (tokenSet.size() >= tokenCount) {
			return false;
		}
		if (!input.incrementToken()) {
			return false;
		}
		// BUG FIX: the original returned false immediately after adding the
		// token that filled the set, silently swallowing the tokenCount-th
		// distinct token (so tokenCount=N emitted at most N-1 tokens).
		// Emit it here; the guard above ends the stream on the next call.
		tokenSet.add(termAtt.toString());
		return true;
	}
}
