package org.elasticsearch.index.analysis;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.IOException;
import java.util.Arrays;

/**
 * A simple whitespace tokenizer: splits the input {@code Reader} on
 * {@link Character#isWhitespace(int)} and emits each maximal run of
 * non-whitespace characters as one token.
 */
public class DemoTokenizer extends Tokenizer {

    /** Initial token buffer capacity; the buffer grows as needed. */
    private static final int INITIAL_BUFFER_SIZE = 1024;

    /** Receives the text of the current token. */
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

    /**
     * Advances to the next whitespace-delimited token.
     *
     * @return {@code true} if a token was produced, {@code false} at end of input
     * @throws IOException if reading from {@code input} fails
     */
    @Override
    public boolean incrementToken() throws IOException {
        // Reset all attributes before producing the next token.
        clearAttributes();

        char[] buffer = new char[INITIAL_BUFFER_SIZE];
        int len = 0;
        int c;
        while ((c = input.read()) != -1) {
            if (Character.isWhitespace(c)) {
                if (len > 0) {
                    termAtt.copyBuffer(buffer, 0, len);
                    return true;
                }
                // len == 0: skip leading / consecutive whitespace.
            } else {
                if (len == buffer.length) {
                    // Grow the buffer so tokens longer than the initial
                    // capacity do not throw ArrayIndexOutOfBoundsException.
                    buffer = Arrays.copyOf(buffer, buffer.length * 2);
                }
                buffer[len++] = (char) c;
            }
        }
        // BUG FIX: the original returned true here for a token terminated by
        // end-of-input without ever copying it into the term attribute, so
        // the final token of every stream was silently dropped (the attribute
        // had just been cleared and stayed empty).
        if (len > 0) {
            termAtt.copyBuffer(buffer, 0, len);
            return true;
        }
        return false;
    }
}
