package com.huanmeiqi.lucene.demo.my;

import org.apache.lucene.analysis.Tokenizer;

import java.io.IOException;

/**
 * @author cq.Wang
 * @date 2018/8/9 14:01
 * @description
 */

/**
 * Stub of a suffix-based {@link Tokenizer}.
 *
 * <p>NOTE(review): this class is visibly incomplete — none of the fields below
 * are read or written by {@link #incrementToken()}, and no token is ever
 * emitted. Presumably the intent is to emit every suffix of the input term
 * (e.g. "abc" → "abc", "bc", "c") — TODO confirm and implement.
 */
public class SuffixTokenizer extends Tokenizer {

    /** Scratch buffer for characters read from {@code input}; unused in the current stub. */
    private final char[] buffer = new char[1024];
    /** Start offset of the current suffix within {@link #buffer}; unused in the current stub. */
    private int suffixOffset;
    /**
     * Never assigned anywhere in this class, so its value is always 0.
     * Was a mutable non-final static; made {@code final} (keeping the value 0)
     * to prevent accidental shared-state mutation.
     */
    private static final int MAX_SIZE = 0;
    /** Length of the current term in {@link #buffer}; unused in the current stub. */
    private int termSize;

    /**
     * Advances to the next token.
     *
     * <p>Current stub behavior: clears all attributes, reads and discards a
     * single character from {@code input}, and reports end-of-stream.
     *
     * @return always {@code false} — this stub never produces a token
     * @throws IOException if reading from the underlying {@code input} Reader fails
     */
    @Override
    public boolean incrementToken() throws IOException {
        // Clear all term attributes (standard Tokenizer contract before
        // populating — or, here, not populating — a token).
        clearAttributes();
        // NOTE(review): one character is consumed and discarded, and the -1
        // EOF sentinel is never checked. Kept byte-for-byte to preserve the
        // existing (stub) behavior. TODO: replace with real suffix
        // tokenization that fills `buffer` and emits tokens.
        input.read();

        return false;
    }
}
