package com.wurd.wurdAnalysis;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;

import java.io.IOException;
import java.util.Iterator;
import java.util.Locale;

/**
 * Lucene {@link Tokenizer} that delegates segmentation of the {@code input}
 * reader to the shared {@link WurdAnalysis} singleton and emits each segment
 * as a lower-cased term.
 *
 * <p>NOTE(review): configuration goes through a process-wide singleton, so two
 * tokenizers with different constructor arguments would share one analyzer —
 * confirm this is intended.
 */
public class WurdTokenizer extends Tokenizer {

    /** Term attribute receiving each emitted token's text. */
    private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);

    /**
     * Iterator over the analyzed segments of the current {@code input};
     * lazily created on the first {@link #incrementToken()} call and
     * discarded by {@link #reset()}.
     */
    private Iterator<String> result = null;

    /** Creates a tokenizer backed by the default {@link WurdAnalysis} instance. */
    public WurdTokenizer() {
    }

    /**
     * Creates a tokenizer after configuring the shared {@link WurdAnalysis}
     * singleton with the given THULAC options.
     *
     * @param thulac_user_specified_dict_name user dictionary file name
     * @param thulac_separator                separator character between word and tag
     * @param thulac_useT2S                   whether to convert traditional to simplified Chinese
     * @param thulac_seg_only                 whether to segment only (no POS tagging)
     * @param thulac_useFilter                whether to apply the stop-word filter
     * @param thulac_prefix                   model/resource path prefix
     */
    public WurdTokenizer(String thulac_user_specified_dict_name, Character thulac_separator, boolean thulac_useT2S, boolean thulac_seg_only, boolean thulac_useFilter, String thulac_prefix) {
        WurdAnalysis.getInstance(thulac_user_specified_dict_name,
                thulac_separator, thulac_useT2S, thulac_seg_only, thulac_useFilter, thulac_prefix);
    }

    /*
     * (non-Javadoc)
     * @see org.apache.lucene.analysis.TokenStream#incrementToken()
     */
    @Override
    public boolean incrementToken() throws IOException {
        // Clear all token attributes before producing the next token.
        clearAttributes();
        if (result == null) {
            try {
                result = WurdAnalysis.getInstance().analysis(input).iterator();
            } catch (IOException e) {
                // Already the declared type — propagate as-is.
                throw e;
            } catch (Exception e) {
                // Propagate with cause instead of printStackTrace(): silently
                // swallowing made analysis failures look like an empty stream.
                throw new IOException("Wurd analysis of input failed", e);
            }
        }
        if (result.hasNext()) {
            String next = result.next();
            // Lower-case with a fixed locale for locale-independent behavior.
            // Do NOT truncate to next.length() afterwards: lower-casing can
            // change string length (e.g. "İ" -> "i\u0307"), and the old
            // setLength(next.length()) call could corrupt such terms.
            termAtt.setEmpty().append(next.toLowerCase(Locale.ROOT));
            return true;
        }
        return false;
    }

    /*
     * (non-Javadoc)
     * @see org.apache.lucene.analysis.Tokenizer#reset(java.io.Reader)
     */
    @Override
    public void reset() throws IOException {
        super.reset();
        // Drop the stale iterator so the next incrementToken() re-analyzes
        // the (possibly new) input reader.
        result = null;
    }

    @Override
    public final void end() throws IOException {
        super.end();
    }
}
