package sharewithus.analyzer.lucene;

import java.io.IOException;
import java.io.Reader;
import java.util.List;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;
import org.apache.lucene.analysis.tokenattributes.TypeAttribute;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import sharewithus.analyzer.seg.BigramSegmenter;
import sharewithus.analyzer.seg.MaxLenthSegmenter;
import sharewithus.analyzer.seg.SUSegmenter;
import sharewithus.analyzer.seg.SUToken;

/**
 * Lucene 3.0 Tokenizer adapter class: bridges the project's
 * {@code SUSegmenter} implementations to the Lucene analysis API.
 * 
 * @author terry
 * 
 */
public final class SUTokenizer extends Tokenizer {

	/** Shared logger (kept {@code public} for backward compatibility). */
	public static final Logger log = LoggerFactory.getLogger(SUTokenizer.class);

	private CharTermAttribute termAtt; // term text attribute
	private OffsetAttribute offsetAtt; // start/end character-offset attribute
	private TypeAttribute typeAtt; // token type (POS tag) attribute
	private static final int IO_BUFFER_SIZE = 4096;
	private char[] ioBuffer = new char[IO_BUFFER_SIZE];

	private boolean done; // true once the input has been fully read and segmented
	private int offset = 0; // index of the next token to emit from {@link #al}
	private int upto = 0; // number of chars read into ioBuffer
	List<SUToken> al; // segmentation result (package-private, kept as-is for compatibility)

	private SUSegmenter _SUImplement = new BigramSegmenter();
	public static final int MODE_MAX_LENGTH = 0;
	public static final int MODE_BIGRAM = 1;

	/**
	 * Creates a tokenizer over {@code reader}.
	 *
	 * @param reader          source of the text to tokenize
	 * @param isMaxWordLength if {@code true}, segment with {@link MaxLenthSegmenter};
	 *                        otherwise use the default {@link BigramSegmenter}
	 */
	public SUTokenizer(Reader reader, boolean isMaxWordLength) {
		super(reader);
		this.typeAtt = addAttribute(TypeAttribute.class);
		this.offsetAtt = addAttribute(OffsetAttribute.class);
		this.termAtt = addAttribute(CharTermAttribute.class);
		this.done = false;
		if (isMaxWordLength)
			_SUImplement = new MaxLenthSegmenter();
	}

	/**
	 * Grows {@link #ioBuffer} to at least {@code newSize} chars,
	 * preserving its current content. No-op if already big enough.
	 */
	private void resizeIOBuffer(int newSize) {
		if (ioBuffer.length < newSize) {
			// Not big enough; create a new array with slight
			// over allocation and preserve content
			final char[] newCharBuffer = new char[newSize];
			System.arraycopy(ioBuffer, 0, newCharBuffer, 0, ioBuffer.length);
			ioBuffer = newCharBuffer;
		}
	}

	/**
	 * Emits the next token. On first call, slurps the entire input into
	 * {@link #ioBuffer} and runs the segmenter once; subsequent calls
	 * walk the cached result list.
	 *
	 * @return {@code true} if a token was produced, {@code false} at end of stream
	 * @throws IOException if reading from the underlying {@code Reader} fails
	 */
	@Override
	public boolean incrementToken() throws IOException {
		if (!done) {
			done = true;
			upto = 0;
			offset = 0;
			// Read the whole input, doubling the buffer whenever it fills up.
			while (true) {
				final int length = input.read(ioBuffer, upto, ioBuffer.length
						- upto);
				if (length == -1)
					break;
				upto += length;
				if (upto == ioBuffer.length)
					resizeIOBuffer(upto * 2);
			}
			// Token start/end offsets refer to this string, which is a
			// 1:1 view of ioBuffer[0, upto).
			String txt = new String(ioBuffer, 0, upto);

			al = _SUImplement.segment(txt);
		}

		if (al != null && offset < al.size()) {
			SUToken token = al.get(offset);
			if (token != null) {
				// Lucene contract: clear attribute state before producing
				// EACH token, not just once per stream.
				clearAttributes();
				termAtt.copyBuffer(ioBuffer, token.getStart(), token.getEnd() - token.getStart());
				// Run offsets through correctOffset() so CharFilter offset
				// correction works (end() already does this; be consistent).
				offsetAtt.setOffset(correctOffset(token.getStart()),
						correctOffset(token.getEnd()));
				typeAtt.setType(token.getTaggedPOS().toString());
				++offset;
				return true;
			}
			// A null entry terminates the stream (preserved legacy behavior).
			return false;
		}

		return false;
	}

	/** Rewinds this tokenizer so it can re-read the (already set) input. */
	@Override
	public void reset() throws IOException {
		super.reset();
		this.done = false;
		this.upto = 0;
	}

	/** Re-targets this tokenizer at a new {@code Reader} and rewinds it. */
	@Override
	public void reset(Reader input) throws IOException {
		super.reset(input);
		reset();
	}

	/** Sets the final offset to the corrected total number of chars read. */
	@Override
	public void end() throws IOException {
		// set final offset
		final int finalOffset = correctOffset(upto);
		offsetAtt.setOffset(finalOffset, finalOffset);
	}
}
