package org.wltea.analyzer.lucene;

import java.io.IOException;
import java.io.Reader;

import org.apache.lucene.analysis.Tokenizer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.OffsetAttribute;

/**
 * A bigram tokenizer that emits:
 * <ul>
 *   <li>runs of digits as a single token,</li>
 *   <li>overlapping two-letter tokens for ASCII English runs (lower-cased),</li>
 *   <li>overlapping two-character tokens for other letter (CJK) runs.</li>
 * </ul>
 * Full-width ASCII forms (U+FF01..U+FF5E) are folded to their half-width
 * equivalents before classification. All other characters are separators.
 *
 * <p>Not thread-safe; a single instance must only be used by one thread,
 * per the Lucene TokenStream contract.
 */
public class BigramTokenizer extends Tokenizer {

	private final CharTermAttribute termAtt = addAttribute(CharTermAttribute.class);
    private final OffsetAttribute offsetAtt = addAttribute(OffsetAttribute.class);

    // Character classes tracked by the tokenizing state machine.
    static final int TYPE_NONE = 0;
    static final int TYPE_DIGIT = 1;
    static final int TYPE_EN = 2;
    static final int TYPE_CJK = 3;
    private int type = TYPE_NONE;

    private static final int MAX_WORD_LEN = 255;
    private static final int IO_BUFFER_SIZE = 1024;

    private int offset = 0;       // absolute offset of the next char to read
    private int bufferIndex = 0;  // read position inside ioBuffer
    private int dataLen = 0;      // chars currently in ioBuffer; -1 once EOF is reached
    private int length = 0;       // chars accumulated in buffer for the pending token
    private final char[] buffer = new char[MAX_WORD_LEN];
    private final char[] ioBuffer = new char[IO_BUFFER_SIZE];
    // When true, the last consumed char is re-read on the next iteration so
    // that consecutive bigrams overlap by one character.
    private boolean backtracking = false;
    private boolean repeat = false;
    private boolean preTokened = false;

	/**
	 * Creates a bigram tokenizer over the given character stream.
	 *
	 * @param reader the input to tokenize; managed by the Tokenizer superclass
	 */
	public BigramTokenizer(Reader reader){
		super(reader);
	}


	int start = 0;      // start offset of the token currently being built
	char currentChar ;  // char most recently taken from ioBuffer (after width folding)

	/**
	 * Advances to the next token, filling the term and offset attributes.
	 *
	 * @return {@code true} if a token was produced, {@code false} at end of input
	 * @throws IOException if reading from the underlying input fails
	 */
	@Override
	public boolean incrementToken() throws IOException {
		clearAttributes();
        while(true) { // loop until we find a non-empty token
          start = offset;
          while (true) { // loop until we've found a full token
            
            Character.UnicodeBlock ub;

            offset++;

            // Refill the I/O buffer when it is exhausted (or when a backtrack
            // pushed bufferIndex to -1 across a buffer boundary).
            if (bufferIndex >= dataLen || bufferIndex==-1) {
                dataLen = input.read(ioBuffer);
                bufferIndex = 0;
            }

            // End of stream: flush any pending token, otherwise signal done.
            if (dataLen == -1) {
            	offset--;
                if (length > 0) break;
                else return false;
            } else {
            	currentChar = ioBuffer[bufferIndex++];
                ub = Character.UnicodeBlock.of(currentChar);
                // Fold full-width ASCII forms (U+FF01..U+FF5E, 65281..65374)
                // to their half-width counterparts before classifying.
                if (ub == Character.UnicodeBlock.HALFWIDTH_AND_FULLWIDTH_FORMS) {
	                int i = (int) currentChar;
	                if (i >= 65281 && i <= 65374) {
	                  i = i - 65248;
	                  currentChar = (char) i;
	                }
                }
            }

            if (Character.isDigit(currentChar)){
            	if(type != TYPE_DIGIT){
            		type = TYPE_DIGIT;
            		if(length!=0){
            			// Type switch: emit the pending token of the previous
            			// type; this char is re-read via backtracking.
        				backtracking = true;
	            		break;
            		}
            	}
            	buffer[length++] = currentChar;
            	if (length == MAX_WORD_LEN)  break;
            }else if (isEnglish(currentChar)) {
            	if(type != TYPE_EN){
        			type = TYPE_EN;
        			preTokened = false;
        			repeat = false;
        			if(length!=0){
        				// Type switch: emit the pending token first.
        				backtracking = true;
                		break;
        			}
        			buffer[length++] = Character.toLowerCase(currentChar);
            	}else{
            		if(length==1 && !preTokened && !repeat){
            			// Lone carried-over char: emit it as a unigram and
            			// re-read this char to start the next bigram.
            	    	offset--;
            	    	bufferIndex--;
            	    	backtracking = true;
            	    	repeat = true;
            			break;
            		}else{
            			preTokened = false;
            		}
            		buffer[length++] = Character.toLowerCase(currentChar);
            		if(length==2){
            			// Bigram complete; overlap the next one by one char.
            			backtracking = true;
            			preTokened = true;
            			repeat = false;
            			break;
            		}
            	}
            }else if(isCJK(currentChar)){
            	if(type != TYPE_CJK){
        			type = TYPE_CJK;
        			preTokened = false;
        			repeat = false;
        			if(length!=0){
        				// Type switch: emit the pending token first.
        				backtracking = true;
                		break;
        			}
        			buffer[length++] = currentChar;
            	}else{
            		if(length==1 && !preTokened && !repeat){
            			// Lone carried-over char: emit it as a unigram and
            			// re-read this char to start the next bigram.
            	    	offset--;
            	    	bufferIndex--;
            	    	backtracking = true;
            	    	repeat = true;
            			break;
            		}else{
            			preTokened = false;
            		}
            		buffer[length++] = currentChar;
            		if(length==2){
            			// Bigram complete; overlap the next one by one char.
            			backtracking = true;
            			preTokened = true;
            			repeat = false;
            			break;
            		}
            	}
            }else{
            	// Separator character: terminates any pending token.
            	type = TYPE_NONE;
            	if(length>0){
            		break;
            	}
            	length = 0;
            }
         }
      
         if (length > 0) {
    	    termAtt.copyBuffer(buffer, 0, length);
    	    offsetAtt.setOffset(correctOffset(start), correctOffset(start+length));
    	    length = 0;
    	    if(backtracking){
    	    	// Step back one char so the next bigram overlaps this one.
    	    	offset--;
    	    	bufferIndex--;
    	    	backtracking = false;
    	    }
            return true;
         }else if (dataLen == -1) {
            offset--;
            return false;
         }
	  }
    }

	/**
	 * Publishes the final offset so downstream consumers (e.g. highlighting
	 * over multi-valued fields) know where this stream ended.
	 */
	@Override
	public void end() throws IOException {
		super.end();
		final int finalOffset = correctOffset(offset);
		offsetAtt.setOffset(finalOffset, finalOffset);
	}

	/**
	 * Resets all internal state so this instance can be reused on a new
	 * input, as required by the Lucene TokenStream reuse contract. Without
	 * this, a reused tokenizer would keep stale buffer/offset state from
	 * the previous document.
	 */
	@Override
	public void reset() throws IOException {
		super.reset();
		offset = 0;
		bufferIndex = 0;
		dataLen = 0;
		length = 0;
		start = 0;
		type = TYPE_NONE;
		backtracking = false;
		repeat = false;
		preTokened = false;
	}

	/**
	 * Returns true for any letter character. NOTE: this also matches Latin
	 * letters; it is only correct because {@link #incrementToken()} tests
	 * {@code isEnglish} before this method, so only non-ASCII letters reach
	 * the CJK branch.
	 */
	private boolean isCJK(char c){
		return Character.isLetter(c);
	}

	/** Returns true for ASCII letters a-z / A-Z. */
	private boolean isEnglish(char c){
		if(((c>='a'&&c<='z')   ||   (c>='A'&&c<='Z'))) return true;
		return false;
	}
}
