package org.newlucene.core.analysis;

import java.util.ArrayList;
import java.util.List;
import java.util.Locale;
import java.util.regex.Matcher;
import java.util.regex.Pattern;

/**
 * Tokenizer that splits text into English words, digit runs, and single
 * Chinese characters. English/virtual words are lower-cased; no stemming
 * or plural removal is performed (hence the {@code _NoStem} suffix).
 *
 * Usage: call {@link #set(String)} with the input text, then drain tokens
 * with {@link #next()} until it returns {@code null}. Not thread-safe:
 * each call to {@code set} resets the shared token buffer and cursor.
 */
public class UnicodeAnalyzer_NoStem implements Analyzer
{
	// Token classes: ASCII letter runs, digit runs, CJK Unified Ideograph
	// runs (U+4E00-U+9FBF), plus the project's VIRTUAL_WORD alternative
	// (presumably a regex constant inherited from Analyzer - TODO confirm).
	private final String enRegex = "[a-zA-Z]+";
	private final String numRegex = "[0-9]+";
	private final String cnRegex = "[\\u4E00-\\u9FBF]+";
	// Patterns are compiled once per instance and reused across set() calls.
	private final Pattern cnPattern = Pattern.compile(cnRegex);
	private final Pattern numPattern = Pattern.compile(numRegex);
	private final Pattern wordPattern = Pattern.compile(enRegex + "|" + numRegex + "|" + cnRegex + "|" + VIRTUAL_WORD);

	// Tokens produced by the most recent set() call, consumed by next().
	private final List<String> wordList = new ArrayList<String>();
	private int wordListIndex;

	/**
	 * Smoke test: tokenizes a mixed number/English/Chinese string and
	 * prints each resulting token.
	 */
	public static void main(String[] args)
	{
		UnicodeAnalyzer_NoStem analyzer = new UnicodeAnalyzer_NoStem();
		analyzer.set("6100 some texts \u4E01 cool \u9B33 another text OX1231J");
		Token t;
		while ((t = analyzer.next()) != null)
		{
			System.out.println(t);
		}
	}

	/**
	 * Tokenizes {@code text} and resets the token cursor. Chinese runs are
	 * split into single-character (unigram) tokens, digit runs are kept
	 * whole, and every other match is lower-cased.
	 *
	 * @param text the input to tokenize; must not be {@code null}
	 */
	public void set(String text)
	{
		wordList.clear();
		wordListIndex = 0;
		// Reusable matchers for classifying each word found by wordPattern.
		Matcher cnMatcher = cnPattern.matcher("");
		Matcher numMatcher = numPattern.matcher("");
		Matcher wordMatcher = wordPattern.matcher(text);
		while (wordMatcher.find())
		{
			String word = wordMatcher.group();
			if (cnMatcher.reset(word).matches())
			{
				// Chinese: index each character as its own token.
				for (int i = 0; i < word.length(); i++)
				{
					wordList.add(String.valueOf(word.charAt(i)));
				}
			}
			else if (numMatcher.reset(word).matches())
			{
				// Numbers are indexed verbatim.
				wordList.add(word);
			}
			else
			{
				// English / virtual word: lower-case with a fixed locale so
				// tokens are stable regardless of the JVM default locale
				// (avoids the Turkish dotless-i problem). No plural stripping
				// or stemming is done by this analyzer.
				wordList.add(word.toLowerCase(Locale.ROOT));
			}
		}
	}

	/**
	 * Returns the next token from the last {@link #set(String)} call, or
	 * {@code null} once all tokens have been consumed.
	 */
	public Token next()
	{
		if (wordListIndex >= wordList.size())
		{
			return null;	// end of tokens, done
		}
		String s = wordList.get(wordListIndex++);
		return new Token(s);
	}
}
