package utils;

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.ArrayList;

import net.imdict.analysis.chinese.ChineseAnalyzer;

import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.Token;
import org.apache.lucene.analysis.TokenStream;


/**
 * Splits text into tokens using the imdict {@code ChineseAnalyzer}
 * (Lucene analysis API).
 */
public class Tokenizer {
	
	/** Separator placed between tokens by {@link #tokenizeToString(String)}. */
	public static final String SEPARATOR = "\u0001";

	// Reused across calls; the analyzer builds a fresh TokenStream per input.
	Analyzer ca = new ChineseAnalyzer();

	/**
	 * Tokenizes the given string and returns the tokens in stream order.
	 *
	 * @param str text to tokenize
	 * @return list of token terms, empty if the input yields no tokens
	 * @throws IOException if the underlying token stream fails
	 */
	public ArrayList<String> tokenize(String str) throws IOException {
		ArrayList<String> tokens = new ArrayList<String>();
		// Feed the analyzer directly from the String. The previous
		// String -> getBytes() -> InputStreamReader("UTF-8") round-trip
		// encoded with the platform default charset but decoded as UTF-8,
		// corrupting non-ASCII input on non-UTF-8 platforms.
		TokenStream ts = ca.tokenStream("sentence", new StringReader(str));
		try {
			Token nt = new Token();
			nt = ts.next(nt);
			while (nt != null) {
				tokens.add(nt.term());
				nt = ts.next(nt);
			}
		} finally {
			// Close even if next() throws, so the stream never leaks.
			ts.close();
		}
		return tokens;
	}
	
	/**
	 * Tokenizes the given string and joins the tokens with {@link #SEPARATOR}.
	 *
	 * @param str text to tokenize
	 * @return tokens joined by the separator, empty string for no tokens
	 * @throws IOException if the underlying token stream fails
	 */
	public String tokenizeToString(String str) throws IOException {
		ArrayList<String> tokens = tokenize(str);
		StringBuilder output = new StringBuilder();
		for (int i = 0; i < tokens.size(); ++i) {
			// Prepend the separator before every token except the first.
			if (i > 0) {
				output.append(SEPARATOR);
			}
			output.append(tokens.get(i));
		}
		return output.toString();
	}
}
