package com.tangpian.sna.core.tokenizer;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStream;
import java.io.InputStreamReader;
import java.io.UnsupportedEncodingException;
import java.nio.charset.StandardCharsets;
import java.util.ArrayList;
import java.util.HashSet;
import java.util.List;
import java.util.Set;

import org.ansj.domain.Term;
import org.ansj.library.UserDefineLibrary;
import org.ansj.splitWord.analysis.ToAnalysis;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import com.tangpian.sna.core.util.FileUtil;

/**
 * Tokenizer backed by the ansj Chinese word segmenter. At class-load time it
 * registers the user-defined dictionary with ansj and reads the stop-word list
 * from the classpath; {@link #tokenString(String)} then returns all segmented
 * tokens that are not stop words.
 */
public class AnsjTokenizer extends AbstractTokenizer {

	private static final Logger logger = LoggerFactory.getLogger(AnsjTokenizer.class);

	/** Words filtered out of every result; populated once in the static initializer. */
	private static final Set<String> stopwords = new HashSet<String>();

	static {
		// Register custom terms so ansj recognizes them during segmentation.
		UserDefineLibrary.loadLibrary(FileUtil
				.getFilePathFromClasspath("res/library/userLibrary/userLibrary.dic"));
		logger.debug("user library load complete!");

		// Load the stop-word list (one word per line) from the classpath.
		InputStream stopLibraryStream = AnsjTokenizer.class
				.getResourceAsStream("/res/library/userLibrary/stopLibrary.dic");
		if (stopLibraryStream == null) {
			// The original would have thrown an opaque NPE here; log a clear message instead.
			logger.error("stop library resource not found: /res/library/userLibrary/stopLibrary.dic");
		} else {
			// try-with-resources closes the reader (and the underlying stream) even when
			// reading fails — the original leaked both on exception. The charset is pinned
			// to UTF-8 so the dictionary decodes identically on every platform instead of
			// depending on the JVM default.
			try (BufferedReader reader = new BufferedReader(new InputStreamReader(
					stopLibraryStream, StandardCharsets.UTF_8))) {
				String line;
				while ((line = reader.readLine()) != null) {
					// Trim to match the trimmed tokens compared in tokenString(); a stop
					// word with trailing whitespace would otherwise never match.
					stopwords.add(line.trim());
				}
			} catch (IOException e) {
				// UnsupportedEncodingException is a subclass of IOException, so a single
				// catch covers both cases the original handled separately; route through
				// the logger rather than printStackTrace().
				logger.error("failed to load stop library", e);
			}
		}
	}

	/*
	 * (non-Javadoc)
	 * 
	 * @see com.tangpian.sna.Tokenizer#tokenString(java.lang.String)
	 */
	/**
	 * Segments {@code origin} with the ansj analyzer and returns the tokens
	 * that are not in the stop-word list, in original order.
	 *
	 * @param origin the text to tokenize
	 * @return the non-stop-word tokens (possibly empty, never {@code null})
	 */
	public List<String> tokenString(String origin) {
		List<Term> terms = ToAnalysis.paser(origin);
		// Presize: at most one entry per segmented term.
		List<String> result = new ArrayList<String>(terms.size());
		for (Term term : terms) {
			String word = term.getName().trim();
			if (!stopwords.contains(word)) {
				result.add(word);
			}
		}
		return result;
	}

}
