package org.lab.jiang;

import java.io.IOException;
import java.io.StringReader;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;
import org.shirdrn.document.preprocessing.api.Term;
import org.shirdrn.document.preprocessing.core.common.TermImpl;

import com.google.common.collect.Maps;


public class LuceneDocumentAnalyzer extends AbstractWechatAnalyzer  {

	private static final Log LOG = LogFactory.getLog(LuceneDocumentAnalyzer.class);

	/** Lucene analyzer used for tokenization; SmartChineseAnalyzer performs Chinese word segmentation. */
	private final Analyzer analyzer;

	public LuceneDocumentAnalyzer() {
		super();
		// 'false' disables the analyzer's built-in stop-word filtering; stop words
		// are filtered explicitly below via super.isStopword(word) instead.
		analyzer = new SmartChineseAnalyzer(false);
	}

	/**
	 * Tokenizes the given content with the Lucene analyzer and accumulates the
	 * frequency of every non-empty, non-stop-word term.
	 *
	 * @param content raw text to analyze
	 * @return map from term text to its {@link Term}, with frequencies accumulated
	 * @throws RuntimeException wrapping any {@link IOException} raised while
	 *         consuming the token stream
	 */
	@Override
	protected Map<String, Term> analyze(String content) {
		Map<String, Term> terms = Maps.newHashMap();
		if (LOG.isDebugEnabled()) {
			LOG.debug("Process content: " + content);
		}
		TokenStream ts = null;
		try {
			ts = analyzer.tokenStream("", new StringReader(content));
			// Capture the term attribute once; no per-token getAttribute()/cast needed.
			CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
			ts.reset();
			while (ts.incrementToken()) {
				String word = termAttr.toString().trim();
				if (!word.isEmpty() && !super.isStopword(word)) {
					Term term = terms.get(word);
					if (term == null) {
						term = new TermImpl(word);
						terms.put(word, term);
					}
					term.incrFreq();
				} else {
					LOG.debug("Filter out stop word: word=" + word);
				}
			}
			// end() must be called exactly once, after the last incrementToken(),
			// per the TokenStream consumer workflow (not inside the loop).
			ts.end();
		} catch (IOException e) {
			throw new RuntimeException("Failed to analyze content", e);
		} finally {
			// Always release the token stream, even if incrementToken() threw.
			if (ts != null) {
				try {
					ts.close();
				} catch (IOException e) {
					LOG.warn("Failed to close token stream", e);
				}
			}
		}
		return terms;
	}

}
