package cn.edu.bjtu.alex.rewrite.tools;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.io.StringReader;
import java.util.Map;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.lucene.analysis.Analyzer;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.cn.smart.SmartChineseAnalyzer;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.analysis.tokenattributes.CharTermAttributeImpl;

import com.google.common.collect.Maps;

import cn.edu.bjtu.alex.rewrite.Context;
import cn.edu.bjtu.alex.rewrite.interfaces.DocumentAnalyzer;
import cn.edu.bjtu.alex.rewrite.interfaces.Term;
import cn.edu.bjtu.alex.rewrite.interfaces.impl.TermImpl;

/**
 * {@link DocumentAnalyzer} implementation backed by Lucene's
 * {@link SmartChineseAnalyzer} for Chinese word segmentation.
 * Stop-word filtering is delegated to {@link AbstractDocumentAnalyzer#isStopword(String)}.
 */
public class LuceneDocumentAnalyzer extends AbstractDocumentAnalyzer implements DocumentAnalyzer {

	private static final Log LOG = LogFactory.getLog(LuceneDocumentAnalyzer.class);

	/** Reusable Lucene analyzer; Lucene analyzers are thread-safe. */
	private final Analyzer analyzer;

	public LuceneDocumentAnalyzer(Context ctx) {
		super(ctx);
		// 'false' disables the analyzer's built-in stop-word removal;
		// stop words are filtered explicitly via super.isStopword(...) below.
		analyzer = new SmartChineseAnalyzer(false);
	}

	/**
	 * Tokenizes a single line of text and aggregates term frequencies.
	 *
	 * @param line the raw input text to segment
	 * @return map from term text to its {@link Term} (frequency accumulated via
	 *         {@code incrFreq()}); empty tokens and stop words are excluded.
	 *         Returns whatever was accumulated so far if an I/O error occurs.
	 */
	public Map<String, Term> analyze(String line) {
		Map<String, Term> terms = Maps.newHashMap();
		TokenStream ts = null;
		try {
			ts = analyzer.tokenStream("", new StringReader(line));
			// Capture the attribute once and reuse it each iteration; the
			// original re-fetched and downcast to CharTermAttributeImpl on
			// every token, which is both wasteful and fragile.
			CharTermAttribute termAttr = ts.addAttribute(CharTermAttribute.class);
			ts.reset();
			while (ts.incrementToken()) {
				String word = termAttr.toString().trim();
				if (!word.isEmpty() && !super.isStopword(word)) {
					Term term = terms.get(word);
					if (term == null) {
						term = new TermImpl(word);
						terms.put(word, term);
					}
					term.incrFreq();
				} else {
					LOG.debug("Filter out stop word : " + word);
				}
			}
			// Per the TokenStream contract, end() is called exactly once after
			// the stream is exhausted (the original wrongly called it inside
			// the loop on every token).
			ts.end();
		} catch (IOException e) {
			// Log through the class logger instead of printStackTrace().
			LOG.error("Failed to analyze line: " + line, e);
		} finally {
			// Always release the stream, even when tokenization throws.
			if (ts != null) {
				try {
					ts.close();
				} catch (IOException e) {
					LOG.warn("Failed to close token stream", e);
				}
			}
		}

		return terms;
	}

}
