package org.joy.hpc.analyzer;

import java.io.IOException;
import java.util.List;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reporter;
import org.joy.analyzer.Analyzer;
import org.joy.analyzer.Hit;
import org.joy.analyzer.HitAnalyzer;
import org.joy.analyzer.PipelineAnalyzer;
import org.joy.analyzer.TokenAnalyzer;
import org.joy.analyzer.scoring.Scorer;
import org.joy.analyzer.terms.TermExtractor;
import org.joy.hpc.analyzer.examples.sogou.SogouDocument;
import org.joy.hpc.analyzer.examples.sogou.SogouDocumentInput;
import org.joy.nlp.ACWordSpliter;
import org.joy.nlp.PDWordSpliter;
import org.joy.nlp.WordSpliter;

/**
 * Hadoop mapper that runs the Joy hit-analysis pipeline over Sogou documents.
 * For each input document it tokenizes the text, extracts {@link Hit}s, and
 * emits one (term, {@link HitWritable}) pair per hit.
 *
 * @author Lamfeeling
 */
public class HitMapper extends MapReduceBase implements
		Mapper<Text, SogouDocumentInput, Text, HitWritable> {
	private static final Log log = LogFactory.getLog(HitMapper.class.getName());
	// Resolved in configure(). Note: scorerClass and extractorClass are not
	// referenced by this mapper itself — presumably consumed elsewhere in the
	// pipeline. TODO(review): confirm they are actually needed here.
	private Class<Scorer> scorerClass;
	private WordSpliter spliter;
	private Class<TermExtractor> extractorClass;

	/**
	 * Analyzes one document and emits a (term, HitWritable) pair for every
	 * hit found in it. Analysis failures are logged and the record is
	 * skipped, so a single bad document does not fail the whole task.
	 *
	 * @param id       document id (unused)
	 * @param doc      the Sogou document to analyze
	 * @param out      collector receiving (term, hit) output pairs
	 * @param reporter used for per-document status updates
	 * @throws IOException if the output collector fails
	 */
	@SuppressWarnings("unchecked")
	@Override
	public void map(Text id, final SogouDocumentInput doc,
			OutputCollector<Text, HitWritable> out, Reporter reporter)
			throws IOException {
		try {
			reporter.setStatus(doc.getURL() + " ready");
			// Pipeline analyzer chaining tokenization and hit extraction.
			// Local to this call: no state needs to survive between map()
			// invocations (was previously an instance field for no reason).
			PipelineAnalyzer<WordSpliter, List<Hit>> analyzer =
					new PipelineAnalyzer<WordSpliter, List<Hit>>(
							new Analyzer[] {
									// tokenizer
									new TokenAnalyzer(),
									// hit (keyword) analyzer
									new HitAnalyzer()
									});
			// Build the document model for this record.
			analyzer.setDoc(SogouDocument.createSogouDocument(doc));
			reporter.setStatus(doc.getURL() + " doc model done.");
			// Feed the word spliter chosen in configure().
			analyzer.input(spliter);
			// Run the pipeline.
			analyzer.doAnalyze();

			reporter.setStatus(doc.getURL() + " analysis done.");
			// Emit every hit keyed by its term.
			List<Hit> hits = analyzer.output();
			for (Hit hit : hits) {
				out.collect(new Text(hit.getTerm()),
						new HitWritable(hit, doc.getURL()));
			}
		} catch (Exception e) {
			// Log with cause and skip the bad record; printStackTrace() used
			// to dump to stderr where Hadoop task logs may lose it.
			log.error("hit analysis failed for " + doc.getURL(), e);
		}
	}

	/** Releases the word-spliter resources when the task finishes. */
	@Override
	public void close() throws IOException {
		// Guard against configure() having failed before spliter was set
		// (its catch block swallows the error, so spliter may be null here).
		if (spliter != null) {
			spliter.close();
		}
		super.close();
	}

	/**
	 * Loads analysis resources named in the job configuration, falling back
	 * to the default scorer/spliter/extractor for any unset property.
	 *
	 * @param conf job configuration holding the implementation class names
	 */
	@SuppressWarnings("unchecked")
	@Override
	public void configure(JobConf conf) {
		String scorerName = conf.get("org.joy.analyzer.scorer");
		String spliterName = conf.get("org.joy.analyzer.spliter");
		String termExtractorName = conf.get("org.joy.analyzer.termExactor");
		try {
			if (scorerName != null)
				scorerClass = (Class<Scorer>) Class.forName(scorerName);

			if (spliterName != null)
				spliter = (WordSpliter) Class.forName(spliterName)
						.newInstance();

			if (termExtractorName != null)
				extractorClass = (Class<TermExtractor>) Class
						.forName(termExtractorName);

			// Defaults when the job did not specify an implementation.
			if (scorerClass == null)
				scorerClass = (Class<Scorer>) Class
						.forName("org.joy.analyzer.scoring.PWFScorer");

			if (spliter == null)
				spliter = new PDWordSpliter();

			if (extractorClass == null)
				extractorClass = (Class<TermExtractor>) Class
						.forName("org.joy.analyzer.terms.SimpleTermExtractor");

		} catch (Exception excp) {
			// Log with cause instead of dumping to stderr; the mapper will
			// then fail per-record in map() if spliter stayed unusable.
			log.error("failed to load analyzer resources", excp);
		}
	}

}
