/**
 * AlexDocumentWordsCollector.java created by zhangzhidong 
 * at 上午10:48:18 2016年5月23日
 */
package cn.edu.bjtu.alex;


import java.io.File;
import java.util.HashSet;
import java.util.Iterator;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
import org.shirdrn.document.preprocessing.api.Context;
import org.shirdrn.document.preprocessing.api.DocumentAnalyzer;
import org.shirdrn.document.preprocessing.api.Term;
import org.shirdrn.document.preprocessing.api.TermFilter;
import org.shirdrn.document.preprocessing.api.constants.ConfigKeys;
import org.shirdrn.document.preprocessing.core.common.AbstractComponent;
import org.shirdrn.document.preprocessing.core.utils.NamedThreadFactory;
import org.shirdrn.document.preprocessing.core.utils.ReflectionUtils;

/**
 * 这个类用于处理停用词及源程序中所谓的“SingleWord”,后面是什么鬼我不清楚
 * @author zhangzhidong
 *
 */
/**
 * Collects and analyzes the words of every document under the input root
 * directory. Each first-level sub-directory represents one label (class),
 * and each label directory is processed by its own worker submitted to a
 * cached thread pool; a {@link CountDownLatch} blocks {@link #fire()} until
 * all labels have been analyzed. Configured {@link TermFilter}s (e.g. the
 * stop-word and single-word filters) are applied to every document's terms.
 *
 * @author zhangzhidong
 */
public class AlexDocumentWordsCollector extends AbstractComponent {
	
	private static final Log LOG = LogFactory.getLog(AlexDocumentWordsCollector.class);
	// Term filters instantiated from configuration; applied to every document.
	private final Set<TermFilter> filters = new HashSet<TermFilter>();
	private ExecutorService executorService;
	private CountDownLatch latch;

	/**
	 * Creates the collector and instantiates every {@link TermFilter} listed
	 * (comma-separated) under {@link ConfigKeys#DOCUMENT_FILTER_CLASSES}.
	 *
	 * @param context shared pre-processing context (configuration, metadata)
	 * @throws RuntimeException if a configured filter class cannot be instantiated
	 */
	public AlexDocumentWordsCollector(final Context context) {
		super(context);
		String filterClassNames = context.getConfiguration().get(ConfigKeys.DOCUMENT_FILTER_CLASSES);
		if(filterClassNames != null) {
			LOG.info("Load filter classes: classNames=" + filterClassNames);
			String[] aClazz = filterClassNames.split("\\s*,\\s*");
			for(String clazz : aClazz) {
				TermFilter filter = ReflectionUtils.getInstance(
						clazz, TermFilter.class,  new Object[] { context });
				if(filter == null) {
					throw new RuntimeException("Fail to reflect: class=" + clazz);
				}
				filters.add(filter);
				LOG.info("Added filter instance: filter=" + filter);
			}
		}
	}

	/**
	 * Entry point: spawns one worker per label directory, waits for all of
	 * them to finish, then logs summary statistics via {@link #stat()}.
	 */
	public void fire() {
		// List the label directories exactly once. The original code listed the
		// directory twice (once for the count, once for the loop), which is
		// redundant I/O and could desynchronize the latch count from the number
		// of submitted workers if the directory changed in between.
		String[] labels = context.getFDMetadata().getInputRootDir().list(this);
		if(labels == null) {
			// File.list() returns null when the path is not a readable directory.
			throw new RuntimeException("Fail to list input root dir: "
					+ context.getFDMetadata().getInputRootDir());
		}
		int labelCnt = labels.length;
		LOG.info("Start to collect: labelCnt=" + labelCnt);
		latch = new CountDownLatch(labelCnt);
		executorService = Executors.newCachedThreadPool(new NamedThreadFactory("POOL"));
		try {
			for(String label : labels) {
				LOG.info("Collect words for: label=" + label);
				// NOTE(review): if execute() ever throws mid-loop, the latch can
				// never reach zero; workers count down in their own finally block.
				executorService.execute(new AlexEachLabelWordAnalysisWorker(label));
			}
		} finally {
			try {
				latch.await();
			} catch (InterruptedException e) {
				// Restore the interrupt status instead of silently swallowing it,
				// so callers further up the stack can still observe the interrupt.
				Thread.currentThread().interrupt();
			}
			LOG.info("Shutdown executor service: " + executorService);
			executorService.shutdown();
		}
		// The ARFF/bag-of-words export that used to be commented out here was
		// removed as dead code; term selection (e.g. CHI) happens downstream.
		stat();
	}
	
	/**
	 * Applies every configured filter to the given term table in place.
	 *
	 * @param terms word -> {@link Term} map of one document; mutated by filters
	 */
	protected void filterTerms(Map<String, Term> terms) {
		for(TermFilter filter : filters) {
			filter.filter(terms);
		}
	}

	/**
	 * Logs per-label document and term counts from the collected term table.
	 */
	private void stat() {
		LOG.info("STAT: totalDocCount=" + context.getVectorMetadata().totalDocCount());
		LOG.info("STAT: labelCount=" + context.getVectorMetadata().labelCount());
		// Term table layout: label -> (document -> (word -> Term)).
		Iterator<Entry<String, Map<String, Map<String, Term>>>> iter = context.getVectorMetadata().termTableIterator();
		while(iter.hasNext()) {
			Entry<String, Map<String, Map<String, Term>>> entry = iter.next();
			Iterator<Entry<String, Map<String, Term>>> docIter = entry.getValue().entrySet().iterator();
			int termCount = 0;
			while(docIter.hasNext()) {
				termCount += docIter.next().getValue().size();
			}
			LOG.info("STAT: label=" + entry.getKey() + ", docCount=" + entry.getValue().size() + ", termCount=" + termCount);
		}
	}
	
	/**
	 * Worker that analyzes every document file under one label directory:
	 * tokenizes each file, applies the term filters, and records the terms
	 * in the vector metadata (forward and inverted tables).
	 *
	 * <p>Implements {@link Runnable} rather than extending {@link Thread}:
	 * instances are only ever submitted to the executor service and never
	 * started as threads themselves.
	 *
	 * @author zhangzhidong
	 */
	private final class AlexEachLabelWordAnalysisWorker implements Runnable {
		private final String label;
		private final DocumentAnalyzer analyzer;

		public AlexEachLabelWordAnalysisWorker(String label) {
			this.label = label;
			String analyzerClass = context.getConfiguration().get(ConfigKeys.DOCUMENT_ANALYZER_CLASS);
			LOG.info("Analyzer class name: class=" + analyzerClass);
			analyzer = ReflectionUtils.getInstance(
					analyzerClass, DocumentAnalyzer.class, new Object[] { context.getConfiguration() });
		}
		
		@Override
		public void run() {
			try {
				// Documents of this label live directly under <inputRoot>/<label>;
				// the enclosing collector doubles as the FilenameFilter.
				File labelDir = new File(context.getFDMetadata().getInputRootDir(), label);
				File[] files = labelDir.listFiles(AlexDocumentWordsCollector.this);
				if(files == null) {
					// listFiles() returns null on I/O error or non-directory;
					// skip this label instead of throwing NPE on files.length.
					LOG.warn("Fail to list label dir, skipping: labelDir=" + labelDir);
					return;
				}
				LOG.info("Prepare to analyze: label=" + label + ", totalFiles=" + files.length);
				int n = 0;
				for(File file : files) {
					analyze(label, file);
					++n;
				}
				LOG.info("Finish to analyze: label=" + label + ", fileCount=" + n);
			} finally {
				// Always release the latch, even on failure, so fire() never hangs.
				latch.countDown();
			}
		}
		
		/**
		 * Analyzes one document: tokenize, filter, then add the surviving
		 * terms to the forward and inverted term tables.
		 *
		 * @param label label (class) the document belongs to
		 * @param file  document file to analyze
		 */
		protected void analyze(String label, File file) {
			String doc = file.getAbsolutePath();
			LOG.debug("Process document: label=" + label + ", file=" + doc);
			Map<String, Term> terms = analyzer.analyze(file);
			LOG.info(label + "," + file.getName() + "," + terms.size());
			// filter terms
			filterTerms(terms);
			LOG.info("After filter:"+label + "," + file.getName() + "," + terms.size());
			// construct memory structure
			context.getVectorMetadata().addTerms(label, doc, terms);
			// add inverted table as needed
			context.getVectorMetadata().addTermsToInvertedTable(label, doc, terms);
			LOG.debug("Done: file=" + file + ", termCount=" + terms.size());
			LOG.debug("Terms in a doc: terms=" + terms);
		}
	}

}
