///**
// * AlexOutputingWekaArffFile.java created by zhangzhidong
// * at 22:13:16 on 2016-05-24
// *
// * NOTE: the entire class below is commented out (disabled). Only the
// * package declaration in this file is active code.
// */
package cn.edu.bjtu.alex;
//
//import java.io.File;
//import java.io.IOException;
//import java.util.Iterator;
//import java.util.Map;
//import java.util.Map.Entry;
//
//import org.apache.commons.logging.Log;
//import org.apache.commons.logging.LogFactory;
//import org.shirdrn.document.preprocessing.api.Context;
//import org.shirdrn.document.preprocessing.api.Term;
//import org.shirdrn.document.preprocessing.api.TermFeatureable;
//import org.shirdrn.document.preprocessing.core.common.AbstractComponent;
//
//import com.google.common.collect.Maps;
//
//import weka.core.Attribute;
//import weka.core.FastVector;
//import weka.core.Instance;
//import weka.core.Instances;
//import weka.core.converters.ArffSaver;
//
///**
// * @author zhangzhidong<br>
// * comment generated at 2016年5月24日下午10:13:16<br>
// * 主要是生成基于chi统计的向量化的表示文档的文件
// * 
// */
//public class AlexOutputingWekaArffFile extends AbstractComponent {
//	private static final Log LOG = LogFactory.getLog(AlexOutputingWekaArffFile.class);
//	//对应的实例的属性，训练时候出现的所有的Word都作为一个属性,其实也就是生成的arff文件的头
//	//每个都是一个Attribute
//	private FastVector attInfo = new FastVector();	
//	//word - term  包含着word - id的应对
//	private Map<String, TermFeatureable> featuredTermsMap = Maps.newHashMap();
//	/**
//	 * 
//	 * @param context 去重 去停之后的所有数据都存放在context里面了
//	 */
//	public AlexOutputingWekaArffFile(Context context) {
//		super(context);
//	}
//	/**
//	 * Builds the ARFF header, then the instance body.
//	 * @author zhangzhidong
//	 */
//	public void fire() {
//		// TODO Auto-generated method stub
//		alexConstructInstancesHeader();
//		alexConstructInstancesbody();
//	}
//	int numAttributes = -1;
//	//先编号，在对文档进行向量表示
//	private void alexConstructInstancesHeader(){
//		int labelIndex = 0;
//		int wordIndex = 0;
//		//对每个词组形成向量
//		Map<String, Integer> globalLabelToIdMap = Maps.newHashMap();
//		Map<Integer, String> globalIdToLabelMap = Maps.newHashMap();		
//		//类标。几个文件夹就有几个
//		for(String label : context.getVectorMetadata().labels()) {
//			Integer labelId = globalLabelToIdMap.get(label);
//			if(labelId == null) {
//				labelId = labelIndex;
//				globalLabelToIdMap.put(label, labelId);
//				globalIdToLabelMap.put(labelId, label);
//				++labelIndex;
//			}
//		}
//		//给每个term编号从0开始,通过iterator遍历赋值，然后通过iterator遍历取出，每次顺序都是一致
//		for(TermFeatureable term : context.getVectorMetadata().featuredTerms()) {
//			term.setId(wordIndex++);
//		}
//		context.getVectorMetadata().putLabelToIdPairs(globalLabelToIdMap);
//		context.getVectorMetadata().putIdToLabelPairs(globalIdToLabelMap);
//		
//		for(TermFeatureable term : context.getVectorMetadata().featuredTerms()) {
//			String word = term.getWord();
//			StringBuffer buf = new StringBuffer();
//			attInfo.addElement(new Attribute(word));
//			LOG.debug("Write feature term vector: word=" + word + ", datum=" + buf.toString());
//		}
//		//类标vector
//		FastVector labelVals = new FastVector();
//		for(String label:this.context.getVectorMetadata().labels()){
//			labelVals.addElement(label);
//		}
//		attInfo.addElement(new Attribute("$-classification-internal-$",labelVals));
//		//到这，属性及类属性都加上了，类属性是最后一个
//		insts =  new Instances("auto generated dataset for text catalog",this.attInfo,10000);
//		this.numAttributes = wordIndex + 1;
//	}
//	private Instances insts = null;
//	private void alexConstructInstancesbody(){
//		for(TermFeatureable term : context.getVectorMetadata().featuredTerms()) {
//			featuredTermsMap.put(term.getWord(), term);
//		}
//		Iterator<Entry<String, Map<String, Map<String, Term>>>> iter = context.getVectorMetadata().termTableIterator();
//		while(iter.hasNext()) {
//			Entry<String, Map<String, Map<String, Term>>> labelledDocsEntry = iter.next();
//			String label = labelledDocsEntry.getKey();
//			Integer labelId = getLabelId(label);
//			//构造的时候指定有多少个属性，其实就是 属性值个数+1  ，1是指的一个类标
//			if (this.numAttributes==-1){
//				throw new RuntimeException("WRONG");
//			}
//			Instance oneDoc = new Instance(this.numAttributes);
//			setAllZero(oneDoc);
//			oneDoc.setDataset(this.insts);
//			//类标已经确定了，直接在这里设置
//			oneDoc.setValue(this.numAttributes-1, label);
//			if(labelId != null) {
//				Map<String, Map<String, Term>>  docs = labelledDocsEntry.getValue();
//				Iterator<Entry<String, Map<String, Term>>> docsIter = docs.entrySet().iterator();
//				while(docsIter.hasNext()) {
//					Entry<String, Map<String, Term>> docsEntry = docsIter.next();
//					Map<String, Term> terms = docsEntry.getValue();
//					for(Entry<String, Term> termEntry : terms.entrySet()) {
//						String word = termEntry.getKey();
//						Integer wordId = getWordId(word);
//						if(wordId != null) {
//							Term term = termEntry.getValue();
//							oneDoc.setValue(wordId, term.getTfidf());
//						}
//					}
//					this.insts.add(oneDoc);
//				}
//			} else {
//				LOG.warn("Label ID can not be found: label=" + label + ", labelId=null");
//			}
//		}
//		
//		ArffSaver as = new ArffSaver();
//		as.setInstances(insts);
//		try {
//			File f = new File(this.context.getFDMetadata().getArffOutputDir(),"result.arff");
//			as.setFile(f);
//			as.writeBatch();
//			LOG.info("Finished: Arff Dir =" + context.getFDMetadata().getArffOutputDir());
//		} catch (IOException e) {
//			// TODO Auto-generated catch block
//			e.printStackTrace();
//		}
//		
//	}
//	
//
//	private Integer getWordId(String word) {
//		TermFeatureable term = featuredTermsMap.get(word);
//		return term == null ? null : term.getId();
//	}
//
//	private Integer getLabelId(String label) {
//		return context.getVectorMetadata().getlabelId(label);
//	}
//	private void setAllZero(Instance one){
//		int num = one.numAttributes();
//		//类标不设置，因为类标是string类型的
//		for(int i=0;i<num-1;i++){
//			one.setValue(i, 0.0);
//		}
//	}
//}
//	
//
//
//
//
//
//
//
//
//
////private void alexTurnTermTableIntoAttInfoVector(Map<String,Integer> mappingMap){
//////Instances insts = new Instances(name, attInfo, capacity)
//////这里以垃圾邮件的数据集为例。每个文档所包括的term不同，所以生成的instances文件要把所有的词都包括才可以
//////所以这里处理完一个文件夹(也就是一个分类之后）就要把所有的都添加进去。每个线程负责一个类，在这里完成效率高点。
//////形成arff文件
//////		context.getVectorMetadata().addTerms(label, doc, terms);
//////				class		doc			word    term
////Iterator<Entry<String, Map<String, Map<String, Term>>>> it = context.getVectorMetadata().termTableIterator();
////while(it.hasNext()){
////	Entry<String, Map<String, Map<String, Term>>> classEntry = it.next();
////	Map<String, Map<String, Term>> docMap = classEntry.getValue();
////	Iterator<Entry<String, Map<String, Term>>> it1 = docMap.entrySet().iterator();
////	while(it1.hasNext()){
////		Entry<String,Map<String,Term>> docEntry = it1.next();	
////		Map<String,Term>  wordMap = docEntry.getValue();
////		Iterator<Entry<String,Term>> it3 = wordMap.entrySet().iterator();
////		while(it3.hasNext()){
////			Entry<String,Term> termEntry = it3.next();
////			Term t = termEntry.getValue();
////			if(!mappingMap.containsKey(t.getWord())){
//////				Attribute att = new Attribute(t.getWord(), attVals);
//////				attInfo.addElement(att);
////				mappingMap.put(t.getWord(), attInfo.size()-1);
////			}else{
////				System.out.println(t.getWord()+" conflicts, existing value = " + mappingMap.get(t.getWord()));
////			}
////		}
////	}	
////}
//////最后加上类标
////attInfo.addElement(new Attribute("instance-classification",this.labelVals));
////}
////private Instances alexConstructInstances(Map<String,Integer> mappingMap){
////Instances insts = new Instances("train-data", attInfo, 10000);
////
////Iterator<Entry<String, Map<String, Map<String, Term>>>> it = context.getVectorMetadata().termTableIterator();
////it = context.getVectorMetadata().termTableIterator();
////while(it.hasNext()){
////	Entry<String, Map<String, Map<String, Term>>> classEntry = it.next();
////	String classVal = classEntry.getKey();
////	Map<String, Map<String, Term>> docMap = classEntry.getValue();
////	Iterator<Entry<String, Map<String, Term>>> it1 = docMap.entrySet().iterator();
////	while(it1.hasNext()){
////		Entry<String,Map<String,Term>> docEntry = it1.next();
////		//文件名
////		String docName = docEntry.getKey();
////		Map<String,Term>  wordMap = docEntry.getValue();
////		//遍历每个词
////		Iterator<Entry<String ,Term >> wordit = wordMap.entrySet().iterator();
////		Instance inst = new Instance(attInfo.size());
////		//设定所属于的数据集
////		inst.setDataset(insts);
////		//类标单独设定
////		for(int i=0;i<inst.numAttributes()-1;i++){
////			inst.setValue(i, "0");
////		}
////		inst.setValue(attInfo.size()-1, classVal);
////		while(wordit.hasNext()){
////			Entry<String,Term> entry = wordit.next();
////			String word = entry.getKey();
////			int index = mappingMap.get(word);
////			inst.setValue(index, "1");
////		}
////		insts.add(inst);
////	}	
////}
////return insts;
////}
