package org.nlp.algo.feature.chi;

import java.io.File;
import java.io.FileFilter;
import java.io.IOException;
import java.io.StringReader;
import java.util.ArrayList;
import java.util.Collections;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.apache.commons.io.FileUtils;
import org.apache.log4j.Logger;
import org.apache.lucene.analysis.TokenStream;
import org.apache.lucene.analysis.tokenattributes.CharTermAttribute;
import org.apache.lucene.util.Version;
import org.nlp.lucene.BloomAnalyzer;

/**
 * Computes feature words for text classification. Feature extraction is
 * performed with the chi-square (CHI) test over the training corpus.
 *
 * @author kongxp
 * @since 2012-12-18
 */
public class FeatureCom {
	private static final Logger _log = Logger.getLogger(FeatureCom.class);
	// word -> which documents of which category contain this word
	private static final Map<String, TypeDocument> wordDocs = new HashMap<String, TypeDocument>();
	// category -> number of training documents in that category
	private static final Map<String, Integer> typeDocNum = new HashMap<String, Integer>();
	// category -> one Abcd contingency entry per word
	private static final Map<String, List<Abcd>> wordAbacds = new HashMap<String, List<Abcd>>();

	// Output directory for the extracted feature files
	private static final String featurePath = "resource/feature";

	// Root directory of the corpus
	private static final String corpusPath = "resource/corpus";

	// Training corpus directory: one sub-directory per category
	private static final File trainDir = new File(corpusPath + "/train");

	/**
	 * Entry point. Tokenizes every training document per category, builds the
	 * ABCD contingency counts for each (word, category) pair, then exports the
	 * chi-square-ranked feature lists via {@link #export(String)}.
	 *
	 * @param args unused
	 * @throws Exception if reading the corpus or writing the features fails
	 */
	public static void main(String[] args) throws Exception {
		File[] trainTypes = trainDir.listFiles();
		if (trainTypes == null) {
			// listFiles() returns null when the path does not exist or is not a directory
			_log.error("training directory not found: " + trainDir.getAbsolutePath());
			return;
		}
		for (File typeDir : trainTypes) {
			_log.debug(typeDir.getName());
			// skip plain files and hidden entries such as ".svn" / ".DS_Store"
			if (typeDir.isDirectory() && !typeDir.getName().startsWith(".")) {
				comOneType(typeDir);
			}
		}

		// N: total number of training documents over all categories
		int n = 0;
		Set<String> types = typeDocNum.keySet();
		for (String type : types) {
			n += typeDocNum.get(type);
		}

		Set<String> words = wordDocs.keySet();
		for (String type : types) {
			List<Abcd> typeList = new ArrayList<Abcd>();
			wordAbacds.put(type, typeList);
			for (String word : words) {
				TypeDocument td = wordDocs.get(word);
				// a: documents of this category that contain the word
				int a = td.getFiles(type);
				// b: documents of other categories that contain the word
				int b = td.getOther(type);
				// c: documents of this category that do NOT contain the word
				int c = typeDocNum.get(type) - a;
				// d: documents of other categories that do NOT contain the word
				int d = n - a - b - c;
				typeList.add(new Abcd(a, b, c, d, n, word));
			}
		}

		// sort per category and write the ranked feature files
		export(featurePath);

		_log.debug(wordDocs.size());
	}

	/**
	 * Sorts the Abcd entries of every category, writes one ranked file per
	 * category ("&lt;type&gt;.txt") and a combined "feature.txt" that holds the
	 * top 1000 words of each category.
	 *
	 * @param outPath directory the feature files are written to
	 * @throws IOException if a feature file cannot be written
	 */
	private static void export(String outPath) throws IOException {
		int num = 0;
		int k = 0;
		// a List keeps feature.txt in ranked order; every entry is prefixed
		// with a unique index, so duplicates cannot occur anyway
		List<String> features = new ArrayList<String>();
		for (String type : wordAbacds.keySet()) {
			List<Abcd> results = wordAbacds.get(type);
			// Abcd implements Comparable; presumably orders by chi-square score
			Collections.sort(results);
			List<String> lines = new ArrayList<String>();
			for (Abcd abcd : results) {
				lines.add(++num + " " + abcd.toString());
			}
			FileUtils.writeLines(new File(outPath + "/" + type + ".txt"), lines);
			// keep at most the top 1000 words per category as features
			int size = Math.min(results.size(), 1000);
			for (int i = 0; i < size; i++) {
				features.add(k++ + " " + results.get(i).getWord());
			}
		}
		FileUtils.writeLines(new File(outPath + "/" + "feature.txt"), features);
	}

	/**
	 * Tokenizes every "*.txt" document in one category directory and records,
	 * per word, which documents of this category contain it. Also accumulates
	 * the category's document count in {@link #typeDocNum}.
	 *
	 * @param file category directory; its name is used as the category label
	 * @throws IOException if a document cannot be read
	 */
	private static void comOneType(File file) throws IOException {
		String type = file.getName();
		File[] models = file.listFiles(new FileFilter() {
			@Override
			public boolean accept(File pathname) {
				return pathname.getName().endsWith(".txt");
			}
		});
		if (models == null) {
			// should not happen (caller checked isDirectory), but guard against races
			_log.warn("cannot list documents in " + file.getAbsolutePath());
			return;
		}

		Integer docNum = typeDocNum.get(type);
		typeDocNum.put(type, docNum == null ? models.length : docNum + models.length);

		BloomAnalyzer analyzer = new BloomAnalyzer(Version.LUCENE_40);
		try {
			for (File model : models) {
				// corpus files are GBK-encoded (Chinese text)
				String str = FileUtils.readFileToString(model, "GBK");
				TokenStream token = analyzer.tokenStream("", new StringReader(str));
				CharTermAttribute term = token.addAttribute(CharTermAttribute.class);
				try {
					// Lucene 4 TokenStream contract: reset() must be called
					// before the first incrementToken(), end() after the last
					token.reset();
					while (token.incrementToken()) {
						String word = term.toString();
						TypeDocument wd = wordDocs.get(word);
						if (wd == null) {
							wd = new TypeDocument();
							wordDocs.put(word, wd);
						}
						wd.addType(type, model.getName());
					}
					token.end();
				} finally {
					token.close();
				}
			}
		} finally {
			analyzer.close();
		}
	}

}
