package org.nlp.algo.classifier;

import java.io.File;
import java.io.FileFilter;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.ObjectOutputStream;
import java.util.ArrayList;
import java.util.List;

import com.aliasi.classify.Classification;
import com.aliasi.classify.Classified;
import com.aliasi.classify.KnnClassifier;
import com.aliasi.tokenizer.NGramTokenizerFactory;
import com.aliasi.tokenizer.TokenFeatureExtractor;
import com.aliasi.tokenizer.TokenizerFactory;
import com.aliasi.util.Files;

/**
 * Trains a LingPipe k-nearest-neighbour (KNN) text classifier from a corpus
 * laid out as one sub-directory per category, then compiles the resulting
 * model to a file on disk.
 *
 * @author longkeyy
 */
public class TrainKnnClassifier {
	// Root of the training corpus; each visible sub-directory is one category.
	private static String corpusPath = "resource/corpus/train";

	private static File TDIR = new File(corpusPath);

	// Path the compiled classifier model is written to.
	private static String modelFile = "resource/model/KnnClassifier.model";

	// Category labels, discovered at runtime from the sub-directories of TDIR
	// (e.g. car, finance, IT, health, sports, ...).
	private static String[] CATEGORIES;

	/**
	 * Trains a KNN classifier over the corpus directory and compiles the
	 * model to {@code modelFile}.
	 *
	 * @param args unused
	 * @throws IOException if the corpus cannot be read or the model written
	 */
	public static void main(String[] args) throws IOException {
		// Character 2- to 5-grams as features: suits Chinese text, which has
		// no whitespace token boundaries.
		TokenizerFactory tokenizerFactory = new NGramTokenizerFactory(2, 5);

		List<String> categories = new ArrayList<String>();

		// Discover category directories, skipping hidden "." entries.
		File[] trainTypes = TDIR.listFiles(new HiddenFileFilter());
		if (trainTypes == null) {
			// listFiles() returns null when TDIR does not exist or is not a
			// directory; fail fast instead of NPE-ing below.
			throw new IOException("不能找到目录=" + TDIR);
		}
		for (File trainType : trainTypes) {
			if (trainType.isDirectory()) {
				// Use the directory name itself as the category label.
				// (The original string-replace hard-coded the '\\' separator
				// and only worked on Windows.)
				categories.add(trainType.getName());
				System.out.println(trainType);
			}
		}

		CATEGORIES = categories.toArray(new String[categories.size()]);

		TokenFeatureExtractor featureExtractor = new TokenFeatureExtractor(
				tokenizerFactory);
		// k = number of categories, matching the original configuration.
		KnnClassifier<CharSequence> classifier = new KnnClassifier<CharSequence>(
				featureExtractor, CATEGORIES.length);

		for (String category : CATEGORIES) {
			File classDir = new File(TDIR, category);
			if (!classDir.isDirectory()) {
				System.out.println("不能找到目录=" + classDir);
				continue; // skip missing category rather than NPE on listFiles()
			}
			Classification classification = new Classification(category);
			// Feed every visible file under the category directory to the trainer.
			for (File file : classDir.listFiles(new HiddenFileFilter())) {
				// Corpus files are GBK-encoded Chinese text.
				String text = Files.readFromFile(file, "gbk");
				System.out.println("正在训练 " + category + file.getName());

				Classified<CharSequence> classified = new Classified<CharSequence>(
						text, classification);
				classifier.handle(classified);
			}
		}

		// Compile the trained classifier to the model file; close the stream
		// even if compilation throws.
		System.out.println("开始生成分类器");

		ObjectOutputStream os = null;
		try {
			os = new ObjectOutputStream(new FileOutputStream(modelFile));
			classifier.compileTo(os);
		} finally {
			if (os != null) {
				os.close();
			}
		}

		System.out.println("分类器生成完成");

	}

	/** Accepts every file except hidden ones whose name starts with '.'. */
	private static class HiddenFileFilter implements FileFilter {

		@Override
		public boolean accept(File file) {
			return !file.getName().startsWith(".");
		}
	}

}
