package naiveBayesTest;

import java.io.BufferedReader;
import java.io.File;
import java.io.FileFilter;
import java.io.FileReader;
import java.io.IOException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import java.util.Map.Entry;

import ParticiplePkg.IkParticiple;
import ParticiplePkg.Participle;

public class TestMain {

	public static void main(String[] args) {

		// Build the model from the training data (reads the sample directory).
		Train train = new Train();

		// Directory containing the *.txt files to classify.
		final String testUrl = "D:\\workspace\\Test\\SogouC.mini\\test";
		File testFile = new File(testUrl);
		File[] testFiles = testFile.listFiles(new FileFilter() {

			@Override
			public boolean accept(File pathname) {
				return pathname.getName().endsWith("test7.txt");
			}

		});

		// listFiles() returns null when the directory does not exist or an
		// I/O error occurs -- the original code would NPE on the for-loop.
		if (testFiles == null) {
			System.err.println("test directory not readable: " + testUrl);
			return;
		}

		for (File file : testFiles) {
			Map<String, Double> classifyMap = train.classify(train.readtxt(file
					.getAbsolutePath()));
			Entry<String, Double>[] finallyEntry = orderBy(classifyMap);

			// Highest-scoring class wins.
			Entry<String, Double> ent = finallyEntry[0];
			System.out.println("文章 " + file.getName() + "属于"
					+ train.getClassMap().get(ent.getKey()) + "("
					+ ent.getKey() + ") 分数为：" + ent.getValue());
		}

	}

	/**
	 * Sorts the class/score entries by score, descending.
	 *
	 * @param srEnts class id -> score
	 * @return the entries as an array, highest score first
	 */
	private static Entry<String, Double>[] orderBy(Map<String, Double> srEnts) {
		Set<Entry<String, Double>> set = srEnts.entrySet();
		Map.Entry<String, Double>[] entries = (Map.Entry[]) set
				.toArray(new Map.Entry[srEnts.size()]);
		Arrays.sort(entries, new Comparator<Entry<String, Double>>() {

			@Override
			public int compare(Entry<String, Double> o1,
					Entry<String, Double> o2) {
				// Descending order. The original comparator never returned 0
				// (equal values compared as "greater"), which violates the
				// Comparator contract and can make Arrays.sort throw
				// "Comparison method violates its general contract!".
				return Double.compare(o2.getValue(), o1.getValue());
			}

		});

		return entries;
	}

}

/**
 * Trainer: builds a naive-Bayes text model from the sample directory
 * (per-class word counts plus global document frequencies used as IDF),
 * and scores new texts against each class.
 *
 * @author duyf
 */
class Train {

	// Root of the training set; one sub-directory per class id.
	private String trainPath = "D:\\workspace\\Test\\SogouC.mini\\Sample";

	// Class id (directory name) -> human-readable class name.
	private Map<String, String> classMap = new HashMap<String, String>();

	// Class id -> number of training articles in that class.
	private Map<String, Integer> classP = new HashMap<String, Integer>();

	// Total number of training articles across all classes.
	private double actCount = 0.0;

	// Word -> document frequency: the number of training articles that
	// contain the word. classify() converts this to idf via
	// log(actCount / df).
	private Map<String, Double> idfMap = new HashMap<String, Double>();

	// Class id -> (word -> occurrence count within that class).
	private Map<String, Map<String, Double>> classWordMap = new HashMap<String, Map<String, Double>>();

	// Word segmenter (project-local IK analyzer wrapper).
	private Participle participle = new IkParticiple();

	public Train() {
		init();
	}

	/**
	 * Reads the entire file at {@code path} into one string, dropping line
	 * breaks.
	 *
	 * @param path absolute path of the text file
	 * @return the file content, or "" if an I/O error occurs
	 */
	public String readtxt(String path) {
		// try-with-resources: the original leaked the reader on every call.
		// NOTE(review): FileReader uses the platform default charset --
		// confirm the training corpus matches it (likely GBK for SogouC).
		try (BufferedReader br = new BufferedReader(new FileReader(path))) {
			StringBuilder sb = new StringBuilder();
			String r;
			while ((r = br.readLine()) != null) {
				sb.append(r);
			}
			return sb.toString();
		} catch (IOException ex) {
			ex.printStackTrace();
		}
		return "";
	}

	/**
	 * Scans every class directory once, filling classWordMap, classP,
	 * actCount and idfMap.
	 */
	private void init() {
		classMap.put("C000007", "汽车");
		classMap.put("C000008", "财经");
		classMap.put("C000010", "IT");
		classMap.put("C000013", "健康");
		classMap.put("C000014", "体育");
		classMap.put("C000016", "旅游");
		classMap.put("C000020", "教育");
		classMap.put("C000022", "招聘");
		classMap.put("C000023", "文化");
		classMap.put("C000024", "军事");

		Set<String> keySet = classMap.keySet();

		for (String classKey : keySet) {
			Map<String, Double> wordMap = new HashMap<String, Double>();
			File f = new File(trainPath + File.separator + classKey);
			File[] files = f.listFiles(new FileFilter() {

				@Override
				public boolean accept(File pathname) {
					return pathname.getName().endsWith(".txt");
				}

			});

			// listFiles() is null for a missing/unreadable directory; the
			// original dereferenced files.length outside its null guard.
			int fileCount = (files == null) ? 0 : files.length;

			if (files != null) {
				for (File txt : files) {
					String content = readtxt(txt.getAbsolutePath());
					// Segment the article into words (duplicates kept).
					String[] word_arr = participle.participle(content, false);

					// Per-class term frequency.
					for (String word : word_arr) {
						Double wordCount = wordMap.get(word);
						wordMap.put(word, (wordCount == null) ? 1.0
								: wordCount + 1);
					}

					// Document frequency: each word counts at most once per
					// article (idf = log(all articles / articles containing
					// the word)). The original implementation never reset its
					// "has" flag between classes, so a word found in one
					// class was also credited to every later class, and it
					// counted classes rather than articles.
					Set<String> uniqueWords = new HashSet<String>(
							Arrays.asList(word_arr));
					for (String word : uniqueWords) {
						Double df = idfMap.get(word);
						idfMap.put(word, (df == null) ? 1.0 : df + 1.0);
					}
				}
			}

			// Per-class dictionary and counts.
			classWordMap.put(classKey, wordMap);

			// Per-class article count and running total.
			classP.put(classKey, fileCount);
			actCount += fileCount;
		}
	}

	/**
	 * Classifies a text.
	 *
	 * @param text the raw text to classify
	 * @return class id -> (unnormalized) naive-Bayes score; higher is better
	 */
	public Map<String, Double> classify(String text) {
		// Segment and deduplicate the input words.
		String[] text_words = participle.participle(text, true);

		Map<String, Double> frequencyOfType = new HashMap<String, Double>();
		Set<String> keySet = classMap.keySet();
		for (String classKey : keySet) {
			double typeOfThis = 1.0;
			Map<String, Double> wordMap = classWordMap.get(classKey);
			for (String word : text_words) {
				Double wordCount = wordMap.get(word);
				int articleCount = classP.get(classKey);

				// idf = log(total articles / articles containing the word).
				// NOTE(review): an unseen word gets df=0.0001, which makes
				// its idf very large rather than negligible -- kept as-is to
				// preserve the original scoring, but worth revisiting.
				Double wordidf = idfMap.get(word);
				wordidf = ((wordidf == null) ? 0.0001 : wordidf);
				wordidf = Math.log(actCount / wordidf);

				// Term frequency within the class; a word absent from the
				// class gets a small smoothed value instead of zeroing the
				// whole product.
				double term_frequency = (wordCount == null) ? ((double) 1 / (articleCount + 1))
						: (wordCount / articleCount);

				// Naive independence assumption: multiply per-word factors.
				// The *10 keeps the running product from underflowing to 0.
				typeOfThis = typeOfThis * wordidf * term_frequency * 10;

			}
			// No word contributed at all -> score 0, not the neutral 1.
			typeOfThis = ((typeOfThis == 1) ? 0 : typeOfThis);

			// Prior probability of the class.
			double classOfAll = classP.get(classKey) / actCount;

			// Bayes: P(A|B) ~ P(B|A)*P(A); P(B) is constant across classes
			// and omitted since it does not affect the ranking.
			frequencyOfType.put(classKey, typeOfThis * classOfAll);
		}

		return frequencyOfType;
	}

	/** Dumps every class dictionary to stdout (debug helper). */
	public void pringAll() {
		Set<Entry<String, Map<String, Double>>> classWordEntry = classWordMap
				.entrySet();
		for (Entry<String, Map<String, Double>> ent : classWordEntry) {
			System.out.println("类别： " + ent.getKey());
			Map<String, Double> wordMap = ent.getValue();
			Set<Entry<String, Double>> wordMapSet = wordMap.entrySet();
			for (Entry<String, Double> wordEnt : wordMapSet) {
				System.out.println(wordEnt.getKey() + ":" + wordEnt.getValue());
			}
		}
	}

	public Map<String, String> getClassMap() {
		return classMap;
	}

	public void setClassMap(Map<String, String> classMap) {
		this.classMap = classMap;
	}

}