package cn.com.homework.service;

import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;

import cn.com.homework.utils.FileUtil;
import cn.com.homework.utils.IOUtil;

public class HdfsDataManager {

	/** Shared Hadoop configuration used by every HDFS operation in this class. */
	private static Configuration conf = new Configuration();

	/**
	 * Copies a local file to HDFS (equivalent of copyFromLocalFile) by
	 * reading the file fully into memory and writing the bytes out.
	 *
	 * @param localPath source path on the local filesystem
	 * @param hdfsPath  destination path on HDFS
	 */
	public static void copyFromLocal(String localPath, String hdfsPath) {
		IOUtil.writeByteArray2HdfsFile(
				IOUtil.readLocalFile2ByteArray(localPath), hdfsPath, conf);
	}

	/***
	 * Holder for the words two files have in common and how many there are.
	 *
	 * @author lv
	 *
	 */
	private static class SameWordAndCount {
		int count;         // number of common words; always equals words.size()
		Set<String> words; // the common words themselves

		public SameWordAndCount(int count, Set<String> words) {
			this.count = count;
			this.words = words;
		}
	}

	/**
	 * Returns the intersection of two word sets. A {@code null} argument is
	 * treated as an empty set so callers never see an NPE from a failed read.
	 *
	 * @param first  first word set (may be null)
	 * @param second second word set (may be null)
	 * @return a new set containing the words present in both inputs
	 */
	private static Set<String> intersect(Set<String> first, Set<String> second) {
		Set<String> common = new HashSet<String>();
		if (first == null || second == null) {
			return common;
		}
		for (String word : first) {
			if (second.contains(word)) {
				common.add(word);
			}
		}
		return common;
	}

	/**
	 * Reads two HDFS files and returns the words they share plus the count.
	 *
	 * @param hdfsPathOne path of the first HDFS file
	 * @param hdfsPathTwo path of the second HDFS file
	 * @param charsetName charset used to decode both files
	 * @return the common words and their count
	 */
	public static SameWordAndCount getSameWordAndCountFromHdfs(
			String hdfsPathOne, String hdfsPathTwo, String charsetName) {
		Set<String> words01 = IOUtil.readHdfsFile2Set(hdfsPathOne, charsetName,
				conf);
		Set<String> words02 = IOUtil.readHdfsFile2Set(hdfsPathTwo, charsetName,
				conf);
		Set<String> sameWords = intersect(words01, words02);
		// count is derived from the set; no need for a parallel counter
		return new SameWordAndCount(sameWords.size(), sameWords);
	}

	/***
	 * Reads one local file and one HDFS file and returns the words they
	 * share plus the count.
	 *
	 * @param localPath   path of the file on the local filesystem
	 * @param hdfsPath    path of the file on HDFS
	 * @param charsetName charset used to decode both files
	 * @return the common words and their count
	 */
	public static SameWordAndCount getSameWordAndCountFromHdfsAndLocal(
			String localPath, String hdfsPath, String charsetName) {
		Set<String> words01 = IOUtil.readLocalFile2Set(localPath, charsetName);
		Set<String> words02 = IOUtil.readHdfsFile2Set(hdfsPath, charsetName,
				conf);
		Set<String> sameWords = intersect(words01, words02);
		return new SameWordAndCount(sameWords.size(), sameWords);
	}

	/**
	 * Counts word frequencies across every file in an HDFS directory and
	 * returns the result ordered by frequency, highest first (ties broken
	 * alphabetically by word).
	 *
	 * @param hdfsDir     HDFS directory whose files are scanned
	 * @param charsetName charset used to decode the files
	 * @return word -&gt; frequency, iteration order sorted by descending count
	 */
	public static Map<String, Integer> getWordCountFromHdfsDir(String hdfsDir,
			String charsetName) {
		// Overall frequency accumulator across all files.
		Map<String, Integer> totals = new HashMap<String, Integer>();
		// 1. List the files contained in the directory.
		List<String> files = FileUtil.getFilePathsByDir(hdfsDir, conf);
		// 2. Read each file and accumulate its word counts.
		for (String file : files) {
			List<String> words = IOUtil.readHdfsFile2List(file, charsetName,
					conf);
			// BUG FIX: the original tallied each file into a local map and
			// then "merged" that local map into itself, so the overall map
			// stayed empty and the method always returned no entries.
			// Accumulate straight into the shared totals map instead.
			for (String word : words) {
				Integer previous = totals.get(word);
				totals.put(word, previous == null ? 1 : previous + 1);
			}
		}
		// 3. Sort by count descending. The original used a TreeSet whose
		// comparator (a) sorted ascending despite the "descending" comment,
		// (b) used overflow-prone int subtraction, and (c) returned 0 for
		// distinct words with equal counts, silently dropping them from the
		// set. Sorting a list keeps every entry.
		List<Entry<String, Integer>> entries = new ArrayList<Entry<String, Integer>>(
				totals.entrySet());
		Collections.sort(entries, new Comparator<Entry<String, Integer>>() {
			@Override
			public int compare(Entry<String, Integer> entry1,
					Entry<String, Integer> entry2) {
				int byCount = Integer.compare(entry2.getValue(),
						entry1.getValue());
				// Tie-break on the word so the order is deterministic.
				return byCount != 0 ? byCount
						: entry1.getKey().compareTo(entry2.getKey());
			}
		});

		// 4. LinkedHashMap preserves the sorted iteration order; the
		// original copied into a HashMap, which discarded the sort entirely.
		Map<String, Integer> result = new LinkedHashMap<String, Integer>();
		for (Entry<String, Integer> entry : entries) {
			result.put(entry.getKey(), entry.getValue());
		}

		return result;
	}

}
