package cn.com.homework;

import java.io.BufferedReader;
import java.io.ByteArrayOutputStream;
import java.io.FileInputStream;
import java.io.IOException;
import java.io.InputStreamReader;
import java.util.ArrayList;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.LinkedHashMap;
import java.util.List;
import java.util.Map;
import java.util.Map.Entry;
import java.util.Set;
import java.util.TreeSet;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.htrace.fasterxml.jackson.core.util.BufferRecycler;

public class Test02 {

	/** Shared Hadoop configuration used by every file-system operation in this class. */
	private static Configuration conf = new Configuration();

	/**
	 * Demo entry point: implements copyFromLocalFile in two steps:
	 * 1. read the local file into a byte array;
	 * 2. write the byte array to the given HDFS file.
	 *
	 * @param args args[0] = local source path, args[1] = HDFS destination path
	 */
	public static void main(String[] args) {
		// The original hard-coded empty paths, which can never succeed.
		if (args.length < 2) {
			System.err.println("Usage: Test02 <localPath> <hdfsPath>");
			return;
		}
		copyFromLocal(args[0], args[1]);
	}

	/**
	 * Validates that a path string is usable.
	 *
	 * @param path local or HDFS path to check
	 * @throws IllegalArgumentException if the path is null or blank
	 */
	private static void checkPath(String path) {
		if (path == null || path.trim().isEmpty()) {
			throw new IllegalArgumentException("path must not be null or empty");
		}
	}

	/**
	 * Writes a byte array to an HDFS file (created or overwritten).
	 *
	 * @param b        content to write; must not be null
	 * @param hdfsPath destination HDFS path
	 */
	public static void writeByteArray2HdfsFile(byte[] b, String hdfsPath) {
		// 1. Validate arguments.
		checkPath(hdfsPath);
		if (b == null) {
			throw new IllegalArgumentException("byte array must not be null");
		}

		// 2. Open the file system and write the bytes.
		FSDataOutputStream out = null;
		FileSystem fs = null;
		try {
			fs = FileSystem.get(conf);
			out = fs.create(new Path(hdfsPath));
			out.write(b);
		} catch (IOException e) {
			e.printStackTrace();
		} finally {
			// Null-guard: FileSystem.get() or create() may have failed before
			// assigning these references; closing blindly would throw NPE.
			try {
				if (out != null) {
					out.close();
				}
				if (fs != null) {
					fs.close();
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
	}

	/**
	 * Reads a local file fully into a byte array.
	 *
	 * @param localPath local file to read
	 * @return the file content, or null if reading failed
	 */
	public static byte[] readLocalFile2ByteArray(String localPath) {
		// 1. Validate the path.
		checkPath(localPath);

		// 2. Stream the file into an in-memory buffer.
		FileInputStream fis = null;
		ByteArrayOutputStream bos = null;
		try {
			fis = new FileInputStream(localPath);
			bos = new ByteArrayOutputStream();

			byte[] buffer = new byte[1024];
			int len;
			while ((len = fis.read(buffer)) > 0) {
				bos.write(buffer, 0, len);
			}
			return bos.toByteArray();
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			// Null-guard: the stream may never have been opened.
			try {
				if (fis != null) {
					fis.close();
				}
				if (bos != null) {
					bos.close();
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
		return null;
	}

	/**
	 * Copies a local file to HDFS (copyFromLocalFile).
	 *
	 * @param localPath local source file
	 * @param hdfsPath  HDFS destination file
	 */
	public static void copyFromLocal(String localPath, String hdfsPath) {
		byte[] content = readLocalFile2ByteArray(localPath);
		// Reading may fail and return null; don't attempt the HDFS write then.
		if (content != null) {
			writeByteArray2HdfsFile(content, hdfsPath);
		}
	}

	/**
	 * Reads an HDFS text file into a set; each trimmed line becomes one element
	 * (duplicate lines collapse).
	 *
	 * @param hdfsPath    HDFS file to read
	 * @param charsetName charset used to decode the file
	 * @return the distinct lines (empty on failure, never null)
	 */
	public static Set<String> readHdfsFile2Set(String hdfsPath, String charsetName) {
		// Delegate to the list reader; the original duplicated the whole body
		// here and, worse, returned null instead of the collected result.
		return new HashSet<String>(readHdfsFile2List(hdfsPath, charsetName));
	}

	/**
	 * Reads an HDFS text file into a list; each trimmed line becomes one element.
	 *
	 * @param hdfsPath    HDFS file to read
	 * @param charsetName charset used to decode the file
	 * @return the lines in file order (empty on failure, never null)
	 */
	public static List<String> readHdfsFile2List(String hdfsPath, String charsetName) {
		// 1. Validate the path.
		checkPath(hdfsPath);

		// Collected lines.
		List<String> result = new ArrayList<String>();

		BufferedReader br = null;
		FileSystem fs = null;
		try {
			// Open the HDFS file and wrap it with a charset-aware reader.
			fs = FileSystem.get(conf);
			FSDataInputStream in = fs.open(new Path(hdfsPath));
			br = new BufferedReader(new InputStreamReader(in, charsetName));

			String line;
			while ((line = br.readLine()) != null) {
				result.add(line.trim());
			}
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			try {
				if (br != null) {
					br.close(); // also closes the underlying HDFS input stream
				}
				if (fs != null) {
					fs.close();
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}
		// BUG FIX: the original returned null here, discarding everything read.
		return result;
	}

	/**
	 * Value object pairing a set of common words with their count.
	 */
	private static class SameWordAndCount {
		// Number of common words (equals words.size()).
		final int count;
		// The common words themselves.
		final Set<String> words;

		public SameWordAndCount(int count, Set<String> words) {
			this.count = count;
			this.words = words;
		}
	}

	/**
	 * Reads two HDFS files and returns the words (lines) they have in common
	 * together with how many there are.
	 *
	 * @param hdfsPathOne first HDFS file
	 * @param hdfsPathTwo second HDFS file
	 * @param charsetName charset used to decode both files
	 * @return the common words and their count
	 */
	public static SameWordAndCount getSameWordAndCountFromHdfs(
			String hdfsPathOne, String hdfsPathTwo, String charsetName) {
		// Read both files into sets of distinct words.
		Set<String> words01 = readHdfsFile2Set(hdfsPathOne, charsetName);
		Set<String> words02 = readHdfsFile2Set(hdfsPathTwo, charsetName);

		// Intersection of the two word sets.
		Set<String> sameWords = new HashSet<String>();
		for (String word : words01) {
			if (words02.contains(word)) {
				sameWords.add(word);
			}
		}
		return new SameWordAndCount(sameWords.size(), sameWords);
	}

	/**
	 * Returns the words (lines) a local file and an HDFS file have in common,
	 * together with how many there are.
	 *
	 * @param localPath   local file
	 * @param hdfsPath    HDFS file
	 * @param charsetName charset used to decode both files
	 * @return the common words and their count
	 */
	public static SameWordAndCount getSameWordAndCountFromHdfsAndLocal(
			String localPath, String hdfsPath, String charsetName) {
		// Read both files into sets of distinct words.
		Set<String> words01 = readLocalFile2Set(localPath, charsetName);
		Set<String> words02 = readHdfsFile2Set(hdfsPath, charsetName);

		// Intersection of the two word sets.
		Set<String> sameWords = new HashSet<String>();
		for (String word : words01) {
			if (words02.contains(word)) {
				sameWords.add(word);
			}
		}
		return new SameWordAndCount(sameWords.size(), sameWords);
	}

	/**
	 * Reads a local text file into a set; each trimmed line becomes one element.
	 *
	 * @param localPath   local file to read
	 * @param charsetName charset used to decode the file
	 * @return the distinct lines (empty on failure, never null)
	 */
	private static Set<String> readLocalFile2Set(String localPath,
			String charsetName) {
		// 1. Validate the path.
		checkPath(localPath);

		// Collected lines.
		Set<String> result = new HashSet<String>();

		BufferedReader br = null;
		try {
			FileInputStream fis = new FileInputStream(localPath);
			// BUG FIX: the original ignored charsetName and decoded with the
			// platform default charset.
			br = new BufferedReader(new InputStreamReader(fis, charsetName));

			String line;
			while ((line = br.readLine()) != null) {
				result.add(line.trim());
			}
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			// Null-guard: the reader may never have been opened.
			try {
				if (br != null) {
					br.close(); // also closes the wrapped file stream
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}

		return result;
	}

	/**
	 * Lists the regular files directly inside an HDFS directory.
	 *
	 * @param hdfsDir HDFS directory to list
	 * @return full path strings of the files found (empty on failure, never null)
	 */
	public static List<String> getFilePathsByDir(String hdfsDir) {
		// 1. Validate the path.
		checkPath(hdfsDir);
		// 2. Collected file paths.
		List<String> result = new ArrayList<String>();

		FileSystem fs = null;
		try {
			fs = FileSystem.get(conf);
			FileStatus[] files = fs.listStatus(new Path(hdfsDir));
			for (FileStatus file : files) {
				// BUG FIX: FileStatus.toString() dumps the whole status record;
				// we need the path itself. Skip sub-directories — callers open
				// each returned entry as a file.
				if (file.isFile()) {
					result.add(file.getPath().toString());
				}
			}
		} catch (Exception e) {
			e.printStackTrace();
		} finally {
			// Close the file system handle the original leaked.
			try {
				if (fs != null) {
					fs.close();
				}
			} catch (IOException e) {
				e.printStackTrace();
			}
		}

		return result;
	}

	/**
	 * Counts word (line) frequencies across every file in an HDFS directory and
	 * returns them sorted by descending count.
	 *
	 * @param hdfsDir     HDFS directory whose files are counted
	 * @param charsetName charset used to decode the files
	 * @return word → count, iteration order = descending count (ties by word)
	 */
	public static Map<String, Integer> getWordCountFromHdfsDir(String hdfsDir,
			String charsetName) {
		// Aggregate counts across all files.
		Map<String, Integer> mapCnt = new HashMap<String, Integer>();
		// 1. List the files in the directory.
		List<String> files = getFilePathsByDir(hdfsDir);
		// 2. Count every word of every file directly into the aggregate map.
		// BUG FIX: the original built a per-file map, then "merged" it with
		// mapCnt.containsKey(words) (testing the whole list, not the word) and
		// wrote the merged value back into the per-file map — so mapCnt stayed
		// empty forever. Counting straight into mapCnt is equivalent and correct.
		for (String file : files) {
			List<String> words = readHdfsFile2List(file, charsetName);
			for (String word : words) {
				if (mapCnt.containsKey(word)) {
					mapCnt.put(word, mapCnt.get(word) + 1);
				} else {
					mapCnt.put(word, 1);
				}
			}
		}

		// 3. Sort entries by descending count. BUG FIX: the original comparator
		// sorted ascending despite the "descending" intent, and compared only by
		// value — a TreeSet treats compare()==0 as duplicates, so all words with
		// an equal count except one were silently dropped. Tie-break on the key.
		TreeSet<Entry<String, Integer>> treeSet = new TreeSet<Map.Entry<String, Integer>>(
				new Comparator<Entry<String, Integer>>() {

					@Override
					public int compare(Entry<String, Integer> entry1,
							Entry<String, Integer> entry2) {
						int byCount = entry2.getValue().compareTo(entry1.getValue());
						return byCount != 0 ? byCount
								: entry1.getKey().compareTo(entry2.getKey());
					}
				});
		treeSet.addAll(mapCnt.entrySet());

		// 4. BUG FIX: the original copied the sorted entries into a HashMap,
		// which destroys the order just established; LinkedHashMap keeps it.
		Map<String, Integer> result = new LinkedHashMap<String, Integer>();
		for (Entry<String, Integer> entry : treeSet) {
			result.put(entry.getKey(), entry.getValue());
		}
		return result;
	}
}
