package com.cloudputing.common.util;

import java.io.IOException;
import java.io.InputStream;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

import org.apache.commons.lang.StringUtils;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.BlockLocation;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
import org.junit.Assert;

/**
 * Static helper methods for common HDFS operations (upload, create, rename,
 * delete, metadata queries) driven by Hadoop configuration resource files.
 *
 * <p>NOTE(review): {@link FileSystem#get(Configuration)} returns a cached,
 * shared instance, so these methods deliberately do not close it — closing
 * would invalidate the instance for other callers in the same JVM.
 */
public class HdfsUtil {

	// Utility class — not meant to be instantiated.
	private HdfsUtil() {
	}

	/**
	 * Builds a {@link Configuration} from the given configuration resource
	 * paths (e.g. core-site.xml / hdfs-site.xml locations).
	 *
	 * @param resourcePath configuration resources to add, in order; later
	 *                     resources override earlier ones (Hadoop semantics)
	 * @return a Configuration with all resources added
	 * @throws IllegalArgumentException if {@code resourcePath} is null or empty
	 */
	private static Configuration getConfiguration(String[] resourcePath) {
		// Precondition checks: previously done via JUnit Assert, which threw
		// AssertionError and pulled a test-only dependency into production code.
		if (resourcePath == null || resourcePath.length == 0) {
			throw new IllegalArgumentException("resourcePath must be a non-empty array");
		}

		Configuration conf = new Configuration();
		for (String tempResourcePath : resourcePath) {
			conf.addResource(tempResourcePath);
		}
		return conf;
	}

	/**
	 * Obtains the (cached) FileSystem configured by the given resources.
	 *
	 * @param resourcePath configuration resources, see {@link #getConfiguration}
	 * @return the FileSystem for the configured default filesystem URI
	 * @throws IOException if the filesystem cannot be created
	 */
	private static FileSystem getFileSystem(String[] resourcePath) throws IOException {
		Configuration conf = getConfiguration(resourcePath);
		return FileSystem.get(conf);
	}

	/**
	 * Opens an HDFS file for reading.
	 *
	 * @param resourcePath configuration resources
	 * @param srcFile      HDFS path of the file to open
	 * @return an open stream positioned at the start of the file;
	 *         the CALLER is responsible for closing it
	 * @throws Exception if the file cannot be opened
	 */
	public static InputStream downloadStream(String[] resourcePath, String srcFile) throws Exception {
		FileSystem hdfs = getFileSystem(resourcePath);
		Path src = new Path(srcFile);
		return hdfs.open(src);
	}

	/**
	 * Uploads a local file to HDFS, then prints the listing of the target path.
	 *
	 * @param resourcePath configuration resources
	 * @param srcFile      local source path
	 * @param dstFile      HDFS destination path
	 * @throws Exception if the copy or listing fails
	 */
	public static void upload(String[] resourcePath, String srcFile, String dstFile) throws Exception {
		FileSystem hdfs = getFileSystem(resourcePath);

		Path src = new Path(srcFile);
		Path dst = new Path(dstFile);
		hdfs.copyFromLocalFile(src, dst);

		// getUri() replaces the deprecated "fs.default.name" configuration key.
		System.out.println("Upload to " + hdfs.getUri());
		FileStatus files[] = hdfs.listStatus(dst);
		for (FileStatus file : files) {
			System.out.println(file.getPath());
		}
	}

	/**
	 * Creates (or overwrites) an HDFS file containing the given text, encoded
	 * as UTF-8, then prints the listing of the created path.
	 *
	 * @param resourcePath configuration resources
	 * @param content      text content to write
	 * @param fileFullName full HDFS path of the file to create
	 * @throws Exception if the write fails; unlike the previous version, a
	 *                   failure now propagates instead of being printed and
	 *                   silently swallowed
	 */
	public static void createHdfsFileWithContent(String[] resourcePath,
			String content, String fileFullName) throws Exception {

		FileSystem hdfs = getFileSystem(resourcePath);

		// Explicit charset: the bare getBytes() used before was
		// platform-default-dependent.
		byte[] buff = content.getBytes(StandardCharsets.UTF_8);

		Path dst = new Path(fileFullName);
		// try-with-resources guarantees the stream is closed even on failure.
		try (FSDataOutputStream outputStream = hdfs.create(dst)) {
			outputStream.write(buff, 0, buff.length);
		}

		FileStatus files[] = hdfs.listStatus(dst);
		for (FileStatus file : files) {
			System.out.println(file.getPath());
		}
	}

	/**
	 * Renames (moves) an HDFS file, then prints the listing of the root path.
	 *
	 * @param resourcePath configuration resources
	 * @param fromPathFile existing HDFS path
	 * @param toPathFile   target HDFS path
	 * @throws Exception if the filesystem cannot be reached
	 */
	public static void renameHdfsFile(String[] resourcePath, String fromPathFile, String toPathFile) throws Exception {
		FileSystem hdfs = getFileSystem(resourcePath);

		Path frpath = new Path(fromPathFile);
		Path topath = new Path(toPathFile);

		// rename() reports failure via its return value rather than an
		// exception; previously this was silently ignored.
		if (!hdfs.rename(frpath, topath)) {
			System.out.println("rename failed: " + fromPathFile + " -> " + toPathFile);
		}

		Path dst = new Path("/");
		FileStatus files[] = hdfs.listStatus(dst);
		for (FileStatus file : files) {
			System.out.println(file.getPath());
		}
	}

	/**
	 * Recursively deletes an HDFS path, prints the outcome, then prints the
	 * listing of the root path.
	 *
	 * @param resourcePath configuration resources
	 * @param delfile      HDFS path to delete (recursive)
	 * @throws Exception if the filesystem cannot be reached
	 * @throws IllegalArgumentException if {@code delfile} is null or empty
	 */
	public static void delHdfsFile(String[] resourcePath, String delfile) throws Exception {
		// Replaces the former JUnit-based precondition checks.
		if (StringUtils.isEmpty(delfile)) {
			throw new IllegalArgumentException("delfile must be a non-empty path");
		}

		FileSystem hdfs = getFileSystem(resourcePath);

		Path topath = new Path(delfile);

		boolean ok = hdfs.delete(topath, true);
		System.out.println(ok ? "删除成功" : "删除失败");

		Path dst = new Path("/");
		FileStatus files[] = hdfs.listStatus(dst);
		for (FileStatus file : files) {
			System.out.println(file.getPath());
		}
	}

	/**
	 * Prints path and last-modification time (epoch millis) for every entry
	 * directly under the HDFS root directory.
	 *
	 * @param resourcePath configuration resources
	 * @throws Exception if the listing fails
	 */
	public static void viewModifyTime(String[] resourcePath) throws Exception {
		FileSystem hdfs = getFileSystem(resourcePath);

		Path dst = new Path("/");

		FileStatus files[] = hdfs.listStatus(dst);
		for (FileStatus file : files) {
			System.out.println(file.getPath() + "\t"
					+ file.getModificationTime());
		}
	}

	/**
	 * Prints whether the given HDFS path exists.
	 *
	 * @param resourcePath configuration resources
	 * @param viewFile     HDFS path to check
	 * @throws Exception if the filesystem cannot be reached
	 */
	public static void checkFileIfExists(String[] resourcePath, String viewFile) throws Exception {
		FileSystem hdfs = getFileSystem(resourcePath);

		Path dst = new Path(viewFile);

		boolean ok = hdfs.exists(dst);
		System.out.println(ok ? "文件存在" : "文件不存在");
	}

	/**
	 * Prints the block locations (hosts, names, offset, length) of every block
	 * of the given HDFS file.
	 *
	 * @param resourcePath configuration resources
	 * @param viewFile     HDFS path of the file to inspect
	 * @throws Exception if the file status or block locations cannot be read
	 */
	public static void viewFileBlockLocation(String[] resourcePath, String viewFile) throws Exception {
		FileSystem hdfs = getFileSystem(resourcePath);

		Path dst = new Path(viewFile);

		FileStatus fileStatus = hdfs.getFileStatus(dst);
		BlockLocation[] blockLocations = hdfs.getFileBlockLocations(fileStatus,
				0, fileStatus.getLen());
		for (BlockLocation block : blockLocations) {
			System.out.println(Arrays.toString(block.getHosts()) + "\t"
					+ Arrays.toString(block.getNames()) + "\t"
					+ block.getOffset() + "\t" + block.getLength());
		}
	}

	/**
	 * Prints host name and name of every datanode in the HDFS cluster.
	 *
	 * <p>NOTE(review): the cast assumes the configured default filesystem is a
	 * {@code DistributedFileSystem}; a ClassCastException results otherwise —
	 * same behavior as the original code.
	 *
	 * @param resourcePath configuration resources
	 * @throws Exception if datanode statistics cannot be retrieved
	 */
	public static void getHostName(String[] resourcePath) throws Exception {
		Configuration conf = getConfiguration(resourcePath);

		DistributedFileSystem hdfs = (DistributedFileSystem) FileSystem
				.get(conf);
		DatanodeInfo[] dataNodeStats = hdfs.getDataNodeStats();

		for (DatanodeInfo dataNode : dataNodeStats) {
			System.out.println(dataNode.getHostName() + "\t"
					+ dataNode.getName());
		}
	}

}