package com.sunzm.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.IOException;
import java.net.URI;
import java.util.Arrays;

/**
 * HDFS操作工具类
 *
 * @author Administrator
 * @version 1.0
 * @date 2021-07-01 14:38
 */
public class HDFSUtilsDemo {
    private static final Logger logger = LoggerFactory.getLogger(HDFSUtilsDemo.class);

    public static void main(String[] args) throws Exception {
        Configuration conf = new HdfsConfiguration();
        FileSystem localFs = FileSystem.getLocal(conf);

        URI uri = new URI("hdfs://10.111.3.201:8020");
        FileSystem hdfs = FileSystem.get(uri, conf);

        //判断本地文件是否存在
        Path localPath = new Path("C:/mydata/log4j.properties");
        if (localFs.exists(localPath)) {
            logger.info(localPath + ", 存在. ");
        } else {
            logger.warn(localPath + ", 不存在. ");
        }

        //判断HDFS上的文件是否存在
        Path hdfsPath = new Path("/data/data_center/cus/call_log_record/call_log_record-2021-05-01-2021-05-01");
        if (hdfs.exists(hdfsPath)) {
            logger.info(hdfsPath + ", 存在. ");
        } else {
            logger.warn(hdfsPath + ", 不存在. ");
        }

        localFs.close();
        hdfs.close();
    }

    /**
     * HDFS上的文件和目录常用操作
     */
    private static void pathAndFileOpeDemo(FileSystem hdfs) throws Exception {
        //删除文件, 第2个参数表示是否递归删除
        hdfs.delete(new Path("/tmp/sunzm/*"), true);

        //文件如果存在，就删除
        hdfs.deleteOnExit(new Path("/tmp/sunzm/*"));

        //把文件从HDFS拷贝到本地
        hdfs.copyToLocalFile(new Path("/tmp/sunzm/hdfs-file"), new Path("/tmp/sunzm/localfile/"));

        //把本地文件移动到HDFS
        hdfs.copyFromLocalFile(new Path("/tmp/sunzm/localfile"), new Path("/tmp/sunzm/hdfs-file"));

        //把文件从HDFS移动到本地，HDFS上的源文件会被删除
        hdfs.moveToLocalFile(new Path("/tmp/sunzm/hdfs-file"), new Path("/tmp/sunzm/localfile/"));

        //把本地文件移动到HDFS
        hdfs.moveFromLocalFile(new Path("/tmp/sunzm/localfile"), new Path("/tmp/sunzm/hdfs-file"));

        //列出 /tmp/sunzm/ 目录下的所有文件，第二个参数表示是否需要递归列出
        RemoteIterator<LocatedFileStatus> fileIterator = hdfs.listFiles(new Path("/tmp/sunzm/"), true);

        while (fileIterator.hasNext()){
            LocatedFileStatus fileStatus = fileIterator.next();
            Path path = fileStatus.getPath();
            BlockLocation[] blockLocations = fileStatus.getBlockLocations();
            long blockSize = fileStatus.getBlockSize();

            System.out.println("path:" + path + ",blockLocations:" + blockLocations + "," + blockSize);
        }

        //判断是否为文件
        boolean isFile = hdfs.isFile(new Path("/tmp/sunzm/chatMsg.txt"));
        System.out.println(isFile);

        //判断是否为文件夹
        boolean isDirectory = hdfs.isDirectory(new Path("/tmp/sunzm/"));
        System.out.println(isDirectory);

        //创建文件夹
        boolean mkdirs = hdfs.mkdirs(new Path("/tmp/sunzm/test/"));

        //创建一个文件夹，第二个参数表示如果文件已经存在，是否覆盖
        hdfs.create(new Path("/tmp/sunzm/test2/"), true);

        //创建一个文件
        hdfs.createFile(new Path("/tmp/sunzm/test.txt"));

        //另外创建文件，还可以指定副本数,权限等，还有很多API, 就不一一列举了
    }
}
