package com.iwy.hadoop.hdfs;

import cn.hutool.core.util.ObjectUtil;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.net.URI;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;

/**
 * Thin wrapper around the Hadoop {@code FileSystem} API providing common HDFS
 * operations: mkdir, delete, rename, file/block inspection, upload and download.
 *
 * @Author weibi
 * @Date 2021/1/20 15:15
 * @Description HDFS client utility built on a single shared FileSystem handle;
 * call {@link HDFSClient#close()} when finished to release the connection.
 */
public class HDFSClient {

    /** HDFS block size assumed by {@link #downBlock} (128 MiB), kept in long arithmetic. */
    private static final long HDFS_BLOCK_SIZE = 128L * 1024 * 1024;

    /** Shared Hadoop file-system handle, created once in the constructor; may be null if connection failed. */
    private final FileSystem fs;

    public HDFSClient() {
        fs = getFs();
    }

    /**
     * Creates a directory on HDFS, including any missing parent directories.
     *
     * @param path the directory path to create
     * @return {@code true} on success, {@code false} on failure
     */
    public Boolean mkdirs(String path) {
        try {
            return fs.mkdirs(new Path(path));
        } catch (IOException e) {
            e.printStackTrace();
        }
        return Boolean.FALSE;
    }

    /**
     * Deletes a file or directory (recursively) from HDFS immediately.
     *
     * <p>Bug fix: the original called {@code fs.deleteOnExit(path)}, which only marks the
     * path for deletion when the FileSystem is closed — callers of a method named
     * {@code delete} expect the path to be removed right away.
     *
     * @param path the path to delete
     * @return {@code true} if the path was deleted, {@code false} otherwise
     */
    public Boolean delete(String path) {
        try {
            return fs.delete(new Path(path), Boolean.TRUE);
        } catch (IOException e) {
            e.printStackTrace();
        }
        return Boolean.FALSE;
    }

    /**
     * Renames (moves) a file or directory on HDFS.
     *
     * @param oldFile the current path
     * @param newFile the target path
     * @return {@code true} on success, {@code false} on failure
     */
    public Boolean rename(String oldFile, String newFile) {
        try {
            return fs.rename(new Path(oldFile), new Path(newFile));
        } catch (IOException e) {
            e.printStackTrace();
        }
        return Boolean.FALSE;
    }

    /**
     * Returns metadata (name, length, permission, block locations) for the file at
     * {@code path}.
     *
     * <p>NOTE(review): the listing is recursive, and each iteration overwrites the same
     * {@code FileInfo}, so when {@code path} is a directory only the LAST file visited
     * is reported. The return type is kept for callers; prefer passing a single-file
     * path until a list-returning variant exists.
     *
     * @param path an HDFS path (ideally a single file)
     * @return the metadata of the (last) file found, or {@code null} on I/O error
     */
    public FileInfo fileDetail(String path) {
        try {
            RemoteIterator<LocatedFileStatus> listFiles = fs.listFiles(new Path(path), Boolean.TRUE);
            FileInfo fileInfo = new FileInfo();
            while (listFiles.hasNext()) {
                LocatedFileStatus fileStatus = listFiles.next();

                fileInfo.setFileName(fileStatus.getPath().getName());
                fileInfo.setLen(fileStatus.getLen());
                fileInfo.setPermission(fileStatus.getPermission());

                // Collect the host list of every block of this file.
                BlockLocation[] blockLocations = fileStatus.getBlockLocations();
                List<BlockInfo> list = new ArrayList<>(blockLocations.length);
                for (BlockLocation blockLocation : blockLocations) {
                    BlockInfo blockInfo = new BlockInfo();
                    blockInfo.setHosts(Arrays.asList(blockLocation.getHosts()));
                    list.add(blockInfo);
                }

                fileInfo.setBlocks(list);
            }
            return fileInfo;
        } catch (IOException e) {
            e.printStackTrace();
        }
        return null;
    }

    /**
     * Checks whether {@code path} itself refers to a regular file (not a directory).
     *
     * <p>Bug fix: the original recursively listed everything under {@code path} and
     * returned {@code true} if ANY file existed beneath it, so a directory containing
     * files was wrongly reported as a file. This queries the status of the path itself.
     *
     * @param path the HDFS path to check
     * @return {@code true} if the path is a regular file, {@code false} if it is a
     *         directory, does not exist, or an I/O error occurs
     */
    public Boolean isFile(String path) {
        try {
            return fs.getFileStatus(new Path(path)).isFile();
        } catch (IOException e) {
            // Covers FileNotFoundException (path absent) as well as transport errors.
            e.printStackTrace();
        }
        return Boolean.FALSE;
    }

    /**
     * Uploads a local file to HDFS.
     *
     * @param source local source path
     * @param target HDFS target path
     */
    public void uploadToHDFS(String source, String target) {
        try {
            fs.copyFromLocalFile(new Path(source), new Path(target));
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Downloads a file from HDFS to the local file system.
     *
     * @param source HDFS source path
     * @param target local target path (written via the raw local FS, skipping the .crc file)
     */
    public void downFromHDFS(String source, String target) {
        try {
            // delSrc=false: keep the HDFS copy; useRawLocalFileSystem=true: no checksum file.
            fs.copyToLocalFile(Boolean.FALSE, new Path(source), new Path(target), Boolean.TRUE);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }

    /**
     * Downloads the content of a single 128 MiB block of an HDFS file.
     *
     * <p>Bug fixes over the original:
     * <ul>
     *   <li>the seek offset {@code block * 1024 * 1024 * 128} was computed in {@code int}
     *       and overflowed for {@code block >= 16}; it is now {@code long} arithmetic;</li>
     *   <li>the copy loop ignored the byte count returned by {@code read} and wrote the
     *       whole buffer each iteration (duplicating stale bytes on short reads, never
     *       detecting EOF) and counted iterations instead of bytes; it now writes exactly
     *       the bytes read and stops after one block or at end of file.</li>
     * </ul>
     *
     * @param block  zero-based block index
     * @param file   HDFS source file
     * @param target local target file
     * @param flag   {@code true} if this is the last block (copy to EOF)
     */
    public void downBlock(int block, String file, String target, Boolean flag) {

        FSDataInputStream inputStream = null;
        FileOutputStream outputStream = null;
        try {
            inputStream = fs.open(new Path(file));
            outputStream = new FileOutputStream(new File(target));

            // Seek to the start of the requested block (long arithmetic — see Javadoc).
            inputStream.seek(block * HDFS_BLOCK_SIZE);

            if (!flag) {
                // Copy exactly one block in 1 KiB chunks, writing only the bytes read.
                byte[] buf = new byte[1024];
                long remaining = HDFS_BLOCK_SIZE;
                while (remaining > 0) {
                    int len = inputStream.read(buf, 0, (int) Math.min(buf.length, remaining));
                    if (len == -1) {
                        break; // file ended before the block boundary
                    }
                    outputStream.write(buf, 0, len);
                    remaining -= len;
                }
            } else {
                // Last block: copy everything from the seek position to EOF.
                Configuration cfg = new Configuration();
                IOUtils.copyBytes(inputStream, outputStream, cfg);
            }

        } catch (IOException e) {
            e.printStackTrace();
        } finally {

            if (ObjectUtil.isNotEmpty(inputStream)) {
                IOUtils.closeStream(inputStream);
            }

            if (ObjectUtil.isNotEmpty(outputStream)) {
                IOUtils.closeStream(outputStream);
            }
        }
    }

    /**
     * Connects to the HDFS cluster as user {@code root}.
     *
     * @return the FileSystem handle, or {@code null} if the connection failed
     */
    private FileSystem getFs() {
        try {
            Configuration cfg = new Configuration();
            return FileSystem.get(URI.create("hdfs://hadoop101:9000"), cfg, "root");
        } catch (IOException e) {
            e.printStackTrace();
        } catch (InterruptedException e) {
            // Restore the interrupt status so callers up the stack can observe it.
            Thread.currentThread().interrupt();
            e.printStackTrace();
        }
        return null;
    }

    /** Closes the underlying FileSystem connection, if one was established. */
    public void close() {
        try {
            if (ObjectUtil.isNotEmpty(fs)) {
                fs.close();
            }
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}
