package practice.phase03.module01.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.fs.permission.FsPermission;
import org.junit.jupiter.api.AfterAll;
import org.junit.jupiter.api.BeforeAll;
import org.junit.jupiter.api.Test;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

public class HdfsTest {

    /** Shared HDFS client handle, opened once for the whole test class. */
    private static FileSystem fs;

    /**
     * Connects to the NameNode at {@code hdfs://h1:9000} as user "root" before any test runs.
     *
     * @throws URISyntaxException   if the NameNode URI is malformed
     * @throws IOException          if the connection cannot be established
     * @throws InterruptedException if the connect attempt is interrupted
     */
    @BeforeAll
    static void init() throws URISyntaxException, IOException, InterruptedException {
        Configuration conf = new Configuration();
//        conf.set("dfs.replication", "6"); // override HDFS client parameters here if needed
        fs = FileSystem.get(new URI("hdfs://h1:9000"), conf, "root");
    }

    /**
     * Closes the shared FileSystem after all tests. Null-guarded so that a failed
     * {@link #init()} does not mask the original error with a NullPointerException here.
     *
     * @throws IOException IOException
     */
    @AfterAll
    static void close() throws IOException {
        if (fs != null) {
            fs.close();
        }
    }

    /**
     * Creates the directory {@code /api_test2} on HDFS if it does not already exist.
     *
     * @throws IOException IOException
     */
    @Test
    public void makeDirTest() throws IOException {
        Path p = new Path("/api_test2");
        if (!fs.exists(p)) {
            fs.mkdirs(p);
        }
    }

    /**
     * Copies a local file to the HDFS directory {@code /api_test}.
     *
     * <p>NOTE(review): the source path is hard-coded to one developer's Windows machine;
     * consider reading it from a system property or test resource so the test is portable.
     *
     * @throws IOException IOException
     */
    @Test
    public void uploadToHDFS() throws IOException {
        Path src = new Path("C:\\Users\\zfqli\\Projects\\big_data_lagou\\lagou_bigdata_assignments\\.gitignore");
        Path dst = new Path("/api_test");
        fs.copyFromLocalFile(src, dst);
    }

    /**
     * Recursively deletes {@code /api_test1} from HDFS and reports whether the
     * delete actually happened (the boolean result was previously discarded,
     * so a failed delete went unnoticed).
     *
     * @throws IOException IOException
     */
    @Test
    public void delete() throws IOException {
        Path toDelete = new Path("/api_test1");
        boolean deleted = fs.delete(toDelete, true); // true => recursive
        System.out.println("deleted " + toDelete + ": " + deleted);
    }

    /**
     * Recursively lists all files under the HDFS root via {@code fs.listFiles},
     * printing name, length, permission, group, owner, and block host locations
     * for each {@link LocatedFileStatus}.
     *
     * @throws IOException IOException
     */
    @Test
    public void listFiles() throws IOException {
        Path p = new Path("/");
        RemoteIterator<LocatedFileStatus> remoteIterator = fs.listFiles(p, true);
        while (remoteIterator.hasNext()) {
            LocatedFileStatus lfs = remoteIterator.next();
            final String filename = lfs.getPath().getName();
            final long len = lfs.getLen();
            final FsPermission permission = lfs.getPermission();
            final String group = lfs.getGroup();
            final String owner = lfs.getOwner();
            final BlockLocation[] blockLocations = lfs.getBlockLocations();
            System.out.println(filename + "\t" + len + "\t" + permission + "\t" + group + "\t" + owner);
            for (BlockLocation blockLocation : blockLocations) {
                final String[] hosts = blockLocation.getHosts();
                for (String host : hosts) {
                    System.out.println(host);
                }
            }
            System.out.println("==============================================");
        }
    }

    /**
     * Lists the immediate children of the HDFS root via {@code fs.listStatus}
     * (non-recursive, unlike {@link #listFiles()}) and prints whether each
     * {@link FileStatus} entry is a file or a directory.
     *
     * @throws IOException IOException
     */
    @Test
    public void listFileStatus() throws IOException {
        Path p = new Path("/");
        FileStatus[] fileStatuses = fs.listStatus(p);
        for (FileStatus fileStatus : fileStatuses) {
            if (fileStatus.isFile()) {
                System.out.println(fileStatus.getPath().getName() + " is a file.");
            }
            if (fileStatus.isDirectory()) {
                System.out.println(fileStatus.getPath().getName() + " is a directory");
            }
        }
    }
}
