package com.xiaoxu.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.util.Progressable;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.*;
import java.net.URI;
import java.net.URISyntaxException;
import java.nio.charset.StandardCharsets;
import java.util.Arrays;

/**
 * Created by xiaoxu on 2018/12/10.
 */
public class ApiDemo {

    // URL of your own HDFS NameNode.
    // Check that the port is listening with:
    //     netstat -tunlp | grep 8020
    public static final String HDFS_URL = "hdfs://192.168.211.132:8020";
    FileSystem fileSystem = null;
    Configuration configuration = null;

    /**
     * Connects to HDFS before each test, authenticating as user "hadoop".
     *
     * @throws URISyntaxException   if {@link #HDFS_URL} is malformed
     * @throws IOException          if the NameNode cannot be reached
     * @throws InterruptedException if the login is interrupted
     */
    @Before
    public void setUp() throws URISyntaxException, IOException, InterruptedException {
        System.out.println("HDFS SET UP");
        configuration = new Configuration();
        // Obtain the file system handle for the remote cluster.
        fileSystem = FileSystem.get(new URI(HDFS_URL), configuration, "hadoop");
    }

    /**
     * Releases the HDFS connection after each test.
     *
     * <p>Note: the method name keeps its original (misspelled) form; JUnit
     * invokes it via the {@code @After} annotation, not by name.</p>
     */
    @After
    public void destory() {
        // Actually close the FileSystem (it holds sockets and threads) instead
        // of only dropping the reference, which would leak the connection.
        if (fileSystem != null) {
            try {
                fileSystem.close();
            } catch (IOException ignored) {
                // Best-effort cleanup during teardown; nothing useful to do here.
            }
        }
        configuration = null;
        fileSystem = null;
        System.out.println("HDFS destroy");
    }


    /**
     * Creates a directory on HDFS (including missing parents).
     *
     * @throws IOException on communication failure
     */
    @Test
    public void mkdir() throws IOException {
        fileSystem.mkdirs(new Path("/txy/test"));
    }

    /**
     * Creates a file on HDFS and writes a short UTF-8 message into it.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void create() throws IOException {
        // try-with-resources guarantees the stream is closed even on error;
        // close() flushes, so no explicit flush() is needed.
        try (FSDataOutputStream fsDataOutputStream =
                     fileSystem.create(new Path("/hdfsApi/test/first.txt"))) {
            // Use an explicit charset: getBytes() without one depends on the
            // platform default and is not portable.
            fsDataOutputStream.write("第一次向hdfs系统中写入文件".getBytes(StandardCharsets.UTF_8));
        }
    }

    /**
     * Prints the content of an HDFS file to stdout.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void cat() throws IOException {
        // try-with-resources closes the stream even if copyBytes throws.
        try (FSDataInputStream fsDataInputStream =
                     fileSystem.open(new Path("/hdfsApi/test/first.txt"))) {
            IOUtils.copyBytes(fsDataInputStream, System.out, 1024);
        }
    }


    /**
     * Renames a file on HDFS.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void rename() throws IOException {
        Path oldPath = new Path("/hdfsApi/test/first.txt");
        Path newPath = new Path("/hdfsApi/test/firstNew.txt");
        fileSystem.rename(oldPath, newPath);
    }

    /**
     * Uploads a local file to HDFS.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void copyFromLocalFile() throws IOException {
        Path localPath = new Path("D:\\景航\\teaching\\git_project\\hadoopDemo\\src\\test\\java\\com\\xiaoxu\\mapreduce\\hdfs\\copy.txt");
        Path hdfsPath = new Path("/hdfsApi/test/");
        fileSystem.copyFromLocalFile(localPath, hdfsPath);
    }

    /**
     * Uploads a local file to HDFS, printing a progress marker as blocks are
     * written.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void copyFromLocalFileWithProgress() throws IOException {
        File file = new File("D:\\code_environment\\jar.rar");
        // The original version never closed either stream; try-with-resources
        // closes both (output last-opened-first) even if the copy fails.
        try (InputStream bufferedInputStream = new BufferedInputStream(new FileInputStream(file));
             FSDataOutputStream fsDataOutputStream =
                     fileSystem.create(new Path("/hdfsApi/test/jar.rar"), new Progressable() {
                         @Override
                         public void progress() {
                             System.out.println(" * ");
                         }
                     })) {
            IOUtils.copyBytes(bufferedInputStream, fsDataOutputStream, 1024);
        }
    }


    /**
     * Downloads an HDFS file to the local disk.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void downLoad() throws IOException {
        Path localPath = new Path("D:/test/a.txt");
        Path hdfsPath = new Path("/hdfsApi/test/firstNew.txt");
        // Use the 4-arg overload with useRawLocalFileSystem=true: the 2-arg
        // form goes through the checksumming LocalFileSystem, which on Windows
        // needs winutils.exe and throws a NullPointerException without it.
        // (delSrc=false keeps the source on HDFS.)
        fileSystem.copyToLocalFile(false, hdfsPath, localPath, true);

    }

    /**
     * Lists every entry directly under a directory.
     * <p>
     * Question: we already set the replication factor to 1 in hdfs-site.xml,
     * so why do files show 3 here? Files put via the hdfs shell use the
     * cluster-side default (1). Files uploaded through the Java API, where we
     * never set a replication factor locally, use the client's Hadoop default
     * instead.
     */
    @Test
    public void listFiles() throws IOException {
        FileStatus[] fileStatuses = fileSystem.listStatus(new Path("/"));

        for (FileStatus fileStatus : fileStatuses) {
            String isDir = fileStatus.isDirectory() ? "文件夹" : "文件";
            short replication = fileStatus.getReplication();
            long len = fileStatus.getLen();
            // One record per line; the original mixed println/print and broke
            // each entry across two lines.
            System.out.println(isDir + " " + fileStatus.getPath().toString()
                    + " 副本因子 " + replication + " 文件大小 " + len);
        }
    }


    /**
     * Deletes test directories from HDFS.
     *
     * @throws IOException on communication failure
     */
    @Test
    public void remove() throws IOException {
        // HDFS paths are case-sensitive: the original deleted "/hdfsapi/...",
        // which never matches the "/hdfsApi/..." paths the other tests create.
        // delete(Path) is deprecated; pass recursive explicitly (true here,
        // since the directory contains the uploaded test files).
        fileSystem.delete(new Path("/hdfsApi/test"), true);
        fileSystem.delete(new Path("/hdfsApi/test1"), false);
    }


    /**
     * Recursively lists every file under the target directory.
     */
    @Test
    public void listFilesRecursive() throws Exception {

        RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(new Path("/"), true);

        while (files.hasNext()) {
            LocatedFileStatus file = files.next();
            String isDir = file.isDirectory() ? "文件夹" : "文件";
            String permission = file.getPermission().toString();
            short replication = file.getReplication();
            long length = file.getLen();
            String path = file.getPath().toString();


            System.out.println(isDir + "\t" + permission
                    + "\t" + replication + "\t" + length
                    + "\t" + path
            );
        }
    }


    /**
     * Prints block location information (offset, length, hosts) for a file.
     */
    @Test
    public void getFileBlockLocations() throws Exception {

        FileStatus fileStatus = fileSystem.getFileStatus(new Path("/xiaoxu/test/hello.txt"));
        BlockLocation[] blocks = fileSystem.getFileBlockLocations(fileStatus, 0, fileStatus.getLen());

        for (BlockLocation block : blocks) {

            for (String name : block.getNames()) {
                System.out.println(name + " : " + block.getOffset() + " : " + block.getLength() + " : " + Arrays.toString(block.getHosts()));
            }
        }
    }
}
