package cnn;

// Exercises the HDFS Java API (FileSystem operations) against a remote cluster via JUnit.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.net.URI;
import java.nio.charset.StandardCharsets;

public class HdfsApp {

    FileSystem fileSystem = null;
    Configuration configuration = null;


    /*
    Create a directory
     */
    @Test
    public void mkdir() throws Exception {
        fileSystem.mkdirs(new Path("/hdfaapi/test"));
    }

    /*
    Create a file and write a short string to it
     */
    @Test
    public void create() throws Exception {
        // try-with-resources guarantees the stream is closed even if write() throws
        try (FSDataOutputStream outputStream = fileSystem.create(new Path("/hdfaapi/test/c.txt"))) {
            // Name the charset explicitly; the no-arg getBytes() uses the platform default
            outputStream.write("hello hadoop".getBytes(StandardCharsets.UTF_8));
            outputStream.flush();
        }
    }

    /*
    Read a file and dump its contents to stdout
     */
    @Test
    public void cat() throws Exception {
        // Close only the input stream; System.out must stay open for later tests
        try (FSDataInputStream fsDataInputStream = fileSystem.open(new Path("/hdfaapi/test/a.txt"))) {
            IOUtils.copyBytes(fsDataInputStream, System.out, 1024);
        }
    }

    /*
    Rename a file
     */
    @Test
    public void rename() throws Exception {
        Path oldPath = new Path("/hdfaapi/test/a.txt");
        Path newPath = new Path("/hdfaapi/test/b.txt");
        // rename() signals failure through its return value, not an exception;
        // without this check a failed rename would pass silently.
        if (!fileSystem.rename(oldPath, newPath)) {
            throw new IllegalStateException("rename failed: " + oldPath + " -> " + newPath);
        }
    }

    /*
    Copy a local file to HDFS
     */
    @Test
    public void copyFromLocal() throws Exception {
        Path localPath = new Path("C:/Users/yw/Desktop/phone.txt");
        Path hdfsPath = new Path("/hdfs/test");
        fileSystem.copyFromLocalFile(localPath, hdfsPath);
    }

    /*
    List the entries of a directory
     */
    @Test
    public void listFiles() throws Exception {
        FileStatus[] fileStatuses = fileSystem.listStatus(new Path("/hdfaapi/test"));

        for (FileStatus fileStatus : fileStatuses) {
            String isDir = fileStatus.isDirectory() ? "文件夹" : "文件";
            short replication = fileStatus.getReplication();
            long len = fileStatus.getLen();
            String path = fileStatus.getPath().toString();

            System.out.println(isDir + "\t" + replication + "\t" + len + "\t" + path);
        }
    }

    /**
     * Delete the test directory tree.
     */
    @Test
    public void delete() throws Exception {
        // The single-argument delete(Path) is deprecated, and deleting a non-empty
        // directory requires recursive=true or an IOException is thrown.
        boolean deleted = fileSystem.delete(new Path("/hdfaapi"), true);
        System.out.println("deleted: " + deleted);
    }

    /*
    Test setup
    */

    // HDFS NameNode address
    public static final String HDFS_PATH = "hdfs://106.15.179.224:9000";

    @Before
    public void setup() throws Exception {
        System.out.println("setup");
        configuration = new Configuration();
        // Required for a remote Java API client, otherwise writes fail with:
        // "File /hdfaapi/test/c.txt could only be replicated to 0 nodes instead of
        // minReplication (=1). There are 1 datanode(s) running and 1 node(s) are
        // excluded in this operation."
        configuration.set("dfs.client.use.datanode.hostname", "true");
        fileSystem = FileSystem.get(new URI(HDFS_PATH), configuration, "hadoop");
    }

    /*
    Test teardown: release the client connection and clear all state
    */
    @After
    public void out() throws Exception {
        // Close the FileSystem before dropping the reference; nulling it alone
        // leaks the underlying connection to the cluster.
        if (fileSystem != null) {
            fileSystem.close();
        }
        configuration = null;
        fileSystem = null;
        System.out.println("down");
    }

}
