package cuilitang.hadoop.hdfs.demo;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.FileInputStream;
import java.io.IOException;
import java.net.URI;

/**
 * JUnit 4 integration tests for basic HDFS {@link FileSystem} operations
 * (upload, download, rename, delete, append, listing).
 *
 * <p>Requires a reachable HDFS NameNode at {@code hdfs://node9:9000}; each
 * test opens a fresh client in {@link #before()} and closes it in
 * {@link #after()}.
 */
public class HdfsClient {

    /** Client handle created per test in {@link #before()}, closed in {@link #after()}. */
    private FileSystem fs;

    /**
     * Opens the HDFS client as user {@code root}.
     *
     * <p>The two {@code replace-datanode-on-failure} settings work around the
     * "Failed to replace a bad datanode on the existing pipeline" error seen
     * on small clusters when appending (see trailing note in this file).
     *
     * @throws IOException          if the NameNode cannot be reached
     * @throws InterruptedException if the connection attempt is interrupted
     */
    @Before
    public void before() throws IOException, InterruptedException {
        Configuration conf = new Configuration();
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        fs = FileSystem.get(URI.create("hdfs://node9:9000"), conf, "root");
        System.out.println("之前!!!!!!");
    }

    /**
     * Uploads a local file to the HDFS root.
     *
     * @throws IOException          on transfer failure
     * @throws InterruptedException declared for signature compatibility
     */
    @Test
    public void put() throws IOException, InterruptedException {
        fs.copyFromLocalFile(new Path("C:/uploadpath/3.txt"), new Path("/3.txt"));
    }

    /**
     * Downloads the HDFS {@code /user} tree to a local directory.
     *
     * <p>Note: the {@code fs.close()} that used to live here was removed —
     * {@link #after()} already closes the client, and since
     * {@code FileSystem.get()} returns cached instances, an extra mid-test
     * close can invalidate the shared handle for other callers.
     *
     * @throws IOException          on transfer failure
     * @throws InterruptedException declared for signature compatibility
     */
    @Test
    public void get() throws IOException, InterruptedException {
        // 用这个对象操作文件系统 -> use the client handle to copy to local disk
        fs.copyToLocalFile(new Path("/user"), new Path("c:\\download"));
    }

    /**
     * Renames {@code /user} to {@code /cuilitang} on HDFS.
     *
     * @throws IOException          on RPC failure
     * @throws InterruptedException declared for signature compatibility
     */
    @Test
    public void rename() throws IOException, InterruptedException {
        fs.rename(new Path("/user"), new Path("/cuilitang"));
    }

    /**
     * Recursively deletes {@code /2} and reports the outcome.
     *
     * @throws IOException on RPC failure
     */
    @Test
    public void delete() throws IOException {
        // second argument 'true' = recursive delete
        boolean delete = fs.delete(new Path("/2"), true);
        if (delete) {
            System.out.println("删除成功");
        } else {
            System.out.println("删除失败");
        }
    }

    /**
     * Appends the contents of a local file to the remote file {@code /3.txt}.
     *
     * <p>NOTE(review): the method name {@code du} is misleading — it appends
     * rather than reporting disk usage — but it is kept for compatibility.
     * The {@code true} flag passed to {@code IOUtils.copyBytes} closes both
     * streams when the copy finishes, so no explicit close is needed.
     *
     * @throws IOException if the append pipeline or local read fails
     */
    @Test
    public void du() throws IOException {
        FSDataOutputStream append = fs.append(new Path("/3.txt"), 1024);
        FileInputStream open = new FileInputStream("C:/download/1.txt");
        IOUtils.copyBytes(open, append, 1024, true);
    }

    /**
     * Lists the direct children of the HDFS root, printing path (and size for
     * files) of each entry.
     *
     * @throws IOException on RPC failure
     */
    @Test
    public void ls() throws IOException {
        FileStatus[] fileStatuses = fs.listStatus(new Path("/"));

        for (FileStatus fileStatus : fileStatuses) {
            if (fileStatus.isFile()) {
                // fixed typo: 一下 -> 以下 ("the following is file info")
                System.out.println("以下信息是一个文件的信息");
                System.out.println(fileStatus.getPath());
                System.out.println(fileStatus.getLen());
            } else {
                System.out.println("这是一个文件夹");
                System.out.println(fileStatus.getPath());
            }
        }
    }

    /**
     * Recursively lists every file under the HDFS root and prints the hosts
     * holding each of its blocks.
     *
     * @throws IOException on RPC failure
     */
    @Test
    public void listFiles() throws IOException {
        // second argument 'true' = recurse into subdirectories
        RemoteIterator<LocatedFileStatus> files = fs.listFiles(new Path("/"), true);

        while (files.hasNext()) {
            LocatedFileStatus file = files.next();

            System.out.println("======================================");
            System.out.println(file.getPath());

            System.out.println("块信息：");
            BlockLocation[] blockLocations = file.getBlockLocations();
            for (BlockLocation blockLocation : blockLocations) {
                String[] hosts = blockLocation.getHosts();
                System.out.print("块在");
                for (String host : hosts) {
                    System.out.print(host + " ");
                }
            }
        }
    }

    /**
     * Closes the HDFS client after each test.
     *
     * @throws IOException if the close fails
     */
    @After
    public void after() throws IOException {
        System.out.println("After!!!!!!!!!!");
        // 关闭文件系统 -> single point of lifecycle cleanup
        fs.close();
    }
}


/**
 * Workaround note: appending to HDFS can fail with
 * "Failed to replace a bad datanode on the existing pipeline due to no more good datanodes being available to try".
 *
 * Most online advice is to edit hdfs-site.xml and add:
 *
 * <property>
 * <name>dfs.namenode.http.address</name>
 * <value>slave1:50070</value>
 * </property>
 * <property>
 * <name>dfs.support.append</name>
 * <value>true</value>
 * </property>
 * <property>
 * <name>dfs.client.block.write.replace-datanode-on-failure.policy</name>
 * <value>NEVER</value>
 * </property>
 * <property>
 * <name>dfs.client.block.write.replace-datanode-on-failure.enable</name>
 * <value>true</value>
 * </property>
 *
 * That alone did not fix the error here; the working solution (found in
 * https://blog.csdn.net/caiandyong/article/details/44730031?utm_medium=distribute.pc_relevant.none-task-blog-2%7Edefault%7EOPENSEARCH%7Edefault-2.no_search_link&depth_1-utm_source=distribute.pc_relevant.none-task-blog-2%7Edefault%7EOPENSEARCH%7Edefault-2.no_search_link)
 * is to set the same two client properties in Java code:
 *
 * conf.set("dfs.client.block.write.replace-datanode-on-failure.policy","NEVER");
 * conf.set("dfs.client.block.write.replace-datanode-on-failure.enable","true");
 * https://www.cnblogs.com/lspis/p/15306655.html
 */

