package com.shujia.hdfs;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.*;

import java.util.Arrays;

/**
 * Demo of the HDFS Java client API: connects to the cluster NameNode,
 * checks a file's status, and lists every file under /bigdata29 together
 * with the datanode locations of its blocks.
 *
 * <p>Shell equivalents of the operations demonstrated here:
 * <pre>
 *   hadoop fs -mkdir /bigdata29/data
 *   hadoop fs -put python/data/京东手机商品前10页信息.csv /bigdata29/data/
 *   hadoop fs -get /bigdata29/data/demo1.csv hadoop/data/
 * </pre>
 * (mkdirs / copyFromLocalFile / copyToLocalFile in the API.)
 */
public class HDFSApiDemo {
    public static void main(String[] args) throws Exception {
        // Client configuration: point the default filesystem at the
        // cluster's NameNode RPC address.
        Configuration conf = new Configuration();
        conf.set("fs.defaultFS", "hdfs://master:9000");
        // conf.set("dfs.replication", "1"); // override replication if needed

        // try-with-resources guarantees the FileSystem handle (and its RPC
        // connection) is released even if a call below throws — the original
        // code never closed it.
        try (FileSystem fs = FileSystem.get(conf)) {
            System.out.println("成功与Hadoop中的HDFS文件系统连接：" + fs);

            // Verify the demo file exists; getFileStatus throws
            // FileNotFoundException if /bigdata29/data/demo1.csv is absent.
            fs.getFileStatus(new Path("/bigdata29/data/demo1.csv"));

            // Recursively list every file under /bigdata29 and print, for
            // each block of each file, the datanodes holding a replica.
            RemoteIterator<LocatedFileStatus> files =
                    fs.listFiles(new Path("/bigdata29"), true);
            while (files.hasNext()) {
                LocatedFileStatus status = files.next();
                String fileName = status.getPath().getName();
                for (BlockLocation block : status.getBlockLocations()) {
                    // getNames() -> "hostname:port" of each datanode replica.
                    System.out.println(fileName + ": " + Arrays.toString(block.getNames()));
                    System.out.println("----------------------------------------------");
                }
            }
        }
    }
}
