package com.caul.demo.hadoop.hdfs.client;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.LocatedFileStatus;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.fs.RemoteIterator;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.HdfsConfiguration;

import java.io.IOException;
import java.net.URI;
import java.time.Instant;
import java.util.Arrays;
import java.util.Date;

/**
 * Created by sdliang on 2018/3/29.
 */
/**
 * Demo of basic HDFS client operations against a remote namenode:
 * mkdir, file upload, file download, recursive file listing, and
 * directory status listing.
 *
 * <p>Created by sdliang on 2018/3/29.
 */
public class HdfsClientDemo {

  public static void main(String[] args) throws IOException, InterruptedException {
    // try-with-resources: FileSystem is Closeable, so it is closed even if an op throws
    try (FileSystem hdfsClient = getHdfsClient()) {
      String root = "/demo";
      Path demoDir = new Path(root);
      // schedule the demo directory for deletion when this JVM exits
      hdfsClient.deleteOnExit(demoDir);
      // create the directory with the default directory permissions
      hdfsClient.mkdirs(demoDir, FsPermission.getDirDefault());
      // upload a local file into the demo directory
      String src = "E:/ws-idea/bigdata/data/demo.log";
      String dest = root;
      upload(hdfsClient, src, dest);
      // download the uploaded file back to the local filesystem
      src = root + "/demo.log";
      dest = "E:/ws-idea/bigdata/data/download/";
      download(hdfsClient, src, dest);
      //    hdfsClient.delete(demoDir, true);

      // list files: the API returns an iterator because eagerly materializing a
      // large tree could be too much data
      listFiles(hdfsClient, demoDir);
      // list status: all entries (files and directories) directly under the path
      listStatus(hdfsClient, demoDir);
    }
  }

  /**
   * Prints the path and kind (directory vs. file) of every entry directly under
   * {@code dir} (non-recursive).
   *
   * @param hdfsClient open HDFS client
   * @param dir directory whose immediate children are listed
   * @throws IOException on RPC or I/O failure
   */
  private static void listStatus(FileSystem hdfsClient, Path dir) throws IOException {
    FileStatus[] listStatus = hdfsClient.listStatus(dir);
    for (FileStatus status : listStatus) {
      System.out.println(status.getPath());
      System.out.println(status.isDirectory() ? "文件夹" : "文件");
    }
  }

  /**
   * Recursively lists all files under {@code dir}, printing owner, modification
   * time, file name, and block locations for each one.
   *
   * @param hdfsClient open HDFS client
   * @param dir root of the recursive listing
   * @throws IOException on RPC or I/O failure
   */
  private static void listFiles(FileSystem hdfsClient, Path dir) throws IOException {
    RemoteIterator<LocatedFileStatus> fileItor = hdfsClient.listFiles(dir, true);

    while (fileItor.hasNext()) {
      LocatedFileStatus fileStatus = fileItor.next();
      System.out.println(fileStatus.getOwner());
      // modification time is epoch millis; Instant gives a readable UTC timestamp
      System.out.println(Instant.ofEpochMilli(fileStatus.getModificationTime()));
      System.out.println(fileStatus.getPath().getName());
      // Arrays.toString prints the block locations' contents; printing the array
      // directly would only show its identity hash (e.g. [LBlockLocation;@1a2b3c)
      System.out.println(Arrays.toString(fileStatus.getBlockLocations()));
    }
  }

  /**
   * Copies a local file into HDFS.
   *
   * @param hdfsClient open HDFS client
   * @param src local source file path
   * @param dest HDFS destination directory or file path
   * @throws IOException if the copy fails
   */
  private static void upload(FileSystem hdfsClient, String src, String dest) throws IOException {
    Path srcPath = new Path(src);
    Path destPath = new Path(dest);
    hdfsClient.copyFromLocalFile(srcPath, destPath);
    // streaming upload alternative:
    //    FSDataOutputStream fsOs = hdfsClient.create(destPath, true);
    //    fsOs.write(bts);
  }

  /**
   * Copies an HDFS file to the local filesystem.
   *
   * @param hdfsClient open HDFS client
   * @param src HDFS source file path
   * @param dest local destination path
   * @throws IOException if the copy fails
   */
  private static void download(FileSystem hdfsClient, String src, String dest) throws IOException {
    Path srcPath = new Path(src);
    Path destPath = new Path(dest);
    // useRawLocalFileSystem=true writes through the raw local filesystem so Hadoop
    // does not create its .crc checksum side files next to the downloaded file
    hdfsClient.copyToLocalFile(false, srcPath, destPath, true);
    // streaming download alternative:
    //    FSDataInputStream fsIs = hdfsClient.open(srcPath);
    //    fsIs.seek(1024); // jump to a start offset
  }

  /**
   * Creates a FileSystem client connected to the demo namenode as user "root".
   *
   * @return an open FileSystem; the caller is responsible for closing it
   * @throws IOException if the client cannot be created
   * @throws InterruptedException if login as the given user is interrupted
   */
  private static FileSystem getHdfsClient() throws IOException, InterruptedException {
    URI uri = URI.create("hdfs://cdh-namenode:9000/");
    Configuration conf = new HdfsConfiguration();
    // addResource expects a bare classpath resource name — the Spring-style
    // "classpath:" prefix would make Hadoop silently fail to find the file.
    // addResource is also scoped to this Configuration instance, unlike the
    // static addDefaultResource which mutates every Configuration JVM-wide.
    conf.addResource("hdfs-settings.xml");
    String user = "root";
    return FileSystem.get(uri, conf, user);
  }

}
