package com.rock.code.hdfs;

import com.rock.code.bigdata.bigdatainterface.domain.Constants;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.hdfs.DistributedFileSystem;
import org.apache.hadoop.hdfs.HAUtil;
import org.apache.hadoop.io.IOUtils;
import org.apache.log4j.Logger;

import java.io.File;
import java.io.FileInputStream;
import java.io.IOException;
import java.net.InetSocketAddress;
import java.net.InetAddress;
import java.net.URI;

public class HdfsAPP {

    /** One logger per class, not per instance (static final is the log4j convention). */
    private static final Logger logger = Logger.getLogger(HdfsAPP.class);

    /**
     * Builds a client {@link FileSystem} for an HDFS HA (high-availability) cluster
     * named {@code mycluster}, configured entirely in code (no *-site.xml files).
     * <p>
     * The two namenode RPC addresses come from the project-level
     * {@code Constants.NAMENODE_ADDR1/2}; the failover proxy provider lets the
     * client discover which namenode is currently active.
     * <p>
     * NOTE: each call returns a NEW connection; callers are responsible for
     * closing it (the methods in this class use try-with-resources for that).
     *
     * @return a freshly initialized {@link DistributedFileSystem} bound to the HA nameservice
     * @throws Exception if the client cannot be initialized
     */
    public FileSystem getFileSystem() throws Exception {
        // 'false' skips loading default resources (core-site.xml etc.);
        // every required property is set explicitly below.
        Configuration conf = new Configuration(false);
        String nameservices = "mycluster";
        String[] namenodesAddr = {Constants.NAMENODE_ADDR1, Constants.NAMENODE_ADDR2};
        String[] namenodes = {"nn1", "nn2"};
        conf.set("fs.defaultFS", "hdfs://" + nameservices);
        conf.set("dfs.nameservices", nameservices);
        conf.set("dfs.ha.namenodes." + nameservices, namenodes[0] + "," + namenodes[1]);
        conf.set("dfs.namenode.rpc-address." + nameservices + "." + namenodes[0], namenodesAddr[0]);
        conf.set("dfs.namenode.rpc-address." + nameservices + "." + namenodes[1], namenodesAddr[1]);
        // Client-side proxy that transparently fails over between nn1/nn2
        // until it reaches the active namenode.
        conf.set("dfs.client.failover.proxy.provider." + nameservices,
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        String hdfsRPCUrl = "hdfs://" + nameservices;
        DistributedFileSystem dfs = new DistributedFileSystem();
        dfs.initialize(URI.create(hdfsRPCUrl), conf);

        return dfs;
    }

    /**
     * Reads a file from HDFS and streams its content to {@code System.out}.
     * Errors are logged, not rethrown (best-effort, as in the original contract).
     *
     * @param filePath HDFS path of the file to read
     */
    public void readHDFSFile(String filePath) {
        // try-with-resources closes both the stream AND the FileSystem connection
        // (the original leaked the FileSystem on every call).
        try (FileSystem fs = this.getFileSystem();
             FSDataInputStream in = fs.open(new Path(filePath))) {
            // 'false' keeps System.out open after the copy.
            IOUtils.copyBytes(in, System.out, 4096, false);
        } catch (Exception e) {
            logger.error("hdfs read error: " + filePath, e);
        }
    }

    /**
     * Uploads a local file to HDFS, overwriting per {@link FileSystem#create(Path)} semantics.
     * Errors are logged, not rethrown.
     *
     * @param localPath path of the local source file
     * @param hdfsPath  HDFS destination path
     */
    public void writeHDFS(String localPath, String hdfsPath) {
        // Resources are closed in reverse declaration order: output stream,
        // then input stream, then the FileSystem connection itself.
        try (FileSystem fs = this.getFileSystem();
             FileInputStream in = new FileInputStream(new File(localPath));
             FSDataOutputStream out = fs.create(new Path(hdfsPath))) {
            IOUtils.copyBytes(in, out, 4096, false);
        } catch (Exception e) {
            logger.error("hdfs write error: " + localPath + " -> " + hdfsPath, e);
        }
    }

    /**
     * Creates a directory (including missing parents) on HDFS.
     * Argument/IO errors are logged; other failures (e.g. client init) propagate.
     *
     * @param path HDFS directory path to create
     * @throws Exception if the HDFS client cannot be initialized
     */
    public void mkdirs(String path) throws Exception {
        try (FileSystem fs = this.getFileSystem()) {
            fs.mkdirs(new Path(path));
        } catch (IllegalArgumentException | IOException e) {
            logger.error("hdfs mkdir error: " + path, e);
        }
    }

    /**
     * Downloads a file from HDFS to the local filesystem.
     * <p>
     * The destination is a local OS path; on Windows use a form such as
     * {@code D:\\hadoop\\test.txt}. IO errors are logged; client-init failures propagate.
     *
     * @param srcFile  HDFS source path
     * @param destPath local destination path
     * @throws Exception if the HDFS client cannot be initialized
     */
    public void get(String srcFile, String destPath) throws Exception {
        try (FileSystem fs = this.getFileSystem()) {
            fs.copyToLocalFile(new Path(srcFile), new Path(destPath));
        } catch (IOException e) {
            logger.error("hdfs get error: " + srcFile + " -> " + destPath, e);
        }
    }

    /**
     * Resolves the RPC address of the currently ACTIVE namenode.
     * <p>
     * Fix over the original: the address is no longer dereferenced after a swallowed
     * failure (which guaranteed a {@link NullPointerException}); failures now surface
     * as {@link IOException} with the cause preserved.
     *
     * @return {@code hdfs://<active-ip>:<port>} of the active namenode
     * @throws IOException if the active namenode cannot be determined
     */
    public String getActiveNameNode() throws IOException {
        try (FileSystem fs = this.getFileSystem()) {
            InetSocketAddress active = HAUtil.getAddressOfActive(fs);
            InetAddress address = active.getAddress();
            String hdfsPath = "hdfs://" + address.getHostAddress() + ":" + active.getPort();
            System.out.println(hdfsPath);
            return hdfsPath;
        } catch (IOException e) {
            throw e;
        } catch (Exception e) {
            // getFileSystem() declares raw Exception; wrap so our signature stays IOException.
            throw new IOException("failed to resolve active namenode", e);
        }
    }


    /**
     * CLI entry point: downloads {@code args[0]} (HDFS path) to {@code args[1]} (local path).
     */
    public static void main(String[] args) throws Exception {
        // Guard against ArrayIndexOutOfBoundsException on missing arguments.
        if (args.length < 2) {
            System.err.println("usage: HdfsAPP <hdfs-src-file> <local-dest-path>");
            System.exit(1);
        }
        new HdfsAPP().get(args[0], args[1]);
        System.out.println("文件下载完成");
    }

}
