package com.ibm.risk.irmp.common.utils;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.ipc.RemoteException;
import org.apache.hadoop.security.UserGroupInformation;
import org.apache.hadoop.util.Progressable;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

import java.io.*;
import java.net.URI;

//@Configuration
/**
 * HDFS utility built on the Hadoop {@link FileSystem} client API.
 *
 * <p>Supports optional Kerberos authentication ({@link #login}), file upload,
 * file download, and file-status lookup. All operations share the same client
 * {@link Configuration} (see {@link #newClientConfiguration()}).
 *
 * <p>Thread-safety: {@link #login} guards its shared re-authentication timestamp
 * with the class lock; the other methods hold no mutable state of their own.
 */
public class HDFSUtilJava implements HDFSUtil {
    // FIX: logger was registered under the interface (HDFSUtil.class); use this class
    // so log lines are attributed to the actual implementation.
    private static final Logger log = LoggerFactory.getLogger(HDFSUtilJava.class);

    public static final String HADOOP_SECURITY_AUTHENTICATION = "hadoop.security.authentication";
    public static final String HADOOP_SECURITY_AUTHENTICATION_KERBEROS = "kerberos";
    public static final String JAVA_SECURITY_KRB5_CONF_KEY = "java.security.krb5.conf";

    /** Minimum interval between Kerberos re-authentications: 2 hours, in milliseconds. */
    private static final long KERBEROS_REAUTH_INTERVAL_MS = 7200L * 1000L;

    // Timestamp (epoch ms) of the last successful Kerberos login.
    // FIX: shared static mutable state — now guarded by the class lock in login().
    private static long lastTimeKerberosAuthenticated = 0;

    /**
     * Performs Kerberos authentication for this JVM, at most once per
     * {@link #KERBEROS_REAUTH_INTERVAL_MS}.
     *
     * @param krb5ConfFile path to the krb5.conf file
     * @param principal    Kerberos principal to log in as
     * @param keytab       path to the principal's keytab file
     * @throws Exception if the Kerberos login fails
     */
    public void login(String krb5ConfFile, String principal, String keytab) throws Exception {
        synchronized (HDFSUtilJava.class) {
            // Skip if we authenticated within the last two hours.
            if (System.currentTimeMillis() - lastTimeKerberosAuthenticated < KERBEROS_REAUTH_INTERVAL_MS) {
                return;
            }
            Configuration conf = new Configuration();
            // Switch Hadoop authentication from the default "simple" to Kerberos.
            // FIX: use the declared constant instead of repeating the literal key.
            conf.set(HADOOP_SECURITY_AUTHENTICATION, HADOOP_SECURITY_AUTHENTICATION_KERBEROS);
            // Point the JVM at the krb5 configuration file.
            System.setProperty(JAVA_SECURITY_KRB5_CONF_KEY, krb5ConfFile);
            UserGroupInformation.setConfiguration(conf);
            // Log in with the given principal and its keytab.
            UserGroupInformation.loginUserFromKeytab(principal, keytab);

            lastTimeKerberosAuthenticated = System.currentTimeMillis();
        }
    }

    /**
     * Builds the HDFS client {@link Configuration} shared by all operations in this
     * class (previously duplicated in upload/download/getFileStatus).
     */
    private static Configuration newClientConfiguration() {
        Configuration conf = new Configuration();
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        // Make the namenode return datanode hostnames instead of (possibly unreachable)
        // internal IPs; avoids "Failed to connect to /xxx:50010 ... Connection timed out".
        // Requires datanode hostnames to resolve on the client (e.g. via /etc/hosts).
        conf.set("dfs.client.use.datanode.hostname", "true");
        return conf;
    }

    /**
     * Uploads a local file to HDFS, overwriting any existing file at the target path.
     *
     * @param localFile    local source file
     * @param hdfsFileName target path, e.g. {@code hdfs://192.168.1.103:9000/xxx}
     * @throws Exception on I/O or HDFS errors (including {@link RemoteException},
     *                   which the original version silently swallowed)
     */
    public void upload(File localFile, String hdfsFileName) throws Exception {
        URI uri = URI.create(hdfsFileName);
        Path path = new Path(uri.getPath());
        // NOTE: FileSystem.get returns a JVM-cached instance; deliberately not closed here.
        FileSystem fs = FileSystem.get(uri, newClientConfiguration());
        // FIX: the original called fs.create(path, true) when the file did NOT exist and
        // leaked the returned output stream; create(...) below already overwrites, so we
        // only need to remove a pre-existing entry (recursively, in case it is a directory).
        if (fs.exists(path)) {
            fs.delete(path, true);
        }
        log.info("uploading HDFS file: {}", path);
        try (InputStream in = new BufferedInputStream(new FileInputStream(localFile));
             OutputStream out = fs.create(path, new Progressable() {
                 @Override
                 public void progress() {
                     log.info("~~Upload to HDFS~~");
                 }
             })) {
            // Pipe the local stream into the HDFS stream. The streams are closed by
            // try-with-resources, so copyBytes must NOT close them again
            // (the original closed `out` up to three times).
            IOUtils.copyBytes(in, out, 1024, false);
        } catch (RemoteException exp) {
            // FIX: previously only the message was logged and the exception dropped,
            // so a failed upload went unnoticed. Log the full trace and propagate.
            log.warn("upload to HDFS failed: {}", hdfsFileName, exp);
            throw exp;
        }
    }

    /**
     * Downloads an HDFS file to the local file system.
     *
     * @param hdfsFileName  source HDFS path
     * @param localFileName destination local path (overwritten if it exists)
     * @throws Exception on I/O or HDFS errors
     */
    public void download(String hdfsFileName, String localFileName) throws Exception {
        log.info("download hdfs file: {}", hdfsFileName);
        URI uri = URI.create(hdfsFileName);
        // NOTE: the FileSystem is JVM-cached and shared; the original closed it here
        // (but not in upload/getFileStatus), which invalidates the cached instance for
        // other callers — we leave it open, consistent with the rest of this class.
        FileSystem fs = FileSystem.get(uri, newClientConfiguration());
        // FIX: the original closed the local stream via two nested try-with-resources
        // blocks AND copyBytes(..., true) — open and close each stream exactly once.
        try (FSDataInputStream in = fs.open(new Path(hdfsFileName));
             OutputStream out = new FileOutputStream(localFileName)) {
            IOUtils.copyBytes(in, out, 1024, false);
        }
    }

    /**
     * {@inheritDoc}
     *
     * <p>Not supported by this implementation.
     */
    @Override
    public String[] list(String path) throws Exception {
        throw new RuntimeException("Not implemented yet!");
    }

    /**
     * Returns the {@link FileStatus} of an HDFS file.
     *
     * @param hdfsFileName HDFS file path
     * @return the file's status (length, modification time, permissions, ...)
     * @throws IOException if the path does not exist or HDFS is unreachable
     */
    public FileStatus getFileStatus(String hdfsFileName) throws IOException {
        URI uri = URI.create(hdfsFileName);
        // The FileSystem instance is cached by Hadoop and shared JVM-wide;
        // it is deliberately not closed here.
        FileSystem fs = FileSystem.get(uri, newClientConfiguration());
        return fs.getFileStatus(new Path(hdfsFileName));
    }
}
