package com.iflytek.iot.hadoop.dfs.config;

import com.iflytek.iot.hadoop.dfs.service.FileHdfsService;
import com.iflytek.iot.hadoop.dfs.service.HdfsService;
import org.apache.commons.lang3.StringUtils;
import org.apache.hadoop.security.UserGroupInformation;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.context.properties.EnableConfigurationProperties;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

/**
 * @author yu 2019/11/28.
 */
@Configuration
@EnableConfigurationProperties({HdfsProperties.class})
public class HdfsAutoConfigure {

    private static final Logger LOGGER = LoggerFactory.getLogger(HdfsAutoConfigure.class);

    /** Fallback NameNode URI used when no server URL is configured. */
    private static final String DEFAULT_FS_URL = "hdfs://localhost:9000";

    /**
     * Creates the {@link HdfsService} bean backed by the Hadoop client configuration
     * produced by {@link #configuration(HdfsProperties)}.
     *
     * @param configuration  the prepared Hadoop client configuration
     * @param hdfsProperties externalized HDFS connection properties
     * @return a fully wired {@link FileHdfsService}
     */
    @Bean
    public HdfsService hdfsService(org.apache.hadoop.conf.Configuration configuration, HdfsProperties hdfsProperties) {
        HdfsService hdfsService = new FileHdfsService();
        hdfsService.setConfiguration(configuration);
        hdfsService.setHdfsProperties(hdfsProperties);
        return hdfsService;
    }

    /**
     * Builds the Hadoop client {@link org.apache.hadoop.conf.Configuration} and, when
     * enabled, performs a Kerberos keytab login for the process.
     *
     * @param hdfsProperties externalized HDFS connection and Kerberos properties
     * @return the prepared Hadoop configuration
     * @throws IllegalStateException if the configuration cannot be built or the
     *                               Kerberos login fails; failing fast here is
     *                               preferable to letting Spring wire a null
     *                               configuration into dependent beans
     */
    @Bean
    public org.apache.hadoop.conf.Configuration configuration(HdfsProperties hdfsProperties) {
        try {
            org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
            String serverUrl = hdfsProperties.getServerUrl();
            // "fs.defaultFS" is the current key; "fs.default.name" has been deprecated
            // since Hadoop 2.x (Hadoop's deprecation mapping keeps both in sync).
            conf.set("fs.defaultFS", StringUtils.isNotBlank(serverUrl) ? serverUrl : DEFAULT_FS_URL);
            conf.setBoolean("dfs.support.append", true);
            conf.set("hadoop.job.ugi", "cluster");
            // Pin filesystem implementations explicitly so shaded/uber-jar deployments
            // resolve them even when META-INF service files were merged away.
            conf.set("fs.hdfs.impl", org.apache.hadoop.hdfs.DistributedFileSystem.class.getName());
            conf.set("fs.file.impl", org.apache.hadoop.fs.LocalFileSystem.class.getName());
            // Avoid pipeline-recovery failures on small clusters when appending.
            conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
            conf.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
            // Enable Kerberos authentication when configured.
            // Boolean.TRUE.equals(...) guards against a null boxed Boolean from an
            // unset property (works for primitive boolean too, via autoboxing).
            if (Boolean.TRUE.equals(hdfsProperties.getKerberosEnable())) {
                // Special-case Windows for local debugging: the realm and KDC must be
                // supplied via system properties when no krb5.conf is present.
                if (System.getProperty("os.name").toLowerCase().startsWith("win")) {
                    System.setProperty("java.security.krb5.realm", hdfsProperties.getKerberosRealm());
                    System.setProperty("java.security.krb5.kdc", hdfsProperties.getKdcServer());
                }

                // The Kerberos principal pattern the NameNode is expected to use.
                conf.set("dfs.namenode.kerberos.principal.pattern", hdfsProperties.getKerborosPrincipalPattern());
                conf.set("hadoop.security.authentication", "kerberos");
                conf.set("hadoop.security.authorization", "true");
                UserGroupInformation.setConfiguration(conf);
                UserGroupInformation.loginUserFromKeytab(hdfsProperties.getKerberosUser(), hdfsProperties.getKerberosKeyPath());
            }
            return conf;
        } catch (Exception e) {
            LOGGER.error("hdfs配置错误", e);
            // Fail fast with the cause preserved instead of returning null, which would
            // surface later as an opaque NPE inside hdfsService() wiring.
            throw new IllegalStateException("Failed to build HDFS configuration", e);
        }
    }
}
