package com.edu.springboot_disk_erban.utils;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.stereotype.Component;

import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;

@Component
public class HDFSConnectUtils {

    /** Base URI of the HDFS cluster, e.g. {@code hdfs://namenode:9000}. */
    @Value("${HDFS_URL}")
    private String HDFS_URL;

    /** Remote user name used when opening the HDFS connection. */
    @Value("${HDFS_USER}")
    private String HDFS_USER;

    /** NameNode address exposed to callers via {@link #getHDFSNameNode()}. */
    @Value("${HDFS_NAMENODE}")
    private String HDFS_NAMENODE;

    /**
     * Client configuration, built lazily. Volatile because this is a singleton
     * Spring bean that may be touched from several request threads.
     */
    private volatile Configuration conf;

    /**
     * Opens a {@link FileSystem} handle for the configured HDFS cluster.
     * <p>
     * Note: the returned handle is {@code Closeable}; the caller is responsible
     * for closing it (or relying on Hadoop's internal FileSystem cache).
     *
     * @return a connected {@link FileSystem} instance
     * @throws RuntimeException if the URI is malformed or the connection fails;
     *         the original exception is preserved as the cause
     */
    public FileSystem getFileSystem() {
        Configuration configuration = buildConfiguration();
        conf = configuration;
        try {
            return FileSystem.get(new URI(HDFS_URL), configuration, HDFS_USER);
        } catch (InterruptedException e) {
            // Restore the interrupt flag so callers further up the stack can
            // still observe the interruption.
            Thread.currentThread().interrupt();
            throw new RuntimeException("Interrupted while connecting to HDFS at " + HDFS_URL, e);
        } catch (IOException | URISyntaxException e) {
            throw new RuntimeException("Failed to connect to HDFS at " + HDFS_URL, e);
        }
    }

    /** Builds a fresh client {@link Configuration} with this project's HDFS settings. */
    private Configuration buildConfiguration() {
        Configuration configuration = new Configuration();
        // Replication factor of 2 for files written by this client.
        configuration.set("dfs.replication", "2");
        // Datanode-replacement policy when a datanode fails during a block write.
        configuration.set("dfs.client.block.write.replace-datanode-on-failure.enable", "true");
        configuration.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // Resolve datanodes by hostname — presumably the client sits outside the
        // cluster's internal network; confirm against deployment topology.
        configuration.set("dfs.client.use.datanode.hostname", "true");
        return configuration;
    }

    /**
     * Returns the HDFS client configuration, building it on first access.
     * Previously this returned {@code null} until {@link #getFileSystem()} had
     * been called; it is now always non-null.
     *
     * @return the shared {@link Configuration}
     */
    public Configuration getConf() {
        Configuration configuration = conf;
        if (configuration == null) {
            configuration = buildConfiguration();
            conf = configuration;
        }
        return configuration;
    }

    /**
     * @return the configured NameNode address
     */
    public String getHDFSNameNode() {
        return HDFS_NAMENODE;
    }
}
