package com.edata.bigdata.spark;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class HdfsConnector {
    // NOTE(review): fields are intentionally left public with their original names
    // to preserve the existing external interface for any callers.
    public Logger logger = LoggerFactory.getLogger(this.getClass());
    public String PREFIX = "hdfs://";
    // Comma-separated NameNode candidates, tried in order for HA failover.
    // NOTE(review): both entries are identical ("localhost:8082") — looks like a
    // typo; confirm the intended standby NameNode host/port.
    public String ENTRYPOINT = "localhost:8082,localhost:8082";
    public FileSystem client;
    public String activeNameNode;

    /**
     * Creates the HDFS {@link FileSystem} client by trying each NameNode listed
     * in {@link #ENTRYPOINT} in order and keeping the first one that succeeds.
     *
     * <p>On success, {@link #client} and {@link #activeNameNode} are set and the
     * active address is logged. If every candidate fails, both fields are left
     * unchanged and an error is logged; no exception is thrown (preserving the
     * original method's no-throw contract).
     */
    public void createHdfsClient() {
        String[] nns = ENTRYPOINT.split(",");
        for (String nn : nns) {
            String uri = PREFIX + nn;
            try {
                Configuration conf = new Configuration();
                conf.set("fs.defaultFS", uri);
                // NOTE(review): FileSystem.get may not eagerly contact the
                // NameNode, so "active" here may be optimistic — consider a
                // probe such as client.exists(new Path("/")) to confirm.
                client = FileSystem.get(conf);
                activeNameNode = uri;
                logger.info("{} is active", activeNameNode);
                return;
            } catch (Exception e) {
                // Pass the exception object (not just getMessage()) so SLF4J
                // records the full stack trace, then fall through to the next
                // candidate instead of aborting the whole failover loop.
                logger.warn("无法通过 {} 创建HDFS客户端，尝试下一个NameNode", uri, e);
            }
        }
        logger.error("无法创建HDFS客户端：所有NameNode均不可用：{}", ENTRYPOINT);
    }
}
