package com.haisen.config;

import org.apache.hadoop.fs.FileSystem;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;

import java.io.IOException;

/**
 * Spring configuration that builds an HDFS {@link FileSystem} client for a
 * high-availability (HA) NameNode pair, driven entirely by the application's
 * {@code hdfs.*} properties rather than classpath XML files.
 */
@Configuration
public class HadoopConfig {

    /** HDFS URI used as {@code fs.defaultFS}, e.g. {@code hdfs://<namespace>}. */
    @Value("${hdfs.url}")
    private String url;

    /** Logical nameservice id ({@code dfs.nameservices}). */
    @Value("${hdfs.namespace}")
    private String namespace;

    /** Logical id of the first NameNode within the nameservice. */
    @Value("${hdfs.namenode1}")
    private String namenode1;

    /** RPC address (host:port) of the first NameNode. */
    @Value("${hdfs.namenode1addr}")
    private String namenode1addr;

    /** Logical id of the second NameNode within the nameservice. */
    @Value("${hdfs.namenode2}")
    private String namenode2;

    /** RPC address (host:port) of the second NameNode. */
    @Value("${hdfs.namenode2addr}")
    private String namenode2addr;

    /**
     * Creates the shared {@link FileSystem} client from the HA configuration bean.
     *
     * @param conf the Hadoop configuration produced by {@link #conf()}
     * @return a {@link FileSystem} bound to {@code fs.defaultFS}
     * @throws IOException if the file system cannot be created
     */
    @Bean(name = "fileSystem")
    public FileSystem fileSystem(org.apache.hadoop.conf.Configuration conf) throws IOException {
        return FileSystem.get(conf);
    }

    /**
     * Builds the Hadoop configuration explicitly from injected properties.
     *
     * <p>NOTE: {@code new Configuration(true)} would also load
     * {@code core-default.xml} and any local site files from the classpath,
     * which could silently override these HA settings — so every property is
     * set here by hand instead.
     *
     * @return a Hadoop configuration describing the HA nameservice
     */
    @Bean
    public org.apache.hadoop.conf.Configuration conf() {
        org.apache.hadoop.conf.Configuration conf = new org.apache.hadoop.conf.Configuration();
        conf.set("fs.defaultFS", url);
        conf.set("dfs.nameservices", namespace);
        conf.set("dfs.ha.namenodes." + namespace, namenode1 + "," + namenode2);
        conf.set("dfs.namenode.rpc-address." + namespace + "." + namenode1, namenode1addr);
        conf.set("dfs.namenode.rpc-address." + namespace + "." + namenode2, namenode2addr);
        // Client-side failover proxy so the client can switch between the two NameNodes.
        conf.set("dfs.client.failover.proxy.provider." + namespace,
                "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider");
        return conf;
    }

}
