package org.example.common

import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.hdfs.DistributedFileSystem
import org.example.constant.ApolloConst

import java.net.URI

/**
 * Mixin trait that exposes a ready-to-use HA HDFS client.
 *
 * Mixing this in eagerly builds a [[Configuration]] for the HA nameservice
 * described by [[ApolloConst]] and connects a [[DistributedFileSystem]] to it
 * (note: the connection is attempted at construction time of the mixing class).
 *
 * NOTE(review): the original hard-coded the literal word "nameservices" (and
 * "zcbigdata" in one key) into the HA configuration keys. HDFS HA looks these
 * keys up by the actual nameservice id, so the id from ApolloConst.hdfsName is
 * interpolated instead — confirm it matches the cluster's nameservice id.
 */
trait Hdfsing {
  // Client instance; initialized against the configured nameservice below.
  val hdfs: DistributedFileSystem = new DistributedFileSystem()

  val hdfsConf = new Configuration()
  hdfsConf.set("fs.defaultFS", ApolloConst.FSName)
  hdfsConf.set("dfs.nameservices", ApolloConst.hdfsName)
  // HA keys must embed the nameservice id, not the literal "nameservices".
  // Assumes ApolloConst.hdfsNodes lists the ids "nn1,nn2" — TODO confirm.
  hdfsConf.set(s"dfs.ha.namenodes.${ApolloConst.hdfsName}", ApolloConst.hdfsNodes)
  hdfsConf.set(s"dfs.namenode.rpc-address.${ApolloConst.hdfsName}.nn1", ApolloConst.hdfsNode1)
  hdfsConf.set(s"dfs.namenode.rpc-address.${ApolloConst.hdfsName}.nn2", ApolloConst.hdfsNode2)
  hdfsConf.set(
    s"dfs.client.failover.proxy.provider.${ApolloConst.hdfsName}",
    "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider"
  )
  // Datanode-replacement-on-write-failure is enabled but its policy is NEVER,
  // i.e. keep writing to the remaining pipeline instead of swapping nodes in.
  hdfsConf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true)
  hdfsConf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER")
  hdfsConf.setBoolean("dfs.support.append", true)
  // Eager connect; any failure here surfaces when the mixing class is constructed.
  hdfs.initialize(URI.create(ApolloConst.hdfsUrl), hdfsConf)
}
