package com.spark.util.example

import com.spark.util.client.HDFSClient
import com.spark.util.core.Sparking
import com.spark.util.utils.PropertiesUtil
import org.apache.hadoop.conf.Configuration

/**
 * Example driver showing two ways of writing to HDFS from Spark:
 *  1. RDD.saveAsTextFile — no append support; produces one file per partition.
 *  2. A project HDFSClient configured for HDFS HA — supports appending.
 */
object HDFSExample extends Sparking {

  def main(args: Array[String]): Unit = {

    val spark = getSparkSession(None)

    // Approach 1: saveAsTextFile cannot append to an existing path; it emits
    // one output file per partition, so repartition(1) collapses the result
    // into a single part file.
    spark
      .sparkContext
      .makeRDD(List.range(0, 100))
      .repartition(1)
      .saveAsTextFile("hdfs://hikbigdata/test/a")

    // Approach 2: append via HDFSClient.
    //
    // Hadoop HA client settings are keyed by the nameservice id: the keys
    // dfs.ha.namenodes.<ns>, dfs.namenode.rpc-address.<ns>.<nn> and
    // dfs.client.failover.proxy.provider.<ns> must all carry the id declared
    // in dfs.nameservices. Derive the suffix from the configured value once,
    // instead of hard-coding mismatched ids ("nameservices" vs "chkjbigdata"),
    // which would leave the HA client unable to resolve the logical URI.
    // NOTE(review): the PropertiesUtil lookup keys below are the project's
    // properties-file keys and are intentionally left unchanged.
    val nameservice = PropertiesUtil.getString("dfs.nameservices")
    val conf = new Configuration()
    conf.set("fs.defaultFS", PropertiesUtil.getString("fs.defaultFS"))
    conf.set("dfs.nameservices", nameservice)
    conf.set(s"dfs.ha.namenodes.$nameservice", PropertiesUtil.getString("dfs.ha.namenodes.nameservices"))
    conf.set(s"dfs.namenode.rpc-address.$nameservice.nn1", PropertiesUtil.getString("dfs.namenode.rpc-address.nameservices.nn1"))
    conf.set(s"dfs.namenode.rpc-address.$nameservice.nn2", PropertiesUtil.getString("dfs.namenode.rpc-address.nameservices.nn2"))
    conf.set(s"dfs.client.failover.proxy.provider.$nameservice", "org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider")
    // Allow append on the DFS client (legacy flag; append is on by default in
    // modern Hadoop, kept here for older clusters).
    conf.setBoolean("dfs.support.append", true)
    val sink = HDFSClient(PropertiesUtil.getString("fs.url"), conf)
    sink.append("append", "/test/a/part-00000")
  }
}
