package com.hadoop

import org.apache.hadoop.io.compress.{BZip2Codec, SnappyCodec}
import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

/**
  *
  * @author ymy.hadoop
  */
/**
  * Demo Spark job: reads the tab-separated file `emp.txt`, keys each record
  * by its 5th column (index 4), and writes the records to `/test/demo/` using
  * [[RDDMultipleTextOutputFormat]] so each key gets its own directory/file.
  *
  * Fixes over the previous revision:
  *  - no crash when fewer than two CLI arguments are supplied (the old code
  *    indexed args(0)/args(1) unconditionally);
  *  - `spark.stop()` is always invoked (it was commented out), wrapped in
  *    try/finally so the session is released even if the job fails;
  *  - the record line is rebuilt with `mkString` instead of a mutable loop.
  */
object SaveToHadoopDemo {
  // spark-submit entry point: com.hadoop.SaveToHadoopDemo
  def main(args: Array[String]): Unit = {

    println("===测试参数打印===")
    // Print every supplied argument. For the usual two-argument invocation the
    // output is byte-identical to before ("参数1 ：x", "参数2 ：y"); with fewer
    // arguments we simply print less instead of throwing.
    args.zipWithIndex.foreach { case (value, idx) =>
      println(s"参数${idx + 1} ：$value")
    }

    val spark = SparkSession
      .builder()
      // .master("local[*]")  // uncomment for local testing
      .appName("SaveToHadoopDemo")
      .getOrCreate()

    try {
      val sc = spark.sparkContext

      // One record per line; columns are tab-separated.
      // NOTE(review): assumes every line has at least 5 columns — fields(4)
      // will still throw on shorter lines, as the original did.
      val lines = sc.textFile("emp.txt")

      lines
        .map { line =>
          val fields = line.split("\t")
          // Rebuild the line with a trailing tab to match the original
          // loop's output ("v1\tv2\t...\t"); key on column index 4.
          (fields(4), fields.mkString("\t") + "\t")
        }
        .saveAsHadoopFile(
          "/test/demo/",
          classOf[String],
          classOf[String],
          classOf[RDDMultipleTextOutputFormat])
    } finally {
      // Release the SparkSession even if the job above fails.
      spark.stop()
    }
  }
}

/**
  * Output format used by [[SaveToHadoopDemo]] (see the `saveAsHadoopFile`
  * call, which references this class): routes each (key, value) pair into a
  * per-key directory, so a record keyed "dept" is written to
  * `<outputPath>/dept/dept.txt`.
  *
  * This definition was previously commented out while still being referenced
  * by `classOf[RDDMultipleTextOutputFormat]` above, which made the file fail
  * to compile — restored here.
  */
class RDDMultipleTextOutputFormat extends MultipleTextOutputFormat[Any, Any] {

  /**
    * Derives the output file name from the record's key.
    *
    * @param key   record key; assumed to be the String produced by the map in
    *              SaveToHadoopDemo (hence the cast)
    * @param value record value (unused)
    * @param name  default part-file name (unused, so all records for a key
    *              land in the same file)
    * @return relative path "<key>/<key>.txt" under the job's output directory
    */
  override def generateFileNameForKeyValue(key: Any, value: Any, name: String): String = {
    val rKey = key.asInstanceOf[String]
    s"$rKey/$rKey.txt"
  }
}
