package com.hadoop

import org.apache.hadoop.mapred.lib.MultipleTextOutputFormat
import org.apache.spark.sql.SparkSession

/**
  *
  * @author ymy.hadoop
  */
/**
  * Spark job that reads tab-separated records, extracts the year from the
  * 5th field (a date like "2020-01-01"), and writes each record to a
  * per-year output file via [[RDDMultipleTextOutputFormat]].
  *
  * Usage: SaveToHadoopDemo2 &lt;inputPath&gt; &lt;outputPath&gt;
  */
object SaveToHadoopDemo2 {

  // com.hadoop.SaveToHadoopDemo2
  def main(args: Array[String]): Unit = {

    // Diagnostic dump of the received arguments (0-based index, same as before).
    println("===测试参数打印===")
    args.zipWithIndex.foreach { case (arg, i) =>
      println("参数" + i + "===>" + arg)
    }

    // Fail fast with a clear message instead of silently running with empty
    // paths and dying later inside Spark with an opaque error.
    require(args.length >= 2,
      "usage: SaveToHadoopDemo2 <inputPath> <outputPath>")
    val inputPath = args(0)
    val outputPath = args(1)

    val spark = SparkSession.builder()
      .getOrCreate()

    val sc = spark.sparkContext

    // Key each line by the year component of field 4; the custom output
    // format then routes each (year, line) pair to <year>/<year>.txt.
    sc.textFile(inputPath).map(line => {
      val fields = line.split("\t")
      // assumes every line has at least 5 tab-separated fields and field 4
      // is a dash-separated date — TODO confirm against the input data
      val time = fields(4)
      val year = time.split("-")(0)
      (year, fields.mkString("\t"))
    }).saveAsHadoopFile(outputPath, classOf[String], classOf[String], classOf[RDDMultipleTextOutputFormat])

    spark.stop()

  }
}

/**
  * Output format that routes each (key, value) pair into a file named after
  * its key: a record keyed "2020" lands in "2020/2020.txt". Keys are expected
  * to be Strings (as produced by SaveToHadoopDemo2's map step).
  */
class RDDMultipleTextOutputFormat extends MultipleTextOutputFormat[Any, Any] {

  override def generateFileNameForKeyValue(key: Any, value: Any, name: String): String = {
    // Cast is safe here: the driver always emits String keys.
    val dir = key.asInstanceOf[String]
    s"$dir/$dir.txt"
  }
}