package chapter03

import org.apache.spark.{SparkConf, SparkContext}

/**
 * Reads an employee-salary CSV, strips the header row, extracts the
 * Net_Pay column (index 6), and saves it as text files under "input/salary".
 *
 * NOTE(review): `saveAsTextFile` throws if the output directory already
 * exists, so a second run fails unless "input/salary" is deleted first.
 * Writing output beneath the input directory is also fragile — confirm
 * whether a separate output root (e.g. "output/salary") was intended.
 */
object Test08_Salary {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("salary")
    val sc = new SparkContext(conf)

    val lines = sc.textFile("input/Employee_salary_first_half.csv")
    // Peek at the first 5 raw lines (header included).
    println(lines.take(5).mkString("Array(", ", ", ")"))

    // Show the first data row after the header. `take(2)` fetches only two
    // lines to the driver instead of collecting the whole dataset, while
    // producing the same printed List of one element as before.
    val firstDataRow = lines.take(2).toList.drop(1)
    println(firstDataRow.take(1))

    // Drop the header distributedly: only partition 0 of a text file can
    // contain the first line, so skip that partition's first element.
    val noHeader = lines.mapPartitionsWithIndex((index, iter) => {
      if (index == 0) iter.drop(1) else iter
    })
    println(noHeader.take(1).mkString("Array(", ", ", ")"))

    // Net_Pay is the 7th comma-separated field. NOTE(review): a plain
    // split(",") breaks on quoted fields containing commas — acceptable
    // only if the CSV is known to be unquoted; verify against the data.
    val netPay = noHeader.map(line => line.split(",")(6))
    netPay.saveAsTextFile("input/salary")

    sc.stop()
  }
}
