package chapter03

import org.apache.spark.{SparkConf, SparkContext}

object Test09_getNet_pay {
  /**
   * Demonstrates two ways to drop a CSV header row from an RDD and extract
   * the 7th column (index 6, presumably net pay — confirm against the file):
   *   1. `collect()` to the driver, drop the first line, re-parallelize.
   *   2. `mapPartitionsWithIndex`, dropping the first element of partition 0
   *      only — no driver round-trip.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local[*]").setAppName("Pay")
    val sc = new SparkContext(conf)
    try {
      // Read the raw CSV lines, header included.
      val lines = sc.textFile("input/Employee_salary_first_half.csv")
      println(lines.take(2).mkString("Array(", ", ", ")"))

      // Approach 1: pull everything to the driver and drop the header there.
      // NOTE: collect() materializes the whole file on the driver — fine for
      // a demo, unsafe for large inputs.
      val bodyRows: Array[String] = lines.collect().drop(1)
      // headOption avoids an IndexOutOfBoundsException on a header-only file.
      bodyRows.headOption.foreach(println)
      val bodyRdd = sc.makeRDD(bodyRows)
      val netPay1 = bodyRdd.map(_.split(",")(6))
      println(netPay1.take(5).mkString("Array(", ", ", ")"))

      // Approach 2: partition-aware header removal. textFile puts the first
      // line of the file in partition 0, so dropping one element there (and
      // only there) removes exactly the header.
      val noHeader = lines.mapPartitionsWithIndex(
        (index, iter) => if (index == 0) iter.drop(1) else iter
      )
      println(noHeader.take(2).mkString("Array(", ", ", ")"))
      println(noHeader.map(_.split(",")(6)).take(3).toList)
    } finally {
      // Always release the SparkContext, even if a job above fails.
      sc.stop()
    }
  }
}
