package Job

import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD

object AveSalary {

  /**
   * Spark job: computes the average of CSV column 19 (salary) across all
   * records under `inputPath` and prints it on the driver.
   *
   * Fixes over the original:
   *  - caches the parsed RDD so the input file is read only once
   *    (the original launched two jobs — sum and count — each re-reading it);
   *  - uses `mean()` (DoubleRDDFunctions) instead of a dummy-key
   *    `reduceByKey` followed by a separate `count()`;
   *  - skips malformed rows (too few columns or non-numeric salary)
   *    instead of crashing the whole job;
   *  - handles an empty/invalid input set instead of dividing by zero;
   *  - prints on the driver (the original's RDD.foreach(println) runs on
   *    executors and is invisible in cluster mode);
   *  - stops the SparkContext even when the job throws.
   */
  def main(args: Array[String]): Unit = {
    import scala.util.Try

    val sc: SparkContext = new SparkContext("local[*]", "AveSalary")
    try {
      val inputPath: String = "file/JobData/input"
      // NOTE(review): outputPath was declared but never used in the original
      // (result is only printed). Kept for a future saveAsTextFile — confirm
      // whether persisting the result was intended.
      val outputPath: String = "file/JobData/output/AveSalary"

      // Parse once; keep only rows that actually have a numeric 20th column.
      val salaries: RDD[Double] = sc
        .textFile(inputPath)
        .repartition(2)
        .flatMap { line =>
          val fields = line.split(",")
          if (fields.length > 19) Try(fields(19).toDouble).toOption else None
        }
        .cache() // reused by count() and mean() below — avoids a second file scan

      if (salaries.count() > 0)
        println(salaries.mean()) // single distributed pass: sum / count
      else
        println("no valid salary records found under " + inputPath)
    } finally {
      sc.stop() // always release Spark resources, even on failure
    }
  }

}
