package com.learn.lb.spark.sql.work

import org.apache.spark.{SparkConf, SparkContext}

/**
 * 求 每个月男人收入的平均值即
 *
 * @author laibo
 * @since 2019/8/21 16:43
 */
object EmployeeSalarySpark {

  /**
   * Computes the average monthly salary of male employees.
   *
   * Expected input: a CSV text file with one record per line in the form
   * `month,gender,salary`, where gender `1` denotes male.
   * Prints one `(month, averageSalary)` pair per line, sorted by month.
   *
   * @param args optional; `args(0)` overrides the default input file path
   */
  def main(args: Array[String]): Unit = {
    // Allow the input path to be supplied on the command line; fall back to
    // the original hard-coded development path for backward compatibility.
    val inputPath = args.headOption.getOrElse(
      "E:\\idea\\workspace\\bd-learn\\spark-learn\\src\\main\\resources\\employe_salary.txt")

    val conf = new SparkConf().setAppName("EmployeeSalaryTask").setMaster("local[2]")
    val sc = new SparkContext(conf)
    try {
      // Per-key accumulator used by combineByKey: (recordCount, runningSalarySum)
      type CountAndSum = (Int, Double)

      sc.textFile(inputPath)
        .map(_.split(","))                                             // parse each line exactly once
        .filter(fields => fields.length >= 3 && fields(1).toInt == 1)  // skip malformed lines; keep male records
        .map(fields => (fields(0), fields(2).toDouble))                // (month, salary)
        .combineByKeyWithClassTag(
          // createCombiner: first salary seen for a key within a partition
          (salary: Double) => (1, salary),
          // mergeValue: map-side pre-aggregation — fold the next salary of the same key into the accumulator
          (acc: CountAndSum, salary: Double) => (acc._1 + 1, acc._2 + salary),
          // mergeCombiners: reduce-side merge of per-partition accumulators
          (a: CountAndSum, b: CountAndSum) => (a._1 + b._1, a._2 + b._2)
        )
        .map { case (month, (count, sum)) => (month, sum / count) }    // count >= 1 by construction
        .sortBy(_._1)
        .collect()
        .foreach(println)
    } finally {
      sc.stop() // release local Spark resources even if the job throws
    }
  }
}
