package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demo of `groupByKey` on a KV-format RDD.
 *
 * Reads a student file and 1) groups records by class, 2) computes the
 * average age per class.
 */
object Demo09GroupByKey {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()

    conf.setAppName("Demo09GroupByKey")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    // NOTE(review): assumes comma-separated lines where index 4 is the class
    // and index 2 is the age — confirm against students.txt.
    val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

    // groupByKey must be applied to a KV-format RDD.
    // No grouping condition is specified — it groups by the Key, and each
    // resulting group contains only the Values.
    stuRDD
      .map(line => (line.split(",")(4), 1))
      .groupByKey()
      .foreach(println) // an aggregation could follow here

    // Average age per class.
    // Split each line only once instead of twice per record.
    stuRDD
      .map { line =>
        val fields: Array[String] = line.split(",")
        (fields(4), fields(2).toInt)
      }
      .groupByKey()
      .map(kv => s"${kv._1},${kv._2.sum.toDouble / kv._2.size}")
      .foreach(println)

    // Release cluster resources held by this application.
    sc.stop()
  }
}
