package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo08GroupBy {

  /**
   * Demonstrates RDD `groupBy`: counts students per class and computes the
   * average age per class from a CSV-style text file.
   *
   * Assumed input format per line: comma-separated fields where index 4 is
   * the class name and index 2 is the age — inferred from the indices used
   * below; TODO confirm against the actual students.txt schema.
   *
   * Note: `groupBy` shuffles every record into its group; for pure counts or
   * sums, `reduceByKey` would be cheaper — kept as `groupBy` here because
   * that is the operator this demo teaches.
   */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()

    conf.setAppName("Demo08GroupBy")
    conf.setMaster("local")

    val sc: SparkContext = new SparkContext(conf)

    // Fix: stop the SparkContext even if a job throws, so the app releases
    // its resources cleanly (the original never called sc.stop()).
    try {
      val stuRDD: RDD[String] = sc.textFile("spark/data/stu/students.txt")

      // Count students per class.
      stuRDD
        .map(_.split(",")(4))
        .groupBy(clazz => clazz)
        .map(kv => s"${kv._1},${kv._2.size}")
        .foreach(println)

      // groupBy works on an RDD of any element type: you supply the grouping
      // key, and after grouping each group still holds the complete original
      // records (here, the (clazz, 1) tuples).
      stuRDD
        .map(line => (line.split(",")(4), 1))
        .groupBy(t2 => t2._1)
        .foreach(println)

      // Average age per class.
      // Fix: split each line only once (the original split every record twice).
      stuRDD
        .map { line =>
          val cols = line.split(",")
          (cols(4), cols(2).toInt)
        }
        .groupBy(_._1)
        .map(kv => s"${kv._1},${kv._2.map(_._2).sum.toDouble / kv._2.size}")
        .foreach(println)
    } finally {
      sc.stop()
    }
  }

}
