package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo7GroupBy {

  /**
   * Demo of the `groupBy` transformation: reads a student table and computes
   * the average age per class.
   *
   * Expected input: `data/students.txt`, one comma-separated record per line
   * with exactly 5 fields: id, name, age, gender, class.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    // Name the job after what it demonstrates (was "map", a copy-paste
    // leftover from another demo in this package).
    conf.setAppName("groupBy")
    conf.setMaster("local")

    val sc = new SparkContext(conf)

    // Read the student table; each line is one comma-separated record.
    val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

    val splitRDD: RDD[Array[String]] = studentsRDD.map(student => student.split(","))

    // Extract (class, age). NOTE: a record that does not have exactly 5
    // fields, or whose age field is not numeric, will fail this task with a
    // MatchError / NumberFormatException — assumes clean input.
    val clazzAndAgeRDD: RDD[(String, Int)] = splitRDD.map {
      case Array(_, _, age, _, clazz) =>
        (clazz, age.toInt)
    }

    /**
     * groupBy: groups by the given key and returns a KV-format RDD where the
     * key is the grouping field and the value is an iterable of the grouped
     * records. The grouped data is not necessarily fully loaded in memory,
     * and the iterator can only be traversed once.
     *
     * groupBy must bring equal keys into the same partition, so it triggers
     * a shuffle.
     */
    // Group by class.
    val kvRDD: RDD[(String, Iterable[(String, Int)])] = clazzAndAgeRDD.groupBy(kv => kv._1)

    // Compute the average age of each class.
    val avgAgeRDD: RDD[(String, Double)] = kvRDD.map {
      case (clazz, iter) =>
        // Pull out the ages and average them.
        val ages: Iterable[Int] = iter.map(kv => kv._2)

        val avgAge: Double = ages.sum.toDouble / ages.size

        (clazz, avgAge)
    }

    avgAgeRDD.foreach(println)

    // Keep the driver alive so the Spark web UI (http://localhost:4040)
    // stays reachable. The original `while (true) {}` busy-wait pinned a
    // CPU core at 100%; sleeping blocks without spinning.
    Thread.sleep(Long.MaxValue)

  }

}
