package com.shujia.core.transformations

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object GroupByOpt {
  /**
   * Demo of the `groupBy` transformation: computes the average age per class
   * from a CSV file of students, then keeps the driver alive so the Spark
   * web UI can be inspected.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("groupBy算子操作")

    val sc = new SparkContext(conf)

    val lineRDD: RDD[String] = sc.textFile("spark/data/students.txt")
    // Goal: compute the average age of each class.
    // Each line is comma-separated; field 3 is age and field 5 is the class
    // name (assumed from the grouping below — confirm against the data file).
    val studentsRDD: RDD[(String, String, String, String, String)] = lineRDD.map((line: String) => {
      val infos: Array[String] = line.split(",")
      (infos(0), infos(1), infos(2), infos(3), infos(4))
    })


    /**
     * `groupBy` accepts an arbitrary key function and works on RDDs of any
     * element type, which makes it very flexible. All elements of one group
     * are collected into a single Iterable, so each whole group must fit in
     * memory on one executor.
     */
    val classGroupRDD: RDD[(String, Iterable[(String, String, String, String, String)])] =
      studentsRDD.groupBy(_._5) // group by the 5th field (class name)

    val resRDD: RDD[(String, Int)] = classGroupRDD.map((kv: (String, Iterable[(String, String, String, String, String)])) => {
      val clazz: String = kv._1
      // A group produced by groupBy is never empty, so size > 0 and the
      // division below cannot divide by zero.
      val students: List[(String, String, String, String, String)] = kv._2.toList
      // Integer division: the average is truncated toward zero on purpose,
      // matching the declared RDD[(String, Int)] result type.
      val avgAge: Int = students.map(_._3.toInt).sum / students.size
      (clazz, avgAge)
    })

    resRDD.foreach(println)

    // Keep the driver process alive so the Spark web UI (localhost:4040)
    // stays reachable after the job finishes. Sleep between iterations
    // instead of busy-spinning, which would peg a CPU core.
    while (true) {
      Thread.sleep(10000)
    }
  }
}
