package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo8GroupByKey {

  /**
   * Demonstrates `groupByKey` by computing the average student age per class.
   *
   * Input: data/students.txt — assumed CSV with exactly 5 fields per line:
   * id,name,age,gender,clazz (rows with a different arity would throw a
   * MatchError — TODO confirm the input file is clean).
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setAppName("map")
    conf.setMaster("local")

    val sc = new SparkContext(conf)

    // Read the raw student table, one CSV line per record.
    val studentsRDD: RDD[String] = sc.textFile("data/students.txt")

    // Split each line into its comma-separated fields.
    val splitRDD: RDD[Array[String]] = studentsRDD.map(student => student.split(","))

    // Project out (class, age); age is parsed to Int for averaging.
    val clazzAndAgeRDD: RDD[(String, Int)] = splitRDD.map {
      case Array(_, _, age, _, clazz) =>
        (clazz, age.toInt)
    }

    /**
     * groupByKey: groups the values of a key-value RDD by key.
     */
    val groupByKeyRDD: RDD[(String, Iterable[Int])] = clazzAndAgeRDD.groupByKey()

    // Average the ages within each class.
    val avgAgeRDD: RDD[(String, Double)] = groupByKeyRDD.map {
      case (clazz, ages) =>
        val avgAge: Double = ages.sum.toDouble / ages.size
        (clazz, avgAge)
    }

    avgAgeRDD.foreach(println)

    // Keep the driver alive so the Spark web UI remains reachable.
    // Sleeping (instead of the original empty busy-wait loop) avoids
    // pegging one CPU core at 100% while idle.
    while (true) {
      Thread.sleep(60000L)
    }

    /**
     * groupBy vs groupByKey:
     * 1. API: groupBy works on any RDD; groupByKey only on key-value RDDs.
     * 2. Result: groupByKey produces a slightly simpler result structure.
     * 3. Performance: groupByKey shuffles less data than groupBy,
     *    so it performs better.
     */
  }

}
