package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo10GroupByKey {
  def main(args: Array[String]): Unit = {
    /**
     * groupByKey: transformation operator.
     * Only a KV-format (pair) RDD can call groupByKey;
     * it groups the RDD's records by key, yielding (K, Iterable[V]).
     */
    val conf: SparkConf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Demo10GroupByKey")

    val sc: SparkContext = new SparkContext(conf)
    try {
      // Parse each CSV line into a Stu record.
      // NOTE(review): assumes every line has at least 5 comma-separated fields
      // and field index 2 is numeric — malformed lines will throw; confirm the
      // input file is clean.
      val stuRDD: RDD[Stu] = sc
        .textFile("spark/data/stu/students.txt")
        .map(line => {
          val splits: Array[String] = line.split(",")
          Stu(splits(0), splits(1), splits(2).toInt, splits(3), splits(4))
        })

      // Turn the data into KV format, keyed by class, so it can be grouped.
      val stuKVRDD: RDD[(String, Stu)] = stuRDD.map(stu => (stu.clazz, stu))
      val stuGrpRDD: RDD[(String, Iterable[Stu])] = stuKVRDD.groupByKey()
      stuGrpRDD.foreach(println)

      // Count the number of students per class.
      // (For a pure count, reduceByKey/countByKey would avoid shuffling whole
      // records, but demonstrating groupByKey is the point of this demo.)
      stuGrpRDD
        .map(kv => s"${kv._1},${kv._2.size}")
        .foreach(println)
    } finally {
      // Fix: the context was never stopped, leaking the local Spark runtime;
      // always release it even if the job above throws.
      sc.stop()
    }
  }

  case class Stu(id: String, name: String, age: Int, gender: String, clazz: String)

}
