package com.shujia.spark.core

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

/**
 * Demo: grouping RDD records two ways — `groupBy` on a derived key vs.
 * `groupByKey` on a pre-keyed pair RDD. Both trigger a shuffle.
 */
object Demo5Group {
  def main(args: Array[String]): Unit = {
    // 1. Build the Spark environment.
    val conf = new SparkConf()
    // "local" = single worker thread; use "local[4]" to run with 4 cores.
    conf.setMaster("local")
    conf.setAppName("group")
    val sc = new SparkContext(conf)

    val linesRDD: RDD[String] = sc.textFile("data/students.txt")

    // Record type for one parsed line of students.txt (comma-separated).
    case class Student(id: String, name: String, age: Int, sex: String, clazz: String)

    /**
     * Transformations are lazy: nothing executes until an action (e.g. foreach)
     * triggers the job. Records then flow through the pipeline one at a time.
     */
    // Parse each line into a Student. `collect` with a partial function keeps
    // only lines that split into exactly 5 fields, silently skipping malformed
    // rows instead of failing the whole job with a MatchError.
    val studentsRDD: RDD[Student] = linesRDD
      .map(_.split(","))
      .collect {
        case Array(id, name, age, sex, clazz) => Student(id, name, age.toInt, sex, clazz)
      }

    // groupBy: group by a key derived from each record.
    // Produces a shuffle under the hood.
    val groupByRDD: RDD[(String, Iterable[Student])] = studentsRDD
      .groupBy(student => student.clazz)

    // Count students per class.
    val clazzNumRDD: RDD[(String, Int)] = groupByRDD
      .mapValues(stus => stus.size)

    clazzNumRDD.foreach(println)

    // groupByKey: group a pair RDD by its key (the class name).
    val groupByKeyRDD: RDD[(String, Iterable[Student])] = studentsRDD
      .map(stu => (stu.clazz, stu))
      .groupByKey()

    groupByKeyRDD.foreach(println)

    // Keep the driver alive so the Spark web UI stays reachable.
    // Sleep instead of busy-spinning so we don't pin a CPU core at 100%.
    while (true) {
      Thread.sleep(10000)
    }

  }
}
