package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo7GroupBy {

  /**
   * Demo: computing each student's total score, first with `groupBy`,
   * then with `groupByKey`, to contrast the two operators.
   *
   * Input: "data/score.txt", one record per line; the first
   * comma-separated field is the student id and the last is a score
   * (assumed parseable as Int — malformed lines will throw).
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    conf.setMaster("local")
    // Fixed: app name previously said "flatMap" — a stale copy-paste
    // from another demo; use a name matching this job.
    conf.setAppName("groupBy")

    val sc = new SparkContext(conf)

    // Score table: one line per score record.
    val linesRDD: RDD[String] = sc.textFile("data/score.txt")

    /**
     * Parse each line into (studentId, score).
     */
    val scoreRDD: RDD[(String, Int)] = linesRDD.map((line: String) => {
      val split: Array[String] = line.split(",")
      // First field is the student id, last field is the score.
      (split.head, split.last.toInt)
    })

    /**
     * groupBy: group by an arbitrary key derived from each element.
     * Note: each group's values are the FULL (id, score) tuples,
     * so the key is shuffled redundantly inside every value.
     */
    val groupByRDD: RDD[(String, Iterable[(String, Int)])] = scoreRDD.groupBy {
      case (id: String, _: Int) => id
    }

    groupByRDD
      .map {
        case (id: String, iter: Iterable[(String, Int)]) =>
          // Sum only the score component of each (id, score) tuple.
          val sumSco: Int = iter.map { case (_: String, sco: Int) => sco }.sum
          (id, sumSco)
      }
      .foreach(println)

    println("=" * 100)

    /**
     * groupByKey: group by the key of a key-value RDD; the RDD must
     * already be in (K, V) form. Each group contains only the values
     * (Int), not the whole tuples.
     */
    val groupByKeyRDD: RDD[(String, Iterable[Int])] = scoreRDD.groupByKey()

    groupByKeyRDD
      .map {
        case (id: String, iter: Iterable[Int]) =>
          val sumSco: Int = iter.sum
          (id, sumSco)
      }
      .foreach(println)

    /**
     * groupByKey shuffles less data than groupBy (values only, not the
     * whole tuples), so it performs better for this use case.
     */
    // Keep the JVM alive so the Spark web UI (http://localhost:4040)
    // stays available for inspection. Fixed: the previous
    // `while (true) {}` busy-spun a CPU core at 100%; sleeping blocks
    // the main thread without burning CPU.
    Thread.sleep(Long.MaxValue)
  }

}
