package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Code18AggregateByKey2 {
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("AggregateByKey"))

    // Goal: sum of the highest total score of every class.

    // students.txt: id -> (name, age, gender, clazz)
    val stuInfo = sc
      .textFile("spark_code/data/students.txt")
      .map { line =>
        val fields = line.split(",")
        (fields(0), (fields(1), fields(2), fields(3), fields(4)))
      }

    // score.txt: id -> total score over all subjects.
    // reduceByKey combines partial sums map-side before the shuffle;
    // groupByKey + mapValues(_.sum) would ship every raw row across the network.
    val totalScoreRDD: RDD[(String, Int)] = sc
      .textFile("spark_code/data/score.txt")
      .map { line =>
        val fields = line.split(",")
        (fields(0), fields(2).toInt)
      }
      .reduceByKey(_ + _)

    stuInfo
      .join(totalScoreRDD)
      // keep only (clazz, totalScore); the other student fields are unused here
      .map { case (_, ((_, _, _, clazz), totalScore)) => (clazz, totalScore) }
      // highest total score per class (scores are non-negative, so 0 is a safe zero value)
      .aggregateByKey(0)(math.max(_, _), math.max(_, _))
      // collapse everything onto one key so the per-class maxima can be summed;
      // prints a single (1, total) pair, same shape as before
      .groupBy(_ => 1)
      .mapValues(_.map(_._2).sum)
      .foreach(println)

    // Homework: average of the highest score across all classes.

    sc.stop() // release the local SparkContext and its resources
  }
}
