package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo19Student1 {
  def main(args: Array[String]): Unit = {
    /**
      * Task 2: find students whose total score is greater than the
      * grade-wide average of student totals.
      */

    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      // Fixed: app name was "Demo14Agg", copy-pasted from another demo.
      .setAppName("Demo19Student1")

    val sc = new SparkContext(conf)

    // One line per score record.
    // NOTE(review): assumes CSV layout studentId,subjectId,score — confirm against data/score.txt
    val scoreRDD: RDD[String] = sc.textFile("data/score.txt")

    // Parse each line into (studentId, score).
    val idAndScoreRDD: RDD[(String, Double)] = scoreRDD.map(line => {
      val fields: Array[String] = line.split(",")
      (fields(0), fields(2).toDouble)
    })

    // 1. Total score per student.
    //    Cached because this RDD feeds two separate jobs below
    //    (the average computation and the final filter) — without
    //    cache() the whole lineage would be recomputed for each job.
    val stuSumScoreRDD: RDD[(String, Double)] = idAndScoreRDD
      .reduceByKey(_ + _)
      .cache()

    // 2. Grade average over all student totals.
    //    mean() computes sum and count in a single pass (one action)
    //    instead of the original two separate sum()/count() actions.
    val avgSco: Double = stuSumScoreRDD.map(_._2).mean()

    println(s"年级平均分：$avgSco")

    // 3. Keep students whose total is strictly greater than the average.
    val filterRDD: RDD[(String, Double)] = stuSumScoreRDD.filter {
      case (_, totalScore) => totalScore > avgSco
    }

    filterRDD.foreach(println)
  }
}
