package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo14Student {

  /**
   * Spark job: find students whose total score exceeds the grade-wide
   * average total score.
   *
   * Input : data/score.txt — one record per line, comma-separated; the
   *         first field is the student id and the last field is a score.
   * Output: prints the average total score, then every (id, totalScore)
   *         pair whose total is strictly greater than the average.
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("student")

    val sc = new SparkContext(conf)

    // Ensure the SparkContext is always released, even if the job throws
    // (the original version leaked it on failure).
    try {
      // Read the raw score file.
      val linesRDD: RDD[String] = sc.textFile("data/score.txt")

      // Extract (studentId, score) pairs.
      // NOTE(review): assumes the last field is numeric — a malformed line
      // will fail the task with NumberFormatException.
      val scoreRDD: RDD[(String, Double)] = linesRDD.map((line: String) => {
        val split: Array[String] = line.split(",")
        val id: String = split.head
        val sco: Double = split.last.toDouble
        (id, sco)
      })

      /**
       * 1. Total score per student.
       */
      val sumScoreRDD: RDD[(String, Double)] = scoreRDD.reduceByKey(_ + _)

      /**
       * 2. Grade-wide average of the per-student totals.
       */
      // Sum of all students' totals.
      val sumSco: Double = sumScoreRDD.map { case (_: String, sco: Double) => sco }.sum()
      // Number of students.
      val num: Long = sumScoreRDD.count()
      // Average (Double division; yields NaN for an empty input file).
      val avgScore: Double = sumSco / num

      println(s"平均分：$avgScore")

      /**
       * 3. Keep students whose total is strictly above the average.
       */
      val filterRDD: RDD[(String, Double)] = sumScoreRDD.filter { case (_: String, sco: Double) => sco > avgScore }

      filterRDD.foreach(println)
    } finally {
      // Release executors and shut down the driver-side context.
      sc.stop()
    }
  }

}
