package com.shujia.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo16Student {

  /**
    * Task: find the top-10 students in the grade by total score, then print
    * every per-subject score record belonging to those students.
    *
    * Input (spark/data/score.txt), one record per line:
    *   studentId,subjectId,score
    *
    * NOTE(review): the original requirement asks for
    * [studentId, studentName, class, subjectName, score] — producing the name,
    * class and subject name would require joining with the student and subject
    * tables, which this job does not do yet. TODO: confirm whether the raw
    * score lines are the intended output.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("Demo16Student")
      .setMaster("local")

    val sc = new SparkContext(conf)

    try {
      // Read the score table. Cached because the RDD is traversed twice:
      // once for the total-score aggregation and once for the final filter.
      val scoreRDD: RDD[String] = sc.textFile("spark/data/score.txt").cache()

      // Key each record by student id with the numeric score as value.
      val scoreKVRDD: RDD[(String, Int)] = scoreRDD.map(line => {
        val split: Array[String] = line.split(",")
        val id: String = split(0)
        val sco: Int = split(2).toInt
        (id, sco)
      })

      // Total score per student.
      val sumStuRDD: RDD[(String, Int)] = scoreKVRDD.reduceByKey(_ + _)

      // Sort by total score, descending (negate for descending order).
      val sumScoreSortRDD: RDD[(String, Int)] =
        sumStuRDD.sortBy { case (_, sumScore) => -sumScore }

      // take(10) is an action: it pulls the top-10 (id, total) pairs
      // back to the driver as a local array.
      val top10SumScore: Array[(String, Int)] = sumScoreSortRDD.take(10)

      // Use a Set for O(1) membership tests in the filter closure below;
      // Array.contains would be a linear scan per input record.
      val top10Ids: Set[String] = top10SumScore.map { case (id, _) => id }.toSet

      // Keep only the score lines belonging to the top-10 students.
      val top10Score: RDD[String] = scoreRDD.filter(line => {
        val id: String = line.split(",")(0)
        top10Ids.contains(id)
      })

      top10Score.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job fails.
      sc.stop()
    }
  }

}
