package com.csw.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

//3. Report the per-subject scores of the top-10 students in the grade (by total score): [student id, name, class, subject, score]
object Text3 {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setMaster("local")
      .setAppName("text3") // was "text1" — copy-paste leftover; name now matches the job

    val sc: SparkContext = new SparkContext(conf)

    try {
      // Raw inputs. Score lines: "studentId,subjectId,score".
      // Student lines: field 0 = id, field 1 = name, field 4 = class
      // (assumed from the original column indices — TODO confirm against the data files).
      val scoresRDD: RDD[String] = sc.textFile("spark/data/score.txt")
      val studentsRDD: RDD[String] = sc.textFile("spark/data/students.txt")

      // (studentId, score) pairs — split each line once instead of once per field.
      val scoresIdScoreRDD: RDD[(String, Int)] = scoresRDD.map { line =>
        val cols = line.split(",")
        (cols(0), cols(2).toInt)
      }

      // Total score per student.
      val scoreSumRDD: RDD[(String, Int)] = scoresIdScoreRDD.reduceByKey(_ + _)

      // Top 10 students by total score: (id, totalScore), highest first.
      val topTen: Array[(String, Int)] =
        scoreSumRDD.sortBy(_._2, ascending = false).take(10)

      topTen.foreach(println)

      // Ids of the top-10 students, in a Set for O(1) membership tests
      // (the original used Array.contains — a linear scan per score record).
      val topIds: Set[String] = topTen.map(_._1).toSet

      // (studentId, "subjectId,score") for every score row — again split once per line.
      val scoresMapRDD: RDD[(String, String)] = scoresRDD.map { line =>
        val cols = line.split(",")
        (cols(0), cols(1) + "," + cols(2))
      }

      // Keep only score rows that belong to a top-10 student.
      val scoresFilterRDD: RDD[(String, String)] =
        scoresMapRDD.filter { case (id, _) => topIds.contains(id) }

      // (studentId, "name,class") from the student table.
      val studentsMapRDD: RDD[(String, String)] = studentsRDD.map { line =>
        val cols = line.split(",")
        (cols(0), cols(1) + "," + cols(4))
      }

      // Left outer join preserves the original behavior: score rows with no
      // matching student record are kept, with None in the student slot.
      val result: RDD[(String, (String, Option[String]))] =
        scoresFilterRDD.leftOuterJoin(studentsMapRDD)

      result.foreach(println)
    } finally {
      sc.stop() // release the SparkContext even if the job fails (was never stopped)
    }
  }
}
