package com.csw.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

// 2. Find students whose total score exceeds the grade-wide average total score.
//    Output rows: [student id, (total score, Option("name,class"))]
object Text4 {
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local")
      .setAppName("text1")

    val sc: SparkContext = new SparkContext(conf)

    // Load raw data.
    // score.txt rows:    "studentId,courseId,score"  (score in column 2)
    // students.txt rows: "id,name,...,clazz"         (class in column 4)
    val scoresRDD: RDD[String] = sc.textFile("spark/data/score.txt")
    val studentsRDD: RDD[String] = sc.textFile("spark/data/students.txt")

    // (studentId, score) pairs; split each line once instead of twice.
    val idScoreRDD: RDD[(String, Int)] = scoresRDD.map { line =>
      val cols = line.split(",")
      (cols(0), cols(2).toInt)
    }

    // Total score per student.
    // cache(): this RDD is consumed three times below (count, sum, filter);
    // without it each action would recompute the whole lineage.
    val scoreSumRDD: RDD[(String, Int)] = idScoreRDD.reduceByKey(_ + _).cache()

    // Grade-wide average total score.
    // BUG FIX: the original divided by a hard-coded 1000 students; use the
    // actual number of students so the average is correct for any dataset.
    // Guard against an empty dataset to avoid a NaN average.
    val studentCount: Long = scoreSumRDD.count()
    val avgTotalScore: Double =
      if (studentCount == 0) 0.0
      else scoreSumRDD.map(_._2).sum() / studentCount

    // Keep only students whose total strictly exceeds the grade average.
    val aboveAvgRDD: RDD[(String, Int)] = scoreSumRDD.filter(_._2 > avgTotalScore)

    // (id, "name,clazz") for display; split each student line once.
    val studentsMapRDD: RDD[(String, String)] = studentsRDD.map { line =>
      val cols = line.split(",")
      (cols(0), cols(1) + "," + cols(4))
    }

    // leftOuterJoin keeps above-average students even when no matching
    // student record exists (their info side is None).
    val result: RDD[(String, (Int, Option[String]))] =
      aboveAvgRDD.leftOuterJoin(studentsMapRDD)

    result.foreach(println)

    // Release the SparkContext and its resources.
    sc.stop()
  }
}
