package com.shujia.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo15Student1 {

  /**
    * Find all students whose total score is greater than the grade-wide
    * average total score, and print them as: id,name,class,totalScore
    *
    * Inputs (local text files, comma-separated):
    *   - spark/data/students.txt : id,name,...,...,class  (class at index 4)
    *   - spark/data/score.txt    : id,...,score           (score at index 2)
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf().setMaster("local[8]").setAppName("map")
    val sc: SparkContext = new SparkContext(conf)

    val students: RDD[String] = sc.textFile("spark/data/students.txt")
    val scores: RDD[String] = sc.textFile("spark/data/score.txt")

    // Total score per student.
    // Split each line ONCE (the original split twice per record) and sum scores by id.
    val sumScoreRDD: RDD[(String, Int)] = scores.map(line => {
      val split: Array[String] = line.split(",")
      (split(0), split(2).toInt)
    }).reduceByKey(_ + _)

    // sumScoreRDD feeds three separate computations below (sum, count, join);
    // cache it so the score file is read and aggregated only once.
    sumScoreRDD.cache()

    // Grade-wide average of the per-student totals.
    // NOTE(review): if score.txt is empty, count() is 0 and avgScore is NaN;
    // the filter then keeps nothing, so the job still terminates cleanly.
    val avgScore: Double = sumScoreRDD.map(_._2).sum() / sumScoreRDD.count().toDouble

    // Keep only students whose total exceeds the average.
    val lastAvgRDD: RDD[(String, Int)] = sumScoreRDD.filter(kv => kv._2 > avgScore)

    // Re-shape student records as (id, "name,class") for the join.
    val studentKV: RDD[(String, String)] = students.map(line => {
      val split: Array[String] = line.split(",")

      val id: String = split(0)
      val name: String = split(1)
      val clazz: String = split(4)

      (id, name + "," + clazz)
    })

    // Inner join: only students present in both datasets survive.
    val joinRDD: RDD[(String, (Int, String))] = lastAvgRDD.join(studentKV)

    // Final formatting: id,name,class,totalScore
    val resultRDD: RDD[String] = joinRDD.map(kv => {
      val id: String = kv._1
      val sumScore: Int = kv._2._1
      val nameAndClazz: String = kv._2._2

      s"$id,$nameAndClazz,$sumScore"
    })

    // NOTE: foreach runs on executors; with local[*] the output still reaches
    // this console, but on a cluster it would print on the workers.
    resultRDD.foreach(println)

    // Release the SparkContext (the original leaked it).
    sc.stop()
  }

}
