package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo13Student {

  /**
   * Computes the average total score per class and prints the result.
   *
   * Input files (comma-separated, read from the working directory):
   *   - spark/data/score.txt    — field 0 = student id, field 2 = score
   *   - spark/data/students.csv — field 0 = student id, field 4 = class name
   *
   * Output: one `(class, roundedAverage)` tuple per class, printed on executors.
   */
  def main(args: Array[String]): Unit = {

    // Goal: for each class, average the students' total scores.

    // Build the Spark environment.
    val conf = new SparkConf()
    // local[*]: use all available local CPU cores.
    conf.setMaster("local[*]")
    conf.setAppName("student")

    val sc = new SparkContext(conf)

    try {
      // Read the score table.
      val scoreLinesRDD: RDD[String] = sc.textFile("spark/data/score.txt")

      // Extract (studentId, score).
      // NOTE(review): assumes every line has at least 3 comma-separated fields
      // and field 2 parses as a number — malformed lines will fail the task.
      val idAndScoreRDD: RDD[(String, Double)] = scoreLinesRDD.map { line =>
        val fields = line.split(",")
        (fields(0), fields(2).toDouble)
      }

      // Total score per student.
      val sumScoreRDD: RDD[(String, Double)] = idAndScoreRDD.reduceByKey(_ + _)

      // Read the student table to obtain each student's class.
      val studentLinesRDD: RDD[String] = sc.textFile("spark/data/students.csv")

      // Extract (studentId, class).
      val idAndClassRDD: RDD[(String, String)] = studentLinesRDD.map { line =>
        val fields = line.split(",")
        (fields(0), fields(4))
      }

      // Join on student id to attach the class to each total score.
      val joinRDD: RDD[(String, (Double, String))] = sumScoreRDD.join(idAndClassRDD)

      // Re-key by class: (class, totalScore).
      val clazzAndSumScoreRDD: RDD[(String, Double)] = joinRDD.map {
        case (_, (sumScore, clazz)) => (clazz, sumScore)
      }

      // Average per class. Use reduceByKey over (sum, count) pairs instead of
      // groupByKey: values are combined map-side, so the shuffle moves one pair
      // per class per partition rather than every individual score.
      val avgScoreRDD: RDD[(String, Double)] = clazzAndSumScoreRDD
        .mapValues(score => (score, 1L))
        .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2))
        // Round to the nearest whole number, as the original output did;
        // .toDouble makes the Long-to-Double conversion explicit instead of
        // relying on implicit numeric widening.
        .mapValues { case (sum, count) => Math.round(sum / count).toDouble }

      avgScoreRDD.foreach(println)
    } finally {
      // Always release the SparkContext, even if a job fails.
      sc.stop()
    }
  }

}
