package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}

object Demo9Student {

  /**
    * Spark RDD demo over two text files:
    *   data/score.txt   rows: s_id,course_id,score
    *   data/student.txt rows: s_id,... ,clazz (class name at index 4)
    *
    * Job 1: average score per student.
    * Job 2: highest total score per class (two equivalent implementations).
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("app").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // scoreRDD feeds both the average job and the join below; cache it so
      // the file is not re-read for every action.
      val scoreRDD = sc.textFile("data/score.txt").cache()
      val studentRDD = sc.textFile("data/student.txt")

      /**
        * 1. Average score per student.
        */
      scoreRDD.map(line => {
        val split = line.split(",")
        // (student id, (score, subject count)); the count is a Double so the
        // final division produces a fractional average instead of Int math.
        (split(0), (split(2).toInt, 1.0))
      }).reduceByKey((x, y) => (x._1 + y._1, x._2 + y._2)) // (total, subjects)
        .map(kv => (kv._1, kv._2._1 / kv._2._2))
        .foreach(println)

      /**
        * 2. Highest total score per class.
        */
      val stuKV = studentRDD.map(line => (line.split(",")(0), line))
      val scoKV = scoreRDD.map(line => (line.split(",")(0), line))

      // Total score per student, keyed by class: (clazz, (s_id, totalScore)).
      // Tuple keys replace the fragile "s_id_clazz" string concat + split
      // round trip of the original.
      val kvRDD = stuKV.join(scoKV).map(kv => {
        val clazz = kv._2._1.split(",")(4)
        val score = kv._2._2.split(",")(2).toInt
        ((kv._1, clazz), score)
      }).reduceByKey(_ + _)
        .map { case ((sId, clazz), sum) => (clazz, (sId, sum)) }
        .cache() // reused by both implementations below

      // Implementation A: groupByKey, then pick the top student per class.
      kvRDD.groupByKey().map(kv => {
        val (sId, maxScore) = kv._2.maxBy(_._2)
        (kv._1, sId, maxScore)
      }).foreach(println)

      // Implementation B: reduceByKey keeps only the higher-scoring record,
      // reducing map-side before the shuffle instead of grouping everything.
      kvRDD.reduceByKey((x, y) => if (x._2 > y._2) x else y)
        .foreach(println)
    } finally {
      // Always release the SparkContext, even if a job throws.
      sc.stop()
    }
  }

}
