package com.shujia.spark

import org.apache.spark.{SparkConf, SparkContext}

object Demo17Student {
  def main(args: Array[String]): Unit = {

    /**
      * Task 3: find the students who passed EVERY subject.
      * Fields in the source data: [studentId, name, class, subject, score]
      */
    val conf = new SparkConf()
      // was "Demo16PageRank" — stale copy-paste from the previous demo
      .setAppName("Demo17Student")
      .setMaster("local")

    val sc = new SparkContext(conf)

    // NOTE(review): `student` is loaded but never used below; the task
    // description mentions name/class fields, so a join was probably
    // intended — confirm before deleting.
    val student = sc.textFile("spark/data/students.txt")
    val score = sc.textFile("spark/data/score.txt")
    val cource = sc.textFile("spark/data/cource.txt")

    // The course table is small: key each line by course id and collect it
    // to the driver as a Scala Map so it can be broadcast for a map-side join.
    val courceMap = cource.map(line => {
      val cuId = line.split(",")(0)
      (cuId, line)
    }).collectAsMap()

    // Broadcast the lookup table once instead of shipping it with every task,
    // turning the course lookup into a shuffle-free map join.
    val courceBro = sc.broadcast(courceMap)

    // Number of subjects a student must pass, derived from the course table
    // instead of the previous hard-coded literal 6.
    val courseCount = courceMap.size

    score.map(line => {
      val couMap = courceBro.value

      val split = line.split(",")
      val sId = split(0)
      val cId = split(1)
      val sco = split(2).toInt

      // Full marks for this subject (3rd column of the course line).
      // Assumes every score row references a known course id — a missing id
      // would make the split below fail; TODO confirm against the data files.
      val cou = couMap.getOrElse(cId, "")
      val couSum = cou.split(",")(2).toInt

      // A subject is passed at >= 60% of full marks; emit 1/0 so the
      // reduce step can simply count passed subjects per student.
      val flag = if (sco >= couSum * 0.6) 1 else 0

      (sId, flag)
    }).reduceByKey((x, y) => x + y)
      // Keep only students who passed every subject.
      .filter(kv => kv._2 == courseCount)
      .foreach(println)


    /**
      * Task 4: the 100 students with the most uneven (lopsided) performance.
      * "Unevenness" is measured as the variance of the student's normalized
      * scores (score / full marks), so subjects with different maximums are
      * comparable.
      */
    score.map(line => {
      val couMap = courceBro.value

      val split = line.split(",")
      val sId = split(0)
      val cId = split(1)
      val sco = split(2).toInt

      // Full marks for this subject.
      val cou = couMap.getOrElse(cId, "")
      val couSum = cou.split(",")(2).toInt

      // Normalize to [0, 1] so subjects with different maximums compare fairly.
      val std = sco / couSum.toDouble

      (sId, std)
    }).groupByKey()
      .map(kv => {
        val id = kv._1
        val stds = kv._2.toList

        // Mean of the normalized scores.
        val avg = stds.sum / stds.length

        // Population variance: the larger it is, the more lopsided the student.
        val D = stds.map(sco => (sco - avg) * (sco - avg)).sum / stds.length

        (id, D)
      }).sortBy(-_._2) // descending by variance
      // The task asks for the top 100 only; the original printed every student.
      // take() also brings the rows to the driver, so println output is local.
      .take(100)
      .foreach(println)


  }
}
