package com.shujia.core

import org.apache.spark.{SparkConf, SparkContext}

object Demo10Student {
  /**
    * Task 3: find the 100 students whose scores are the most uneven across
    * subjects, i.e. the students with the largest per-student score variance.
    *
    * Input (local files, one record per line):
    *   data/score.txt   -> student_id,course_id,score
    *   data/student.txt -> loaded but not used by this task
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setAppName("app").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      val scoreRDD = sc.textFile("data/score.txt")
      val studentRDD = sc.textFile("data/student.txt")

      // (student_id, score) pairs, one per subject taken.
      val sRDD = scoreRDD.map(line => {
        val split = line.split(",")
        val s_id = split(0)
        val score = split(2).toInt
        (s_id, score)
      })

      // Per-student average score. Keep the subject count alongside the
      // average: it is needed later to turn the sum of squared deviations
      // into a true variance.
      val avgRDD = sRDD
        .map { case (id, score) => (id, (score, 1.0)) }
        .reduceByKey((a, b) => (a._1 + b._1, a._2 + b._2)) // (total score, subject count)
        .mapValues { case (total, cnt) => (total / cnt, cnt) } // (average, subject count)

      // Variance per student. NOTE: the original code stopped at the sum of
      // squared deviations, which biases the ranking toward students taking
      // more subjects; dividing by the count gives the actual variance.
      val top100 = sRDD
        .join(avgRDD) // (id, (score, (avg, cnt)))
        .map { case (id, (score, (avg, cnt))) =>
          val dev = avg - score
          (id, (dev * dev, cnt))
        }
        .reduceByKey((a, b) => (a._1 + b._1, a._2)) // sum squared deviations; cnt is constant per key
        .mapValues { case (sumSq, cnt) => sumSq / cnt } // variance
        .sortBy(-_._2) // most uneven first
        .take(100) // action: collect the top 100 to the driver
        .map(_._1)
        .toSet // Set gives O(1) membership checks in the filter below

      // Print every (student_id, score) record belonging to the top-100 students.
      sRDD
        .filter(kv => top100.contains(kv._1))
        .foreach(println)
    } finally {
      // Release the SparkContext even if the job fails (was missing originally).
      sc.stop()
    }
  }

}
