package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.util.Try

object Demo19Student {

  /**
    * Task 4: find the top 100 students with the most uneven (lopsided) subject scores.
    *
    * "Unevenness" is measured by the variance of each student's per-subject scores:
    *   1. compute the mean of the student's scores
    *   2. compute the population variance around that mean
    * A larger variance means the student's performance differs more across subjects.
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    conf.setAppName("student")
    conf.setMaster("local")

    val sc = new SparkContext(conf)

    try {
      // 1. Read the raw score data; each line is expected to be "studentId,courseId,score"
      val scoresRDD: RDD[String] = sc.textFile("data/score.txt")

      // 2. Parse and clean the data
      val kvRDD: RDD[(String, String, Double)] = scoresRDD
        // split each CSV line into fields
        .map(_.split(","))
        // drop dirty rows: wrong field count OR a non-numeric score
        // (the original code only checked the length, so a bad score field
        //  would throw NumberFormatException and kill the job)
        .filter(arr => arr.length == 3 && Try(arr(2).toDouble).isSuccess)
        .map {
          case Array(sId: String, cId: String, sco: String) =>
            (sId, cId, sco.toDouble)
        }

      // 3. Group all score records by student id
      val groupByRDD: RDD[(String, Iterable[(String, String, Double)])] = kvRDD.groupBy(_._1)

      // 4. Compute the population variance of each student's scores.
      //    Groups produced by groupBy are never empty, so the divisions are safe.
      val varianceRDD: RDD[(String, Double)] = groupByRDD.map {
        case (id: String, iter: Iterable[(String, String, Double)]) =>
          // extract just the scores
          val scores: List[Double] = iter.map(_._3).toList

          val avg: Double = scores.sum / scores.length

          // population variance: mean of squared deviations from the mean
          // (note: this is variance, not standard deviation — no sqrt is taken,
          //  which is fine since sqrt is monotonic and we only need the ordering)
          val variance: Double = scores.map(s => (s - avg) * (s - avg)).sum / scores.length

          (id, variance)
      }

      // 5. Sort descending by variance and keep the 100 most uneven students
      val sortByRDD: RDD[(String, Double)] = varianceRDD.sortBy(_._2, ascending = false)

      val top: Array[(String, Double)] = sortByRDD.take(100)

      top.foreach(println)
    } finally {
      // always release the SparkContext, even if the job fails
      sc.stop()
    }
  }

}
