package com.shujia.spark

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo22Student {

  /**
    * Task 4: find the top 100 students with the most uneven performance
    * across subjects. Record layout: [student id, name, class, subject, score].
    *
    * "Unevenness" is measured by the variance of the student's normalized
    * scores: the higher the variance, the more lopsided the student is.
    */
  def main(args: Array[String]): Unit = {

    val conf: SparkConf = new SparkConf()
      .setAppName("student")
      .setMaster("local[4]")

    val sc = new SparkContext(conf)

    // Score records — assumed layout: studentId,courseId,score (TODO confirm against data file)
    val scores: RDD[String] = sc.textFile("spark/data/score.txt")

    // Course table — assumed layout: courseId,courseName,fullMark
    val course: RDD[String] = sc.textFile("spark/data/cource.txt")

    val kvCourse: RDD[(String, Double)] = course
      .map(_.split(","))
      .map(arr => (arr(0), arr(2).toDouble))

    // key = course id, value = full mark for that course
    val courseMap: collection.Map[String, Double] = kvCourse.collectAsMap()

    // Broadcast the small course table so each executor holds a single copy.
    val courseBro: Broadcast[collection.Map[String, Double]] = sc.broadcast(courseMap)

    /**
      * Map-side join: the small table is loaded into memory and joined in
      * the map phase, avoiding a shuffle.
      * Limitation: cannot be used when both tables are large.
      */
    val scoreWithFull: RDD[(String, Double, Double)] = scores.map(line => {
      val split: Array[String] = line.split(",")
      val id: String = split(0)
      val cId: String = split(1)
      val sco: Double = split(2).toDouble

      // Look up the course's full mark; default to 100 for unknown course ids.
      val fullMark: Double = courseBro.value.getOrElse(cId, 100)

      (id, sco, fullMark)
    })

    // Normalize each score to [0, 1] so subjects with different full marks
    // (e.g. 100 vs 150) are comparable.
    val normScore: RDD[(String, Double)] = scoreWithFull.map {
      case (id, sco, fullMark) => (id, sco / fullMark)
    }

    // Group each student's normalized scores to compute their mean.
    val groupRDD: RDD[(String, Iterable[Double])] = normScore.groupByKey()

    // FIX: divide by the actual number of subjects rather than a hard-coded 6,
    // so students with missing or extra score records get a correct mean.
    val avgSco: RDD[(String, Double)] = groupRDD.map {
      case (id, scos) => (id, scos.sum / scos.size)
    }

    val joinRDD: RDD[(String, (Double, Double))] = normScore.join(avgSco)

    // Squared deviation of each score from the student's mean.
    val sqDevRDD: RDD[(String, Double)] = joinRDD.map {
      case (id, (sco, avg)) =>
        val dev = sco - avg
        (id, dev * dev)
    }

    // Sum of squared deviations per student (proportional to the variance
    // when every student has the same number of subjects).
    val varianceRDD: RDD[(String, Double)] = sqDevRDD.reduceByKey(_ + _)

    // Rank students by variance, most uneven first.
    val sortedRDD: RDD[(String, Double)] = varianceRDD.sortBy(_._2, ascending = false)

    // Take the 100 most uneven student ids. Materialize as a Set so the
    // filter below does O(1) membership tests instead of Array.contains O(n).
    val top100Ids: Set[String] = sortedRDD.take(100).map(_._1).toSet

    // Keep the original score lines belonging to those students.
    val filterScore: RDD[String] = scores.filter(line => top100Ids.contains(line.split(",")(0)))

    filterScore.foreach(println)

  }

}
