package com.hw

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo6PainKeStu {

  /**
   * Reads per-subject score rows ("id,subject,score") from data/score.txt,
   * computes the variance of each student's subject scores, and prints the
   * top 100 students ordered by variance (descending).
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("Filter")
    val sc = new SparkContext(conf)

    try {
      val scoreRDD: RDD[String] = sc.textFile("data/score.txt")

      // (studentId, score) for every subject row; column 2 holds the score.
      val mapRDD: RDD[(String, Int)] = scoreRDD.map { line =>
        val split: Array[String] = line.split(",")
        (split(0), split(2).toInt)
      }

      // Subject count per student: group by id, take the group size.
      val countRDD: RDD[(String, Int)] = mapRDD
        .groupBy(_._1)
        .map { case (id, rows) => (id, rows.size) }

      // Total score per student.
      val sumScoreRDD: RDD[(String, Int)] = mapRDD.reduceByKey(_ + _)

      // Attach the per-student count and total to every per-subject score:
      // value shape is ((score, count), sumScore).
      val joinRDD: RDD[(String, ((Int, Int), Int))] =
        mapRDD.join(countRDD).join(sumScoreRDD)

      // Variance per student. The mean must be computed in Double — the
      // original integer division (sumScore / count) truncated the mean and
      // produced a wrong variance — and the summed squared deviations must
      // be divided by the subject count to be a variance at all.
      val resList: Array[(String, Double)] = joinRDD
        .map { case (id, ((score, count), sumScore)) =>
          val mean = sumScore.toDouble / count
          val dev = score - mean
          // Carry count alongside the squared deviation so we can divide
          // after the per-student sum is complete.
          (id, (dev * dev, count))
        }
        .reduceByKey { case ((sq1, n), (sq2, _)) => (sq1 + sq2, n) }
        .map { case (id, (sqSum, n)) => (id, sqSum / n) }
        .sortBy(_._2, ascending = false) // highest variance first
        .take(100)

      resList.foreach(println)
    } finally {
      // Always release the SparkContext, even if the job throws.
      sc.stop()
    }
  }

}
