package com.qingguo.homework

import org.apache.spark.{SparkConf, SparkContext}

object student3 {

  /**
    * Problem 3: list the students who passed every subject.
    *
    * Inputs (CSV, no header):
    *   - Spark/data/score.txt  : rows of "studentId,courseId,score"
    *   - Spark/data/cource.txt : rows of "courseId,courseName,maxScore"
    *
    * A subject counts as passed when score / maxScore >= 0.6.
    * Prints one `(studentId, passedCourseCount)` pair per qualifying
    * student, where passedCourseCount equals the total number of courses.
    */
  def main(args: Array[String]): Unit = {

    // Create the Spark job (local mode, as in the rest of this project).
    val conf = new SparkConf()
    conf.setAppName("student3").setMaster("local")
    val sc = new SparkContext(conf)

    try {
      // (courseId, "studentId_score") — keyed by course for the join below.
      val scoreKVRDD = sc.textFile("Spark/data/score.txt").map(line => {
        val fields = line.split(",")
        (fields(1), fields(0) + "_" + fields(2))
      })

      // (courseId, "courseName_maxScore")
      val courceKVRDD = sc.textFile("Spark/data/cource.txt").map(line => {
        val fields = line.split(",")
        (fields(0), fields(1) + "_" + fields(2))
      })

      // Total number of distinct courses. Replaces the previously
      // hard-coded 6 so the job stays correct if the catalogue changes
      // (for the shipped data this evaluates to 6).
      val courseCount = courceKVRDD.keys.distinct().count()

      // Join score rows with their course's max score, normalize to a
      // pass ratio, keep passing records, count passes per student, and
      // keep only students who passed every course.
      courceKVRDD
        .join(scoreKVRDD)
        .map { case (_, (courseInfo, studentInfo)) =>
          // courseInfo = "courseName_maxScore", studentInfo = "studentId_score";
          // split each string once instead of twice per field.
          val maxscore = courseInfo.split("_")(1)
          val studentFields = studentInfo.split("_")
          val id = studentFields(0)
          val score = studentFields(1)
          (id, score.toDouble / maxscore.toDouble)
        }
        .filter(_._2 >= 0.6) // 60% of the max score is the pass line
        .map(line => (line._1, 1))
        .reduceByKey(_ + _)
        .filter(_._2 == courseCount)
        .foreach(println)
    } finally {
      // Release the SparkContext even if the job throws; the original
      // never stopped it.
      sc.stop()
    }
  }

}
