package com.shujia.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.rdd.RDD

object Demo21Student {
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf()
      .setAppName("student")
      .setMaster("local[4]")

    val sc = new SparkContext(conf)

    /**
      * Task 3: find the students who passed EVERY subject and print their
      * original score records [studentId, courseId, score].
      *
      * Plan:
      *   1. Join scores with the course table to obtain each subject's full mark.
      *   2. Keep only passing scores (score >= 60% of the full mark).
      *   3. Count passing subjects per student.
      *   4. Keep students whose count equals the total number of subjects.
      *   5. Emit the original score lines of those students.
      */

    // 1. Read score data; each line: "studentId,courseId,score"
    val scores: RDD[String] = sc.textFile("spark/data/score.txt")

    // Key by course id so the scores can be joined with the course table.
    val kvScore: RDD[(String, (String, Double))] = scores.map(line => {
      val split: Array[String] = line.split(",")
      val studentId: String = split(0)
      val courseId: String = split(1)
      val score: Double = split(2).toDouble
      (courseId, (studentId, score))
    })

    // Read the course table; each line: "courseId,courseName,fullMark"
    val cource: RDD[String] = sc.textFile("spark/data/cource.txt")

    val kvCource: RDD[(String, Double)] = cource.map(line => {
      val split: Array[String] = line.split(",")
      val id: String = split(0)
      val sumScore: Double = split(2).toDouble
      (id, sumScore)
    })

    // Total number of subjects, derived from the course table instead of a
    // hard-coded constant (6), so the job stays correct if subjects are
    // added or removed.
    val courseCount: Int = kvCource.keys.distinct().count().toInt

    // Join the score table with the course table:
    // (courseId, ((studentId, score), fullMark))
    val joinRDD: RDD[(String, ((String, Double), Double))] = kvScore.join(kvCource)

    // Keep only passing scores: at least 60% of the subject's full mark.
    // Unused tuple components are matched with underscores.
    val filterRDD: RDD[(String, ((String, Double), Double))] = joinRDD.filter {
      case (_, ((_, sco), sumScore)) => sco >= sumScore * 0.6
    }

    // One (studentId, 1) record per passed subject.
    val idRDD: RDD[(String, Int)] = filterRDD.map {
      case (_, ((id, _), _)) => (id, 1)
    }

    // Number of passed subjects per student.
    val passedCounts: RDD[(String, Int)] = idRDD.reduceByKey(_ + _)

    // Students who passed all subjects.
    val filterStudent: RDD[(String, Int)] = passedCounts.filter(_._2 == courseCount)

    // Collect the qualifying ids into a Set for O(1) membership tests, and
    // broadcast it so each executor receives a single copy instead of one
    // serialized copy per task.
    val ids: Set[String] = filterStudent.map(_._1).collect().toSet
    val idsBroadcast = sc.broadcast(ids)

    // Re-filter the raw score lines to keep only fully-passing students.
    val studentScore: RDD[String] =
      scores.filter(line => idsBroadcast.value.contains(line.split(",")(0)))

    // NOTE: with local[4] the executor and driver share stdout, so this prints
    // locally; on a cluster the output would land in executor logs.
    studentScore.foreach(println)

    // Release Spark resources.
    sc.stop()
  }
}
