package com.shujia.spark.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Demo21Student {

  /**
    * Finds the students who passed EVERY subject, where "passed" means
    * score >= 60% of that subject's full score, and prints their
    * per-subject score rows.
    *
    * Inputs (CSV, comma-separated):
    *  - data/score.txt   : studentId,subjectId,score
    *  - data/subject.txt : subjectId,subjectName,fullScore
    */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
    conf.setMaster("local")
    conf.setAppName("student")
    val sc = new SparkContext(conf)

    try {
      // 1. Load the score table.
      val scoreRDD: RDD[String] = sc.textFile("data/score.txt")

      // 2. Load the subject table.
      val subjectRDD: RDD[String] = sc.textFile("data/subject.txt")

      // Key scores by subject id: (subjectId, (studentId, score)).
      // Malformed rows (not exactly 3 fields) are dropped.
      val scoreKVRDD: RDD[(String, (String, Int))] = scoreRDD
        .map(_.split(","))
        .filter(_.length == 3)
        .map { case Array(sid, cid, sco) => (cid, (sid, sco.toInt)) }

      // Key subjects by id with their full score: (subjectId, fullScore).
      val subjectKVRDD: RDD[(String, Int)] = subjectRDD
        .map(_.split(","))
        .filter(_.length == 3)
        .map { case Array(cid, _, sumSco) => (cid, sumSco.toInt) }

      // Total number of distinct subjects, derived from the subject table.
      // Previously hard-coded as 6, which silently breaks when the
      // subject file changes.
      val subjectCount: Int = subjectKVRDD.keys.distinct().count().toInt

      // Join the subject table with the score table on subjectId,
      // yielding (subjectId, (fullScore, (studentId, score))).
      val joinRDD: RDD[(String, (Int, (String, Int)))] = subjectKVRDD.join(scoreKVRDD)

      // Flatten to (studentId, subjectId, fullScore, score).
      val comRDD: RDD[(String, String, Int, Int)] = joinRDD.map {
        case (cid, (sumSco, (sid, sco))) => (sid, cid, sumSco, sco)
      }

      // Keep only passing rows: score >= 60% of the subject's full score.
      val filterRDD: RDD[(String, String, Int, Int)] = comRDD.filter {
        case (_, _, sumSco, sco) => sco >= sumSco * 0.6
      }

      // Group passing rows per student; a student qualifies when the
      // number of passed subjects equals the total subject count.
      val groupByRDD: RDD[(String, Iterable[(String, String, Int, Int)])] =
        filterRDD.groupBy(_._1)
      val resultRDD: RDD[(String, Iterable[(String, String, Int, Int)])] =
        groupByRDD.filter(_._2.size == subjectCount)

      // Expand back to the individual score rows for output.
      val doujigeRDD: RDD[(String, String, Int, Int)] = resultRDD.flatMap(_._2)

      doujigeRDD.foreach(println)
    } finally {
      // Always release Spark resources, even if the job fails.
      sc.stop()
    }
  }

}
