package com.shengzai

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable.ListBuffer
import scala.io.{BufferedSource, Source}

// 4. Count the students who passed every subject
object Code40Example04 {

  /** Counts the students who passed every subject (a pass is a score of at
    * least 60% of that subject's full mark) and prints the count.
    */
  def main(args: Array[String]): Unit = {

    // Open both input files; close them in `finally` so the handles are
    // released even if parsing fails (the original code leaked them).
    val scoreSource: BufferedSource = Source.fromFile("hadoop_code/src/data/score.txt")
    val subSource: BufferedSource = Source.fromFile("data/subject.txt")
    try {
      val scoreList: List[String] = scoreSource.getLines().toList
      val subList: List[String] = subSource.getLines().toList

      // score.txt line layout: studentId,subjectId,score
      val mapScore: List[(String, String, Int)] = scoreList.map { line =>
        val f = line.split(",")
        (f(0), f(1), f(2).toInt)
      }

      // subject.txt line layout: subjectId,<unused column>,fullMark.
      // Build a Map keyed by subject id for O(1) lookup, replacing the
      // original O(n*m) nested-loop join.
      val fullMarkBySubject: Map[String, Int] = subList.map { line =>
        val f = line.split(",")
        (f(0), f(2).toInt)
      }.toMap

      // Join each score row with its subject's full mark and keep only the
      // passing rows (score >= 60% of the full mark). Rows whose subject id
      // has no entry in the subject table are dropped, as before.
      val passed: List[(String, Int, Int)] = for {
        (student, subject, score) <- mapScore
        fullMark <- fullMarkBySubject.get(subject)
        if score >= fullMark * 0.6
      } yield (student, score, fullMark)

      // A student qualifies only when every subject was passed; derive the
      // required count from the subject table instead of hard-coding 6.
      val subjectCount: Int = fullMarkBySubject.size
      val res: Map[String, List[(String, Int, Int)]] =
        passed.groupBy(_._1).filter(_._2.size == subjectCount)

      println(res.size)
    } finally {
      scoreSource.close()
      subSource.close()
    }
  }

}

