package com.shujia.core

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object Code10Filter {
  /**
   * Demonstrates the `filter` transformation:
   * reads per-subject scores, sums them per student id, and writes two outputs —
   * students with a total of at least 450 and students below 450.
   *
   * Input : spark_code/data/score.txt (CSV: id,subjectId,score)
   * Output: spark_code/output/score/more450 and .../less450
   */
  def main(args: Array[String]): Unit = {
    val sc = new SparkContext(new SparkConf().setMaster("local").setAppName("Mysql2Text"))

    val groupByRDD: RDD[(String, Iterable[GroupByScore])] = sc
      .textFile("spark_code/data/score.txt", 4)
      .map { oneLine =>
        // Parse one CSV line into a score record.
        val splitRes: Array[String] = oneLine.split(",")
        GroupByScore(splitRes(0), splitRes(1), splitRes(2).toInt)
      }
      .groupBy(_.id, 2) // group all subject scores by student id, 2 partitions
    println(groupByRDD.getNumPartitions)

    val groupByIDRDD: RDD[(String, Int)] = groupByRDD
      .map { case (id, groupByScoreIter) =>
        println("map正在执行..")
        (id, groupByScoreIter.map(_.score).sum)
      }
      // Two actions below consume this RDD; without cache() the whole lineage
      // (including the groupBy shuffle) would be recomputed for each action.
      .cache()

    // Students whose total score is at least 450.
    groupByIDRDD
      .filter { case (_, totalScore) => totalScore >= 450 }
      .saveAsTextFile("spark_code/output/score/more450")

    // Students whose total score is below 450.
    groupByIDRDD
      .filter { case (_, totalScore) => totalScore < 450 }
      .saveAsTextFile("spark_code/output/score/less450")

    // Uncomment to keep the app alive for inspecting the Spark Web UI:
    // while (true) {}

    // Release the context cleanly.
    sc.stop()
  }
}


