package SparkRDD.RDD算子.测验

import org.apache.commons.lang.StringUtils
import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}
import org.junit

class Test {

  // Local Spark context for this test; "local[6]" runs with 6 worker threads.
  val conf = new SparkConf().setMaster("local[6]").setAppName("PMTest")
  val sc   = new SparkContext(conf)

  /**
   * Sums a measurement column (index 9) of a CSV file grouped by two key
   * columns (indices 26 and 27), then prints the 48 largest groups sorted
   * by the aggregated sum in descending order.
   */
  @junit.Test
  def PMTest: Unit = {

    // 1. Read the raw CSV lines.
    val data: RDD[String] = sc.textFile("src/main/scala/Rdd算子/测验/fix_1.csv")

    // 2. Extract columns. Split each line ONCE (the previous version split the
    //    same line three times) and skip malformed lines that are too short to
    //    hold column 27 instead of crashing with ArrayIndexOutOfBoundsException.
    val dataSplit: RDD[((String, String), String)] = data.flatMap { line =>
      val fields = line.split(",")
      if (fields.length > 27) Some(((fields(26), fields(27)), fields(9))) else None
    }
    //dataSplit.foreach(println)

    // 3. Clean: drop empty values and the "NA" placeholder (case-insensitive).
    val dataClean: RDD[((String, String), String)] =
      dataSplit.filter(item => StringUtils.isNotEmpty(item._2) && !item._2.equalsIgnoreCase("NA"))
    //dataClean.foreach(println)

    // 4. Convert the value to Int, silently dropping any record whose value is
    //    not a valid integer — a single bad cell must not fail the whole job.
    val dataInt: RDD[((String, String), Int)] = dataClean.flatMap { case (key, value) =>
      scala.util.Try(value.toInt).toOption.map(n => (key, n))
    }
    //dataInt.foreach(println)

    // 5. Sum the values per (col26, col27) key.
    val dataReduce: RDD[((String, String), Int)] = dataInt.reduceByKey(_ + _)
    //dataReduce.foreach(println)

    // 6. Sort by the aggregated sum, descending, and print the top 48 rows.
    val dataSort: RDD[((String, String), Int)] = dataReduce.sortBy(_._2, ascending = false)
    dataSort.take(48).foreach(println)

    // 7. Release the SparkContext so repeated test runs don't leak contexts.
    sc.stop()

  }

}
