package com.offcn.bigdata.spark.p1.p2

import com.offcn.bigdata.spark.p2._02CombineByKeyOps._
import org.apache.hadoop.io.Text
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrations of Spark RDD action operators:
  *   count    - returns the number of elements in the RDD
  *   foreach  - iterates over elements (executed on the executors)
  *   take
  *   --------------------
  *   collect
  *   reduce
  *   countByKey
  *   saveAsXxx
  *   foreachPartition
  */
object _04ActionOps {
    def main(args: Array[String]): Unit = {
        val conf = new SparkConf()
            .setMaster("local[*]")
            // Fix: use this object's own class name for the app name. The original
            // referenced _03AggregateBykeyOps — a copy-paste leftover from another demo.
            .setAppName(s"${this.getClass.getSimpleName}")
        val sc = new SparkContext(conf)
        // Sample (name, age, province) records spread across 2 partitions.
        val infos = sc.parallelize(List(
            Student("郭雪磊", 18, "山东"),
            Student("单 松", 20, "山东"),
            Student("刘宇航", 18, "河北"),
            Student("王健", 18, "河南"),
            Student("许迎港", 18, "河北"),
            Student("元永劫", 18, "黑龙江"),
            Student("林博", 18, "黑龙江"),
            Student("李佳奥", 18, "河南"),
            Student("冯世明", 18, "黑龙江"),
            Student("肖楚轩", 18, "山东"),
            Student("张皓", 18, "河南"),
            Student("冯岩", 18, "黑龙江")
        ), 2)
        /*
            collect pulls the entire RDD from the executors back to the driver.
            Beware: a large dataset can OOM (OutOfMemory) the driver process,
            so filter or take BEFORE collecting whenever possible.
         */
        println("----------collect-----------")
        infos.collect().foreach(println)
        println("----------reduce-----------")
        /*
            reduceByKey is a transformation (lazy, returns an RDD);
            reduce is an action (eager, returns a single value to the driver).
         */
        val rdd = sc.parallelize(1 to 100)
        val sum = rdd.reduce(_ + _)
        println("sum: " + sum)
        println("----------countByKey-----------")
        // Key the students by province, then count per key.
        // countByKey is an action: it returns a Map[String, Long] on the driver.
        val p2Info = infos.map(stu => (stu.province, stu))
        val p2Counts = p2Info.countByKey()
        p2Counts.foreach { case (province, count) =>
            println(s"省份：${province}对应的人数为：${count}")
        }
        println("----------saveXxx-----------")
//        p2Info.saveAsTextFile("file:/E:/data/out/spark/wc")
//        p2Info.
//            saveAsObjectFile("file:/E:/data/out/spark/obj")
        /*
            saveAsNewAPIHadoopFile(path, keyClass, valueClass, outputFormatClass)
            NOTE(review): the RDD's keys are String (not hadoop.io.Text) and the
            values are Student; TextOutputFormat writes both via toString, but the
            declared keyClass here does not match the runtime key type — confirm
            this works with the Hadoop version in use.
         */
        p2Info.saveAsNewAPIHadoopFile(
            "file:/E:/data/out/spark/hadoop",
            classOf[Text],
            classOf[Student],
            classOf[TextOutputFormat[Text, Student]]
        )

        // Convention: stop() has a side effect, so call it with parentheses.
        sc.stop()
    }
}
