package day02.operator.action

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Examples of common Spark RDD action operators (reduce, collect, count,
 * countByValue, take, takeOrdered, aggregate, fold, countByKey, save*).
 *
 * @author wsl
 */
object Action {

  /**
   * Entry point: runs a sequence of RDD actions against small in-memory
   * datasets and prints each result to stdout, then writes the RDD out in
   * three file formats and stops the context.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    // Local-mode context using all available cores.
    val conf: SparkConf = new SparkConf().setAppName("rdd").setMaster("local[*]")
    val sc = new SparkContext(conf)

    val rdd: RDD[Int] = sc.makeRDD(List(1, 1, 2, 3, 4, 4, 4, 5, 5, 6), 2)
    val rdd2: RDD[(String, Int)] = sc.makeRDD(List(("a", 1), ("a", 2), ("a", 3), ("b", 1), ("b", 1), ("c", 1)))
    val rdd3: RDD[String] = sc.makeRDD(List("hello", "hello", "spark", "hadoop", "scala"))
    // NOTE(review): relative path — assumes the working directory is the
    // project root so "sparkcore/input/1.txt" resolves; confirm when running.
    val rdd4: RDD[String] = sc.textFile("sparkcore/input/1.txt")

    // reduce: sum of all elements.
    println(rdd.reduce(_ + _))

    // collect: bring the whole RDD to the driver as an array.
    // Fix: print the joined string once with a trailing newline instead of
    // foreach(print)-ing it character by character, which emitted no newline
    // and made this output run into the count() output below.
    println(rdd.collect().mkString(","))

    println(rdd.count())

    // word count: countByValue returns a local Map of element -> occurrence count.
    println(rdd.countByValue())
    println(rdd3.countByValue())
    println(rdd4.flatMap(_.split(" ")).countByValue())

    println(rdd.first())

    println(rdd.take(2).mkString(","))

    // Largest 4 elements: take 4 under descending order, then reverse back to ascending.
    println(rdd.takeOrdered(4)(Ordering.Int.reverse).reverse.mkString(","))

    // aggregate/fold with zero value 0 and + for both intra- and inter-partition merge: also a sum.
    println(rdd.aggregate(0)(_ + _, _ + _))
    println(rdd.fold(0)(_ + _))

    // countByKey counts tuples per key; countByValue counts identical (key, value) pairs.
    rdd2.countByKey().foreach(println)
    rdd2.countByValue().foreach(println)

    // Save as plain text files.
    rdd.saveAsTextFile("/out1")
    // Save as serialized objects.
    rdd.saveAsObjectFile("/out2")
    // SequenceFile output requires a pair RDD, hence the map to (value, 1).
    rdd.map((_, 1)).saveAsSequenceFile("/out3")

    sc.stop()
  }

}
