package com.gin.spark

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

/**
 * Demo of three ways to work with an RDD of ints: filtering, deduplication
 * via `reduceByKey`, and deduplication via the built-in `distinct`.
 *
 * Runs locally (`local` master) and prints each result set to stdout,
 * separated by divider lines.
 */
object L01_RddApi {

  def main(args: Array[String]): Unit = {
    // Load configuration and obtain the Spark context.
    val conf = new SparkConf().setMaster("local").setAppName("L01")
    val sc = new SparkContext(conf)

    try {
      // Data source: small in-memory list with duplicates (so dedup demos are meaningful).
      val dataRDD: RDD[Int] = sc.parallelize(List(1, 2, 3, 4, 5, 4, 3, 2, 1))

      // --- Filtering ---
      // Verbose form: dataRDD.filter((x: Int) => { x > 3 })
      // Concise form using placeholder syntax:
      val filterRDD: RDD[Int] = dataRDD.filter(_ > 3)
      // collect() brings results to the driver so println runs driver-side.
      filterRDD.collect().foreach(println)
      println("-------- --------")

      // --- Deduplication via reduceByKey ---
      // Map each value to a (value, 1) pair, aggregate by key, then keep only
      // the keys. The reduce result (the count) is deliberately discarded —
      // we only care that each key survives exactly once after the shuffle.
      val reduceByKeyRes: RDD[Int] = dataRDD
        .map((x: Int) => (x, 1))
        // (x: accumulated value, y: current value) — sum is irrelevant, keys matter.
        .reduceByKey((x, y) => x + y)
        .map((pair: (Int, Int)) => pair._1)
      // Shorthand equivalent:
      // val reduceByKeyRes: RDD[Int] = dataRDD.map((_, 1)).reduceByKey(_ + _).map(_._1)
      reduceByKeyRes.collect().foreach(println)
      println("-------- --------")

      // --- Dataset-oriented development ---
      // RDD flavors: HadoopRDD, MapPartitionsRDD, ShuffledRDD, ...
      // 1. Basic API: map, flatMap, filter, ...
      // 2. Composite API: distinct, reduceByKey, combineByKey, ...
      // Deduplication using the distinct API (internally does map/reduceByKey/map).
      val distinctRes: RDD[Int] = dataRDD.distinct()
      distinctRes.collect().foreach(println)
      println("-------- --------")
    } finally {
      // Always release the SparkContext, even if a job above fails.
      sc.stop()
    }
  }

}
