package com.bigdata.core.example

import org.apache.spark.rdd.RDD
import org.apache.spark.{SparkConf, SparkContext}

object PipelineTest {

  /**
   * Demonstrates Spark's task pipelining: narrow-dependency transformations
   * (`filter`, `map`) that require no shuffle are logically fused and run
   * inside a single task, while `distinct` forces a shuffle and therefore a
   * new stage. The interleaved `println` output makes the fusion visible.
   *
   * @param args unused command-line arguments
   */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf().setMaster("local").setAppName("pipelineTest")
    val sc = new SparkContext(conf)
    sc.setLogLevel("error")

    try {
      val names: RDD[String] = sc.parallelize(Array[String]("zhangsan", "lisi", "wangwu"), 2)

      // Narrow-dependency operators with no shuffle are chained together
      // logically and executed within one task (pipelining): for each record
      // the filter and map printlns interleave rather than running in bulk.
      val trans1: RDD[String] = names.filter(s => {
        println("*****filter*****" + s)
        true
      })

      val trans2: RDD[String] = trans1.map(s => {
        println("######## map ##########" + s)
        s + "#"
      })

      // `distinct(3)` repartitions via a shuffle, ending stage 1; everything
      // below runs in stage 2.
      val disRDD: RDD[String] = trans2.distinct(3)

      val result: RDD[(String, Int)] = disRDD.map(key => {
        println("stage 2..................")
        (key, 1)
      })

      // Action: triggers the whole lineage above.
      result.foreach(println)
    } finally {
      // Always release the SparkContext so the application shuts down cleanly
      // even if the job above throws.
      sc.stop()
    }
  }
}