package com.darrenchan.spark

import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Demonstrates interaction between a DStream and an RDD:
  * each streaming batch is joined against a static blacklist RDD.
  */
object TransformApp {
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("TransformApp")

    // StreamingContext needs two arguments: a SparkConf and the batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    // Build the blacklist as a (name, true) keyed RDD. Cached because it is
    // re-used by the join on every micro-batch.
    val blackList = List("wyf", "sjj")
    val blackListRDD = ssc.sparkContext.parallelize(blackList).map((_, true)).cache()

    val lines = ssc.socketTextStream("hadoop000", 6789)

    // Key each line by its second comma-separated field (assumed to be the
    // user name, e.g. "20200101,wyf" — NOTE(review): confirm input format;
    // a line without a comma will throw ArrayIndexOutOfBoundsException here),
    // left-outer-join against the blacklist, and keep only lines whose key
    // is not blacklisted.
    val result = lines
      .map(line => (line.split(",")(1), line))
      .transform { rdd =>
        rdd
          .leftOuterJoin(blackListRDD) // (K, V) join (K, W) => (K, (V, Option[W]))
          .filter { case (_, (_, flagged)) => !flagged.getOrElse(false) }
          .map { case (_, (line, _)) => line }
      }

    result.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
