package com.learn.lb.spark.streaming

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Demonstrates the DStream `transform` operator.
 * Given log records of the form "20190909,lisi", filters out any record
 * whose user name appears on a blacklist, by joining each micro-batch RDD
 * against a blacklist RDD inside `transform`.
 * @author laibo
 * @since 2019/9/9 21:24
 */
object TransformDemo {

  def main(args: Array[String]): Unit = {
    // App name fixed to match this demo (was a copy-paste leftover "NetworkWordCount").
    val conf = new SparkConf().setMaster("local[2]").setAppName("TransformDemo")
    // Micro-batch interval: one RDD of received lines every 5 seconds.
    val ssc = new StreamingContext(conf, Seconds(5))
    // ReceiverInputDStream of raw text lines read from host "master", port 9999.
    val lines = ssc.socketTextStream("master", 9999)
    // Blacklist as (name, true) pairs so it can serve as the join-key set.
    val blackRdd = ssc.sparkContext.parallelize(List("zs", "lisi")).map((_, true))
    // Key each log line by its user name (second CSV field), keeping the full
    // line as the value. Use flatMap with a length guard so a malformed line
    // (no comma / missing name) is dropped instead of killing the batch with
    // an ArrayIndexOutOfBoundsException.
    val logDs = lines.flatMap { line =>
      val fields = line.split(",")
      if (fields.length >= 2) Some((fields(1), line)) else None
    }.transform { rdd =>
      // leftOuterJoin yields (name, (line, Option[flag])); the flag is
      // Some(true) only for blacklisted users, so keep everything else.
      rdd.leftOuterJoin(blackRdd)
        .filter(!_._2._2.getOrElse(false))
        .map(_._2._1)
    }
    logDs.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
