package com.sparkStreaming.demo

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Blacklist filtering demo.
  *
  * Reads log lines of the form "date,user" from a socket stream and drops
  * any record whose user appears in a static blacklist, by joining each
  * micro-batch RDD against the blacklist RDD via `DStream.transform`.
  *
  * Usage: optionally pass host and port as program arguments;
  * defaults are "hadoop000" and 6789.
  */
object _22_TransformApp {

  def main(args: Array[String]): Unit = {

    // Optional overrides: args(0) = host, args(1) = port.
    // Defaults preserve the original hard-coded endpoint.
    val host = if (args.length > 0) args(0) else "hadoop000"
    val port = if (args.length > 1) args(1).toInt else 6789

    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("_22_TransformApp")

    // Creating a StreamingContext requires two parameters:
    // a SparkConf and the batch interval.
    val ssc = new StreamingContext(sparkConf, Seconds(5))

    /**
      * Build the blacklist as a pair RDD keyed by user:
      * ("zs", true), ("ls", true)
      */
    val blacks = List("zs", "ls")
    val blacksRDD = ssc.sparkContext.parallelize(blacks).map(user => (user, true))

    // Example input:
    //   20190808,zs   <- blacklisted, dropped
    //   20190808,ls   <- blacklisted, dropped
    //   20190808,ww   <- kept
    val lines = ssc.socketTextStream(host, port)

    val survivors = lines
      // Guard against malformed lines with no user field; without this,
      // split(",")(1) would throw ArrayIndexOutOfBoundsException.
      .filter(_.split(",").length >= 2)
      // Key each record by its user field: "20190808,zs" -> ("zs", "20190808,zs")
      .map(line => (line.split(",")(1), line))
      .transform { rdd =>
        // leftOuterJoin yields (user, (line, Some(true))) for blacklisted
        // users and (user, (line, None)) otherwise; keep only non-blacklisted.
        rdd.leftOuterJoin(blacksRDD)
          .filter { case (_, (_, flag)) => !flag.getOrElse(false) }
          .map { case (_, (line, _)) => line }
      }

    survivors.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
