package com.jinghang.streaming_base

import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Blacklist filtering demo.
  *
  * Sample socket input, one "date,name" record per line:
  *   20190808,zs
  *   20190808,ls
  *   20190808,ww
  */
object _22_TransformApp {

  /**
    * Entry point: streams "date,name" records from a socket and drops any
    * record whose name is on a static blacklist.
    *
    * Each 3-second micro-batch RDD is joined (via `transform`) against a
    * blacklist pair RDD of the form ("zs", true), ("ls", true); records
    * whose name has no blacklist entry survive the filter.
    */
  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")

    /**
      * A StreamingContext needs two things: a SparkConf and a batch interval.
      */
    val ssc = new StreamingContext(sparkConf, Seconds(3))
    ssc.sparkContext.setLogLevel("ERROR")

    val ds1 = ssc.socketTextStream("hadoop000", 6789)

    /**
      * Build the blacklist as a paired RDD: ("zs", true), ("ls", true).
      */
    val blacks = List("zs", "ls")

    val blacksRDD = ssc
      .sparkContext
      .parallelize(blacks)
      .map(name => (name, true))

    val ds2 =
      ds1
        // Key each record by its name field:
        // "20190808,zs" -> ("zs", "20190808,zs")
        // NOTE(review): assumes every line contains a comma; a malformed
        // line would throw ArrayIndexOutOfBoundsException — confirm input.
        .map(line => (line.split(",")(1), line))
        .transform { rdd =>
          // leftOuterJoin keeps every record: blacklisted names carry
          // Some(true) on the right side, clean names carry None, e.g.
          //   ("zs", ("20190808,zs", Some(true)))  -> dropped
          //   ("ww", ("20190808,ww", None))        -> kept
          rdd.leftOuterJoin(blacksRDD)
            .filter { case (_, (_, flagged)) => !flagged.getOrElse(false) }
            .map { case (_, (record, _)) => record }
        }

    ds2.print()

    ssc.start()
    ssc.awaitTermination()
  }
}
