package sparkstreaming.eighteenthday1.lesson5

import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, ReceiverInputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

/**
  * Demonstrates DStream.transform: dropping down from a DStream to the
  * underlying per-batch RDDs for fine-grained operations (here, blacklist filtering).
  */
object WordBlacklist {

  /**
    * Word-count over a socket text stream ("hadoop01":9999) with blacklist
    * filtering, printed every 2-second micro-batch.
    *
    * Uses DStream.transform to apply an RDD-level filter against a
    * broadcast blacklist on each batch.
    */
  def main(args: Array[String]): Unit = {
    val conf: SparkConf = new SparkConf().setMaster("local[*]").setAppName(s"${this.getClass.getSimpleName}")
    val sc: SparkContext = new SparkContext(conf)
    // 2-second micro-batch interval.
    val ssc: StreamingContext = new StreamingContext(sc, Seconds(2))

    /**
      * Blacklist of words to drop.
      * In production this would typically be loaded from MySQL, HBase or Redis;
      * the literal list below is a stand-in for that external load.
      */
    val blackWords: List[String] = List("?", "*", "!")

    // Broadcast the blacklist as a Set so every executor holds one read-only
    // copy and membership checks are O(1). (The previous version broadcast an
    // Array[(String, Boolean)], then re-parallelized it into an RDD inside
    // transform() on EVERY micro-batch and ran a leftOuterJoin — a full
    // shuffle per batch just to drop a few words, defeating the purpose of
    // the broadcast.)
    val blackListBroadcast: Broadcast[Set[String]] = ssc.sparkContext.broadcast(blackWords.toSet)

    val dstream: ReceiverInputDStream[String] = ssc.socketTextStream("hadoop01", 9999)

    // Split each line into words and pair each word with an initial count of 1.
    val wordOneDStream: DStream[(String, Int)] = dstream.flatMap(_.split(" ")).map((_, 1))

    // transform() exposes each batch as an RDD; filter out blacklisted words
    // executor-side using the broadcast set, then count and print the rest.
    wordOneDStream.transform { rdd =>
      val blackList: Set[String] = blackListBroadcast.value
      rdd.filter { case (word, _) => !blackList.contains(word) }
    }.reduceByKey(_ + _).print()

    ssc.start()
    ssc.awaitTermination() // blocks until the streaming context is stopped
    ssc.stop()             // releases resources if awaitTermination ever returns
  }

}
