package com.bw.sparkstreaming.job6

import org.apache.log4j.{Level, Logger}
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.{SparkConf, SparkContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object BlackWordCount {

  /**
   * Streaming word count that drops "blacklisted" words before counting.
   *
   * Reads comma-separated words from a socket (localhost:9998) in 2-second
   * micro-batches, removes any word present in the blacklist, counts the
   * remaining words per batch, and prints the counts to stdout.
   */
  def main(args: Array[String]): Unit = {
    // Silence Spark's verbose INFO logging so batch output is readable.
    Logger.getLogger("org").setLevel(Level.ERROR)

    // local[2]: one core for the socket receiver, one for batch processing.
    val sparkConf = new SparkConf().setAppName("BlackWordCount").setMaster("local[2]")
    val sc = new SparkContext(sparkConf)
    val ssc = new StreamingContext(sc, Seconds(2))

    val lines = ssc.socketTextStream("localhost", 9998)
    val words = lines.flatMap(_.split(","))
    val wordsDStream: DStream[(String, Int)] = words.map(x => (x, 1))

    /**
     * The blacklist. In a real job this would typically be loaded from an
     * external store such as MySQL or Redis.
     */
    val blackWords = Set("$", "?", "!")
    // Broadcast the blacklist once so every executor holds a local copy.
    // This replaces the original per-batch pattern of re-parallelizing the
    // broadcast value into an RDD and doing a leftOuterJoin — that forced a
    // full shuffle on every micro-batch just to drop a few words, defeating
    // the point of broadcasting. A simple filter against the broadcast Set
    // yields identical (word, 1) pairs with no shuffle.
    val blackWordsBroadcast = ssc.sparkContext.broadcast(blackWords)

    // Keep only the words that are NOT in the broadcast blacklist.
    val filteredDStream: DStream[(String, Int)] = wordsDStream.transform(rdd =>
      rdd.filter { case (word, _) => !blackWordsBroadcast.value.contains(word) }
    )

    val result = filteredDStream.reduceByKey(_ + _)

    result.print()

    ssc.start()
    ssc.awaitTermination()
    // Only reached if the context is stopped elsewhere (awaitTermination
    // blocks); kept as an explicit cleanup step.
    ssc.stop()
  }

}
