package com.example

import org.apache.spark.SparkConf
import org.apache.spark.broadcast.Broadcast
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.ConstantInputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Demonstrates three ways to filter a DStream of click records against a
 * word blacklist: RDD leftOuterJoin (method1), a DataFrame left join
 * (method2), and a broadcast set with a plain filter (method3).
 *
 * Note: a StreamingContext can be started only once per JVM, so exactly one
 * of method1/method2/method3 may be run in a single process.
 */
object BlackListFilterTest {

  // `val`, not `var`: `ssc` is constructed from this conf during object
  // initialization, so reassigning it afterwards would silently do nothing.
  val conf: SparkConf = new SparkConf()
    .setAppName(this.getClass.getCanonicalName)
    .setMaster("local[*]")

  // Streaming context with a 10-second batch interval.
  val ssc = new StreamingContext(conf, Seconds(10))

  def main(args: Array[String]): Unit = {
    ssc.sparkContext.setLogLevel("WARN")
    method1()
  }

  /**
   * Blacklist filtering via `leftOuterJoin`: keeps every stream record and
   * drops those whose join flag is `Some(true)`. Blocks forever in
   * `awaitTermination()`.
   */
  def method1(): Unit = {
    // Blacklist data: (word, isBlacklisted).
    val blackList = Array(("spark", true), ("scala", true))
    val blackListRDD = ssc.sparkContext.makeRDD(blackList)

    // Build a test DStream from a constant RDD of lines shaped "idx word".
    val strArray: Array[String] = "spark java scala hadoop kafka hive hbase zookeeper"
      .split("\\s+")
      .zipWithIndex
      .map { case (word, idx) => s"$idx $word" }
    val rdd = ssc.sparkContext.makeRDD(strArray)
    val clickStream = new ConstantInputDStream(ssc, rdd)

    // Key each line by its word (second token) so it can be joined
    // against the blacklist.
    val clickStreamFormatted = clickStream.map(value =>
      (value.split(" ")(1), value))
    clickStreamFormatted.transform { clickRDD =>
      // leftOuterJoin preserves every record of the stream-side RDD and
      // tags it with Option[Boolean]: Some(true) when the word is blacklisted,
      // None when it is absent from the blacklist.
      val joinedBlackListRDD: RDD[(String, (String, Option[Boolean]))] =
        clickRDD.leftOuterJoin(blackListRDD)
      joinedBlackListRDD
        // Keep records that are not flagged as blacklisted.
        .filter { case (_, (_, flag)) => !flag.getOrElse(false) }
        .map { case (_, (streamingLine, _)) => streamingLine }
    }.print()

    // Start the streaming job; awaitTermination blocks until stopped.
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Blacklist filtering via Spark SQL: converts both sides to DataFrames,
   * performs a left join on "word", and keeps rows whose flag is null or
   * false. Blocks forever in `awaitTermination()`.
   */
  def method2(): Unit = {
    // Blacklist data: (word, isBlacklisted).
    val blackList = Array(("spark", true), ("scala", true))
    val blackListRDD = ssc.sparkContext.makeRDD(blackList)

    // Build a test DStream from a constant RDD of lines shaped "idx word".
    val strArray: Array[String] = "spark java scala hadoop kafka hive hbase zookeeper"
      .split("\\s+")
      .zipWithIndex
      .map { case (word, idx) => s"$idx $word" }
    val rdd = ssc.sparkContext.makeRDD(strArray)
    val clickStream = new ConstantInputDStream(ssc, rdd)

    // Key each line by its word (second token).
    val clickStreamFormatted = clickStream.map(value =>
      (value.split(" ")(1), value))
    clickStreamFormatted.transform { clickRDD =>
      // getOrCreate reuses the session after the first micro-batch.
      val spark = SparkSession
        .builder()
        .config(rdd.sparkContext.getConf)
        .getOrCreate()
      import spark.implicits._
      val clickDF: DataFrame = clickRDD.toDF("word", "line")
      val blackDF: DataFrame = blackListRDD.toDF("word", "flag")
      // Left join keeps all click rows; flag is null for non-blacklisted words.
      clickDF.join(blackDF, Seq("word"), "left")
        .filter("flag is null or flag = false")
        .select("line")
        .rdd
    }.print()

    // Start the streaming job; awaitTermination blocks until stopped.
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Blacklist filtering via a broadcast variable: broadcasts only the
   * blacklisted words and drops matching records with a plain filter —
   * the cheapest of the three approaches (no shuffle/join). Blocks forever
   * in `awaitTermination()`.
   */
  def method3(): Unit = {
    // Broadcast only the words whose flag is true.
    val blackList = Array(("spark", true), ("scala", true))
    val blackListBC: Broadcast[Array[String]] =
      ssc.sparkContext.broadcast(blackList.filter(_._2).map(_._1))

    // Build a test DStream from a constant RDD of lines shaped "idx word".
    val strArray: Array[String] = "spark java scala hadoop kafka hive hbase zookeeper"
      .split("\\s+")
      .zipWithIndex
      .map { case (word, idx) => s"$idx $word" }
    val rdd = ssc.sparkContext.makeRDD(strArray)
    val clickStream = new ConstantInputDStream(ssc, rdd)

    // Drop any record whose word appears in the broadcast blacklist.
    clickStream.map(value => (value.split(" ")(1), value))
      .filter { case (word, _) =>
        !blackListBC.value.contains(word)
      }
      .map(_._2)
      .print()

    // Start the streaming job; awaitTermination blocks until stopped.
    ssc.start()
    ssc.awaitTermination()
  }

}
