package com.zhaosc.spark.stream

import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.Seconds

object TransformOperator {

  /** Demonstrates `DStream.transform`: pulls the underlying RDD out of each
    * micro-batch so arbitrary RDD operators can be applied — here, filtering
    * out messages from blacklisted users before printing the survivors.
    *
    * Expected Kafka record value format (simplified log line): "date username".
    */
  def main(args: Array[String]): Unit = {
    val conf = new SparkConf()
      .setMaster("local[2]")
      .setAppName("NetworkWordCount")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    // Micro-batch interval: process accumulated messages every 5 seconds.
    val ssc = new StreamingContext(conf, Seconds(5))

    // Map of topic name -> number of receiver threads for that topic.
    val topic = Map("test" -> 1)
    // Receiver-based Kafka stream: (ZooKeeper quorum, consumer group, topics, storage level).
    // Each element is a (key, value) pair; the value is the log line.
    val kafkaStream = KafkaUtils.createStream(ssc, "localhost:2181", "MyFirstConsumerGroup", topic, StorageLevel.MEMORY_AND_DISK)

    // Simulated blacklist of usernames. Broadcast it once so every executor
    // holds a single read-only copy instead of re-shipping it with each task.
    val blacklist = List("tom", "zsc", "zhaosc", "zhaoshichao")
    val blackListBroadCast = ssc.sparkContext.broadcast(blacklist)

    // Re-key each record by username — the second whitespace-separated field
    // of the value ("date username") — keeping the full line as the value.
    // NOTE(review): throws if a line has fewer than two fields — confirm input is well-formed.
    val dstream = kafkaStream.map(v => (v._2.split(" ")(1), v._2))

    // transform exposes the RDD of each batch, letting us use plain RDD
    // operators: drop blacklisted users, then keep only the original line.
    val result = dstream.transform { rdd =>
      rdd
        .filter { case (user, _) => !blackListBroadCast.value.contains(user) }
        .map(_._2)
    }

    result.print()
    ssc.start()
    ssc.awaitTermination()
  }
}