package day09.producer_and_consumer

import org.apache.spark.SparkConf
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Minutes, Seconds, StreamingContext}

object consumerDemo {
  /**
    * Simulates a Kafka consumer that reads the producer's data via the
    * receiver-based API (`KafkaUtils.createStream`).
    *
    * Steps:
    *   (0) Set HADOOP_USER_NAME, otherwise checkpointing to HDFS fails on permissions.
    *   (1) Unpack args: zkQuorum (ZK broker list), group (consumer group id),
    *       topics (comma-separated topic names), numThreads (consumer threads per topic).
    *   (2) Build the StreamingContext, set the checkpoint dir, and read the stream
    *       with the receiver-based API, keeping only the message value (`_._2`).
    *   (3) Run a windowed word count and print each batch.
    */
  def main(args: Array[String]): Unit = {
    // Without this the HDFS checkpoint write is rejected for lack of permission.
    System.setProperty("HADOOP_USER_NAME", "root")

    // Example args: master:2181,slave1:2181,slave2:2181 group01 mykafkawc 2
    if (args.length < 4) {
      System.err.println("Usage: KafkaWordCount <zkQuorum> <group> <topics> <numThreads>")
      System.exit(1)
    }
    // The guard above allows MORE than 4 args; destructuring the raw array would
    // then throw a MatchError, so only the first four are taken. Parsing args
    // before building the context also fails fast on a bad numThreads value.
    val Array(zkQuorum, group, topics, numThreads) = args.take(4)
    val threadsPerTopic = numThreads.toInt

    val conf: SparkConf = new SparkConf().setAppName("zz").setMaster("local[2]")
    val ssc = new StreamingContext(conf, Seconds(2))
    // A checkpoint dir is mandatory: reduceByKeyAndWindow with an inverse
    // function keeps window state across batches.
    ssc.checkpoint("hdfs://master:9000/cp-2020-7-17")

    // topic -> receiver thread count, one entry per comma-separated topic.
    val topicMap: Map[String, Int] = topics.split(",").map((_, threadsPerTopic)).toMap
    // Receiver-based stream yields (key, message); keep the message only.
    val lines: DStream[String] =
      KafkaUtils.createStream(ssc, zkQuorum, group, topicMap, StorageLevel.MEMORY_AND_DISK).map(_._2)
    // Incremental 10-minute window sliding every 2 seconds: `_+_` adds the new
    // slice, `_-_` subtracts the slice that fell out of the window.
    val wordCounts: DStream[(String, Long)] =
      lines.flatMap(_.split(" "))
        .map((_, 1L))
        .reduceByKeyAndWindow(_ + _, _ - _, Minutes(10), Seconds(2), 2)

    wordCounts.print()
    ssc.start()
    ssc.awaitTermination()
  }
}
