package space.xxhui

import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.KafkaUtils
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.{Seconds, StreamingContext}
import space.xxhui.config.StreamingConf
import space.xxhui.service.{SegmentService, WordFreqService}

/**
 * @author HitvzByHui
 *         date 2021-09-05
 */
object Main {

  /**
   * Builds the word-frequency streaming pipeline.
   *
   * Wires a [[StreamingContext]] that subscribes to the Kafka topics from
   * [[StreamingConf]], segments each message via [[SegmentService]], counts
   * occurrences per key within each batch, and persists the counts through
   * [[WordFreqService]].
   *
   * @return a fully configured, not-yet-started StreamingContext
   */
  def functionToCreateContext(): StreamingContext = {
    val sparkConf = new SparkConf().setAppName("WordFreqConsumer").setMaster(StreamingConf.master)
      .set("spark.default.parallelism", StreamingConf.parallelNum)
      .set("spark.streaming.concurrentJobs", StreamingConf.concurrentJobs)
      .set("spark.executor.memory", StreamingConf.executorMem)
      .set("spark.cores.max", StreamingConf.coresMax)
      .set("spark.local.dir", StreamingConf.localDir)
      .set("spark.streaming.kafka.maxRatePerPartition", StreamingConf.perMaxRate)
    val ssc = new StreamingContext(sparkConf, Seconds(StreamingConf.interval))
    //    ssc.checkpoint(Conf.localDir)

    // Create direct kafka stream with brokers and topics
    val topicsSet = StreamingConf.topics.split(",").toSet
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> StreamingConf.brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> StreamingConf.group,
      "auto.offset.reset" -> "latest",
      "enable.auto.commit" -> (false: java.lang.Boolean))
    val kafkaDirectStream = KafkaUtils.createDirectStream(ssc,
      PreferConsistent,
      Subscribe[String, String](topicsSet, kafkaParams))

    // Extract the message value BEFORE caching: ConsumerRecord is not
    // serializable, so persisting the raw stream fails with
    // "Kafka ConsumerRecord is not serializable. Use .map to extract fields
    // before calling .persist or .window".
    val valueStream = kafkaDirectStream.map(record => record.value())
    valueStream.cache()

    // Segment each message into (word, count) pairs.
    // NOTE(review): repartition count of 10 is hard-coded — consider moving
    // it into StreamingConf alongside the other tuning parameters.
    val segmentedStream = valueStream.repartition(10).transform(rdd => {
      rdd.flatMap(record => SegmentService.mapSegment(record.toString))
    })
    // Per-batch counts keyed by entity_timestamp_beeword.
    val countedStream = segmentedStream.reduceByKey(_ + _)

    countedStream.foreachRDD(WordFreqService.save(_))

    ssc
  }

  /**
   * Entry point: builds the streaming context, starts it, and blocks until
   * the computation is terminated.
   */
  def main(args: Array[String]): Unit = {
    //    val ssc = StreamingContext.getOrCreate(Conf.localDir, functionToCreateContext _)
    val ssc = functionToCreateContext()
    // Start the computation
    ssc.start()
    ssc.awaitTermination()
  }

}
