package com.offset

import com.sparkStreaming.project.dao.CourseClickCountDAO
import com.sparkStreaming.project.entity.CourseClickCount
import kafka.common.TopicAndPartition
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import org.apache.spark.SparkConf
import org.apache.spark.streaming.kafka.{HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable.ListBuffer

/*
      业务流程：
        1.创建ssc
        2.从kafka中获取数据   todo 读取offset
        3.根据业务逻辑处理数据
        4.将处理后的数据写到外部存储设备（hdfs，mysql，hbase） todo 存储offset
        5.启动程序等待程序终止

        问题:
             1.多执行：提交数据(存储到hbase上) 112 --> 116      --**-->>    提交offset 112
                 解决：幂等性（执行多次和执行一次结果一样）  或者 事务
            2.少执行：提交offset 112 --> 116    --**-->>    提交数据   116
               解决：只能是事务

       110 -->  110data 110
       120 -->  120data 120
       120 -->  120 return


  */
object OffsetApp03 {

  /**
   * Demo of manual Kafka offset management with the Spark Streaming
   * direct-stream (Kafka 0.8) API.
   *
   * Flow per batch:
   *   1. read records starting from externally-stored offsets,
   *   2. run the business logic,
   *   3. only then persist the batch's offset ranges.
   *
   * Committing offsets AFTER processing gives at-least-once semantics:
   * a crash between steps 2 and 3 replays the batch, so the sink must be
   * idempotent (or the write + offset commit must share one transaction
   * for exactly-once) — see the notes in the file header.
   */
  def main(args: Array[String]): Unit = {

    val conf = new SparkConf()
      // was setAppName("") — an empty name renders blank in the Spark UI
      .setAppName("OffsetApp03")
      .setMaster("local[6]")
      .set("spark.testing.memory", "500000000")

    // 10-second micro-batches.
    val ssc = new StreamingContext(conf, Seconds(10))

    // Kafka consumer parameters for the 0.8 direct-stream API.
    // "smallest" = start from the earliest available offset when no
    // stored offset applies.
    val map = Map[String, String](
      "metadata.broker.list" -> "hadoop000:9092",
      "auto.offset.reset" -> "smallest"
    )

    val topics = "offset_topic".split(",").toSet

    /*
       fromOffsets: the offset to resume from, per topic-partition.
       In production these would be loaded from external storage
       (MySQL / ZooKeeper / Redis / HBase); hard-coded here for the demo.
     */
    val fromOffsets = Map[TopicAndPartition, Long](
      TopicAndPartition("offset_topic", 0) -> 112
    )

    val message =
      if (fromOffsets.isEmpty) { // idiomatic: isEmpty instead of size == 0
        // No saved offsets: start wherever "auto.offset.reset" says.
        KafkaUtils
          .createDirectStream[String, String, StringDecoder, StringDecoder](ssc, map, topics)

      } else {

        // Saved offsets exist: resume exactly where we left off.
        // messageHandler maps each raw Kafka record to the (key, value)
        // pair the downstream logic consumes.
        val messageHandler = (mm: MessageAndMetadata[String, String]) => {
          // NOTE(review): per-record printlns are debug output only —
          // remove before running against real traffic.
          println(mm.topic)
          println(mm.partition)
          println(mm.offset)
          println(mm.key())
          println(mm.message())
          println(mm.keyDecoder)
          println(mm.valueDecoder)

          (mm.key(), mm.message())
        }

        KafkaUtils
          .createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, map, fromOffsets, messageHandler)

      }

    message.foreachRDD(
      rdd => {
        if (!rdd.isEmpty()) {
          // Step 1: business logic runs first...
          println("rddCount: " + rdd.count())

          // TODO: real business logic goes here, e.g.:
          /*rdd.foreachPartition(partitionRecords => {

            val list = new ListBuffer[CourseClickCount]

            partitionRecords.foreach(pair => {
              list.append(CourseClickCount("1", 1))
            })

            //CourseClickCountDAO.save(list)
          })*/

          // Step 2: ...then commit offsets (see the Spark Streaming +
          // Kafka integration guide). The cast is only valid on the RDD
          // produced directly by the direct stream, before any
          // transformation that loses the offset metadata.
          val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

          offsetRanges.foreach(
            o => {
              // In production, persist each range to an external store
              // (MySQL / Redis / ZooKeeper) instead of printing it.
              println(s"${o.topic} ${o.partition} ${o.fromOffset} ${o.untilOffset}")
            }
          )
        }
      }
    )

    ssc.start()
    ssc.awaitTermination()

  }

}
