package com.bd03.streaminglearn.day0404

import java.sql.DriverManager

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.{SparkConf, TaskContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable

object StreamingOffsetManager2 {
  /**
   * Spark Streaming + Kafka manual offset management demo.
   *
   * Committed offsets are persisted externally in a MySQL table
   * `sparkoffset`, keyed by (groupid, topic, partition) -> offset.
   * On startup the previously committed offsets are read back so
   * consumption resumes exactly where the last run stopped, instead of
   * relying on Kafka's automatic commit.
   */

  // JDBC settings for the offset store, shared by the driver-side read
  // and the executor-side write (kept in one place for consistency).
  private val JdbcUrl =
    "jdbc:mysql://hdp03:3306/spark?useUnicode=true&characterEncoding=utf-8"
  private val JdbcUser     = "root"
  private val JdbcPassword = "root"

  def main(args: Array[String]): Unit = {
    // Silence Spark's verbose INFO logging; keep warnings and errors.
    Logger.getLogger("org").setLevel(Level.WARN)

    val conf = new SparkConf()
      .setMaster("local[*]")
      .setAppName(this.getClass.getSimpleName)
      // Use Kryo instead of the default Java serializer (faster, smaller).
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      // If enabled, Spark shuts the StreamingContext down gracefully on JVM
      // shutdown (finishing in-flight batches) instead of stopping at once.
      // .set("spark.streaming.stopGracefullyOnShutdown","true")
      // Upper bound on records pulled per Kafka partition per second.
      .set("spark.streaming.kafka.maxRatePerPartition", "100000000")

    val ssc = new StreamingContext(conf, Seconds(2))
    val groupId = "streaming_consumer4"
    val topic = List("countip")

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "hdp01:9092,hdp02:9092,hdp03:9092,hdp04:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> groupId,
      // Only used when no stored offset exists (first run): start from the
      // beginning so no data is missed.
      "auto.offset.reset" -> "earliest",
      // Offsets are managed by us (in MySQL), not auto-committed by Kafka —
      // otherwise a crash between commit and processing could lose data.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Offsets recovered from MySQL. An empty map (first run / empty table)
    // makes the consumer fall back to "auto.offset.reset".
    // This read happens on the driver only; executors are not involved.
    val offsets = new mutable.HashMap[TopicPartition, Long]()
    val sqlSelect = "select * from sparkoffset where topic = ? and groupid= ?"
    val conn = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
    try {
      val psSt = conn.prepareStatement(sqlSelect)
      try {
        psSt.setString(1, topic.head)
        psSt.setString(2, groupId)
        val set = psSt.executeQuery()
        while (set.next()) {
          val topicSearch = set.getString("topic")
          // NOTE(review): column name "parition" looks misspelled
          // ("partition"?) — confirm it matches the actual table schema; the
          // positional `replace into` below cannot tell us the real name.
          val partitionSearch = set.getInt("parition")
          val offsetSearch = set.getLong("offset")
          offsets += new TopicPartition(topicSearch, partitionSearch) -> offsetSearch
        }
      } finally {
        psSt.close() // also releases the ResultSet
      }
    } finally {
      conn.close() // always release the driver-side connection
    }

    val stream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[String, String](topic, kafkaParams, offsets)
    )

    stream.foreachRDD(rdd => {
      // Only the original, untransformed Kafka RDD can be cast to
      // HasOffsetRanges — any transformation loses this capability.
      val ranges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      // The direct stream's RDD partitions map 1:1 to Kafka topic partitions,
      // so each task can persist exactly its own partition's offset range.
      // NOTE(review): this job only persists offsets; the records themselves
      // are not processed here (the iterator is intentionally unused).
      rdd.foreachPartition(_ => {
        // REPLACE inserts a new row, or overwrites the row with the same
        // (possibly composite) primary key.
        val sql = "replace into sparkoffset values(?,?,?,?) "
        val conn = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
        try {
          // Disable auto-commit so the offset write is committed explicitly.
          conn.setAutoCommit(false)
          val ps = conn.prepareStatement(sql)
          try {
            // This task's partition index equals the Kafka partition index,
            // and its range sits at the same position in `ranges`.
            val partition = TaskContext.get().partitionId()
            val range = ranges(partition)
            ps.setString(1, groupId)
            ps.setString(2, topic.head)
            ps.setInt(3, partition)
            ps.setLong(4, range.untilOffset)
            ps.executeUpdate()
            conn.commit() // manual commit
          } finally {
            ps.close()
          }
        } finally {
          conn.close() // always release, even when the update/commit fails
        }
      })
    })

    stream.print()

    ssc.start()
    ssc.awaitTermination()
  }

}
