package com.itcast.spark.test

import java.lang
import java.sql.{DriverManager, ResultSet}

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.mutable

/**
 * DESC:
 */
/**
 * Spark Streaming job that consumes a Kafka topic with manually managed
 * offsets: consumer offsets are persisted to MySQL after every non-empty
 * batch and reloaded on startup, so processing resumes where the previous
 * run stopped instead of relying on Kafka's auto-commit.
 */
object kafka02 {

  /** Checkpoint directory used for StreamingContext recovery. */
  private val CKPT_DIR = "./datasets/checkpoint/checkpoint4/"

  def main(args: Array[String]): Unit = {
    // Recover the StreamingContext from the checkpoint directory if one
    // exists, otherwise build a fresh one via the factory below.
    //
    // NOTE: the previous version created a SparkContext/StreamingContext
    // eagerly AND ignored the value returned by getActiveOrCreate; the
    // factory then tried to build a second SparkContext, which fails at
    // runtime (only one active SparkContext is allowed per JVM).  The whole
    // graph — including processData — must be defined inside the factory so
    // it can be restored from the checkpoint on recovery.
    val ssc: StreamingContext = StreamingContext.getActiveOrCreate(CKPT_DIR, () => {
      val conf: SparkConf = new SparkConf().setAppName("SparkStreamingTCPTopK").setMaster("local[*]")
      val sc = new SparkContext(conf)
      sc.setLogLevel("WARN")
      // Micro-batch interval: one batch every 5 seconds.
      val newSsc = new StreamingContext(sc, Seconds(5))
      newSsc.checkpoint(CKPT_DIR)
      // Define the Kafka ingestion graph before returning the context.
      processData(newSsc)
      newSsc
    })
    ssc.start()
    ssc.awaitTermination()
    // stopSparkContext = true, stopGracefully = true
    ssc.stop(true, true)
  }

  /**
   * Wires the Kafka direct-stream graph onto the given StreamingContext.
   *
   * If MySQL holds previously saved offsets for this group/topic the stream
   * resumes from them; otherwise it starts from the latest offsets.  After
   * each non-empty batch the per-partition end offsets are written back to
   * MySQL.
   *
   * @param ssc the StreamingContext to attach the DStream graph to
   */
  def processData(ssc: StreamingContext): Unit = {
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "node01:9092,node02:9092,node03:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "SparkKafkaDemo",
      // Only consulted when no stored offset exists for a partition.
      "auto.offset.reset" -> "latest",
      // Offsets are persisted manually to MySQL, so Kafka auto-commit is off.
      "enable.auto.commit" -> (false: lang.Boolean)
    )

    // Load any offsets persisted by a previous run.
    val offsetMap: mutable.Map[TopicPartition, Long] =
      OffsetUtil.getOffsetMap("SparkKafkaDemo", "kafkatopic")

    val receive: InputDStream[ConsumerRecord[String, String]] =
      if (offsetMap.nonEmpty) {
        // Offsets found in MySQL: resume consumption from them.
        println("如果mysql中有偏移量就从mysql中消费")
        KafkaUtils.createDirectStream[String, String](
          ssc,
          LocationStrategies.PreferConsistent,
          ConsumerStrategies.Subscribe[String, String](Array("kafkatopic"), kafkaParams, offsetMap)
        )
      } else {
        // No stored offsets: start from the latest offsets (auto.offset.reset).
        println("如果mysql中没有偏移量就从最新偏移量消费")
        KafkaUtils.createDirectStream[String, String](
          ssc,
          LocationStrategies.PreferConsistent,
          ConsumerStrategies.Subscribe[String, String](Array("kafkatopic"), kafkaParams)
        )
      }

    receive.foreachRDD { rdd =>
      // isEmpty() short-circuits on the first partition with data; the old
      // rdd.count() > 0 forced a full count of every batch just to test
      // emptiness.
      if (!rdd.isEmpty()) {
        // Log the consumed records (demo-level output).
        rdd.foreach { record =>
          println("topic info is:", record)
          println("topic value is:", record.value())
        }
        // The direct stream's RDDs carry their Kafka offset ranges; the cast
        // is the documented way to retrieve them (must be done on the RDD
        // produced directly by createDirectStream).
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
        for (o <- offsetRanges) {
          println(s"topic:${o.topic} partiton:${o.partition} startoffset:${o.fromOffset} utiloffset:${o.untilOffset}")
        }
        // Persist the end offsets of this batch to MySQL (instead of
        // committing them back to Kafka via CanCommitOffsets).
        OffsetUtil.saveOffsetRanges("SparkKafkaDemo", offsetRanges)
      }
    }
  }

  /** JDBC-backed persistence for Kafka consumer offsets (table t_offset). */
  object OffsetUtil {

    private val JdbcUrl = "jdbc:mysql://node01:3306/bigdata?characterEncoding=UTF-8"
    private val JdbcUser = "root"
    private val JdbcPassword = "123456"

    /**
     * Reads the stored offsets for the given consumer group and topic.
     *
     * @param groupid Kafka consumer group id
     * @param topic   topic name
     * @return a mutable map of TopicPartition -> offset; empty when no rows
     *         exist for this group/topic
     */
    def getOffsetMap(groupid: String, topic: String): mutable.Map[TopicPartition, Long] = {
      val connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
      // try/finally guarantees the connection/statement/result set are
      // released even when the query throws (the old code leaked them on
      // any SQLException).
      try {
        val pstmt = connection.prepareStatement("select * from t_offset where groupid=? and topic=?")
        try {
          pstmt.setString(1, groupid)
          pstmt.setString(2, topic)
          val rs: ResultSet = pstmt.executeQuery()
          try {
            val offsetMap = mutable.Map[TopicPartition, Long]()
            while (rs.next()) {
              offsetMap += new TopicPartition(rs.getString("topic"), rs.getInt("partition")) -> rs.getLong("offset")
            }
            offsetMap
          } finally rs.close()
        } finally pstmt.close()
      } finally connection.close()
    }

    /**
     * Upserts the end offsets of a batch into MySQL.
     *
     * @param groupid     Kafka consumer group id
     * @param offsetRange per-partition offset ranges of the processed batch;
     *                    untilOffset (the exclusive end) is what gets stored
     */
    def saveOffsetRanges(groupid: String, offsetRange: Array[OffsetRange]): Unit = {
      val connection = DriverManager.getConnection(JdbcUrl, JdbcUser, JdbcPassword)
      try {
        // REPLACE INTO acts as an upsert keyed on (topic, partition, groupid).
        val pstmt = connection.prepareStatement("replace into t_offset (`topic`, `partition`, `groupid`, `offset`) values(?,?,?,?)")
        try {
          for (o <- offsetRange) {
            pstmt.setString(1, o.topic)
            pstmt.setInt(2, o.partition)
            pstmt.setString(3, groupid)
            pstmt.setLong(4, o.untilOffset)
            pstmt.executeUpdate()
          }
        } finally pstmt.close()
      } finally connection.close()
    }
  }

}
