package cn.darksoul3.spark.streaming

import java.lang
import java.sql.Connection

import scala.util.control.NonFatal

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}

object WCWithStreamingByKafka {


  /**
   * Loads the last stored Kafka offsets (per topic-partition) from MySQL so a
   * restarted stream can resume where the previous run left off.
   *
   * TODO: not implemented yet — query the offsets table and build the map.
   *
   * @return the stored offset for each topic-partition; empty when nothing has
   *         been stored, which makes the direct stream fall back to
   *         `auto.offset.reset`. (The previous stub returned `null`, which
   *         would NPE when passed to `ConsumerStrategies.Subscribe`.)
   */
  def readHistoryOffsetFromMysql(): Map[TopicPartition, Long] = {
    Map.empty[TopicPartition, Long]
  }

  /**
   * Entry point: a 5-second-batch word count over Kafka topics, writing results
   * to MySQL in a transaction. Auto-commit of Kafka offsets is disabled so that
   * offsets can (eventually) be stored transactionally with the results.
   */
  def main(args: Array[String]): Unit = {

    // local[*] is for local development; override the master on spark-submit.
    val conf = new SparkConf().setAppName("WCWithStreaming").setMaster("local[*]")
    val ssc = new StreamingContext(conf, Seconds(5))
    ssc.sparkContext.setLogLevel("WARN")

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "localhost:9092,anotherhost:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "use_a_separate_group_id_for_each_stream",
      // Only consulted when no starting offset is supplied for a partition.
      "auto.offset.reset" -> "latest",
      // Offsets are managed manually (see the transaction below).
      "enable.auto.commit" -> (false: lang.Boolean)
    )

    val topics = Array("topicA", "topicB")
    // Guard against a null result from the (still unimplemented) offset store;
    // Subscribe would NPE on a null map. An empty map means "use auto.offset.reset".
    val historyOffsets: Map[TopicPartition, Long] =
      Option(readHistoryOffsetFromMysql()).getOrElse(Map.empty)

    val kafka: InputDStream[ConsumerRecord[String, String]] =
      KafkaUtils.createDirectStream(
        ssc,
        LocationStrategies.PreferConsistent,
        ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, historyOffsets)
      )

    kafka.foreachRDD(rdd => {
      if (!rdd.isEmpty()) {
        // Must be read before any transformation, while rdd is still the KafkaRDD.
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        val reduced = rdd.map(_.value())
          .flatMap(_.split("\\s+"))
          .map((_, 1))
          .reduceByKey(_ + _)

        // Per-batch word counts are brought to the driver so they can be
        // written to MySQL in a single transaction.
        val results = reduced.collect()

        var connection: Connection = null
        try {
          connection = DruidPool.getConnection
          connection.setAutoCommit(false)

          for (elem <- results) {
            //insert table
          }
          // TODO: persist offsetRanges into MySQL inside this same transaction,
          // so readHistoryOffsetFromMysql() can resume exactly-once on restart.
          // Right now the offsets are captured but never stored anywhere.

          connection.commit()
        } catch {
          case NonFatal(e) =>
            e.printStackTrace()
            // getConnection itself may have failed, leaving connection null —
            // the previous code would NPE here and mask the original exception.
            if (connection != null) connection.rollback()
            // Stop the application so the failed batch is not silently skipped.
            ssc.stop(stopSparkContext = true, stopGracefully = true)
        } finally {
          if (connection != null) connection.close()
        }
      }
    })

    //compute(kafka)
    //computer2(kafka)

    ssc.start()
    ssc.awaitTermination()
  }


  /**
   * Alternative (currently unused, see commented call in main) word count that
   * keeps running totals with updateStateByKey and commits Kafka offsets
   * asynchronously after each batch.
   *
   * NOTE(review): updateStateByKey requires a checkpoint directory
   * (ssc.checkpoint(...)), which is not configured anywhere in this file —
   * presumably this would fail at runtime as written; confirm before enabling.
   */
  private def computer2(kafka: InputDStream[ConsumerRecord[String, String]]): Unit = {
    // Driver-side mutable slot shared across batches: transform() runs on the
    // driver at job-generation time, so this holds the ranges of the most
    // recently *generated* batch, not necessarily the one whose job just ran.
    // NOTE(review): if batches can queue up, a newer batch may overwrite this
    // before the previous batch's offsets are committed — TODO confirm.
    var offset: Array[OffsetRange] = null
    val kafkaWithOffset = kafka.transform(rdd => {
      // Capture offset ranges while the RDD is still the KafkaRDD produced by
      // the direct stream (later transformations lose HasOffsetRanges).
      offset = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    })

    // Tokenize each record value on whitespace and pair every word with 1.
    val word = kafkaWithOffset.map(_.value())
      .flatMap(_.split("\\s+"))
      .map((_, 1))

    // Running total per word: new batch counts (s) added to prior state (o).
    val reduced = word.updateStateByKey((s: Seq[Int], o: Option[Int]) => {
      Some(s.sum + o.getOrElse(0))
    })

    reduced.foreachRDD(rdd => {

      // Placeholder sink: partitions are iterated but nothing is written yet.
      rdd.foreachPartition(it => {

      })

      // Commit the captured ranges back to Kafka (at-least-once at best; the
      // commit happens even though the sink above does no real work yet).
      kafka.asInstanceOf[CanCommitOffsets].commitAsync(offset)

    })
  }


  /**
   * Alternative (currently unused, see commented call in main) per-batch word
   * count: prints each batch's counts and then commits the batch's Kafka
   * offsets asynchronously — at-least-once, since the commit follows processing.
   */
  private def compute(kafka: InputDStream[ConsumerRecord[String, String]]): Unit = {
    kafka.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Capture the offset ranges first, while the RDD is still the KafkaRDD
        // produced by the direct stream (transformations lose HasOffsetRanges).
        val ranges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        val values = rdd.map(record => record.value())
        val pairs = values.flatMap(line => line.split("\\s+")).map(token => (token, 1))
        val counts = pairs.reduceByKey((a, b) => a + b)
        // Printed on the executors, not the driver.
        counts.foreach(println)

        // Commit only after the batch has been processed.
        kafka.asInstanceOf[CanCommitOffsets].commitAsync(ranges)
      }
    }
  }
}
