import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.TopicPartition
import scalikejdbc._
import com.typesafe.config.ConfigFactory
import kafka.common.TopicAndPartition
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.apache.spark.SparkContext._
import org.apache.spark.streaming._
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.{Assign, Subscribe}
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

import scala.collection.JavaConverters._

/**
  * Exactly-once semantics for Kafka input, achieved by storing the consumed
  * offsets in the same database transaction as the computed results.
  * The transactional write runs on the driver, once per batch.
  */
object TransactionperBatch {

  def main(args: Array[String]): Unit = {

    val conf = ConfigFactory.load
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> conf.getString("kafka.brokers"),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "transactional-example",
      // Auto-commit is disabled: offsets are committed transactionally with the results.
      "enable.auto.commit" -> (false: java.lang.Boolean),
      // "none": fail fast if the offsets stored in the DB are no longer available in Kafka.
      "auto.offset.reset" -> "none"
    )

    val jdbcDriver = conf.getString("jdbc.driver")
    val jdbcUrl = conf.getString("jdbc.url")
    val jdbcUser = conf.getString("jdbc.user")
    val jdbcPassword = conf.getString("jdbc.password")
    val ssc = setupSsc(kafkaParams, jdbcDriver, jdbcUrl, jdbcUser, jdbcPassword)
    ssc.start()
    ssc.awaitTermination()

  }

  /**
    * Builds the StreamingContext: initializes the JDBC connection pool, reads the
    * starting offsets from `txn_offsets`, creates the Kafka direct stream from those
    * offsets, and wires a per-batch transaction that updates metrics and offsets
    * atomically.
    *
    * @param kaffaParams  Kafka consumer configuration (note: name kept for caller compatibility)
    * @param jdbcDriver   fully-qualified JDBC driver class name
    * @param jdbcUrl      JDBC connection URL
    * @param jdbcUser     database user
    * @param jdbcPassword database password
    * @return the configured (not yet started) StreamingContext
    */
  def setupSsc(
                kaffaParams: Map[String, Object],
                jdbcDriver: String,
                jdbcUrl: String,
                jdbcUser: String,
                jdbcPassword: String
              ): StreamingContext = {

    // Configure the StreamingContext with a 10-second batch interval.
    val sparkConf = new SparkConf().setMaster("local[*]")
      .setAppName(s"${this.getClass.getSimpleName}")
    val ssc = new StreamingContext(sparkConf, Seconds(10))

    // FIX: the four JDBC parameters were previously accepted but never used —
    // without registering the driver and initializing the connection pool,
    // every DB.readOnly / DB.localTx call below would fail.
    Class.forName(jdbcDriver)
    ConnectionPool.singleton(jdbcUrl, jdbcUser, jdbcPassword)

    // Step 1: read the starting offset for each topic-partition from the database.
    val fromOffsets = DB.readOnly { implicit session =>
      sql"select topic, part, off from txn_offsets".
        map { resultSet =>
          new TopicPartition(resultSet.string(1), resultSet.int(2)) -> resultSet.long(3)
        }.list.apply().toMap
    }

    // Step 2: create the Kafka direct stream starting at the stored offsets.
    // IMPORTANT: do NOT transform the stream before foreachRDD — the
    // HasOffsetRanges cast below only succeeds on the RDD produced directly
    // by createDirectStream.
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Assign[String, String](fromOffsets.keys.toList, kaffaParams, fromOffsets)
    )

    // Per-batch transactional write, executed on the driver.
    stream.foreachRDD { rdd =>
      // FIX: previously the stream was mapped to (topic, 1L) BEFORE foreachRDD,
      // so this cast was applied to a MapPartitionsRDD and would throw
      // ClassCastException at runtime. The cast must happen on the raw KafkaRDD.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Simplest possible "metric": a count of messages per topic. The
      // aggregation (map + reduceByKey, including a shuffle) runs on the
      // executors; the small result set is collected back to the driver.
      val results = rdd
        .map(record => (record.topic, 1L))
        .reduceByKey(_ + _)
        .collect()

      // Store metrics AND offsets in a single local transaction so that a
      // failure rolls back both — this is what gives exactly-once semantics.
      DB.localTx { implicit session =>
        // Store the metric results.
        results.foreach { case (topic, metric) =>
          val metricRows =
            sql"""
            UPDATE txn_data set metric = metric + ${metric}
            WHERE  topic = ${topic}
            """.update().apply()
          // Exactly one row must be updated, otherwise the topic is unknown.
          if (metricRows != 1) {
            throw new Exception(
              s"""
                 |Got ${metricRows} rows affected instead of 1 when attempting to update metrics for ${topic}
               """.stripMargin)
          }
        }

        // Store the new offsets. The `and off = fromOffset` guard ensures the
        // stored offset is exactly where this batch started; if not, the batch
        // was already processed (or processed concurrently) and we abort.
        offsetRanges.foreach { osr =>
          val offsetRows =
            sql"""
                update txn_offsets set off = ${osr.untilOffset}
                where topic = ${osr.topic} and part = ${osr.partition}
                and off = ${osr.fromOffset}
             """.update().apply()

          if (offsetRows != 1) {
            throw new Exception(
              s"""
                 |Got $offsetRows rows affected instead of 1 when attempting to update offsets for ${osr.topic} ${osr.partition} ${osr.fromOffset} -> ${osr.untilOffset} was
                 |a partition repeated after a worker failure?
               """.stripMargin)
          }
        }
      }
    }
    ssc
  }
}
