import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.kafka.common.TopicPartition
import scalikejdbc._
import com.typesafe.config.ConfigFactory
import kafka.common.TopicAndPartition
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.apache.spark.SparkContext._
import org.apache.spark.streaming._
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, KafkaUtils, OffsetRange}
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Assign
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent

import scala.collection.JavaConverters._

/**
  * Kafka exactly-once semantics example.
  *
  * Exactly-once delivery is achieved by committing the processed result and
  * the consumed offset range together in a single database transaction.
  * The transaction runs on the executors, once per Kafka partition.
  */

object TransactionalPerpartition {
  def main(args: Array[String]): Unit = {

    val conf = ConfigFactory.load
    // Auto-commit is disabled: offsets are committed transactionally to the
    // database together with the results, never to Kafka itself.
    // "auto.offset.reset" -> "none" fails fast if the stored offsets are out
    // of range, instead of silently re-reading or skipping data.
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> conf.getString("kafka.brokers"),
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "transactional-example",
      "enable.auto.commit" -> (false: java.lang.Boolean),
      "auto.offset.reset" -> "none"
    )
    val jdbcDriver = conf.getString("jdbc.driver")
    val jdbcUrl = conf.getString("jdbc.url")
    val jdbcUser = conf.getString("jdbc.user")
    val jdbcPassword = conf.getString("jdbc.password")

    val ssc = setupSsc(kafkaParams, jdbcDriver, jdbcUrl, jdbcUser, jdbcPassword)()
    ssc.start()
    ssc.awaitTermination()
  }

  /**
    * Builds the StreamingContext: loads the starting offsets from the
    * database, creates a direct Kafka stream pinned to those offsets, and
    * registers the per-partition transactional update of result + offset.
    *
    * @param kafkaparams  Kafka consumer configuration
    * @param jdbcDriver   JDBC driver class name
    * @param jdbcUrl      JDBC connection URL
    * @param jdbcUser     database user
    * @param jdbcPassword database password
    * @return the configured (not yet started) StreamingContext
    */
  def setupSsc(
                kafkaparams: Map[String, Object],
                jdbcDriver: String,
                jdbcUrl: String,
                jdbcUser: String,
                jdbcPassword: String
              )(): StreamingContext = {

    // Set up the StreamingContext.
    val sparkConf = new SparkConf().setMaster("local[*]").setAppName("test")
    val ssc = new StreamingContext(sparkConf, Seconds(60))

    // FIX: the JDBC connection pool must be initialized on the DRIVER before
    // DB.readOnly below can run; previously SetupJdbc was only called inside
    // foreachPartition (executor side), so loading the initial offsets failed.
    SetupJdbc(jdbcDriver, jdbcUrl, jdbcUser, jdbcPassword)

    // Load the starting offsets from the database.
    // TODO: if the table is empty, fall back to "earliest" instead of
    // failing (auto.offset.reset is "none", so an empty Assign cannot recover).
    val fromdbOffsets = DB.readOnly { implicit session =>
      sql"select topic, part, off from txn_offsets".
        map { resultSet =>
          new TopicPartition(resultSet.string(1), resultSet.int(2)) -> resultSet.long(3)
        }.list.apply().toMap
    }

    // Create a direct stream pinned to exactly the partitions/offsets that
    // were recovered from the database.
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Assign[String, String](fromdbOffsets.keys.toList, kafkaparams, fromdbOffsets)
    )

    stream.foreachRDD { rdd =>
      // Offset ranges are only available on the driver; they are captured
      // here and serialized into the per-partition closure below.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      rdd.foreachPartition { iter =>
        // Initialize the JDBC connection pool on this executor (idempotent).
        SetupJdbc(jdbcDriver, jdbcUrl, jdbcUser, jdbcPassword)
        // For a direct stream, RDD partition i corresponds to offsetRanges(i),
        // so the active task's partition id selects this partition's range.
        val osr = offsetRanges(TaskContext.getPartitionId())

        // Business logic (placeholder): count the records in this partition.
        // NOTE: this consumes the iterator.
        val metric = iter.size

        // Commit the result and the offset in ONE transaction: either both
        // succeed or both roll back, which yields exactly-once semantics.
        DB.localTx { implicit session =>
          val metricRows =
            sql"""
                 update txn_data set metric = metric + ${metric}
                 where topic = ${osr.topic}
               """.update.apply()

          // Anything other than exactly one affected row aborts (and thus
          // rolls back) the whole transaction.
          if (metricRows != 1) {
            throw new Exception(
              s"""
Got $metricRows rows affected instead of 1 when attempting to update metrics for
 ${osr.topic} ${osr.partition} ${osr.fromOffset} -> ${osr.untilOffset}
""")
          }

          // Store the new offset. The WHERE clause includes the expected
          // fromOffset: if another attempt already advanced this partition's
          // offset (e.g. after a worker failure), 0 rows match and the whole
          // transaction — including the metric update — rolls back, so the
          // batch is never double-counted.
          val offsetRows =
            sql"""
update txn_offsets set off = ${osr.untilOffset}
  where topic = ${osr.topic} and part = ${osr.partition} and off = ${osr.fromOffset}
""".update.apply()
          if (offsetRows != 1) {
            throw new Exception(
              s"""
Got $offsetRows rows affected instead of 1 when attempting to update offsets for
 ${osr.topic} ${osr.partition} ${osr.fromOffset} -> ${osr.untilOffset}
Was a partition repeated after a worker failure?
""")
          }
        }
      }

    }

    ssc

  }
}
