package kafka_day02

import java.sql.{Connection, DriverManager, PreparedStatement}

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * Spark Streaming word-count demo that manages Kafka consumer offsets manually
 * in MySQL, so that the aggregated results and the consumed offsets are written
 * in a single MySQL transaction (exactly-once for the stored aggregates).
 *
 * Expected args:
 *   args(0) = isLocal ("true"/"false") — run with local[*] master
 *   args(1) = appName
 *   args(2) = groupId  — Kafka consumer group, also part of the offset-table key
 *   args(3) = comma-separated topic list
 */
object Mysqloffsetkafkademo {

  def main(args: Array[String]): Unit = {

    // Parse command-line arguments.
    val isLocal = args(0).toBoolean

    val appName = args(1)

    val groupId = args(2)

    val topics = args(3).split(",")

    val conf = new SparkConf().setAppName(appName)

    // Key under which this app's offsets are stored in MySQL (t_kafka_offset.app_gid).
    val appNameAndGroupId = appName + "_" + groupId

    if (isLocal) {
      conf.setMaster("local[*]")
    }

    val ssc = new StreamingContext(conf, Seconds(5))

    // Kafka consumer configuration.
    val kafkaParams = Map[String, String](
      "bootstrap.servers" -> "doit01:9092,doit02:9092,doit03:9092",
      // Deserializers for keys and values.
      "key.deserializer" -> classOf[StringDeserializer].getName,
      "value.deserializer" -> classOf[StringDeserializer].getName,
      // Use the group id from the arguments; it must match the key the offsets
      // are stored under, otherwise recovery reads the wrong rows.
      "group.id" -> groupId,
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> "false" // offsets are managed manually in MySQL
    )

    // Before creating the Kafka DStream, load historical offsets from MySQL:
    // resume from them if present, otherwise start from the beginning.
    val offsets: Map[TopicPartition, Long] = MyUtils.queryHistoryOffetFromMySQL(appNameAndGroupId)

    // Create the direct-stream DStream integrated with Kafka (new API, more efficient).
    val kafkaDStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc,
      // Prefer scheduling tasks on executors co-located with Kafka brokers.
      LocationStrategies.PreferConsistent,
      // Subscribe to the topics, seeding the consumer with the stored offsets.
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsets)
    )

    // Only the first-hand KafkaRDD carries offset information; any transformation
    // loses it. Offsets must therefore be extracted inside foreachRDD on the
    // DStream returned by KafkaUtils.createDirectStream.
    kafkaDStream.foreachRDD(kafkaRDD => {
      // Skip empty micro-batches.
      if (!kafkaRDD.isEmpty()) {

        // The RDD obtained directly from the Kafka DStream is a KafkaRDD;
        // cast to HasOffsetRanges to read the per-partition offset ranges.
        val offsetRanges = kafkaRDD.asInstanceOf[HasOffsetRanges].offsetRanges

        // Word-count logic for this batch.
        val reduced: RDD[(String, Int)] = kafkaRDD.map(_.value()).flatMap(_.split(" ")).map((_, 1)).reduceByKey(_ + _)
        // The aggregated data must NOT be written via rdd.foreachPartition: multiple
        // connections cannot share one transaction. Instead, collect the (small)
        // aggregate to the driver and write it together with the offsets in a
        // single MySQL transaction.
        val result: Array[(String, Int)] = reduced.collect()

        var conn: Connection = null
        var ps1: PreparedStatement = null
        var ps2: PreparedStatement = null
        try {
          conn = DriverManager.getConnection("jdbc:mysql://localhost:3306/guanxi?characterEncoding=utf-8", "root", "123456")
          // Start the transaction.
          conn.setAutoCommit(false)
          // MySQL upsert (ON DUPLICATE KEY UPDATE): the table must declare a primary key.
          ps1 = conn.prepareStatement("INSERT INTO t_wordcount (word, counts) VALUES (?, ?)  ON DUPLICATE KEY UPDATE counts = counts + ?")

          // Accumulate this batch's counts into the running totals.
          for (tp <- result) {
            ps1.setString(1, tp._1)
            ps1.setInt(2, tp._2)
            ps1.setInt(3, tp._2)
            ps1.executeUpdate()
          }

          // MySQL upsert for the offset table (primary key on app_gid + topic_partition assumed).
          ps2 = conn.prepareStatement("INSERT INTO t_kafka_offset (app_gid, topic_partition, offset) VALUES (?, ?, ?) ON DUPLICATE KEY UPDATE offset = ?")

          // Persist the end offset of every partition consumed in this batch.
          for (offsetRange <- offsetRanges) {
            val topic = offsetRange.topic
            val partition = offsetRange.partition
            // End (exclusive) offset of this batch; the next run resumes here.
            val untilOffset = offsetRange.untilOffset
            ps2.setString(1, appNameAndGroupId)
            ps2.setString(2, topic + "_" + partition)
            ps2.setLong(3, untilOffset)
            ps2.setLong(4, untilOffset)
            ps2.executeUpdate()
          }
          // Commit counts and offsets atomically.
          conn.commit()
        } catch {
          case e: Exception => {
            // Surface the failure cause before shutting down.
            e.printStackTrace()
            // Roll back the transaction — but only if the connection was opened
            // (getConnection itself may have been what threw).
            if (conn != null) {
              conn.rollback()
            }
            // Stop the streaming application so the batch is not marked successful.
            ssc.stop()
          }
        } finally {
          if (ps1 != null) {
            ps1.close()
          }
          if (ps2 != null) {
            ps2.close()
          }
          if (conn != null) {
            conn.close()
          }
        }
      }

    })

    ssc.start()

    ssc.awaitTermination()

  }

}


