package com.offcn.bigdata.spark.streaming.p1

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, TaskContext}
import scalikejdbc.{ConnectionPool, DB}
import scalikejdbc._


/**
  * 事务{
  *     保存数据
  *     保存offset
  * }
  *
  *
  * 1. 创建测试的mysql数据库
       create database test;
    2. 新建topic： mytopic
       kafka-topics.sh --zookeeper bigdata01:2181/kafka --create --topic mytopic --partitions 3 --replication-factor 1
    3. 建表
       create table mytopic(topic varchar(200), partid int, offset bigint);
       create table mydata(name varchar(200), id int);

       初始化表：
        insert into mytopic(topic, partid, offset) values('mytopic',0,0);
        insert into mytopic(topic, partid, offset) values('mytopic',1,0);
        insert into mytopic(topic, partid, offset) values('mytopic',2,0);
    4. 往mytopic发送数据， 数据格式为 "字符,数字"  比如  abc,3
    5. 在pom文件加入依赖
       <dependency>
          <groupId>org.scalikejdbc</groupId>
          <artifactId>scalikejdbc_2.11</artifactId>
          <version>3.2.0</version>
      </dependency>

  */
object _07KafkaOffsetTransanction {
    /**
      * Exactly-once style Kafka -> MySQL pipeline: the data insert and the
      * consumer-offset update are executed inside one local JDBC transaction
      * per partition, so a failure rolls back both and the next run resumes
      * from the last committed offset stored in the `mytopic` table.
      */
    def main(args: Array[String]): Unit = {
        val sparkConf = new SparkConf().setAppName("test").setMaster("local[2]")

        val processingInterval = 2
        // Must match the topic created in the setup notes above and the rows
        // initialized in the `mytopic` offset table. The previous value
        // "mytopic2" never matched the stored offsets (keyed topic='mytopic'),
        // so offset recovery was silently ineffective.
        val topic = "mytopic"
        // Create direct kafka stream with brokers and topics
        val topics = topic.split(",").toSet
        val kafkaParams = Map[String, String](
            "bootstrap.servers" -> "bigdata01:9092,bigdata02:9092,bigdata03:9092",
            "key.deserializer" -> classOf[StringDeserializer].getName,
            "value.deserializer" -> classOf[StringDeserializer].getName,
            "group.id" -> "spark-kafka-group-2",
            "auto.offset.reset" -> "earliest",
            // Offsets are committed transactionally to MySQL, never to Kafka.
            "enable.auto.commit" -> "false"
        )

        val ssc = new StreamingContext(sparkConf, Seconds(processingInterval))

        val driver = "com.mysql.jdbc.Driver"
        val jdbcUrl = "jdbc:mysql://localhost:3306/test"
        val jdbcUser = "root"
        val jdbcPassword = "sorry"

        // Register the JDBC driver and initialize the scalikejdbc connection pool.
        Class.forName(driver)
        ConnectionPool.singleton(jdbcUrl, jdbcUser, jdbcPassword)

        // Recover the last committed offset per partition from MySQL so the
        // stream resumes exactly where the previous run's transaction ended.
        val fromOffsets: Map[TopicPartition, Long] = DB.readOnly { implicit session =>
            sql"select topic, partid, offset from mytopic"
                .map(r => new TopicPartition(r.string(1), r.int(2)) -> r.long(3))
                .list.apply().toMap
        }

        val messages: DStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
            ssc,
            LocationStrategies.PreferConsistent,
            ConsumerStrategies.Subscribe(topics, kafkaParams, fromOffsets)
        )

        messages.foreachRDD(rdd => {
            if (!rdd.isEmpty()) {
                // The HasOffsetRanges cast must be done on the driver, on the
                // RDD returned directly by createDirectStream (per the Spark
                // Kafka integration guide). Doing it inside foreachPartition
                // captured the RDD itself in the executor closure.
                val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
                rdd.foreachPartition(partition => {
                    // One-to-one mapping between Spark partition id and the
                    // index into offsetRanges (no shuffle has occurred).
                    val pOffsetRange = offsetRanges(TaskContext.get.partitionId)

                    // localTx: the data inserts and the offset update commit
                    // or roll back as a single unit.
                    DB.localTx { implicit session =>
                        partition.foreach(record => {
                            val value = record.value()
                            println("value: " + value)
                            // Expected record format: "name,id" (e.g. "abc,3");
                            // lines without a comma are skipped.
                            if (value.contains(",")) {
                                // 或者使用scalike的batch 插入
                                val fields = value.split(",")
                                val name = fields(0)
                                val id = fields(1)
                                sql"""insert into  mydata(name,id) values (${name},${id})""".execute().apply()
                            }
                        })
//                        val i = 1 / 0   // uncomment to verify both writes roll back together
                        sql"""update mytopic set offset = ${pOffsetRange.untilOffset} where topic = ${pOffsetRange.topic} and partid = ${pOffsetRange.partition}""".update.apply()
                    }
                })
            }
        })
        ssc.start()
        ssc.awaitTermination()
    }
}

