package com.fwmagic.spark.streaming

import com.fwmagic.spark.streaming.util.{DBUtils, OffsetRangesUtils}
import java.sql.Connection
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import scala.util.control.NonFatal

/**
  * Database: MySQL
  * Purpose: Spark Streaming word count with exactly-once semantics, sinking
  * both results and Kafka offsets to MySQL.
  *
  * Approach:
  *     1. Use the Kafka direct-stream API so each batch exposes both the
  *        records and their offset ranges.
  *     2. Control the MySQL transaction manually: the result update and the
  *        offset update are committed together in a single transaction.
  *     3. Run the word-count business logic per batch.
  *
  * DDL:
  * CREATE TABLE `t_kafka_offset` (
  * `app_gid` varchar(50) NOT NULL,
  * `topic_partition` varchar(50) NOT NULL,
  * `offset` bigint(20) DEFAULT NULL,
  * PRIMARY KEY (`app_gid`,`topic_partition`)
  * ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
  *
  * CREATE TABLE `t_word_counts` (
  * `word` varchar(50) NOT NULL,
  * `counts` bigint(20) DEFAULT NULL,
  * PRIMARY KEY (`word`)
  * ) ENGINE=InnoDB DEFAULT CHARSET=utf8mb4
  */
object ExactlyOnceWordCountOffsetSinkToMySQL {
    def main(args: Array[String]): Unit = {

        // CLI arguments: local-mode flag, application name, consumer group id,
        // comma-separated topic list.
        val Array(isLocal, appName, groupId, allTopics) = args

        val conf = new SparkConf().setAppName(appName)

        // Run with a local master for development/testing.
        if (isLocal.toBoolean) {
            conf.setMaster("local[*]")
        }

        val ssc: StreamingContext = new StreamingContext(conf, Seconds(5))

//        ssc.sparkContext.setLogLevel("WARN")

        val topics: Array[String] = allTopics.split(",")

        val bootstrapServers = "192.168.62.131:9092,192.168.62.132:9092,192.168.62.133:9092"
//        val bootstrapServers = "localhost:9092"

        // Kafka consumer configuration.
        val kafkaParams = Map[String, String](
            ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> bootstrapServers,
            ConsumerConfig.GROUP_ID_CONFIG -> groupId,
            ConsumerConfig.MAX_POLL_RECORDS_CONFIG -> "100",
            ConsumerConfig.AUTO_OFFSET_RESET_CONFIG -> "earliest",
            ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer",
            // Default is true: offsets would be auto-committed every 5s to Kafka's
            // internal __consumer_offsets topic. We manage offsets ourselves
            // (committed to MySQL together with the results) to get exactly-once,
            // so auto-commit must be off.
            ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> "false",
            ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> "org.apache.kafka.common.serialization.StringDeserializer"
        )


        // Load the offsets persisted by the previous run so consumption resumes
        // exactly where the last committed transaction left off.
        val offsets: collection.Map[TopicPartition, Long] = OffsetRangesUtils.queryHistoryOffsetFromMysql(appName + "_" + groupId)

        val kafkaStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(ssc
            , LocationStrategies.PreferConsistent
            , ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsets))

        /**
          * Tasks run on many executors, so their writes cannot share one
          * transaction. Instead the aggregated batch result is collected to the
          * driver, which writes the data AND the offsets to a transactional
          * database in a single commit.
          * Limitation: only suitable for aggregate-style jobs whose per-batch
          * result is small enough to collect to the driver.
          */
        kafkaStream.foreachRDD(rdd => {
            // Skip empty batches so no needless transaction is opened.
            if (!rdd.isEmpty()) {
                // Offset ranges of every partition in this batch's RDD.
                val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

                // Extract the record values for computation.
                val lines: RDD[String] = rdd.map(_.value())

                // Business logic: per-batch word count.
                val reduced: RDD[(String, Int)] = lines.flatMap(_.split("\\s")).map((_, 1)).reduceByKey(_ + _)

                // Action: pull the aggregated result to the driver so a single
                // transaction can cover both the data and the offset update.
                val res: Array[(String, Int)] = reduced.collect()

                // Acquire a MySQL connection on the driver.
                var con: Connection = null
                try {
                    con = DBUtils.getConnection()
                    // Manual transaction control.
                    con.setAutoCommit(false)

                    // Write the computed result ...
                    OffsetRangesUtils.updateWordCountToMySQL(res, con)

                    // ... and the offsets in the SAME transaction.
                    OffsetRangesUtils.updateOffsetToMySQL(appName + "_" + groupId, offsetRanges, con)

                    // Commit data + offsets atomically.
                    con.commit()
                } catch {
                    // NonFatal: let fatal errors (OOM, InterruptedException) propagate.
                    case NonFatal(e) =>
                        // FIX: guard the rollback — if getConnection() itself failed,
                        // con is still null and the old unconditional rollback threw
                        // an NPE that masked the original failure.
                        if (con != null) con.rollback()
                        // FIX: surface the failure instead of swallowing it silently,
                        // so the operator can see why the job stopped.
                        e.printStackTrace()
                        // Stop the application; the uncommitted batch is re-consumed
                        // on restart from the last offsets committed to MySQL.
                        ssc.stop()
                } finally {
                    // Always release the connection.
                    if (con != null) con.close()
                }
            }
        }
        )
        ssc.start()

        ssc.awaitTermination()
    }

}
