package com.carol.bigdata

import com.carol.bigdata.fuction.{LogParseUtil, MyAccumulator}
import com.carol.bigdata.fuction.LogParseUtil
import com.carol.bigdata.utils.{Flag, TimeUtil}
import com.carol.bigdata.utils.{Flag, TimeUtil}

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.kudu.spark.kudu.KuduContext
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{ConsumerStrategies, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable
import scala.util.control.NonFatal


object App {
    /*
     * Program entry point.
     *
     * Check for unused dependencies with: mvn dependency:analyze
     *
     * Submit examples:
     * spark-submit --master <actual master address> --class com.carol.bigdata.App  /path/to/xxx.jar <program args>
     * spark-submit --master yarn --deploy-mode cluster  --num-executors 1  --driver-memory 512m --executor-memory 512m --executor-cores 1  --class com.carol.bigdata.App  --name Spark2HbaseFromCarol   realTimeStat-1.0.jar
     * spark-submit --num-executors 1  --driver-memory 512m --executor-memory 512m --executor-cores 1  --class com.carol.bigdata.App  --name binlog2hbase   binlog2hbase-1.0.jar
     */

    def main(args: Array[String]): Unit = {
        // Parse command-line flags into the global Flag configuration.
        Flag.Parse(args)

        // Build the streaming context; the batch interval comes from config.
        val sparkConf = Config.sparkConf
        val ssc = new StreamingContext(sparkConf, Seconds(Config.sparkSeconds))
        val sc: SparkContext = ssc.sparkContext
        sc.setLogLevel(Config.logLevel)
        val spark: SparkSession = SparkSession.builder()
          .config(conf = Config.sparkConf)
          .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
          .getOrCreate()

        // NOTE(review): the original code did sc.broadcast(x).value, which reads the
        // broadcast back on the driver immediately and discards the Broadcast handle,
        // so the closure captured a plain value and no broadcast ever took effect.
        // Both values are used only on the driver (KuduContext creation and the
        // foreachRDD body both run driver-side), so plain config reads are equivalent.
        val kuduMaster = Config.kuduMaster
        val threadPool = Config.threadPool

        // Direct Kafka stream: Kafka partitions map 1:1 onto RDD partitions,
        // so Kafka data is read in parallel.
        val stream: InputDStream[ConsumerRecord[String, String]] = getStream(ssc, Config.topic, Config.kafkaParams)

        // Kudu connection context (created on the driver).
        val kuduContext: KuduContext = new KuduContext(kuduMaster, sc)
        // Custom set-collection accumulator that gathers the game ids seen per batch.
        val myAccumulator: MyAccumulator = new MyAccumulator()
        sc.register(myAccumulator, "gameSet")

        // FIX: the original try/catch wrapped only the *registration* of this output
        // operation; per-batch failures surface from the running job, not from the
        // registration call, so that catch never saw them. Handle errors inside the
        // batch closure instead, so one bad batch (typically malformed data reported
        // by an upstream service) does not kill the whole streaming application.
        stream.foreachRDD((rdd: RDD[ConsumerRecord[String, String]]) => {
            // TimeUtil.timer reports how long the batch body took.
            TimeUtil.timer(
                if (!rdd.isEmpty()) {
                    try {
                        // Parse the Kafka logs; collects game_id values into the
                        // accumulator as a side effect of the transformation.
                        val data = LogParseUtil.parseLog(rdd, myAccumulator)
                        if (!data.isEmpty()) {
                            // Cache the parsed data so the concurrent writers
                            // below do not re-trigger the parse.
                            data.persist()
                            try {
                                // ETL: write this batch into the Kudu tables
                                // using a thread pool of `threadPool` workers.
                                Task.multiThreadRun(kuduContext, spark, data, myAccumulator, threadPool)
                            } finally {
                                // FIX: always release the cache, even when a
                                // writer throws (the original leaked it on error).
                                data.unpersist()
                            }
                        }
                    } catch {
                        // Log and continue on recoverable errors; NonFatal lets
                        // OOM/interrupts propagate instead of being swallowed.
                        case NonFatal(e) => e.printStackTrace()
                    }
                })
        })

        // Start the streaming application and block until it terminates.
        ssc.start()
        ssc.awaitTermination()
        println("==> 任务结束 <==")
    }

    /**
     * Builds a direct Kafka input stream.
     *
     * @param ssc         streaming context the stream is attached to
     * @param topic       comma-separated list of Kafka topic names
     * @param kafkaParams Kafka consumer configuration
     * @param offsetMap   optional explicit starting offset per topic-partition
     * @param isOffset    when true (and offsetMap is non-null), start consuming
     *                    from offsetMap instead of the group's committed offsets
     * @return the direct Kafka input stream
     */
    def getStream(ssc: StreamingContext,
                  topic: String,
                  kafkaParams: Map[String, Object],
                  offsetMap: mutable.Map[TopicPartition, Long] = null,
                  isOffset: Boolean = false): InputDStream[ConsumerRecord[String, String]] = {

        val topics = topic.split(",")
        // FIX: offsetMap/isOffset were accepted but silently ignored; honour them
        // via the Subscribe overload that takes explicit starting offsets. The
        // defaults keep the original behavior for existing callers.
        val strategy =
            if (isOffset && offsetMap != null)
                ConsumerStrategies.Subscribe[String, String](topics, kafkaParams, offsetMap)
            else
                ConsumerStrategies.Subscribe[String, String](topics, kafkaParams)

        KafkaUtils.createDirectStream[String, String](
            ssc,
            LocationStrategies.PreferConsistent,
            strategy)
    }
}
