package com.example.spark.streaming

import java.{lang, util}

import scala.util.control.NonFatal

import com.alibaba.fastjson.{JSON, JSONObject}
import com.example.util.{HiveConfigUtil, YamlUtil}
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.{SaveMode, SparkSession}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies}
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
 * @title SimpleKafka
 * @date 8/16/2022 8:34 PM
 * @author leal123
 * @description Simple micro-batch job that reads JSON messages from Kafka and appends them to Hive
 */
object SimpleKafka {

  /**
   * Entry point: consumes JSON records from Kafka in 5-second micro-batches,
   * appends them to a Hive table, and commits Kafka offsets manually only
   * after each batch has been written successfully (at-least-once semantics).
   */
  def main(args: Array[String]): Unit = {
    //0- job parameters: source topic and target Hive table
    //   (target table columns, in order: id, name, age)
    val topics = Set("SPARK_TEST_KAFKA")
    val hiveTableName: String = "ads_test_student"

    //1- streaming environment + Hive configuration
    Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
    Logger.getLogger("org.apache.hadoop").setLevel(Level.WARN)
    Logger.getLogger("org.apache.kafka").setLevel(Level.WARN)
    val envInfo: util.LinkedHashMap[String, String] = YamlUtil.getEnvInfo("NODE_HIVE")
    val sparkConf: SparkConf = new SparkConf()
      .setAppName("SimpleKafka")
      .setMaster("local[4]")
      .set("spark.sql.warehouse.dir", envInfo.get("warehouse"))
      .set("hive.metastore.uris", envInfo.get("metastore"))
      .set("spark.streaming.kafka.maxRatePerPartition", "2000")
      .set("spark.streaming.backpressure.enabled", "true")
      .set("spark.streaming.stopGracefullyOnShutdown", "true")
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
      .set("spark.shuffle.file.buffer", "64K")
      .set("spark.reducer.maxSizeInFlight", "96M")
      .set("spark.executor.heartbeatInterval", "30000ms")
    val ssc: StreamingContext = new StreamingContext(sparkConf, Seconds(5))

    // Kafka consumer parameters. Auto-commit is disabled deliberately:
    // offsets are committed manually after each batch is persisted to Hive.
    val kafkaMap: Map[String, Object] = Map[String, Object](
      "bootstrap.servers" -> "node02:9092,node03:9092,node04:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "spark-test",
      "auto.offset.reset" -> "earliest",
      "enable.auto.commit" -> (false: lang.Boolean)
    )
    val kfStream: InputDStream[ConsumerRecord[String, String]] = KafkaUtils.createDirectStream(
      ssc, LocationStrategies.PreferConsistent, ConsumerStrategies.Subscribe[String, String](topics, kafkaMap)
    )

    // Reuses the SparkContext already created by the StreamingContext above.
    val spark = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()
    //HiveConfigUtil.openDynamicPartition(sparkSession) // enable dynamic partitioning
    //HiveConfigUtil.setPartitions(sparkSession) // set the maximum partition count
    //HiveConfigUtil.openCompression(sparkSession) // enable compression
    //HiveConfigUtil.useSnappyCompression(sparkSession) // use snappy compression

    import spark.implicits._
    // Single output action per batch: parse, drop malformed records, write to
    // Hive, THEN commit the batch's offsets. Doing the commit inside the same
    // foreachRDD — after the write — guarantees offsets are never committed
    // for a batch whose write failed (the original code committed offsets in
    // an independent action, risking data loss on restart).
    kfStream.foreachRDD { rdd =>
      // Offset ranges must be captured on the direct stream's RDD itself,
      // before any transformation erases the HasOffsetRanges trait.
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      val students = rdd
        .map(_.value())
        .mapPartitions(_.map(getJsonData)
          .filter(_ != null) // drop records that failed JSON parsing — avoids NPE in jsonConvert
          .map(jsonConvert))
      students.toDF()
        .write.mode(SaveMode.Append)
        .insertInto(hiveTableName)
      // Manual offset commit, only reached when the write above succeeded.
      kfStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }
    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Parses a raw Kafka record value into a fastjson object.
   *
   * @param data raw JSON string taken from the Kafka record value
   * @return the parsed object, or null when the payload is not valid JSON;
   *         callers must filter out nulls before dereferencing the result
   */
  def getJsonData(data: String): JSONObject = {
    try {
      JSON.parseObject(data)
    } catch {
      // NonFatal lets truly fatal errors (OOM, interrupts) propagate
      // instead of being swallowed here.
      case NonFatal(ex) => println(ex)
        null
    }
  }

  /**
   * Maps a parsed JSON object to a Student row for the Hive append.
   *
   * @param json non-null parsed record; expected to carry "id", "name" and
   *             "age" fields (a missing key yields a null column value)
   * @return the Student row to be written
   */
  def jsonConvert(json: JSONObject): Student = {
    println("data json is :" + json)
    val id = json.getString("id")
    val name = json.getString("name")
    val age = json.getString("age")
    Student(id, name, age)
  }
}
