package com.zh.hudi

import java.util.Properties

import com.zh.constants.Constants
import com.zh.util.{JSONUtils, PropertiesUtils, SparkUtils}
import org.apache.hudi.DataSourceWriteOptions
import org.apache.hudi.common.model.HoodieTableType
import org.apache.hudi.config.{HoodieIndexConfig, HoodieWriteConfig}
import org.apache.hudi.index.HoodieIndex
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges}

import scala.collection.mutable

object StreamingSinkHudi {

  /**
   * Entry point.
   *
   * args(0) propFilePath - path of the properties file with Kafka/Hudi settings
   * args(1) appName      - Spark application name
   *
   * @param args command-line arguments, parsed by SparkUtils.argsHandle
   */
  def main(args: Array[String]): Unit = {
    val sparkConf = new SparkConf()
    System.setProperty("HADOOP_USER_NAME", "spark")

    // argument handling: (propFilePath, appName)
    val argsTuple: (String, String) = SparkUtils.argsHandle(args)

    // initialize SparkSession
    val spark = SparkUtils.initSpark(argsTuple._2, sparkConf)

    val confName = argsTuple._1

    /**
     * Read configuration values. The extracted names below are aligned
     * positionally with the Constants array: the original code bound
     * hudiKey/hudiBasePath in swapped order here AND swapped them again at
     * the streamingRunner call site, which cancelled out. The names are now
     * aligned with their constants so the call site passes them straight
     * through in signature order — same runtime behavior, no hidden swap.
     */
    val Array(
    brokers, group, interval, topics, offsetreset, hudiTableName, hudiBasePath, hudiKey, hudiPts, hudiMergeKey
    ) = PropertiesUtils.parseArgsFromFile(confName, Array(
      Constants.KAFKA_BROKERS,
      Constants.KAFKA_GROUPID,
      Constants.SPARK_INTERVAL_SECONDS,
      Constants.KAFKA_TOPICS,
      Constants.KAFKA_AUTO_OFFSETRE_SET,
      Constants.HUDI_TABLE_NAME,
      Constants.HUDI_TABLE_BASEPATH,
      Constants.HUDI_RECORDKEY_FIELD_OPT_KEY,
      Constants.HUDI_PARTITIONPATH_FIELD_OPT_KEY,
      Constants.HUDI_PRECOMBINE_FIELD_OPT_KEY
    ))

    val prop = PropertiesUtils.getPropConfig(confName)

    // runner — arguments are in the exact order of streamingRunner's signature
    streamingRunner(spark, brokers, group, interval.toLong, topics, offsetreset, prop, hudiTableName, hudiBasePath, hudiKey, hudiPts, hudiMergeKey)

  }

  /**
   * Streaming app start: builds the StreamingContext, wires the Kafka direct
   * stream, writes each micro-batch to Hudi and commits the Kafka offsets
   * only after a successful sink (at-least-once semantics).
   *
   * @param spark           active SparkSession
   * @param kafkaBrokers    Kafka bootstrap servers
   * @param groupId         Kafka consumer group id
   * @param interval        micro-batch interval in seconds
   * @param kafkaTopics     comma-separated topic list
   * @param autooffsetreset Kafka auto.offset.reset policy
   * @param properties      full job configuration (checkpoint dir, SQL, Hudi/Hive options)
   * @param hudiTableName   Hudi table name
   * @param hudiBasePath    Hudi table base path on the filesystem
   * @param hudiKey         record-key field
   * @param hudiPts         partition-path field
   * @param hudiMergeKey    precombine (dedup/merge ordering) field
   */
  def streamingRunner(spark: SparkSession, kafkaBrokers: String, groupId: String, interval: Long, kafkaTopics: String, autooffsetreset: String,
                      properties: Properties, hudiTableName: String, hudiBasePath: String, hudiKey: String, hudiPts: String, hudiMergeKey: String): Unit = {

    // init streaming context with the configured batch interval
    val ssc = SparkUtils.initSparkStreaming(spark, interval)
    ssc.checkpoint(properties.getProperty(Constants.SPARK_CHECKPOINT_DIR))

    // kafka source (direct stream, offsets managed manually below)
    val dStream = SparkUtils.getKafkaDirectStream(ssc, kafkaBrokers, groupId, autooffsetreset, kafkaTopics)

    // transform + sink per micro-batch
    dStream.foreachRDD(x => {
      // capture the batch's offset ranges BEFORE any transformation
      val offsetRanges = x.asInstanceOf[HasOffsetRanges].offsetRanges

      // keep only syntactically valid JSON payloads
      val jsonValueRdd = x.map(_.value).filter(JSONUtils.isJSONValidate)

      // sink to Hudi
      saveHudi(spark, jsonValueRdd, properties, hudiTableName, hudiBasePath, hudiKey, hudiPts, hudiMergeKey)

      // commit offsets only after the sink completed without throwing
      dStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    })

    ssc.start()
    ssc.awaitTermination()
  }

  /**
   * Writes one micro-batch of JSON strings to the Hudi table.
   *
   * The batch is parsed into a DataFrame, projected through the configured
   * SELECT expression (property EXEC_SELECT_EXPR_SQL, run against temp view
   * "t1"), and upserted into Hudi. Empty batches are skipped cheaply via
   * RDD.isEmpty (the original forced a full count just to test emptiness).
   *
   * @param spark         active SparkSession
   * @param rdd           JSON payloads of the current batch
   * @param properties    job configuration (projection SQL, save mode, Hudi options)
   * @param hudiTableName Hudi table name
   * @param hudiBasePath  Hudi table base path
   * @param hudiKey       record-key field
   * @param hudiPts       partition-path field
   * @param hudiMergeKey  precombine field
   */
  def saveHudi(spark: SparkSession, rdd: RDD[String], properties: Properties, hudiTableName: String, hudiBasePath: String, hudiKey: String, hudiPts: String, hudiMergeKey: String): Unit = {
    // isEmpty only inspects the first non-empty partition — far cheaper than count
    if (!rdd.isEmpty()) {
      import spark.implicits._
      val jsonDS = rdd.toDS

      // JSON strings -> DataFrame with inferred schema
      val allDF: DataFrame = spark.read.json(jsonDS)
      allDF.createOrReplaceTempView("t1")

      // project the raw records through the configured SELECT expression
      val exprSql = properties.getProperty(Constants.EXEC_SELECT_EXPR_SQL)
      val resultDF = spark.sql(exprSql)

      // debug aid: prints a sample (NOTE: triggers an extra Spark job)
      resultDF.show(3)

      resultDF.write
        .format("org.apache.hudi")
        // other write params (parallelism, index, hive sync, ...)
        .options(hudiWriteConfig(properties))
        // primary key field
        .option(DataSourceWriteOptions.RECORDKEY_FIELD_OPT_KEY, hudiKey)
        // precombine / ordering field used to merge duplicate keys
        .option(DataSourceWriteOptions.PRECOMBINE_FIELD_OPT_KEY, hudiMergeKey)
        // partition column
        .option(DataSourceWriteOptions.PARTITIONPATH_FIELD_OPT_KEY, hudiPts)
        // table name
        .option(HoodieWriteConfig.TABLE_NAME, hudiTableName)
        // sink mode (e.g. append)
        .mode(properties.getProperty(Constants.HUDI_TABLE_MODE))
        // Hudi table base path
        .save(hudiBasePath)
    }
  }

  /**
   * Builds the common Hudi write options for this job.
   *
   * Uses GLOBAL_BLOOM indexing with partition-path updates enabled, UPSERT
   * operation on a MERGE_ON_READ table, hive-style partitioning and batch
   * dedup. Hive sync options are appended when the HUDI_SYNC_HIVE property
   * equals "true".
   *
   * @param properties job configuration
   * @return mutable map of Hudi option key/value pairs
   */
  def hudiWriteConfig(properties: Properties): mutable.Map[String, String] = {
    val confMap = new mutable.HashMap[String, String]()
    confMap.put("hoodie.insert.shuffle.parallelism", "10")
    confMap.put("hoodie.upsert.shuffle.parallelism", "10")
    // under GLOBAL_BLOOM: move the record when its partition value changes
    confMap.put(HoodieIndexConfig.BLOOM_INDEX_UPDATE_PARTITION_PATH, "true")
    // index type; alternatives: HBASE, INMEMORY, BLOOM, GLOBAL_BLOOM
    confMap.put(HoodieIndexConfig.INDEX_TYPE_PROP, HoodieIndex.IndexType.GLOBAL_BLOOM.name())
    // operation type
    confMap.put(DataSourceWriteOptions.OPERATION_OPT_KEY, DataSourceWriteOptions.UPSERT_OPERATION_OPT_VAL)
    // table engine type
    confMap.put(DataSourceWriteOptions.TABLE_TYPE_OPT_KEY, HoodieTableType.MERGE_ON_READ.name)
    // hive-style partition path format (col=value)
    confMap.put(DataSourceWriteOptions.HIVE_STYLE_PARTITIONING_OPT_KEY, "true")
    // drop duplicates within the batch
    confMap.put(DataSourceWriteOptions.INSERT_DROP_DUPS_OPT_KEY, "true")
    // hive sync (BUGFIX: the original used String.eq, which is AnyRef
    // reference equality and is effectively always false for a value read
    // from a Properties file, so sync was silently never enabled;
    // equalsIgnoreCase compares content and is null-safe)
    if ("true".equalsIgnoreCase(properties.getProperty(Constants.HUDI_SYNC_HIVE)))
      syncHudiToHive(confMap, properties)

    confMap
  }

  /**
   * Adds the Hudi -> Hive sync options to the given option map.
   *
   * @param confMap    Hudi option map to enrich (mutated in place)
   * @param properties job configuration holding the Hive sync settings
   */
  def syncHudiToHive(confMap: mutable.Map[String, String], properties: Properties): Unit = {
    // enable hive sync
    confMap.put(DataSourceWriteOptions.HIVE_SYNC_ENABLED_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE))
    // target hive database — must exist beforehand, Hudi does not create it
    confMap.put(DataSourceWriteOptions.HIVE_DATABASE_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE_DATABASE))
    // target hive table
    confMap.put(DataSourceWriteOptions.HIVE_TABLE_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE_TABLE))
    // hive partition field(s)
    confMap.put(DataSourceWriteOptions.HIVE_PARTITION_FIELDS_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE_PARTITION))
    // hiveserver2 jdbc connection
    confMap.put(DataSourceWriteOptions.HIVE_URL_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE_BEELINE_URL))
    confMap.put(DataSourceWriteOptions.HIVE_USER_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE_BEELINE_USER))
    confMap.put(DataSourceWriteOptions.HIVE_PASS_OPT_KEY, properties.getProperty(Constants.HUDI_SYNC_HIVE_BEELINE_PWSSWD))
    // key-generation strategy differs for partitioned vs non-partitioned tables
    confMap.put(DataSourceWriteOptions.HIVE_PARTITION_EXTRACTOR_CLASS_OPT_KEY, "org.apache.hudi.hive.MultiPartKeysValueExtractor")
  }

}
