package com.zh.constants

/**
 * Centralised configuration-property key names (plus two literal values) used
 * to look up settings for the Spark Streaming -> Kafka -> Hudi pipeline.
 *
 * Misspelled legacy member names are retained as `@deprecated` aliases so
 * existing callers keep compiling; new code should use the corrected names.
 */
object Constants {
  // ---------------------------------------------------------------------------
  // Spark Streaming configuration keys
  // ---------------------------------------------------------------------------
  val SPARK_INTERVAL_SECONDS: String = "spark.interval.seconds"
  val SPARK_CHECKPOINT_DIR: String = "spark.checkpoint.dir"

  // ---------------------------------------------------------------------------
  // Kafka configuration keys
  // ---------------------------------------------------------------------------
  // NOTE(review): the value keeps the historical typo "comsumer" on purpose —
  // it is a runtime key read from deployed config files, so correcting it here
  // would silently break those deployments. Fix only with a config migration.
  val KAFKA_TOPICS: String = "kafka.comsumer.topics"
  val KAFKA_BROKERS: String = "kafka.bootstrap.servers"
  val KAFKA_GROUPID: String = "kafka.group.id"

  /** Key for the consumer's auto-offset-reset policy. Prefer this name. */
  val KAFKA_AUTO_OFFSET_RESET: String = "kafka.auto.offset.reset"
  /** Misspelled legacy alias of [[KAFKA_AUTO_OFFSET_RESET]], kept for source compatibility. */
  @deprecated("use KAFKA_AUTO_OFFSET_RESET", "next release")
  val KAFKA_AUTO_OFFSETRE_SET: String = KAFKA_AUTO_OFFSET_RESET

  // NOTE(review): unlike the members above, these two hold literal *values*
  // (milliseconds / record count), not property names — confirm with the call
  // sites that this asymmetry is intentional.
  val KAFKA_MAX_POLL_INTERVAL_MS: String = "600000"
  val KAFKA_MAX_POLL_RECORDS: String = "100"

  // ---------------------------------------------------------------------------
  // SQL-expression configuration keys
  // ---------------------------------------------------------------------------
  val EXEC_SELECT_EXPR_SQL: String = "exec.selectExpr.sql"

  /** Key for the filter-expression SQL. Prefer this name. */
  val EXEC_FILTER_EXPR_SQL: String = "exec.filterExpr.sql"
  /** Misspelled legacy alias of [[EXEC_FILTER_EXPR_SQL]], kept for source compatibility. */
  @deprecated("use EXEC_FILTER_EXPR_SQL", "next release")
  val EXEC_FEILE_EXPR_SQL: String = EXEC_FILTER_EXPR_SQL

  // ---------------------------------------------------------------------------
  // Hudi configuration keys
  // ---------------------------------------------------------------------------
  val HUDI_TABLE_NAME: String = "hudi.table.name"

  /**
   * Key for the table save mode. Expected config values mirror Spark's
   * `SaveMode`:
   * <li>`overwrite`: overwrite the existing data.</li>
   * <li>`append`: append the data.</li>
   * <li>`ignore`: ignore the operation (i.e. no-op).</li>
   * <li>`error` or `errorifexists`: default option, throw an exception at runtime.</li>
   */
  val HUDI_TABLE_MODE: String = "hudi.table.mode"
  val HUDI_TABLE_BASEPATH: String = "hudi.table.basepath"
  val HUDI_RECORDKEY_FIELD_OPT_KEY: String = "hudi.table.primary.key"
  val HUDI_PARTITIONPATH_FIELD_OPT_KEY: String = "hudi.table.partition.key"
  val HUDI_PRECOMBINE_FIELD_OPT_KEY: String = "hudi.table.merge.key"

  // Hive-sync settings for Hudi tables.
  val HUDI_SYNC_HIVE: String = "hudi.sync.hive"
  val HUDI_SYNC_HIVE_DATABASE: String = "hudi.sync.hive.database"
  val HUDI_SYNC_HIVE_TABLE: String = "hudi.sync.hive.table"
  val HUDI_SYNC_HIVE_PARTITION: String = "hudi.sync.hive.partition"
  val HUDI_SYNC_HIVE_BEELINE_URL: String = "hudi.sync.hive.beeline.url"
  val HUDI_SYNC_HIVE_BEELINE_USER: String = "hudi.sync.hive.beeline.user"

  /** Key for the Beeline password. Prefer this name. */
  val HUDI_SYNC_HIVE_BEELINE_PASSWD: String = "hudi.sync.hive.beeline.passwd"
  /** Misspelled legacy alias of [[HUDI_SYNC_HIVE_BEELINE_PASSWD]], kept for source compatibility. */
  @deprecated("use HUDI_SYNC_HIVE_BEELINE_PASSWD", "next release")
  val HUDI_SYNC_HIVE_BEELINE_PWSSWD: String = HUDI_SYNC_HIVE_BEELINE_PASSWD
}




