package cn.doit.utils

import com.typesafe.config.{Config, ConfigFactory}
import org.apache.kafka.common.serialization.StringDeserializer

object ConfigUtils {

  // Application configuration loaded from the default classpath locations
  // (application.conf / reference.conf).
  private val config: Config = ConfigFactory.load()

  /**
    * Kafka configuration values.
    */
  private val _KAFKA_BROKERS: String = config.getString("kafka_brokers")
  // The config value is a comma-separated topic list; split into individual topics.
  private val _KAFKA_TOPIC: Array[String] = config.getString("kafka_topic").split(",")
  private val _KAFKA_GROUPID: String = config.getString("kafka_groupid")
  private val _KAFKA_GROUPID_TREND: String = config.getString("kafka_groupid_trend")

  /**
    * Redis configuration values.
    */
  private val _REDIS_HOST: String = config.getString("redis_host")
  // Typed accessor instead of getString(...).toInt: fails fast with a
  // descriptive ConfigException rather than a NumberFormatException.
  private val _REDIS_PORT: Int = config.getInt("redis_port")

  // Streaming batch intervals; typed getLong instead of getString(...).toLong.
  private val _BETCH_INTRAL: Long = config.getLong("betch_intral")
  private val _BETCH_TREND: Long = config.getLong("betch_trend")

  /**
    * Kafka consumer parameters for the trend-analysis streaming job.
    * Auto-commit is disabled, so offsets must be committed manually
    * by the consumer after processing.
    */
  val kafkaParams: Map[String, Object] = Map[String, Object](
    "bootstrap.servers" -> _KAFKA_BROKERS,
    "key.deserializer" -> classOf[StringDeserializer],
    "value.deserializer" -> classOf[StringDeserializer],
    //"auto.commit.interval.ms" -> (30000L:java.lang.Long),
    "session.timeout.ms" -> "300000",
    // Maximum timeout for socket requests; the default is 30 seconds.
    "request.timeout.ms" -> "500000",
    //      "max.poll.records" -> (300: Integer),
    "group.id" -> _KAFKA_GROUPID_TREND,
    "auto.offset.reset" -> "latest",
    //      "auto.offset.reset" -> "earliest",
    "enable.auto.commit" -> (false: java.lang.Boolean)
  )

  /**
    * Kafka broker list.
    * @return the `kafka_brokers` config value
    */
  def kafka_brokers: String = _KAFKA_BROKERS

  /**
    * Kafka topics, split from the comma-separated `kafka_topic` config value.
    * @return the topic names
    */
  def kafka_topic: Array[String] = _KAFKA_TOPIC

  /**
    * Consumer group for the real-time business job.
    * @return the `kafka_groupid` config value
    */
  def kafka_groupid: String = _KAFKA_GROUPID

  /**
    * Consumer group for the trend-analysis job.
    * @return the `kafka_groupid_trend` config value
    */
  def kafka_groupid_trend: String = _KAFKA_GROUPID_TREND

  /**
    * Redis host.
    * @return the `redis_host` config value
    */
  def redis_host: String = _REDIS_HOST

  /**
    * Redis port.
    * @return the `redis_port` config value
    */
  def redis_port: Int = _REDIS_PORT

  /**
    * Streaming batch interval.
    * @return the `betch_intral` config value
    */
  def betch_intral: Long = _BETCH_INTRAL

  /**
    * Streaming batch interval for the trend-analysis job.
    * @return the `betch_trend` config value
    */
  def betch_trend: Long = _BETCH_TREND
}
