package cn.edu360.streaming.utils

import com.typesafe.config.ConfigFactory
import kafka.utils.{ZKGroupTopicDirs, ZkUtils}
import org.apache.kafka.common.security.JaasUtils
import org.apache.spark.SparkConf
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Base Streaming setup: shared configuration and context bootstrap for streaming jobs.
  * wzxjava@126.com
  * Created by wangzhixuan on 2017/05/24 16:42
  */

object BaseStreaming {

  /**
    * Factory method so callers can write `BaseStreaming(path)` instead of
    * `new BaseStreaming(path)`.
    *
    * @param path classpath resource name of the Typesafe Config file to load
    * @return a fully initialized [[BaseStreaming]] instance
    */
  def apply(path: String): BaseStreaming = new BaseStreaming(path)
}

/**
  * Eagerly loads streaming/Kafka/ZooKeeper settings from a Typesafe Config
  * resource and bootstraps a Spark [[StreamingContext]] plus a ZooKeeper
  * client, exposing them (and the raw config values) as public fields.
  *
  * Marked [[Serializable]] so instances can be captured by Spark closures;
  * the non-serializable handles (config, SparkConf, StreamingContext,
  * ZKGroupTopicDirs, ZkUtils) are `@transient` and will be null after
  * deserialization on executors — only the plain String/Int/Map fields
  * survive serialization.
  *
  * NOTE(review): construction has side effects — it opens a ZooKeeper
  * connection (`ZkUtils`) and creates a StreamingContext. Field order
  * matters: SparkConf must be fully configured before the context is built.
  *
  * @param path classpath resource name passed to `ConfigFactory.load`
  */
@SerialVersionUID(99L) class BaseStreaming(val path: String) extends Serializable {
  // Typesafe Config loaded from the given classpath resource; not serializable.
  @transient val config = ConfigFactory.load(path)
  // Grok pattern expression used for log parsing (streaming.grok).
  val grok = config.getString("streaming.grok")
  // Pre-compiled default regex patterns from the project-local helper.
  // NOTE(review): assumed to be a serializable map of named patterns — confirm in DefaultRegUtil.
  val patterns = DefaultRegUtil.loadPatterns()
  // HDFS output location for the streaming job (streaming.hdfs).
  val target = config.getString("streaming.hdfs")
  @transient val sparkConf = new SparkConf().setAppName(config.getString("streaming.appName"))
  // Throttle: max records consumed per Kafka partition per second (backpressure guard).
  sparkConf.set("spark.streaming.kafka.maxRatePerPartition", "10000")
  // Kryo serialization is faster/smaller than Java serialization for shuffle data.
  sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
  // StreamingContext must be created AFTER all SparkConf settings above are applied.
  @transient val ssc = new StreamingContext(sparkConf, Seconds(config.getLong("streaming.batchDuration")))
  // Kafka connection settings.
  val brokers = config.getString("kafka.brokers")
  val fetch = config.getString("kafka.fetch")
  val topic = config.getString("kafka.topic")
  val groupId = config.getString("kafka.groupId")
  // ZooKeeper connection string and session/connection timeout (ms).
  val zkHosts = config.getString("zk.zkHosts")
  val timeout = config.getInt("zk.timeout")
  // Standard consumer-group directory layout in ZooKeeper for this group/topic.
  @transient val topicDirs = new ZKGroupTopicDirs(groupId, topic)
  // ZK path under which per-partition consumer offsets are stored (plain String, serializable).
  val zkTopicPath = s"${topicDirs.consumerOffsetDir}"
  // Where to start when no committed offset exists (e.g. "smallest"/"largest").
  val offsetRequest = config.getString("kafka.offsetRequest")
  // Parameters handed to the Kafka direct-stream consumer (old SimpleConsumer API keys).
  val kafkaParams = Map[String, String](
    "metadata.broker.list" -> brokers,
    "group.id" -> groupId,
    "fetch.message.max.bytes" -> fetch,
    "auto.offset.reset" -> offsetRequest
  )
  // Live ZooKeeper client (used for manual offset management); opens a connection on construction.
  @transient val zkUtils = ZkUtils(zkHosts, timeout, timeout, JaasUtils.isZkSecurityEnabled())

}
