package com.event.streaming
import com.event.streaming.process.consumer.TestConsumer
import com.event.streaming.config.Settings
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

import scala.util.Try

object Driver {

  /** Entry point: initializes [[Settings]] from the optional first CLI argument,
    * then runs the streaming consumer iff a SparkSession could be created.
    * If session creation fails, the program exits quietly (see note below).
    */
  def main(args: Array[String]): Unit = {
    Settings.initialize(Try(args(0)).toOption)
    for (spark <- createSparkSession()) {
      (new TestConsumer() with Settings.Source.StreamingConsumer).run(spark)
    }
  }

  /** Builds a SparkSession configured for Kafka streaming with backpressure.
    *
    * The ingest rate is derived from the configured batch size divided by the
    * streaming interval in seconds, rounded to the nearest integer (+0.5 then
    * truncate).
    *
    * @return Some(session) on success, None if any config lookup or session
    *         creation threw. NOTE(review): `.toOption` swallows the exception
    *         silently — consider logging the failure before discarding it.
    */
  private def createSparkSession(): Option[SparkSession] = Try {
    val rate: Int =
      (Settings.Source.streamingBatchSize * 1.0d / Settings.Source.streamingInterval.toSeconds + 0.5d).toInt

    val cfg = new SparkConf()
    // Values from the config file; setIfMissing lets externally-supplied
    // (e.g. spark-submit) settings win over the file.
    Settings.spark.foreach { case (k, v) => cfg.setIfMissing(k, v) }
    // Runtime defaults, also overridable externally.
    cfg.setIfMissing("spark.sql.crossJoin.enabled", "true")
    cfg.setIfMissing("spark.streaming.kafka.consumer.cache.enabled", "false")
    cfg.setIfMissing("spark.sql.autoBroadcastJoinThreshold", "-1")
    // Spark streaming with Kafka: always enforce backpressure and rate limits.
    cfg.set("spark.streaming.backpressure.enabled", "true")
    cfg.set("spark.streaming.backpressure.initialRate", rate.toString)
    cfg.set("spark.streaming.kafka.maxRatePerPartition", rate.toString)
    cfg.set("spark.streaming.receiver.maxRate", rate.toString)

    // BUG FIX: `cfg` was previously built and then discarded — none of the
    // config-file settings or the backpressure/rate limits above ever reached
    // the session. Pass it to the builder so they take effect. The three
    // explicit .config calls below are kept to preserve the previously
    // effective (forced) values regardless of external overrides.
    SparkSession.builder
      .config(cfg)
      .config("spark.sql.crossJoin.enabled", "true")
      .config("spark.streaming.kafka.consumer.cache.enabled", "false")
      .config("spark.sql.autoBroadcastJoinThreshold", "-1")
      .enableHiveSupport()
      .getOrCreate()
  }.toOption
}
