package com.carol.bigdata

import java.lang.reflect.Field

import scala.util.control.NonFatal

import com.carol.bigdata.utils.Flag
import com.typesafe.config.ConfigFactory
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.mapred.JobConf
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.SparkConf


/*
 * 项目配置读取类
 * 统一管理
 */
object Config {

    // Raw Typesafe Config handle; values are exposed through the typed vals
    // below, so callers never touch this directly.
    private lazy val config = ConfigFactory.load()

    val appName: String = Flag.GetString("app.name", config.getString("app.name"))

    // Kudu configuration
    val kuduMaster: String = Flag.GetString("kudu.master", config.getString("kudu.master"))
    val threadPool: Int = Flag.GetInt("spark.threadPool", 3)

    // Kafka configuration
    val kafkaBrokers: String = Flag.GetString("kafka.broker.list", config.getString("kafka.broker.list"))
    val topic: String = Flag.GetString("kafka.topic", config.getString("kafka.topic"))
    val groupId: String = Flag.GetString("kafka.group.id", config.getString("kafka.group.id"))
    val kafkaEnableAutoCommit: Boolean = Flag.GetBoolean("kafka.enable.auto.commit", config.getBoolean("kafka.enable.auto.commit"))
    val kafkaAutoOffsetReset: String = Flag.GetString("kafka.auto.offset.reset", config.getString("kafka.auto.offset.reset"))
    val kafkaPartitionStrategy: String = Flag.GetString("partition.assignment.strategy", "org.apache.kafka.clients.consumer.RoundRobinAssignor")

    // Consumer parameter map passed to the Kafka direct stream.
    val kafkaParams: Map[String, Object] = Map[String, Object](
        "bootstrap.servers" -> kafkaBrokers,
        // Key deserializer (these are consumer-side DEserializers, not serializers)
        "key.deserializer" -> classOf[StringDeserializer],
        // Value deserializer
        "value.deserializer" -> classOf[StringDeserializer],
        "group.id" -> groupId,
        "partition.assignment.strategy" -> kafkaPartitionStrategy,
        "auto.offset.reset" -> kafkaAutoOffsetReset,
        // Kafka expects java.lang.Boolean, hence the explicit widening.
        "enable.auto.commit" -> (kafkaEnableAutoCommit: java.lang.Boolean)
    )

    // Spark configuration
    val sparkSeconds: Int = Flag.GetInt("spark.seconds", config.getInt("spark.seconds"))
    val logLevel: String = Flag.GetString("spark.log.level", config.getString("spark.log.level"))
    // Supported values: local, yarn, yarn-cluster
    val sparkMaster: String = Flag.GetString("spark.master", "local[*]")
    val sparkAppName: String = Flag.GetString("spark.app.name", config.getString("spark.app.name"))
    val sparkMaxRatePerPartition: String = Flag.GetString("spark.streaming.kafka.maxRatePerPartition", config.getString("spark.streaming.kafka.maxRatePerPartition"))
    val sparkSerializer: String = Flag.GetString("spark.serializer", config.getString("spark.serializer"))
    val sparkEventQueueSize: String = Flag.GetString("spark.scheduler.listenerbus.eventqueue.size", config.getString("spark.scheduler.listenerbus.eventqueue.size"))

    val sparkConf: SparkConf = new SparkConf()
        .setAppName(sparkAppName)
        // Serializer is expected to be kryo; custom classes then need
        // .registerKryoClasses(Array(classOf[ProductClass]))
        .set("spark.serializer", sparkSerializer)
        // Kafka rate limit; tune per job so production and consumption stay balanced.
        .set("spark.streaming.kafka.maxRatePerPartition", sparkMaxRatePerPartition)
        // Capacity of the LiveListenerBus event queue.
        .set("spark.scheduler.listenerbus.eventqueue.size", sparkEventQueueSize)
        // Allow multiple SparkContexts to coexist.
        .set("spark.driver.allowMultipleContexts", "true")

    // Only pin the master for local runs; on YARN the master comes from
    // spark-submit, and setting it here would override that.
    if (sparkMaster.contains("local")) {
        sparkConf.setMaster(sparkMaster)
    }

    // Miscellaneous configuration
    val timeZone: String = Flag.GetString("timeZone", config.getString("timeZone"))


    // Dump the resolved configuration during object initialization so the
    // effective values show up once in the driver log.
    print()

    /**
     * Prints every resolved configuration value via reflection.
     *
     * Skips the raw Typesafe `config` handle and compiler-generated members
     * (e.g. `MODULE$`, lazy-val bitmap fields) whose values are noise.
     */
    def print(): Unit = {
        println("==> 当前Config配置")
        val fields: Array[Field] = Config.getClass.getDeclaredFields
        for (field <- fields) {
            field.setAccessible(true)
            try {
                // Names containing '$' (MODULE$, bitmap$0, ...) and synthetic
                // members are scalac artifacts, not settings.
                if (field.getName != "config" && !field.getName.contains("$") && !field.isSynthetic)
                    println(s"${field.getName}:${field.get(Config)}")
            } catch {
                // NonFatal: log reflective access failures but let fatal
                // VM errors (OOM, etc.) propagate.
                case NonFatal(e) => e.printStackTrace()
            }
        }
    }

    def main(args: Array[String]): Unit = {
        Flag.Parse(args)
    }
}
