package com.cmnit.gatherdata

import com.cmnit.gatherdata.modules.module.{PhoenixModule, HbaseModule}
import com.cmnit.gatherdata.utils.ConfigurationManager
import org.apache.hadoop.security.UserGroupInformation
import org.apache.log4j.Logger
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession

object Main {
  val logger: Logger = Logger.getLogger(Main.getClass)

  /**
   * Entry point: reads runtime configuration, sets up Kerberos/SASL system
   * properties, builds a Hive-enabled SparkSession and starts the HBase
   * streaming job that de-duplicates Kafka data.
   *
   * args(1) (optional): Kafka consumer group id. Falls back to the
   * "group.id" configuration property when absent or empty.
   */
  def main(args: Array[String]): Unit = {
    // Runtime environment (production or test) — informational only.
    val productOrTest = ConfigurationManager.getProperty("product.or.test")
    logger.info(s"====================== 当前运行环境：${productOrTest} ===================================")

    // Consumer group id: second CLI argument if present and non-empty,
    // otherwise the configured default. `args.lift(1)` avoids the
    // ArrayIndexOutOfBoundsException the previous `args(1)` threw when
    // fewer than two arguments were supplied.
    val groupId: String = args.lift(1)
      .filter(arg => arg != null && arg.nonEmpty)
      .getOrElse(ConfigurationManager.getProperty("group.id"))
    logger.info(s"======================= groupID：${groupId} ==========================================")

    // Log level applied to the SparkContext after session creation.
    val logLevel: String = ConfigurationManager.getProperty("log.level")

    // Kerberos / SASL configuration for secure ZooKeeper and Kafka access.
    System.setProperty("java.security.auth.login.config", ConfigurationManager.getProperty("java.security.auth.login.config"))
    System.setProperty("java.security.krb5.conf", ConfigurationManager.getProperty("java.security.krb5.conf"))
    System.setProperty("zookeeper.sasl.clientconfig", "Client")
    System.setProperty("zookeeper.sasl.client", "true")

    // TODO local mode requires an explicit keytab login (user/password).
    /*UserGroupInformation.loginUserFromKeytab(ConfigurationManager.getProperty("kerberos.principal"), ConfigurationManager.getProperty("keytab.file"))
    println("login user: " + UserGroupInformation.getLoginUser)*/

    // Offline (batch) computation parameters, sourced from configuration.
    val conf = new SparkConf()
    conf.set("spark.serializer", ConfigurationManager.getProperty("spark.serializer"))
    conf.set("spark.executor.extraJavaOptions", ConfigurationManager.getProperty("spark.executor.extraJavaOptions"))
    conf.set("spark.checkpointPath", ConfigurationManager.getProperty("spark.checkpointPath"))
    conf.set("spark.default.parallelism", ConfigurationManager.getProperty("spark.default.parallelism"))
    conf.set("spark.sql.shuffle.partitions", ConfigurationManager.getProperty("spark.sql.shuffle.partitions"))
    conf.set("sparkSession.debug.maxToStringFields", ConfigurationManager.getProperty("sparkSession.debug.maxToStringFields"))
    // Streaming computation parameters.
    conf.set("spark.streaming.concurrentJobs", "1")
    // NOTE: spark.scheduler.mode used to be set twice — once from config and
    // once hard-coded. The hard-coded "FAIR" value always overwrote the
    // config value, so only the effective setting is kept here.
    conf.set("spark.scheduler.mode", "FAIR")
    // Ensure data already received is fully processed before shutdown
    // when the application is killed.
    conf.set("spark.streaming.stopGracefullyOnShutdown", "true")

    val kafkaBootstrapServers = ConfigurationManager.getProperty("bootstrap.servers")
    val zkConnect = ConfigurationManager.getProperty("zookeeper.connect")

    // Create the SparkSession (Hive support needed for dynamic partitioning).
    val sparkSession: SparkSession = SparkSession
      .builder
      // TODO switch master type when running in local mode
      //.master("local[*]")
      .appName("kafka数据去重")
      .config(conf)
      .enableHiveSupport
      .getOrCreate
    sparkSession.sparkContext.setLogLevel(logLevel)
    sparkSession.sqlContext.setConf("hive.exec.dynamic.partition.mode", "nonstrict")

    HbaseModule.startStreaming(sparkSession, kafkaBootstrapServers, zkConnect, groupId)
    //PhoenixModule.startStreaming(sparkSession, kafkaBootstrapServers, zkConnect)
  }
}
