package com.xl.bigdata.spark.sql.example

import com.xl.bigdata.bean.LxApiLogBean
import com.xl.bigdata.spark.bean.LxApiLogBeanRdd
import com.xl.bigdata.util.FastJsonUtil
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.security.UserGroupInformation
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010.ConsumerStrategies.Subscribe
import org.apache.spark.streaming.kafka010.LocationStrategies.PreferConsistent
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, HasOffsetRanges, KafkaUtils}
import org.apache.spark.streaming.{Seconds, StreamingContext, Time}
import org.apache.spark.{SparkConf, SparkContext}

import scala.collection.JavaConversions._

import java.io.File
/**
 * Spark Streaming job: consumes JSON access-log records from Kafka
 * (topic `pc_select_pc_lx_api_log`), parses them into `LxApiLogBeanRdd`
 * rows and appends them to the Hive table `dl_hic_seq.lx_api_log`
 * using dynamic partitioning. Offsets are committed manually after each
 * micro-batch (at-least-once semantics).
 */
object SparkKafka2HiveV1 {

  // Cluster-specific Hadoop/Hive client config files, expected on the classpath.
  val cluster = "qdcdh"
  val hadoopXml: String = cluster + "-" + "core-site.xml"
  val hdfsXml: String = cluster + "-" + "hdfs-site.xml"
  val hiveXml: String = cluster + "-" + "hive-site.xml"

  // Kerberos credentials used for the HDFS/Hive sink (and the optional
  // SASL Kafka source — see the commented-out consumer config below).
  val keytabString = "./pub_pc_select.keytab"
  val principal = "pub_pc_select"

//  private val LOG = org.apache.log4j.Logger.getLogger("SparkKafka2HiveV1")

  def main(args: Array[String]): Unit = {

    val sparkConf = new SparkConf()

    // Copy hive-site.xml entries into SparkConf, skipping any key that
    // mentions "spark" so existing spark.* settings are not clobbered.
    val hiveConfiguration = new Configuration()
    hiveConfiguration.addResource(hiveXml)
    hiveConfiguration
      .filterNot(_.getKey.contains("spark"))
      .foreach(entry => sparkConf.set(entry.getKey, entry.getValue))

    // Required for the dynamic-partition insert performed in yourCalculation.
    sparkConf.set("hive.exec.dynamic.partition", "true")
    sparkConf.set("hive.exec.dynamic.partition.mode", "nonstrict")

//    sparkConf.setMaster("local[2]")
//    sparkConf.setAppName("SparkKafka2HiveV1")

    val sc = new SparkContext(sparkConf)
    val ssc = new StreamingContext(sc, Seconds(10))

    val ss = SparkSession.builder.config(sparkConf)
//      .master("local[2]")
      .enableHiveSupport()
      .getOrCreate

    // JAAS config — only needed when consuming from a Kerberized Kafka
    // cluster; kept for the SASL_PLAINTEXT variant commented out below.
    val jaasConfig = "com.sun.security.auth.module.Krb5LoginModule " +
      "required serviceName='kafka' useKeyTab=true storeKey=true " +
      "keyTab='" + keytabString + "' principal='" + principal + "' renewTicket=true;"

//    Kerberized-Kafka variant of the consumer configuration:
//    val kafkaParams = Map[String, Object](
//      "bootstrap.servers" -> "10.163.205.94:9094,10.163.205.96:9094,10.163.205.95:9094",
//      "key.deserializer" -> classOf[StringDeserializer],
//      "value.deserializer" -> classOf[StringDeserializer],
//      "group.id" -> "pc_select_pc_lx_api_log_01502636",
//      "auto.offset.reset" -> "latest",
//      "enable.auto.commit" -> (false: java.lang.Boolean),
//      "security.protocol" -> "SASL_PLAINTEXT",
//      "sasl.jaas.config" -> jaasConfig
//    )

    // Alternative broker lists kept for reference (PS = SASL listener).
    val kafkaServersWALI = "10.163.205.94:9092,10.163.205.96:9092,10.163.205.95:9092"
    val kafkaServersPS = "10.163.205.94:9094,10.163.205.96:9094,10.163.205.95:9094"
    val kafkaServers = "10.205.91.70:9092,10.205.91.71:9092,10.205.91.72:9092,10.205.91.73:9092,10.205.91.74:9092,10.205.91.75:9092"

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> kafkaServersWALI,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "pc_select_pc_lx_api_log_01502636",
      "auto.offset.reset" -> "latest",
      // Offsets are committed manually (commitAsync below) only after the
      // batch has been processed — at-least-once delivery.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("pc_select_pc_lx_api_log")
    val stream = KafkaUtils.createDirectStream[String, String](
      ssc,
      PreferConsistent,
      Subscribe[String, String](topics, kafkaParams)
    )

    stream.foreachRDD { (rdd, time) =>
      val offsetRanges = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

      // Total number of records in this micro-batch across all partitions.
      // (Replaces the var + foreach accumulator; `.array` round-trip dropped.)
      val allLagOffset = offsetRanges.map(r => r.untilOffset - r.fromOffset).sum
      println("总积压量 ：" + allLagOffset)

      // FIX: `rdd` is never null inside foreachRDD — the useful guard is
      // skipping empty batches so we don't launch a no-op Hive insert job.
      if (!rdd.isEmpty()) {
        yourCalculation(rdd, time, ss)
      }

      // Commit only after the batch has been written.
      stream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
    }

    ssc.start() // Start the computation
    ssc.awaitTermination() // Wait for the computation to terminate
  }

  /**
   * Parses one micro-batch of raw Kafka records into `LxApiLogBeanRdd` rows
   * and appends them to `dl_hic_seq.lx_api_log` via a dynamic-partition
   * insert. Records that cannot be parsed (no "message" field, or an empty
   * status) are dropped.
   *
   * @param rdd  raw consumer records for this batch
   * @param time batch time (currently unused; kept for the foreachRDD contract)
   * @param ss   Hive-enabled SparkSession used for the insert
   */
  def yourCalculation(rdd: RDD[ConsumerRecord[String, String]], time: Time, ss: SparkSession): Unit = {

    import ss.implicits._

    val rows = rdd.map { record =>
      val raw = record.value()

      // Fallback bean for unparseable records; its empty status field causes
      // it to be removed by the filter below. Built inside the closure so the
      // task stays serializable without capturing the enclosing object.
      val fallback = LxApiLogBeanRdd.apply(
        "", "", "", "", "",
        0, 0,
        "", "", "", "", "",
        0
      )

      if (raw.contains("message")) {
        val log: LxApiLogBean = FastJsonUtil.getLxApiLogBean(raw)
        if (!"".equals(log.getStatus)) {
          LxApiLogBeanRdd.apply(
            log.getRequestDate,
            log.getInterfaceName,
            log.getRemoteAddr,
            log.getRequestMethod,
            log.getStatus,
            log.getRequestTime,
            log.getBodyBytesSent,
            log.getRemarks,
            log.getParam,
            log.getUpstreamAddr,
            log.getUpstreamStatus,
            log.getUpstreamResponseTime,
            log.getDatePartition
          )
        } else fallback
      } else fallback
    }.filter(b => !"".equals(b.status))
      .toDF()
      .repartition(3)

    // FIX: the DataFrame is materialized twice (count here, insert below);
    // cache it so the Kafka batch is not re-read and re-parsed.
    rows.cache()
    println(rows.count())

    rows.createOrReplaceTempView("tmp_log")

    // Propagate the target cluster's core-site/hdfs-site into the running
    // context so the insert writes to the correct (Kerberized) HDFS.
    val hadoopConfiguration = new Configuration()
    hadoopConfiguration.addResource(hadoopXml)
    hadoopConfiguration.addResource(hdfsXml)
    hadoopConfiguration.foreach { entry =>
      ss.sparkContext.hadoopConfiguration.set("spark.hadoop." + entry.getKey, entry.getValue)
      ss.sparkContext.hadoopConfiguration.set(entry.getKey, entry.getValue)
    }

    // Kerberos login for the HDFS/Hive write.
    println("setting UGI conf")
    UserGroupInformation.setConfiguration(hadoopConfiguration)
    val keytab = new File(keytabString)
    println("Ability to read keytab '" + keytabString + "' : " + keytab.canRead)
    println("logging in via UGI and keytab.")
    // FIX: loginUserFromKeytabAndReturnUGI returned a UGI that was discarded,
    // so the login never took effect. loginUserFromKeytab sets the static
    // login user consulted by subsequent Hadoop/Hive client calls.
    UserGroupInformation.loginUserFromKeytab(principal, keytabString)
    println("finished login attempt via UGI and keytab. security init " + UserGroupInformation.isSecurityEnabled)

    // Dynamic-partition append (enabled via the hive.exec.dynamic.* conf above).
    ss.sql("insert into dl_hic_seq.lx_api_log Select * from tmp_log DISTRIBUTE BY datePartition")

    rows.unpersist()
  }
}
