package com.xl.bigdata.spark.ss

import com.xl.bigdata.bean.LxApiLogBean
import com.xl.bigdata.spark.bean.LxApiLogBeanRdd
import com.xl.bigdata.spark.manager.manager.SparkSessionSingletonModel
import com.xl.bigdata.util.FastJsonUtil
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.streaming.kafka010.{CanCommitOffsets, ConsumerStrategies, HasOffsetRanges, KafkaUtils, LocationStrategies, OffsetRange}
import org.apache.spark._
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming._

object LxApiNginxLogApp {

  /** Entry point: consumes nginx API-log JSON records from Kafka with the Spark Streaming
    * direct stream, parses each record into an [[LxApiLogBeanRdd]], and manually commits
    * Kafka offsets after each batch (at-least-once semantics).
    */
  def main(args: Array[String]): Unit = {

    // NOTE: app name fixed — the original said "NetworkWordCount", a copy-paste leftover
    // that misidentified this job in the Spark UI.
    val conf = new SparkConf().setMaster("local[2]").setAppName("LxApiNginxLogApp")
    val ssc = new StreamingContext(conf, Seconds(1))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> "lx-es-08:9092,lx-es-09:9092,lx-es-10:9092",
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> "lx_api_log0001_test01",
      "auto.offset.reset" -> "latest",
      // Disable auto-commit: offsets are committed manually below, only after a batch
      // has been processed, so records are not lost if the job dies mid-batch.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    val topics = Array("lx_api_log")

    // Direct stream (no receiver): Spark talks to Kafka's consumer API directly,
    // the officially recommended and more efficient integration.
    val kafkaDStream = KafkaUtils.createDirectStream[String, String](
      ssc,                                                              // StreamingContext
      LocationStrategies.PreferConsistent,                              // location strategy
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams) // consumer strategy
    )

    // The function passed to foreachRDD is invoked on the Driver once per batch interval
    // (foreachRDD itself is neither a Transformation nor an Action).
    kafkaDStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {

        // Only the first-hand KafkaRDD carries offset ranges (HasOffsetRanges); grab them
        // on the Driver before any transformation loses that type.
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        val rddBean = rdd.map { record =>
          val value = record.value()
          // Records lacking a "message" field cannot be parsed; map them to an empty
          // placeholder bean so downstream processing sees a uniform type.
          if (value.contains("message")) {
            val parsed: LxApiLogBean = FastJsonUtil.getLxApiLogBean(value)
            LxApiLogBeanRdd.apply(
              parsed.getRequestDate,
              parsed.getInterfaceName,
              parsed.getRemoteAddr,
              parsed.getRequestMethod,
              parsed.getStatus,
              parsed.getRequestTime,
              parsed.getBodyBytesSent,
              parsed.getRemarks,
              parsed.getParam,
              parsed.getUpstreamAddr,
              parsed.getUpstreamStatus,
              parsed.getUpstreamResponseTime,
              parsed.getDatePartition
            )
          } else {
            LxApiLogBeanRdd.apply("", "", "", "", "", 0, 0, "", "", "", "", "", 0)
          }
        }

        println("receive data count : " + rddBean.count())

        // `.array` on an Array was redundant; iterate the offset ranges directly.
        offsetRanges.foreach(offsetRange => println(offsetRange.toString))

        // Asynchronously commit the processed offsets back to Kafka's internal
        // __consumer_offsets topic (executed on the Driver).
        kafkaDStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
      }
    }

    // Start the streaming computation.
    ssc.start()
    // Block the Driver so the job keeps running until terminated.
    ssc.awaitTermination()

  }

  /** Writes one micro-batch of parsed log beans into the dynamically partitioned Hive
    * table `lexin.lx_api_log`.
    *
    * @param rdd               beans for one micro-batch
    * @param warehouseLocation Hive warehouse location used to build the SparkSession,
    *                          e.g. "hdfs://lx-es-06:8020/user/hive/warehouse"
    */
  def processRdd(rdd: RDD[LxApiLogBeanRdd], warehouseLocation: String): Unit = {

    // BUG FIX: the original shadowed the `warehouseLocation` parameter with a hardcoded
    // local var, silently ignoring whatever the caller passed. Honor the parameter, and
    // keep the old hardcoded path only as a fallback for null/empty input.
    val location = Option(warehouseLocation)
      .filter(_.nonEmpty)
      .getOrElse("hdfs://lx-es-06:8020/user/hive/warehouse")
    val spark = SparkSessionSingletonModel.getInstance(location)

    import spark.implicits._

    spark.sql("show databases").collect().foreach(println)
    val wordsDataFrame = rdd.toDF()

    if (wordsDataFrame.count() > 0) {

      wordsDataFrame.createOrReplaceTempView("tmp_log")

      // Allow dynamic-partition inserts without a static partition spec.
      spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")

      // e.g. insert into yisadata.pass_info_10 Select * from tmp_pass_info DISTRIBUTE BY dateid
      spark.sql("insert into lexin.lx_api_log Select * from tmp_log DISTRIBUTE BY datePartition")

    }

    // Release any cached data held for this batch's RDD.
    rdd.unpersist()
  }

}
