package com.xl.bigdata.spark.sql

import com.xl.bigdata.bean.LxApiLogBean
import com.xl.bigdata.spark.bean.LxApiLogBeanRdd
import com.xl.bigdata.spark.manager.manager.SparkSessionSingletonModel
import com.xl.bigdata.util.FastJsonUtil
import org.apache.commons.cli.{CommandLine, MissingOptionException, Option, Options, ParseException, PosixParser}
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}

object LxApiNginxLogApp {

  /**
   * Entry point. Consumes Nginx API-log JSON records from Kafka through a
   * direct Spark Streaming stream, parses each record into an
   * [[LxApiLogBeanRdd]], prints per-batch record counts and Kafka offset lag,
   * then manually commits the consumed offsets back to Kafka (at-least-once
   * processing: offsets are committed only after the batch was handled).
   *
   * NOTE(review): the parsed `rddBean` RDD is only counted — nothing here
   * calls `processRdd`/`processRddToHive`, so no data reaches Hive from this
   * path; confirm whether the insert step was intentionally disabled.
   *
   * @param args cluster-mode CLI options (parsed by [[initCmd]]); ignored
   *             entirely while `isStart` is false (local debugging)
   */
  def main(args: Array[String]): Unit = {

    // true: cluster mode (parse CLI args), false: local debugging with defaults
    val isStart = false

    // Stays null in local mode, so every `cmd != null` guard below falls
    // through to its hard-coded default value.
    var cmd: CommandLine = null
    if (isStart) {
      cmd = initCmd(args)
    }

    // HBase ZooKeeper quorum.
    // NOTE(review): hbaseZK is assigned but never read below — dead setting?
    var hbaseZK = "lx-es-08,lx-es-09,lx-es-10"; //"lx-cs-04,lx-cs-05,lx-cs-06"; // "lx-cdh-04,lx-cdh-05,lx-cdh-06,lx-cdh-07,lx-cdh-08";
    if (cmd != null && cmd.hasOption("zk_server")) {
      hbaseZK = cmd.getOptionValue("zk_server")
    }

    // Kafka consumer group id
    var kafkaGroupId = "lx_api_log0001_test01" //"gid20170050300002"
    if (cmd != null && cmd.hasOption("kafkaGroupId")) {
      kafkaGroupId = cmd.getOptionValue("kafkaGroupId")
    }

    // Kafka topic to subscribe to
    var kafkaTopic = "lx_api_log" //"pre_analysis_passinfos"
    if (cmd != null && cmd.hasOption("kafkaTopic")) {
      kafkaTopic = cmd.getOptionValue("kafkaTopic")
    }

    // Kafka bootstrap broker list
    var kafkaBrokerID = "lx-es-08:9092,lx-es-09:9092,lx-es-10:9092" //"lx-cs-04:9092,lx-cs-05:9092,lx-cs-06:9092"; //"lx-cdh-03:9092,lx-cdh-04:9092,lx-cdh-05:9092,lx-cdh-06:9092,lx-cdh-07:9092" //ConfigIndex.comKafkaBrokerIDs
    if (cmd != null && cmd.hasOption("kafkaBrokerID")) {
      kafkaBrokerID = cmd.getOptionValue("kafkaBrokerID")
    }

    // Hive warehouse directory (only used by the process* helpers below)
    var warehouseLocation = "hdfs://lx-es-06:8020/user/hive/warehouse"
    if (cmd != null && cmd.hasOption("warehouseLocation")) {
      warehouseLocation = cmd.getOptionValue("warehouseLocation")
    }

    // Where to start reading when the group has no committed offset.
    // Kafka 0.8 used largest/smallest; Kafka 0.10 uses latest/earliest.
    var sparkOffsetReset = "latest"
    if (cmd != null && cmd.hasOption("sparkOffsetReset")) {
      sparkOffsetReset = cmd.getOptionValue("sparkOffsetReset")
    }

    // Streaming micro-batch interval, in seconds
    var sparkBatchTime = 6
    if (cmd != null && cmd.hasOption("sparkBatchTime")) {
      sparkBatchTime = cmd.getOptionValue("sparkBatchTime").toInt
    }

    // Dead code kept for reference: earlier SparkSession-based bootstrap.
    //    var builder = SparkSession.builder()
    //    builder
    //      .appName("LxApiLogApp")
    //      .config("spark.sql.warehouse.dir", warehouseLocation)
    //      .config("spark.sql.broadcastTimeout", 10000)
    //      .config("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
    //
    //    // false: start in local mode
    //    if (!isStart) {
    //      builder.master("local[8]")
    //        .config("spark.streaming.kafka.maxRatePerPartition", "2")
    //    }

    //    val spark = builder.enableHiveSupport().getOrCreate()

    var sparkConf = new SparkConf()
      .setAppName("LxApiLogApp")
      .set("spark.streaming.stopGracefullyOnShutdown", "true") // finish in-flight batches on shutdown
      .set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")

    // Local mode: enable backpressure and cap the per-partition ingest rate
    // so a debugging session is not flooded.
    if (!isStart) {
      sparkConf.setMaster("local[2]")
      sparkConf.set("spark.streaming.backpressure.enabled", "true")
      sparkConf.set("spark.streaming.kafka.maxRatePerPartition", "2")
    }

    var sc = new SparkContext(sparkConf)

    val ssc = new StreamingContext(sc, Seconds(sparkBatchTime))

    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> kafkaBrokerID,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> kafkaGroupId,
      "auto.offset.reset" -> sparkOffsetReset,
      "enable.auto.commit" -> (false: java.lang.Boolean) // offsets are committed manually after each batch
    )

    val topics = Array(kafkaTopic)
    // Direct stream (no receiver): uses the Kafka consumer API directly,
    // the officially recommended and more efficient integration.
    val kafkaDStream = KafkaUtils.createDirectStream[String, String](
      ssc, // the StreamingContext
      LocationStrategies.PreferConsistent, // location strategy
      ConsumerStrategies.Subscribe[String, String](topics, kafkaParams) // consumer strategy
    )

    // foreachRDD is neither a transformation nor an action; the function
    // passed to it runs on the Driver once per batch interval.
    kafkaDStream.foreachRDD(rdd => {
      if (!rdd.isEmpty()) {

        // Offset ranges exist only on the first-hand KafkaRDD; capture them
        // here on the Driver before any transformation discards them.
        val offsetRanges: Array[OffsetRange] = rdd.asInstanceOf[HasOffsetRanges].offsetRanges

        val rddBean = rdd.map(json => {

          var lxApiLogBean: LxApiLogBean = null

          // Placeholder bean: records that fail the "message"/status checks
          // keep this empty shape and are dropped by the filter below.
          var inputBean = LxApiLogBeanRdd.apply(
            "",
            "",
            "",
            "",
            "",
            0,
            0,
            "",
            "",
            "",
            "",
            "",
            0
          )

          if (json.value().contains("message")) {
            lxApiLogBean = FastJsonUtil.getLxApiLogBean(json.value())
            if (!"".equals(lxApiLogBean.getStatus)) {
              inputBean = LxApiLogBeanRdd.apply(
                lxApiLogBean.getRequestDate,
                lxApiLogBean.getInterfaceName,
                lxApiLogBean.getRemoteAddr,
                lxApiLogBean.getRequestMethod,
                lxApiLogBean.getStatus,
                lxApiLogBean.getRequestTime,
                lxApiLogBean.getBodyBytesSent,
                lxApiLogBean.getRemarks,
                lxApiLogBean.getParam,
                lxApiLogBean.getUpstreamAddr,
                lxApiLogBean.getUpstreamStatus,
                lxApiLogBean.getUpstreamResponseTime,
                lxApiLogBean.getDatePartition
              )
            }
          }

          inputBean

        }).filter(b => {
          // keep only successfully parsed records (non-empty status)
          if (!"".equals(b.status))
            true
          else
            false
        })

        val rddBeanCount = rddBean.count()
        println("receive data count : " + rddBeanCount)

        // Log the per-partition and total offset spans consumed by this batch.
        var allLagOffset = 0L
        offsetRanges.array.foreach(offsetR => {

          val fromOffset = offsetR.fromOffset
          val untilOffset = offsetR.untilOffset
          val lagOffset = untilOffset - fromOffset
          allLagOffset = allLagOffset + lagOffset

          println(offsetR.toString() + " 积压量 ：" + lagOffset)

        })
        println("总积压量 ：" + allLagOffset)


        // Asynchronously commit this batch's offsets (from the Driver) into
        // Kafka's internal __consumer_offsets topic.
        kafkaDStream.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges) // committed on the Driver

      }
    })


    // start the streaming computation
    ssc.start()
    // block the driver thread so the application keeps running
    ssc.awaitTermination()

  }

  /**
   * Persists one batch of parsed API-log records into the Hive table
   * `lexin.lx_api_log`, distributed by the `datePartition` column.
   *
   * Bug fixed: the previous implementation declared a local
   * `var warehouseLocation = "hdfs://lx-es-06:8020/user/hive/warehouse"` that
   * shadowed the method parameter, so the caller-supplied warehouse location
   * was silently ignored. The parameter is now honored.
   *
   * @param rdd               batch of log beans to write
   * @param warehouseLocation Hive warehouse directory used to obtain the
   *                          singleton SparkSession
   */
  def processRdd(rdd: RDD[LxApiLogBeanRdd], warehouseLocation: String): Unit = {

    // Reuse one SparkSession per JVM — required when called from foreachRDD.
    val spark = SparkSessionSingletonModel.getInstance(warehouseLocation)

    import spark.implicits._

    spark.sql("show databases").collect().foreach(println)
    val wordsDataFrame = rdd.toDF()

    if (wordsDataFrame.count() > 0) {

      wordsDataFrame.createOrReplaceTempView("tmp_log")

      // Allow dynamic-partition inserts without a static partition spec.
      spark.sql("set hive.exec.dynamic.partition.mode=nonstrict")

      spark.sql("insert into lexin.lx_api_log Select * from tmp_log DISTRIBUTE BY datePartition")

    }

    // Explicitly release any cached blocks belonging to this batch RDD.
    rdd.unpersist()
  }

  /**
   * Writes one batch of log beans into the Hive table `lexin.lx_api_log`
   * (dynamic partitioning, distributed by `datePartition`), using a
   * SparkSession built — or fetched, if one already exists — right here.
   *
   * @param rdd               batch of log beans to write
   * @param warehouseLocation Hive warehouse directory for the session config
   */
  def processRddToHive(rdd: RDD[LxApiLogBeanRdd], warehouseLocation: String): Unit = {

    // getOrCreate returns the existing session when one is already active.
    val session =
      SparkSession
        .builder()
        .config("spark.sql.warehouse.dir", warehouseLocation)
        .config("spark.sql.broadcastTimeout", 1000)
        .config("spark.sql.shuffle.partitions", "1")
        .enableHiveSupport()
        .getOrCreate()

    import session.implicits._

    session.sql("show databases").collect().foreach(println)

    val logFrame = rdd.toDF()
    val hasRows = logFrame.count() > 0

    if (hasRows) {
      logFrame.createOrReplaceTempView("tmp_log")

      // Dynamic partitions must be permitted without a static partition key.
      session.sql("set hive.exec.dynamic.partition.mode=nonstrict")
      session.sql("insert into lexin.lx_api_log Select * from tmp_log DISTRIBUTE BY datePartition")
    }

    // Drop any cached data held for this batch RDD.
    rdd.unpersist()
  }

  // 初期化参数
  /**
   * Parses the cluster-mode command-line arguments.
   *
   * All seven options are mandatory single-value options. On any parse
   * failure the problem is printed and the JVM exits with status 1.
   *
   * Fixes: (1) previously only MissingOptionException was caught, so any
   * other ParseException (unrecognized option, missing argument) escaped and
   * crashed the caller with a raw stack trace; now every ParseException is
   * handled. (2) the kafkaBrokerID help text was a copy-paste of the
   * sparkBatchTime description; it now describes the broker list.
   *
   * @param args raw program arguments
   * @return the parsed [[CommandLine]] (the method exits the JVM on failure)
   */
  def initCmd(args: Array[String]): CommandLine = {

    var cmd: CommandLine = null

    val options: Options = new Options()

    // Registers one mandatory option that takes a single argument value.
    def addRequiredOption(name: String, description: String): Unit = {
      val opt = new Option(name, true, description)
      opt.setRequired(true)
      options.addOption(opt)
    }

    try {

      // ZooKeeper quorum
      addRequiredOption("zk_server", "输入zookeeper服务器地址 例如：--zk_server lx-cdh-01")
      // Kafka consumer group id
      addRequiredOption("kafkaGroupId", "输入kafkaGroupId 例如：--kafkaGroupId PhoneServiceForecastApp001")
      // Spark micro-batch interval (seconds)
      addRequiredOption("sparkBatchTime", "输入spark批处理的时间间隔 例如：--sparkBatchTime 20")
      // Kafka broker list (description fixed — was copy-pasted from sparkBatchTime)
      addRequiredOption("kafkaBrokerID", "输入Kafka broker地址 例如：--kafkaBrokerID lx-cs-04:9092,lx-cs-05:9092,lx-cs-06:9092")
      // Kafka auto.offset.reset policy
      addRequiredOption("sparkOffsetReset", "输入spark消费kafka的位置 例如：--sparkOffsetReset latest/earliest")
      // Hive warehouse directory
      addRequiredOption("warehouseLocation", "输入Hive文件位置 例如：--warehouseLocation hdfs://lx-cdh-01:8020/user/hive/warehouse")
      // Kafka topic name
      addRequiredOption("kafkaTopic", "输入Topic名称 例如：--kafkaTopic lx_api_log")

      // NOTE: PosixParser is deprecated since commons-cli 1.3; kept to avoid
      // any change in parsing behavior (DefaultParser is the replacement).
      val parser: PosixParser = new PosixParser()

      cmd = parser.parse(options, args)

    } catch {

      // ParseException covers MissingOptionException plus every other
      // commons-cli failure mode (unrecognized option, missing argument, ...).
      case ex: ParseException => {

        println(ex)

        System.exit(1)
      }
    }

    cmd
  }

}
