package com.demo

import java.time.LocalDateTime

import com.kafka.streaming.KafkaOffsetManager
import kafka.api.OffsetRequest
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * Gracefully stops a Spark Streaming job via an HDFS marker file.
  * Created by zhangbn on 2018/09/05
  */
object TestHDFS {
  val log = org.apache.log4j.LogManager.getLogger("TestHDFS")
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

  /**
    * Job entry point: builds the StreamingContext, starts the streaming
    * computation, and blocks until an HDFS marker file requests a graceful
    * shutdown (see [[stopByMarkFile]]).
    *
    * @param args command-line arguments (currently unused)
    */
  def main(args: Array[String]): Unit = {
    // Build the StreamingContext (Kafka direct stream + processing graph).
    val ssc = createStreamingContext()
    val startTime: Long = System.currentTimeMillis
    println(s"[${LocalDateTime.now()}]  SparkStreaming消费Kafka作业启动.......................  now_ms=${startTime}")
    // start() has a side effect — keep the parentheses per Scala convention.
    ssc.start()
    // Poll for the shutdown marker file; stops the context gracefully when it appears.
    stopByMarkFile(ssc)
    ssc.awaitTermination()
  }

  /**
    * Supervises the running StreamingContext: every 5 minutes checks whether
    * the shutdown marker file exists, and if so stops the context gracefully.
    * Blocks until the context has terminated.
    *
    * @param ssc the running StreamingContext to supervise
    */
  def stopByMarkFile(ssc: StreamingContext): Unit = {
    // Scan for the marker file every 5 minutes (300 seconds).
    val intervalMills = 300 * 1000
    var isStop = false
    // NOTE(review): despite the name, this looks like a local-style path —
    // confirm it resolves correctly against the default (HDFS) filesystem.
    val hdfs_file_path = "/home/admin/work/zhangbn/streaming/job/stop_2"
    while (!isStop) {
      // Returns true once the context has terminated (or immediately if it already has).
      isStop = ssc.awaitTerminationOrTimeout(intervalMills)
      if (!isStop && isExistsMarkFile(hdfs_file_path)) {
        log.warn("2秒后开始关闭sparstreaming程序.....")
        println(s"[${LocalDateTime.now()}]  2秒后开始关闭sparstreaming程序..... now_ms=${System.currentTimeMillis()}")
        // Small grace period before initiating shutdown.
        Thread.sleep(2000)
        // Stop the StreamingContext AND the underlying SparkContext, letting
        // in-flight batches finish first (graceful shutdown). Named args make
        // the two booleans unambiguous.
        ssc.stop(stopSparkContext = true, stopGracefully = true)
        // Exit the loop right away instead of waiting on another timeout round.
        isStop = true
      }
    }
  }

  /**
    * Checks whether the shutdown marker file exists on the filesystem that
    * the given path resolves to.
    *
    * @param hdfsFlie path of the marker file
    * @return true if the file exists
    */
  def isExistsMarkFile(hdfsFlie: String): Boolean = {
    val markerPath = new Path(hdfsFlie)
    markerPath.getFileSystem(new Configuration()).exists(markerPath)
  }


  /**
    * Builds the StreamingContext: configures Spark, wires up a direct Kafka
    * stream whose starting offsets come from ZooKeeper, and registers the
    * per-batch processing (save values to HDFS, then commit offsets to ZK).
    *
    * @return the fully wired (but not yet started) StreamingContext
    */
  def createStreamingContext(): StreamingContext = {
    // Whether the very first run (no saved offsets) should consume from the
    // beginning. NOTE(review): the name says "Lastest" but the flag selects
    // OffsetRequest.SmallestTimeString (i.e. the SMALLEST offset) below —
    // confirm which behavior is actually intended.
    val firstReadLastest = true
    val sparkConf = new SparkConf().setAppName("SparkStreaming-DirectKafka")
    // Shut down gracefully when the JVM receives a shutdown signal.
    sparkConf.set("spark.streaming.stopGracefullyOnShutdown", "true")
    // Enable backpressure so the ingestion rate adapts to processing speed.
    sparkConf.set("spark.streaming.backpressure.enabled", "true")
    // Initial rate (records/sec per receiver) before backpressure kicks in.
    sparkConf.set("spark.streaming.backpressure.initialRate", "100")
    // Max records per second read from each Kafka partition.
    sparkConf.set("spark.streaming.kafka.maxRatePerPartition", "100")

    // Kafka connection parameters; extended below depending on the flag.
    var kafkaParams = Map[String, String]("bootstrap.servers" -> "10.82.27.22:9092,10.82.27.23:9092,10.82.27.24:9092")

    if (firstReadLastest) {
      kafkaParams += ("auto.offset.reset" -> OffsetRequest.SmallestTimeString)
    }

    // ZooKeeper client used for offset persistence.
    val zkClient = new ZkClient("10.82.27.22:2181,10.82.27.23:2181,10.82.27.24:2181", 30000, 30000, ZKStringSerializer)
    // ZK node under which this job's offsets are stored.
    val zkOffsetPath = "/sparkstreaming/vehicle_2"
    // Topic names (comma-separated list; a single topic here).
    val topicSet = "vehicle".split(",").toSet


    val spark = SparkSession
      .builder()
      .config(sparkConf)
      .enableHiveSupport()
      .getOrCreate()

    // 60-second micro-batches.
    val ssc = new StreamingContext(spark.sparkContext, Seconds(60))

    // Direct Kafka stream that resumes from ZK-saved offsets when present.
    val kafkaStream: InputDStream[(String, String)] = createKafkaStream(ssc, kafkaParams, zkClient, zkOffsetPath, topicSet)

    // Per-batch processing: persist message values, then commit offsets.
    kafkaStream.foreachRDD(rdd => {
      if (!rdd.isEmpty()) {
        // Keep only the message value (drop the Kafka key).
        val rdd2 = rdd.map(_._2)
        println(s"[${LocalDateTime.now()}]  ..................show rdd COUNT=${rdd2.count()}...................")
        // NOTE(review): saveAsTextFile targets the SAME fixed path every
        // batch — a second non-empty batch should fail with
        // "output directory already exists"; verify this is handled upstream.
        rdd2.saveAsTextFile("/data01/Kafka2HDFS/vehicle_2")


//        import spark.implicits._
//        val rdd2DF = rdd2.toDF()
//        println(s"[${LocalDateTime.now()}]  ..................show rdd COUNT=${rdd2DF.count()}...................")
//        //rdd2DF.show(1)
//        rdd2DF.write.format("text").mode(SaveMode.Append).save("/data01/Kafka2HDFS/vehicle_1")
        println(s"[${LocalDateTime.now()}]  数据保存OK.........................................")
        // Commit this batch's offsets to ZK only AFTER the data is saved,
        // giving at-least-once delivery semantics.
        KafkaOffsetManager.saveOffsets(zkClient, zkOffsetPath, rdd)
      }
    })

    // Hand back the (not yet started) StreamingContext.
    ssc
  }


  /**
    * Creates the direct Kafka DStream, resuming from offsets stored in
    * ZooKeeper when they exist, or starting from the position selected by
    * `auto.offset.reset` on the first run.
    *
    * @param ssc          the StreamingContext to attach the stream to
    * @param kafkaParams  Kafka consumer configuration
    * @param zkClient     ZooKeeper client used to read saved offsets
    * @param zkOffsetPath ZK node holding the saved offsets
    * @param topic        topics to subscribe to (only one topic's offsets are handled)
    * @return the (key, value) input DStream
    */
  def createKafkaStream(ssc: StreamingContext,
                        kafkaParams: Map[String, String],
                        zkClient: ZkClient,
                        zkOffsetPath: String,
                        topic: Set[String]): InputDStream[(String, String)] = {

    // Only a single topic's offsets are supported: read the saved offset
    // string from ZK. NOTE(review): `topic.last` on an unordered Set is only
    // well-defined here because exactly one topic is used — fragile if the
    // caller ever passes more than one.
    val zkOffsetData = KafkaOffsetManager.readOffsets(zkClient, zkOffsetPath, topic.last)

    val kafkaStream = zkOffsetData match {
      case None => // No offsets found in ZK — this is the job's first start.
        log.info("系统第一次启动，没有读取到偏移量，默认从最小的offset开始消费")
        println(s"[${LocalDateTime.now()}]  系统第一次启动，没有读取到偏移量，默认从最小的offset开始消费")
        // Start a DirectStream from the position chosen by auto.offset.reset.
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topic)
      case Some(lastStopOffset) =>
        log.info("从zk中读取到偏移量,从上次的偏移量开始消费数据......" + Some(lastStopOffset))
        println(s"[${LocalDateTime.now()}]  从zk中读取到偏移量,从上次的偏移量开始消费数据......" + Some(lastStopOffset))
        // Extract (key, value) pairs from each consumed Kafka message.
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message())
        // Resume the DirectStream from the offsets saved at the last shutdown.
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, lastStopOffset, messageHandler)
    }
    // Return the created stream.
    kafkaStream
  }

}
