package com.kafka.streaming

import java.sql.Connection
import java.time.LocalDateTime

import kafka.api.OffsetRequest
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.Path
import org.apache.log4j.{Level, Logger}
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.apache.spark.streaming.{Seconds, StreamingContext}

/**
  * 优雅的停止spark streaming任务
  * Created by zhangbn on 2018/09/05
  */
object SparkDirectStreaming {

  val log = org.apache.log4j.LogManager.getLogger("SparkDirectStreaming")
  Logger.getLogger("org.apache.spark").setLevel(Level.WARN)
  Logger.getLogger("org.eclipse.jetty.server").setLevel(Level.OFF)

  /**
    * Entry point: builds the StreamingContext, starts the job and blocks
    * until either the job terminates or the HDFS marker file triggers a
    * graceful shutdown.
    */
  def main(args: Array[String]): Unit = {

    // Local-dev overrides for running from a Windows workstation against a
    // remote cluster. NOTE(review): move to external config before production.
    System.setProperty("hadoop.home.dir", "E:\\hadoop-common-2.7.3-bin-master")
    System.setProperty("HADOOP_USER_NAME", "root")

    // Build the StreamingContext (Kafka wiring and per-batch logic inside).
    val ssc = createStreamingContext()

    // Start the streaming job.
    val startTime: Long = System.currentTimeMillis
    println(s"[${LocalDateTime.now()}]  SparkStreaming消费Kafka作业启动.......................  now_ms=${startTime}")
    // start() has a side effect, so call it with parentheses (Scala convention).
    ssc.start()

    // Poll HDFS for a stop-marker file; stops the context gracefully when found.
    stopByMarkFile(ssc)

    ssc.awaitTermination()
  }

  /**
    * Supervises the running job: blocks until termination, polling HDFS on
    * each timeout for a marker file; when the file appears, stops the
    * context gracefully (in-flight batches are allowed to finish).
    *
    * @param ssc the running StreamingContext to supervise
    */
  def stopByMarkFile(ssc: StreamingContext): Unit = {
    // Poll every 900 seconds (15 minutes) — matches the 900s batch interval,
    // so the marker file is checked roughly once per batch.
    // (The original comment claimed "every 10 seconds", which was wrong.)
    val intervalMills: Long = 900 * 1000L
    var isStop = false
    val hdfsFilePath = "/data01/kafka2hdfs/spark/stop"
    while (!isStop) {
      // awaitTerminationOrTimeout returns true once the context terminated.
      isStop = ssc.awaitTerminationOrTimeout(intervalMills)
      if (!isStop && isExistsMarkFile(hdfsFilePath)) {
        log.warn("2秒后开始关闭sparstreaming程序.....")
        println(s"[${LocalDateTime.now()}]  2秒后开始关闭sparstreaming程序..... now_ms=${System.currentTimeMillis()}")
        Thread.sleep(2000)
        // stop(stopSparkContext = true, stopGracefully = true)
        ssc.stop(true, true)
        // Exit the loop right away instead of waiting up to another full
        // interval for awaitTerminationOrTimeout to observe the shutdown.
        isStop = true
      }
    }
  }

  /**
    * Checks whether the shutdown marker file exists on HDFS.
    *
    * @param hdfsFlie path of the marker file (parameter name keeps the
    *                 original typo to preserve the public signature;
    *                 "hdfsFile" was intended)
    * @return true if the marker file exists
    */
  def isExistsMarkFile(hdfsFlie: String): Boolean = {
    val conf = new Configuration()
    val path = new Path(hdfsFlie)
    val fs = path.getFileSystem(conf)
    // Query HDFS once and reuse the result — the original issued the
    // exists() RPC twice (once for the log line, once for the return).
    val exists = fs.exists(path)
    println(s"[${LocalDateTime.now()}]  文件是否存在：" + exists)
    exists
  }

  /**
    * Builds the StreamingContext wired to a Kafka direct stream whose
    * offsets are tracked manually in ZooKeeper (at-least-once semantics:
    * offsets are committed only after the batch is persisted to HDFS).
    *
    * @return the fully configured (but not yet started) StreamingContext
    */
  def createStreamingContext(): StreamingContext = {

    // Run with a local master (dev mode) instead of a cluster manager.
    val isLocal = true

    // On the very first start (no offsets in ZK) begin from the LATEST offset.
    val firstReadLastest = true

    val sparkConf = new SparkConf().setAppName("Direct-Kafka-Offset-to-Zookeeper")

    if (isLocal) {
      sparkConf.setMaster("local[4]")
    }

    // --- Streaming tuning -------------------------------------------------

    // Shut down gracefully on SIGTERM so in-flight batches complete.
    sparkConf.set("spark.streaming.stopGracefullyOnShutdown", "true")
    // Enable backpressure so the ingest rate adapts to processing speed.
    sparkConf.set("spark.streaming.backpressure.enabled", "true")
    // Cap for the very first batch, before backpressure has a rate estimate.
    sparkConf.set("spark.streaming.backpressure.initialRate", "5000000")
    // Per-partition per-second ceiling on records pulled from Kafka.
    sparkConf.set("spark.streaming.kafka.maxRatePerPartition", "10000")

    // --- Kafka / ZooKeeper wiring ----------------------------------------

    var kafkaParams = Map[String, String]("bootstrap.servers" -> "hadoop01:9092,hadoop02:9092,hadoop03:9092")

    if (firstReadLastest) {
      // BUG FIX: the flag's intent is "start from the LATEST offset", but the
      // original passed OffsetRequest.SmallestTimeString ("smallest" = the
      // EARLIEST offset). LargestTimeString ("largest") is the correct value.
      kafkaParams += ("auto.offset.reset" -> OffsetRequest.LargestTimeString)
    }

    // ZKStringSerializer is required here; with the default serializer the
    // offsets written to ZK are garbled bytes instead of readable strings.
    val zkClient = new ZkClient("hadoop01:2181,hadoop02:2181,hadoop03:2181", 30000, 30000, ZKStringSerializer)
    // ZK node under which this job's offsets are stored.
    val zkOffsetPath = "/sparkstreaming/vehicle_0906_01"
    // Topic(s) to consume; comma-separated list.
    val topicSet = "vehicle".split(",").toSet

    // Batch interval: one batch every 900 seconds (15 minutes).
    val scc = new StreamingContext(sparkConf, Seconds(900))

    // Direct stream resuming from the offsets stored in ZK (if any).
    val rdds: InputDStream[(String, String)] = createKafkaStream(scc, kafkaParams, zkClient, zkOffsetPath, topicSet)

    // --- Per-batch processing ---------------------------------------------
    // (A commented-out MySQL batch-insert example previously lived here;
    // see the `insert` helper below for single-row JDBC writes.)
    rdds.foreachRDD(rdd => {
      if (!rdd.isEmpty()) {
        println(s"[${LocalDateTime.now()}]  rdd的大小:${rdd.count()}")
        // Print one record to the console for debugging.
        rdd.take(1).foreach(println)
        // Persist the message values (keys dropped) to HDFS.
        rdd.map(_._2).saveAsTextFile("/data01/kafka2hdfs/vehicle")
        println(s"[${LocalDateTime.now()}]  处理逻辑OK***********************************")

        // Commit this batch's offsets to ZK. Runs on the driver. Doing it
        // AFTER the save gives at-least-once semantics: a crash between the
        // save and this line replays the batch on restart.
        KafkaOffsetManager.saveOffsets(zkClient, zkOffsetPath, rdd)
      }
    })

    // Return the StreamingContext for the caller to start.
    scc
  }


  /**
    * Creates the Kafka direct stream, resuming from offsets previously
    * saved in ZooKeeper when present; otherwise falls back to the
    * "auto.offset.reset" policy in kafkaParams.
    *
    * @param ssc          StreamingContext to attach the stream to
    * @param kafkaParams  Kafka consumer configuration
    * @param zkClient     connected ZooKeeper client
    * @param zkOffsetPath ZK path where offsets are persisted
    * @param topic        topics to consume — NOTE: only ONE topic's offsets
    *                     are handled (offsets are read for `topic.last` only)
    * @return InputDStream[(String, String)] of (key, message) pairs
    */
  def createKafkaStream(ssc: StreamingContext,
                        kafkaParams: Map[String, String],
                        zkClient: ZkClient,
                        zkOffsetPath: String,
                        topic: Set[String]): InputDStream[(String, String)] = {

    // Only a single topic is supported: read the stored offset string for
    // the last topic in the set. (Set ordering — fine for a singleton set,
    // fragile for multiple topics.)
    val zkOffsetData = KafkaOffsetManager.readOffsets(zkClient, zkOffsetPath, topic.last)

    val kafkaStream = zkOffsetData match {
      case None => // No offset found in ZK: this is the very first start.
        log.info("系统第一次启动，没有读取到偏移量，默认就从最小的offset开始消费")
        println(s"[${LocalDateTime.now()}] 系统第一次启动，没有读取到偏移量，默认就从最小的offset开始消费")
        // Start position is governed by "auto.offset.reset" in kafkaParams.
        // NOTE(review): the log line claims "smallest"; verify it matches the
        // actual auto.offset.reset value configured by the caller.
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topic)
      case Some(lastStopOffset) =>
        // NOTE(review): re-wrapping in Some(...) just for logging prints
        // "Some(Map(...))" — harmless but redundant.
        log.info("从zk中读取到偏移量,从上次的偏移量开始消费数据......" + Some(lastStopOffset))
        println(s"[${LocalDateTime.now()}]  从zk中读取到偏移量,从上次的偏移量开始消费数据......" + Some(lastStopOffset))
        // Map each raw Kafka record to a (key, message) tuple.
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message())
        // Resume exactly from the offsets recorded at the previous shutdown.
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, lastStopOffset, messageHandler)

    }
    // Return the created stream.
    kafkaStream
  }


  /**
    * Inserts one Tuple3 row via the given parameterized SQL.
    * Failures are logged and swallowed (best-effort, matching the original
    * contract — callers do not see exceptions).
    *
    * @param conn open JDBC connection (owned by the caller; not closed here)
    * @param sql  parameterized INSERT with exactly three '?' placeholders
    * @param data the three column values, bound in order
    */
  def insert(conn: Connection, sql: String, data: (String, String, String)): Unit = {
    import scala.util.control.NonFatal
    try {
      val ps = conn.prepareStatement(sql)
      try {
        ps.setString(1, data._1)
        ps.setString(2, data._2)
        ps.setString(3, data._3)
        ps.executeUpdate()
      } finally {
        // Always release the statement, even when executeUpdate throws —
        // the original leaked the PreparedStatement on failure.
        ps.close()
      }
    } catch {
      // NonFatal instead of Exception: lets OutOfMemoryError,
      // InterruptedException, etc. propagate instead of being swallowed.
      case NonFatal(e) =>
        log.error("Error in execution of insert. " + e.getMessage)
    }
  }


}
