package cn.oldsix.spark.streaming.spark

import cn.oldsix.spark.utils.KafkaOffsetManager
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
import org.apache.log4j.LogManager
import org.apache.spark.SparkConf
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.kafka.KafkaUtils
import org.spark_project.jetty.server.{Request, Server}
import org.spark_project.jetty.server.handler.{AbstractHandler, ContextHandler}
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import org.apache.hadoop.conf.Configuration
import org.apache.hadoop.fs.{FileSystem, Path}

/*
 * @Author : Wu.D.J
 * @Create : 2017.09.15
 **/
object SparkStreamingApplication {

    // log4j logger shared by every member of this object
    val log = LogManager.getLogger("SparkStreamingApplication")

    /**
      * Entry point: builds the StreamingContext, starts it, and blocks while
      * polling HDFS for a stop-marker path that triggers a graceful shutdown.
      *
      * @param args command-line arguments (unused)
      */
    def main(args: Array[String]): Unit = {

        val ssc = functionToCreateContext

        ssc.start()

        // Stop the application when an HDFS marker path appears.
        // This call blocks, polling the path, until the context is stopped.
        stopByHDFSMarkFile("/streaming/stop/wc-app-4", ssc)

        // Wait for the streaming computation to fully terminate
        ssc.awaitTermination()

    }

    /**
      * Builds the StreamingContext: configures Spark, wires a Kafka direct
      * stream whose offsets are managed manually in ZooKeeper, and attaches the
      * per-batch word-count job.
      *
      * @return a fully configured, not-yet-started StreamingContext
      */
    def functionToCreateContext() : StreamingContext = {
        val sparkConf : SparkConf = new SparkConf().setAppName("wc-app-4")
        // Stop the streaming application gracefully on JVM shutdown
        sparkConf.set("spark.streaming.stopGracefullyOnShutdown", "true")
        // Kryo serialization for better performance; register the POJOs needing
        // serialization via a custom KryoRegistrator implementation
        //sparkConf.set("spark.serializer", "org.apache.spark.serializer.KryoSerializer")
        //sparkConf.set("spark.kryo.registrator", "cn.oldsix.spark.streaming.spark.MyKryoRegistrator")
        // Cap the number of records each executor reads from Kafka per partition per second
        //sparkConf.set("spark.streaming.kafka.maxRatePerPartition","2")
        // Enable backpressure (adaptive rate limiting)
        //sparkConf.set("spark.streaming.backpressure.enabled", "true")
        // Maximum ingestion rate for the very first batch
        //sparkConf.set("spark.streaming.backpressure.initialRate", "100")
        val topic : String = "wordcount"

        val brokerList : String = "oldsix:9092"

        val groupId : String = "wc-app-4"

        val zkOffsetPath : String = "/streaming/offset/wc-app-4"

        // val, not var: the parameter map is never reassigned; reuse brokerList
        // instead of duplicating the broker address literal
        val kafkaParams : Map[String, String] = Map[String, String]("metadata.broker.list" -> brokerList, "auto.offset.reset" -> "smallest", "group.id" -> groupId)

        // Last constructor argument is ZKStringSerializer so offsets written to ZK are not garbled
        val zkClient : ZkClient = new ZkClient("oldsix:2181", 10000, 10000, ZKStringSerializer)

        // Checkpoint-based recovery breaks when the application code is updated
        // (checkpoint deserialization fails), so checkpointing is abandoned and
        // Kafka consumer offsets are managed manually instead: each batch commits
        // its offsets to ZK, which also keeps Kafka monitoring tools accurate.
        val ssc : StreamingContext = new StreamingContext(sparkConf, Seconds(30))

//        ssc.checkpoint(checkpointDirectory)

        val rdds : InputDStream[(String, String)] = createKafkaStream(ssc, kafkaParams, zkClient, zkOffsetPath, topic.split(",").toSet)

        // Word count per batch; offsets are committed to ZK only AFTER the
        // batch's output has been written, so a failure replays the batch
        rdds.foreachRDD(rdd => {
            val topicData = rdd.map(_._2)
            val wordAndOne = topicData.flatMap(_.split(" ")).map((_,1))
            val result = wordAndOne.reduceByKey(_+_)
            result.saveAsTextFile("/home/oldsix/result/")
            KafkaOffsetManager.setConsumerOffsetsToZK(zkClient, zkOffsetPath, rdd)
        })

        /* Alternative stateful variant (requires checkpointing to be enabled):
        val topicData = rdds.map(_._2)
        val wordAndOne = topicData.flatMap(_.split(" ")).map((_,1))
        val result = wordAndOne.reduceByKey(_+_).updateStateByKey((values : Seq[Int], state : Option[Int]) => {
            var newValue = state.getOrElse(0)
            for (value <- values) {
                newValue += value
            }
            Option(newValue)
        })
        result.print()*/

        ssc
    }

    /**
      * Creates a Kafka direct stream, resuming from consumer offsets previously
      * saved in ZooKeeper when present, or falling back to the configured
      * `auto.offset.reset` position on the application's first start.
      *
      * NOTE(review): saved offsets are looked up for `topics.last` only — if
      * more than one topic is passed, the stored offsets of the other topics
      * are ignored. Confirm whether multi-topic resume is required.
      *
      * @param ssc StreamingContext
      * @param kafkaParams Kafka consumer parameters (brokers, group id, reset policy)
      * @param zkClient ZooKeeper client used to read stored offsets
      * @param zkOffsetPath ZK path where consumer offsets are stored
      * @param topics Kafka topic names to subscribe to
      * @return Kafka input stream of (message key, message value) pairs
      */
    def createKafkaStream(ssc : StreamingContext,
                          kafkaParams : Map[String, String],
                          zkClient: ZkClient,
                          zkOffsetPath : String,
                          topics : Set[String]) : InputDStream[(String, String)] = {
        val zkOffsetData = KafkaOffsetManager.getConsumerOffsetsFromZK(zkClient, zkOffsetPath, topics.last)
        val kafkaStream = zkOffsetData match {
            case Some(lastOffset) =>
                log.info("......从zk中读取到offset偏移量,从上次的offset偏移量开始消费数据......")
                // Resume: build the stream from the explicit offset map
                val messageHandler = (mmd : MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
                KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, lastOffset, messageHandler)
            case None =>
                log.info("......应用第一次启动,未读取到offset偏移量,根据配置参数读取topic的offset开始消费......")
                // First start: let Kafka pick the offsets per "auto.offset.reset"
                KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
        }
        kafkaStream
    }

    /**
      * Jetty handler that gracefully shuts down the spark-streaming application
      * when any HTTP request reaches it, then reports success to the caller.
      *
      * @param ssc StreamingContext to stop
      */
    class CloseStreamingAppHandler(ssc:StreamingContext) extends AbstractHandler {
        override def handle(s: String, baseRequest: Request, req: HttpServletRequest, response: HttpServletResponse): Unit ={
            log.info("......开始关闭spark-streaming应用程序......")
            // Stop the underlying SparkContext too, and let in-flight batches drain first
            ssc.stop(stopSparkContext = true, stopGracefully = true)
            response.setContentType("text/html; charset=utf-8")
            response.setStatus(HttpServletResponse.SC_OK)
            val writer = response.getWriter()
            writer.println("spark-streaming application close success")
            baseRequest.setHandled(true)
            log.info("......spark-streaming应用程序关闭成功.....")
        }
    }

    /**
      * Starts a daemon Jetty server whose /close endpoint stops the streaming
      * application.
      *
      * @param httpPort HTTP port the server listens on
      * @param ssc StreamingContext handed to the close handler
      */
    def daemonHttpServer(httpPort : Int, ssc : StreamingContext) : Unit = {
        // Mount the shutdown handler under /close
        val closeContext = new ContextHandler()
        closeContext.setContextPath("/close")
        closeContext.setHandler(new CloseStreamingAppHandler(ssc))

        val jetty = new Server(httpPort)
        jetty.setHandler(closeContext)
        jetty.start()
    }

    /**
      * Polls once per second for the given HDFS marker path; when the path
      * exists, gracefully stops the spark-streaming application. Blocks until
      * the context has stopped (via the marker or any other shutdown path).
      *
      * @param hdfsPath HDFS marker path whose existence triggers the shutdown
      * @param ssc StreamingContext to stop
      */
    def stopByHDFSMarkFile(hdfsPath : String, ssc : StreamingContext) : Unit = {
        val intervalMills = 1 * 1000
        var isStop = false
        while (!isStop) {
            // awaitTerminationOrTimeout returns true once the context has
            // terminated (e.g. stopped by the HTTP close handler), which also
            // ends this polling loop instead of spinning forever.
            isStop = ssc.awaitTerminationOrTimeout(intervalMills)
            if (!isStop && isHDFSExistMarkFile(hdfsPath)) {
                log.info("......1秒后开始关闭spark-streaming应用程序......")
                Thread.sleep(1000)
                log.info("......开始关闭spark-streaming应用程序......")
                // Stop the SparkContext too, and finish in-flight batches first
                ssc.stop(true, true)
                isStop = true
            }
        }
        log.info("......spark-streaming应用程序关闭成功.....")
    }

    /**
      * Checks whether the given HDFS marker path exists.
      *
      * @param hdfsPath HDFS path to test
      * @param defaultFS namenode URI; defaults to this cluster's namenode so
      *                  existing callers are unaffected
      * @return true if the path exists
      */
    def isHDFSExistMarkFile(hdfsPath: String, defaultFS: String = "hdfs://oldsix:9000") : Boolean = {
        val conf : Configuration = new Configuration()
        conf.set("fs.defaultFS", defaultFS)
        // Pinning the hdfs scheme implementation avoids "No FileSystem for
        // scheme: hdfs" when shaded jars merge service files incorrectly
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem")
        val path : Path = new Path(hdfsPath)
        val fs : FileSystem = path.getFileSystem(conf)
        fs.exists(path)
    }
}
