package cn.wanda.modes

import cn.wanda.constants.Constants
import cn.wanda.kafka.KafkaHelper
import cn.wanda.projects.kafkamanager.KafkaOffsetManager
import cn.wanda.topologies.SparkTopoContext
import cn.wanda.topologies.bases.{SparkBatchTopology, SparkStreamingTopology, SparkTopology}
import cn.wanda.utils.Checker
import javax.servlet.http.{HttpServletRequest, HttpServletResponse}
import kafka.api.OffsetRequest
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import org.spark_project.jetty.server.{Request, Server}
import org.spark_project.jetty.server.handler.{AbstractHandler, ContextHandler}

object GenericMethod {

  // Shared logger for the shutdown daemon and the topology bootstrap below.
  lazy val log = org.apache.log4j.LogManager.getLogger("GenericMethod")

  /****
    * Starts the daemon Jetty server that accepts graceful-shutdown requests.
    * A request to the `/close` context is handled by [[CloseStreamHandler]].
    * @param port the port exposed for shutdown requests
    * @param ssc  the streaming context that the handler will stop
    */
  def daemonHttpServer(port: Int, ssc: StreamingContext) = {
    val closeContext = new ContextHandler()
    closeContext.setContextPath("/close")
    closeContext.setHandler(new CloseStreamHandler(ssc))
    val httpServer = new Server(port)
    httpServer.setHandler(closeContext)
    httpServer.start()
  }

  /*** Handles an HTTP request by gracefully shutting down the stream.
    * @param ssc the streaming context to stop
    */
  class CloseStreamHandler(ssc: StreamingContext) extends AbstractHandler {
    override def handle(target: String, baseRequest: Request, request: HttpServletRequest, response: HttpServletResponse): Unit = {
      log.warn("开始关闭......")
      // Stop the streaming context and the underlying SparkContext,
      // letting in-flight batches finish first (graceful shutdown).
      ssc.stop(stopSparkContext = true, stopGracefully = true)
      response.setContentType("text/html; charset=utf-8")
      response.setStatus(HttpServletResponse.SC_OK)
      val writer = response.getWriter()
      writer.println("close success")
      baseRequest.setHandled(true)
      log.warn("关闭成功.....")
    }
  }




    /**
      * Bootstraps a Spark topology from the properties file named in `args`.
      *
      * Reflectively instantiates the class configured under `Constants.CLASS_NAME`,
      * lets it tune the `SparkConf`, then dispatches on its runtime type:
      * a [[SparkStreamingTopology]] gets a `StreamingContext`, a Kafka DStream and
      * a daemon HTTP server for graceful shutdown; a [[SparkBatchTopology]] runs
      * once against the `SparkContext`.
      *
      * @param args    expects exactly one element: the path of the properties file
      * @param appName the Spark application name
      * @tparam T the topology type the configured class is cast to
      */
    def initialize[T <: SparkTopology](args: Array[String], appName: String): Unit = {

        // When true, a consumer group with no saved offsets starts from the
        // latest Kafka offsets instead of the earliest.
        val firstReadLastest = true

        val Array(proPath) = args

        val conf = new SparkConf().setAppName(appName)

        val stc = new SparkTopoContext(proPath)

        val topoName = stc.get(Constants.CLASS_NAME)

        val zookeeperserver = stc.get(Constants.ZOOKEEPERLIST)

        // lazy: only streaming topologies touch ZooKeeper, so batch runs never
        // open the connection. Timeouts (session=30s, connection=50s) are in ms.
        lazy val zkClient = new ZkClient(zookeeperserver, 30000, 50000, ZKStringSerializer)

        val zkOffsetPath = stc.get(Constants.ZOOKEEPEROFFETPATH)

        // Class#newInstance is deprecated since Java 9; go through the no-arg
        // constructor explicitly so reflective exceptions are not swallowed.
        val sparkTopo = Class.forName(topoName).getDeclaredConstructor().newInstance().asInstanceOf[T]

        // Give the topology a chance to adjust the SparkConf before contexts exist.
        sparkTopo.config(stc, conf)

        val sc = new SparkContext(conf)

        val session: SparkSession = SparkSession.builder().getOrCreate()

        sc.setLogLevel(stc.get(Constants.LOG_LEVEL))

        Checker.showMap(stc.getMap)

        sparkTopo match {
            case topo: SparkStreamingTopology =>

                val ssc = new StreamingContext(sc, Seconds(stc.get(Constants.DURATION).toLong))

                val brokerList = stc.get(Constants.BROKERS)

                val topicList = stc.get(Constants.TOPICS).split(",").toSet

                // Build the consumer config as a single immutable value.
                val baseParams = Map[String, String]("metadata.broker.list" -> brokerList)
                val kafkaParams =
                    if (firstReadLastest) baseParams + ("auto.offset.reset" -> OffsetRequest.LargestTimeString) // consume from the newest offsets
                    else baseParams

                val messagesDStream = KafkaHelper.loadTopicAndMessageFromKafka(ssc, topicList, zkClient, zkOffsetPath, kafkaParams)

                topo.process(messagesDStream, session)

                ssc.start()

                // Daemon accepting graceful-shutdown requests over HTTP (/close).
                daemonHttpServer(5555, ssc)

                ssc.awaitTermination()

            case topo: SparkBatchTopology =>

                topo.process(sc, session)

            case _ =>
                // No known topology type matched the configured class.
                log.error("ERROR：匹配Topo错误")

        }

    }

}
