package com.paic

import javax.servlet.http.{HttpServletRequest, HttpServletResponse}

import com.paic.SparkDirectStreaming.{createKafkaStream, daemonHttpServer, log}
import kafka.api.OffsetRequest
import kafka.message.MessageAndMetadata
import kafka.serializer.StringDecoder
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.{SparkConf, SparkContext, TaskContext}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka.KafkaUtils
import org.elasticsearch.spark.cfg.SparkSettings
import org.elasticsearch.spark.rdd.EsSpark
import org.slf4j.{Logger, LoggerFactory}

import scala.collection.mutable.ListBuffer
import org.elasticsearch.spark._
import org.spark_project.jetty.server.{Request, Server}
import org.spark_project.jetty.server.handler.{AbstractHandler, ContextHandler}


/**
  * Created by Jerry on 2018/5/9.
  *
  */
object SparkEsSink {
  /** Logger scoped to this object (preferred over the `log` imported from SparkDirectStreaming). */
  val logger = LoggerFactory.getLogger(this.getClass)

  /**
    * Builds a StreamingContext that reads JSON messages from Kafka and writes them to Elasticsearch.
    *
    * @param isLocal       run with a local master (FIX: this parameter was previously shadowed by a
    *                      hard-coded `val isLocal = true`; it is now honored)
    * @param esNodes       Elasticsearch node list (es.nodes)
    * @param esPort        Elasticsearch HTTP port (es.port)
    * @param kafkaBrokers  Kafka bootstrap.servers list
    * @param kafkaTopic    single topic to consume
    * @param zkUrl         ZooKeeper connection string, used to build the ZkClient handed to the offset manager
    * @param batchDuration streaming batch interval in seconds
    * @param groupId       Kafka consumer group id
    * @return the configured (not yet started) StreamingContext
    */
  def createStreaming(isLocal:Boolean, esNodes:String, esPort:String, kafkaBrokers:String, kafkaTopic:String, zkUrl:String, batchDuration:Int,groupId:String): StreamingContext ={
    val firstReadLastest = true // on first start (no saved offsets) begin from the latest offset

    val sparkConf = new SparkConf().setAppName("e-commerce-eS-sink")
    if (isLocal) sparkConf.setMaster("local[1]") // local mode for development/testing
    sparkConf.set("es.index.auto.create", "true")
    sparkConf.set("es.nodes", esNodes)
    sparkConf.set("es.port", esPort)
    sparkConf.set("es.nodes.wan.only", "false")
    sparkConf.set("spark.streaming.stopGracefullyOnShutdown", "true")  // finish in-flight batches on shutdown
    sparkConf.set("spark.streaming.backpressure.enabled", "true")      // throttle ingestion under load
    sparkConf.set("spark.streaming.backpressure.initialRate", "5000")  // cap for the very first batch
    sparkConf.set("spark.streaming.kafka.maxRatePerPartition", "2000") // max records/sec per partition

    var kafkaParams = Map[String, String]("bootstrap.servers" -> kafkaBrokers)
    if (firstReadLastest) kafkaParams += ("auto.offset.reset" -> OffsetRequest.LargestTimeString)
    // FIX: the real Kafka consumer property is "group.id"; the old key "consumer.group.id"
    // was silently ignored by Kafka, so the supplied groupId never took effect.
    kafkaParams += ("group.id" -> groupId)

    // ZKStringSerializer keeps any data this client writes to ZooKeeper human-readable
    // (without it, stored offsets come out as garbled bytes).
    val zkClient = new ZkClient(zkUrl, 30000, 30000, ZKStringSerializer)
    val topicsSet = Set(kafkaTopic)

    // FIX: the original code created a SparkContext from sparkConf AND a StreamingContext from
    // the same conf — Spark refuses to run two SparkContexts in one JVM. Create only the
    // StreamingContext; it constructs and owns its SparkContext.
    val ssc = new StreamingContext(sparkConf, Seconds(batchDuration)) // one batch every batchDuration seconds
    val rdds: InputDStream[(String, String)] = createKafkaStream(ssc, kafkaParams, zkClient, topicsSet)

    // Per batch: write the JSON payloads to ES, then checkpoint the batch's offsets.
    rdds.foreachRDD(rdd => {
      if (!rdd.isEmpty()) { // skip empty batches entirely
        // FIX: the old code appended messages to a driver-side ListBuffer inside
        // rdd.foreachPartition. That closure runs on the executors, so the driver's buffer
        // stayed empty and nothing was ever indexed. Writing the mapped RDD directly lets
        // each executor partition stream its own documents into Elasticsearch.
        EsSpark.saveJsonToEs(rdd.map(_._2), "spark/e-commerce")

        // Persist this batch's offsets (runs on the driver) so a restart resumes where we left off.
        KafkaOffsetManager.saveOffsets2Redis(kafkaTopic, rdd)
      }
    })

    ssc // return the StreamingContext; caller is responsible for start()/awaitTermination()
  }

  /**
    * Creates a Kafka direct stream, resuming from previously saved offsets when available.
    * NOTE: only a single topic's offsets are handled (the last element of `topics` is used).
    *
    * @param ssc         the StreamingContext to attach the stream to
    * @param kafkaParams Kafka consumer configuration
    * @param zkClient    ZooKeeper client handed to the offset manager
    * @param topics      topics to consume (only the last one is used for offset lookup)
    * @return InputDStream of (key, message) pairs
    */
  def createKafkaStream(ssc: StreamingContext,
                        kafkaParams: Map[String, String],
                        zkClient: ZkClient,
                        topics: Set[String]): InputDStream[(String, String)]={
    // Look up the offsets persisted by the last run, if any.
    val zkOffsetData = KafkaOffsetManager.readOffsetFromRedis(topics.last, zkClient)

    val kafkaStream = zkOffsetData match {
      case None =>
        // No stored offsets: first start of the system, consume from the latest offset.
        logger.info("系统第一次启动，没有读取到偏移量，默认就最新的offset开始消费")
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder](ssc, kafkaParams, topics)
      case Some(lastStopOffset) =>
        // Stored offsets found: resume exactly where the previous run stopped.
        logger.info("从zk中读取到偏移量，从上次的偏移量开始消费数据......")
        val messageHandler = (mmd: MessageAndMetadata[String, String]) => (mmd.key, mmd.message)
        KafkaUtils.createDirectStream[String, String, StringDecoder, StringDecoder, (String, String)](ssc, kafkaParams, lastStopOffset, messageHandler)
    }
    kafkaStream // the created direct stream
  }

  /**
    * Per-message processing hook (currently a no-op placeholder).
    *
    * @param msg (key, value) pair read from Kafka
    */
  def process(msg: (String, String)): Unit={

  }

  /**
    * Starts an embedded Jetty server whose /close endpoint gracefully stops the stream.
    *
    * @param port TCP port to listen on
    * @param ssc  the StreamingContext to stop when /close is hit
    */
  def daemonHttpServer(port:Int,ssc: StreamingContext)={
    val server = new Server(port)
    val context = new ContextHandler()
    context.setContextPath("/close")
    context.setHandler(new CloseStreamHandler(ssc))
    server.setHandler(context)
    server.start()
  }

  /**
    * Jetty handler that gracefully shuts down the streaming context on any HTTP request.
    *
    * @param ssc the StreamingContext to stop
    */
  class CloseStreamHandler(ssc:StreamingContext) extends AbstractHandler {
    override def handle(s: String, baseRequest: Request, req: HttpServletRequest, response: HttpServletResponse): Unit ={
      logger.warn("开始关闭......")
      // stop(stopSparkContext = true, stopGracefully = true): drain in-flight batches first.
      ssc.stop(true, true)
      response.setContentType("text/html; charset=utf-8")
      response.setStatus(HttpServletResponse.SC_OK)
      val out = response.getWriter()
      out.println("close success")
      baseRequest.setHandled(true)
      logger.warn("关闭成功.....")
    }
  }

  /**
    * Entry point. Expects 8 arguments:
    * isLocal kafkaBrokers kafkaTopic zkUrl batchDurationSeconds esNodes esPort groupId
    */
  def main(args: Array[String]): Unit = {
    // FIX: only 8 arguments are consumed below; the old check (< 10) rejected valid
    // 8- and 9-argument invocations. Also exit non-zero on a usage error (was exit 0).
    if (args.length < 8) {
      logger.error("args less than 8")
      sys.exit(1)
    }
    val isLocal = args(0).toBoolean
    val kafkaBrokers = args(1)
    val kafkaTopic = args(2)
    val zkUrl = args(3)
    val batchDuration = args(4).toInt
    val esNodes = args(5)
    val esPort = args(6)
    val groupId = args(7)
    val ssc = createStreaming(isLocal, esNodes, esPort, kafkaBrokers, kafkaTopic, zkUrl, batchDuration, groupId)
    ssc.start()
    daemonHttpServer(15555, ssc) // shutdown hook endpoint: GET http://host:15555/close
    ssc.awaitTermination()
  }
}
