package cn.wanda.projects.phoenixjob


import cn.wanda.constants.Constants
import cn.wanda.projects.kafkamanager.KafkaOffsetManager
import cn.wanda.projects.phoendb.Phoneix
import cn.wanda.topologies.SparkTopoContext
import cn.wanda.topologies.bases.SparkStreamingTopology
import kafka.utils.ZKStringSerializer
import org.I0Itec.zkclient.ZkClient
import org.apache.spark.SparkConf
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream

import scala.collection.mutable


class Sparktophoenix extends SparkStreamingTopology {

  /** Topology parameters injected by the framework in [[config]]. */
  var appParaMap: mutable.HashMap[String, String] = _

  // Lazy so the logger is instantiated on whichever JVM touches it first;
  // Log4j loggers are not serializable, so this must not be captured eagerly.
  lazy val log = org.apache.log4j.LogManager.getLogger("Sparktophoenix")

  /**
   * Writes each Kafka record's value into Phoenix and then checkpoints the
   * batch's Kafka offsets to ZooKeeper.
   *
   * Offsets are saved *after* the batch has been written, giving
   * at-least-once delivery semantics on restart.
   *
   * @param DStream Kafka input stream of (key, value) string pairs.
   * @param session active SparkSession (unused here, kept for the interface).
   */
  override def process(DStream: InputDStream[(String, String)], session: SparkSession): Unit = {
    val phoenixUrl = appParaMap("phoenix.url")
    log.warn(phoenixUrl)
    val database = appParaMap("phoenix.database")
    log.warn(database)
    val create = appParaMap("create.table")
    log.warn(create)

    // ZooKeeper client used on the driver to persist consumed offsets.
    val zkClient = new ZkClient(appParaMap("zookeeperlist.server"), 30000, 30000, ZKStringSerializer)
    val zkOffsetPath = appParaMap("zookeeperoffset.path")
    log.warn(zkOffsetPath)

    DStream.foreachRDD { rdd =>
      if (!rdd.isEmpty()) {
        // Build the Phoenix client once per partition on the executor instead
        // of capturing a single driver-side instance in the task closure
        // (the standard Spark pattern for connection-like objects, and it
        // avoids closure-serialization failures).
        // NOTE(review): assumes Phoneix opens its own connection from the URL
        // and is cheap enough to create per partition — confirm.
        rdd.foreachPartition { records =>
          val phoen = new Phoneix(phoenixUrl)
          records.foreach { case (_, value) =>
            phoen.JSONphoenix(value, database)
          }
        }
      }
      // Checkpoint offsets after the batch is written (at-least-once).
      KafkaOffsetManager.saveOffsets(zkClient, zkOffsetPath, rdd)
    }
  }

  /**
   * Captures the topology parameter map and applies a default master.
   *
   * Only falls back to local mode when no master was supplied (e.g. via
   * `spark-submit --master`); an unconditional `setMaster` here would
   * silently override cluster deployment settings.
   */
  override def config(context: SparkTopoContext, conf: SparkConf): Unit = {
    if (!conf.contains("spark.master")) {
      conf.setMaster("local[2]")
    }
    appParaMap = context.getMap
  }
}
