package com.travel.programApp

import com.travel.common.{ConfigUtil, Constants, HBaseUtil, JedisUtil}
import com.travel.utils.HbaseTools
import org.apache.hadoop.hbase.client.Connection
import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.log4j.{Level, Logger}
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.kafka010.{HasOffsetRanges, OffsetRange}
import org.apache.spark.streaming.{Seconds, StreamingContext}
import org.apache.spark.{SparkConf, SparkContext}
import redis.clients.jedis.Jedis

object StreamingKafka {

  /**
    * Consumes GPS data from Kafka, persists every record to HBase and Redis,
    * and commits the consumed Kafka offsets to HBase (instead of Kafka's
    * internal offset topic) so the job can resume from the last processed
    * position after a restart. Offsets are committed only after a batch has
    * been fully processed, giving at-least-once semantics.
    *
    * @param args command-line arguments (unused)
    */
  def main(args: Array[String]): Unit = {

    // Silence Spark's verbose internal logging so application output is readable.
    Logger.getLogger("org").setLevel(Level.ERROR)

    val brokers = ConfigUtil.getConfig(Constants.KAFKA_BOOTSTRAP_SERVERS)
    // Two source topics: ChengDu GPS data and HaiKou GPS data.
    val topics = Array(ConfigUtil.getConfig(Constants.CHENG_DU_GPS_TOPIC), ConfigUtil.getConfig(Constants.HAI_KOU_GPS_TOPIC))
    val group: String = "gps_consum_group"
    val kafkaParams = Map[String, Object](
      "bootstrap.servers" -> brokers,
      "key.deserializer" -> classOf[StringDeserializer],
      "value.deserializer" -> classOf[StringDeserializer],
      "group.id" -> group,
      "auto.offset.reset" -> "latest", // one of: earliest, latest, none
      // Offsets are committed manually to HBase below, so auto-commit must be off.
      "enable.auto.commit" -> (false: java.lang.Boolean)
    )

    // Direct-mode consumption works fine with local[1]; receiver mode would
    // need at least one extra core for processing.
    // NOTE(review): master is hard-coded for local testing — externalize it
    // (e.g. via spark-submit --master) for cluster deployment.
    val conf: SparkConf = new SparkConf().setMaster("local[1]").setAppName("streamingKafka")

    val sparkSession: SparkSession = SparkSession.builder().config(conf).getOrCreate()

    val context: SparkContext = sparkSession.sparkContext

    // Micro-batch interval of 5 seconds.
    val streamingContext = new StreamingContext(context, Seconds(5))

    // HbaseTools.getStreamingContextFromHBase encapsulates the offset
    // bootstrap: it looks up previously saved offsets in HBase (rowkey
    // "<group>:<topic>", one column per topic:partition) and builds a direct
    // Kafka stream starting from them; when no offsets are stored yet it
    // falls back to the "auto.offset.reset" policy. The pattern subscribes
    // to every topic whose name ends in "gps_topic".
    val result: InputDStream[ConsumerRecord[String, String]] =
      HbaseTools.getStreamingContextFromHBase(streamingContext, kafkaParams, topics, group, "(.*)gps_topic")

    // Persist each batch to HBase and Redis, then commit offsets to HBase.
    result.foreachRDD(eachRdd => {

      if (!eachRdd.isEmpty()) {

        eachRdd.foreachPartition(eachPartition => {

          // One HBase connection and one Jedis client per partition.
          val connection: Connection = HBaseUtil.getConnection
          val jedis: Jedis = JedisUtil.getJedis
          try {
            // ChengDu records go to the ChengDu HBase table and HaiKou
            // records to the HaiKou table — routing is handled inside the
            // helper for each individual record.
            eachPartition.foreach(record => {
              HbaseTools.saveToHBaseAndRedis(connection, jedis, record)
            })
          } finally {
            // Always release resources, even if saving a record throws;
            // otherwise the connection and Jedis client would leak.
            JedisUtil.returnJedis(jedis)
            connection.close()
          }
        })

        // Commit the consumed offset ranges to HBase only after the whole
        // batch has been processed (at-least-once delivery).
        val offsetRanges: Array[OffsetRange] = eachRdd.asInstanceOf[HasOffsetRanges].offsetRanges
        // Alternative: result.asInstanceOf[CanCommitOffsets].commitAsync(offsetRanges)
        // would store the offsets in Kafka's internal offsets topic instead.
        for (eachRange <- offsetRanges) {
          val endOffset: Long = eachRange.untilOffset
          val partition: Int = eachRange.partition
          val topic: String = eachRange.topic
          // Persist the end offset for this topic/partition pair.
          HbaseTools.saveBatchOffset(group, topic, partition.toString, endOffset)
        }
      }
    })

    streamingContext.start()
    streamingContext.awaitTermination()
  }

}
