package com.edata.bigdata.kafka

import com.edata.bigdata.annotation.Edata_Executor
import com.edata.bigdata.util.{EDataUtils, Executor}
import org.apache.kafka.clients.consumer.{ConsumerConfig, ConsumerRecord}
import org.apache.kafka.common.TopicPartition
import org.apache.kafka.common.serialization.StringDeserializer
import org.apache.spark.rdd.RDD
import org.apache.spark.sql.{DataFrame, SparkSession}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010._
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.collection.mutable.Map

@Edata_Executor(target = "KFCSMEXECUTOR")
class SparkKafkaConsumer[K, V] extends Executor {
  override var SESSION: SparkSession = _
  // Streaming context driving the direct Kafka stream; created in createConsumer().
  var SC: StreamingContext = _
  // Transformed stream; its transform() side effect records per-batch offset ranges.
  var TDS: DStream[ConsumerRecord[K, V]] = _
  // Delegate executor used to load/persist consumed offsets (storage backend chosen by caller).
  var OFFSETS_HANDLER: Executor = _
  // Offset ranges of the most recent micro-batch, captured inside transform().
  var OFFSETS_FROM_STREAM: Array[OffsetRange] = _
  // File name (under the /<topic>/<group>/ directory) where offsets are stored.
  val OFFSETS_SAVE_PATH = "offset.txt"


  /** No-op: this executor requires no eager initialization. */
  override def initialize(): Unit = {
  }

  /**
   * Reads text data from `path` and builds a DataFrame for bean type `T`,
   * splitting each line on `seperator`.
   */
  override def findDataFromPath[T](path: String, seperator: String)(implicit bean: Manifest[T]): DataFrame = {
    val rdd = SESSION.sparkContext.textFile(path)
    LOGGER.info(s"finding data from ${path}")
    createDataFrame(rdd, seperator)
  }

  /** Writes `data` as plain text files under `path`. */
  override def saveDataToPath[T](data: RDD[String], path: String)(implicit bean: Manifest[T]): Unit = {
    LOGGER.info(s"saving data to ${path}")
    data.saveAsTextFile(path)
  }

  /**
   * Finds data using positional args: args(0) = path, args(1) = separator.
   *
   * @throws Exception if fewer than two arguments are supplied. (The previous
   *                   isEmpty-only guard let a single argument fall through to
   *                   an IndexOutOfBoundsException at args(1).)
   */
  override def findData[T](args: String*)(implicit bean: Manifest[T]): DataFrame = {
    if (args.length < 2) {
      throw new Exception("args must contain a path and a separator")
    }
    findDataFromPath(args(0), args(1))
  }

  /** Saves `data` as text under args(0). */
  override def saveData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    if (args.isEmpty) {
      throw new Exception("args parameter should not be empty")
    }
    // Log for consistency with saveDataToPath.
    LOGGER.info(s"saving data to ${args(0)}")
    data.saveAsTextFile(args(0))
  }

  /** Deprecated: updates are not supported by this executor. */
  override def updateData[T](data: RDD[String], args: String*)(implicit bean: Manifest[T]): Unit = {
    LOGGER.warn("This method is deprecated")
  }

  /**
   * Finds the row whose id column equals `value`.
   * args(0) = path, args(1) = separator.
   *
   * The schema's id descriptor is "name:type"; the filter must use the field
   * name (k_t(0)) as the column and convert the supplied `value` by the id's
   * declared type. (Previously `value` was ignored and the raw "name:type"
   * string was used as the column reference, so nothing could ever match.)
   */
  override def findDataById[T](value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    if (args.length < 2) {
      throw new Exception("args must contain a path and a separator")
    }
    val className = bean.toString()
    val (_, id, _) = find_SCHEMA(className)
    val k_t = id.split(":")
    val data = findDataFromPath(args(0), args(1))
    data.filter(k_t(0) + "==" + EDataUtils.convertVToQueryStrByDataType(value, k_t(1)))
  }

  /**
   * Finds rows where column `key` equals `value`.
   * args(0) = path, args(1) = separator.
   */
  override def findDataByProperty[T](key: String, value: String, args: String*)(implicit bean: Manifest[T]): DataFrame = {
    if (args.length < 2) {
      throw new Exception("args must contain a path and a separator")
    }
    val data = findDataFromPath(args(0), args(1))
    data.filter(s"${key}==${value}")
  }


  /**
   * Creates the direct Kafka stream, resuming from any offsets previously
   * persisted via OFFSETS_HANDLER. Each micro-batch's offset ranges are
   * captured into OFFSETS_FROM_STREAM so they can be saved after processing.
   *
   * @param batchIntervalSeconds micro-batch duration; defaults to the
   *                             previously hard-coded 5 seconds.
   */
  def createConsumer[T](batchIntervalSeconds: Long = 5)(implicit bean: Manifest[T]): Unit = {
    SC = new StreamingContext(SESSION.sparkContext, Seconds(batchIntervalSeconds))

    // The Map must be typed [String, Object] explicitly, otherwise it fails to compile.
    val kafkaParams = Map[String, Object](
      ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG -> KF_CSM_BOOTSTRAP,
      ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG -> classOf[StringDeserializer],
      // Auto-commit is disabled: offsets are managed manually through OFFSETS_HANDLER.
      ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG -> (false: java.lang.Boolean),
      ConsumerConfig.GROUP_ID_CONFIG -> KF_CSM_GROUP_ID
    )

    val offset = getOffsetData()
    val IDS = KafkaUtils.createDirectStream[K, V](
      SC,
      LocationStrategies.PreferConsistent,
      ConsumerStrategies.Subscribe[K, V](KF_CSM_TOPIC.split(","), kafkaParams, offset)
    )
    TDS = IDS.transform { rdd =>
      // Record this batch's offset ranges before downstream processing,
      // so saveOffsetData() can persist them afterwards.
      OFFSETS_FROM_STREAM = rdd.asInstanceOf[HasOffsetRanges].offsetRanges
      rdd
    }
  }

  /**
   * Loads previously persisted offsets for this topic/group.
   *
   * @return a (possibly empty) map of TopicPartition -> next offset to consume.
   */
  def getOffsetData[T]()(implicit bean: Manifest[T]): Map[TopicPartition, Long] = {
    val storage_offset = OFFSETS_HANDLER.findDataByProperty("topic", KF_CSM_TOPIC, s"/${KF_CSM_TOPIC}/${KF_CSM_GROUP_ID}/${OFFSETS_SAVE_PATH}", ",").filter(s"group_id=='${KF_CSM_GROUP_ID}'").collect()
    val offsetMap = Map[TopicPartition, Long]()
    for (offset <- storage_offset) {
      val topic = EDataUtils.findColValOfRowByColName(offset, "topic", "string")
      val partition = EDataUtils.findColValOfRowByColName(offset, "partition", "string")
      val offsetNum = EDataUtils.findColValOfRowByColName(offset, "offset_num", "string")
      // Columns are read back with type "string": asInstanceOf[Int] on a String
      // throws ClassCastException, so the values must be parsed numerically.
      // Kafka offsets are Long, so parse to Long to avoid Int truncation.
      offsetMap.put(new TopicPartition(topic.toString, partition.toString.toInt), offsetNum.toString.toLong)
    }
    offsetMap
  }

  /**
   * Persists the latest batch's offset ranges as CSV rows of
   * id,group,topic,partition,untilOffset via OFFSETS_HANDLER.
   * Safe to call before the first batch (it simply logs and returns).
   */
  def saveOffsetData[T]()(implicit bean: Manifest[T]): Unit = {
    if (OFFSETS_FROM_STREAM == null) {
      // No batch has been processed yet; nothing to persist.
      LOGGER.warn("no offset ranges captured yet, skipping offset save")
      return
    }
    val offset_array = OFFSETS_FROM_STREAM.map { data =>
      val id = s"${data.topic}-${KF_CSM_GROUP_ID}-${data.partition}"
      s"${id},${KF_CSM_GROUP_ID},${data.topic},${data.partition},${data.untilOffset}"
    }
    val rdd = SESSION.sparkContext.parallelize(offset_array)
    OFFSETS_HANDLER.updateData(rdd, s"/${KF_CSM_TOPIC}/${KF_CSM_GROUP_ID}/${OFFSETS_SAVE_PATH}", ",")
  }

  /** Starts the streaming context and blocks until termination. */
  def start(): Unit = {
    SC.start()
    SC.awaitTermination()
  }



}
