package com.edata.bigdata.streaming

import org.apache.kafka.clients.consumer.ConsumerRecord
import org.apache.kafka.common.TopicPartition
import org.apache.spark.sql.SparkSession
import org.apache.spark.streaming.{Duration, StreamingContext}
import org.apache.spark.streaming.dstream.DStream
import org.apache.spark.streaming.kafka010.OffsetRange

import scala.collection.mutable.Map

/**
 * Contract for a Spark Streaming Kafka consumer.
 *
 * Implementations own the Spark session, the streaming context and the Kafka
 * record stream, and track per-topic-partition offsets for the stream.
 *
 * NOTE(review): the abstract `var` members force a mutable contract on every
 * implementation; `def` accessors would be the idiomatic trait interface, but
 * changing them here would break existing implementers, so they are kept.
 *
 * @tparam K Kafka record key type
 * @tparam V Kafka record value type
 */
trait Consumer[K, V] {
  /** Active Spark session available to the implementation. */
  var session: SparkSession

  /** Kafka record DStream — presumably populated by [[createDataStream]]; verify in implementations. */
  var ds: DStream[ConsumerRecord[K, V]]

  /** Streaming context that drives the micro-batches. */
  var sc: StreamingContext

  /** Offsets to begin consuming from, keyed by topic-partition.
   *  NOTE(review): this is `scala.collection.mutable.Map` (see file imports, which
   *  shadow the immutable default) — confirm implementations rely on in-place updates.
   */
  var startingOffset: Map[TopicPartition, Long]

  /** Offset ranges of the stream's current/most recent batch — TODO confirm update point. */
  var offsetsOfStream: Array[OffsetRange]

  /** Micro-batch interval for the streaming context. */
  var batchDuration: Duration

  /** Builds the Kafka input stream — presumably assigns [[ds]]; confirm in implementations. */
  def createDataStream(): Unit

  /** Records the starting offset for one topic partition — presumably into [[startingOffset]]. */
  def setStartingOffset(topic: String, partition: Int, offset: Long): Unit

  /** Starts the streaming computation. */
  def start(): Unit
}
