package com.edata.bigdata.flink.kafka

import com.edata.bigdata.annotations.Edata_Source
import com.edata.bigdata.flink.Source
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.serialization.DeserializationSchema
import org.apache.flink.api.common.typeinfo.TypeInformation
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.streaming.api.scala._

/**
 * Flink source that reads records of type `T` from one or more Kafka topics.
 *
 * Configuration is done by assigning the public vars before calling
 * [[createDataStream]]:
 *  - `deserialzationSchema` (required) — value-only deserializer for the topic payload
 *  - `topics` — comma-separated topic list (entries are trimmed; empties ignored)
 *  - `groupId`, `offsetMode`, `wartermarkStrategy`, `sourceName` — optional overrides
 *
 * `brokers` is supplied by the mixed-in [[FKConnector]].
 *
 * NOTE(review): `deserialzationSchema` / `wartermarkStrategy` are misspelled but are
 * public vars — renaming them would break external callers, so the names are kept.
 */
@Edata_Source(target = "FKSOURCE")
class FKSource[T: TypeInformation] extends Source[T] with FKConnector {
  override var env: StreamExecutionEnvironment = _
  override var dataStream: DataStream[T] = _
  // Required: must be assigned by the caller before createDataStream().
  var deserialzationSchema: DeserializationSchema[T] = _
  // Where to begin consuming when there is no committed offset.
  var offsetMode = OffsetsInitializer.earliest()
  var wartermarkStrategy: WatermarkStrategy[T] = WatermarkStrategy.noWatermarks()

  // Comma-separated topic names; defaults are placeholders and should be overridden.
  var topics: String = "TOPIC_A,TOPIC_B"
  var groupId: String = "edata"
  var sourceName = "Kafka Source"

  /**
   * Builds the [[KafkaSource]] from the configured vars and attaches it to `env`,
   * storing the result in `dataStream`.
   *
   * @throws IllegalArgumentException if `env` or `deserialzationSchema` is unset,
   *                                  or if `topics` contains no usable topic name.
   */
  override def createDataStream(): Unit = {
    // Fail fast with a clear message instead of an opaque NPE from the builder.
    require(env != null, "env must be assigned before createDataStream()")
    require(deserialzationSchema != null,
      "deserialzationSchema must be assigned before createDataStream()")
    // Trim entries and drop empties so "A, B" or "A,B," yields Seq("A", "B").
    val topicList = topics.split(",").map(_.trim).filter(_.nonEmpty)
    require(topicList.nonEmpty, s"no usable topics in: '$topics'")

    val source = KafkaSource.builder[T]()
      .setBootstrapServers(brokers)
      .setTopics(topicList: _*)
      .setValueOnlyDeserializer(deserialzationSchema)
      .setStartingOffsets(offsetMode)
      .setGroupId(groupId)
      .build()
    dataStream = env.fromSource(source, wartermarkStrategy, sourceName)
  }

  /**
   * Submits the assembled job graph for execution.
   *
   * @param jobName name shown in the Flink UI for this job
   */
  override def start(jobName: String): Unit = {
    env.execute(jobName)
  }

}
