package cn.getech.data.development.source

import java.util.Properties

import cn.getech.data.development.bean.FlinkStreamSyncHiveObj
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.apache.flink.api.scala._
import org.apache.flink.streaming.connectors.kafka.internals.KafkaTopicPartition
import org.slf4j.{Logger, LoggerFactory}

class FlinkRealtimeCollectJsonSource(env: StreamExecutionEnvironment, obj: FlinkStreamSyncHiveObj) extends FlinkSourceTrait[String] {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Builds a Kafka-backed `DataStream[String]` of raw record values for the
   * topic configured in `obj.kafkaResource`.
   *
   * Consumer properties are assembled from fixed String deserializers, a 30s
   * partition-discovery interval, plus every key/value pair carried in
   * `obj.kafkaResource.params` (NOTE(review): pairs arrive in the
   * `field_name`/`class_type` fields — verify that naming upstream).
   * The startup position is chosen from `obj.kafkaResource.startupMode`;
   * offsets are committed back to Kafka on each checkpoint.
   *
   * @return a stream of raw String payloads sourced from Kafka
   */
  override def getKafkaDataStream: DataStream[String] = {

    val properties = new Properties()
    properties.setProperty(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    properties.setProperty(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    // Enable dynamic partition discovery so partitions added after job start are consumed.
    properties.setProperty("flink.partition-discovery.interval-millis", "30000")
    obj.kafkaResource.params.foreach(p => properties.setProperty(p.field_name, p.class_type))
    if (obj.consumerByteRate != "0") {
      // Throttle via the socket receive buffer; "0" means "use the broker/client default".
      properties.setProperty(ConsumerConfig.RECEIVE_BUFFER_CONFIG, obj.consumerByteRate)
    }

    val consumer = new FlinkKafkaConsumer[String](obj.kafkaResource.topic, new SimpleStringSchema(), properties)
    consumer.setCommitOffsetsOnCheckpoints(true)

    import cn.getech.data.development.enums.FlinkStreamStartUpMode
    obj.kafkaResource.startupMode match {
      case FlinkStreamStartUpMode.EARLIEST =>
        consumer.setStartFromEarliest()
      case FlinkStreamStartUpMode.LATEST =>
        consumer.setStartFromLatest()
      case FlinkStreamStartUpMode.GROUPOFFSETS =>
        consumer.setStartFromGroupOffsets()
      case FlinkStreamStartUpMode.SPECIFICOFFSETS =>
        configureSpecificOffsets(consumer)
    }
    env.addSource[String](consumer)
  }

  /**
   * Applies per-partition start offsets taken from the
   * `connector.specific-offsets` parameter.
   *
   * Expected value format: `partition:0,offset:42;partition:1,offset:7`
   * (semicolon-separated entries, each a `partition:<p>,offset:<o>` pair).
   *
   * On a missing parameter or a malformed value the error is logged and the
   * consumer is left with its default start position (same best-effort
   * behaviour as before — the job is not failed here).
   */
  private def configureSpecificOffsets(consumer: FlinkKafkaConsumer[String]): Unit = {
    import scala.util.control.NonFatal
    val offsets = new java.util.HashMap[KafkaTopicPartition, java.lang.Long]()
    try {
      // Last occurrence wins if the parameter was supplied more than once.
      obj.kafkaResource.params.filter(_.field_name == "connector.specific-offsets").lastOption match {
        case Some(bean) =>
          bean.class_type.split(";").foreach { entry =>
            val parts = entry.split(",")
            val partitionKv = parts(0).split(":")
            val offsetKv = parts(1).split(":")
            // Fail fast with a clear message instead of letting "".toInt throw later.
            require(partitionKv(0) == "partition" && offsetKv(0) == "offset",
              s"Malformed specific-offsets entry: '$entry'")
            val partition = partitionKv(1).toInt
            val offset = offsetKv(1).toLong
            logger.info(s"Topic ${obj.kafkaResource.topic}: starting partition $partition from offset $offset")
            offsets.put(new KafkaTopicPartition(obj.kafkaResource.topic, partition), offset)
          }
          consumer.setStartFromSpecificOffsets(offsets)
        case None =>
          logger.error("SPECIFICOFFSETS startup mode requested but parameter 'connector.specific-offsets' is missing")
      }
    } catch {
      case NonFatal(e) =>
        // Attach the exception so the stack trace goes through the logging backend, not stderr.
        logger.error("connector.specific-offsets Failed to parse data! Please check the data format...", e)
    }
  }

  override def process(): Unit = ???
}
