package cn.getech.data.development.source

import cn.getech.data.development.bean.FlinkStreamSyncHiveObj
import org.apache.flink.api.scala._
import org.apache.flink.streaming.api.scala.DataStream
import org.apache.flink.table.api.bridge.scala.StreamTableEnvironment
import org.apache.flink.table.descriptors._
import org.apache.flink.types.Row
import org.apache.kafka.clients.consumer.ConsumerConfig
import org.slf4j.{Logger, LoggerFactory}

class FlinkRealtimeCollectCSVSource(tEnv: StreamTableEnvironment, obj: FlinkStreamSyncHiveObj) extends FlinkSourceTrait[Row] {

  private val logger: Logger = LoggerFactory.getLogger(this.getClass)

  /**
   * Registers the configured Kafka topic as a temporary CSV table ("kafka_source")
   * and exposes it as an append-only stream of [[Row]]s.
   *
   * NOTE(review): assumes `obj.kafkaResource.delimitFormatType` is non-empty; an empty
   * string would make `charAt(0)` throw StringIndexOutOfBoundsException — confirm
   * upstream validation.
   */
  override def getKafkaDataStream: DataStream[Row] = {
    tEnv.connect(getKafkaSource)
      .withFormat(new Csv().fieldDelimiter(obj.kafkaResource.delimitFormatType.charAt(0)))
      .withSchema(getKafkaSchema)
      .createTemporaryTable("kafka_source")
    tEnv.toAppendStream[Row](tEnv.from("kafka_source"))
  }

  /**
   * Builds the Kafka connector descriptor: topic, deserializers, an optional
   * receive-buffer cap, user-supplied properties and the configured startup mode.
   */
  private def getKafkaSource: Kafka = {
    val kafkaSource = new Kafka()
      .version("universal")
      .topic(obj.kafkaResource.topic)
      // NOTE(review): "ack" is not a consumer config ("acks" is producer-side);
      // kept as-is for backward compatibility — unknown properties are ignored.
      .property("ack", "all")
      .property(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
      .property(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer")
    if (obj.consumerByteRate != "0") {
      // "consumer_byte_rate" is a broker-side quota; this approximates a client-side
      // throttle by capping the socket receive buffer instead.
      kafkaSource.property(ConsumerConfig.RECEIVE_BUFFER_CONFIG, obj.consumerByteRate)
    }
    // Pass through user-defined kafka properties; class_type carries the value.
    obj.kafkaResource.params.foreach(x => kafkaSource.property(x.field_name, x.class_type))
    applyStartupMode(kafkaSource)
    kafkaSource
  }

  /**
   * Applies the configured startup mode to the descriptor. On a parse failure for
   * specific offsets, or on an unrecognized mode, no explicit mode is forced beyond
   * Flink's own default (group offsets), matching the original best-effort behavior.
   */
  private def applyStartupMode(kafkaSource: Kafka): Unit = {
    obj.kafkaResource.startupMode match {
      case cn.getech.data.development.enums.FlinkStreamStartUpMode.EARLIEST =>
        kafkaSource.startFromEarliest()
      case cn.getech.data.development.enums.FlinkStreamStartUpMode.LATEST =>
        kafkaSource.startFromLatest()
      case cn.getech.data.development.enums.FlinkStreamStartUpMode.GROUPOFFSETS =>
        kafkaSource.startFromGroupOffsets()
      case cn.getech.data.development.enums.FlinkStreamStartUpMode.SPECIFICOFFSETS =>
        parseSpecificOffsets() match {
          case Some(offsets) => kafkaSource.startFromSpecificOffsets(offsets)
          case None          => // already logged; fall back to Flink's default
        }
      case other =>
        // Fix: the original match was non-exhaustive and threw MatchError here.
        logger.warn(s"Unsupported startup mode '$other', falling back to group offsets")
        kafkaSource.startFromGroupOffsets()
    }
  }

  /**
   * Parses the "connector.specific-offsets" param, whose value looks like
   * "partition:0,offset:42;partition:1,offset:300", into a partition -> offset map.
   * Returns None (after logging) when the param is absent or malformed.
   */
  private def parseSpecificOffsets(): Option[java.util.Map[Integer, java.lang.Long]] = {
    import scala.util.control.NonFatal
    // Fix: the original called .last on a possibly-empty filter result; a missing
    // param raised NoSuchElementException and was reported as a parse failure.
    obj.kafkaResource.params.filter(_.field_name == "connector.specific-offsets").lastOption match {
      case None =>
        logger.error("Startup mode SPECIFICOFFSETS requires a 'connector.specific-offsets' param, but none is configured")
        None
      case Some(bean) =>
        try {
          val offsets = new java.util.HashMap[Integer, java.lang.Long]()
          bean.class_type.split(";").foreach { entry =>
            val pair = entry.split(",")
            val partitionKv = pair(0).split(":")
            val offsetKv = pair(1).split(":")
            var partition = ""
            var offset = ""
            if (partitionKv(0) == "partition") partition = partitionKv(1)
            if (offsetKv(0) == "offset") offset = offsetKv(1)
            // Fix: debug output through slf4j instead of println.
            logger.debug(s"partition: $partition, offset: $offset")
            // An unexpected key leaves "" here, so toInt/toLong throws and the
            // catch below records the malformed entry (same net effect as before).
            offsets.put(partition.toInt, offset.toLong)
          }
          logger.info(s"Parsed specific offsets: $offsets")
          Some(offsets)
        } catch {
          case NonFatal(e) =>
            // Fix: log the throwable via slf4j rather than printStackTrace,
            // and let fatal errors (OOM etc.) propagate.
            logger.error("connector.specific-offsets Failed to parse data! Please check the data format...", e)
            None
        }
    }
  }

  /** Maps the configured JDBC field list onto the Flink table schema of the Kafka source. */
  private def getKafkaSchema: Schema = {
    val schema = new Schema()
    obj.jdbc.fields.foreach(x => schema.field(x.field_name, x.class_type))
    schema
  }

  // Intentionally unimplemented in this source; callers must not invoke it.
  override def process(): Unit = ???
}
