package cn.tedu.stream.source

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

import java.util.Properties

/**
 * @author Amos
 * @date 2022/5/22
 */

object StreamKafkaSourceDemo {

  /**
   * Demo: consume a stream of strings from a Kafka topic with Flink and
   * print every record to stdout.
   *
   * @param args optional overrides (defaults preserve original behavior):
   *             args(0) = topic name            (default "test")
   *             args(1) = Kafka bootstrap servers (default hadoop01-03:9092)
   */
  def main(args: Array[String]): Unit = {
    // Build the streaming execution environment.
    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Enable checkpointing every 5 s; with checkpointing on, the Kafka
    // consumer commits offsets as part of Flink's checkpoint mechanism.
    env.enableCheckpointing(5000)

    // --- Kafka source configuration -------------------------------------
    // Topic and broker list can be overridden from the command line while
    // keeping the original hard-coded values as defaults.
    val topic = args.headOption.getOrElse("test")
    val bootstrapServers =
      if (args.length > 1) args(1)
      else "hadoop01:9092,hadoop02:9092,hadoop03:9092"

    val props = new Properties()
    props.setProperty("bootstrap.servers", bootstrapServers)
    props.setProperty("group.id", "test")
    // NOTE: key/value deserializers need not be set here — the
    // DeserializationSchema passed to FlinkKafkaConsumer handles decoding.
    // Dynamically discover newly added partitions of the topic;
    // interval is in milliseconds.
    props.setProperty("flink.partition-discovery.interval-millis", "5000")

    val consumer = new FlinkKafkaConsumer[String](topic, new SimpleStringSchema(), props)

    // Start-position options (default is 1):
    //   1. consumer.setStartFromGroupOffsets()   // resume from the group's committed offsets (default)
    //   2. consumer.setStartFromLatest()         // start from the latest records
    //   3. consumer.setStartFromEarliest()       // start from the earliest records
    //   4. consumer.setStartFromSpecificOffsets(...) // explicit per-partition offsets
    //   5. consumer.setStartFromTimestamp(1653217236000L) // from a timestamp (commonly used)

    // Brings the implicit TypeInformation instances required by the
    // Scala DataStream API into scope.
    import org.apache.flink.api.scala._
    val source: DataStream[String] = env.addSource(consumer)

    // Business logic would go here; the demo simply prints the stream.
    source.print()
    env.execute()
  }

}
