package com.shujia.source

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer08
import org.apache.flink.api.scala._

object Demo4ConnectorKafka {

  /**
    * Reads a Kafka topic as a Flink streaming source and prints every record.
    *
    * Required dependency (version is tied to the Kafka broker generation —
    * the 0.8 connector only works against 0.8-era brokers):
    * <dependency>
    *   <groupId>org.apache.flink</groupId>
    *   <artifactId>flink-connector-kafka-0.8_2.11</artifactId>
    *   <version>1.8.0</version>
    * </dependency>
    *
    * @param args optional overrides: args(0) = topic (default "f"),
    *             args(1) = consumer group id (default "qweqwew");
    *             with no arguments the original demo behaviour is unchanged
    */
  def main(args: Array[String]): Unit = {

    // Allow topic / group id to be supplied on the command line instead of
    // being hard-coded; the defaults reproduce the original demo exactly.
    val topic   = if (args.length > 0) args(0) else "f"
    val groupId = if (args.length > 1) args(1) else "qweqwew"

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // The 0.8 consumer stores offsets in ZooKeeper, so it needs both the
    // ZooKeeper quorum and the broker list.
    val kafkaProps = new Properties()
    kafkaProps.setProperty("zookeeper.connect", "node2:2181,node3:2181,node4:2181")
    kafkaProps.setProperty("bootstrap.servers", "node2:9092,node3:9092,node4:9092")
    kafkaProps.setProperty("group.id", groupId)

    // Build the Kafka source: each record is deserialized as a UTF-8 string.
    val kafkaSource = new FlinkKafkaConsumer08[String](topic, new SimpleStringSchema, kafkaProps)

    /**
      * Start-position options:
      * setStartFromEarliest()     // start from the earliest record possible
      * setStartFromLatest()       // start from the latest record
      * setStartFromTimestamp(...) // start from a given epoch timestamp (ms)
      * setStartFromGroupOffsets() // the default behaviour; if the consumer
      *                            // group has no committed offset, it falls
      *                            // back to reading the latest data
      */
    kafkaSource.setStartFromGroupOffsets()

    val ds = env.addSource(kafkaSource)

    ds.print()

    // Submit the streaming job; blocks until the job terminates.
    env.execute("flink-kafka")
  }

}
