package com.wyh.apitest.source

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer

/**
  * Example: Flink job consuming records from Kafka.
  *
  *  flink version: 1.10.1
  *  kafka version: 2.7.0 (Scala 2.13 build)
  *  zookeeper version: 3.7
  *
  * // start the broker on every node
  * kafka-server-start.sh -daemon /usr/local/soft/kafka_2.13-2.7.0/config/server.properties
  * // create a console producer on any one node
  * kafka-console-producer.sh --broker-list master:9092,node1:9092,node2:9092 --topic sensor
  *
  *
  */
//Sample case class for parsed sensor readings (defined elsewhere in the project)
//case class SensorReading(id:String,timestamp:Long,temperature:Double)
object SourceKafka {

  /**
    * Entry point: reads the "sensor" topic from a Kafka cluster and prints
    * every record to stdout until the job is cancelled.
    */
  def main(args: Array[String]): Unit = {

    val env = StreamExecutionEnvironment.getExecutionEnvironment

    // Consumer configuration handed to the Flink Kafka connector.
    val kafkaProps = new Properties()

    // Broker addresses of the Kafka cluster.
    kafkaProps.setProperty("bootstrap.servers", "192.168.205.100:9092,192.168.205.101:9092,192.168.205.102:9092")
    // Consumer group id (optional for this example).
    kafkaProps.setProperty("group.id", "consumer-group")
    // No explicit key/value deserializer properties are needed here: the
    // FlinkKafkaConsumer constructor below is given a SimpleStringSchema,
    // which handles deserialization.
    // Where to start reading when no committed offset exists. NOTE(review):
    // the Flink connector can manage offsets itself as part of checkpointed
    // state (relevant for exactly-once semantics).
    kafkaProps.setProperty("auto.offset.reset", "latest")

    // Attach the Kafka source (requires the flink-connector-kafka_2.12 dependency).
    val kafkaStream = env.addSource(
      new FlinkKafkaConsumer[String]("sensor", new SimpleStringSchema(), kafkaProps))

    // Sink: print each consumed record.
    kafkaStream.print()

    // Lazily-built pipeline only runs once execute() is called.
    env.execute("Source Kafka")
  }

}