package com.wyh.apitest.sink

import java.lang
import java.nio.charset.StandardCharsets
import java.util.Properties

import com.wyh.apitest.source.SensorReading
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer, FlinkKafkaProducer011, KafkaSerializationSchema}
import org.apache.kafka.clients.producer.ProducerRecord

object KafkaSinkDemo {
  /**
   * Reads raw CSV sensor lines from the Kafka topic "sensor", parses each
   * line into a [[SensorReading]] (kept as its string form), and writes the
   * result back to the Kafka topic "sinktest" with exactly-once semantics.
   */
  def main(args: Array[String]): Unit = {
    // Set up the streaming execution environment.
    val streamEnv = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)
    // EXACTLY_ONCE only commits Kafka transactions on checkpoint completion;
    // without checkpointing enabled the sink would never commit any data.
    streamEnv.enableCheckpointing(5000)

    val properties = new Properties()

    // Kafka cluster bootstrap servers.
    properties.setProperty("bootstrap.servers", "192.168.205.100:9092,192.168.205.101:9092,192.168.205.102:9092")
    // Consumer group (optional).
    properties.setProperty("group.id", "consumer-group1")
    // Where to start when there is no committed offset. (The Flink Kafka
    // connector maintains offsets itself once checkpointing is enabled.)
    properties.setProperty("auto.offset.reset", "latest")
    // EXACTLY_ONCE uses Kafka transactions. The Flink producer's default
    // transaction timeout (1 hour) exceeds the broker default
    // transaction.max.timeout.ms (15 minutes), which makes the job fail at
    // startup — cap it at the broker maximum.
    properties.setProperty("transaction.timeout.ms", "900000")

    // Source: consume raw string records from the "sensor" topic.
    val streamKafka = streamEnv.addSource(new FlinkKafkaConsumer[String]("sensor",
      new SimpleStringSchema(),
      properties))

    // Parse "id,timestamp,temperature" into a SensorReading, keep string form.
    val dataStream = streamKafka.map(s => {
      val arr: Array[String] = s.split(",")
      SensorReading(arr(0), arr(1).toLong, arr(2).toDouble).toString
    })

    // Sink: write to the "sinktest" topic with exactly-once semantics.
    dataStream.addSink(new FlinkKafkaProducer[String]("sinktest"
      , new ResultDtSerialization("sinktest"), properties, FlinkKafkaProducer.Semantic.EXACTLY_ONCE))

    streamEnv.execute("Kafka Sink Demo")
  }
}


/**
 * Serializes each record as the UTF-8 bytes of its string value, targeting
 * the given topic. The record key is left null.
 *
 * @param topic target Kafka topic for every produced record
 */
class ResultDtSerialization(topic: String) extends KafkaSerializationSchema[String] {
  override def serialize(t: String, aLong: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] = {
    // Use an explicit charset: the no-arg getBytes() falls back to the JVM's
    // platform-default encoding, which is not portable across machines.
    // (t is already a String, so the original t.toString was redundant.)
    new ProducerRecord[Array[Byte], Array[Byte]](topic, t.getBytes(StandardCharsets.UTF_8))
  }
}