package com.mjf.day3

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer011

/**
 * Flink写入到Kafka
 */
/**
 * Flink job that writes a demo sensor stream into a Kafka topic.
 *
 * Usage: MyKafkaSink [brokerList] [topic]
 * Both arguments are optional; defaults match the original hard-coded
 * values, so running with no arguments is backward-compatible.
 */
object MyKafkaSink {
  def main(args: Array[String]): Unit = {

    // Broker list and topic are configurable via program arguments;
    // lift avoids an ArrayIndexOutOfBoundsException on a short args array.
    val brokerList: String = args.lift(0).getOrElse("hadoop103:9092")
    val topic: String = args.lift(1).getOrElse("sinkTest")

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    // Single parallel task keeps the demo output simple and ordered.
    env.setParallelism(1)

    // SensorReading records come from the demo source defined elsewhere in this project.
    val inputStream: DataStream[SensorReading] = env.addSource(new SensorSource)

    // Convert to String so SimpleStringSchema can serialize without a custom schema.
    val dataStream: DataStream[String] = inputStream.map(_.toString)

    // FlinkKafkaProducer011 targets the Kafka 0.11 connector; SimpleStringSchema
    // handles plain-string payloads without extra serialization code.
    dataStream.addSink(new FlinkKafkaProducer011[String](
      brokerList,
      topic,
      new SimpleStringSchema())
    )

    // Submit the streaming job under this name (visible in the Flink UI).
    env.execute("MyKafkaSink")

  }
}
