package com.yeming.flink.practice.sink

import java.lang
import java.nio.charset.StandardCharsets
import java.util.Properties

import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaProducer, KafkaSerializationSchema}
import org.apache.kafka.clients.producer.{ProducerConfig, ProducerRecord}

/**
 * Kafka as a sink — key/value variant.
 * Requirement: count words arriving from netcat and write each (word, count)
 * pair to Kafka as a key/value record.
 */
object KafkaSinkByKeyValue {

  def main(args: Array[String]): Unit = {

    // Set up the streaming environment with a single parallel task so the
    // console/Kafka output is easy to follow.
    val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
    streamEnv.setParallelism(1)

    // Source: read text lines from a netcat server on host "f1", port 9999.
    val stream: DataStream[String] = streamEnv.socketTextStream("f1", 9999)

    // Transform: split lines into words and keep a running count per word.
    // keyBy with a key-selector function replaces the deprecated
    // index-based keyBy(0); the resulting DataStream type is unchanged.
    val result: DataStream[(String, Int)] = stream.flatMap(_.split(" "))
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)

    // Sink: Kafka producer configuration.
    val props = new Properties()
    props.setProperty(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, "f2:9092,f3:9092,f4:9092")
    // EXACTLY_ONCE uses Kafka transactions. The connector's default
    // transaction timeout (1 hour) exceeds the broker default
    // transaction.max.timeout.ms (15 minutes), which makes the job fail at
    // runtime — cap it explicitly at 15 minutes.
    props.setProperty(ProducerConfig.TRANSACTION_TIMEOUT_CONFIG, "900000")

    result.addSink(new FlinkKafkaProducer[(String, Int)](
      "flinktest01",
      new MyKafkaSerializationSchema,
      props,
      FlinkKafkaProducer.Semantic.EXACTLY_ONCE))

    // Launch the streaming job.
    streamEnv.execute("KafkaSinkTest")
  }

  /**
   * Serializes a (word, count) pair into a Kafka record for topic
   * "flinktest01": the word becomes the key, the count (as a decimal string)
   * becomes the value, both encoded as UTF-8 bytes.
   */
  class MyKafkaSerializationSchema extends KafkaSerializationSchema[(String, Int)] {
    override def serialize(element: (String, Int), timestamp: lang.Long): ProducerRecord[Array[Byte], Array[Byte]] = {
      // Explicit UTF-8 instead of the platform-default charset of getBytes().
      new ProducerRecord(
        "flinktest01",
        element._1.getBytes(StandardCharsets.UTF_8),
        element._2.toString.getBytes(StandardCharsets.UTF_8))
    }
  }

}
