package com.shujia.flink.sink

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.connector.base.DeliveryGuarantee
import org.apache.flink.connector.kafka.sink.{KafkaRecordSerializationSchema, KafkaSink}
import org.apache.flink.streaming.api.scala.{DataStream, StreamExecutionEnvironment}

object Demo3KafkaSink {

  /**
   * Demo: forward a socket text stream into a Kafka topic using the
   * unified `KafkaSink` connector API.
   *
   * Verify the output with the console consumer:
   * kafka-console-consumer.sh --bootstrap-server master:9092,node1:9092,node2:9092 --from-beginning --topic flink-topic
   */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // Source: plain text lines read from host "master", port 8888.
    val socketLines: DataStream[String] = env.socketTextStream("master", 8888)

    // Sink: publish each line as the record value of topic "flink-topic"
    // on the given broker cluster, with at-least-once delivery semantics.
    val sink: KafkaSink[String] =
      KafkaSink
        .builder[String]()
        .setBootstrapServers("master:9092,node1:9092,node2:9092") // broker list
        .setRecordSerializer(
          KafkaRecordSerializationSchema
            .builder()
            .setTopic("flink-topic")                              // target topic
            .setValueSerializationSchema(new SimpleStringSchema()) // value serializer
            .build()
        )
        .setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)     // delivery guarantee
        .build()

    // Wire the stream into the Kafka sink and launch the job.
    socketLines.sinkTo(sink)

    env.execute()
  }

}
