package com.shujia.flink.state

import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.connector.base.DeliveryGuarantee
import org.apache.flink.connector.kafka.sink.{KafkaRecordSerializationSchema, KafkaSink}
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.streaming.api.scala._

import java.util.Properties

/**
 * End-to-end exactly-once pipeline: Kafka topic `lines` -> split on commas -> Kafka topic `words`.
 *
 * Exactly-once with the Kafka sink relies on Kafka transactions that are committed
 * when a Flink checkpoint completes, so checkpointing MUST be enabled and a
 * transactional-id prefix MUST be configured; consumers need `isolation.level=read_committed`
 * to see only committed data.
 */
object Demo5KafkaSInkExactlyOnce {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // FIX: EXACTLY_ONCE requires checkpointing — the sink's Kafka transactions are
    // only committed on checkpoint completion. Without this, no data is ever visible
    // to a read_committed consumer. Checkpoint every 5 seconds.
    env.enableCheckpointing(5000)

    /**
     * Read data from Kafka.
     */
    val source: KafkaSource[String] = KafkaSource
      .builder[String]
      .setBootstrapServers("master:9092,node1:9092,node2:9092") // Kafka broker list
      .setTopics("lines") // topic to consume
      .setGroupId("my-group") // consumer group id
      .setStartingOffsets(OffsetsInitializer.earliest) // consume from the beginning
      //.setStartingOffsets(OffsetsInitializer.latest()) // consume from the latest offset
      .setValueOnlyDeserializer(new SimpleStringSchema()) // value deserializer
      .build

    // Build a DataStream from the Kafka source (no event-time watermarks needed here).
    val linesDS: DataStream[String] = env.fromSource(source, WatermarkStrategy.noWatermarks(), "Kafka Source")

    // Split each comma-separated line into individual words.
    val wordsDS: DataStream[String] = linesDS.flatMap(_.split(","))

    /**
     * Write the processed results back to Kafka.
     */
    val properties = new Properties()
    // The producer transaction timeout must not exceed the broker's
    // transaction.max.timeout.ms (default 15 minutes); set it to 10 minutes.
    properties.setProperty("transaction.timeout.ms", 10 * 60 * 1000 + "")

    val kafkaSink: KafkaSink[String] = KafkaSink
      .builder[String]()
      .setBootstrapServers("master:9092,node1:9092,node2:9092") // Kafka broker list
      .setKafkaProducerConfig(properties) // extra producer properties
      .setRecordSerializer(KafkaRecordSerializationSchema
        .builder[String]()
        .setTopic("words")
        .setValueSerializationSchema(new SimpleStringSchema()) // value serializer
        .build()
      )
      // Delivery semantics:
      //   AT_LEAST_ONCE — may produce duplicates on failure/recovery
      //   EXACTLY_ONCE  — transactional, no duplicates (requires checkpointing)
      .setDeliverGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
      // FIX: EXACTLY_ONCE requires a transactional-id prefix so each sink subtask
      // gets a stable, unique Kafka transactional.id; the sink fails at startup without it.
      .setTransactionalIdPrefix("demo5-kafka-sink")
      .build()

    wordsDS.sinkTo(kafkaSink)

    /**
     * To verify, read only committed data:
     * --isolation-level read_committed : consume only transactionally committed records
     * kafka-console-consumer.sh --bootstrap-server master:9092,node1:9092,node2:9092 --isolation-level read_committed --from-beginning --topic words
     */
    env.execute()
  }

}
