package com.shujia.flink.core

import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.connector.base.DeliveryGuarantee
import org.apache.flink.connector.kafka.sink.{KafkaRecordSerializationSchema, KafkaSink}
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.streaming.api.scala._

/**
 * Flink streaming job: reads comma-separated lines from the Kafka topic
 * "lines", splits them into non-empty words, and writes each word to the
 * Kafka topic "words" with exactly-once delivery semantics.
 *
 * NOTE: both the source topic ("lines") and the sink topic ("words") must be
 * created in Kafka before this job is started.
 */
object Demo5FLinkOnKafka {
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    // EXACTLY_ONCE on the Kafka sink is implemented with two-phase-commit
    // transactions that are only committed when a checkpoint completes.
    // Without checkpointing enabled no transaction would ever commit and no
    // data would become visible to downstream read_committed consumers.
    env.enableCheckpointing(5000)

    // Kafka source: consume the "lines" topic from the earliest offset.
    val linesSource: KafkaSource[String] = KafkaSource
      .builder[String]
      .setBootstrapServers("master:9092")
      .setTopics("lines")
      .setGroupId("asdasdasasd")
      .setStartingOffsets(OffsetsInitializer.earliest)
      .setValueOnlyDeserializer(new SimpleStringSchema())
      .build

    // Read the raw lines from Kafka (no event-time watermarks needed here).
    val linesDS: DataStream[String] = env.fromSource(linesSource, WatermarkStrategy.noWatermarks(), "Kafka Source")

    // Split each comma-separated line into words, dropping empty tokens
    // (e.g. from trailing commas or consecutive separators).
    val wordsDS: DataStream[String] = linesDS
      .flatMap(_.split(","))
      .filter(_.nonEmpty)

    // Kafka sink: write the words back to Kafka.
    val sink: KafkaSink[String] = KafkaSink
      .builder[String]
      // Kafka broker list
      .setBootstrapServers("master:9092")
      .setRecordSerializer(
        KafkaRecordSerializationSchema
          .builder[String]
          // target topic
          .setTopic("words")
          // value serialization format
          .setValueSerializationSchema(new SimpleStringSchema())
          .build
      )
      // Delivery semantics:
      //.setDeliverGuarantee(DeliveryGuarantee.AT_LEAST_ONCE)
      // EXACTLY_ONCE buffers records between two checkpoints inside a Kafka
      // transaction; downstream consumers must read with
      // isolation.level=read_committed to see only committed data.
      .setDeliverGuarantee(DeliveryGuarantee.EXACTLY_ONCE)
      // Required for EXACTLY_ONCE: KafkaSinkBuilder.build() rejects the
      // configuration if no transactional id prefix is set. Must be unique
      // per application sharing the same Kafka cluster.
      .setTransactionalIdPrefix("demo5-flink-on-kafka")
      // Flink's default producer transaction timeout (1 hour) exceeds the
      // Kafka broker default transaction.max.timeout.ms (15 minutes), which
      // makes the producer fail at runtime; keep it within the broker limit.
      .setProperty("transaction.timeout.ms", "600000")
      .build
    // Attach the sink to the word stream.
    wordsDS.sinkTo(sink)

    // Launch the Flink job.
    env.execute()
  }

}
