package com.yanggu.flink.datastream_api.source

import cn.hutool.core.convert.Convert
import org.apache.flink.api.common.eventtime.WatermarkStrategy
import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.connector.kafka.source.KafkaSource
import org.apache.flink.connector.kafka.source.enumerator.initializer.OffsetsInitializer
import org.apache.flink.streaming.api.scala._
import org.apache.kafka.clients.consumer.OffsetResetStrategy

import java.util.Properties


/**
 * Reads records from Kafka with the new `KafkaSource` connector and runs a
 * simple word count over the consumed strings.
 *
 * Flink Kafka connector documentation:
 * https://nightlies.apache.org/flink/flink-docs-release-1.14/docs/connectors/datastream/kafka/
 */
object SourceFromKafka {

  def main(args: Array[String]): Unit = {
    val environment = StreamExecutionEnvironment.getExecutionEnvironment

    // Kafka broker bootstrap address(es).
    val brokerList = "localhost:9092"
    // One or more topics to subscribe to, joined by ",".
    val topics = "test-topic"
    // Consumer group id used for offset tracking.
    val groupId = "test-groupId"
    // Extra kafka consumer properties (e.g. auto-commit settings).
    val prop = new Properties

    // Build the KafkaSource via its builder.
    val kafkaSource = KafkaSource.builder[String]()
      // Broker addresses.
      .setBootstrapServers(brokerList)
      // Subscribe to the comma-separated topics. A plain split replaces the
      // previous hutool Convert.toList call — no third-party helper is needed
      // for this, and trimming/filtering makes stray commas or whitespace
      // in the topic list harmless. setTopics accepts String varargs.
      .setTopics(topics.split(",").map(_.trim).filter(_.nonEmpty): _*)
      // Consumer group id.
      .setGroupId(groupId)
      // Deserialize only the record value as a String; usually sufficient.
      .setValueOnlyDeserializer(new SimpleStringSchema)
      // Any additional consumer properties.
      .setProperties(prop)
      // Start from the group's committed offsets; if none exist,
      // fall back to the earliest available offset.
      .setStartingOffsets(OffsetsInitializer.committedOffsets(OffsetResetStrategy.EARLIEST))
      .build()

    val kafkaDataStream =
      environment.fromSource(kafkaSource, WatermarkStrategy.noWatermarks(), s"Kafka $topics Source")

    // Shared helper variant, kept for reference:
    //val stream2 = KafkaUtil.getKafkaDataStream(environment, brokerList, topics, groupId, prop)

    // Classic word count: split each record on spaces, drop empty tokens,
    // key by word and keep a running sum per word.
    kafkaDataStream
      .flatMap(_.split(" "))
      .filter(_.nonEmpty)
      .map((_, 1))
      .keyBy(_._1)
      .sum(1)
      .print()

    // Submit the streaming job (blocks until the job terminates).
    environment.execute()

  }

}
