package com.shujia.flink.sink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer}

object Demo3Onkafka {

  /**
    * Entry point: consumes lines from the Kafka topic "lines", performs a
    * streaming word count, prints the running totals, and publishes them
    * back to the Kafka topic "count".
    */
  def main(args: Array[String]): Unit = {
    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /**
      * Kafka consumer configuration.
      *
      * Topic setup (run once on the cluster):
      *   kafka-topics.sh --create --zookeeper master:2181 --replication-factor 1 --partitions 4 --topic lines
      * Feed test data with:
      *   kafka-console-producer.sh --broker-list master:9092 --topic lines
      */
    val consumerProps = new Properties()
    consumerProps.setProperty("bootstrap.servers", "master:9092")
    consumerProps.setProperty("group.id", "asdasdasd")

    // Source: read the "lines" topic as plain strings.
    val kafkaSource = new FlinkKafkaConsumer[String](
      "lines",
      new SimpleStringSchema(),
      consumerProps)

    // Start from the earliest available record (when no committed offset exists).
    kafkaSource.setStartFromEarliest()

    val lines: DataStream[String] = env.addSource(kafkaSource)

    // Word count: split each line on commas, key by word, keep a running sum,
    // then format each update as "word<TAB>count".
    val wordCounts: DataStream[String] = lines
      .flatMap(_.split(","))
      .map(word => (word, 1))
      .keyBy(_._1)
      .sum(1)
      .map(pair => pair._1 + "\t" + pair._2)

    wordCounts.print()

    /**
      * Sink: publish the running counts to the Kafka topic "count".
      *
      * Inspect the output with:
      *   kafka-console-consumer.sh --bootstrap-server  master:9092  --from-beginning --topic count
      *
      * NOTE: when submitting the job to a cluster, flink-sql-connector-kafka_2.11-1.11.0
      * must be uploaded to Flink's lib directory.
      */
    val kafkaSink = new FlinkKafkaProducer[String](
      "master:9092", // broker list
      "count", // target topic
      new SimpleStringSchema) // serialization schema

    wordCounts.addSink(kafkaSink)

    env.execute()
  }
}
