package com.shujia.sink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala._
import org.apache.flink.streaming.connectors.kafka.{FlinkKafkaConsumer, FlinkKafkaProducer}

object Demo3OmKafka {

  /**
    * Flink streaming word-count job: reads comma-separated words from the
    * Kafka topic "lines", maintains a running count per word, and writes
    * each update as "word/count" to the Kafka topic "count".
    */
  def main(args: Array[String]): Unit = {

    val env: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment

    /**
      * Kafka consumer configuration.
      *
      * Topic setup / manual test producer:
      *   kafka-topics.sh --create --zookeeper master:2181 --replication-factor 1 --partitions 4 --topic lines
      *   kafka-console-producer.sh --broker-list master:9092 --topic lines
      */
    val consumerProps = new Properties()
    consumerProps.setProperty("bootstrap.servers", "master:9092")
    consumerProps.setProperty("group.id", "asd")

    // Source: consume raw lines from the "lines" topic as plain strings.
    val kafkaSource: FlinkKafkaConsumer[String] = new FlinkKafkaConsumer[String](
      "lines",
      new SimpleStringSchema(),
      consumerProps
    )
    // Start from the earliest available record where possible.
    kafkaSource.setStartFromEarliest()

    val lines: DataStream[String] = env.addSource(kafkaSource)

    // Word count: split each line on commas, key by word, keep a running
    // sum per word, then render every update as "word/count".
    val wordCounts: DataStream[String] = lines
      .flatMap(_.split(","))
      .map(word => (word, 1))
      .keyBy(_._1)
      .sum(1)
      .map(pair => pair._1 + "/" + pair._2)

    // Sink: publish the formatted counts to the "count" topic.
    val kafkaSink: FlinkKafkaProducer[String] = new FlinkKafkaProducer[String](
      "master:9092",
      "count",
      new SimpleStringSchema()
    )
    wordCounts.addSink(kafkaSink)

    // The job graph is built lazily above; execution starts here.
    env.execute()

    /**
      * Inspect the output:
      *   kafka-console-consumer.sh --bootstrap-server  master:9092  --from-beginning --topic count
      *
      * To run this job on a cluster, upload flink-sql-connector-kafka_2.11-1.11.0
      * to Flink's lib directory.
      */

  }

}
