package com.chb.flink.sink

import java.util.Properties

import org.apache.flink.api.common.serialization.SimpleStringSchema
import org.apache.flink.streaming.api.scala.StreamExecutionEnvironment
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer

/**
 * Example Flink job: reads a netcat socket text stream, splits each line on
 * commas, and sinks the resulting records to a Kafka topic as plain strings.
 */
object KafkaSinkByString {
    /**
     * Entry point. Reads lines from a socket stream, splits each line on
     * commas, and writes the resulting strings to a Kafka topic.
     *
     * Optional positional args (all default to the original hard-coded values,
     * so existing invocations keep working unchanged):
     *   args(0) — socket host    (default "10.0.0.201")
     *   args(1) — socket port    (default 8888)
     *   args(2) — Kafka brokers  (default "10.0.0.201:9092")
     *   args(3) — Kafka topic    (default "test")
     */
    def main(args: Array[String]): Unit = {
        // Generalized: the formerly hard-coded endpoints become overridable defaults.
        val host    = args.lift(0).getOrElse("10.0.0.201")
        val port    = args.lift(1).map(_.toInt).getOrElse(8888)
        val brokers = args.lift(2).getOrElse("10.0.0.201:9092")
        val topic   = args.lift(3).getOrElse("test")

        val streamEnv: StreamExecutionEnvironment = StreamExecutionEnvironment.getExecutionEnvironment
        streamEnv.setParallelism(1) // default parallelism of 1 for every task

        // Brings implicit TypeInformation instances needed by the Scala DataStream API.
        import org.apache.flink.streaming.api.scala._

        // Read the real-time netcat stream; start `nc -lk <port>` before submitting the job.
        val stream1: DataStream[String] = streamEnv.socketTextStream(host, port)

        // Split each comma-separated line into individual string records.
        val result = stream1.flatMap(_.split(","))

        // Sink the records to Kafka as plain strings.
        result.addSink(new FlinkKafkaProducer[String](brokers, topic, new SimpleStringSchema()))

        // Name the job so it is identifiable in the Flink web UI / logs.
        streamEnv.execute("KafkaSinkByString")
    }
}